the-stack_0_7960 | # User role
ADMIN = 0
STAFF = 1
USER = 2
ROLE = {
ADMIN: 'admin',
STAFF: 'staff',
USER: 'user',
}
# User status
INACTIVE = 0
LOGOUT = 1
LOGIN = 2
PLAY = 3
STATUS = {
INACTIVE: 'inactive',
LOGOUT: 'logout',
LOGIN: 'login',
PLAY: 'play',
}
|
the-stack_0_7961 | import math
def fibonacciIterative(n):
if(n == 0):
return 0
if(n == 1):
return 1
first = 0
second = 1
for i in range(1,n):
tmp = first + second
first = second
second = tmp
return second
def main():
n = int(input("Enter a number: "))
if n >= 0:
print(f"Fibonacci of {n} is: {fibonacciIterative(n)}")
else:
print("Choose another number")
if __name__ == "__main__":
main()
|
the-stack_0_7963 | #!/usr/bin/python3
import os
import sys
import time
import shutil
import hashlib
projectRoot = "https://www.sansay.co.uk/jamstack"
# Parse any options set by the user on the command line.
validBooleanOptions = []
validValueOptions = ["-domainName", "-contentFolderPath", "-jekyllFolderPath", "-buildPassword"]
userOptions = {}
optionCount = 1
while optionCount < len(sys.argv):
if sys.argv[optionCount] in validBooleanOptions:
userOptions[sys.argv[optionCount]] = True
elif sys.argv[optionCount] in validValueOptions:
userOptions[sys.argv[optionCount]] = sys.argv[optionCount+1]
optionCount = optionCount + 1
optionCount = optionCount + 1
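# Example invocation (illustrative; the script filename and the values are placeholders):
#   python3 install.py -domainName example.com -contentFolderPath Content \
#       -jekyllFolderPath Jekyll -buildPassword secret
# Options not supplied here are prompted for later, when needed, via getUserOption().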
def runIfPathMissing(thePath, theCommand):
if not os.path.exists(thePath):
print("Running: " + theCommand)
os.system(theCommand)
def downloadFile(src, dest, mode=None):
print("Copying file " + src + " to " + dest)
os.system("curl -s " + projectRoot + "/" + src + " -o " + dest)
if mode is not None:
os.system("chmod " + mode + " " + dest)
def getUserOption(optionName, theMessage):
if not optionName in userOptions.keys():
userOptions[optionName] = input(theMessage + ": ")
return(userOptions[optionName])
def askUserMenu(theOptions):
for optionCount in range(0, len(theOptions)):
print(str(optionCount+1) + ": " + theOptions[optionCount])
userSelection = input("Selection: ")
return(int(userSelection))
def readFile(theFilename):
fileDataHandle = open(theFilename, "r")
fileData = fileDataHandle.read()
fileDataHandle.close()
return(fileData)
def writeFile(theFilename, theFileData):
fileDataHandle = open(theFilename, "w")
if isinstance(theFileData, list):
fileDataHandle.write("\n".join(theFileData))
else:
fileDataHandle.write(theFileData)
fileDataHandle.close()
def replaceVariables(theFile, theKeyValues):
fileData = readFile(theFile)
for keyValue in theKeyValues.keys():
fileData = fileData.replace("<<" + keyValue + ">>", theKeyValues[keyValue])
writeFile(theFile, fileData)
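# Illustrative example of the "<<KEY>>" placeholder convention handled by replaceVariables:
# a hypothetical Caddyfile template line
#     root * /var/www/<<DOMAINNAME>>
# becomes
#     root * /var/www/example.com
# after replaceVariables("/etc/caddy/Caddyfile", {"DOMAINNAME": "example.com"}).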
def runExpect(inputArray):
writeFile("temp.expect", inputArray)
os.system("expect temp.expect")
os.system("rm temp.expect")
print("Installing JAMStack...")
# Make sure dos2unix (line-end conversion utility) is installed.
runIfPathMissing("/usr/bin/dos2unix", "apt-get install -y dos2unix")
# Make sure Pip3 (Python 3 package manager) is installed.
runIfPathMissing("/usr/bin/pip3", "apt-get install -y python3-pip")
# Figure out what version of Python3 we have installed.
pythonVersion = os.popen("ls /usr/local/lib | grep python3").read().strip()
# Make sure Git (source code control client) is installed.
runIfPathMissing("/usr/bin/git", "apt-get install -y git")
# Make sure curl (utility to get files from the web) is installed.
runIfPathMissing("/usr/bin/curl", "apt-get install -y curl")
# Make sure build-essential (Debian build environment, should include most tools you need to build other packages) is installed.
runIfPathMissing("/usr/share/doc/build-essential", "apt-get install -y build-essential")
# Make sure ZLib (compression library, required for building other packages) is installed.
runIfPathMissing("/usr/share/doc/zlib1g-dev", "apt-get install -y zlib1g-dev")
# Make sure ruby-dev (the Ruby development environment, needed for Jekyll) is installed.
runIfPathMissing("/usr/share/doc/ruby-dev", "apt-get install -y ruby-dev")
# Make sure Jekyll (static site generation tool) is installed.
runIfPathMissing("/usr/local/bin/jekyll", "gem install bundler jekyll concurrent-ruby")
runIfPathMissing("/root/.bundle", "bundle install")
os.system("mkdir /.bundle > /dev/null 2>&1")
os.system("chown www-data:www-data /.bundle > /dev/null 2>&1")
# Make sure Pandoc (conversion utility for converting various file formats, in this case DOCX to Markdown) is installed.
# Note that we need at least version 2.7.1, released March 2019, as it contains a bug fix to handle O365-created DOCX files properly - the version included by Debian Stretch is not yet up to date.
runIfPathMissing("/usr/bin/pandoc", "wget https://github.com/jgm/pandoc/releases/download/2.7.1/pandoc-2.7.1-1-amd64.deb; dpkg -i pandoc-2.7.1-1-amd64.deb; rm pandoc-2.7.1-1-amd64.deb")
# Make sure Flask (Python web-publishing framework) is installed.
runIfPathMissing("/usr/local/lib/"+pythonVersion+"/dist-packages/flask", "pip3 install flask")
# Make sure XLRD (Python library for handling Excel files, required for Excel support in Pandas) is installed.
runIfPathMissing("/usr/local/lib/"+pythonVersion+"/dist-packages/xlrd", "pip3 install xlrd")
# Make sure Pandas (Python data-analysis library) is installed.
runIfPathMissing("/usr/local/lib/"+pythonVersion+"/dist-packages/pandas", "pip3 install pandas")
# Make sure Numpy (Python maths library) is installed.
runIfPathMissing("/usr/local/lib/"+pythonVersion+"/dist-packages/numpy", "pip3 install numpy")
# Make sure Expect (command-line automation utility) is installed.
runIfPathMissing("/usr/bin/expect", "apt-get -y install expect")
# Make sure rclone (for mounting cloud-based filesystems such as Google Drive) is installed.
runIfPathMissing("/usr/bin/rclone", "curl https://rclone.org/install.sh | sudo bash")
# Make sure FUSE (for mounting user filesystems, used by rclone) is installed.
runIfPathMissing("/usr/bin/fusermount", "apt-get -y install fuse")
# Make sure Caddy (web server) is installed.
runIfPathMissing("/usr/bin/caddy", "echo \"deb [trusted=yes] https://apt.fury.io/caddy/ /\" | sudo tee -a /etc/apt/sources.list.d/caddy-fury.list; apt-get update; apt-get install caddy")
getUserOption("-domainName", "Please enter this site's domain name")
# Copy over the Caddy configuration file.
downloadFile("Caddyfile", "/etc/caddy/Caddyfile", mode="0744")
replaceVariables("/etc/caddy/Caddyfile", {"DOMAINNAME":userOptions["-domainName"]})
# Make sure Web Console (simple web user interface for command-line applications) is installed...
os.system("curl -s https://www.sansay.co.uk/web-console/install.sh | sudo bash")
# ...and configured.
if not os.path.exists("/etc/webconsole/tasks/build"):
getUserOption("-buildPassword", "Please enter this site's build password")
os.system("webconsole --new --newTaskID build --newTaskTitle \"Build Site\" --newTaskSecret " + userOptions["-buildPassword"] + " --newTaskPublic N --newTaskCommand \"bash build.sh\"")
downloadFile("webconsoleConfig.csv", "/etc/webconsole/config.csv", mode="0744")
# Make sure Rclone is set up to connect to the user's cloud storage - we might need to ask the user for some details.
if not os.path.exists("/root/.config/rclone/rclone.conf"):
print("Configuring rclone...")
getUserOption("-contentFolderPath", "Please enter the path that contains the content")
getUserOption("-jekyllFolderPath", "Please enter the path that contains the Jekyll setup")
runExpect([
"spawn /usr/bin/rclone config",
"expect \"n/s/q>\"",
"send \"n\\r\"",
"expect \"name>\"",
"send \"drive\\r\"",
"expect \"Storage>\"",
"send \"drive\\r\"",
"expect \"client_id>\"",
"expect_user -timeout 3600 -re \"(.*)\\n\"",
"send \"$expect_out(1,string)\\r\"",
"expect \"client_secret>\"",
"expect_user -timeout 3600 -re \"(.*)\\n\"",
"send \"$expect_out(1,string)\\r\"",
"expect \"scope>\"",
"send \"drive.readonly\\r\"",
"expect \"root_folder_id>\"",
"send \"\\r\"",
"expect \"service_account_file>\"",
"send \"\\r\"",
"expect \"y/n>\"",
"send \"n\\r\"",
"expect \"y/n>\"",
"send \"n\\r\"",
"expect \"Enter verification code>\"",
"expect_user -timeout 3600 -re \"(.*)\\n\"",
"send \"$expect_out(1,string)\\r\"",
"expect \"y/n>\"",
"send \"n\\r\"",
"expect \"y/e/d>\"",
"send \"y\\r\"",
"expect \"e/n/d/r/c/s/q>\"",
"send \"n\\r\"",
"expect \"name>\"",
"send \"content\\r\"",
"expect \"Storage>\"",
"send \"cache\\r\"",
"expect \"remote>\"",
"send \"drive:"+userOptions["-contentFolderPath"]+"\\r\"",
"expect \"plex_url>\"",
"send \"\\r\"",
"expect \"plex_username>\"",
"send \"\\r\"",
"expect \"y/g/n>\"",
"send \"n\\r\"",
"expect \"chunk_size>\"",
"send \"10M\\r\"",
"expect \"info_age>\"",
"send \"1y\\r\"",
"expect \"chunk_total_size>\"",
"send \"1G\\r\"",
"expect \"y/n>\"",
"send \"n\\r\"",
"expect \"y/e/d>\"",
"send \"y\\r\"",
"expect \"e/n/d/r/c/s/q>\"",
"send \"n\\r\"",
"expect \"name>\"",
"send \"jekyll\\r\"",
"expect \"Storage>\"",
"send \"cache\\r\"",
"expect \"remote>\"",
"send \"drive:"+userOptions["-jekyllFolderPath"]+"\\r\"",
"expect \"plex_url>\"",
"send \"\\r\"",
"expect \"plex_username>\"",
"send \"\\r\"",
"expect \"y/g/n>\"",
"send \"n\\r\"",
"expect \"chunk_size>\"",
"send \"10M\\r\"",
"expect \"info_age>\"",
"send \"1y\\r\"",
"expect \"chunk_total_size>\"",
"send \"1G\\r\"",
"expect \"y/n>\"",
"send \"n\\r\"",
"expect \"y/e/d>\"",
"send \"y\\r\"",
"send \"q\\r\""
])
# Set up rclone to mount the user's cloud storage - first, stop any existing rclone mount process...
os.system("systemctl stop rclone-content")
os.system("systemctl stop rclone-jekyll")
# ...make sure FUSE is configured to allow non-root users to access mounts...
downloadFile("fuse.conf", "/etc/fuse.conf", mode="644")
# ...make sure the mount point and cache folders exist...
os.makedirs("/mnt/content", exist_ok=True)
os.makedirs("/mnt/jekyll", exist_ok=True)
os.makedirs("/var/cache/rclone-content", exist_ok=True)
os.makedirs("/var/cache/rclone-jekyll", exist_ok=True)
# ...then set up systemd to mount the repository.
downloadFile("rclone-content.service", "/etc/systemd/system/rclone-content.service", mode="644")
downloadFile("rclone-jekyll.service", "/etc/systemd/system/rclone-jekyll.service", mode="644")
os.system("systemctl start rclone-content")
os.system("systemctl start rclone-jekyll")
os.system("systemctl enable rclone-content")
os.system("systemctl enable rclone-jekyll")
# Copy across the build.sh script.
downloadFile("build.sh", "/etc/webconsole/tasks/build/build.sh", mode="755")
# Copy over the Python script that cleans up HTML files.
downloadFile("tidyHTML.py", "/usr/local/bin/tidyHTML.py", mode="0755")
os.system("chown www-data:www-data /usr/local/bin/tidyHTML.py")
# Install DocsToMarkdown.
runIfPathMissing("/usr/local/bin/docsToMarkdown.py", "curl https://raw.githubusercontent.com/dhicks6345789/docs-to-markdown/master/docsToMarkdown.py -o /usr/local/bin/docsToMarkdown.py; chmod a+x /usr/local/bin/docsToMarkdown.py; echo > /var/log/build.log; chown www-data:www-data /var/log/build.log")
runIfPathMissing("/var/local/jekyll", "mkdir /var/local/jekyll; chown www-data:www-data /var/local/jekyll")
downloadFile("docsToMarkdown.json", "/var/local/docsToMarkdown.json", mode="644")
os.system("chown www-data:www-data /var/local/docsToMarkdown.json")
|
the-stack_0_7964 | from socket import *
serverName = '0-pc'
serverPort = 12001
clientSocket = socket(AF_INET, SOCK_STREAM)
clientSocket.connect((serverName, serverPort))
sentence = input("Input a lowercase sentence: ")
clientSocket.send(sentence.encode())  # encode the string to bytes before sending
modifiedSentence = clientSocket.recv(1024)
print("from server:", modifiedSentence.decode())
clientSocket.close()
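# Companion sketch (assumed, not shown in this snippet): the matching TCP server,
# which presumably returns the sentence in uppercase on the same port.
#
#   serverSocket = socket(AF_INET, SOCK_STREAM)
#   serverSocket.bind(('', 12001))
#   serverSocket.listen(1)
#   while True:
#       connectionSocket, addr = serverSocket.accept()
#       sentence = connectionSocket.recv(1024).decode()
#       connectionSocket.send(sentence.upper().encode())
#       connectionSocket.close()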
|
the-stack_0_7965 | import json
import os
from torch.utils.data import Dataset
from config import global_config
from pipeline.actor import Actor
class EvalDataset(Dataset):
def __init__(self, dataset_file, memory, controller):
with open(dataset_file, "r") as f:
self.dataset = json.load(f)
self.folder = os.path.dirname(dataset_file)
self.memory = memory
self.lut = Actor._make_depth_correction(global_config.resolution, global_config.resolution, 90)
self.controller = controller
def __len__(self):
return len(self.dataset)
def load_meta(self, thor_meta):
scene = thor_meta["scene"]
seed = thor_meta["seed"]
position = thor_meta["position"]
rotation = thor_meta["rotation"]
horizon = thor_meta["horizon"]
self.controller.reset(scene)
self.controller.step(action='InitialRandomSpawn', seed=seed,
forceVisible=True, numPlacementAttempts=5)
self.controller.step(action='MakeAllObjectsMoveable')
event = self.controller.step(action='TeleportFull', x=position['x'], y=position['y'],
z=position['z'], rotation=rotation, horizon=horizon)
return event
def __getitem__(self, item):
entry = self.dataset[item]
evt = self.load_meta(entry["thor_meta"])
rgb = evt.frame.copy()
if global_config.depth:
dist = (evt.depth_frame.copy() - .1) * self.lut
rgbd = self.memory.base_image_transform((rgb, dist))
else:
rgbd = self.memory.base_image_transform(rgb)
return rgbd, entry["image_id"]
class ActiveDataset(EvalDataset):
def __init__(self, dataset_file, memory, controller, conn):
super().__init__(dataset_file, memory, controller)
self.conn = conn
def process(self):
while True:
item = self.conn.recv()
if item is None:
break
self.conn.send(self.__getitem__(item))
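# Usage sketch (hypothetical; dataset_file, memory and controller are assumed to exist):
# ActiveDataset.process serves items over a connection - the driver sends an index,
# reads back (rgbd, image_id), and finally sends None to stop the loop.
#
#   from multiprocessing import Pipe
#   from threading import Thread
#
#   parent_conn, child_conn = Pipe()
#   dataset = ActiveDataset(dataset_file, memory, controller, child_conn)
#   Thread(target=dataset.process, daemon=True).start()
#   parent_conn.send(0)               # request item 0
#   rgbd, image_id = parent_conn.recv()
#   parent_conn.send(None)            # stop the serving loop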
|
the-stack_0_7966 | from datasette.plugins import DEFAULT_PLUGINS
from datasette.utils import detect_json1
from datasette.version import __version__
from .fixtures import ( # noqa
app_client,
app_client_no_files,
app_client_with_hash,
app_client_shorter_time_limit,
app_client_larger_cache_size,
app_client_returned_rows_matches_page_size,
app_client_two_attached_databases,
app_client_two_attached_databases_one_immutable,
app_client_conflicting_database_names,
app_client_with_cors,
app_client_with_dot,
app_client_immutable_and_inspect_file,
generate_compound_rows,
generate_sortable_rows,
make_app_client,
EXPECTED_PLUGINS,
METADATA,
)
import json
import pytest
import sys
import urllib
def test_homepage(app_client):
response = app_client.get("/.json")
assert response.status == 200
assert "application/json; charset=utf-8" == response.headers["content-type"]
assert response.json.keys() == {"fixtures": 0}.keys()
d = response.json["fixtures"]
assert d["name"] == "fixtures"
assert d["tables_count"] == 24
assert len(d["tables_and_views_truncated"]) == 5
assert d["tables_and_views_more"] is True
# 4 hidden FTS tables + no_primary_key (hidden in metadata)
assert d["hidden_tables_count"] == 5
# 201 in no_primary_key, plus 5 in other hidden tables:
assert d["hidden_table_rows_sum"] == 206
assert d["views_count"] == 4
def test_homepage_sort_by_relationships(app_client):
response = app_client.get("/.json?_sort=relationships")
assert response.status == 200
tables = [
t["name"] for t in response.json["fixtures"]["tables_and_views_truncated"]
]
assert [
"simple_primary_key",
"complex_foreign_keys",
"roadside_attraction_characteristics",
"searchable_tags",
"foreign_key_references",
] == tables
def test_database_page(app_client):
response = app_client.get("/fixtures.json")
assert response.status == 200
data = response.json
assert "fixtures" == data["database"]
assert [
{
"name": "123_starts_with_digits",
"columns": ["content"],
"primary_keys": [],
"count": 0,
"hidden": False,
"fts_table": None,
"foreign_keys": {"incoming": [], "outgoing": []},
"private": False,
},
{
"name": "Table With Space In Name",
"columns": ["pk", "content"],
"primary_keys": ["pk"],
"count": 0,
"hidden": False,
"fts_table": None,
"foreign_keys": {"incoming": [], "outgoing": []},
"private": False,
},
{
"name": "attraction_characteristic",
"columns": ["pk", "name"],
"primary_keys": ["pk"],
"count": 2,
"hidden": False,
"fts_table": None,
"foreign_keys": {
"incoming": [
{
"other_table": "roadside_attraction_characteristics",
"column": "pk",
"other_column": "characteristic_id",
}
],
"outgoing": [],
},
"private": False,
},
{
"name": "binary_data",
"columns": ["data"],
"primary_keys": [],
"count": 3,
"hidden": False,
"fts_table": None,
"foreign_keys": {"incoming": [], "outgoing": []},
"private": False,
},
{
"name": "complex_foreign_keys",
"columns": ["pk", "f1", "f2", "f3"],
"primary_keys": ["pk"],
"count": 1,
"hidden": False,
"fts_table": None,
"foreign_keys": {
"incoming": [],
"outgoing": [
{
"other_table": "simple_primary_key",
"column": "f3",
"other_column": "id",
},
{
"other_table": "simple_primary_key",
"column": "f2",
"other_column": "id",
},
{
"other_table": "simple_primary_key",
"column": "f1",
"other_column": "id",
},
],
},
"private": False,
},
{
"name": "compound_primary_key",
"columns": ["pk1", "pk2", "content"],
"primary_keys": ["pk1", "pk2"],
"count": 1,
"hidden": False,
"fts_table": None,
"foreign_keys": {"incoming": [], "outgoing": []},
"private": False,
},
{
"name": "compound_three_primary_keys",
"columns": ["pk1", "pk2", "pk3", "content"],
"primary_keys": ["pk1", "pk2", "pk3"],
"count": 1001,
"hidden": False,
"fts_table": None,
"foreign_keys": {"incoming": [], "outgoing": []},
"private": False,
},
{
"name": "custom_foreign_key_label",
"columns": ["pk", "foreign_key_with_custom_label"],
"primary_keys": ["pk"],
"count": 1,
"hidden": False,
"fts_table": None,
"foreign_keys": {
"incoming": [],
"outgoing": [
{
"other_table": "primary_key_multiple_columns_explicit_label",
"column": "foreign_key_with_custom_label",
"other_column": "id",
}
],
},
"private": False,
},
{
"name": "facet_cities",
"columns": ["id", "name"],
"primary_keys": ["id"],
"count": 4,
"hidden": False,
"fts_table": None,
"foreign_keys": {
"incoming": [
{
"other_table": "facetable",
"column": "id",
"other_column": "city_id",
}
],
"outgoing": [],
},
"private": False,
},
{
"name": "facetable",
"columns": [
"pk",
"created",
"planet_int",
"on_earth",
"state",
"city_id",
"neighborhood",
"tags",
"complex_array",
"distinct_some_null",
],
"primary_keys": ["pk"],
"count": 15,
"hidden": False,
"fts_table": None,
"foreign_keys": {
"incoming": [],
"outgoing": [
{
"other_table": "facet_cities",
"column": "city_id",
"other_column": "id",
}
],
},
"private": False,
},
{
"name": "foreign_key_references",
"columns": ["pk", "foreign_key_with_label", "foreign_key_with_no_label"],
"primary_keys": ["pk"],
"count": 2,
"hidden": False,
"fts_table": None,
"foreign_keys": {
"incoming": [],
"outgoing": [
{
"other_table": "primary_key_multiple_columns",
"column": "foreign_key_with_no_label",
"other_column": "id",
},
{
"other_table": "simple_primary_key",
"column": "foreign_key_with_label",
"other_column": "id",
},
],
},
"private": False,
},
{
"name": "infinity",
"columns": ["value"],
"primary_keys": [],
"count": 3,
"hidden": False,
"fts_table": None,
"foreign_keys": {"incoming": [], "outgoing": []},
"private": False,
},
{
"name": "primary_key_multiple_columns",
"columns": ["id", "content", "content2"],
"primary_keys": ["id"],
"count": 1,
"hidden": False,
"fts_table": None,
"foreign_keys": {
"incoming": [
{
"other_table": "foreign_key_references",
"column": "id",
"other_column": "foreign_key_with_no_label",
}
],
"outgoing": [],
},
"private": False,
},
{
"name": "primary_key_multiple_columns_explicit_label",
"columns": ["id", "content", "content2"],
"primary_keys": ["id"],
"count": 1,
"hidden": False,
"fts_table": None,
"foreign_keys": {
"incoming": [
{
"other_table": "custom_foreign_key_label",
"column": "id",
"other_column": "foreign_key_with_custom_label",
}
],
"outgoing": [],
},
"private": False,
},
{
"name": "roadside_attraction_characteristics",
"columns": ["attraction_id", "characteristic_id"],
"primary_keys": [],
"count": 5,
"hidden": False,
"fts_table": None,
"foreign_keys": {
"incoming": [],
"outgoing": [
{
"other_table": "attraction_characteristic",
"column": "characteristic_id",
"other_column": "pk",
},
{
"other_table": "roadside_attractions",
"column": "attraction_id",
"other_column": "pk",
},
],
},
"private": False,
},
{
"name": "roadside_attractions",
"columns": ["pk", "name", "address", "latitude", "longitude"],
"primary_keys": ["pk"],
"count": 4,
"hidden": False,
"fts_table": None,
"foreign_keys": {
"incoming": [
{
"other_table": "roadside_attraction_characteristics",
"column": "pk",
"other_column": "attraction_id",
}
],
"outgoing": [],
},
"private": False,
},
{
"name": "searchable",
"columns": ["pk", "text1", "text2", "name with . and spaces"],
"primary_keys": ["pk"],
"count": 2,
"hidden": False,
"fts_table": "searchable_fts",
"foreign_keys": {
"incoming": [
{
"other_table": "searchable_tags",
"column": "pk",
"other_column": "searchable_id",
}
],
"outgoing": [],
},
"private": False,
},
{
"name": "searchable_tags",
"columns": ["searchable_id", "tag"],
"primary_keys": ["searchable_id", "tag"],
"count": 2,
"hidden": False,
"fts_table": None,
"foreign_keys": {
"incoming": [],
"outgoing": [
{"other_table": "tags", "column": "tag", "other_column": "tag"},
{
"other_table": "searchable",
"column": "searchable_id",
"other_column": "pk",
},
],
},
"private": False,
},
{
"name": "select",
"columns": ["group", "having", "and", "json"],
"primary_keys": [],
"count": 1,
"hidden": False,
"fts_table": None,
"foreign_keys": {"incoming": [], "outgoing": []},
"private": False,
},
{
"name": "simple_primary_key",
"columns": ["id", "content"],
"primary_keys": ["id"],
"count": 4,
"hidden": False,
"fts_table": None,
"foreign_keys": {
"incoming": [
{
"other_table": "foreign_key_references",
"column": "id",
"other_column": "foreign_key_with_label",
},
{
"other_table": "complex_foreign_keys",
"column": "id",
"other_column": "f3",
},
{
"other_table": "complex_foreign_keys",
"column": "id",
"other_column": "f2",
},
{
"other_table": "complex_foreign_keys",
"column": "id",
"other_column": "f1",
},
],
"outgoing": [],
},
"private": False,
},
{
"name": "sortable",
"columns": [
"pk1",
"pk2",
"content",
"sortable",
"sortable_with_nulls",
"sortable_with_nulls_2",
"text",
],
"primary_keys": ["pk1", "pk2"],
"count": 201,
"hidden": False,
"fts_table": None,
"foreign_keys": {"incoming": [], "outgoing": []},
"private": False,
},
{
"name": "table/with/slashes.csv",
"columns": ["pk", "content"],
"primary_keys": ["pk"],
"count": 1,
"hidden": False,
"fts_table": None,
"foreign_keys": {"incoming": [], "outgoing": []},
"private": False,
},
{
"name": "tags",
"columns": ["tag"],
"primary_keys": ["tag"],
"count": 2,
"hidden": False,
"fts_table": None,
"foreign_keys": {
"incoming": [
{
"other_table": "searchable_tags",
"column": "tag",
"other_column": "tag",
}
],
"outgoing": [],
},
"private": False,
},
{
"name": "units",
"columns": ["pk", "distance", "frequency"],
"primary_keys": ["pk"],
"count": 3,
"hidden": False,
"fts_table": None,
"foreign_keys": {"incoming": [], "outgoing": []},
"private": False,
},
{
"name": "no_primary_key",
"columns": ["content", "a", "b", "c"],
"primary_keys": [],
"count": 201,
"hidden": True,
"fts_table": None,
"foreign_keys": {"incoming": [], "outgoing": []},
"private": False,
},
{
"name": "searchable_fts",
"columns": ["text1", "text2", "name with . and spaces", "content"],
"primary_keys": [],
"count": 2,
"hidden": True,
"fts_table": "searchable_fts",
"foreign_keys": {"incoming": [], "outgoing": []},
"private": False,
},
{
"name": "searchable_fts_content",
"columns": [
"docid",
"c0text1",
"c1text2",
"c2name with . and spaces",
"c3content",
],
"primary_keys": ["docid"],
"count": 2,
"hidden": True,
"fts_table": None,
"foreign_keys": {"incoming": [], "outgoing": []},
"private": False,
},
{
"name": "searchable_fts_segdir",
"columns": [
"level",
"idx",
"start_block",
"leaves_end_block",
"end_block",
"root",
],
"primary_keys": ["level", "idx"],
"count": 1,
"hidden": True,
"fts_table": None,
"foreign_keys": {"incoming": [], "outgoing": []},
"private": False,
},
{
"name": "searchable_fts_segments",
"columns": ["blockid", "block"],
"primary_keys": ["blockid"],
"count": 0,
"hidden": True,
"fts_table": None,
"foreign_keys": {"incoming": [], "outgoing": []},
"private": False,
},
] == data["tables"]
def test_no_files_uses_memory_database(app_client_no_files):
response = app_client_no_files.get("/.json")
assert response.status == 200
assert {
":memory:": {
"hash": None,
"color": "f7935d",
"hidden_table_rows_sum": 0,
"hidden_tables_count": 0,
"name": ":memory:",
"show_table_row_counts": False,
"path": "/:memory:",
"table_rows_sum": 0,
"tables_count": 0,
"tables_and_views_more": False,
"tables_and_views_truncated": [],
"views_count": 0,
"private": False,
}
} == response.json
# Try that SQL query
response = app_client_no_files.get(
"/:memory:.json?sql=select+sqlite_version()&_shape=array"
)
assert 1 == len(response.json)
assert ["sqlite_version()"] == list(response.json[0].keys())
def test_database_page_for_database_with_dot_in_name(app_client_with_dot):
response = app_client_with_dot.get("/fixtures.dot.json")
assert 200 == response.status
def test_custom_sql(app_client):
response = app_client.get(
"/fixtures.json?sql=select+content+from+simple_primary_key&_shape=objects"
)
data = response.json
assert {"sql": "select content from simple_primary_key", "params": {}} == data[
"query"
]
assert [
{"content": "hello"},
{"content": "world"},
{"content": ""},
{"content": "RENDER_CELL_DEMO"},
] == data["rows"]
assert ["content"] == data["columns"]
assert "fixtures" == data["database"]
assert not data["truncated"]
def test_sql_time_limit(app_client_shorter_time_limit):
response = app_client_shorter_time_limit.get("/fixtures.json?sql=select+sleep(0.5)")
assert 400 == response.status
assert "SQL Interrupted" == response.json["title"]
def test_custom_sql_time_limit(app_client):
response = app_client.get("/fixtures.json?sql=select+sleep(0.01)")
assert 200 == response.status
response = app_client.get("/fixtures.json?sql=select+sleep(0.01)&_timelimit=5")
assert 400 == response.status
assert "SQL Interrupted" == response.json["title"]
def test_invalid_custom_sql(app_client):
response = app_client.get("/fixtures.json?sql=.schema")
assert response.status == 400
assert response.json["ok"] is False
assert "Statement must be a SELECT" == response.json["error"]
def test_table_json(app_client):
response = app_client.get("/fixtures/simple_primary_key.json?_shape=objects")
assert response.status == 200
data = response.json
assert (
data["query"]["sql"]
== "select id, content from simple_primary_key order by id limit 51"
)
assert data["query"]["params"] == {}
assert data["rows"] == [
{"id": "1", "content": "hello"},
{"id": "2", "content": "world"},
{"id": "3", "content": ""},
{"id": "4", "content": "RENDER_CELL_DEMO"},
]
def test_table_not_exists_json(app_client):
assert {
"ok": False,
"error": "Table not found: blah",
"status": 404,
"title": None,
} == app_client.get("/fixtures/blah.json").json
def test_jsono_redirects_to_shape_objects(app_client_with_hash):
response_1 = app_client_with_hash.get(
"/fixtures/simple_primary_key.jsono", allow_redirects=False
)
response = app_client_with_hash.get(
response_1.headers["Location"], allow_redirects=False
)
assert response.status == 302
assert response.headers["Location"].endswith("?_shape=objects")
def test_table_shape_arrays(app_client):
response = app_client.get("/fixtures/simple_primary_key.json?_shape=arrays")
assert [
["1", "hello"],
["2", "world"],
["3", ""],
["4", "RENDER_CELL_DEMO"],
] == response.json["rows"]
def test_table_shape_arrayfirst(app_client):
response = app_client.get(
"/fixtures.json?"
+ urllib.parse.urlencode(
{
"sql": "select content from simple_primary_key order by id",
"_shape": "arrayfirst",
}
)
)
assert ["hello", "world", "", "RENDER_CELL_DEMO"] == response.json
def test_table_shape_objects(app_client):
response = app_client.get("/fixtures/simple_primary_key.json?_shape=objects")
assert [
{"id": "1", "content": "hello"},
{"id": "2", "content": "world"},
{"id": "3", "content": ""},
{"id": "4", "content": "RENDER_CELL_DEMO"},
] == response.json["rows"]
def test_table_shape_array(app_client):
response = app_client.get("/fixtures/simple_primary_key.json?_shape=array")
assert [
{"id": "1", "content": "hello"},
{"id": "2", "content": "world"},
{"id": "3", "content": ""},
{"id": "4", "content": "RENDER_CELL_DEMO"},
] == response.json
def test_table_shape_array_nl(app_client):
response = app_client.get("/fixtures/simple_primary_key.json?_shape=array&_nl=on")
lines = response.text.split("\n")
results = [json.loads(line) for line in lines]
assert [
{"id": "1", "content": "hello"},
{"id": "2", "content": "world"},
{"id": "3", "content": ""},
{"id": "4", "content": "RENDER_CELL_DEMO"},
] == results
def test_table_shape_invalid(app_client):
response = app_client.get("/fixtures/simple_primary_key.json?_shape=invalid")
assert {
"ok": False,
"error": "Invalid _shape: invalid",
"status": 400,
"title": None,
} == response.json
def test_table_shape_object(app_client):
response = app_client.get("/fixtures/simple_primary_key.json?_shape=object")
assert {
"1": {"id": "1", "content": "hello"},
"2": {"id": "2", "content": "world"},
"3": {"id": "3", "content": ""},
"4": {"id": "4", "content": "RENDER_CELL_DEMO"},
} == response.json
def test_table_shape_object_compound_primary_Key(app_client):
response = app_client.get("/fixtures/compound_primary_key.json?_shape=object")
assert {"a,b": {"pk1": "a", "pk2": "b", "content": "c"}} == response.json
@pytest.mark.xfail
def test_table_with_slashes_in_name(app_client):
response = app_client.get(
"/fixtures/table%2Fwith%2Fslashes.csv?_shape=objects&_format=json"
)
assert response.status == 200
data = response.json
assert data["rows"] == [{"pk": "3", "content": "hey"}]
def test_table_with_reserved_word_name(app_client):
response = app_client.get("/fixtures/select.json?_shape=objects")
assert response.status == 200
data = response.json
assert data["rows"] == [
{
"rowid": 1,
"group": "group",
"having": "having",
"and": "and",
"json": '{"href": "http://example.com/", "label":"Example"}',
}
]
@pytest.mark.parametrize(
"path,expected_rows,expected_pages",
[
("/fixtures/no_primary_key.json", 201, 5),
("/fixtures/paginated_view.json", 201, 9),
("/fixtures/no_primary_key.json?_size=25", 201, 9),
("/fixtures/paginated_view.json?_size=50", 201, 5),
("/fixtures/paginated_view.json?_size=max", 201, 3),
("/fixtures/123_starts_with_digits.json", 0, 1),
# Ensure faceting doesn't break pagination:
("/fixtures/compound_three_primary_keys.json?_facet=pk1", 1001, 21),
# Paginating while sorted by an expanded foreign key should work
(
"/fixtures/roadside_attraction_characteristics.json?_size=2&_sort=attraction_id&_labels=on",
5,
3,
),
],
)
def test_paginate_tables_and_views(app_client, path, expected_rows, expected_pages):
fetched = []
count = 0
while path:
response = app_client.get(path)
assert 200 == response.status
count += 1
fetched.extend(response.json["rows"])
path = response.json["next_url"]
if path:
assert urllib.parse.urlencode({"_next": response.json["next"]}) in path
path = path.replace("http://localhost", "")
assert count < 30, "Possible infinite loop detected"
assert expected_rows == len(fetched)
assert expected_pages == count
@pytest.mark.parametrize(
"path,expected_error",
[
("/fixtures/no_primary_key.json?_size=-4", "_size must be a positive integer"),
("/fixtures/no_primary_key.json?_size=dog", "_size must be a positive integer"),
("/fixtures/no_primary_key.json?_size=1001", "_size must be <= 100"),
],
)
def test_validate_page_size(app_client, path, expected_error):
response = app_client.get(path)
assert expected_error == response.json["error"]
assert 400 == response.status
def test_page_size_zero(app_client):
"For _size=0 we return the counts, empty rows and no continuation token"
response = app_client.get("/fixtures/no_primary_key.json?_size=0")
assert 200 == response.status
assert [] == response.json["rows"]
assert 201 == response.json["filtered_table_rows_count"]
assert None is response.json["next"]
assert None is response.json["next_url"]
def test_paginate_compound_keys(app_client):
fetched = []
path = "/fixtures/compound_three_primary_keys.json?_shape=objects"
page = 0
while path:
page += 1
response = app_client.get(path)
fetched.extend(response.json["rows"])
path = response.json["next_url"]
if path:
path = path.replace("http://localhost", "")
assert page < 100
assert 1001 == len(fetched)
assert 21 == page
# Should be correctly ordered
contents = [f["content"] for f in fetched]
expected = [r[3] for r in generate_compound_rows(1001)]
assert expected == contents
def test_paginate_compound_keys_with_extra_filters(app_client):
fetched = []
path = (
"/fixtures/compound_three_primary_keys.json?content__contains=d&_shape=objects"
)
page = 0
while path:
page += 1
assert page < 100
response = app_client.get(path)
fetched.extend(response.json["rows"])
path = response.json["next_url"]
if path:
path = path.replace("http://localhost", "")
assert 2 == page
expected = [r[3] for r in generate_compound_rows(1001) if "d" in r[3]]
assert expected == [f["content"] for f in fetched]
@pytest.mark.parametrize(
"query_string,sort_key,human_description_en",
[
("_sort=sortable", lambda row: row["sortable"], "sorted by sortable"),
(
"_sort_desc=sortable",
lambda row: -row["sortable"],
"sorted by sortable descending",
),
(
"_sort=sortable_with_nulls",
lambda row: (
1 if row["sortable_with_nulls"] is not None else 0,
row["sortable_with_nulls"],
),
"sorted by sortable_with_nulls",
),
(
"_sort_desc=sortable_with_nulls",
lambda row: (
1 if row["sortable_with_nulls"] is None else 0,
-row["sortable_with_nulls"]
if row["sortable_with_nulls"] is not None
else 0,
row["content"],
),
"sorted by sortable_with_nulls descending",
),
# text column contains '$null' - ensure it doesn't confuse pagination:
("_sort=text", lambda row: row["text"], "sorted by text"),
],
)
def test_sortable(app_client, query_string, sort_key, human_description_en):
path = "/fixtures/sortable.json?_shape=objects&{}".format(query_string)
fetched = []
page = 0
while path:
page += 1
assert page < 100
response = app_client.get(path)
assert human_description_en == response.json["human_description_en"]
fetched.extend(response.json["rows"])
path = response.json["next_url"]
if path:
path = path.replace("http://localhost", "")
assert 5 == page
expected = list(generate_sortable_rows(201))
expected.sort(key=sort_key)
assert [r["content"] for r in expected] == [r["content"] for r in fetched]
def test_sortable_and_filtered(app_client):
path = (
"/fixtures/sortable.json"
"?content__contains=d&_sort_desc=sortable&_shape=objects"
)
response = app_client.get(path)
fetched = response.json["rows"]
assert (
'where content contains "d" sorted by sortable descending'
== response.json["human_description_en"]
)
expected = [row for row in generate_sortable_rows(201) if "d" in row["content"]]
assert len(expected) == response.json["filtered_table_rows_count"]
expected.sort(key=lambda row: -row["sortable"])
assert [r["content"] for r in expected] == [r["content"] for r in fetched]
def test_sortable_argument_errors(app_client):
response = app_client.get("/fixtures/sortable.json?_sort=badcolumn")
assert "Cannot sort table by badcolumn" == response.json["error"]
response = app_client.get("/fixtures/sortable.json?_sort_desc=badcolumn2")
assert "Cannot sort table by badcolumn2" == response.json["error"]
response = app_client.get(
"/fixtures/sortable.json?_sort=sortable_with_nulls&_sort_desc=sortable"
)
assert "Cannot use _sort and _sort_desc at the same time" == response.json["error"]
def test_sortable_columns_metadata(app_client):
response = app_client.get("/fixtures/sortable.json?_sort=content")
assert "Cannot sort table by content" == response.json["error"]
# no_primary_key has ALL sort options disabled
for column in ("content", "a", "b", "c"):
response = app_client.get("/fixtures/sortable.json?_sort={}".format(column))
assert "Cannot sort table by {}".format(column) == response.json["error"]
@pytest.mark.parametrize(
"path,expected_rows",
[
(
"/fixtures/searchable.json?_search=dog",
[
[1, "barry cat", "terry dog", "panther"],
[2, "terry dog", "sara weasel", "puma"],
],
),
(
# Special keyword shouldn't break FTS query
"/fixtures/searchable.json?_search=AND",
[],
),
(
# Without _searchmode=raw this should return no results
"/fixtures/searchable.json?_search=te*+AND+do*",
[],
),
(
# _searchmode=raw
"/fixtures/searchable.json?_search=te*+AND+do*&_searchmode=raw",
[
[1, "barry cat", "terry dog", "panther"],
[2, "terry dog", "sara weasel", "puma"],
],
),
(
"/fixtures/searchable.json?_search=weasel",
[[2, "terry dog", "sara weasel", "puma"]],
),
(
"/fixtures/searchable.json?_search_text2=dog",
[[1, "barry cat", "terry dog", "panther"]],
),
(
"/fixtures/searchable.json?_search_name%20with%20.%20and%20spaces=panther",
[[1, "barry cat", "terry dog", "panther"]],
),
],
)
def test_searchable(app_client, path, expected_rows):
response = app_client.get(path)
assert expected_rows == response.json["rows"]
@pytest.mark.parametrize(
"path,expected_rows",
[
(
"/fixtures/searchable_view_configured_by_metadata.json?_search=weasel",
[[2, "terry dog", "sara weasel", "puma"]],
),
# This should return all results because search is not configured:
(
"/fixtures/searchable_view.json?_search=weasel",
[
[1, "barry cat", "terry dog", "panther"],
[2, "terry dog", "sara weasel", "puma"],
],
),
(
"/fixtures/searchable_view.json?_search=weasel&_fts_table=searchable_fts&_fts_pk=pk",
[[2, "terry dog", "sara weasel", "puma"]],
),
],
)
def test_searchable_views(app_client, path, expected_rows):
response = app_client.get(path)
assert expected_rows == response.json["rows"]
def test_searchable_invalid_column(app_client):
response = app_client.get("/fixtures/searchable.json?_search_invalid=x")
assert 400 == response.status
assert {
"ok": False,
"error": "Cannot search by that column",
"status": 400,
"title": None,
} == response.json
@pytest.mark.parametrize(
"path,expected_rows",
[
("/fixtures/simple_primary_key.json?content=hello", [["1", "hello"]]),
(
"/fixtures/simple_primary_key.json?content__contains=o",
[["1", "hello"], ["2", "world"], ["4", "RENDER_CELL_DEMO"]],
),
("/fixtures/simple_primary_key.json?content__exact=", [["3", ""]]),
(
"/fixtures/simple_primary_key.json?content__not=world",
[["1", "hello"], ["3", ""], ["4", "RENDER_CELL_DEMO"]],
),
],
)
def test_table_filter_queries(app_client, path, expected_rows):
response = app_client.get(path)
assert expected_rows == response.json["rows"]
def test_table_filter_queries_multiple_of_same_type(app_client):
response = app_client.get(
"/fixtures/simple_primary_key.json?content__not=world&content__not=hello"
)
assert [["3", ""], ["4", "RENDER_CELL_DEMO"]] == response.json["rows"]
@pytest.mark.skipif(not detect_json1(), reason="Requires the SQLite json1 module")
def test_table_filter_json_arraycontains(app_client):
response = app_client.get("/fixtures/facetable.json?tags__arraycontains=tag1")
assert [
[
1,
"2019-01-14 08:00:00",
1,
1,
"CA",
1,
"Mission",
'["tag1", "tag2"]',
'[{"foo": "bar"}]',
"one",
],
[
2,
"2019-01-14 08:00:00",
1,
1,
"CA",
1,
"Dogpatch",
'["tag1", "tag3"]',
"[]",
"two",
],
] == response.json["rows"]
def test_table_filter_extra_where(app_client):
response = app_client.get("/fixtures/facetable.json?_where=neighborhood='Dogpatch'")
assert [
[
2,
"2019-01-14 08:00:00",
1,
1,
"CA",
1,
"Dogpatch",
'["tag1", "tag3"]',
"[]",
"two",
]
] == response.json["rows"]
def test_table_filter_extra_where_invalid(app_client):
response = app_client.get("/fixtures/facetable.json?_where=neighborhood=Dogpatch'")
assert 400 == response.status
assert "Invalid SQL" == response.json["title"]
def test_table_filter_extra_where_disabled_if_no_sql_allowed():
with make_app_client(metadata={"allow_sql": {}}) as client:
response = client.get("/fixtures/facetable.json?_where=neighborhood='Dogpatch'")
assert 403 == response.status
assert "_where= is not allowed" == response.json["error"]
def test_table_through(app_client):
# Just the museums:
response = app_client.get(
'/fixtures/roadside_attractions.json?_through={"table":"roadside_attraction_characteristics","column":"characteristic_id","value":"1"}'
)
assert [
[
3,
"Burlingame Museum of PEZ Memorabilia",
"214 California Drive, Burlingame, CA 94010",
37.5793,
-122.3442,
],
[
4,
"Bigfoot Discovery Museum",
"5497 Highway 9, Felton, CA 95018",
37.0414,
-122.0725,
],
] == response.json["rows"]
assert (
'where roadside_attraction_characteristics.characteristic_id = "1"'
== response.json["human_description_en"]
)
def test_max_returned_rows(app_client):
response = app_client.get("/fixtures.json?sql=select+content+from+no_primary_key")
data = response.json
assert {"sql": "select content from no_primary_key", "params": {}} == data["query"]
assert data["truncated"]
assert 100 == len(data["rows"])
def test_view(app_client):
response = app_client.get("/fixtures/simple_view.json?_shape=objects")
assert response.status == 200
data = response.json
assert data["rows"] == [
{"upper_content": "HELLO", "content": "hello"},
{"upper_content": "WORLD", "content": "world"},
{"upper_content": "", "content": ""},
{"upper_content": "RENDER_CELL_DEMO", "content": "RENDER_CELL_DEMO"},
]
def test_row(app_client):
response = app_client.get("/fixtures/simple_primary_key/1.json?_shape=objects")
assert response.status == 200
assert [{"id": "1", "content": "hello"}] == response.json["rows"]
def test_row_format_in_querystring(app_client):
# regression test for https://github.com/simonw/datasette/issues/563
response = app_client.get(
"/fixtures/simple_primary_key/1?_format=json&_shape=objects"
)
assert response.status == 200
assert [{"id": "1", "content": "hello"}] == response.json["rows"]
@pytest.mark.xfail
def test_row_strange_table_name(app_client):
response = app_client.get(
"/fixtures/table%2Fwith%2Fslashes.csv/3.json?_shape=objects"
)
assert response.status == 200
assert [{"pk": "3", "content": "hey"}] == response.json["rows"]
def test_row_foreign_key_tables(app_client):
response = app_client.get(
"/fixtures/simple_primary_key/1.json?_extras=foreign_key_tables"
)
assert response.status == 200
assert [
{
"column": "id",
"count": 1,
"other_column": "foreign_key_with_label",
"other_table": "foreign_key_references",
},
{
"column": "id",
"count": 1,
"other_column": "f3",
"other_table": "complex_foreign_keys",
},
{
"column": "id",
"count": 0,
"other_column": "f2",
"other_table": "complex_foreign_keys",
},
{
"column": "id",
"count": 1,
"other_column": "f1",
"other_table": "complex_foreign_keys",
},
] == response.json["foreign_key_tables"]
def test_unit_filters(app_client):
response = app_client.get(
"/fixtures/units.json?distance__lt=75km&frequency__gt=1kHz"
)
assert response.status == 200
data = response.json
assert data["units"]["distance"] == "m"
assert data["units"]["frequency"] == "Hz"
assert len(data["rows"]) == 1
assert data["rows"][0][0] == 2
def test_databases_json(app_client_two_attached_databases_one_immutable):
response = app_client_two_attached_databases_one_immutable.get("/-/databases.json")
databases = response.json
assert 2 == len(databases)
extra_database, fixtures_database = databases
assert "extra database" == extra_database["name"]
assert None == extra_database["hash"]
assert True == extra_database["is_mutable"]
assert False == extra_database["is_memory"]
assert "fixtures" == fixtures_database["name"]
assert fixtures_database["hash"] is not None
assert False == fixtures_database["is_mutable"]
assert False == fixtures_database["is_memory"]
def test_metadata_json(app_client):
response = app_client.get("/-/metadata.json")
assert METADATA == response.json
def test_threads_json(app_client):
response = app_client.get("/-/threads.json")
expected_keys = {"threads", "num_threads"}
if sys.version_info >= (3, 7, 0):
expected_keys.update({"tasks", "num_tasks"})
assert expected_keys == set(response.json.keys())
def test_plugins_json(app_client):
response = app_client.get("/-/plugins.json")
assert EXPECTED_PLUGINS == sorted(response.json, key=lambda p: p["name"])
# Try with ?all=1
response = app_client.get("/-/plugins.json?all=1")
names = {p["name"] for p in response.json}
assert names.issuperset(p["name"] for p in EXPECTED_PLUGINS)
assert names.issuperset(DEFAULT_PLUGINS)
def test_versions_json(app_client):
response = app_client.get("/-/versions.json")
assert "python" in response.json
assert "3.0" == response.json.get("asgi")
assert "version" in response.json["python"]
assert "full" in response.json["python"]
assert "datasette" in response.json
assert "version" in response.json["datasette"]
assert response.json["datasette"]["version"] == __version__
assert "sqlite" in response.json
assert "version" in response.json["sqlite"]
assert "fts_versions" in response.json["sqlite"]
assert "compile_options" in response.json["sqlite"]
def test_config_json(app_client):
response = app_client.get("/-/config.json")
assert {
"default_page_size": 50,
"default_facet_size": 30,
"facet_suggest_time_limit_ms": 50,
"facet_time_limit_ms": 200,
"max_returned_rows": 100,
"sql_time_limit_ms": 200,
"allow_download": True,
"allow_facet": True,
"suggest_facets": True,
"default_cache_ttl": 5,
"default_cache_ttl_hashed": 365 * 24 * 60 * 60,
"num_sql_threads": 1,
"cache_size_kb": 0,
"allow_csv_stream": True,
"max_csv_mb": 100,
"truncate_cells_html": 2048,
"force_https_urls": False,
"hash_urls": False,
"template_debug": False,
"base_url": "/",
} == response.json
def test_page_size_matching_max_returned_rows(
app_client_returned_rows_matches_page_size,
):
fetched = []
path = "/fixtures/no_primary_key.json"
while path:
response = app_client_returned_rows_matches_page_size.get(path)
fetched.extend(response.json["rows"])
assert len(response.json["rows"]) in (1, 50)
path = response.json["next_url"]
if path:
path = path.replace("http://localhost", "")
assert 201 == len(fetched)
@pytest.mark.parametrize(
"path,expected_facet_results",
[
(
"/fixtures/facetable.json?_facet=state&_facet=city_id",
{
"state": {
"name": "state",
"hideable": True,
"type": "column",
"toggle_url": "/fixtures/facetable.json?_facet=city_id",
"results": [
{
"value": "CA",
"label": "CA",
"count": 10,
"toggle_url": "_facet=state&_facet=city_id&state=CA",
"selected": False,
},
{
"value": "MI",
"label": "MI",
"count": 4,
"toggle_url": "_facet=state&_facet=city_id&state=MI",
"selected": False,
},
{
"value": "MC",
"label": "MC",
"count": 1,
"toggle_url": "_facet=state&_facet=city_id&state=MC",
"selected": False,
},
],
"truncated": False,
},
"city_id": {
"name": "city_id",
"hideable": True,
"type": "column",
"toggle_url": "/fixtures/facetable.json?_facet=state",
"results": [
{
"value": 1,
"label": "San Francisco",
"count": 6,
"toggle_url": "_facet=state&_facet=city_id&city_id=1",
"selected": False,
},
{
"value": 2,
"label": "Los Angeles",
"count": 4,
"toggle_url": "_facet=state&_facet=city_id&city_id=2",
"selected": False,
},
{
"value": 3,
"label": "Detroit",
"count": 4,
"toggle_url": "_facet=state&_facet=city_id&city_id=3",
"selected": False,
},
{
"value": 4,
"label": "Memnonia",
"count": 1,
"toggle_url": "_facet=state&_facet=city_id&city_id=4",
"selected": False,
},
],
"truncated": False,
},
},
),
(
"/fixtures/facetable.json?_facet=state&_facet=city_id&state=MI",
{
"state": {
"name": "state",
"hideable": True,
"type": "column",
"toggle_url": "/fixtures/facetable.json?_facet=city_id&state=MI",
"results": [
{
"value": "MI",
"label": "MI",
"count": 4,
"selected": True,
"toggle_url": "_facet=state&_facet=city_id",
}
],
"truncated": False,
},
"city_id": {
"name": "city_id",
"hideable": True,
"type": "column",
"toggle_url": "/fixtures/facetable.json?_facet=state&state=MI",
"results": [
{
"value": 3,
"label": "Detroit",
"count": 4,
"selected": False,
"toggle_url": "_facet=state&_facet=city_id&state=MI&city_id=3",
}
],
"truncated": False,
},
},
),
(
"/fixtures/facetable.json?_facet=planet_int",
{
"planet_int": {
"name": "planet_int",
"hideable": True,
"type": "column",
"toggle_url": "/fixtures/facetable.json",
"results": [
{
"value": 1,
"label": 1,
"count": 14,
"selected": False,
"toggle_url": "_facet=planet_int&planet_int=1",
},
{
"value": 2,
"label": 2,
"count": 1,
"selected": False,
"toggle_url": "_facet=planet_int&planet_int=2",
},
],
"truncated": False,
}
},
),
(
# planet_int is an integer field:
"/fixtures/facetable.json?_facet=planet_int&planet_int=1",
{
"planet_int": {
"name": "planet_int",
"hideable": True,
"type": "column",
"toggle_url": "/fixtures/facetable.json?planet_int=1",
"results": [
{
"value": 1,
"label": 1,
"count": 14,
"selected": True,
"toggle_url": "_facet=planet_int",
}
],
"truncated": False,
}
},
),
],
)
def test_facets(app_client, path, expected_facet_results):
response = app_client.get(path)
facet_results = response.json["facet_results"]
# We only compare the querystring portion of the toggle_url
for facet_name, facet_info in facet_results.items():
assert facet_name == facet_info["name"]
assert False is facet_info["truncated"]
for facet_value in facet_info["results"]:
facet_value["toggle_url"] = facet_value["toggle_url"].split("?")[1]
assert expected_facet_results == facet_results
def test_suggested_facets(app_client):
suggestions = [
{
"name": suggestion["name"],
"querystring": suggestion["toggle_url"].split("?")[-1],
}
for suggestion in app_client.get("/fixtures/facetable.json").json[
"suggested_facets"
]
]
expected = [
{"name": "created", "querystring": "_facet=created"},
{"name": "planet_int", "querystring": "_facet=planet_int"},
{"name": "on_earth", "querystring": "_facet=on_earth"},
{"name": "state", "querystring": "_facet=state"},
{"name": "city_id", "querystring": "_facet=city_id"},
{"name": "neighborhood", "querystring": "_facet=neighborhood"},
{"name": "tags", "querystring": "_facet=tags"},
{"name": "complex_array", "querystring": "_facet=complex_array"},
{"name": "created", "querystring": "_facet_date=created"},
]
if detect_json1():
expected.append({"name": "tags", "querystring": "_facet_array=tags"})
assert expected == suggestions
def test_allow_facet_off():
with make_app_client(config={"allow_facet": False}) as client:
assert 400 == client.get("/fixtures/facetable.json?_facet=planet_int").status
# Should not suggest any facets either:
assert [] == client.get("/fixtures/facetable.json").json["suggested_facets"]
def test_suggest_facets_off():
with make_app_client(config={"suggest_facets": False}) as client:
# Now suggested_facets should be []
assert [] == client.get("/fixtures/facetable.json").json["suggested_facets"]
def test_expand_labels(app_client):
response = app_client.get(
"/fixtures/facetable.json?_shape=object&_labels=1&_size=2"
"&neighborhood__contains=c"
)
assert {
"2": {
"pk": 2,
"created": "2019-01-14 08:00:00",
"planet_int": 1,
"on_earth": 1,
"state": "CA",
"city_id": {"value": 1, "label": "San Francisco"},
"neighborhood": "Dogpatch",
"tags": '["tag1", "tag3"]',
"complex_array": "[]",
"distinct_some_null": "two",
},
"13": {
"pk": 13,
"created": "2019-01-17 08:00:00",
"planet_int": 1,
"on_earth": 1,
"state": "MI",
"city_id": {"value": 3, "label": "Detroit"},
"neighborhood": "Corktown",
"tags": "[]",
"complex_array": "[]",
"distinct_some_null": None,
},
} == response.json
def test_expand_label(app_client):
response = app_client.get(
"/fixtures/foreign_key_references.json?_shape=object"
"&_label=foreign_key_with_label&_size=1"
)
assert {
"1": {
"pk": "1",
"foreign_key_with_label": {"value": "1", "label": "hello"},
"foreign_key_with_no_label": "1",
}
} == response.json
@pytest.mark.parametrize(
"path,expected_cache_control",
[
("/fixtures/facetable.json", "max-age=5"),
("/fixtures/facetable.json?_ttl=invalid", "max-age=5"),
("/fixtures/facetable.json?_ttl=10", "max-age=10"),
("/fixtures/facetable.json?_ttl=0", "no-cache"),
],
)
def test_ttl_parameter(app_client, path, expected_cache_control):
response = app_client.get(path)
assert expected_cache_control == response.headers["Cache-Control"]
@pytest.mark.parametrize(
"path,expected_redirect",
[
("/fixtures/facetable.json?_hash=1", "/fixtures-HASH/facetable.json"),
(
"/fixtures/facetable.json?city_id=1&_hash=1",
"/fixtures-HASH/facetable.json?city_id=1",
),
],
)
def test_hash_parameter(
app_client_two_attached_databases_one_immutable, path, expected_redirect
):
# First get the current hash for the fixtures database
current_hash = app_client_two_attached_databases_one_immutable.ds.databases[
"fixtures"
].hash[:7]
response = app_client_two_attached_databases_one_immutable.get(
path, allow_redirects=False
)
assert response.status == 302
location = response.headers["Location"]
assert expected_redirect.replace("HASH", current_hash) == location
def test_hash_parameter_ignored_for_mutable_databases(app_client):
path = "/fixtures/facetable.json?_hash=1"
response = app_client.get(path, allow_redirects=False)
assert response.status == 200
test_json_columns_default_expected = [
{"intval": 1, "strval": "s", "floatval": 0.5, "jsonval": '{"foo": "bar"}'}
]
@pytest.mark.parametrize(
"extra_args,expected",
[
("", test_json_columns_default_expected),
("&_json=intval", test_json_columns_default_expected),
("&_json=strval", test_json_columns_default_expected),
("&_json=floatval", test_json_columns_default_expected),
(
"&_json=jsonval",
[{"intval": 1, "strval": "s", "floatval": 0.5, "jsonval": {"foo": "bar"}}],
),
],
)
def test_json_columns(app_client, extra_args, expected):
sql = """
select 1 as intval, "s" as strval, 0.5 as floatval,
'{"foo": "bar"}' as jsonval
"""
path = "/fixtures.json?" + urllib.parse.urlencode({"sql": sql, "_shape": "array"})
path += extra_args
response = app_client.get(path)
assert expected == response.json
def test_config_cache_size(app_client_larger_cache_size):
response = app_client_larger_cache_size.get("/fixtures/pragma_cache_size.json")
assert [[-2500]] == response.json["rows"]
def test_config_force_https_urls():
with make_app_client(config={"force_https_urls": True}) as client:
response = client.get("/fixtures/facetable.json?_size=3&_facet=state")
assert response.json["next_url"].startswith("https://")
assert response.json["facet_results"]["state"]["results"][0][
"toggle_url"
].startswith("https://")
assert response.json["suggested_facets"][0]["toggle_url"].startswith("https://")
# Also confirm that request.url and request.scheme are set correctly
response = client.get("/")
assert client.ds._last_request.url.startswith("https://")
assert client.ds._last_request.scheme == "https"
def test_infinity_returned_as_null(app_client):
response = app_client.get("/fixtures/infinity.json?_shape=array")
assert [
{"rowid": 1, "value": None},
{"rowid": 2, "value": None},
{"rowid": 3, "value": 1.5},
] == response.json
def test_infinity_returned_as_invalid_json_if_requested(app_client):
response = app_client.get("/fixtures/infinity.json?_shape=array&_json_infinity=1")
assert [
{"rowid": 1, "value": float("inf")},
{"rowid": 2, "value": float("-inf")},
{"rowid": 3, "value": 1.5},
] == response.json
def test_custom_query_with_unicode_characters(app_client):
response = app_client.get("/fixtures/𝐜𝐢𝐭𝐢𝐞𝐬.json?_shape=array")
assert [{"id": 1, "name": "San Francisco"}] == response.json
def test_trace(app_client):
response = app_client.get("/fixtures/simple_primary_key.json?_trace=1")
data = response.json
assert "_trace" in data
trace_info = data["_trace"]
assert isinstance(trace_info["request_duration_ms"], float)
assert isinstance(trace_info["sum_trace_duration_ms"], float)
assert isinstance(trace_info["num_traces"], int)
assert isinstance(trace_info["traces"], list)
assert len(trace_info["traces"]) == trace_info["num_traces"]
for trace in trace_info["traces"]:
assert isinstance(trace["type"], str)
assert isinstance(trace["start"], float)
assert isinstance(trace["end"], float)
assert trace["duration_ms"] == (trace["end"] - trace["start"]) * 1000
assert isinstance(trace["traceback"], list)
assert isinstance(trace["database"], str)
assert isinstance(trace["sql"], str)
assert isinstance(trace["params"], (list, dict, None.__class__))
@pytest.mark.parametrize(
"path,status_code",
[
("/fixtures.db", 200),
("/fixtures.json", 200),
("/fixtures/no_primary_key.json", 200),
# A 400 invalid SQL query should still have the header:
("/fixtures.json?sql=select+blah", 400),
],
)
def test_cors(app_client_with_cors, path, status_code):
response = app_client_with_cors.get(path)
assert response.status == status_code
assert "*" == response.headers["Access-Control-Allow-Origin"]
@pytest.mark.parametrize(
"path",
(
"/",
".json",
"/searchable",
"/searchable.json",
"/searchable_view",
"/searchable_view.json",
),
)
def test_database_with_space_in_name(app_client_two_attached_databases, path):
response = app_client_two_attached_databases.get("/extra database" + path)
assert response.status == 200
def test_common_prefix_database_names(app_client_conflicting_database_names):
# https://github.com/simonw/datasette/issues/597
assert ["fixtures", "foo", "foo-bar"] == [
d["name"]
for d in app_client_conflicting_database_names.get("/-/databases.json").json
]
for db_name, path in (("foo", "/foo.json"), ("foo-bar", "/foo-bar.json")):
data = app_client_conflicting_database_names.get(path).json
assert db_name == data["database"]
def test_null_foreign_keys_are_not_expanded(app_client):
response = app_client.get(
"/fixtures/foreign_key_references.json?_shape=array&_labels=on"
)
assert [
{
"pk": "1",
"foreign_key_with_label": {"value": "1", "label": "hello"},
"foreign_key_with_no_label": {"value": "1", "label": "1"},
},
{
"pk": "2",
"foreign_key_with_label": None,
"foreign_key_with_no_label": None,
},
] == response.json
def test_inspect_file_used_for_count(app_client_immutable_and_inspect_file):
response = app_client_immutable_and_inspect_file.get("/fixtures/sortable.json")
assert response.json["filtered_table_rows_count"] == 100
@pytest.mark.parametrize(
"path,expected_json,expected_text",
[
(
"/fixtures/binary_data.json?_shape=array",
[
{"rowid": 1, "data": {"$base64": True, "encoded": "FRwCx60F/g=="}},
{"rowid": 2, "data": {"$base64": True, "encoded": "FRwDx60F/g=="}},
{"rowid": 3, "data": None},
],
None,
),
(
"/fixtures/binary_data.json?_shape=array&_nl=on",
None,
(
'{"rowid": 1, "data": {"$base64": true, "encoded": "FRwCx60F/g=="}}\n'
'{"rowid": 2, "data": {"$base64": true, "encoded": "FRwDx60F/g=="}}\n'
'{"rowid": 3, "data": null}'
),
),
],
)
def test_binary_data_in_json(app_client, path, expected_json, expected_text):
response = app_client.get(path)
if expected_json:
assert response.json == expected_json
else:
assert response.text == expected_text
@pytest.mark.parametrize(
"qs",
[
"",
"?_shape=arrays",
"?_shape=arrayfirst",
"?_shape=object",
"?_shape=objects",
"?_shape=array",
"?_shape=array&_nl=on",
],
)
def test_paginate_using_link_header(app_client, qs):
path = "/fixtures/compound_three_primary_keys.json{}".format(qs)
num_pages = 0
while path:
response = app_client.get(path)
assert response.status == 200
num_pages += 1
link = response.headers.get("link")
if link:
assert link.startswith("<")
assert link.endswith('>; rel="next"')
path = link[1:].split(">")[0]
path = path.replace("http://localhost", "")
else:
path = None
assert num_pages == 21
|
the-stack_0_7968 | """Additional in template functions for the lattedb module
"""
from django import template
register = template.Library() # pylint: disable=C0103
@register.inclusion_tag("progress-bar.html")
def render_progress_bar(danger, warning, info, success, total):
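    """Compute percentage widths for the progress-bar.html template.

    Each of danger/warning/info/success is converted to a percentage of
    ``total``; when ``total`` is zero, all widths fall back to zero.
    """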
if total > 0:
context = {
"danger": danger / total * 100,
"warning": warning / total * 100,
"info": info / total * 100,
"success": success / total * 100,
"total": total,
}
else:
context = {
"danger": 0,
"warning": 0,
"info": 0,
"success": 0,
"total": 0,
}
return context
|
the-stack_0_7969 | from fastapi import APIRouter, Request, HTTPException, Depends, Query
from fastapi.responses import StreamingResponse
import aiohttp
import csv
import io
router = APIRouter()
@router.get("/battlefy/{tournament_id}")
async def battlefy_seed_csv(request: Request, tournament_id: str):
"""Returns a CSV of teams and players for seeding use"""
async with aiohttp.ClientSession() as session:
async with session.get(f"https://dtmwra1jsgyb0.cloudfront.net/tournaments/{tournament_id}/teams") as resp:
data = await resp.json()
if resp.status != 200:
raise HTTPException(status_code=resp.status, detail=f"{data['error']}")
# If status is 200
# Create in-memory store for csv writer
csv_file = io.StringIO()
csv_writer = csv.writer(csv_file)
csv_writer.writerow(
["team", "player1", "player2", "player3", "player4", "player5", "player6", "player7", "player8"])
for team in data:
team_row = [team['name']]
            for p in team.get('players', []):
name = p['inGameName']
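                    # Prefix a leading "=" with "." so the value is not treated as a
                    # spreadsheet formula when the CSV is opened (assumed intent).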
                    if name[0] == "=":
name = f".{name}"
team_row.append(name)
csv_writer.writerow(team_row)
# Return CSV
response = StreamingResponse(iter([csv_file.getvalue()]), media_type="text/csv")
response.headers["Content-Disposition"] = "attachment; filename=teams.csv"
return response
|
the-stack_0_7970 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import sphinx_py3doc_enhanced_theme
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.coverage',
'sphinx.ext.doctest',
'sphinx.ext.extlinks',
'sphinx.ext.ifconfig',
'sphinx.ext.napoleon',
'sphinx.ext.todo',
'sphinx.ext.viewcode',
]
source_suffix = '.rst'
master_doc = 'index'
project = 'disentangle'
year = '2020'
author = 'Yukun Chen'
copyright = '{0}, {1}'.format(year, author)
version = release = '0.0.4'
pygments_style = 'trac'
templates_path = ['.']
extlinks = {
'issue': ('https://github.com/yukunchen113/disentangle/issues/%s', '#'),
'pr': ('https://github.com/yukunchen113/disentangle/pull/%s', 'PR #'),
}
html_theme = "sphinx_py3doc_enhanced_theme"
html_theme_path = [sphinx_py3doc_enhanced_theme.get_html_theme_path()]
html_theme_options = {
'githuburl': 'https://github.com/yukunchen113/disentangle/'
}
html_use_smartypants = True
html_last_updated_fmt = '%b %d, %Y'
html_split_index = False
html_sidebars = {
'**': ['searchbox.html', 'globaltoc.html', 'sourcelink.html'],
}
html_short_title = '%s-%s' % (project, version)
napoleon_use_ivar = True
napoleon_use_rtype = False
napoleon_use_param = False
|
the-stack_0_7972 | # coding: utf-8
from __future__ import division
import tensorflow as tf
from tensorflow.keras import layers
import numpy as np
import data
def _get_shape(i, o, keepdims):
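    """Return the weight shape, collapsed to 1-D when either dimension is 1 and keepdims is False."""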
if (i == 1 or o == 1) and not keepdims:
return [max(i,o),]
else:
return [i, o]
def _slice(tensor, size, i):
"""Gets slice of columns of the tensor"""
return tensor[:, i*size:(i+1)*size]
def weights_Glorot(i, o, name, rng, is_logistic_sigmoid=False, keepdims=False):
#http://jmlr.org/proceedings/papers/v9/glorot10a/glorot10a.pdf
d = np.sqrt(6. / (i + o))
if is_logistic_sigmoid:
d *= 4.
return tf.Variable(tf.random.uniform(_get_shape(i, o, keepdims), -d, d))
def load(file_path, x, p=None):
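    """Restore a model pickled by ``save``.

    Rebuilds the model class named in the pickle, restores its parameters and
    the numpy RNG state, and returns the net together with
    (learning_rate, validation_ppl_history, epoch, rng).
    """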
import models
import pickle
import numpy as np
with open(file_path, 'rb') as f:
state = pickle.load(f)
Model = getattr(models, state["type"])
rng = np.random
rng.set_state(state["random_state"])
net = Model(
rng=rng,
x=x,
n_hidden=state["n_hidden"]
)
for net_param, state_param in zip(net.params, state["params"]):
net_param.assign(state_param)
return net, (state["learning_rate"], state["validation_ppl_history"], state["epoch"], rng)
class GRUCell(layers.Layer):
def __init__(self, rng, n_in, n_out, minibatch_size):
super(GRUCell, self).__init__()
# Notation from: An Empirical Exploration of Recurrent Network Architectures
self.n_in = n_in
self.n_out = n_out
# Initial hidden state
self.h0 = tf.zeros([minibatch_size, n_out])
# Gate parameters:
self.W_x = weights_Glorot(n_in, n_out*2, 'W_x', rng)
self.W_h = weights_Glorot(n_out, n_out*2, 'W_h', rng)
self.b = tf.Variable(tf.zeros([1, n_out*2]))
# Input parameters
self.W_x_h = weights_Glorot(n_in, n_out, 'W_x_h', rng)
self.W_h_h = weights_Glorot(n_out, n_out, 'W_h_h', rng)
self.b_h = tf.Variable(tf.zeros([1, n_out]))
self.params = [self.W_x, self.W_h, self.b, self.W_x_h, self.W_h_h, self.b_h]
# inputs = x_t, h_tm1
def call(self, inputs):
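        # Standard GRU update (inputs = (x_t, h_tm1)):
        #   r, z = split(sigmoid(x_t W_x + h_tm1 W_h + b))
        #   h~   = tanh(x_t W_x_h + (r * h_tm1) W_h_h + b_h)
        #   h_t  = z * h_tm1 + (1 - z) * h~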
rz = tf.nn.sigmoid(tf.matmul(inputs[0], self.W_x) + tf.matmul(inputs[1], self.W_h) + self.b)
r = _slice(rz, self.n_out, 0)
z = _slice(rz, self.n_out, 1)
h = tf.nn.tanh(tf.matmul(inputs[0], self.W_x_h) + tf.matmul(inputs[1] * r, self.W_h_h) + self.b_h)
h_t = z * inputs[1] + (1. - z) * h
return h_t
class GRU(tf.keras.Model):
def __init__(self, rng, x, n_hidden):
super(GRU, self).__init__()
self.minibatch_size = tf.shape(x)[1]
self.n_hidden = n_hidden
self.x_vocabulary = data.read_vocabulary(data.WORD_VOCAB_FILE)
self.y_vocabulary = data.read_vocabulary(data.PUNCT_VOCAB_FILE)
self.x_vocabulary_size = len(self.x_vocabulary)
self.y_vocabulary_size = len(self.y_vocabulary)
# input model
self.We = weights_Glorot(self.x_vocabulary_size, n_hidden, 'We', rng) # Share embeddings between forward and backward model
self.GRU_f = GRUCell(rng=rng, n_in=n_hidden, n_out=n_hidden, minibatch_size=self.minibatch_size)
self.GRU_b = GRUCell(rng=rng, n_in=n_hidden, n_out=n_hidden, minibatch_size=self.minibatch_size)
# output model
self.GRU = GRUCell(rng=rng, n_in=n_hidden*2, n_out=n_hidden, minibatch_size=self.minibatch_size)
self.Wy = tf.Variable(tf.zeros([n_hidden, self.y_vocabulary_size]))
self.by = tf.Variable(tf.zeros([1, self.y_vocabulary_size]))
# attention model
n_attention = n_hidden * 2 # to match concatenated forward and reverse model states
self.Wa_h = weights_Glorot(n_hidden, n_attention, 'Wa_h', rng) # output model previous hidden state to attention model weights
self.Wa_c = weights_Glorot(n_attention, n_attention, 'Wa_c', rng) # contexts to attention model weights
self.ba = tf.Variable(tf.zeros([1, n_attention]))
self.Wa_y = weights_Glorot(n_attention, 1, 'Wa_y', rng) # gives weights to contexts
# Late fusion parameters
self.Wf_h = tf.Variable(tf.zeros([n_hidden, n_hidden]))
self.Wf_c = tf.Variable(tf.zeros([n_attention, n_hidden]))
self.Wf_f = tf.Variable(tf.zeros([n_hidden, n_hidden]))
self.bf = tf.Variable(tf.zeros([1, n_hidden]))
self.params = [self.We,
self.Wy, self.by,
self.Wa_h, self.Wa_c, self.ba, self.Wa_y,
self.Wf_h, self.Wf_c, self.Wf_f, self.bf]
self.params += self.GRU.params + self.GRU_f.params + self.GRU_b.params
print([x.shape for x in self.params])
def call(self, inputs, training=None):
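        # Forward pass: embed the word ids, run forward and backward GRUs to
        # build a bidirectional context, then decode with an attention +
        # late-fusion GRU that emits punctuation logits per position.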
# bi-directional recurrence
def input_recurrence(initializer, elems):
x_f_t, x_b_t = elems
h_f_tm1, h_b_tm1 = initializer
h_f_t = self.GRU_f(inputs=(tf.nn.embedding_lookup(self.We, x_f_t), h_f_tm1))
h_b_t = self.GRU_b(inputs=(tf.nn.embedding_lookup(self.We, x_b_t), h_b_tm1))
return [h_f_t, h_b_t]
[h_f_t, h_b_t] = tf.scan(
fn=input_recurrence,
elems=[inputs, inputs[::-1]], # forward and backward sequences
initializer=[self.GRU_f.h0, self.GRU_b.h0]
)
# 0-axis is time steps, 1-axis is batch size and 2-axis is hidden layer size
context = tf.concat([h_f_t, h_b_t[::-1]], axis=2)
#projected_context = tf.matmul(context, self.Wa_c) + self.ba for each tensor slice
projected_context = tf.matmul(context, tf.tile(tf.expand_dims(self.Wa_c, 0), tf.stack([tf.shape(context)[0], 1, 1]))) + self.ba
def output_recurrence(initializer, elems):
x_t = elems
h_tm1, _, _ = initializer
# Attention model
h_a = tf.nn.tanh(projected_context + tf.matmul(h_tm1, self.Wa_h))
#alphas = tf.exp(tf.matmul(h_a, self.Wa_y))
#alphas = tf.reshape(alphas, [tf.shape(alphas)[0], tf.shape(alphas)[1]]) # drop 2-axis (sized 1) is replaced by:
#sess.run(tf.reshape(tf.matmul(tf.reshape(x, [-1, tf.shape(x)[-1]]), tf.expand_dims(z,-1)), tf.shape(x)[:2]))
alphas = tf.exp(tf.reshape(tf.matmul(tf.reshape(h_a, [-1, tf.shape(h_a)[-1]]), tf.expand_dims(self.Wa_y, -1)), tf.shape(h_a)[:2]))
alphas = alphas / tf.reduce_sum(alphas, axis=0, keepdims=True)
weighted_context = tf.reduce_sum(context * alphas[:,:,None], axis=0)
h_t = self.GRU(inputs=(x_t, h_tm1))
# Late fusion
lfc = tf.matmul(weighted_context, self.Wf_c) # late fused context
fw = tf.nn.sigmoid(tf.matmul(lfc, self.Wf_f) + tf.matmul(h_t, self.Wf_h) + self.bf) # fusion weights
hf_t = lfc * fw + h_t # weighted fused context + hidden state
z = tf.matmul(hf_t, self.Wy) + self.by
y_t = z#tf.nn.softmax(z)
return [h_t, hf_t, y_t]
[_, self.last_hidden_states, self.y] = tf.scan(
fn=output_recurrence,
elems=context[1:], # ignore the 1st word in context, because there's no punctuation before that
initializer=[self.GRU.h0, self.GRU.h0, tf.zeros([self.minibatch_size, self.y_vocabulary_size])]
)
return self.y
def cost(y_pred, y_true):
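    """Summed sparse softmax cross-entropy between logits and integer labels."""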
return tf.reduce_sum(tf.nn.sparse_softmax_cross_entropy_with_logits(logits=y_pred, labels=y_true))
def save(model, file_path, learning_rate=None, validation_ppl_history=None, best_validation_ppl=None, epoch=None, random_state=None):
import pickle
state = {
"type": model.__class__.__name__,
"n_hidden": model.n_hidden,
"params": [p for p in model.params],
"learning_rate": learning_rate,
"validation_ppl_history": validation_ppl_history,
"epoch": epoch,
"random_state": random_state
}
print([x.shape for x in state["params"]])
with open(file_path, 'wb') as f:
pickle.dump(state, f, protocol=pickle.HIGHEST_PROTOCOL)
|
the-stack_0_7973 | # #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# <HTTPretty - HTTP client mock for Python>
# Copyright (C) <2011-2020> Gabriel Falcão <[email protected]>
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following
# conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
from __future__ import unicode_literals
import os
import time
import socket
from tornado.web import Application
from tornado.web import RequestHandler
from tornado.httpserver import HTTPServer
from tornado.ioloop import IOLoop
from httpretty import HTTPretty
from httpretty.core import old_socket as true_socket
from multiprocessing import Process
def utf8(s):
if isinstance(s, str):
s = s.encode('utf-8')
return bytes(s)
class BubblesHandler(RequestHandler):
def get(self):
self.write(". o O 0 O o . o O 0 O o . o O 0 O o . o O 0 O o . o O 0 O o .")
class ComeHandler(RequestHandler):
def get(self):
self.write("<- HELLO WORLD ->")
class TornadoServer(object):
is_running = False
def __init__(self, port):
self.port = int(port)
self.process = None
@classmethod
def get_handlers(cls):
return Application([
(r"/go-for-bubbles/?", BubblesHandler),
(r"/come-again/?", ComeHandler),
])
def start(self):
def go(app, port, data={}):
from httpretty import HTTPretty
HTTPretty.disable()
http = HTTPServer(app)
HTTPretty.disable()
http.listen(int(port))
IOLoop.instance().start()
app = self.get_handlers()
data = {}
args = (app, self.port, data)
HTTPretty.disable()
self.process = Process(target=go, args=args)
self.process.start()
time.sleep(1)
def stop(self):
try:
os.kill(self.process.pid, 9)
except OSError:
self.process.terminate()
finally:
self.is_running = False
class TCPServer(object):
def __init__(self, port):
self.port = int(port)
def start(self):
HTTPretty.disable()
def go(port):
from httpretty import HTTPretty
HTTPretty.disable()
import socket
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind(('localhost', port))
s.listen(True)
conn, addr = s.accept()
while True:
data = conn.recv(1024)
conn.send(b"RECEIVED: " + bytes(data))
conn.close()
args = [self.port]
self.process = Process(target=go, args=args)
self.process.start()
time.sleep(1)
def stop(self):
try:
os.kill(self.process.pid, 9)
except OSError:
self.process.terminate()
finally:
self.is_running = False
class TCPClient(object):
def __init__(self, port):
self.port = int(port)
self.sock = true_socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock.connect(('localhost', self.port))
def send(self, data):
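        """Send ``data`` and return the server's echo (b"RECEIVED: " + data)."""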
if isinstance(data, str):
data = data.encode('utf-8')
self.sock.sendall(data)
return self.sock.recv(len(data) + 11)
def close(self):
try:
self.sock.close()
except socket.error:
pass # already closed
def __del__(self):
self.close()
|
the-stack_0_7974 | # Copyright 2012 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Base utilities to build API operation managers and objects on top of.
"""
from yunionclient.common import utils
class Manager(object):
"""
Managers interact with a particular type of API (servers, flavors, images,
etc.) and provide CRUD operations for them.
"""
resource_class = None
service_type = 'compute_v2'
is_admin_api = False
_columns = None
_admin_columns = None
_version = ''
def __init__(self, api):
self.api = api
@property
def columns(self):
cols = []
cols.extend(self._columns)
if self.api.is_system_admin() and self._admin_columns is not None:
cols.extend(self._admin_columns)
return cols
def tenant_id(self):
return self.api.get_default_tenant().get_id()
def tenant_name(self):
return self.api.get_default_tenant().get_name()
def _get_versioned_url(self, url):
if self._version is not None and len(self._version) > 0:
while url.startswith('/'):
url = url[1:]
url = r'/%s/%s' % (self._version, url)
return url
def json_request(self, method, url, **kwargs):
return self.api.json_request(self.service_type, self.is_admin_api,
method, self._get_versioned_url(url), **kwargs)
def raw_request(self, method, url, **kwargs):
return self.api.raw_request(self.service_type, self.is_admin_api,
method, self._get_versioned_url(url), **kwargs)
def get_urllib2_raw_request(self, url, **kwargs):
return self.api.get_urllib2_raw_request(self.service_type,
self.is_admin_api, self._get_versioned_url(url), **kwargs)
def _dict_to_object(self, dict_obj, obj_class):
cls = obj_class
if cls is None:
cls = self.resource_class
if cls is not None:
if isinstance(dict_obj, dict):
return cls(self.api, dict_obj)
elif isinstance(dict_obj, list):
rets = []
for o in dict_obj:
rets.append(self._dict_to_object(o, obj_class))
return rets
else:
return dict_obj
def _get(self, url, response_key, obj_class=None):
resp, body = self.json_request('GET', url)
if len(response_key) == 0:
return body
data = body[response_key]
return self._dict_to_object(data, obj_class)
def _list(self, url, response_key, obj_class=None):
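        """GET ``url`` and return (objects, total, limit, offset) from the paginated body."""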
resp, body = self.json_request('GET', url)
if 'total' in body:
total = body['total']
if 'limit' in body:
limit = body['limit']
else:
limit = 0
if 'offset' in body:
offset = body['offset']
else:
offset = 0
else:
total = 0
limit = 0
offset = 0
data = body[response_key]
return (self._dict_to_object(data, obj_class), total, limit, offset)
def _create(self, url, body, response_key, obj_class=None):
resp, body = self.json_request('POST', url, body=body)
return self._dict_to_object(body[response_key], obj_class)
def _delete(self, url, response_key, obj_class=None):
resp, body = self.json_request('DELETE', url)
# DELETE requests may not return a body
if body is not None and response_key in body:
return self._dict_to_object(body[response_key], obj_class)
else:
return None
def _update(self, url, body, response_key, obj_class=None):
resp, body = self.json_request('PUT', url, body=body)
# PUT requests may not return a body
if body is not None and response_key in body:
return self._dict_to_object(body[response_key], obj_class)
else:
return None
def clean_kwargs(kwargs):
newkw = {}
for k in list(kwargs.keys()):
if kwargs[k] is not None:
newkw[k] = kwargs[k]
return newkw
class StandaloneManager(Manager):
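    # Concrete subclasses are expected to define ``keyword`` and ``keyword_plural``
    # class attributes; any ':' in them is converted to '/' when building URLs.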
@classmethod
def keyword_url(cls):
return cls.keyword.replace(':', '/')
@classmethod
def keyword_plural_url(cls):
return cls.keyword_plural.replace(':', '/')
def get(self, idstr, **kwargs):
url = r'/%s/%s' % (self.keyword_plural_url(), idstr)
newkw = clean_kwargs(kwargs)
if len(newkw) > 0:
url += '?' + utils.urlencode(newkw)
return self._get(url, self.keyword)
def get_specific(self, idstr, spec, **kwargs):
url = r'/%s/%s/%s' % (self.keyword_plural_url(), idstr, spec)
newkw = clean_kwargs(kwargs)
if len(newkw) > 0:
url += '?' + utils.urlencode(newkw)
return self._get(url, self.keyword)
def get_metadata(self, idstr, **kwargs):
return self.get_specific(idstr, 'metadata', **kwargs)
def set_metadata(self, idstr, **kwargs):
return self.perform_action(idstr, 'metadata', **kwargs)
def set_user_metadata(self, idstr, **kwargs):
return self.perform_action(idstr, 'user-metadata', **kwargs)
def get_descendent(self, idstr, desc_cls, desc_idstr, **kwargs):
if desc_idstr is None:
desc_idstr = '_'
url = r'/%s/%s/%s/%s' % (self.keyword_plural_url(), idstr,
desc_cls.keyword_plural_url(),
desc_idstr)
kwargs = clean_kwargs(kwargs)
if len(kwargs) > 0:
url += '?' + utils.urlencode(kwargs)
return self._get(url, desc_cls.keyword)
def get_descendent_specific(self, idstr, desc_cls, desc_idstr,
spec, **kwargs):
if desc_idstr is None:
desc_idstr = '_'
url = r'/%s/%s/%s/%s/%s' % (self.keyword_plural_url(), idstr,
desc_cls.keyword_plural_url(),
desc_idstr, spec)
kwargs = clean_kwargs(kwargs)
if len(kwargs) > 0:
url += '?' + utils.urlencode(kwargs)
return self._get(url, desc_cls.keyword)
def list(self, **kwargs):
url = r'/%s' % (self.keyword_plural_url())
kwargs = clean_kwargs(kwargs)
if len(kwargs) > 0:
url += '?' + utils.urlencode(kwargs)
return self._list(url, self.keyword_plural)
def list_descendent(self, idstr, *args, **kwargs):
url = r'/%s/%s' % (self.keyword_plural_url(), idstr)
if len(args) > 1:
for i in range(0, len(args)-1, 2):
url += r'/%s/%s' % (args[i].keyword_plural_url(), args[i+1])
desc_cls = args[-1]
url += '/' + desc_cls.keyword_plural_url()
kwargs = clean_kwargs(kwargs)
if len(kwargs) > 0:
url += '?' + utils.urlencode(kwargs)
return self._list(url, desc_cls.keyword_plural)
def delete(self, idstr, **kwargs):
url = r'/%s/%s' % (self.keyword_plural_url(), idstr)
kwargs = clean_kwargs(kwargs)
if len(kwargs) > 0:
url += '?' + utils.urlencode(kwargs)
return self._delete(url, self.keyword)
def delete_descendent(self, idstr, desc_cls, desc_idstr, *args, **kwargs):
if desc_idstr is None:
desc_idstr = '_'
url = r'/%s/%s/%s/%s' % (self.keyword_plural_url(), idstr,
desc_cls.keyword_plural_url(),
desc_idstr)
if len(args) > 0:
for i in range(0, len(args), 2):
url += r'/%s/%s' % (args[i].keyword_plural_url(), args[i+1])
kwargs = clean_kwargs(kwargs)
if len(kwargs) > 0:
url += '?' + utils.urlencode(kwargs)
return self._delete(url, desc_cls.keyword)
def create(self, **kwargs):
return self.batch_create(1, **kwargs)
def batch_create(self, count_, **kwargs):
resp_key = self.keyword
body = {}
body[self.keyword] = kwargs
if count_ > 1:
resp_key = self.keyword_plural
body['count'] = count_
url = r'/%s' % (self.keyword_plural_url())
return self._create(url, body, resp_key)
def create_descendent(self, idstr, desc_cls, **kwargs):
return self.batch_create_descendent(idstr, desc_cls, 1, **kwargs)
def batch_create_descendent(self, idstr, desc_cls, count_, **kwargs):
resp_key = self.keyword
body = {}
if count_ > 1:
resp_key = self.keyword_plural
body['count'] = count_
body[desc_cls.keyword] = kwargs
url = r'/%s/%s/%s' % (self.keyword_plural_url(), idstr,
desc_cls.keyword_plural_url())
return self._create(url, body, resp_key)
def update(self, idstr, **kwargs):
body = {}
body[self.keyword] = kwargs
if idstr is None:
url = r'/%s' % self.keyword_plural_url()
else:
url = r'/%s/%s' % (self.keyword_plural_url(), idstr)
return self._update(url, body, self.keyword)
def update_descendent(self, idstr, desc_cls, desc_idstr, *args, **kwargs):
if desc_idstr is None:
desc_idstr = '_'
url = r'/%s/%s/%s/%s' % (self.keyword_plural_url(), idstr,
desc_cls.keyword_plural_url(), desc_idstr)
if len(args) > 0:
for i in range(0, len(args), 2):
url += r'/%s/%s' % (args[i].keyword_plural_url(), args[i+1])
body = {}
body[desc_cls.keyword] = kwargs
return self._update(url, body, desc_cls.keyword)
def perform_action(self, idstr, action, **kwargs):
url = r'/%s/%s/%s' % (self.keyword_plural_url(), idstr, action)
body = {}
body[self.keyword] = kwargs
resp, body = self.json_request('POST', url, body=body)
return body[self.keyword]
def perform_class_action(self, action, **kwargs):
url = r'/%s/%s' % (self.keyword_plural_url(), action)
body = {}
body[self.keyword] = kwargs
resp, body = self.json_request('POST', url, body=body)
return body[self.keyword]
def perform_action_descendent(self, idstr, desc_cls, desc_idstr,
action, **kwargs):
if desc_idstr is None:
desc_idstr = '_'
url = r'/%s/%s/%s/%s/%s' % (self.keyword_plural_url(), idstr,
desc_cls.keyword_plural_url(),
desc_idstr, action)
body = {}
body[desc_cls.keyword] = kwargs
resp, body = self.json_request('POST', url, body=body)
return body[desc_cls.keyword]
class ImageManager(StandaloneManager):
service_type = 'image'
_version = 'v1'
class JointManager(Manager):
@classmethod
def keyword_url(cls):
return cls.keyword.replace(':', '/')
@classmethod
def keyword_plural_url(cls):
return cls.keyword_plural.replace(':', '/')
def get(self, mid, sid):
url = r'/%s/%s/%s/%s' % (self.master_class().keyword_plural_url(),
mid, self.slave_class().keyword_plural_url(), sid)
return self._get(url, self.keyword)
def list(self, **kwargs):
url = r'/%s' % (self.keyword_plural_url())
kwargs = clean_kwargs(kwargs)
if len(kwargs) > 0:
url += '?%s' % utils.urlencode(kwargs)
return self._list(url, self.keyword_plural)
def list_descendent(self, mid, **kwargs):
url = r'/%s/%s/%s' % (self.master_class().keyword_plural_url(),
mid, self.slave_class().keyword_plural_url())
kwargs = clean_kwargs(kwargs)
if len(kwargs) > 0:
url += '?%s' % utils.urlencode(kwargs)
return self._list(url, self.keyword_plural)
def attach(self, mid, sid, **kwargs):
body = {}
body[self.keyword] = kwargs
url = r'/%s/%s/%s/%s' % (self.master_class().keyword_plural_url(),
mid, self.slave_class().keyword_plural_url(), sid)
return self._create(url, body, self.keyword)
def detach(self, mid, sid):
url = r'/%s/%s/%s/%s' % (self.master_class().keyword_plural_url(),
mid, self.slave_class().keyword_plural_url(), sid)
return self._delete(url, self.keyword)
def update(self, mid, sid, **kwargs):
body = {}
body[self.keyword] = kwargs
url = r'/%s/%s/%s/%s' % (self.master_class().keyword_plural_url(),
mid, self.slave_class().keyword_plural_url(), sid)
return self._update(url, body, self.keyword)
class IdentityManager(StandaloneManager):
service_type = 'identity'
_version = 'v3'
class IdentityJointManager(JointManager):
service_type = 'identity'
_version = 'v3'
class ResourceBase(object):
def __init__(self, api, attr_dict):
self._client_api = api
attr_dict = self._normalize_attribute_dict(attr_dict)
for (k, v) in attr_dict.items():
attr_name = k.replace('-', '_')
setattr(self, attr_name, v)
def _normalize_attribute_dict(self, attr_dict):
return attr_dict
def __repr__(self):
reprkeys = sorted(k for k in list(self.__dict__.keys()) if k[0] != '_')
info = ", ".join("%s=%s" % (k, getattr(self, k)) for k in reprkeys)
return "<%s %s>" % (self.__class__.__name__, info)
def __getitem__(self, key):
if len(key) > 0 and key[0] != '_':
return getattr(self, key, None)
return None
def get(self, key):
return self[key]
def to_dict(self):
d = {}
for k in dir(self):
if k[0] != '_':
v = getattr(self, k, None)
if v is not None:
if not callable(v):
d[k] = v
return d
class MeterManager(StandaloneManager):
service_type = 'meter'
class LoggerManager(StandaloneManager):
service_type = 'log'
|
the-stack_0_7975 | """HTTP/1.1 client library
A backport of the Python 3.3 http/client.py module for python-future.
<intro stuff goes here>
<other stuff, too>
HTTPConnection goes through a number of "states", which define when a client
may legally make another request or fetch the response for a particular
request. This diagram details these state transitions:
    (null)
      |
      | HTTPConnection()
      v
    Idle
      |
      | putrequest()
      v
    Request-started
      |
      | ( putheader() )*  endheaders()
      v
    Request-sent
      |
      | response = getresponse()
      v
    Unread-response   [Response-headers-read]
      |\____________________
      |                     |
      | response.read()     | putrequest()
      v                     v
    Idle                  Req-started-unread-response
                     ______/|
                   /        |
   response.read() |        | ( putheader() )*  endheaders()
                   v        v
       Request-started    Req-sent-unread-response
                            |
                            | response.read()
                            v
                          Request-sent
This diagram presents the following rules:
-- a second request may not be started until {response-headers-read}
-- a response [object] cannot be retrieved until {request-sent}
-- there is no differentiation between an unread response body and a
partially read response body
Note: this enforcement is applied by the HTTPConnection class. The
HTTPResponse class does not enforce this state machine, which
implies sophisticated clients may accelerate the request/response
pipeline. Caution should be taken, though: accelerating the states
beyond the above pattern may imply knowledge of the server's
connection-close behavior for certain requests. For example, it
is impossible to tell whether the server will close the connection
UNTIL the response headers have been read; this means that further
requests cannot be placed into the pipeline until it is known that
the server will NOT be closing the connection.
Logical State                __state            __response
-------------                -------            ----------
Idle                         _CS_IDLE           None
Request-started              _CS_REQ_STARTED    None
Request-sent                 _CS_REQ_SENT       None
Unread-response              _CS_IDLE           <response_class>
Req-started-unread-response  _CS_REQ_STARTED    <response_class>
Req-sent-unread-response     _CS_REQ_SENT       <response_class>
"""
from __future__ import (absolute_import, division,
print_function, unicode_literals)
from future.builtins import bytes, int, str, super
from future.utils import PY2
from future.backports.email import parser as email_parser
from future.backports.email import message as email_message
from future.backports.misc import create_connection as socket_create_connection
import io
import os
import socket
import collections
from future.backports.urllib.parse import urlsplit
import warnings
from array import array
__all__ = ["HTTPResponse", "HTTPConnection",
"HTTPException", "NotConnected", "UnknownProtocol",
"UnknownTransferEncoding", "UnimplementedFileMode",
"IncompleteRead", "InvalidURL", "ImproperConnectionState",
"CannotSendRequest", "CannotSendHeader", "ResponseNotReady",
"BadStatusLine", "error", "responses"]
HTTP_PORT = 80
HTTPS_PORT = 443
_UNKNOWN = 'UNKNOWN'
# connection states
_CS_IDLE = 'Idle'
_CS_REQ_STARTED = 'Request-started'
_CS_REQ_SENT = 'Request-sent'
# status codes
# informational
CONTINUE = 100
SWITCHING_PROTOCOLS = 101
PROCESSING = 102
# successful
OK = 200
CREATED = 201
ACCEPTED = 202
NON_AUTHORITATIVE_INFORMATION = 203
NO_CONTENT = 204
RESET_CONTENT = 205
PARTIAL_CONTENT = 206
MULTI_STATUS = 207
IM_USED = 226
# redirection
MULTIPLE_CHOICES = 300
MOVED_PERMANENTLY = 301
FOUND = 302
SEE_OTHER = 303
NOT_MODIFIED = 304
USE_PROXY = 305
TEMPORARY_REDIRECT = 307
# client error
BAD_REQUEST = 400
UNAUTHORIZED = 401
PAYMENT_REQUIRED = 402
FORBIDDEN = 403
NOT_FOUND = 404
METHOD_NOT_ALLOWED = 405
NOT_ACCEPTABLE = 406
PROXY_AUTHENTICATION_REQUIRED = 407
REQUEST_TIMEOUT = 408
CONFLICT = 409
GONE = 410
LENGTH_REQUIRED = 411
PRECONDITION_FAILED = 412
REQUEST_ENTITY_TOO_LARGE = 413
REQUEST_URI_TOO_LONG = 414
UNSUPPORTED_MEDIA_TYPE = 415
REQUESTED_RANGE_NOT_SATISFIABLE = 416
EXPECTATION_FAILED = 417
UNPROCESSABLE_ENTITY = 422
LOCKED = 423
FAILED_DEPENDENCY = 424
UPGRADE_REQUIRED = 426
PRECONDITION_REQUIRED = 428
TOO_MANY_REQUESTS = 429
REQUEST_HEADER_FIELDS_TOO_LARGE = 431
# server error
INTERNAL_SERVER_ERROR = 500
NOT_IMPLEMENTED = 501
BAD_GATEWAY = 502
SERVICE_UNAVAILABLE = 503
GATEWAY_TIMEOUT = 504
HTTP_VERSION_NOT_SUPPORTED = 505
INSUFFICIENT_STORAGE = 507
NOT_EXTENDED = 510
NETWORK_AUTHENTICATION_REQUIRED = 511
# Mapping status codes to official W3C names
responses = {
100: 'Continue',
101: 'Switching Protocols',
200: 'OK',
201: 'Created',
202: 'Accepted',
203: 'Non-Authoritative Information',
204: 'No Content',
205: 'Reset Content',
206: 'Partial Content',
300: 'Multiple Choices',
301: 'Moved Permanently',
302: 'Found',
303: 'See Other',
304: 'Not Modified',
305: 'Use Proxy',
306: '(Unused)',
307: 'Temporary Redirect',
400: 'Bad Request',
401: 'Unauthorized',
402: 'Payment Required',
403: 'Forbidden',
404: 'Not Found',
405: 'Method Not Allowed',
406: 'Not Acceptable',
407: 'Proxy Authentication Required',
408: 'Request Timeout',
409: 'Conflict',
410: 'Gone',
411: 'Length Required',
412: 'Precondition Failed',
413: 'Request Entity Too Large',
414: 'Request-URI Too Long',
415: 'Unsupported Media Type',
416: 'Requested Range Not Satisfiable',
417: 'Expectation Failed',
428: 'Precondition Required',
429: 'Too Many Requests',
431: 'Request Header Fields Too Large',
500: 'Internal Server Error',
501: 'Not Implemented',
502: 'Bad Gateway',
503: 'Service Unavailable',
504: 'Gateway Timeout',
505: 'HTTP Version Not Supported',
511: 'Network Authentication Required',
}
# maximal amount of data to read at one time in _safe_read
MAXAMOUNT = 1048576
# maximal line length when calling readline().
_MAXLINE = 65536
_MAXHEADERS = 100
class HTTPMessage(email_message.Message):
# XXX The only usage of this method is in
# http.server.CGIHTTPRequestHandler. Maybe move the code there so
# that it doesn't need to be part of the public API. The API has
# never been defined so this could cause backwards compatibility
# issues.
def getallmatchingheaders(self, name):
"""Find all header lines matching a given header name.
Look through the list of headers and find all lines matching a given
header name (and their continuation lines). A list of the lines is
returned, without interpretation. If the header does not occur, an
empty list is returned. If the header occurs multiple times, all
occurrences are returned. Case is not important in the header name.
"""
name = name.lower() + ':'
n = len(name)
lst = []
hit = 0
for line in self.keys():
if line[:n].lower() == name:
hit = 1
elif not line[:1].isspace():
hit = 0
if hit:
lst.append(line)
return lst
def parse_headers(fp, _class=HTTPMessage):
"""Parses only RFC2822 headers from a file pointer.
email Parser wants to see strings rather than bytes.
But a TextIOWrapper around self.rfile would buffer too many bytes
from the stream, bytes which we later need to read as bytes.
So we read the correct bytes here, as bytes, for email Parser
to parse.
"""
headers = []
while True:
line = fp.readline(_MAXLINE + 1)
if len(line) > _MAXLINE:
raise LineTooLong("header line")
headers.append(line)
if len(headers) > _MAXHEADERS:
raise HTTPException("got more than %d headers" % _MAXHEADERS)
if line in (b'\r\n', b'\n', b''):
break
hstring = bytes(b'').join(headers).decode('iso-8859-1')
return email_parser.Parser(_class=_class).parsestr(hstring)
_strict_sentinel = object()
class HTTPResponse(io.RawIOBase):
# See RFC 2616 sec 19.6 and RFC 1945 sec 6 for details.
# The bytes from the socket object are iso-8859-1 strings.
# See RFC 2616 sec 2.2 which notes an exception for MIME-encoded
# text following RFC 2047. The basic status line parsing only
# accepts iso-8859-1.
def __init__(self, sock, debuglevel=0, strict=_strict_sentinel, method=None, url=None):
# If the response includes a content-length header, we need to
# make sure that the client doesn't read more than the
# specified number of bytes. If it does, it will block until
# the server times out and closes the connection. This will
# happen if a self.fp.read() is done (without a size) whether
# self.fp is buffered or not. So, no self.fp.read() by
# clients unless they know what they are doing.
self.fp = sock.makefile("rb")
self.debuglevel = debuglevel
if strict is not _strict_sentinel:
warnings.warn("the 'strict' argument isn't supported anymore; "
"http.client now always assumes HTTP/1.x compliant servers.",
DeprecationWarning, 2)
self._method = method
# The HTTPResponse object is returned via urllib. The clients
# of http and urllib expect different attributes for the
# headers. headers is used here and supports urllib. msg is
# provided as a backwards compatibility layer for http
# clients.
self.headers = self.msg = None
# from the Status-Line of the response
self.version = _UNKNOWN # HTTP-Version
self.status = _UNKNOWN # Status-Code
self.reason = _UNKNOWN # Reason-Phrase
self.chunked = _UNKNOWN # is "chunked" being used?
self.chunk_left = _UNKNOWN # bytes left to read in current chunk
self.length = _UNKNOWN # number of bytes left in response
self.will_close = _UNKNOWN # conn will close at end of response
def _read_status(self):
line = str(self.fp.readline(_MAXLINE + 1), "iso-8859-1")
if len(line) > _MAXLINE:
raise LineTooLong("status line")
if self.debuglevel > 0:
print("reply:", repr(line))
if not line:
# Presumably, the server closed the connection before
# sending a valid response.
raise BadStatusLine(line)
try:
version, status, reason = line.split(None, 2)
except ValueError:
try:
version, status = line.split(None, 1)
reason = ""
except ValueError:
# empty version will cause next test to fail.
version = ""
if not version.startswith("HTTP/"):
self._close_conn()
raise BadStatusLine(line)
# The status code is a three-digit number
try:
status = int(status)
if status < 100 or status > 999:
raise BadStatusLine(line)
except ValueError:
raise BadStatusLine(line)
return version, status, reason
def begin(self):
if self.headers is not None:
# we've already started reading the response
return
# read until we get a non-100 response
while True:
version, status, reason = self._read_status()
if status != CONTINUE:
break
# skip the header from the 100 response
while True:
skip = self.fp.readline(_MAXLINE + 1)
if len(skip) > _MAXLINE:
raise LineTooLong("header line")
skip = skip.strip()
if not skip:
break
if self.debuglevel > 0:
print("header:", skip)
self.code = self.status = status
self.reason = reason.strip()
if version in ("HTTP/1.0", "HTTP/0.9"):
# Some servers might still return "0.9", treat it as 1.0 anyway
self.version = 10
elif version.startswith("HTTP/1."):
self.version = 11 # use HTTP/1.1 code for HTTP/1.x where x>=1
else:
raise UnknownProtocol(version)
self.headers = self.msg = parse_headers(self.fp)
if self.debuglevel > 0:
for hdr in self.headers:
print("header:", hdr, end=" ")
# are we using the chunked-style of transfer encoding?
tr_enc = self.headers.get("transfer-encoding")
if tr_enc and tr_enc.lower() == "chunked":
self.chunked = True
self.chunk_left = None
else:
self.chunked = False
# will the connection close at the end of the response?
self.will_close = self._check_close()
# do we have a Content-Length?
# NOTE: RFC 2616, S4.4, #3 says we ignore this if tr_enc is "chunked"
self.length = None
length = self.headers.get("content-length")
# are we using the chunked-style of transfer encoding?
tr_enc = self.headers.get("transfer-encoding")
if length and not self.chunked:
try:
self.length = int(length)
except ValueError:
self.length = None
else:
if self.length < 0: # ignore nonsensical negative lengths
self.length = None
else:
self.length = None
# does the body have a fixed length? (of zero)
if (status == NO_CONTENT or status == NOT_MODIFIED or
100 <= status < 200 or # 1xx codes
self._method == "HEAD"):
self.length = 0
# if the connection remains open, and we aren't using chunked, and
# a content-length was not provided, then assume that the connection
# WILL close.
if (not self.will_close and
not self.chunked and
self.length is None):
self.will_close = True
def _check_close(self):
conn = self.headers.get("connection")
if self.version == 11:
# An HTTP/1.1 proxy is assumed to stay open unless
# explicitly closed.
conn = self.headers.get("connection")
if conn and "close" in conn.lower():
return True
return False
# Some HTTP/1.0 implementations have support for persistent
# connections, using rules different than HTTP/1.1.
# For older HTTP, Keep-Alive indicates persistent connection.
if self.headers.get("keep-alive"):
return False
# At least Akamai returns a "Connection: Keep-Alive" header,
# which was supposed to be sent by the client.
if conn and "keep-alive" in conn.lower():
return False
# Proxy-Connection is a netscape hack.
pconn = self.headers.get("proxy-connection")
if pconn and "keep-alive" in pconn.lower():
return False
# otherwise, assume it will close
return True
def _close_conn(self):
fp = self.fp
self.fp = None
fp.close()
def close(self):
super().close() # set "closed" flag
if self.fp:
self._close_conn()
# These implementations are for the benefit of io.BufferedReader.
# XXX This class should probably be revised to act more like
# the "raw stream" that BufferedReader expects.
def flush(self):
super().flush()
if self.fp:
self.fp.flush()
def readable(self):
return True
# End of "raw stream" methods
def isclosed(self):
"""True if the connection is closed."""
# NOTE: it is possible that we will not ever call self.close(). This
# case occurs when will_close is TRUE, length is None, and we
# read up to the last byte, but NOT past it.
#
# IMPLIES: if will_close is FALSE, then self.close() will ALWAYS be
# called, meaning self.isclosed() is meaningful.
return self.fp is None
def read(self, amt=None):
if self.fp is None:
return bytes(b"")
if self._method == "HEAD":
self._close_conn()
return bytes(b"")
if amt is not None:
# Amount is given, so call base class version
# (which is implemented in terms of self.readinto)
return bytes(super(HTTPResponse, self).read(amt))
else:
# Amount is not given (unbounded read) so we must check self.length
# and self.chunked
if self.chunked:
return self._readall_chunked()
if self.length is None:
s = self.fp.read()
else:
try:
s = self._safe_read(self.length)
except IncompleteRead:
self._close_conn()
raise
self.length = 0
self._close_conn() # we read everything
return bytes(s)
def readinto(self, b):
if self.fp is None:
return 0
if self._method == "HEAD":
self._close_conn()
return 0
if self.chunked:
return self._readinto_chunked(b)
if self.length is not None:
if len(b) > self.length:
# clip the read to the "end of response"
b = memoryview(b)[0:self.length]
# we do not use _safe_read() here because this may be a .will_close
# connection, and the user is reading more bytes than will be provided
# (for example, reading in 1k chunks)
if PY2:
data = self.fp.read(len(b))
n = len(data)
b[:n] = data
else:
n = self.fp.readinto(b)
if not n and b:
# Ideally, we would raise IncompleteRead if the content-length
# wasn't satisfied, but it might break compatibility.
self._close_conn()
elif self.length is not None:
self.length -= n
if not self.length:
self._close_conn()
return n
def _read_next_chunk_size(self):
# Read the next chunk size from the file
line = self.fp.readline(_MAXLINE + 1)
if len(line) > _MAXLINE:
raise LineTooLong("chunk size")
i = line.find(b";")
if i >= 0:
line = line[:i] # strip chunk-extensions
try:
return int(line, 16)
except ValueError:
# close the connection as protocol synchronisation is
# probably lost
self._close_conn()
raise
def _read_and_discard_trailer(self):
# read and discard trailer up to the CRLF terminator
### note: we shouldn't have any trailers!
while True:
line = self.fp.readline(_MAXLINE + 1)
if len(line) > _MAXLINE:
raise LineTooLong("trailer line")
if not line:
# a vanishingly small number of sites EOF without
# sending the trailer
break
if line in (b'\r\n', b'\n', b''):
break
def _readall_chunked(self):
assert self.chunked != _UNKNOWN
chunk_left = self.chunk_left
value = []
while True:
if chunk_left is None:
try:
chunk_left = self._read_next_chunk_size()
if chunk_left == 0:
break
except ValueError:
raise IncompleteRead(bytes(b'').join(value))
value.append(self._safe_read(chunk_left))
# we read the whole chunk, get another
self._safe_read(2) # toss the CRLF at the end of the chunk
chunk_left = None
self._read_and_discard_trailer()
# we read everything; close the "file"
self._close_conn()
return bytes(b'').join(value)
def _readinto_chunked(self, b):
assert self.chunked != _UNKNOWN
chunk_left = self.chunk_left
total_bytes = 0
mvb = memoryview(b)
while True:
if chunk_left is None:
try:
chunk_left = self._read_next_chunk_size()
if chunk_left == 0:
break
except ValueError:
raise IncompleteRead(bytes(b[0:total_bytes]))
if len(mvb) < chunk_left:
n = self._safe_readinto(mvb)
self.chunk_left = chunk_left - n
return total_bytes + n
elif len(mvb) == chunk_left:
n = self._safe_readinto(mvb)
self._safe_read(2) # toss the CRLF at the end of the chunk
self.chunk_left = None
return total_bytes + n
else:
temp_mvb = mvb[0:chunk_left]
n = self._safe_readinto(temp_mvb)
mvb = mvb[n:]
total_bytes += n
# we read the whole chunk, get another
self._safe_read(2) # toss the CRLF at the end of the chunk
chunk_left = None
self._read_and_discard_trailer()
# we read everything; close the "file"
self._close_conn()
return total_bytes
def _safe_read(self, amt):
"""Read the number of bytes requested, compensating for partial reads.
Normally, we have a blocking socket, but a read() can be interrupted
by a signal (resulting in a partial read).
Note that we cannot distinguish between EOF and an interrupt when zero
bytes have been read. IncompleteRead() will be raised in this
situation.
This function should be used when <amt> bytes "should" be present for
reading. If the bytes are truly not available (due to EOF), then the
IncompleteRead exception can be used to detect the problem.
"""
s = []
while amt > 0:
chunk = self.fp.read(min(amt, MAXAMOUNT))
if not chunk:
raise IncompleteRead(bytes(b'').join(s), amt)
s.append(chunk)
amt -= len(chunk)
return bytes(b"").join(s)
def _safe_readinto(self, b):
"""Same as _safe_read, but for reading into a buffer."""
total_bytes = 0
mvb = memoryview(b)
while total_bytes < len(b):
if MAXAMOUNT < len(mvb):
temp_mvb = mvb[0:MAXAMOUNT]
if PY2:
data = self.fp.read(len(temp_mvb))
n = len(data)
temp_mvb[:n] = data
else:
n = self.fp.readinto(temp_mvb)
else:
if PY2:
data = self.fp.read(len(mvb))
n = len(data)
mvb[:n] = data
else:
n = self.fp.readinto(mvb)
if not n:
raise IncompleteRead(bytes(mvb[0:total_bytes]), len(b))
mvb = mvb[n:]
total_bytes += n
return total_bytes
def fileno(self):
return self.fp.fileno()
def getheader(self, name, default=None):
if self.headers is None:
raise ResponseNotReady()
headers = self.headers.get_all(name) or default
if isinstance(headers, str) or not hasattr(headers, '__iter__'):
return headers
else:
return ', '.join(headers)
def getheaders(self):
"""Return list of (header, value) tuples."""
if self.headers is None:
raise ResponseNotReady()
return list(self.headers.items())
# We override IOBase.__iter__ so that it doesn't check for closed-ness
def __iter__(self):
return self
# For compatibility with old-style urllib responses.
def info(self):
return self.headers
def geturl(self):
return self.url
def getcode(self):
return self.status
class HTTPConnection(object):
_http_vsn = 11
_http_vsn_str = 'HTTP/1.1'
response_class = HTTPResponse
default_port = HTTP_PORT
auto_open = 1
debuglevel = 0
def __init__(self, host, port=None, strict=_strict_sentinel,
timeout=socket._GLOBAL_DEFAULT_TIMEOUT, source_address=None):
if strict is not _strict_sentinel:
warnings.warn("the 'strict' argument isn't supported anymore; "
"http.client now always assumes HTTP/1.x compliant servers.",
DeprecationWarning, 2)
self.timeout = timeout
self.source_address = source_address
self.sock = None
self._buffer = []
self.__response = None
self.__state = _CS_IDLE
self._method = None
self._tunnel_host = None
self._tunnel_port = None
self._tunnel_headers = {}
self._set_hostport(host, port)
def set_tunnel(self, host, port=None, headers=None):
""" Sets up the host and the port for the HTTP CONNECT Tunnelling.
The headers argument should be a mapping of extra HTTP headers
to send with the CONNECT request.
"""
self._tunnel_host = host
self._tunnel_port = port
if headers:
self._tunnel_headers = headers
else:
self._tunnel_headers.clear()
def _set_hostport(self, host, port):
if port is None:
i = host.rfind(':')
j = host.rfind(']') # ipv6 addresses have [...]
if i > j:
try:
port = int(host[i+1:])
except ValueError:
if host[i+1:] == "": # http://foo.com:/ == http://foo.com/
port = self.default_port
else:
raise InvalidURL("nonnumeric port: '%s'" % host[i+1:])
host = host[:i]
else:
port = self.default_port
if host and host[0] == '[' and host[-1] == ']':
host = host[1:-1]
self.host = host
self.port = port
def set_debuglevel(self, level):
self.debuglevel = level
def _tunnel(self):
self._set_hostport(self._tunnel_host, self._tunnel_port)
connect_str = "CONNECT %s:%d HTTP/1.0\r\n" % (self.host, self.port)
connect_bytes = connect_str.encode("ascii")
self.send(connect_bytes)
for header, value in self._tunnel_headers.items():
header_str = "%s: %s\r\n" % (header, value)
header_bytes = header_str.encode("latin-1")
self.send(header_bytes)
self.send(bytes(b'\r\n'))
response = self.response_class(self.sock, method=self._method)
(version, code, message) = response._read_status()
if code != 200:
self.close()
raise socket.error("Tunnel connection failed: %d %s" % (code,
message.strip()))
while True:
line = response.fp.readline(_MAXLINE + 1)
if len(line) > _MAXLINE:
raise LineTooLong("header line")
if not line:
# for sites which EOF without sending a trailer
break
if line in (b'\r\n', b'\n', b''):
break
def connect(self):
"""Connect to the host and port specified in __init__."""
self.sock = socket_create_connection((self.host,self.port),
self.timeout, self.source_address)
if self._tunnel_host:
self._tunnel()
def close(self):
"""Close the connection to the HTTP server."""
if self.sock:
self.sock.close() # close it manually... there may be other refs
self.sock = None
if self.__response:
self.__response.close()
self.__response = None
self.__state = _CS_IDLE
def send(self, data):
"""Send `data' to the server.
``data`` can be a string object, a bytes object, an array object, a
file-like object that supports a .read() method, or an iterable object.
"""
if self.sock is None:
if self.auto_open:
self.connect()
else:
raise NotConnected()
if self.debuglevel > 0:
print("send:", repr(data))
blocksize = 8192
# Python 2.7 array objects have a read method which is incompatible
# with the 2-arg calling syntax below.
if hasattr(data, "read") and not isinstance(data, array):
if self.debuglevel > 0:
print("sendIng a read()able")
encode = False
try:
mode = data.mode
except AttributeError:
# io.BytesIO and other file-like objects don't have a `mode`
# attribute.
pass
else:
if "b" not in mode:
encode = True
if self.debuglevel > 0:
print("encoding file using iso-8859-1")
while 1:
datablock = data.read(blocksize)
if not datablock:
break
if encode:
datablock = datablock.encode("iso-8859-1")
self.sock.sendall(datablock)
return
try:
self.sock.sendall(data)
except TypeError:
if isinstance(data, collections.Iterable):
for d in data:
self.sock.sendall(d)
else:
raise TypeError("data should be a bytes-like object "
"or an iterable, got %r" % type(data))
def _output(self, s):
"""Add a line of output to the current request buffer.
Assumes that the line does *not* end with \\r\\n.
"""
self._buffer.append(s)
def _send_output(self, message_body=None):
"""Send the currently buffered request and clear the buffer.
Appends an extra \\r\\n to the buffer.
A message_body may be specified, to be appended to the request.
"""
self._buffer.extend((bytes(b""), bytes(b"")))
msg = bytes(b"\r\n").join(self._buffer)
del self._buffer[:]
# If msg and message_body are sent in a single send() call,
# it will avoid performance problems caused by the interaction
# between delayed ack and the Nagle algorithm.
if isinstance(message_body, bytes):
msg += message_body
message_body = None
self.send(msg)
if message_body is not None:
# message_body was not a string (i.e. it is a file), and
# we must run the risk of Nagle.
self.send(message_body)
def putrequest(self, method, url, skip_host=0, skip_accept_encoding=0):
"""Send a request to the server.
`method' specifies an HTTP request method, e.g. 'GET'.
`url' specifies the object being requested, e.g. '/index.html'.
`skip_host' if True does not add automatically a 'Host:' header
`skip_accept_encoding' if True does not add automatically an
'Accept-Encoding:' header
"""
# if a prior response has been completed, then forget about it.
if self.__response and self.__response.isclosed():
self.__response = None
# in certain cases, we cannot issue another request on this connection.
# this occurs when:
# 1) we are in the process of sending a request. (_CS_REQ_STARTED)
# 2) a response to a previous request has signalled that it is going
# to close the connection upon completion.
# 3) the headers for the previous response have not been read, thus
# we cannot determine whether point (2) is true. (_CS_REQ_SENT)
#
# if there is no prior response, then we can request at will.
#
# if point (2) is true, then we will have passed the socket to the
# response (effectively meaning, "there is no prior response"), and
# will open a new one when a new request is made.
#
# Note: if a prior response exists, then we *can* start a new request.
# We are not allowed to begin fetching the response to this new
# request, however, until that prior response is complete.
#
if self.__state == _CS_IDLE:
self.__state = _CS_REQ_STARTED
else:
raise CannotSendRequest(self.__state)
# Save the method we use, we need it later in the response phase
self._method = method
if not url:
url = '/'
request = '%s %s %s' % (method, url, self._http_vsn_str)
# Non-ASCII characters should have been eliminated earlier
self._output(request.encode('ascii'))
if self._http_vsn == 11:
# Issue some standard headers for better HTTP/1.1 compliance
if not skip_host:
# this header is issued *only* for HTTP/1.1
# connections. more specifically, this means it is
# only issued when the client uses the new
# HTTPConnection() class. backwards-compat clients
# will be using HTTP/1.0 and those clients may be
# issuing this header themselves. we should NOT issue
# it twice; some web servers (such as Apache) barf
# when they see two Host: headers
                # If we need a non-standard port, include it in the
                # header. If the request is going through a proxy,
                # use the host of the actual URL, not the host of the
                # proxy.
netloc = ''
if url.startswith('http'):
nil, netloc, nil, nil, nil = urlsplit(url)
if netloc:
try:
netloc_enc = netloc.encode("ascii")
except UnicodeEncodeError:
netloc_enc = netloc.encode("idna")
self.putheader('Host', netloc_enc)
else:
try:
host_enc = self.host.encode("ascii")
except UnicodeEncodeError:
host_enc = self.host.encode("idna")
                    # As per RFC 2732, IPv6 address should be wrapped with []
                    # when used as Host header
if self.host.find(':') >= 0:
host_enc = bytes(b'[' + host_enc + b']')
if self.port == self.default_port:
self.putheader('Host', host_enc)
else:
host_enc = host_enc.decode("ascii")
self.putheader('Host', "%s:%s" % (host_enc, self.port))
# note: we are assuming that clients will not attempt to set these
# headers since *this* library must deal with the
# consequences. this also means that when the supporting
# libraries are updated to recognize other forms, then this
# code should be changed (removed or updated).
# we only want a Content-Encoding of "identity" since we don't
# support encodings such as x-gzip or x-deflate.
if not skip_accept_encoding:
self.putheader('Accept-Encoding', 'identity')
# we can accept "chunked" Transfer-Encodings, but no others
# NOTE: no TE header implies *only* "chunked"
#self.putheader('TE', 'chunked')
# if TE is supplied in the header, then it must appear in a
# Connection header.
#self.putheader('Connection', 'TE')
else:
# For HTTP/1.0, the server will assume "not chunked"
pass
def putheader(self, header, *values):
"""Send a request header line to the server.
For example: h.putheader('Accept', 'text/html')
"""
if self.__state != _CS_REQ_STARTED:
raise CannotSendHeader()
if hasattr(header, 'encode'):
header = header.encode('ascii')
values = list(values)
for i, one_value in enumerate(values):
if hasattr(one_value, 'encode'):
values[i] = one_value.encode('latin-1')
elif isinstance(one_value, int):
values[i] = str(one_value).encode('ascii')
value = bytes(b'\r\n\t').join(values)
header = header + bytes(b': ') + value
self._output(header)
def endheaders(self, message_body=None):
"""Indicate that the last header line has been sent to the server.
This method sends the request to the server. The optional message_body
argument can be used to pass a message body associated with the
request. The message body will be sent in the same packet as the
message headers if it is a string, otherwise it is sent as a separate
packet.
"""
if self.__state == _CS_REQ_STARTED:
self.__state = _CS_REQ_SENT
else:
raise CannotSendHeader()
self._send_output(message_body)
def request(self, method, url, body=None, headers={}):
"""Send a complete request to the server."""
self._send_request(method, url, body, headers)
def _set_content_length(self, body):
# Set the content-length based on the body.
thelen = None
try:
thelen = str(len(body))
except TypeError as te:
# If this is a file-like object, try to
# fstat its file descriptor
try:
thelen = str(os.fstat(body.fileno()).st_size)
except (AttributeError, OSError):
# Don't send a length if this failed
if self.debuglevel > 0: print("Cannot stat!!")
if thelen is not None:
self.putheader('Content-Length', thelen)
def _send_request(self, method, url, body, headers):
# Honor explicitly requested Host: and Accept-Encoding: headers.
header_names = dict.fromkeys([k.lower() for k in headers])
skips = {}
if 'host' in header_names:
skips['skip_host'] = 1
if 'accept-encoding' in header_names:
skips['skip_accept_encoding'] = 1
self.putrequest(method, url, **skips)
if body is not None and ('content-length' not in header_names):
self._set_content_length(body)
for hdr, value in headers.items():
self.putheader(hdr, value)
if isinstance(body, str):
            # RFC 2616 Section 3.7.1 says that text media types default
            # to a charset of iso-8859-1.
body = body.encode('iso-8859-1')
self.endheaders(body)
def getresponse(self):
"""Get the response from the server.
If the HTTPConnection is in the correct state, returns an
instance of HTTPResponse or of whatever object is returned by
        the response_class variable.
If a request has not been sent or if a previous response has
        not been handled, ResponseNotReady is raised. If the HTTP
response indicates that the connection should be closed, then
it will be closed before the response is returned. When the
connection is closed, the underlying socket is closed.
"""
# if a prior response has been completed, then forget about it.
if self.__response and self.__response.isclosed():
self.__response = None
# if a prior response exists, then it must be completed (otherwise, we
# cannot read this response's header to determine the connection-close
# behavior)
#
# note: if a prior response existed, but was connection-close, then the
# socket and response were made independent of this HTTPConnection
# object since a new request requires that we open a whole new
# connection
#
# this means the prior response had one of two states:
# 1) will_close: this connection was reset and the prior socket and
# response operate independently
# 2) persistent: the response was retained and we await its
# isclosed() status to become true.
#
if self.__state != _CS_REQ_SENT or self.__response:
raise ResponseNotReady(self.__state)
if self.debuglevel > 0:
response = self.response_class(self.sock, self.debuglevel,
method=self._method)
else:
response = self.response_class(self.sock, method=self._method)
response.begin()
assert response.will_close != _UNKNOWN
self.__state = _CS_IDLE
if response.will_close:
# this effectively passes the connection to the response
self.close()
else:
# remember this, so we can tell when it is complete
self.__response = response
return response
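# Typical use of the connection class defined above (illustrative sketch only;
# the host below is a placeholder):
#
#     conn = HTTPConnection("www.example.com")
#     conn.request("GET", "/")
#     resp = conn.getresponse()
#     print(resp.status, resp.reason)
#     conn.close()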
try:
import ssl
from ssl import SSLContext
except ImportError:
pass
else:
class HTTPSConnection(HTTPConnection):
"This class allows communication via SSL."
default_port = HTTPS_PORT
# XXX Should key_file and cert_file be deprecated in favour of context?
def __init__(self, host, port=None, key_file=None, cert_file=None,
strict=_strict_sentinel, timeout=socket._GLOBAL_DEFAULT_TIMEOUT,
source_address=None, **_3to2kwargs):
if 'check_hostname' in _3to2kwargs: check_hostname = _3to2kwargs['check_hostname']; del _3to2kwargs['check_hostname']
else: check_hostname = None
if 'context' in _3to2kwargs: context = _3to2kwargs['context']; del _3to2kwargs['context']
else: context = None
super(HTTPSConnection, self).__init__(host, port, strict, timeout,
source_address)
self.key_file = key_file
self.cert_file = cert_file
if context is None:
# Some reasonable defaults
context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
context.options |= ssl.OP_NO_SSLv2
will_verify = context.verify_mode != ssl.CERT_NONE
if check_hostname is None:
check_hostname = will_verify
elif check_hostname and not will_verify:
raise ValueError("check_hostname needs a SSL context with "
"either CERT_OPTIONAL or CERT_REQUIRED")
if key_file or cert_file:
context.load_cert_chain(cert_file, key_file)
self._context = context
self._check_hostname = check_hostname
def connect(self):
"Connect to a host on a given (SSL) port."
sock = socket_create_connection((self.host, self.port),
self.timeout, self.source_address)
if self._tunnel_host:
self.sock = sock
self._tunnel()
server_hostname = self.host if ssl.HAS_SNI else None
self.sock = self._context.wrap_socket(sock,
server_hostname=server_hostname)
try:
if self._check_hostname:
ssl.match_hostname(self.sock.getpeercert(), self.host)
except Exception:
self.sock.shutdown(socket.SHUT_RDWR)
self.sock.close()
raise
__all__.append("HTTPSConnection")
# ######################################
# # We use the old HTTPSConnection class from Py2.7, because ssl.SSLContext
# # doesn't exist in the Py2.7 stdlib
# class HTTPSConnection(HTTPConnection):
# "This class allows communication via SSL."
# default_port = HTTPS_PORT
# def __init__(self, host, port=None, key_file=None, cert_file=None,
# strict=None, timeout=socket._GLOBAL_DEFAULT_TIMEOUT,
# source_address=None):
# HTTPConnection.__init__(self, host, port, strict, timeout,
# source_address)
# self.key_file = key_file
# self.cert_file = cert_file
# def connect(self):
# "Connect to a host on a given (SSL) port."
# sock = socket_create_connection((self.host, self.port),
# self.timeout, self.source_address)
# if self._tunnel_host:
# self.sock = sock
# self._tunnel()
# self.sock = ssl.wrap_socket(sock, self.key_file, self.cert_file)
# __all__.append("HTTPSConnection")
# ######################################
class HTTPException(Exception):
# Subclasses that define an __init__ must call Exception.__init__
# or define self.args. Otherwise, str() will fail.
pass
class NotConnected(HTTPException):
pass
class InvalidURL(HTTPException):
pass
class UnknownProtocol(HTTPException):
def __init__(self, version):
self.args = version,
self.version = version
class UnknownTransferEncoding(HTTPException):
pass
class UnimplementedFileMode(HTTPException):
pass
class IncompleteRead(HTTPException):
def __init__(self, partial, expected=None):
self.args = partial,
self.partial = partial
self.expected = expected
def __repr__(self):
if self.expected is not None:
e = ', %i more expected' % self.expected
else:
e = ''
return 'IncompleteRead(%i bytes read%s)' % (len(self.partial), e)
def __str__(self):
return repr(self)
class ImproperConnectionState(HTTPException):
pass
class CannotSendRequest(ImproperConnectionState):
pass
class CannotSendHeader(ImproperConnectionState):
pass
class ResponseNotReady(ImproperConnectionState):
pass
class BadStatusLine(HTTPException):
def __init__(self, line):
if not line:
line = repr(line)
self.args = line,
self.line = line
class LineTooLong(HTTPException):
def __init__(self, line_type):
HTTPException.__init__(self, "got more than %d bytes when reading %s"
% (_MAXLINE, line_type))
# for backwards compatibility
error = HTTPException
|
the-stack_0_7977 | from scipy.spatial.transform.rotation import Rotation
from alitra import Euler, Quaternion
def quaternion_to_euler(
quaternion: Quaternion, sequence: str = "ZYX", degrees: bool = False
) -> Euler:
"""
Transform a quaternion into Euler angles.
:param quaternion: A Quaternion object.
:param sequence: Rotation sequence for the Euler angles.
:param degrees: Set to true if the resulting Euler angles should be in degrees. Default is radians.
:return: Euler object.
"""
rotation_object: Rotation = Rotation.from_quat(quaternion.as_np_array())
euler: Euler = Euler.from_array(
rotation_object.as_euler(sequence, degrees=degrees), frame="robot"
)
return euler
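# Illustrative usage sketch (an assumption, not taken from alitra's docs): it
# presumes Quaternion offers the same from_array constructor that Euler shows
# above, so an identity quaternion should map to all-zero Euler angles.
#
#     import numpy as np
#     identity = Quaternion.from_array(np.array([0.0, 0.0, 0.0, 1.0]), frame="robot")
#     quaternion_to_euler(identity, sequence="ZYX", degrees=True)
#     # -> an Euler whose three angles are all 0.0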
|
the-stack_0_7978 |
## Here we use logistic regression to recognise which digit a scanned handwritten image shows.
from sklearn import datasets
from sklearn.linear_model import LogisticRegression
## Load the data
digits = datasets.load_digits()
X_digits = digits.data
y_digits = digits.target
n_samples = len(X_digits)
## Split into training and test sets (slice indices must be integers)
split = int(0.9 * n_samples)
X_train = X_digits[:split]
y_train = y_digits[:split]
X_test = X_digits[split:]
y_test = y_digits[split:]
model = LogisticRegression()
## Train the model
model.fit(X_train, y_train)
## Make predictions
prediction = model.predict(X_test)
score = model.score(X_test, y_test)
print(score)
|
the-stack_0_7980 | from __future__ import division
import chainer
import chainer.functions as F
from chainercv.links import Conv2DBNActiv
from chainercv.links import SeparableConv2DBNActiv
class SeparableASPP(chainer.Chain):
"""Atrous Spatial Pyramid Pooling with Separable Convolution.
           average pooling with FC layer
           1x1 Convolution
    in --> Separable Convolution(k=12) --> concat --> 1x1 Convolution
           Separable Convolution(k=24)
           Separable Convolution(k=36)
Args:
in_channels (int): Number of channels of input arrays.
out_channels (int): Number of channels of output arrays.
        dilate_list (tuple of ints): Tuple of dilation factors.
            The length of this tuple must be 3.
        bn_kwargs (dict): Keyword arguments passed to initialize the batch
normalization layers of :class:`chainercv.links.Conv2DBNActiv` and
:class:`chainercv.links.SeparableConv2DBNActiv`.
"""
def __init__(self, in_channels, out_channels,
dilate_list=(12, 24, 36), bn_kwargs={}):
super(SeparableASPP, self).__init__()
with self.init_scope():
self.image_pooling_conv = Conv2DBNActiv(
in_channels, out_channels, 1, bn_kwargs=bn_kwargs)
self.conv1x1 = Conv2DBNActiv(
in_channels, out_channels, 1, bn_kwargs=bn_kwargs)
self.atrous1 = SeparableConv2DBNActiv(
in_channels, out_channels, 3, 1,
dilate_list[0], dilate_list[0], nobias=True,
dw_activ=F.relu, pw_activ=F.relu, bn_kwargs=bn_kwargs)
self.atrous2 = SeparableConv2DBNActiv(
in_channels, out_channels, 3, 1,
dilate_list[1], dilate_list[1], nobias=True,
dw_activ=F.relu, pw_activ=F.relu, bn_kwargs=bn_kwargs)
self.atrous3 = SeparableConv2DBNActiv(
in_channels, out_channels, 3, 1,
dilate_list[2], dilate_list[2], nobias=True,
dw_activ=F.relu, pw_activ=F.relu, bn_kwargs=bn_kwargs)
self.proj = Conv2DBNActiv(
out_channels * 5, out_channels, 1, bn_kwargs=bn_kwargs)
def image_pooling(self, x):
_, _, H, W = x.shape
x = F.average(x, axis=(2, 3), keepdims=True)
x = self.image_pooling_conv(x)
B, C, _, _ = x.shape
x = F.broadcast_to(x, (B, C, H, W))
return x
def forward(self, x):
h = []
h.append(self.image_pooling(x))
h.append(self.conv1x1(x))
h.append(self.atrous1(x))
h.append(self.atrous2(x))
h.append(self.atrous3(x))
h = F.concat(h, axis=1)
h = self.proj(h)
h = F.dropout(h)
return h
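if __name__ == '__main__':
    # Hedged, illustrative smoke test (not part of chainercv). The channel
    # counts and the 33x33 spatial size are arbitrary; it only checks that a
    # forward pass runs and that the spatial resolution is preserved.
    import numpy as np
    model = SeparableASPP(in_channels=64, out_channels=128)
    x = np.zeros((1, 64, 33, 33), dtype=np.float32)
    with chainer.using_config('train', False):
        y = model(x)
    assert y.shape == (1, 128, 33, 33)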
|
the-stack_0_7981 | from setuptools import setup
import sys
if sys.version_info < (3, 5):
sys.exit('Python < 3.5 is not supported')
version = '0.74'
setup(
name='steampy',
packages=['steampy', 'test', 'examples', ],
version=version,
description='A Steam lib for trade automation',
author='Michał Bukowski',
author_email='[email protected]',
license='MIT',
url='https://github.com/bukson/steampy',
download_url='https://github.com/bukson/steampy/tarball/' + version,
keywords=['steam', 'trade', ],
classifiers=[],
install_requires=[
"requests",
"beautifulsoup4",
"rsa"
],
package_data = {
'steampy': ['py.typed'],
},
)
|
the-stack_0_7982 | """Moses tests."""
from typing import ClassVar, Type
import pytest
from gt4sd.algorithms.conditional_generation.guacamol import (
AaeGenerator,
MosesGenerator,
OrganGenerator,
VaeGenerator,
)
from gt4sd.algorithms.core import AlgorithmConfiguration
from gt4sd.algorithms.registry import ApplicationsRegistry
def get_classvar_type(class_var):
"""Extract type from ClassVar type annotation: `ClassVar[T]] -> T`."""
return class_var.__args__[0]
@pytest.mark.parametrize(
"config_class, algorithm_type, domain, algorithm_name",
[
(
AaeGenerator,
"conditional_generation",
"materials",
MosesGenerator.__name__,
),
(
VaeGenerator,
"conditional_generation",
"materials",
MosesGenerator.__name__,
),
(
OrganGenerator,
"conditional_generation",
"materials",
MosesGenerator.__name__,
),
],
)
def test_config_class(
config_class: Type[AlgorithmConfiguration],
algorithm_type: str,
domain: str,
algorithm_name: str,
):
assert config_class.algorithm_type == algorithm_type
assert config_class.domain == domain
assert config_class.algorithm_name == algorithm_name
for keyword, type_annotation in config_class.__annotations__.items():
if keyword in ("algorithm_type", "domain", "algorithm_name"):
assert type_annotation.__origin__ is ClassVar # type: ignore
assert str == get_classvar_type(type_annotation)
@pytest.mark.parametrize(
"config_class",
[(AaeGenerator), (VaeGenerator), (OrganGenerator)],
)
def test_config_instance(config_class: Type[AlgorithmConfiguration]):
config = config_class() # type:ignore
assert config.algorithm_application == config_class.__name__
@pytest.mark.parametrize(
"config_class",
[(AaeGenerator), (VaeGenerator), (OrganGenerator)],
)
def test_available_versions(config_class: Type[AlgorithmConfiguration]):
versions = config_class.list_versions()
assert "v0" in versions
@pytest.mark.parametrize(
"config, algorithm",
[
(AaeGenerator, MosesGenerator),
(VaeGenerator, MosesGenerator),
(OrganGenerator, MosesGenerator),
],
)
def test_generation_via_import(config, algorithm):
config = config()
algorithm = algorithm(configuration=config, target="")
items = list(algorithm.sample(2))
assert len(items) == 2
@pytest.mark.parametrize(
"algorithm_application, algorithm_type, domain, algorithm_name",
[
(
AaeGenerator.__name__,
"conditional_generation",
"materials",
MosesGenerator.__name__,
),
(
VaeGenerator.__name__,
"conditional_generation",
"materials",
MosesGenerator.__name__,
),
(
OrganGenerator.__name__,
"conditional_generation",
"materials",
MosesGenerator.__name__,
),
],
)
def test_generation_via_registry(
algorithm_type, domain, algorithm_name, algorithm_application
):
algorithm = ApplicationsRegistry.get_application_instance(
algorithm_type=algorithm_type,
domain=domain,
algorithm_name=algorithm_name,
algorithm_application=algorithm_application,
)
items = list(algorithm.sample(5))
assert len(items) == 5
|
the-stack_0_7983 | """
Copyright (c) 2019 Imperial College London.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
import torch
import torch.nn as nn
from . import net_utils
class _Residual_Block(nn.Module):
def __init__(self, num_chans=64):
super(_Residual_Block, self).__init__()
bias = True
#res1
self.conv1 = nn.Conv2d(num_chans, num_chans, kernel_size=3, stride=1, padding=1, bias=bias)
self.relu2 = nn.PReLU()
self.conv3 = nn.Conv2d(num_chans, num_chans, kernel_size=3, stride=1, padding=1, bias=bias)
self.relu4 = nn.PReLU()
#res1
#concat1
self.conv5 = nn.Conv2d(num_chans, num_chans * 2, kernel_size=3, stride=2, padding=1, bias=bias)
self.relu6 = nn.PReLU()
#res2
self.conv7 = nn.Conv2d(num_chans * 2, num_chans * 2, kernel_size=3, stride=1, padding=1, bias=bias)
self.relu8 = nn.PReLU()
#res2
#concat2
self.conv9 = nn.Conv2d(num_chans * 2, num_chans * 4, kernel_size=3, stride=2, padding=1, bias=bias)
self.relu10 = nn.PReLU()
#res3
self.conv11 = nn.Conv2d(num_chans * 4, num_chans * 4, kernel_size=3, stride=1, padding=1, bias=bias)
self.relu12 = nn.PReLU()
#res3
self.conv13 = nn.Conv2d(num_chans * 4, num_chans * 8, kernel_size=1, stride=1, padding=0, bias=bias)
self.up14 = nn.PixelShuffle(2)
#concat2
self.conv15 = nn.Conv2d(num_chans * 4, num_chans * 2, kernel_size=1, stride=1, padding=0, bias=bias)
#res4
self.conv16 = nn.Conv2d(num_chans * 2, num_chans * 2, kernel_size=3, stride=1, padding=1, bias=bias)
self.relu17 = nn.PReLU()
#res4
self.conv18 = nn.Conv2d(num_chans * 2, num_chans * 4, kernel_size=1, stride=1, padding=0, bias=bias)
self.up19 = nn.PixelShuffle(2)
#concat1
self.conv20 = nn.Conv2d(num_chans * 2, num_chans, kernel_size=1, stride=1, padding=0, bias=bias)
#res5
self.conv21 = nn.Conv2d(num_chans, num_chans, kernel_size=3, stride=1, padding=1, bias=bias)
self.relu22 = nn.PReLU()
self.conv23 = nn.Conv2d(num_chans, num_chans, kernel_size=3, stride=1, padding=1, bias=bias)
self.relu24 = nn.PReLU()
#res5
self.conv25 = nn.Conv2d(num_chans, num_chans, kernel_size=3, stride=1, padding=1, bias=bias)
def forward(self, x):
res1 = x
out = self.relu4(self.conv3(self.relu2(self.conv1(x))))
out = torch.add(res1, out)
cat1 = out
out = self.relu6(self.conv5(out))
res2 = out
out = self.relu8(self.conv7(out))
out = torch.add(res2, out)
cat2 = out
out = self.relu10(self.conv9(out))
res3 = out
out = self.relu12(self.conv11(out))
out = torch.add(res3, out)
out = self.up14(self.conv13(out))
out = torch.cat([out, cat2], 1)
out = self.conv15(out)
res4 = out
out = self.relu17(self.conv16(out))
out = torch.add(res4, out)
out = self.up19(self.conv18(out))
out = torch.cat([out, cat1], 1)
out = self.conv20(out)
res5 = out
out = self.relu24(self.conv23(self.relu22(self.conv21(out))))
out = torch.add(res5, out)
out = self.conv25(out)
out = torch.add(out, res1)
return out
class Recon_Block(nn.Module):
def __init__(self, num_chans=64):
super(Recon_Block, self).__init__()
bias=True
self.conv1 = nn.Conv2d(num_chans, num_chans, kernel_size=3, stride=1, padding=1, bias=bias)
self.relu2 = nn.PReLU()
self.conv3 = nn.Conv2d(num_chans, num_chans, kernel_size=3, stride=1, padding=1, bias=bias)
self.relu4 = nn.PReLU()
self.conv5 = nn.Conv2d(num_chans, num_chans, kernel_size=3, stride=1, padding=1, bias=bias)
self.relu6= nn.PReLU()
self.conv7 = nn.Conv2d(num_chans, num_chans, kernel_size=3, stride=1, padding=1, bias=bias)
self.relu8 = nn.PReLU()
self.conv9 = nn.Conv2d(num_chans, num_chans, kernel_size=3, stride=1, padding=1, bias=bias)
self.relu10 = nn.PReLU()
self.conv11 = nn.Conv2d(num_chans, num_chans, kernel_size=3, stride=1, padding=1, bias=bias)
self.relu12 = nn.PReLU()
self.conv13 = nn.Conv2d(num_chans, num_chans, kernel_size=3, stride=1, padding=1, bias=bias)
self.relu14 = nn.PReLU()
self.conv15 = nn.Conv2d(num_chans, num_chans, kernel_size=3, stride=1, padding=1, bias=bias)
self.relu16 = nn.PReLU()
self.conv17 = nn.Conv2d(num_chans, num_chans, kernel_size=3, stride=1, padding=1, bias=bias)
def forward(self, x):
res1 = x
output = self.relu4(self.conv3(self.relu2(self.conv1(x))))
output = torch.add(output, res1)
res2 = output
output = self.relu8(self.conv7(self.relu6(self.conv5(output))))
output = torch.add(output, res2)
res3 = output
output = self.relu12(self.conv11(self.relu10(self.conv9(output))))
output = torch.add(output, res3)
res4 = output
output = self.relu16(self.conv15(self.relu14(self.conv13(output))))
output = torch.add(output, res4)
output = self.conv17(output)
output = torch.add(output, res1)
return output
class DIDN(nn.Module):
"""
Deep Iterative Down-Up Network, NTIRE denoising challenge winning entry
    Source: http://openaccess.thecvf.com/content_CVPRW_2019/papers/NTIRE/Yu_Deep_Iterative_Down-Up_CNN_for_Image_Denoising_CVPRW_2019_paper.pdf
"""
def __init__(self, in_chans, out_chans, num_chans=64,
pad_data=True, global_residual=True, n_res_blocks=6):
super().__init__()
self.pad_data = pad_data
self.global_residual = global_residual
bias=True
self.conv_input = nn.Conv2d(in_chans, num_chans, kernel_size=3, stride=1, padding=1, bias=bias)
self.relu1 = nn.PReLU()
self.conv_down = nn.Conv2d(num_chans, num_chans, kernel_size=3, stride=2, padding=1, bias=bias)
self.relu2 = nn.PReLU()
self.n_res_blocks = n_res_blocks
recursive = []
for i in range(self.n_res_blocks):
recursive.append(_Residual_Block(num_chans))
self.recursive = torch.nn.ModuleList(recursive)
self.conv_mid = nn.Conv2d(num_chans * self.n_res_blocks, num_chans, kernel_size=1, stride=1, padding=0, bias=bias)
self.relu3 = nn.PReLU()
self.conv_mid2 = nn.Conv2d(num_chans, num_chans, kernel_size=3, stride=1, padding=1, bias=bias)
self.relu4 = nn.PReLU()
self.subpixel = nn.PixelShuffle(2)
self.conv_output = nn.Conv2d(num_chans // 4, out_chans, kernel_size=3, stride=1, padding=1, bias=bias)
def forward(self, x):
if self.pad_data:
orig_shape2d = x.shape[-2:]
p2d = net_utils.calculate_downsampling_padding2d(x, 3)
x = net_utils.pad2d(x, p2d)
residual = x
out = self.relu1(self.conv_input(x))
out = self.relu2(self.conv_down(out))
recons = []
for i in range(self.n_res_blocks):
out = self.recursive[i](out)
recons.append(out)
out = torch.cat(recons, 1)
out = self.relu3(self.conv_mid(out))
residual2 = out
out = self.relu4(self.conv_mid2(out))
out = torch.add(out, residual2)
out= self.subpixel(out)
out = self.conv_output(out)
if self.global_residual:
out = torch.add(out, residual)
if self.pad_data:
out = net_utils.unpad2d(out, orig_shape2d)
return out
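def _didn_shape_check():
    # Hedged, illustrative smoke test (not part of the original repository).
    # With pad_data=False the net_utils padding helpers are bypassed, so a
    # plain forward pass is enough to confirm that the output keeps the input
    # shape. The channel counts and the 64x64 size are arbitrary choices.
    model = DIDN(in_chans=2, out_chans=2, num_chans=32, pad_data=False,
                 n_res_blocks=2)
    dummy = torch.randn(1, 2, 64, 64)
    assert model(dummy).shape == (1, 2, 64, 64)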
|
the-stack_0_7984 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains a factory for building various models."""
from __future__ import absolute_import, division, print_function
import functools
import tensorflow as tf
from nets import (alexnet, cifarnet, inception, lenet, mobilenet_v1, overfeat,
resnet_v1, resnet_v2, vgg)
from nets.mobilenet import mobilenet_v2
from nets.nasnet import nasnet, pnasnet
slim = tf.contrib.slim
networks_map = {'alexnet_v2': alexnet.alexnet_v2,
'cifarnet': cifarnet.cifarnet,
'overfeat': overfeat.overfeat,
'vgg_a': vgg.vgg_a,
'vgg_16': vgg.vgg_16,
'vgg_19': vgg.vgg_19,
'inception_v1': inception.inception_v1,
'inception_v2': inception.inception_v2,
'inception_v3': inception.inception_v3,
'inception_v4': inception.inception_v4,
'inception_resnet_v2': inception.inception_resnet_v2,
'lenet': lenet.lenet,
'resnet_v1_50': resnet_v1.resnet_v1_50,
'resnet_v1_101': resnet_v1.resnet_v1_101,
'resnet_v1_152': resnet_v1.resnet_v1_152,
'resnet_v1_200': resnet_v1.resnet_v1_200,
'resnet_v2_50': resnet_v2.resnet_v2_50,
'resnet_v2_101': resnet_v2.resnet_v2_101,
'resnet_v2_152': resnet_v2.resnet_v2_152,
'resnet_v2_200': resnet_v2.resnet_v2_200,
'mobilenet_v1': mobilenet_v1.mobilenet_v1,
'mobilenet_v1_075': mobilenet_v1.mobilenet_v1_075,
'mobilenet_v1_050': mobilenet_v1.mobilenet_v1_050,
'mobilenet_v1_025': mobilenet_v1.mobilenet_v1_025,
'mobilenet_v2': mobilenet_v2.mobilenet,
'nasnet_cifar': nasnet.build_nasnet_cifar,
'nasnet_mobile': nasnet.build_nasnet_mobile,
'nasnet_large': nasnet.build_nasnet_large,
'pnasnet_large': pnasnet.build_pnasnet_large,
}
arg_scopes_map = {'alexnet_v2': alexnet.alexnet_v2_arg_scope,
'cifarnet': cifarnet.cifarnet_arg_scope,
'overfeat': overfeat.overfeat_arg_scope,
'vgg_a': vgg.vgg_arg_scope,
'vgg_16': vgg.vgg_arg_scope,
'vgg_19': vgg.vgg_arg_scope,
'inception_v1': inception.inception_v3_arg_scope,
'inception_v2': inception.inception_v3_arg_scope,
'inception_v3': inception.inception_v3_arg_scope,
'inception_v4': inception.inception_v4_arg_scope,
'inception_resnet_v2':
inception.inception_resnet_v2_arg_scope,
'lenet': lenet.lenet_arg_scope,
'resnet_v1_50': resnet_v1.resnet_arg_scope,
'resnet_v1_101': resnet_v1.resnet_arg_scope,
'resnet_v1_152': resnet_v1.resnet_arg_scope,
'resnet_v1_200': resnet_v1.resnet_arg_scope,
'resnet_v2_50': resnet_v2.resnet_arg_scope,
'resnet_v2_101': resnet_v2.resnet_arg_scope,
'resnet_v2_152': resnet_v2.resnet_arg_scope,
'resnet_v2_200': resnet_v2.resnet_arg_scope,
'mobilenet_v1': mobilenet_v1.mobilenet_v1_arg_scope,
'mobilenet_v1_075': mobilenet_v1.mobilenet_v1_arg_scope,
'mobilenet_v1_050': mobilenet_v1.mobilenet_v1_arg_scope,
'mobilenet_v1_025': mobilenet_v1.mobilenet_v1_arg_scope,
'mobilenet_v2': mobilenet_v2.training_scope,
'nasnet_cifar': nasnet.nasnet_cifar_arg_scope,
'nasnet_mobile': nasnet.nasnet_mobile_arg_scope,
'nasnet_large': nasnet.nasnet_large_arg_scope,
'pnasnet_large': pnasnet.pnasnet_large_arg_scope,
}
def get_network_fn(name, num_classes, weight_decay=0.0, is_training=False):
"""Returns a network_fn such as `logits, end_points = network_fn(images)`.
Args:
name: The name of the network.
num_classes: The number of classes to use for classification. If 0 or None,
the logits layer is omitted and its input features are returned instead.
weight_decay: The l2 coefficient for the model weights.
is_training: `True` if the model is being used for training and `False`
otherwise.
Returns:
network_fn: A function that applies the model to a batch of images. It has
the following signature:
net, end_points = network_fn(images)
The `images` input is a tensor of shape [batch_size, height, width, 3]
with height = width = network_fn.default_image_size. (The permissibility
and treatment of other sizes depends on the network_fn.)
The returned `end_points` are a dictionary of intermediate activations.
The returned `net` is the topmost layer, depending on `num_classes`:
If `num_classes` was a non-zero integer, `net` is a logits tensor
of shape [batch_size, num_classes].
If `num_classes` was 0 or `None`, `net` is a tensor with the input
to the logits layer of shape [batch_size, 1, 1, num_features] or
[batch_size, num_features]. Dropout has not been applied to this
(even if the network's original classification does); it remains for
the caller to do this or not.
Raises:
ValueError: If network `name` is not recognized.
"""
if name not in networks_map:
raise ValueError('Name of network unknown %s' % name)
func = networks_map[name]
@functools.wraps(func)
def network_fn(images, **kwargs):
arg_scope = arg_scopes_map[name](weight_decay=weight_decay)
with slim.arg_scope(arg_scope):
return func(images, num_classes, is_training=is_training, **kwargs)
if hasattr(func, 'default_image_size'):
network_fn.default_image_size = func.default_image_size
return network_fn
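if __name__ == '__main__':
    # Hedged usage sketch (assumes a TF 1.x graph-mode environment matching the
    # slim imports above). 'resnet_v1_50' and its 224x224 default image size
    # come from networks_map and the underlying network definition.
    network_fn = get_network_fn('resnet_v1_50', num_classes=1000,
                                is_training=False)
    size = network_fn.default_image_size
    images = tf.placeholder(tf.float32, shape=[None, size, size, 3])
    logits, end_points = network_fn(images)
    print(logits.shape, len(end_points))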
|
the-stack_0_7985 | import datetime
from django.test import TestCase
from django.utils import timezone
from django.urls import reverse
from .models import Question
class QuestionModelTests(TestCase):
def test_was_published_recently_with_future_question(self):
"""
was_published_recently() returns False for questions whose pub_date
is in the future.
"""
time = timezone.now() + datetime.timedelta(days=30)
future_question = Question(pub_date=time)
self.assertIs(future_question.was_published_recently(), False)
def test_was_published_recently_with_old_question(self):
"""
was_published_recently() returns False for questions whose pub_date
is older than 1 day.
"""
time = timezone.now() - datetime.timedelta(days=1, seconds=1)
old_question = Question(pub_date=time)
self.assertIs(old_question.was_published_recently(), False)
def test_was_published_recently_with_recent_question(self):
"""
was_published_recently() returns True for questions whose pub_date
is within the last day.
"""
time = timezone.now() - datetime.timedelta(hours=23, minutes=59, seconds=59)
recent_question = Question(pub_date=time)
self.assertIs(recent_question.was_published_recently(), True)
def create_question(question_text, days):
"""
Create a question with the given `question_text` and published the
given number of `days` offset to now (negative for questions published
in the past, positive for questions that have yet to be published).
"""
time = timezone.now() + datetime.timedelta(days=days)
return Question.objects.create(question_text=question_text, pub_date=time)
class QuestionIndexViewTests(TestCase):
def test_no_questions(self):
"""
If no questions exist, an appropriate message is displayed.
"""
response = self.client.get(reverse('polls:index'))
self.assertEqual(response.status_code, 200)
self.assertContains(response, "No polls are available. 할당된 설문이 없습니다. /admin에서 설문을 생성하세요.")
self.assertQuerysetEqual(response.context['latest_question_list'], [])
def test_past_question(self):
"""
Questions with a pub_date in the past are displayed on the
index page.
"""
create_question(question_text="Past question.", days=-30)
response = self.client.get(reverse('polls:index'))
self.assertQuerysetEqual(
response.context['latest_question_list'],
['<Question: Past question.>']
)
def test_future_question(self):
"""
Questions with a pub_date in the future aren't displayed on
the index page.
"""
create_question(question_text="Future question.", days=30)
response = self.client.get(reverse('polls:index'))
self.assertContains(response, "No polls are available.")
self.assertQuerysetEqual(response.context['latest_question_list'], [])
def test_future_question_and_past_question(self):
"""
Even if both past and future questions exist, only past questions
are displayed.
"""
create_question(question_text="Past question.", days=-30)
create_question(question_text="Future question.", days=30)
response = self.client.get(reverse('polls:index'))
self.assertQuerysetEqual(
response.context['latest_question_list'],
['<Question: Past question.>']
)
def test_two_past_questions(self):
"""
The questions index page may display multiple questions.
"""
create_question(question_text="Past question 1.", days=-30)
create_question(question_text="Past question 2.", days=-5)
response = self.client.get(reverse('polls:index'))
self.assertQuerysetEqual(
response.context['latest_question_list'],
['<Question: Past question 2.>', '<Question: Past question 1.>']
)
class QuestionDetailViewTests(TestCase):
def test_future_question(self):
"""
The detail view of a question with a pub_date in the future
returns a 404 not found.
"""
future_question = create_question(question_text='Future question.', days=5)
url = reverse('polls:detail', args=(future_question.id,))
response = self.client.get(url)
self.assertEqual(response.status_code, 404)
def test_past_question(self):
"""
The detail view of a question with a pub_date in the past
displays the question's text.
"""
past_question = create_question(question_text='Past Question.', days=-5)
url = reverse('polls:detail', args=(past_question.id,))
response = self.client.get(url)
self.assertContains(response, past_question.question_text)
|
the-stack_0_7986 | import os
import re
import shutil
import sys
import ctypes
from pathlib import Path
from colorama import Fore, Back, Style
from .settings import *
if sys.version_info[0] < 3 or sys.version_info[1] <= 5:
print("\nPlease restart with Python 3.6+\n")
print("Current Python version:", sys.version_info)
exit(-1)
ti_core = None
def in_docker():
if os.environ.get("TI_IN_DOCKER", "") == "":
return False
else:
return True
def import_ti_core(tmp_dir=None):
global ti_core
if get_os_name() != 'win':
old_flags = sys.getdlopenflags()
sys.setdlopenflags(2 | 8) # RTLD_NOW | RTLD_DEEPBIND
else:
pyddir = os.path.join(package_root(), 'lib')
os.environ['PATH'] += ';' + pyddir
try:
import taichi_core as core
except Exception as e:
if isinstance(e, ImportError):
print(
                Fore.YELLOW + "Shared object taichi_core import failed, "
"check this page for possible solutions:\n"
"https://taichi.readthedocs.io/en/stable/install.html#troubleshooting"
+ Fore.RESET)
raise e
ti_core = core
if get_os_name() != 'win':
sys.setdlopenflags(old_flags)
lib_dir = os.path.join(package_root(), 'lib')
core.set_lib_dir(locale_encode(lib_dir))
if tmp_dir is not None:
core.set_tmp_dir(locale_encode(tmp_dir))
def locale_encode(path):
try:
import locale
return path.encode(locale.getdefaultlocale()[1])
except:
try:
import sys
return path.encode(sys.getfilesystemencoding())
except:
try:
return path.encode()
except:
return path
def is_ci():
return os.environ.get('TI_CI', '') == '1'
def package_root():
return os.path.join(os.path.dirname(os.path.realpath(__file__)), '../')
def is_release():
return os.environ.get('TAICHI_REPO_DIR', '') == ''
def get_core_shared_object():
if is_release():
directory = os.path.join(package_root(), 'lib')
else:
directory = get_bin_directory()
return os.path.join(directory, 'libtaichi_core.so')
def get_repo():
from git import Repo
repo = Repo(get_repo_directory())
return repo
def print_red_bold(*args, **kwargs):
print(Fore.RED + Style.BRIGHT, end='')
print(*args, **kwargs)
print(Style.RESET_ALL, end='')
create_sand_box_on_windows = True
def build():
tmp_cwd = os.getcwd()
bin_dir = get_build_directory()
try:
os.mkdir(bin_dir)
except:
pass
os.chdir(bin_dir)
import multiprocessing
print('Building taichi...')
num_make_threads = min(20, multiprocessing.cpu_count())
if get_os_name() == 'win':
make_ret = os.system(
"msbuild /p:Configuration=Release /p:Platform=x64 /m taichi.sln")
else:
make_ret = os.system('make -j {}'.format(num_make_threads))
if make_ret != 0:
print(' Error: Build failed.')
exit(-1)
os.chdir(tmp_cwd)
def check_exists(src):
if not os.path.exists(src):
raise FileNotFoundError(
f'File "{src}" not exist. Installation corrupted or build incomplete?'
)
def prepare_sandbox():
'''
Returns a temporary directory, which will be automatically deleted on exit.
It may contain the taichi_core shared object or some misc. files.
'''
import atexit
import shutil
from tempfile import mkdtemp
tmp_dir = mkdtemp(prefix='taichi-')
atexit.register(shutil.rmtree, tmp_dir)
print(f'[Taichi] preparing sandbox at {tmp_dir}')
os.mkdir(os.path.join(tmp_dir, 'runtime/'))
return tmp_dir
def get_unique_task_id():
import datetime
import random
return datetime.datetime.now().strftime('task-%Y-%m-%d-%H-%M-%S-r') + (
'%05d' % random.randint(0, 10000))
if is_release():
print("[Taichi] mode=release")
sys.path.append(os.path.join(package_root(), 'lib'))
if get_os_name() != 'win':
link_src = os.path.join(package_root(), 'lib', 'taichi_core.so')
link_dst = os.path.join(package_root(), 'lib', 'libtaichi_core.so')
# For llvm jit to find the runtime symbols
if not os.path.exists(link_dst):
os.symlink(link_src, link_dst)
import_ti_core()
if get_os_name() != 'win':
dll = ctypes.CDLL(get_core_shared_object(), mode=ctypes.RTLD_LOCAL)
# The C backend needs a temporary directory for the generated .c and compiled .so files:
ti_core.set_tmp_dir(locale_encode(prepare_sandbox(
))) # TODO: always allocate a tmp_dir for all situations
ti_core.set_python_package_dir(package_root())
os.makedirs(ti_core.get_repo_dir(), exist_ok=True)
else:
print("[Taichi] mode=development")
if get_os_name() == 'osx':
bin_dir = get_bin_directory()
os.environ['DYLD_FALLBACK_LIBRARY_PATH'] = get_runtime_directory()
lib_path = os.path.join(bin_dir, 'libtaichi_core.dylib')
tmp_cwd = os.getcwd()
tmp_dir = prepare_sandbox()
check_exists(lib_path)
shutil.copy(lib_path, os.path.join(tmp_dir, 'taichi_core.so'))
os.chdir(tmp_dir)
sys.path.append(tmp_dir)
import taichi_core as ti_core
os.chdir(tmp_cwd)
# TODO: unify importing infrastructure:
elif get_os_name() == 'linux':
bin_dir = get_bin_directory()
if 'LD_LIBRARY_PATH' in os.environ:
os.environ['LD_LIBRARY_PATH'] += ':/usr/lib64/'
else:
os.environ['LD_LIBRARY_PATH'] = '/usr/lib64/'
lib_path = os.path.join(bin_dir, 'libtaichi_core.so')
check_exists(lib_path)
tmp_cwd = os.getcwd()
tmp_dir = prepare_sandbox()
check_exists(lib_path)
shutil.copy(lib_path, os.path.join(tmp_dir, 'taichi_core.so'))
os.chdir(tmp_dir)
sys.path.append(tmp_dir)
try:
import_ti_core(tmp_dir)
except Exception as e:
from colorama import Fore, Back, Style
print_red_bold("Taichi core import failed: ", end='')
print(e)
print(
Fore.YELLOW + "check this page for possible solutions:\n"
"https://taichi.readthedocs.io/en/stable/install.html#troubleshooting"
+ Fore.RESET)
exit(-1)
os.chdir(tmp_cwd)
elif get_os_name() == 'win':
bin_dir = get_bin_directory()
dll_path_invalid = os.path.join(bin_dir, 'libtaichi_core.dll')
assert not os.path.exists(dll_path_invalid)
possible_folders = ['Debug', 'RelWithDebInfo', 'Release']
detected_dlls = []
for folder in possible_folders:
dll_path = os.path.join(bin_dir, folder, 'taichi_core.dll')
if os.path.exists(dll_path):
detected_dlls.append(dll_path)
if len(detected_dlls) == 0:
raise FileNotFoundError(
f'Cannot find Taichi core dll under {get_bin_directory()}/{possible_folders}'
)
elif len(detected_dlls) != 1:
print('Warning: multiple Taichi core dlls found:')
for dll in detected_dlls:
print(' ', dll)
print(f'Using {detected_dlls[0]}')
dll_path = detected_dlls[0]
# On windows when an dll/pyd is loaded, we cannot write to it any more
old_wd = os.getcwd()
os.chdir(bin_dir)
if create_sand_box_on_windows:
# Create a sandbox for separated core lib development and loading
folder = os.path.join(get_output_directory(), 'tmp',
get_unique_task_id())
lib_dir = os.path.join(get_repo_directory(), 'external', 'lib')
os.environ['PATH'] += ';' + lib_dir
os.makedirs(folder)
shutil.copy(dll_path, os.path.join(folder, 'taichi_core.pyd'))
os.environ['PATH'] += ';' + folder
sys.path.append(folder)
else:
shutil.copy(dll_path, os.path.join(bin_dir, 'taichi_core.pyd'))
sys.path.append(bin_dir)
try:
import taichi_core as ti_core
except Exception as e:
print(e)
print()
print(
            'Hint: please make sure the major and minor versions of the Python executable are correct.'
)
print()
raise e
os.chdir(old_wd)
log_level = os.environ.get('TI_LOG_LEVEL', '')
if log_level:
ti_core.set_logging_level(log_level)
def get_dll_name(name):
if get_os_name() == 'linux':
return 'libtaichi_%s.so' % name
elif get_os_name() == 'osx':
return 'libtaichi_%s.dylib' % name
elif get_os_name() == 'win':
return 'taichi_%s.dll' % name
else:
raise Exception(f"Unknown OS: {get_os_name()}")
def load_module(name, verbose=True):
if verbose:
print('Loading module', name)
try:
if get_os_name() == 'osx':
mode = ctypes.RTLD_LOCAL
else:
mode = ctypes.RTLD_GLOBAL
if '.so' in name:
ctypes.PyDLL(name, mode=mode)
else:
ctypes.PyDLL(os.path.join(get_repo_directory(), 'build',
get_dll_name(name)),
mode=mode)
except Exception as e:
print(Fore.YELLOW +
"Warning: module [{}] loading failed: {}".format(name, e) +
Style.RESET_ALL)
def at_startup():
if not is_release():
output_dir = get_output_directory()
if not os.path.exists(output_dir):
print('Making output directory')
os.mkdir(output_dir)
ti_core.set_core_state_python_imported(True)
def start_memory_monitoring(output_fn, pid=-1, interval=1):
# removing dependency on psutil
return
import os, psutil, time
if pid == -1:
pid = os.getpid()
import multiprocessing
def task():
with open(output_fn, 'w') as f:
process = psutil.Process(pid)
while True:
try:
mem = process.memory_info().rss
except:
mem = -1
time.sleep(interval)
print(time.time(), mem, file=f)
f.flush()
proc = multiprocessing.Process(target=task, daemon=True)
proc.start()
def require_version(major, minor=None, patch=None):
versions = [
int(ti_core.get_version_major()),
int(ti_core.get_version_minor()),
int(ti_core.get_version_patch()),
]
match = major == versions[0] and (
minor < versions[1] or minor == versions[1] and patch <= versions[2])
if match:
return
else:
print("Taichi version mismatch. required >= {}.{}.{}".format(
major, minor, patch))
print("Installed =", ti_core.get_version_string())
raise Exception("Taichi version mismatch")
at_startup()
def _print_taichi_header():
dev_mode = not is_release()
header = '[Taichi] '
if dev_mode:
header += '<dev mode>, '
else:
header += f'version {ti_core.get_version_string()}, '
llvm_version = ti_core.get_llvm_version_string()
header += f'llvm {llvm_version}, '
commit_hash = ti_core.get_commit_hash()
commit_hash = commit_hash[:8]
header += f'commit {commit_hash}, '
header += f'{get_os_name()}, '
py_ver = '.'.join(str(x) for x in sys.version_info[:3])
header += f'python {py_ver}'
print(header)
_print_taichi_header()
__all__ = [
'ti_core',
'build',
'load_module',
'start_memory_monitoring',
'is_release',
'package_root',
'require_version',
]
|
the-stack_0_7988 | import json
import botsdk.Bot
import botsdk.BotRequest
from botsdk.tool.MessageChain import MessageChain
from botsdk.tool.BotPlugin import BotPlugin
from botsdk.tool.Cookie import *
class plugin(BotPlugin):
def __init__(self):
super().__init__()
self.listenType = []
#[["type1",func],["type2",func],...,["typen",func]]
self.listenTarget = [["GroupMessage", "空调", self.kongtiao]]
#[["type1","target",func],["type2","target",func],...,["typen","target",func]]
self.name = "空调"
        # plugin name
self.info = "好热哦"
        # plugin description
self.help = "/空调"
        # plugin help text
self.permissionSet = {"OWNER","ADMINISTRATOR","MEMBER"}
self.canDetach = True
async def kongtiao(self, request):
bot = request.bot
groupid = request.groupId
await bot.sendGroupMessage(request.groupId, MessageChain().text("https://ac.yunyoujun.cn/#/").getData())
def handle(*args, **kwargs):
return plugin(*args, **kwargs)
|
the-stack_0_7992 | import copy
site = {
'html': {
'head': {
            'title': 'Buy/sell a phone cheap'
},
'body': {
            'h2': 'We have the lowest price for iPhone',
            'div': 'Buy',
            'p': 'Sell'
}
}
}
def f(n, data=site, new_list=list()):
if n == 0:
return
    name = input('Enter the site name:')
    data['html']['head']['title'] = f'Buy/sell {name} cheap'
    data['html']['body']['h2'] = f'We have the lowest price for {name}'
new_list.append(str(data))
for i in new_list:
print(i)
f(n-1)
f(2)
|
the-stack_0_7996 | from unittest.mock import Mock, patch
from urllib.parse import urlencode
from django.test import TestCase
from django.test.utils import override_settings
from django.urls import reverse
from django.utils import translation
from phonenumber_field.phonenumber import PhoneNumber
from two_factor.gateways.fake import Fake
from two_factor.gateways.twilio.gateway import Twilio
class TwilioGatewayTest(TestCase):
def test_call_app(self):
url = reverse('two_factor_twilio:call_app', args=['123456'])
response = self.client.get(url)
self.assertEqual(response.content,
b'<?xml version="1.0" encoding="UTF-8" ?>'
b'<Response>'
b' <Gather timeout="15" numDigits="1" finishOnKey="">'
b' <Say language="en">Hi, this is testserver calling. '
b'Press any key to continue.</Say>'
b' </Gather>'
b' <Say language="en">You didn\'t press any keys. Good bye.</Say>'
b'</Response>')
url = reverse('two_factor_twilio:call_app', args=['123456'])
response = self.client.post(url)
self.assertEqual(response.content,
b'<?xml version="1.0" encoding="UTF-8" ?>'
b'<Response>'
b' <Say language="en">Your token is 1. 2. 3. 4. 5. 6. '
b'Repeat: 1. 2. 3. 4. 5. 6. Good bye.</Say>'
b'</Response>')
# there is a en-gb voice
response = self.client.get('%s?%s' % (url, urlencode({'locale': 'en-gb'})))
self.assertContains(response, '<Say language="en-gb">')
# there is no Frysian voice
response = self.client.get('%s?%s' % (url, urlencode({'locale': 'fy-nl'})))
self.assertContains(response, '<Say language="en">')
@override_settings(
TWILIO_ACCOUNT_SID='SID',
TWILIO_AUTH_TOKEN='TOKEN',
TWILIO_CALLER_ID='+456',
)
@patch('two_factor.gateways.twilio.gateway.Client')
def test_gateway(self, client):
twilio = Twilio()
client.assert_called_with('SID', 'TOKEN')
for code in ['654321', '054321', '87654321', '07654321']:
twilio.make_call(device=Mock(number=PhoneNumber.from_string('+123')), token=code)
client.return_value.calls.create.assert_called_with(
from_='+456', to='+123', method='GET', timeout=15,
url='http://testserver/twilio/inbound/two_factor/%s/?locale=en-us' % code)
twilio.send_sms(device=Mock(number=PhoneNumber.from_string('+123')), token=code)
client.return_value.messages.create.assert_called_with(
to='+123', body='Your authentication token is %s' % code, from_='+456')
client.return_value.calls.create.reset_mock()
with translation.override('en-gb'):
twilio.make_call(device=Mock(number=PhoneNumber.from_string('+123')), token=code)
client.return_value.calls.create.assert_called_with(
from_='+456', to='+123', method='GET', timeout=15,
url='http://testserver/twilio/inbound/two_factor/%s/?locale=en-gb' % code)
client.return_value.calls.create.reset_mock()
with translation.override('en-gb'):
twilio.make_call(device=Mock(number=PhoneNumber.from_string('+123')), token=code)
client.return_value.calls.create.assert_called_with(
from_='+456', to='+123', method='GET', timeout=15,
url='http://testserver/twilio/inbound/two_factor/%s/?locale=en-gb' % code)
@override_settings(
TWILIO_ACCOUNT_SID='SID',
TWILIO_AUTH_TOKEN='TOKEN',
TWILIO_CALLER_ID='+456',
)
@patch('two_factor.gateways.twilio.gateway.Client')
def test_invalid_twilio_language(self, client):
# This test assumes an invalid twilio voice language being present in
# the Arabic translation. Might need to create a faux translation when
# the translation is fixed.
url = reverse('two_factor_twilio:call_app', args=['123456'])
with self.assertRaises(NotImplementedError):
self.client.get('%s?%s' % (url, urlencode({'locale': 'ar'})))
# make_call doesn't use the voice_language, but it should raise early
# to ease debugging.
with self.assertRaises(NotImplementedError):
twilio = Twilio()
with translation.override('ar'):
twilio.make_call(device=Mock(number='+123'), token='654321')
class FakeGatewayTest(TestCase):
@patch('two_factor.gateways.fake.logger')
def test_gateway(self, logger):
fake = Fake()
for code in ['654321', '87654321']:
fake.make_call(device=Mock(number=PhoneNumber.from_string('+123')), token=code)
logger.info.assert_called_with(
'Fake call to %s: "Your token is: %s"', '+123', code)
fake.send_sms(device=Mock(number=PhoneNumber.from_string('+123')), token=code)
logger.info.assert_called_with(
'Fake SMS to %s: "Your token is: %s"', '+123', code)
|
the-stack_0_7997 | """This module contains the general information for LsbootUsbFlashStorageImage ManagedObject."""
from ...ucsmo import ManagedObject
from ...ucscoremeta import MoPropertyMeta, MoMeta
from ...ucsmeta import VersionMeta
class LsbootUsbFlashStorageImageConsts:
TYPE_EMBEDDED_LOCAL_JBOD = "embedded-local-jbod"
TYPE_EMBEDDED_LOCAL_LUN = "embedded-local-lun"
TYPE_LOCAL_ANY = "local-any"
TYPE_LOCAL_JBOD = "local-jbod"
TYPE_LOCAL_LUN = "local-lun"
TYPE_NVME = "nvme"
TYPE_SD_CARD = "sd-card"
TYPE_USB_EXTERN = "usb-extern"
TYPE_USB_INTERN = "usb-intern"
class LsbootUsbFlashStorageImage(ManagedObject):
"""This is LsbootUsbFlashStorageImage class."""
consts = LsbootUsbFlashStorageImageConsts()
naming_props = set([])
mo_meta = MoMeta("LsbootUsbFlashStorageImage", "lsbootUsbFlashStorageImage", "sd-card", VersionMeta.Version221b, "InputOutput", 0x3f, [], ["admin", "ls-compute", "ls-config", "ls-config-policy", "ls-server", "ls-server-policy", "ls-storage", "ls-storage-policy"], [u'lsbootLocalStorage'], [u'lsbootUEFIBootParam'], ["Add", "Get", "Remove", "Set"])
prop_meta = {
"child_action": MoPropertyMeta("child_action", "childAction", "string", VersionMeta.Version221b, MoPropertyMeta.INTERNAL, 0x2, None, None, r"""((deleteAll|ignore|deleteNonPresent),){0,2}(deleteAll|ignore|deleteNonPresent){0,1}""", [], []),
"dn": MoPropertyMeta("dn", "dn", "string", VersionMeta.Version221b, MoPropertyMeta.READ_ONLY, 0x4, 0, 256, None, [], []),
"order": MoPropertyMeta("order", "order", "ushort", VersionMeta.Version221b, MoPropertyMeta.READ_WRITE, 0x8, None, None, None, [], ["1-16"]),
"rn": MoPropertyMeta("rn", "rn", "string", VersionMeta.Version221b, MoPropertyMeta.READ_ONLY, 0x10, 0, 256, None, [], []),
"sacl": MoPropertyMeta("sacl", "sacl", "string", VersionMeta.Version302c, MoPropertyMeta.READ_ONLY, None, None, None, r"""((none|del|mod|addchild|cascade),){0,4}(none|del|mod|addchild|cascade){0,1}""", [], []),
"status": MoPropertyMeta("status", "status", "string", VersionMeta.Version221b, MoPropertyMeta.READ_WRITE, 0x20, None, None, r"""((removed|created|modified|deleted),){0,3}(removed|created|modified|deleted){0,1}""", [], []),
"type": MoPropertyMeta("type", "type", "string", VersionMeta.Version221b, MoPropertyMeta.READ_ONLY, None, None, None, None, ["embedded-local-jbod", "embedded-local-lun", "local-any", "local-jbod", "local-lun", "nvme", "sd-card", "usb-extern", "usb-intern"], []),
}
prop_map = {
"childAction": "child_action",
"dn": "dn",
"order": "order",
"rn": "rn",
"sacl": "sacl",
"status": "status",
"type": "type",
}
def __init__(self, parent_mo_or_dn, **kwargs):
self._dirty_mask = 0
self.child_action = None
self.order = None
self.sacl = None
self.status = None
self.type = None
ManagedObject.__init__(self, "LsbootUsbFlashStorageImage", parent_mo_or_dn, **kwargs)
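# Illustrative usage sketch (not from the SDK docs; the UCSM host, credentials
# and parent dn below are placeholders for a typical boot-policy path):
#
#     from ucsmsdk.ucshandle import UcsHandle
#     handle = UcsHandle("ucsm-host", "admin", "password")
#     handle.login()
#     mo = LsbootUsbFlashStorageImage(
#         parent_mo_or_dn="org-root/boot-policy-default/storage/local-storage",
#         order="1")
#     handle.add_mo(mo, modify_present=True)
#     handle.commit()
#     handle.logout()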
|
the-stack_0_7998 | from typing import Dict, List
from allennlp.common.checks import ConfigurationError
# from allennlp.common.params import Params
from allennlp.common.util import pad_sequence_to_length
from allennlp.data.tokenizers.token import Token
from allennlp.data.token_indexers.token_indexer import TokenIndexer
from allennlp.data.vocabulary import Vocabulary
def _make_bos_eos(
character: int,
padding_character: int,
beginning_of_word_character: int,
end_of_word_character: int,
max_word_length: int
):
char_ids = [padding_character] * max_word_length
char_ids[0] = beginning_of_word_character
char_ids[1] = character
char_ids[2] = end_of_word_character
return char_ids
class ELMoCharacterMapper:
"""
Maps individual tokens to sequences of character ids, compatible with ELMo.
    To be consistent with previously trained models, we include it here as a
    special case of the existing character indexers.
"""
max_word_length = 50
# char ids 0-255 come from utf-8 encoding bytes
# assign 256-300 to special chars
beginning_of_sentence_character = 256 # <begin sentence>
end_of_sentence_character = 257 # <end sentence>
beginning_of_word_character = 258 # <begin word>
end_of_word_character = 259 # <end word>
padding_character = 260 # <padding>
beginning_of_sentence_characters = _make_bos_eos(
beginning_of_sentence_character,
padding_character,
beginning_of_word_character,
end_of_word_character,
max_word_length
)
end_of_sentence_characters = _make_bos_eos(
end_of_sentence_character,
padding_character,
beginning_of_word_character,
end_of_word_character,
max_word_length
)
bos_token = '<S>'
eos_token = '</S>'
@staticmethod
def convert_word_to_char_ids(word: str) -> List[int]:
if word == ELMoCharacterMapper.bos_token:
char_ids = ELMoCharacterMapper.beginning_of_sentence_characters
elif word == ELMoCharacterMapper.eos_token:
char_ids = ELMoCharacterMapper.end_of_sentence_characters
else:
word_encoded = word.encode('utf-8', 'ignore')[:(ELMoCharacterMapper.max_word_length-2)]
char_ids = [ELMoCharacterMapper.padding_character] * ELMoCharacterMapper.max_word_length
char_ids[0] = ELMoCharacterMapper.beginning_of_word_character
for k, chr_id in enumerate(word_encoded, start=1):
char_ids[k] = chr_id
char_ids[len(word_encoded) + 1] = ELMoCharacterMapper.end_of_word_character
        # +1 for masking (character id 0 is reserved for padding)
return [c + 1 for c in char_ids]
@TokenIndexer.register("elmo_characters")
class ELMoTokenCharactersIndexer(TokenIndexer[List[int]]):
"""
Convert a token to an array of character ids to compute ELMo representations.
Parameters
----------
namespace : ``str``, optional (default=``elmo_characters``)
"""
# pylint: disable=no-self-use
def __init__(self,
namespace: str = 'elmo_characters') -> None:
self._namespace = namespace
def count_vocab_items(self, token: Token, counter: Dict[str, Dict[str, int]]):
pass
def token_to_indices(self, token: Token, vocabulary: Vocabulary) -> List[int]:
# pylint: disable=unused-argument
if token.text is None:
raise ConfigurationError('ELMoTokenCharactersIndexer needs a tokenizer '
'that retains text')
return ELMoCharacterMapper.convert_word_to_char_ids(token.text)
def get_padding_lengths(self, token: List[int]) -> Dict[str, int]:
# pylint: disable=unused-argument
return {}
def get_padding_token(self) -> List[int]:
return []
@staticmethod
def _default_value_for_padding():
return [0] * ELMoCharacterMapper.max_word_length
def pad_token_sequence(self,
tokens: List[List[int]],
desired_num_tokens: int,
padding_lengths: Dict[str, int]) -> List[List[int]]:
# pylint: disable=unused-argument
return pad_sequence_to_length(tokens, desired_num_tokens,
default_value=self._default_value_for_padding)
# @classmethod
# def from_params(cls, params: Params) -> 'ELMoTokenCharactersIndexer':
# """
# Parameters
# ----------
# namespace : ``str``, optional (default=``elmo_characters``)
# """
# namespace = params.pop('namespace', 'elmo_characters')
# params.assert_empty(cls.__name__)
# return cls(namespace=namespace)
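if __name__ == "__main__":
    # Hedged usage sketch: the indexer ignores the vocabulary argument, so an
    # empty Vocabulary is enough to show the fixed-length character-id encoding.
    indexer = ELMoTokenCharactersIndexer()
    char_ids = indexer.token_to_indices(Token("sentence"), Vocabulary())
    print(len(char_ids))  # 50 == ELMoCharacterMapper.max_word_length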
|
the-stack_0_8001 | from flask import render_template, Blueprint
from flask_login import login_required, current_user
import datetime
from project import app, db, localSystem
from project.models import *
home_blueprint = Blueprint(
'home', __name__,
template_folder = 'templates'
)
@home_blueprint.route('/')
def home():
localSystem = BoxOffice.query.first()
data = {}
data['news'] = db.session.query(Announcement).all()
data['changes'] = db.session.query(MovieChange).all()
data['dateChanges'] = db.session.query(DateChange).all()
data['boxOffice'] = Results.query.filter_by(date=(localSystem.currentDate - datetime.timedelta(days=1))).order_by(Results.movie_gross.desc()).all()
return render_template("index.html", user=current_user, system=localSystem, data=data)
|
the-stack_0_8003 | #!/usr/bin/env vpython
# Copyright 2020 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import logging
import os
import subprocess
import sys
import tempfile
import time
import unittest
import mock
from parameterized import parameterized
import test_runner
class TestRunnerTest(unittest.TestCase):
def setUp(self):
logging.disable(logging.CRITICAL)
time.sleep = mock.Mock()
def tearDown(self):
logging.disable(logging.NOTSET)
@parameterized.expand([
'url_unittests',
'./url_unittests',
'out/release/url_unittests',
'./out/release/url_unittests',
])
@mock.patch.object(os.path, 'isfile', return_value=True)
@mock.patch.object(test_runner, '_DownloadAshChromeIfNecessary')
@mock.patch.object(subprocess, 'Popen', return_value=mock.Mock())
# Tests that the test runner doesn't attempt to download ash-chrome if not
# required.
def test_do_not_require_ash_chrome(self, command, mock_popen, mock_download,
_):
args = ['script_name', 'test', command]
with mock.patch.object(sys, 'argv', args):
test_runner.Main()
self.assertEqual(1, mock_popen.call_count)
mock_popen.assert_called_with([command])
self.assertFalse(mock_download.called)
@parameterized.expand([
'browser_tests',
'components_browsertests',
'content_browsertests',
'lacros_chrome_browsertests',
])
@mock.patch.object(os,
'listdir',
return_value=['wayland-0', 'wayland-0.lock'])
@mock.patch.object(tempfile,
'mkdtemp',
side_effect=['/tmp/xdg', '/tmp/ash-data'])
@mock.patch.object(os.environ, 'copy', side_effect=[{}, {}])
@mock.patch.object(os.path, 'exists', return_value=True)
@mock.patch.object(os.path, 'isfile', return_value=True)
@mock.patch.object(test_runner,
'_GetLatestVersionOfAshChrome',
return_value='793554')
@mock.patch.object(test_runner, '_DownloadAshChromeIfNecessary')
@mock.patch.object(subprocess, 'Popen', return_value=mock.Mock())
# Tests that the test runner downloads and spawns ash-chrome if ash-chrome is
# required.
def test_require_ash_chrome(self, command, mock_popen, mock_download, *_):
args = ['script_name', 'test', command]
with mock.patch.object(sys, 'argv', args):
test_runner.Main()
mock_download.assert_called_with('793554')
self.assertEqual(2, mock_popen.call_count)
ash_chrome_args = mock_popen.call_args_list[0][0][0]
self.assertTrue(ash_chrome_args[0].endswith(
'build/lacros/prebuilt_ash_chrome/793554/test_ash_chrome'))
expected_ash_chrome_args = [
'--user-data-dir=/tmp/ash-data',
'--enable-wayland-server',
'--no-startup-window',
]
if command == 'lacros_chrome_browsertests':
expected_ash_chrome_args.append(
'--lacros-mojo-socket-for-testing=/tmp/ash-data/lacros.sock')
self.assertListEqual(expected_ash_chrome_args, ash_chrome_args[1:])
ash_chrome_env = mock_popen.call_args_list[0][1].get('env', {})
self.assertDictEqual({'XDG_RUNTIME_DIR': '/tmp/xdg'}, ash_chrome_env)
test_args = mock_popen.call_args_list[1][0][0]
if command == 'lacros_chrome_browsertests':
self.assertListEqual([
command,
'--lacros-mojo-socket-for-testing=/tmp/ash-data/lacros.sock'
], test_args)
else:
self.assertListEqual([command], test_args)
test_env = mock_popen.call_args_list[1][1].get('env', {})
self.assertDictEqual(
{
'XDG_RUNTIME_DIR': '/tmp/xdg',
'EGL_PLATFORM': 'surfaceless'
}, test_env)
@mock.patch.object(os,
'listdir',
return_value=['wayland-0', 'wayland-0.lock'])
@mock.patch.object(os.path, 'exists', return_value=True)
@mock.patch.object(os.path, 'isfile', return_value=True)
@mock.patch.object(test_runner,
'_GetLatestVersionOfAshChrome',
return_value='793554')
@mock.patch.object(test_runner, '_DownloadAshChromeIfNecessary')
@mock.patch.object(subprocess, 'Popen', return_value=mock.Mock())
# Tests that when a ash-chrome version is specified, that version is used
# instead of the latest one.
def test_specify_ash_chrome_version(self, mock_popen, mock_download, *_):
args = [
'script_name', 'test', 'browser_tests', '--ash-chrome-version', '781122'
]
with mock.patch.object(sys, 'argv', args):
test_runner.Main()
mock_download.assert_called_with('781122')
@mock.patch.object(os,
'listdir',
return_value=['wayland-0', 'wayland-0.lock'])
@mock.patch.object(os.path, 'exists', return_value=True)
@mock.patch.object(os.path, 'isfile', return_value=True)
@mock.patch.object(test_runner, '_DownloadAshChromeIfNecessary')
@mock.patch.object(subprocess, 'Popen', return_value=mock.Mock())
# Tests that if a ash-chrome version is specified, uses ash-chrome to run
# tests anyway even if |_TARGETS_REQUIRE_ASH_CHROME| indicates an ash-chrome
# is not required.
def test_overrides_do_not_require_ash_chrome(self, mock_popen, mock_download,
*_):
args = [
'script_name', 'test', './url_unittests', '--ash-chrome-version',
'793554'
]
with mock.patch.object(sys, 'argv', args):
test_runner.Main()
mock_download.assert_called_with('793554')
self.assertEqual(2, mock_popen.call_count)
@mock.patch.object(os,
'listdir',
return_value=['wayland-0', 'wayland-0.lock'])
@mock.patch.object(os.path, 'exists', return_value=True)
@mock.patch.object(os.path, 'isfile', return_value=True)
@mock.patch.object(test_runner, '_GetLatestVersionOfAshChrome')
@mock.patch.object(test_runner, '_DownloadAshChromeIfNecessary')
@mock.patch.object(subprocess, 'Popen', return_value=mock.Mock())
# Tests that when an ash-chrome path is specified, the test runner doesn't try
# to download prebuilt ash-chrome.
def test_specify_ash_chrome_path(self, mock_popen, mock_download,
mock_get_latest_version, *_):
args = [
'script_name',
'test',
'browser_tests',
'--ash-chrome-path',
'/ash/test_ash_chrome',
]
with mock.patch.object(sys, 'argv', args):
test_runner.Main()
self.assertFalse(mock_get_latest_version.called)
self.assertFalse(mock_download.called)
@mock.patch.object(os.path, 'isfile', return_value=True)
@mock.patch.object(test_runner, '_DownloadAshChromeIfNecessary')
@mock.patch.object(subprocess, 'Popen', return_value=mock.Mock())
# Tests that arguments not known to the test runner are forwarded to the
# command that invokes tests.
def test_command_arguments(self, mock_popen, mock_download, _):
args = [
'script_name', 'test', './url_unittests', '--gtest_filter=Suite.Test'
]
with mock.patch.object(sys, 'argv', args):
test_runner.Main()
mock_popen.assert_called_with(
['./url_unittests', '--gtest_filter=Suite.Test'])
self.assertFalse(mock_download.called)
if __name__ == '__main__':
unittest.main()
|
the-stack_0_8006 | #!/usr/bin/python
# -*- coding: UTF-8 -*-
from django.http import JsonResponse
from django.http import HttpResponseRedirect
from django.http import HttpResponse
from django.shortcuts import render
from django.views.decorators import csrf
# from django.contrib.auth.decorators import login_required
from tool.tools import createId
from reader.models import reader
from author.models import author
# connect to mysql and check
def loginReader(request):
lastUrl = ""
if "lastUrl" in request.POST:
lastUrl = request.POST['lastUrl']
context = {}
if "readerId" in request.session:
context['status'] = "success"
if lastUrl == "null":
# context['message'] = "/reader/readerIndex/"
return HttpResponseRedirect("/reader/index/")
elif lastUrl == "" or lastUrl is None:
context['status'] = "fail"
context['message'] = "Invalid access"
return JsonResponse(context)
else:
# context['message'] = lastUrl
return HttpResponseRedirect(lastUrl)
# return JsonResponse(context)
if 'userName' not in request.POST or 'passwd' not in request.POST:
context['status'] = "fail"
context['message'] = "Please reload the page and enter your Email and password"
return JsonResponse(context)
# return render(request, 'reader/login.html')
userName = unicode(request.POST['userName'])
passwd = createId(96,request.POST['passwd'])
try:
readerObj = reader.objects.get(email=userName)
if passwd != readerObj.passwd:
context['status'] = "fail"
context['message'] = "Wrong password! Please log in again!"
return JsonResponse(context)
# return render(request, 'reader/loginFail.html', {'message': u'Wrong password! Please log in again!'})
if readerObj.status == "allowed":
request.session["readerId"] = readerObj.id
request.session["userName"] = readerObj.name
# check whether the user is also an author and, if so, the author's status
isAuthor = author.isExist(readerObj.id)
request.session["isAuthor"] = isAuthor
authorStatus = author.getStatus(readerObj.id)
if not isAuthor:
request.session["authorStatus"] = ""
context['status'] = "success"
if lastUrl == "null":
context['message'] = "/reader/index/"
else:
context['message'] = lastUrl
return JsonResponse(context)
authorId = author.getId(readerObj.id)
if authorId != "":
request.session["authorId"] = authorId
if authorStatus == "active":
request.session["authorStatus"] = "active"
else:
request.session["authorStatus"] = authorStatus
context['status'] = "success"
if lastUrl == "null":
context['message'] = "/reader/index/"
else:
context['message'] = lastUrl
return JsonResponse(context)
elif readerObj.status == "abuse":
context['status'] = "fail"
context['message'] = "You have not verified your email yet! Please check your registration email to verify your identity!"
return JsonResponse(context)
else :
context['status'] = "fail"
context['message'] = 'Your account status is abnormal and you cannot log in. Current status: ' + str(readerObj.status) + '. Please contact the administrator or register again.'
return JsonResponse(context)
except reader.DoesNotExist:
context['status'] = "fail"
context['message'] = 'User does not exist! Please log in again!'
return JsonResponse(context)
def logout(request):
# delete session
if "readerId" in request.session:
del request.session["readerId"] # if not exists, report error
del request.session["userName"] # if not exists, report error
del request.session["isAuthor"] # if not exists, report error
if 'authorId' in request.session:
del request.session["authorId"] # if not exists, report error
del request.session["authorStatus"] # if not exists, report error
request.session.flush()
return HttpResponseRedirect('/reader/login/')
else:
return HttpResponseRedirect('/reader/login/')
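# --- Illustrative sketch (added, not part of the original file) ---
# A minimal urls.py wiring for the two views above; the module path
# ("reader.views") and the URL patterns are assumptions about the project
# layout, not taken from this file.
#
# from django.conf.urls import url
# from reader import views
#
# urlpatterns = [
#     url(r'^reader/login/$', views.loginReader),
#     url(r'^reader/logout/$', views.logout),
# ]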
|
the-stack_0_8007 | from dataclasses import asdict
from functools import wraps
import json
from protobuf_to_dict import protobuf_to_dict
from dacite import from_dict
from schemes.graph import GraphNode, GraphRelation
from configs.config import logger
def raise_customized_error(capture, target):
def _raise_customized_error(func):
@wraps(func)
def wrapper(*args, **kwargs):
try:
return func(*args, **kwargs)
except capture:
raise target
return wrapper
return _raise_customized_error
def raise_grpc_error(capture, grpc_status_code):
def _raise_grpc_error(func):
@wraps(func)
def wrapper(self, request, context):
try:
return func(self, request, context)
except capture as e:
context.set_code(grpc_status_code)
if hasattr(e, "desc"):
context.set_details(e.desc)
else:
context.set_details("Maybe RPC Error.")
return wrapper
return _raise_grpc_error
def deco_log_error(logger):
def _deco_log_error(func):
@wraps(func)
def wrapper(*args, **kwargs):
try:
return func(*args, **kwargs)
except Exception as e:
if logger:
logger.exception(e)
raise e
# return {"errors": {"code": e.code, "desc": e.desc}}
return wrapper
return _deco_log_error
def convert_node_to_graphnode(node):
label = str(node.labels)[1:]
dct = dict(node)
name = dct.pop("name")
gn = GraphNode(label, name, dct)
return gn
def convert_relation_to_graph_relation(relation):
start = convert_node_to_graphnode(relation.start_node)
end = convert_node_to_graphnode(relation.end_node)
kind = list(relation.types())[0]
props = dict(relation)
gr = GraphRelation(start, end, kind, props)
return gr
def convert_query_to_scheme():
def _convert_query_to_scheme(func):
@wraps(func)
def wrapper(self, qin, **kwargs):
query = func(self, qin, **kwargs)
result = []
for gobj in query:
if gobj.relationships:
obj = convert_relation_to_graph_relation(gobj)
else:
obj = convert_node_to_graphnode(gobj)
result.append(obj)
return result
return wrapper
return _convert_query_to_scheme
def convert_request_to(target):
"""
convert different kinds of request to needed input.
there are 4 needed inputs:
- GraphNode
- GraphRelation
- RawString
- ExtractorInput
"""
def _convert_request_to(func):
@wraps(func)
def wrapper(self, request, context):
dctreq = protobuf_to_dict(request)
if "props" in dctreq:
req_props = dctreq["props"]
dctreq["props"] = json.loads(req_props)
if "start" in dctreq:
start_props = dctreq["start"]["props"]
dctreq["start"]["props"] = json.loads(start_props)
if "end" in dctreq:
end_props = dctreq["end"]["props"]
dctreq["end"]["props"] = json.loads(end_props)
request = from_dict(target, dctreq)
result = func(self, request, context)
return result
return wrapper
return _convert_request_to
def convert_graphobj_to_dict(graphobj):
"""
A graphobj is a GraphNode or GraphRelation
"""
dct = asdict(graphobj)
if "props" in dct:
dct["props"] = json.dumps(dct["props"])
if "start" in dct:
start_props = dct["start"]["props"]
dct["start"]["props"] = json.dumps(start_props)
if "end" in dct:
end_props = dct["end"]["props"]
dct["end"]["props"] = json.dumps(end_props)
return dct
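# --- Illustrative sketch (added, not part of the original file) ---
# Serialising a node with convert_graphobj_to_dict; the label, name and props
# values below are made up, only the (label, name, props) constructor shape is
# taken from convert_node_to_graphnode above.
#
# node = GraphNode("Person", "Alice", {"age": 30})
# payload = convert_graphobj_to_dict(node)  # payload["props"] is a JSON string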
|
the-stack_0_8008 | import multiprocessing as mp
import os
from glob import glob
from subprocess import run
import pandas as pd
def ensure_file(file):
"""Ensure a single file exists; return its full path if it does, otherwise raise an AssertionError"""
# tilde expansion
file_path = os.path.normpath(os.path.expanduser(file))
assert os.path.isfile(file_path), "The file {} doesn't exist. Please create the file first".format(file)
return file_path
def ensure_dir(file_path, create_if_not=True):
"""Ensure the directory exists; if it doesn't and create_if_not is True, create it, then return the directory path"""
# tilde expansion
file_path = os.path.normpath(os.path.expanduser(file_path))
if os.path.isfile(file_path):
directory = os.path.dirname(file_path)
else:
directory = file_path
if not os.path.exists(directory):
if create_if_not:
try:
os.makedirs(directory)
except FileExistsError:
pass
else:
raise FileNotFoundError(f"The directory {directory} doesn't exist, create it or pass create_if_not=True")
return directory
def empty_dir(dir_path, force=False):
if force:
run(f'rm -v {dir_path}/*', shell=True)
else:
files = os.listdir(dir_path)
if len(files) and not mp.current_process().daemon:
answer = input(
f'The directory {dir_path} contains {len(files)} files, do you want to remove them?\n [yes\\No] ')
if answer.lower() == 'yes':
run(f'rm -v {dir_path}/*', shell=True)
def convert_vid_to_qid(df: pd.DataFrame):
if df.index.name != 'qid' and df.index.name != 'topic':
if 'qid' in df.columns:
_df = df.set_index('qid')
elif 'topic' in df.columns:
_df = df.set_index('topic')
else:
assert False, "The DF doesn't have qid or topic"
else:
_df = df
_df.rename(index=lambda x: f'{x.split("-")[0]}', inplace=True)
return _df
def add_topic_to_qdf(qdf: pd.DataFrame):
"""This functions will add a topic column to the queries DF"""
if 'topic' not in qdf.columns:
if 'qid' in qdf.columns:
qdf = qdf.assign(topic=lambda x: x.qid.apply(lambda y: y.split('-')[0]))
else:
qdf = qdf.reset_index().assign(topic=lambda x: x.qid.apply(lambda y: y.split('-')[0]))
return qdf
def read_rm_prob_files(data_dir, number_of_docs, clipping='*'):
"""The function creates a DF from files, the probabilities are p(w|RM1) for all query words
If a query term doesn't appear in the file, it implies p(w|R)=0"""
data_files = glob(f'{data_dir}/probabilities-{number_of_docs}+{clipping}')
if len(data_files) < 1:
data_files = glob(f'{data_dir}/probabilities-{number_of_docs}')
_list = []
for _file in data_files:
_col = f'{_file.rsplit("/")[-1].rsplit("-")[-1]}'
_df = pd.read_csv(_file, names=['qid', 'term', _col], sep=' ')
_df = _df.astype({'qid': str}).set_index(['qid', 'term'])
_list.append(_df)
return pd.concat(_list, axis=1).fillna(0)
def set_environment_paths(base_path=None):
base_path = base_path if base_path else os.path.dirname(os.path.abspath(__file__))
results_dir = ensure_dir(f'{base_path}/QppUqvProj/Results')
data_dir = ensure_dir(f'{base_path}/QppUqvProj/data')
return results_dir, data_dir
def char_range(a, z):
"""Creates a generator that iterates the characters from `a` to `z`, inclusive."""
# ord returns the ASCII value, chr returns the char of ASCII value
for c in range(ord(a), ord(z) + 1):
yield chr(c)
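# --- Illustrative sketch (added, not part of the original file) ---
# char_range yields consecutive characters, inclusive of both endpoints:
#
# >>> list(char_range('a', 'e'))
# ['a', 'b', 'c', 'd', 'e']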
|
the-stack_0_8009 | import asyncio
from aiogram import types, Dispatcher
from aiogram.dispatcher import DEFAULT_RATE_LIMIT
from aiogram.dispatcher.handler import CancelHandler, current_handler
from aiogram.dispatcher.middlewares import BaseMiddleware
from aiogram.utils.exceptions import Throttled
class ThrottlingMiddleware(BaseMiddleware):
"""
Simple middleware
"""
def __init__(self, limit=DEFAULT_RATE_LIMIT, key_prefix='antiflood_'):
self.rate_limit = limit
self.prefix = key_prefix
super(ThrottlingMiddleware, self).__init__()
async def on_process_message(self, message: types.Message, data: dict):
"""
This handler is called when dispatcher receives a message
:param message:
"""
# Get current handler
handler = current_handler.get()
# Get dispatcher from context
dispatcher = Dispatcher.get_current()
# If handler was configured, get rate limit and key from handler
if handler:
limit = getattr(handler, 'throttling_rate_limit', self.rate_limit)
key = getattr(handler, 'throttling_key', f"{self.prefix}_{handler.__name__}")
else:
limit = self.rate_limit
key = f"{self.prefix}_message"
# Use Dispatcher.throttle method.
try:
await dispatcher.throttle(key, rate=limit)
except Throttled as t:
# Execute action
await self.message_throttled(message, t)
# Cancel current handler
raise CancelHandler()
async def message_throttled(self, message: types.Message, throttled: Throttled):
"""
Notify user only on first exceed and notify about unlocking only on last exceed
:param message:
:param throttled:
"""
handler = current_handler.get()
dispatcher = Dispatcher.get_current()
if handler:
key = getattr(handler, 'throttling_key', f"{self.prefix}_{handler.__name__}")
else:
key = f"{self.prefix}_message"
# Calculate how many time is left till the block ends
delta = throttled.rate - throttled.delta
# Prevent flooding
if throttled.exceeded_count <= 2:
await message.reply('Too many requests')
# Sleep
await asyncio.sleep(delta)
# Check lock status
thr = await dispatcher.check_key(key)
# If current message is not last with current key - do not send message
if thr.exceeded_count == throttled.exceeded_count:
await message.reply('Unbanned')
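# --- Illustrative sketch (added, not part of the original file) ---
# Registering the middleware and marking a handler with a per-handler limit.
# The middleware above reads the "throttling_rate_limit"/"throttling_key"
# attributes set by the decorator below; the bot/dispatcher objects are assumed.
#
# def rate_limit(limit, key=None):
#     def decorator(func):
#         setattr(func, 'throttling_rate_limit', limit)
#         if key:
#             setattr(func, 'throttling_key', key)
#         return func
#     return decorator
#
# dp = Dispatcher(bot)
# dp.middleware.setup(ThrottlingMiddleware(limit=2))
#
# @dp.message_handler(commands=['start'])
# @rate_limit(5, 'start')
# async def cmd_start(message: types.Message):
#     await message.reply('Hello!')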
|
the-stack_0_8010 | #!/usr/bin/env python
from common.dbconnect import mongo_connect, find_session
from common.hashmethods import *
from common.entities import pcapFile
import logging
logging.getLogger("scapy.runtime").setLevel(logging.ERROR)
from scapy.all import *
from canari.maltego.entities import EmailAddress
from canari.maltego.message import UIMessage
from canari.framework import configure
import re
from canari.config import config
__author__ = 'catalyst256'
__copyright__ = 'Copyright 2014, sniffmypacketsv2 Project'
__credits__ = []
__license__ = 'GPL'
__version__ = '0.1'
__maintainer__ = 'catalyst256'
__email__ = '[email protected]'
__status__ = 'Development'
__all__ = [
'dotransform'
]
@configure(
label='Extract Email Address(s)',
description='Extract email addresses from a pcap file',
uuids=['sniffMyPacketsv2.v2.pcap_2_emailaddr'],
inputs=[('[SmP] - Email', pcapFile)],
debug=True
)
def dotransform(request, response):
pcap = request.value
lookfor = ['MAIL FROM:', 'RCPT TO:']
pkts = rdpcap(pcap)
usedb = config['working/usedb']
# Check to see if we are using the database or not
if usedb > 0:
d = mongo_connect()
c = d['CREDS']
# Hash the pcap file
try:
md5pcap = md5_for_file(pcap)
except Exception as e:
return response + UIMessage(str(e))
x = find_session(md5pcap)
pcap_id = x[0]
else:
pass
addr = []
try:
for p in pkts:
for m in lookfor:
if p.haslayer(TCP) and p.haslayer(Raw):
raw = p[Raw].load
if m in raw:
for s in re.finditer(r'<([\S.-]+@[\S-]+)>', raw):
addr.append(s.group(1))
except Exception as e:
return response + UIMessage(str(e))
for x in addr:
if usedb > 0:
data = {'PCAP ID': pcap_id, 'Type': 'Email Address', 'Record': x}
t = d.CREDS.find({'Record': x}).count()
if t > 0:
pass
else:
c.insert(data)
else:
pass
e = EmailAddress(x)
response += e
return response
|
the-stack_0_8011 | import pandas as pd
import numpy as np
import itertools as it
import functools as ft
from numpy import zeros, arange
from collections import defaultdict
try:
from numba import jit, njit
except ImportError:
print('Install numba')
def multi_args(function, constants, variables, isProduct=False, maxLimit=None):
"""
Run a function on different parameters and
aggregate results
function
function to be parametrized
constants
arguments that would remain constant
throughout all the scenarios
dictionary with key being argument name
and value being the argument value
variables
arguments that need to be varied
dictionary with key being argument name
and value being list of argument values
to substitute
isProduct
list of variables for which all combinations
are to be tried out.
maxLimit
Maximum number of simulations to be run
before terminating. Useful in case of long
running simulations.
default 1000
By default, this function zips through each of the
variables; if you need the Cartesian product of the
variable values, set isProduct to True.
returns a Series with different variables and
the results
"""
from functools import partial
import concurrent.futures
if maxLimit:
MAX_LIMIT = maxLimit
else:
MAX_LIMIT = 1000
func = partial(function, **constants)
arg_list = []
if isProduct:
args = it.product(*variables.values())
else:
args = zip(*variables.values())
keys = variables.keys()
with concurrent.futures.ProcessPoolExecutor() as executor:
tasks = []
for i, arg in enumerate(args):
kwds = {a: b for a, b in zip(keys, arg)}
tasks.append(executor.submit(func, **kwds))
arg_list.append(arg)
i += 1
if i >= MAX_LIMIT:
print('MAX LIMIT reached', MAX_LIMIT)
break
result = [task.result() for task in tasks]
s = pd.Series(result)
s.name = 'values'
s.index = pd.MultiIndex.from_tuples(arg_list, names=keys)
return s
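# --- Illustrative sketch (added, not part of the original file) ---
# Parametrising a made-up simulation with multi_args; only the
# constants/variables calling convention comes from the docstring above.
#
# def simulate(price, qty, sl):
#     return price * qty * (1 - sl * 0.01)
#
# result = multi_args(simulate,
#                     constants={'price': 100},
#                     variables={'qty': [10, 20], 'sl': [1, 2]},
#                     isProduct=True)
# # result is a Series indexed by the (qty, sl) combinations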
def stop_loss(price, stop_loss, order='B', tick_size=0.05):
"""
Return the stop loss for the order
price
price from which stop loss is to be calculated
stop_loss
stop loss percentage from price
order
the original order type - B for Buy and S for Sell
If the original order is buy, then a sell stop
loss is generated and vice-versa
tick_size
tick_size to be rounded off
>>> stop_loss(100, 3)
>>> 97
Notes
------
* passing a negative value may produce unexpected results
* raises ValueError if order is other than B or S
"""
if order == 'B':
return tick(price * (1 - stop_loss * 0.01), tick_size)
elif order == 'S':
return tick(price * (1 + stop_loss * 0.01), tick_size)
else:
raise ValueError('order should be either B or S')
def tick(price, tick_size=0.05):
"""
Rounds a given price to the requested tick
"""
return round(price / tick_size)*tick_size
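# --- Illustrative sketch (added, not part of the original file) ---
# Both helpers are pure functions, so expected values (up to float rounding)
# can be checked directly:
#
# >>> tick(100.012)                 # default 0.05 tick size
# 100.0
# >>> stop_loss(100, 3, order='B')  # sell stop 3% below a buy at 100
# 97.0
# >>> stop_loss(100, 3, order='S')  # buy stop 3% above a sell at 100
# 103.0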
def create_orders(data, rename, **kwargs):
"""
create an orders dataframe from an existing dataframe
by renaming columns and providing additional columns
data
dataframe
rename
columns to be renamed as dictionary
kwargs
key value pairs with key being column names
and values being dataframe values
"""
data = data.rename(rename, axis='columns')
for k, v in kwargs.items():
data[k] = v
return data
def recursive_merge(dfs, on=None, how='inner', columns={}):
"""
Recursively merge all dataframes in the given list
Given a list of dataframes, merge them based on index or columns.
By default, dataframes are merged on index. Specify the **on**
argument to merge by columns. The "on" columns should be available
in all the dataframes
Parameters
-----------
dfs
list of dataframes
on
columns on which the dataframes are to be merged.
By default, merge is done on index
how
how to apply the merge
{'left', 'right', 'outer', 'inner'}, default 'inner'.
Same as pandas merge
columns
To return only specific columns from specific dataframes,
pass them as a dictionary with key being the index of the
dataframe in the list and value being the list of columns
to merge. **your keys should be string**
See examples for more details
>>> recursive_merge(dfs, columns = {'1': ['one', 'two']})
Fetch only the columns one and two from the second dataframe
"""
data = dfs[0]
for i, d in enumerate(dfs[1:], 1):
if columns.get(str(i)):
cols = list(columns.get(str(i)))
cols.extend(on or [])  # guard against on=None when merging on index
else:
cols = d.columns
if on is None:
data = data.merge(d[cols], how=how, left_index=True, right_index=True)
else:
data = data.merge(d[cols], how=how, on=on)
return data
def get_nearest_option(spot, n=1, opt='C', step=100):
"""
Given a spot price, calculate the nearest options
spot
spot price of the instrument
n
number of nearest option prices
opt
call or put option. 'C' for call and 'P' for put
step
step size of the option price
returns a list of options
>>> get_nearest_option(23457, 2)
>>> [23400, 23500]
>>> get_nearest_option(23457, 2, 'P')
>>> [23400, 23300]
All calculations are based on in the money option. So,
get_nearest_option(24499) would return 24400
"""
in_money = int(spot/step) * step
option_prices = []
for i in range(n):
if opt == 'C':
strike = in_money + step*i
option_prices.append(strike)
elif opt == 'P':
strike = in_money - step*i
option_prices.append(strike)
else:
print('Option type not recognized; Check the opt argument')
return option_prices
def calendar(start, end, holidays=None, alldays=False,
start_time=None, end_time=None, freq='D', **kwargs):
"""
Generate a calendar removing the list of
given holidays.
Provide date arguments as strings in the
format **YYYY-MM-DD**
start
start date of the period
end
end date of the period
holidays
list of holidays as strings
alldays
True/False
True to generate dates for all days
including weekends. default: False
start_time
start time for each day as string
end_time
end time for each day as string
freq
frequency of the calendar
kwargs
kwargs to the pandas date range function
Note
-----
1) This function is slow, especially when generating
timestamps. So, use them only once at the start
of your program for better performance
2) This function generates calendar only for
business days. To use all the available days,
set the alldays argument to True
"""
if alldays:
dfunc = ft.partial(pd.date_range, freq='D', **kwargs)
else:
dfunc = ft.partial(pd.bdate_range, freq='B', **kwargs)
dates = list(dfunc(start=start, end=end))
if (holidays):
holidays = [pd.to_datetime(dt) for dt in holidays]
for hol in holidays:
dates.remove(hol)
# Initialize times
if (start_time or end_time):
if not(start_time):
start_time = "00:00:00"
if not(end_time):
end_time = "23:59:59"
timestamps = []
fmt = "{:%Y%m%d} {}"
for d in dates:
start_ts = fmt.format(d, start_time)
end_ts = fmt.format(d, end_time)
ts = pd.date_range(start=start_ts, end=end_ts, freq=freq, **kwargs)
timestamps.extend(ts)
return timestamps
else:
return dates
def get_ohlc_intraday(data, start_time, end_time, date_col=None,
col_mappings=None, sort=False):
"""
Get ohlc for a specific period in a day for all days
for all the symbols.
data
dataframe with symbol, timestamp, date, open, high, low, close columns.
The timestamp and date columns are assumed to be of pandas datetime type.
Each row represents data for a single stock at a specified period of time
If you have different column names, use the col_mappings argument
to rename the columns
start_time
start time for each day
end_time
end time for each day
date_col
date column to aggregate; this is in addition to time column.
If no date column is specified, a date column is created.
col_mappings
column mappings as a dictionary
(Eg.) if the symbol column is named as assetName and timestamp
as ts, then pass col_mappings={'assetName': 'symbol', 'ts': 'timestamp'}
sort
Whether the data is sorted by timestamp.
If True, data is not sorted else data is sorted
returns
a dataframe with symbol, date, open, high, low and close columns
Note
-----
To speed up computation
1) If the data is already sorted, pass sort=True
2) If date column is already available, then pass date_col=column_name
Timestamp and date are assumed to be pandas datetime
"""
if col_mappings:
data = data.rename(col_mappings, axis='columns')
if not(sort):
data = data.sort_values(by='timestamp')
if not(date_col):
data['date'] = data['timestamp'].dt.date
date_col = 'date'
data = data.set_index('timestamp')
def calculate_ohlc(df):
"""
Internal function to calculate OHLC
"""
date = df.iloc[0].at[date_col].strftime('%Y-%m-%d')
fmt = "{date} {time}" # date time format
s = fmt.format(date=date, time=start_time)
e = fmt.format(date=date, time=end_time)
temp = df.loc[s:e]
agg = {'open': 'first', 'high': 'max', 'low': 'min', 'close': 'last'}
return temp.groupby('symbol').agg(agg)
return data.groupby([date_col]).apply(calculate_ohlc)
def get_expanding_ohlc(data, freq, col_mappings=None):
"""
Given a dataframe with OHLC, timestamp and symbol columns
return a OHLC dataframe with open price, expanding high,
expanding low and close prices
data
dataframe with OHLC, timestamp and symbol columns
freq
frequency by which the data is to be resampled.
A pandas frequency string
col_mappings
column mappings as a dictionary
(Eg.) if the symbol column is named as assetName and timestamp
as ts, then pass col_mappings={'assetName': 'symbol', 'ts': 'timestamp'}
Note
-----
The returned dataframe has the same length and index of the
original dataframe. The resampling is done only to calculate the
expanding high, low prices
"""
if col_mappings:
data = data.rename(col_mappings, axis='columns')
def calculate_ohlc(df):
temp = pd.DataFrame({
'high': df['high'].expanding().max(),
'low': df['low'].expanding().min()
})
temp['close'] = df['close']
temp['open'] = df['open'].iloc[0]
return temp
cols = ['open', 'high', 'low', 'close'] # for sorting return value
return data.resample(freq).apply(calculate_ohlc)[cols]
def generate_index(index, changes, dates=None):
"""
index
list of symbols that make up the latest index
changes
changes to the index as a dataframe.
The dataframe should have the following three columns
in the following order
1. date - date of change
2. symbol - security involving the change
3. flag - True/False indicating inclusion/exclusion into the index
True indicates inclusion and False exclusion
dates
list of dates to generate index
returns a dataframe with symbols for each date
Note
-----
* The changes dataframe is expected in the exact order.
Any other columns are discarded
"""
collect = {}
idx = index[:]
changes = changes.sort_values(by='date', ascending=False)
dates = [x for x in reversed(dates)]
uniq_dates = [x for x in changes.date.unique()]
for d in dates:
if d in uniq_dates:
formula = f'date=="{d}"'
chx = changes.query(formula)
for i, row in chx.iterrows():
try:
if not(row['flag']):
idx.append(row['symbol'])
else:
idx.remove(row['symbol'])
except Exception as e:
print(e, d, row)
collect[d] = idx[:]
frame = pd.melt(pd.DataFrame.from_dict(collect))
frame.columns = ['date', 'symbol']
return frame.sort_values(by='date').reset_index(drop=True)
def custom_index(data, on, window=30, function='median', num=30, sort_mode=False):
"""
Generate a custom index
data
dataframe with symbol and timestamp columns
on
column on which the index is to be generated
window
look back window
function
function to be applied
num
number of stocks to pick each day
sort_mode
whether to pick top stocks or bottom stocks
"""
from fastbt.datasource import DataSource
ds = DataSource(data)
ds.add_rolling(on=on, window=window, function=function,
lag=1, col_name='custom_index')
grouped = ds.data.groupby('timestamp')
if sort_mode:
return grouped.apply(lambda x: x.sort_values(
by='custom_index').head(num)).reset_index(drop=True)
else:
return grouped.apply(lambda x: x.sort_values(
by='custom_index').tail(num)).reset_index(drop=True)
@jit
def streak(values):
"""
Calculates the continuous streak of a variable.
Given an array of discrete values, calculate the
continuous streak of each value.
values
numpy array of values
Note
-----
1) Pass numpy arrays for faster computation. In case of pandas series,
pass series.values
2) Calculates the streak based on number of consecutive
values that appear in the array
"""
l = len(values)
arr = zeros(l)
arr[0] = 1
cnt = 1
for i in arange(1, l):
if values[i] == values[i-1]:
cnt += 1
else:
cnt = 1
arr[i] = cnt
return arr
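# --- Illustrative sketch (added, not part of the original file) ---
# streak counts how many consecutive times each value has repeated so far:
#
# >>> streak(np.array([1, 1, 0, 0, 0, 1]))
# array([1., 2., 1., 2., 3., 1.])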
@njit
def trend(up, down, threshold=2/3):
"""
up
numpy array
up values as the difference between open and high
down
numpy array
down values as the difference between open and low
threshold
threshold considered as a valid trend
"""
total = up+down
up_vals = up/total
down_vals = down/total
length = len(total)
arr = np.zeros(length)
for i in np.arange(length):
if up_vals[i] > threshold:
arr[i] = 1
elif down_vals[i] > threshold:
arr[i] = -1
else:
arr[i] = 0
return arr
def generate_weights(n=2, size=1):
"""
Generate random weights that sum to one; uses the dirichlet
distribution to generate weights
"""
return np.random.dirichlet(np.ones(n), size)
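# --- Illustrative sketch (added, not part of the original file) ---
# Each row drawn from the Dirichlet distribution sums to one:
#
# >>> w = generate_weights(n=3, size=2)
# >>> w.shape
# (2, 3)
# >>> np.allclose(w.sum(axis=1), 1.0)
# True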
|
the-stack_0_8012 | # Copyright (c) 2014 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
import tempfile
import luigi.scheduler
import pickle
import unittest
import time
luigi.notifications.DEBUG = True
class SchedulerTest(unittest.TestCase):
def test_load_old_state(self):
tasks = {}
active_workers = {'Worker1': 1e9, 'Worker2': time.time()}
with tempfile.NamedTemporaryFile(delete=True) as fn:
with open(fn.name, 'w') as fobj:
state = (tasks, active_workers)
pickle.dump(state, fobj)
scheduler = luigi.scheduler.CentralPlannerScheduler(
state_path=fn.name)
scheduler.load()
scheduler.prune()
self.assertEquals(list(scheduler._active_workers.keys()),
['Worker2'])
def test_load_broken_state(self):
with tempfile.NamedTemporaryFile(delete=True) as fn:
with open(fn.name, 'w') as fobj:
print >> fobj, "b0rk"
scheduler = luigi.scheduler.CentralPlannerScheduler(
state_path=fn.name)
scheduler.load() # bad if this crashes
self.assertEquals(list(scheduler._active_workers.keys()), [])
if __name__ == '__main__':
unittest.main()
|
the-stack_0_8013 | # Warning
import warnings
import sklearn.exceptions
warnings.filterwarnings('ignore', category=DeprecationWarning)
warnings.filterwarnings('ignore', category=FutureWarning)
warnings.filterwarnings("ignore", category=sklearn.exceptions.UndefinedMetricWarning)
# Python
import numpy as np
import pandas as pd
from tqdm import tqdm
# Pytorch for Deep Learning
import torch
from torch.utils.data import DataLoader
from config import Config
from data import SETIDataset
from model import SwinNet
from transforms import Transforms
from utils import prepare_data
from main import best_model_name, best_epoch
train_df = pd.read_csv('../input/seti-breakthrough-listen/train_labels.csv')
test_df = pd.read_csv('../input/seti-breakthrough-listen/sample_submission.csv')
prepare_data(train_df, test_df)
if __name__ == '__main__':
model = SwinNet()
model.load_state_dict(torch.load(best_model_name))
model = model.to(Config.device)
model.eval()
predicted_labels = None
for i in range(Config.num_tta):
test_dataset = SETIDataset(
images_filepaths = test_df['image_path'].values,
targets = test_df['target'].values,
transform = Transforms.test_transforms
)
test_loader = DataLoader(
test_dataset, batch_size=Config.batch_size,
shuffle=False, num_workers=Config.num_workers,
pin_memory=True
)
temp_preds = None
with torch.no_grad():
for (images, target) in tqdm(test_loader):
images = images.to(Config.device, non_blocking=True)
output = model(images)
predictions = torch.sigmoid(output).cpu().numpy()
if temp_preds is None:
temp_preds = predictions
else:
temp_preds = np.vstack((temp_preds, predictions))
if predicted_labels is None:
predicted_labels = temp_preds
else:
predicted_labels += temp_preds
predicted_labels /= Config.num_tta
torch.save(model.state_dict(), f"{Config.model}_{best_epoch}epochs_weights.pth")
sub_df = pd.DataFrame()
sub_df['id'] = test_df['id']
sub_df['target'] = predicted_labels
sub_df.to_csv('submission.csv', index=False) |
the-stack_0_8016 | """Core classes and exceptions for Simple-Salesforce"""
# has to be defined prior to login import
DEFAULT_API_VERSION = '29.0'
import logging
import warnings
import requests
import json
try:
from urlparse import urlparse, urljoin
except ImportError:
# Python 3+
from urllib.parse import urlparse, urljoin
from simple_salesforce.login import SalesforceLogin
from simple_salesforce.util import date_to_iso8601, SalesforceError
try:
from collections import OrderedDict
except ImportError:
# Python < 2.7
from ordereddict import OrderedDict
#pylint: disable=invalid-name
logger = logging.getLogger(__name__)
def _warn_request_deprecation():
"""Deprecation for (Salesforce/SFType).request attribute"""
warnings.warn(
'The request attribute has been deprecated and will be removed in a '
'future version. Please use Salesforce.session instead.',
DeprecationWarning
)
# pylint: disable=too-many-instance-attributes
class Salesforce(object):
"""Salesforce Instance
An instance of Salesforce is a handy way to wrap a Salesforce session
for easy use of the Salesforce REST API.
"""
# pylint: disable=too-many-arguments
def __init__(
self, username=None, password=None, security_token=None,
session_id=None, instance=None, instance_url=None,
organizationId=None, sandbox=False, version=DEFAULT_API_VERSION,
proxies=None, session=None, client_id=None):
"""Initialize the instance with the given parameters.
Available kwargs
Password Authentication:
* username -- the Salesforce username to use for authentication
* password -- the password for the username
* security_token -- the security token for the username
* sandbox -- True if you want to login to `test.salesforce.com`, False
if you want to login to `login.salesforce.com`.
Direct Session and Instance Access:
* session_id -- Access token for this session
Then either
* instance -- Domain of your Salesforce instance, i.e.
`na1.salesforce.com`
OR
* instance_url -- Full URL of your instance i.e.
`https://na1.salesforce.com
Universal Kwargs:
* version -- the version of the Salesforce API to use, for example
`29.0`
* proxies -- the optional map of scheme to proxy server
* session -- Custom requests session, created in calling code. This
enables the use of requests Session features not otherwise
exposed by simple_salesforce.
"""
# Determine if the user passed in the optional version and/or sandbox
# kwargs
self.sf_version = version
self.sandbox = sandbox
self.session = session or requests.Session()
self.proxies = self.session.proxies
# override custom session proxies dance
if proxies is not None:
if not session:
self.session.proxies = self.proxies = proxies
else:
logger.warning(
'Proxies must be defined on custom session object, '
'ignoring proxies: %s', proxies
)
# Determine if the user wants to use our username/password auth or pass
# in their own information
if all(arg is not None for arg in (
username, password, security_token)):
self.auth_type = "password"
# Pass along the username/password to our login helper
self.session_id, self.sf_instance = SalesforceLogin(
session=self.session,
username=username,
password=password,
security_token=security_token,
sandbox=self.sandbox,
sf_version=self.sf_version,
proxies=self.proxies,
client_id=client_id)
elif all(arg is not None for arg in (
session_id, instance or instance_url)):
self.auth_type = "direct"
self.session_id = session_id
# If the user provides the full url (as returned by the OAuth
# interface for example) extract the hostname (which we rely on)
if instance_url is not None:
self.sf_instance = urlparse(instance_url).hostname
else:
self.sf_instance = instance
elif all(arg is not None for arg in (
username, password, organizationId)):
self.auth_type = 'ipfilter'
# Pass along the username/password to our login helper
self.session_id, self.sf_instance = SalesforceLogin(
session=self.session,
username=username,
password=password,
organizationId=organizationId,
sandbox=self.sandbox,
sf_version=self.sf_version,
proxies=self.proxies,
client_id=client_id)
else:
raise TypeError(
'You must provide login information or an instance and token'
)
if self.sandbox:
self.auth_site = 'https://test.salesforce.com'
else:
self.auth_site = 'https://login.salesforce.com'
self.headers = {
'Content-Type': 'application/json',
'Authorization': 'Bearer ' + self.session_id,
'X-PrettyPrint': '1'
}
self.base_url = ('https://{instance}/services/data/v{version}/'
.format(instance=self.sf_instance,
version=self.sf_version))
self.apex_url = ('https://{instance}/services/apexrest/'
.format(instance=self.sf_instance))
def describe(self):
"""Describes all available objects
"""
url = self.base_url + "sobjects"
result = self._call_salesforce('GET', url)
if result.status_code != 200:
raise SalesforceGeneralError(url,
'describe',
result.status_code,
result.content)
json_result = result.json(object_pairs_hook=OrderedDict)
if len(json_result) == 0:
return None
else:
return json_result
# SObject Handler
def __getattr__(self, name):
"""Returns an `SFType` instance for the given Salesforce object type
(given in `name`).
The magic part of the SalesforceAPI, this function translates
calls such as `salesforce_api_instance.Lead.metadata()` into fully
constituted `SFType` instances to make a nice Python API wrapper
for the REST API.
Arguments:
* name -- the name of a Salesforce object type, e.g. Lead or Contact
"""
# fix to enable serialization
# (https://github.com/heroku/simple-salesforce/issues/60)
if name.startswith('__'):
return super(Salesforce, self).__getattr__(name)
return SFType(
name, self.session_id, self.sf_instance, sf_version=self.sf_version,
proxies=self.proxies, session=self.session)
# User utility methods
def set_password(self, user, password):
"""Sets the password of a user
salesforce dev documentation link:
https://www.salesforce.com/us/developer/docs/api_rest/Content/dome_sobject_user_password.htm
Arguments:
* user: the userID of the user to set
* password: the new password
"""
url = self.base_url + 'sobjects/User/%s/password' % user
params = {'NewPassword': password}
result = self._call_salesforce('POST', url, data=json.dumps(params))
# salesforce return 204 No Content when the request is successful
if result.status_code != 200 and result.status_code != 204:
raise SalesforceGeneralError(url,
'User',
result.status_code,
result.content)
json_result = result.json(object_pairs_hook=OrderedDict)
if len(json_result) == 0:
return None
else:
return json_result
# pylint: disable=invalid-name
def setPassword(self, user, password):
# pylint: disable=line-too-long
"""Sets the password of a user
salesforce dev documentation link:
https://www.salesforce.com/us/developer/docs/api_rest/Content/dome_sobject_user_password.htm
Arguments:
* user: the userID of the user to set
* password: the new password
"""
warnings.warn(
"This method has been deprecated. "
"Please use set_password instead.",
DeprecationWarning)
return self.set_password(user, password)
# Generic Rest Function
def restful(self, path, params, method='GET'):
"""Allows you to make a direct REST call if you know the path
Arguments:
* path: The path of the request
Example: sobjects/User/ABC123/password'
* params: dict of parameters to pass to the path
* method: HTTP request method, default GET
"""
url = self.base_url + path
result = self._call_salesforce(method, url, params=params)
if result.status_code != 200:
raise SalesforceGeneralError(url,
path,
result.status_code,
result.content)
json_result = result.json(object_pairs_hook=OrderedDict)
if len(json_result) == 0:
return None
else:
return json_result
# Search Functions
def search(self, search):
"""Returns the result of a Salesforce search as a dict decoded from
the Salesforce response JSON payload.
Arguments:
* search -- the fully formatted SOSL search string, e.g.
`FIND {Waldo}`
"""
url = self.base_url + 'search/'
# `requests` will correctly encode the query string passed as `params`
params = {'q': search}
result = self._call_salesforce('GET', url, params=params)
if result.status_code != 200:
raise SalesforceGeneralError(url,
'search',
result.status_code,
result.content)
json_result = result.json(object_pairs_hook=OrderedDict)
if len(json_result) == 0:
return None
else:
return json_result
def quick_search(self, search):
"""Returns the result of a Salesforce search as a dict decoded from
the Salesforce response JSON payload.
Arguments:
* search -- the non-SOSL search string, e.g. `Waldo`. This search
string will be wrapped to read `FIND {Waldo}` before being
sent to Salesforce
"""
search_string = u'FIND {{{search_string}}}'.format(search_string=search)
return self.search(search_string)
# Query Handler
def query(self, query, **kwargs):
"""Return the result of a Salesforce SOQL query as a dict decoded from
the Salesforce response JSON payload.
Arguments:
* query -- the SOQL query to send to Salesforce, e.g.
`SELECT Id FROM Lead WHERE Email = "[email protected]"`
"""
url = self.base_url + 'query/'
params = {'q': query}
# `requests` will correctly encode the query string passed as `params`
result = self._call_salesforce('GET', url, params=params, **kwargs)
if result.status_code != 200:
_exception_handler(result)
return result.json(object_pairs_hook=OrderedDict)
def query_more(
self, next_records_identifier, identifier_is_url=False, **kwargs):
"""Retrieves more results from a query that returned more results
than the batch maximum. Returns a dict decoded from the Salesforce
response JSON payload.
Arguments:
* next_records_identifier -- either the Id of the next Salesforce
object in the result, or a URL to the
next record in the result.
* identifier_is_url -- True if `next_records_identifier` should be
treated as a URL, False if
`next_records_identifer` should be treated as
an Id.
"""
if identifier_is_url:
# Don't use `self.base_url` here because the full URI is provided
url = (u'https://{instance}{next_record_url}'
.format(instance=self.sf_instance,
next_record_url=next_records_identifier))
else:
url = self.base_url + 'query/{next_record_id}'
url = url.format(next_record_id=next_records_identifier)
result = self._call_salesforce('GET', url, **kwargs)
if result.status_code != 200:
_exception_handler(result)
return result.json(object_pairs_hook=OrderedDict)
def query_all(self, query, **kwargs):
"""Returns the full set of results for the `query`. This is a
convenience
wrapper around `query(...)` and `query_more(...)`.
The returned dict is the decoded JSON payload from the final call to
Salesforce, but with the `totalSize` field representing the full
number of results retrieved and the `records` list representing the
full list of records retrieved.
Arguments
* query -- the SOQL query to send to Salesforce, e.g.
`SELECT Id FROM Lead WHERE Email = "[email protected]"`
"""
result = self.query(query, **kwargs)
all_records = []
while True:
all_records.extend(result['records'])
# fetch next batch if we're not done else break out of loop
if not result['done']:
result = self.query_more(result['nextRecordsUrl'],
True)
else:
break
result['records'] = all_records
return result
def apexecute(self, action, method='GET', data=None, **kwargs):
"""Makes an HTTP request to an APEX REST endpoint
Arguments:
* action -- The REST endpoint for the request.
* method -- HTTP method for the request (default GET)
* data -- A dict of parameters to send in a POST / PUT request
* kwargs -- Additional kwargs to pass to `requests.request`
"""
result = self._call_salesforce(method, self.apex_url + action,
data=json.dumps(data), **kwargs)
if result.status_code == 200:
try:
response_content = result.json()
# pylint: disable=broad-except
except Exception:
response_content = result.text
return response_content
def _call_salesforce(self, method, url, **kwargs):
"""Utility method for performing HTTP call to Salesforce.
Returns a `requests.result` object.
"""
result = self.session.request(
method, url, headers=self.headers, **kwargs)
if result.status_code >= 300:
_exception_handler(result)
return result
@property
def request(self):
"""Deprecated access to self.session for backwards compatibility"""
_warn_request_deprecation()
return self.session
@request.setter
def request(self, session):
"""Deprecated setter for self.session"""
_warn_request_deprecation()
self.session = session
class SFType(object):
"""An interface to a specific type of SObject"""
# pylint: disable=too-many-arguments
def __init__(
self, object_name, session_id, sf_instance, sf_version='27.0',
proxies=None, session=None):
"""Initialize the instance with the given parameters.
Arguments:
* object_name -- the name of the type of SObject this represents,
e.g. `Lead` or `Contact`
* session_id -- the session ID for authenticating to Salesforce
* sf_instance -- the domain of the instance of Salesforce to use
* sf_version -- the version of the Salesforce API to use
* proxies -- the optional map of scheme to proxy server
* session -- Custom requests session, created in calling code. This
enables the use of requests Session features not otherwise
exposed by simple_salesforce.
"""
self.session_id = session_id
self.name = object_name
self.session = session or requests.Session()
# don't wipe out original proxies with None
if not session and proxies is not None:
self.session.proxies = proxies
self.base_url = (
u'https://{instance}/services/data/v{sf_version}/sobjects'
'/{object_name}/'.format(instance=sf_instance,
object_name=object_name,
sf_version=sf_version))
def metadata(self, headers=None):
"""Returns the result of a GET to `.../{object_name}/` as a dict
decoded from the JSON payload returned by Salesforce.
Arguments:
* headers -- a dict with additional request headers.
"""
result = self._call_salesforce('GET', self.base_url, headers=headers)
return result.json(object_pairs_hook=OrderedDict)
def describe(self, headers=None):
"""Returns the result of a GET to `.../{object_name}/describe` as a
dict decoded from the JSON payload returned by Salesforce.
Arguments:
* headers -- a dict with additional request headers.
"""
result = self._call_salesforce(
method='GET', url=urljoin(self.base_url, 'describe'),
headers=headers
)
return result.json(object_pairs_hook=OrderedDict)
def describe_layout(self, record_id, headers=None):
"""Returns the layout of the object
Returns the result of a GET to
`.../{object_name}/describe/layouts/<recordid>` as a dict decoded from
the JSON payload returned by Salesforce.
Arguments:
* record_id -- the Id of the SObject to get
* headers -- a dict with additional request headers.
"""
custom_url_part = 'describe/layouts/{record_id}'.format(
record_id=record_id
)
result = self._call_salesforce(
method='GET',
url=urljoin(self.base_url, custom_url_part),
headers=headers
)
return result.json(object_pairs_hook=OrderedDict)
def get(self, record_id, headers=None):
"""Returns the result of a GET to `.../{object_name}/{record_id}` as a
dict decoded from the JSON payload returned by Salesforce.
Arguments:
* record_id -- the Id of the SObject to get
* headers -- a dict with additional request headers.
"""
result = self._call_salesforce(
method='GET', url=urljoin(self.base_url, record_id),
headers=headers
)
return result.json(object_pairs_hook=OrderedDict)
def get_by_custom_id(self, custom_id_field, custom_id, headers=None):
"""Return an ``SFType`` by custom ID
Returns the result of a GET to
`.../{object_name}/{custom_id_field}/{custom_id}` as a dict decoded
from the JSON payload returned by Salesforce.
Arguments:
* custom_id_field -- the API name of a custom field that was defined
as an External ID
* custom_id - the External ID value of the SObject to get
* headers -- a dict with additional request headers.
"""
custom_url = urljoin(
self.base_url, '{custom_id_field}/{custom_id}'.format(
custom_id_field=custom_id_field, custom_id=custom_id
)
)
result = self._call_salesforce(
method='GET', url=custom_url, headers=headers
)
return result.json(object_pairs_hook=OrderedDict)
def create(self, data, headers=None):
"""Creates a new SObject using a POST to `.../{object_name}/`.
Returns a dict decoded from the JSON payload returned by Salesforce.
Arguments:
* data -- a dict of the data to create the SObject from. It will be
JSON-encoded before being transmitted.
* headers -- a dict with additional request headers.
"""
result = self._call_salesforce(
method='POST', url=self.base_url,
data=json.dumps(data), headers=headers
)
return result.json(object_pairs_hook=OrderedDict)
def upsert(self, record_id, data, raw_response=False, headers=None):
"""Creates or updates an SObject using a PATCH to
`.../{object_name}/{record_id}`.
If `raw_response` is false (the default), returns the status code
returned by Salesforce. Otherwise, return the `requests.Response`
object.
Arguments:
* record_id -- an identifier for the SObject as described in the
Salesforce documentation
* data -- a dict of the data to create or update the SObject from. It
will be JSON-encoded before being transmitted.
* raw_response -- a boolean indicating whether to return the response
directly, instead of the status code.
* headers -- a dict with additional request headers.
"""
result = self._call_salesforce(
method='PATCH', url=urljoin(self.base_url, record_id),
data=json.dumps(data), headers=headers
)
return self._raw_response(result, raw_response)
def update(self, record_id, data, raw_response=False, headers=None):
"""Updates an SObject using a PATCH to
`.../{object_name}/{record_id}`.
If `raw_response` is false (the default), returns the status code
returned by Salesforce. Otherwise, return the `requests.Response`
object.
Arguments:
* record_id -- the Id of the SObject to update
* data -- a dict of the data to update the SObject from. It will be
JSON-encoded before being transmitted.
* raw_response -- a boolean indicating whether to return the response
directly, instead of the status code.
* headers -- a dict with additional request headers.
"""
result = self._call_salesforce(
method='PATCH', url=urljoin(self.base_url, record_id),
data=json.dumps(data), headers=headers
)
return self._raw_response(result, raw_response)
def delete(self, record_id, raw_response=False, headers=None):
"""Deletes an SObject using a DELETE to
`.../{object_name}/{record_id}`.
If `raw_response` is false (the default), returns the status code
returned by Salesforce. Otherwise, return the `requests.Response`
object.
Arguments:
* record_id -- the Id of the SObject to delete
* raw_response -- a boolean indicating whether to return the response
directly, instead of the status code.
* headers -- a dict with additional request headers.
"""
result = self._call_salesforce(
method='DELETE', url=urljoin(self.base_url, record_id),
headers=headers
)
return self._raw_response(result, raw_response)
def deleted(self, start, end, headers=None):
# pylint: disable=line-too-long
"""Gets a list of deleted records
Use the SObject Get Deleted resource to get a list of deleted records
for the specified object.
.../deleted/?start=2013-05-05T00:00:00+00:00&end=2013-05-10T00:00:00+00:00
* start -- start datetime object
* end -- end datetime object
* headers -- a dict with additional request headers.
"""
url = urljoin(
self.base_url, 'deleted/?start={start}&end={end}'.format(
start=date_to_iso8601(start), end=date_to_iso8601(end)
)
)
result = self._call_salesforce(method='GET', url=url, headers=headers)
return result.json(object_pairs_hook=OrderedDict)
def updated(self, start, end, headers=None):
# pylint: disable=line-too-long
"""Gets a list of updated records
Use the SObject Get Updated resource to get a list of updated
(modified or added) records for the specified object.
.../updated/?start=2014-03-20T00:00:00+00:00&end=2014-03-22T00:00:00+00:00
* start -- start datetime object
* end -- end datetime object
* headers -- a dict with additional request headers.
"""
url = urljoin(
self.base_url, 'updated/?start={start}&end={end}'.format(
start=date_to_iso8601(start), end=date_to_iso8601(end)
)
)
result = self._call_salesforce(method='GET', url=url, headers=headers)
return result.json(object_pairs_hook=OrderedDict)
def _call_salesforce(self, method, url, **kwargs):
"""Utility method for performing HTTP call to Salesforce.
Returns a `requests.result` object.
"""
headers = {
'Content-Type': 'application/json',
'Authorization': 'Bearer ' + self.session_id,
'X-PrettyPrint': '1'
}
additional_headers = kwargs.pop('headers', dict())
headers.update(additional_headers or dict())
result = self.session.request(method, url, headers=headers, **kwargs)
if result.status_code >= 300:
_exception_handler(result, self.name)
return result
# pylint: disable=no-self-use
def _raw_response(self, response, body_flag):
"""Utility method for processing the response and returning either the
status code or the response object.
Returns either an `int` or a `requests.Response` object.
"""
if not body_flag:
return response.status_code
else:
return response
@property
def request(self):
"""Deprecated access to self.session for backwards compatibility"""
_warn_request_deprecation()
return self.session
@request.setter
def request(self, session):
"""Deprecated setter for self.session"""
_warn_request_deprecation()
self.session = session
class SalesforceAPI(Salesforce):
"""Deprecated SalesforceAPI Instance
This class implements the Username/Password Authentication Mechanism using
Arguments It has since been surpassed by the 'Salesforce' class, which
relies on kwargs
"""
# pylint: disable=too-many-arguments
def __init__(self, username, password, security_token, sandbox=False,
sf_version='27.0'):
"""Initialize the instance with the given parameters.
Arguments:
* username -- the Salesforce username to use for authentication
* password -- the password for the username
* security_token -- the security token for the username
* sandbox -- True if you want to login to `test.salesforce.com`, False
if you want to login to `login.salesforce.com`.
* sf_version -- the version of the Salesforce API to use, for example
"27.0"
"""
warnings.warn(
"Use of login arguments has been deprecated. Please use kwargs",
DeprecationWarning
)
super(SalesforceAPI, self).__init__(username=username,
password=password,
security_token=security_token,
sandbox=sandbox,
version=sf_version)
def _exception_handler(result, name=""):
"""Exception router. Determines which error to raise for bad results"""
try:
response_content = result.json()
# pylint: disable=broad-except
except Exception:
response_content = result.text
exc_map = {
300: SalesforceMoreThanOneRecord,
400: SalesforceMalformedRequest,
401: SalesforceExpiredSession,
403: SalesforceRefusedRequest,
404: SalesforceResourceNotFound,
}
exc_cls = exc_map.get(result.status_code, SalesforceGeneralError)
raise exc_cls(result.url, result.status_code, name, response_content)
class SalesforceMoreThanOneRecord(SalesforceError):
"""
Error Code: 300
The value returned when an external ID exists in more than one record. The
response body contains the list of matching records.
"""
message = u"More than one record for {url}. Response content: {content}"
class SalesforceMalformedRequest(SalesforceError):
"""
Error Code: 400
The request couldn't be understood, usually because the JSON or XML body
contains an error.
"""
message = u"Malformed request {url}. Response content: {content}"
class SalesforceExpiredSession(SalesforceError):
"""
Error Code: 401
The session ID or OAuth token used has expired or is invalid. The response
body contains the message and errorCode.
"""
message = u"Expired session for {url}. Response content: {content}"
class SalesforceRefusedRequest(SalesforceError):
"""
Error Code: 403
The request has been refused. Verify that the logged-in user has
appropriate permissions.
"""
message = u"Request refused for {url}. Response content: {content}"
class SalesforceResourceNotFound(SalesforceError):
"""
Error Code: 404
The requested resource couldn't be found. Check the URI for errors, and
verify that there are no sharing issues.
"""
message = u'Resource {name} Not Found. Response content: {content}'
def __str__(self):
return self.message.format(name=self.resource_name,
content=self.content)
class SalesforceGeneralError(SalesforceError):
"""
A non-specific Salesforce error.
"""
message = u'Error Code {status}. Response content: {content}'
def __str__(self):
return self.message.format(status=self.status, content=self.content)
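# --- Illustrative sketch (added, not part of the original file) ---
# Typical password login followed by a SOQL query and a record fetch; the
# credentials and the record Id below are placeholders, not real values.
#
# sf = Salesforce(username='user@example.com', password='secret',
#                 security_token='TOKEN')
# leads = sf.query("SELECT Id, Email FROM Lead WHERE Company = 'Acme'")
# contact = sf.Contact.get('003xxxxxxxxxxxxxxx')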
|
the-stack_0_8017 | import statistics
import numpy as np
import logging
# ### self defined class
from carViewLibV2 import runWithFPS
class landMark():
def __init__(self, id):
self.markVaildCount = 4
self.markPosXList = []
self.markPosYList = []
self.frameTimeList = []
self.id = id
def addPos(self, pos, frameTime = 1.0/30.0):
self.markPosXList.append(pos['x'])
self.markPosYList.append(pos['y'])
self.frameTimeList.append(frameTime)
def getLastPos(self):
try:
rX, rY = self.markPosXList[-1],self.markPosYList[-1]
except:
rX, rY = None, None
return rX, rY
def isVaildMark(self):
if len(self.frameTimeList)>=self.markVaildCount:
return True
else:
return False
def getVelocity(self):
### call this function when mark left view
# DISTANCE_FACTOR = 80.0 ### carView04.mp4
# DISTANCE_FACTOR = 30.0 ### outside3.mp4
# DISTANCE_FACTOR = 60.0 ### testDistance3.mp4
# totalT = sum(self.frameTimeList)
# velcity = DISTANCE_FACTOR / totalT
### count last self.markVaildCount as velocity
DISTANCE_FACTOR = 1
distance = self.markPosYList[-1] - self.markPosYList[-self.markVaildCount]
totalT = sum(self.frameTimeList[-5:])
velcity = distance * DISTANCE_FACTOR / totalT
return velcity
def isInPosList(self, markPosYList, ft):
DISTANCE_MARK = 30
mx, my = self.getLastPos()
for i, posY in enumerate(markPosYList):
if my-2 <= posY and my+DISTANCE_MARK > posY:
pos = {"x": 0, "y": posY}
self.addPos(pos, frameTime = ft)
markPosYList.pop(i)
# print("markPosYList pop.")
return True
return False
class traceMark():
# DISTANCE_MARK = 15
def __init__(self):
self.count = 0
self.markList = []
self.markIdList = []
self.velocityList = []
self.previousVelocity = 0
def addMark(self, pos, ft):
mark = landMark(self.count)
mark.addPos(pos, frameTime=ft)
self.markList.append(mark)
self.markIdList.append(self.count)
self.count += 1
def getMedVelocity(self):
if len(self.velocityList)>5:
self.velocityList = self.velocityList[-5:]
mean = statistics.mean(self.velocityList)
# vStd = statistics.stdev(self.velocityList)
# try:
# self.velocityList = [v for v in self.velocityList if v > mean-(4*vStd) and v < mean+(4*vStd)]
# vel = statistics.median(self.velocityList)
# return vel
# except:
# return mean
            if self.previousVelocity==mean: ### prevents returning a stale value when no new mark was seen
return 0
else:
self.previousVelocity = mean
return mean
elif len(self.velocityList)>0:
mean = statistics.mean(self.velocityList)
            if self.previousVelocity==mean: ### prevents returning a stale value when no new mark was seen
return 0
else:
self.previousVelocity = mean
return mean
else:
return 0
def processMark(self, maxLocation, fps = 1.0/30.0):
# DISTANCE_MARK = 20
DISTANCE_MARK = 30
# array1D = maxLocation[int(len(maxLocation)/2):] ### take only bottom half
array1D = maxLocation[int(len(maxLocation)/2)-50:-50] ### take only bottom half
xArray = np.array(range(len(array1D)))
zeroIdx = [i for i in range(len(array1D)) if array1D[i] == 0]
yArrayTrim = [array1D[i] for i in range(len(array1D)) if i not in zeroIdx]
xArrayTrim = [xArray[i] for i in range(len(xArray)) if i not in zeroIdx]
markPosYList = []
tmpPosYList = []
currentIdx = -1
for i in range(len(xArrayTrim)):
currentY = xArrayTrim[i]
if currentIdx < 0:
markPosYList.append(currentY)
tmpPosYList.append(currentY)
currentIdx += 1
elif currentIdx >=0 and tmpPosYList[currentIdx] > currentY -2:
tmpPosYList[currentIdx] = currentY
elif currentIdx >=0 and markPosYList[currentIdx] < currentY -DISTANCE_MARK:
markPosYList.append(currentY)
tmpPosYList.append(currentY)
currentIdx += 1
# print("markPosYList:",markPosYList)
if len(markPosYList) > 0 and markPosYList[0] == 0:
markPosYList.pop(0) ### remove 0 from list
newList = []
ft = fps if type(fps)==type(0.1) else fps.getTime()
for mark in self.markList:
logging.debug((f"marklsit len: {len(self.markList)}, markpos: {mark.markPosYList}, {mark.frameTimeList}"))
if mark.isInPosList(markPosYList, ft) :
newList.append(mark)
# elif mark.isVaildMark():
if mark.isVaildMark():
vel = mark.getVelocity()
if vel <200:
self.velocityList.append(vel)
# vel = self.getMedVelocity()
logging.debug((f"velocity: {vel:.1f}, len: {len(self.velocityList)}"))
# logging.warning((f"velocity: {vel:.1f}, len: {len(self.velocityList)}"))
# print(f"velocity: {vel:.1f}")
else:
logging.debug("Invalid mark.")
self.markList = newList
for posY in markPosYList:
# print("Mark added")
pos = {"x": 0, "y": posY}
self.addMark(pos, ft)
# print("self.markList",len(self.markList))
|
the-stack_0_8018 | #!/usr/bin/python
# Classification (U)
"""Program: rabbitmqadmin_list_vhost_topic_permissions.py
Description: Unit testing of RabbitMQAdmin.list_vhost_topic_permissions in
rabbitmq_class.py.
Usage:
test/unit/rabbitmq_class/rabbitmqadmin_list_vhost_topic_permissions.py
Arguments:
"""
# Libraries and Global Variables
# Standard
import sys
import os
if sys.version_info < (2, 7):
import unittest2 as unittest
else:
import unittest
# Third-party
import mock
# Local
sys.path.append(os.getcwd())
import rabbitmq_class
import version
__version__ = version.__version__
class UnitTest(unittest.TestCase):
"""Class: UnitTest
Description: Class which is a representation of a unit testing.
Methods:
setUp
test_basic
"""
def setUp(self):
"""Function: setUp
Description: Initialization for unit testing.
Arguments:
"""
self.name = "UserName"
self.japd = "japd"
self.rmq = rabbitmq_class.RabbitMQAdmin(self.name, self.japd)
self.data = {"key": "value"}
self.vhost = "VhostName"
self.results = {"key": "value"}
@mock.patch("rabbitmq_class.RabbitMQBase.api_get")
def test_basic(self, mock_get):
"""Function: test_basic
Description: Test with basic set up.
Arguments:
"""
mock_get.return_value = self.data
self.assertEqual(
self.rmq.list_vhost_topic_permissions(self.vhost), self.results)
if __name__ == "__main__":
unittest.main()
|
the-stack_0_8019 | import numpy
import scipy.linalg
import time
from pauxy.estimators.mixed import (
variational_energy, variational_energy_ortho_det, local_energy
)
from pauxy.estimators.greens_function import gab, gab_spin, gab_mod, gab_mod_ovlp
from pauxy.estimators.ci import get_hmatel, get_one_body_matel
from pauxy.utils.io import (
get_input_value,
write_qmcpack_wfn
)
from pauxy.utils.mpi import get_shared_array
class MultiSlater(object):
def __init__(self, system, wfn, nbasis=None, options={},
init=None, verbose=False, orbs=None):
self.verbose = verbose
if verbose:
print ("# Parsing MultiSlater trial wavefunction input options.")
init_time = time.time()
self.name = "MultiSlater"
self.type = "MultiSlater"
# TODO : Fix for MSD.
# This is for the overlap trial
if len(wfn) == 3:
# CI type expansion.
self.from_phmsd(system, wfn, orbs)
self.ortho_expansion = True
else:
self.psi = wfn[1]
self.coeffs = numpy.array(wfn[0], dtype=numpy.complex128)
self.ortho_expansion = False
self.split_trial_local_energy = options.get('split_trial_local_energy', False)
if verbose:
print("# split_trial_local_energy = {}".format(self.split_trial_local_energy))
if self.split_trial_local_energy:
if verbose:
print("# taking the determinant with the largest coefficient as the local energy trial")
imax = numpy.argmax(numpy.abs(self.coeffs))
self.le_coeffs = numpy.array([self.coeffs[imax]], dtype=numpy.complex128)
self.le_psi = numpy.array([self.psi[imax,:,:]], dtype=self.psi.dtype)
self.le_ortho_expansion = self.ortho_expansion
else:
self.le_psi = self.psi.copy()
self.le_coeffs = self.coeffs.copy()
self.le_ortho_expansion = self.ortho_expansion
if self.verbose:
if self.ortho_expansion:
print("# Assuming orthogonal trial wavefunction expansion.")
else:
print("# Assuming non-orthogonal trial wavefunction expansion.")
print("# Trial wavefunction shape: {}".format(self.psi.shape))
self.ndets = len(self.coeffs)
if self.ndets == 1:
# self.psi = self.psi[0]
self.G, self.GH = gab_spin(self.psi[0], self.psi[0],
system.nup, system.ndown)
else:
self.G = None
self.GH = None
if init is not None:
if verbose:
print("# Using initial wavefunction from file.")
self.init = init
else:
if verbose:
print("# Setting initial wavefunction as first determinant in"
" expansion.")
if len(self.psi.shape) == 3:
self.init = self.psi[0].copy()
else:
self.init = self.psi.copy()
self.error = False
self.initialisation_time = time.time() - init_time
self._nalpha = system.nup
self._nbeta = system.ndown
self._nelec = system.nelec
self._nbasis = system.nbasis
self._rchol = None
self._UVT = None
self._eri = None
self._mem_required = 0.0
self.ecoul0 = None
self.exxa0 = None
self.exxb0 = None
write_wfn = options.get('write_wavefunction', False)
output_file = options.get('output_file', 'wfn.h5')
if write_wfn:
self.write_wavefunction(filename=output_file)
if verbose:
print ("# Finished setting up trial wavefunction.")
def local_energy_2body(self, system):
"""Compute walkers two-body local energy
Parameters
----------
system : object
System object.
Returns
-------
        (ecoul, exxa, exxb) : tuple
            Coulomb and spin-resolved exchange contributions to the walker's two-body energy.
"""
nalpha, nbeta = system.nup, system.ndown
nbasis = system.nbasis
naux = self._rchol.shape[1]
Ga, Gb = self.GH[0], self.GH[1]
Xa = self._rchol[:nalpha*nbasis].T.dot(Ga.ravel())
Xb = self._rchol[nalpha*nbasis:].T.dot(Gb.ravel())
ecoul = numpy.dot(Xa,Xa)
ecoul += numpy.dot(Xb,Xb)
ecoul += 2*numpy.dot(Xa,Xb)
rchol_a, rchol_b = self._rchol[:nalpha*nbasis], self._rchol[nalpha*nbasis:]
rchol_a = rchol_a.T
rchol_b = rchol_b.T
Ta = numpy.zeros((naux, nalpha, nalpha), dtype=rchol_a.dtype)
Tb = numpy.zeros((naux, nbeta, nbeta), dtype=rchol_b.dtype)
GaT = Ga.T
GbT = Gb.T
for x in range(naux):
rmi_a = rchol_a[x].reshape((nalpha,nbasis))
Ta[x] = rmi_a.dot(GaT)
rmi_b = rchol_b[x].reshape((nbeta,nbasis))
Tb[x] = rmi_b.dot(GbT)
exxa = numpy.tensordot(Ta, Ta, axes=((0,1,2),(0,2,1)))
exxb = numpy.tensordot(Tb, Tb, axes=((0,1,2),(0,2,1)))
exx = exxa + exxb
e2b = 0.5 * (ecoul - exx)
return ecoul, exxa, exxb
def calculate_energy(self, system):
if self.verbose:
print("# Computing trial wavefunction energy.")
start = time.time()
# Cannot use usual energy evaluation routines if trial is orthogonal.
if self.ortho_expansion:
self.energy, self.e1b, self.e2b = (
variational_energy_ortho_det(system,
self.spin_occs,
self.coeffs)
)
else:
(self.energy, self.e1b, self.e2b) = (
variational_energy(system, self.psi, self.coeffs,
G=self.G, GH=self.GH,
rchol=self._rchol, eri=self._eri,
C0 = self.psi,
ecoul0 = self.ecoul0,
exxa0 = self.exxa0,
exxb0 = self.exxb0,
UVT=self._UVT)
)
if self.verbose:
print("# (E, E1B, E2B): (%13.8e, %13.8e, %13.8e)"
%(self.energy.real, self.e1b.real, self.e2b.real))
print("# Time to evaluate local energy: %f s"%(time.time()-start))
def from_phmsd(self, system, wfn, orbs):
ndets = len(wfn[0])
self.psi = numpy.zeros((ndets,system.nbasis,system.ne),
dtype=numpy.complex128)
if self.verbose:
print("# Creating trial wavefunction from CI-like expansion.")
if orbs is None:
if self.verbose:
print("# Assuming RHF reference.")
I = numpy.eye(system.nbasis, dtype=numpy.complex128)
# Store alpha electrons first followed by beta electrons.
nb = system.nbasis
dets = [list(a) + [i+nb for i in c] for (a,c) in zip(wfn[1],wfn[2])]
self.spin_occs = [numpy.sort(d) for d in dets]
self.occa = wfn[1]
self.occb = wfn[2]
self.coeffs = numpy.array(wfn[0], dtype=numpy.complex128)
for idet, (occa, occb) in enumerate(zip(wfn[1], wfn[2])):
self.psi[idet,:,:system.nup] = I[:,occa]
self.psi[idet,:,system.nup:] = I[:,occb]
def recompute_ci_coeffs(self, system):
H = numpy.zeros((self.ndets, self.ndets), dtype=numpy.complex128)
S = numpy.zeros((self.ndets, self.ndets), dtype=numpy.complex128)
m = system.nbasis
na = system.nup
nb = system.ndown
if self.ortho_expansion:
for i in range(self.ndets):
for j in range(i,self.ndets):
di = self.spin_occs[i]
dj = self.spin_occs[j]
H[i,j] = get_hmatel(system,di,dj)[0]
e, ev = scipy.linalg.eigh(H, lower=False)
else:
na = system.nup
for i, di in enumerate(self.psi):
for j, dj in enumerate(self.psi):
if j >= i:
ga, gha, ioa = gab_mod_ovlp(di[:,:na], dj[:,:na])
gb, ghb, iob = gab_mod_ovlp(di[:,na:], dj[:,na:])
G = numpy.array([ga,gb])
Ghalf = numpy.array([gha,ghb])
ovlp = 1.0/(scipy.linalg.det(ioa)*scipy.linalg.det(iob))
if abs(ovlp) > 1e-12:
if self._rchol is not None:
rchol = self.rchol(i)
else:
rchol = None
H[i,j] = ovlp * local_energy(system, G,
Ghalf=Ghalf,
rchol=rchol)[0]
S[i,j] = ovlp
H[j,i] = numpy.conjugate(H[i,j])
S[j,i] = numpy.conjugate(S[i,j])
e, ev = scipy.linalg.eigh(H, S, lower=False)
# if self.verbose:
# print("Old and New CI coefficients: ")
# for co,cn in zip(self.coeffs,ev[:,0]):
# print("{} {}".format(co, cn))
return numpy.array(ev[:,0], dtype=numpy.complex128)
def contract_one_body(self, ints):
numer = 0.0
denom = 0.0
na = self._nalpha
for i in range(self.ndets):
for j in range(self.ndets):
cfac = self.coeffs[i].conj()*self.coeffs[j].conj()
if self.ortho_expansion:
di = self.spin_occs[i]
dj = self.spin_occs[j]
tij = get_one_body_matel(ints,di,dj)
numer += cfac * tij
if i == j:
denom += self.coeffs[i].conj()*self.coeffs[i].conj()
else:
di = self.psi[i]
dj = self.psi[j]
ga, gha, ioa = gab_mod_ovlp(di[:,:na], dj[:,:na])
gb, ghb, iob = gab_mod_ovlp(di[:,na:], dj[:,na:])
ovlp = 1.0/(scipy.linalg.det(ioa)*scipy.linalg.det(iob))
tij = numpy.dot(ints.ravel(), ga.ravel()+gb.ravel())
numer += cfac * ovlp * tij
denom += cfac * ovlp
return numer / denom
def write_wavefunction(self, filename='wfn.h5', init=None, occs=False):
if occs:
wfn = (self.coeffs, self.occa, self.occb)
else:
wfn = (self.coeffs, self.psi)
write_qmcpack_wfn(filename, wfn, 'uhf', self._nelec, self._nbasis,
init=init)
def half_rotate(self, system, comm=None):
# Half rotated cholesky vectors (by trial wavefunction).
M = system.nbasis
na = system.nup
nb = system.ndown
nchol = system.chol_vecs.shape[-1]
if self.verbose:
print("# Constructing half rotated Cholesky vectors.")
if isinstance(system.chol_vecs, numpy.ndarray):
chol = system.chol_vecs.reshape((M,M,nchol))
else:
chol = system.chol_vecs.toarray().reshape((M,M,nchol))
if (system.exact_eri):
shape = (self.ndets,(M**2*(na**2+nb**2) + M**2*(na*nb)))
self._eri = get_shared_array(comm, shape, numpy.complex128)
self._mem_required = self._eri.nbytes / (1024.0**3.0)
for i, psi in enumerate(self.psi):
vipjq_aa = numpy.einsum("mpX,rqX,mi,rj->ipjq", chol, chol, psi[:,:na].conj(), psi[:,:na].conj(), optimize=True)
vipjq_bb = numpy.einsum("mpX,rqX,mi,rj->ipjq", chol, chol, psi[:,na:].conj(), psi[:,na:].conj(), optimize=True)
vipjq_ab = numpy.einsum("mpX,rqX,mi,rj->ipjq", chol, chol, psi[:,:na].conj(), psi[:,na:].conj(), optimize=True)
self._eri[i,:M**2*na**2] = vipjq_aa.ravel()
self._eri[i,M**2*na**2:M**2*na**2+M**2*nb**2] = vipjq_bb.ravel()
self._eri[i,M**2*na**2+M**2*nb**2:] = vipjq_ab.ravel()
if (system.pno):
thresh_pno = system.thresh_pno
UVT_aa = []
UVT_bb = []
UVT_ab = []
nocca = system.nup
noccb = system.ndown
nvira = system.nbasis - system.nup
nvirb = system.nbasis - system.ndown
r_aa = []
for i in range(na):
for j in range(i, na):
Vab = vipjq_aa[i,:,j,:]
U, s, VT = numpy.linalg.svd(Vab)
idx = s > thresh_pno
U = U[:,idx]
s = s[idx]
r_aa += [s.shape[0] / float(system.nbasis)]
VT = VT[idx,:]
U = U.dot(numpy.diag(numpy.sqrt(s)))
VT = numpy.diag(numpy.sqrt(s)).dot(VT)
UVT_aa += [(U, VT)]
r_aa = numpy.array(r_aa)
r_aa = numpy.mean(r_aa)
r_bb = []
for i in range(nb):
for j in range(i, nb):
Vab = vipjq_bb[i,:,j,:]
U, s, VT = numpy.linalg.svd(Vab)
idx = s > thresh_pno
U = U[:,idx]
s = s[idx]
r_bb += [s.shape[0] / float(system.nbasis)]
VT = VT[idx,:]
U = U.dot(numpy.diag(numpy.sqrt(s)))
VT = numpy.diag(numpy.sqrt(s)).dot(VT)
UVT_bb += [(U, VT)]
r_bb = numpy.array(r_bb)
r_bb = numpy.mean(r_bb)
r_ab = []
for i in range(na):
for j in range(nb):
Vab = vipjq_ab[i,:,j,:]
U, s, VT = numpy.linalg.svd(Vab)
idx = s > thresh_pno
U = U[:,idx]
s = s[idx]
r_ab += [s.shape[0] / float(system.nbasis)]
VT = VT[idx,:]
U = U.dot(numpy.diag(numpy.sqrt(s)))
VT = numpy.diag(numpy.sqrt(s)).dot(VT)
UVT_ab += [(U, VT)]
r_ab = numpy.array(r_ab)
r_ab = numpy.mean(r_ab)
self._UVT = [UVT_aa, UVT_bb, UVT_ab]
self._eri = None
if self.verbose:
print("# Average number of orbitals (relative to total) for aa, bb, ab = {}, {}, {}".format(r_aa, r_bb, r_ab))
if self.verbose:
print("# Memory required by exact ERIs: "
" {:.4f} GB.".format(self._mem_required))
if comm is not None:
comm.barrier()
# else:
shape = (self.ndets*(M*(na+nb)), nchol)
self._rchol = get_shared_array(comm, shape, numpy.complex128)
for i, psi in enumerate(self.psi):
start_time = time.time()
if self.verbose:
print("# Rotating Cholesky for determinant {} of "
"{}.".format(i+1,self.ndets))
start = i*M*(na+nb)
compute = True
# Distribute amongst MPI tasks on this node.
if comm is not None:
nwork_per_thread = chol.shape[-1] // comm.size
if nwork_per_thread == 0:
start_n = 0
end_n = nchol
if comm.rank != 0:
# Just run on root processor if problem too small.
compute = False
else:
start_n = comm.rank * nwork_per_thread
end_n = (comm.rank+1) * nwork_per_thread
if comm.rank == comm.size - 1:
end_n = nchol
else:
start_n = 0
end_n = chol.shape[-1]
nchol_loc = end_n - start_n
# if comm.rank == 0:
# print(start_n, end_n, nchol_loc)
# print(numpy.may_share_memory(chol, chol[:,start_n:end_n]))
if compute:
rup = numpy.tensordot(psi[:,:na].conj(),
chol[:,:,start_n:end_n],
axes=((0),(0))).reshape((na*M,nchol_loc))
self._rchol[start:start+M*na,start_n:end_n] = rup[:]
rdn = numpy.tensordot(psi[:,na:].conj(),
chol[:,:,start_n:end_n],
axes=((0),(0))).reshape((nb*M,nchol_loc))
self._rchol[start+M*na:start+M*(na+nb),start_n:end_n] = rdn[:]
self._mem_required = self._rchol.nbytes / (1024.0**3.0)
if self.verbose:
print("# Memory required by half-rotated integrals: "
" {:.4f} GB.".format(self._mem_required))
print("# Time to half rotate {} seconds.".format(time.time()-start_time))
if comm is not None:
comm.barrier()
self._rot_hs_pot = self._rchol
if(system.control_variate):
self.ecoul0, self.exxa0, self.exxb0 = self.local_energy_2body(system)
def rot_chol(self, idet=0, spin=None):
"""Helper function"""
if spin is None:
stride = self._nbasis * (self._nalpha + self._nbeta)
return self._rchol[idet*stride:(idet+1)*stride]
else:
stride = self._nbasis * (self._nalpha + self._nbeta)
alpha = self._nbasis * self._nalpha
if spin == 0:
return self._rchol[idet*stride:idet*stride+alpha]
else:
beta = self._nbasis * self._nbeta
return self._rchol[idet*stride+alpha:idet*stride+alpha+beta]
def rot_hs_pot(self, idet=0, spin=None):
"""Helper function"""
if spin is None:
stride = self._nbasis * (self._nalpha + self._nbeta)
return self._rot_hs_pot[idet*stride:(idet+1)*stride]
else:
stride = self._nbasis * (self._nalpha + self._nbeta)
alpha = self._nbasis * self._nalpha
if spin == 0:
return self._rot_hs_pot[idet*stride:idet*stride+alpha]
else:
beta = self._nbasis * self._nbeta
return self._rot_hs_pot[idet*stride+alpha:idet*stride+alpha+beta]
# TODO: Implement
# def half_rotate_cplx(self, system, comm=None):
# # Half rotated cholesky vectors (by trial wavefunction).
# M = system.nbasis
# na = system.nup
# nb = system.ndown
# nchol = system.chol_vecs.shape[-1]
# if self.verbose:
# print("# Constructing half rotated Cholesky vectors.")
# if isinstance(system.chol_vecs, numpy.ndarray):
# chol = system.chol_vecs.reshape((M,M,-1))
# else:
# chol = system.chol_vecs.toarray().reshape((M,M,-1))
# if comm is None or comm.rank == 0:
# shape = (self.ndets*(M*(na+nb)), nchol)
# else:
# shape = None
# self.rchol = get_shared_array(comm, shape, numpy.complex128)
# if comm is None or comm.rank == 0:
# for i, psi in enumerate(self.psi):
# start_time = time.time()
# if self.verbose:
# print("# Rotating Cholesky for determinant {} of "
# "{}.".format(i+1,self.ndets))
# start = i*M*(na+nb)
# rup = numpy.tensordot(psi[:,:na].conj(),
# chol,
# axes=((0),(0)))
# self.rchol[start:start+M*na] = rup[:].reshape((-1,nchol))
# rdn = numpy.tensordot(psi[:,na:].conj(),
# chol,
# axes=((0),(0)))
# self.rchol[start+M*na:start+M*(na+nb)] = rdn[:].reshape((-1,nchol))
# if self.verbose:
# print("# Time to half rotate {} seconds.".format(time.time()-start_time))
# self.rot_hs_pot = self.rchol
|
the-stack_0_8020 | # Standard libraries
import logging
import random
import re
import time
# third party libraries
import tweepy
class RetweetGiveaway:
def __init__(self, api, user):
"""
RetweetGiveaway class constructor, requires api object and user object
:param api tweepy.API: api object from tweepy library
:param user tweepy.API.me() : User object for current bot
"""
self.user = user
self.api = api
self.bot_action = []
def check_retweet(self, words_to_search, accounts_to_blacklist, hashtag_to_blacklist, giveaway_to_blacklist,
comment_with_hashtag, max_giveaway):
"""
        Check for useful giveaway tweets, filtering out blacklisted accounts, hashtags and giveaways
:param words_to_search list: List of Keywords to Search tweet for
:param accounts_to_blacklist list: List of Blacklisted Accounts to Ignore
:param hashtag_to_blacklist list: List of Blacklisted Hashtags in tweets to ignore
:param giveaway_to_blacklist list: List of Blacklisted Giveaways to Ignore
:param comment_with_hashtag boolean: If we comment with hashtag
:param max_giveaway integer: Maximum number of giveaway retrieve for each word
"""
action = []
regex_detect_tag = [r"\b(\w*INVIT(E|É)\w*)\b",
r"\b(\w*IDENTIFI(E|É)\w*)\b",
r"\b(\w*TAG\w*)\b",
r"\b(\w*MENTIONN(E|É)\w*)\b"]
regex_detect_tag = re.compile('|'.join(regex_detect_tag), re.IGNORECASE)
for word in words_to_search:
logging.info("Searching giveaway with the word : %s", word)
for tweet in tweepy.Cursor(self.api.search,
q=word, since=time.strftime('%Y-%m-%d', time.localtime()),
lang="fr", tweet_mode="extended").items(max_giveaway):
if tweet.retweet_count > 5:
is_in_blacklist = [ele for ele in giveaway_to_blacklist if (ele in tweet.full_text)]
if is_in_blacklist:
pass
else:
# Check if it's a retweet
if hasattr(tweet, 'retweeted_status'):
screen_name = tweet.retweeted_status.author.screen_name
entities = tweet.retweeted_status.entities
full_text = tweet.retweeted_status.full_text
extra = 0
else:
screen_name = tweet.user.screen_name
entities = tweet.entities
full_text = tweet.full_text
extra = 3
# Check if Tweet Author is blacklisted or not
if screen_name not in accounts_to_blacklist:
# Check for INVITE/TAG/MENTIONNE in retweet text
if re.search(regex_detect_tag, full_text):
# Check if tweet has Hashtags
if len(entities['hashtags']) > 0:
# if comment with hashtag is enabled
if comment_with_hashtag:
# Clean Hastags
h_list = self.manage_hashtag(entities['hashtags'],
hashtag_to_blacklist)
# If we find Hashtags -> Record the tweet
if h_list:
action.append(tweet)
action.append(1 + extra)
self.bot_action.append(action)
else:
action.append(tweet)
action.append(2 + extra)
self.bot_action.append(action)
else:
action.append(tweet)
action.append(2 + extra)
self.bot_action.append(action)
# Else Select Action 2
else:
action.append(tweet)
action.append(2 + extra)
self.bot_action.append(action)
# If regex-tags not found, record the tweet without action number
else:
action.append(tweet)
self.bot_action.append(action)
action = []
return self.bot_action
def manage_giveaway(self, list_giveaway, sentence_for_tag, list_name, hashtag_to_blacklist, managefollow,
like_giveaway, nb_account_to_tag):
"""
        Handle giveaway tweets by following/commenting/tagging depending on the giveaway levels
:param list_giveaway list: List of Giveaways tweets and (optional) Giveaway levels
:param sentence_for_tag list: List of Random Sentences to use for commenting
:param list_name list: List of Names to Randomly Tag on giveaways
:param hashtag_to_blacklist list: List of hastags to blacklist
:param managefollow managefollow: Database management object from ManageFollow
:param like_giveaway boolean: If we like giveaway
"""
for giveaway in list_giveaway:
tweet = giveaway[0]
try:
if hasattr(tweet, 'retweeted_status'):
retweeted = tweet.retweeted_status.retweeted
id_ = tweet.retweeted_status.id
author_id = tweet.retweeted_status.author.id
entities = tweet.retweeted_status.entities
screen_name = tweet.retweeted_status.user.screen_name
else:
retweeted = tweet.retweeted
id_ = tweet.id
author_id = tweet.user.id
entities = tweet.entities
screen_name = tweet.user.screen_name
if not retweeted:
self.api.retweet(id_)
if like_giveaway:
self.api.create_favorite(id_)
self.api.create_friendship(author_id)
if len(giveaway) == 2:
comment_level = giveaway[1]
self.comment(tweet, sentence_for_tag, comment_level, list_name, hashtag_to_blacklist,
nb_account_to_tag)
managefollow.update_table(author_id)
if len(entities['user_mentions']) > 0:
for mention in entities['user_mentions']:
self.api.create_friendship(mention['id'])
managefollow.update_table(mention['id'])
random_sleep_time = random.randrange(10, 20)
logging.info("You participated in the giveaway of : @%s. Sleeping for %ss...",
screen_name,
str(random_sleep_time))
time.sleep(random_sleep_time)
except tweepy.TweepError as e:
if e.api_code == 327:
pass
elif e.api_code == 161:
logging.warning("The account can no longer follow. We go to the next step.")
break
elif e.api_code == 136:
logging.info("You have been blocked by: %s", screen_name)
break
elif e.api_code == 326:
logging.warning("You have to do a captcha on the account: %s", self.user.screen_name)
break
else:
logging.error(e)
def comment(self, tweet, sentence_for_tag, hashtag, list_name, hashtag_to_blacklist, nb_account_to_tag):
"""
Add Comment to a given tweet using some rules.
:param tweet tweepy.tweet: Tweet object from tweepy library
:param sentence_for_tag list: List of random sentences
:param hashtag list: List of Hashtags
:param list_name list: List of user names
:param hashtag_to_blacklist list: List of Blacklisted Hastags to avoid
"""
random.shuffle(list_name)
nbrandom = random.randrange(0, len(sentence_for_tag))
randomsentence = sentence_for_tag[nbrandom]
# Random Sentence + Tag Comment + Hashtag Comment + Update Status
if hashtag == 1:
comment = "@" + tweet.retweeted_status.author.screen_name + " " + randomsentence + " "
comment = self.add_tag_comment(list_name, comment, nb_account_to_tag)
comment = self.add_hashtag_comment(comment, tweet.retweeted_status.entities['hashtags'],
hashtag_to_blacklist)
self.api.update_status(comment, tweet.retweeted_status.id)
# Random Sentence + Tag Comment + Update Status
elif hashtag == 2:
comment = "@" + tweet.retweeted_status.author.screen_name + " " + randomsentence + " "
comment = self.add_tag_comment(list_name, comment, nb_account_to_tag)
self.api.update_status(comment, tweet.retweeted_status.id)
# Hashtag Comment + Update Status
elif hashtag == 3:
comment = "@" + tweet.retweeted_status.author.screen_name + " "
comment = self.add_hashtag_comment(comment, tweet.retweeted_status.entities['hashtags'],
hashtag_to_blacklist)
self.api.update_status(comment, tweet.retweeted_status.id)
# User - Random Sentence + Tag Comment + Hashtag Comment + Update Status
elif hashtag == 4:
comment = "@" + tweet.user.screen_name + " " + randomsentence + " "
comment = self.add_tag_comment(list_name, comment, nb_account_to_tag)
comment = self.add_hashtag_comment(comment, tweet.entities['hashtags'],
hashtag_to_blacklist)
self.api.update_status(comment, tweet.id)
# User - Random Sentence + Tag Comment + Update Status
elif hashtag == 5:
comment = "@" + tweet.user.screen_name + " " + randomsentence + " "
comment = self.add_tag_comment(list_name, comment, nb_account_to_tag)
self.api.update_status(comment, tweet.id)
# User - Hashtag Comment + Update Status
elif hashtag == 6:
comment = "@" + tweet.user.screen_name + " "
comment = self.add_hashtag_comment(comment, tweet.entities['hashtags'],
hashtag_to_blacklist)
self.api.update_status(comment, tweet.id)
def manage_hashtag(self, hashtag_list, hashtag_to_blacklist):
"""
Filter Blacklisted Hastags
:param hashtag_list list: List of Hashtags from Tweet
:param hashtag_to_blacklist list: List of BlackListed Hashtags
"""
h_list = []
for h in hashtag_list:
h_list.append(h['text'].upper())
return list(set(h_list) - set(hashtag_to_blacklist))
def add_tag_comment(self, list_name, comment, nb_account_to_tag):
"""
Tag other users in comment.
:param list_name list: List of user names to add to comment
:param comment string: Tweet/text/Comment
"""
nbusernotif = 0
for username in list_name:
if nbusernotif < nb_account_to_tag:
# We don't want to tag ourselves
if username == "@" + self.user.screen_name:
pass
else:
comment = comment + username + " "
nbusernotif += 1
return comment
def add_hashtag_comment(self, comment, hashtag_list, hashtag_to_blacklist):
"""
Add hashtag in Comments
:param comment string: Comment to which to add hashtags
:param hashtag_list list: List of Hashtags
:param hashtag_to_blacklist list: List of Blacklisted Hashtags to avoid
"""
h_list = self.manage_hashtag(hashtag_list, hashtag_to_blacklist)
for hashtag in h_list:
comment = comment + "#" + hashtag + " "
return comment
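# A minimal usage sketch (illustrative; assumes an authenticated tweepy API
# object and uses empty blacklists):
#
#     api = tweepy.API(auth)
#     bot = RetweetGiveaway(api, api.me())
#     giveaways = bot.check_retweet(["concours"], [], [], [], True, 50)
#     # giveaways is a list of [tweet] or [tweet, action_code] entries that can
#     # be passed on to manage_giveaway() together with the bot's configuration.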
|
the-stack_0_8023 | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# PROJECT_NAME Release Notes documentation build configuration file, created by
# sphinx-quickstart on Tue Nov 3 17:40:50 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'openstackdocstheme',
'reno.sphinxext',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
author = 'OpenStack-Ansible Contributors'
category = 'Miscellaneous'
copyright = '2014-2016, OpenStack-Ansible Contributors'
description = 'OpenStack-Ansible deploys OpenStack environments using Ansible.'
project = 'OpenStack-Ansible'
role_name = 'plugins'
target_name = 'openstack-ansible-' + role_name
title = 'OpenStack-Ansible Documentation: ' + role_name + ' role'
# Release notes do not need a version number in the title, they
# cover multiple releases.
# The full version, including alpha/beta/rc tags.
release = ''
# The short X.Y version.
version = ''
# openstackdocstheme options
openstackdocs_repo_name = 'openstack/' + target_name
openstackdocs_bug_project = project.lower()
openstackdocs_bug_tag = ''
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'native'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'openstackdocs'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
# html_extra_path = []
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'OpenStackAnsibleReleaseNotesdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'OpenStackAnsibleReleaseNotes.tex', u'OpenStack-Ansible Release Notes Documentation',
u'OpenStack-Ansible Developers', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'openstackansiblereleasenotes', u'OpenStack-Ansible Release Notes Documentation',
[u'OpenStack-Ansible Developers'], 1)
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'OpenStackAnsibleReleaseNotes', u'OpenStack-Ansible Release Notes Documentation',
u'OpenStack-Ansible Developers', 'OpenStackAnsibleReleaseNotes',
'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
# texinfo_no_detailmenu = False
# -- Options for Internationalization output ------------------------------
locale_dirs = ['locale/']
|
the-stack_0_8026 | from kubernetes import client as k8s_client
from kubernetes.client import rest as k8s_rest
from kubernetes import config as k8s_config
import boto3
from botocore.client import Config
from botocore.exceptions import ClientError
import argparse
import logging
import os
import tarfile
class MinioUploader(object):
def __init__(self, minio_secret, minio_secret_key, region_name):
k8s_config.load_incluster_config()
self.api_client = k8s_client.CoreV1Api()
try:
self.minio_service_endpoint = self.api_client.read_namespaced_service(name='minio-service', namespace='kubeflow').spec.cluster_ip
self.minio_service_enpoint_port=self.api_client.read_namespaced_service(name='minio-service', namespace='kubeflow').spec.ports[0].port
        except k8s_rest.ApiException as e:
if e.status == 403:
logging.warning(f"The service account doesn't have sufficient privileges "
f"to get the kubeflow minio-service. "
f"You will have to manually enter the minio cluster-ip. "
f"To make this function work ask someone with cluster "
f"priveleges to create an appropriate "
f"clusterrolebinding by running a command.\n"
f"kubectl create --namespace=kubeflow rolebinding "
"--clusterrole=kubeflow-view "
"--serviceaccount=${NAMESPACE}:default-editor "
"${NAMESPACE}-minio-view")
logging.error("API access denied with reason: {e.reason}")
self.minio_endpoint = "http://"+ self.minio_service_endpoint + ":%s"%self.minio_service_enpoint_port
print("minio endopoint : ", self.minio_endpoint)
self.client = boto3.client('s3',
endpoint_url=self.minio_endpoint,
aws_access_key_id=minio_secret,
aws_secret_access_key=minio_secret_key,
config=Config(signature_version='s3v4'),
region_name=region_name,
use_ssl=False)
def create_bucket(self, bucket_name):
try:
self.client.head_bucket(Bucket=bucket_name)
except ClientError:
bucket = {'Bucket': bucket_name}
self.client.create_bucket(**bucket)
def upload_to_bucket(self, blob_name, bucket_name, file_to_upload):
self.create_bucket(bucket_name)
self.client.upload_file(file_to_upload, bucket_name, blob_name)
return "s3://{}/{}".format(bucket_name, blob_name)
def flatten(tarinfo):
tarinfo.name = os.path.basename(tarinfo.name)
return tarinfo
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--minio-bucket', type=str, default='rasa', help='minio bucket name')
parser.add_argument('--minio-username', type=str, default='minio', help='minio secret name')
parser.add_argument('--minio-key', type=str, default='minio123', help='minio secret key')
parser.add_argument('--minio-region', type=str, default='us-east-1', help='minio region')
parser.add_argument('--model-name', type=str, default='rasa_model', help='trained model name')
parser.add_argument('--model-path', type=str, default='/mnt/models', help='trained model path')
FLAGS, unparsed = parser.parse_known_args()
#model_name=FLAGS.model_name + '.tar.gz'
#file_to_upload=FLAGS.model_path + '/' + model_name
minio_uploader = MinioUploader(minio_secret=FLAGS.minio_username, minio_secret_key=FLAGS.minio_key, region_name=FLAGS.minio_region)
tar = tarfile.open("models.tar.gz", "w:gz")
tar.add(FLAGS.model_path, arcname=os.path.basename("model"))
tar.close()
minio_uploader.upload_to_bucket("models.tar.gz", FLAGS.minio_bucket, "models.tar.gz")
print("uploaded successfully")
|
the-stack_0_8027 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name
"""Patterns supported CUTLASS."""
from functools import partial
from tvm import relay
from tvm.ir.transform import Sequential, PassContext
from tvm.relay import transform
from tvm.relay.build_module import bind_params_by_name
from tvm.relay.op.contrib.register import register_pattern_table # type: ignore
from ...dataflow_pattern import wildcard, is_op, is_constant
def make_gelu_pattern(bias_out, out_dtype="float16"):
mul = is_op("multiply")(bias_out, is_constant() | wildcard())
if out_dtype == "float16":
erf = is_op("cast")(is_op("erf")(is_op("cast")(mul)))
else:
erf = is_op("erf")(mul)
mul_half = is_op("multiply")(erf, is_constant() | wildcard())
add = is_op("add")(mul_half, is_constant() | wildcard())
return is_op("multiply")(add, bias_out)
def make_gemm_pattern(with_bias=True, with_act=None, out_dtype="float16"):
"""Create a pattern for dense op followed by activations."""
data = wildcard()
weight = wildcard()
bias = wildcard()
gemm = is_op("nn.dense")(data, weight)
if with_bias:
add_or_bias_add = is_op("add") | is_op("nn.bias_add")
gemm_out = add_or_bias_add(gemm, bias)
else:
gemm_out = gemm
if with_act is None:
return gemm_out
if isinstance(with_act, str) and with_act == "relu":
return is_op("nn.relu")(gemm_out)
assert isinstance(with_act, str) and with_act == "gelu"
return make_gelu_pattern(gemm_out, out_dtype)
def make_batch_matmul_pattern():
return is_op("nn.batch_matmul")(wildcard(), wildcard())
def make_conv2d_pattern(with_bias=False, with_act=None):
"""Create a pattern for dense op followed by activations."""
data = wildcard()
weight = wildcard()
bias = wildcard()
conv2d = is_op("nn.conv2d")(data, weight)
if with_bias:
add_or_bias_add = is_op("add") | is_op("nn.bias_add")
conv2d_out = add_or_bias_add(conv2d, bias)
else:
conv2d_out = conv2d
if with_act is not None:
if with_act == "relu":
return is_op("nn.relu")(conv2d_out)
if with_act == "sigmoid":
return is_op("sigmoid")(conv2d_out)
if with_act == "silu":
return is_op("multiply")(conv2d_out, is_op("sigmoid")(conv2d_out))
if with_act == "hardswish":
rhs = is_op("divide")(
is_op("clip")(is_op("add")(conv2d_out, is_constant())), is_constant()
)
return is_op("multiply")(conv2d_out, rhs)
raise ValueError("Unknown activation %s." % with_act)
return conv2d_out
def make_conv2d_transpose_pattern():
return is_op("nn.conv2d_transpose")(wildcard(), wildcard())
def make_conv2d_backward_weight_pattern():
return is_op("nn.conv2d_backward_weight")(wildcard(), wildcard())
def make_residual_block_pattern(tensor_op_out, binary_op="add", with_act="relu"):
"""Add pattern for residual blocks."""
residual_input = wildcard()
binary_out = is_op(binary_op)(tensor_op_out, residual_input) | is_op(binary_op)(
residual_input, tensor_op_out
)
if with_act is not None and with_act == "relu":
return is_op("nn.relu")(binary_out)
return binary_out
def check_dtype(lhs, rhs):
"""Check if dtypes in the given workload are supported by CUTLASS."""
return (
(lhs.dtype == "float16" and rhs.dtype == "float16")
or (lhs.dtype == "float32" and rhs.dtype == "float32")
or (lhs.dtype in ["int8", "uint8"] and rhs.dtype in ["int8", "uint8"])
)
def get_root_call(call, root_op_name):
if not isinstance(call, relay.Call):
return None
if str(call.op) == root_op_name:
return call
return get_root_call(call.args[0], root_op_name)
def check_gemm(call):
"""Check if the given dense workload can be offloaded to CUTLASS."""
dense = get_root_call(call, "nn.dense")
lhs = dense.args[0].checked_type
rhs = dense.args[1].checked_type
return check_dtype(lhs, rhs)
def check_batch_matmul(call):
"""Check if the given batch_matmul workload can be offloaded to CUTLASS."""
batch_matmul = get_root_call(call, "nn.batch_matmul")
lhs = batch_matmul.args[0].checked_type
rhs = batch_matmul.args[1].checked_type
transpose_a = batch_matmul.attrs.transpose_a
transpose_b = batch_matmul.attrs.transpose_b
return check_dtype(lhs, rhs) and not transpose_a and transpose_b
def is_depthwise_conv2d(ic, oc, groups):
return ic == oc == groups
def check_conv2d_common(op_name, expected_kernel_layout, call):
"""Check if the given conv2d workload can be offloaded to CUTLASS."""
conv2d = get_root_call(call, op_name)
data_layout = conv2d.attrs.data_layout
kernel_layout = conv2d.attrs.kernel_layout
data = conv2d.args[0].checked_type
weight = conv2d.args[1].checked_type
if (
data_layout != "NHWC"
or kernel_layout != expected_kernel_layout
or not check_dtype(data, weight)
):
return False
IC = data.shape[3]
OC = weight.shape[0]
return not is_depthwise_conv2d(IC, OC, conv2d.attrs.groups)
def check_conv2d(call):
return check_conv2d_common("nn.conv2d", "OHWI", call)
def check_conv2d_transpose(call):
# conv2d_transpose is implemented as dgrad, needs to swap the roles of C and K
return check_conv2d_common("nn.conv2d_transpose", "IHWO", call)
def check_conv2d_backward_weight(call):
return check_conv2d_common("nn.conv2d_backward_weight", "NHWC", call)
def check_conv2d_residual(call, binary_op):
"""Check if the given conv2d workload can be offloaded to CUTLASS."""
conv2d = get_root_call(call, "nn.conv2d")
if not check_conv2d(call):
return False
residual_binop = get_root_call(call, binary_op)
lhs = residual_binop.args[0]
rhs = residual_binop.args[1]
# residual_input is pattern-matched as a wildcard. Make sure it does not sit between
# residual binary op and the root conv2d of this pattern.
# If the root conv2d is the parent of both lhs and rhs, we should reject this pattern.
if get_root_call(lhs, "nn.conv2d") == conv2d and get_root_call(rhs, "nn.conv2d") == conv2d:
return False
return all(x == y for (x, y) in zip(lhs.checked_type.shape, rhs.checked_type.shape))
@register_pattern_table("cutlass")
def pattern_table():
"""Returns list of triples describing the name, dataflow pattern and predicate for all
the CUTLASS-supported operators."""
dense_pat = ("cutlass.dense", make_gemm_pattern(False, None), check_gemm)
dense_bias_pat = ("cutlass.dense_bias", make_gemm_pattern(True, None), check_gemm)
dense_bias_relu_pat = ("cutlass.dense_bias_relu", make_gemm_pattern(True, "relu"), check_gemm)
dense_bias_gelu_fp16_pat = (
"cutlass.dense_bias_gelu_fp16",
make_gemm_pattern(True, "gelu"),
check_gemm,
)
dense_bias_gelu_fp32_pat = (
"cutlass.dense_bias_gelu_fp32",
make_gemm_pattern(True, "gelu", out_dtype="float32"),
check_gemm,
)
dense_patterns = [
dense_bias_gelu_fp16_pat,
dense_bias_gelu_fp32_pat,
dense_bias_relu_pat,
dense_bias_pat,
dense_pat,
("cutlass.batch_matmul", make_batch_matmul_pattern(), check_batch_matmul),
]
conv2d_patterns = [
(
"cutlass.conv2d_bias_hardswish",
make_conv2d_pattern(with_bias=True, with_act="hardswish"),
check_conv2d,
),
(
"cutlass.conv2d_bias_silu",
make_conv2d_pattern(with_bias=True, with_act="silu"),
check_conv2d,
),
(
"cutlass.conv2d_bias_relu",
make_conv2d_pattern(with_bias=True, with_act="relu"),
check_conv2d,
),
(
"cutlass.conv2d_bias_sigmoid",
make_conv2d_pattern(with_bias=True, with_act="sigmoid"),
check_conv2d,
),
("cutlass.conv2d_bias", make_conv2d_pattern(with_bias=True), check_conv2d),
("cutlass.conv2d", make_conv2d_pattern(), check_conv2d),
]
# For now, no fusion for grad kernels
conv2d_grad_patterns = [
("cutlass.conv2d_transpose", make_conv2d_transpose_pattern(), check_conv2d_transpose),
(
"cutlass.conv2d_backward_weight",
make_conv2d_backward_weight_pattern(),
check_conv2d_backward_weight,
),
]
residual_block_patterns = []
for with_act, postfix in [("relu", "_relu"), (None, "")]:
for name, pat, _ in conv2d_patterns[:-1]:
for bin_op in ["add", "multiply"]:
residual_block_patterns.append(
(
name + "_residual_" + bin_op + postfix,
make_residual_block_pattern(pat, bin_op, with_act=with_act),
partial(check_conv2d_residual, binary_op=bin_op),
)
)
return residual_block_patterns + dense_patterns + conv2d_patterns + conv2d_grad_patterns
def partition_for_cutlass(mod, params=None):
"""Partition the input module into CUTLASS-supported subgraphs."""
if params is not None:
mod["main"] = bind_params_by_name(mod["main"], params)
remove_bn_pass = Sequential(
[
transform.InferType(),
transform.SimplifyInference(),
transform.FoldConstant(),
transform.FoldScaleAxis(),
]
)
with PassContext(opt_level=3):
mod = remove_bn_pass(mod)
cutlass_patterns = relay.op.contrib.get_pattern_table("cutlass")
seq = Sequential(
[
transform.InferType(),
transform.MergeComposite(cutlass_patterns),
transform.AnnotateTarget(["cutlass"], include_non_call_ops=False),
transform.PartitionGraph(bind_constants=False),
]
)
return seq(mod)
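# A minimal usage sketch (illustrative only; `mod` and `params` are assumed to
# come from a Relay frontend importer, e.g. relay.frontend.from_onnx):
#
#     mod = partition_for_cutlass(mod, params)
#     # Subgraphs matched by the patterns above are annotated with the
#     # "cutlass" target and can then be built with the CUTLASS BYOC backend.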
|
the-stack_0_8028 |
#Example case how to write a se file
#We write 3 cycles in a newly created file example.se.h5
import sewrite
#create the h5 output file
file_sewrite = sewrite.startfile('example.se.h5')
cycles=[1,2,3]
#writing global parameters:
#Make sure that you write the right units to ensure that MPPNP compute with the right values.
hattr_name = [ "codev", "modname", "mini", "zini", "rotini", "overini", "age_unit",
"mass_unit", "radius_unit", "rho_unit", "temperature_unit",
"dcoeff_unit","firstcycle"]
hattr_data = [ 'codev1', 'modname', 1., 0.02, 0., 0., 1., 1.,
1., 1., 1., 1.,cycles[0]]
file_sewrite.write_hattr(hattr_name, hattr_data)
rhot, tempt, mass, dcoeff, radius, delta_mass = [[1,2,3],[1,2,3],[1,2,3],[1,2,3],[1,2,3],[1,2,3]]
mtot, shellnb, age, deltat = [[1,2,3],[1,2,3],[1,2,3],[1,2,3]]
#write h5 cycle data
for i in range(len(cycles)):
#write data columns
dcol_name = ["rho", "temperature", "mass", "dcoeff", "radius", "delta_mass"]
dcol_data = [rhot, tempt, mass, dcoeff, radius, delta_mass]
file_sewrite.write_dcol(cycles[i], dcol_name, dcol_data)
#write data attributes
cattr_name = ["total_mass", "shellnb", "age", "deltat"]
cattr_data = [0.5, 1000,1234, 200]
file_sewrite.write_cattr(cycles[i], cattr_name, cattr_data)
|
the-stack_0_8030 | # -*- coding: utf-8 -*-
from urllib.parse import quote, quote_plus, unquote, urlencode
from plexapi import X_PLEX_CONTAINER_SIZE, log, utils
from plexapi.base import PlexObject
from plexapi.exceptions import BadRequest, NotFound
from plexapi.media import MediaTag
from plexapi.settings import Setting
class Library(PlexObject):
""" Represents a PlexServer library. This contains all sections of media defined
in your Plex server including video, shows and audio.
Attributes:
key (str): '/library'
identifier (str): Unknown ('com.plexapp.plugins.library').
mediaTagVersion (str): Unknown (/system/bundle/media/flags/)
server (:class:`~plexapi.server.PlexServer`): PlexServer this client is connected to.
title1 (str): 'Plex Library' (not sure how useful this is).
title2 (str): Second title (this is blank on my setup).
"""
key = '/library'
def _loadData(self, data):
self._data = data
self._sectionsByID = {} # cached Section UUIDs
self.identifier = data.attrib.get('identifier')
self.mediaTagVersion = data.attrib.get('mediaTagVersion')
self.title1 = data.attrib.get('title1')
self.title2 = data.attrib.get('title2')
def sections(self):
""" Returns a list of all media sections in this library. Library sections may be any of
:class:`~plexapi.library.MovieSection`, :class:`~plexapi.library.ShowSection`,
:class:`~plexapi.library.MusicSection`, :class:`~plexapi.library.PhotoSection`.
"""
key = '/library/sections'
sections = []
for elem in self._server.query(key):
for cls in (MovieSection, ShowSection, MusicSection, PhotoSection):
if elem.attrib.get('type') == cls.TYPE:
section = cls(self._server, elem, key)
self._sectionsByID[section.key] = section
sections.append(section)
return sections
def section(self, title=None):
""" Returns the :class:`~plexapi.library.LibrarySection` that matches the specified title.
Parameters:
title (str): Title of the section to return.
"""
for section in self.sections():
if section.title.lower() == title.lower():
return section
raise NotFound('Invalid library section: %s' % title)
def sectionByID(self, sectionID):
""" Returns the :class:`~plexapi.library.LibrarySection` that matches the specified sectionID.
Parameters:
sectionID (str): ID of the section to return.
"""
if not self._sectionsByID or sectionID not in self._sectionsByID:
self.sections()
return self._sectionsByID[sectionID]
def all(self, **kwargs):
""" Returns a list of all media from all library sections.
This may be a very large dataset to retrieve.
"""
items = []
for section in self.sections():
for item in section.all(**kwargs):
items.append(item)
return items
def onDeck(self):
""" Returns a list of all media items on deck. """
return self.fetchItems('/library/onDeck')
def recentlyAdded(self):
""" Returns a list of all media items recently added. """
return self.fetchItems('/library/recentlyAdded')
def search(self, title=None, libtype=None, **kwargs):
""" Searching within a library section is much more powerful. It seems certain
attributes on the media objects can be targeted to filter this search down
            a bit, but I haven't found the documentation for it.
Example: "studio=Comedy%20Central" or "year=1999" "title=Kung Fu" all work. Other items
such as actor=<id> seem to work, but require you already know the id of the actor.
TLDR: This is untested but seems to work. Use library section search when you can.
"""
args = {}
if title:
args['title'] = title
if libtype:
args['type'] = utils.searchType(libtype)
for attr, value in kwargs.items():
args[attr] = value
key = '/library/all%s' % utils.joinArgs(args)
return self.fetchItems(key)
def cleanBundles(self):
""" Poster images and other metadata for items in your library are kept in "bundle"
packages. When you remove items from your library, these bundles aren't immediately
removed. Removing these old bundles can reduce the size of your install. By default, your
server will automatically clean up old bundles once a week as part of Scheduled Tasks.
"""
# TODO: Should this check the response for success or the correct mediaprefix?
self._server.query('/library/clean/bundles')
def emptyTrash(self):
""" If a library has items in the Library Trash, use this option to empty the Trash. """
for section in self.sections():
section.emptyTrash()
def optimize(self):
""" The Optimize option cleans up the server database from unused or fragmented data.
For example, if you have deleted or added an entire library or many items in a
library, you may like to optimize the database.
"""
self._server.query('/library/optimize')
def update(self):
""" Scan this library for new items."""
self._server.query('/library/sections/all/refresh')
def cancelUpdate(self):
""" Cancel a library update. """
key = '/library/sections/all/refresh'
self._server.query(key, method=self._server._session.delete)
def refresh(self):
""" Forces a download of fresh media information from the internet.
This can take a long time. Any locked fields are not modified.
"""
self._server.query('/library/sections/all/refresh?force=1')
def deleteMediaPreviews(self):
""" Delete the preview thumbnails for the all sections. This cannot be
undone. Recreating media preview files can take hours or even days.
"""
for section in self.sections():
section.deleteMediaPreviews()
def add(self, name='', type='', agent='', scanner='', location='', language='en', *args, **kwargs):
""" Simplified add for the most common options.
Parameters:
name (str): Name of the library
agent (str): Example com.plexapp.agents.imdb
type (str): movie, show, # check me
location (str): /path/to/files
language (str): Two letter language fx en
kwargs (dict): Advanced options should be passed as a dict. where the id is the key.
**Photo Preferences**
* **agent** (str): com.plexapp.agents.none
* **enableAutoPhotoTags** (bool): Tag photos. Default value false.
* **enableBIFGeneration** (bool): Enable video preview thumbnails. Default value true.
* **includeInGlobal** (bool): Include in dashboard. Default value true.
* **scanner** (str): Plex Photo Scanner
**Movie Preferences**
* **agent** (str): com.plexapp.agents.none, com.plexapp.agents.imdb, com.plexapp.agents.themoviedb
* **enableBIFGeneration** (bool): Enable video preview thumbnails. Default value true.
* **enableCinemaTrailers** (bool): Enable Cinema Trailers. Default value true.
* **includeInGlobal** (bool): Include in dashboard. Default value true.
* **scanner** (str): Plex Movie Scanner, Plex Video Files Scanner
**IMDB Movie Options** (com.plexapp.agents.imdb)
* **title** (bool): Localized titles. Default value false.
* **extras** (bool): Find trailers and extras automatically (Plex Pass required). Default value true.
* **only_trailers** (bool): Skip extras which aren't trailers. Default value false.
* **redband** (bool): Use red band (restricted audiences) trailers when available. Default value false.
* **native_subs** (bool): Include extras with subtitles in Library language. Default value false.
* **cast_list** (int): Cast List Source: Default value 1 Possible options: 0:IMDb,1:The Movie Database.
* **ratings** (int): Ratings Source, Default value 0 Possible options:
0:Rotten Tomatoes, 1:IMDb, 2:The Movie Database.
* **summary** (int): Plot Summary Source: Default value 1 Possible options: 0:IMDb,1:The Movie Database.
* **country** (int): Default value 46 Possible options 0:Argentina, 1:Australia, 2:Austria,
3:Belgium, 4:Belize, 5:Bolivia, 6:Brazil, 7:Canada, 8:Chile, 9:Colombia, 10:Costa Rica,
11:Czech Republic, 12:Denmark, 13:Dominican Republic, 14:Ecuador, 15:El Salvador,
16:France, 17:Germany, 18:Guatemala, 19:Honduras, 20:Hong Kong SAR, 21:Ireland,
22:Italy, 23:Jamaica, 24:Korea, 25:Liechtenstein, 26:Luxembourg, 27:Mexico, 28:Netherlands,
29:New Zealand, 30:Nicaragua, 31:Panama, 32:Paraguay, 33:Peru, 34:Portugal,
35:Peoples Republic of China, 36:Puerto Rico, 37:Russia, 38:Singapore, 39:South Africa,
40:Spain, 41:Sweden, 42:Switzerland, 43:Taiwan, 44:Trinidad, 45:United Kingdom,
46:United States, 47:Uruguay, 48:Venezuela.
* **collections** (bool): Use collection info from The Movie Database. Default value false.
* **localart** (bool): Prefer artwork based on library language. Default value true.
* **adult** (bool): Include adult content. Default value false.
* **usage** (bool): Send anonymous usage data to Plex. Default value true.
**TheMovieDB Movie Options** (com.plexapp.agents.themoviedb)
* **collections** (bool): Use collection info from The Movie Database. Default value false.
* **localart** (bool): Prefer artwork based on library language. Default value true.
* **adult** (bool): Include adult content. Default value false.
* **country** (int): Country (used for release date and content rating). Default value 47 Possible
options 0:, 1:Argentina, 2:Australia, 3:Austria, 4:Belgium, 5:Belize, 6:Bolivia, 7:Brazil, 8:Canada,
9:Chile, 10:Colombia, 11:Costa Rica, 12:Czech Republic, 13:Denmark, 14:Dominican Republic, 15:Ecuador,
16:El Salvador, 17:France, 18:Germany, 19:Guatemala, 20:Honduras, 21:Hong Kong SAR, 22:Ireland,
23:Italy, 24:Jamaica, 25:Korea, 26:Liechtenstein, 27:Luxembourg, 28:Mexico, 29:Netherlands,
30:New Zealand, 31:Nicaragua, 32:Panama, 33:Paraguay, 34:Peru, 35:Portugal,
36:Peoples Republic of China, 37:Puerto Rico, 38:Russia, 39:Singapore, 40:South Africa, 41:Spain,
42:Sweden, 43:Switzerland, 44:Taiwan, 45:Trinidad, 46:United Kingdom, 47:United States, 48:Uruguay,
49:Venezuela.
**Show Preferences**
* **agent** (str): com.plexapp.agents.none, com.plexapp.agents.thetvdb, com.plexapp.agents.themoviedb
* **enableBIFGeneration** (bool): Enable video preview thumbnails. Default value true.
* **episodeSort** (int): Episode order. Default -1 Possible options: 0:Oldest first, 1:Newest first.
* **flattenSeasons** (int): Seasons. Default value 0 Possible options: 0:Show,1:Hide.
* **includeInGlobal** (bool): Include in dashboard. Default value true.
* **scanner** (str): Plex Series Scanner
**TheTVDB Show Options** (com.plexapp.agents.thetvdb)
* **extras** (bool): Find trailers and extras automatically (Plex Pass required). Default value true.
* **native_subs** (bool): Include extras with subtitles in Library language. Default value false.
**TheMovieDB Show Options** (com.plexapp.agents.themoviedb)
* **collections** (bool): Use collection info from The Movie Database. Default value false.
* **localart** (bool): Prefer artwork based on library language. Default value true.
* **adult** (bool): Include adult content. Default value false.
* **country** (int): Country (used for release date and content rating). Default value 47 options
0:, 1:Argentina, 2:Australia, 3:Austria, 4:Belgium, 5:Belize, 6:Bolivia, 7:Brazil, 8:Canada, 9:Chile,
10:Colombia, 11:Costa Rica, 12:Czech Republic, 13:Denmark, 14:Dominican Republic, 15:Ecuador,
16:El Salvador, 17:France, 18:Germany, 19:Guatemala, 20:Honduras, 21:Hong Kong SAR, 22:Ireland,
23:Italy, 24:Jamaica, 25:Korea, 26:Liechtenstein, 27:Luxembourg, 28:Mexico, 29:Netherlands,
30:New Zealand, 31:Nicaragua, 32:Panama, 33:Paraguay, 34:Peru, 35:Portugal,
36:Peoples Republic of China, 37:Puerto Rico, 38:Russia, 39:Singapore, 40:South Africa,
41:Spain, 42:Sweden, 43:Switzerland, 44:Taiwan, 45:Trinidad, 46:United Kingdom, 47:United States,
48:Uruguay, 49:Venezuela.
**Other Video Preferences**
* **agent** (str): com.plexapp.agents.none, com.plexapp.agents.imdb, com.plexapp.agents.themoviedb
* **enableBIFGeneration** (bool): Enable video preview thumbnails. Default value true.
* **enableCinemaTrailers** (bool): Enable Cinema Trailers. Default value true.
* **includeInGlobal** (bool): Include in dashboard. Default value true.
* **scanner** (str): Plex Movie Scanner, Plex Video Files Scanner
**IMDB Other Video Options** (com.plexapp.agents.imdb)
* **title** (bool): Localized titles. Default value false.
* **extras** (bool): Find trailers and extras automatically (Plex Pass required). Default value true.
* **only_trailers** (bool): Skip extras which aren't trailers. Default value false.
* **redband** (bool): Use red band (restricted audiences) trailers when available. Default value false.
* **native_subs** (bool): Include extras with subtitles in Library language. Default value false.
* **cast_list** (int): Cast List Source: Default value 1 Possible options: 0:IMDb,1:The Movie Database.
* **ratings** (int): Ratings Source Default value 0 Possible options:
0:Rotten Tomatoes,1:IMDb,2:The Movie Database.
* **summary** (int): Plot Summary Source: Default value 1 Possible options: 0:IMDb,1:The Movie Database.
* **country** (int): Country: Default value 46 Possible options: 0:Argentina, 1:Australia, 2:Austria,
3:Belgium, 4:Belize, 5:Bolivia, 6:Brazil, 7:Canada, 8:Chile, 9:Colombia, 10:Costa Rica,
11:Czech Republic, 12:Denmark, 13:Dominican Republic, 14:Ecuador, 15:El Salvador, 16:France,
17:Germany, 18:Guatemala, 19:Honduras, 20:Hong Kong SAR, 21:Ireland, 22:Italy, 23:Jamaica,
24:Korea, 25:Liechtenstein, 26:Luxembourg, 27:Mexico, 28:Netherlands, 29:New Zealand, 30:Nicaragua,
31:Panama, 32:Paraguay, 33:Peru, 34:Portugal, 35:Peoples Republic of China, 36:Puerto Rico,
37:Russia, 38:Singapore, 39:South Africa, 40:Spain, 41:Sweden, 42:Switzerland, 43:Taiwan, 44:Trinidad,
45:United Kingdom, 46:United States, 47:Uruguay, 48:Venezuela.
* **collections** (bool): Use collection info from The Movie Database. Default value false.
* **localart** (bool): Prefer artwork based on library language. Default value true.
* **adult** (bool): Include adult content. Default value false.
* **usage** (bool): Send anonymous usage data to Plex. Default value true.
**TheMovieDB Other Video Options** (com.plexapp.agents.themoviedb)
* **collections** (bool): Use collection info from The Movie Database. Default value false.
* **localart** (bool): Prefer artwork based on library language. Default value true.
* **adult** (bool): Include adult content. Default value false.
* **country** (int): Country (used for release date and content rating). Default
value 47 Possible options 0:, 1:Argentina, 2:Australia, 3:Austria, 4:Belgium, 5:Belize,
6:Bolivia, 7:Brazil, 8:Canada, 9:Chile, 10:Colombia, 11:Costa Rica, 12:Czech Republic,
13:Denmark, 14:Dominican Republic, 15:Ecuador, 16:El Salvador, 17:France, 18:Germany,
19:Guatemala, 20:Honduras, 21:Hong Kong SAR, 22:Ireland, 23:Italy, 24:Jamaica,
25:Korea, 26:Liechtenstein, 27:Luxembourg, 28:Mexico, 29:Netherlands, 30:New Zealand,
31:Nicaragua, 32:Panama, 33:Paraguay, 34:Peru, 35:Portugal,
36:Peoples Republic of China, 37:Puerto Rico, 38:Russia, 39:Singapore,
40:South Africa, 41:Spain, 42:Sweden, 43:Switzerland, 44:Taiwan, 45:Trinidad,
46:United Kingdom, 47:United States, 48:Uruguay, 49:Venezuela.
"""
part = '/library/sections?name=%s&type=%s&agent=%s&scanner=%s&language=%s&location=%s' % (
quote_plus(name), type, agent, quote_plus(scanner), language, quote_plus(location)) # noqa E126
if kwargs:
            part += '&' + urlencode(kwargs)
return self._server.query(part, method=self._server._session.post)
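    # A minimal, untested usage sketch for add(); the agent, scanner and path below
    # are placeholder values -- check what your own server actually offers:
    #   plex.library.add(name='Movies', type='movie',
    #                    agent='com.plexapp.agents.imdb',
    #                    scanner='Plex Movie Scanner',
    #                    location='/data/movies', language='en')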
def history(self, maxresults=9999999, mindate=None):
""" Get Play History for all library Sections for the owner.
Parameters:
maxresults (int): Only return the specified number of results (optional).
mindate (datetime): Min datetime to return results from.
"""
hist = []
for section in self.sections():
hist.extend(section.history(maxresults=maxresults, mindate=mindate))
return hist
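    # Untested sketch: fetch the last week of play history across all sections,
    # assuming a connected PlexServer instance named `plex`:
    #   from datetime import datetime, timedelta
    #   recent = plex.library.history(mindate=datetime.now() - timedelta(days=7))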
class LibrarySection(PlexObject):
""" Base class for a single library section.
Attributes:
ALLOWED_FILTERS (tuple): ()
ALLOWED_SORT (tuple): ()
BOOLEAN_FILTERS (tuple<str>): ('unwatched', 'duplicate')
server (:class:`~plexapi.server.PlexServer`): Server this client is connected to.
initpath (str): Path requested when building this object.
agent (str): Unknown (com.plexapp.agents.imdb, etc)
allowSync (bool): True if you allow syncing content from this section.
            art (str): Wallpaper artwork used to represent this section.
            composite (str): Composite image used to represent this section.
createdAt (datetime): Datetime this library section was created.
filters (str): Unknown
key (str): Key (or ID) of this library section.
language (str): Language represented in this section (en, xn, etc).
locations (str): Paths on disk where section content is stored.
refreshing (str): True if this section is currently being refreshed.
scanner (str): Internal scanner used to find media (Plex Movie Scanner, Plex Premium Music Scanner, etc.)
thumb (str): Thumbnail image used to represent this section.
title (str): Title of this section.
type (str): Type of content section represents (movie, artist, photo, show).
updatedAt (datetime): Datetime this library section was last updated.
uuid (str): Unique id for this section (32258d7c-3e6c-4ac5-98ad-bad7a3b78c63)
totalSize (int): Total number of item in the library
"""
ALLOWED_FILTERS = ()
ALLOWED_SORT = ()
BOOLEAN_FILTERS = ('unwatched', 'duplicate')
def _loadData(self, data):
self._data = data
self.agent = data.attrib.get('agent')
self.allowSync = utils.cast(bool, data.attrib.get('allowSync'))
self.art = data.attrib.get('art')
self.composite = data.attrib.get('composite')
self.createdAt = utils.toDatetime(data.attrib.get('createdAt'))
self.filters = data.attrib.get('filters')
self.key = data.attrib.get('key') # invalid key from plex
self.language = data.attrib.get('language')
self.locations = self.listAttrs(data, 'path', etag='Location')
self.refreshing = utils.cast(bool, data.attrib.get('refreshing'))
self.scanner = data.attrib.get('scanner')
self.thumb = data.attrib.get('thumb')
self.title = data.attrib.get('title')
self.type = data.attrib.get('type')
self.updatedAt = utils.toDatetime(data.attrib.get('updatedAt'))
self.uuid = data.attrib.get('uuid')
# Private attrs as we dont want a reload.
self._total_size = None
def fetchItems(self, ekey, cls=None, container_start=None, container_size=None, **kwargs):
""" Load the specified key to find and build all items with the specified tag
and attrs. See :func:`~plexapi.base.PlexObject.fetchItem` for more details
on how this is used.
Parameters:
container_start (None, int): offset to get a subset of the data
container_size (None, int): How many items in data
"""
url_kw = {}
if container_start is not None:
url_kw["X-Plex-Container-Start"] = container_start
if container_size is not None:
url_kw["X-Plex-Container-Size"] = container_size
if ekey is None:
raise BadRequest('ekey was not provided')
data = self._server.query(ekey, params=url_kw)
if '/all' in ekey:
# totalSize is only included in the xml response
# if container size is used.
total_size = data.attrib.get("totalSize") or data.attrib.get("size")
self._total_size = utils.cast(int, total_size)
items = self.findItems(data, cls, ekey, **kwargs)
librarySectionID = data.attrib.get('librarySectionID')
if librarySectionID:
for item in items:
item.librarySectionID = librarySectionID
return items
@property
def totalSize(self):
if self._total_size is None:
part = '/library/sections/%s/all?X-Plex-Container-Start=0&X-Plex-Container-Size=1' % self.key
data = self._server.query(part)
self._total_size = int(data.attrib.get("totalSize"))
return self._total_size
def delete(self):
""" Delete a library section. """
try:
return self._server.query('/library/sections/%s' % self.key, method=self._server._session.delete)
except BadRequest: # pragma: no cover
msg = 'Failed to delete library %s' % self.key
            msg += ' You may need to allow this permission in your Plex settings.'
log.error(msg)
raise
def reload(self, key=None):
return self._server.library.section(self.title)
def edit(self, agent=None, **kwargs):
""" Edit a library (Note: agent is required). See :class:`~plexapi.library.Library` for example usage.
Parameters:
kwargs (dict): Dict of settings to edit.
"""
if not agent:
agent = self.agent
part = '/library/sections/%s?agent=%s&%s' % (self.key, agent, urlencode(kwargs))
self._server.query(part, method=self._server._session.put)
        # Reload this way since self.key doesn't hold a full path, but is simply an id.
for s in self._server.library.sections():
if s.key == self.key:
return s
def get(self, title):
""" Returns the media item with the specified title.
Parameters:
title (str): Title of the item to return.
"""
key = '/library/sections/%s/all?title=%s' % (self.key, quote(title, safe=''))
return self.fetchItem(key, title__iexact=title)
def all(self, sort=None, **kwargs):
""" Returns a list of media from this library section.
Parameters:
sort (string): The sort string
"""
sortStr = ''
if sort is not None:
sortStr = '?sort=' + sort
key = '/library/sections/%s/all%s' % (self.key, sortStr)
return self.fetchItems(key, **kwargs)
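    # Untested sketch: list an entire section sorted by title, assuming `movies`
    # is a LibrarySection obtained via plex.library.section('Movies'):
    #   every_movie = movies.all(sort='titleSort:asc')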
def agents(self):
""" Returns a list of available `:class:`~plexapi.media.Agent` for this library section.
"""
return self._server.agents(utils.searchType(self.type))
def settings(self):
""" Returns a list of all library settings. """
key = '/library/sections/%s/prefs' % self.key
data = self._server.query(key)
return self.findItems(data, cls=Setting)
def onDeck(self):
""" Returns a list of media items on deck from this library section. """
key = '/library/sections/%s/onDeck' % self.key
return self.fetchItems(key)
def recentlyAdded(self, maxresults=50):
""" Returns a list of media items recently added from this library section.
Parameters:
maxresults (int): Max number of items to return (default 50).
"""
return self.search(sort='addedAt:desc', maxresults=maxresults)
def analyze(self):
""" Run an analysis on all of the items in this library section. See
See :func:`~plexapi.base.PlexPartialObject.analyze` for more details.
"""
key = '/library/sections/%s/analyze' % self.key
self._server.query(key, method=self._server._session.put)
def emptyTrash(self):
""" If a section has items in the Trash, use this option to empty the Trash. """
key = '/library/sections/%s/emptyTrash' % self.key
self._server.query(key, method=self._server._session.put)
def update(self):
""" Scan this section for new media. """
key = '/library/sections/%s/refresh' % self.key
self._server.query(key)
def cancelUpdate(self):
""" Cancel update of this Library Section. """
key = '/library/sections/%s/refresh' % self.key
self._server.query(key, method=self._server._session.delete)
def refresh(self):
""" Forces a download of fresh media information from the internet.
This can take a long time. Any locked fields are not modified.
"""
key = '/library/sections/%s/refresh?force=1' % self.key
self._server.query(key)
def deleteMediaPreviews(self):
""" Delete the preview thumbnails for items in this library. This cannot
be undone. Recreating media preview files can take hours or even days.
"""
key = '/library/sections/%s/indexes' % self.key
self._server.query(key, method=self._server._session.delete)
def listChoices(self, category, libtype=None, **kwargs):
""" Returns a list of :class:`~plexapi.library.FilterChoice` objects for the
specified category and libtype. kwargs can be any of the same kwargs in
            :func:`plexapi.library.LibrarySection.search()` to help narrow down the choices
to only those that matter in your current context.
Parameters:
category (str): Category to list choices for (genre, contentRating, etc).
libtype (int): Library type of item filter.
**kwargs (dict): Additional kwargs to narrow down the choices.
Raises:
:class:`plexapi.exceptions.BadRequest`: Cannot include kwarg equal to specified category.
"""
# TODO: Should this be moved to base?
if category in kwargs:
raise BadRequest('Cannot include kwarg equal to specified category: %s' % category)
args = {}
for subcategory, value in kwargs.items():
args[category] = self._cleanSearchFilter(subcategory, value)
if libtype is not None:
args['type'] = utils.searchType(libtype)
key = '/library/sections/%s/%s%s' % (self.key, category, utils.joinArgs(args))
return self.fetchItems(key, cls=FilterChoice)
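    # Untested sketch: list the genre filter choices of a section, assuming
    # `movies` is a LibrarySection:
    #   genres = movies.listChoices('genre')
    #   print([choice.title for choice in genres])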
def search(self, title=None, sort=None, maxresults=None,
libtype=None, container_start=0, container_size=X_PLEX_CONTAINER_SIZE, **kwargs):
""" Search the library. The http requests will be batched in container_size. If you're only looking for the first <num>
            results, it would be wise to set the maxresults option to that amount so this function
doesn't iterate over all results on the server.
Parameters:
title (str): General string query to search for (optional).
sort (str): column:dir; column can be any of {addedAt, originallyAvailableAt, lastViewedAt,
titleSort, rating, mediaHeight, duration}. dir can be asc or desc (optional).
maxresults (int): Only return the specified number of results (optional).
                libtype (str): Filter results to a specific libtype (movie, show, episode, artist,
album, track; optional).
container_start (int): default 0
container_size (int): default X_PLEX_CONTAINER_SIZE in your config file.
**kwargs (dict): Any of the available filters for the current library section. Partial string
matches allowed. Multiple matches OR together. Negative filtering also possible, just add an
exclamation mark to the end of filter name, e.g. `resolution!=1x1`.
* unwatched: Display or hide unwatched content (True, False). [all]
* duplicate: Display or hide duplicate items (True, False). [movie]
* actor: List of actors to search ([actor_or_id, ...]). [movie]
* collection: List of collections to search within ([collection_or_id, ...]). [all]
* contentRating: List of content ratings to search within ([rating_or_key, ...]). [movie,tv]
* country: List of countries to search within ([country_or_key, ...]). [movie,music]
* decade: List of decades to search within ([yyy0, ...]). [movie]
* director: List of directors to search ([director_or_id, ...]). [movie]
                    * genre: List of genres to search within ([genre_or_id, ...]). [all]
                    * network: List of TV networks to search within ([network_or_key, ...]). [tv]
* resolution: List of video resolutions to search within ([resolution_or_key, ...]). [movie]
* studio: List of studios to search within ([studio_or_key, ...]). [music]
* year: List of years to search within ([yyyy, ...]). [all]
Raises:
:class:`plexapi.exceptions.BadRequest`: when applying unknown filter
"""
# cleanup the core arguments
args = {}
for category, value in kwargs.items():
args[category] = self._cleanSearchFilter(category, value, libtype)
if title is not None:
args['title'] = title
if sort is not None:
args['sort'] = self._cleanSearchSort(sort)
if libtype is not None:
args['type'] = utils.searchType(libtype)
results = []
subresults = []
offset = container_start
if maxresults is not None:
container_size = min(container_size, maxresults)
while True:
key = '/library/sections/%s/all%s' % (self.key, utils.joinArgs(args))
subresults = self.fetchItems(key, container_start=container_start,
container_size=container_size)
if not len(subresults):
if offset > self.totalSize:
log.info("container_start is higher then the number of items in the library")
break
results.extend(subresults)
            # self.totalSize is not used as a condition in the while loop as
            # this requires an additional http request.
# self.totalSize is updated from .fetchItems
wanted_number_of_items = self.totalSize - offset
if maxresults is not None:
wanted_number_of_items = min(maxresults, wanted_number_of_items)
container_size = min(container_size, maxresults - len(results))
if wanted_number_of_items <= len(results):
break
container_start += container_size
return results
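    # Untested sketch of a few filtered searches, assuming `movies` is a
    # LibrarySection; the available filters depend on the section type:
    #   movies.search(title='alien', sort='year:desc', maxresults=10)
    #   movies.search(unwatched=True, genre='Sci-Fi')
    #   movies.search(resolution='4k')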
def _cleanSearchFilter(self, category, value, libtype=None):
# check a few things before we begin
if category.endswith('!'):
if category[:-1] not in self.ALLOWED_FILTERS:
raise BadRequest('Unknown filter category: %s' % category[:-1])
elif category not in self.ALLOWED_FILTERS:
raise BadRequest('Unknown filter category: %s' % category)
if category in self.BOOLEAN_FILTERS:
return '1' if value else '0'
if not isinstance(value, (list, tuple)):
value = [value]
# convert list of values to list of keys or ids
result = set()
choices = self.listChoices(category, libtype)
lookup = {c.title.lower(): unquote(unquote(c.key)) for c in choices}
allowed = set(c.key for c in choices)
for item in value:
item = str((item.id or item.tag) if isinstance(item, MediaTag) else item).lower()
# find most logical choice(s) to use in url
if item in allowed: result.add(item); continue
if item in lookup: result.add(lookup[item]); continue
matches = [k for t, k in lookup.items() if item in t]
            if matches: result.update(matches); continue  # map() is lazy in Python 3, so update the set directly
# nothing matched; use raw item value
log.debug('Filter value not listed, using raw item value: %s' % item)
result.add(item)
return ','.join(result)
def _cleanSearchSort(self, sort):
sort = '%s:asc' % sort if ':' not in sort else sort
scol, sdir = sort.lower().split(':')
lookup = {s.lower(): s for s in self.ALLOWED_SORT}
if scol not in lookup:
raise BadRequest('Unknown sort column: %s' % scol)
if sdir not in ('asc', 'desc'):
raise BadRequest('Unknown sort dir: %s' % sdir)
return '%s:%s' % (lookup[scol], sdir)
def sync(self, policy, mediaSettings, client=None, clientId=None, title=None, sort=None, libtype=None,
**kwargs):
""" Add current library section as sync item for specified device.
See description of :func:`~plexapi.library.LibrarySection.search()` for details about filtering / sorting
and :func:`plexapi.myplex.MyPlexAccount.sync()` for possible exceptions.
Parameters:
policy (:class:`plexapi.sync.Policy`): policy of syncing the media (how many items to sync and process
watched media or not), generated automatically when method
called on specific LibrarySection object.
mediaSettings (:class:`plexapi.sync.MediaSettings`): Transcoding settings used for the media, generated
automatically when method called on specific
LibrarySection object.
client (:class:`plexapi.myplex.MyPlexDevice`): sync destination, see
:func:`plexapi.myplex.MyPlexAccount.sync`.
clientId (str): sync destination, see :func:`plexapi.myplex.MyPlexAccount.sync`.
title (str): descriptive title for the new :class:`plexapi.sync.SyncItem`, if empty the value would be
generated from metadata of current media.
sort (str): formatted as `column:dir`; column can be any of {`addedAt`, `originallyAvailableAt`,
`lastViewedAt`, `titleSort`, `rating`, `mediaHeight`, `duration`}. dir can be `asc` or
`desc`.
libtype (str): Filter results to a specific libtype (`movie`, `show`, `episode`, `artist`, `album`,
`track`).
Returns:
:class:`plexapi.sync.SyncItem`: an instance of created syncItem.
Raises:
:class:`plexapi.exceptions.BadRequest`: when the library is not allowed to sync
Example:
.. code-block:: python
from plexapi import myplex
from plexapi.sync import Policy, MediaSettings, VIDEO_QUALITY_3_MBPS_720p
c = myplex.MyPlexAccount()
target = c.device('Plex Client')
sync_items_wd = c.syncItems(target.clientIdentifier)
srv = c.resource('Server Name').connect()
section = srv.library.section('Movies')
policy = Policy('count', unwatched=True, value=1)
media_settings = MediaSettings.create(VIDEO_QUALITY_3_MBPS_720p)
section.sync(target, policy, media_settings, title='Next best movie', sort='rating:desc')
"""
from plexapi.sync import SyncItem
if not self.allowSync:
raise BadRequest('The requested library is not allowed to sync')
args = {}
for category, value in kwargs.items():
args[category] = self._cleanSearchFilter(category, value, libtype)
if sort is not None:
args['sort'] = self._cleanSearchSort(sort)
if libtype is not None:
args['type'] = utils.searchType(libtype)
myplex = self._server.myPlexAccount()
sync_item = SyncItem(self._server, None)
sync_item.title = title if title else self.title
sync_item.rootTitle = self.title
sync_item.contentType = self.CONTENT_TYPE
sync_item.metadataType = self.METADATA_TYPE
sync_item.machineIdentifier = self._server.machineIdentifier
key = '/library/sections/%s/all' % self.key
sync_item.location = 'library://%s/directory/%s' % (self.uuid, quote_plus(key + utils.joinArgs(args)))
sync_item.policy = policy
sync_item.mediaSettings = mediaSettings
return myplex.sync(client=client, clientId=clientId, sync_item=sync_item)
def history(self, maxresults=9999999, mindate=None):
""" Get Play History for this library Section for the owner.
Parameters:
maxresults (int): Only return the specified number of results (optional).
mindate (datetime): Min datetime to return results from.
"""
return self._server.history(maxresults=maxresults, mindate=mindate, librarySectionID=self.key, accountID=1)
class MovieSection(LibrarySection):
""" Represents a :class:`~plexapi.library.LibrarySection` section containing movies.
Attributes:
ALLOWED_FILTERS (list<str>): List of allowed search filters. ('unwatched',
'duplicate', 'year', 'decade', 'genre', 'contentRating', 'collection',
'director', 'actor', 'country', 'studio', 'resolution', 'guid', 'label', 'unmatched')
ALLOWED_SORT (list<str>): List of allowed sorting keys. ('addedAt',
'originallyAvailableAt', 'lastViewedAt', 'titleSort', 'rating',
'mediaHeight', 'duration')
TAG (str): 'Directory'
TYPE (str): 'movie'
"""
ALLOWED_FILTERS = ('unwatched', 'duplicate', 'year', 'decade', 'genre', 'contentRating',
'collection', 'director', 'actor', 'country', 'studio', 'resolution',
'guid', 'label', 'writer', 'producer', 'subtitleLanguage', 'audioLanguage',
'lastViewedAt', 'viewCount', 'addedAt', 'unmatched')
ALLOWED_SORT = ('addedAt', 'originallyAvailableAt', 'lastViewedAt', 'titleSort', 'rating',
'mediaHeight', 'duration')
TAG = 'Directory'
TYPE = 'movie'
METADATA_TYPE = 'movie'
CONTENT_TYPE = 'video'
def collection(self, **kwargs):
""" Returns a list of collections from this library section. """
return self.search(libtype='collection', **kwargs)
def sync(self, videoQuality, limit=None, unwatched=False, **kwargs):
""" Add current Movie library section as sync item for specified device.
See description of :func:`plexapi.library.LibrarySection.search()` for details about filtering / sorting and
:func:`plexapi.library.LibrarySection.sync()` for details on syncing libraries and possible exceptions.
Parameters:
videoQuality (int): idx of quality of the video, one of VIDEO_QUALITY_* values defined in
:mod:`plexapi.sync` module.
limit (int): maximum count of movies to sync, unlimited if `None`.
unwatched (bool): if `True` watched videos wouldn't be synced.
Returns:
:class:`plexapi.sync.SyncItem`: an instance of created syncItem.
Example:
.. code-block:: python
from plexapi import myplex
from plexapi.sync import VIDEO_QUALITY_3_MBPS_720p
c = myplex.MyPlexAccount()
target = c.device('Plex Client')
sync_items_wd = c.syncItems(target.clientIdentifier)
srv = c.resource('Server Name').connect()
section = srv.library.section('Movies')
section.sync(VIDEO_QUALITY_3_MBPS_720p, client=target, limit=1, unwatched=True,
title='Next best movie', sort='rating:desc')
"""
from plexapi.sync import Policy, MediaSettings
kwargs['mediaSettings'] = MediaSettings.createVideo(videoQuality)
kwargs['policy'] = Policy.create(limit, unwatched)
return super(MovieSection, self).sync(**kwargs)
class ShowSection(LibrarySection):
""" Represents a :class:`~plexapi.library.LibrarySection` section containing tv shows.
Attributes:
ALLOWED_FILTERS (list<str>): List of allowed search filters. ('unwatched',
'year', 'genre', 'contentRating', 'network', 'collection', 'guid', 'label')
ALLOWED_SORT (list<str>): List of allowed sorting keys. ('addedAt', 'lastViewedAt',
'originallyAvailableAt', 'titleSort', 'rating', 'unwatched', 'unmatched')
TAG (str): 'Directory'
TYPE (str): 'show'
"""
ALLOWED_FILTERS = ('unwatched', 'year', 'genre', 'contentRating', 'network', 'collection',
'guid', 'duplicate', 'label', 'show.title', 'show.year', 'show.userRating',
'show.viewCount', 'show.lastViewedAt', 'show.actor', 'show.addedAt', 'episode.title',
'episode.originallyAvailableAt', 'episode.resolution', 'episode.subtitleLanguage',
'episode.unwatched', 'episode.addedAt', 'episode.userRating', 'episode.viewCount',
'episode.lastViewedAt', 'unmatched')
ALLOWED_SORT = ('addedAt', 'lastViewedAt', 'originallyAvailableAt', 'titleSort',
'rating', 'unwatched')
TAG = 'Directory'
TYPE = 'show'
METADATA_TYPE = 'episode'
CONTENT_TYPE = 'video'
def searchShows(self, **kwargs):
""" Search for a show. See :func:`~plexapi.library.LibrarySection.search()` for usage. """
return self.search(libtype='show', **kwargs)
def searchEpisodes(self, **kwargs):
""" Search for an episode. See :func:`~plexapi.library.LibrarySection.search()` for usage. """
return self.search(libtype='episode', **kwargs)
def recentlyAdded(self, libtype='episode', maxresults=50):
""" Returns a list of recently added episodes from this library section.
Parameters:
maxresults (int): Max number of items to return (default 50).
"""
return self.search(sort='addedAt:desc', libtype=libtype, maxresults=maxresults)
def collection(self, **kwargs):
""" Returns a list of collections from this library section. """
return self.search(libtype='collection', **kwargs)
def sync(self, videoQuality, limit=None, unwatched=False, **kwargs):
""" Add current Show library section as sync item for specified device.
See description of :func:`plexapi.library.LibrarySection.search()` for details about filtering / sorting and
:func:`plexapi.library.LibrarySection.sync()` for details on syncing libraries and possible exceptions.
Parameters:
videoQuality (int): idx of quality of the video, one of VIDEO_QUALITY_* values defined in
:mod:`plexapi.sync` module.
limit (int): maximum count of episodes to sync, unlimited if `None`.
unwatched (bool): if `True` watched videos wouldn't be synced.
Returns:
:class:`plexapi.sync.SyncItem`: an instance of created syncItem.
Example:
.. code-block:: python
from plexapi import myplex
from plexapi.sync import VIDEO_QUALITY_3_MBPS_720p
c = myplex.MyPlexAccount()
target = c.device('Plex Client')
sync_items_wd = c.syncItems(target.clientIdentifier)
srv = c.resource('Server Name').connect()
section = srv.library.section('TV-Shows')
section.sync(VIDEO_QUALITY_3_MBPS_720p, client=target, limit=1, unwatched=True,
title='Next unwatched episode')
"""
from plexapi.sync import Policy, MediaSettings
kwargs['mediaSettings'] = MediaSettings.createVideo(videoQuality)
kwargs['policy'] = Policy.create(limit, unwatched)
return super(ShowSection, self).sync(**kwargs)
class MusicSection(LibrarySection):
""" Represents a :class:`~plexapi.library.LibrarySection` section containing music artists.
Attributes:
ALLOWED_FILTERS (list<str>): List of allowed search filters. ('genre',
'country', 'collection')
ALLOWED_SORT (list<str>): List of allowed sorting keys. ('addedAt',
'lastViewedAt', 'viewCount', 'titleSort')
TAG (str): 'Directory'
TYPE (str): 'artist'
"""
ALLOWED_FILTERS = ('genre', 'country', 'collection', 'mood', 'year', 'track.userRating', 'artist.title',
'artist.userRating', 'artist.genre', 'artist.country', 'artist.collection', 'artist.addedAt',
'album.title', 'album.userRating', 'album.genre', 'album.decade', 'album.collection',
'album.viewCount', 'album.lastViewedAt', 'album.studio', 'album.addedAt', 'track.title',
'track.userRating', 'track.viewCount', 'track.lastViewedAt', 'track.skipCount',
'track.lastSkippedAt')
ALLOWED_SORT = ('addedAt', 'lastViewedAt', 'viewCount', 'titleSort', 'userRating')
TAG = 'Directory'
TYPE = 'artist'
CONTENT_TYPE = 'audio'
METADATA_TYPE = 'track'
def albums(self):
""" Returns a list of :class:`~plexapi.audio.Album` objects in this section. """
key = '/library/sections/%s/albums' % self.key
return self.fetchItems(key)
def searchArtists(self, **kwargs):
""" Search for an artist. See :func:`~plexapi.library.LibrarySection.search()` for usage. """
return self.search(libtype='artist', **kwargs)
def searchAlbums(self, **kwargs):
""" Search for an album. See :func:`~plexapi.library.LibrarySection.search()` for usage. """
return self.search(libtype='album', **kwargs)
def searchTracks(self, **kwargs):
""" Search for a track. See :func:`~plexapi.library.LibrarySection.search()` for usage. """
return self.search(libtype='track', **kwargs)
def collection(self, **kwargs):
""" Returns a list of collections from this library section. """
return self.search(libtype='collection', **kwargs)
def sync(self, bitrate, limit=None, **kwargs):
""" Add current Music library section as sync item for specified device.
See description of :func:`plexapi.library.LibrarySection.search()` for details about filtering / sorting and
:func:`plexapi.library.LibrarySection.sync()` for details on syncing libraries and possible exceptions.
Parameters:
bitrate (int): maximum bitrate for synchronized music, better use one of MUSIC_BITRATE_* values from the
module :mod:`plexapi.sync`.
limit (int): maximum count of tracks to sync, unlimited if `None`.
Returns:
:class:`plexapi.sync.SyncItem`: an instance of created syncItem.
Example:
.. code-block:: python
from plexapi import myplex
from plexapi.sync import AUDIO_BITRATE_320_KBPS
c = myplex.MyPlexAccount()
target = c.device('Plex Client')
sync_items_wd = c.syncItems(target.clientIdentifier)
srv = c.resource('Server Name').connect()
section = srv.library.section('Music')
section.sync(AUDIO_BITRATE_320_KBPS, client=target, limit=100, sort='addedAt:desc',
title='New music')
"""
from plexapi.sync import Policy, MediaSettings
kwargs['mediaSettings'] = MediaSettings.createMusic(bitrate)
kwargs['policy'] = Policy.create(limit)
return super(MusicSection, self).sync(**kwargs)
class PhotoSection(LibrarySection):
""" Represents a :class:`~plexapi.library.LibrarySection` section containing photos.
Attributes:
ALLOWED_FILTERS (list<str>): List of allowed search filters. ('all', 'iso',
'make', 'lens', 'aperture', 'exposure', 'device', 'resolution')
ALLOWED_SORT (list<str>): List of allowed sorting keys. ('addedAt')
TAG (str): 'Directory'
TYPE (str): 'photo'
"""
ALLOWED_FILTERS = ('all', 'iso', 'make', 'lens', 'aperture', 'exposure', 'device', 'resolution', 'place',
'originallyAvailableAt', 'addedAt', 'title', 'userRating', 'tag', 'year')
ALLOWED_SORT = ('addedAt',)
TAG = 'Directory'
TYPE = 'photo'
CONTENT_TYPE = 'photo'
METADATA_TYPE = 'photo'
def searchAlbums(self, title, **kwargs):
""" Search for an album. See :func:`~plexapi.library.LibrarySection.search()` for usage. """
return self.search(libtype='photoalbum', title=title, **kwargs)
def searchPhotos(self, title, **kwargs):
""" Search for a photo. See :func:`~plexapi.library.LibrarySection.search()` for usage. """
return self.search(libtype='photo', title=title, **kwargs)
def sync(self, resolution, limit=None, **kwargs):
""" Add current Music library section as sync item for specified device.
See description of :func:`plexapi.library.LibrarySection.search()` for details about filtering / sorting and
:func:`plexapi.library.LibrarySection.sync()` for details on syncing libraries and possible exceptions.
Parameters:
resolution (str): maximum allowed resolution for synchronized photos, see PHOTO_QUALITY_* values in the
module :mod:`plexapi.sync`.
                limit (int): maximum count of photos to sync, unlimited if `None`.
Returns:
:class:`plexapi.sync.SyncItem`: an instance of created syncItem.
Example:
.. code-block:: python
from plexapi import myplex
from plexapi.sync import PHOTO_QUALITY_HIGH
c = myplex.MyPlexAccount()
target = c.device('Plex Client')
sync_items_wd = c.syncItems(target.clientIdentifier)
srv = c.resource('Server Name').connect()
section = srv.library.section('Photos')
section.sync(PHOTO_QUALITY_HIGH, client=target, limit=100, sort='addedAt:desc',
title='Fresh photos')
"""
from plexapi.sync import Policy, MediaSettings
kwargs['mediaSettings'] = MediaSettings.createPhoto(resolution)
kwargs['policy'] = Policy.create(limit)
return super(PhotoSection, self).sync(**kwargs)
class FilterChoice(PlexObject):
""" Represents a single filter choice. These objects are gathered when using filters
        while searching for library items and are the objects returned in the result set of
:func:`~plexapi.library.LibrarySection.listChoices()`.
Attributes:
TAG (str): 'Directory'
server (:class:`~plexapi.server.PlexServer`): PlexServer this client is connected to.
initpath (str): Relative path requested when retrieving specified `data` (optional).
fastKey (str): API path to quickly list all items in this filter
(/library/sections/<section>/all?genre=<key>)
            key (str): Short key (id) of this filter option (used as <key> in fastKey above).
thumb (str): Thumbnail used to represent this filter option.
title (str): Human readable name for this filter option.
type (str): Filter type (genre, contentRating, etc).
"""
TAG = 'Directory'
def _loadData(self, data):
""" Load attribute values from Plex XML response. """
self._data = data
self.fastKey = data.attrib.get('fastKey')
self.key = data.attrib.get('key')
self.thumb = data.attrib.get('thumb')
self.title = data.attrib.get('title')
self.type = data.attrib.get('type')
@utils.registerPlexObject
class Hub(PlexObject):
""" Represents a single Hub (or category) in the PlexServer search.
Attributes:
TAG (str): 'Hub'
hubIdentifier (str): Unknown.
size (int): Number of items found.
title (str): Title of this Hub.
type (str): Type of items in the Hub.
items (str): List of items in the Hub.
"""
TAG = 'Hub'
def _loadData(self, data):
""" Load attribute values from Plex XML response. """
self._data = data
self.hubIdentifier = data.attrib.get('hubIdentifier')
self.size = utils.cast(int, data.attrib.get('size'))
self.title = data.attrib.get('title')
self.type = data.attrib.get('type')
self.key = data.attrib.get('key')
self.items = self.findItems(data)
def __len__(self):
return self.size
@utils.registerPlexObject
class Collections(PlexObject):
TAG = 'Directory'
TYPE = 'collection'
_include = "?includeExternalMedia=1&includePreferences=1"
def _loadData(self, data):
self.ratingKey = utils.cast(int, data.attrib.get('ratingKey'))
self._details_key = "/library/metadata/%s%s" % (self.ratingKey, self._include)
self.key = data.attrib.get('key')
self.type = data.attrib.get('type')
self.title = data.attrib.get('title')
self.subtype = data.attrib.get('subtype')
self.summary = data.attrib.get('summary')
self.index = utils.cast(int, data.attrib.get('index'))
self.thumb = data.attrib.get('thumb')
self.addedAt = utils.toDatetime(data.attrib.get('addedAt'))
self.updatedAt = utils.toDatetime(data.attrib.get('updatedAt'))
self.childCount = utils.cast(int, data.attrib.get('childCount'))
self.minYear = utils.cast(int, data.attrib.get('minYear'))
self.maxYear = utils.cast(int, data.attrib.get('maxYear'))
self.collectionMode = data.attrib.get('collectionMode')
self.collectionSort = data.attrib.get('collectionSort')
@property
def children(self):
return self.fetchItems(self.key)
def __len__(self):
return self.childCount
def delete(self):
part = '/library/metadata/%s' % self.ratingKey
return self._server.query(part, method=self._server._session.delete)
def modeUpdate(self, mode=None):
""" Update Collection Mode
Parameters:
mode: default (Library default)
hide (Hide Collection)
hideItems (Hide Items in this Collection)
showItems (Show this Collection and its Items)
Example:
collection = 'plexapi.library.Collections'
                collection.modeUpdate(mode="hide")
"""
mode_dict = {'default': '-2',
'hide': '0',
'hideItems': '1',
'showItems': '2'}
key = mode_dict.get(mode)
if key is None:
raise BadRequest('Unknown collection mode : %s. Options %s' % (mode, list(mode_dict)))
part = '/library/metadata/%s/prefs?collectionMode=%s' % (self.ratingKey, key)
return self._server.query(part, method=self._server._session.put)
def sortUpdate(self, sort=None):
""" Update Collection Sorting
Parameters:
                sort: release (Order Collection by release dates)
                      alpha (Order Collection Alphabetically)
            Example:
                collection = 'plexapi.library.Collections'
                collection.sortUpdate(sort="alpha")
"""
sort_dict = {'release': '0',
'alpha': '1'}
key = sort_dict.get(sort)
if key is None:
raise BadRequest('Unknown sort dir: %s. Options: %s' % (sort, list(sort_dict)))
part = '/library/metadata/%s/prefs?collectionSort=%s' % (self.ratingKey, key)
return self._server.query(part, method=self._server._session.put)
def posters(self):
""" Returns list of available poster objects. :class:`~plexapi.media.Poster`. """
return self.fetchItems('/library/metadata/%s/posters' % self.ratingKey)
def uploadPoster(self, url=None, filepath=None):
""" Upload poster from url or filepath. :class:`~plexapi.media.Poster` to :class:`~plexapi.video.Video`. """
if url:
key = '/library/metadata/%s/posters?url=%s' % (self.ratingKey, quote_plus(url))
self._server.query(key, method=self._server._session.post)
elif filepath:
key = '/library/metadata/%s/posters?' % self.ratingKey
data = open(filepath, 'rb').read()
self._server.query(key, method=self._server._session.post, data=data)
def setPoster(self, poster):
""" Set . :class:`~plexapi.media.Poster` to :class:`~plexapi.video.Video` """
poster.select()
def arts(self):
""" Returns list of available art objects. :class:`~plexapi.media.Poster`. """
return self.fetchItems('/library/metadata/%s/arts' % self.ratingKey)
def uploadArt(self, url=None, filepath=None):
""" Upload art from url or filepath. :class:`~plexapi.media.Poster` to :class:`~plexapi.video.Video`. """
if url:
key = '/library/metadata/%s/arts?url=%s' % (self.ratingKey, quote_plus(url))
self._server.query(key, method=self._server._session.post)
elif filepath:
key = '/library/metadata/%s/arts?' % self.ratingKey
data = open(filepath, 'rb').read()
self._server.query(key, method=self._server._session.post, data=data)
def setArt(self, art):
""" Set :class:`~plexapi.media.Poster` to :class:`~plexapi.video.Video` """
art.select()
# def edit(self, **kwargs):
# TODO
|
the-stack_0_8034 | """
Day 2: 1202 Program Alarm
"""
from itertools import product
from utils import get_int_list
from intcode.cpu import IntcodeCpu
def puzzle1():
prog = get_int_list('day2')
prog[1] = 12
prog[2] = 2
cpu = IntcodeCpu(prog)
cpu.run()
print(cpu[0])
def puzzle2():
prog = get_int_list('day2')
cpu = IntcodeCpu(prog)
for noun, verb in product(range(100), range(100)):
cpu.push_state()
cpu[1] = noun
cpu[2] = verb
try:
cpu.run()
except (IndexError, ValueError):
continue
if cpu[0] == 19690720:
break
cpu.pop_state()
else:
print("Not Found")
return
print(100 * noun + verb)
if __name__ == '__main__':
puzzle1()
puzzle2()
|
the-stack_0_8035 | # This file is part of the MapProxy project.
# Copyright (C) 2011 Omniscale <http://omniscale.de>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import with_statement
import os
import tempfile
from lxml import etree, html
from nose.tools import eq_
from mapproxy.featureinfo import (combined_inputs, XSLTransformer,
XMLFeatureInfoDoc, HTMLFeatureInfoDoc)
from mapproxy.test.helper import strip_whitespace
def test_combined_inputs():
foo = '<a><b>foo</b></a>'
bar = '<a><b>bar</b></a>'
result = combined_inputs([foo, bar])
result = etree.tostring(result)
eq_(result, b'<a><b>foo</b><b>bar</b></a>')
class TestXSLTransformer(object):
def setup(self):
fd_, self.xsl_script = tempfile.mkstemp('.xsl')
xsl = b"""
<xsl:stylesheet version="1.0"
xmlns:xsl="http://www.w3.org/1999/XSL/Transform">
<xsl:template match="/">
<root>
<xsl:apply-templates select='/a/b'/>
</root>
</xsl:template>
<xsl:template match="/a/b">
<foo><xsl:value-of select="text()" /></foo>
</xsl:template>
</xsl:stylesheet>""".strip()
open(self.xsl_script, 'wb').write(xsl)
def teardown(self):
os.remove(self.xsl_script)
def test_transformer(self):
t = XSLTransformer(self.xsl_script)
doc = t.transform(XMLFeatureInfoDoc('<a><b>Text</b></a>'))
eq_(strip_whitespace(doc.as_string()), b'<root><foo>Text</foo></root>')
def test_multiple(self):
t = XSLTransformer(self.xsl_script)
doc = t.transform(XMLFeatureInfoDoc.combine([
XMLFeatureInfoDoc(x) for x in
[b'<a><b>ab</b></a>',
b'<a><b>ab1</b><b>ab2</b><b>ab3</b></a>',
b'<a><b>ab1</b><c>ac</c><b>ab2</b></a>',
]]))
eq_(strip_whitespace(doc.as_string()),
strip_whitespace(b'''
<root>
<foo>ab</foo>
<foo>ab1</foo><foo>ab2</foo><foo>ab3</foo>
<foo>ab1</foo><foo>ab2</foo>
</root>'''))
eq_(doc.info_type, 'xml')
class TestXMLFeatureInfoDocs(object):
def test_as_string(self):
input_tree = etree.fromstring('<root></root>')
doc = XMLFeatureInfoDoc(input_tree)
eq_(strip_whitespace(doc.as_string()),
b'<root/>')
def test_as_etree(self):
doc = XMLFeatureInfoDoc('<root>hello</root>')
eq_(doc.as_etree().getroot().text, 'hello')
def test_combine(self):
docs = [
XMLFeatureInfoDoc('<root><a>foo</a></root>'),
XMLFeatureInfoDoc('<root><b>bar</b></root>'),
XMLFeatureInfoDoc('<other_root><a>baz</a></other_root>'),
]
result = XMLFeatureInfoDoc.combine(docs)
eq_(strip_whitespace(result.as_string()),
strip_whitespace(b'<root><a>foo</a><b>bar</b><a>baz</a></root>'))
eq_(result.info_type, 'xml')
class TestXMLFeatureInfoDocsNoLXML(object):
def setup(self):
from mapproxy import featureinfo
self.old_etree = featureinfo.etree
featureinfo.etree = None
def teardown(self):
from mapproxy import featureinfo
featureinfo.etree = self.old_etree
def test_combine(self):
docs = [
XMLFeatureInfoDoc(b'<root><a>foo</a></root>'),
XMLFeatureInfoDoc(b'<root><b>bar</b></root>'),
XMLFeatureInfoDoc(b'<other_root><a>baz</a></other_root>'),
]
result = XMLFeatureInfoDoc.combine(docs)
eq_(b'<root><a>foo</a></root>\n<root><b>bar</b></root>\n<other_root><a>baz</a></other_root>',
result.as_string())
eq_(result.info_type, 'text')
class TestHTMLFeatureInfoDocs(object):
def test_as_string(self):
input_tree = html.fromstring('<p>Foo')
doc = HTMLFeatureInfoDoc(input_tree)
assert b'<body><p>Foo</p></body>' in strip_whitespace(doc.as_string())
def test_as_etree(self):
doc = HTMLFeatureInfoDoc('<p>hello</p>')
eq_(doc.as_etree().find('body/p').text, 'hello')
def test_combine(self):
docs = [
HTMLFeatureInfoDoc(b'<html><head><title>Hello<body><p>baz</p><p>baz2'),
HTMLFeatureInfoDoc(b'<p>foo</p>'),
HTMLFeatureInfoDoc(b'<body><p>bar</p></body>'),
]
result = HTMLFeatureInfoDoc.combine(docs)
assert b'<title>Hello</title>' in result.as_string()
assert (b'<body><p>baz</p><p>baz2</p><p>foo</p><p>bar</p></body>' in
result.as_string())
eq_(result.info_type, 'html')
def test_combine_parts(self):
docs = [
HTMLFeatureInfoDoc('<p>foo</p>'),
HTMLFeatureInfoDoc('<body><p>bar</p></body>'),
HTMLFeatureInfoDoc('<html><head><title>Hello<body><p>baz</p><p>baz2'),
]
result = HTMLFeatureInfoDoc.combine(docs)
assert (b'<body><p>foo</p><p>bar</p><p>baz</p><p>baz2</p></body>' in
result.as_string())
eq_(result.info_type, 'html')
class TestHTMLFeatureInfoDocsNoLXML(object):
def setup(self):
from mapproxy import featureinfo
self.old_etree = featureinfo.etree
featureinfo.etree = None
def teardown(self):
from mapproxy import featureinfo
featureinfo.etree = self.old_etree
def test_combine(self):
docs = [
HTMLFeatureInfoDoc(b'<html><head><title>Hello<body><p>baz</p><p>baz2'),
HTMLFeatureInfoDoc(b'<p>foo</p>'),
HTMLFeatureInfoDoc(b'<body><p>bar</p></body>'),
]
result = HTMLFeatureInfoDoc.combine(docs)
eq_(b"<html><head><title>Hello<body><p>baz</p>"
b"<p>baz2\n<p>foo</p>\n<body><p>bar</p></body>",
result.as_string())
eq_(result.info_type, 'text')
|
the-stack_0_8036 | # -*- coding: utf-8 -*-
# @Time : 2020/9/26
# @Author : Benny Jane
# @Email : 暂无
# @File : command.py
# @Project : Flask-Demo
import os
import logging
from logging.handlers import RotatingFileHandler
from flask import request
basedir = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
project_name = os.path.split(os.path.dirname(__file__))[1]
def register_logging(app):
class RequestFormatter(logging.Formatter):
        # By subclassing Formatter, add the request URL and remote address to every log record
def format(self, record):
record.url = request.url
record.remote_addr = request.remote_addr
return super(RequestFormatter, self).format(record)
request_formatter = RequestFormatter(
'[%(asctime)s] %(remote_addr)s requested %(url)s\n'
'%(levelname)s in %(module)s: %(message)s'
)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
log_path = os.path.join(basedir, f'logs/{project_name}')
if not os.path.exists(log_path):
os.mkdir(log_path)
file_handler = RotatingFileHandler("{}/career_plan.log".format(log_path),
maxBytes=10 * 1024 * 1024, backupCount=10)
file_handler.setFormatter(formatter)
file_handler.setLevel(logging.INFO)
    # The overall logger level must be set explicitly; in debug mode it defaults to DEBUG, and without this no log output is produced
app.logger.setLevel(logging.INFO)
if not app.debug:
        # In production, set an appropriate level
        # app.logger.setLevel(logging.ERROR)
app.logger.setLevel(logging.INFO)
app.logger.addHandler(file_handler)
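# A minimal, assumed usage sketch (the factory name create_app is hypothetical):
#   def create_app():
#       app = Flask(__name__)
#       register_logging(app)
#       return app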
|
the-stack_0_8037 | """
References:
[1] E. Branlard, M. Gaunaa - Cylindrical vortex wake model: skewed cylinder, application to yawed or tilted rotors - Wind Energy, 2015
[2] E. Branlard - Wind Turbine Aerodynamics and Vorticity Based Method, Springer, 2017
"""
#--- Legacy python 2.7
from __future__ import division
from __future__ import print_function
# --- General
import unittest
import numpy as np
import numpy.matlib
# --- Local
try:
from .elliptic import ellipticPiCarlson, ellipe, ellipk
from .VortexLine import vl_semiinf_u
except:
from elliptic import ellipticPiCarlson, ellipe, ellipk
from VortexLine import vl_semiinf_u
# --------------------------------------------------------------------------------}
# --- Helper function
# --------------------------------------------------------------------------------{
def skew_components(u_x,u_z,m):
coschi = 1/np.sqrt(1+m**2)
sinchi = m/np.sqrt(1+m**2)
u_zeta = u_z * coschi + u_x * sinchi
u_xi = - u_z * sinchi + u_x * coschi
return u_zeta,u_xi
def polar_components(u_x,u_y,vpsi):
u_r = np.multiply(u_x,np.cos(vpsi)) + np.multiply(u_y,np.sin(vpsi))
u_psi= -np.multiply(u_x,np.sin(vpsi)) + np.multiply(u_y,np.cos(vpsi))
return u_r,u_psi
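# Small sanity-check sketch (not from the original source): a purely tangential
# Cartesian velocity evaluated at psi = 90 deg maps onto the radial direction:
#   u_r, u_psi = polar_components(0.0, 1.0, np.pi/2)   # u_r ~ 1, u_psi ~ 0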
# --------------------------------------------------------------------------------}
# --- Core functions, polar coordinates inputs
# --------------------------------------------------------------------------------{
def svc_tang_u_polar(vr,vpsi,vz,gamma_t=-1,R=1,m=0,ntheta=180,polar_out=False):
""" Induced velocity from a skewed semi infinite cylinder of tangential vorticity.
Takes polar coordinates as inputs, returns velocity either in Cartesian (default) or polar.
The cylinder axis is defined by x=m.z, m=tan(chi). The rotor is in the plane z=0.
The algorithm loops over the control points and performs the integration over theta
INPUTS:
vr,vpsi,vz : flat list of control points in polar coordinates
gamma_t : tangential vorticity of the vortex sheet (circulation per unit of length oriented along psi). (for WT rotating positively along psi , gamma psi is negative)
R : radius of cylinder
m =tan(chi): tangent of wake skew angle
ntheta : number of points used for integration
Reference: [1,2]"""
# m = 0
EPSILON_AXIS=1e-7; # relative threshold for using axis formula
vtheta = np.pi/2 + np.linspace(0, 2*np.pi, ntheta)
# Flattening
shape_in=vr.shape
vr = np.asarray(vr).ravel()
vpsi = np.asarray(vpsi).ravel()
vz = np.asarray(vz).ravel()
# Constants of theta
c = 1 + m**2
bz = R * m * np.cos(vtheta)
u_z = np.zeros(vr.shape)
if polar_out:
u_r = np.zeros(vr.shape)
u_psi = np.zeros(vr.shape)
# ---- Loop on all control points to find velocity
for i,(r,psi,z) in enumerate(zip(vr,vpsi,vz)):
# Functions of theta in the integrand
a = R**2 + r** 2 + z**2 - 2*R*r*np.cos(vtheta - psi)
b = 2 * m * R * np.cos(vtheta) - 2 * m * r * np.cos(psi) - 2 * z
ap, bp = R * z * np.sin(vtheta - psi), -R * np.sin(vtheta - psi)
ar, br = R * z * np.cos(vtheta - psi), -R * np.cos(vtheta - psi)
az = R * (R - r * np.cos(vtheta - psi))
D = 2*gamma_t/(4*np.pi)/(np.multiply(np.sqrt(a),(2 * np.sqrt(a * c)+ b)))
# Integrations
u_r[i] = np.trapz((ar * np.sqrt(c)+ np.multiply(br,np.sqrt(a)))*D, vtheta)
u_psi[i] = np.trapz((ap * np.sqrt(c)+ np.multiply(bp,np.sqrt(a)))*D, vtheta)
u_z[i] = np.trapz((az * np.sqrt(c)+ np.multiply(bz,np.sqrt(a)))*D, vtheta)
# Reshaping to desired shape
u_r = u_r.reshape(shape_in)
u_psi = u_psi.reshape(shape_in)
u_z = u_z.reshape(shape_in)
return u_r,u_psi,u_z
else:
# print('>>>>>> HACK')
# bx, by = -R * np.cos(vtheta), -R * np.sin(vtheta)
# u_x = np.zeros(vr.shape)
# u_y = np.zeros(vr.shape)
# # ---- Loop on all control points to find velocity
# for i,(r,psi,z) in enumerate(zip(vr,vpsi,vz)):
# # Functions of theta in the integrand
# a = R**2 + r** 2 + z**2 - 2*R*r*np.cos(vtheta - psi)
# b = 2 * m * R * np.cos(vtheta) - 2 * m * r * np.cos(psi) - 2 * z
# ax, ay = R * z * np.cos(vtheta), R * z * np.sin(vtheta)
# az = R * (R - r * np.cos(vtheta - psi))
# #D = 2*gamma_t/(4*np.pi)/(np.multiply(np.sqrt(a),(2 * np.sqrt(a * c)+ b)))
# D = -4*gamma_t/(np.sqrt(c)*4*np.pi)/(4*a*c-b**2)
# # Integrations
# u_x[i] = np.trapz((b*bx-2*ax*c)*D, vtheta)
# u_y[i] = np.trapz((b*by-2*ay*c)*D, vtheta)
# u_z[i] = np.trapz((b*bz-2*az*c)*D, vtheta)
bx, by = -R * np.cos(vtheta), -R * np.sin(vtheta)
u_x = np.zeros(vr.shape)
u_y = np.zeros(vr.shape)
# ---- Loop on all control points to find velocity
for i,(r,psi,z) in enumerate(zip(vr,vpsi,vz)):
# Functions of theta in the integrand
a = R**2 + r** 2 + z**2 - 2*R*r*np.cos(vtheta - psi)
b = 2 * m * R * np.cos(vtheta) - 2 * m * r * np.cos(psi) - 2 * z
ax, ay = R * z * np.cos(vtheta), R * z * np.sin(vtheta)
az = R * (R - r * np.cos(vtheta - psi))
D = 2*gamma_t/(4*np.pi)/(np.multiply(np.sqrt(a),(2 * np.sqrt(a * c)+ b)))
# Integrations
u_x[i] = np.trapz((ax * np.sqrt(c)+ np.multiply(bx,np.sqrt(a)))*D, vtheta)
u_y[i] = np.trapz((ay * np.sqrt(c)+ np.multiply(by,np.sqrt(a)))*D, vtheta)
u_z[i] = np.trapz((az * np.sqrt(c)+ np.multiply(bz,np.sqrt(a)))*D, vtheta)
# Reshaping to desired shape
u_x = u_x.reshape(shape_in)
u_y = u_y.reshape(shape_in)
u_z = u_z.reshape(shape_in)
return u_x,u_y,u_z
def svc_longi_u_polar(vr,vpsi,vz,gamma_l=-1,R=1,m=0,ntheta=180,polar_out=False):
""" Raw function, not intended to be exported.
Induced velocity from a skewed semi infinite cylinder of longitudinal vorticity.
Takes polar coordinates as inputs, returns velocity either in Cartesian (default) or polar.
The cylinder axis is defined by x=m.z, m=tan(chi). The rotor is in the plane z=0.
INPUTS:
vr,vpsi,vz : control points in polar coordinates, may be of any shape
gamma_t : tangential vorticity of the vortex sheet (circulation per unit of length oriented along psi). (for WT rotating positively along psi , gamma psi is negative)
R : radius of cylinder
m =tan(chi): tangent of wake skew angle
ntheta : number of points used for integration
Reference: [1,2]"""
EPSILON_AXIS=1e-7; # relative threshold for using axis formula
vtheta = np.linspace(0,2 * np.pi,ntheta) + np.pi / ntheta
# Flattening, and dimensionless!
shape_in=vr.shape
vr = np.asarray(vr/R).ravel()
vpsi = np.asarray(vpsi).ravel()
vz = np.asarray(vz/R).ravel()
u_z = np.zeros(vr.shape)
if polar_out:
u_r = np.zeros(vr.shape)
u_psi = np.zeros(vr.shape)
for i,(r,psi,z) in enumerate(zip(vr,vpsi,vz)):
Den1 = np.sqrt(1 + r**2 + z**2 - 2*r* np.cos(vtheta - psi))
Den2 = - z + m * np.cos(vtheta) + np.sqrt(1 + m ** 2) * np.sqrt(1 + r ** 2 + z ** 2 - 2 * r * np.cos(vtheta - psi)) - m * r * np.cos(psi)
DenInv = gamma_l/(4*np.pi)/np.multiply(Den1,Den2)
u_r[i] = np.trapz(( - m*z*np.sin(psi) + np.sin(vtheta-psi))*DenInv,vtheta)
u_psi[i] = np.trapz((r - m*z*np.cos(psi) - np.cos(vtheta-psi))*DenInv,vtheta)
u_z[i] = np.trapz(m * (-np.sin(vtheta) + r*np.sin(psi)) *DenInv,vtheta)
# Reshaping to input shape
        u_r   = u_r.reshape(shape_in)   # note: reshape u_r itself, not u_psi
u_psi = u_psi.reshape(shape_in)
u_z = u_z.reshape(shape_in)
return (u_r,u_psi,u_z)
else:
u_x = np.zeros(vr.shape)
u_y = np.zeros(vr.shape)
for i,(r,psi,z) in enumerate(zip(vr,vpsi,vz)):
Den1 = np.sqrt(1 + r**2 + z**2 - 2*r* np.cos(vtheta - psi))
Den2 = - z + m * np.cos(vtheta) + np.sqrt(1 + m ** 2) * np.sqrt(1 + r ** 2 + z ** 2 - 2 * r * np.cos(vtheta - psi)) - m * r * np.cos(psi)
DenInv = gamma_l/(4*np.pi)/np.multiply(Den1,Den2)
u_x[i] = np.trapz( (np.sin(vtheta) - r*np.sin(psi)) *DenInv,vtheta)
u_y[i] = np.trapz((- m*z - np.cos(vtheta) + r*np.cos(psi)) *DenInv,vtheta)
u_z[i] = np.trapz(m * (-np.sin(vtheta) + r*np.sin(psi)) *DenInv,vtheta)
# Reshaping to input shape
u_x = u_x.reshape(shape_in)
u_y = u_y.reshape(shape_in)
u_z = u_z.reshape(shape_in)
return (u_x,u_y,u_z)
def svc_root_u_polar(vr,vpsi,vz,Gamma_r=-1,m=0,polar_out=False):
"""
Induced velocity from a skewed root vortex
Takes polar coordinates as inputs, returns velocity either in Cartesian (default) or polar.
The cylinder axis is defined by x=m.z, m=tan(chi). The rotor is in the plane z=0.
INPUTS:
vr,vpsi,vz : control points in polar coordinates, may be of any shape
Gamma_r : Root vortex circulation, negative for a wind turbine
m =tan(chi): tangent of wake skew angle
Reference: [1,2]"""
EPSILON_AXIS=1e-7; # relative threshold for using axis formula
chi = np.arctan(m)
if Gamma_r==0:
return vr*0,vr*0,vr*0
# Flattening
shape_in=vr.shape
vr = np.asarray(vr).ravel()
vpsi = np.asarray(vpsi).ravel()
vz = np.asarray(vz).ravel()
# --- Computes ux, uy, uz, upsi
u_z = np.zeros(vr.shape)
if (m == 0):
u_psi = np.multiply(Gamma_r/(4*np.pi*vr), (1+vz/np.sqrt(vr** 2 + vz**2)))
u_x = -np.sin(vpsi)*u_psi
u_y = np.cos(vpsi)*u_psi
else:
if (np.max(np.abs(vz)) > 0):
# need to use general formula
u_x = np.zeros(vr.shape)
u_y = np.zeros(vr.shape)
e = np.array([np.sin(chi),0,np.cos(chi)])
for i,(r,psi,z) in enumerate(zip(vr,vpsi,vz)):
u_x[i],u_y[i],u_z[i]= vl_semiinf_u(r*np.cos(psi),r*np.sin(psi),z,e[0],e[1],e[2],Gamma_r,visc_model=0,t=0)
u_psi = - u_x*np.sin(vpsi) + u_y* np.cos(vpsi)
else:
# rotor plane analytical (see Yaw article)
u_psi = np.zeros(vr.shape)
coschi = 1 / np.sqrt(1 + m ** 2)
sinchi = m / np.sqrt(1 + m ** 2)
Iz = vr > (EPSILON_AXIS)
bnIz = np.logical_not(Iz)
u_z [Iz] = np.multiply(Gamma_r/(4*np.pi*vr[Iz]), 1.0/(1-np.cos(vpsi[Iz])*sinchi)*sinchi*np.sin(vpsi[Iz]))
u_psi[Iz] = np.multiply(Gamma_r/(4*np.pi*vr[Iz]), 1.0/(1-np.cos(vpsi[Iz])*sinchi)*coschi)
u_z [bnIz] =0
u_psi[bnIz] =0
u_x = -np.sin(vpsi)*u_psi
u_y = np.cos(vpsi)*u_psi
# Reshaping to input shape
u_z = u_z.reshape(shape_in)
if polar_out:
u_r = (u_x * np.cos(vpsi) + u_y * np.sin(vpsi)).reshape(shape_in)
u_psi = u_psi.reshape(shape_in)
return (u_r,u_psi,u_z)
else:
u_x = u_x.reshape(shape_in)
u_y = u_y.reshape(shape_in)
return (u_x,u_y,u_z)
def svc_u_polar(vr,vpsi,vz,gamma_t,gamma_l,Gamma_r,R=1,m=0,ntheta=180,polar_out=False):
""" Induced velocities from a skewed semi infinite cylinder with:
- tangential vorticity gamma_t
- longitudinal vorticity gamma_l
- a root vortex, Gamma_r
"""
u1 ,u2 ,u3 = svc_longi_u_polar(vr,vpsi,vz,gamma_l,R=R,m=m,ntheta=ntheta,polar_out=False)
u1t,u2t,u3t = svc_tang_u_polar (vr,vpsi,vz,gamma_t,R=R,m=m,ntheta=ntheta,polar_out=False)
u1 += u1t
u2 += u2t
u3 += u3t
u1t,u2t,u3t = svc_root_u_polar (vr,vpsi,vz,Gamma_r ,m=m, polar_out=False)
u1 += u1t
u2 += u2t
u3 += u3t
return u1,u2,u3
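# Example usage (illustrative sketch; the values below are assumptions, not reference data):
#   chi = 30*np.pi/180
#   u1, u2, u3 = svc_u_polar(np.array([0.5]), np.array([0.0]), np.array([0.0]),
#                            gamma_t=-1, gamma_l=-0.1, Gamma_r=-1, R=1, m=np.tan(chi))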
# --------------------------------------------------------------------------------}
# --- Main functions with Cartesian inputs
# --------------------------------------------------------------------------------{
def svc_longi_u(Xcp,Ycp,Zcp,gamma_l=-1,R=1,m=0,ntheta=180,polar_out=False):
""" Induced velocity from a skewed semi infinite cylinder of longitudinal vorticity.
The cylinder axis is defined by x=m.z, m=tan(chi). The rotor is in the plane z=0.
INPUTS:
Xcp,Ycp,Zcp: vector or matrix of control points Cartesian Coordinates
gamma_l : longitudinal vorticity of the vortex sheet (circulation per unit of length oriented along zeta), negative for a WT
R : radius of cylinder
m =tan(chi): tangent of wake skew angle
ntheta : number of points used for integration
Reference: [1,2]"""
vr, vpsi = np.sqrt(Xcp**2+Ycp**2), np.arctan2(Ycp,Xcp) # polar coords
u1,u2,u3=svc_longi_u_polar(vr,vpsi,Zcp,gamma_l,R,m,ntheta,polar_out=polar_out)
return u1,u2,u3 # ux,uy,uz OR ur,upsi,uz
def svc_tang_u(Xcp,Ycp,Zcp,gamma_t=-1,R=1,m=0,ntheta=180,polar_out=False):
""" Induced velocity from a skewed semi infinite cylinder of tangential vorticity.
The cylinder axis is defined by x=m.z, m=tan(chi). The rotor is in the plane z=0.
INPUTS:
Xcp,Ycp,Zcp: vector or matrix of control points Cartesian Coordinates
gamma_t : tangential vorticity of the vortex sheet (circulation per unit of length oriented along psi), negative for a WT
R : radius of cylinder
m =tan(chi): tangent of wake skew angle
ntheta : number of points used for integration
Reference: [1,2]"""
vr, vpsi = np.sqrt(Xcp**2+Ycp**2), np.arctan2(Ycp,Xcp) # polar coords
u1,u2,u3 = svc_tang_u_polar(vr,vpsi,Zcp,gamma_t,R,m,ntheta,polar_out=polar_out)
return u1,u2,u3 # ux,uy,uz OR ur,upsi,uz
def svc_root_u(Xcp,Ycp,Zcp,Gamma_r=-1,m=0,polar_out=False):
""" Induced velocity from a skewed root vortex.
The root vortex axis is defined by x=m.z, m=tan(chi). The rotor is in the plane z=0.
INPUTS:
Xcp,Ycp,Zcp: vector or matrix of control points Cartesian Coordinates
Gamma_r : Root vortex circulation, negative for a wind turbine
m =tan(chi): tangent of wake skew angle
ntheta : number of points used for integration
Reference: [1,2]"""
vr, vpsi = np.sqrt(Xcp**2+Ycp**2), np.arctan2(Ycp,Xcp) # polar coords
u1,u2,u3 = svc_root_u_polar(vr,vpsi,Zcp,Gamma_r,m,polar_out=polar_out)
return u1,u2,u3 # ux,uy,uz OR ur,upsi,uz
def svcs_tang_u(Xcp,Ycp,Zcp,gamma_t,R,m,Xcyl,Ycyl,Zcyl,ntheta=180, Ground=False):
"""
Computes the velocity field for nCyl*nr cylinders, extending along z:
nCyl: number of main cylinders
nr : number of concentric cylinders within a main cylinder
INPUTS:
Xcp,Ycp,Zcp: cartesian coordinates of control points where the velocity field is to be computed
gamma_t: array of size (nCyl,nr), distribution of gamma for each cylinder as function of radius
R : array of size (nCyl,nr),
m : array of size (nCyl,nr),
Xcyl,Ycyl,Zcyl: arrays of size (nCyl,) giving the center of each rotor
Ground: boolean, True if ground effect is to be accounted for
All inputs (except Ground) should be numpy arrays
"""
Xcp=np.asarray(Xcp)
Ycp=np.asarray(Ycp)
Zcp=np.asarray(Zcp)
ux = np.zeros(Xcp.shape)
uy = np.zeros(Xcp.shape)
uz = np.zeros(Xcp.shape)
nCyl,nr = R.shape
print('Tang. (skewed) ',end='')
for i in np.arange(nCyl):
Xcp0,Ycp0,Zcp0=Xcp-Xcyl[i],Ycp-Ycyl[i],Zcp-Zcyl[i]
if Ground:
YcpMirror = Ycp0+2*Ycyl[i]
Ylist = [Ycp0,YcpMirror]
else:
Ylist = [Ycp0]
for iy,Y in enumerate(Ylist):
for j in np.arange(nr):
if iy==0:
print('.',end='')
else:
print('m',end='')
if np.abs(gamma_t[i,j]) > 0:
ux1,uy1,uz1 = svc_tang_u(Xcp0,Y,Zcp0,gamma_t[i,j],R[i,j],m[i,j],ntheta=ntheta,polar_out=False)
ux = ux + ux1
uy = uy + uy1
uz = uz + uz1
print('')
return ux,uy,uz
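# Example usage for two rotors with one cylinder each (illustrative sketch; values are assumptions):
#   Xcp, Ycp, Zcp = np.zeros(3), np.linspace(-10,10,3), np.zeros(3)
#   gamma_t = np.array([[-1.0],[-1.0]]); R = np.array([[10.0],[10.0]]); m = np.array([[0.0],[0.0]])
#   Xcyl, Ycyl, Zcyl = np.array([0.0,50.0]), np.array([0.0,0.0]), np.array([0.0,0.0])
#   ux, uy, uz = svcs_tang_u(Xcp, Ycp, Zcp, gamma_t, R, m, Xcyl, Ycyl, Zcyl)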
def svcs_longi_u(Xcp,Ycp,Zcp,gamma_l,R,m,Xcyl,Ycyl,Zcyl,ntheta=180,Ground=False):
""" See svcs_tang_u """
Xcp=np.asarray(Xcp)
Ycp=np.asarray(Ycp)
Zcp=np.asarray(Zcp)
ux = np.zeros(Xcp.shape)
uy = np.zeros(Xcp.shape)
uz = np.zeros(Xcp.shape)
nCyl,nr = R.shape
print('Longi. (skewed) ',end='')
for i in np.arange(nCyl):
Xcp0,Ycp0,Zcp0=Xcp-Xcyl[i],Ycp-Ycyl[i],Zcp-Zcyl[i]
if Ground:
YcpMirror = Ycp0+2*Ycyl[i]
Ylist = [Ycp0,YcpMirror]
else:
Ylist = [Ycp0]
for iy,Y in enumerate(Ylist):
for j in np.arange(nr):
if iy==0:
print('.',end='')
else:
print('m',end='')
if np.abs(gamma_l[i,j]) > 0:
ux1,uy1,uz1 = svc_longi_u(Xcp0,Ycp0,Zcp0,gamma_l[i,j],R[i,j],m[i,j],ntheta=ntheta,polar_out=False)
ux = ux + ux1
uy = uy + uy1
uz = uz + uz1
print('')
return ux,uy,uz
# --------------------------------------------------------------------------------}
# --- Rewrite of Matlab functions, legacy
# --------------------------------------------------------------------------------{
def fV_Trailed(vr,vpsi,vz,m,gamma_longi,ntheta,nout=7):
""" See Yaw article for notations and coordinate system
Return induced velocity by an infinite number of trailed vortices (semi-infinite lines whose starting points lay on the rotor circle)
"""
u_x,u_y,u_z = svc_longi_u_polar(vr,vpsi,vz,gamma_longi,R=1,m=m,ntheta=ntheta,polar_out=False)
if nout==1:
return u_z
u_zeta,u_xi = skew_components(u_x,u_z,m)
u_r,u_psi = polar_components(u_x,u_y,vpsi)
outputs=(u_z,u_psi,u_x,u_y,u_zeta,u_xi,u_r)
return outputs[:nout]
def fV_Tangential(vr,vpsi,vz,m,gamma_t,ntheta,nout=7):
""" This function is purely for backward compatibility with Matlab scripts"""
u_x,u_y,u_z = svc_tang_u_polar(vr,vpsi,vz,gamma_t,R=1,m=m,ntheta=ntheta,polar_out=False)
if nout==1:
return u_z
u_zeta,u_xi=skew_components(u_x,u_z,m)
u_r,u_psi =polar_components(u_x,u_y,vpsi)
outputs=(u_z,u_psi,u_x,u_y,u_zeta,u_xi,u_r)
return outputs[:nout]
def fV_Root(vr,vpsi,vz, m =0, Gamma_r=-1,nout=1):
""" Return induced velocity by the root vortex
Coordinate system is true polar coordinates, with convention of Yaw article
"""
u_x,u_y,u_z= svc_root_u_polar(vr,vpsi,vz,Gamma_r=Gamma_r,m=m,polar_out=False)
if nout==1:
return u_z
u_zeta,u_xi = skew_components(u_x,u_z,m)
u_r,u_psi = polar_components(u_x,u_y,vpsi)
outputs=(u_z,u_psi,u_x,u_y,u_zeta,u_xi,u_r)
return outputs[:nout]
# --------------------------------------------------------------------------------}
# --- Rotor plane flow expansions
# --------------------------------------------------------------------------------{
def fKxit(vr,m):
""" Returns Kxit according to yaw article . vr is in [0;1], m=tan(chi)"""
EPSILON_AXIS=1e-7; # relative threshold for using axis formula
fOye = 0.5 * (vr + 0.4 * vr ** 3 + 0.4 * vr ** 5)
Kxit_num = np.zeros((1,len(vr)))
k2 = ((1 - vr) ** 2) / ((1 + vr) ** 2)
m1 = (np.sqrt(1 + m ** 2) + np.sqrt(vr ** 2 + m ** 2)) / (1 + vr)
m2 = (np.sqrt(1 + m ** 2) - np.sqrt(vr ** 2 + m ** 2)) / (1 + vr)
b1 = m1 ** 2 - 1
b2 = 1 - m2 ** 2
j2 = 1 - k2
kr2 = ellipk(vr ** 2)
Pi1 = ellipticPiCarlson(- b1,j2)
Pi2 = ellipticPiCarlson(b2,j2)
Kxit=np.zeros(vr.shape)
if (m == 0):
k2 = 4 * vr / ((vr + 1) ** 2)
k = np.sqrt(k2)
K = ellipk(k2)
E = ellipe(k2)
Iz = (vr) > (EPSILON_AXIS)
bnIz = np.logical_not(Iz)
Kxit[Iz] = np.multiply(1/(np.pi)*np.sqrt(1.0/vr[Iz]),(np.multiply((2 - k2[Iz]) / k[Iz],K[Iz]) - np.multiply(2.0/k[Iz],E[Iz])))
Kxit[bnIz] = 0
else:
# Boolean mask named Iz/bnIz so it does not shadow the elliptic coefficient b1 defined above
Iz = (vr) > (EPSILON_AXIS)
bnIz = np.logical_not(Iz)
Kxit[Iz] = np.multiply(2*(1+m**2)*vr[Iz]/(m**2*np.pi),kr2[Iz]) - np.multiply(np.multiply(vr[Iz],(vr[Iz] + 1))*np.sqrt(m ** 2 + 1)/(2*m**2*np.pi*np.sqrt(m**2+vr[Iz]**2)),(np.multiply((b1[Iz]+j2[Iz]),Pi1[Iz]) + np.multiply((b2[Iz]-j2[Iz]),Pi2[Iz])))
Kxit[bnIz] = 0
# See yaw article
chi = np.arctan(m)
vtheta = np.linspace(0,np.pi / 2,1000)
Kxit_num=np.zeros(vr.shape)
for ir in np.arange(len(vr)):
r = vr[ir]
Kxit_num[ir] = 2 * r / np.pi * np.trapz(np.sin(2 * vtheta) ** 2.0 / (np.multiply(np.sqrt((1 + r) ** 2 - 4 * r * np.cos(vtheta) ** 2),((r - np.cos(2 * vtheta)) ** 2 * np.cos(chi) ** 2 + np.sin(2 * vtheta) ** 2))), vtheta)
return Kxit,Kxit_num,fOye
def fKzt(r,m,nout=2):
""" Returns Kzt according to yaw article """
fOye = 0.5 * (r + 0.4 * r ** 3 + 0.4 * r ** 5)
vr = r
Kzt = np.zeros(vr.shape)
Kztnum = np.zeros(vr.shape)
if m == 0:
raise Exception('Not intended for m==0')
k2 = ((1 - r) ** 2) / ((1 + r) ** 2)
m1 = (np.sqrt(1 + m ** 2) + np.sqrt(r ** 2 + m ** 2)) / (1 + r)
m2 = (np.sqrt(1 + m ** 2) - np.sqrt(r ** 2 + m ** 2)) / (1 + r)
b1 = m1 ** 2 - 1
b2 = 1 - m2 ** 2
j2 = 1 - k2
kr2 = ellipk(r ** 2)
Pi1 = ellipticPiCarlson(- b1,j2)
Pi2 = ellipticPiCarlson(b2,j2)
Kzt = np.multiply(2 * np.sqrt(1 + m ** 2) * r / (m * np.pi),kr2) - np.multiply(np.multiply(r,(r + 1)) / (2 * m * np.pi * np.sqrt(m ** 2 + r ** 2)),(np.multiply((b1 + j2),Pi1) + np.multiply((b2 - j2),Pi2)))
# Coleman formula B.5 term 3 and 4 !!!!! Note the minus sign added
vtheta = np.linspace(0,np.pi,1000)
for ir,r in enumerate(vr):
Kztnum[ir] = - 1 / (np.pi) * r * np.sqrt(1 + m ** 2) / m * np.trapz(- 1.0 / (np.sqrt(1 + r ** 2 - 2 * r * np.cos(vtheta))) + np.sqrt(1 - 2 * r * np.cos(vtheta) + r ** 2) / (1 + r ** 2 - 2 * r * np.cos(vtheta) + m ** 2 * np.sin(vtheta) ** 2),vtheta)
if nout==1:
return Kzt
elif nout <=3:
outputs=(Kzt,Kztnum,fOye)
elif nout > 3:
Kztnum2 = np.zeros(vr.shape)
Kztnum3 = np.zeros(vr.shape)
Kztnum4 = np.zeros(vr.shape)
# My formula Alternative form1
vtheta = np.linspace(0,np.pi,1000)
for ir,r in enumerate(vr):
Kztnum2[ir] = r * m * np.sqrt(1 + m ** 2) / np.pi * np.trapz(np.sin(vtheta) ** 2.0 / (np.multiply(np.sqrt(1 + r ** 2 - 2 * r * np.cos(vtheta)),(1 + r ** 2 - 2 * r * np.cos(vtheta) + m ** 2 * np.sin(vtheta) ** 2))),vtheta)
# My formula Alternative form3 (WEIRD RESULTS !!!!!!!!!!!!)
vtheta = np.linspace(0,np.pi / 2,1000)
for ir,r in enumerate(vr):
Kztnum3[ir] = 2 * r * np.sqrt(1 + m ** 2) * m / np.pi * np.trapz(np.sin(2 * vtheta) ** 2.0 / (np.multiply(np.sqrt((1 + r) ** 2 - 4 * r * np.cos(vtheta) ** 2),((1 + r) ** 2 - 4 * r * np.cos(vtheta) ** 2 + m ** 2 * np.sin(2 * vtheta) ** 2))),vtheta)
# My formula Alternative form4
chi = np.arctan(m)
vtheta = np.linspace(0,np.pi / 2,1000)
for ir,r in enumerate(vr):
Kztnum4[ir] = 2 * r / np.pi * 1 * np.sin(chi) * np.trapz(np.sin(2 * vtheta) ** 2.0 / (np.multiply(np.sqrt((1 + r) ** 2 - 4 * r * np.cos(vtheta) ** 2),((r - np.cos(2 * vtheta)) ** 2 * np.cos(chi) ** 2 + np.sin(2 * vtheta) ** 2))),vtheta)
outputs=(Kzt,Kztnum,fOye,Kztnum2,Kztnum3,Kztnum4)
return outputs[:nout]
# --------------------------------------------------------------------------------}
# --- TEST
# --------------------------------------------------------------------------------{
class TestSkewedCylinder(unittest.TestCase):
def test_SVC_rotor(self):
""" """
""" See paragraph "Properties on the rotor disk" of [1] """
# data
gamma_t,R,chi = -5, 10, 30*np.pi/180
m=np.tan(chi) # tan(chi)
eps=10**-1 *R
# --- At rotor center (see also next test, stronger)
u_x,u_y,u_z = svc_tang_u(0,0,0,gamma_t,R,m)
u_zeta,u_xi=skew_components(u_x,u_z,m)
uz0=gamma_t/2
np.testing.assert_almost_equal(u_x ,np.tan(chi/2)*uz0 ,decimal=7)
np.testing.assert_almost_equal(u_z ,uz0 ,decimal=7)
np.testing.assert_almost_equal(u_zeta ,uz0 ,decimal=7)
# --- At psi=pi/2 (i.e. x=0), z=0 (Eq 9 from [1]), ux,uz,uzeta,uxi constant!
y=np.linspace(0,R-eps,4)
x=y*0
z=y*0
u_x,u_y,u_z=svc_tang_u(x,y,z,gamma_t,R,m)
u_zeta,u_xi=skew_components(u_x,u_z,m)
uz0=np.asarray([gamma_t/2]*len(x))
np.testing.assert_almost_equal(u_zeta ,uz0 ,decimal=7)
np.testing.assert_almost_equal(u_z ,uz0 ,decimal=7)
np.testing.assert_almost_equal(u_xi/u_zeta,[-np.tan(chi/2)]*len(x),decimal=7)
np.testing.assert_almost_equal(u_x /u_z ,[ np.tan(chi/2)]*len(x),decimal=7)
np.testing.assert_almost_equal(u_x ,uz0*np.tan(chi/2) ,decimal=7)
# --- Component zeta over the entire plane is g_t/2 (Eq 10 from [1])
vR,vPsi = np.meshgrid(np.linspace(0,R-eps,5), np.linspace(0,2*np.pi,12))
x=vR*np.cos(vPsi)
y=vR*np.sin(vPsi)
z=x*0
u_x,u_y,u_z=svc_tang_u(x,y,z,gamma_t,R,m)
u_zeta,u_xi=skew_components(u_x,u_z,m)
uz0=gamma_t/2
np.testing.assert_almost_equal(u_zeta , uz0 ,decimal=5)
# --- Plane y=0 (anti-)symmetry - x,z,zeta,xi: symmetric - y: anti-symmetric
x,y = np.meshgrid(np.linspace(-R/3,R/3,3), [-R/2,R/2] )
z=x*0
u_x,u_y,u_z=svc_tang_u(x,y,z,gamma_t,R,m)
u_zeta,u_xi=skew_components(u_x,u_z,m)
np.testing.assert_almost_equal(u_x [0,:], u_x [1,:])
np.testing.assert_almost_equal(u_z [0,:], u_z [1,:])
np.testing.assert_almost_equal(u_zeta[0,:], u_zeta[1,:])
np.testing.assert_almost_equal(u_xi [0,:], u_xi [1,:])
np.testing.assert_almost_equal(u_y [0,:],-u_y [1,:]) # anti-symmetric
# --- Radial anti-symmetry of components x,y,z about their origin value
r0 = R/2 # cannot do negative r here since definition of r is positive
psi0 = np.linspace(0,np.pi,10)
vPsi = np.array([psi0 , psi0+np.pi] ).T
x=r0*np.cos(vPsi)
y=r0*np.sin(vPsi)
z=x*0
u_x,u_y,u_z =svc_tang_u(x,y,z,gamma_t,R,m)
u_zeta,u_xi=skew_components(u_x,u_z,m)
u_x0,u_y0,u_z0=svc_tang_u(0,0,0,gamma_t,R,m)
u_zeta0,u_xi0=skew_components(u_x0,u_z0,m)
np.testing.assert_almost_equal(u_x [:,0]+u_x [:,1], 2*u_x0 )
np.testing.assert_almost_equal(u_y [:,0]+u_y [:,1], 2*u_y0 )
np.testing.assert_almost_equal(u_z [:,0]+u_z [:,1], 2*u_z0 )
np.testing.assert_almost_equal(u_zeta[:,0]+u_zeta[:,1], 2*u_zeta0)
np.testing.assert_almost_equal(u_xi [:,0]+u_xi [:,1], 2*u_xi0 )
def test_SVC_farwake(self):
""" """
""" See paragraph "Properties on the rotor disk" of [1] """
# data
gamma_t,R,chi = -5, 10, 30*np.pi/180
m=np.tan(chi) # tan(chi)
eps=10**-1 *R
z0=1000*R # Far wake
# --- At rotor center (see also next test, stronger)
#u_x,u_y,u_z=svc_tang_u(0,0,0,gamma_t,R,m)
#uz0=gamma_t/2
#np.testing.assert_almost_equal(u_x ,np.tan(chi/2)*uz0 ,decimal=7)
#np.testing.assert_almost_equal(u_z ,uz0 ,decimal=7)
#np.testing.assert_almost_equal(u_zeta ,uz0 ,decimal=7)
# --- Component zeta over the entire plane is g_t/2 (Eq 10 from [1])
vR,vTheta = np.meshgrid(np.linspace(0,R-eps,5), np.linspace(0,2*np.pi,12))
x=vR*np.cos(vTheta)+z0*m
y=vR*np.sin(vTheta)
z=x*0 + z0
u_x,u_y,u_z=svc_tang_u(x,y,z,gamma_t,R,m)
u_zeta,u_xi=skew_components(u_x,u_z,m)
np.testing.assert_almost_equal(u_zeta , gamma_t , decimal=5)
np.testing.assert_almost_equal(u_xi , -gamma_t*np.tan(chi/2) , decimal=5)
np.testing.assert_almost_equal(u_z , gamma_t , decimal=5)
np.testing.assert_almost_equal(u_x , gamma_t*np.tan(chi/2) , decimal=5)
#print('ux',u_x)
#print('uy',u_y)
#print('uz',u_z)
#print('uzeta',u_zeta)
#print('uxi',u_xi)
# def test_singularities(self):
# # TODO!
#
# def test_regularization(self):
# #TODO
#
# def test_multirotor(self):
# #TODO
def test_SVC_rings(self):
# Test that induction is close to the one obtained from a series of rings
try:
from .VortexRing import rings_u
except:
try:
from wiz.VortexRing import rings_u
except:
from VortexRing import rings_u
# Parameters
chi = 30*np.pi/180
m=np.tan(chi)
gamma_t, R= -1, 10
eps=10**-6 *R
# Parameters for rings
nRings = 1000
z_max = 20*2*R
Zr = np.linspace(0,z_max,nRings)
dzeta = (Zr[1]-Zr[0])/np.cos(chi)
vGamma_r = Zr*0 + gamma_t*dzeta
vR_r = Zr*0 + R
Xr = m*Zr
Yr = 0*Zr
def compare(x,y,z,dec):
ux,uy,uz = svc_tang_u(x,y,z,gamma_t,R,m, polar_out=False)
ux_r,uy_r,uz_r = rings_u(x,y,z,vGamma_r,vR_r,Xr,Yr,Zr,polar_out = False)
np.testing.assert_almost_equal(ux,ux_r,decimal=dec)
np.testing.assert_almost_equal(uy,uy_r,decimal=dec)
np.testing.assert_almost_equal(uz,uz_r,decimal=dec)
return ux,uy,uz,ux_r,uy_r,uz_r
# --- test on rotor
x0=np.linspace(-2*R,2*R,40)
x,y,z=x0,x0*0,x0*0
b=np.abs(np.sqrt((x-z*m)**2)-R)>0.1*R
x,y,z=x[b],y[b],z[b]
ux,uy,uz,ux_r,uy_r,uz_r=compare(x,y,z,1)
# --- test at -R downstream
x,y,z=x0,x0*0,x0*0-R
b=np.abs(np.sqrt((x-z*m)**2)-R)>0.1*R
x,y,z=x[b],y[b],z[b]
ux,uy,uz,ux_r,uy_r,uz_r=compare(x,y,z,2)
# --- test at +R upstream
x,y,z=x0,x0*0,x0*0+R
b=np.abs(np.sqrt((x-z*m)**2)-R)>0.1*R
x,y,z=x[b],y[b],z[b]
ux,uy,uz,ux_r,uy_r,uz_r=compare(x,y,z,2)
#import matplotlib.pyplot as plt
#plt.figure()
#plt.plot(x,ux)
#plt.plot(x,ux_r)
#plt.figure()
#plt.plot(x,uy)
#plt.plot(x,uy_r)
#plt.figure()
#plt.plot(x,uz)
#plt.plot(x,uz_r)
#plt.show()
if __name__ == "__main__":
# TestCylinder().test_singularities()
unittest.main()
|
the-stack_0_8039 | # Except for the PyTorch parts, the content of this file is copied from https://github.com/abisee/pointer-generator/blob/master/
from __future__ import unicode_literals, print_function, division
import sys
reload(sys)
sys.setdefaultencoding('utf8')
import os
import time
import argparse
from datetime import datetime
import torch
from torch.autograd import Variable
import pandas as pd
from tqdm import tqdm
from rouge import Rouge
from data_util.batcher import Batcher
from data_util.data import Vocab
from data_util import data, config
from model import Model
from data_util.utils import write_for_rouge
from train_util import get_input_from_batch
import warnings
warnings.filterwarnings("ignore", category=UserWarning)
use_cuda = config.use_gpu and torch.cuda.is_available()
class Beam(object):
def __init__(self, tokens, log_probs, state, context, coverage):
self.tokens = tokens
self.log_probs = log_probs
self.state = state
self.context = context
self.coverage = coverage
def extend(self, token, log_prob, state, context, coverage):
return Beam(tokens = self.tokens + [token],
log_probs = self.log_probs + [log_prob],
state = state,
context = context,
coverage = coverage)
@property
def latest_token(self):
return self.tokens[-1]
@property
def avg_log_prob(self):
return sum(self.log_probs) / len(self.tokens)
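# Illustrative sketch of how a hypothesis grows during decoding (values are assumptions):
#   beam = Beam(tokens=[start_id], log_probs=[0.0], state=(h, c), context=ctx, coverage=None)
#   beam = beam.extend(token=42, log_prob=-1.2, state=(h2, c2), context=ctx2, coverage=None)
#   beam.avg_log_prob  # length-normalized score used to rank hypotheses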
class BeamSearch(object):
def __init__(self, model_file_path, data_folder, log_file_id):
# model_name = os.path.basename(model_file_path)
self._decode_dir = os.path.join(config.log_root, 'decode_%s' % (log_file_id))
self._rouge_ref_dir = os.path.join(self._decode_dir, 'rouge_ref')
self._rouge_dec_dir = os.path.join(self._decode_dir, 'rouge_dec_dir')
for p in [self._decode_dir, self._rouge_ref_dir, self._rouge_dec_dir]:
if not os.path.exists(p):
os.mkdir(p)
dp = config.get_data_paths(data_folder)
self.vocab = Vocab(dp['vocab'], config.vocab_size)
self.batcher = Batcher(dp['decode'], self.vocab, mode='decode', batch_size=config.beam_size, single_pass=True)
time.sleep(15)
self.model = Model(model_file_path, is_eval=True)
def sort_beams(self, beams):
return sorted(beams, key=lambda h: h.avg_log_prob, reverse=True)
def decode(self, log_file_id):
start = time.time()
counter = 0
batch = self.batcher.next_batch()
while batch is not None:
# Run beam search to get best Hypothesis
best_summary = self.beam_search(batch)
# Extract the output ids from the hypothesis and convert back to words
output_ids = [int(t) for t in best_summary.tokens[1:]]
decoded_words = data.outputids2words(output_ids, self.vocab,
(batch.art_oovs[0] if config.pointer_gen else None))
# Remove the [STOP] token from decoded_words, if necessary
try:
fst_stop_idx = decoded_words.index(data.STOP_DECODING)
decoded_words = decoded_words[:fst_stop_idx]
except ValueError:
decoded_words = decoded_words
original_abstract_sents = batch.original_abstracts_sents[0]
write_for_rouge(original_abstract_sents, decoded_words, counter,
self._rouge_ref_dir, self._rouge_dec_dir)
counter += 1
if counter % config.print_interval == 0:
print('Examples %d-%d decoded in %d sec'%(counter-config.print_interval, counter, time.time() - start))
start = time.time()
batch = self.batcher.next_batch()
print("Decoder has finished reading dataset for single pass.")
print("Now starting ROUGE eval...")
rouge_1_df, rouge_2_df, rouge_l_df = self.rouge_eval(self._rouge_dec_dir, self._rouge_ref_dir)
self.rouge_save(log_file_id, rouge_1_df, rouge_2_df, rouge_l_df)
def beam_search(self, batch):
#batch should have only one example
enc_batch, enc_padding_mask, enc_lens, enc_batch_extend_vocab, extra_zeros, c_t_0, coverage_t_0 = \
get_input_from_batch(batch, use_cuda)
encoder_outputs, encoder_feature, encoder_hidden = self.model.encoder(enc_batch, enc_lens)
s_t_0 = self.model.reduce_state(encoder_hidden)
dec_h, dec_c = s_t_0 # 1 x 2*hidden_size
dec_h = dec_h.squeeze()
dec_c = dec_c.squeeze()
#decoder batch preparation, it has beam_size example initially everything is repeated
beams = [Beam(tokens=[self.vocab.word2id(data.START_DECODING)],
log_probs=[0.0],
state=(dec_h[0], dec_c[0]),
context = c_t_0[0],
coverage=(coverage_t_0[0] if config.is_coverage else None))
for _ in range(config.beam_size)]
results = []
steps = 0
while steps < config.max_dec_steps and len(results) < config.beam_size:
latest_tokens = [h.latest_token for h in beams]
latest_tokens = [t if t < self.vocab.size() else self.vocab.word2id(data.UNKNOWN_TOKEN) \
for t in latest_tokens]
y_t_1 = Variable(torch.LongTensor(latest_tokens))
if use_cuda:
y_t_1 = y_t_1.cuda()
all_state_h =[]
all_state_c = []
all_context = []
for h in beams:
state_h, state_c = h.state
all_state_h.append(state_h)
all_state_c.append(state_c)
all_context.append(h.context)
s_t_1 = (torch.stack(all_state_h, 0).unsqueeze(0), torch.stack(all_state_c, 0).unsqueeze(0))
c_t_1 = torch.stack(all_context, 0)
coverage_t_1 = None
if config.is_coverage:
all_coverage = []
for h in beams:
all_coverage.append(h.coverage)
coverage_t_1 = torch.stack(all_coverage, 0)
final_dist, s_t, c_t, attn_dist, p_gen, coverage_t = self.model.decoder(y_t_1, s_t_1,
encoder_outputs, encoder_feature, enc_padding_mask, c_t_1,
extra_zeros, enc_batch_extend_vocab, coverage_t_1, steps)
log_probs = torch.log(final_dist)
topk_log_probs, topk_ids = torch.topk(log_probs, config.beam_size * 2)
dec_h, dec_c = s_t
dec_h = dec_h.squeeze()
dec_c = dec_c.squeeze()
all_beams = []
num_orig_beams = 1 if steps == 0 else len(beams)
for i in range(num_orig_beams):
h = beams[i]
state_i = (dec_h[i], dec_c[i])
context_i = c_t[i]
coverage_i = (coverage_t[i] if config.is_coverage else None)
for j in range(config.beam_size * 2): # for each of the top 2*beam_size hyps:
new_beam = h.extend(token=topk_ids[i, j].item(),
log_prob=topk_log_probs[i, j].item(),
state=state_i,
context=context_i,
coverage=coverage_i)
all_beams.append(new_beam)
beams = []
for h in self.sort_beams(all_beams):
if h.latest_token == self.vocab.word2id(data.STOP_DECODING):
if steps >= config.min_dec_steps:
results.append(h)
else:
beams.append(h)
if len(beams) == config.beam_size or len(results) == config.beam_size:
break
steps += 1
if len(results) == 0:
results = beams
beams_sorted = self.sort_beams(results)
return beams_sorted[0]
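# Note: topk expands 2*beam_size candidates per step so that enough running
# hypotheses remain after those ending in [STOP] are moved to `results`.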
def rouge_eval(self, decoded_dir, ref_dir):
rouge = Rouge()
columns=['F1','Recall','Precision']
rouge_l_df = pd.DataFrame(columns=columns)
rouge_1_df = pd.DataFrame(columns=columns)
rouge_2_df = pd.DataFrame(columns=columns)
not_found_list = []
file_count = len(os.listdir(ref_dir))
print('Rouge Evaluation started for {} files..'.format(file_count))
for i in tqdm (range(file_count), desc='Running'):
index = str(i).zfill(6)
dec_file = decoded_dir + "/" + index + '_decoded.txt'
ref_file = ref_dir + "/" + index + '_reference.txt'
if os.path.isfile(dec_file) and os.path.isfile(ref_file):
with open(dec_file, 'r') as file:
decoded = file.read().rstrip().decode("utf8")
with open(ref_file, 'r') as file:
reference = file.read().rstrip().decode("utf8")
# If somehow reference file is empty (a rare case bug, cause of which is undetected) put a placeholder.
if reference == '':
reference = '[Input can not be found]'
score = rouge.get_scores(decoded, reference)[0]
rouge_l_df.loc[i] = [score['rouge-l']['f'], score['rouge-l']['r'], score['rouge-l']['p']]
rouge_1_df.loc[i] = [score['rouge-1']['f'], score['rouge-1']['r'], score['rouge-1']['p']]
rouge_2_df.loc[i] = [score['rouge-2']['f'], score['rouge-2']['r'], score['rouge-2']['p']]
else:
not_found_list.append((dec_file, ref_file))
if len(not_found_list) != 0:
print('{} files could not be identified.'.format(len(not_found_list)))
#print(not_found_list)
print('Evaluation Finished..')
return [rouge_1_df, rouge_2_df, rouge_l_df]
def rouge_save(self, save_dir, rouge_1_df, rouge_2_df, rouge_l_df):
save_dir = "logs/decode_"+save_dir
if not os.path.exists(save_dir+'/rouge_scores/'):
os.makedirs(save_dir+'/rouge_scores/')
rouge_l_df.to_csv(save_dir+'/rouge_scores/rouge_l.csv')
rouge_1_df.to_csv(save_dir+'/rouge_scores/rouge_1.csv')
rouge_2_df.to_csv(save_dir+'/rouge_scores/rouge_2.csv')
print('Rouge scores saved..')
with open(save_dir+'/rouge_scores/summary.txt', 'w') as f:
for df, rouge in zip([rouge_1_df, rouge_2_df,rouge_l_df], ['ROUGE-1','ROUGE-2','ROUGE-L']):
print(rouge)
f.write(rouge+"\n")
for metric in rouge_l_df.columns:
line = "{} Mean {}".format(round(df[metric].mean(),4), metric)
print(line)
f.write(line+"\n")
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="Decode script")
parser.add_argument("-m",
dest="model_file_path",
required=False,
default=None,
help="Model file for retraining (default: None).")
parser.add_argument("-d",
dest="data_folder",
required=True,
default=None,
help="Dataset name 'data_T50', 'cnn' or 'movie_quotes' (default: None).")
parser.add_argument("-l",
dest="log_file_id",
required=False,
default=datetime.now().strftime("%Y%m%d_%H%M%S"),
help="Postfix for decode log file (default: date_time).")
args = parser.parse_args()
beam_Search_processor = BeamSearch(args.model_file_path, args.data_folder, args.log_file_id)
beam_Search_processor.decode(args.log_file_id)
# rouge_1_df, rouge_2_df, rouge_l_df = beam_Search_processor.rouge_eval(beam_Search_processor._rouge_dec_dir, beam_Search_processor._rouge_ref_dir)
# beam_Search_processor.rouge_save(args.log_file_id, rouge_1_df, rouge_2_df, rouge_l_df)
|
the-stack_0_8040 | #!/usr/bin/env python
from setuptools import setup, find_packages
desc = ''
with open('README.rst') as f:
desc = f.read()
setup(
name='wheelify',
version='0.1.4',
description=('Simple manylinux wheel builder utility'),
long_description=desc,
url='https://github.com/jmvrbanac/wheelify',
author='John Vrbanac',
author_email='[email protected]',
license='Apache v2',
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 3.6',
],
keywords='manylinux wheel builder',
packages=find_packages(exclude=['contrib', 'docs', 'test*']),
install_requires=[],
package_data={},
data_files=[],
entry_points={
'console_scripts': [
'wheelify = wheelify.app:main'
],
},
)
|
the-stack_0_8041 | from setuptools import setup
from setuptools import find_packages
import os
this_dir = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(this_dir, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='megnet',
version='0.3.5',
description='MatErials Graph Networks for machine learning of molecules and crystals.',
long_description=long_description,
long_description_content_type='text/markdown',
author='Chi Chen',
author_email='[email protected]',
download_url='https://github.com/materialsvirtuallab/megnet',
license='BSD',
install_requires=['keras', 'numpy', 'tensorflow', "scikit-learn",
'pymatgen', 'monty'],
extras_require={
'model_saving': ['h5py'],
'molecules': ['openbabel', 'rdkit']
},
packages=find_packages(),
package_data={
"megnet": ["*.json", "*.md"]
},
keywords=["materials", "science", "machine", "learning", "deep", "graph", "networks", "neural"],
classifiers=[
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Development Status :: 4 - Beta",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: BSD License",
"Operating System :: OS Independent",
"Topic :: Scientific/Engineering :: Information Analysis",
"Topic :: Scientific/Engineering :: Physics",
"Topic :: Scientific/Engineering :: Chemistry",
"Topic :: Software Development :: Libraries :: Python Modules"
],
)
|
the-stack_0_8042 | """Message Flags class."""
import logging
import binascii
from insteonplm.constants import (MESSAGE_FLAG_EXTENDED_0X10,
MESSAGE_TYPE_ALL_LINK_BROADCAST,
MESSAGE_TYPE_ALL_LINK_CLEANUP,
MESSAGE_TYPE_ALL_LINK_CLEANUP_ACK,
MESSAGE_TYPE_ALL_LINK_CLEANUP_NAK,
MESSAGE_TYPE_BROADCAST_MESSAGE,
MESSAGE_TYPE_DIRECT_MESSAGE_ACK,
MESSAGE_TYPE_DIRECT_MESSAGE_NAK)
_LOGGER = logging.getLogger(__name__)
class MessageFlags():
"""Message Flags class use in Standard and Extended messages."""
def __init__(self, flags=0x00):
"""Init the MessageFlags class."""
self._messageType = None
self._extended = None
self._hopsLeft = None
self._hopsMax = None
if flags is not None:
self._set_properties(flags)
def __repr__(self):
"""Representation of the message flags."""
return self.hex
def __str__(self):
"""Return a string representation of message flags."""
return self.hex
def __eq__(self, other):
"""Test for equality."""
if hasattr(other, 'messageType'):
is_eq = self._messageType == other.messageType
is_eq = is_eq and self._extended == other.extended
return is_eq
return False
def __ne__(self, other):
"""Test for not equals."""
if hasattr(other, 'messageType'):
return not self.__eq__(other)
return True
def matches_pattern(self, other):
"""Test if current message match a patterns or template."""
if hasattr(other, 'messageType'):
messageTypeIsEqual = False
if self.messageType is None or other.messageType is None:
messageTypeIsEqual = True
else:
messageTypeIsEqual = (self.messageType == other.messageType)
extendedIsEqual = False
if self.extended is None or other.extended is None:
extendedIsEqual = True
else:
extendedIsEqual = (self.extended == other.extended)
return messageTypeIsEqual and extendedIsEqual
return False
@classmethod
def get_properties(cls):
"""Get all properties of the MessageFlags class."""
property_names = [p for p in dir(cls)
if isinstance(getattr(cls, p), property)]
return property_names
@property
def isBroadcast(self):
"""Test if the message is a broadcast message type."""
return (self._messageType & MESSAGE_TYPE_BROADCAST_MESSAGE ==
MESSAGE_TYPE_BROADCAST_MESSAGE)
@property
def isDirect(self):
"""Test if the message is a direct message type."""
direct = (self._messageType == 0x00)
if self.isDirectACK or self.isDirectNAK:
direct = True
return direct
@property
def isDirectACK(self):
"""Test if the message is a direct ACK message type."""
return self._messageType == MESSAGE_TYPE_DIRECT_MESSAGE_ACK
@property
def isDirectNAK(self):
"""Test if the message is a direct NAK message type."""
return self._messageType == MESSAGE_TYPE_DIRECT_MESSAGE_NAK
@property
def isAllLinkBroadcast(self):
"""Test if the message is an ALl-Link broadcast message type."""
return self._messageType == MESSAGE_TYPE_ALL_LINK_BROADCAST
@property
def isAllLinkCleanup(self):
"""Test if the message is a All-Link cleanup message type."""
return self._messageType == MESSAGE_TYPE_ALL_LINK_CLEANUP
@property
def isAllLinkCleanupACK(self):
"""Test if the message is a All-LInk cleanup ACK message type."""
return self._messageType == MESSAGE_TYPE_ALL_LINK_CLEANUP_ACK
@property
def isAllLinkCleanupNAK(self):
"""Test if the message is a All-Link cleanup NAK message type."""
return self._messageType == MESSAGE_TYPE_ALL_LINK_CLEANUP_NAK
@property
def isExtended(self):
"""Test if the message is an extended message type."""
return self._extended == 1
@property
def hopsLeft(self):
"""Return the number of hops left in message the trasport."""
return self._hopsLeft
@property
def hopsMax(self):
"""Return the maximum number of hops allowed for this message."""
return self._hopsMax
@hopsMax.setter
def hopsMax(self, val):
"""Set the maximum number of hops allowed for this message."""
self._hopsMax = val
@property
def messageType(self):
"""Return the message type."""
return self._messageType
@messageType.setter
def messageType(self, val):
"""Set the message type."""
if val in range(0, 8):
self._messageType = val
else:
raise ValueError
@property
def extended(self):
"""Return the extended flag."""
return self._extended
@extended.setter
def extended(self, val):
"""Set the extended flag."""
if val in [None, 0, 1]:
self._extended = val
else:
raise ValueError
# pylint: disable=protected-access
@classmethod
def create(cls, messageType, extended, hopsleft=3, hopsmax=3):
"""Create message flags.
messageType: integer 0 to 7:
MESSAGE_TYPE_DIRECT_MESSAGE = 0
MESSAGE_TYPE_DIRECT_MESSAGE_ACK = 1
MESSAGE_TYPE_ALL_LINK_CLEANUP = 2
MESSAGE_TYPE_ALL_LINK_CLEANUP_ACK = 3
MESSAGE_TYPE_BROADCAST_MESSAGE = 4
MESSAGE_TYPE_DIRECT_MESSAGE_NAK = 5
MESSAGE_TYPE_ALL_LINK_BROADCAST = 6
MESSAGE_TYPE_ALL_LINK_CLEANUP_NAK = 7
extended: 1 for extended, 0 for standard
hopsleft: int 0 - 3
hopsmax: int 0 - 3
"""
flags = MessageFlags(None)
if messageType < 8:
flags._messageType = messageType
else:
flags._messageType = messageType >> 5
if extended in [0, 1, True, False]:
if extended:
flags._extended = 1
else:
flags._extended = 0
else:
flags._extended = extended >> 4
flags._hopsLeft = hopsleft
flags._hopsMax = hopsmax
return flags
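# Example (illustrative sketch): flags for a standard direct ACK with 3 hops left of 3:
#   flags = MessageFlags.create(MESSAGE_TYPE_DIRECT_MESSAGE_ACK, extended=0, hopsleft=3, hopsmax=3)
#   flags.hex  # '2f'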
@classmethod
def template(cls, messageType=None, extended=None,
hopsleft=None, hopsmax=None):
"""Create message flags template.
messageType: integer 0 to 7 or None:
MESSAGE_TYPE_DIRECT_MESSAGE = 0
MESSAGE_TYPE_DIRECT_MESSAGE_ACK = 1
MESSAGE_TYPE_ALL_LINK_CLEANUP = 2
MESSAGE_TYPE_ALL_LINK_CLEANUP_ACK = 3
MESSAGE_TYPE_BROADCAST_MESSAGE = 4
MESSAGE_TYPE_DIRECT_MESSAGE_NAK = 5
MESSAGE_TYPE_ALL_LINK_BROADCAST = 6
MESSAGE_TYPE_ALL_LINK_CLEANUP_NAK = 7
extended: 1 for extended, 0 for standard or None
hopsleft: int 0 - 3
hopsmax: int 0 - 3
"""
flags = MessageFlags(None)
if messageType is None:
flags._messageType = None
elif messageType < 8:
flags._messageType = messageType
else:
flags._messageType = messageType >> 5
if extended is None:
flags._extended = None
elif extended in [0, 1, True, False]:
if extended:
flags._extended = 1
else:
flags._extended = 0
else:
flags._extended = extended >> 4
flags._hopsLeft = hopsleft
flags._hopsMax = hopsmax
return flags
@property
def bytes(self):
"""Return a byte representation of the message flags."""
flagByte = 0x00
messageType = 0
if self._messageType is not None:
messageType = self._messageType << 5
extendedBit = 0 if self._extended is None else self._extended << 4
hopsMax = 0 if self._hopsMax is None else self._hopsMax
hopsLeft = 0 if self._hopsLeft is None else (self._hopsLeft << 2)
flagByte = flagByte | messageType | extendedBit | hopsLeft | hopsMax
return bytes([flagByte])
@property
def hex(self):
"""Return a hexadecimal representation of the message flags."""
return binascii.hexlify(self.bytes).decode()
# pylint: disable=no-self-use
def _normalize(self, flags):
"""Take any format of flags and turn it into a hex string."""
norm = None
if isinstance(flags, MessageFlags):
norm = flags.bytes
elif isinstance(flags, bytearray):
norm = binascii.hexlify(flags)
elif isinstance(flags, int):
norm = bytes([flags])
elif isinstance(flags, bytes):
norm = binascii.hexlify(flags)
elif isinstance(flags, str):
flags = flags[0:2]
norm = binascii.hexlify(binascii.unhexlify(flags.lower()))
elif flags is None:
norm = None
else:
_LOGGER.warning('MessageFlags with unknown type %s: %r',
type(flags), flags)
return norm
def _set_properties(self, flags):
"""Set the properties of the message flags based on a byte input."""
flagByte = self._normalize(flags)
if flagByte is not None:
self._messageType = (flagByte[0] & 0xe0) >> 5
self._extended = (flagByte[0] & MESSAGE_FLAG_EXTENDED_0X10) >> 4
self._hopsLeft = (flagByte[0] & 0x0c) >> 2
self._hopsMax = flagByte[0] & 0x03
else:
self._messageType = None
self._extended = None
self._hopsLeft = None
self._hopsMax = None
|
the-stack_0_8043 | # Copyright 2018 The Cirq Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import functools
import itertools
import numpy as np
import pytest
import cirq
from cirq.protocols.act_on_protocol_test import DummyActOnArgs
from cirq.testing import (
EqualsTester,
assert_allclose_up_to_global_phase,
)
_bools = (False, True)
_paulis = (cirq.X, cirq.Y, cirq.Z)
def _assert_not_mirror(gate) -> None:
trans_x = gate.transform(cirq.X)
trans_y = gate.transform(cirq.Y)
trans_z = gate.transform(cirq.Z)
right_handed = (
trans_x.flip ^ trans_y.flip ^ trans_z.flip ^ (trans_x.to.relative_index(trans_y.to) != 1)
)
assert right_handed, 'Mirrors'
def _assert_no_collision(gate) -> None:
trans_x = gate.transform(cirq.X)
trans_y = gate.transform(cirq.Y)
trans_z = gate.transform(cirq.Z)
assert trans_x.to != trans_y.to, 'Collision'
assert trans_y.to != trans_z.to, 'Collision'
assert trans_z.to != trans_x.to, 'Collision'
def _all_rotations():
for (
pauli,
flip,
) in itertools.product(_paulis, _bools):
yield cirq.PauliTransform(pauli, flip)
def _all_rotation_pairs():
for px, flip_x, pz, flip_z in itertools.product(_paulis, _bools, _paulis, _bools):
if px == pz:
continue
yield cirq.PauliTransform(px, flip_x), cirq.PauliTransform(pz, flip_z)
def _all_clifford_gates():
for trans_x, trans_z in _all_rotation_pairs():
yield cirq.SingleQubitCliffordGate.from_xz_map(trans_x, trans_z)
@pytest.mark.parametrize('pauli,flip_x,flip_z', itertools.product(_paulis, _bools, _bools))
def test_init_value_error(pauli, flip_x, flip_z):
with pytest.raises(ValueError):
cirq.SingleQubitCliffordGate.from_xz_map((pauli, flip_x), (pauli, flip_z))
@pytest.mark.parametrize('trans_x,trans_z', _all_rotation_pairs())
def test_init_from_xz(trans_x, trans_z):
gate = cirq.SingleQubitCliffordGate.from_xz_map(trans_x, trans_z)
assert gate.transform(cirq.X) == trans_x
assert gate.transform(cirq.Z) == trans_z
_assert_not_mirror(gate)
_assert_no_collision(gate)
@pytest.mark.parametrize(
'trans1,trans2,from1',
(
(trans1, trans2, from1)
for trans1, trans2, from1 in itertools.product(_all_rotations(), _all_rotations(), _paulis)
if trans1.to != trans2.to
),
)
def test_init_from_double_map_vs_kwargs(trans1, trans2, from1):
from2 = cirq.Pauli.by_relative_index(from1, 1)
from1_str, from2_str = (str(frm).lower() + '_to' for frm in (from1, from2))
gate_kw = cirq.SingleQubitCliffordGate.from_double_map(**{from1_str: trans1, from2_str: trans2})
gate_map = cirq.SingleQubitCliffordGate.from_double_map({from1: trans1, from2: trans2})
# Test initializes the same gate
assert gate_kw == gate_map
# Test initializes what was expected
assert gate_map.transform(from1) == trans1
assert gate_map.transform(from2) == trans2
_assert_not_mirror(gate_map)
_assert_no_collision(gate_map)
@pytest.mark.parametrize(
'trans1,from1',
((trans1, from1) for trans1, from1 in itertools.product(_all_rotations(), _paulis)),
)
def test_init_from_double_invalid(trans1, from1):
from2 = cirq.Pauli.by_relative_index(from1, 1)
# Test throws on invalid arguments
with pytest.raises(ValueError):
cirq.SingleQubitCliffordGate.from_double_map({from1: trans1, from2: trans1})
@pytest.mark.parametrize('trans,frm', itertools.product(_all_rotations(), _paulis))
def test_init_from_single_map_vs_kwargs(trans, frm):
from_str = str(frm).lower() + '_to'
# pylint: disable=unexpected-keyword-arg
gate_kw = cirq.SingleQubitCliffordGate.from_single_map(**{from_str: trans})
gate_map = cirq.SingleQubitCliffordGate.from_single_map({frm: trans})
assert gate_kw == gate_map
@pytest.mark.parametrize(
'trans,frm',
(
(trans, frm)
for trans, frm in itertools.product(_all_rotations(), _paulis)
if trans.to != frm
),
)
def test_init_90rot_from_single(trans, frm):
gate = cirq.SingleQubitCliffordGate.from_single_map({frm: trans})
assert gate.transform(frm) == trans
_assert_not_mirror(gate)
_assert_no_collision(gate)
# Check that it decomposes to one gate
assert len(gate.decompose_rotation()) == 1
# Check that this is a 90 degree rotation gate
assert (
gate.merged_with(gate).merged_with(gate).merged_with(gate) == cirq.SingleQubitCliffordGate.I
)
# Check that flipping the transform produces the inverse rotation
trans_rev = cirq.PauliTransform(trans.to, not trans.flip)
gate_rev = cirq.SingleQubitCliffordGate.from_single_map({frm: trans_rev})
assert gate ** -1 == gate_rev
@pytest.mark.parametrize(
'trans,frm',
(
(trans, frm)
for trans, frm in itertools.product(_all_rotations(), _paulis)
if trans.to == frm and trans.flip
),
)
def test_init_180rot_from_single(trans, frm):
gate = cirq.SingleQubitCliffordGate.from_single_map({frm: trans})
assert gate.transform(frm) == trans
_assert_not_mirror(gate)
_assert_no_collision(gate)
# Check that it decomposes to one gate
assert len(gate.decompose_rotation()) == 1
# Check that this is a 180 degree rotation gate
assert gate.merged_with(gate) == cirq.SingleQubitCliffordGate.I
@pytest.mark.parametrize(
'trans,frm',
(
(trans, frm)
for trans, frm in itertools.product(_all_rotations(), _paulis)
if trans.to == frm and not trans.flip
),
)
def test_init_ident_from_single(trans, frm):
gate = cirq.SingleQubitCliffordGate.from_single_map({frm: trans})
assert gate.transform(frm) == trans
_assert_not_mirror(gate)
_assert_no_collision(gate)
# Check that it decomposes to zero gates
assert len(gate.decompose_rotation()) == 0
# Check that this is an identity gate
assert gate == cirq.SingleQubitCliffordGate.I
@pytest.mark.parametrize(
'pauli,sqrt,expected',
(
(cirq.X, False, cirq.SingleQubitCliffordGate.X),
(cirq.Y, False, cirq.SingleQubitCliffordGate.Y),
(cirq.Z, False, cirq.SingleQubitCliffordGate.Z),
(cirq.X, True, cirq.SingleQubitCliffordGate.X_sqrt),
(cirq.Y, True, cirq.SingleQubitCliffordGate.Y_sqrt),
(cirq.Z, True, cirq.SingleQubitCliffordGate.Z_sqrt),
),
)
def test_init_from_pauli(pauli, sqrt, expected):
gate = cirq.SingleQubitCliffordGate.from_pauli(pauli, sqrt=sqrt)
assert gate == expected
def test_pow():
assert cirq.SingleQubitCliffordGate.X ** -1 == cirq.SingleQubitCliffordGate.X
assert cirq.SingleQubitCliffordGate.H ** -1 == cirq.SingleQubitCliffordGate.H
assert cirq.SingleQubitCliffordGate.X_sqrt == cirq.SingleQubitCliffordGate.X ** 0.5
assert cirq.SingleQubitCliffordGate.Y_sqrt == cirq.SingleQubitCliffordGate.Y ** 0.5
assert cirq.SingleQubitCliffordGate.Z_sqrt == cirq.SingleQubitCliffordGate.Z ** 0.5
assert cirq.SingleQubitCliffordGate.X_nsqrt == cirq.SingleQubitCliffordGate.X ** -0.5
assert cirq.SingleQubitCliffordGate.Y_nsqrt == cirq.SingleQubitCliffordGate.Y ** -0.5
assert cirq.SingleQubitCliffordGate.Z_nsqrt == cirq.SingleQubitCliffordGate.Z ** -0.5
assert cirq.SingleQubitCliffordGate.X_sqrt ** -1 == cirq.SingleQubitCliffordGate.X_nsqrt
assert cirq.inverse(cirq.SingleQubitCliffordGate.X_nsqrt) == (
cirq.SingleQubitCliffordGate.X_sqrt
)
with pytest.raises(TypeError):
_ = cirq.SingleQubitCliffordGate.Z ** 0.25
def test_init_from_quarter_turns():
eq = cirq.testing.EqualsTester()
eq.add_equality_group(
cirq.SingleQubitCliffordGate.from_quarter_turns(cirq.X, 0),
cirq.SingleQubitCliffordGate.from_quarter_turns(cirq.Y, 0),
cirq.SingleQubitCliffordGate.from_quarter_turns(cirq.Z, 0),
cirq.SingleQubitCliffordGate.from_quarter_turns(cirq.X, 4),
cirq.SingleQubitCliffordGate.from_quarter_turns(cirq.Y, 4),
cirq.SingleQubitCliffordGate.from_quarter_turns(cirq.Z, 4),
cirq.SingleQubitCliffordGate.from_quarter_turns(cirq.X, 8),
cirq.SingleQubitCliffordGate.from_quarter_turns(cirq.Y, 8),
cirq.SingleQubitCliffordGate.from_quarter_turns(cirq.Z, 8),
cirq.SingleQubitCliffordGate.from_quarter_turns(cirq.X, -4),
cirq.SingleQubitCliffordGate.from_quarter_turns(cirq.Y, -4),
cirq.SingleQubitCliffordGate.from_quarter_turns(cirq.Z, -4),
)
eq.add_equality_group(
cirq.SingleQubitCliffordGate.from_quarter_turns(cirq.X, 1),
cirq.SingleQubitCliffordGate.from_quarter_turns(cirq.X, 5),
cirq.SingleQubitCliffordGate.from_quarter_turns(cirq.X, 9),
cirq.SingleQubitCliffordGate.from_quarter_turns(cirq.X, -3),
)
eq.add_equality_group(
cirq.SingleQubitCliffordGate.from_quarter_turns(cirq.Y, 1),
cirq.SingleQubitCliffordGate.from_quarter_turns(cirq.Y, 5),
cirq.SingleQubitCliffordGate.from_quarter_turns(cirq.Y, 9),
cirq.SingleQubitCliffordGate.from_quarter_turns(cirq.Y, -3),
)
eq.add_equality_group(
cirq.SingleQubitCliffordGate.from_quarter_turns(cirq.Z, 1),
cirq.SingleQubitCliffordGate.from_quarter_turns(cirq.Z, 5),
cirq.SingleQubitCliffordGate.from_quarter_turns(cirq.Z, 9),
cirq.SingleQubitCliffordGate.from_quarter_turns(cirq.Z, -3),
)
eq.add_equality_group(
cirq.SingleQubitCliffordGate.from_quarter_turns(cirq.X, 2),
cirq.SingleQubitCliffordGate.from_quarter_turns(cirq.X, 6),
)
eq.add_equality_group(
cirq.SingleQubitCliffordGate.from_quarter_turns(cirq.X, 3),
cirq.SingleQubitCliffordGate.from_quarter_turns(cirq.X, 7),
)
@pytest.mark.parametrize('gate', _all_clifford_gates())
def test_init_from_quarter_turns_reconstruct(gate):
new_gate = functools.reduce(
cirq.SingleQubitCliffordGate.merged_with,
(
cirq.SingleQubitCliffordGate.from_quarter_turns(pauli, qt)
for pauli, qt in gate.decompose_rotation()
),
cirq.SingleQubitCliffordGate.I,
)
assert gate == new_gate
def test_init_invalid():
with pytest.raises(ValueError):
cirq.SingleQubitCliffordGate.from_single_map()
with pytest.raises(ValueError):
cirq.SingleQubitCliffordGate.from_single_map({})
with pytest.raises(ValueError):
cirq.SingleQubitCliffordGate.from_single_map(
{cirq.X: (cirq.X, False)}, y_to=(cirq.Y, False)
)
with pytest.raises(ValueError):
cirq.SingleQubitCliffordGate.from_single_map(
{cirq.X: (cirq.X, False), cirq.Y: (cirq.Y, False)}
)
with pytest.raises(ValueError):
cirq.SingleQubitCliffordGate.from_double_map()
with pytest.raises(ValueError):
cirq.SingleQubitCliffordGate.from_double_map({})
with pytest.raises(ValueError):
cirq.SingleQubitCliffordGate.from_double_map({cirq.X: (cirq.X, False)})
with pytest.raises(ValueError):
cirq.SingleQubitCliffordGate.from_double_map(x_to=(cirq.X, False))
with pytest.raises(ValueError):
cirq.SingleQubitCliffordGate.from_single_map(
{cirq.X: (cirq.Y, False), cirq.Y: (cirq.Z, False), cirq.Z: (cirq.X, False)}
)
with pytest.raises(ValueError):
cirq.SingleQubitCliffordGate.from_single_map(
{cirq.X: (cirq.X, False), cirq.Y: (cirq.X, False)}
)
def test_eq_ne_and_hash():
eq = EqualsTester()
for trans_x, trans_z in _all_rotation_pairs():
gate_gen = lambda: cirq.SingleQubitCliffordGate.from_xz_map(trans_x, trans_z)
eq.make_equality_group(gate_gen)
@pytest.mark.parametrize(
'gate,rep',
(
(cirq.SingleQubitCliffordGate.I, 'cirq.SingleQubitCliffordGate(X:+X, Y:+Y, Z:+Z)'),
(cirq.SingleQubitCliffordGate.H, 'cirq.SingleQubitCliffordGate(X:+Z, Y:-Y, Z:+X)'),
(cirq.SingleQubitCliffordGate.X, 'cirq.SingleQubitCliffordGate(X:+X, Y:-Y, Z:-Z)'),
(cirq.SingleQubitCliffordGate.X_sqrt, 'cirq.SingleQubitCliffordGate(X:+X, Y:+Z, Z:-Y)'),
),
)
def test_repr(gate, rep):
assert repr(gate) == rep
@pytest.mark.parametrize(
'gate,trans_y',
(
(cirq.SingleQubitCliffordGate.I, (cirq.Y, False)),
(cirq.SingleQubitCliffordGate.H, (cirq.Y, True)),
(cirq.SingleQubitCliffordGate.X, (cirq.Y, True)),
(cirq.SingleQubitCliffordGate.Y, (cirq.Y, False)),
(cirq.SingleQubitCliffordGate.Z, (cirq.Y, True)),
(cirq.SingleQubitCliffordGate.X_sqrt, (cirq.Z, False)),
(cirq.SingleQubitCliffordGate.X_nsqrt, (cirq.Z, True)),
(cirq.SingleQubitCliffordGate.Y_sqrt, (cirq.Y, False)),
(cirq.SingleQubitCliffordGate.Y_nsqrt, (cirq.Y, False)),
(cirq.SingleQubitCliffordGate.Z_sqrt, (cirq.X, True)),
(cirq.SingleQubitCliffordGate.Z_nsqrt, (cirq.X, False)),
),
)
def test_y_rotation(gate, trans_y):
assert gate.transform(cirq.Y) == trans_y
@pytest.mark.parametrize(
'gate,gate_equiv',
(
(cirq.SingleQubitCliffordGate.I, cirq.X ** 0),
(cirq.SingleQubitCliffordGate.H, cirq.H),
(cirq.SingleQubitCliffordGate.X, cirq.X),
(cirq.SingleQubitCliffordGate.Y, cirq.Y),
(cirq.SingleQubitCliffordGate.Z, cirq.Z),
(cirq.SingleQubitCliffordGate.X_sqrt, cirq.X ** 0.5),
(cirq.SingleQubitCliffordGate.X_nsqrt, cirq.X ** -0.5),
(cirq.SingleQubitCliffordGate.Y_sqrt, cirq.Y ** 0.5),
(cirq.SingleQubitCliffordGate.Y_nsqrt, cirq.Y ** -0.5),
(cirq.SingleQubitCliffordGate.Z_sqrt, cirq.Z ** 0.5),
(cirq.SingleQubitCliffordGate.Z_nsqrt, cirq.Z ** -0.5),
),
)
def test_decompose(gate, gate_equiv):
q0 = cirq.NamedQubit('q0')
mat = cirq.Circuit(gate(q0)).unitary()
mat_check = cirq.Circuit(
gate_equiv(q0),
).unitary()
assert_allclose_up_to_global_phase(mat, mat_check, rtol=1e-7, atol=1e-7)
@pytest.mark.parametrize(
'gate,gate_equiv',
(
(cirq.SingleQubitCliffordGate.I, cirq.X ** 0),
(cirq.SingleQubitCliffordGate.H, cirq.H),
(cirq.SingleQubitCliffordGate.X, cirq.X),
(cirq.SingleQubitCliffordGate.Y, cirq.Y),
(cirq.SingleQubitCliffordGate.Z, cirq.Z),
(cirq.SingleQubitCliffordGate.X_sqrt, cirq.X ** 0.5),
(cirq.SingleQubitCliffordGate.X_nsqrt, cirq.X ** -0.5),
(cirq.SingleQubitCliffordGate.Y_sqrt, cirq.Y ** 0.5),
(cirq.SingleQubitCliffordGate.Y_nsqrt, cirq.Y ** -0.5),
(cirq.SingleQubitCliffordGate.Z_sqrt, cirq.Z ** 0.5),
(cirq.SingleQubitCliffordGate.Z_nsqrt, cirq.Z ** -0.5),
),
)
def test_known_matrix(gate, gate_equiv):
assert cirq.has_unitary(gate)
mat = cirq.unitary(gate)
mat_check = cirq.unitary(gate_equiv)
assert_allclose_up_to_global_phase(mat, mat_check, rtol=1e-7, atol=1e-7)
@pytest.mark.parametrize('gate', _all_clifford_gates())
def test_inverse(gate):
assert gate == cirq.inverse(cirq.inverse(gate))
@pytest.mark.parametrize('gate', _all_clifford_gates())
def test_inverse_matrix(gate):
q0 = cirq.NamedQubit('q0')
mat = cirq.Circuit(gate(q0)).unitary()
mat_inv = cirq.Circuit(cirq.inverse(gate)(q0)).unitary()
assert_allclose_up_to_global_phase(mat, mat_inv.T.conj(), rtol=1e-7, atol=1e-7)
def test_commutes_notimplemented_type():
with pytest.raises(TypeError):
cirq.commutes(cirq.SingleQubitCliffordGate.X, 'X')
assert cirq.commutes(cirq.SingleQubitCliffordGate.X, 'X', default='default') == 'default'
with pytest.raises(TypeError):
cirq.commutes(cirq.CliffordGate.X, 'X')
assert cirq.commutes(cirq.CliffordGate.X, 'X', default='default') == 'default'
@pytest.mark.parametrize(
'gate,other', itertools.product(_all_clifford_gates(), _all_clifford_gates())
)
def test_commutes_single_qubit_gate(gate, other):
q0 = cirq.NamedQubit('q0')
gate_op = gate(q0)
other_op = other(q0)
mat = cirq.Circuit(
gate_op,
other_op,
).unitary()
mat_swap = cirq.Circuit(
other_op,
gate_op,
).unitary()
commutes = cirq.commutes(gate, other)
commutes_check = cirq.allclose_up_to_global_phase(mat, mat_swap)
assert commutes == commutes_check
# Test after switching order
mat_swap = cirq.Circuit(
gate.equivalent_gate_before(other)(q0),
gate_op,
).unitary()
assert_allclose_up_to_global_phase(mat, mat_swap, rtol=1e-7, atol=1e-7)
@pytest.mark.parametrize('gate', _all_clifford_gates())
def test_parses_single_qubit_gate(gate):
assert gate == cirq.read_json(json_text=(cirq.to_json(gate)))
@pytest.mark.parametrize(
'gate,pauli,half_turns',
itertools.product(_all_clifford_gates(), _paulis, (1.0, 0.25, 0.5, -0.5)),
)
def test_commutes_pauli(gate, pauli, half_turns):
# TODO(#4328) cirq.X**1 should be _PauliX instead of XPowGate
pauli_gate = pauli if half_turns == 1 else pauli ** half_turns
q0 = cirq.NamedQubit('q0')
mat = cirq.Circuit(
gate(q0),
pauli_gate(q0),
).unitary()
mat_swap = cirq.Circuit(
pauli_gate(q0),
gate(q0),
).unitary()
commutes = cirq.commutes(gate, pauli_gate)
commutes_check = np.allclose(mat, mat_swap)
assert commutes == commutes_check, f"gate: {gate}, pauli {pauli}"
def test_to_clifford_tableau_util_function():
tableau = cirq.ops.clifford_gate._to_clifford_tableau(
x_to=cirq.PauliTransform(to=cirq.X, flip=False),
z_to=cirq.PauliTransform(to=cirq.Z, flip=False),
)
assert tableau == cirq.CliffordTableau(num_qubits=1, initial_state=0)
tableau = cirq.ops.clifford_gate._to_clifford_tableau(
x_to=cirq.PauliTransform(to=cirq.X, flip=False),
z_to=cirq.PauliTransform(to=cirq.Z, flip=True),
)
assert tableau == cirq.CliffordTableau(num_qubits=1, initial_state=1)
tableau = cirq.ops.clifford_gate._to_clifford_tableau(
rotation_map={
cirq.X: cirq.PauliTransform(to=cirq.X, flip=False),
cirq.Z: cirq.PauliTransform(to=cirq.Z, flip=False),
}
)
assert tableau == cirq.CliffordTableau(num_qubits=1, initial_state=0)
tableau = cirq.ops.clifford_gate._to_clifford_tableau(
rotation_map={
cirq.X: cirq.PauliTransform(to=cirq.X, flip=False),
cirq.Z: cirq.PauliTransform(to=cirq.Z, flip=True),
}
)
assert tableau == cirq.CliffordTableau(num_qubits=1, initial_state=1)
with pytest.raises(ValueError):
cirq.ops.clifford_gate._to_clifford_tableau()
@pytest.mark.parametrize(
'gate,sym,exp',
(
(cirq.SingleQubitCliffordGate.I, 'I', 1),
(cirq.SingleQubitCliffordGate.H, 'H', 1),
(cirq.SingleQubitCliffordGate.X, 'X', 1),
(cirq.SingleQubitCliffordGate.X_sqrt, 'X', 0.5),
(cirq.SingleQubitCliffordGate.X_nsqrt, 'X', -0.5),
(
cirq.SingleQubitCliffordGate.from_xz_map((cirq.Y, False), (cirq.X, True)),
'(X^-0.5-Z^0.5)',
1,
),
),
)
def test_text_diagram_info(gate, sym, exp):
assert cirq.circuit_diagram_info(gate) == cirq.CircuitDiagramInfo(
wire_symbols=(sym,), exponent=exp
)
@pytest.mark.parametrize(
"clifford_gate",
(
cirq.SingleQubitCliffordGate.I,
cirq.SingleQubitCliffordGate.H,
cirq.SingleQubitCliffordGate.X,
cirq.SingleQubitCliffordGate.Y,
cirq.SingleQubitCliffordGate.Z,
cirq.SingleQubitCliffordGate.X_sqrt,
cirq.SingleQubitCliffordGate.Y_sqrt,
cirq.SingleQubitCliffordGate.Z_sqrt,
cirq.SingleQubitCliffordGate.X_nsqrt,
cirq.SingleQubitCliffordGate.Y_nsqrt,
cirq.SingleQubitCliffordGate.Z_nsqrt,
),
)
def test_from_unitary(clifford_gate):
u = cirq.unitary(clifford_gate)
result_gate = cirq.SingleQubitCliffordGate.from_unitary(u)
assert result_gate == clifford_gate
def test_from_unitary_with_phase_shift():
u = np.exp(0.42j) * cirq.unitary(cirq.SingleQubitCliffordGate.Y_sqrt)
gate = cirq.SingleQubitCliffordGate.from_unitary(u)
assert gate == cirq.SingleQubitCliffordGate.Y_sqrt
def test_from_unitary_not_clifford():
# Not a single-qubit gate.
u = cirq.unitary(cirq.CNOT)
assert cirq.SingleQubitCliffordGate.from_unitary(u) is None
# Not an unitary matrix.
u = 2 * cirq.unitary(cirq.X)
assert cirq.SingleQubitCliffordGate.from_unitary(u) is None
# Not a Clifford gate.
u = cirq.unitary(cirq.T)
assert cirq.SingleQubitCliffordGate.from_unitary(u) is None
@pytest.mark.parametrize('trans_x,trans_z', _all_rotation_pairs())
def test_to_phased_xz_gate(trans_x, trans_z):
gate = cirq.SingleQubitCliffordGate.from_xz_map(trans_x, trans_z)
actual_phased_xz_gate = gate.to_phased_xz_gate()._canonical()
expect_phased_xz_gates = cirq.PhasedXZGate.from_matrix(cirq.unitary(gate))
assert np.isclose(actual_phased_xz_gate.x_exponent, expect_phased_xz_gates.x_exponent)
assert np.isclose(actual_phased_xz_gate.z_exponent, expect_phased_xz_gates.z_exponent)
assert np.isclose(
actual_phased_xz_gate.axis_phase_exponent, expect_phased_xz_gates.axis_phase_exponent
)
def test_from_xz_to_clifford_tableau():
seen_tableau = []
for trans_x, trans_z in _all_rotation_pairs():
tableau = cirq.SingleQubitCliffordGate.from_xz_map(trans_x, trans_z).clifford_tableau
tableau_number = sum(2 ** i * t for i, t in enumerate(tableau.matrix().ravel()))
tableau_number = tableau_number * 4 + 2 * tableau.rs[0] + tableau.rs[1]
seen_tableau.append(tableau_number)
# Satisfy the symplectic property
assert sum(tableau.matrix()[0, :2] * tableau.matrix()[1, 1::-1]) % 2 == 1
# Should not have any duplication.
assert len(set(seen_tableau)) == 24
@pytest.mark.parametrize(
'clifford_gate,standard_gate',
[
(cirq.CliffordGate.I, cirq.I),
(cirq.CliffordGate.X, cirq.X),
(cirq.CliffordGate.Y, cirq.Y),
(cirq.CliffordGate.Z, cirq.Z),
(cirq.CliffordGate.H, cirq.H),
(cirq.CliffordGate.S, cirq.S),
(cirq.CliffordGate.CNOT, cirq.CNOT),
(cirq.CliffordGate.CZ, cirq.CZ),
(cirq.CliffordGate.SWAP, cirq.SWAP),
],
)
def test_common_clifford_gate(clifford_gate, standard_gate):
    # cirq.unitary relies on the _decompose_ methods.
u_c = cirq.unitary(clifford_gate)
u_s = cirq.unitary(standard_gate)
cirq.testing.assert_allclose_up_to_global_phase(u_c, u_s, atol=1e-8)
@pytest.mark.parametrize('clifford_gate_name', ("I", "X", "Y", "Z", "H", "S", "CNOT", "CZ", "SWAP"))
def test_common_clifford_gate_caching(clifford_gate_name):
cache_name = f"_{clifford_gate_name}"
delattr(cirq.CliffordGate, cache_name)
assert not hasattr(cirq.CliffordGate, cache_name)
_ = getattr(cirq.CliffordGate, clifford_gate_name)
assert hasattr(cirq.CliffordGate, cache_name)
def test_multi_qubit_clifford_pow():
assert cirq.CliffordGate.X ** -1 == cirq.CliffordGate.X
assert cirq.CliffordGate.H ** -1 == cirq.CliffordGate.H
assert cirq.CliffordGate.S ** 2 == cirq.CliffordGate.Z
assert cirq.CliffordGate.S ** -1 == cirq.CliffordGate.S ** 3
assert cirq.CliffordGate.S ** -3 == cirq.CliffordGate.S
assert cirq.CliffordGate.CNOT ** 3 == cirq.CliffordGate.CNOT
assert cirq.CliffordGate.CNOT ** -3 == cirq.CliffordGate.CNOT
with pytest.raises(TypeError):
_ = cirq.CliffordGate.Z ** 0.25
def test_stabilizer_effect():
assert cirq.has_stabilizer_effect(cirq.CliffordGate.X)
assert cirq.has_stabilizer_effect(cirq.CliffordGate.H)
assert cirq.has_stabilizer_effect(cirq.CliffordGate.S)
assert cirq.has_stabilizer_effect(cirq.CliffordGate.CNOT)
assert cirq.has_stabilizer_effect(cirq.CliffordGate.CZ)
qubits = cirq.LineQubit.range(2)
gate = cirq.CliffordGate.from_op_list(
[cirq.H(qubits[1]), cirq.CZ(*qubits), cirq.H(qubits[1])], qubits
)
assert cirq.has_stabilizer_effect(gate)
def test_clifford_gate_from_op_list():
    # Since from_op_list() delegates to _act_on_() and then to tableau.then(), and then() is already
    # covered by lots of random circuit cases, here we just test a few well-known relationships.
qubit = cirq.NamedQubit('test')
gate = cirq.CliffordGate.from_op_list([cirq.X(qubit), cirq.Z(qubit)], [qubit])
assert gate == cirq.CliffordGate.Y # The tableau ignores the global phase
gate = cirq.CliffordGate.from_op_list([cirq.Z(qubit), cirq.X(qubit)], [qubit])
assert gate == cirq.CliffordGate.Y # The tableau ignores the global phase
gate = cirq.CliffordGate.from_op_list([cirq.X(qubit), cirq.Y(qubit)], [qubit])
assert gate == cirq.CliffordGate.Z # The tableau ignores the global phase
gate = cirq.CliffordGate.from_op_list([cirq.Z(qubit), cirq.X(qubit)], [qubit])
assert gate == cirq.CliffordGate.Y # The tableau ignores the global phase
# Two qubits gates
qubits = cirq.LineQubit.range(2)
gate = cirq.CliffordGate.from_op_list(
[cirq.H(qubits[1]), cirq.CZ(*qubits), cirq.H(qubits[1])], qubits
)
assert gate == cirq.CliffordGate.CNOT
gate = cirq.CliffordGate.from_op_list(
[cirq.H(qubits[1]), cirq.CNOT(*qubits), cirq.H(qubits[1])], qubits
)
assert gate == cirq.CliffordGate.CZ
# Note the order of qubits matters
gate = cirq.CliffordGate.from_op_list(
[cirq.H(qubits[0]), cirq.CZ(qubits[1], qubits[0]), cirq.H(qubits[0])], qubits
)
assert gate != cirq.CliffordGate.CNOT
# But if we reverse the qubit_order, they will equal again.
gate = cirq.CliffordGate.from_op_list(
[cirq.H(qubits[0]), cirq.CZ(qubits[1], qubits[0]), cirq.H(qubits[0])], qubits[::-1]
)
assert gate == cirq.CliffordGate.CNOT
with pytest.raises(
ValueError, match="only be constructed from the operations that has stabilizer effect"
):
cirq.CliffordGate.from_op_list([cirq.T(qubit)], [qubit])
def test_clifford_gate_from_tableau():
t = cirq.CliffordGate.X.clifford_tableau
assert cirq.CliffordGate.from_clifford_tableau(t) == cirq.CliffordGate.X
t = cirq.CliffordGate.H.clifford_tableau
assert cirq.CliffordGate.from_clifford_tableau(t) == cirq.CliffordGate.H
t = cirq.CliffordGate.CNOT.clifford_tableau
assert cirq.CliffordGate.from_clifford_tableau(t) == cirq.CliffordGate.CNOT
with pytest.raises(ValueError):
t = cirq.CliffordTableau(num_qubits=1)
t.xs = np.array([1, 1]).reshape(2, 1)
        t.zs = np.array([1, 1]).reshape(2, 1)  # This violates the symplectic property.
cirq.CliffordGate.from_clifford_tableau(t)
with pytest.raises(ValueError, match="Input argument has to be a CliffordTableau instance."):
cirq.CliffordGate.from_clifford_tableau(1)
def test_multi_clifford_decompose_by_unitary():
# Construct a random clifford gate:
    n, num_ops = 5, 20  # kept small: this check relies on the unitary, so large qubit counts are infeasible
gate_candidate = [cirq.X, cirq.Y, cirq.Z, cirq.H, cirq.S, cirq.CNOT, cirq.CZ]
for seed in range(100):
prng = np.random.RandomState(seed)
qubits = cirq.LineQubit.range(n)
ops = []
for _ in range(num_ops):
g = prng.randint(len(gate_candidate))
indices = (prng.randint(n),) if g < 5 else prng.choice(n, 2, replace=False)
ops.append(gate_candidate[g].on(*[qubits[i] for i in indices]))
gate = cirq.CliffordGate.from_op_list(ops, qubits)
decomposed_ops = cirq.decompose(gate.on(*qubits))
circ = cirq.Circuit(decomposed_ops)
circ.append(cirq.I.on_each(qubits)) # make sure the dimension aligned.
cirq.testing.assert_allclose_up_to_global_phase(
cirq.unitary(gate), cirq.unitary(circ), atol=1e-7
)
def test_pad_tableau_bad_input():
with pytest.raises(
ValueError, match="Input axes of padding should match with the number of qubits"
):
tableau = cirq.CliffordTableau(num_qubits=3)
cirq.ops.clifford_gate._pad_tableau(tableau, num_qubits_after_padding=4, axes=[1, 2])
with pytest.raises(
ValueError, match='The number of qubits in the input tableau should not be larger than'
):
tableau = cirq.CliffordTableau(num_qubits=3)
cirq.ops.clifford_gate._pad_tableau(tableau, num_qubits_after_padding=2, axes=[0, 1, 2])
def test_pad_tableau():
tableau = cirq.CliffordTableau(num_qubits=1)
padded_tableau = cirq.ops.clifford_gate._pad_tableau(
tableau, num_qubits_after_padding=2, axes=[0]
)
assert padded_tableau == cirq.CliffordTableau(num_qubits=2)
tableau = cirq.CliffordTableau(num_qubits=1, initial_state=1)
padded_tableau = cirq.ops.clifford_gate._pad_tableau(
tableau, num_qubits_after_padding=1, axes=[0]
)
assert padded_tableau == cirq.CliffordGate.X.clifford_tableau
# Tableau for H
# [0 1 0]
# [1 0 0]
tableau = cirq.CliffordGate.H.clifford_tableau
padded_tableau = cirq.ops.clifford_gate._pad_tableau(
tableau, num_qubits_after_padding=2, axes=[0]
)
np.testing.assert_equal(
padded_tableau.matrix().astype(np.int64),
np.array(
[
[0, 0, 1, 0],
[0, 1, 0, 0],
[1, 0, 0, 0],
[0, 0, 0, 1],
]
),
)
np.testing.assert_equal(padded_tableau.rs.astype(np.int64), np.zeros(4))
# The tableau of H again but pad for another ax
tableau = cirq.CliffordGate.H.clifford_tableau
padded_tableau = cirq.ops.clifford_gate._pad_tableau(
tableau, num_qubits_after_padding=2, axes=[1]
)
np.testing.assert_equal(
padded_tableau.matrix().astype(np.int64),
np.array(
[
[1, 0, 0, 0],
[0, 0, 0, 1],
[0, 0, 1, 0],
[0, 1, 0, 0],
]
),
)
np.testing.assert_equal(padded_tableau.rs.astype(np.int64), np.zeros(4))
def test_clifford_gate_act_on_small_case():
# Note this is also covered by the `from_op_list` one, etc.
qubits = cirq.LineQubit.range(5)
args = cirq.ActOnCliffordTableauArgs(
tableau=cirq.CliffordTableau(num_qubits=5),
qubits=qubits,
prng=np.random.RandomState(),
)
expected_args = cirq.ActOnCliffordTableauArgs(
tableau=cirq.CliffordTableau(num_qubits=5),
qubits=qubits,
prng=np.random.RandomState(),
)
cirq.act_on(cirq.H, expected_args, qubits=[qubits[0]], allow_decompose=False)
cirq.act_on(cirq.CliffordGate.H, args, qubits=[qubits[0]], allow_decompose=False)
assert args.tableau == expected_args.tableau
cirq.act_on(cirq.CNOT, expected_args, qubits=[qubits[0], qubits[1]], allow_decompose=False)
cirq.act_on(cirq.CliffordGate.CNOT, args, qubits=[qubits[0], qubits[1]], allow_decompose=False)
assert args.tableau == expected_args.tableau
cirq.act_on(cirq.H, expected_args, qubits=[qubits[0]], allow_decompose=False)
cirq.act_on(cirq.CliffordGate.H, args, qubits=[qubits[0]], allow_decompose=False)
assert args.tableau == expected_args.tableau
cirq.act_on(cirq.S, expected_args, qubits=[qubits[0]], allow_decompose=False)
cirq.act_on(cirq.CliffordGate.S, args, qubits=[qubits[0]], allow_decompose=False)
assert args.tableau == expected_args.tableau
cirq.act_on(cirq.X, expected_args, qubits=[qubits[2]], allow_decompose=False)
cirq.act_on(cirq.CliffordGate.X, args, qubits=[qubits[2]], allow_decompose=False)
assert args.tableau == expected_args.tableau
def test_clifford_gate_act_on_large_case():
    n, num_ops = 50, 1000  # no unitary is needed here, so even this size stays fast
gate_candidate = [cirq.X, cirq.Y, cirq.Z, cirq.H, cirq.S, cirq.CNOT, cirq.CZ]
for seed in range(10):
prng = np.random.RandomState(seed)
t1 = cirq.CliffordTableau(num_qubits=n)
t2 = cirq.CliffordTableau(num_qubits=n)
qubits = cirq.LineQubit.range(n)
args1 = cirq.ActOnCliffordTableauArgs(tableau=t1, qubits=qubits, prng=prng)
args2 = cirq.ActOnCliffordTableauArgs(tableau=t2, qubits=qubits, prng=prng)
ops = []
for _ in range(num_ops):
g = prng.randint(len(gate_candidate))
indices = (prng.randint(n),) if g < 5 else prng.choice(n, 2, replace=False)
cirq.act_on(
gate_candidate[g], args1, qubits=[qubits[i] for i in indices], allow_decompose=False
)
ops.append(gate_candidate[g].on(*[qubits[i] for i in indices]))
compiled_gate = cirq.CliffordGate.from_op_list(ops, qubits)
cirq.act_on(compiled_gate, args2, qubits)
assert args1.tableau == args2.tableau
def test_clifford_gate_act_on_ch_form():
    # Although the CH form is not supported via _act_on_, it falls back to the
    # decomposition method and applies the gate through its decomposed ops.
    # Here we run it for coverage only.
args = cirq.ActOnStabilizerCHFormArgs(
initial_state=cirq.StabilizerStateChForm(num_qubits=2, initial_state=1),
qubits=cirq.LineQubit.range(2),
prng=np.random.RandomState(),
)
cirq.act_on(cirq.CliffordGate.X, args, qubits=cirq.LineQubit.range(1))
np.testing.assert_allclose(args.state.state_vector(), np.array([0, 0, 0, 1]))
def test_clifford_gate_act_on_fail():
with pytest.raises(TypeError, match="Failed to act"):
cirq.act_on(cirq.CliffordGate.X, DummyActOnArgs(), qubits=())
|
the-stack_0_8045 | #Crie um programa onde 4 jogadores um dado e tenham
#resultados aleatórios.
##Guarde esse resultados em um dicionário. No final, coloque esse
#dicionário em ordem ,sabendo que o vencedor tirou o maior número na dado.
from random import randint
from time import sleep
from operator import itemgetter
jogadores = {'Player 1': randint(1, 6),
             'Player 2': randint(1, 6),
             'Player 3': randint(1, 6),
             'Player 4': randint(1, 6)}
for j, d in jogadores.items():
    print(f'{j} rolled {d} on the die')
sleep(1)
print('-=' * 20)
print('-----RANKING-----')
sleep(1)
ranking = sorted(jogadores.items(), key=itemgetter(1), reverse=True)
for i, v in enumerate(ranking):
    print(f'Place {i+1}: {v[0]} with {v[1]}')
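# Illustrative note (not part of the original exercise): sorted() with key=itemgetter(1)
# orders the (player, roll) pairs by the roll value, and reverse=True puts the highest
# roll first, e.g. a hypothetical round could yield
# [('Player 3', 6), ('Player 1', 4), ('Player 4', 2), ('Player 2', 1)].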
|
the-stack_0_8046 | # -*- coding: utf-8 -*-
import os
import click
from matplusc3d import combine_files
@click.command()
@click.argument('c3dfile', metavar='[filename.c3d]', required=False, type=click.Path())
@click.option('--overwrite', is_flag=True, help="Overwrite existing c3d files. "
              "If not set, a new file 'filename_updated.c3d' will be created")
def main(c3dfile, overwrite):
"""Command line tool for adding virtual makers to Coda Motion
C3D files from the information in exported Mat files.
The tool assume the c3d and mat files have the same filename
but different extensions. If called without arguments the tool
will find all matching c3d/mat files in the working directory.
"""
if not c3dfile:
click.confirm('Combine all C3D/Mat files in current dir?', abort=True)
filelist = [(f, os.path.splitext(f)[0]+'.mat')
for f in os.listdir('.') if f.upper().endswith('.C3D')]
elif os.path.isfile(c3dfile):
matfile = os.path.splitext(c3dfile)[0]+'.mat'
if not os.path.isfile(matfile):
raise click.UsageError('No mat file found matching {}'
''.format(c3dfile))
filelist = [(c3dfile, matfile)]
else:
raise click.UsageError('No such file {}'.format(c3dfile))
filelist = [(str(f), str(m)) for f, m in filelist
if os.path.exists(f) and os.path.exists(m)]
for c3dfile, matfile in filelist:
postfix = '' if overwrite else '_updated'
new_c3d = combine_files(c3dfile, matfile, postfix=postfix)
print('Updated: {}'.format(new_c3d))
if __name__ == "__main__":
main()
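# Illustrative usage (not part of the original module; the script and file names below are
# hypothetical). Assuming this file is saved as matplusc3d_cli.py:
#
#   python matplusc3d_cli.py session01.c3d               # writes session01_updated.c3d
#   python matplusc3d_cli.py session01.c3d --overwrite   # overwrites session01.c3d in place
#   python matplusc3d_cli.py                              # asks to confirm, then processes all c3d/mat pairs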
|
the-stack_0_8047 | #
# MIT License
#
# Copyright (c) 2020 Pablo Rodriguez Nava, @pablintino
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
import pytest
from app import db, create_app
from tests import setup_db, teardown_db, clean_db
class TestConfig:
SQLALCHEMY_DATABASE_URI = 'mssql+pyodbc://test:[email protected],4490/altium_db_test_ut?driver=ODBC+Driver+17+for+SQL+Server'
REDIS_URL = 'redis://'
SQLALCHEMY_TRACK_MODIFICATIONS = False
@pytest.fixture(scope="session")
def app():
yield create_app(config_class=TestConfig)
@pytest.fixture(scope="session")
def database(app):
assert app is not None
setup_db(app)
yield db
teardown_db()
@pytest.fixture(scope="function")
def db_session(database, app):
assert app is not None
with app.app_context():
clean_db()
yield database.session
database.session.rollback()
|
the-stack_0_8048 | from django.urls import path, include
from .viewsets.numbers import (
NumbersViewset,
DetailNumbersViewset,
StartNumber,
StopNumber,
StatusNumber,
LoginNumber,
)
from .viewsets.messages import (
WhatsappChatAllViewset,
WhatsappChatViewset,
WhatsappChatDetailViewset,
WhatsappMediaViewset,
WhastappMediaDetailViewset
)
from .viewsets.contacts import ContactsViewset, GroupContactsViewset
from .viewsets.media import UsersMediaViewset
from rest_framework.routers import DefaultRouter
router = DefaultRouter()
router.register('media', UsersMediaViewset, base_name='api_media')
router.register('group', GroupContactsViewset, base_name='api_group')
router.register('contacts', ContactsViewset, base_name='api_contacts')
urlpatterns = [
path('', include(router.urls)),
path('auth/', include('users_auth.urls')),
# path('group/<int:group_id>/contacts/<int:contact_id>/', GroupContactsViewset.as_view(
# {"get":"contact_detail", "put":"contact_detail", "delete":"contact_detail"})),
# path('contacts/', ContactsViewset.as_view(), name='api_contacts'),
path('numbers/', NumbersViewset.as_view(), name='api_numbers'),
path('numbers/<int:pk>/', DetailNumbersViewset.as_view(), name='api_number_detail'),
path('numbers/<int:pk>/start/', StartNumber.as_view(), name='api_number_start'),
path('numbers/<int:pk>/stop/', StopNumber.as_view(), name='api_number_stop'),
path('numbers/<int:pk>/status/', StatusNumber.as_view(), name='api_number_status'),
path('numbers/<int:pk>/login/', LoginNumber.as_view(), name='api_number_login'),
# path('numbers/<int:pk>/logout/'),
path('numbers/<int:pk>/chats/', WhatsappChatViewset.as_view(), name='api_messages_chat'),
path('numbers/<int:pk>/chats/<int:id>/', WhatsappChatDetailViewset.as_view(), name='api_messages_chat_detail'),
path('numbers/<int:pk>/media/', WhatsappMediaViewset.as_view(), name='api_messages_media'),
path('numbers/<int:pk>/media/<int:id>/', WhastappMediaDetailViewset.as_view(), name='api_messages_media_detail'),
path('messages/', WhatsappChatAllViewset.as_view(), name='api_messages_all')
] |
the-stack_0_8049 | # Copyright (c) 2019 The Johns Hopkins University/Applied Physics Laboratory
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import sys
from kmip.core import enums
from kmip.demos import utils
from kmip.pie import client
# NOTE: This demo script shows how to delete the first Name attribute from
# the user-specified object. The object *must* have at least one Name
# attribute for attribute deletion to work. Otherwise, the client
# call to delete_attribute will fail.
if __name__ == '__main__':
logger = utils.build_console_logger(logging.INFO)
parser = utils.build_cli_parser(enums.Operation.DELETE_ATTRIBUTE)
opts, args = parser.parse_args(sys.argv[1:])
if opts.uuid is None:
logger.error("No UUID provided, existing early from demo.")
sys.exit()
with client.ProxyKmipClient(
config=opts.config,
config_file=opts.config_file
) as c:
try:
object_id, modified_attribute = c.delete_attribute(
unique_identifier=opts.uuid,
attribute_name="Name",
attribute_index=0
)
logger.info(
"Successfully deleted 'Name' attribute from object: {}".format(
object_id
)
)
logger.info("Deleted attribute: {}".format(modified_attribute))
except Exception as e:
logger.error(e)
|
the-stack_0_8050 | """
Project: SSITH CyberPhysical Demonstrator
Name: component.py
Author: Ethan Lew
Date: 02 October 2020
an object to establish communication and messaging between services
pub/sub approach -- establish service subscribers and publishers at
initialization. register topics and callbacks for service components.
"""
from .message import Message, Envelope, MessageLevel
import threading
import typing as typ
import zmq
import time
import enum
import functools
import collections.abc
import struct
class ComponentStatus(enum.Enum):
"""component states and commands"""
READY = enum.auto()
    ERROR = enum.auto()
    EXIT = enum.auto()  # yielded by get_receiver when a component is stopping
class ComponentPollerStatus(enum.Enum):
"""component poller states"""
POLLER_START = enum.auto()
POLLER_END = enum.auto()
def coroutine(func):
"""decorator for coroutine creation/initialization"""
@functools.wraps(func)
def primer(*args, **kwargs):
gen = func(*args, **kwargs)
next(gen)
return gen
return primer
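# Illustrative sketch (not part of the original module): the decorator above calls next()
# once, so a decorated generator is already paused at its first `yield` and can accept
# send() immediately. The `echo` coroutine below is a hypothetical example.
#
#     @coroutine
#     def echo():
#         while True:
#             received = yield
#             print(received)
#
#     e = echo()         # already primed by the decorator, no explicit next() needed
#     e.send("hello")    # resumes at the yield and prints "hello"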
class ComponentMetaDict(dict):
"""resolve overloaded attributes"""
def __setitem__(self, key, value):
if hasattr(value, 'recv_spec'):
if "_recv_methods" not in self:
self["_recv_methods"] = {}
self["_recv_methods"][getattr(value, 'recv_spec')] = value
elif hasattr(value, 'recv_can_spec'):
if "_recv_can_methods" not in self:
self["_recv_can_methods"] = {}
self["_recv_can_methods"][getattr(value, 'recv_can_spec')] = value
else:
super().__setitem__(key, value)
    def __getitem__(self, key):
if key not in self and '_' and key.isupper():
return key.upper()
else:
return super().__getitem__(key)
class ComponentMeta(type):
"""recognize special decorators, and build out the implied attributes"""
@classmethod
def __prepare__(metacls, name, bases):
def _register_can(*args):
recv_spec = args
def decorate(func):
func.recv_can_spec = recv_spec
return func
return decorate
def _register_topic(*args):
recv_spec = args
def decorate(func):
func.recv_spec = recv_spec
return func
return decorate
d = ComponentMetaDict()
d["recv_topic"] = _register_topic
# TODO: add CAN registry in Component, which may not be the best place for it
d["recv_can"] = _register_can
return d
@classmethod
def _build(cls, attributes):
pass
def __new__(meta, clsname, bases, attributes):
del attributes["recv_topic"]
del attributes["recv_can"]
cls = super().__new__(meta, clsname, bases, attributes)
return cls
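# Illustrative sketch (not part of the original module): because ComponentMeta injects
# `recv_topic` into the class namespace, a Component subclass can register topic handlers
# declaratively. The class name and topic below are hypothetical.
#
#     class HeartbeatMonitor(Component):
#         @recv_topic("heartbeat-status")
#         def _on_heartbeat(self, msg, t):
#             # stored in _recv_methods under the key ("heartbeat-status",) and invoked by
#             # _process_recvs for every message received on that topic
#             print(f"heartbeat at {t}: {msg.message}")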
class ThreadExiting(threading.Thread):
"""ThreadExiting presents a thread associated with a stop event. Computations done by the
thread are decomposed into poller initialization, polling iteration, and deinitialization.
This permits the thread to stop gracefully, attending to resources used."""
def __init__(self, name, *args, sample_frequency=60, **kwargs):
super().__init__(name=name, *args, daemon=True, **kwargs)
self.stop_evt = threading.Event()
self.poller_start_time = None
self.polling_thread = None
self.sample_period = 1.0 / sample_frequency
def on_poll(self, t):
pass
def on_start(self):
pass
def run(self):
self.on_start()
while not self.stopped:
it_start = time.time()
if self.poller_start_time is None:
self.poller_start_time = time.time()
t = self.poller_start_time
else:
t = time.time() - self.poller_start_time
self.on_poll(t)
it_end = time.time()
it_diff = it_end - it_start
if (self.sample_period - it_diff) > 0:
time.sleep(self.sample_period - it_diff)
self.on_exit()
def on_exit(self):
pass
def stop(self):
self.stop_evt.set()
@property
def stopped(self):
return self.stop_evt.is_set()
def exit(self):
self.stop()
self.join()
class Component(ThreadExiting, metaclass=ComponentMeta):
def __init__(self, name: str,
in_descr: typ.List[typ.Tuple],
out_descr: typ.List[typ.Tuple],
*args,
                 ip_addr='127.0.0.1',
**kwargs):
"""
:param in_descr: port description tuple (elements are of the form (port number, topic name))
:param out_descr: port description tuple (elements are of the form (port number, topic name))
"""
super(Component, self).__init__(name, *args, **kwargs)
self.ip_addr = ip_addr
self._in_ports: typ.Set[typ.Tuple] = set(in_descr)
self._out_ports: typ.Set[typ.Tuple] = set(out_descr)
self._name = name
self._zmq_context: typ.Union[zmq.Context, None] = None
self._in_socks: typ.List[zmq.socket.Socket] = []
self._out_sock: typ.Union[zmq.socket.Socket, None] = None
# time since initialization
self._epoch = time.time()
self._unbound = False
self._ready = False
self._start_finished = False
# bind here so startup messages can be received
self.bind()
def send_message(self, message: Message, topic: str, level=MessageLevel.NORMAL) -> None:
"""send message over pub/sub network
"""
assert self._out_sock is not None, f"requested to send message with no outgoing socket (None)"
assert topic in self.out_topics, f"requested to send message with invalid topic {topic}"
self._out_sock.send_string(topic, zmq.SNDMORE)
self._out_sock.send_pyobj(Envelope.serialize(Envelope(self, message, level=level)))
@coroutine
def get_receiver(self):
"""create an iterable object that yields incoming messages"""
# prime the coroutine
yield None
# main loop -- iterate through input sockets and yield incoming messages
try:
while not self.stopped:
for sn in self._in_socks:
if self.stopped:
yield (f"{self.name}-command", Message(ComponentStatus.EXIT), None)
while isinstance(sn, zmq.Socket) and sn.poll(1) == zmq.POLLIN:
topic = sn.recv_string(zmq.DONTWAIT)
recv: bytes = sn.recv_pyobj(zmq.DONTWAIT)
env: Envelope = Envelope.deserialize(recv)
msg = env.message
t = env.send_time
yield (topic, msg, t)
except Exception as exc:
# TODO: log here
pass
def on_recv(self, topic, msg, t) -> None:
pass
def _process_recvs(self, topic: str, msg: Message, t):
"""find reception response in the receiver registry"""
rgy = self.recv_methods
if (topic,) in rgy:
rgy[(topic,)](self, msg, t)
        if isinstance(msg.message, collections.abc.Hashable) and (topic, msg.message) in rgy:
rgy[(topic, msg.message)](self, t)
self.on_recv(topic, msg, t)
def recv(self, id: int, data_len: int, data: bytes):
"""find reception response in the can receiver registry"""
rgy = self.recv_can_methods
if id in rgy:
fmt = rgy[id][0]
# use network order (!)
msg = struct.unpack(fmt, bytearray(data)[:data_len])
rgy[id][1](self, msg)
@property
def recv_methods(self):
"""get all registered receive components for self"""
return getattr(self, "_recv_methods", {})
@property
def recv_can_methods(self):
"""get all registered receive components for self"""
d = getattr(self, "_recv_can_methods", {})
return {k[0]: (k[1], v) for k,v in d.items()}
def run(self) -> None:
"""component mainloop"""
self._ready = True
recvr = self.get_receiver()
self.on_start()
self._start_finished = True
for topic, msg, t in recvr:
self._process_recvs(topic, msg, t)
if self.stopped:
break
self.unbind()
self.on_exit()
def bind(self) -> None:
"""setup sockets"""
if self._zmq_context is not None:
self.unbind()
self._zmq_context = zmq.Context()
if len(self.out_topics) > 0:
# assert that all out port nums are the same
ports = set([i for i, _ in self._out_ports])
assert len(
ports) == 1, f"outgoing port numbers must all be the same for {self.__class__.__name__} (got {ports})"
self._out_sock = self._zmq_context.socket(zmq.PUB)
self._out_sock.bind(f"tcp://*:{list(self._out_ports)[0][0]}")
for port, topic in self._in_ports:
in_sock = self._zmq_context.socket(zmq.SUB)
in_sock.connect(f"tcp://{self.ip_addr}:{port}")
in_sock.setsockopt_string(zmq.SUBSCRIBE, topic)
self._in_socks.append(in_sock)
# give zmq time
time.sleep(0.2)
def unbind(self) -> None:
"""unbind/disconnect sockets, terminate zmq session"""
self._unbound = True
for idx, s in enumerate(self._in_socks):
s.close()
self._in_socks[idx] = None
if self._out_sock is not None:
self._out_sock.close()
if self._zmq_context is not None:
self._zmq_context.term()
self._zmq_context = None
self._in_socks = []
self._out_sock = None
@property
def in_topics(self):
return [i for _, i in self._in_ports]
@property
def out_topics(self):
return [i for _, i in self._out_ports]
@property
def name(self):
return self._name
def wait_ready_command(self):
"""wait until the service has finished booting"""
import time
while not self._start_finished:
time.sleep(0.2)
return ComponentStatus.READY
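# Illustrative wiring sketch (not part of the original module): a subscriber's in_descr
# (port, topic) pair should match the publisher's out_descr for messages to flow between
# them. The port number and topic name below are hypothetical.
#
#     producer = Component("producer", in_descr=[], out_descr=[(5555, "telemetry")])
#     consumer = Component("consumer", in_descr=[(5555, "telemetry")], out_descr=[])
#     consumer.start()                                    # starts the receive loop thread
#     producer.send_message(Message("hello"), topic="telemetry")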
class ComponentPoller(Component):
"""add a polling thread to respond at a given sampling frequency"""
def __init__(self, *args, sample_frequency=60, **kwargs):
super().__init__(*args, **kwargs)
# poller properties
self.polling_thread = ThreadExiting(name=f"{self.name}-poller", sample_frequency=sample_frequency, **kwargs)
self.polling_thread.on_poll = self.on_poll_poll
self.polling_thread.on_start = self.on_poll_start
self.polling_thread.on_exit = self.on_poll_exit
self._call_started = False
def on_poll_start(self):
pass
def on_poll_exit(self):
pass
def on_poll_poll(self, t):
pass
def start_poller(self):
"""start sensor polling thread"""
if not self.stopped:
self.polling_thread.start()
self._call_started = True
def stop_poller(self):
"""stop sensor polling thread"""
if self._call_started:
self.polling_thread.stop()
def exit_poller(self):
if self._call_started:
self.polling_thread.exit()
def stop(self):
self.stop_poller()
super().stop()
def exit(self):
self.exit_poller()
super().exit()
|
the-stack_0_8051 | # -*- coding: utf-8 -*-
# Copyright (c) 2017, Frappe Technologies and Contributors
# See license.txt
from frappe.core.doctype.user_permission.user_permission import add_user_permissions, remove_applicable
from frappe.permissions import has_user_permission
from frappe.core.doctype.doctype.test_doctype import new_doctype
import frappe
import unittest
class TestUserPermission(unittest.TestCase):
def setUp(self):
frappe.db.sql("""DELETE FROM `tabUser Permission`
WHERE `user` in (
'[email protected]',
'[email protected]',
'[email protected]')""")
frappe.delete_doc_if_exists("DocType", "Person")
frappe.db.sql_ddl("DROP TABLE IF EXISTS `tabPerson`")
frappe.delete_doc_if_exists("DocType", "Doc A")
frappe.db.sql_ddl("DROP TABLE IF EXISTS `tabDoc A`")
def test_default_user_permission_validation(self):
user = create_user('[email protected]')
param = get_params(user, 'User', user.name, is_default=1)
add_user_permissions(param)
#create a duplicate entry with default
perm_user = create_user('[email protected]')
param = get_params(user, 'User', perm_user.name, is_default=1)
self.assertRaises(frappe.ValidationError, add_user_permissions, param)
def test_default_user_permission(self):
frappe.set_user('Administrator')
user = create_user('[email protected]', 'Website Manager')
for category in ['general', 'public']:
if not frappe.db.exists('Blog Category', category):
frappe.get_doc({'doctype': 'Blog Category', 'title': category}).insert()
param = get_params(user, 'Blog Category', 'general', is_default=1)
add_user_permissions(param)
param = get_params(user, 'Blog Category', 'public')
add_user_permissions(param)
frappe.set_user('[email protected]')
doc = frappe.new_doc("Blog Post")
self.assertEqual(doc.blog_category, 'general')
frappe.set_user('Administrator')
def test_apply_to_all(self):
''' Create User permission for User having access to all applicable Doctypes'''
user = create_user('[email protected]')
param = get_params(user, 'User', user.name)
is_created = add_user_permissions(param)
self.assertEqual(is_created, 1)
def test_for_apply_to_all_on_update_from_apply_all(self):
user = create_user('[email protected]')
param = get_params(user, 'User', user.name)
# Initially create User Permission document with apply_to_all checked
is_created = add_user_permissions(param)
self.assertEqual(is_created, 1)
is_created = add_user_permissions(param)
# User Permission should not be changed
self.assertEqual(is_created, 0)
def test_for_applicable_on_update_from_apply_to_all(self):
''' Update User Permission from all to some applicable Doctypes'''
user = create_user('[email protected]')
param = get_params(user,'User', user.name, applicable = ["Chat Room", "Chat Message"])
# Initially create User Permission document with apply_to_all checked
is_created = add_user_permissions(get_params(user, 'User', user.name))
self.assertEqual(is_created, 1)
is_created = add_user_permissions(param)
frappe.db.commit()
removed_apply_to_all = frappe.db.exists("User Permission", get_exists_param(user))
is_created_applicable_first = frappe.db.exists("User Permission", get_exists_param(user, applicable = "Chat Room"))
is_created_applicable_second = frappe.db.exists("User Permission", get_exists_param(user, applicable = "Chat Message"))
# Check that apply_to_all is removed
self.assertIsNone(removed_apply_to_all)
# Check that User Permissions for applicable is created
self.assertIsNotNone(is_created_applicable_first)
self.assertIsNotNone(is_created_applicable_second)
self.assertEqual(is_created, 1)
def test_for_apply_to_all_on_update_from_applicable(self):
''' Update User Permission from some to all applicable Doctypes'''
user = create_user('[email protected]')
param = get_params(user, 'User', user.name)
# create User permissions that with applicable
is_created = add_user_permissions(get_params(user, 'User', user.name, applicable = ["Chat Room", "Chat Message"]))
self.assertEqual(is_created, 1)
is_created = add_user_permissions(param)
is_created_apply_to_all = frappe.db.exists("User Permission", get_exists_param(user))
removed_applicable_first = frappe.db.exists("User Permission", get_exists_param(user, applicable = "Chat Room"))
removed_applicable_second = frappe.db.exists("User Permission", get_exists_param(user, applicable = "Chat Message"))
# To check that a User permission with apply_to_all exists
self.assertIsNotNone(is_created_apply_to_all)
# Check that all User Permission with applicable is removed
self.assertIsNone(removed_applicable_first)
self.assertIsNone(removed_applicable_second)
self.assertEqual(is_created, 1)
def test_user_perm_for_nested_doctype(self):
"""Test if descendants' visibility is controlled for a nested DocType."""
from frappe.core.doctype.doctype.test_doctype import new_doctype
user = create_user("[email protected]", "Blogger")
if not frappe.db.exists("DocType", "Person"):
doc = new_doctype("Person",
fields=[
{
"label": "Person Name",
"fieldname": "person_name",
"fieldtype": "Data"
}
], unique=0)
doc.is_tree = 1
doc.insert()
parent_record = frappe.get_doc(
{"doctype": "Person", "person_name": "Parent", "is_group": 1}
).insert()
child_record = frappe.get_doc(
{"doctype": "Person", "person_name": "Child", "is_group": 0, "parent_person": parent_record.name}
).insert()
add_user_permissions(get_params(user, "Person", parent_record.name))
# check if adding perm on a group record, makes child record visible
self.assertTrue(has_user_permission(frappe.get_doc("Person", parent_record.name), user.name))
self.assertTrue(has_user_permission(frappe.get_doc("Person", child_record.name), user.name))
frappe.db.set_value("User Permission", {"allow": "Person", "for_value": parent_record.name}, "hide_descendants", 1)
frappe.cache().delete_value("user_permissions")
# check if adding perm on a group record with hide_descendants enabled,
# hides child records
self.assertTrue(has_user_permission(frappe.get_doc("Person", parent_record.name), user.name))
self.assertFalse(has_user_permission(frappe.get_doc("Person", child_record.name), user.name))
def test_user_perm_on_new_doc_with_field_default(self):
"""Test User Perm impact on frappe.new_doc. with *field* default value"""
frappe.set_user('Administrator')
user = create_user("[email protected]", "Blogger")
# make a doctype "Doc A" with 'doctype' link field and default value ToDo
if not frappe.db.exists("DocType", "Doc A"):
doc = new_doctype("Doc A",
fields=[
{
"label": "DocType",
"fieldname": "doc",
"fieldtype": "Link",
"options": "DocType",
"default": "ToDo"
}
], unique=0)
doc.insert()
# make User Perm on DocType 'ToDo' in Assignment Rule (unrelated doctype)
add_user_permissions(get_params(user, "DocType", "ToDo", applicable=["Assignment Rule"]))
frappe.set_user("[email protected]")
new_doc = frappe.new_doc("Doc A")
# User perm is created on ToDo but for doctype Assignment Rule only
# it should not have impact on Doc A
self.assertEqual(new_doc.doc, "ToDo")
frappe.set_user('Administrator')
remove_applicable(["Assignment Rule"], "[email protected]", "DocType", "ToDo")
def test_user_perm_on_new_doc_with_user_default(self):
"""Test User Perm impact on frappe.new_doc. with *user* default value"""
from frappe.core.doctype.session_default_settings.session_default_settings import (clear_session_defaults,
set_session_default_values)
frappe.set_user('Administrator')
user = create_user("[email protected]", "Blogger")
# make a doctype "Doc A" with 'doctype' link field
if not frappe.db.exists("DocType", "Doc A"):
doc = new_doctype("Doc A",
fields=[
{
"label": "DocType",
"fieldname": "doc",
"fieldtype": "Link",
"options": "DocType",
}
], unique=0)
doc.insert()
# create a 'DocType' session default field
if not frappe.db.exists("Session Default", {"ref_doctype": "DocType"}):
settings = frappe.get_single('Session Default Settings')
settings.append("session_defaults", {
"ref_doctype": "DocType"
})
settings.save()
# make User Perm on DocType 'ToDo' in Assignment Rule (unrelated doctype)
add_user_permissions(get_params(user, "DocType", "ToDo", applicable=["Assignment Rule"]))
# User default Doctype value is ToDo via Session Defaults
frappe.set_user("[email protected]")
set_session_default_values({"doc": "ToDo"})
new_doc = frappe.new_doc("Doc A")
# User perm is created on ToDo but for doctype Assignment Rule only
# it should not have impact on Doc A
self.assertEqual(new_doc.doc, "ToDo")
frappe.set_user('Administrator')
clear_session_defaults()
remove_applicable(["Assignment Rule"], "[email protected]", "DocType", "ToDo")
def create_user(email, *roles):
''' create user with role system manager '''
if frappe.db.exists('User', email):
return frappe.get_doc('User', email)
user = frappe.new_doc('User')
user.email = email
user.first_name = email.split("@")[0]
if not roles:
roles = ('System Manager',)
user.add_roles(*roles)
return user
def get_params(user, doctype, docname, is_default=0, hide_descendants=0, applicable=None):
''' Return param to insert '''
param = {
"user": user.name,
"doctype":doctype,
"docname":docname,
"is_default": is_default,
"apply_to_all_doctypes": 1,
"applicable_doctypes": [],
"hide_descendants": hide_descendants
}
if applicable:
param.update({"apply_to_all_doctypes": 0})
param.update({"applicable_doctypes": applicable})
return param
def get_exists_param(user, applicable = None):
''' param to check existing Document '''
param = {
"user": user.name,
"allow": "User",
"for_value": user.name,
}
if applicable:
param.update({"applicable_for": applicable})
else:
param.update({"apply_to_all_doctypes": 1})
return param
|
the-stack_0_8052 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, TYPE_CHECKING
from azure.core.configuration import Configuration
from azure.core.pipeline import policies
from azure.mgmt.core.policies import ARMHttpLoggingPolicy
from .._version import VERSION
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from azure.core.credentials_async import AsyncTokenCredential
class AutoRestHeadTestServiceConfiguration(Configuration):
"""Configuration for AutoRestHeadTestService.
Note that all parameters used to create this instance are saved as instance
attributes.
:param credential: Credential needed for the client to connect to Azure.
:type credential: ~azure.core.credentials_async.AsyncTokenCredential
"""
def __init__(
self,
credential: "AsyncTokenCredential",
**kwargs: Any
) -> None:
if credential is None:
raise ValueError("Parameter 'credential' must not be None.")
super(AutoRestHeadTestServiceConfiguration, self).__init__(**kwargs)
self.credential = credential
self.credential_scopes = kwargs.pop('credential_scopes', ['https://management.azure.com/.default'])
kwargs.setdefault('sdk_moniker', 'mgmt-sample/{}'.format(VERSION))
self._configure(**kwargs)
def _configure(
self,
**kwargs: Any
) -> None:
self.user_agent_policy = kwargs.get('user_agent_policy') or policies.UserAgentPolicy(**kwargs)
self.headers_policy = kwargs.get('headers_policy') or policies.HeadersPolicy(**kwargs)
self.proxy_policy = kwargs.get('proxy_policy') or policies.ProxyPolicy(**kwargs)
self.logging_policy = kwargs.get('logging_policy') or policies.NetworkTraceLoggingPolicy(**kwargs)
self.http_logging_policy = kwargs.get('http_logging_policy') or ARMHttpLoggingPolicy(**kwargs)
self.retry_policy = kwargs.get('retry_policy') or policies.AsyncRetryPolicy(**kwargs)
self.custom_hook_policy = kwargs.get('custom_hook_policy') or policies.CustomHookPolicy(**kwargs)
self.redirect_policy = kwargs.get('redirect_policy') or policies.AsyncRedirectPolicy(**kwargs)
self.authentication_policy = kwargs.get('authentication_policy')
if self.credential and not self.authentication_policy:
self.authentication_policy = policies.AsyncBearerTokenCredentialPolicy(self.credential, *self.credential_scopes, **kwargs)
|
the-stack_0_8053 | from math import floor
from tweepy import API, OAuthHandler
from data_analysis.database import session, Tweet
consumer_key = 'dGx62GNqi7Yaj1XIcZgOLNjDb'
consumer_secret = 'ZCE896So7Ba1u96ICwMhulO2QO3oeZ5BeVyfUw1YbIYELzVyJs'
access_token = '1121993185-hGOTr3J40FlKGwWkiNWdeNVrcD4bqqW38SPiM3s'
access_token_secret = 'BAo4d2J24xyXRKFrga6A9MwpTW6bMb5EztfvnL5qv2LvJ'
auth = OAuthHandler(consumer_key,
consumer_secret)
auth.set_access_token(access_token, access_token_secret)
def update_tweets(api, tweets):
"""
This is a method to update our tweets.
`api` is an instance of tweepy.API
`tweets` is a list of all tweets from our database
This method handles high level iteration logic. See `_update_sets` for the
more interesting, updating-of-values, logic
"""
# How many tweets do we have?
len_tweets = len(tweets)
    # The Twitter REST API only takes 100 ids at a time. So we need to break
# these into sets of 100, and use the `math.floor` method to get an integer
iterations = floor(len_tweets/100)
# Iterate through the sets of 100s of tweets
for num in range(iterations):
# first number of the set of 100
first_tweet_index = num * 100
        # index just past the last tweet of the set of 100 (slice end is exclusive)
        last_tweet_index = first_tweet_index + 100
        # Grab the set using index slicing
        tweet_set = tweets[first_tweet_index:last_tweet_index]
# Call an the inner method so we avoid code duplication
_update_sets(api, session, tweet_set, num)
    # if the total divides perfectly by 100, we're done!
    if len_tweets % 100 == 0:
return
# If we're here, our last set is slightly smaller than 100, so we're
# going to caculate the next number and then grab to the end of the list
last_set_num = iterations * 100
last_set = tweets[last_set_num:]
# pass the last set into our inner method that we used to avoid code
# duplication
_update_sets(api, session, last_set, iterations)
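# Worked example (illustrative only): with 250 tweets, floor(250/100) == 2, so the loop
# above handles tweets[0:100] and tweets[100:200]; because 250 % 100 != 0, the remaining
# tweets[200:] are passed to _update_sets below as a final, smaller batch.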
def _update_sets(api, session, tweet_set, start_num):
"""
Broke out a helper method so we didn't have to repeat the code for our
last set.
This helper method does the heavy lifting for us
"""
# Grab out just the tweet ids using a list comprehension
tweet_ids = [tweet.tid for tweet in tweet_set]
# Using the tweepy api, grab the updated tweets
# `trim_user` drops user data
updated_set = api.statuses_lookup(tweet_ids, trim_user=True)
# iterate through update set
for updated_tweet in updated_set:
# the values we want to update
fav_count = updated_tweet.favorite_count
retweet_count = updated_tweet.retweet_count
# Get the old tweet using it's twitter id (tid for short)
tid = updated_tweet.id
database_tweet = session.query(Tweet).filter_by(tid=tid).one()
# update the tweet information in our database
database_tweet.favorite_count = fav_count
database_tweet.retweet_count = retweet_count
# User feedback
print('index: {}'.format(database_tweet.id))
print('favs: {} \t retweets: {}'.format(fav_count, retweet_count))
# save our changes to the database
session.commit()
def main():
api = API(auth)
# Grab all the tweets
tweets = session.query(Tweet).all()
update_tweets(api, tweets)
if __name__ == '__main__':
main()
|
the-stack_0_8054 | import clr
import System
clr.AddReference('RevitAPI')
from Autodesk.Revit.DB import *
def DesignOptionIsPrimary(item):
if hasattr(item, "IsPrimary"): return item.IsPrimary
else: return False
items = UnwrapElement(IN[0])
if isinstance(IN[0], list): OUT = [DesignOptionIsPrimary(x) for x in items]
else: OUT = DesignOptionIsPrimary(items) |
the-stack_0_8061 | """ conver arff to csv format"""
import csv
import pandas as pd
def function_arfftocsv(source: str, dest: str = 'processed.csv'):
"""this function deletes @ and empty lines so that produce a no-header csv"""
fp = open(source)
rdr = csv.reader(filter(lambda row: row[0]!='@' and len(row)>1, fp))
with open(dest,'w', newline = '') as csvfile:
filewriter = csv.writer(csvfile)
for row in rdr:
filewriter.writerow(row)
fp.close()
# this function adds the headers specified in labels argument
def function_labelize(dest: str, labels: list, source: str = 'processed.csv') -> pd.DataFrame:
"""This function takes a destination dir, a source dir, the labels to add
and returns a dataframe with all labels for each column"""
df = pd.read_csv(source, names=labels,index_col=False, na_values='?', sep=',')
df.to_csv(dest, header=True, index_label=False, index=False)
return df
def function_dataEncoding(df: pd.DataFrame, labels: list, to_replace: dict, values: dict,
path: str) -> pd.DataFrame:
"""this function encodes explicitly the nominal values of specified labels
and returns the dataframe with this columns"""
for label in labels:# for each label we want to convert
df[label] = df[label].replace(to_replace[label], values[label])#replace the given values
df[labels].to_csv(path, header= True, index_label= False, index= False)#save as csv
return df[labels] # return the dataFrame
def processing(all_labels: list, labels: list, to_replace: dict, values: dict,
               path: str = 'all.csv', source: str = 'diabetes_paper_fazakis.csv',
               des: str = 'Finaldata.csv') -> pd.DataFrame:
    """This function places the labels for each model and converts categorical to numerical data"""
function_arfftocsv(source) # transform arff to csv
df = function_labelize(des, all_labels) # add column labels
return function_dataEncoding(df, labels, to_replace, values, path)# encode categorical and return dFrame
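# Illustrative usage (not part of the original module): the labels and replacement maps
# below are hypothetical placeholders, only meant to show how the pieces fit together.
#
#     all_labels = ['age', 'sex', 'class']
#     to_replace = {'sex': ['male', 'female'], 'class': ['negative', 'positive']}
#     values = {'sex': [0, 1], 'class': [0, 1]}
#     encoded = processing(all_labels, labels=['sex', 'class'], to_replace=to_replace,
#                          values=values, source='diabetes_paper_fazakis.csv')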
|
the-stack_0_8062 | from django.shortcuts import render, get_object_or_404, HttpResponseRedirect
from treasure_hunt.models import Level, UserProfile
from django.contrib.auth.decorators import login_required
import django
django.setup() #Hack to fix Models not ready error
def index(request):
return render(request, 'treasurehunt/treasurehunt_index.html')
@login_required
def display_level(request, level):
level_object = get_object_or_404(Level, level_number__exact=level)
current_user = request.user.profile
if request.method == 'GET':
if int(current_user.current_level) <= int(level):
return render(request, 'treasurehunt/treasurehunt_level.html', {'level_object': level_object})
else:
return HttpResponseRedirect('/treasurehunt/level/%d' % current_user.current_level)
else:
level_answer = str(level_object.answer)
user_answer = str(request.POST['answer'])
if level_answer == user_answer and int(current_user.current_level) == int(level):
#Make sure that user level is updated only once for every level
current_user.current_level += 1
current_user.save(update_fields=['current_level'])
return HttpResponseRedirect('/treasurehunt/level/%d' % (int(level) + 1))
return HttpResponseRedirect('/treasurehunt/level/%d' % int(level))
@login_required
def display_leaderboard(request):
users = UserProfile.objects.all().order_by('-current_level')
return render(request, 'treasurehunt/treasurehunt_leaderboard.html', {'users': users})
|
the-stack_0_8063 | """Test the API's checkout process over full digital orders."""
import graphene
import pytest
from ....account.models import Address
from ....checkout.error_codes import CheckoutErrorCode
from ....checkout.fetch import fetch_checkout_info, fetch_checkout_lines
from ....checkout.models import Checkout
from ....checkout.utils import add_variant_to_checkout
from ....plugins.manager import get_plugins_manager
from ...core.utils import to_global_id_or_none
from ...tests.utils import get_graphql_content
from ..mutations.utils import update_checkout_shipping_method_if_invalid
from .test_checkout import (
MUTATION_CHECKOUT_CREATE,
MUTATION_CHECKOUT_SHIPPING_ADDRESS_UPDATE,
MUTATION_UPDATE_SHIPPING_METHOD,
)
from .test_checkout_lines import (
MUTATION_CHECKOUT_LINE_DELETE,
MUTATION_CHECKOUT_LINES_UPDATE,
)
@pytest.mark.parametrize("with_shipping_address", (True, False))
def test_create_checkout(
api_client,
digital_content,
graphql_address_data,
with_shipping_address,
channel_USD,
):
"""Test creating a checkout with a shipping address gets the address ignored."""
address_count = Address.objects.count()
variant = digital_content.product_variant
variant_id = graphene.Node.to_global_id("ProductVariant", variant.pk)
checkout_input = {
"channel": channel_USD.slug,
"lines": [{"quantity": 1, "variantId": variant_id}],
"email": "[email protected]",
}
if with_shipping_address:
checkout_input["shippingAddress"] = graphql_address_data
get_graphql_content(
api_client.post_graphql(
MUTATION_CHECKOUT_CREATE, {"checkoutInput": checkout_input}
)
)["data"]["checkoutCreate"]
# Retrieve the created checkout
checkout = Checkout.objects.get()
# Check that the shipping address was ignored, thus not created
assert (
checkout.shipping_address is None
), "The address shouldn't have been associated"
assert (
Address.objects.count() == address_count
), "No address should have been created"
def test_checkout_has_no_available_shipping_methods(
api_client, checkout_with_digital_item, address, shipping_zone
):
"""Test no shipping method are available on digital orders."""
query = """
query getCheckout($id: ID!) {
checkout(id: $id) {
availableShippingMethods {
name
price {
amount
}
}
}
}
"""
checkout = checkout_with_digital_item
# Put a shipping address, to ensure it is still handled properly
checkout.shipping_address = address
checkout.save(update_fields=["shipping_address"])
variables = {"id": to_global_id_or_none(checkout)}
response = api_client.post_graphql(query, variables)
content = get_graphql_content(response)
data = content["data"]["checkout"]
assert len(data["availableShippingMethods"]) == 0
def test_checkout_update_shipping_address(
api_client, checkout_with_digital_item, graphql_address_data
):
"""Test updating the shipping address of a digital order throws an error."""
checkout = checkout_with_digital_item
variables = {
"id": to_global_id_or_none(checkout),
"shippingAddress": graphql_address_data,
}
response = api_client.post_graphql(
MUTATION_CHECKOUT_SHIPPING_ADDRESS_UPDATE, variables
)
content = get_graphql_content(response)
data = content["data"]["checkoutShippingAddressUpdate"]
assert data["errors"] == [
{
"field": "shippingAddress",
"message": "This checkout doesn't need shipping",
"code": CheckoutErrorCode.SHIPPING_NOT_REQUIRED.name,
}
]
# Ensure the address was unchanged
checkout.refresh_from_db(fields=["shipping_address"])
assert checkout.shipping_address is None
def test_checkout_update_shipping_method(
api_client, checkout_with_digital_item, address, shipping_method
):
"""Test updating the shipping method of a digital order throws an error."""
checkout = checkout_with_digital_item
method_id = graphene.Node.to_global_id("ShippingMethod", shipping_method.pk)
variables = {"id": to_global_id_or_none(checkout), "shippingMethodId": method_id}
# Put a shipping address, to ensure it is still handled properly
checkout.shipping_address = address
checkout.save(update_fields=["shipping_address"])
response = api_client.post_graphql(MUTATION_UPDATE_SHIPPING_METHOD, variables)
content = get_graphql_content(response)
data = content["data"]["checkoutShippingMethodUpdate"]
assert data["errors"] == [
{
"field": "shippingMethod",
"message": "This checkout doesn't need shipping",
"code": CheckoutErrorCode.SHIPPING_NOT_REQUIRED.name,
}
]
# Ensure the shipping method was unchanged
checkout.refresh_from_db(fields=["shipping_method"])
assert checkout.shipping_method is None
def test_remove_shipping_method_if_only_digital_in_checkout(
checkout_with_digital_item, address, shipping_method
):
checkout = checkout_with_digital_item
checkout.shipping_address = address
checkout.shipping_method = shipping_method
checkout.save()
assert checkout.shipping_method
manager = get_plugins_manager()
lines, _ = fetch_checkout_lines(checkout)
checkout_info = fetch_checkout_info(checkout, lines, [], manager)
update_checkout_shipping_method_if_invalid(checkout_info, lines)
checkout.refresh_from_db()
assert not checkout.shipping_method
def test_checkout_lines_update_remove_shipping_if_removed_product_with_shipping(
user_api_client, checkout_with_item, digital_content, address, shipping_method
):
checkout = checkout_with_item
digital_variant = digital_content.product_variant
checkout.shipping_address = address
checkout.shipping_method = shipping_method
checkout.save()
checkout_info = fetch_checkout_info(checkout, [], [], get_plugins_manager())
add_variant_to_checkout(checkout_info, digital_variant, 1)
line = checkout.lines.first()
variant = line.variant
variant_id = graphene.Node.to_global_id("ProductVariant", variant.pk)
variables = {
"id": to_global_id_or_none(checkout),
"lines": [{"variantId": variant_id, "quantity": 0}],
}
response = user_api_client.post_graphql(MUTATION_CHECKOUT_LINES_UPDATE, variables)
content = get_graphql_content(response)
data = content["data"]["checkoutLinesUpdate"]
assert not data["errors"]
checkout.refresh_from_db()
assert checkout.lines.count() == 1
assert not checkout.shipping_method
def test_checkout_line_delete_remove_shipping_if_removed_product_with_shipping(
user_api_client, checkout_with_item, digital_content, address, shipping_method
):
checkout = checkout_with_item
digital_variant = digital_content.product_variant
checkout.shipping_address = address
checkout.shipping_method = shipping_method
checkout.save()
checkout_info = fetch_checkout_info(checkout, [], [], get_plugins_manager())
add_variant_to_checkout(checkout_info, digital_variant, 1)
line = checkout.lines.first()
line_id = graphene.Node.to_global_id("CheckoutLine", line.pk)
variables = {"id": to_global_id_or_none(checkout), "lineId": line_id}
response = user_api_client.post_graphql(MUTATION_CHECKOUT_LINE_DELETE, variables)
content = get_graphql_content(response)
data = content["data"]["checkoutLineDelete"]
assert not data["errors"]
checkout.refresh_from_db()
assert checkout.lines.count() == 1
assert not checkout.shipping_method
|
the-stack_0_8064 | from apodeixi.controllers.util.manifest_api import ManifestAPI
from apodeixi.util.a6i_error import ApodeixiError
from apodeixi.util.formatting_utils import StringUtils
from apodeixi.controllers.util.skeleton_controller import SkeletonController
from apodeixi.knowledge_base.filing_coordinates import InitiativesFilingCoordinates
from apodeixi.xli.interval import GreedyIntervalSpec, ClosedOpenIntervalSpec
from apodeixi.xli.posting_controller_utils import PostingConfig
from apodeixi.xli.update_policy import UpdatePolicy
class Workstream_Controller(SkeletonController):
'''
Class to process an Excel posting for initiative workstreams. It produces two YAML manifests:
* The workstream's milestones
* The workstream's metrics
@param store A KnowledgeBaseStore instance. Handles all I/O of postings and manifests for this controller.
@param a6i_config The ApodeixiConfig instance for the Python process in which we are running.
'''
def __init__(self, parent_trace, store, a6i_config):
super().__init__(parent_trace, store, a6i_config)
self.MANIFEST_API = ManifestAPI( parent_trace = parent_trace,
domain = 'initiatives',
subdomain = 'workstream',
api_publisher = 'a6i',
extension = 'io')
self.SUPPORTED_VERSIONS = ['v1a']
self.SUPPORTED_KINDS = ['workstream-milestone', 'workstream-metric']
def getManifestAPI(self):
return self.MANIFEST_API
def getSupportedVersions(self):
return self.SUPPORTED_VERSIONS
def getSupportedKinds(self):
return self.SUPPORTED_KINDS
def getPostingConfig(self, parent_trace, kind, manifest_nb):
'''
Return a PostingConfig, corresponding to the configuration that this concrete controller supports.
'''
ME = Workstream_Controller
if kind == 'workstream-milestone':
update_policy = UpdatePolicy(reuse_uids=True, merge=False)
xlr_config = ME._WorkstreamMilestoneConfig( update_policy = update_policy,
kind = kind,
manifest_nb = manifest_nb,
controller = self)
elif kind == 'workstream-metric':
update_policy = UpdatePolicy(reuse_uids=True, merge=False)
xlr_config = ME._WorkstreamMetricConfig( update_policy = update_policy,
kind = kind,
manifest_nb = manifest_nb,
controller = self)
else:
raise ApodeixiError(parent_trace, "Invalid domain object '" + kind + "' - should be one of "
+ ", ".join(self.SUPPORTED_KINDS),
origination = {'signaled_from': __file__})
return xlr_config
def getPostingLabel(self, parent_trace):
'''
Returns a PostingLabel, corresponding to the what is expected by this concrete controller class.
'''
ME = Workstream_Controller
return ME._MyPostingLabel(parent_trace, controller = self)
def _buildAllManifests(self, parent_trace, posting_label_handle):
all_manifests_dict, label = super()._buildAllManifests(parent_trace, posting_label_handle)
return all_manifests_dict, label
def subnamespaceFromLabel(self, parent_trace, label):
'''
        Helper method that returns the 'subnamespace' portion of a manifest's name.
It is inferred from a `label` that provides the posting details for a manifest that should be created.
Returns a string corresponding to the subnamespace, if one applies to this `kind` of manifest.
If no subnamespace applies, returns None.
'''
program = label.program (parent_trace)
FMT = StringUtils().format_as_yaml_fieldname # Abbreviation for readability
return FMT(program)
def manifestNameFromLabel(self, parent_trace, label, kind):
'''
Helper method that returns what the 'name' field should be in the manifest to be created with the given
label
@param kind The kind of manifest for which the name is sought. This parameter can be ignored for controller
classes that use the same name for all supported kinds; it is meant to support controllers that
process multiple manifest kinds and do not use the same name for all of them. For example, controllers
that point to reference data in a different domain/sub-domain.
'''
program = label.program (parent_trace)
workstream_UID = label.workstream_UID (parent_trace)
initiative = label.initiative (parent_trace)
scenario = label.scenario (parent_trace)
scoring_cycle = label.scoring_cycle (parent_trace)
FMT = StringUtils().format_as_yaml_fieldname # Abbreviation for readability
name = FMT(program + '.' + scoring_cycle + '.' + initiative + '.'
+ workstream_UID + '.' + scenario)
return name
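# Illustrative example (not from the original source, and it assumes that
# StringUtils().format_as_yaml_fieldname lower-cases tokens and turns spaces into
# hyphens): a posting label with program="Modernization", scoring cycle="Dec 2020",
# initiative="strategic", workstream UID="W1" and scenario="Default" would produce
# a manifest name along the lines of "modernization.dec-2020.strategic.w1.default".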
def manifestNameFromCoords(self, parent_trace, subnamespace, coords, kind):
'''
Helper method that returns what the 'name' field should be in the manifest to be created with the given
filing coords, possibly complemented by the subnamespace.
Usually used in the context of generating forms.
Example: consider a manifest name like "modernization.dec-2020.fusionopus.default"
in namespace "my-corp.production".
To build such a name, this method must receive "modernization" as the subnamespace, and
filing coords from which to infer "dec-2020", "fusionopus", and "default".
@param subnamespace A string, which is allowed to be None. If not null, this is a further partitioning of
the namespace into finer slices, and a manifest's name is supposed to identify the slice
in which the manifest resides.
@param coords A FilingCoords object corresponding to this controller. It is used, possibly along with the
`subnamespace` parameter, to build a manifest name.
@param kind The kind of manifest for which the name is sought. This parameter can be ignored for controller
classes that use the same name for all supported kinds; it is meant to support controllers that
process multiple manifest kinds and do not use the same name for all of them. For example, controllers
that point to reference data in a different domain/sub-domain.
'''
if not type(coords) == InitiativesFilingCoordinates:
raise ApodeixiError(parent_trace, "Can't build manifest name because received wrong type of filing coordinates",
data = {"Type of coords received": str(type(coords)),
"Expected type of coords": "InitiativesFilingCoordinates"})
workstream_UID = coords.workstream_UID
program = subnamespace
initiative = coords.initiative
scenario = coords.scenario
scoring_cycle = coords.scoring_cycle
FMT = StringUtils().format_as_yaml_fieldname # Abbreviation for readability
name = FMT(program + '.' + scoring_cycle + '.' + initiative + '.'
+ workstream_UID + '.' + scenario)
return name
def manifestLabelsFromCoords(self, parent_trace, subnamespace, coords):
'''
Helper method that returns a dict whose keys are the label field names that should be populated
inside a manifest based on the parameters, and whose values are the values each of those labels should take.
Usually used in the context of generating forms.
Example: consider a manifest name like "modernization.dec-2020.fusionopus.default"
in namespace "my-corp.production", that arose from a posting for product "Fusion Opus",
scoring cycle "Dec 2020" and scenario "Default".
Then this method returns a dict mapping the label fields to values like "modernization", "Dec 2020", "Fusion Opus" and "Default".
@param subnamespace A string, which is allowed to be None. If not null, this is a further partitioning of
the namespace into finer slices, and a manifest's name is supposed to identify the slice
in which the manifest resides.
@param coords A FilingCoords object corresponding to this controller. It is used, possibly along with the
`subnamespace` parameter, to build a manifest name.
'''
if not type(coords) == InitiativesFilingCoordinates:
raise ApodeixiError(parent_trace, "Can't build manifest name because received wrong type of filing coordinates",
data = {"Type of coords received": str(type(coords)),
"Expected type of coords": "InitiativesFilingCoordinates"})
workstream_UID = coords.workstream_UID
initiative = subnamespace
scenario = coords.scenario
scoring_cycle = coords.scoring_cycle
MY_PL = Workstream_Controller._MyPostingLabel # Abbreviation for readability
result_dict = {}
result_dict[MY_PL._WORKSTREAM_UID] = workstream_UID
result_dict[MY_PL._INITIATIVE] = initiative
result_dict[MY_PL._SCENARIO] = scenario
result_dict[MY_PL._SCORING_CYCLE] = scoring_cycle
return result_dict
def _buildOneManifest(self, parent_trace, posting_data_handle, label):
'''
Helper function, amenable to unit testing, unlike the enveloping controller `apply` function that requires a knowledge base
structure
'''
manifest_dict = super()._buildOneManifest(parent_trace, posting_data_handle, label)
my_trace = parent_trace.doing("Getting PostingLabel fields specific to Workstream_Controller")
workstream_UID = label.workstream_UID (my_trace)
workstream_title = label.workstream_title (my_trace)
program = label.program (my_trace)
initiative = label.initiative (my_trace)
scenario = label.scenario (my_trace)
scoring_cycle = label.scoring_cycle (my_trace)
scoring_maturity = label.scoring_maturity (my_trace)
my_trace = parent_trace.doing("Enriching generic manifest fields with additional fields "
+ "specific to Workstream_Controller")
if True:
metadata = manifest_dict['metadata']
MY_PL = Workstream_Controller._MyPostingLabel # Abbreviation for readability
labels = metadata['labels']
labels[MY_PL._WORKSTREAM_UID] = workstream_UID
labels[MY_PL._WORKSTREAM_TITLE] = workstream_title
labels[MY_PL._PROGRAM] = program
labels[MY_PL._INITIATIVE] = initiative
labels[MY_PL._SCENARIO] = scenario
labels[MY_PL._SCORING_CYCLE] = scoring_cycle
labels[MY_PL._SCORING_MATURITY] = scoring_maturity
assertion = manifest_dict['assertion']
assertion[MY_PL._WORKSTREAM_UID] = workstream_UID
assertion[MY_PL._WORKSTREAM_TITLE] = workstream_title
assertion[MY_PL._PROGRAM] = program
assertion[MY_PL._INITIATIVE] = initiative
assertion[MY_PL._SCENARIO] = scenario
assertion[MY_PL._SCORING_CYCLE] = scoring_cycle
assertion[MY_PL._SCORING_MATURITY] = scoring_maturity
return manifest_dict
class _WorkstreamMilestoneConfig(PostingConfig):
'''
Codifies the schema and integrity expectations for workstream's milestones
'''
_ENTITY_NAME = 'Theme'
_SPLITTING_COLUMNS = ['Milestone', 'Task', 'Dependency']
def __init__(self, update_policy, kind, manifest_nb, controller):
ME = Workstream_Controller._WorkstreamMilestoneConfig
super().__init__( kind = kind,
update_policy = update_policy,
manifest_nb = manifest_nb,
controller = controller)
interval_spec_milestones = ClosedOpenIntervalSpec( parent_trace = None,
splitting_columns = ME._SPLITTING_COLUMNS,
entity_name = ME._ENTITY_NAME
)
self.interval_spec = interval_spec_milestones
def preflightPostingValidation(self, parent_trace, posted_content_df):
'''
Method performs some initial validation of the `dataframe`, which is intended to be a DataFrame representation of the
data posted in Excel.
The intention for this preflight validation is to provide the user with more user-friendly error messages that
educate the user on what he/she should change in the posting for it to be valid. In the absence of this
preflight validation, the posting error from the user would eventually be caught deeper in the parsing logic,
by which time the error generated might not be too user friendly.
Thus this method is not so much to avoid corruption of the data, since downstream logic will prevent corruption
anyway. Rather, it is to provide usability by outputting high-level user-meaningful error messages.
'''
ME = Workstream_Controller._WorkstreamMilestoneConfig
posted_cols = list(posted_content_df.columns)
mandatory_cols = [ME._ENTITY_NAME]
mandatory_cols.extend(ME._SPLITTING_COLUMNS)
missing_cols = [col for col in mandatory_cols if not col in posted_cols]
if len(missing_cols) > 0:
raise ApodeixiError(parent_trace, "Posting lacks some mandatory columns. This often happens if "
+ "ranges are wrong in Posting Label.",
data = { 'Missing columns': missing_cols,
'Posted columns': posted_cols})
def entity_name(self):
ME = Workstream_Controller._WorkstreamMilestoneConfig
return ME._ENTITY_NAME
class _WorkstreamMetricConfig(PostingConfig):
'''
Codifies the schema and integrity expectations for workstream's metrics
'''
_ENTITY_NAME = 'Metric'
def __init__(self, update_policy, kind, manifest_nb, controller):
ME = Workstream_Controller._WorkstreamMetricConfig
super().__init__( kind = kind,
update_policy = update_policy,
manifest_nb = manifest_nb,
controller = controller)
interval_spec_metrics = GreedyIntervalSpec(parent_trace = None, entity_name = ME._ENTITY_NAME)
self.interval_spec = interval_spec_metrics
def preflightPostingValidation(self, parent_trace, posted_content_df):
'''
Method performs some initial validation of the `dataframe`, which is intended to be a DataFrame representation of the
data posted in Excel.
The intention for this preflight validation is to provide the user with more user-friendly error messages that
educate the user on what he/she should change in the posting for it to be valid. In the absence of this
preflight validation, the posting error from the user would eventually be caught deeper in the parsing logic,
by which time the error generated might not be too user friendly.
Thus this method is not so much to avoid corruption of the data, since downstream logic will prevent corruption
anyway. Rather, it is to provide usability by outputting high-level user-meaningful error messages.
'''
ME = Workstream_Controller._WorkstreamMetricConfig
posted_cols = list(posted_content_df.columns)
mandatory_cols = [ME._ENTITY_NAME]
missing_cols = [col for col in mandatory_cols if not col in posted_cols]
if len(missing_cols) > 0:
raise ApodeixiError(parent_trace, "Posting lacks some mandatory columns. This often happens if "
+ "ranges are wrong in Posting Label.",
data = { 'Missing columns': missing_cols,
'Posted columns': posted_cols})
def entity_name(self):
ME = Workstream_Controller._WorkstreamMetricConfig
return ME._ENTITY_NAME
class _MyPostingLabel(SkeletonController._MyPostingLabel):
'''
Codifies the schema expectations for the posting label when posting a workstream.
'''
_WORKSTREAM_UID = "workstreamUID"
_WORKSTREAM_TITLE = "workstreamTitle"
_PROGRAM = "program"
_INITIATIVE = "initiative"
_SCENARIO = "scenario"
_SCORING_CYCLE = "scoringCycle"
_SCORING_MATURITY = "scoringMaturity"
def __init__(self, parent_trace, controller):
# Shortcut to reference class static variables
ME = Workstream_Controller._MyPostingLabel
super().__init__( parent_trace = parent_trace,
controller = controller,
mandatory_fields = [ ME._PROGRAM, ME._WORKSTREAM_UID, ME._INITIATIVE, ME._SCENARIO, # Determine name
ME._WORKSTREAM_TITLE,
ME._SCORING_CYCLE, ME._SCORING_MATURITY],
date_fields = [])
def program(self, parent_trace):
# Shortcut to reference class static variables
ME = Workstream_Controller._MyPostingLabel
return self._getField(parent_trace, ME._PROGRAM)
def workstream_UID(self, parent_trace):
# Shortcut to reference class static variables
ME = Workstream_Controller._MyPostingLabel
return self._getField(parent_trace, ME._WORKSTREAM_UID)
def workstream_title(self, parent_trace):
# Shortcut to reference class static variables
ME = Workstream_Controller._MyPostingLabel
return self._getField(parent_trace, ME._WORKSTREAM_TITLE)
def initiative(self, parent_trace):
# Shortcut to reference class static variables
ME = Workstream_Controller._MyPostingLabel
return self._getField(parent_trace, ME._INITIATIVE)
def scenario(self, parent_trace):
# Shortcut to reference class static variables
ME = Workstream_Controller._MyPostingLabel
return self._getField(parent_trace, ME._SCENARIO)
def scoring_cycle(self, parent_trace):
# Shortcut to reference class static variables
ME = Workstream_Controller._MyPostingLabel
return self._getField(parent_trace, ME._SCORING_CYCLE)
def scoring_maturity(self, parent_trace):
# Shortcut to reference class static variables
ME = Workstream_Controller._MyPostingLabel
return self._getField(parent_trace, ME._SCORING_MATURITY)
|
the-stack_0_8065 | #
# This file is part of pretix (Community Edition).
#
# Copyright (C) 2014-2020 Raphael Michel and contributors
# Copyright (C) 2020-2021 rami.io GmbH and contributors
#
# This program is free software: you can redistribute it and/or modify it under the terms of the GNU Affero General
# Public License as published by the Free Software Foundation in version 3 of the License.
#
# ADDITIONAL TERMS APPLY: Pursuant to Section 7 of the GNU Affero General Public License, additional terms are
# applicable granting you additional permissions and placing additional restrictions on your usage of this software.
# Please refer to the pretix LICENSE file to obtain the full terms applicable to this work. If you did not receive
# this file, see <https://pretix.eu/about/en/license>.
#
# This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied
# warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for more
# details.
#
# You should have received a copy of the GNU Affero General Public License along with this program. If not, see
# <https://www.gnu.org/licenses/>.
#
# This file is based on an earlier version of pretix which was released under the Apache License 2.0. The full text of
# the Apache License 2.0 can be obtained at <http://www.apache.org/licenses/LICENSE-2.0>.
#
# This file may have since been changed and any changes are released under the terms of AGPLv3 as described above. A
# full history of changes and contributors is available at <https://github.com/pretix/pretix>.
#
# This file contains Apache-licensed contributions copyrighted by: Bolutife Lawrence, Maico Timmerman
#
# Unless required by applicable law or agreed to in writing, software distributed under the Apache License 2.0 is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under the License.
from decimal import Decimal
from urllib.parse import urlparse
from django import forms
from django.conf import settings
from django.core.exceptions import ValidationError
from django.db.models import Q
from django.utils.crypto import get_random_string
from django.utils.safestring import mark_safe
from django.utils.translation import gettext_lazy as _, pgettext_lazy
from django_scopes.forms import SafeModelMultipleChoiceField
from i18nfield.forms import I18nFormField, I18nTextarea
from pytz import common_timezones
from pretix.api.models import WebHook
from pretix.api.webhooks import get_all_webhook_events
from pretix.base.forms import I18nModelForm, PlaceholderValidator, SettingsForm
from pretix.base.forms.questions import NamePartsFormField
from pretix.base.forms.widgets import SplitDateTimePickerWidget
from pretix.base.models import (
Customer, Device, EventMetaProperty, Gate, GiftCard, Membership,
MembershipType, Organizer, Team,
)
from pretix.base.settings import PERSON_NAME_SCHEMES, PERSON_NAME_TITLE_GROUPS
from pretix.control.forms import (
ExtFileField, SMTPSettingsMixin, SplitDateTimeField,
)
from pretix.control.forms.event import (
SafeEventMultipleChoiceField, multimail_validate,
)
from pretix.multidomain.models import KnownDomain
from pretix.multidomain.urlreverse import build_absolute_uri
class OrganizerForm(I18nModelForm):
error_messages = {
'duplicate_slug': _("This slug is already in use. Please choose a different one."),
}
class Meta:
model = Organizer
fields = ['name', 'slug']
def clean_slug(self):
slug = self.cleaned_data['slug']
if Organizer.objects.filter(slug__iexact=slug).exists():
raise forms.ValidationError(
self.error_messages['duplicate_slug'],
code='duplicate_slug',
)
return slug
class OrganizerDeleteForm(forms.Form):
error_messages = {
'slug_wrong': _("The slug you entered was not correct."),
}
slug = forms.CharField(
max_length=255,
label=_("Event slug"),
)
def __init__(self, *args, **kwargs):
self.organizer = kwargs.pop('organizer')
super().__init__(*args, **kwargs)
def clean_slug(self):
slug = self.cleaned_data.get('slug')
if slug != self.organizer.slug:
raise forms.ValidationError(
self.error_messages['slug_wrong'],
code='slug_wrong',
)
return slug
class OrganizerUpdateForm(OrganizerForm):
def __init__(self, *args, **kwargs):
self.domain = kwargs.pop('domain', False)
self.change_slug = kwargs.pop('change_slug', False)
kwargs.setdefault('initial', {})
self.instance = kwargs['instance']
if self.domain and self.instance:
initial_domain = self.instance.domains.filter(event__isnull=True).first()
if initial_domain:
kwargs['initial'].setdefault('domain', initial_domain.domainname)
super().__init__(*args, **kwargs)
if not self.change_slug:
self.fields['slug'].widget.attrs['readonly'] = 'readonly'
if self.domain:
self.fields['domain'] = forms.CharField(
max_length=255,
label=_('Custom domain'),
required=False,
help_text=_('You need to configure the custom domain in the webserver beforehand.')
)
def clean_domain(self):
d = self.cleaned_data['domain']
if d:
if d == urlparse(settings.SITE_URL).hostname:
raise ValidationError(
_('You cannot choose the base domain of this installation.')
)
if KnownDomain.objects.filter(domainname=d).exclude(organizer=self.instance.pk,
event__isnull=True).exists():
raise ValidationError(
_('This domain is already in use for a different event or organizer.')
)
return d
def clean_slug(self):
if self.change_slug:
return self.cleaned_data['slug']
return self.instance.slug
def save(self, commit=True):
instance = super().save(commit)
if self.domain:
current_domain = instance.domains.first()
if self.cleaned_data['domain']:
if current_domain and current_domain.domainname != self.cleaned_data['domain']:
current_domain.delete()
KnownDomain.objects.create(organizer=instance, domainname=self.cleaned_data['domain'])
elif not current_domain:
KnownDomain.objects.create(organizer=instance, domainname=self.cleaned_data['domain'])
elif current_domain:
current_domain.delete()
instance.cache.clear()
for ev in instance.events.all():
ev.cache.clear()
return instance
class EventMetaPropertyForm(forms.ModelForm):
class Meta:
model = EventMetaProperty
fields = ['name', 'default', 'required', 'protected', 'allowed_values']
widgets = {
'default': forms.TextInput()
}
class MembershipTypeForm(I18nModelForm):
class Meta:
model = MembershipType
fields = ['name', 'transferable', 'allow_parallel_usage', 'max_usages']
class TeamForm(forms.ModelForm):
def __init__(self, *args, **kwargs):
organizer = kwargs.pop('organizer')
super().__init__(*args, **kwargs)
self.fields['limit_events'].queryset = organizer.events.all().order_by(
'-has_subevents', '-date_from'
)
class Meta:
model = Team
fields = ['name', 'all_events', 'limit_events', 'can_create_events',
'can_change_teams', 'can_change_organizer_settings',
'can_manage_gift_cards', 'can_manage_customers',
'can_change_event_settings', 'can_change_items',
'can_view_orders', 'can_change_orders', 'can_checkin_orders',
'can_view_vouchers', 'can_change_vouchers']
widgets = {
'limit_events': forms.CheckboxSelectMultiple(attrs={
'data-inverse-dependency': '#id_all_events',
'class': 'scrolling-multiple-choice scrolling-multiple-choice-large',
}),
}
field_classes = {
'limit_events': SafeEventMultipleChoiceField
}
def clean(self):
data = super().clean()
if self.instance.pk and not data['can_change_teams']:
if not self.instance.organizer.teams.exclude(pk=self.instance.pk).filter(
can_change_teams=True, members__isnull=False
).exists():
raise ValidationError(_('The changes could not be saved because there would be no remaining team with '
'the permission to change teams and permissions.'))
return data
class GateForm(forms.ModelForm):
def __init__(self, *args, **kwargs):
kwargs.pop('organizer')
super().__init__(*args, **kwargs)
class Meta:
model = Gate
fields = ['name', 'identifier']
class DeviceForm(forms.ModelForm):
def __init__(self, *args, **kwargs):
organizer = kwargs.pop('organizer')
super().__init__(*args, **kwargs)
self.fields['limit_events'].queryset = organizer.events.all().order_by(
'-has_subevents', '-date_from'
)
self.fields['gate'].queryset = organizer.gates.all()
def clean(self):
d = super().clean()
if not d['all_events'] and not d['limit_events']:
raise ValidationError(_('Your device will not have access to anything, please select some events.'))
return d
class Meta:
model = Device
fields = ['name', 'all_events', 'limit_events', 'security_profile', 'gate']
widgets = {
'limit_events': forms.CheckboxSelectMultiple(attrs={
'data-inverse-dependency': '#id_all_events',
'class': 'scrolling-multiple-choice scrolling-multiple-choice-large',
}),
}
field_classes = {
'limit_events': SafeEventMultipleChoiceField
}
class OrganizerSettingsForm(SettingsForm):
timezone = forms.ChoiceField(
choices=((a, a) for a in common_timezones),
label=_("Default timezone"),
)
name_scheme = forms.ChoiceField(
label=_("Name format"),
help_text=_("This defines how pretix will ask for human names. Changing this after you already received "
"orders might lead to unexpected behavior when sorting or changing names."),
required=True,
)
name_scheme_titles = forms.ChoiceField(
label=_("Allowed titles"),
help_text=_("If the naming scheme you defined above allows users to input a title, you can use this to "
"restrict the set of selectable titles."),
required=False,
)
auto_fields = [
'customer_accounts',
'customer_accounts_link_by_email',
'invoice_regenerate_allowed',
'contact_mail',
'imprint_url',
'organizer_info_text',
'event_list_type',
'event_list_availability',
'organizer_homepage_text',
'organizer_link_back',
'organizer_logo_image_large',
'organizer_logo_image_inherit',
'giftcard_length',
'giftcard_expiry_years',
'locales',
'region',
'meta_noindex',
'event_team_provisioning',
'primary_color',
'theme_color_success',
'theme_color_danger',
'theme_color_background',
'theme_round_borders',
'primary_font',
'privacy_url',
'cookie_consent',
'cookie_consent_dialog_title',
'cookie_consent_dialog_text',
'cookie_consent_dialog_text_secondary',
'cookie_consent_dialog_button_yes',
'cookie_consent_dialog_button_no',
]
organizer_logo_image = ExtFileField(
label=_('Header image'),
ext_whitelist=(".png", ".jpg", ".gif", ".jpeg"),
max_size=settings.FILE_UPLOAD_MAX_SIZE_IMAGE,
required=False,
help_text=_('If you provide a logo image, we will by default not show your organization name '
'in the page header. By default, we show your logo with a size of up to 1140x120 pixels. You '
'can increase the size with the setting below. We recommend not using small details on the picture '
'as it will be resized on smaller screens.')
)
favicon = ExtFileField(
label=_('Favicon'),
ext_whitelist=(".ico", ".png", ".jpg", ".gif", ".jpeg"),
required=False,
max_size=settings.FILE_UPLOAD_MAX_SIZE_FAVICON,
help_text=_('If you provide a favicon, we will show it instead of the default pretix icon. '
'We recommend a size of at least 200x200px to accommodate most devices.')
)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.fields['name_scheme'].choices = (
(k, _('Ask for {fields}, display like {example}').format(
fields=' + '.join(str(vv[1]) for vv in v['fields']),
example=v['concatenation'](v['sample'])
))
for k, v in PERSON_NAME_SCHEMES.items()
)
self.fields['name_scheme_titles'].choices = [('', _('Free text input'))] + [
(k, '{scheme}: {samples}'.format(
scheme=v[0],
samples=', '.join(v[1])
))
for k, v in PERSON_NAME_TITLE_GROUPS.items()
]
class MailSettingsForm(SMTPSettingsMixin, SettingsForm):
auto_fields = [
'mail_from',
'mail_from_name',
]
mail_bcc = forms.CharField(
label=_("Bcc address"),
help_text=_("All emails will be sent to this address as a Bcc copy"),
validators=[multimail_validate],
required=False,
max_length=255
)
mail_text_signature = I18nFormField(
label=_("Signature"),
required=False,
widget=I18nTextarea,
help_text=_("This will be attached to every email."),
validators=[PlaceholderValidator([])],
widget_kwargs={'attrs': {
'rows': '4',
'placeholder': _(
'e.g. your contact details'
)
}}
)
mail_text_customer_registration = I18nFormField(
label=_("Text"),
required=False,
widget=I18nTextarea,
)
mail_text_customer_email_change = I18nFormField(
label=_("Text"),
required=False,
widget=I18nTextarea,
)
mail_text_customer_reset = I18nFormField(
label=_("Text"),
required=False,
widget=I18nTextarea,
)
base_context = {
'mail_text_customer_registration': ['customer', 'url'],
'mail_text_customer_email_change': ['customer', 'url'],
'mail_text_customer_reset': ['customer', 'url'],
}
def _get_sample_context(self, base_parameters):
placeholders = {
'organizer': self.organizer.name
}
if 'url' in base_parameters:
placeholders['url'] = build_absolute_uri(
self.organizer,
'presale:organizer.customer.activate'
) + '?token=' + get_random_string(30)
if 'customer' in base_parameters:
placeholders['name'] = pgettext_lazy('person_name_sample', 'John Doe')
name_scheme = PERSON_NAME_SCHEMES[self.organizer.settings.name_scheme]
for f, l, w in name_scheme['fields']:
if f == 'full_name':
continue
placeholders['name_%s' % f] = name_scheme['sample'][f]
return placeholders
def _set_field_placeholders(self, fn, base_parameters):
phs = [
'{%s}' % p
for p in sorted(self._get_sample_context(base_parameters).keys())
]
ht = _('Available placeholders: {list}').format(
list=', '.join(phs)
)
if self.fields[fn].help_text:
self.fields[fn].help_text += ' ' + str(ht)
else:
self.fields[fn].help_text = ht
self.fields[fn].validators.append(
PlaceholderValidator(phs)
)
def __init__(self, *args, **kwargs):
self.organizer = kwargs.get('obj')
super().__init__(*args, **kwargs)
for k, v in self.base_context.items():
self._set_field_placeholders(k, v)
class WebHookForm(forms.ModelForm):
events = forms.MultipleChoiceField(
widget=forms.CheckboxSelectMultiple,
label=pgettext_lazy('webhooks', 'Event types')
)
def __init__(self, *args, **kwargs):
organizer = kwargs.pop('organizer')
super().__init__(*args, **kwargs)
self.fields['limit_events'].queryset = organizer.events.all()
self.fields['events'].choices = [
(
a.action_type,
mark_safe('{} – <code>{}</code>'.format(a.verbose_name, a.action_type))
) for a in get_all_webhook_events().values()
]
if self.instance:
self.fields['events'].initial = list(self.instance.listeners.values_list('action_type', flat=True))
class Meta:
model = WebHook
fields = ['target_url', 'enabled', 'all_events', 'limit_events']
widgets = {
'limit_events': forms.CheckboxSelectMultiple(attrs={
'data-inverse-dependency': '#id_all_events'
}),
}
field_classes = {
'limit_events': SafeModelMultipleChoiceField
}
class GiftCardCreateForm(forms.ModelForm):
value = forms.DecimalField(
label=_('Gift card value'),
min_value=Decimal('0.00')
)
def __init__(self, *args, **kwargs):
self.organizer = kwargs.pop('organizer')
initial = kwargs.pop('initial', {})
initial['expires'] = self.organizer.default_gift_card_expiry
kwargs['initial'] = initial
super().__init__(*args, **kwargs)
def clean_secret(self):
s = self.cleaned_data['secret']
if GiftCard.objects.filter(
secret__iexact=s
).filter(
Q(issuer=self.organizer) | Q(issuer__gift_card_collector_acceptance__collector=self.organizer)
).exists():
raise ValidationError(
_('A gift card with the same secret already exists in your or an affiliated organizer account.')
)
return s
class Meta:
model = GiftCard
fields = ['secret', 'currency', 'testmode', 'expires', 'conditions']
field_classes = {
'expires': SplitDateTimeField
}
widgets = {
'expires': SplitDateTimePickerWidget,
'conditions': forms.Textarea(attrs={"rows": 2})
}
class GiftCardUpdateForm(forms.ModelForm):
class Meta:
model = GiftCard
fields = ['expires', 'conditions']
field_classes = {
'expires': SplitDateTimeField
}
widgets = {
'expires': SplitDateTimePickerWidget,
'conditions': forms.Textarea(attrs={"rows": 2})
}
class CustomerUpdateForm(forms.ModelForm):
error_messages = {
'duplicate': _("An account with this email address is already registered."),
}
class Meta:
model = Customer
fields = ['is_active', 'name_parts', 'email', 'is_verified', 'locale']
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.fields['name_parts'] = NamePartsFormField(
max_length=255,
required=False,
scheme=self.instance.organizer.settings.name_scheme,
titles=self.instance.organizer.settings.name_scheme_titles,
label=_('Name'),
)
def clean(self):
email = self.cleaned_data.get('email')
if email is not None:
try:
self.instance.organizer.customers.exclude(pk=self.instance.pk).get(email=email)
except Customer.DoesNotExist:
pass
else:
raise forms.ValidationError(
self.error_messages['duplicate'],
code='duplicate',
)
return self.cleaned_data
class CustomerCreateForm(CustomerUpdateForm):
class Meta:
model = Customer
fields = ['identifier', 'is_active', 'name_parts', 'email', 'is_verified', 'locale']
class MembershipUpdateForm(forms.ModelForm):
class Meta:
model = Membership
fields = ['testmode', 'membership_type', 'date_start', 'date_end', 'attendee_name_parts', 'canceled']
field_classes = {
'date_start': SplitDateTimeField,
'date_end': SplitDateTimeField,
}
widgets = {
'date_start': SplitDateTimePickerWidget(),
'date_end': SplitDateTimePickerWidget(attrs={'data-date-after': '#id_date_start'}),
}
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
if self.instance and self.instance.pk:
del self.fields['testmode']
self.fields['membership_type'].queryset = self.instance.customer.organizer.membership_types.all()
self.fields['attendee_name_parts'] = NamePartsFormField(
max_length=255,
required=False,
scheme=self.instance.customer.organizer.settings.name_scheme,
titles=self.instance.customer.organizer.settings.name_scheme_titles,
label=_('Attendee name'),
)
|
the-stack_0_8070 | from django.db import migrations
def create_site(apps, schema_editor):
Site = apps.get_model("sites", "Site")
custom_domain = "to-the-point-29960.botics.co"
site_params = {
"name": "To the point",
}
if custom_domain:
site_params["domain"] = custom_domain
Site.objects.update_or_create(defaults=site_params, id=1)
class Migration(migrations.Migration):
dependencies = [
("sites", "0002_alter_domain_unique"),
]
operations = [
migrations.RunPython(create_site),
]
|
the-stack_0_8075 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class VaultPatchProperties(Model):
"""Properties of the vault.
:param tenant_id: The Azure Active Directory tenant ID that should be used
for authenticating requests to the key vault.
:type tenant_id: str
:param sku: SKU details
:type sku: ~azure.mgmt.keyvault.v2016_10_01.models.Sku
:param access_policies: An array of 0 to 16 identities that have access to
the key vault. All identities in the array must use the same tenant ID as
the key vault's tenant ID.
:type access_policies:
list[~azure.mgmt.keyvault.v2016_10_01.models.AccessPolicyEntry]
:param enabled_for_deployment: Property to specify whether Azure Virtual
Machines are permitted to retrieve certificates stored as secrets from the
key vault.
:type enabled_for_deployment: bool
:param enabled_for_disk_encryption: Property to specify whether Azure Disk
Encryption is permitted to retrieve secrets from the vault and unwrap
keys.
:type enabled_for_disk_encryption: bool
:param enabled_for_template_deployment: Property to specify whether Azure
Resource Manager is permitted to retrieve secrets from the key vault.
:type enabled_for_template_deployment: bool
:param enable_soft_delete: Property specifying whether recoverable
deletion ('soft' delete) is enabled for this key vault. The property may
not be set to false.
:type enable_soft_delete: bool
:param create_mode: The vault's create mode to indicate whether the vault
need to be recovered or not. Possible values include: 'recover', 'default'
:type create_mode: str or
~azure.mgmt.keyvault.v2016_10_01.models.CreateMode
:param enable_purge_protection: Property specifying whether protection
against purge is enabled for this vault; it is only effective if soft
delete is also enabled. Once activated, the property may no longer be
reset to false.
:type enable_purge_protection: bool
"""
_attribute_map = {
'tenant_id': {'key': 'tenantId', 'type': 'str'},
'sku': {'key': 'sku', 'type': 'Sku'},
'access_policies': {'key': 'accessPolicies', 'type': '[AccessPolicyEntry]'},
'enabled_for_deployment': {'key': 'enabledForDeployment', 'type': 'bool'},
'enabled_for_disk_encryption': {'key': 'enabledForDiskEncryption', 'type': 'bool'},
'enabled_for_template_deployment': {'key': 'enabledForTemplateDeployment', 'type': 'bool'},
'enable_soft_delete': {'key': 'enableSoftDelete', 'type': 'bool'},
'create_mode': {'key': 'createMode', 'type': 'CreateMode'},
'enable_purge_protection': {'key': 'enablePurgeProtection', 'type': 'bool'},
}
def __init__(self, **kwargs):
super(VaultPatchProperties, self).__init__(**kwargs)
self.tenant_id = kwargs.get('tenant_id', None)
self.sku = kwargs.get('sku', None)
self.access_policies = kwargs.get('access_policies', None)
self.enabled_for_deployment = kwargs.get('enabled_for_deployment', None)
self.enabled_for_disk_encryption = kwargs.get('enabled_for_disk_encryption', None)
self.enabled_for_template_deployment = kwargs.get('enabled_for_template_deployment', None)
self.enable_soft_delete = kwargs.get('enable_soft_delete', None)
self.create_mode = kwargs.get('create_mode', None)
self.enable_purge_protection = kwargs.get('enable_purge_protection', None)
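# A minimal usage sketch (illustrative, not part of the generated SDK file; the
# tenant ID below is a placeholder value):
#
#   patch_properties = VaultPatchProperties(
#       tenant_id="00000000-0000-0000-0000-000000000000",
#       enabled_for_deployment=True,
#       enable_soft_delete=True,
#   )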
|
the-stack_0_8077 | """
Get Line Intersection
Gets the intersection of two lines
TESTED REVIT API: 2017
Author: Gui Talarico | github.com/gtalarico
This file is shared on www.revitapidocs.com
For more information visit http://github.com/gtalarico/revitapidocs
License: http://github.com/gtalarico/revitapidocs/blob/master/LICENSE.md
"""
import clr
from Autodesk.Revit.DB import Line, XYZ
from Autodesk.Revit.DB import SetComparisonResult, IntersectionResultArray
def get_intersection(line1, line2):
results = clr.Reference[IntersectionResultArray]()
# See ironpython.net/documentation/dotnet for clr.Reference
result = line1.Intersect(line2, results)
# http://www.revitapidocs.com/2018/51961478-fb36-e00b-2d1b-7db27b0a09e6.htm
if result != SetComparisonResult.Overlap:
print('No Intersection')
intersection = results.Item[0]
return intersection.XYZPoint
line1 = Line.CreateBound(XYZ(0,0,0), XYZ(10,0,0))
line2 = Line.CreateBound(XYZ(5,-5,0), XYZ(5,5,0))
point = get_intersection(line1, line2)
print(point)
# <Autodesk.Revit.DB.XYZ object at 0x00000000000001BA [(5.000000000, 0.000000000, 0.000000000)]>
"""
From this discussion:
https://forum.dynamobim.com/t/translating-to-python/13481
C# Equivalent
private XYZ GetIntersection(
Line line1,
Line line2 )
{
IntersectionResultArray results;
SetComparisonResult result
= line1.Intersect( line2, out results );
if( result != SetComparisonResult.Overlap )
throw new InvalidOperationException(
"Input lines did not intersect." );
if( results == null || results.Size != 1 )
throw new InvalidOperationException(
"Could not extract line intersection point." );
IntersectionResult iResult
= results.get_Item( 0 );
return iResult.XYZPoint;
}
"""
|
the-stack_0_8079 | #####DONORSCHOOSE FUNCTIONS
import datetime
import pandas as pd  # pd.Timestamp is referenced in elapsedseconds below
from datetime import timedelta, date #for time duration calculations
from dateutil.parser import parse #for fuzzy finding year
def elapseddays(posted, completed):
formatuse = '%Y-%m-%d %H:%M:%S' # The format: see down this page:https://docs.python.org/3/library/datetime.html
otherformat = '%Y-%m-%d'
try:
elapsed_days=completed-posted
except:
try:
elapsed_days = datetime.datetime.strptime(completed,formatuse)-datetime.datetime.strptime(posted,formatuse)
except:
try:
elapsed_days = datetime.datetime.strptime(completed,otherformat)-datetime.datetime.strptime(posted,otherformat)
except:
elapsed_days = 'error'
return(elapsed_days)
def elapsedseconds(posted, completed):
formatuse = '%Y-%m-%d %H:%M:%S' # The format: see down this page:https://docs.python.org/3/library/datetime.html
otherformat = '%Y-%m-%d'
if isinstance(completed, datetime.datetime) or (type(completed) is pd.Timestamp):  # end time already a datetime/Timestamp
clock = completed
else:
try:
clock = datetime.datetime.strptime(completed,formatuse)
except:
clock = datetime.datetime.strptime(completed,otherformat)
if isinstance(posted, datetime.datetime) or (type(posted) is pd.Timestamp):  # start time already a datetime/Timestamp
startclock = posted
else:
try:
startclock = datetime.datetime.strptime(posted,formatuse)
except:
startclock = datetime.datetime.strptime(posted,otherformat)
elapsed = (clock-startclock).total_seconds()
return(elapsed)
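# Example (hand-computed, string inputs take the strptime path above):
#   elapsedseconds('2019-01-01 00:00:00', '2019-01-02 06:00:00') -> 108000.0 (30 hours)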
intervals = (
('weeks', 604800), # 60 * 60 * 24 * 7
('days', 86400), # 60 * 60 * 24
('hours', 3600), # 60 * 60
('minutes', 60),
('seconds', 1),
)
def display_time(seconds, granularity=2):
result = []
for name, count in intervals:
value = seconds // count
if value:
seconds -= value * count
if value == 1:
name = name.rstrip('s')
result.append("{} {}".format(value, name))
return ', '.join(result[:granularity])
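# Example (hand-computed): display_time(7261) -> '2 hours, 1 minute'
# (7261 s = 2 h + 1 min + 1 s; only the two largest units are kept at the
# default granularity of 2).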
# Function to convert seconds into a day.decimal value
def ConvertSectoDay(n):
day = n // (24 * 3600)
#print(day) #keep day
n = n % (24 * 3600)
daydec=(n/86400) # add this to day
addem=day+daydec
#https://stackoverflow.com/a/48812729/1602288
holder='{:g}'.format(float('{:.{p}g}'.format(addem, p=5)))
return(float(holder))
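# Example (hand-computed): ConvertSectoDay(129600) -> 1.5
# (129600 s = 1 whole day plus 43200 s, i.e. half a day, expressed as day.decimal)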
def projectover(posted, completed,expiration):
formatuse = '%Y-%m-%d %H:%M:%S' # The format: see down this page:https://docs.python.org/3/library/datetime.html
otherformat = '%Y-%m-%d'
#failed projects were never completed, so in those cases, use the expiration date
# if variable is None:
if completed is None:
try:
clock = datetime.datetime.strptime(expiration,formatuse)
except:
try:
clock = datetime.datetime.strptime(expiration,otherformat)
except:
clock = datetime.datetime.strptime('1900-01-01',otherformat)
else:
try:
clock = datetime.datetime.strptime(completed,formatuse)
except:
try:
clock = datetime.datetime.strptime(completed,otherformat)
except:
clock = datetime.datetime.strptime('1900-01-01',otherformat)
return(clock)
def makedate(posted):
formatuse = '%Y-%m-%d %H:%M:%S' # The format: see down this page:https://docs.python.org/3/library/datetime.html
otherformat = '%Y-%m-%d'
try:
clock = datetime.datetime.strptime(posted,formatuse)
except:
try:
clock = datetime.datetime.strptime(posted,otherformat)
except:
clock = datetime.datetime.strptime('1900-01-01',otherformat)
return(clock)
def Convert_to_clock_x(m):
m=int(m)
if m == 1:
a = 1
if m == 2:
a = 2
if m == 3:
a = 3
if m == 4:
a = 2
if m == 5:
a = 1
if m == 6:
a = 0
if m == 7:
a = -1
if m == 8:
a = -2
if m == 9:
a = -3
if m == 10:
a = -2
if m == 11:
a = -1
if m == 12:
a = 0
return(a)
def Convert_to_clock_y(m):
m=int(m)
if m == 1:
a = 2
if m == 2:
a = 1
if m == 3:
a = 0
if m == 4:
a = -1
if m == 5:
a = -2
if m == 6:
a = -3
if m == 7:
a = -2
if m == 8:
a = -1
if m == 9:
a = 0
if m == 10:
a = 1
if m == 11:
a = 2
if m == 12:
a = 3
return(a)
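# Taken together, Convert_to_clock_x and Convert_to_clock_y place each month number
# on a clock face of radius 3: e.g. month 12 maps to (0, 3) (12 o'clock, pointing up)
# and month 3 maps to (3, 0) (3 o'clock, pointing right). This reading is inferred
# from the tables above, not stated in the original source.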
import matplotlib.pyplot as plt
import seaborn as sns
#function for producing nice, smoothed line plots sorted by categorical variable, of a continues (var_dist) variable
def comp_dist(df_to_use, cat_to_subset, var_dist, figw,figh,linew):
plt.figure(figsize=(figw,figh))
sns.set_context( rc={"lines.linewidth": linew})
for grp in sorted(df_to_use[cat_to_subset].unique()):
grp_df = df_to_use.loc[df_to_use[cat_to_subset] == grp]
sns.distplot(grp_df[var_dist], hist=False, label=grp)
plt.xlim(0, 90)
plt.show()
import math
def getxy(day):
x = math.sin((180 - day * 0.9849521203830369)/180 * 3.141)
y = math.cos((180 - day * 0.9849521203830369)/180 * 3.141)
return x, y
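# Rough sanity check (hand-computed): the constant 0.98495... spreads roughly 365.5
# days around a full circle, so getxy(0) is approximately (0.0, -1.0) and a day about
# half a year later lands near (0.0, 1.0). Values are approximate because 3.141 is
# used in place of math.pi.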
|
the-stack_0_8085 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2016 Timothy Dozat
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import os
import re
import argparse
import numpy as np
from collections import defaultdict
#***************************************************************
class DepTree:
""" """
#=============================================================
def __init__(self, buff):
""" """
self._head2deps = defaultdict(list)
self._dep2head = dict()
self._str = []
for line in buff:
dep_idx = int(line[0])
head_idx = int(line[6])
self.head2deps[head_idx].append(dep_idx)
self.dep2head[dep_idx] = head_idx
self._str.append(line[1])
return
#=============================================================
def count_nonprojective(self):
""" """
nonproj = []
for dep in self:
head = self.dep2head[dep]
span_min = min(dep, head)
span_max = max(dep, head)
for mid_dep in range(span_min+1, span_max):
mid_head = self.dep2head[mid_dep]
if mid_head < span_min or mid_head > span_max:
crossing = True
break
else:
crossing = False
nonproj.append(int(crossing))
return nonproj
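# Hand-worked example (not in the original source): with an arc from token 1 to
# head 3 and an arc from token 2 to head 4, token 2 lies strictly between 1 and 3
# but its head 4 falls outside that span, so the 1->3 dependency is counted as
# crossing (non-projective) by the loop above.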
#=============================================================
@property
def head2deps(self):
return self._head2deps
@property
def dep2head(self):
return self._dep2head
#=============================================================
def __iter__(self):
return (dep for dep in self.dep2head)
def __len__(self):
return len(self.dep2head)
def __str__(self):
return ' '.join(self._str)+'\n'
#***************************************************************
if __name__ == '__main__':
""" """
parser = argparse.ArgumentParser()
parser.add_argument('files', nargs='+')
args = parser.parse_args()
for filename in args.files:
lang = re.search('([-\w]*)-ud', filename).group(1)
nonproj = []
with open(filename) as f:
buff = []
for line in f:
line = line.strip()
if line:
if not re.match('#|[0-9]+[-.][0-9]+', line):
buff.append(line.split('\t'))
else:
tree = DepTree(buff)
nonproj.extend(tree.count_nonprojective())
buff = []
print(lang, np.mean(nonproj)*100,file=sys.stderr)
|
the-stack_0_8086 | from simple_rest_client.api import API
from simple_rest_client.resource import Resource
class FileUploadResource(Resource):
actions = {"create": {"method": "POST", "url": "post.php?dir=example"}}
# http://blog.henrycipolla.com/2011/12/testing-multipartform-data-uploads-with-post-test-server/
files = {"file": open("github.py", "rb")}
post_test_server_api = API(api_root_url="http://posttestserver.com/", timeout=10)
post_test_server_api.add_resource(resource_name="file_upload", resource_class=FileUploadResource)
print(
"post_test_server_api.file_upload.create={!r}".format(
post_test_server_api.file_upload.create(files=files).body
)
)
|
the-stack_0_8088 | """Container for creating and displaying diffs."""
import copy
import difflib
import json
import pygments
from pygments import formatters, lexers
DIFF_LEXER = lexers.get_lexer_by_name('diff')
DIFF_FORMATTER = formatters.get_formatter_by_name('terminal16m')
class DiffText:
"""Generic text diffs."""
def __init__(self, content):
self.original_content = content
self.preview = None
self.before = None
self.after = None
self._diff_lines = None
def __enter__(self):
self.preview = copy.deepcopy(self.original_content)
self.before = self.copy()
return self.preview
def __exit__(self, exc_type, exc_value, traceback):
self.after = self.copy()
def copy(self):
"""Duplicate string for modification."""
return str(self.preview)
@property
def diff(self):
"""Generate diff."""
_diff = difflib.unified_diff(
self.before.split('\n'),
self.after.split('\n'),
fromfile='before changes',
tofile='after changes',
lineterm='',
)
self._diff_lines = list(_diff)
return self._diff_lines
@property
def highlighted(self):
"""Return syntax highlighted diff."""
diff = '\n'.join(self.diff)
highlighted_diff = pygments.highlight(diff, DIFF_LEXER, DIFF_FORMATTER)
highlighted_diff = highlighted_diff.rstrip('\n')
return highlighted_diff
class DiffJson(DiffText):
"""JSON diff."""
def copy(self):
"""Convert contents into static JSON string."""
return json.dumps(self.preview, indent=2)
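# A minimal usage sketch (not part of the original module); it shows the intended
# enter/exit flow: edits made on the yielded copy are diffed against the original.
if __name__ == '__main__':
    sample = DiffJson({'name': 'alice', 'role': 'dev'})
    with sample as preview:
        preview['role'] = 'admin'  # mutate the working copy
    print('\n'.join(sample.diff))  # unified diff of before vs. after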
|
the-stack_0_8089 | # -*- coding: utf-8 -*-
# ---------------------------------------------------------------------
# RCACondition
# ---------------------------------------------------------------------
# Copyright (C) 2007-2019, The NOC Project
# See LICENSE for details
# ---------------------------------------------------------------------
# Python modules
import datetime
# Third-party modules
import six
from bson import ObjectId
@six.python_2_unicode_compatible
class RCACondition(object):
def __init__(self, alarm_class, condition):
self.name = "%s::%s" % (alarm_class.name, condition.name)
self.window = condition.window
self.root = condition.root
self.same_object = False
# Build condition expression
self.condition = compile(condition.condition, "<string>", "eval")
# Build match condition expression
x = [
"'alarm_class': ObjectId('%s')" % self.root.id,
"'timestamp__gte': alarm.timestamp - datetime.timedelta(seconds=%d)" % self.window,
"'timestamp__lte': alarm.timestamp + datetime.timedelta(seconds=%d)" % self.window,
]
if self.root.id == alarm_class.id:
x += ["'id__ne': alarm.id"]
for k, v in six.iteritems(condition.match_condition):
if k == "managed_object" and v == "alarm.managed_object.id":
self.same_object = True
x += ["'%s': %s" % (k, v)]
self.match_condition = compile("{%s}" % ", ".join(x), "<string>", "eval")
# Build reverse match condition expression
x = [
"'alarm_class': ObjectId('%s')" % alarm_class.id,
"'root__exists': False",
"'timestamp__gte': alarm.timestamp - datetime.timedelta(seconds=%d)" % self.window,
"'timestamp__lte': alarm.timestamp + datetime.timedelta(seconds=%d)" % self.window,
]
if self.root.id == alarm_class.id:
x += ["'id__ne': alarm.id"]
if self.same_object:
x += ["'managed_object': alarm.managed_object"]
self.reverse_match_condition = compile("{%s}" % ", ".join(x), "<string>", "eval")
def __str__(self):
return self.name
def get_context(self, alarm):
return {"alarm": alarm, "datetime": datetime, "ObjectId": ObjectId}
def check_condition(self, alarm):
return eval(self.condition, {}, self.get_context(alarm))
def get_match_condition(self, alarm, **kwargs):
r = eval(self.match_condition, {}, self.get_context(alarm))
if kwargs:
r.update(kwargs)
return r
def get_reverse_match_condition(self, alarm):
return eval(self.reverse_match_condition, {}, self.get_context(alarm))
|
the-stack_0_8091 | import rudra.utils.helper as helper
import requests
import pytest
def test_get_github_repo_info():
gh_repo1 = 'https://github.com/fabric8-analytics/f8a-hpf-insights'
gh_repo2 = 'https://github.com/fabric8-analytics/f8a-hpf-insights.git'
gh_repo3 = 'git+https://github.com/fabric8-analytics/f8a-hpf-insights'
gh_repo4 = 'fabric8-analytics/f8a-hpf-insights'
user, repo = helper.get_github_repo_info(gh_repo1)
assert user == 'fabric8-analytics' and repo == 'f8a-hpf-insights'
user, repo = helper.get_github_repo_info(gh_repo2)
assert user == 'fabric8-analytics' and repo == 'f8a-hpf-insights'
user, repo = helper.get_github_repo_info(gh_repo3)
assert user == 'fabric8-analytics' and repo == 'f8a-hpf-insights'
user, repo = helper.get_github_repo_info(gh_repo4)
assert user == 'fabric8-analytics' and repo == 'f8a-hpf-insights'
def test_get_training_file_url():
user = 'fabric8-analytics'
repo = 'f8a-hpf-insights'
file_url = helper.get_training_file_url(user, repo)
resp = requests.get(file_url)
assert resp.status_code == 200
file_url = helper.get_training_file_url(user, repo, branch='training-code')
resp = requests.get(file_url)
assert resp.status_code == 200
file_url = helper.get_training_file_url(
user, repo, training_file_path='src/flask_endpoint.py')
resp = requests.get(file_url)
assert resp.status_code == 200
def test_load_hyper_params():
# mock command line args
helper.argv = ['helper.py', '{"a": 111, "b": "some text"}']
hyper_params = helper.load_hyper_params()
assert hyper_params.get('a') == 111
assert hyper_params.get('b') == "some text"
def test_cache_dict_with_zero_max_size():
cache_dict = helper.CacheDict(0)
with pytest.raises(KeyError):
cache_dict['key1'] = 'value1'
assert len(cache_dict) == 0
def test_cache_dict_with_one_max_size():
cache_dict = helper.CacheDict(1)
cache_dict['key1'] = 'value1'
cache_dict['key2'] = 'value2'
assert len(cache_dict) == 1
assert 'key2' in cache_dict
assert 'key1' not in cache_dict
def test_cache_dict():
# default max_len = 1024
cache_dict = helper.CacheDict()
for i in range(2000):
cache_dict[i] = i * i
assert len(cache_dict) == cache_dict.max_len
assert cache_dict[i] == i * i
del cache_dict[i]
assert len(cache_dict) == cache_dict.max_len - 1
assert cache_dict[cache_dict.max_len - 2] == pow(cache_dict.max_len - 2, 2)
assert len(list(cache_dict)) == cache_dict.max_len - 1
assert str(cache_dict.max_len - 2) in str(cache_dict)
|
the-stack_0_8092 | #
def SortCharacters(s):
order = [0] * len(s)
count = {'$': 0, "A": 0, 'C': 0, 'G': 0, 'T': 0}
for char in s:
count[char] += 1
symb = ['$', 'A', 'C', 'G', 'T']
for i in range(1, 5):
count[symb[i]] += count[symb[i-1]]
for j in range(len(s) - 1, -1, -1):
c = s[j]
count[c] -= 1
order[count[c]] = j
return order
def ComputeCharClasses(s, order):
class_chars = [0] * len(s)
for i in range(1, len(s)):
if s[order[i]] == s[order[i-1]]:
class_chars[order[i]] = class_chars[order[i-1]]
else:
class_chars[order[i]] = class_chars[order[i-1]] + 1
return class_chars
def SortDoubled(s, L, old_order, old_class):
count = [0] * len(s)
new_order = [0] * len(s)
for i in range(len(s)):
count[old_class[i]] += 1
for i in range(1, len(s)):
count[i] += count[i-1]
for j in range(len(s) - 1, -1, -1):
start = (old_order[j] - L + len(s)) % len(s)
cl = old_class[start]
count[cl] -= 1
new_order[count[cl]] = start
return new_order
def UpdateClasses(new_order, old_class, L):
n = len(new_order)
new_class = [0] * n
for i in range(1, n):
cur = new_order[i]
mid = (cur + L) % n
prev = new_order[i-1]
mid_prev = (prev + L) % n
if old_class[cur] == old_class[prev] and old_class[mid] == old_class[mid_prev]:
new_class[cur] = new_class[prev]
else:
new_class[cur] = new_class[prev] + 1
return new_class
def BuildSuffixArray(S):
order = SortCharacters(S)
class_ = ComputeCharClasses(S, order)
L = 1
while L < len(S):
order = SortDoubled(S, L, order, class_)
class_ = UpdateClasses(order, class_, L)
L = 2 * L
return order
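# Hand-worked example (not in the original source): for S = "ACA$" the suffixes
# sort as "$" < "A$" < "ACA$" < "CA$", so BuildSuffixArray("ACA$") returns
# [3, 2, 0, 1].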
if __name__ == '__main__':
text = input()
suffix_array = BuildSuffixArray(text)
for elem in suffix_array:
print(elem, end=' ')
|
the-stack_0_8095 | # visualization functions
import matplotlib as mpl
import matplotlib.pyplot as plt
def plot_arrays(sample, output):
"""
Create a figure with two plots: the original sample, and a corresponding
prediction.
"""
assert len(sample.shape) == 2 and len(output.shape) == 2
cmap = mpl.colors.ListedColormap(['purple', 'white', 'black', 'orange'])
bounds = [-2.5, -.5, .5, 1.5, 2.5]
norm = mpl.colors.BoundaryNorm(bounds, cmap.N)
# calculate the difference. Since the output may be longer, align the
# difference to beginning of the arrays
diff = sample - output[:sample.shape[0], :sample.shape[1]]
diff *= -2.
# find the areas where the prediction doesn't match the sample
is_diff = diff != 0.
# change those locations in output so they plot to the correct color
output[:sample.shape[0],:sample.shape[1]][is_diff] = diff[is_diff]
# plot images using the appropriate color map
fig = plt.figure(1)
plt.subplot(121)
plt.imshow(sample, cmap=cmap, norm=norm)
plt.subplot(122)
img2 = plt.imshow(output, cmap=cmap, norm=norm)
bar = plt.colorbar(img2, cmap=cmap, norm=norm, boundaries=bounds, ticks=[-2, 0, 1, 2])
bar.ax.set_yticklabels(["False 0", "True 0", "True 1", "False 1"])
plt.show()
|
the-stack_0_8097 | from django.conf import settings
from rest_framework.routers import DefaultRouter, SimpleRouter
from app.users.api.views import UserViewSet
if settings.DEBUG:
router = DefaultRouter()
else:
router = SimpleRouter()
router.register("users", UserViewSet)
app_name = "api"
urlpatterns = router.urls
|
the-stack_0_8098 | #!/usr/bin/env python
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import argparse
from datetime import datetime
import tensorflow as tf
import metadata
import input
import model
# ******************************************************************************
# YOU MAY MODIFY THIS FUNCTION TO ADD/REMOVE PARAMS OR CHANGE THE DEFAULT VALUES
# ******************************************************************************
def initialise_hyper_params(args_parser):
"""
Define the arguments with the default values,
parses the arguments passed to the task,
and set the HYPER_PARAMS global variable
Args:
args_parser
"""
# data files arguments
args_parser.add_argument(
'--train-files',
help='GCS or local paths to training data',
nargs='+',
required=True
)
args_parser.add_argument(
'--eval-files',
help='GCS or local paths to evaluation data',
nargs='+',
required=True
)
args_parser.add_argument(
'--feature-stats-file',
help='GCS or local paths to feature statistics json file',
nargs='+',
default=None
)
# Experiment arguments - training
args_parser.add_argument(
'--train-steps',
help="""
Steps to run the training job for. If --num-epochs and --train-size are not specified,
this must be. Otherwise the training job will run indefinitely.
if --num-epochs and --train-size are specified, then --train-steps will be:
(train-size/train-batch-size) * num-epochs\
""",
default=1000,
type=int
)
args_parser.add_argument(
'--train-batch-size',
help='Batch size for each training step',
type=int,
default=200
)
args_parser.add_argument(
'--train-size',
help='Size of training set (instance count)',
type=int,
default=None
)
args_parser.add_argument(
'--num-epochs',
help="""\
Maximum number of training data epochs on which to train.
If both --train-size and --num-epochs are specified,
--train-steps will be: (train-size/train-batch-size) * num-epochs.\
""",
default=10,
type=int,
)
# Experiment arguments - evaluation
args_parser.add_argument(
'--eval-every-secs',
help='How long to wait before running the next evaluation',
default=120,
type=int
)
args_parser.add_argument(
'--eval-steps',
help="""\
Number of steps to run evaluation for at each checkpoint',
Set to None to evaluate on the whole evaluation data
""",
default=None,
type=int
)
args_parser.add_argument(
'--eval-batch-size',
help='Batch size for evaluation steps',
type=int,
default=200
)
# features processing arguments
args_parser.add_argument(
'--num-buckets',
help='Number of buckets into which to discretize numeric columns',
default=10,
type=int
)
args_parser.add_argument(
'--embedding-size',
help='Number of embedding dimensions for categorical columns. value of 0 means no embedding',
default=4,
type=int
)
# Estimator arguments
args_parser.add_argument(
'--learning-rate',
help="Learning rate value for the optimizers",
default=0.1,
type=float
)
args_parser.add_argument(
'--hidden-units',
help="""\
Hidden layer sizes to use for DNN feature columns, provided in comma-separated layers.
If --scale-factor > 0, then only the size of the first layer will be used to compute
the sizes of subsequent layers \
""",
default='64,32,16,8'
)
args_parser.add_argument(
'--layer-sizes-scale-factor',
help="""\
Determine how the size of the layers in the DNN decays.
If value = 0 then the provided --hidden-units will be taken as is\
""",
default=0.7,
type=float
)
args_parser.add_argument(
'--num-layers',
help='Number of layers in the DNN. If --scale-factor > 0, then this parameter is ignored',
default=3,
type=int
)
args_parser.add_argument(
'--dropout-prob',
help="The probability we will drop out a given coordinate",
default=None
)
args_parser.add_argument(
'--encode-one-hot',
help="""\
If set to True, the categorical columns will be encoded as One-Hot indicators in the deep part of the DNN model.
Otherwise, the categorical columns will only be used in the wide part of the DNN model
""",
action='store_true',
default=True,
)
args_parser.add_argument(
'--as-wide-columns',
help="""\
If set to True, the categorical columns will be used in the wide part of the DNN model
""",
action='store_true',
default=True,
)
# Saved model arguments
args_parser.add_argument(
'--job-dir',
help='GCS location to write checkpoints and export models',
required=True
)
args_parser.add_argument(
'--reuse-job-dir',
action='store_true',
default=False,
help="""\
Flag to decide if the model checkpoint should
be re-used from the job-dir. If False then the
job-dir will be deleted"""
)
args_parser.add_argument(
'--export-format',
help='The input format of the exported SavedModel binary',
choices=['JSON', 'CSV', 'EXAMPLE'],
default='JSON'
)
# Argument to turn on all logging
args_parser.add_argument(
'--verbosity',
choices=[
'DEBUG',
'ERROR',
'FATAL',
'INFO',
'WARN'
],
default='INFO',
)
return args_parser.parse_args()
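# --- Illustrative invocation (not part of the original template) ---
# The flags defined above are normally supplied on the command line. The module
# path "trainer.task" and the file paths below are hypothetical placeholders.
#
#   python -m trainer.task \
#       --train-files data/train-*.csv \
#       --eval-files data/eval-*.csv \
#       --job-dir /tmp/trained_model \
#       --train-steps 2000 \
#       --train-batch-size 128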
# ******************************************************************************
# YOU NEED NOT CHANGE THIS FUNCTION TO RUN THE EXPERIMENT
# ******************************************************************************
def run_experiment(run_config):
"""Train, evaluate, and export the model using tf.estimator.train_and_evaluate API"""
train_input_fn = input.generate_input_fn(
file_names_pattern=HYPER_PARAMS.train_files,
mode=tf.estimator.ModeKeys.TRAIN,
num_epochs=HYPER_PARAMS.num_epochs,
batch_size=HYPER_PARAMS.train_batch_size
)
eval_input_fn = input.generate_input_fn(
file_names_pattern=HYPER_PARAMS.eval_files,
mode=tf.estimator.ModeKeys.EVAL,
batch_size=HYPER_PARAMS.eval_batch_size
)
exporter = tf.estimator.FinalExporter(
'estimator',
input.SERVING_FUNCTIONS[HYPER_PARAMS.export_format],
as_text=False # change to true if you want to export the model as readable text
)
# compute the number of training steps based on num_epoch, train_size, and train_batch_size
if HYPER_PARAMS.train_size is not None and HYPER_PARAMS.num_epochs is not None:
train_steps = (HYPER_PARAMS.train_size / HYPER_PARAMS.train_batch_size) * \
HYPER_PARAMS.num_epochs
else:
train_steps = HYPER_PARAMS.train_steps
train_spec = tf.estimator.TrainSpec(
train_input_fn,
max_steps=int(train_steps)
)
eval_spec = tf.estimator.EvalSpec(
eval_input_fn,
steps=HYPER_PARAMS.eval_steps,
exporters=[exporter],
name='estimator-eval',
throttle_secs=HYPER_PARAMS.eval_every_secs,
)
print("* experiment configurations")
print("===========================")
print("Train size: {}".format(HYPER_PARAMS.train_size))
print("Epoch count: {}".format(HYPER_PARAMS.num_epochs))
print("Train batch size: {}".format(HYPER_PARAMS.train_batch_size))
print("Training steps: {} ({})".format(int(train_steps),
"supplied" if HYPER_PARAMS.train_size is None else "computed"))
print("Evaluate every: {} seconds".format(HYPER_PARAMS.eval_every_secs))
print("===========================")
if metadata.TASK_TYPE == "classification":
estimator = model.create_classifier(
config=run_config
)
elif metadata.TASK_TYPE == "regression":
estimator = model.create_regressor(
config=run_config
)
else:
estimator = model.create_estimator(
config=run_config
)
# train and evaluate
tf.estimator.train_and_evaluate(
estimator,
train_spec,
eval_spec
)
# ******************************************************************************
# THIS IS THE ENTRY POINT FOR THE TRAINER TASK
# ******************************************************************************
def main():
print('')
print('Hyper-parameters:')
print(HYPER_PARAMS)
print('')
# Set python level verbosity
tf.logging.set_verbosity(HYPER_PARAMS.verbosity)
# Set C++ Graph Execution level verbosity
    os.environ['TF_CPP_MIN_LOG_LEVEL'] = str(int(tf.logging.__dict__[HYPER_PARAMS.verbosity] / 10))
# Directory to store output model and checkpoints
model_dir = HYPER_PARAMS.job_dir
# If job_dir_reuse is False then remove the job_dir if it exists
print("Resume training:", HYPER_PARAMS.reuse_job_dir)
if not HYPER_PARAMS.reuse_job_dir:
if tf.gfile.Exists(HYPER_PARAMS.job_dir):
tf.gfile.DeleteRecursively(HYPER_PARAMS.job_dir)
print("Deleted job_dir {} to avoid re-use".format(HYPER_PARAMS.job_dir))
else:
print("No job_dir available to delete")
else:
print("Reusing job_dir {} if it exists".format(HYPER_PARAMS.job_dir))
run_config = tf.estimator.RunConfig(
tf_random_seed=19830610,
log_step_count_steps=1000,
save_checkpoints_secs=HYPER_PARAMS.eval_every_secs, # change frequency of saving checkpoints
keep_checkpoint_max=3,
model_dir=model_dir
)
run_config = run_config.replace(model_dir=model_dir)
print("Model Directory:", run_config.model_dir)
# Run the train and evaluate experiment
time_start = datetime.utcnow()
print("")
print("Experiment started at {}".format(time_start.strftime("%H:%M:%S")))
print(".......................................")
run_experiment(run_config)
time_end = datetime.utcnow()
print(".......................................")
print("Experiment finished at {}".format(time_end.strftime("%H:%M:%S")))
print("")
time_elapsed = time_end - time_start
print("Experiment elapsed time: {} seconds".format(time_elapsed.total_seconds()))
print("")
args_parser = argparse.ArgumentParser()
HYPER_PARAMS = initialise_hyper_params(args_parser)
if __name__ == '__main__':
main()
|
the-stack_0_8102 | from sklearn.model_selection import StratifiedKFold
from sklearn.base import clone
def k_fold_cross_validation(sgd_clf, X_train, y_train_nb):
    skfolds = StratifiedKFold(n_splits=3, shuffle=True, random_state=42)  # shuffle=True is required when random_state is set in recent scikit-learn
for train_index, test_index in skfolds.split(X_train, y_train_nb):
clone_clf = clone(sgd_clf)
X_train_folds = X_train[train_index]
y_train_folds = (y_train_nb[train_index])
X_test_folds = X_train[test_index]
y_test_folds = (y_train_nb[test_index])
clone_clf.fit(X_train_folds, y_train_folds)
y_pred = clone_clf.predict(X_test_folds)
n_correct = sum(y_pred == y_test_folds)
print(n_correct / len(y_pred))
from sklearn.base import BaseEstimator
import numpy as np
class NeverNbClassifier(BaseEstimator):
def fit(self, X, y=None):
pass
def predict(self, X):
return np.zeros((len(X), 1), dtype=bool)
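# Illustrative usage sketch (an assumption, not part of the original file):
# scoring this always-negative classifier with cross_val_score shows how plain
# accuracy can look high on a skewed dataset, which is why the manual k-fold
# loop above is worth keeping. X_train and y_train_nb are assumed to exist.
#
# from sklearn.model_selection import cross_val_score
# never_nb_clf = NeverNbClassifier()
# print(cross_val_score(never_nb_clf, X_train, y_train_nb, cv=3, scoring="accuracy"))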
|
the-stack_0_8104 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from collections import namedtuple
import ray.cloudpickle as cloudpickle
import copy
from datetime import datetime
import logging
import json
import uuid
import time
import tempfile
import os
from numbers import Number
# For compatibility under py2 to consider unicode as str
from six import string_types
import ray
from ray.tune import TuneError
from ray.tune.log_sync import validate_sync_function
from ray.tune.logger import pretty_print, UnifiedLogger
# NOTE(rkn): We import ray.tune.registry here instead of importing the names we
# need because there are cyclic imports that may cause specific names to not
# have been defined yet. See https://github.com/ray-project/ray/issues/1716.
import ray.tune.registry
from ray.tune.result import (DEFAULT_RESULTS_DIR, DONE, HOSTNAME, PID,
TIME_TOTAL_S, TRAINING_ITERATION, TIMESTEPS_TOTAL,
EPISODE_REWARD_MEAN, MEAN_LOSS, MEAN_ACCURACY)
from ray.utils import binary_to_hex, hex_to_binary
DEBUG_PRINT_INTERVAL = 5
MAX_LEN_IDENTIFIER = 130
logger = logging.getLogger(__name__)
def date_str():
return datetime.today().strftime("%Y-%m-%d_%H-%M-%S")
class Resources(
namedtuple("Resources", [
"cpu", "gpu", "extra_cpu", "extra_gpu", "custom_resources",
"extra_custom_resources"
])):
"""Ray resources required to schedule a trial.
Attributes:
cpu (float): Number of CPUs to allocate to the trial.
gpu (float): Number of GPUs to allocate to the trial.
extra_cpu (float): Extra CPUs to reserve in case the trial needs to
launch additional Ray actors that use CPUs.
extra_gpu (float): Extra GPUs to reserve in case the trial needs to
launch additional Ray actors that use GPUs.
custom_resources (dict): Mapping of resource to quantity to allocate
to the trial.
extra_custom_resources (dict): Extra custom resources to reserve in
case the trial needs to launch additional Ray actors that use
any of these custom resources.
"""
__slots__ = ()
def __new__(cls,
cpu,
gpu,
extra_cpu=0,
extra_gpu=0,
custom_resources=None,
extra_custom_resources=None):
custom_resources = custom_resources or {}
extra_custom_resources = extra_custom_resources or {}
leftovers = set(custom_resources) ^ set(extra_custom_resources)
for value in leftovers:
custom_resources.setdefault(value, 0)
extra_custom_resources.setdefault(value, 0)
all_values = [cpu, gpu, extra_cpu, extra_gpu]
all_values += list(custom_resources.values())
all_values += list(extra_custom_resources.values())
assert len(custom_resources) == len(extra_custom_resources)
for entry in all_values:
assert isinstance(entry, Number), "Improper resource value."
return super(Resources,
cls).__new__(cls, cpu, gpu, extra_cpu, extra_gpu,
custom_resources, extra_custom_resources)
def summary_string(self):
summary = "{} CPUs, {} GPUs".format(self.cpu + self.extra_cpu,
self.gpu + self.extra_gpu)
custom_summary = ", ".join([
"{} {}".format(self.get_res_total(res), res)
for res in self.custom_resources
])
if custom_summary:
summary += " ({})".format(custom_summary)
return summary
def cpu_total(self):
return self.cpu + self.extra_cpu
def gpu_total(self):
return self.gpu + self.extra_gpu
def get_res_total(self, key):
return self.custom_resources.get(
key, 0) + self.extra_custom_resources.get(key, 0)
def get(self, key):
return self.custom_resources.get(key, 0)
def is_nonnegative(self):
all_values = [self.cpu, self.gpu, self.extra_cpu, self.extra_gpu]
all_values += list(self.custom_resources.values())
all_values += list(self.extra_custom_resources.values())
return all(v >= 0 for v in all_values)
@classmethod
def subtract(cls, original, to_remove):
cpu = original.cpu - to_remove.cpu
gpu = original.gpu - to_remove.gpu
extra_cpu = original.extra_cpu - to_remove.extra_cpu
extra_gpu = original.extra_gpu - to_remove.extra_gpu
all_resources = set(original.custom_resources).union(
set(to_remove.custom_resources))
new_custom_res = {
k: original.custom_resources.get(k, 0) -
to_remove.custom_resources.get(k, 0)
for k in all_resources
}
extra_custom_res = {
k: original.extra_custom_resources.get(k, 0) -
to_remove.extra_custom_resources.get(k, 0)
for k in all_resources
}
return Resources(cpu, gpu, extra_cpu, extra_gpu, new_custom_res,
extra_custom_res)
def to_json(self):
return resources_to_json(self)
def json_to_resources(data):
if data is None or data == "null":
return None
if isinstance(data, string_types):
data = json.loads(data)
for k in data:
if k in ["driver_cpu_limit", "driver_gpu_limit"]:
raise TuneError(
"The field `{}` is no longer supported. Use `extra_cpu` "
"or `extra_gpu` instead.".format(k))
if k not in Resources._fields:
raise ValueError(
"Unknown resource field {}, must be one of {}".format(
k, Resources._fields))
return Resources(
data.get("cpu", 1), data.get("gpu", 0), data.get("extra_cpu", 0),
data.get("extra_gpu", 0), data.get("custom_resources"),
data.get("extra_custom_resources"))
def resources_to_json(resources):
if resources is None:
return None
return {
"cpu": resources.cpu,
"gpu": resources.gpu,
"extra_cpu": resources.extra_cpu,
"extra_gpu": resources.extra_gpu,
"custom_resources": resources.custom_resources.copy(),
"extra_custom_resources": resources.extra_custom_resources.copy()
}
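# Illustrative sketch (not part of the original module): a Resources tuple can
# be flattened to a plain dict and rebuilt from it, which is how trial resource
# requests survive JSON serialization. The values below are arbitrary.
#
# _res = Resources(cpu=1, gpu=0, custom_resources={"accelerator": 1})
# assert json_to_resources(resources_to_json(_res)) == _res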
def has_trainable(trainable_name):
return ray.tune.registry._global_registry.contains(
ray.tune.registry.TRAINABLE_CLASS, trainable_name)
def recursive_criteria_check(result, criteria):
for criteria, stop_value in criteria.items():
if criteria not in result:
raise TuneError(
"Stopping criteria {} not provided in result {}.".format(
criteria, result))
elif isinstance(result[criteria], dict) and isinstance(
stop_value, dict):
if recursive_criteria_check(result[criteria], stop_value):
return True
elif result[criteria] >= stop_value:
return True
return False
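# Illustrative example (not in the original source; the key name is only for
# demonstration): criteria are checked recursively, and a trial stops once the
# reported value reaches the configured threshold.
#
# recursive_criteria_check({"training_iteration": 99}, {"training_iteration": 100})   # False
# recursive_criteria_check({"training_iteration": 100}, {"training_iteration": 100})  # True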
class Checkpoint(object):
"""Describes a checkpoint of trial state.
Checkpoint may be saved in different storage.
Attributes:
storage (str): Storage type.
value (str): If storage==MEMORY,value is a Python object.
If storage==DISK,value is a path points to the checkpoint in disk.
"""
MEMORY = "memory"
DISK = "disk"
def __init__(self, storage, value, last_result=None):
self.storage = storage
self.value = value
self.last_result = last_result or {}
@staticmethod
def from_object(value=None):
"""Creates a checkpoint from a Python object."""
return Checkpoint(Checkpoint.MEMORY, value)
class ExportFormat(object):
"""Describes the format to export the trial Trainable.
This may correspond to different file formats based on the
Trainable implementation.
"""
CHECKPOINT = "checkpoint"
MODEL = "model"
@staticmethod
def validate(export_formats):
"""Validates export_formats.
Raises:
ValueError if the format is unknown.
"""
for i in range(len(export_formats)):
export_formats[i] = export_formats[i].strip().lower()
if export_formats[i] not in [
ExportFormat.CHECKPOINT, ExportFormat.MODEL
]:
raise TuneError("Unsupported export format: " +
export_formats[i])
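# Illustrative example (an assumption, not in the original source): validate()
# normalizes the entries in place and raises TuneError for unknown formats.
#
# _formats = [" Model", "CHECKPOINT"]
# ExportFormat.validate(_formats)   # _formats is now ["model", "checkpoint"]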
class Trial(object):
"""A trial object holds the state for one model training run.
Trials are themselves managed by the TrialRunner class, which implements
the event loop for submitting trial runs to a Ray cluster.
Trials start in the PENDING state, and transition to RUNNING once started.
On error it transitions to ERROR, otherwise TERMINATED on success.
"""
PENDING = "PENDING"
RUNNING = "RUNNING"
PAUSED = "PAUSED"
TERMINATED = "TERMINATED"
ERROR = "ERROR"
def __init__(self,
trainable_name,
config=None,
trial_id=None,
local_dir=DEFAULT_RESULTS_DIR,
experiment_tag="",
resources=None,
stopping_criterion=None,
checkpoint_freq=0,
checkpoint_at_end=False,
keep_checkpoints_num=None,
checkpoint_score_attr="",
export_formats=None,
restore_path=None,
upload_dir=None,
trial_name_creator=None,
loggers=None,
sync_function=None,
max_failures=0):
"""Initialize a new trial.
The args here take the same meaning as the command line flags defined
in ray.tune.config_parser.
"""
Trial._registration_check(trainable_name)
# Trial config
self.trainable_name = trainable_name
self.config = config or {}
self.local_dir = local_dir # This remains unexpanded for syncing.
self.experiment_tag = experiment_tag
trainable_cls = self._get_trainable_cls()
if trainable_cls and hasattr(trainable_cls,
"default_resource_request"):
default_resources = trainable_cls.default_resource_request(
self.config)
if default_resources:
if resources:
raise ValueError(
"Resources for {} have been automatically set to {} "
"by its `default_resource_request()` method. Please "
"clear the `resources_per_trial` option.".format(
trainable_cls, default_resources))
resources = default_resources
self.resources = resources or Resources(cpu=1, gpu=0)
self.stopping_criterion = stopping_criterion or {}
self.upload_dir = upload_dir
self.loggers = loggers
self.sync_function = sync_function
validate_sync_function(sync_function)
self.verbose = True
self.max_failures = max_failures
# Local trial state that is updated during the run
self.last_result = {}
self.last_update_time = -float("inf")
self.checkpoint_freq = checkpoint_freq
self.checkpoint_at_end = checkpoint_at_end
self.history = []
self.keep_checkpoints_num = keep_checkpoints_num
self._cmp_greater = not checkpoint_score_attr.startswith("min-")
self.best_checkpoint_attr_value = -float("inf") \
if self._cmp_greater else float("inf")
# Strip off "min-" from checkpoint attribute
self.checkpoint_score_attr = checkpoint_score_attr \
if self._cmp_greater else checkpoint_score_attr[4:]
self._checkpoint = Checkpoint(
storage=Checkpoint.DISK, value=restore_path)
self.export_formats = export_formats
self.status = Trial.PENDING
self.logdir = None
self.runner = None
self.result_logger = None
self.last_debug = 0
self.trial_id = Trial.generate_id() if trial_id is None else trial_id
self.error_file = None
self.num_failures = 0
self.custom_trial_name = None
# AutoML fields
self.results = None
self.best_result = None
self.param_config = None
self.extra_arg = None
self._nonjson_fields = [
"_checkpoint",
"loggers",
"sync_function",
"results",
"best_result",
"param_config",
"extra_arg",
]
if trial_name_creator:
self.custom_trial_name = trial_name_creator(self)
@classmethod
def _registration_check(cls, trainable_name):
if not has_trainable(trainable_name):
# Make sure rllib agents are registered
from ray import rllib # noqa: F401
if not has_trainable(trainable_name):
raise TuneError("Unknown trainable: " + trainable_name)
@classmethod
def generate_id(cls):
return str(uuid.uuid1().hex)[:8]
@classmethod
def create_logdir(cls, identifier, local_dir):
local_dir = os.path.expanduser(local_dir)
if not os.path.exists(local_dir):
os.makedirs(local_dir)
return tempfile.mkdtemp(
prefix="{}_{}".format(identifier[:MAX_LEN_IDENTIFIER], date_str()),
dir=local_dir)
def init_logger(self):
"""Init logger."""
if not self.result_logger:
if not self.logdir:
self.logdir = Trial.create_logdir(str(self), self.local_dir)
elif not os.path.exists(self.logdir):
os.makedirs(self.logdir)
self.result_logger = UnifiedLogger(
self.config,
self.logdir,
upload_uri=self.upload_dir,
loggers=self.loggers,
sync_function=self.sync_function)
def update_resources(self, cpu, gpu, **kwargs):
"""EXPERIMENTAL: Updates the resource requirements.
Should only be called when the trial is not running.
Raises:
ValueError if trial status is running.
"""
if self.status is Trial.RUNNING:
raise ValueError("Cannot update resources while Trial is running.")
self.resources = Resources(cpu, gpu, **kwargs)
def sync_logger_to_new_location(self, worker_ip):
"""Updates the logger location.
Also pushes logdir to worker_ip, allowing for cross-node recovery.
"""
if self.result_logger:
self.result_logger.sync_results_to_new_location(worker_ip)
def close_logger(self):
"""Close logger."""
if self.result_logger:
self.result_logger.close()
self.result_logger = None
def write_error_log(self, error_msg):
if error_msg and self.logdir:
self.num_failures += 1 # may be moved to outer scope?
error_file = os.path.join(self.logdir,
"error_{}.txt".format(date_str()))
with open(error_file, "w") as f:
f.write(error_msg)
self.error_file = error_file
def should_stop(self, result):
"""Whether the given result meets this trial's stopping criteria."""
if result.get(DONE):
return True
return recursive_criteria_check(result, self.stopping_criterion)
def should_checkpoint(self):
"""Whether this trial is due for checkpointing."""
result = self.last_result or {}
if result.get(DONE) and self.checkpoint_at_end:
return True
if self.checkpoint_freq:
return result.get(TRAINING_ITERATION,
0) % self.checkpoint_freq == 0
else:
return False
def progress_string(self):
"""Returns a progress message for printing out to the console."""
if not self.last_result:
return self._status_string()
def location_string(hostname, pid):
if hostname == os.uname()[1]:
return "pid={}".format(pid)
else:
return "{} pid={}".format(hostname, pid)
pieces = [
"{}".format(self._status_string()), "[{}]".format(
self.resources.summary_string()), "[{}]".format(
location_string(
self.last_result.get(HOSTNAME),
self.last_result.get(PID))), "{} s".format(
int(self.last_result.get(TIME_TOTAL_S)))
]
if self.last_result.get(TRAINING_ITERATION) is not None:
pieces.append("{} iter".format(
self.last_result[TRAINING_ITERATION]))
if self.last_result.get(TIMESTEPS_TOTAL) is not None:
pieces.append("{} ts".format(self.last_result[TIMESTEPS_TOTAL]))
if self.last_result.get(EPISODE_REWARD_MEAN) is not None:
pieces.append("{} rew".format(
format(self.last_result[EPISODE_REWARD_MEAN], ".3g")))
if self.last_result.get(MEAN_LOSS) is not None:
pieces.append("{} loss".format(
format(self.last_result[MEAN_LOSS], ".3g")))
if self.last_result.get(MEAN_ACCURACY) is not None:
pieces.append("{} acc".format(
format(self.last_result[MEAN_ACCURACY], ".3g")))
return ", ".join(pieces)
def _status_string(self):
return "{}{}".format(
self.status, ", {} failures: {}".format(self.num_failures,
self.error_file)
if self.error_file else "")
def has_checkpoint(self):
return self._checkpoint.value is not None
def clear_checkpoint(self):
self._checkpoint.value = None
def should_recover(self):
"""Returns whether the trial qualifies for restoring.
This is if a checkpoint frequency is set and has not failed more than
max_failures. This may return true even when there may not yet
be a checkpoint.
"""
return (self.checkpoint_freq > 0
and (self.num_failures < self.max_failures
or self.max_failures < 0))
def update_last_result(self, result, terminate=False):
if terminate:
result.update(done=True)
if self.verbose and (terminate or time.time() - self.last_debug >
DEBUG_PRINT_INTERVAL):
print("Result for {}:".format(self))
print(" {}".format(pretty_print(result).replace("\n", "\n ")))
self.last_debug = time.time()
self.last_result = result
self.last_update_time = time.time()
self.result_logger.on_result(self.last_result)
def compare_checkpoints(self, attr_mean):
"""Compares two checkpoints based on the attribute attr_mean param.
Greater than is used by default. If command-line parameter
checkpoint_score_attr starts with "min-" less than is used.
Arguments:
attr_mean: mean of attribute value for the current checkpoint
Returns:
True: when attr_mean is greater than previous checkpoint attr_mean
and greater than function is selected
when attr_mean is less than previous checkpoint attr_mean and
less than function is selected
False: when attr_mean is not in alignment with selected cmp fn
"""
if self._cmp_greater and attr_mean > self.best_checkpoint_attr_value:
return True
elif (not self._cmp_greater
and attr_mean < self.best_checkpoint_attr_value):
return True
return False
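    # Hedged illustration (not part of the original class): with the default
    # "greater is better" comparison, a trial whose best value so far is 0.8
    # accepts a new checkpoint scoring 0.9 and rejects one scoring 0.7.
    #
    # trial.best_checkpoint_attr_value = 0.8   # assuming trial._cmp_greater is True
    # trial.compare_checkpoints(0.9)           # -> True
    # trial.compare_checkpoints(0.7)           # -> False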
def _get_trainable_cls(self):
return ray.tune.registry._global_registry.get(
ray.tune.registry.TRAINABLE_CLASS, self.trainable_name)
def set_verbose(self, verbose):
self.verbose = verbose
def is_finished(self):
return self.status in [Trial.TERMINATED, Trial.ERROR]
def __repr__(self):
return str(self)
def __str__(self):
"""Combines ``env`` with ``trainable_name`` and ``experiment_tag``.
        Can be overridden with a custom string creator.
"""
if self.custom_trial_name:
return self.custom_trial_name
if "env" in self.config:
env = self.config["env"]
if isinstance(env, type):
env = env.__name__
identifier = "{}_{}".format(self.trainable_name, env)
else:
identifier = self.trainable_name
if self.experiment_tag:
identifier += "_" + self.experiment_tag
return identifier.replace("/", "_")
def __getstate__(self):
"""Memento generator for Trial.
Sets RUNNING trials to PENDING, and flushes the result logger.
Note this can only occur if the trial holds a DISK checkpoint.
"""
assert self._checkpoint.storage == Checkpoint.DISK, (
"Checkpoint must not be in-memory.")
state = self.__dict__.copy()
state["resources"] = resources_to_json(self.resources)
for key in self._nonjson_fields:
state[key] = binary_to_hex(cloudpickle.dumps(state.get(key)))
state["runner"] = None
state["result_logger"] = None
if self.result_logger:
self.result_logger.flush()
state["__logger_started__"] = True
else:
state["__logger_started__"] = False
return copy.deepcopy(state)
def __setstate__(self, state):
logger_started = state.pop("__logger_started__")
state["resources"] = json_to_resources(state["resources"])
if state["status"] == Trial.RUNNING:
state["status"] = Trial.PENDING
for key in self._nonjson_fields:
state[key] = cloudpickle.loads(hex_to_binary(state[key]))
self.__dict__.update(state)
Trial._registration_check(self.trainable_name)
if logger_started:
self.init_logger()
|
the-stack_0_8106 | import tkinter as tk
from tkinter import ttk
import numpy as np
from itertools import product
from display_track import OIdisplay, CMdisplay, Cdisplay, ROIdisplay, MainDisplay
class ImageOriginal():
def create_window(self):
try: self.iot_Window.destroy()
except AttributeError: pass
self.iot_Window = tk.Toplevel(self)
self.iot_Window.title('Original Image')
self.iot_Window.geometry('600x400+100+100')
self.iot_Window.protocol('WM_DELETE_WINDOW',
lambda: ImageOriginal.close(self))
self.iot_Window.rowconfigure(0, weight = 1)
self.iot_Window.columnconfigure(0, weight = 1)
self.iot_frameMain = ttk.Frame(self.iot_Window)
self.iot_frameMain.rowconfigure(1, weight = 1)
self.iot_frameMain.columnconfigure(0, weight = 1)
self.iot_frameMain.grid(row = 0, column = 0,
sticky = 'nsew',
padx = 10, pady = 10)
def show(self):
if self._menucheckOI.get() == 1:
ImageOriginal.create_window(self)
OIdisplay.init_canvas(self)
OIdisplay.show_image(self)
elif self._menucheckOI.get() == 0:
try: self.iot_Window.destroy()
except AttributeError: pass
OIdisplay.hide_delete(self)
def close(self):
self.iot_Window.destroy()
self._menucheckOI.set(0)
class ImageInfo():
def create_window(self):
try: self.iit_Window.destroy()
except AttributeError: pass
self.iit_Window = tk.Toplevel(self)
self.iit_Window.title('Image Info')
#self.iit_Window.geometry('300x360-100-100')
self.iit_Window.resizable(0,0)
self.iit_Window.protocol('WM_DELETE_WINDOW',
lambda: ImageInfo.close(self))
self.iit_frame = ttk.Frame(self.iit_Window)
self.iit_frame.grid(row = 0, column = 0,
sticky = 'nsew',
padx = 2, pady = 2)
self.iit_filedirLabel = ttk.Label(self.iit_frame, text = 'Folder: ')
self.iit_filenameLabel = ttk.Label(self.iit_frame, text = 'File: ')
self.iit_typeLabel = ttk.Label(self.iit_frame, text = 'Type: ')
self.iit_sizepixxLabel = ttk.Label(self.iit_frame, text = 'Size X (pix) :')
self.iit_sizepixyLabel = ttk.Label(self.iit_frame, text = 'Size Y (pix) : ')
self.iit_sizenmxLabel = ttk.Label(self.iit_frame, text = 'Size X (nm) : ')
self.iit_sizenmyLabel = ttk.Label(self.iit_frame, text = 'Size Y (nm) : ')
self.iit_calfactorLabel = ttk.Label(self.iit_frame, text = 'Cal. Factor (nm/pix) : ')
self.iit_vminLabel = ttk.Label(self.iit_frame, text = 'I min: ')
self.iit_vmaxLabel = ttk.Label(self.iit_frame, text = 'I max: ')
self.iit_xminLabel = ttk.Label(self.iit_frame, text = 'X min: ')
self.iit_xmaxLabel = ttk.Label(self.iit_frame, text = 'X max: ')
self.iit_yminLabel = ttk.Label(self.iit_frame, text = 'Y min: ')
self.iit_ymaxLabel = ttk.Label(self.iit_frame, text = 'Y max: ')
self.iit_filedirDynLabel = ttk.Label(self.iit_frame,
textvariable = self._file_info['directory'],
wraplength = 160)
self.iit_filenameDynLabel = ttk.Label(self.iit_frame,
textvariable = self._file_info['file'],
wraplength = 160)
self.iit_typeDynLabel = ttk.Label(self.iit_frame,
textvariable = self._img_info['type'])
self.iit_sizepixxDynLabel = ttk.Label(self.iit_frame,
textvariable = self._img_info['sizepix_x'])
self.iit_sizepixyDynLabel = ttk.Label(self.iit_frame,
textvariable = self._img_info['sizepix_y'])
self.iit_sizenmxDynLabel = ttk.Label(self.iit_frame,
textvariable = self._img_info['sizenm_x'])
self.iit_sizenmyDynLabel = ttk.Label(self.iit_frame,
textvariable = self._img_info['sizenm_y'])
self.iit_calfactorDynLabel = ttk.Label(self.iit_frame,
textvariable = self._img_info['cal_factor'])
self.iit_vminDynLabel = ttk.Label(self.iit_frame,
textvariable = self._img_info['vmin'])
self.iit_vmaxDynLabel = ttk.Label(self.iit_frame,
textvariable = self._img_info['vmax'])
self.iit_xminDynLabel = ttk.Label(self.iit_frame,
textvariable = self._img_info['xmin'])
self.iit_xmaxDynLabel = ttk.Label(self.iit_frame,
textvariable = self._img_info['xmax'])
self.iit_yminDynLabel = ttk.Label(self.iit_frame,
textvariable = self._img_info['ymin'])
self.iit_ymaxDynLabel = ttk.Label(self.iit_frame,
textvariable = self._img_info['ymax'])
self.iit_filedirLabel.grid(row = 0, column = 0,
sticky = 'nsew', padx = 2, pady = 2)
self.iit_filenameLabel.grid(row = 1, column = 0,
sticky = 'nsew', padx = 2, pady = 2)
self.iit_typeLabel.grid(row = 2, column = 0,
sticky = 'nsew', padx = 2, pady = 2)
self.iit_sizepixxLabel.grid(row = 3, column = 0,
sticky = 'nsew', padx = 2, pady = 2)
self.iit_sizepixyLabel.grid(row = 4, column = 0,
sticky = 'nsew', padx = 2, pady = 2)
self.iit_sizenmxLabel.grid(row = 5, column = 0,
sticky = 'nsew', padx = 2, pady = 2)
self.iit_sizenmyLabel.grid(row = 6, column = 0,
sticky = 'nsew', padx = 2, pady = 2)
self.iit_calfactorLabel.grid(row = 7, column = 0,
sticky = 'nsew', padx = 2, pady = 2)
self.iit_vminLabel.grid(row = 8, column = 0,
sticky = 'nsew', padx = 2, pady = 2)
self.iit_vmaxLabel.grid(row = 9, column = 0,
sticky = 'nsew', padx = 2, pady = 2)
self.iit_xminLabel.grid(row = 10, column = 0,
sticky = 'nsew', padx = 2, pady = 2)
self.iit_xmaxLabel.grid(row = 11, column = 0,
sticky = 'nsew', padx = 2, pady = 2)
self.iit_yminLabel.grid(row = 12, column = 0,
sticky = 'nsew', padx = 2, pady = 2)
self.iit_ymaxLabel.grid(row = 13, column = 0,
sticky = 'nsew', padx = 2, pady = 2)
self.iit_filedirDynLabel.grid(row = 0, column = 1,
sticky = 'nsew', padx = 2, pady = 2)
self.iit_filenameDynLabel.grid(row = 1, column = 1,
sticky = 'nsew', padx = 2, pady = 2)
self.iit_typeDynLabel.grid(row = 2, column = 1,
sticky = 'nsew', padx = 2, pady = 2)
self.iit_sizepixxDynLabel.grid(row = 3, column = 1,
sticky = 'nsew', padx = 2, pady = 2)
self.iit_sizepixyDynLabel.grid(row = 4, column = 1,
sticky = 'nsew', padx = 2, pady = 2)
self.iit_sizenmxDynLabel.grid(row = 5, column = 1,
sticky = 'nsew', padx = 2, pady = 2)
self.iit_sizenmyDynLabel.grid(row = 6, column = 1,
sticky = 'nsew', padx = 2, pady = 2)
self.iit_calfactorDynLabel.grid(row = 7, column = 1,
sticky = 'nsew', padx = 2, pady = 2)
self.iit_vminDynLabel.grid(row = 8, column = 1,
sticky = 'nsew', padx = 2, pady = 2)
self.iit_vmaxDynLabel.grid(row = 9, column = 1,
sticky = 'nsew', padx = 2, pady = 2)
self.iit_xminDynLabel.grid(row = 10, column = 1,
sticky = 'nsew', padx = 2, pady = 2)
self.iit_xmaxDynLabel.grid(row = 11, column = 1,
sticky = 'nsew', padx = 2, pady = 2)
self.iit_yminDynLabel.grid(row = 12, column = 1,
sticky = 'nsew', padx = 2, pady = 2)
self.iit_ymaxDynLabel.grid(row = 13, column = 1,
sticky = 'nsew', padx = 2, pady = 2)
def show(self):
if self._menucheckII.get() == 1:
ImageInfo.create_window(self)
elif self._menucheckII.get() == 0:
try: self.iit_Window.destroy()
except AttributeError: pass
def close(self):
self.iit_Window.destroy()
self._menucheckII.set(0)
class ImageColormap():
def invert(self):
if self._menucheckCI.get() == 1:
colormap = self._colormap_options.get('Current Main')+'_r'
elif self._menucheckCI.get() == 0:
colormap = self._colormap_options.get('Current Main').replace('_r','')
self._colormap_options['Current Main'] = colormap
self._s_img.set_cmap(colormap)
self._canvas.draw()
def change(self):
colormap_option = self._menucheckCO.get()
if colormap_option == 0: colormap = 'gray'
elif colormap_option == 1: colormap = 'bone'
elif colormap_option == 2: colormap = 'hot'
elif colormap_option == 3: colormap = 'magma'
elif colormap_option == 4: colormap = 'inferno'
self._colormap_options['Current Main'] = colormap
ImageColormap.invert(self)
def other(self):
colormap = self._colormap_options.get('Current Main')
if 'gray' in colormap : colormap_option = 0
elif 'bone' in colormap: colormap_option = 1
elif 'hot' in colormap: colormap_option = 2
elif 'magma' in colormap: colormap_option = 3
elif 'inferno' in colormap: colormap_option = 4
else: colormap_option = 5
self._menucheckCO.set(colormap_option)
ImageColormap.other_create(self)
def other_create(self):
try: self.ico_Window.destroy()
except AttributeError: pass
self.ico_Window = tk.Toplevel(self)
self.ico_Window.title('Other Colormap')
#self.ico_Window.resizable(0,0)
self.ico_frame = ttk.Frame(self.ico_Window)
self.ico_frame.grid(row = 0, column = 0,
sticky = 'nsew', padx = 2, pady = 2)
self.ico_buttonFrame = ttk.Frame(self.ico_Window)
CMdisplay.show_colormaps(self)
self.ico_combobox = ttk.Combobox(self.ico_frame,
values = self._colormap_options['Available'])
self.ico_combobox.set(self._colormap_options.get('Current Main').replace('_r',''))
self.ico_applyButton = ttk.Button(self.ico_buttonFrame, text = 'Apply',
command = lambda: ImageColormap.other_apply(self))
self.ico_okButton = ttk.Button(self.ico_buttonFrame, text = 'OK',
command = lambda: ImageColormap.other_ok(self))
self.ico_combobox.grid(row = 1, column = 0)
self.ico_buttonFrame.grid(row = 2, column = 0)
self.ico_applyButton.grid(row = 0, column = 0)
self.ico_okButton.grid(row = 0, column = 1)
def other_apply(self):
self._colormap_options['Current Main'] = self.ico_combobox.get()
self._menucheckCO.set(5)
ImageColormap.invert(self)
def other_ok(self):
ImageColormap.other_apply(self)
self.ico_Window.destroy()
        try: self._ic_canvas.delete(tk.ALL)
except AttributeError: pass
self._menucheckCO.set(5)
class ImageContrast():
def show(self):
if self._menucheckCC.get() == 1:
ImageContrast.create(self)
elif self._menucheckCC.get() == 0:
ImageContrast.close(self)
def create(self):
try: self.ic_Window.destroy()
except AttributeError: pass
self.ic_Window = tk.Toplevel(self)
self.ic_Window.title('Adjust Contrast')
self.ic_Window.geometry('300x300-100+200')
self.ic_Window.protocol('WM_DELETE_WINDOW',
lambda: ImageContrast.close(self))
try:
vmin = self._colormap_options['Vmin']
vmax = self._colormap_options['Vmax']
except KeyError:
vmin = np.min(self._mat_img.flatten())
vmax = np.max(self._mat_img.flatten())
self.ic_frame = ttk.Frame(self.ic_Window)
self.ic_controlFrame = ttk.Frame(self.ic_Window)
self.ic_buttonFrame = ttk.Frame(self.ic_Window)
self.ic_frame.grid(row = 0, column = 0,
sticky = 'nsew', padx = 2, pady = 2)
self.ic_controlFrame.grid(row = 1, column = 0,
sticky = 'nsew', padx = 2, pady = 2)
self.ic_buttonFrame.grid(row = 2, column = 0,
sticky = 'nsew', padx = 2, pady = 2)
self.ic_Window.rowconfigure([0,1,2], weight = 1)
self.ic_Window.columnconfigure(0, weight = 1)
self.ic_frame.columnconfigure(0, weight = 1)
self.ic_frame.rowconfigure(0, weight = 1)
self.ic_controlFrame.columnconfigure(0, weight = 1)
self.ic_buttonFrame.columnconfigure([0,1], weight = 1)
self.ic_vminSlider = tk.Scale(self.ic_controlFrame, orient = 'horizontal',
from_ = np.min(self._mat_img.flatten()), to = np.max(self._mat_img.flatten()))
self.ic_vmaxSlider = tk.Scale(self.ic_controlFrame, orient = 'horizontal',
from_ = np.min(self._mat_img.flatten()), to = np.max(self._mat_img.flatten()))
self.ic_applyButton = ttk.Button(self.ic_buttonFrame, text = 'Apply',
command = lambda: ImageContrast.ok_close(self))
self.ic_closeButton = ttk.Button(self.ic_buttonFrame, text = 'Close',
command = lambda: ImageContrast.close(self))
self.ic_vminSlider.bind('<ButtonRelease-1>',
lambda event, arg = self: ImageContrast.change_slide(arg, event))
self.ic_vmaxSlider.bind('<ButtonRelease-1>',
lambda event, arg = self: ImageContrast.change_slide(arg, event))
self.ic_vminSlider.grid(row = 0, column = 0,
sticky = 'nsew', padx = 2, pady = 2)
self.ic_vmaxSlider.grid(row = 1, column = 0,
sticky = 'nsew', padx = 2, pady = 2)
self.ic_applyButton.grid(row = 0, column = 0,
sticky = 'nsew', padx = 10, pady = 2)
self.ic_closeButton.grid(row = 0, column = 1,
sticky = 'nsew', padx = 10, pady = 2)
self.ic_vminSlider.set(vmin)
self.ic_vmaxSlider.set(vmax)
Cdisplay.show_histogram(self)
Cdisplay.update_clim(self)
def change_slide(self, event):
if self.ic_vminSlider.cget('state') == 'active':
self.ic_vminSlider.after(100)
elif self.ic_vmaxSlider.cget('state') == 'active':
self.ic_vmaxSlider.after(100)
vmin = self.ic_vminSlider.get()
vmax = self.ic_vmaxSlider.get()
Cdisplay.update_clim(self)
def ok_close(self):
vmin = self.ic_vminSlider.get()
vmax = self.ic_vmaxSlider.get()
self._colormap_options['Vmin'] = vmin
self._colormap_options['Vmax'] = vmax
ImageContrast.close(self)
def close(self):
try:
vmin = self._colormap_options['Vmin']
vmax = self._colormap_options['Vmax']
except KeyError:
vmin = np.min(self._mat_img.flatten())
vmax = np.max(self._mat_img.flatten())
self._s_img.set_clim([vmin, vmax])
self._canvas.draw()
try: self.ic_Window.destroy()
except AttributeError: pass
self._menucheckCC.set(0)
class ImageOverlay():
def init_var(self):
self._overcmap = {
'Basic': ['none',
'black',
'gray',
'white',
'yellow',
'orange',
'red',
'magenta',
'blue',
'cyan',
'green',
]
}
self._overedge = {
'enable': tk.IntVar(),
'size' : tk.StringVar(),
'ecolor': tk.StringVar(),
'fcolor': tk.StringVar()
}
self._overskel = {
'enable': tk.IntVar(),
'size' : tk.StringVar(),
'ecolor': tk.StringVar(),
'fcolor': tk.StringVar()
}
self._overlabel = {
'enable': tk.IntVar(),
'size' : tk.StringVar(),
'ecolor': tk.StringVar(),
}
self._overfit = {
'enable': tk.IntVar(),
'lwidth' : tk.StringVar(),
'color': tk.StringVar()
}
self._overedge['enable'].set(0)
self._overedge['size'].set('5')
self._overedge['ecolor'].set('none')
self._overedge['fcolor'].set('orange')
self._overskel['enable'].set(0)
self._overskel['size'].set('5')
self._overskel['ecolor'].set('none')
self._overskel['fcolor'].set('cyan')
self._overlabel['enable'].set(0)
self._overlabel['size'].set('5')
self._overlabel['ecolor'].set('none')
self._overfit['enable'].set(0)
self._overfit['lwidth'].set(1)
self._overfit['color'].set('yellow')
def show(self):
if self._menucheckOO.get() == 1:
ImageOverlay.create_options(self)
ImageOverlay.setstate_init(self)
else: ImageOverlay.close(self)
def create_options(self):
try: self.ov_Window.destroy()
except AttributeError: pass
self.ov_Window = tk.Toplevel(self)
self.ov_Window.title('Display Overlay Options')
#self.ov_Window.geometry('450x300-250+80')
self.ov_Window.resizable(0,0)
self.ov_Window.protocol('WM_DELETE_WINDOW',
lambda: ImageOverlay.close(self))
self.ov_Window.rowconfigure([0,1,2,3,4,5], weight = 1)
self.ov_Window.columnconfigure(0, weight = 1)
self.oveLabelFrame = ttk.LabelFrame(self.ov_Window,
text = 'Edge Options')
self.ovsLabelFrame = ttk.LabelFrame(self.ov_Window,
text = 'Skeleton Options')
self.ovlLabelFrame = ttk.LabelFrame(self.ov_Window,
text = 'Label Options')
self.ovfLabelFrame = ttk.LabelFrame(self.ov_Window,
text = 'Fit Options')
self.ovbuttonFrame = ttk.Frame(self.ov_Window)
self.ove_enButton = ttk.Button(self.oveLabelFrame,
text = 'Enable',
style = 'SunkableButton.TButton',
command = lambda: ImageOverlay.enable_edge(self))
self.ove_szLabel = ttk.Label(self.oveLabelFrame,
text = 'Size : ')
self.ove_szSpinbox = tk.Spinbox(self.oveLabelFrame,
width = 3)
self.ove_szSpinbox.delete(0,'end')
self.ove_szSpinbox.insert(0, self._overedge['size'].get())
self.ove_ecLabel = ttk.Label(self.oveLabelFrame,
text = 'Edge color : ')
self.ove_ecCombobox = ttk.Combobox(self.oveLabelFrame,
width = 7,
values = self._overcmap['Basic'])
self.ove_ecCombobox.set(self._overedge['ecolor'].get())
self.ove_fcLabel = ttk.Label(self.oveLabelFrame,
text = 'Face color: ')
self.ove_fcCombobox = ttk.Combobox(self.oveLabelFrame,
width = 7,
values = self._overcmap['Basic'])
self.ove_fcCombobox.set(self._overedge['fcolor'].get())
self.ovs_enButton = ttk.Button(self.ovsLabelFrame,
text = 'Enable',
style = 'SunkableButton.TButton',
command = lambda: ImageOverlay.enable_skeleton(self))
self.ovs_szLabel = ttk.Label(self.ovsLabelFrame,
text = 'Size : ')
self.ovs_szSpinbox = tk.Spinbox(self.ovsLabelFrame,
width = 3)
self.ovs_szSpinbox.delete(0,'end')
self.ovs_szSpinbox.insert(0, self._overskel['size'].get())
self.ovs_ecLabel = ttk.Label(self.ovsLabelFrame,
text = 'Edge color : ')
self.ovs_ecCombobox = ttk.Combobox(self.ovsLabelFrame,
width = 7,
values = self._overcmap['Basic'])
self.ovs_ecCombobox.set(self._overskel['ecolor'].get())
self.ovs_fcLabel = ttk.Label(self.ovsLabelFrame,
text = 'Face color: ')
self.ovs_fcCombobox = ttk.Combobox(self.ovsLabelFrame,
width = 7,
values = self._overcmap['Basic'])
self.ovs_fcCombobox.set(self._overskel['fcolor'].get())
self.ovl_enButton = ttk.Button(self.ovlLabelFrame,
text = 'Enable',
style = 'SunkableButton.TButton',
command = lambda: ImageOverlay.enable_labels(self))
self.ovl_szLabel = ttk.Label(self.ovlLabelFrame,
text = 'Size : ')
self.ovl_szSpinbox = tk.Spinbox(self.ovlLabelFrame,
width = 3)
self.ovl_szSpinbox.delete(0,'end')
self.ovl_szSpinbox.insert(0, self._overlabel['size'].get())
self.ovl_ecLabel = ttk.Label(self.ovlLabelFrame,
text = 'Edge color : ')
self.ovl_ecCombobox = ttk.Combobox(self.ovlLabelFrame,
width = 7,
values = self._overcmap['Basic'])
self.ovl_ecCombobox.set(self._overlabel['ecolor'].get())
self.ovf_enButton = ttk.Button(self.ovfLabelFrame,
text = 'Enable',
style = 'SunkableButton.TButton',
command = lambda: ImageOverlay.enable_fit(self))
self.ovf_lwLabel = ttk.Label(self.ovfLabelFrame,
text = 'Line Width : ')
self.ovf_lwSpinbox = tk.Spinbox(self.ovfLabelFrame,
width = 3)
self.ovf_lwSpinbox.delete(0,'end')
self.ovf_lwSpinbox.insert(0, self._overfit['lwidth'].get())
self.ovf_lcLabel = ttk.Label(self.ovfLabelFrame,
text = 'Line Color : ')
self.ovf_lcCombobox = ttk.Combobox(self.ovfLabelFrame,
width = 7,
values = self._overcmap['Basic'])
self.ovf_lcCombobox.set(self._overfit['color'].get())
self.ovapplyButton = ttk.Button(self.ovbuttonFrame,
text = 'Apply',
command = lambda: ImageOverlay.apply(self))
self.ovcloseButton = ttk.Button(self.ovbuttonFrame,
text = 'Close',
command = lambda: ImageOverlay.close(self))
self.oveLabelFrame.rowconfigure(0, weight = 1)
self.ovsLabelFrame.rowconfigure(0, weight = 1)
self.ovlLabelFrame.rowconfigure(0, weight = 1)
self.ovfLabelFrame.rowconfigure(0, weight = 1)
self.ovbuttonFrame.columnconfigure([0,1], weight = 1)
self.oveLabelFrame.grid(row = 1, column = 0, sticky = 'nsew',
padx = 2, pady = 2)
self.ovsLabelFrame.grid(row = 2, column = 0, sticky = 'nsew',
padx = 2, pady = 2)
self.ovlLabelFrame.grid(row = 3, column = 0, sticky = 'nsew',
padx = 2, pady = 2)
self.ovfLabelFrame.grid(row = 4, column = 0, sticky = 'nsew',
padx = 2, pady = 2)
self.ovbuttonFrame.grid(row = 5, column = 0, sticky = 'nsew',
padx = 2, pady = 2)
self.ove_enButton.grid(row = 0, column = 0, sticky = 'nsew',
padx = 2, pady = 2)
self.ove_szLabel.grid(row = 0, column = 1, sticky = 'nsew',
padx = 2, pady = 2)
self.ove_szSpinbox.grid(row = 0, column = 2, sticky = 'nsew',
padx = 2, pady = 2)
self.ove_ecLabel.grid(row = 0, column = 3, sticky = 'nsew',
padx = 2, pady = 2)
self.ove_ecCombobox.grid(row = 0, column = 4, sticky = 'nsew',
padx = 2, pady = 2)
self.ove_fcLabel.grid(row = 0, column = 5, sticky = 'nsew',
padx = 2, pady = 2)
self.ove_fcCombobox.grid(row = 0, column = 6, sticky = 'nsew',
padx = 2, pady = 2)
self.ovs_enButton.grid(row = 0, column = 0, sticky = 'nsew',
padx = 2, pady = 2)
self.ovs_szLabel.grid(row = 0, column = 1, sticky = 'nsew',
padx = 2, pady = 2)
self.ovs_szSpinbox.grid(row = 0, column = 2, sticky = 'nsew',
padx = 2, pady = 2)
self.ovs_ecLabel.grid(row = 0, column = 3, sticky = 'nsew',
padx = 2, pady = 2)
self.ovs_ecCombobox.grid(row = 0, column = 4, sticky = 'nsew',
padx = 2, pady = 2)
self.ovs_fcLabel.grid(row = 0, column = 5, sticky = 'nsew',
padx = 2, pady = 2)
self.ovs_fcCombobox.grid(row = 0, column = 6, sticky = 'nsew',
padx = 2, pady = 2)
self.ovl_enButton.grid(row = 0, column = 0, sticky = 'nsew',
padx = 2, pady = 2)
self.ovl_szLabel.grid(row = 0, column = 1, sticky = 'nsew',
padx = 2, pady = 2)
self.ovl_szSpinbox.grid(row = 0, column = 2, sticky = 'nsew',
padx = 2, pady = 2)
self.ovl_ecLabel.grid(row = 0, column = 3, sticky = 'nsew',
padx = 2, pady = 2)
self.ovl_ecCombobox.grid(row = 0, column = 4, sticky = 'nsew',
padx = 2, pady = 2)
self.ovf_enButton.grid(row = 0, column = 0, sticky = 'nsew',
padx = 2, pady = 2)
self.ovf_lwLabel.grid(row = 0, column = 1, sticky = 'nsew',
padx = 2, pady = 2)
self.ovf_lwSpinbox.grid(row = 0, column = 2, sticky = 'nsew',
padx = 2, pady = 2)
self.ovf_lcLabel.grid(row = 0, column = 3, sticky = 'nsew',
padx = 2, pady = 2)
self.ovf_lcCombobox.grid(row = 0, column = 4, sticky = 'nsew',
padx = 2, pady = 2)
self.ovapplyButton.grid(row = 0, column = 0, sticky = 'snew',
padx = 50, pady = 2)
self.ovcloseButton.grid(row = 0, column = 1, sticky = 'nsew',
padx = 50, pady = 2)
def setstate_init(self):
try: self._skeleton_image
except AttributeError: self._overskel['enable'].set(0)
try: self._mask_edge
except AttributeError: self._overedge['enable'].set(0)
try: self._labelled_filaments
        except AttributeError: self._overlabel['enable'].set(0)
        try: self._m
        except AttributeError: self._overfit['enable'].set(0)
if self._overedge['enable'].get() == 1:
self.ove_enButton.state(['pressed'])
ttk.Style().configure('SunkableButton.TButton', relief = tk.SUNKEN)
if self._overskel['enable'].get() == 1:
self.ovs_enButton.state(['pressed'])
ttk.Style().configure('SunkableButton.TButton', relief = tk.SUNKEN)
if self._overlabel['enable'].get() == 1:
self.ovl_enButton.state(['pressed'])
ttk.Style().configure('SunkableButton.TButton', relief = tk.SUNKEN)
if self._overfit['enable'].get() == 1:
self.ovf_enButton.state(['pressed'])
ttk.Style().configure('SunkableButton.TButton', relief = tk.SUNKEN)
def enable_edge(self):
if self._overedge['enable'].get() == 1:
self.ove_enButton.state(['!pressed'])
ttk.Style().configure('SunkableButton.TButton', relief = tk.RAISED)
self._overedge['enable'].set(0)
elif self._overedge['enable'].get() == 0:
self.ove_enButton.state(['pressed'])
ttk.Style().configure('SunkableButton.TButton', relief = tk.SUNKEN)
self._overedge['enable'].set(1)
def enable_skeleton(self):
if self._overskel['enable'].get() == 1:
self.ovs_enButton.state(['!pressed'])
ttk.Style().configure('SunkableButton.TButton', relief = tk.RAISED)
self._overskel['enable'].set(0)
elif self._overskel['enable'].get() == 0:
self.ovs_enButton.state(['pressed'])
ttk.Style().configure('SunkableButton.TButton', relief = tk.SUNKEN)
self._overskel['enable'].set(1)
def enable_labels(self):
if self._overlabel['enable'].get() == 1:
self.ovl_enButton.state(['!pressed'])
ttk.Style().configure('SunkableButton.TButton', relief = tk.RAISED)
self._overlabel['enable'].set(0)
elif self._overlabel['enable'].get() == 0:
self.ovl_enButton.state(['pressed'])
ttk.Style().configure('SunkableButton.TButton', relief = tk.SUNKEN)
self._overlabel['enable'].set(1)
def enable_fit(self):
if self._overfit['enable'].get() == 1:
self.ovf_enButton.state(['!pressed'])
ttk.Style().configure('SunkableButton.TButton', relief = tk.RAISED)
self._overfit['enable'].set(0)
elif self._overfit['enable'].get() == 0:
self.ovf_enButton.state(['pressed'])
ttk.Style().configure('SunkableButton.TButton', relief = tk.SUNKEN)
self._overfit['enable'].set(1)
def apply(self):
self._overedge['size'].set(self.ove_szSpinbox.get())
self._overedge['ecolor'].set(self.ove_ecCombobox.get())
self._overedge['fcolor'].set(self.ove_fcCombobox.get())
self._overskel['size'].set(self.ovs_szSpinbox.get())
self._overskel['ecolor'].set(self.ovs_ecCombobox.get())
self._overskel['fcolor'].set(self.ovs_fcCombobox.get())
self._overlabel['size'].set(self.ovl_szSpinbox.get())
self._overlabel['ecolor'].set(self.ovl_ecCombobox.get())
self._overfit['lwidth'].set(self.ovf_lwSpinbox.get())
self._overfit['color'].set(self.ovf_lcCombobox.get())
MainDisplay.show_overlay(self)
try: ROIdisplay.show_roi(self)
except AttributeError: pass
ImageOverlay.setstate_cpanel(self)
def setstate_cpanel(self):
if self._overedge['enable'].get() == 1:
self.eshowButton.config(text = 'Hide')
elif self._overedge['enable'].get() == 0:
self.eshowButton.config(text = 'Show')
if self._overskel['enable'].get() == 1:
self.skskeletonButton.config(text = 'Hide')
elif self._overskel['enable'].get() == 0:
self.skskeletonButton.config(text = 'Skeleton')
if self._overlabel['enable'].get() == 1:
self.tshowlabelButton.config(text = 'Hide Labels')
elif self._overlabel['enable'].get() == 0:
self.tshowlabelButton.config(text = 'Show Labels')
if self._overfit['enable'].get() == 1:
self.tshowfitButton.config(text = 'Hide Fit')
elif self._overfit['enable'].get() == 0:
self.tshowfitButton.config(text = 'Show Fit')
self.skfilterButton.config(text = 'Filter')
self.skmaskButton.config(text = 'Mask')
def close(self):
self.ov_Window.destroy()
self._menucheckOO.set(0)
class ROImanager():
def init_var(self):
self._roicircle = 0
self._roirect = 0
self._roipoly = 0
self._deledge = tk.IntVar()
self._delskel = tk.IntVar()
self._delchain = tk.IntVar()
self._deledge.set(1)
self._delskel.set(1)
self._delchain.set(1)
def create_window(self):
try: self.rt_Window.destroy()
except AttributeError: pass
self.rt_Window = tk.Toplevel(self)
self.rt_Window.title('ROI Manager Tracking')
#self.rt_Window.geometry('240x350-80+50')
self.rt_Window.resizable(0,1)
self.rt_Window.protocol('WM_DELETE_WINDOW',
lambda: ROImanager.close(self))
self.rt_Window.columnconfigure(0, weight = 1)
self.rt_Window.rowconfigure(1, weight = 1)
self.rt_drawFrame = ttk.Frame(self.rt_Window)
self.roicircleButton = ttk.Button(self.rt_drawFrame,
text = 'Circle',
style = 'SunkableButton.TButton',
command = lambda: ROImanager.draw_circle(self))
self.roirectButton = ttk.Button(self.rt_drawFrame,
text = 'Rectangle',
style = 'SunkableButton.TButton',
command = lambda: ROImanager.draw_rectangle(self))
self.roipolyButton = ttk.Button(self.rt_drawFrame,
text = 'Polygon')
self.rt_middleFrame = ttk.Frame(self.rt_Window)
self.rt_middleFrame.rowconfigure(0, weight = 1)
self.rt_middleFrame.columnconfigure([0,1], weight = 1)
self.roilistFrame = ttk.LabelFrame(self.rt_middleFrame,
text = 'ROIs')
self.roilistFrame.rowconfigure(0, weight = 1)
self.roiListbox = tk.Listbox(self.roilistFrame,
width = 15, selectmode = 'extended')
self.roiListbox.bind('<<ListboxSelect>>',
lambda event, arg = self:
ROIdisplay.draw_selec(self, event))
self.roilistScrollbar = ttk.Scrollbar(self.roilistFrame)
self.roilistScrollbar.config(command = self.roiListbox.yview)
self.roiListbox.config(yscrollcommand = self.roilistScrollbar.set)
self.rt_manageFrame = ttk.Frame(self.rt_middleFrame)
self.roiselectallButton = ttk.Button(self.rt_manageFrame,
text = 'Select All',
command = lambda: ROImanager.selectall_roiList(self))
self.roiclearallButton = ttk.Button(self.rt_manageFrame,
text = 'Clear All',
command = lambda: ROImanager.clearall_roiList(self))
self.roideleteallButton = ttk.Button(self.rt_manageFrame,
text = 'Delete All',
command = lambda: ROImanager.keepdelall_roi(self, 0))
self.roikeepallButton = ttk.Button(self.rt_manageFrame,
text = 'Keep All',
command = lambda: ROImanager.keepdelall_roi(self, 1))
self.roideleteselecButton = ttk.Button(self.rt_manageFrame,
text = 'Delete Selection',
command = lambda: ROImanager.keepdelsel_roi(self, 0))
self.roikeepselecButton = ttk.Button(self.rt_manageFrame,
text = 'Keep Selection',
command = lambda: ROImanager.keepdelsel_roi(self,1))
self.rt_bottomFrame = ttk.Frame(self.rt_Window)
self.rt_bottomFrame.columnconfigure([0,1], weight = 1)
self.roioptionsButton = ttk.Button(self.rt_bottomFrame,
text = 'Options',
command = lambda: ROImanager.create_options(self))
self.roicloseButton = ttk.Button(self.rt_bottomFrame,
text = 'Close',
command = lambda: ROImanager.close(self))
self.rt_drawFrame.grid(row = 0, column = 0,
sticky = 'nsew')
self.roicircleButton.grid(row = 0, column = 0,
sticky = 'nsew', padx = 2, pady = 10)
self.roirectButton.grid(row = 0, column = 1,
sticky = 'nsew', padx = 2, pady = 10)
self.roipolyButton.grid(row = 0, column = 2,
sticky = 'nsew', padx = 2, pady = 10)
self.rt_middleFrame.grid(row = 1, column = 0, sticky = 'nsew')
self.roilistFrame.grid(row = 0, column = 0,
sticky = 'ns')
self.roiListbox.grid(row = 0, column = 0, sticky = 'ns')
self.roilistScrollbar.grid(row = 0, column = 1, sticky = 'ns')
self.rt_manageFrame.grid(row = 0, column = 1,
sticky = 'nsew')
self.roiselectallButton.grid(row = 0, column = 0, sticky = 'nsew',
pady = 2, padx = 2)
self.roiclearallButton.grid(row = 1, column = 0, sticky = 'nsew',
pady = 2, padx = 2)
self.roideleteallButton.grid(row = 2, column = 0, sticky = 'nsew',
pady = 2, padx = 2)
self.roikeepallButton.grid(row = 3, column = 0, sticky = 'nsew',
pady = 2, padx = 2)
self.roideleteselecButton.grid(row = 4, column = 0, sticky = 'nsew',
pady = 2, padx = 2)
self.roikeepselecButton.grid(row = 5, column = 0, sticky = 'nsew',
pady = 2, padx = 2)
self.rt_bottomFrame.grid(row = 2, column = 0,
sticky = 'nsew')
self.roioptionsButton.grid(row = 0, column = 0,
sticky = 'nsew', padx = 10, pady = 10)
self.roicloseButton.grid(row = 0, column = 1,
sticky = 'nsew', padx = 10, pady = 10)
try:
self._roipath[-1]
ROImanager.setstate_roi(self)
ROIdisplay.show_roi(self)
ROImanager.update_roiList(self)
except AttributeError:
ROImanager.setstate_noroi(self)
def update_roiList(self):
self.roiListbox.delete(0,'end')
for n, item in enumerate(self._roipath):
if hasattr(item, 'get_radius'): text = 'Circle '
elif hasattr(item, 'get_width'): text = 'Rectangle '
self.roiListbox.insert('end', text + str(n+1))
def selectall_roiList(self):
self.roiListbox.selection_clear(0, 'end')
self.roiListbox.selection_set(0, 'end')
ROIdisplay.draw_selec(self, '<Button-1>')
def clearall_roiList(self):
MainDisplay.show_overlay(self)
ROIdisplay.noshow_roi(self)
del self._roipath
del self._roilabel
self.roiListbox.delete(0, 'end')
self._canvas.draw()
def create_options(self):
try: self.ro_Window.destroy()
except AttributeError: pass
self.ro_Window = tk.Toplevel(self)
self.ro_Window.title('ROI data options')
#self.ro_Window.geometry('180x150-250+100')
self.ro_Window.resizable(0,0)
self.roLabelFrame = ttk.LabelFrame(self.ro_Window,
text = 'Select variables to consider')
self.roideledgeCheckbutton = ttk.Checkbutton(self.roLabelFrame,
text = 'Edges',
variable = self._deledge)
self.roidelskelCheckbutton = ttk.Checkbutton(self.roLabelFrame,
text = 'Skeleton',
variable = self._delskel)
self.roidelchainCheckbutton = ttk.Checkbutton(self.roLabelFrame,
text = 'Labelled Chains',
variable = self._delchain)
self.roidelcloseButton = ttk.Button(self.roLabelFrame,
text = 'Close',
command = lambda: self.ro_Window.destroy())
self.ro_Window.rowconfigure(0, weight = 1)
self.ro_Window.columnconfigure(0, weight = 1)
self.roLabelFrame.columnconfigure(0, weight = 1)
self.roLabelFrame.rowconfigure([0,1,2], weight = 1)
self.roLabelFrame.grid(row = 0, column = 0,
sticky = 'nsew', padx = 2, pady = 2)
self.roideledgeCheckbutton.grid(row = 0, column = 0,
sticky = 'nsew', padx = 2, pady = 2)
self.roidelskelCheckbutton.grid(row = 1, column = 0,
sticky = 'nsew', padx = 2, pady = 2)
self.roidelchainCheckbutton.grid(row = 2, column = 0,
sticky = 'nsew', padx = 2, pady = 2)
self.roidelcloseButton.grid(row = 3, column = 0,
sticky = 'nsew', padx = 5, pady = 2)
def keepdelall_roi(self, keep):
self.roiListbox.selection_clear(0, 'end')
self.roiListbox.selection_set(0, 'end')
if keep == 0: ROImanager.deldata_inroi(self)
elif keep == 1: ROImanager.keepdata_inroi(self)
ROIdisplay.noshow_roi(self)
del self._roipath; del self._roilabel
self.roiListbox.delete(0, 'end')
MainDisplay.show_overlay(self)
def keepdelsel_roi(self, keep):
if keep == 0:
ROImanager.deldata_inroi(self)
list_del = self.roiListbox.curselection()
elif keep == 1:
ROImanager.keepdata_inroi(self)
list_del = [item for item in np.arange(self.roiListbox.size())
if item not in self.roiListbox.curselection()]
ROIdisplay.noshow_roi(self)
for item in sorted(list_del, reverse=True):
del self._roipath[item]
del self._roilabel[item]
for n, item in enumerate(self._roilabel):
item.set_text(str(n+1))
MainDisplay.show_overlay(self)
ROIdisplay.show_roi(self)
ROImanager.update_roiList(self)
def keepdata_inroi(self):
mask_all = np.zeros(self._mat_img.shape)
for item in self.roiListbox.curselection():
mask = ROImanager.data_roi(self, self._roipath[item], 1)
mask_all = mask_all+mask
if self._deledge.get() == 1:
try:
self._mask_edge = self._mask_edge*mask_all
except AttributeError: pass
if self._delskel.get() == 1:
try:
self._skeleton_image = self._skeleton_image*mask_all
except AttributeError: pass
if self._delchain.get() == 1:
try:
self._labelled_filaments = self._labelled_filaments*mask_all
except AttributeError: pass
def deldata_inroi(self):
for item in self.roiListbox.curselection():
mask = ROImanager.data_roi(self, self._roipath[item], 0)
if self._deledge.get() == 1:
try:
self._mask_edge = self._mask_edge*mask
except AttributeError: pass
if self._delskel.get() == 1:
try:
self._skeleton_image = self._skeleton_image*mask
except AttributeError: pass
if self._delchain.get() == 1:
try:
self._labelled_filaments = self._labelled_filaments*mask
except AttributeError: pass
def data_roi(self, id_roi, keep):
if keep == 1: mask = np.zeros(self._mat_img.shape)
elif keep == 0: mask = np.ones(self._mat_img.shape)
if hasattr(id_roi, 'get_width'):
x,y = id_roi.get_xy()
width = id_roi.get_width()
height = id_roi.get_height()
mat_roi = np.array(list(product(
range(int(x),int(x+width)),
range(int(y),int(y+height)))))
for point in mat_roi:
mask[point[1], point[0]] = keep
elif hasattr(id_roi, 'get_radius'):
x,y = id_roi.center
r = id_roi.get_radius()
mat_limroi = np.array(list(product(
range(int(x-r), int(x+r)),
range(int(y-r), int(y+r)))))
for point in mat_limroi:
dist = np.sqrt((point[0]-x)**2+(point[1]-y)**2)
if dist<= r : mask[point[1], point[0]] = keep
return mask
def setstate_noroi(self):
self.roiselectallButton.state(['disabled'])
self.roiclearallButton.state(['disabled'])
self.roideleteallButton.state(['disabled'])
self.roikeepallButton.state(['disabled'])
def setstate_roi(self):
self.roiselectallButton.state(['!disabled'])
self.roiclearallButton.state(['!disabled'])
self.roideleteallButton.state(['!disabled'])
self.roikeepallButton.state(['!disabled'])
def close(self):
if self._roicircle == 1: ROImanager.draw_circle(self)
elif self._roirect == 1: ROImanager.draw_rectangle(self)
try: ROIdisplay.noshow_roi(self)
except AttributeError: pass
self.rt_Window.destroy()
self._menucheckROI.set(0)
def connect_mpl(self):
self._cid_press = self._canvas.mpl_connect('button_press_event', lambda event, arg = self: ROIdisplay.on_mousepress(arg, event))
self._cid_drag = self._canvas.mpl_connect('motion_notify_event', lambda event, arg = self: ROIdisplay.on_mousedrag(arg, event))
self._cid_up = self._canvas.mpl_connect('button_release_event', lambda event, arg = self: ROIdisplay.on_mouseup(arg, event))
def disconnect_mpl(self):
self._canvas.mpl_disconnect(self._cid_press)
self._canvas.mpl_disconnect(self._cid_drag)
self._canvas.mpl_disconnect(self._cid_up)
def draw_circle(self):
if self._roirect == 1: ROImanager.draw_rectangle(self)
self._drawmethod = 0
self._cpressed = 0
if self._roicircle == 1:
ROImanager.disconnect_mpl(self)
self.roicircleButton.state(['!pressed'])
ttk.Style().configure('SunkableButton.TButton', relief = tk.RAISED)
self._roicircle = 0
elif self._roicircle == 0:
ROImanager.connect_mpl(self)
self.roicircleButton.state(['pressed'])
ttk.Style().configure('SunkableButton.TButton', relief = tk.SUNKEN)
self._roicircle = 1
def draw_rectangle(self):
if self._roicircle == 1 : ROImanager.draw_circle(self)
self._drawmethod = 1
self._cpressed = 0
if self._roirect == 1:
ROImanager.disconnect_mpl(self)
self.roirectButton.state(['!pressed'])
ttk.Style().configure('SunkableButton.TButton', relief = tk.RAISED)
self._roirect = 0
elif self._roirect == 0:
ROImanager.connect_mpl(self)
self.roirectButton.state(['pressed'])
ttk.Style().configure('SunkableButton.TButton', relief = tk.SUNKEN)
self._roirect = 1
|
the-stack_0_8107 | import unittest
from jupytervvp.variablesubstitution import VvpFormatter, NonExistentVariableException, VariableSyntaxException
class VariableSubstitutionTests(unittest.TestCase):
def test_substitute_user_variables_works(self):
input_text = """
INSERT INTO {{ namespace }}_{resultsTable}
SELECT * FROM {{ namespace }}_{tableName}
"""
user_ns = {"resultsTable": "table1", "tableName": "table2"}
formatter = VvpFormatter(input_text, user_ns)
expected_output = """
INSERT INTO {{ namespace }}_table1
SELECT * FROM {{ namespace }}_table2
"""
actual_output = formatter.substitute_user_variables()
assert actual_output == expected_output
def test_substitute_user_variables_undefined_variable_throws(self):
input_text = "{var1} sat on {var2}."
user_ns = {"var1": "The cat"}
formatter = VvpFormatter(input_text, user_ns)
with self.assertRaises(NonExistentVariableException) as exception:
formatter.substitute_user_variables()
assert exception.exception.variable_name == "var2"
def test_substitute_user_variables_ambiguous_throws(self):
input_text = "{var1} sat on {{var2}."
user_ns = {"var1": "The cat"}
formatter = VvpFormatter(input_text, user_ns)
with self.assertRaises(VariableSyntaxException) as exception:
formatter.substitute_user_variables()
assert exception.exception.bad_text == "{{var2}"
def test_prepare_escaped_variables_works_in_simple_case(self):
input_text = "{{ variable }} and {{ another }} with { ignore }"
expected = "{{{{ variable }}}} and {{{{ another }}}} with { ignore }"
assert VvpFormatter._prepare_escaped_variables(input_text) == expected
def test_prepare_escaped_variables_throws_in_ambiguous_case(self):
input_text = "{{ good }} and {also_good} and {{bad_because_no_spaces}}"
user_ns = {"also_good": "dummy_value"}
formatter = VvpFormatter(input_text, user_ns)
with self.assertRaises(VariableSyntaxException) as exception:
formatter.substitute_user_variables()
assert exception.exception.bad_text == "{{bad_because_no_spaces}"
def test_substitute_variables_works_in_simple_case(self):
input_text = "{var1} sat on {var2}."
escaped_text = input_text
user_ns = {"var1": "The cat", "var2": "the mat"}
formatter = VvpFormatter(input_text, user_ns)
formatted = formatter._substitute_variables(escaped_text)
assert formatted == "The cat sat on the mat."
def test_substitute_variables_four_braces_transformed_to_two(self):
input_text = "{var1} sat on {{ sittingObject }}."
escaped_text = "{var1} sat on {{{{ sittingObject }}}}."
user_ns = {"var1": "The cat"}
formatter = VvpFormatter(input_text, user_ns)
formatted = formatter._substitute_variables(escaped_text)
assert formatted == "The cat sat on {{ sittingObject }}."
def test_get_ambiguous_syntax_returns_nothing_if_correct(self):
input_text = "{good} and {{ good }}"
assert VvpFormatter._get_ambiguous_syntax(input_text) is None
def test_get_ambiguous_syntax_finds_missing_spaces(self):
test_data = {
"{{myvar}}": "{{myvar}", # missing space {
"{{myvar": "{{myvar", # missing space; no closing brace match
"myvar}}": "myvar}}", # missing space }
"{ { myvar}}": "{ myvar}}", # only get up to next brace back
"{{ myvar}}": "{ myvar}}", # same even if double braces
"{ {{ myvar}}": "{ myvar}}" # matches missing spaces before nesting
}
for test_input in test_data.keys():
assert VvpFormatter._get_ambiguous_syntax(test_input) == test_data[test_input]
def test_get_ambiguous_syntax_does_not_parse_inside_brackets(self):
test_data = {
"{{ myvar }}": None,
"{{ myvar myvar2 }}": None,
}
for test_input in test_data.keys():
assert VvpFormatter._get_ambiguous_syntax(test_input) == test_data[test_input]
def test_get_ambiguous_syntax_finds_multiple_braces(self):
input_text = "{{{ myvar }}}"
assert VvpFormatter._get_ambiguous_syntax(input_text) == "{{{ myvar }"
def test_get_ambiguous_syntax_finds_nesting(self):
test_data = {
"{ {myvar} }": "{ {myvar}",
"{{ {myvar } }}": "{ {myvar }" # inside double braces not parsed, but nesting detected
}
for input_data in test_data.keys():
assert VvpFormatter._get_ambiguous_syntax(input_data) == test_data[input_data]
|
the-stack_0_8108 | # Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import csv
import os
import pathlib
import typing
from multiprocessing.pool import ThreadPool
import requests
CPU_FRACTION = 0.5
WORKER_THREADS = max(int(os.cpu_count() * CPU_FRACTION), 1)
def download_item(source_target: typing.Tuple[str, pathlib.Path]):
"""ThreadPool.imap_unordered accepts tuples as arguments to the callable"""
source_url, download_path = source_target
if not os.path.exists(download_path):
r = requests.get(source_url, stream=True)
if r.status_code == 200:
with open(download_path, "wb") as f:
for chunk in r:
f.write(chunk)
def download_parallel(source_targets: typing.List[typing.Tuple[str, pathlib.Path]]):
ThreadPool(WORKER_THREADS).imap_unordered(download_item, source_targets)
def main(csv_path: pathlib.Path, source_column: str, download_prefix: str):
with open(csv_path) as csv_file:
csv_reader = csv.DictReader(csv_file, delimiter=",")
download_dir = pathlib.Path(download_prefix)
row_num = 0
source_targets = []
for row in csv_reader:
# Example:
# https://covidtracking.com/screenshots/AL/AL-20210307-230802.png
source_url = row[source_column]
state, filename = row[source_column].split("/")[-2:]
(download_dir / state).mkdir(parents=True, exist_ok=True)
source_targets.append((source_url, download_dir / state / filename))
row_num += 1
if row_num % WORKER_THREADS == 0:
download_parallel(source_targets)
source_targets = []
download_parallel(source_targets)
if __name__ == "__main__":
assert os.environ["CSV_PATH"]
assert os.environ["SOURCE_COLUMN"]
assert os.environ["DOWNLOAD_PREFIX"]
main(
csv_path=pathlib.Path(os.environ["CSV_PATH"]).expanduser(),
source_column=os.environ["SOURCE_COLUMN"],
download_prefix=os.environ["DOWNLOAD_PREFIX"],
)
|
the-stack_0_8109 |
from django import forms
from api.models import JournalEntry, JournalEntryLine, Period, Account
class NewJournalEntryForm(forms.ModelForm):
period = forms.ModelChoiceField(
queryset=Period.objects.all(), required=True, to_field_name="slug")
class Meta:
model = JournalEntry
fields = (
'period', 'date', 'memo',
'is_adjusting_entry', 'is_closing_entry',)
class NewJournalEntryLineForm(forms.ModelForm):
account = forms.ModelChoiceField(
queryset=Account.objects.all(), required=True, to_field_name="slug")
class Meta:
model = JournalEntryLine
fields = ('account', 'type', 'amount',)
|
the-stack_0_8111 | import sys
from datetime import datetime
from awsglue.transforms import *
from awsglue.utils import getResolvedOptions
from awsglue.context import GlueContext
from awsglue.dynamicframe import DynamicFrame
from awsglue.job import Job
from pyspark.sql.functions import *
from pyspark.context import SparkContext
from pyspark.sql import SparkSession
from pyspark.sql.types import StringType
#sc = SparkContext()
sc = SparkContext.getOrCreate()
sc.setLogLevel("INFO")
glueContext = GlueContext(sc)
job = Job(glueContext)
args = getResolvedOptions(sys.argv,
['JOB_NAME',
'database_name',
'raw_pose_data_table',
'redshift_conn',
'redshift_role'])
job.init(args['JOB_NAME'], args)
print("Database: {}".format(args['database_name']))
print("Raw Events Table: {}".format(args['raw_pose_data_table']))
# catalog: database and table names
db_name = args['database_name']
raw_pose_data_table = args['raw_pose_data_table']
# Output location
redshift_role = args['redshift_role']
redshift_conn = args['redshift_conn']
redshift_preaction_query = "CREATE TABLE IF NOT EXISTS public.pose_data (msg_id VARCHAR(36),camera_location VARCHAR(20),msg_type VARCHAR(20),identified_action VARCHAR(40),event_time TIMESTAMP,event_time_qs VARCHAR(20),person_count SMALLINT,s3uri VARCHAR(150));"
redshift_options = {
"dbtable": "pose_data",
"database": "default_db",
"aws_iam_role": redshift_role,
"preactions": redshift_preaction_query,
"extracopyoptions": "COMPUPDATE ON"
}
# Helper Function replaces the timestamp into Redshift-compliant format
def applyTransform(rec):
rec["event_time"] = datetime.utcfromtimestamp(rec["timestamp"]).strftime("%m %d, %Y %H:%M:%S")
rec["event_time_qs"] = datetime.utcfromtimestamp(rec["timestamp"]).strftime("%Y-%m-%d %H:%M:%S")
return rec
# Create dynamic frame from the source tables
raw_pose_data = glueContext.create_dynamic_frame.from_catalog(
database=db_name,
table_name=raw_pose_data_table,
# transformation_ctx = "events"
)
print("---- Raw data schema: ----")
raw_pose_data.printSchema()
# Drop the pose field
pose_dropped = raw_pose_data.drop_fields(paths=["pose", "year", "month", "day", "hour"], transformation_ctx="drop_pose")
# Rename some fields to avoid Postgres reserved column name
loc_renamed_df = pose_dropped.rename_field("location", "camera_location", transformation_ctx="rename_location")
act_renamed_df = loc_renamed_df.rename_field("action", "identified_action", transformation_ctx="rename_action")
# Maps a transformation function over each record to change timestamp from epoch to redshift-compliant format
transformed_pose_data = Map.apply(frame = act_renamed_df, f = applyTransform)
final_pose_data = transformed_pose_data.drop_fields(paths=["timestamp"], transformation_ctx="drop_timestamp")
print("---- Processed data schema: ----")
final_pose_data.printSchema()
record_count = final_pose_data.count()
print("Processed record count: {}".format(record_count))
# Avoid errors if Glue Job Bookmark detects no new data to process and records = 0.
if record_count > 0:
glueContext.write_dynamic_frame.from_jdbc_conf(
frame=final_pose_data,
catalog_connection=redshift_conn,
connection_options=redshift_options,
redshift_tmp_dir=args["TempDir"])
else:
print("Glue Job Bookmark detected no new files to process")
job.commit()
|
the-stack_0_8112 | """
https://github.com/tomchristie/django-rest-framework/issues/944
"""
import re
first_cap_re = re.compile('(.)([A-Z][a-z]+)')
all_cap_re = re.compile('([a-z0-9])([A-Z])')
def camelcase_to_underscore(name):
s1 = first_cap_re.sub(r'\1_\2', name)
return all_cap_re.sub(r'\1_\2', s1).lower()
def underscore_to_camelcase(name, lower_first=True):
result = ''.join(char.capitalize() for char in name.split('_'))
if lower_first:
return result[0].lower() + result[1:]
else:
return result
def recursive_key_map(function, data):
if isinstance(data, dict):
new_dict = {}
for key, value in data.items():
if isinstance(key, str):
new_key = function(key)
new_dict[new_key] = recursive_key_map(function, value)
return new_dict
elif isinstance(data, (list, tuple)):
return [recursive_key_map(function, value) for value in data]
else:
return data
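# Example usage (added for illustration; the sample payload is invented):
# recursively convert incoming camelCase keys to snake_case.
if __name__ == '__main__':
    payload = {'firstName': 'Ada', 'contactInfo': [{'phoneNumber': '555-0100'}]}
    print(recursive_key_map(camelcase_to_underscore, payload))
    # -> {'first_name': 'Ada', 'contact_info': [{'phone_number': '555-0100'}]}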
|
the-stack_0_8113 | """
This file offers the methods to automatically retrieve the graph Vibrio palustris.
The graph is automatically retrieved from the STRING repository.
References
---------------------
Please cite the following if you use the data:
```bib
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
```
"""
from typing import Dict
from ..automatic_graph_retrieval import AutomaticallyRetrievedGraph
from ...ensmallen import Graph # pylint: disable=import-error
def VibrioPalustris(
directed: bool = False,
preprocess: bool = True,
load_nodes: bool = True,
verbose: int = 2,
cache: bool = True,
cache_path: str = "graphs/string",
version: str = "links.v11.5",
**additional_graph_kwargs: Dict
) -> Graph:
"""Return new instance of the Vibrio palustris graph.
The graph is automatically retrieved from the STRING repository.
Parameters
-------------------
directed: bool = False
        Whether to load the graph as directed or undirected.
By default false.
preprocess: bool = True
Whether to preprocess the graph to be loaded in
optimal time and memory.
load_nodes: bool = True,
Whether to load the nodes vocabulary or treat the nodes
simply as a numeric range.
verbose: int = 2,
        Whether to show loading bars during the retrieval and building
of the graph.
cache: bool = True
Whether to use cache, i.e. download files only once
and preprocess them only once.
cache_path: str = "graphs"
Where to store the downloaded graphs.
version: str = "links.v11.5"
The version of the graph to retrieve.
The available versions are:
- homology.v11.5
- physical.links.v11.5
- links.v11.5
additional_graph_kwargs: Dict
Additional graph kwargs.
Returns
-----------------------
    Instance of Vibrio palustris graph.
References
---------------------
Please cite the following if you use the data:
```bib
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
```
"""
return AutomaticallyRetrievedGraph(
graph_name="VibrioPalustris",
repository="string",
version=version,
directed=directed,
preprocess=preprocess,
load_nodes=load_nodes,
verbose=verbose,
cache=cache,
cache_path=cache_path,
additional_graph_kwargs=additional_graph_kwargs
)()
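# Illustrative usage (a sketch, not part of the original module; the import
# path below is an assumption and depends on how the package is installed):
#
#     from grape.datasets.string import VibrioPalustris
#     graph = VibrioPalustris(directed=False, version="links.v11.5")
#     print(graph)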
|
the-stack_0_8114 | # (C) Copyright 2020 ECMWF.
#
# This software is licensed under the terms of the Apache Licence Version 2.0
# which can be obtained at http://www.apache.org/licenses/LICENSE-2.0.
# In applying this licence, ECMWF does not waive the privileges and immunities
# granted to it by virtue of its status as an intergovernmental organisation
# nor does it submit to any jurisdiction.
#
import logging
import re
import requests
from .heuristics import Part, parts_heuristics
LOG = logging.getLogger(__name__)
# S3 does not support multiple ranges
class S3Streamer:
def __init__(self, url, request, parts, headers, **kwargs):
self.url = url
self.parts = parts
self.request = request
self.headers = dict(**headers)
self.kwargs = kwargs
def __call__(self, chunk_size):
# See https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html
headers = dict(**self.headers)
# TODO: add assertions
for i, part in enumerate(self.parts):
if i == 0:
request = self.request
else:
offset, length = part
headers["range"] = f"bytes={offset}-{offset+length-1}"
request = requests.get(
self.url,
stream=True,
headers=headers,
**self.kwargs,
)
try:
request.raise_for_status()
except Exception:
LOG.error("URL %s: %s", self.url, request.text)
raise
header = request.headers
bytes = header["content-range"]
LOG.debug("HEADERS %s", header)
m = re.match(r"^bytes (\d+)d?-(\d+)d?/(\d+)d?$", bytes)
assert m, header
start, end, total = int(m.group(1)), int(m.group(2)), int(m.group(3))
assert end >= start
assert start < total
assert end < total
assert start == part.offset, (bytes, part)
# (end + 1 == total) means that we overshoot the end of the file,
# this happens when we round transfer blocks
assert (end == part.offset + part.length - 1) or (end + 1 == total), (
bytes,
part,
)
yield from request.iter_content(chunk_size)
class MultiPartStreamer:
def __init__(self, url, request, parts, boundary, **kwargs):
self.request = request
self.size = int(request.headers["content-length"])
self.encoding = "utf-8"
self.parts = parts
self.boundary = boundary
def __call__(self, chunk_size):
from email.parser import HeaderParser
from requests.structures import CaseInsensitiveDict
header_parser = HeaderParser()
marker = f"--{self.boundary}\r\n".encode(self.encoding)
end_header = b"\r\n\r\n"
end_data = b"\r\n"
end_of_input = f"--{self.boundary}--\r\n".encode(self.encoding)
if chunk_size < len(end_data):
chunk_size = len(end_data)
iter_content = self.request.iter_content(chunk_size)
chunk = next(iter_content)
# Some servers start with \r\n
if chunk[:2] == end_data:
chunk = chunk[2:]
LOG.debug("MARKER %s", marker)
part = 0
while True:
while len(chunk) < max(len(marker), len(end_of_input)):
more = next(iter_content)
assert more is not None
chunk += more
if chunk.find(end_of_input) == 0:
assert part == len(self.parts)
break
pos = chunk.find(marker)
assert pos == 0, (pos, chunk)
chunk = chunk[pos + len(marker) :]
while True:
pos = chunk.find(end_header)
if pos != -1:
break
more = next(iter_content)
assert more is not None
chunk += more
assert len(chunk) < 1024 * 1024
pos += len(end_header)
header = chunk[:pos].decode(self.encoding)
header = CaseInsensitiveDict(header_parser.parsestr(header))
chunk = chunk[pos:]
# kind = header["content-type"]
bytes = header["content-range"]
LOG.debug("HEADERS %s", header)
m = re.match(r"^bytes (\d+)d?-(\d+)d?/(\d+)d?$", bytes)
assert m, header
start, end, total = int(m.group(1)), int(m.group(2)), int(m.group(3))
assert end >= start
assert start < total
assert end < total
size = end - start + 1
assert start == self.parts[part].offset
# (end + 1 == total) means that we overshoot the end of the file,
# this happens when we round transfer blocks
assert (end == self.parts[part].offset + self.parts[part].length - 1) or (
end + 1 == total
), (bytes, self.parts[part])
while size > 0:
if len(chunk) >= size:
yield chunk[:size]
chunk = chunk[size:]
size = 0
else:
yield chunk
size -= len(chunk)
chunk = next(iter_content)
assert chunk.find(end_data) == 0
chunk = chunk[len(end_data) :]
part += 1
class DecodeMultipart:
def __init__(self, url, request, parts, **kwargs):
self.request = request
assert request.status_code == 206, request.status_code
content_type = request.headers["content-type"]
if content_type.startswith("multipart/byteranges; boundary="):
_, boundary = content_type.split("=")
# print("****** MULTI-PART supported by server", url)
self.streamer = MultiPartStreamer(url, request, parts, boundary, **kwargs)
else:
# print("****** MULTI-PART *NOT* supported by server", url)
self.streamer = S3Streamer(url, request, parts, **kwargs)
def __call__(self, chunk_size):
return self.streamer(chunk_size)
class PartFilter:
def __init__(self, parts, positions=None):
self.parts = parts
if positions is None:
positions = [x.offset for x in parts]
self.positions = positions
assert len(self.parts) == len(self.positions)
def __call__(self, streamer):
def execute(chunk_size):
stream = streamer(chunk_size)
chunk = next(stream)
pos = 0
for (_, length), offset in zip(self.parts, self.positions):
offset -= pos
while offset > len(chunk):
pos += len(chunk)
offset -= len(chunk)
chunk = next(stream)
assert chunk
chunk = chunk[offset:]
pos += offset
size = length
while size > 0:
if len(chunk) >= size:
yield chunk[:size]
chunk = chunk[size:]
pos += size
size = 0
else:
yield chunk
size -= len(chunk)
pos += len(chunk)
chunk = next(stream)
# Drain stream, so we don't created error messages in the server's logs
while True:
try:
next(stream)
except StopIteration:
break
return execute
def compress_parts(parts):
last = -1
result = []
# Compress and check
for offset, length in parts:
assert offset >= 0 and length > 0
assert offset >= last, (
f"Offsets and lengths must be in order, and not overlapping:"
f" offset={offset}, end of previous part={last}"
)
if offset == last:
# Compress
offset, prev_length = result.pop()
length += prev_length
result.append((offset, length))
last = offset + length
return tuple(Part(offset, length) for offset, length in result)
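# Worked example (illustrative only): adjacent ranges are merged, so an input
# of [(0, 10), (10, 5), (20, 4)] compresses to (offset, length) pairs
# (0, 15) and (20, 4), returned as Part tuples.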
def compute_byte_ranges(parts, method, url, statistics_gatherer):
if callable(method):
blocks = method(parts)
else:
blocks = parts_heuristics(method, statistics_gatherer)(parts)
blocks = compress_parts(blocks)
assert len(blocks) > 0
assert len(blocks) <= len(parts)
statistics_gatherer(
"byte-ranges",
method=str(method),
url=url,
parts=parts,
blocks=blocks,
)
i = 0
positions = []
block_offset, block_length = blocks[i]
for offset, length in parts:
while offset > block_offset + block_length:
i += 1
block_offset, block_length = blocks[i]
start = i
while offset + length > block_offset + block_length:
i += 1
block_offset, block_length = blocks[i]
end = i
# Sanity check: assert that each parts is contain in a rounded part
assert start == end
positions.append(
offset - blocks[i].offset + sum(blocks[j].length for j in range(i))
)
return blocks, positions
|
the-stack_0_8115 | import datetime
import gc
import numpy as np
import os
import pandas as pd
os.environ['KMP_DUPLICATE_LIB_OK']='True' # MacOS fix for libomp issues (https://github.com/dmlc/xgboost/issues/1715)
import lightgbm as lgb
import xgboost as xgb
from sklearn.metrics import log_loss, roc_auc_score
from sklearn.model_selection import KFold, RepeatedKFold, GroupKFold, StratifiedKFold
from sklearn.decomposition import PCA
from sklearn.preprocessing import LabelEncoder
from sklearn.svm import NuSVC
from tqdm import tqdm as tqdm
from kinoa import kinoa
from scipy.stats import ttest_ind, ks_2samp
def dprint(*args, **kwargs):
print("[{}] ".format(datetime.datetime.now().strftime("%Y-%m-%d %H:%M")) + \
" ".join(map(str,args)), **kwargs)
dprint('PID: {}'.format(os.getpid()))
script_id = 0
data_path = '../input/'
id_col = 'encounter_id'
target_col = 'hospital_death'
fillna_with_est = False
train_lgbm = True
train_xgb = False
# train_catboost = False
train = pd.read_csv(os.path.join(data_path, 'training_v2.csv'))
test = pd.read_csv(os.path.join(data_path, 'unlabeled.csv'))
# Drop constant columns
constant_cols = []
for c in train.columns:
if train[c].nunique(dropna=False) < 2:
constant_cols.append(c)
print(f'constant_cols in train: {constant_cols}')
train.drop(constant_cols, axis=1, inplace=True)
test.drop(constant_cols, axis=1, inplace=True)
constant_cols = []
for c in test.columns:
if c != target_col and test[c].nunique(dropna=False) < 2:
constant_cols.append(c)
print(f'constant_cols in test: {constant_cols}')
train.drop(constant_cols, axis=1, inplace=True)
test.drop(constant_cols, axis=1, inplace=True)
# Add estimated variables to the dataset
est_cols = [
{
'name': 'weight',
'fillna': False,
},
{
'name': 'height',
'fillna': False,
},
{
'name': 'apache_4a_hospital_death_prob',
'fillna': False,
},
# {
# 'name': 'apache_4a_icu_death_prob',
# 'fillna': False,
# }, # Worse
# {
# 'name': 'urineoutput_apache',
# 'fillna': False,
# }, # Worse
# {
# 'name': 'bmi',
# 'fillna': False,
# }, # Worse
# {
# 'name': 'glucose_apache',
# 'fillna': False,
# }, # Worse
]
for c in est_cols:
df = pd.read_csv(f'{c["name"]}_est.csv')
train = train.merge(df, on=id_col, how='left')
test = test.merge(df, on=id_col, how='left')
if c['fillna']:
train.loc[train[c['name']].isnull(), c['name']] = train[c['name'] + '_est']
test.loc[test[c['name']].isnull(), c['name']] = test[c['name'] + '_est']
train.drop([c['name'] + '_est'], axis=1, inplace=True)
test.drop([c['name'] + '_est'], axis=1, inplace=True)
dprint(train.shape, test.shape)
# Extract features
def extract_features(df):
df['d1_temp_minmax'] = df['d1_temp_max'] - df['d1_temp_min']
df['d1_glucose_minmax'] = df['d1_glucose_max'] - df['d1_glucose_min']
df['d1_resprate_minmax'] = df['d1_resprate_max'] - df['d1_resprate_min']
df['d1_spo2_minmax'] = df['d1_spo2_max'] - df['d1_spo2_min']
df['d1_platelets_minmax'] = df['d1_platelets_max'] - df['d1_platelets_min']
# df['d1_heartrate_minmax'] = df['d1_heartrate_max'] - df['d1_heartrate_min']
# df['h1_heartrate_minmax'] = df['h1_heartrate_max'] - df['h1_heartrate_min']
# df['h1_temp_minmax'] = df['h1_temp_max'] - df['h1_temp_min']
# df['h1_glucose_minmax'] = df['h1_glucose_max'] - df['h1_glucose_min']
# df['h1_resprate_minmax'] = df['h1_resprate_max'] - df['h1_resprate_min']
# df['h1_spo2_minmax'] = df['h1_spo2_max'] - df['h1_spo2_min']
# df['h1_platelets_minmax'] = df['h1_platelets_max'] - df['h1_platelets_min']
# df['abmi'] = df['age']*100*100*df['weight']/df['height']/df['height']
df['apache_4a_hospicu_death_prob'] = df['apache_4a_hospital_death_prob'] + df['apache_4a_icu_death_prob']
# df['apache_4a_hospicu_death_prob_m'] = df['apache_4a_hospital_death_prob'] * df['apache_4a_icu_death_prob']
df['age_group'] = df['age']//5
df['weight_group'] = df['weight']//5
# df['hr_a'] = df['d1_heartrate_max']/df['age']
# df['hr_w'] = df['d1_heartrate_max']/df['weight']
if fillna_with_est:
df['bmi'] = 100*100*df['weight']/df['height']/df['height']
else:
df['bmi_w_est'] = 100*100*df['weight_est']/df['height']/df['height']
df['bmi_h_est'] = 100*100*df['weight']/df['height_est']/df['height_est']
df['bmi_wh_est'] = 100*100*df['weight_est']/df['height_est']/df['height_est']
# df['agi'] = df['weight']/df['age']
# df['hrw'] = df['d1_heartrate_max']/df['weight']
# cols = ['temp_apache', 'd1_temp_max', 'd1_temp_min', 'h1_temp_max', 'h1_temp_min']
# for c in cols:
# df[c] = df[c]/36.6
pass
extract_features(train)
extract_features(test)
train['is_test'] = 0
test['is_test'] = 1
df_all = pd.concat([train, test], axis=0)
dprint('Label Encoder...')
cols = [f_ for f_ in df_all.columns if df_all[f_].dtype == 'object']
print(cols)
cnt = 0
for c in tqdm(cols):
if c != id_col:
# print(c)
le = LabelEncoder()
df_all[c] = le.fit_transform(df_all[c].astype(str))
cnt += 1
del le
dprint('len(cols) = {}'.format(cnt))
gfs = ['hospital_id', 'icu_id', 'age_group', 'apache_3j_diagnosis', 'gender', 'ethnicity', 'apache_3j_bodysystem'] #+ \
# ['hospital_admit_source', 'icu_admit_source', 'icu_stay_type', 'icu_type', 'apache_2_bodysystem']
ffs = ['apache_4a_hospital_death_prob', 'apache_4a_icu_death_prob', 'bmi']
# ffs = ['apache_4a_hospital_death_prob', 'apache_4a_icu_death_prob', 'bmi', 'bmi_w_est', 'bmi_h_est', 'bmi_wh_est', 'weight', 'height']
for gf in gfs:
for ff in ffs:
g = df_all.groupby(gf)[ff].agg(['mean', 'std', 'min', 'max']).reset_index()
g.rename({'mean': f'{gf}_{ff}__mean', 'std': f'{gf}_{ff}__std', 'min': f'{gf}_{ff}__min', 'max': f'{gf}_{ff}__max'}, axis=1, inplace=True)
df_all = df_all.merge(g, on=gf, how='left')
train = df_all.loc[df_all['is_test'] == 0].drop(['is_test'], axis=1)
test = df_all.loc[df_all['is_test'] == 1].drop(['is_test'], axis=1)
del df_all
gc.collect()
features = list(train.columns.values)
features.remove(id_col)
features.remove(target_col)
# Build the model
cnt = 0
p_buf = []
n_splits = 4
n_repeats = 1
kf = RepeatedKFold(
n_splits=n_splits,
n_repeats=n_repeats,
random_state=0)
err_buf = []
undersampling = 0
lgb_params = {
'boosting_type': 'gbdt',
'objective': 'binary',
'metric': 'auc',
'max_depth': 8,
'learning_rate': 0.05,
'feature_fraction': 0.85,
'bagging_fraction': 0.85,
'bagging_freq': 5,
'lambda_l1': 1.0,
'lambda_l2': 10.0,
'verbose': -1,
'num_threads': 4,
}
xgb_params = {
'max_depth': 8,
'eta': 0.05,
'objective': 'binary:logistic',
'subsample': 0.85,
'colsample_bytree': 0.85,
'alpha': 1,
'lambda': 1,
'eval_metric': 'auc',
'nthread': 4,
}
cols_to_drop = [
id_col,
target_col,
# 'patient_id',
]
# cols_to_use = features
X = train.drop(cols_to_drop, axis=1, errors='ignore')
y = train[target_col].values
X_test = test.drop(cols_to_drop, axis=1, errors='ignore')
id_test = test[id_col].values
# # Feature selection
# cols_to_drop = []
# for c in X.columns:
# # t = ttest_ind(
# # X[c].fillna(X[c].mean()),
# # X_test[c].fillna(X_test[c].mean()))
# t = ttest_ind(
# X[c].dropna(),
# X_test[c].dropna())
# # print(c, t)
# if t[1] < 0.001:
# print(c, t)
# cols_to_drop.append(c)
# print(f'Dropping after statistical tests: {cols_to_drop}')
# X = X.drop(cols_to_drop, axis=1, errors='ignore')
# X_test = X_test.drop(cols_to_drop, axis=1, errors='ignore')
n_features = X.shape[1]
dprint(f'n_features: {n_features}')
p_test = []
for fold_i, (train_index, valid_index) in enumerate(kf.split(X, y)):
x_train = X.iloc[train_index]
x_valid = X.iloc[valid_index]
y_train = y[train_index]
y_valid = y[valid_index]
x_test = X_test.copy()
# # Frequency encoding
# encoding = x_train.groupby('height').size()
# encoding = encoding/len(x_train)
# x_train['height_fenc'] = x_train['height'].map(encoding)
# x_valid['height_fenc'] = x_valid['height'].map(encoding)
# x_test['height_fenc'] = x_test['height'].map(encoding)
feature_names = list(x_train.columns)
p_valid = []
# LGBM
if train_lgbm:
params = lgb_params.copy()
# pca = PCA(n_components=144)
# x_train = pca.fit_transform(x_train)
# x_valid = pca.transform(x_valid)
# x_test_pca = pca.transform(x_test)
# feature_names = ['pca_{}'.format(i) for i in range(x_train.shape[1])]
lgb_train = lgb.Dataset(
x_train,
y_train,
feature_name=feature_names,
)
lgb_train.raw_data = None
lgb_valid = lgb.Dataset(
x_valid,
y_valid,
)
lgb_valid.raw_data = None
model = lgb.train(
params,
lgb_train,
num_boost_round=5000,
valid_sets=[lgb_valid],
early_stopping_rounds=100,
verbose_eval=100,
)
if fold_i == 0:
importance = model.feature_importance()
model_fnames = model.feature_name()
tuples = sorted(zip(model_fnames, importance), key=lambda x: x[1])[::-1]
tuples = [x for x in tuples if x[1] > 0]
print('Important features:')
for i in range(20):
if i < len(tuples):
print(tuples[i])
else:
break
del importance, model_fnames, tuples
p_lgbm = model.predict(x_valid, num_iteration=model.best_iteration)
p_valid.append(p_lgbm)
err = roc_auc_score(y_valid, p_lgbm)
# err_buf.append(err)
dprint('{} LGBM AUC: {:.4f}'.format(fold_i, err))
p_lgbm_test = model.predict(x_test[feature_names], num_iteration=model.best_iteration)
p_test.append(p_lgbm_test)
# XGB
if train_xgb:
params = xgb_params.copy()
dtrain = xgb.DMatrix(x_train, label=y_train)
dvalid = xgb.DMatrix(x_valid, label=y_valid)
dtest = xgb.DMatrix(x_test[feature_names])
evallist = [(dvalid, 'eval')]
bst = xgb.train(
params,
dtrain,
5000,
evallist,
early_stopping_rounds=100,
verbose_eval=100
)
p_xgb = bst.predict(dvalid, ntree_limit=bst.best_iteration)
p_valid.append(p_xgb)
err = roc_auc_score(y_valid, p_xgb)
# err_buf.append(err)
dprint('{} XGB AUC: {:.4f}'.format(fold_i, err))
p_xgb_test = bst.predict(dtest, ntree_limit=bst.best_iteration)
p_test.append(p_xgb_test)
# Ensemble evaluation
if len(p_valid) > 1:
p_ens = np.mean(p_valid, axis=0)
err = roc_auc_score(y[valid_index], p_ens)
dprint('{} ENS AUC: {:.4f}'.format(fold_i, err))
err_buf.append(err)
# x_train = X.iloc[train_index]
# x_valid = X.iloc[valid_index]
# model = NuSVC(
# probability=True,
# kernel='poly',
# degree=4,
# gamma='auto',
# random_state=0,
# nu=0.6,
# coef0=0.05)
# model.fit(x_train, y[train_index])
# p_nusvc = model.predict_proba(x_valid)[:, 1]
# err = roc_auc_score(y[valid_index], p_nusvc)
# print('{} {} NuSVC AUC: {}'.format(v, cnt + 1, err))
# p_nusvc_test = model.predict_proba(x_test)[:, 1]
# p_mean = 0.1*p_lgbm + 0.9*p_nusvc
# err = roc_auc_score(y[valid_index], p_mean)
# print('{} {} ENS AUC: {}'.format(v, cnt + 1, err))
# p = 0.1*p_lgbm_test + 0.9*p_nusvc_test
del model, lgb_train, lgb_valid
    gc.collect()
# break
err_mean = np.mean(err_buf)
err_std = np.std(err_buf)
dprint('AUC: {:.4f} +/- {:.4f}'.format(err_mean, err_std))
test_preds = np.mean(p_test, axis=0)
submission = pd.DataFrame()
submission[id_col] = id_test
submission[target_col] = test_preds
submission.to_csv('submission{}.csv'.format(script_id), index=False)
# Save backup
files = [
'model{}.py'.format(script_id),
'model{}.log'.format(script_id),
'submission{}.csv'.format(script_id),
# 'feature_importance{}.txt'.format(script_id),
# 'train_weights{}.csv'.format(script_id),
]
experiment_name = 'Exp{}'.format(script_id)
params = {}
params['n_models'] = cnt
scores = {}
scores['auc_mean'] = err_mean
scores['auc_std'] = err_std
scores['kaggle'] = np.nan
other = {}
other['n_features'] = n_features
other['n_splits'] = n_splits
comments = ''
kinoa.save(
files,
experiment_name=experiment_name,
params=params,
scores=scores,
other=other,
comments=comments,
working_dir='',
sort_log_by='experiment_datetime',
sort_log_ascending=True,
columns_order={'scores.kaggle': -1, 'scores.auc_std': -2, 'scores.auc_mean': -3}
)
dprint('Done!')
|
the-stack_0_8116 | # Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Sets the IAM policy for the repository."""
from googlecloudsdk.api_lib.source.repos import sourcerepo
from googlecloudsdk.calliope import base
from googlecloudsdk.command_lib.iam import iam_util
@base.ReleaseTracks(base.ReleaseTrack.GA, base.ReleaseTrack.ALPHA,
base.ReleaseTrack.BETA)
class SetIamPolicy(base.UpdateCommand):
"""Set the IAM policy for the named repository.
This command sets the IAM policy for the given repository from the
policy in the provided file.
## EXAMPLES
To set the IAM policy, issue the following command:
$ {command} REPOSITORY_NAME POLICY_FILE
"""
@staticmethod
def Args(parser):
parser.add_argument(
'name', metavar='REPOSITORY_NAME', help='Name of the repository.')
parser.add_argument(
'policy_file',
help=('JSON or YAML file with IAM policy. '
'See https://cloud.google.com/resource-manager/'
'reference/rest/Shared.Types/Policy'))
parser.display_info.AddFormat('default')
def Run(self, args):
"""Sets the IAM policy for the repository.
Args:
args: argparse.Namespace, the arguments this command is run with.
Returns:
(sourcerepo_v1_messsages.Policy) The IAM policy.
Raises:
ToolException: on project initialization errors.
"""
res = sourcerepo.ParseRepo(args.name)
source = sourcerepo.Source()
policy = iam_util.ParseYamlorJsonPolicyFile(args.policy_file,
source.messages.Policy)
result = source.SetIamPolicy(res, policy)
iam_util.LogSetIamPolicy(res.Name(), 'repo')
return result
|
the-stack_0_8120 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2008-2019 Edgewall Software
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/log/.
"""functional_tests
While unittests work well for testing facets of an implementation, they fail to
provide assurances that the user-visible functions work in practice. Here, we
complement the unittests with functional tests that drive the system as a user
would to verify user visible functionality. These functional tests are run as
part of the unittests.
So, we use Twill to verify Trac's functionality as served by tracd (and in the
future, other frontends).
Unlike most unittests, we setup a single fixture against which we run all the
testcases. This is for two reasons: Primarily, that provides us with a more
complex set of data to test against and thus more room for triggering bugs.
Secondarily, the cost of setting up a new Trac environment and Subversion
repository is significant, so recreating the fixture for each test would be
very costly.
There are two primary objects involved in the testing, the
FunctionalTestEnvironment and the FunctionalTester.
FunctionalTestEnvironment represents the Trac environment, the Subversion
repository, and the server. The server will be run on a random local port in
the range 8000-8999. A subdirectory named 'tracenv' will be created containing
the Trac environment, Subversion repository, and the user authentication
information. An 'admin' user is created and given TRAC_ADMIN privs early in
the testing. There are other users added as well. All accounts are setup with
a password equalling the username. The test environment is left behind after
the testing has completed to assist in debugging.
FunctionalTester provides code reuse for the testcases to allow a higher-level
description of the more complicated bugs. For example, creating a new ticket
is the first step in regression testing many things, so FunctionalTester
provides a create_ticket() method. That method is written as if it were itself
a testcase for creating a ticket, so there is a testcase that simply calls that
method, and other testcases that use it as a higher-level step don't have to
worry about basic issues such as if the ticket was successfully created.
Requirements:
- Twill (http://twill.idyll.org/)
- lxml for XHTML validation (optional)
"""
import os
import unittest
from pkg_resources import parse_version
import trac
# Handle missing twill so we can print a useful 'SKIP'
# message. We import subprocess first to allow customizing it on Windows
# to select pywin32 in favor of _subprocess for low-level calls. If Twill
# is allowed to load first, its (unmodified) copy will always be loaded.
import subprocess
from trac.tests.functional.better_twill import b, tc, twill
try:
# This is the first indicator of whether the subversion bindings are
# correctly installed.
from svn import core
has_svn = True
except ImportError:
has_svn = False
from trac.test import TestSetup, TestCaseSetup
internal_error = 'Trac detected an internal error:'
trac_source_tree = os.path.normpath(os.path.join(trac.__file__, '..', '..'))
if twill:
from trac.tests.functional.testenv import FunctionalTestEnvironment
from trac.tests.functional.svntestenv import SvnFunctionalTestEnvironment
from trac.tests.functional.tester import FunctionalTester
class FunctionalTestSuite(TestSetup):
"""TestSuite that provides a test fixture containing a
FunctionalTestEnvironment and a FunctionalTester.
"""
if has_svn:
env_class = SvnFunctionalTestEnvironment
else:
env_class = FunctionalTestEnvironment
tester_class = FunctionalTester
def __init__(self):
if parse_version(twill.__version__) != parse_version('0.9'):
raise ImportError("Twill 0.9 is required. Found version %s."
% twill.__version__)
super(FunctionalTestSuite, self).__init__()
def setUp(self, port=None):
"""If no port is specified, use a semi-random port and subdirectory
'testenv'; but if a port is specified, use that port and
subdirectory 'testenv<portnum>'.
"""
if port is None:
try:
port = int(os.getenv('TRAC_TEST_PORT'))
except (TypeError, ValueError):
pass
env_path = os.getenv('TRAC_TEST_ENV_PATH')
if not env_path:
env_name = 'testenv%s' % (port or '')
env_path = os.path.join(trac_source_tree, env_name)
else:
env_path += str(port or '')
if port is None:
port = 8000 + os.getpid() % 1000
baseurl = "http://127.0.0.1:%s" % port
self._testenv = self.env_class(env_path, port, baseurl)
# functional-testing.log gets the twill output
self.functional_test_log = \
os.path.join(env_path, 'functional-testing.log')
twill.set_output(open(self.functional_test_log, 'w'))
self._testenv.start()
self._tester = self.tester_class(baseurl)
self.fixture = (self._testenv, self._tester)
self._testenv.set_config('project', 'name', 'Functional Tests')
def tearDown(self):
self._testenv.stop()
class FunctionalTestCaseSetup(TestCaseSetup):
"""Convenience class to expand the fixture into the _testenv and
_tester attributes."""
def setUp(self):
self._testenv, self._tester = self.fixture
class FunctionalTwillTestCaseSetup(FunctionalTestCaseSetup):
failureException = twill.errors.TwillAssertionError
else:
# We're going to have to skip the functional tests
class FunctionalTestSuite(TestSetup):
def __init__(self):
raise ImportError("Twill not installed")
class FunctionalTwillTestCaseSetup(object):
pass
class FunctionalTestCaseSetup(object):
pass
# Twill's find command accepts regexes; some convenient but complex regexes
# & regex factories are provided here (only one so far):
def regex_owned_by(username):
return '(Owned by:(<[^>]*>|\\n| )*%s)' % username
def functionalSuite():
suite = FunctionalTestSuite()
return suite
def test_suite():
try:
suite = functionalSuite()
import trac.tests.functional.testcases
trac.tests.functional.testcases.functionalSuite(suite)
import trac.versioncontrol.tests
trac.versioncontrol.tests.functionalSuite(suite)
import trac.ticket.tests
trac.ticket.tests.functionalSuite(suite)
import trac.mimeview.tests
trac.mimeview.tests.functionalSuite(suite)
import trac.prefs.tests
trac.prefs.tests.functionalSuite(suite)
import trac.wiki.tests
trac.wiki.tests.functionalSuite(suite)
import trac.timeline.tests
trac.timeline.tests.functionalSuite(suite)
import trac.admin.tests
trac.admin.tests.functionalSuite(suite)
import trac.search.tests
trac.search.tests.functionalSuite(suite)
# The db tests should be last since the backup test occurs there.
import trac.db.tests
trac.db.tests.functionalSuite(suite)
except ImportError as e:
print("SKIP: functional tests (%s)" % e)
# No tests to run, provide an empty suite.
suite = unittest.TestSuite()
return suite
if __name__ == '__main__':
unittest.main(defaultTest='test_suite')
|
the-stack_0_8122 | from .utils import *
from .QFunction import *
import torch
from torch import nn
import torch.nn.functional as F
from torch.distributions.normal import Normal
class MLP_SquashedGaussianActor(nn.Module):
def __init__(self,
observation_dim,
action_dim,
hidden_sizes,
activation,
act_limit):
super().__init__()
self.log_std_max = 2
self.log_std_min = -20
self.net = create_mlp([observation_dim] + list(hidden_sizes),
activation,
activation)
self.mu_layer = nn.Linear(hidden_sizes[-1], action_dim)
self.log_std_layer = nn.Linear(hidden_sizes[-1], action_dim)
self.act_limit = act_limit
def forward(self, observation, deterministic=False, with_log_prob=True):
net_out = self.net(observation)
# computer the \mu and \sigma of the gaussian
mu = self.mu_layer(net_out)
log_std = self.log_std_layer(net_out)
log_std = torch.clamp(log_std, self.log_std_min, self.log_std_max)
std = torch.exp(log_std)
# Pre-squash distribution and sample
pi_distribution = Normal(mu, std)
if deterministic:
# only used for evaluating policy at test time.
pi_action = mu
else:
pi_action = pi_distribution.rsample()
if with_log_prob:
# Appendix C
log_pro_pi = pi_distribution.log_prob(pi_action).sum(dim=-1)
log_pro_pi -= (2 * (np.log(2) - pi_action - F.softplus(-2*pi_action))).sum(dim=-1)
else:
log_pro_pi = None
pi_action = torch.tanh(pi_action)
pi_action = self.act_limit * pi_action
return pi_action, log_pro_pi |
the-stack_0_8123 | import fnmatch
import string
class Match:
ACCEPT = 1
REJECT = 2
UNKNOWN = 3
class PathFilter(object):
class Rule(object):
def __init__(self, pattern, match_action):
assert match_action in (Match.ACCEPT, Match.REJECT)
self.pattern = pattern
self.match_action = match_action
def match(self, path):
if fnmatch.fnmatch(path, self.pattern):
return self.match_action
return Match.UNKNOWN
def __init__(self, rules):
self._rules = rules
def match(self, path):
"""Tests the path against all rules in this filter"""
for rule in self._rules:
if rule.match(path) == Match.ACCEPT:
return True
elif rule.match(path) == Match.REJECT:
return False
return True
@staticmethod
def from_rule_list(rule_list):
"""Read from a dict. `version` is ignored"""
rules = []
for rule_string in rule_list:
rule_string = rule_string.strip()
rule_comps = rule_string.split()
match_action_string = rule_comps[0]
if match_action_string == '+':
match_action = Match.ACCEPT
elif match_action_string == '-':
match_action = Match.REJECT
else:
raise ValueError("unknown match type: %s" %
(match_action_string))
            pattern = ' '.join(rule_comps[1:])
rules.append(PathFilter.Rule(pattern, match_action))
return PathFilter(rules)
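# Illustrative usage (added as an example; the rule strings are hypothetical):
# rules are checked in order, the first matching '+'/'-' rule decides, and
# unmatched paths are accepted by default.
if __name__ == '__main__':
    pf = PathFilter.from_rule_list(['- *.pyc', '+ src/*', '- *'])
    print(pf.match('src/app.py'))   # True: matches '+ src/*'
    print(pf.match('build/app.o'))  # False: falls through to '- *'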
|
the-stack_0_8124 | # coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
class NRLTVIE(InfoExtractor):
_VALID_URL = r"https?://(?:www\.)?nrl\.com/tv(/[^/]+)*/(?P<id>[^/?&#]+)"
_TEST = {
"url": "https://www.nrl.com/tv/news/match-highlights-titans-v-knights-862805/",
"info_dict": {
"id": "YyNnFuaDE6kPJqlDhG4CGQ_w89mKTau4",
"ext": "mp4",
"title": "Match Highlights: Titans v Knights",
},
"params": {
# m3u8 download
"skip_download": True,
"format": "bestvideo",
},
}
def _real_extract(self, url):
display_id = self._match_id(url)
webpage = self._download_webpage(url, display_id)
q_data = self._parse_json(
self._html_search_regex(r'(?s)q-data="({.+?})"', webpage, "player data"),
display_id,
)
ooyala_id = q_data["videoId"]
return self.url_result(
"ooyala:" + ooyala_id, "Ooyala", ooyala_id, q_data.get("title")
)
|