ext (stringclasses, 9 values) | sha (stringlengths, 40) | content (stringlengths, 3–1.04M) |
---|---|---|
py | 1a2f66bc368cdfd73df0a7a327e51fefc29f6d3d | """Ensure credentials are preserved through the authorization.
The Authorization Code Grant must preserve both state and the redirect URI,
and the Implicit Grant must preserve state.
"""
from __future__ import absolute_import, unicode_literals
import json
import mock
from oauthlib.oauth2 import (MobileApplicationServer, RequestValidator,
WebApplicationServer)
from oauthlib.oauth2.rfc6749 import errors
from ....unittest import TestCase
from .test_utils import get_fragment_credentials, get_query_credentials
class PreservationTest(TestCase):
DEFAULT_REDIRECT_URI = 'http://i.b./path'
def setUp(self):
self.validator = mock.MagicMock(spec=RequestValidator)
self.validator.get_default_redirect_uri.return_value = self.DEFAULT_REDIRECT_URI
self.validator.get_code_challenge.return_value = None
self.validator.authenticate_client.side_effect = self.set_client
self.web = WebApplicationServer(self.validator)
self.mobile = MobileApplicationServer(self.validator)
def set_state(self, state):
def set_request_state(client_id, code, client, request):
request.state = state
return True
return set_request_state
def set_client(self, request):
request.client = mock.MagicMock()
request.client.client_id = 'mocked'
return True
def test_state_preservation(self):
auth_uri = 'http://example.com/path?state=xyz&client_id=abc&response_type='
token_uri = 'http://example.com/path'
# authorization grant
h, _, s = self.web.create_authorization_response(
auth_uri + 'code', scopes=['random'])
self.assertEqual(s, 302)
self.assertIn('Location', h)
code = get_query_credentials(h['Location'])['code'][0]
self.validator.validate_code.side_effect = self.set_state('xyz')
_, body, _ = self.web.create_token_response(token_uri,
body='grant_type=authorization_code&code=%s' % code)
self.assertEqual(json.loads(body)['state'], 'xyz')
# implicit grant
h, _, s = self.mobile.create_authorization_response(
auth_uri + 'token', scopes=['random'])
self.assertEqual(s, 302)
self.assertIn('Location', h)
self.assertEqual(get_fragment_credentials(h['Location'])['state'][0], 'xyz')
def test_redirect_uri_preservation(self):
auth_uri = 'http://example.com/path?redirect_uri=http%3A%2F%2Fi.b%2Fpath&client_id=abc'
redirect_uri = 'http://i.b/path'
token_uri = 'http://example.com/path'
# authorization grant
h, _, s = self.web.create_authorization_response(
auth_uri + '&response_type=code', scopes=['random'])
self.assertEqual(s, 302)
self.assertIn('Location', h)
self.assertTrue(h['Location'].startswith(redirect_uri))
# confirm_redirect_uri should return false if the redirect uri
# was given in the authorization but not in the token request.
self.validator.confirm_redirect_uri.return_value = False
code = get_query_credentials(h['Location'])['code'][0]
_, body, _ = self.web.create_token_response(token_uri,
body='grant_type=authorization_code&code=%s' % code)
self.assertEqual(json.loads(body)['error'], 'invalid_request')
# implicit grant
h, _, s = self.mobile.create_authorization_response(
auth_uri + '&response_type=token', scopes=['random'])
self.assertEqual(s, 302)
self.assertIn('Location', h)
self.assertTrue(h['Location'].startswith(redirect_uri))
def test_invalid_redirect_uri(self):
auth_uri = 'http://example.com/path?redirect_uri=http%3A%2F%2Fi.b%2Fpath&client_id=abc'
self.validator.validate_redirect_uri.return_value = False
# authorization grant
self.assertRaises(errors.MismatchingRedirectURIError,
self.web.create_authorization_response,
auth_uri + '&response_type=code', scopes=['random'])
# implicit grant
self.assertRaises(errors.MismatchingRedirectURIError,
self.mobile.create_authorization_response,
auth_uri + '&response_type=token', scopes=['random'])
def test_default_uri(self):
auth_uri = 'http://example.com/path?state=xyz&client_id=abc'
self.validator.get_default_redirect_uri.return_value = None
# authorization grant
self.assertRaises(errors.MissingRedirectURIError,
self.web.create_authorization_response,
auth_uri + '&response_type=code', scopes=['random'])
# implicit grant
self.assertRaises(errors.MissingRedirectURIError,
self.mobile.create_authorization_response,
auth_uri + '&response_type=token', scopes=['random'])
def test_default_uri_in_token(self):
auth_uri = 'http://example.com/path?state=xyz&client_id=abc'
token_uri = 'http://example.com/path'
# authorization grant
h, _, s = self.web.create_authorization_response(
auth_uri + '&response_type=code', scopes=['random'])
self.assertEqual(s, 302)
self.assertIn('Location', h)
self.assertTrue(h['Location'].startswith(self.DEFAULT_REDIRECT_URI))
# confirm_redirect_uri should return true if the redirect uri
# was not given in the authorization AND not in the token request.
self.validator.confirm_redirect_uri.return_value = True
code = get_query_credentials(h['Location'])['code'][0]
self.validator.validate_code.side_effect = self.set_state('xyz')
_, body, s = self.web.create_token_response(token_uri,
body='grant_type=authorization_code&code=%s' % code)
self.assertEqual(s, 200)
self.assertEqual(self.validator.confirm_redirect_uri.call_args[0][2], self.DEFAULT_REDIRECT_URI)
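# Hedged illustration (not part of the original tests): how the ``state``
# parameter round-trips through the redirect described in the module docstring,
# using only the standard library; the Location URL below is a made-up example.
def _example_state_roundtrip():
    from urllib.parse import urlparse, parse_qs
    location = 'http://i.b./path?code=abc123&state=xyz'
    query = parse_qs(urlparse(location).query)
    # The client must receive back exactly the state it sent in the request.
    assert query['state'] == ['xyz']
    return query['code'][0]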
|
py | 1a2f6839de35c5142ce239003142bdb6ce9a9cdf | import os
import src.data.atomic as atomic_data
import src.data.conceptnet as conceptnet_data
import src.data.config as cfg
import utils.utils as utils
import pickle
import torch
import json
start_token = "<START>"
end_token = "<END>"
blank_token = "<blank>"
def save_checkpoint(state, filename):
print("Saving model to {}".format(filename))
torch.save(state, filename)
def save_step(model, vocab, optimizer, opt, length, lrs):
if cfg.test_save:
name = "{}.pickle".format(utils.make_name(
opt, prefix="garbage/models/", is_dir=False, eval_=True))
else:
name = "{}.pickle".format(utils.make_name(
opt, prefix="models/", is_dir=False, eval_=True))
save_checkpoint({
"epoch": length, "state_dict": model.state_dict(),
"optimizer": optimizer.state_dict(), "opt": opt,
"vocab": vocab, "epoch_learning_rates": lrs},
name)
def save_eval_file(opt, stats, eval_type="losses", split="dev", ext="pickle"):
if cfg.test_save:
name = "{}/{}.{}".format(utils.make_name(
opt, prefix="garbage/{}/".format(eval_type),
is_dir=True, eval_=True), split, ext)
else:
name = "{}/{}.{}".format(utils.make_name(
opt, prefix="results/{}/".format(eval_type),
is_dir=True, eval_=True), split, ext)
print("Saving {} {} to {}".format(split, eval_type, name))
if ext == "pickle":
with open(name, "wb") as f:
pickle.dump(stats, f)
elif ext == "txt":
with open(name, "w") as f:
f.write(stats)
elif ext == "json":
with open(name, "w") as f:
json.dump(stats, f)
else:
raise ValueError("Unsupported eval file extension: {}".format(ext))
def load_checkpoint(filename, gpu=True):
    if os.path.exists(filename):
        checkpoint = torch.load(
            filename, map_location=lambda storage, loc: storage)
    else:
        print("No model found at {}".format(filename))
        # Return None explicitly so the caller can handle a missing checkpoint
        checkpoint = None
    return checkpoint
def make_data_loader(opt, *args):
if opt.dataset == "atomic":
return atomic_data.GenerationDataLoader(opt, *args)
elif opt.dataset == "conceptnet":
return conceptnet_data.GenerationDataLoader(opt, *args)
def set_max_sizes(data_loader, force_split=None):
data_loader.total_size = {}
if force_split is not None:
data_loader.total_size[force_split] = \
data_loader.sequences[force_split]["total"].size(0)
return
for split in data_loader.sequences:
data_loader.total_size[split] = \
data_loader.sequences[split]["total"].size(0)
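# Hedged usage sketch (not part of the original module): how save_checkpoint and
# load_checkpoint pair up. The tiny nn.Linear model and the file path are
# illustrative assumptions; the target directory must already exist.
def _example_checkpoint_roundtrip(filename="garbage/models/demo.pickle"):
    import torch.nn as nn
    model = nn.Linear(4, 2)
    save_checkpoint({"state_dict": model.state_dict(), "epoch": 0}, filename)
    checkpoint = load_checkpoint(filename, gpu=False)
    model.load_state_dict(checkpoint["state_dict"])
    return model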
|
py | 1a2f68d3a0b65297a09c5e1d61db26e06853631f | from .sir import SIR
from common.config import data_type
from common.linalg import as_array, as_matrix, init_weights
from common.stats import RSS, MSPE, RMSE
from numpy.random import normal, uniform
from numpy import *
from filtering.particlefilter import ParticleFilter
class ParticleSIR(SIR):
def __init__(self, num_enbs, params):
self.num_enbs = num_enbs
super(ParticleSIR, self).__init__(params)
del self.alpha
del self.beta
self.current_Is = uniform(0, self.i * 2, num_enbs)
self.current_Ss = ones(num_enbs) - self.current_Is
self.alphas = uniform(0., 1, num_enbs)
self.betas = uniform(0., 1, num_enbs)
self.weights = [init_weights(num_enbs)] # matrix-like
for i in range(num_enbs):
if self.alphas[i] < self.betas[i]:
self.alphas[i], self.betas[i] = self.betas[i], self.alphas[i]
self.Is = [self.current_Is.tolist()]
self.Ss = [self.current_Ss.tolist()]
def update_states(self):
for j in range(self.num_enbs):
s = self.current_Ss[j]
i = self.current_Is[j]
s += self._delta_s(self.current_Ss[j], self.current_Is[j],
self.alphas[j])
i += self._delta_i(self.current_Ss[j], self.current_Is[j],
self.alphas[j], self.betas[j])
s = self.check_bounds(s)
i = self.check_bounds(i)
self.current_Is[j] = i
self.current_Ss[j] = s
self.Is.append(self.current_Is.tolist())
self.Ss.append(self.current_Ss.tolist())
def _init_filter(self):
num_states = 4
num_obs = 1
self.filter = ParticleFilter(self.num_enbs)
def predict_with_filter(self):
F = self.filter
while self.epoch < self.epochs - 1:
X = as_matrix([self.current_Ss, self.current_Is,
self.alphas, self.betas])
F.fit(X)
y = self.CDC_obs[self.epoch]
F.step(y, predict_P=False)
self.weights.append(F.weights)
x_post = F.x_post
for j in range(self.num_enbs):
self.current_Ss[j] = self.check_bounds(x_post[0, j])
self.current_Is[j] = self.check_bounds(x_post[1, j])
self.alphas[j] = self.check_bounds(x_post[2, j], inf)
self.betas[j] = self.check_bounds(x_post[3, j], inf)
self.update_states()
self.epoch += 1
self.get_score()
def _delta_s(self, s, i, alpha):
return - alpha * s * i
def _delta_i(self, s, i, alpha, beta):
return alpha * s * i - beta * i
def check_par_bounds(self, par):
if par < 0: par = 0
return par
def get_score(self):
I_mat = as_array(self.Is)
for i, w in enumerate(self.weights):
I_mat[i] *= w
self.IS = sum(I_mat, axis=1)
time_gap = self.epochs // 52  # integer spacing so the modulo test below selects whole epochs
idx = [x for x in range(self.epochs) if not x % time_gap]
self.score = RSS(self.CDC_obs, self.IS[idx])
self.scores = {}
self.scores['SSE'] = self.score
self.scores['RMSE'] = RMSE(self.CDC_obs, self.IS[idx])
self.scores['MSPE'] = MSPE(self.CDC_obs, self.IS[idx])
self.scores['CORR'] = corrcoef(self.CDC_obs, self.IS[idx])[0, 1]
return self.score
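# Hedged standalone sketch (not used by the class above): one Euler-style update
# with the same increments as _delta_s/_delta_i, using made-up parameter values.
def _example_sir_step(s=0.99, i=0.01, alpha=0.3, beta=0.1):
    ds = -alpha * s * i            # susceptible change, as in _delta_s
    di = alpha * s * i - beta * i  # infected change, as in _delta_i
    return s + ds, i + di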
|
py | 1a2f6941e27fb89420a1d6e6a0b3c69ea5a9c28b | from typing import Any, Dict, Optional, Set
import great_expectations.exceptions as ge_exceptions
from great_expectations.core.expectation_configuration import ExpectationConfiguration
from great_expectations.rule_based_profiler.expectation_configuration_builder import (
ExpectationConfigurationBuilder,
)
from great_expectations.rule_based_profiler.types import Domain, ParameterContainer
from great_expectations.rule_based_profiler.util import (
get_parameter_value_and_validate_return_type,
)
class DefaultExpectationConfigurationBuilder(ExpectationConfigurationBuilder):
"""
Class which creates ExpectationConfiguration out of a given Expectation type and
parameter_name-to-parameter_fully_qualified_parameter_name map (name-value pairs supplied in the kwargs dictionary).
"""
exclude_field_names: Set[str] = {
"kwargs",
}
def __init__(
self,
expectation_type: str,
meta: Optional[Dict[str, Any]] = None,
**kwargs,
):
super().__init__(expectation_type=expectation_type, **kwargs)
self._kwargs = kwargs
if meta is None:
meta = {}
if not isinstance(meta, dict):
raise ge_exceptions.ProfilerExecutionError(
message=f"""Argument "{meta}" in "{self.__class__.__name__}" must be of type "dictionary" \
(value of type "{str(type())}" was encountered).
"""
)
self._meta = meta
@property
def expectation_type(self) -> str:
return self._expectation_type
@property
def kwargs(self) -> dict:
return self._kwargs
@property
def meta(self) -> dict:
return self._meta
def _build_expectation_configuration(
self,
domain: Domain,
variables: Optional[ParameterContainer] = None,
parameters: Optional[Dict[str, ParameterContainer]] = None,
) -> ExpectationConfiguration:
parameter_name: str
fully_qualified_parameter_name: str
expectation_kwargs: Dict[str, Any] = {
parameter_name: get_parameter_value_and_validate_return_type(
domain=domain,
parameter_reference=fully_qualified_parameter_name,
expected_return_type=None,
variables=variables,
parameters=parameters,
)
for parameter_name, fully_qualified_parameter_name in self.kwargs.items()
}
meta: Dict[str, Any] = get_parameter_value_and_validate_return_type(
domain=domain,
parameter_reference=self.meta,
expected_return_type=dict,
variables=variables,
parameters=parameters,
)
return ExpectationConfiguration(
expectation_type=self.expectation_type,
kwargs=expectation_kwargs,
meta=meta,
)
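# Hedged construction sketch (not part of the library): instantiating the builder
# as the class docstring describes. The expectation type and the fully qualified
# parameter reference string are illustrative assumptions.
def _example_builder() -> DefaultExpectationConfigurationBuilder:
    return DefaultExpectationConfigurationBuilder(
        expectation_type="expect_column_values_to_not_be_null",
        column="$domain.domain_kwargs.column",
        meta={"notes": "illustrative only"},
    )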
|
py | 1a2f698957882dcea8b690d9abd96592d8e8f8b5 | from tkinter import *
from tkinter import messagebox
root=Tk()
root.title("TIC TAC TOE!")
press=True
flag=0
s1="0"
s2="0"
#main game logic
def check(button):
global press
global flag
global s1
global s2
# logic for alternating player turns
if button["text"]=="" and press==True:
button["text"]="X"
press=False
flag+=1
elif button["text"]=="" and press==False:
button["text"]="O"
press=True
flag+=1
# X winning logic
if (button1["text"]=="X" and button2["text"]=="X" and button3["text"]=="X" or
button1["text"]=="X" and button4["text"]=="X" and button7["text"]=="X" or
button2["text"]=="X" and button5["text"]=="X" and button8["text"]=="X" or
button3["text"]=="X" and button6["text"]=="X" and button9["text"]=="X" or
button7["text"]=="X" and button8["text"]=="X" and button9["text"]=="X" or
button4["text"]=="X" and button5["text"]=="X" and button6["text"]=="X" or
button1["text"]=="X" and button5["text"]=="X" and button9["text"]=="X" or
button7["text"]=="X" and button5["text"]=="X" and button3["text"]=="X"):
messagebox.showinfo("GAME OVER","X Is Winner!")
flag=0
press=True
s1=int(s1)
s1=(s1+1)
s1=str(s1)
var0.set(s1)
reset()
# O winning logic
elif (button1["text"]=="O" and button2["text"]=="O" and button3["text"]=="O" or
button1["text"]=="O" and button4["text"]=="O" and button7["text"]=="O" or
button2["text"]=="O" and button5["text"]=="O" and button8["text"]=="O" or
button3["text"]=="O" and button6["text"]=="O" and button9["text"]=="O" or
button7["text"]=="O" and button8["text"]=="O" and button9["text"]=="O" or
button4["text"]=="O" and button5["text"]=="O" and button6["text"]=="O" or
button1["text"]=="O" and button5["text"]=="O" and button9["text"]=="O" or
button7["text"]=="O" and button5["text"]=="O" and button3["text"]=="O"):
messagebox.showinfo("GAME OVER","O Is Winner!")
flag=0
press=True
s2=int(s2)
s2=(s2+1)
s2=str(s2)
var1.set(s2)
reset()
elif flag>=9:
messagebox.showinfo("GAME OVER","Match Is Draw!")
flag=0
press=True
reset()
def resetscore():
global s1
global s2
s1="0"
s2="0"
var0.set(s1)
var1.set(s2)
def reset():
button1["text"]=""
button2["text"]=""
button3["text"]=""
button4["text"]=""
button5["text"]=""
button6["text"]=""
button7["text"]=""
button8["text"]=""
button9["text"]=""
#score logic
score=Label(root,text="---<:{ $core }:>---",font=("Verdana","15","normal"))
score.pack(anchor=N)
var0=StringVar()
scoren=Frame(root)
scoren.pack(anchor=W)
scorep1=Label(scoren,text="player X:",font=("Verdana","11","bold"))
scorep1.grid(row=0,column=0)
scorep1c=Label(scoren,textvariable=var0,font=("Segoe UI","14","bold"))
scorep1c.grid(row=0,column=1)
var0.set(s1)
var1=StringVar()
scorep2=Label(scoren,text="\tplayer O:",font=("Verdana","11","bold"))
scorep2.grid(row=0,column=2)
scorep2c=Label(scoren,textvariable=var1,font=("Segoe UI","14","bold"))
scorep2c.grid(row=0,column=3)
var1.set(s2)
#button logic
buttonframe=Frame(root)
buttonframe.pack(padx=5,pady=5)
button1=Button(buttonframe,text="",bd=2,relief = GROOVE ,font=("Segoe UI","10","bold"),height=5,width=10,command=lambda:check(button1))
button1.grid(row=0,column=0)
button2=Button(buttonframe,text="",bd=2,relief = GROOVE ,font=("Segoe UI","10","bold"),height=5,width=10,command=lambda:check(button2))
button2.grid(row=0,column=1)
button3=Button(buttonframe,text="",bd=2,relief = GROOVE ,font=("Segoe UI","10","bold"),height=5,width=10,command=lambda:check(button3))
button3.grid(row=0,column=2)
button4=Button(buttonframe,text="",bd=2,relief = GROOVE ,font=("Segoe UI","10","bold"),height=5,width=10,command=lambda:check(button4))
button4.grid(row=1,column=0)
button5=Button(buttonframe,text="",bd=2,relief = GROOVE ,font=("Segoe UI","10","bold"),height=5,width=10,command=lambda:check(button5))
button5.grid(row=1,column=1)
button6=Button(buttonframe,text="",bd=2,relief = GROOVE ,font=("Segoe UI","10","bold"),height=5,width=10,command=lambda:check(button6))
button6.grid(row=1,column=2)
button7=Button(buttonframe,text="",bd=2,relief = GROOVE ,font=("Segoe UI","10","bold"),height=5,width=10,command=lambda:check(button7))
button7.grid(row=2,column=0)
button8=Button(buttonframe,text="",bd=2,relief = GROOVE ,font=("Segoe UI","10","bold"),height=5,width=10,command=lambda:check(button8))
button8.grid(row=2,column=1)
button9=Button(buttonframe,text="",bd=2,relief = GROOVE ,font=("Segoe UI","10","bold"),height=5,width=10,command=lambda:check(button9))
button9.grid(row=2,column=2)
buttonresetscore=Button(root,text="---| Reset $core |---",font=("Verdana","13","normal"),command=lambda:resetscore())
buttonresetscore.pack(fill=X,side=BOTTOM)
buttonresetboard=Button(root,text="---| Reset Board # |---",font=("Verdana","13","normal"),command=lambda:reset())
buttonresetboard.pack(fill=X,side=BOTTOM)
root.mainloop()
|
py | 1a2f6b26d18ed65f77729b5229ceba6d8429758d | from __future__ import unicode_literals
import onedrivesdk
from onedrivesdk.helpers import GetAuthCodeServer
from PIL import Image
import os
input = getattr(__builtins__, 'raw_input', input)
def main():
redirect_uri = "http://localhost:8080/"
client_secret = "BqaTYqI0XI7wDKcnJ5i3MvLwGcVsaMVM"
client = onedrivesdk.get_default_client(client_id='00000000481695BB',
scopes=['wl.signin',
'wl.offline_access',
'onedrive.readwrite'])
auth_url = client.auth_provider.get_auth_url(redirect_uri)
# Block thread until we have the code
code = GetAuthCodeServer.get_auth_code(auth_url, redirect_uri)
# Finally, authenticate!
client.auth_provider.authenticate(code, redirect_uri, client_secret)
item_id = "root"
copy_item_ids = None
action = 0
while True:
items = navigate(client, item_id)
print("0: UP")
count = 0
for count, item in enumerate(items):
print("{} {}".format(count+1, item.name if item.folder is None else "/"+item.name))
selected = input("Select item, enter 'C' to copy all, enter 'L' to list changes in current folder: ")
if selected == "C":
copy_item_ids = []
for item in items:
copy_item_ids.append(item.id)
elif selected == "L":
token = input("Enter your token, or nothing if you do not have one: ")
list_changes(client, item_id, token)
else:
selected = int(selected)
if selected == 0:
item_id = get_parent_id(client, item_id)
else:
action = int(input("Select action: 1:Navigate 2:Rename 3:View Thumbnail 4: Get Sharing Link 5: List Changes 6:Download 7:Upload 8:Delete 9:Copy{}... ".format(" 10: Paste" if copy_item_ids else "")))
if items[selected-1].folder is None or (action != 6 and action != 1):
if action == 1:
print("Can't navigate a file")
elif action == 2:
rename(client, items[selected-1].id)
elif action == 3:
view_thumbnail(client, items[selected-1].id)
elif action == 4:
get_sharing_link(client, items[selected-1].id)
elif action == 5:
token = input("Enter your token, or nothing if you do not have one: ")
list_changes(client, items[selected-1].id, token)
elif action == 6:
download(client, items[selected-1].id)
elif action == 7:
if items[selected-1].folder is None:
print("You cannot upload to a file")
else:
upload(client, items[selected-1].id)
elif action == 8:
delete(client, items[selected-1].id)
elif action == 9:
copy_item_ids = [items[selected-1].id]
elif action == 10 and copy_item_ids:
if items[selected-1].folder:
paste(client, items[selected-1].id, copy_item_ids)
else:
print("Can't copy to a file")
else:
item_id = items[selected-1].id
def navigate(client, item_id):
items = client.item(id=item_id).children.get()
return items
def rename(client, item_id):
new_name = input("Enter new name: ")
renamed_item = onedrivesdk.Item()
renamed_item.name = new_name
renamed_item.id = item_id
client.item(id=item_id).update(renamed_item)
def view_thumbnail(client, item_id):
if len(client.item(id=item_id).thumbnails.get()) == 0:
print("File does not have any thumbnails!\n")
else:
action = int(input("Size? 1:Small 2:Medium 3:Large... "))
try:
os.remove("./tmp_thumb.jpg")
except:
pass
if action == 1:
client.item(id=item_id).thumbnails[0].small.download("./tmp_thumb.jpg")
elif action == 2:
client.item(id=item_id).thumbnails[0].medium.download("./tmp_thumb.jpg")
elif action == 3:
client.item(id=item_id).thumbnails[0].large.download("./tmp_thumb.jpg")
image = Image.open("./tmp_thumb.jpg")
image.show()
def get_sharing_link(client, item_id):
action = int(input("Type? 1:View 2:Edit... "))
permission = client.item(id=item_id).create_link("view" if action == 1 else "edit").post()
print("\n{}\n".format(permission.link.web_url))
def download(client, item_id):
directory = input("Enter download directory (can be relative): ")
client.item(id=item_id).download(directory)
def upload(client, item_id):
directory = input("Enter upload file directory (can be relative): ")
name = input("Enter file name with extension: ")
client.item(id=item_id).children[name].upload(directory)
def delete(client, item_id):
confirm = input("Confirm delete? Y/N: ")
if confirm == "Y":
client.item(id=item_id).delete()
def paste(client, item_id, copy_item_ids):
ref = onedrivesdk.ItemReference()
ref.id = item_id
for id in copy_item_ids:
client.item(id=id).copy(parent_reference=ref).post()
def list_changes(client, item_id, token):
collection_page = client.item(id=item_id).delta(token).get()
for item in collection_page:
print(item.name)
print("TOKEN: {}".format(collection_page.token))
def get_parent_id(client, item_id):
id = client.item(id=item_id).get().parent_reference.id
return id
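# Hedged helper sketch (not part of the original sample): creating a destination
# folder before calling the same download API used above; the folder and file
# names are assumptions.
def _example_download_to_folder(client, item_id, folder="./downloads"):
    os.makedirs(folder, exist_ok=True)
    client.item(id=item_id).download(os.path.join(folder, "downloaded_item"))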
if __name__ == "__main__":
main() |
py | 1a2f6c799c17fb14d2f2a92b26e184b7470db5fe | # (C) Datadog, Inc. 2018
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
import sys
from .tooling.cli import checksdev
sys.exit(checksdev())
|
py | 1a2f6ca2696ebe288e70eded2becccc9e7440570 | from flask import Flask
from os import getenv
from flask_sqlalchemy import SQLAlchemy
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = getenv('DATABASE_URI')
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
db = SQLAlchemy(app)
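# Hedged sketch (not part of the original application): a minimal model showing
# how the db handle created above is typically used; the table and column names
# are made up.
class ExampleNote(db.Model):
    __tablename__ = "example_note"
    id = db.Column(db.Integer, primary_key=True)
    body = db.Column(db.String(200), nullable=False)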
from application import routes |
py | 1a2f6d44c2201ac61c6ceeed9258fa77cd45998b | #!/usr/local/epd/bin/python
#-----------------------------------------------------------
#
#-----------------------------------------------------------
def SetPyWignerCUDA_Path():
import sys
sys.path.append("/home/rcabrera/Documents/source/python/PyWignerCUDA")
|
py | 1a2f6da490bdb85625c68ffbd19e1ca73b40428e | def report_generator(file_path1, file_path2):
import numpy as np
import pandas as pd
from IPython.display import display
# read excel files
df1 = pd.read_excel(file_path1, sheet_name = 1, index_col= 0, header = 1, usecols = range(41), skipfooter = 17)
df2 = pd.read_excel(file_path2, sheet_name = 6, index_col= 0, header = 0, usecols = range(46), skipfooter = 0)
cols = [0, 3, 5, 8, 10, 13, 15, 18]
df3 = pd.read_excel(file_path2, sheet_name = 1, header = 0, usecols = cols, skipfooter = 3)
df4 = pd.read_excel(file_path2, sheet_name = 2, header = 0, usecols = cols, skipfooter = 6)
df5 = pd.concat([df3.tail(2), df4.tail(2)], axis = 1)
# check the data
display(df1.tail(2))
display(df2.tail(2))
display(df5.tail(2))
report = pd.read_excel('一手住宅简报格式.xlsx', sheet_name = 1, header = 0, index_col = 0,
skipfooter = 33, usecols = range(0, 45))
display(report.tail(2))
# generate supply and sales data of new houses in 40 cities
cities = list(report.columns[4:])
dict = {}
for i in cities:
dict[i] = [df1[i][-1], df2[i][-1]]
result = pd.DataFrame(dict, index = ['40城住宅成交', '40城住宅供应'])
# generate new house prices in 8 major cities
dict2 = {}
k = 0
j = 1
while j <= 15:
dict2[df5.columns[k]] = df5.iloc[-1, j]
k = k + 2
j = j + 2
result2 = pd.DataFrame(dict2, index = [df5.iloc[-1, 0]])
# write the results into one excel file
writer = pd.ExcelWriter('result_newhouse.xlsx')
result.to_excel(writer, sheet_name = 'supply_and_sales')
result2.to_excel(writer, sheet_name = 'prices')
writer.save()
return
print('Run report_generator(40-city file, 20-city file)')
|
py | 1a2f6e8198d39d4856437106bb2118557054f170 | import copy
import unittest
from datetime import datetime
from mltrace.db import Component, ComponentRun, IOPointer, Store
class TestDags(unittest.TestCase):
def setUp(self):
self.store = Store("test")
def testLinkedList(self):
# Create chain of component runs
expected_result = []
num_runs = 10
for i in range(1, num_runs + 1):
self.store.create_component(f"mock_component_{i}", "", "")
inp = self.store.get_io_pointer(f"iop_{i}")
out = self.store.get_io_pointer(f"iop_{i + 1}")
cr = self.store.initialize_empty_component_run(
f"mock_component_{i}"
)
cr.set_start_timestamp()
cr.set_end_timestamp()
cr.add_input(inp)
cr.add_output(out)
self.store.set_dependencies_from_inputs(cr)
self.store.commit_component_run(cr)
expected_result.append((num_runs - i, i))
# Reverse the expected result
expected_result.reverse()
# Trace the final output
trace = self.store.trace("iop_11")
level_id = [(level, cr.id) for level, cr in trace]
self.assertEqual(expected_result, level_id)
def testVersionedComputation(self):
# Run the same computation many times
self.store.create_component("mock_component", "", "")
num_runs = 10
for i in range(1, num_runs + 1):
inp = self.store.get_io_pointer("inp")
out = self.store.get_io_pointer("out")
cr = self.store.initialize_empty_component_run("mock_component")
cr.set_start_timestamp()
cr.set_end_timestamp()
cr.add_input(inp)
cr.add_output(out)
self.store.set_dependencies_from_inputs(cr)
self.store.commit_component_run(cr)
# Trace the out pointer. Only most recent run ID should show.
trace = self.store.trace("out")
self.assertEqual(len(trace), 1)
self.assertEqual(trace[0][0], 0)
self.assertEqual(trace[0][1].id, num_runs)
def testTree(self):
# Create a binary tree of component runs, num_levels deep
num_levels = 2
global cr_counter
global iop_counter
cr_counter = 1
iop_counter = 1
def create_tree(level, inp):
if level == num_levels:
return
global cr_counter
global iop_counter
self.store.create_component(f"mock_component_{cr_counter}", "", "")
cr = self.store.initialize_empty_component_run(
f"mock_component_{cr_counter}"
)
cr_counter += 1
cr.set_start_timestamp()
cr.set_end_timestamp()
# Create output pointers
out1 = self.store.get_io_pointer(f"iop_{iop_counter}")
iop_counter += 1
out2 = self.store.get_io_pointer(f"iop_{iop_counter}")
iop_counter += 1
# Add and commit component run
cr.add_input(inp)
cr.add_outputs([out1, out2])
self.store.set_dependencies_from_inputs(cr)
self.store.commit_component_run(cr)
# Create left and right trees
create_tree(level + 1, out1)
create_tree(level + 1, out2)
# Create first input pointer and tree of computation
inp = self.store.get_io_pointer(f"iop_{iop_counter}")
iop_counter += 1
create_tree(0, inp)
# Grab last iop id and trace it
last_iop_id = f"iop_{iop_counter - 1}"
trace = self.store.trace(last_iop_id)
level_id = [(level, cr.id) for level, cr in trace]
self.assertEqual(level_id, [(0, 3), (1, 1)])
def testCycle(self):
# Create cycle. Since dependencies are versioned, we shouldn't run
# into problems.
# Create io pointers and components
iop1 = self.store.get_io_pointer("iop1")
iop2 = self.store.get_io_pointer("iop2")
self.store.create_component("component_1", "", "")
self.store.create_component("component_2", "", "")
# Create component runs
cr = self.store.initialize_empty_component_run("component_1")
cr.set_start_timestamp()
cr.set_end_timestamp()
cr.add_input(iop1)
cr.add_output(iop2)
self.store.set_dependencies_from_inputs(cr)
self.store.commit_component_run(cr)
cr = self.store.initialize_empty_component_run("component_2")
cr.set_start_timestamp()
cr.set_end_timestamp()
cr.add_input(iop2)
cr.add_output(iop1)
self.store.set_dependencies_from_inputs(cr)
self.store.commit_component_run(cr)
# Trace iop1
trace_1 = [(level, cr.id) for level, cr in self.store.trace("iop1")]
trace_2 = [(level, cr.id) for level, cr in self.store.trace("iop2")]
self.assertEqual(trace_1, [(0, 2), (1, 1)])
self.assertEqual(trace_2, [(0, 1)])
def testStaleUpdate(self):
# Create computation with stale update.
iop1 = self.store.get_io_pointer("iop1")
iop2 = self.store.get_io_pointer("iop2")
iop3 = self.store.get_io_pointer("iop3")
iop4 = self.store.get_io_pointer("iop4")
self.store.create_component("component_1", "", "")
self.store.create_component("component_2", "", "")
# Create first component
cr = self.store.initialize_empty_component_run("component_1")
cr.set_start_timestamp()
cr.set_end_timestamp()
cr.add_input(iop1)
cr.add_output(iop2)
self.store.set_dependencies_from_inputs(cr)
self.store.commit_component_run(cr)
# Create second component run
cr = self.store.initialize_empty_component_run("component_1")
cr.set_start_timestamp()
cr.set_end_timestamp()
cr.add_input(iop1)
cr.add_output(iop3)
self.store.set_dependencies_from_inputs(cr)
self.store.commit_component_run(cr)
# Create third component run that depends on the first (stale update)
cr = self.store.initialize_empty_component_run("component_2")
cr.set_start_timestamp()
cr.set_end_timestamp()
cr.add_input(iop2)
cr.add_output(iop4)
self.store.set_dependencies_from_inputs(cr)
self.store.commit_component_run(cr)
# Trace iop4
trace = [
(level, cr.id, cr.stale) for level, cr in self.store.trace("iop4")
]
res = [
(
0,
3,
[
"component_1 (ID 1) has 1 fresher run(s) that began "
+ "before this component run started."
],
),
(1, 1, []),
]
self.assertEqual(trace, res)
def testStaleTime(self):
# Create computation with stale update.
iop1 = self.store.get_io_pointer("iop1")
iop2 = self.store.get_io_pointer("iop2")
iop3 = self.store.get_io_pointer("iop3")
self.store.create_component("component_1", "", "")
self.store.create_component("component_2", "", "")
now = datetime.utcnow()
# Create first component
cr = self.store.initialize_empty_component_run("component_1")
cr.set_start_timestamp(now.replace(month=now.month - 2))
cr.set_end_timestamp()
cr.add_input(iop1)
cr.add_output(iop2)
self.store.set_dependencies_from_inputs(cr)
self.store.commit_component_run(cr)
# Create second component run
cr = self.store.initialize_empty_component_run("component_2")
cr.set_start_timestamp()
cr.set_end_timestamp()
cr.add_input(iop2)
cr.add_output(iop3)
self.store.set_dependencies_from_inputs(cr)
self.store.commit_component_run(cr)
# Trace
trace = [
(level, cr.id, cr.stale) for level, cr in self.store.trace("iop3")
]
res = [(0, 2, ["component_1 (ID 1) was run 61 days ago."]), (1, 1, [])]
self.assertEqual(trace, res)
if __name__ == "__main__":
unittest.main()
|
py | 1a2f6edaf7508e8f226000be8dc8d908aec1adb9 | import io
import uuid
from mitmproxy.test import tutils
from mitmproxy import tcp
from mitmproxy import websocket
from mitmproxy import controller
from mitmproxy import http
from mitmproxy import flow
from mitmproxy.net import http as net_http
from mitmproxy.proxy import context
from wsproto.frame_protocol import Opcode
def ttcpflow(client_conn=True, server_conn=True, messages=True, err=None):
if client_conn is True:
client_conn = tclient_conn()
if server_conn is True:
server_conn = tserver_conn()
if messages is True:
messages = [
tcp.TCPMessage(True, b"hello"),
tcp.TCPMessage(False, b"it's me"),
]
if err is True:
err = terr()
f = tcp.TCPFlow(client_conn, server_conn)
f.messages = messages
f.error = err
f.reply = controller.DummyReply()
return f
def twebsocketflow(client_conn=True, server_conn=True, messages=True, err=None, handshake_flow=True):
if client_conn is True:
client_conn = tclient_conn()
if server_conn is True:
server_conn = tserver_conn()
if handshake_flow is True:
req = http.HTTPRequest(
"example.com",
80,
b"GET",
b"http",
b"example.com",
b"/ws",
b"HTTP/1.1",
headers=net_http.Headers(
connection="upgrade",
upgrade="websocket",
sec_websocket_version="13",
sec_websocket_key="1234",
),
content=b'',
trailers=None,
timestamp_start=946681200,
timestamp_end=946681201,
)
resp = http.HTTPResponse(
b"HTTP/1.1",
101,
reason=net_http.status_codes.RESPONSES.get(101),
headers=net_http.Headers(
connection='upgrade',
upgrade='websocket',
sec_websocket_accept=b'',
),
content=b'',
trailers=None,
timestamp_start=946681202,
timestamp_end=946681203,
)
handshake_flow = http.HTTPFlow(client_conn, server_conn)
handshake_flow.request = req
handshake_flow.response = resp
f = websocket.WebSocketFlow(client_conn, server_conn, handshake_flow)
f.metadata['websocket_handshake'] = handshake_flow.id
handshake_flow.metadata['websocket_flow'] = f.id
handshake_flow.metadata['websocket'] = True
if messages is True:
messages = [
websocket.WebSocketMessage(Opcode.BINARY, True, b"hello binary"),
websocket.WebSocketMessage(Opcode.TEXT, True, b"hello text"),
websocket.WebSocketMessage(Opcode.TEXT, False, b"it's me"),
]
if err is True:
err = terr()
f.messages = messages
f.error = err
f.reply = controller.DummyReply()
return f
def tflow(client_conn=True, server_conn=True, req=True, resp=None, err=None):
"""
@type client_conn: bool | None | mitmproxy.proxy.connection.ClientConnection
@type server_conn: bool | None | mitmproxy.proxy.connection.ServerConnection
@type req: bool | None | mitmproxy.proxy.protocol.http.HTTPRequest
@type resp: bool | None | mitmproxy.proxy.protocol.http.HTTPResponse
@type err: bool | None | mitmproxy.proxy.protocol.primitives.Error
@return: mitmproxy.proxy.protocol.http.HTTPFlow
"""
if client_conn is True:
client_conn = tclient_conn()
if server_conn is True:
server_conn = tserver_conn()
if req is True:
req = tutils.treq()
if resp is True:
resp = tutils.tresp()
if err is True:
err = terr()
f = http.HTTPFlow(client_conn, server_conn)
f.request = req
f.response = resp
f.error = err
f.reply = controller.DummyReply()
return f
class DummyFlow(flow.Flow):
"""A flow that is neither HTTP nor TCP."""
def __init__(self, client_conn, server_conn, live=None):
super().__init__("dummy", client_conn, server_conn, live)
def tdummyflow(client_conn=True, server_conn=True, err=None):
if client_conn is True:
client_conn = tclient_conn()
if server_conn is True:
server_conn = tserver_conn()
if err is True:
err = terr()
f = DummyFlow(client_conn, server_conn)
f.error = err
f.reply = controller.DummyReply()
return f
def tclient_conn() -> context.Client:
c = context.Client.from_state(dict(
id=str(uuid.uuid4()),
address=("127.0.0.1", 22),
mitmcert=None,
tls_established=True,
timestamp_start=946681200,
timestamp_tls_setup=946681201,
timestamp_end=946681206,
sni="address",
cipher_name="cipher",
alpn_proto_negotiated=b"http/1.1",
tls_version="TLSv1.2",
tls_extensions=[(0x00, bytes.fromhex("000e00000b6578616d"))],
state=0,
sockname=("", 0),
error=None,
tls=False,
certificate_list=[],
alpn_offers=[],
cipher_list=[],
))
c.reply = controller.DummyReply()
return c
def tserver_conn() -> context.Server:
c = context.Server.from_state(dict(
id=str(uuid.uuid4()),
address=("address", 22),
source_address=("address", 22),
ip_address=("192.168.0.1", 22),
timestamp_start=946681202,
timestamp_tcp_setup=946681203,
timestamp_tls_setup=946681204,
timestamp_end=946681205,
tls_established=True,
sni="address",
alpn_proto_negotiated=None,
tls_version="TLSv1.2",
via=None,
state=0,
error=None,
tls=False,
certificate_list=[],
alpn_offers=[],
cipher_name=None,
cipher_list=[],
via2=None,
))
c.reply = controller.DummyReply()
c.rfile = io.BytesIO()
c.wfile = io.BytesIO()
return c
def terr(content="error"):
"""
@return: mitmproxy.proxy.protocol.primitives.Error
"""
err = flow.Error(content)
return err
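# Hedged usage sketch (not part of the original helpers): combining the factories
# above the way a test typically would; the extra header is illustrative.
def _example_flow_with_response():
    f = tflow(resp=True)
    f.request.headers["x-demo"] = "1"
    return f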
|
py | 1a2f7176ed8951d59e04fb3e3c7f06be6530084a | # Copyright (c) 2017-present, Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
#
# Based on:
# --------------------------------------------------------
# Fast R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick
# --------------------------------------------------------
"""Construct minibatches for Detectron networks."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import cv2
import logging
import numpy as np
from core.config import cfg,switch_to_teacher,switch_to_student,teacher_cfg
import roi_data.fast_rcnn
import roi_data.retinanet
import roi_data.rpn
import utils.blob as blob_utils
logger = logging.getLogger(__name__)
def get_minibatch_blob_names(is_training=True):
"""Return blob names in the order in which they are read by the data loader.
"""
# data blob: holds a batch of N images, each with 3 channels
blob_names = ['data']
if cfg.DISTILLATION.DISTILLATION_ON:
blob_names.append('teacher/data')
if cfg.RPN.RPN_ON:
# RPN-only or end-to-end Faster R-CNN
blob_names += roi_data.rpn.get_rpn_blob_names(is_training=is_training)
elif cfg.RETINANET.RETINANET_ON:
blob_names += roi_data.retinanet.get_retinanet_blob_names(
is_training=is_training
)
else:
# Fast R-CNN like models trained on precomputed proposals
blob_names += roi_data.fast_rcnn.get_fast_rcnn_blob_names(
is_training=is_training
)
return blob_names
def get_minibatch(roidb):
"""Given a roidb, construct a minibatch sampled from it."""
# We collect blobs from each image onto a list and then concat them into a
# single tensor, hence we initialize each blob to an empty list
blobs = {k: [] for k in get_minibatch_blob_names()}
# Get the input image blob, formatted for caffe2
im_blob, im_scales = _get_image_blob(roidb)
if cfg.DISTILLATION.DISTILLATION_ON:
teacher_cfg.TRAIN.SCALES=cfg.TRAIN.SCALES
teacher_cfg.TRAIN.MAX_SIZE=cfg.TRAIN.MAX_SIZE
teacher_blob,_=_get_image_blob(roidb,cfg=teacher_cfg)
blobs['data'] = im_blob
if cfg.DISTILLATION.DISTILLATION_ON:
blobs['teacher/data']=teacher_blob
if cfg.RPN.RPN_ON:
# RPN-only or end-to-end Faster/Mask R-CNN
valid = roi_data.rpn.add_rpn_blobs(blobs, im_scales, roidb)
elif cfg.RETINANET.RETINANET_ON:
im_width, im_height = im_blob.shape[3], im_blob.shape[2]
# im_width, im_height corresponds to the network input: padded image
# (if needed) width and height. We pass it as input and slice the data
# accordingly so that we don't need to use SampleAsOp
valid = roi_data.retinanet.add_retinanet_blobs(
blobs, im_scales, roidb, im_width, im_height
)
else:
# Fast R-CNN like models trained on precomputed proposals
valid = roi_data.fast_rcnn.add_fast_rcnn_blobs(blobs, im_scales, roidb)
return blobs, valid
def _get_image_blob(roidb,cfg=cfg):
"""Builds an input blob from the images in the roidb at the specified
scales.
"""
num_images = len(roidb)
# Sample random scales to use for each image in this batch
scale_inds = np.random.randint(
0, high=len(cfg.TRAIN.SCALES), size=num_images
)
processed_ims = []
im_scales = []
teacher_ims=[]
for i in range(num_images):
im = cv2.imread(roidb[i]['image'])
assert im is not None, \
'Failed to read image \'{}\''.format(roidb[i]['image'])
if roidb[i]['flipped']:
im = im[:, ::-1, :]
target_size = cfg.TRAIN.SCALES[scale_inds[i]]
im, im_scale = blob_utils.prep_im_for_blob(
im, cfg.PIXEL_MEANS,cfg.PIXEL_DIV,cfg.PIXEL_STD, [target_size], cfg.TRAIN.MAX_SIZE
)
im_scales.append(im_scale[0])
processed_ims.append(im[0])
# Create a blob to hold the input images
blob = blob_utils.im_list_to_blob(processed_ims)
return blob, im_scales
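# Hedged standalone sketch of the per-image scale sampling performed in
# _get_image_blob above; the scale list stands in for cfg.TRAIN.SCALES.
def _example_sample_scales(num_images=4, scales=(500, 600, 700)):
    inds = np.random.randint(0, high=len(scales), size=num_images)
    return [scales[i] for i in inds]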
|
py | 1a2f7261fe1537ec542825018acac13320502ca7 | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = ['BucketObjectArgs', 'BucketObject']
@pulumi.input_type
class BucketObjectArgs:
def __init__(__self__, *,
bucket: pulumi.Input[str],
acl: Optional[pulumi.Input[str]] = None,
bucket_key_enabled: Optional[pulumi.Input[bool]] = None,
cache_control: Optional[pulumi.Input[str]] = None,
content: Optional[pulumi.Input[str]] = None,
content_base64: Optional[pulumi.Input[str]] = None,
content_disposition: Optional[pulumi.Input[str]] = None,
content_encoding: Optional[pulumi.Input[str]] = None,
content_language: Optional[pulumi.Input[str]] = None,
content_type: Optional[pulumi.Input[str]] = None,
etag: Optional[pulumi.Input[str]] = None,
force_destroy: Optional[pulumi.Input[bool]] = None,
key: Optional[pulumi.Input[str]] = None,
kms_key_id: Optional[pulumi.Input[str]] = None,
metadata: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
object_lock_legal_hold_status: Optional[pulumi.Input[str]] = None,
object_lock_mode: Optional[pulumi.Input[str]] = None,
object_lock_retain_until_date: Optional[pulumi.Input[str]] = None,
server_side_encryption: Optional[pulumi.Input[str]] = None,
source: Optional[pulumi.Input[Union[pulumi.Asset, pulumi.Archive]]] = None,
source_hash: Optional[pulumi.Input[str]] = None,
storage_class: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
website_redirect: Optional[pulumi.Input[str]] = None):
"""
The set of arguments for constructing a BucketObject resource.
:param pulumi.Input[str] bucket: Name of the bucket to put the file in. Alternatively, an [S3 access point](https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) ARN can be specified.
:param pulumi.Input[str] acl: [Canned ACL](https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl) to apply. Valid values are `private`, `public-read`, `public-read-write`, `aws-exec-read`, `authenticated-read`, `bucket-owner-read`, and `bucket-owner-full-control`. Defaults to `private`.
:param pulumi.Input[bool] bucket_key_enabled: Whether or not to use [Amazon S3 Bucket Keys](https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-key.html) for SSE-KMS.
:param pulumi.Input[str] cache_control: Caching behavior along the request/reply chain Read [w3c cache_control](http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.9) for further details.
:param pulumi.Input[str] content: Literal string value to use as the object content, which will be uploaded as UTF-8-encoded text.
:param pulumi.Input[str] content_base64: Base64-encoded data that will be decoded and uploaded as raw bytes for the object content. This allows safely uploading non-UTF8 binary data, but is recommended only for small content such as the result of the `gzipbase64` function with small text strings. For larger objects, use `source` to stream the content from a disk file.
:param pulumi.Input[str] content_disposition: Presentational information for the object. Read [w3c content_disposition](http://www.w3.org/Protocols/rfc2616/rfc2616-sec19.html#sec19.5.1) for further information.
:param pulumi.Input[str] content_encoding: Content encodings that have been applied to the object and thus what decoding mechanisms must be applied to obtain the media-type referenced by the Content-Type header field. Read [w3c content encoding](http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.11) for further information.
:param pulumi.Input[str] content_language: Language the content is in e.g., en-US or en-GB.
:param pulumi.Input[str] content_type: Standard MIME type describing the format of the object data, e.g., application/octet-stream. All Valid MIME Types are valid for this input.
:param pulumi.Input[str] etag: Triggers updates when the value changes. The only meaningful value is `filemd5("path/to/file")`. This attribute is not compatible with KMS encryption, `kms_key_id` or `server_side_encryption = "aws:kms"` (see `source_hash` instead).
:param pulumi.Input[bool] force_destroy: Whether to allow the object to be deleted by removing any legal hold on any object version. Default is `false`. This value should be set to `true` only if the bucket has S3 object lock enabled.
:param pulumi.Input[str] key: Name of the object once it is in the bucket.
:param pulumi.Input[str] kms_key_id: ARN of the KMS Key to use for object encryption. If the S3 Bucket has server-side encryption enabled, that value will automatically be used. If referencing the `kms.Key` resource, use the `arn` attribute. If referencing the `kms.Alias` data source or resource, use the `target_key_arn` attribute. This provider will only perform drift detection if a configuration value is provided.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] metadata: Map of keys/values to provision metadata (will be automatically prefixed by `x-amz-meta-`, note that only lowercase label are currently supported by the AWS Go API).
:param pulumi.Input[str] object_lock_legal_hold_status: [Legal hold](https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock-overview.html#object-lock-legal-holds) status that you want to apply to the specified object. Valid values are `ON` and `OFF`.
:param pulumi.Input[str] object_lock_mode: Object lock [retention mode](https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock-overview.html#object-lock-retention-modes) that you want to apply to this object. Valid values are `GOVERNANCE` and `COMPLIANCE`.
:param pulumi.Input[str] object_lock_retain_until_date: Date and time, in [RFC3339 format](https://tools.ietf.org/html/rfc3339#section-5.8), when this object's object lock will [expire](https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock-overview.html#object-lock-retention-periods).
:param pulumi.Input[str] server_side_encryption: Server-side encryption of the object in S3. Valid values are "`AES256`" and "`aws:kms`".
:param pulumi.Input[Union[pulumi.Asset, pulumi.Archive]] source: Path to a file that will be read and uploaded as raw bytes for the object content.
:param pulumi.Input[str] source_hash: Triggers updates like `etag` but useful to address `etag` encryption limitations. Set using `filemd5("path/to/source")`. (The value is only stored in state and not saved by AWS.)
:param pulumi.Input[str] storage_class: [Storage Class](https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html#AmazonS3-PutObject-request-header-StorageClass) for the object. Defaults to "`STANDARD`".
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Map of tags to assign to the object. If configured with a provider `default_tags` configuration block present, tags with matching keys will overwrite those defined at the provider-level.
:param pulumi.Input[str] website_redirect: Target URL for [website redirect](http://docs.aws.amazon.com/AmazonS3/latest/dev/how-to-page-redirect.html).
"""
if bucket is not None:
warnings.warn("""Use the aws_s3_object resource instead""", DeprecationWarning)
pulumi.log.warn("""bucket is deprecated: Use the aws_s3_object resource instead""")
pulumi.set(__self__, "bucket", bucket)
if acl is not None:
pulumi.set(__self__, "acl", acl)
if bucket_key_enabled is not None:
pulumi.set(__self__, "bucket_key_enabled", bucket_key_enabled)
if cache_control is not None:
pulumi.set(__self__, "cache_control", cache_control)
if content is not None:
pulumi.set(__self__, "content", content)
if content_base64 is not None:
pulumi.set(__self__, "content_base64", content_base64)
if content_disposition is not None:
pulumi.set(__self__, "content_disposition", content_disposition)
if content_encoding is not None:
pulumi.set(__self__, "content_encoding", content_encoding)
if content_language is not None:
pulumi.set(__self__, "content_language", content_language)
if content_type is not None:
pulumi.set(__self__, "content_type", content_type)
if etag is not None:
pulumi.set(__self__, "etag", etag)
if force_destroy is not None:
pulumi.set(__self__, "force_destroy", force_destroy)
if key is not None:
warnings.warn("""Use the aws_s3_object resource instead""", DeprecationWarning)
pulumi.log.warn("""key is deprecated: Use the aws_s3_object resource instead""")
if key is not None:
pulumi.set(__self__, "key", key)
if kms_key_id is not None:
pulumi.set(__self__, "kms_key_id", kms_key_id)
if metadata is not None:
pulumi.set(__self__, "metadata", metadata)
if object_lock_legal_hold_status is not None:
pulumi.set(__self__, "object_lock_legal_hold_status", object_lock_legal_hold_status)
if object_lock_mode is not None:
pulumi.set(__self__, "object_lock_mode", object_lock_mode)
if object_lock_retain_until_date is not None:
pulumi.set(__self__, "object_lock_retain_until_date", object_lock_retain_until_date)
if server_side_encryption is not None:
pulumi.set(__self__, "server_side_encryption", server_side_encryption)
if source is not None:
pulumi.set(__self__, "source", source)
if source_hash is not None:
pulumi.set(__self__, "source_hash", source_hash)
if storage_class is not None:
pulumi.set(__self__, "storage_class", storage_class)
if tags is not None:
pulumi.set(__self__, "tags", tags)
if website_redirect is not None:
pulumi.set(__self__, "website_redirect", website_redirect)
@property
@pulumi.getter
def bucket(self) -> pulumi.Input[str]:
"""
Name of the bucket to put the file in. Alternatively, an [S3 access point](https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) ARN can be specified.
"""
return pulumi.get(self, "bucket")
@bucket.setter
def bucket(self, value: pulumi.Input[str]):
pulumi.set(self, "bucket", value)
@property
@pulumi.getter
def acl(self) -> Optional[pulumi.Input[str]]:
"""
[Canned ACL](https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl) to apply. Valid values are `private`, `public-read`, `public-read-write`, `aws-exec-read`, `authenticated-read`, `bucket-owner-read`, and `bucket-owner-full-control`. Defaults to `private`.
"""
return pulumi.get(self, "acl")
@acl.setter
def acl(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "acl", value)
@property
@pulumi.getter(name="bucketKeyEnabled")
def bucket_key_enabled(self) -> Optional[pulumi.Input[bool]]:
"""
Whether or not to use [Amazon S3 Bucket Keys](https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-key.html) for SSE-KMS.
"""
return pulumi.get(self, "bucket_key_enabled")
@bucket_key_enabled.setter
def bucket_key_enabled(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "bucket_key_enabled", value)
@property
@pulumi.getter(name="cacheControl")
def cache_control(self) -> Optional[pulumi.Input[str]]:
"""
Caching behavior along the request/reply chain Read [w3c cache_control](http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.9) for further details.
"""
return pulumi.get(self, "cache_control")
@cache_control.setter
def cache_control(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "cache_control", value)
@property
@pulumi.getter
def content(self) -> Optional[pulumi.Input[str]]:
"""
Literal string value to use as the object content, which will be uploaded as UTF-8-encoded text.
"""
return pulumi.get(self, "content")
@content.setter
def content(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "content", value)
@property
@pulumi.getter(name="contentBase64")
def content_base64(self) -> Optional[pulumi.Input[str]]:
"""
Base64-encoded data that will be decoded and uploaded as raw bytes for the object content. This allows safely uploading non-UTF8 binary data, but is recommended only for small content such as the result of the `gzipbase64` function with small text strings. For larger objects, use `source` to stream the content from a disk file.
"""
return pulumi.get(self, "content_base64")
@content_base64.setter
def content_base64(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "content_base64", value)
@property
@pulumi.getter(name="contentDisposition")
def content_disposition(self) -> Optional[pulumi.Input[str]]:
"""
Presentational information for the object. Read [w3c content_disposition](http://www.w3.org/Protocols/rfc2616/rfc2616-sec19.html#sec19.5.1) for further information.
"""
return pulumi.get(self, "content_disposition")
@content_disposition.setter
def content_disposition(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "content_disposition", value)
@property
@pulumi.getter(name="contentEncoding")
def content_encoding(self) -> Optional[pulumi.Input[str]]:
"""
Content encodings that have been applied to the object and thus what decoding mechanisms must be applied to obtain the media-type referenced by the Content-Type header field. Read [w3c content encoding](http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.11) for further information.
"""
return pulumi.get(self, "content_encoding")
@content_encoding.setter
def content_encoding(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "content_encoding", value)
@property
@pulumi.getter(name="contentLanguage")
def content_language(self) -> Optional[pulumi.Input[str]]:
"""
Language the content is in e.g., en-US or en-GB.
"""
return pulumi.get(self, "content_language")
@content_language.setter
def content_language(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "content_language", value)
@property
@pulumi.getter(name="contentType")
def content_type(self) -> Optional[pulumi.Input[str]]:
"""
Standard MIME type describing the format of the object data, e.g., application/octet-stream. All Valid MIME Types are valid for this input.
"""
return pulumi.get(self, "content_type")
@content_type.setter
def content_type(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "content_type", value)
@property
@pulumi.getter
def etag(self) -> Optional[pulumi.Input[str]]:
"""
Triggers updates when the value changes. The only meaningful value is `filemd5("path/to/file")`. This attribute is not compatible with KMS encryption, `kms_key_id` or `server_side_encryption = "aws:kms"` (see `source_hash` instead).
"""
return pulumi.get(self, "etag")
@etag.setter
def etag(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "etag", value)
@property
@pulumi.getter(name="forceDestroy")
def force_destroy(self) -> Optional[pulumi.Input[bool]]:
"""
Whether to allow the object to be deleted by removing any legal hold on any object version. Default is `false`. This value should be set to `true` only if the bucket has S3 object lock enabled.
"""
return pulumi.get(self, "force_destroy")
@force_destroy.setter
def force_destroy(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "force_destroy", value)
@property
@pulumi.getter
def key(self) -> Optional[pulumi.Input[str]]:
"""
Name of the object once it is in the bucket.
"""
return pulumi.get(self, "key")
@key.setter
def key(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "key", value)
@property
@pulumi.getter(name="kmsKeyId")
def kms_key_id(self) -> Optional[pulumi.Input[str]]:
"""
ARN of the KMS Key to use for object encryption. If the S3 Bucket has server-side encryption enabled, that value will automatically be used. If referencing the `kms.Key` resource, use the `arn` attribute. If referencing the `kms.Alias` data source or resource, use the `target_key_arn` attribute. This provider will only perform drift detection if a configuration value is provided.
"""
return pulumi.get(self, "kms_key_id")
@kms_key_id.setter
def kms_key_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "kms_key_id", value)
@property
@pulumi.getter
def metadata(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
Map of keys/values to provision metadata (will be automatically prefixed by `x-amz-meta-`, note that only lowercase label are currently supported by the AWS Go API).
"""
return pulumi.get(self, "metadata")
@metadata.setter
def metadata(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "metadata", value)
@property
@pulumi.getter(name="objectLockLegalHoldStatus")
def object_lock_legal_hold_status(self) -> Optional[pulumi.Input[str]]:
"""
[Legal hold](https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock-overview.html#object-lock-legal-holds) status that you want to apply to the specified object. Valid values are `ON` and `OFF`.
"""
return pulumi.get(self, "object_lock_legal_hold_status")
@object_lock_legal_hold_status.setter
def object_lock_legal_hold_status(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "object_lock_legal_hold_status", value)
@property
@pulumi.getter(name="objectLockMode")
def object_lock_mode(self) -> Optional[pulumi.Input[str]]:
"""
Object lock [retention mode](https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock-overview.html#object-lock-retention-modes) that you want to apply to this object. Valid values are `GOVERNANCE` and `COMPLIANCE`.
"""
return pulumi.get(self, "object_lock_mode")
@object_lock_mode.setter
def object_lock_mode(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "object_lock_mode", value)
@property
@pulumi.getter(name="objectLockRetainUntilDate")
def object_lock_retain_until_date(self) -> Optional[pulumi.Input[str]]:
"""
Date and time, in [RFC3339 format](https://tools.ietf.org/html/rfc3339#section-5.8), when this object's object lock will [expire](https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock-overview.html#object-lock-retention-periods).
"""
return pulumi.get(self, "object_lock_retain_until_date")
@object_lock_retain_until_date.setter
def object_lock_retain_until_date(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "object_lock_retain_until_date", value)
@property
@pulumi.getter(name="serverSideEncryption")
def server_side_encryption(self) -> Optional[pulumi.Input[str]]:
"""
Server-side encryption of the object in S3. Valid values are "`AES256`" and "`aws:kms`".
"""
return pulumi.get(self, "server_side_encryption")
@server_side_encryption.setter
def server_side_encryption(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "server_side_encryption", value)
@property
@pulumi.getter
def source(self) -> Optional[pulumi.Input[Union[pulumi.Asset, pulumi.Archive]]]:
"""
Path to a file that will be read and uploaded as raw bytes for the object content.
"""
return pulumi.get(self, "source")
@source.setter
def source(self, value: Optional[pulumi.Input[Union[pulumi.Asset, pulumi.Archive]]]):
pulumi.set(self, "source", value)
@property
@pulumi.getter(name="sourceHash")
def source_hash(self) -> Optional[pulumi.Input[str]]:
"""
Triggers updates like `etag` but useful to address `etag` encryption limitations. Set using `filemd5("path/to/source")`. (The value is only stored in state and not saved by AWS.)
"""
return pulumi.get(self, "source_hash")
@source_hash.setter
def source_hash(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "source_hash", value)
@property
@pulumi.getter(name="storageClass")
def storage_class(self) -> Optional[pulumi.Input[str]]:
"""
[Storage Class](https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html#AmazonS3-PutObject-request-header-StorageClass) for the object. Defaults to "`STANDARD`".
"""
return pulumi.get(self, "storage_class")
@storage_class.setter
def storage_class(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "storage_class", value)
@property
@pulumi.getter
def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
Map of tags to assign to the object. If configured with a provider `default_tags` configuration block present, tags with matching keys will overwrite those defined at the provider-level.
"""
return pulumi.get(self, "tags")
@tags.setter
def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "tags", value)
@property
@pulumi.getter(name="websiteRedirect")
def website_redirect(self) -> Optional[pulumi.Input[str]]:
"""
Target URL for [website redirect](http://docs.aws.amazon.com/AmazonS3/latest/dev/how-to-page-redirect.html).
"""
return pulumi.get(self, "website_redirect")
@website_redirect.setter
def website_redirect(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "website_redirect", value)
@pulumi.input_type
class _BucketObjectState:
def __init__(__self__, *,
acl: Optional[pulumi.Input[str]] = None,
bucket: Optional[pulumi.Input[str]] = None,
bucket_key_enabled: Optional[pulumi.Input[bool]] = None,
cache_control: Optional[pulumi.Input[str]] = None,
content: Optional[pulumi.Input[str]] = None,
content_base64: Optional[pulumi.Input[str]] = None,
content_disposition: Optional[pulumi.Input[str]] = None,
content_encoding: Optional[pulumi.Input[str]] = None,
content_language: Optional[pulumi.Input[str]] = None,
content_type: Optional[pulumi.Input[str]] = None,
etag: Optional[pulumi.Input[str]] = None,
force_destroy: Optional[pulumi.Input[bool]] = None,
key: Optional[pulumi.Input[str]] = None,
kms_key_id: Optional[pulumi.Input[str]] = None,
metadata: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
object_lock_legal_hold_status: Optional[pulumi.Input[str]] = None,
object_lock_mode: Optional[pulumi.Input[str]] = None,
object_lock_retain_until_date: Optional[pulumi.Input[str]] = None,
server_side_encryption: Optional[pulumi.Input[str]] = None,
source: Optional[pulumi.Input[Union[pulumi.Asset, pulumi.Archive]]] = None,
source_hash: Optional[pulumi.Input[str]] = None,
storage_class: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
tags_all: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
version_id: Optional[pulumi.Input[str]] = None,
website_redirect: Optional[pulumi.Input[str]] = None):
"""
Input properties used for looking up and filtering BucketObject resources.
:param pulumi.Input[str] acl: [Canned ACL](https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl) to apply. Valid values are `private`, `public-read`, `public-read-write`, `aws-exec-read`, `authenticated-read`, `bucket-owner-read`, and `bucket-owner-full-control`. Defaults to `private`.
:param pulumi.Input[str] bucket: Name of the bucket to put the file in. Alternatively, an [S3 access point](https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) ARN can be specified.
:param pulumi.Input[bool] bucket_key_enabled: Whether or not to use [Amazon S3 Bucket Keys](https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-key.html) for SSE-KMS.
        :param pulumi.Input[str] cache_control: Caching behavior along the request/reply chain. Read [w3c cache_control](http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.9) for further details.
:param pulumi.Input[str] content: Literal string value to use as the object content, which will be uploaded as UTF-8-encoded text.
:param pulumi.Input[str] content_base64: Base64-encoded data that will be decoded and uploaded as raw bytes for the object content. This allows safely uploading non-UTF8 binary data, but is recommended only for small content such as the result of the `gzipbase64` function with small text strings. For larger objects, use `source` to stream the content from a disk file.
:param pulumi.Input[str] content_disposition: Presentational information for the object. Read [w3c content_disposition](http://www.w3.org/Protocols/rfc2616/rfc2616-sec19.html#sec19.5.1) for further information.
:param pulumi.Input[str] content_encoding: Content encodings that have been applied to the object and thus what decoding mechanisms must be applied to obtain the media-type referenced by the Content-Type header field. Read [w3c content encoding](http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.11) for further information.
        :param pulumi.Input[str] content_language: Language the content is in, e.g., en-US or en-GB.
        :param pulumi.Input[str] content_type: Standard MIME type describing the format of the object data, e.g., application/octet-stream. All valid MIME types are accepted for this input.
:param pulumi.Input[str] etag: Triggers updates when the value changes. The only meaningful value is `filemd5("path/to/file")`. This attribute is not compatible with KMS encryption, `kms_key_id` or `server_side_encryption = "aws:kms"` (see `source_hash` instead).
:param pulumi.Input[bool] force_destroy: Whether to allow the object to be deleted by removing any legal hold on any object version. Default is `false`. This value should be set to `true` only if the bucket has S3 object lock enabled.
:param pulumi.Input[str] key: Name of the object once it is in the bucket.
:param pulumi.Input[str] kms_key_id: ARN of the KMS Key to use for object encryption. If the S3 Bucket has server-side encryption enabled, that value will automatically be used. If referencing the `kms.Key` resource, use the `arn` attribute. If referencing the `kms.Alias` data source or resource, use the `target_key_arn` attribute. This provider will only perform drift detection if a configuration value is provided.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] metadata: Map of keys/values to provision metadata (will be automatically prefixed by `x-amz-meta-`, note that only lowercase labels are currently supported by the AWS Go API).
:param pulumi.Input[str] object_lock_legal_hold_status: [Legal hold](https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock-overview.html#object-lock-legal-holds) status that you want to apply to the specified object. Valid values are `ON` and `OFF`.
:param pulumi.Input[str] object_lock_mode: Object lock [retention mode](https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock-overview.html#object-lock-retention-modes) that you want to apply to this object. Valid values are `GOVERNANCE` and `COMPLIANCE`.
:param pulumi.Input[str] object_lock_retain_until_date: Date and time, in [RFC3339 format](https://tools.ietf.org/html/rfc3339#section-5.8), when this object's object lock will [expire](https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock-overview.html#object-lock-retention-periods).
:param pulumi.Input[str] server_side_encryption: Server-side encryption of the object in S3. Valid values are "`AES256`" and "`aws:kms`".
:param pulumi.Input[Union[pulumi.Asset, pulumi.Archive]] source: Path to a file that will be read and uploaded as raw bytes for the object content.
:param pulumi.Input[str] source_hash: Triggers updates like `etag` but useful to address `etag` encryption limitations. Set using `filemd5("path/to/source")`. (The value is only stored in state and not saved by AWS.)
:param pulumi.Input[str] storage_class: [Storage Class](https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html#AmazonS3-PutObject-request-header-StorageClass) for the object. Defaults to "`STANDARD`".
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Map of tags to assign to the object. If configured with a provider `default_tags` configuration block present, tags with matching keys will overwrite those defined at the provider-level.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags_all: Map of tags assigned to the resource, including those inherited from the provider `default_tags` configuration block.
:param pulumi.Input[str] version_id: Unique version ID value for the object, if bucket versioning is enabled.
:param pulumi.Input[str] website_redirect: Target URL for [website redirect](http://docs.aws.amazon.com/AmazonS3/latest/dev/how-to-page-redirect.html).
"""
if acl is not None:
pulumi.set(__self__, "acl", acl)
if bucket is not None:
warnings.warn("""Use the aws_s3_object resource instead""", DeprecationWarning)
pulumi.log.warn("""bucket is deprecated: Use the aws_s3_object resource instead""")
if bucket is not None:
pulumi.set(__self__, "bucket", bucket)
if bucket_key_enabled is not None:
pulumi.set(__self__, "bucket_key_enabled", bucket_key_enabled)
if cache_control is not None:
pulumi.set(__self__, "cache_control", cache_control)
if content is not None:
pulumi.set(__self__, "content", content)
if content_base64 is not None:
pulumi.set(__self__, "content_base64", content_base64)
if content_disposition is not None:
pulumi.set(__self__, "content_disposition", content_disposition)
if content_encoding is not None:
pulumi.set(__self__, "content_encoding", content_encoding)
if content_language is not None:
pulumi.set(__self__, "content_language", content_language)
if content_type is not None:
pulumi.set(__self__, "content_type", content_type)
if etag is not None:
pulumi.set(__self__, "etag", etag)
if force_destroy is not None:
pulumi.set(__self__, "force_destroy", force_destroy)
if key is not None:
warnings.warn("""Use the aws_s3_object resource instead""", DeprecationWarning)
pulumi.log.warn("""key is deprecated: Use the aws_s3_object resource instead""")
if key is not None:
pulumi.set(__self__, "key", key)
if kms_key_id is not None:
pulumi.set(__self__, "kms_key_id", kms_key_id)
if metadata is not None:
pulumi.set(__self__, "metadata", metadata)
if object_lock_legal_hold_status is not None:
pulumi.set(__self__, "object_lock_legal_hold_status", object_lock_legal_hold_status)
if object_lock_mode is not None:
pulumi.set(__self__, "object_lock_mode", object_lock_mode)
if object_lock_retain_until_date is not None:
pulumi.set(__self__, "object_lock_retain_until_date", object_lock_retain_until_date)
if server_side_encryption is not None:
pulumi.set(__self__, "server_side_encryption", server_side_encryption)
if source is not None:
pulumi.set(__self__, "source", source)
if source_hash is not None:
pulumi.set(__self__, "source_hash", source_hash)
if storage_class is not None:
pulumi.set(__self__, "storage_class", storage_class)
if tags is not None:
pulumi.set(__self__, "tags", tags)
if tags_all is not None:
pulumi.set(__self__, "tags_all", tags_all)
if version_id is not None:
pulumi.set(__self__, "version_id", version_id)
if website_redirect is not None:
pulumi.set(__self__, "website_redirect", website_redirect)
@property
@pulumi.getter
def acl(self) -> Optional[pulumi.Input[str]]:
"""
[Canned ACL](https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl) to apply. Valid values are `private`, `public-read`, `public-read-write`, `aws-exec-read`, `authenticated-read`, `bucket-owner-read`, and `bucket-owner-full-control`. Defaults to `private`.
"""
return pulumi.get(self, "acl")
@acl.setter
def acl(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "acl", value)
@property
@pulumi.getter
def bucket(self) -> Optional[pulumi.Input[str]]:
"""
Name of the bucket to put the file in. Alternatively, an [S3 access point](https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) ARN can be specified.
"""
return pulumi.get(self, "bucket")
@bucket.setter
def bucket(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "bucket", value)
@property
@pulumi.getter(name="bucketKeyEnabled")
def bucket_key_enabled(self) -> Optional[pulumi.Input[bool]]:
"""
Whether or not to use [Amazon S3 Bucket Keys](https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-key.html) for SSE-KMS.
"""
return pulumi.get(self, "bucket_key_enabled")
@bucket_key_enabled.setter
def bucket_key_enabled(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "bucket_key_enabled", value)
@property
@pulumi.getter(name="cacheControl")
def cache_control(self) -> Optional[pulumi.Input[str]]:
"""
        Caching behavior along the request/reply chain. Read [w3c cache_control](http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.9) for further details.
"""
return pulumi.get(self, "cache_control")
@cache_control.setter
def cache_control(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "cache_control", value)
@property
@pulumi.getter
def content(self) -> Optional[pulumi.Input[str]]:
"""
Literal string value to use as the object content, which will be uploaded as UTF-8-encoded text.
"""
return pulumi.get(self, "content")
@content.setter
def content(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "content", value)
@property
@pulumi.getter(name="contentBase64")
def content_base64(self) -> Optional[pulumi.Input[str]]:
"""
Base64-encoded data that will be decoded and uploaded as raw bytes for the object content. This allows safely uploading non-UTF8 binary data, but is recommended only for small content such as the result of the `gzipbase64` function with small text strings. For larger objects, use `source` to stream the content from a disk file.
"""
return pulumi.get(self, "content_base64")
@content_base64.setter
def content_base64(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "content_base64", value)
@property
@pulumi.getter(name="contentDisposition")
def content_disposition(self) -> Optional[pulumi.Input[str]]:
"""
Presentational information for the object. Read [w3c content_disposition](http://www.w3.org/Protocols/rfc2616/rfc2616-sec19.html#sec19.5.1) for further information.
"""
return pulumi.get(self, "content_disposition")
@content_disposition.setter
def content_disposition(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "content_disposition", value)
@property
@pulumi.getter(name="contentEncoding")
def content_encoding(self) -> Optional[pulumi.Input[str]]:
"""
Content encodings that have been applied to the object and thus what decoding mechanisms must be applied to obtain the media-type referenced by the Content-Type header field. Read [w3c content encoding](http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.11) for further information.
"""
return pulumi.get(self, "content_encoding")
@content_encoding.setter
def content_encoding(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "content_encoding", value)
@property
@pulumi.getter(name="contentLanguage")
def content_language(self) -> Optional[pulumi.Input[str]]:
"""
        Language the content is in, e.g., en-US or en-GB.
"""
return pulumi.get(self, "content_language")
@content_language.setter
def content_language(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "content_language", value)
@property
@pulumi.getter(name="contentType")
def content_type(self) -> Optional[pulumi.Input[str]]:
"""
        Standard MIME type describing the format of the object data, e.g., application/octet-stream. All valid MIME types are accepted for this input.
"""
return pulumi.get(self, "content_type")
@content_type.setter
def content_type(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "content_type", value)
@property
@pulumi.getter
def etag(self) -> Optional[pulumi.Input[str]]:
"""
Triggers updates when the value changes. The only meaningful value is `filemd5("path/to/file")`. This attribute is not compatible with KMS encryption, `kms_key_id` or `server_side_encryption = "aws:kms"` (see `source_hash` instead).
"""
return pulumi.get(self, "etag")
@etag.setter
def etag(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "etag", value)
@property
@pulumi.getter(name="forceDestroy")
def force_destroy(self) -> Optional[pulumi.Input[bool]]:
"""
Whether to allow the object to be deleted by removing any legal hold on any object version. Default is `false`. This value should be set to `true` only if the bucket has S3 object lock enabled.
"""
return pulumi.get(self, "force_destroy")
@force_destroy.setter
def force_destroy(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "force_destroy", value)
@property
@pulumi.getter
def key(self) -> Optional[pulumi.Input[str]]:
"""
Name of the object once it is in the bucket.
"""
return pulumi.get(self, "key")
@key.setter
def key(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "key", value)
@property
@pulumi.getter(name="kmsKeyId")
def kms_key_id(self) -> Optional[pulumi.Input[str]]:
"""
ARN of the KMS Key to use for object encryption. If the S3 Bucket has server-side encryption enabled, that value will automatically be used. If referencing the `kms.Key` resource, use the `arn` attribute. If referencing the `kms.Alias` data source or resource, use the `target_key_arn` attribute. This provider will only perform drift detection if a configuration value is provided.
"""
return pulumi.get(self, "kms_key_id")
@kms_key_id.setter
def kms_key_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "kms_key_id", value)
@property
@pulumi.getter
def metadata(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
        Map of keys/values to provision metadata (will be automatically prefixed by `x-amz-meta-`, note that only lowercase labels are currently supported by the AWS Go API).
"""
return pulumi.get(self, "metadata")
@metadata.setter
def metadata(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "metadata", value)
@property
@pulumi.getter(name="objectLockLegalHoldStatus")
def object_lock_legal_hold_status(self) -> Optional[pulumi.Input[str]]:
"""
[Legal hold](https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock-overview.html#object-lock-legal-holds) status that you want to apply to the specified object. Valid values are `ON` and `OFF`.
"""
return pulumi.get(self, "object_lock_legal_hold_status")
@object_lock_legal_hold_status.setter
def object_lock_legal_hold_status(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "object_lock_legal_hold_status", value)
@property
@pulumi.getter(name="objectLockMode")
def object_lock_mode(self) -> Optional[pulumi.Input[str]]:
"""
Object lock [retention mode](https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock-overview.html#object-lock-retention-modes) that you want to apply to this object. Valid values are `GOVERNANCE` and `COMPLIANCE`.
"""
return pulumi.get(self, "object_lock_mode")
@object_lock_mode.setter
def object_lock_mode(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "object_lock_mode", value)
@property
@pulumi.getter(name="objectLockRetainUntilDate")
def object_lock_retain_until_date(self) -> Optional[pulumi.Input[str]]:
"""
Date and time, in [RFC3339 format](https://tools.ietf.org/html/rfc3339#section-5.8), when this object's object lock will [expire](https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock-overview.html#object-lock-retention-periods).
"""
return pulumi.get(self, "object_lock_retain_until_date")
@object_lock_retain_until_date.setter
def object_lock_retain_until_date(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "object_lock_retain_until_date", value)
@property
@pulumi.getter(name="serverSideEncryption")
def server_side_encryption(self) -> Optional[pulumi.Input[str]]:
"""
Server-side encryption of the object in S3. Valid values are "`AES256`" and "`aws:kms`".
"""
return pulumi.get(self, "server_side_encryption")
@server_side_encryption.setter
def server_side_encryption(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "server_side_encryption", value)
@property
@pulumi.getter
def source(self) -> Optional[pulumi.Input[Union[pulumi.Asset, pulumi.Archive]]]:
"""
Path to a file that will be read and uploaded as raw bytes for the object content.
"""
return pulumi.get(self, "source")
@source.setter
def source(self, value: Optional[pulumi.Input[Union[pulumi.Asset, pulumi.Archive]]]):
pulumi.set(self, "source", value)
@property
@pulumi.getter(name="sourceHash")
def source_hash(self) -> Optional[pulumi.Input[str]]:
"""
Triggers updates like `etag` but useful to address `etag` encryption limitations. Set using `filemd5("path/to/source")`. (The value is only stored in state and not saved by AWS.)
"""
return pulumi.get(self, "source_hash")
@source_hash.setter
def source_hash(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "source_hash", value)
@property
@pulumi.getter(name="storageClass")
def storage_class(self) -> Optional[pulumi.Input[str]]:
"""
[Storage Class](https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html#AmazonS3-PutObject-request-header-StorageClass) for the object. Defaults to "`STANDARD`".
"""
return pulumi.get(self, "storage_class")
@storage_class.setter
def storage_class(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "storage_class", value)
@property
@pulumi.getter
def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
Map of tags to assign to the object. If configured with a provider `default_tags` configuration block present, tags with matching keys will overwrite those defined at the provider-level.
"""
return pulumi.get(self, "tags")
@tags.setter
def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "tags", value)
@property
@pulumi.getter(name="tagsAll")
def tags_all(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
Map of tags assigned to the resource, including those inherited from the provider `default_tags` configuration block.
"""
return pulumi.get(self, "tags_all")
@tags_all.setter
def tags_all(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "tags_all", value)
@property
@pulumi.getter(name="versionId")
def version_id(self) -> Optional[pulumi.Input[str]]:
"""
Unique version ID value for the object, if bucket versioning is enabled.
"""
return pulumi.get(self, "version_id")
@version_id.setter
def version_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "version_id", value)
@property
@pulumi.getter(name="websiteRedirect")
def website_redirect(self) -> Optional[pulumi.Input[str]]:
"""
Target URL for [website redirect](http://docs.aws.amazon.com/AmazonS3/latest/dev/how-to-page-redirect.html).
"""
return pulumi.get(self, "website_redirect")
@website_redirect.setter
def website_redirect(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "website_redirect", value)
class BucketObject(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
acl: Optional[pulumi.Input[str]] = None,
bucket: Optional[pulumi.Input[str]] = None,
bucket_key_enabled: Optional[pulumi.Input[bool]] = None,
cache_control: Optional[pulumi.Input[str]] = None,
content: Optional[pulumi.Input[str]] = None,
content_base64: Optional[pulumi.Input[str]] = None,
content_disposition: Optional[pulumi.Input[str]] = None,
content_encoding: Optional[pulumi.Input[str]] = None,
content_language: Optional[pulumi.Input[str]] = None,
content_type: Optional[pulumi.Input[str]] = None,
etag: Optional[pulumi.Input[str]] = None,
force_destroy: Optional[pulumi.Input[bool]] = None,
key: Optional[pulumi.Input[str]] = None,
kms_key_id: Optional[pulumi.Input[str]] = None,
metadata: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
object_lock_legal_hold_status: Optional[pulumi.Input[str]] = None,
object_lock_mode: Optional[pulumi.Input[str]] = None,
object_lock_retain_until_date: Optional[pulumi.Input[str]] = None,
server_side_encryption: Optional[pulumi.Input[str]] = None,
source: Optional[pulumi.Input[Union[pulumi.Asset, pulumi.Archive]]] = None,
source_hash: Optional[pulumi.Input[str]] = None,
storage_class: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
website_redirect: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
## Example Usage
### Encrypting with KMS Key
```python
import pulumi
import pulumi_aws as aws
examplekms = aws.kms.Key("examplekms",
description="KMS key 1",
deletion_window_in_days=7)
examplebucket = aws.s3.BucketV2("examplebucket")
example_bucket_acl_v2 = aws.s3.BucketAclV2("exampleBucketAclV2",
bucket=examplebucket.id,
acl="private")
example_bucket_object = aws.s3.BucketObject("exampleBucketObject",
key="someobject",
bucket=examplebucket.id,
source=pulumi.FileAsset("index.html"),
kms_key_id=examplekms.arn)
```
### Server Side Encryption with S3 Default Master Key
```python
import pulumi
import pulumi_aws as aws
examplebucket = aws.s3.BucketV2("examplebucket")
example_bucket_acl_v2 = aws.s3.BucketAclV2("exampleBucketAclV2",
bucket=examplebucket.id,
acl="private")
example_bucket_object = aws.s3.BucketObject("exampleBucketObject",
key="someobject",
bucket=examplebucket.id,
source=pulumi.FileAsset("index.html"),
server_side_encryption="aws:kms")
```
### Server Side Encryption with AWS-Managed Key
```python
import pulumi
import pulumi_aws as aws
examplebucket = aws.s3.BucketV2("examplebucket")
example_bucket_acl_v2 = aws.s3.BucketAclV2("exampleBucketAclV2",
bucket=examplebucket.id,
acl="private")
example_bucket_object = aws.s3.BucketObject("exampleBucketObject",
key="someobject",
bucket=examplebucket.id,
source=pulumi.FileAsset("index.html"),
server_side_encryption="AES256")
```
### S3 Object Lock
```python
import pulumi
import pulumi_aws as aws
examplebucket = aws.s3.BucketV2("examplebucket", object_lock_configuration=aws.s3.BucketV2ObjectLockConfigurationArgs(
object_lock_enabled="Enabled",
))
example_bucket_acl_v2 = aws.s3.BucketAclV2("exampleBucketAclV2",
bucket=examplebucket.id,
acl="private")
example_bucket_versioning_v2 = aws.s3.BucketVersioningV2("exampleBucketVersioningV2",
bucket=examplebucket.id,
versioning_configuration=aws.s3.BucketVersioningV2VersioningConfigurationArgs(
status="Enabled",
))
example_bucket_object = aws.s3.BucketObject("exampleBucketObject",
key="someobject",
bucket=examplebucket.id,
source=pulumi.FileAsset("important.txt"),
object_lock_legal_hold_status="ON",
object_lock_mode="GOVERNANCE",
object_lock_retain_until_date="2021-12-31T23:59:60Z",
force_destroy=True,
opts=pulumi.ResourceOptions(depends_on=[example_bucket_versioning_v2]))
```
## Import
        Objects can be imported using the `id`. The `id` is the bucket name and the key together, e.g.,
```sh
$ pulumi import aws:s3/bucketObject:BucketObject object some-bucket-name/some/key.txt
```
        Additionally, S3 URL syntax can be used, e.g.,
```sh
$ pulumi import aws:s3/bucketObject:BucketObject object s3://some-bucket-name/some/key.txt
```
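        As a minimal, illustrative sketch (the bucket and key below are placeholders), an existing object can also be adopted from Python code by passing the same `<bucket>/<key>` ID via `pulumi.ResourceOptions(import_=...)`:
        ```python
        import pulumi
        import pulumi_aws as aws
        # Placeholder bucket/key: these must describe the already-existing object being adopted.
        adopted = aws.s3.BucketObject("adopted",
            bucket="some-bucket-name",
            key="some/key.txt",
            opts=pulumi.ResourceOptions(import_="some-bucket-name/some/key.txt"))
        ```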
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] acl: [Canned ACL](https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl) to apply. Valid values are `private`, `public-read`, `public-read-write`, `aws-exec-read`, `authenticated-read`, `bucket-owner-read`, and `bucket-owner-full-control`. Defaults to `private`.
:param pulumi.Input[str] bucket: Name of the bucket to put the file in. Alternatively, an [S3 access point](https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) ARN can be specified.
:param pulumi.Input[bool] bucket_key_enabled: Whether or not to use [Amazon S3 Bucket Keys](https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-key.html) for SSE-KMS.
        :param pulumi.Input[str] cache_control: Caching behavior along the request/reply chain. Read [w3c cache_control](http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.9) for further details.
:param pulumi.Input[str] content: Literal string value to use as the object content, which will be uploaded as UTF-8-encoded text.
:param pulumi.Input[str] content_base64: Base64-encoded data that will be decoded and uploaded as raw bytes for the object content. This allows safely uploading non-UTF8 binary data, but is recommended only for small content such as the result of the `gzipbase64` function with small text strings. For larger objects, use `source` to stream the content from a disk file.
:param pulumi.Input[str] content_disposition: Presentational information for the object. Read [w3c content_disposition](http://www.w3.org/Protocols/rfc2616/rfc2616-sec19.html#sec19.5.1) for further information.
:param pulumi.Input[str] content_encoding: Content encodings that have been applied to the object and thus what decoding mechanisms must be applied to obtain the media-type referenced by the Content-Type header field. Read [w3c content encoding](http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.11) for further information.
        :param pulumi.Input[str] content_language: Language the content is in, e.g., en-US or en-GB.
        :param pulumi.Input[str] content_type: Standard MIME type describing the format of the object data, e.g., application/octet-stream. All valid MIME types are accepted for this input.
:param pulumi.Input[str] etag: Triggers updates when the value changes. The only meaningful value is `filemd5("path/to/file")`. This attribute is not compatible with KMS encryption, `kms_key_id` or `server_side_encryption = "aws:kms"` (see `source_hash` instead).
:param pulumi.Input[bool] force_destroy: Whether to allow the object to be deleted by removing any legal hold on any object version. Default is `false`. This value should be set to `true` only if the bucket has S3 object lock enabled.
:param pulumi.Input[str] key: Name of the object once it is in the bucket.
:param pulumi.Input[str] kms_key_id: ARN of the KMS Key to use for object encryption. If the S3 Bucket has server-side encryption enabled, that value will automatically be used. If referencing the `kms.Key` resource, use the `arn` attribute. If referencing the `kms.Alias` data source or resource, use the `target_key_arn` attribute. This provider will only perform drift detection if a configuration value is provided.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] metadata: Map of keys/values to provision metadata (will be automatically prefixed by `x-amz-meta-`, note that only lowercase labels are currently supported by the AWS Go API).
:param pulumi.Input[str] object_lock_legal_hold_status: [Legal hold](https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock-overview.html#object-lock-legal-holds) status that you want to apply to the specified object. Valid values are `ON` and `OFF`.
:param pulumi.Input[str] object_lock_mode: Object lock [retention mode](https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock-overview.html#object-lock-retention-modes) that you want to apply to this object. Valid values are `GOVERNANCE` and `COMPLIANCE`.
:param pulumi.Input[str] object_lock_retain_until_date: Date and time, in [RFC3339 format](https://tools.ietf.org/html/rfc3339#section-5.8), when this object's object lock will [expire](https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock-overview.html#object-lock-retention-periods).
:param pulumi.Input[str] server_side_encryption: Server-side encryption of the object in S3. Valid values are "`AES256`" and "`aws:kms`".
:param pulumi.Input[Union[pulumi.Asset, pulumi.Archive]] source: Path to a file that will be read and uploaded as raw bytes for the object content.
:param pulumi.Input[str] source_hash: Triggers updates like `etag` but useful to address `etag` encryption limitations. Set using `filemd5("path/to/source")`. (The value is only stored in state and not saved by AWS.)
:param pulumi.Input[str] storage_class: [Storage Class](https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html#AmazonS3-PutObject-request-header-StorageClass) for the object. Defaults to "`STANDARD`".
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Map of tags to assign to the object. If configured with a provider `default_tags` configuration block present, tags with matching keys will overwrite those defined at the provider-level.
:param pulumi.Input[str] website_redirect: Target URL for [website redirect](http://docs.aws.amazon.com/AmazonS3/latest/dev/how-to-page-redirect.html).
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: BucketObjectArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
## Example Usage
### Encrypting with KMS Key
```python
import pulumi
import pulumi_aws as aws
examplekms = aws.kms.Key("examplekms",
description="KMS key 1",
deletion_window_in_days=7)
examplebucket = aws.s3.BucketV2("examplebucket")
example_bucket_acl_v2 = aws.s3.BucketAclV2("exampleBucketAclV2",
bucket=examplebucket.id,
acl="private")
example_bucket_object = aws.s3.BucketObject("exampleBucketObject",
key="someobject",
bucket=examplebucket.id,
source=pulumi.FileAsset("index.html"),
kms_key_id=examplekms.arn)
```
### Server Side Encryption with S3 Default Master Key
```python
import pulumi
import pulumi_aws as aws
examplebucket = aws.s3.BucketV2("examplebucket")
example_bucket_acl_v2 = aws.s3.BucketAclV2("exampleBucketAclV2",
bucket=examplebucket.id,
acl="private")
example_bucket_object = aws.s3.BucketObject("exampleBucketObject",
key="someobject",
bucket=examplebucket.id,
source=pulumi.FileAsset("index.html"),
server_side_encryption="aws:kms")
```
### Server Side Encryption with AWS-Managed Key
```python
import pulumi
import pulumi_aws as aws
examplebucket = aws.s3.BucketV2("examplebucket")
example_bucket_acl_v2 = aws.s3.BucketAclV2("exampleBucketAclV2",
bucket=examplebucket.id,
acl="private")
example_bucket_object = aws.s3.BucketObject("exampleBucketObject",
key="someobject",
bucket=examplebucket.id,
source=pulumi.FileAsset("index.html"),
server_side_encryption="AES256")
```
### S3 Object Lock
```python
import pulumi
import pulumi_aws as aws
examplebucket = aws.s3.BucketV2("examplebucket", object_lock_configuration=aws.s3.BucketV2ObjectLockConfigurationArgs(
object_lock_enabled="Enabled",
))
example_bucket_acl_v2 = aws.s3.BucketAclV2("exampleBucketAclV2",
bucket=examplebucket.id,
acl="private")
example_bucket_versioning_v2 = aws.s3.BucketVersioningV2("exampleBucketVersioningV2",
bucket=examplebucket.id,
versioning_configuration=aws.s3.BucketVersioningV2VersioningConfigurationArgs(
status="Enabled",
))
example_bucket_object = aws.s3.BucketObject("exampleBucketObject",
key="someobject",
bucket=examplebucket.id,
source=pulumi.FileAsset("important.txt"),
object_lock_legal_hold_status="ON",
object_lock_mode="GOVERNANCE",
object_lock_retain_until_date="2021-12-31T23:59:60Z",
force_destroy=True,
opts=pulumi.ResourceOptions(depends_on=[example_bucket_versioning_v2]))
```
## Import
        Objects can be imported using the `id`. The `id` is the bucket name and the key together, e.g.,
```sh
$ pulumi import aws:s3/bucketObject:BucketObject object some-bucket-name/some/key.txt
```
        Additionally, S3 URL syntax can be used, e.g.,
```sh
$ pulumi import aws:s3/bucketObject:BucketObject object s3://some-bucket-name/some/key.txt
```
:param str resource_name: The name of the resource.
:param BucketObjectArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(BucketObjectArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
acl: Optional[pulumi.Input[str]] = None,
bucket: Optional[pulumi.Input[str]] = None,
bucket_key_enabled: Optional[pulumi.Input[bool]] = None,
cache_control: Optional[pulumi.Input[str]] = None,
content: Optional[pulumi.Input[str]] = None,
content_base64: Optional[pulumi.Input[str]] = None,
content_disposition: Optional[pulumi.Input[str]] = None,
content_encoding: Optional[pulumi.Input[str]] = None,
content_language: Optional[pulumi.Input[str]] = None,
content_type: Optional[pulumi.Input[str]] = None,
etag: Optional[pulumi.Input[str]] = None,
force_destroy: Optional[pulumi.Input[bool]] = None,
key: Optional[pulumi.Input[str]] = None,
kms_key_id: Optional[pulumi.Input[str]] = None,
metadata: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
object_lock_legal_hold_status: Optional[pulumi.Input[str]] = None,
object_lock_mode: Optional[pulumi.Input[str]] = None,
object_lock_retain_until_date: Optional[pulumi.Input[str]] = None,
server_side_encryption: Optional[pulumi.Input[str]] = None,
source: Optional[pulumi.Input[Union[pulumi.Asset, pulumi.Archive]]] = None,
source_hash: Optional[pulumi.Input[str]] = None,
storage_class: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
website_redirect: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = BucketObjectArgs.__new__(BucketObjectArgs)
__props__.__dict__["acl"] = acl
if bucket is None and not opts.urn:
raise TypeError("Missing required property 'bucket'")
if bucket is not None and not opts.urn:
warnings.warn("""Use the aws_s3_object resource instead""", DeprecationWarning)
pulumi.log.warn("""bucket is deprecated: Use the aws_s3_object resource instead""")
__props__.__dict__["bucket"] = bucket
__props__.__dict__["bucket_key_enabled"] = bucket_key_enabled
__props__.__dict__["cache_control"] = cache_control
__props__.__dict__["content"] = content
__props__.__dict__["content_base64"] = content_base64
__props__.__dict__["content_disposition"] = content_disposition
__props__.__dict__["content_encoding"] = content_encoding
__props__.__dict__["content_language"] = content_language
__props__.__dict__["content_type"] = content_type
__props__.__dict__["etag"] = etag
__props__.__dict__["force_destroy"] = force_destroy
if key is not None and not opts.urn:
warnings.warn("""Use the aws_s3_object resource instead""", DeprecationWarning)
pulumi.log.warn("""key is deprecated: Use the aws_s3_object resource instead""")
__props__.__dict__["key"] = key
__props__.__dict__["kms_key_id"] = kms_key_id
__props__.__dict__["metadata"] = metadata
__props__.__dict__["object_lock_legal_hold_status"] = object_lock_legal_hold_status
__props__.__dict__["object_lock_mode"] = object_lock_mode
__props__.__dict__["object_lock_retain_until_date"] = object_lock_retain_until_date
__props__.__dict__["server_side_encryption"] = server_side_encryption
__props__.__dict__["source"] = source
__props__.__dict__["source_hash"] = source_hash
__props__.__dict__["storage_class"] = storage_class
__props__.__dict__["tags"] = tags
__props__.__dict__["website_redirect"] = website_redirect
__props__.__dict__["tags_all"] = None
__props__.__dict__["version_id"] = None
super(BucketObject, __self__).__init__(
'aws:s3/bucketObject:BucketObject',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
acl: Optional[pulumi.Input[str]] = None,
bucket: Optional[pulumi.Input[str]] = None,
bucket_key_enabled: Optional[pulumi.Input[bool]] = None,
cache_control: Optional[pulumi.Input[str]] = None,
content: Optional[pulumi.Input[str]] = None,
content_base64: Optional[pulumi.Input[str]] = None,
content_disposition: Optional[pulumi.Input[str]] = None,
content_encoding: Optional[pulumi.Input[str]] = None,
content_language: Optional[pulumi.Input[str]] = None,
content_type: Optional[pulumi.Input[str]] = None,
etag: Optional[pulumi.Input[str]] = None,
force_destroy: Optional[pulumi.Input[bool]] = None,
key: Optional[pulumi.Input[str]] = None,
kms_key_id: Optional[pulumi.Input[str]] = None,
metadata: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
object_lock_legal_hold_status: Optional[pulumi.Input[str]] = None,
object_lock_mode: Optional[pulumi.Input[str]] = None,
object_lock_retain_until_date: Optional[pulumi.Input[str]] = None,
server_side_encryption: Optional[pulumi.Input[str]] = None,
source: Optional[pulumi.Input[Union[pulumi.Asset, pulumi.Archive]]] = None,
source_hash: Optional[pulumi.Input[str]] = None,
storage_class: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
tags_all: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
version_id: Optional[pulumi.Input[str]] = None,
website_redirect: Optional[pulumi.Input[str]] = None) -> 'BucketObject':
"""
Get an existing BucketObject resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
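        A hedged lookup sketch (the resource ID below is a placeholder in `<bucket>/<key>` form):
        ```python
        import pulumi_aws as aws
        existing = aws.s3.BucketObject.get("existing-object", "some-bucket-name/some/key.txt")
        ```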
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] acl: [Canned ACL](https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl) to apply. Valid values are `private`, `public-read`, `public-read-write`, `aws-exec-read`, `authenticated-read`, `bucket-owner-read`, and `bucket-owner-full-control`. Defaults to `private`.
:param pulumi.Input[str] bucket: Name of the bucket to put the file in. Alternatively, an [S3 access point](https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) ARN can be specified.
:param pulumi.Input[bool] bucket_key_enabled: Whether or not to use [Amazon S3 Bucket Keys](https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-key.html) for SSE-KMS.
        :param pulumi.Input[str] cache_control: Caching behavior along the request/reply chain. Read [w3c cache_control](http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.9) for further details.
:param pulumi.Input[str] content: Literal string value to use as the object content, which will be uploaded as UTF-8-encoded text.
:param pulumi.Input[str] content_base64: Base64-encoded data that will be decoded and uploaded as raw bytes for the object content. This allows safely uploading non-UTF8 binary data, but is recommended only for small content such as the result of the `gzipbase64` function with small text strings. For larger objects, use `source` to stream the content from a disk file.
:param pulumi.Input[str] content_disposition: Presentational information for the object. Read [w3c content_disposition](http://www.w3.org/Protocols/rfc2616/rfc2616-sec19.html#sec19.5.1) for further information.
:param pulumi.Input[str] content_encoding: Content encodings that have been applied to the object and thus what decoding mechanisms must be applied to obtain the media-type referenced by the Content-Type header field. Read [w3c content encoding](http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.11) for further information.
        :param pulumi.Input[str] content_language: Language the content is in, e.g., en-US or en-GB.
        :param pulumi.Input[str] content_type: Standard MIME type describing the format of the object data, e.g., application/octet-stream. All valid MIME types are accepted for this input.
:param pulumi.Input[str] etag: Triggers updates when the value changes. The only meaningful value is `filemd5("path/to/file")`. This attribute is not compatible with KMS encryption, `kms_key_id` or `server_side_encryption = "aws:kms"` (see `source_hash` instead).
:param pulumi.Input[bool] force_destroy: Whether to allow the object to be deleted by removing any legal hold on any object version. Default is `false`. This value should be set to `true` only if the bucket has S3 object lock enabled.
:param pulumi.Input[str] key: Name of the object once it is in the bucket.
:param pulumi.Input[str] kms_key_id: ARN of the KMS Key to use for object encryption. If the S3 Bucket has server-side encryption enabled, that value will automatically be used. If referencing the `kms.Key` resource, use the `arn` attribute. If referencing the `kms.Alias` data source or resource, use the `target_key_arn` attribute. This provider will only perform drift detection if a configuration value is provided.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] metadata: Map of keys/values to provision metadata (will be automatically prefixed by `x-amz-meta-`, note that only lowercase labels are currently supported by the AWS Go API).
:param pulumi.Input[str] object_lock_legal_hold_status: [Legal hold](https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock-overview.html#object-lock-legal-holds) status that you want to apply to the specified object. Valid values are `ON` and `OFF`.
:param pulumi.Input[str] object_lock_mode: Object lock [retention mode](https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock-overview.html#object-lock-retention-modes) that you want to apply to this object. Valid values are `GOVERNANCE` and `COMPLIANCE`.
:param pulumi.Input[str] object_lock_retain_until_date: Date and time, in [RFC3339 format](https://tools.ietf.org/html/rfc3339#section-5.8), when this object's object lock will [expire](https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock-overview.html#object-lock-retention-periods).
:param pulumi.Input[str] server_side_encryption: Server-side encryption of the object in S3. Valid values are "`AES256`" and "`aws:kms`".
:param pulumi.Input[Union[pulumi.Asset, pulumi.Archive]] source: Path to a file that will be read and uploaded as raw bytes for the object content.
:param pulumi.Input[str] source_hash: Triggers updates like `etag` but useful to address `etag` encryption limitations. Set using `filemd5("path/to/source")`. (The value is only stored in state and not saved by AWS.)
:param pulumi.Input[str] storage_class: [Storage Class](https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html#AmazonS3-PutObject-request-header-StorageClass) for the object. Defaults to "`STANDARD`".
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Map of tags to assign to the object. If configured with a provider `default_tags` configuration block present, tags with matching keys will overwrite those defined at the provider-level.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags_all: Map of tags assigned to the resource, including those inherited from the provider `default_tags` configuration block.
:param pulumi.Input[str] version_id: Unique version ID value for the object, if bucket versioning is enabled.
:param pulumi.Input[str] website_redirect: Target URL for [website redirect](http://docs.aws.amazon.com/AmazonS3/latest/dev/how-to-page-redirect.html).
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _BucketObjectState.__new__(_BucketObjectState)
__props__.__dict__["acl"] = acl
__props__.__dict__["bucket"] = bucket
__props__.__dict__["bucket_key_enabled"] = bucket_key_enabled
__props__.__dict__["cache_control"] = cache_control
__props__.__dict__["content"] = content
__props__.__dict__["content_base64"] = content_base64
__props__.__dict__["content_disposition"] = content_disposition
__props__.__dict__["content_encoding"] = content_encoding
__props__.__dict__["content_language"] = content_language
__props__.__dict__["content_type"] = content_type
__props__.__dict__["etag"] = etag
__props__.__dict__["force_destroy"] = force_destroy
__props__.__dict__["key"] = key
__props__.__dict__["kms_key_id"] = kms_key_id
__props__.__dict__["metadata"] = metadata
__props__.__dict__["object_lock_legal_hold_status"] = object_lock_legal_hold_status
__props__.__dict__["object_lock_mode"] = object_lock_mode
__props__.__dict__["object_lock_retain_until_date"] = object_lock_retain_until_date
__props__.__dict__["server_side_encryption"] = server_side_encryption
__props__.__dict__["source"] = source
__props__.__dict__["source_hash"] = source_hash
__props__.__dict__["storage_class"] = storage_class
__props__.__dict__["tags"] = tags
__props__.__dict__["tags_all"] = tags_all
__props__.__dict__["version_id"] = version_id
__props__.__dict__["website_redirect"] = website_redirect
return BucketObject(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def acl(self) -> pulumi.Output[Optional[str]]:
"""
[Canned ACL](https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl) to apply. Valid values are `private`, `public-read`, `public-read-write`, `aws-exec-read`, `authenticated-read`, `bucket-owner-read`, and `bucket-owner-full-control`. Defaults to `private`.
"""
return pulumi.get(self, "acl")
@property
@pulumi.getter
def bucket(self) -> pulumi.Output[str]:
"""
Name of the bucket to put the file in. Alternatively, an [S3 access point](https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) ARN can be specified.
"""
return pulumi.get(self, "bucket")
@property
@pulumi.getter(name="bucketKeyEnabled")
def bucket_key_enabled(self) -> pulumi.Output[bool]:
"""
Whether or not to use [Amazon S3 Bucket Keys](https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-key.html) for SSE-KMS.
"""
return pulumi.get(self, "bucket_key_enabled")
@property
@pulumi.getter(name="cacheControl")
def cache_control(self) -> pulumi.Output[Optional[str]]:
"""
        Caching behavior along the request/reply chain. Read [w3c cache_control](http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.9) for further details.
"""
return pulumi.get(self, "cache_control")
@property
@pulumi.getter
def content(self) -> pulumi.Output[Optional[str]]:
"""
Literal string value to use as the object content, which will be uploaded as UTF-8-encoded text.
"""
return pulumi.get(self, "content")
@property
@pulumi.getter(name="contentBase64")
def content_base64(self) -> pulumi.Output[Optional[str]]:
"""
Base64-encoded data that will be decoded and uploaded as raw bytes for the object content. This allows safely uploading non-UTF8 binary data, but is recommended only for small content such as the result of the `gzipbase64` function with small text strings. For larger objects, use `source` to stream the content from a disk file.
"""
return pulumi.get(self, "content_base64")
@property
@pulumi.getter(name="contentDisposition")
def content_disposition(self) -> pulumi.Output[Optional[str]]:
"""
Presentational information for the object. Read [w3c content_disposition](http://www.w3.org/Protocols/rfc2616/rfc2616-sec19.html#sec19.5.1) for further information.
"""
return pulumi.get(self, "content_disposition")
@property
@pulumi.getter(name="contentEncoding")
def content_encoding(self) -> pulumi.Output[Optional[str]]:
"""
Content encodings that have been applied to the object and thus what decoding mechanisms must be applied to obtain the media-type referenced by the Content-Type header field. Read [w3c content encoding](http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.11) for further information.
"""
return pulumi.get(self, "content_encoding")
@property
@pulumi.getter(name="contentLanguage")
def content_language(self) -> pulumi.Output[Optional[str]]:
"""
        Language the content is in, e.g., en-US or en-GB.
"""
return pulumi.get(self, "content_language")
@property
@pulumi.getter(name="contentType")
def content_type(self) -> pulumi.Output[str]:
"""
        Standard MIME type describing the format of the object data, e.g., application/octet-stream. All valid MIME types are accepted for this input.
"""
return pulumi.get(self, "content_type")
@property
@pulumi.getter
def etag(self) -> pulumi.Output[str]:
"""
Triggers updates when the value changes. The only meaningful value is `filemd5("path/to/file")`. This attribute is not compatible with KMS encryption, `kms_key_id` or `server_side_encryption = "aws:kms"` (see `source_hash` instead).
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter(name="forceDestroy")
def force_destroy(self) -> pulumi.Output[Optional[bool]]:
"""
Whether to allow the object to be deleted by removing any legal hold on any object version. Default is `false`. This value should be set to `true` only if the bucket has S3 object lock enabled.
"""
return pulumi.get(self, "force_destroy")
@property
@pulumi.getter
def key(self) -> pulumi.Output[str]:
"""
Name of the object once it is in the bucket.
"""
return pulumi.get(self, "key")
@property
@pulumi.getter(name="kmsKeyId")
def kms_key_id(self) -> pulumi.Output[str]:
"""
ARN of the KMS Key to use for object encryption. If the S3 Bucket has server-side encryption enabled, that value will automatically be used. If referencing the `kms.Key` resource, use the `arn` attribute. If referencing the `kms.Alias` data source or resource, use the `target_key_arn` attribute. This provider will only perform drift detection if a configuration value is provided.
"""
return pulumi.get(self, "kms_key_id")
@property
@pulumi.getter
def metadata(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
"""
        Map of keys/values to provision metadata (will be automatically prefixed by `x-amz-meta-`, note that only lowercase labels are currently supported by the AWS Go API).
"""
return pulumi.get(self, "metadata")
@property
@pulumi.getter(name="objectLockLegalHoldStatus")
def object_lock_legal_hold_status(self) -> pulumi.Output[Optional[str]]:
"""
[Legal hold](https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock-overview.html#object-lock-legal-holds) status that you want to apply to the specified object. Valid values are `ON` and `OFF`.
"""
return pulumi.get(self, "object_lock_legal_hold_status")
@property
@pulumi.getter(name="objectLockMode")
def object_lock_mode(self) -> pulumi.Output[Optional[str]]:
"""
Object lock [retention mode](https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock-overview.html#object-lock-retention-modes) that you want to apply to this object. Valid values are `GOVERNANCE` and `COMPLIANCE`.
"""
return pulumi.get(self, "object_lock_mode")
@property
@pulumi.getter(name="objectLockRetainUntilDate")
def object_lock_retain_until_date(self) -> pulumi.Output[Optional[str]]:
"""
Date and time, in [RFC3339 format](https://tools.ietf.org/html/rfc3339#section-5.8), when this object's object lock will [expire](https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock-overview.html#object-lock-retention-periods).
"""
return pulumi.get(self, "object_lock_retain_until_date")
@property
@pulumi.getter(name="serverSideEncryption")
def server_side_encryption(self) -> pulumi.Output[str]:
"""
Server-side encryption of the object in S3. Valid values are "`AES256`" and "`aws:kms`".
"""
return pulumi.get(self, "server_side_encryption")
@property
@pulumi.getter
def source(self) -> pulumi.Output[Optional[Union[pulumi.Asset, pulumi.Archive]]]:
"""
Path to a file that will be read and uploaded as raw bytes for the object content.
"""
return pulumi.get(self, "source")
@property
@pulumi.getter(name="sourceHash")
def source_hash(self) -> pulumi.Output[Optional[str]]:
"""
Triggers updates like `etag` but useful to address `etag` encryption limitations. Set using `filemd5("path/to/source")`. (The value is only stored in state and not saved by AWS.)
"""
return pulumi.get(self, "source_hash")
@property
@pulumi.getter(name="storageClass")
def storage_class(self) -> pulumi.Output[str]:
"""
[Storage Class](https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html#AmazonS3-PutObject-request-header-StorageClass) for the object. Defaults to "`STANDARD`".
"""
return pulumi.get(self, "storage_class")
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
"""
Map of tags to assign to the object. If configured with a provider `default_tags` configuration block present, tags with matching keys will overwrite those defined at the provider-level.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter(name="tagsAll")
def tags_all(self) -> pulumi.Output[Mapping[str, str]]:
"""
Map of tags assigned to the resource, including those inherited from the provider `default_tags` configuration block.
"""
return pulumi.get(self, "tags_all")
@property
@pulumi.getter(name="versionId")
def version_id(self) -> pulumi.Output[str]:
"""
Unique version ID value for the object, if bucket versioning is enabled.
"""
return pulumi.get(self, "version_id")
@property
@pulumi.getter(name="websiteRedirect")
def website_redirect(self) -> pulumi.Output[Optional[str]]:
"""
Target URL for [website redirect](http://docs.aws.amazon.com/AmazonS3/latest/dev/how-to-page-redirect.html).
"""
return pulumi.get(self, "website_redirect")
|
py | 1a2f727f4d21c6fe544013e24ac6ab8313bb3b06 | """
Script for testing on CUB.
Sample usage:
python -m cmr.benchmark.evaluate --split val --name <model_name> --num_train_epoch <model_epoch>
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import app
from absl import flags
import os
import os.path as osp
import numpy as np
import torch
import scipy.io as sio
from ..nnutils import test_utils
from ..data import cub as cub_data
from ..nnutils import predictor as pred_utils
flags.DEFINE_boolean('visualize', False, 'if true visualizes things')
opts = flags.FLAGS
class ShapeTester(test_utils.Tester):
def define_model(self):
opts = self.opts
self.predictor = pred_utils.MeshPredictor(opts)
# for visualization
self.renderer = self.predictor.vis_rend
self.renderer.set_bgcolor([1., 1., 1.])
self.renderer.renderer.renderer.renderer.image_size = 512
self.renderer.set_light_dir([0, 1, -1], 0.38)
def init_dataset(self):
opts = self.opts
self.data_module = cub_data
torch.manual_seed(0)
self.dataloader = self.data_module.data_loader(opts)
def evaluate(self, outputs, batch):
"""
Compute IOU and keypoint error
"""
opts = self.opts
bs = opts.batch_size
## compute iou
mask_gt = batch['mask'].view(bs, -1).numpy()
mask_pred = outputs['mask_pred'].cpu().view(bs, -1).type_as(
batch['mask']).numpy()
intersection = mask_gt * mask_pred
union = mask_gt + mask_pred - intersection
iou = intersection.sum(1) / union.sum(1)
# Compute pck
padding_frac = opts.padding_frac
# The [-1,1] coordinate frame in which keypoints corresponds to:
# (1+2*padding_frac)*max_bbox_dim in image coords
# pt_norm = 2* (pt_img - trans)/((1+2*pf)*max_bbox_dim)
# err_pt = 2*err_img/((1+2*pf)*max_bbox_dim)
# err_pck_norm = err_img/max_bbox_dim = err_pt*(1+2*pf)/2
        # so the keypoint error in the canonical frame should be multiplied by:
err_scaling = (1 + 2 * padding_frac) / 2.0
kps_gt = batch['kp'].cpu().numpy()
kps_vis = kps_gt[:, :, 2]
kps_gt = kps_gt[:, :, 0:2]
kps_pred = outputs['kp_pred'].cpu().type_as(batch['kp']).numpy()
kps_err = kps_pred - kps_gt
kps_err = np.sqrt(np.sum(kps_err * kps_err, axis=2)) * err_scaling
return iou, kps_err, kps_vis
def visualize(self, outputs, batch):
vert = outputs['verts'][0]
cam = outputs['cam_pred'][0]
texture = outputs['texture'][0]
img_pred = self.renderer(vert, cam, texture=texture)
aroundz = []
aroundy = []
# for deg in np.arange(0, 180, 30):
for deg in np.arange(0, 150, 30):
rendz = self.renderer.diff_vp(
vert, cam, angle=-deg, axis=[1, 0, 0], texture=texture)
rendy = self.renderer.diff_vp(
vert, cam, angle=deg, axis=[0, 1, 0], texture=texture)
aroundz.append(rendz)
aroundy.append(rendy)
aroundz = np.hstack(aroundz)
aroundy = np.hstack(aroundy)
vps = np.vstack((aroundz, aroundy))
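        # convert2np is assumed to come from the project's visualization
        # utilities; it is not imported in this file.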
img = np.transpose(convert2np(batch['img'][0]), (1, 2, 0))
import matplotlib.pyplot as plt
plt.ion()
fig = plt.figure(1)
ax = fig.add_subplot(121)
ax.imshow(img)
ax.set_title('input')
ax.axis('off')
ax = fig.add_subplot(122)
ax.imshow(img_pred)
ax.set_title('pred_texture')
ax.axis('off')
plt.draw()
fig = plt.figure(2)
plt.imshow(vps)
plt.axis('off')
plt.draw()
plt.pause(0.01)
import ipdb
ipdb.set_trace()
def test(self):
opts = self.opts
bench_stats = {'ious': [], 'kp_errs': [], 'kp_vis': []}
if opts.ignore_pred_delta_v:
result_path = osp.join(opts.results_dir, 'results_meanshape.mat')
elif opts.use_sfm_ms:
result_path = osp.join(opts.results_dir,
'results_sfm_meanshape.mat')
else:
result_path = osp.join(opts.results_dir, 'results.mat')
if opts.use_sfm_camera:
result_path = result_path.replace('.mat', '_sfm_camera.mat')
print('Writing to %s' % result_path)
if not osp.exists(result_path):
n_iter = len(self.dataloader)
for i, batch in enumerate(self.dataloader):
if i % 100 == 0:
print('{}/{} evaluation iterations.'.format(i, n_iter))
if opts.max_eval_iter > 0 and (i >= opts.max_eval_iter):
break
outputs = self.predictor.predict(batch)
if opts.visualize:
self.visualize(outputs, batch)
iou, kp_err, kp_vis = self.evaluate(outputs, batch)
bench_stats['ious'].append(iou)
bench_stats['kp_errs'].append(kp_err)
bench_stats['kp_vis'].append(kp_vis)
if opts.save_visuals and (i % opts.visuals_freq == 0):
self.save_current_visuals(batch, outputs)
bench_stats['kp_errs'] = np.concatenate(bench_stats['kp_errs'])
bench_stats['kp_vis'] = np.concatenate(bench_stats['kp_vis'])
bench_stats['ious'] = np.concatenate(bench_stats['ious'])
sio.savemat(result_path, bench_stats)
else:
bench_stats = sio.loadmat(result_path)
# Report numbers.
mean_iou = bench_stats['ious'].mean()
n_vis_p = np.sum(bench_stats['kp_vis'], axis=0)
n_correct_p_pt1 = np.sum(
(bench_stats['kp_errs'] < 0.1) * bench_stats['kp_vis'], axis=0)
n_correct_p_pt15 = np.sum(
(bench_stats['kp_errs'] < 0.15) * bench_stats['kp_vis'], axis=0)
pck1 = (n_correct_p_pt1 / n_vis_p).mean()
pck15 = (n_correct_p_pt15 / n_vis_p).mean()
print('%s mean iou %.3g, pck.1 %.3g, pck.15 %.3g' %
(osp.basename(result_path), mean_iou, pck1, pck15))
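# A tiny standalone illustration of the mask-IoU computation used in
# ShapeTester.evaluate() above; the two masks are made-up example arrays.
def _example_mask_iou():
    mask_gt = np.array([[1, 1, 0, 0]], dtype=np.float32)
    mask_pred = np.array([[1, 0, 1, 0]], dtype=np.float32)
    intersection = mask_gt * mask_pred
    union = mask_gt + mask_pred - intersection
    iou = intersection.sum(1) / union.sum(1)
    return iou  # -> array([0.3333...], dtype=float32)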
def main(_):
opts.n_data_workers = 0
opts.batch_size = 1
opts.results_dir = osp.join(opts.results_dir_base, '%s' % (opts.split),
opts.name, 'epoch_%d' % opts.num_train_epoch)
if not osp.exists(opts.results_dir):
print('writing to %s' % opts.results_dir)
os.makedirs(opts.results_dir)
torch.manual_seed(0)
tester = ShapeTester(opts)
tester.init_testing()
tester.test()
if __name__ == '__main__':
app.run(main)
|
py | 1a2f72f3f29a482fec7b6f6236c8c573a02a818b | # -*- coding: utf-8 -*-
"""
Mesa Time Module
================
Objects for handling the time component of a model. In particular, this module
contains Schedulers, which handle agent activation. A Scheduler is an object
which controls when agents are called upon to act, and when.
The activation order can have a serious impact on model behavior, so it's
important to specify it explicitly. Example simple activation regimes include
activating all agents in the same order every step, shuffling the activation
order every time, activating each agent *on average* once per step, and more.
Key concepts:
Step: Many models advance in 'steps'. A step may involve the activation of
all agents, or a random (or selected) subset of them. Each agent in turn
may have their own step() method.
Time: Some models may simulate a continuous 'clock' instead of discrete
steps. However, by default, the Time is equal to the number of steps the
model has taken.
TODO: Have the schedulers use the model's randomizer, to keep random number
seeds consistent and allow for replication.
"""
import random
from collections import OrderedDict
class BaseScheduler:
""" Simplest scheduler; activates agents one at a time, in the order
they were added.
Assumes that each agent added has a *step* method which takes no arguments.
(This is explicitly meant to replicate the scheduler in MASON).
"""
def __init__(self, model):
""" Create a new, empty BaseScheduler. """
self.model = model
self.steps = 0
self.time = 0
self._agents = OrderedDict()
def add(self, agent):
""" Add an Agent object to the schedule.
Args:
agent: An Agent to be added to the schedule. NOTE: The agent must
have a step() method.
"""
self._agents[agent.unique_id] = agent
def remove(self, agent):
""" Remove all instances of a given agent from the schedule.
Args:
agent: An agent object.
"""
del self._agents[agent.unique_id]
def step(self):
""" Execute the step of all the agents, one at a time. """
agent_keys = list(self._agents.keys())
for agent_key in agent_keys:
self._agents[agent_key].step()
self.steps += 1
self.time += 1
def get_agent_count(self):
""" Returns the current number of agents in the queue. """
return len(self._agents.keys())
@property
def agents(self):
return list(self._agents.values())
class RandomActivation(BaseScheduler):
""" A scheduler which activates each agent once per step, in random order,
with the order reshuffled every step.
This is equivalent to the NetLogo 'ask agents...' and is generally the
default behavior for an ABM.
Assumes that all agents have a step(model) method.
"""
def step(self):
""" Executes the step of all agents, one at a time, in
random order.
"""
agent_keys = list(self._agents.keys())
random.shuffle(agent_keys)
for agent_key in agent_keys:
self._agents[agent_key].step()
self.steps += 1
self.time += 1
class SimultaneousActivation(BaseScheduler):
""" A scheduler to simulate the simultaneous activation of all the agents.
This scheduler requires that each agent have two methods: step and advance.
step() activates the agent and stages any necessary changes, but does not
apply them yet. advance() then applies the changes.
"""
def step(self):
""" Step all agents, then advance them. """
agent_keys = list(self._agents.keys())
for agent_key in agent_keys:
self._agents[agent_key].step()
for agent_key in agent_keys:
self._agents[agent_key].advance()
self.steps += 1
self.time += 1
class StagedActivation(BaseScheduler):
""" A scheduler which allows agent activation to be divided into several
stages instead of a single `step` method. All agents execute one stage
before moving on to the next.
Agents must have all the stage methods implemented. Stage methods take a
model object as their only argument.
This schedule tracks steps and time separately. Time advances in fractional
increments of 1 / (# of stages), meaning that 1 step = 1 unit of time.
"""
def __init__(self, model, stage_list=None, shuffle=False,
shuffle_between_stages=False):
""" Create an empty Staged Activation schedule.
Args:
model: Model object associated with the schedule.
stage_list: List of strings of names of stages to run, in the
order to run them in.
shuffle: If True, shuffle the order of agents each step.
shuffle_between_stages: If True, shuffle the agents after each
stage; otherwise, only shuffle at the start
of each step.
"""
super().__init__(model)
self.stage_list = ["step"] if not stage_list else stage_list
self.shuffle = shuffle
self.shuffle_between_stages = shuffle_between_stages
self.stage_time = 1 / len(self.stage_list)
def step(self):
""" Executes all the stages for all agents. """
agent_keys = list(self._agents.keys())
if self.shuffle:
random.shuffle(agent_keys)
for stage in self.stage_list:
for agent_key in agent_keys:
getattr(self._agents[agent_key], stage)() # Run stage
if self.shuffle_between_stages:
random.shuffle(agent_keys)
self.time += self.stage_time
self.steps += 1
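# A minimal, self-contained sketch of driving one of the schedulers above; the
# _DummyModel/_CountingAgent classes are illustrative stand-ins, not part of
# this module or of any public Mesa API.
def _example_random_activation():
    class _DummyModel:
        pass
    class _CountingAgent:
        def __init__(self, unique_id):
            self.unique_id = unique_id
            self.count = 0
        def step(self):
            self.count += 1
    schedule = RandomActivation(_DummyModel())
    for i in range(3):
        schedule.add(_CountingAgent(i))
    schedule.step()  # each agent's step() runs once, in shuffled order
    return schedule.steps, schedule.time, schedule.get_agent_count()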
|
py | 1a2f7375731ff790c98d9989c7a03f71c3314ff6 | #!/usr/bin/env python3
# Copyright (c) 2015-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Utilities for doing coverage analysis on the RPC interface.
Provides a way to track which RPC commands are exercised during
testing.
"""
import os
REFERENCE_FILENAME = 'rpc_interface.txt'
class AuthServiceProxyWrapper(object):
"""
An object that wraps AuthServiceProxy to record specific RPC calls.
"""
def __init__(self, auth_service_proxy_instance, coverage_logfile=None):
"""
Kwargs:
auth_service_proxy_instance (AuthServiceProxy): the instance
being wrapped.
coverage_logfile (str): if specified, write each service_name
out to a file when called.
"""
self.auth_service_proxy_instance = auth_service_proxy_instance
self.coverage_logfile = coverage_logfile
def __getattr__(self, *args, **kwargs):
return_val = self.auth_service_proxy_instance.__getattr__(
*args, **kwargs)
return AuthServiceProxyWrapper(return_val, self.coverage_logfile)
def __call__(self, *args, **kwargs):
"""
Delegates to AuthServiceProxy, then writes the particular RPC method
called to a file.
"""
return_val = self.auth_service_proxy_instance.__call__(*args, **kwargs)
rpc_method = self.auth_service_proxy_instance._service_name
if self.coverage_logfile:
with open(self.coverage_logfile, 'a+', encoding='utf8') as f:
f.write("%s\n" % rpc_method)
return return_val
@property
def url(self):
return self.auth_service_proxy_instance.url
def __truediv__(self, relative_uri):
return AuthServiceProxyWrapper(self.auth_service_proxy_instance / relative_uri)
def get_filename(dirname, n_node):
"""
Get a filename unique to the test process ID and node.
This file will contain a list of RPC commands covered.
"""
pid = str(os.getpid())
return os.path.join(
dirname, "coverage.pid%s.node%s.txt" % (pid, str(n_node)))
def write_all_rpc_commands(dirname, node):
"""
Write out a list of all RPC functions available in `luk-cli` for
coverage comparison. This will only happen once per coverage
directory.
Args:
dirname (str): temporary test dir
node (AuthServiceProxy): client
Returns:
bool. if the RPC interface file was written.
"""
filename = os.path.join(dirname, REFERENCE_FILENAME)
if os.path.isfile(filename):
return False
help_output = node.help().split('\n')
commands = set()
for line in help_output:
line = line.strip()
# Ignore blanks and headers
if line and not line.startswith('='):
commands.add("%s\n" % line.split()[0])
with open(filename, 'w', encoding='utf8') as f:
f.writelines(list(commands))
return True
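# A small sketch of how AuthServiceProxyWrapper records RPC calls; _FakeProxy
# is an illustrative stand-in for AuthServiceProxy (only the attributes this
# module touches are mimicked), and the RPC name used is arbitrary.
def _example_record_rpc_coverage(dirname):
    class _FakeProxy(object):
        def __init__(self, service_name=None):
            self._service_name = service_name
        def __getattr__(self, name):
            return _FakeProxy(name)
        def __call__(self, *args, **kwargs):
            return None
    logfile = get_filename(dirname, 0)
    wrapped = AuthServiceProxyWrapper(_FakeProxy(), coverage_logfile=logfile)
    wrapped.getblockcount()  # appends "getblockcount" to the coverage log
    return logfile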
|
py | 1a2f74bdf6cc1883c15b2549c51e4d7c863199ec | import sys
import os
sys.path.append(os.path.join(os.path.dirname(__file__), '../../../libbeat/tests/system'))
from beat.beat import TestCase
class BaseTest(TestCase):
@classmethod
def setUpClass(self):
self.beat_name = "heartbeat"
self.beat_path = os.path.abspath(os.path.join(os.path.dirname(__file__), "../../"))
|
py | 1a2f75412a30ffd04cdf8f15b9ac5bf891894100 | # This module contains a synchronous implementation of a Channel Access client
# as three top-level functions: read, write, subscribe. They are comparatively
# simple and naive, with no caching or concurrency, and therefore less
# performant but more robust.
import getpass
import inspect
import logging
import selectors
import socket
import threading # just to make callback processing thread-safe
import time
import weakref
import caproto as ca
from .._dbr import ChannelType, SubscriptionType, field_types, native_type
from .._utils import (CaprotoError, CaprotoTimeoutError, ErrorResponseReceived,
adapt_old_callback_signature, get_environment_variables,
safe_getsockname)
from .repeater import spawn_repeater
__all__ = ('read', 'write', 'subscribe', 'block', 'interrupt',
'read_write_read')
logger = logging.getLogger('caproto.ctx')
# Make a dict to hold our tcp sockets.
sockets = {}
global_circuits = {}
_permission_to_block = [] # mutable state shared by block and interrupt
# Convenience functions that do both transport and caproto validation/ingest.
def send(circuit, command, pv_name=None):
if pv_name is not None:
tags = {'pv': pv_name}
else:
tags = None
buffers_to_send = circuit.send(command, extra=tags)
sockets[circuit].sendmsg(buffers_to_send)
def recv(circuit):
bytes_received = sockets[circuit].recv(4096)
commands, _ = circuit.recv(bytes_received)
for c in commands:
circuit.process_command(c)
return commands
def search(pv_name, udp_sock, timeout, *, max_retries=2):
# Set Broadcaster log level to match our logger.
b = ca.Broadcaster(our_role=ca.CLIENT)
b.client_address = safe_getsockname(udp_sock)
# Send registration request to the repeater
logger.debug('Registering with the Channel Access repeater.')
bytes_to_send = b.send(ca.RepeaterRegisterRequest())
env = get_environment_variables()
repeater_port = env['EPICS_CA_REPEATER_PORT']
client_address_list = ca.get_client_address_list()
local_address = ca.get_local_address()
try:
udp_sock.sendto(bytes_to_send, (local_address, repeater_port))
except OSError as exc:
raise ca.CaprotoNetworkError(
f"Failed to send to {local_address}:{repeater_port}") from exc
logger.debug("Searching for %r....", pv_name)
commands = (
ca.VersionRequest(0, ca.DEFAULT_PROTOCOL_VERSION),
ca.SearchRequest(pv_name, 0, ca.DEFAULT_PROTOCOL_VERSION))
bytes_to_send = b.send(*commands)
tags = {'role': 'CLIENT',
'our_address': b.client_address,
'direction': '--->>>'}
def send_search():
for dest in client_address_list:
tags['their_address'] = dest
b.log.debug(
'%d commands %dB',
len(commands), len(bytes_to_send), extra=tags)
try:
udp_sock.sendto(bytes_to_send, dest)
except OSError as exc:
host, port = dest
raise ca.CaprotoNetworkError(f"Failed to send to {host}:{port}") from exc
def check_timeout():
nonlocal retry_at
if time.monotonic() >= retry_at:
send_search()
retry_at = time.monotonic() + retry_timeout
if time.monotonic() - t > timeout:
raise CaprotoTimeoutError(f"Timed out while awaiting a response "
f"from the search for {pv_name!r}. Search "
f"requests were sent to this address list: "
f"{ca.get_address_list()}.")
# Initial search attempt
send_search()
# Await a search response, and keep track of registration status
retry_timeout = timeout / max((max_retries, 1))
t = time.monotonic()
retry_at = t + retry_timeout
try:
orig_timeout = udp_sock.gettimeout()
udp_sock.settimeout(retry_timeout)
while True:
try:
bytes_received, address = udp_sock.recvfrom(ca.MAX_UDP_RECV)
except socket.timeout:
check_timeout()
continue
check_timeout()
commands = b.recv(bytes_received, address)
b.process_commands(commands)
for command in commands:
if isinstance(command, ca.SearchResponse) and command.cid == 0:
address = ca.extract_address(command)
logger.debug('Found %r at %s:%d', pv_name, *address)
return address
else:
# None of the commands we have seen are a reply to our request.
# Receive more data.
continue
finally:
udp_sock.settimeout(orig_timeout)
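# A brief sketch of calling search() directly, mirroring what read()/write()
# below do internally; the PV name 'random_walk:x' and the 1-second timeout
# are illustrative assumptions.
def _example_search(pv_name='random_walk:x', timeout=1):
    udp_sock = ca.bcast_socket()
    # Must bind or getsockname() will raise on Windows (see read() below).
    udp_sock.bind(('', 0))
    try:
        udp_sock.settimeout(timeout)
        address = search(pv_name, udp_sock, timeout)
    finally:
        udp_sock.close()
    return address  # (host, port) of the server that answered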
def make_channel(pv_name, udp_sock, priority, timeout):
log = logging.LoggerAdapter(logging.getLogger('caproto.ch'), {'pv': pv_name})
address = search(pv_name, udp_sock, timeout)
try:
circuit = global_circuits[(address, priority)]
except KeyError:
circuit = global_circuits[(address, priority)] = ca.VirtualCircuit(
our_role=ca.CLIENT,
address=address,
priority=priority)
chan = ca.ClientChannel(pv_name, circuit)
new = False
if chan.circuit not in sockets:
new = True
sockets[chan.circuit] = socket.create_connection(chan.circuit.address,
timeout)
circuit.our_address = sockets[chan.circuit].getsockname()
try:
if new:
# Initialize our new TCP-based CA connection with a VersionRequest.
send(chan.circuit, ca.VersionRequest(
priority=priority,
version=ca.DEFAULT_PROTOCOL_VERSION),
pv_name)
send(chan.circuit, chan.host_name(socket.gethostname()))
send(chan.circuit, chan.client_name(getpass.getuser()))
send(chan.circuit, chan.create(), pv_name)
t = time.monotonic()
while True:
try:
commands = recv(chan.circuit)
if time.monotonic() - t > timeout:
raise socket.timeout
except socket.timeout:
raise CaprotoTimeoutError("Timeout while awaiting channel "
"creation.")
tags = {'direction': '<<<---',
'our_address': chan.circuit.our_address,
'their_address': chan.circuit.address}
for command in commands:
if isinstance(command, ca.Message):
tags['bytesize'] = len(command)
logger.debug("%r", command, extra=tags)
elif command is ca.DISCONNECTED:
raise CaprotoError('Disconnected during initialization')
if chan.states[ca.CLIENT] is ca.CONNECTED:
log.info("Channel connected.")
break
except BaseException:
sockets[chan.circuit].close()
del sockets[chan.circuit]
del global_circuits[(chan.circuit.address, chan.circuit.priority)]
raise
return chan
def _read(chan, timeout, data_type, data_count, notify, force_int_enums):
logger = chan.log
logger.debug("Detected native data_type %r.", chan.native_data_type)
ntype = native_type(chan.native_data_type) # abundance of caution
if ((ntype is ChannelType.ENUM) and
(data_type is None) and (not force_int_enums)):
logger.debug("Changing requested data_type to STRING.")
data_type = ChannelType.STRING
req = chan.read(data_type=data_type, data_count=data_count, notify=notify)
send(chan.circuit, req, chan.name)
t = time.monotonic()
while True:
try:
commands = recv(chan.circuit)
except socket.timeout:
commands = []
if time.monotonic() - t > timeout:
raise CaprotoTimeoutError("Timeout while awaiting reading.")
tags = {'direction': '<<<---',
'our_address': chan.circuit.our_address,
'their_address': chan.circuit.address}
for command in commands:
if isinstance(command, ca.Message):
tags['bytesize'] = len(command)
logger.debug("%r", command, extra=tags)
if (isinstance(command, (ca.ReadResponse, ca.ReadNotifyResponse)) and
command.ioid == req.ioid):
return command
elif isinstance(command, ca.ErrorResponse):
raise ErrorResponseReceived(command)
elif command is ca.DISCONNECTED:
raise CaprotoError('Disconnected while waiting for '
'read response')
def read(pv_name, *, data_type=None, data_count=None, timeout=1, priority=0,
notify=True, force_int_enums=False, repeater=True):
"""
Read a Channel.
Parameters
----------
pv_name : str
The PV name to read from
data_type : {'native', 'status', 'time', 'graphic', 'control'} or ChannelType or int ID, optional
Request specific data type or a class of data types, matched to the
channel's native data type. Default is Channel's native data type.
data_count : integer, optional
Requested number of values. Default is the channel's native data
count.
timeout : float, optional
Default is 1 second.
priority : 0, optional
Virtual Circuit priority. Default is 0, lowest. Highest is 99.
notify : boolean, optional
Send a ReadNotifyRequest instead of a ReadRequest. True by default.
force_int_enums : boolean, optional
Retrieve enums as integers. (Default is strings.)
repeater : boolean, optional
Spawn a Channel Access Repeater process if the port is available.
True default, as the Channel Access spec stipulates that well-behaved
clients should do this.
Returns
-------
response : ReadResponse or ReadNotifyResponse
Examples
--------
Get the value of a Channel named 'simple:A'.
>>> read('simple:A').data
array([1], dtype=int32)
Request a richer Channel Access data type that includes the timestamp, and
access the timestamp.
    >>> read('cat', data_type='time').metadata.timestamp
1570622339.042392
    A convenience method is provided for accessing the timestamp as a Python
    datetime object.
    >>> read('cat', data_type='time').metadata.stamp.as_datetime()
datetime.datetime(2019, 10, 9, 11, 58, 59, 42392)
    The requested data type may also be given as a specific Channel Access
type
>>> from caproto import ChannelType
>>> read('cat', data_type=ChannelType.CTRL_FLOAT).metadata
DBR_CTRL_FLOAT(
status=<AlarmStatus.NO_ALARM: 0>,
severity=<AlarmSeverity.NO_ALARM: 0>,
upper_disp_limit=0.0,
lower_disp_limit=0.0,
upper_alarm_limit=0.0,
upper_warning_limit=0.0,
lower_warning_limit=0.0,
lower_alarm_limit=0.0,
upper_ctrl_limit=0.0,
lower_ctrl_limit=0.0,
precision=0,
units=b'')
    or the corresponding integer identifier
>>> read('cat', data_type=30).metadata
DBR_CTRL_FLOAT(
status=<AlarmStatus.NO_ALARM: 0>,
severity=<AlarmSeverity.NO_ALARM: 0>,
upper_disp_limit=0.0,
lower_disp_limit=0.0,
upper_alarm_limit=0.0,
upper_warning_limit=0.0,
lower_warning_limit=0.0,
lower_alarm_limit=0.0,
upper_ctrl_limit=0.0,
lower_ctrl_limit=0.0,
precision=0,
units=b'')
"""
if repeater:
# As per the EPICS spec, a well-behaved client should start a
# caproto-repeater that will continue running after it exits.
spawn_repeater()
udp_sock = ca.bcast_socket()
# Must bind or getsocketname() will raise on Windows.
# See https://github.com/caproto/caproto/issues/514.
udp_sock.bind(('', 0))
try:
udp_sock.settimeout(timeout)
chan = make_channel(pv_name, udp_sock, priority, timeout)
finally:
udp_sock.close()
try:
return _read(chan, timeout, data_type=data_type, data_count=data_count,
notify=notify, force_int_enums=force_int_enums)
finally:
try:
if chan.states[ca.CLIENT] is ca.CONNECTED:
send(chan.circuit, chan.clear(), chan.name)
finally:
sockets[chan.circuit].close()
del sockets[chan.circuit]
del global_circuits[(chan.circuit.address, chan.circuit.priority)]
def subscribe(pv_name, priority=0, data_type=None, data_count=None,
low=0.0, high=0.0, to=0.0, mask=None):
"""
Define a subscription.
Parameters
----------
pv_name : string
The PV name to subscribe to
priority : integer, optional
Used by the server to triage subscription responses when under high
load. 0 is lowest; 99 is highest.
data_type : {'native', 'status', 'time', 'graphic', 'control'} or ChannelType or int ID, optional
Request specific data type or a class of data types, matched to the
channel's native data type. Default is Channel's native data type.
data_count : integer, optional
Requested number of values. Default is the channel's native data
count, which can be checked in the Channel's attribute
:attr:`native_data_count`.
low, high, to : float, optional
deprecated by Channel Access, not yet implemented by caproto
mask : SubscriptionType, optional
Subscribe to selective updates.
Examples
--------
Define a subscription on the ``random_walk:x`` PV.
>>> sub = subscribe('random_walk:x')
Add one or more user-defined callbacks to process responses.
>>> def f(sub, response):
    ...     print(response.data)
...
>>> sub.add_callback(f)
Activate the subscription and process incoming responses.
>>> sub.block()
This is a blocking operation in the sync client. (To do this on a
background thread, use the threading client.) Interrupt using Ctrl+C or
by calling :meth:`sub.interrupt()` from another thread.
The subscription may be reactivated by calling ``sub.block()`` again.
To process multiple subscriptions at once, use the *function*
:func:`block`, which takes one or more Subscriptions as arguments.
>>> block(sub1, sub2)
There is also an :func:`interrupt` function, which is merely an alias to
the method.
"""
return Subscription(pv_name, priority, data_type, data_count, low, high,
to, mask)
def interrupt():
"""
Signal to :func:`block` to stop blocking. Idempotent.
This obviously cannot be called interactively while blocked;
it is intended to be called from another thread.
"""
_permission_to_block.clear()
def block(*subscriptions, duration=None, timeout=1, force_int_enums=False,
repeater=True):
"""
Activate one or more subscriptions and process incoming responses.
Use Ctrl+C (SIGINT) to escape, or from another thread, call
:func:`interrupt()`.
Parameters
----------
*subscriptions : Subscriptions
The list of subscriptions.
duration : float, optional
How many seconds to run for. Run forever (None) by default.
timeout : float, optional
        Default is 1 second. This is not the same as `duration`; this is the timeout
for failure in the event of no connection.
force_int_enums : boolean, optional
Retrieve enums as integers. (Default is strings.)
repeater : boolean, optional
Spawn a Channel Access Repeater process if the port is available.
True default, as the Channel Access spec stipulates that well-behaved
clients should do this.
Examples
--------
Activate subscription(s) and block while they process updates.
>>> sub1 = subscribe('cat')
    >>> sub2 = subscribe('dog')
>>> block(sub1, sub2)
"""
_permission_to_block.append(object())
if duration is not None:
deadline = time.time() + duration
else:
deadline = None
if repeater:
# As per the EPICS spec, a well-behaved client should start a
# caproto-repeater that will continue running after it exits.
spawn_repeater()
loggers = {}
for sub in subscriptions:
loggers[sub.pv_name] = logging.LoggerAdapter(logging.getLogger('caproto.ch'),
{'pv': sub.pv_name})
udp_sock = ca.bcast_socket()
# Must bind or getsocketname() will raise on Windows.
# See https://github.com/caproto/caproto/issues/514.
udp_sock.bind(('', 0))
try:
udp_sock.settimeout(timeout)
channels = {}
for sub in subscriptions:
pv_name = sub.pv_name
chan = make_channel(pv_name, udp_sock, sub.priority, timeout)
channels[sub] = chan
finally:
udp_sock.close()
try:
# Subscribe to all the channels.
sub_ids = {}
for sub, chan in channels.items():
loggers[chan.name].debug("Detected native data_type %r.",
chan.native_data_type)
# abundance of caution
ntype = field_types['native'][chan.native_data_type]
if ((ntype is ChannelType.ENUM) and (not force_int_enums)):
ntype = ChannelType.STRING
time_type = field_types['time'][ntype]
# Adjust the timeout during monitoring.
sockets[chan.circuit].settimeout(None)
loggers[chan.name].debug("Subscribing with data_type %r.",
time_type)
req = chan.subscribe(
data_type=time_type, data_count=sub.data_count, mask=sub.mask)
send(chan.circuit, req, chan.name)
sub_ids[(chan.circuit, req.subscriptionid)] = sub
logger.debug('Subscribed. Building socket selector.')
try:
circuits = set(chan.circuit for chan in channels.values())
selector = selectors.DefaultSelector()
sock_to_circuit = {}
for circuit in circuits:
sock = sockets[circuit]
sock_to_circuit[sock] = circuit
selector.register(sock, selectors.EVENT_READ)
if duration is None:
logger.debug('Continuing until SIGINT is received....')
while True:
events = selector.select(timeout=0.1)
if deadline is not None and time.time() > deadline:
logger.debug('Deadline reached.')
return
if not _permission_to_block:
logger.debug("Interrupted via "
"caproto.sync.client.interrupt().")
break
for selector_key, _ in events:
circuit = sock_to_circuit[selector_key.fileobj]
commands = recv(circuit)
for response in commands:
if isinstance(response, ca.ErrorResponse):
raise ErrorResponseReceived(response)
if response is ca.DISCONNECTED:
# TODO Re-connect.
raise CaprotoError("Disconnected")
sub = sub_ids.get((circuit, response.subscriptionid))
if sub:
sub.process(response)
except KeyboardInterrupt:
logger.debug('Received SIGINT. Closing.')
pass
finally:
_permission_to_block.clear()
try:
for chan in channels.values():
if chan.states[ca.CLIENT] is ca.CONNECTED:
send(chan.circuit, chan.clear(), chan.name)
finally:
# Reinstate the timeout for channel cleanup.
for chan in channels.values():
sockets[chan.circuit].settimeout(timeout)
sockets[chan.circuit].close()
del sockets[chan.circuit]
del global_circuits[(chan.circuit.address, chan.circuit.priority)]
def _write(chan, data, metadata, timeout, data_type, notify):
logger.debug("Detected native data_type %r.", chan.native_data_type)
# abundance of caution
ntype = field_types['native'][chan.native_data_type]
if (data_type is None) and (ntype is ChannelType.ENUM):
# Change data_type to STRING if data contains string-like data, or
# iterable of string-like data
stringy_data = False
if isinstance(data, (str, bytes)):
stringy_data = True
if hasattr(data, '__getitem__') \
and len(data) > 0 \
and isinstance(data[0], (str, bytes)):
stringy_data = True
if stringy_data:
logger.debug("Will write to ENUM as data_type STRING.")
data_type = ChannelType.STRING
logger.debug("Writing.")
req = chan.write(data=data, notify=notify,
data_type=data_type, metadata=metadata)
send(chan.circuit, req, chan.name)
t = time.monotonic()
if notify:
while True:
try:
commands = recv(chan.circuit)
except socket.timeout:
commands = []
if time.monotonic() - t > timeout:
raise CaprotoTimeoutError("Timeout while awaiting write reply.")
tags = {'direction': '<<<---',
'our_address': chan.circuit.our_address,
'their_address': chan.circuit.address}
for command in commands:
if isinstance(command, ca.Message):
tags['bytesize'] = len(command)
logger.debug("%r", command, extra=tags)
if (isinstance(command, ca.WriteNotifyResponse) and
command.ioid == req.ioid):
response = command
break
elif isinstance(command, ca.ErrorResponse):
raise ErrorResponseReceived(command)
elif command is ca.DISCONNECTED:
raise CaprotoError('Disconnected while waiting for '
'write response')
else:
continue
break
return response
else:
return None
def write(pv_name, data, *, notify=False, data_type=None, metadata=None,
timeout=1, priority=0,
repeater=True):
"""
Write to a Channel.
Parameters
----------
pv_name : str
The PV name to write to
data : str, bytes, int, or float or any Iterable of these
Value(s) to write.
notify : boolean, optional
Request notification of completion and wait for it. False by default.
data_type : {'native', 'status', 'time', 'graphic', 'control'} or ChannelType or int ID, optional
Write as specific data type. Default is inferred from input.
metadata : ``ctypes.BigEndianStructure`` or tuple
Status and control metadata for the values
timeout : float, optional
Default is 1 second.
priority : 0, optional
Virtual Circuit priority. Default is 0, lowest. Highest is 99.
repeater : boolean, optional
Spawn a Channel Access Repeater process if the port is available.
True default, as the Channel Access spec stipulates that well-behaved
clients should do this.
Returns
-------
    response : WriteNotifyResponse or None
        ``None`` unless ``notify=True``.
Examples
--------
Write the value 5 to a Channel named 'simple:A'.
>>> write('simple:A', 5) # returns None
Request notification of completion ("put completion") and wait for it.
>>> write('cat', 5, notify=True) # blocks until complete, then returns:
WriteNotifyResponse(
data_type=<ChannelType.LONG: 5>,
data_count=1,
status=CAStatusCode(
name='ECA_NORMAL', code=0, code_with_severity=1,
severity=<CASeverity.SUCCESS: 1>,
success=1, defunct=False,
description='Normal successful completion'),
ioid=0)
"""
if repeater:
# As per the EPICS spec, a well-behaved client should start a
# caproto-repeater that will continue running after it exits.
spawn_repeater()
udp_sock = ca.bcast_socket()
# Must bind or getsocketname() will raise on Windows.
# See https://github.com/caproto/caproto/issues/514.
udp_sock.bind(('', 0))
try:
udp_sock.settimeout(timeout)
chan = make_channel(pv_name, udp_sock, priority, timeout)
finally:
udp_sock.close()
try:
return _write(chan, data, metadata, timeout, data_type, notify)
finally:
try:
if chan.states[ca.CLIENT] is ca.CONNECTED:
send(chan.circuit, chan.clear(), chan.name)
finally:
sockets[chan.circuit].close()
del sockets[chan.circuit]
del global_circuits[(chan.circuit.address, chan.circuit.priority)]
def read_write_read(pv_name, data, *, notify=False,
read_data_type=None, write_data_type=None,
metadata=None, timeout=1, priority=0,
force_int_enums=False, repeater=True):
"""
    Write to a Channel, but sandwich the write between two reads.
This is what the command-line utilities ``caproto-put`` and ``caput`` do.
Notice that if you want the second reading to reflect the written value,
you should pass the parameter ``notify=True``. (This is also true of
``caproto-put``/``caput``, which needs the ``-c`` argument to behave the
way you might expect it to behave.)
This is provided as a separate function in order to support ``caproto-put``
efficiently. Making separate calls to :func:`read` and :func:`write` would
re-create a connection redundantly.
Parameters
----------
pv_name : str
The PV name to write/read/write
data : str, bytes, int, or float or any Iterable of these
Value to write.
notify : boolean, optional
Request notification of completion and wait for it. False by default.
read_data_type : {'native', 'status', 'time', 'graphic', 'control'} or ChannelType or int ID, optional
Request specific data type.
write_data_type : {'native', 'status', 'time', 'graphic', 'control'} or ChannelType or int ID, optional
Write as specific data type. Default is inferred from input.
metadata : ``ctypes.BigEndianStructure`` or tuple
Status and control metadata for the values
timeout : float, optional
Default is 1 second.
priority : 0, optional
Virtual Circuit priority. Default is 0, lowest. Highest is 99.
force_int_enums : boolean, optional
Retrieve enums as integers. (Default is strings.)
repeater : boolean, optional
Spawn a Channel Access Repeater process if the port is available.
True default, as the Channel Access spec stipulates that well-behaved
clients should do this.
Returns
-------
    initial, write_response, final : tuple of responses
The middle response comes from the write, and it will be ``None`` unless
``notify=True``.
Examples
--------
Write the value 5 to a Channel named 'simple:A'.
>>> read_write_read('cat', 5) # returns initial, None, final
Request notification of completion ("put completion") and wait for it.
>>> read_write_read('cat', 5, notify=True) # initial, WriteNotifyResponse, final
"""
if repeater:
# As per the EPICS spec, a well-behaved client should start a
# caproto-repeater that will continue running after it exits.
spawn_repeater()
udp_sock = ca.bcast_socket()
# Must bind or getsocketname() will raise on Windows.
# See https://github.com/caproto/caproto/issues/514.
udp_sock.bind(('', 0))
try:
udp_sock.settimeout(timeout)
chan = make_channel(pv_name, udp_sock, priority, timeout)
finally:
udp_sock.close()
try:
initial = _read(chan, timeout, read_data_type, None, notify=True,
force_int_enums=force_int_enums)
res = _write(chan, data, metadata, timeout, write_data_type, notify)
final = _read(chan, timeout, read_data_type, None, notify=True,
force_int_enums=force_int_enums)
finally:
try:
if chan.states[ca.CLIENT] is ca.CONNECTED:
send(chan.circuit, chan.clear(), chan.name)
finally:
sockets[chan.circuit].close()
del sockets[chan.circuit]
del global_circuits[(chan.circuit.address, chan.circuit.priority)]
return initial, res, final
class Subscription:
"""
This object encapsulates state related to a Subscription.
See the :func:`subscribe` function.
"""
def __init__(self, pv_name, priority=0, data_type=None, data_count=None,
low=0.0, high=0.0, to=0.0, mask=None):
if mask is None:
mask = SubscriptionType.DBE_VALUE | SubscriptionType.DBE_ALARM
self.pv_name = pv_name
self.priority = priority
self.data_type = data_type
self.data_count = data_count
self.low = low
self.high = high
self.to = to
self.mask = mask
self.callbacks = {}
self._callback_id = 0
self._callback_lock = threading.RLock()
# This is related to back-compat for user callbacks that have the old
# signature, f(response).
self.__wrapper_weakrefs = set()
def block(self, duration=None, timeout=1,
force_int_enums=False,
repeater=True):
"""
Activate one or more subscriptions and process incoming responses.
Use Ctrl+C (SIGINT) to escape, or from another thread, call
:meth:`interrupt()`.
Convenience alias for the top-level function :func:`block`, which may
be used to process multiple Subscriptions concurrently.
Parameters
----------
duration : float, optional
How many seconds to run for. Run forever (None) by default.
timeout : float, optional
            Default is 1 second. This is not the same as `duration`; this is the
timeout for failure in the event of no connection.
force_int_enums : boolean, optional
Retrieve enums as integers. (Default is strings.)
repeater : boolean, optional
Spawn a Channel Access Repeater process if the port is available.
True default, as the Channel Access spec stipulates that
well-behaved clients should do this.
"""
block(self, duration=duration, timeout=timeout,
force_int_enums=force_int_enums,
repeater=repeater)
def interrupt(self):
"""
Signal to block() to stop blocking. Idempotent.
This obviously cannot be called interactively while blocked;
it is intended to be called from another thread.
This method is a convenience alias for the top-level function
:func:`interrupt`.
"""
interrupt()
def add_callback(self, func):
"""
Add a callback to receive responses.
Parameters
----------
func : callable
Expected signature: ``func(sub, response)``.
The signature ``func(response)`` is also supported for
backward-compatibility but will issue warnings. Support will be
removed in a future release of caproto.
Returns
-------
token : int
Integer token that can be passed to :meth:`remove_callback`.
.. versionchanged:: 0.5.0
Changed the expected signature of ``func`` from ``func(response)``
to ``func(sub, response)``.
"""
func = adapt_old_callback_signature(func, self.__wrapper_weakrefs)
def removed(_):
self.remove_callback(cb_id)
if inspect.ismethod(func):
ref = weakref.WeakMethod(func, removed)
else:
# TODO: strong reference to non-instance methods?
ref = weakref.ref(func, removed)
with self._callback_lock:
cb_id = self._callback_id
self._callback_id += 1
self.callbacks[cb_id] = ref
return cb_id
def remove_callback(self, cb_id):
"""
Remove callback using token that was returned by :meth:`add_callback`.
"""
with self._callback_lock:
self.callbacks.pop(cb_id, None)
def process(self, response):
"""
Run the callbacks on a response.
This is used internally by :func:`block()`, generally not called by the
user.
"""
to_remove = []
with self._callback_lock:
callbacks = list(self.callbacks.items())
for cb_id, ref in callbacks:
callback = ref()
if callback is None:
to_remove.append(cb_id)
continue
callback(self, response)
with self._callback_lock:
for remove_id in to_remove:
self.callbacks.pop(remove_id, None)
def clear(self):
"""
Remove all callbacks. If currently blocking, interrupt.
"""
interrupt()
with self._callback_lock:
for cb_id in list(self.callbacks):
self.remove_callback(cb_id)
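# A compact end-to-end sketch tying together subscribe(), add_callback(), and
# Subscription.block(); the PV name 'random_walk:x' and the 5-second duration
# are illustrative assumptions and require a reachable IOC serving that PV.
def _example_monitor(pv_name='random_walk:x', duration=5):
    responses = []
    def accumulate(sub, response):
        responses.append(response)
    sub = subscribe(pv_name)
    sub.add_callback(accumulate)
    sub.block(duration=duration)  # returns after `duration` seconds
    return responses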
|
py | 1a2f762ca989b7431a3a975f650627d70488c10b | import logging
import os
from django.core.files.base import ContentFile
from django.utils.timezone import now
from django.utils.translation import ugettext as _
from django_scopes import scopes_disabled
from pretix.base.i18n import language
from pretix.base.models import (
CachedCombinedTicket, CachedTicket, Event, InvoiceAddress, Order,
OrderPosition,
)
from pretix.base.services.tasks import EventTask, ProfiledTask
from pretix.base.settings import PERSON_NAME_SCHEMES
from pretix.base.signals import allow_ticket_download, register_ticket_outputs
from pretix.celery_app import app
from pretix.helpers.database import rolledback_transaction
logger = logging.getLogger(__name__)
def generate_orderposition(order_position: int, provider: str):
order_position = OrderPosition.objects.select_related('order', 'order__event').get(id=order_position)
with language(order_position.order.locale):
responses = register_ticket_outputs.send(order_position.order.event)
for receiver, response in responses:
prov = response(order_position.order.event)
if prov.identifier == provider:
filename, ttype, data = prov.generate(order_position)
path, ext = os.path.splitext(filename)
for ct in CachedTicket.objects.filter(order_position=order_position, provider=provider):
ct.delete()
ct = CachedTicket.objects.create(order_position=order_position, provider=provider,
extension=ext, type=ttype, file=None)
ct.file.save(filename, ContentFile(data))
return ct.pk
def generate_order(order: int, provider: str):
order = Order.objects.select_related('event').get(id=order)
with language(order.locale):
responses = register_ticket_outputs.send(order.event)
for receiver, response in responses:
prov = response(order.event)
if prov.identifier == provider:
filename, ttype, data = prov.generate_order(order)
if ttype == 'text/uri-list':
continue
path, ext = os.path.splitext(filename)
for ct in CachedCombinedTicket.objects.filter(order=order, provider=provider):
ct.delete()
ct = CachedCombinedTicket.objects.create(order=order, provider=provider, extension=ext,
type=ttype, file=None)
ct.file.save(filename, ContentFile(data))
return ct.pk
@app.task(base=ProfiledTask)
def generate(model: str, pk: int, provider: str):
with scopes_disabled():
if model == 'order':
return generate_order(pk, provider)
elif model == 'orderposition':
return generate_orderposition(pk, provider)
class DummyRollbackException(Exception):
pass
def preview(event: int, provider: str):
event = Event.objects.get(id=event)
with rolledback_transaction(), language(event.settings.locale):
item = event.items.create(name=_("Sample product"), default_price=42.23,
description=_("Sample product description"))
item2 = event.items.create(name=_("Sample workshop"), default_price=23.40)
from pretix.base.models import Order
order = event.orders.create(status=Order.STATUS_PENDING, datetime=now(),
email='[email protected]',
locale=event.settings.locale,
expires=now(), code="PREVIEW1234", total=119)
scheme = PERSON_NAME_SCHEMES[event.settings.name_scheme]
sample = {k: str(v) for k, v in scheme['sample'].items()}
p = order.positions.create(item=item, attendee_name_parts=sample, price=item.default_price)
s = event.subevents.first()
order.positions.create(item=item2, attendee_name_parts=sample, price=item.default_price, addon_to=p, subevent=s)
order.positions.create(item=item2, attendee_name_parts=sample, price=item.default_price, addon_to=p, subevent=s)
InvoiceAddress.objects.create(order=order, name_parts=sample, company=_("Sample company"))
responses = register_ticket_outputs.send(event)
for receiver, response in responses:
prov = response(event)
if prov.identifier == provider:
return prov.generate(p)
def get_tickets_for_order(order, base_position=None):
can_download = all([r for rr, r in allow_ticket_download.send(order.event, order=order)])
if not can_download:
return []
if not order.ticket_download_available:
return []
providers = [
response(order.event)
for receiver, response
in register_ticket_outputs.send(order.event)
]
tickets = []
positions = list(order.positions_with_tickets)
if base_position:
# Only the given position and its children
positions = [
p for p in positions if p.pk == base_position.pk or p.addon_to_id == base_position.pk
]
for p in providers:
if not p.is_enabled:
continue
if p.multi_download_enabled and not base_position:
try:
if len(positions) == 0:
continue
ct = CachedCombinedTicket.objects.filter(
order=order, provider=p.identifier, file__isnull=False
).last()
if not ct or not ct.file:
retval = generate_order(order.pk, p.identifier)
if not retval:
continue
ct = CachedCombinedTicket.objects.get(pk=retval)
tickets.append((
"{}-{}-{}{}".format(
order.event.slug.upper(), order.code, ct.provider, ct.extension,
),
ct
))
except:
logger.exception('Failed to generate ticket.')
else:
for pos in positions:
try:
ct = CachedTicket.objects.filter(
order_position=pos, provider=p.identifier, file__isnull=False
).last()
if not ct or not ct.file:
retval = generate_orderposition(pos.pk, p.identifier)
if not retval:
continue
ct = CachedTicket.objects.get(pk=retval)
if ct.type == 'text/uri-list':
continue
tickets.append((
"{}-{}-{}-{}{}".format(
order.event.slug.upper(), order.code, pos.positionid, ct.provider, ct.extension,
),
ct
))
except:
logger.exception('Failed to generate ticket.')
return tickets
@app.task(base=EventTask)
def invalidate_cache(event: Event, item: int=None, provider: str=None, order: int=None, **kwargs):
qs = CachedTicket.objects.filter(order_position__order__event=event)
qsc = CachedCombinedTicket.objects.filter(order__event=event)
if item:
qs = qs.filter(order_position__item_id=item)
if provider:
qs = qs.filter(provider=provider)
qsc = qsc.filter(provider=provider)
if order:
qs = qs.filter(order_position__order_id=order)
qsc = qsc.filter(order_id=order)
for ct in qs:
ct.delete()
for ct in qsc:
ct.delete()
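# A short sketch of how the tasks above are typically queued from application
# code; the provider identifier 'pdf' is an assumption (it depends on which
# ticket output plugins are installed), and passing the event's pk follows the
# usual EventTask calling convention.
def _example_regenerate_tickets(order):
    # Drop any cached ticket files for this order, then queue regeneration.
    invalidate_cache.apply_async(kwargs={'event': order.event.pk,
                                         'order': order.pk})
    return generate.apply_async(args=('order', order.pk, 'pdf'))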
|
py | 1a2f76a94c5ed91ed2dc7945d88a25a1c51187d6 |
"""A set of wrapper functions for accessing the eBusd API."""
from .ebusdpy import (init, read, write, raw)
|
py | 1a2f770c5e85f92efca2174d52315a2cd4636e73 | from sympy import symbols, plot_implicit
x, y = symbols('x y')
p8 = plot_implicit(y - 1, y_var=y)
p9 = plot_implicit(x - 1, x_var=x)
|
py | 1a2f773d17b0bffa95ae6f4c9e858e9bf0c41348 | # --------------------------------------------------------
# Tensorflow Faster R-CNN
# Licensed under The MIT License [see LICENSE for details]
# Written by Jiasen Lu, Jianwei Yang, based on code from Ross Girshick
# --------------------------------------------------------
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import _init_paths
import os
import sys
import numpy as np
import argparse
import pprint
import pdb
import time
import cv2
import torch
from torch.autograd import Variable
import torch.nn as nn
import torch.optim as optim
import pickle
from roi_data_layer.roidb import combined_roidb
from roi_data_layer.roibatchLoader import roibatchLoader
from model.utils.config import cfg, cfg_from_file, cfg_from_list, get_output_dir
from model.rpn.bbox_transform import clip_boxes
from model.nms.nms_wrapper import nms
from model.rpn.bbox_transform import bbox_transform_inv
from model.utils.net_utils import save_net, load_net, vis_detections
from model.faster_rcnn.vgg16 import vgg16
from model.faster_rcnn.resnet import resnet
import pdb
try:
xrange # Python 2
except NameError:
xrange = range # Python 3
def parse_args():
"""
Parse input arguments
"""
  parser = argparse.ArgumentParser(description='Test a Fast R-CNN network')
parser.add_argument('--dataset', dest='dataset',
help='training dataset',
default='pascal_voc', type=str)
parser.add_argument('--cfg', dest='cfg_file',
help='optional config file',
default='cfgs/vgg16.yml', type=str)
parser.add_argument('--net', dest='net',
help='vgg16, res50, res101, res152',
default='res101', type=str)
parser.add_argument('--set', dest='set_cfgs',
help='set config keys', default=None,
nargs=argparse.REMAINDER)
parser.add_argument('--load_dir', dest='load_dir',
help='directory to load models', default="/srv/share/jyang375/models",
type=str)
parser.add_argument('--cuda', dest='cuda',
help='whether use CUDA',
action='store_true')
parser.add_argument('--ls', dest='large_scale',
                      help='whether use large image scale',
action='store_true')
parser.add_argument('--mGPUs', dest='mGPUs',
help='whether use multiple GPUs',
action='store_true')
parser.add_argument('--cag', dest='class_agnostic',
help='whether perform class_agnostic bbox regression',
action='store_true')
parser.add_argument('--parallel_type', dest='parallel_type',
help='which part of model to parallel, 0: all, 1: model before roi pooling',
default=0, type=int)
parser.add_argument('--checksession', dest='checksession',
help='checksession to load model',
default=1, type=int)
parser.add_argument('--checkepoch', dest='checkepoch',
help='checkepoch to load network',
default=1, type=int)
parser.add_argument('--checkpoint', dest='checkpoint',
help='checkpoint to load network',
default=10021, type=int)
parser.add_argument('--vis', dest='vis',
help='visualization mode',
action='store_true')
args = parser.parse_args()
return args
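# A typical invocation of this evaluation script (the script file name and the
# checkpoint session/epoch/iteration values below are illustrative; they must
# match a model actually saved under --load_dir):
#   python test_net.py --dataset pascal_voc --net res101 --cuda \
#       --checksession 1 --checkepoch 1 --checkpoint 10021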
lr = cfg.TRAIN.LEARNING_RATE
momentum = cfg.TRAIN.MOMENTUM
weight_decay = cfg.TRAIN.WEIGHT_DECAY
if __name__ == '__main__':
args = parse_args()
print('Called with args:')
print(args)
if torch.cuda.is_available() and not args.cuda:
print("WARNING: You have a CUDA device, so you should probably run with --cuda")
np.random.seed(cfg.RNG_SEED)
if args.dataset == "pascal_voc":
args.imdb_name = "voc_2007_trainval"
args.imdbval_name = "voc_2007_test"
args.set_cfgs = ['ANCHOR_SCALES', '[8, 16, 32]', 'ANCHOR_RATIOS', '[0.5,1,2]']
elif args.dataset == "pascal_voc_0712":
args.imdb_name = "voc_2007_trainval+voc_2012_trainval"
args.imdbval_name = "voc_2007_test"
args.set_cfgs = ['ANCHOR_SCALES', '[8, 16, 32]', 'ANCHOR_RATIOS', '[0.5,1,2]']
elif args.dataset == "coco":
args.imdb_name = "coco_2014_train+coco_2014_valminusminival"
args.imdbval_name = "coco_2014_minival"
args.set_cfgs = ['ANCHOR_SCALES', '[4, 8, 16, 32]', 'ANCHOR_RATIOS', '[0.5,1,2]']
elif args.dataset == "imagenet":
args.imdb_name = "imagenet_train"
args.imdbval_name = "imagenet_val"
args.set_cfgs = ['ANCHOR_SCALES', '[8, 16, 32]', 'ANCHOR_RATIOS', '[0.5,1,2]']
elif args.dataset == "imagenet_vid":
args.imdb_name = "imagenet_vid_train+imagenet_det_train"
args.imdbval_name = "imagenet_vid_val"
args.set_cfgs = ['ANCHOR_SCALES', '[4, 8, 16, 32]', 'ANCHOR_RATIOS', '[0.5,1,2]']
elif args.dataset == "vg":
args.imdb_name = "vg_150-50-50_minitrain"
args.imdbval_name = "vg_150-50-50_minival"
args.set_cfgs = ['ANCHOR_SCALES', '[4, 8, 16, 32]', 'ANCHOR_RATIOS', '[0.5,1,2]']
args.cfg_file = "cfgs/{}_ls.yml".format(args.net) if args.large_scale else "cfgs/{}.yml".format(args.net)
if args.cfg_file is not None:
cfg_from_file(args.cfg_file)
if args.set_cfgs is not None:
cfg_from_list(args.set_cfgs)
print('Using config:')
pprint.pprint(cfg)
cfg.TRAIN.USE_FLIPPED = False
imdb, roidb, ratio_list, ratio_index = combined_roidb(args.imdbval_name, False)
imdb.competition_mode(on=True)
print('{:d} roidb entries'.format(len(roidb)))
input_dir = args.load_dir + "/" + args.net + "/" + args.dataset
if not os.path.exists(input_dir):
raise Exception('There is no input directory for loading network from ' + input_dir)
load_name = os.path.join(input_dir,
'faster_rcnn_{}_{}_{}.pth'.format(args.checksession, args.checkepoch, args.checkpoint))
  # initialize the network here.
if args.net == 'vgg16':
fasterRCNN = vgg16(imdb.classes, pretrained=False, class_agnostic=args.class_agnostic)
elif args.net == 'res101':
fasterRCNN = resnet(imdb.classes, 101, pretrained=False, class_agnostic=args.class_agnostic)
elif args.net == 'res50':
fasterRCNN = resnet(imdb.classes, 50, pretrained=False, class_agnostic=args.class_agnostic)
elif args.net == 'res152':
fasterRCNN = resnet(imdb.classes, 152, pretrained=False, class_agnostic=args.class_agnostic)
else:
print("network is not defined")
pdb.set_trace()
fasterRCNN.create_architecture()
print("load checkpoint %s" % (load_name))
checkpoint = torch.load(load_name)
fasterRCNN.load_state_dict(checkpoint['model'])
if 'pooling_mode' in checkpoint.keys():
cfg.POOLING_MODE = checkpoint['pooling_mode']
print('load model successfully!')
  # initialize the tensor holder here.
im_data = torch.FloatTensor(1)
im_info = torch.FloatTensor(1)
num_boxes = torch.LongTensor(1)
gt_boxes = torch.FloatTensor(1)
# ship to cuda
if args.cuda:
im_data = im_data.cuda()
im_info = im_info.cuda()
num_boxes = num_boxes.cuda()
gt_boxes = gt_boxes.cuda()
# make variable
im_data = Variable(im_data, volatile=True)
im_info = Variable(im_info, volatile=True)
num_boxes = Variable(num_boxes, volatile=True)
gt_boxes = Variable(gt_boxes, volatile=True)
if args.cuda:
cfg.CUDA = True
if args.cuda:
fasterRCNN.cuda()
start = time.time()
max_per_image = 100
vis = args.vis
if vis:
thresh = 0.05
else:
thresh = 0.0
save_name = 'faster_rcnn_10'
num_images = len(imdb.image_index)
all_boxes = [[[] for _ in xrange(num_images)]
for _ in xrange(imdb.num_classes)]
output_dir = get_output_dir(imdb, save_name)
dataset = roibatchLoader(roidb, ratio_list, ratio_index, 1, \
imdb.num_classes, training=False, normalize = False)
dataloader = torch.utils.data.DataLoader(dataset, batch_size=1,
shuffle=False, num_workers=0,
pin_memory=True)
data_iter = iter(dataloader)
_t = {'im_detect': time.time(), 'misc': time.time()}
det_file = os.path.join(output_dir, 'detections.pkl')
fasterRCNN.eval()
empty_array = np.transpose(np.array([[],[],[],[],[]]), (1,0))
for i in range(num_images):
data = next(data_iter)
im_data.data.resize_(data[0].size()).copy_(data[0])
im_info.data.resize_(data[1].size()).copy_(data[1])
gt_boxes.data.resize_(data[2].size()).copy_(data[2])
num_boxes.data.resize_(data[3].size()).copy_(data[3])
det_tic = time.time()
rois, cls_prob, bbox_pred, \
rpn_loss_cls, rpn_loss_box, \
RCNN_loss_cls, RCNN_loss_bbox, \
rois_label = fasterRCNN(im_data, im_info, gt_boxes, num_boxes)
scores = cls_prob.data
boxes = rois.data[:, :, 1:5]
if cfg.TEST.BBOX_REG:
# Apply bounding-box regression deltas
box_deltas = bbox_pred.data
if cfg.TRAIN.BBOX_NORMALIZE_TARGETS_PRECOMPUTED:
# Optionally normalize targets by a precomputed mean and stdev
if args.class_agnostic:
box_deltas = box_deltas.view(-1, 4) * torch.FloatTensor(cfg.TRAIN.BBOX_NORMALIZE_STDS).cuda() \
+ torch.FloatTensor(cfg.TRAIN.BBOX_NORMALIZE_MEANS).cuda()
box_deltas = box_deltas.view(1, -1, 4)
else:
box_deltas = box_deltas.view(-1, 4) * torch.FloatTensor(cfg.TRAIN.BBOX_NORMALIZE_STDS).cuda() \
+ torch.FloatTensor(cfg.TRAIN.BBOX_NORMALIZE_MEANS).cuda()
box_deltas = box_deltas.view(1, -1, 4 * len(imdb.classes))
pred_boxes = bbox_transform_inv(boxes, box_deltas, 1)
pred_boxes = clip_boxes(pred_boxes, im_info.data, 1)
else:
# Simply repeat the boxes, once for each class
pred_boxes = np.tile(boxes, (1, scores.shape[1]))
pred_boxes /= data[1][0][2]
scores = scores.squeeze()
pred_boxes = pred_boxes.squeeze()
det_toc = time.time()
detect_time = det_toc - det_tic
misc_tic = time.time()
if vis:
im = cv2.imread(imdb.image_path_at(i))
im2show = np.copy(im)
for j in xrange(1, imdb.num_classes):
inds = torch.nonzero(scores[:,j]>thresh).view(-1)
        # if there are detections for this class
if inds.numel() > 0:
cls_scores = scores[:,j][inds]
_, order = torch.sort(cls_scores, 0, True)
if args.class_agnostic:
cls_boxes = pred_boxes[inds, :]
else:
cls_boxes = pred_boxes[inds][:, j * 4:(j + 1) * 4]
cls_dets = torch.cat((cls_boxes, cls_scores.unsqueeze(1)), 1)
# cls_dets = torch.cat((cls_boxes, cls_scores), 1)
cls_dets = cls_dets[order]
keep = nms(cls_dets, cfg.TEST.NMS)
cls_dets = cls_dets[keep.view(-1).long()]
if vis:
im2show = vis_detections(im2show, imdb.classes[j], cls_dets.cpu().numpy(), 0.3)
all_boxes[j][i] = cls_dets.cpu().numpy()
else:
all_boxes[j][i] = empty_array
# Limit to max_per_image detections *over all classes*
if max_per_image > 0:
image_scores = np.hstack([all_boxes[j][i][:, -1]
for j in xrange(1, imdb.num_classes)])
if len(image_scores) > max_per_image:
image_thresh = np.sort(image_scores)[-max_per_image]
for j in xrange(1, imdb.num_classes):
keep = np.where(all_boxes[j][i][:, -1] >= image_thresh)[0]
all_boxes[j][i] = all_boxes[j][i][keep, :]
misc_toc = time.time()
nms_time = misc_toc - misc_tic
sys.stdout.write('im_detect: {:d}/{:d} {:.3f}s {:.3f}s \r' \
.format(i + 1, num_images, detect_time, nms_time))
sys.stdout.flush()
if vis:
cv2.imwrite('result.png', im2show)
pdb.set_trace()
#cv2.imshow('test', im2show)
#cv2.waitKey(0)
with open(det_file, 'wb') as f:
pickle.dump(all_boxes, f, pickle.HIGHEST_PROTOCOL)
# print('Evaluating detections')
# imdb.evaluate_detections(all_boxes, output_dir)
end = time.time()
print("test time: %0.4fs" % (end - start))
|
py | 1a2f77a4a0f8500359799f5a5f44fb0998390381 | #!/usr/bin/env python
# Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import unittest
import tempfile
from functools import reduce
import numpy
import scipy.linalg
from pyscf import gto
from pyscf import lib
import pyscf.lib.parameters as param
mol0 = gto.Mole()
mol0.atom = [
[1 , (0.,1.,1.)],
["O1", (0.,0.,0.)],
[1 , (1.,1.,0.)], ]
mol0.nucmod = { "O":'gaussian', 3:'g' }
mol0.unit = 'ang'
mol0.basis = {
"O": [(0, 0, (15, 1)), ] + gto.etbs(((0, 4, 1, 1.8),
(1, 3, 2, 1.8),
(2, 2, 1, 1.8),)),
"H": [(0, 0, (1, 1, 0), (3, 3, 1), (5, 1, 0)),
(1, -2, (1, 1)), ]}
mol0.symmetry = 1
mol0.charge = 1
mol0.spin = 1
mol0.verbose = 7
mol0.ecp = {'O1': 'lanl2dz'}
ftmp = tempfile.NamedTemporaryFile()
mol0.output = ftmp.name
mol0.build()
def tearDownModule():
global mol0, ftmp
mol0.stdout.close()
del mol0, ftmp
class KnownValues(unittest.TestCase):
def test_intor_cross(self):
mol1 = mol0.unpack(mol0.pack())
mol1.symmetry = True
mol1.unit = 'Ang'
mol1.atom = '''
1 0 1 1
O 0 0 0
h 1 1 0'''
mol1.basis = {'O': gto.basis.parse('''
C S
3047.5249000 0.0018347
457.3695100 0.0140373
103.9486900 0.0688426
29.2101550 0.2321844
9.2866630 0.4679413
3.1639270 0.3623120
# 1. 0.1
C SP
7.8682724 -0.1193324 0.0689991
1.8812885 -0.1608542 0.3164240
0.5442493 1.1434564 0.7443083
C SP
0.1687144 1.0000000 1.0000000'''),
'H': '6-31g'}
mol1.build()
v = gto.mole.intor_cross('cint1e_ovlp_sph', mol0, mol1)
self.assertAlmostEqual(numpy.linalg.norm(v), 3.6489423434168562, 1)
def test_num_basis(self):
self.assertEqual(mol0.nao_nr(), 34)
self.assertEqual(mol0.nao_2c(), 64)
def test_time_reversal_map(self):
tao = [ -2, 1, -4, 3, 8, -7, 6, -5,-10, 9,-12, 11,-14, 13,-16, 15,-18, 17,
20,-19, 24,-23, 22,-21, 26,-25, 30,-29, 28,-27, 32,-31, 36,-35, 34,-33,
-40, 39,-38, 37,-46, 45,-44, 43,-42, 41,-50, 49,-48, 47,-56, 55,-54, 53,
-52, 51,-58, 57,-60, 59, 64,-63, 62,-61]
self.assertEqual(list(mol0.time_reversal_map()), tao)
def test_check_sanity(self):
mol1 = mol0.copy()
mol1.x = None
mol1.copy = None
mol1.check_sanity()
def test_nao_range(self):
self.assertEqual(mol0.nao_nr_range(1,4), (2, 7))
self.assertEqual(mol0.nao_2c_range(1,4), (4, 12))
self.assertEqual(numpy.dot(range(mol0.nbas+1), mol0.ao_loc_nr()), 2151)
self.assertEqual(numpy.dot(range(mol0.nbas+1), mol0.ao_loc_2c()), 4066)
def test_search_bas(self):
self.assertEqual(mol0.search_shell_id(1, 1), 7)
self.assertRaises(RuntimeError, mol0.search_ao_nr, 1, 1, -1, 5)
self.assertEqual(mol0.search_ao_nr(1, 1, -1, 4), 16)
mol0.cart = True
self.assertEqual(mol0.search_ao_nr(2, 1, -1, 1), 30)
mol0.cart = False
def test_atom_types(self):
atoms = [['H0', ( 0, 0, 0)],
['H1', ( 0, 0, 0)],
['H', ( 0, 0, 0)],
['H3', ( 0, 0, 0)]]
basis = {'H':'sto3g', 'H1': '6-31g'}
atmgroup = gto.mole.atom_types(atoms, basis)
self.assertEqual(atmgroup, {'H': [0, 2, 3], 'H1': [1]})
atoms = [['H0', ( 0, 0, 0)],
['H1', ( 0, 0, 0)],
['H2', ( 0, 0, 0)],
['H3', ( 0, 0, 0)]]
basis = {'H2':'sto3g', 'H3':'6-31g', 'H0':'sto3g', 'H1': '6-31g'}
atmgroup = gto.mole.atom_types(atoms, basis)
self.assertEqual(atmgroup, {'H2': [2], 'H3': [3], 'H0': [0], 'H1': [1]})
def test_given_symmetry(self):
mol = gto.M(atom='H 0 0 -1; H 0 0 1', symmetry='D2h')
self.assertEqual(mol.irrep_id, [0, 5])
mol = gto.M(atom='H 0 0 -1; H 0 0 1', symmetry='D2')
self.assertEqual(mol.irrep_id, [0, 1])
mol = gto.M(atom='H 0 0 -1; H 0 0 1', symmetry='C2v')
self.assertEqual(mol.irrep_id, [0])
def test_dumps_loads(self):
import warnings
mol1 = gto.M()
mol1.x = lambda *args: None
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
d = mol1.dumps()
self.assertTrue(w[0].category, UserWarning)
mol1.loads(mol0.dumps())
def test_symm_orb_serialization(self):
'''Handle the complex symmetry-adapted orbitals'''
mol = gto.M(atom='He', basis='ccpvdz', symmetry=True)
mol.loads(mol.dumps())
lz_minus = numpy.sqrt(.5) * (mol.symm_orb[3] - mol.symm_orb[2] * 1j)
lz_plus = -numpy.sqrt(.5) * (mol.symm_orb[3] + mol.symm_orb[2] * 1j)
mol.symm_orb[2] = lz_minus
mol.symm_orb[3] = lz_plus
mol.loads(mol.dumps())
self.assertTrue(mol.symm_orb[0].dtype == numpy.double)
self.assertTrue(mol.symm_orb[2].dtype == numpy.complex128)
self.assertTrue(mol.symm_orb[3].dtype == numpy.complex128)
def test_same_mol1(self):
self.assertTrue(gto.same_mol(mol0, mol0))
mol1 = gto.M(atom='h 0 1 1; O1 0 0 0; h 1 1 0')
self.assertTrue(not gto.same_mol(mol0, mol1))
self.assertTrue(gto.same_mol(mol0, mol1, cmp_basis=False))
mol1 = gto.M(atom='h 0 1 1; O1 0 0 0; h 1 1 0.01')
self.assertTrue(not gto.same_mol(mol0, mol1, cmp_basis=False))
self.assertTrue(gto.same_mol(mol0, mol1, tol=.02, cmp_basis=False))
mol1 = gto.M(atom='''H 0.0052917700 0.0000000000 -0.8746076326
F 0.0000000000 0.0000000000 0.0516931447''')
mol2 = gto.M(atom='''H 0.0000000000 0.0000000000 -0.8746076326
F 0.0000000000 0.0000000000 0.0516931447''')
self.assertTrue(gto.same_mol(mol1, mol2))
self.assertTrue(not gto.same_mol(mol1, mol2, tol=1e-6))
mol3 = gto.M(atom='''H 0.0000000000 0.0000000000 -0.8746076326
H 0.0000000000 0.0000000000 0.0516931447''')
self.assertTrue(not gto.same_mol(mol3, mol2))
def test_same_mol2(self):
mol1 = gto.M(atom='H 0.0052917700 0.0000000000 -0.8746076326; F 0.0000000000 0.0000000000 0.0464013747')
mol2 = gto.M(atom='H 0.0000000000 0.0000000000 -0.8746076326; F 0.0052917700 0.0000000000 0.0464013747')
self.assertTrue(gto.same_mol(mol1, mol2))
mol1 = gto.M(atom='H 0.0052917700 0.0000000000 -0.8693158626; F 0.0000000000 0.0000000000 0.0464013747')
mol2 = gto.M(atom='H 0.0000000000 0.0052917700 -0.8693158626; F 0.0000000000 0.0000000000 0.0464013747')
mol3 = gto.M(atom='H 0.0000000000 0.0000000000 -0.8693158626; F 0.0052917700 0.0000000000 0.0464013747')
mol4 = gto.M(atom='H -0.0052917700 0.0000000000 -0.8746076326; F 0.0000000000 0.0000000000 0.0411096047')
mols = (mol1, mol2, mol3, mol4)
for i,mi in enumerate(mols):
for j in range(i):
self.assertTrue(gto.same_mol(mols[i], mols[j]))
mol1 = gto.M(atom='''H 0.0000000000 0.0000000000 0.0000000000
H 0.9497795800 1.3265673200 0.0000000000
H 0.9444878100 -1.3265673200 0.0000000000
H1 -0.9444878100 0.0000000000 1.3265673200
H1 -0.9444878100 0.0000000000 -1.3265673200''', basis={'H':'sto3g', 'H1':'sto3g'}, charge=1)
mol2 = gto.M(atom='''H 0.0000000000 0.0000000000 0.0000000000
H 0.9444878100 1.3265673200 0.0000000000
H 0.9497795800 -1.3265673200 0.0000000000
H1 -0.9444878100 0.0000000000 1.3265673200
H1 -0.9444878100 0.0000000000 -1.3265673200''', basis={'H':'sto3g', 'H1':'sto3g'}, charge=1)
self.assertTrue(gto.same_mol(mol1, mol2))
self.assertEqual(len(gto.atom_types(mol1._atom)), 2)
mol3 = gto.M(atom='''H 0.0000000000 0.0000000000 0.0000000000
H1 0.9497795800 1.3265673200 0.0000000000
H1 0.9444878100 -1.3265673200 0.0000000000
H1 -0.9444878100 0.0000000000 1.3265673200
H1 -0.9444878100 0.0000000000 -1.3265673200''', basis={'H':'sto3g', 'H1':'321g'}, charge=1)
self.assertTrue(not gto.same_mol(mol3, mol2))
def test_inertia_momentum(self):
mol1 = gto.Mole()
mol1.atom = mol0.atom
mol1.nucmod = 'G'
mol1.verbose = 5
mol1.nucprop = {'H': {'mass': 3}}
mol1.output = '/dev/null'
mol1.build(False, False)
self.assertAlmostEqual(lib.finger(gto.inertia_moment(mol1)),
2.139593709454326, 9)
mass = mol0.atom_mass_list(isotope_avg=True)
self.assertAlmostEqual(lib.finger(gto.inertia_moment(mol1, mass)),
2.1549269955776205, 9)
def test_chiral_mol(self):
mol1 = gto.M(atom='C 0 0 0; H 1 1 1; He -1 -1 1; Li -1 1 -1; Be 1 -1 -1')
mol2 = gto.M(atom='C 0 0 0; H 1 1 1; He -1 -1 1; Be -1 1 -1; Li 1 -1 -1')
self.assertTrue(gto.chiral_mol(mol1, mol2))
self.assertTrue(gto.chiral_mol(mol1))
mol1 = gto.M(atom='''H 0.9444878100 1.3265673200 0.0052917700
H 0.9444878100 -1.3265673200 0.0000000000
H -0.9444878100 0.0000000000 1.3265673200
H -0.9444878100 0.0000000000 -1.3265673200''')
mol2 = gto.M(atom='''H 0.9444878100 1.3265673200 0.0000000000
H 0.9444878100 -1.3265673200 0.0052917700
H -0.9444878100 0.0000000000 1.3265673200
H -0.9444878100 0.0000000000 -1.3265673200''')
self.assertTrue(gto.chiral_mol(mol1, mol2))
mol1 = gto.M(atom='''H 0.9444878100 1.3265673200 0.0052917700
H 0.9444878100 -1.3265673200 0.0000000000
H -0.9444878100 0.0000000000 1.3265673200
H -0.9444878100 0.0000000000 -1.3265673200''')
self.assertTrue(gto.chiral_mol(mol1))
def test_first_argument(self):
mol1 = gto.Mole()
mol1.build('He')
self.assertEqual(mol1.natm, 1)
def test_atom_as_file(self):
ftmp = tempfile.NamedTemporaryFile('w')
# file in xyz format
ftmp.write('He 0 0 0\nHe 0 0 1\n')
ftmp.flush()
mol1 = gto.M(atom=ftmp.name)
self.assertEqual(mol1.natm, 2)
# file in zmatrix format
ftmp = tempfile.NamedTemporaryFile('w')
ftmp.write('He\nHe 1 1.5\n')
ftmp.flush()
mol1 = gto.M(atom=ftmp.name)
self.assertEqual(mol1.natm, 2)
def test_format_atom(self):
atoms = [['h' , 0,1,1], "O1 0. 0. 0.", [1, 1.,1.,0.],]
self.assertTrue(numpy.allclose(gto.mole.format_atom(atoms, unit='Ang')[0][1],
[0.0, 1.8897261245650618, 1.8897261245650618]))
atoms = '''h 0 1 1
O1 0 0 0; 1 1 1 0; #H 0 0 3'''
self.assertTrue(numpy.allclose(gto.mole.format_atom(atoms, unit=1)[0][1],
[0.0, 1., 1.]))
atoms = 'O1; h 1 1; 1 1 1 2 90'
atoms = gto.mole.format_atom(atoms, unit=1)[2]
self.assertEqual(atoms[0], 'H')
self.assertTrue(numpy.allclose(atoms[1], [0, 0, 1.]))
def test_format_basis(self):
mol = gto.M(atom = '''O 0 0 0; 1 0 1 0; H 0 0 1''',
basis = {8: 'ccpvdz'})
self.assertEqual(mol.nao_nr(), 14)
mol = gto.M(atom = '''O 0 0 0; H:1 0 1 0; H@2 0 0 1''',
basis = {'O': 'ccpvdz', 'H:1': 'sto3g', 'H': 'unc-iglo3'})
self.assertEqual(mol.nao_nr(), 32)
mol = gto.M(
atom = '''O 0 0 0; H1 0 1 0; H2 0 0 1''',
basis = {'default': ('6-31g', [[0, [.05, 1.]], []]), 'H2': 'sto3g'}
)
self.assertEqual(mol.nao_nr(), 14)
mol = gto.M(
atom = '''O 0 0 0; H1 0 1 0; H2 0 0 1''',
basis = {'H1': gto.parse('''
# Parse NWChem format basis string (see https://bse.pnl.gov/bse/portal).
# Comment lines are ignored
#BASIS SET: (6s,3p) -> [2s,1p]
H S
2.9412494 -0.09996723
0.6834831 0.39951283
0.2222899 0.70011547
H S
2.9412494 0.15591627
0.6834831 0.60768372
0.2222899 0.39195739
''', optimize=True),
'O': 'unc-ccpvdz',
'H2': gto.load('sto-3g', 'He') # or use basis of another atom
}
)
self.assertEqual(mol.nao_nr(), 29)
mol = gto.M(
atom = '''O 0 0 0; H1 0 1 0; H2 0 0 1''',
basis = {'H': ['sto3g', '''unc
C S
71.6168370 0.15432897
13.0450960 0.53532814
3.5305122 0.44463454
C SP
2.9412494 -0.09996723 0.15591627
0.6834831 0.39951283 0.60768372
0.2222899 0.70011547 0.39195739
'''],
'O': mol.expand_etbs([(0, 4, 1.5, 2.2), # s-function
(1, 2, 0.5, 2.2)]) # p-function
}
)
self.assertEqual(mol.nao_nr(), 42)
mol = gto.M(
atom = '''O 0 0 0; H1 0 1 0; H2 0 0 1''',
basis = ('sto3g', 'ccpvdz', '3-21g',
gto.etbs([(0, 4, 1.5, 2.2), (1, 2, 0.5, 2.2)]),
[[0, numpy.array([1e3, 1.])]])
)
self.assertEqual(mol.nao_nr(), 77)
mol.atom = 'Hg'
mol.basis = 'ccpvdz'
self.assertRaises(RuntimeError, mol.build)
def test_default_basis(self):
mol = gto.M(atom=[['h' , 0,1,1], ["O1", (0.,0.,0.)], [1, 1.,1.,0.],],
basis={'default':'321g', 'O1': 'sto3g'})
self.assertEqual(sorted(mol._basis.keys()), ['H', 'O1'])
def test_parse_pople_basis(self):
self.assertEqual(len(gto.basis.load('6-31G(d)' , 'H')), 2)
self.assertEqual(len(gto.basis.load('6-31G(d)' , 'C')), 6)
self.assertEqual(len(gto.basis.load('6-31Gs' , 'C')), 6)
self.assertEqual(len(gto.basis.load('6-31G*' , 'C')), 6)
self.assertEqual(len(gto.basis.load('6-31G(d,p)' , 'H')), 3)
self.assertEqual(len(gto.basis.load('6-31G(d,p)' , 'C')), 6)
self.assertEqual(len(gto.basis.load('6-31G(2d,2p)' , 'H')), 4)
self.assertEqual(len(gto.basis.load('6-31G(2d,2p)' , 'C')), 7)
self.assertEqual(len(gto.basis.load('6-31G(3df,3pd)', 'H')), 6)
self.assertEqual(len(gto.basis.load('6-31G(3df,3pd)', 'C')), 9)
def test_parse_basis(self):
mol = gto.M(atom='''
6 0 0 -0.5
8 0 0 0.5
1 1 0 -1.0
1 -1 0 -1.0''',
basis='''
#BASIS SET: (3s) -> [2s]
H S
5.4471780 0.1562849787
0.82454724 0.9046908767
H S
0.18319158 1.0000000
#BASIS SET: (6s,3p) -> [3s,2p]
C S
172.2560000 0.0617669
25.9109000 0.3587940
5.5333500 0.7007130
C SP
3.6649800 -0.3958970 0.2364600
0.7705450 1.2158400 0.8606190
C SP
0.1958570 1.0000000 1.0000000
#BASIS SET: (6s,3p) -> [3s,2p]
O S
322.0370000 0.0592394
48.4308000 0.3515000
10.4206000 0.7076580
O SP
7.4029400 -0.4044530 0.2445860
1.5762000 1.2215600 0.8539550
O SP
0.3736840 1.0000000 1.0000000
''')
self.assertTrue(mol.nao_nr() == 22)
def test_ghost(self):
mol = gto.M(
atom = 'C 0 0 0; ghost 0 0 2',
basis = {'C': 'sto3g', 'ghost': gto.basis.load('sto3g', 'H')}
)
self.assertEqual(mol.nao_nr(), 6)
mol = gto.M(atom='''
ghost-O 0.000000000 0.000000000 2.500000000
X_H -0.663641000 -0.383071000 3.095377000
ghost.H 0.663588000 0.383072000 3.095377000
O 1.000000000 0.000000000 2.500000000
H -1.663641000 -0.383071000 3.095377000
H 1.663588000 0.383072000 3.095377000
''',
basis='631g')
self.assertEqual(mol.nao_nr(), 26)
def test_nucmod(self):
gto.filatov_nuc_mod(80)
self.assertEqual(gto.mole._parse_nuc_mod(1), gto.NUC_GAUSS)
self.assertEqual(gto.mole._parse_nuc_mod('Gaussian'), gto.NUC_GAUSS)
mol1 = gto.Mole()
mol1.atom = mol0.atom
mol1.nucmod = 'G'
mol1.verbose = 5
mol1.nucprop = {'H': {'mass': 3}}
mol1.output = '/dev/null'
mol1.build(False, False)
mol1.set_nuc_mod(0, 2)
self.assertTrue(mol1._atm[1,gto.NUC_MOD_OF] == gto.NUC_GAUSS)
self.assertAlmostEqual(mol1._env[mol1._atm[0,gto.PTR_ZETA]], 2, 9)
self.assertAlmostEqual(mol1._env[mol1._atm[1,gto.PTR_ZETA]], 586314366.54656982, 4)
mol1.set_nuc_mod(1, 0)
self.assertTrue(mol1._atm[1,gto.NUC_MOD_OF] == gto.NUC_POINT)
mol1.nucmod = None
mol1.build(False, False)
self.assertTrue(mol1._atm[1,gto.NUC_MOD_OF] == gto.NUC_POINT)
mol1.nucmod = {'H': gto.filatov_nuc_mod}
mol1.build(False, False)
self.assertTrue(mol1._atm[0,gto.NUC_MOD_OF] == gto.NUC_GAUSS)
self.assertTrue(mol1._atm[1,gto.NUC_MOD_OF] == gto.NUC_POINT)
self.assertTrue(mol1._atm[2,gto.NUC_MOD_OF] == gto.NUC_GAUSS)
def test_zmat(self):
coord = numpy.array((
(0.200000000000, -1.889726124565, 0.000000000000),
(1.300000000000, -1.889726124565, 0.000000000000),
(2.400000000000, -1.889726124565, 0.000000000000),
(3.500000000000, -1.889726124565, 0.000000000000),
(0.000000000000, 0.000000000000, -1.889726124565),
(0.000000000000, 1.889726124565, 0.000000000000),
(0.200000000000, -0.800000000000, 0.000000000000),
(1.889726124565, 0.000000000000, 1.133835674739)))
zstr0 = gto.cart2zmat(coord)
zstr = '\n'.join(['H '+x for x in zstr0.splitlines()])
atoms = gto.zmat2cart(zstr)
zstr1 = gto.cart2zmat([x[1] for x in atoms])
self.assertTrue(zstr0 == zstr1)
numpy.random.seed(1)
coord = numpy.random.random((6,3))
zstr0 = gto.cart2zmat(coord)
zstr = '\n'.join(['H '+x for x in zstr0.splitlines()])
atoms = gto.zmat2cart(zstr)
zstr1 = gto.cart2zmat([x[1] for x in atoms])
self.assertTrue(zstr0 == zstr1)
def test_c2s(self): # Transformation of cart <-> sph, sph <-> spinor
c = mol0.sph2spinor_coeff()
s0 = mol0.intor('int1e_ovlp_spinor')
s1 = mol0.intor('int1e_ovlp_sph')
sa = reduce(numpy.dot, (c[0].T.conj(), s1, c[0]))
sa+= reduce(numpy.dot, (c[1].T.conj(), s1, c[1]))
mol0.cart = True
s2 = mol0.intor('int1e_ovlp')
mol0.cart = False
self.assertAlmostEqual(abs(s0 - sa).max(), 0, 12)
c = mol0.cart2sph_coeff()
sa = reduce(numpy.dot, (c.T.conj(), s2, c))
self.assertAlmostEqual(abs(s1 - sa).max(), 0, 12)
c0 = gto.mole.cart2sph(1)
ca, cb = gto.mole.cart2spinor_l(1)
ua, ub = gto.mole.sph2spinor_l(1)
self.assertAlmostEqual(abs(c0.dot(ua)-ca).max(), 0, 9)
self.assertAlmostEqual(abs(c0.dot(ub)-cb).max(), 0, 9)
c0 = gto.mole.cart2sph(0, normalized='sp')
ca, cb = gto.mole.cart2spinor_kappa(-1, 0, normalized='sp')
ua, ub = gto.mole.sph2spinor_kappa(-1, 0)
self.assertAlmostEqual(abs(c0.dot(ua)-ca).max(), 0, 9)
self.assertAlmostEqual(abs(c0.dot(ub)-cb).max(), 0, 9)
c1 = gto.mole.cart2sph(0, numpy.eye(1))
self.assertAlmostEqual(abs(c0*0.282094791773878143-c1).max(), 0, 12)
c0 = gto.mole.cart2sph(1, normalized='sp')
ca, cb = gto.mole.cart2spinor_kappa(1, 1, normalized='sp')
ua, ub = gto.mole.sph2spinor_kappa(1, 1)
self.assertAlmostEqual(abs(c0.dot(ua)-ca).max(), 0, 9)
self.assertAlmostEqual(abs(c0.dot(ub)-cb).max(), 0, 9)
c1 = gto.mole.cart2sph(1, numpy.eye(3).T)
self.assertAlmostEqual(abs(c0*0.488602511902919921-c1).max(), 0, 12)
def test_bas_method(self):
self.assertEqual([mol0.bas_len_cart(x) for x in range(mol0.nbas)],
[1, 3, 1, 1, 1, 1, 1, 3, 3, 3, 6, 6, 1, 3])
self.assertEqual([mol0.bas_len_spinor(x) for x in range(mol0.nbas)],
[2, 4, 2, 2, 2, 2, 2, 6, 6, 6, 10, 10, 2, 4])
c0 = mol0.bas_ctr_coeff(0)
self.assertAlmostEqual(abs(c0[:,0]/c0[0,0] - (1,3,1)).max(), 0, 9)
self.assertAlmostEqual(abs(c0[:,1] - (0,1,0)).max(), 0, 9)
self.assertRaises(ValueError, mol0.gto_norm, -1, 1.)
def test_nelectron(self):
mol0.nelectron = mol0.nelectron
mol0.nelectron = mol0.nelectron
mol0.spin = 2
self.assertRaises(RuntimeError, lambda *args: mol0.nelec)
mol0.spin = 1
mol1 = copy.copy(mol0)
self.assertEqual(mol1.nelec, (5, 4))
mol1.nelec = (3, 6)
self.assertEqual(mol1.nelec, (3, 6))
def test_multiplicity(self):
mol1 = copy.copy(mol0)
self.assertEqual(mol1.multiplicity, 2)
mol1.multiplicity = 5
self.assertEqual(mol1.multiplicity, 5)
self.assertEqual(mol1.spin, 4)
self.assertRaises(RuntimeError, lambda:mol1.nelec)
def test_ms(self):
mol1 = copy.copy(mol0)
self.assertEqual(mol1.ms, 0.5)
mol1.ms = 1
self.assertEqual(mol1.multiplicity, 3)
self.assertEqual(mol1.spin, 2)
self.assertRaises(RuntimeError, lambda:mol1.nelec)
def test_atom_method(self):
aoslice = mol0.aoslice_by_atom()
for i in range(mol0.natm):
symb = mol0.atom_pure_symbol(i)
shls = mol0.atom_shell_ids(i)
nshls = aoslice[i][1] - aoslice[i][0]
self.assertEqual(shls[0], aoslice[i][0])
self.assertEqual(len(shls), nshls)
self.assertEqual(mol0.atom_nshells(i), nshls)
aoslice = mol0.aoslice_2c_by_atom()
self.assertEqual([x[2] for x in aoslice], [0, 8, 56])
self.assertEqual([x[3] for x in aoslice], [8, 56, 64])
def test_dump_loads(self):
import json
tmpfile = tempfile.NamedTemporaryFile()
lib.chkfile.save_mol(mol0, tmpfile.name)
mol1 = gto.Mole()
mol1.update(tmpfile.name)
self.assertEqual(json.loads(mol1.dumps()), json.loads(mol0.dumps()))
tmpfile = None
mol1.loads(mol1.dumps())
mol1.loads_(mol0.dumps())
mol1.unpack(mol1.pack())
mol1.unpack_(mol0.pack())
def test_set_geom(self):
mol1 = gto.Mole()
mol1.verbose = 5
mol1.set_geom_(mol0._atom, 'B', symmetry=True)
mol1.set_geom_(mol0.atom_coords(), 'B', inplace=False)
mol1.symmetry = False
mol1.set_geom_(mol0.atom_coords(), 'B')
mol1.set_geom_(mol0.atom_coords(), inplace=False)
mol1.set_geom_(mol0.atom_coords(), unit=1.)
mol1.set_geom_(mol0.atom_coords(), unit='Ang', inplace=False)
def test_apply(self):
from pyscf import scf, mp
self.assertTrue(isinstance(mol0.apply('RHF'), scf.rohf.ROHF))
self.assertTrue(isinstance(mol0.apply('MP2'), mp.ump2.UMP2))
self.assertTrue(isinstance(mol0.apply(scf.RHF), scf.rohf.ROHF))
self.assertTrue(isinstance(mol0.apply(scf.uhf.UHF), scf.uhf.UHF))
def test_with_MoleContext(self):
mol1 = mol0.copy()
with mol1.with_rinv_as_nucleus(1):
self.assertTrue(mol1._env[gto.PTR_RINV_ZETA] != 0)
self.assertAlmostEqual(abs(mol1._env[gto.PTR_RINV_ORIG+2]), 0.46288647587915266, 9)
self.assertAlmostEqual(mol1._env[gto.PTR_RINV_ZETA], 0, 9)
self.assertAlmostEqual(mol1._env[gto.PTR_RINV_ORIG+2], 0, 9)
with mol1.with_rinv_as_nucleus(0):
self.assertAlmostEqual(abs(mol1._env[gto.PTR_RINV_ORIG+2]), 1.8515459035166109, 9)
self.assertAlmostEqual(mol1._env[gto.PTR_RINV_ORIG+2], 0, 9)
with mol1.with_rinv_zeta(20):
self.assertAlmostEqual(mol1._env[gto.PTR_RINV_ZETA], 20, 9)
mol1.set_rinv_zeta(3.)
self.assertAlmostEqual(mol1._env[gto.PTR_RINV_ZETA], 0, 9)
with mol1.with_rinv_origin((1,2,3)):
self.assertAlmostEqual(mol1._env[gto.PTR_RINV_ORIG+2], 3, 9)
self.assertAlmostEqual(mol1._env[gto.PTR_RINV_ORIG+2], 0, 9)
with mol1.with_range_coulomb(20):
self.assertAlmostEqual(mol1._env[gto.PTR_RANGE_OMEGA], 20, 9)
mol1.set_range_coulomb(2.)
self.assertAlmostEqual(mol1._env[gto.PTR_RANGE_OMEGA], 0, 9)
with mol1.with_common_origin((1,2,3)):
self.assertAlmostEqual(mol1._env[gto.PTR_COMMON_ORIG+2], 3, 9)
self.assertAlmostEqual(mol1._env[gto.PTR_COMMON_ORIG+2], 0, 9)
mol1.set_f12_zeta(2.)
def test_input_symmetry(self):
mol1 = gto.Mole()
mol1.atom = 'H 1 1 1; H -1 -1 1; H 1 -1 -1; H -1 1 -1'
mol1.unit = 'B'
mol1.symmetry = True
mol1.verbose = 5
mol1.output = '/dev/null'
mol1.build()
self.assertAlmostEqual(lib.finger(mol1.atom_coords()), 4.2517312170868475, 9)
        mol1 = gto.Mole()
mol1.atom = 'H 0 0 -1; H 0 0 1'
mol1.cart = True
mol1.unit = 'B'
mol1.symmetry = 'Dooh'
mol1.verbose = 5
mol1.output = '/dev/null'
mol1.build()
self.assertAlmostEqual(lib.finger(mol1.atom_coords()), 0.69980902201036865, 9)
mol1 = gto.Mole()
mol1.atom = 'H 0 -1 0; H 0 1 0'
mol1.unit = 'B'
mol1.symmetry = True
mol1.symmetry_subgroup = 'D2h'
mol1.build()
self.assertAlmostEqual(lib.finger(mol1.atom_coords()), 0.69980902201036865, 9)
mol1.atom = 'H 0 0 -1; H 0 0 1'
mol1.unit = 'B'
mol1.symmetry = 'Coov'
mol1.symmetry_subgroup = 'C2'
mol1.build()
self.assertAlmostEqual(lib.finger(mol1.atom_coords()), 0.69980902201036865, 9)
mol1.atom = 'H 1 0 -1; H 0 0 1'
mol1.symmetry = 'Coov'
self.assertRaises(RuntimeWarning, mol1.build)
mol1.atom = '''
C 0. 0. 0.7264
C 0. 0. -.7264
H 0.92419 0. 1.29252
H -.92419 0. 1.29252
H 0. 0.92419 -1.29252
H 0. -.92419 -1.29252'''
mol1.symmetry = True
mol1.symmetry_subgroup = 'C2v'
mol1.build()
self.assertAlmostEqual(lib.finger(mol1.atom_coords()), -0.5215310671099358, 9)
def test_search_ao_label(self):
mol1 = mol0.copy()
mol1.atom = mol0.atom + ['Mg 1,1,1']
mol1.ecp['Mg'] = 'lanl2dz'
mol1.basis['Mg'] = 'lanl2dz'
mol1.build(0, 0)
self.assertEqual(list(mol1.search_ao_label('O.*2p')), [10,11,12])
self.assertEqual(list(mol1.search_ao_label('O1 2p')), [10,11,12])
self.assertEqual(list(mol1.search_ao_label(['O.*2p','0 H 1s'])), [0, 10,11,12])
self.assertEqual(list(mol1.search_ao_label([10,11,12])), [10,11,12])
self.assertEqual(list(mol1.search_ao_label(lambda x: '4d' in x)), [24,25,26,27,28])
mol1.ao_labels(fmt='%s%s%s%s')
mol1.sph_labels(fmt=None)
mol1.cart = True
self.assertEqual(list(mol1.search_ao_label('4d')), [25,26,27,28,29,30])
mol1.ao_labels(fmt='%s%s%s%s')
mol1.ao_labels(fmt=None)
mol1.cart = False
mol1.spinor_labels()
mol1.spinor_labels(fmt='%s%s%s%s')
mol1.spinor_labels(fmt=None)
def test_input_ecp(self):
mol1 = gto.Mole()
mol1.atom = mol0.atom
mol1.ecp = 'lanl2dz'
mol1.build(False, False)
gto.basis.load_ecp('lanl08', 'O')
gto.format_ecp({'O':'lanl08', 1:'lanl2dz'})
self.assertRaises(KeyError, gto.format_ecp, {'H':'lan2ldz'})
def test_condense_to_shell(self):
mol1 = mol0.copy()
mol1.symmetry = False
mol1.build(False, False)
v = gto.condense_to_shell(mol1, mol1.intor('int1e_ovlp'), numpy.max)
self.assertAlmostEqual(lib.finger(v), 5.7342530154117846, 9)
def test_input_ghost_atom(self):
mol = gto.M(
atom = 'C 0 0 0; ghost 0 0 2',
basis = {'C': 'sto3g', 'ghost': gto.basis.load('sto3g', 'H')}
)
mol = gto.M(atom='''
ghost1 0.000000000 0.000000000 2.500000000
ghost2 -0.663641000 -0.383071000 3.095377000
ghost2 0.663588000 0.383072000 3.095377000
O 1.000000000 0.000000000 2.500000000
H -1.663641000 -0.383071000 3.095377000
H 1.663588000 0.383072000 3.095377000
''',
basis={'ghost1':gto.basis.load('sto3g', 'O'),
'ghost2':gto.basis.load('631g', 'H'),
'O':'631g', 'H':'631g'}
)
mol = gto.M(atom='''
ghost-O 0.000000000 0.000000000 2.500000000
ghost_H -0.663641000 -0.383071000 3.095377000
ghost:H 0.663588000 0.383072000 3.095377000
O 1.000000000 0.000000000 2.500000000
H -1.663641000 -0.383071000 3.095377000
H 1.663588000 0.383072000 3.095377000
''', basis='631g')
mol = gto.M(atom='''
X1 0.000000000 0.000000000 2.500000000
X2 -0.663641000 -0.383071000 3.095377000
X2 0.663588000 0.383072000 3.095377000
O 1.000000000 0.000000000 2.500000000
H -1.663641000 -0.383071000 3.095377000
H 1.663588000 0.383072000 3.095377000
''',
basis={'X1':gto.basis.load('sto3g', 'O'),
'X2':gto.basis.load('631g', 'H'),
'O':'631g', 'H':'631g'}
)
mol = gto.M(atom='''
X-O 0.000000000 0.000000000 2.500000000
X_H1 -0.663641000 -0.383071000 3.095377000
X:H 0.663588000 0.383072000 3.095377000
O 1.000000000 0.000000000 2.500000000
H -1.663641000 -0.383071000 3.095377000
H 1.663588000 0.383072000 3.095377000
''', basis='631g')
def test_conc_mole(self):
mol1 = gto.M(atom='Mg', ecp='LANL2DZ', basis='lanl2dz')
mol2 = mol1 + mol0
self.assertEqual(mol2.natm, 4)
self.assertEqual(mol2.nbas, 18)
self.assertEqual(mol2.nao_nr(), 42)
mol2 = mol0 + mol1
self.assertEqual(mol2.natm, 4)
self.assertEqual(mol2.nbas, 18)
self.assertEqual(mol2.nao_nr(), 42)
n0 = mol0.npgto_nr()
n1 = mol1.npgto_nr()
self.assertEqual(mol2.npgto_nr(), n0+n1)
mol2 = mol2 + mol2
mol2.cart = True
self.assertEqual(mol2.npgto_nr(), 100)
    def test_intor_cross_cart(self):
mol1 = gto.M(atom='He', basis={'He': [(2,(1.,1))]}, cart=True)
s0 = gto.intor_cross('int1e_ovlp', mol1, mol0)
self.assertEqual(s0.shape, (6, 34))
s0 = gto.intor_cross('int1e_ovlp', mol0, mol1)
self.assertEqual(s0.shape, (34, 6))
s0 = gto.intor_cross('int1e_ovlp_cart', mol0, mol1)
self.assertEqual(s0.shape, (36, 6))
def test_energy_nuc(self):
self.assertAlmostEqual(mol0.get_enuc(), 6.3611415029455705, 9)
self.assertAlmostEqual(gto.M().energy_nuc(), 0, 9)
def test_fakemol(self):
numpy.random.seed(1)
coords = numpy.random.random((6,3))*4
vref = 0
mol = mol0.copy()
for c in coords:
mol.set_rinv_origin(c)
vref += mol.intor('int1e_rinv')
fakemol = gto.fakemol_for_charges(coords)
pmol = mol + fakemol
shls_slice = (0, mol.nbas, 0, mol.nbas, mol.nbas, pmol.nbas)
v = pmol.intor('int3c2e', comp=1, shls_slice=shls_slice)
v = numpy.einsum('pqk->pq', v)
self.assertAlmostEqual(abs(vref-v).max(), 0, 12)
def test_to_uncontracted_cartesian_basis(self):
pmol, ctr_coeff = mol0.to_uncontracted_cartesian_basis()
c = scipy.linalg.block_diag(*ctr_coeff)
s = reduce(numpy.dot, (c.T, pmol.intor('int1e_ovlp'), c))
self.assertAlmostEqual(abs(s-mol0.intor('int1e_ovlp')).max(), 0, 9)
mol0.cart = True
pmol, ctr_coeff = mol0.to_uncontracted_cartesian_basis()
c = scipy.linalg.block_diag(*ctr_coeff)
s = reduce(numpy.dot, (c.T, pmol.intor('int1e_ovlp'), c))
self.assertAlmostEqual(abs(s-mol0.intor('int1e_ovlp')).max(), 0, 9)
mol0.cart = False
if __name__ == "__main__":
print("test mole.py")
unittest.main()
|
py | 1a2f77c4eb3883d36294dbe9e2921e5aaa2e7656 | import json
from .common import InfoExtractor
from .youtube import YoutubeIE
from ..compat import compat_b64decode
from ..utils import (
clean_html,
ExtractorError
)
class ChilloutzoneIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?chilloutzone\.net/video/(?P<id>[\w|-]+)\.html'
_TESTS = [{
'url': 'http://www.chilloutzone.net/video/enemene-meck-alle-katzen-weg.html',
'md5': 'a76f3457e813ea0037e5244f509e66d1',
'info_dict': {
'id': 'enemene-meck-alle-katzen-weg',
'ext': 'mp4',
'title': 'Enemene Meck - Alle Katzen weg',
'description': 'Ist das der Umkehrschluss des Niesenden Panda-Babys?',
},
}, {
'note': 'Video hosted at YouTube',
'url': 'http://www.chilloutzone.net/video/eine-sekunde-bevor.html',
'info_dict': {
'id': '1YVQaAgHyRU',
'ext': 'mp4',
'title': '16 Photos Taken 1 Second Before Disaster',
'description': 'md5:58a8fcf6a459fe0a08f54140f0ad1814',
'uploader': 'BuzzFeedVideo',
'uploader_id': 'BuzzFeedVideo',
'upload_date': '20131105',
},
}, {
'note': 'Video hosted at Vimeo',
'url': 'http://www.chilloutzone.net/video/icon-blending.html',
'md5': '2645c678b8dc4fefcc0e1b60db18dac1',
'info_dict': {
'id': '85523671',
'ext': 'mp4',
'title': 'The Sunday Times - Icons',
'description': 're:(?s)^Watch the making of - makingoficons.com.{300,}',
'uploader': 'Us',
'uploader_id': 'usfilms',
'upload_date': '20140131'
},
}]
def _real_extract(self, url):
mobj = self._match_valid_url(url)
video_id = mobj.group('id')
webpage = self._download_webpage(url, video_id)
base64_video_info = self._html_search_regex(
r'var cozVidData = "(.+?)";', webpage, 'video data')
decoded_video_info = compat_b64decode(base64_video_info).decode('utf-8')
video_info_dict = json.loads(decoded_video_info)
# get video information from dict
video_url = video_info_dict['mediaUrl']
description = clean_html(video_info_dict.get('description'))
title = video_info_dict['title']
native_platform = video_info_dict['nativePlatform']
native_video_id = video_info_dict['nativeVideoId']
source_priority = video_info_dict['sourcePriority']
# If nativePlatform is None a fallback mechanism is used (i.e. youtube embed)
if native_platform is None:
youtube_url = YoutubeIE._extract_url(webpage)
if youtube_url:
return self.url_result(youtube_url, ie=YoutubeIE.ie_key())
        # Non-fallback: decide whether to use the native source (e.g. YouTube or
        # Vimeo) or the site's own CDN
if source_priority == 'native':
if native_platform == 'youtube':
return self.url_result(native_video_id, ie='Youtube')
if native_platform == 'vimeo':
return self.url_result(
'http://vimeo.com/' + native_video_id, ie='Vimeo')
if not video_url:
raise ExtractorError('No video found')
return {
'id': video_id,
'url': video_url,
'ext': 'mp4',
'title': title,
'description': description,
}
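# Minimal sketch of exercising this extractor through the public API (assumes
# yt-dlp is installed and the first test URL above is still live):
#   from yt_dlp import YoutubeDL
#   with YoutubeDL({'quiet': True}) as ydl:
#       info = ydl.extract_info(
#           'http://www.chilloutzone.net/video/enemene-meck-alle-katzen-weg.html',
#           download=False)
#       print(info['title'])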
|
py | 1a2f77e0bf0fac1106bb1d67ddd967a8c53e35f8 | #!/usr/bin/env python3
"""
Grid features extraction script.
"""
import argparse
import os
import torch
import tqdm
from fvcore.common.file_io import PathManager
from detectron2.checkpoint import DetectionCheckpointer
from detectron2.config import get_cfg
from detectron2.engine import default_setup
from detectron2.evaluation import inference_context
from detectron2.modeling import build_model
import numpy as np
from clip.clip import load
import torch.nn as nn
from torchvision.transforms import Compose, Resize, CenterCrop, ToTensor, Normalize
from grid_feats import (
add_attribute_config,
build_detection_test_loader_with_attributes,
)
from timm.models.vision_transformer import resize_pos_embed
import timm
# A simple mapper from object detection dataset to VQA dataset names
dataset_to_folder_mapper = {}
dataset_to_folder_mapper['coco_2014_train'] = 'train2014'
dataset_to_folder_mapper['coco_2014_val'] = 'val2014'
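# NOTE: the two assignments below override the split-specific folders above, so
# features for both the train and val splits end up in a single 'trainval2014' dir.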
dataset_to_folder_mapper['coco_2014_val'] = 'trainval2014'
dataset_to_folder_mapper['coco_2014_train'] = 'trainval2014'
# One may need to change the Detectron2 code to support coco_2015_test
# insert "coco_2015_test": ("coco/test2015", "coco/annotations/image_info_test2015.json"),
# at: https://github.com/facebookresearch/detectron2/blob/master/detectron2/data/datasets/builtin.py#L36
dataset_to_folder_mapper['coco_2015_test'] = 'test2015'
dataset_to_folder_mapper['coco_2015_test-dev'] = 'test-dev2015'
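# A less invasive alternative (sketch; the annotation/image paths are assumptions)
# is to register the extra split at runtime instead of editing builtin.py:
#   from detectron2.data.datasets import register_coco_instances
#   register_coco_instances("coco_2015_test", {},
#                           "datasets/coco/annotations/image_info_test2015.json",
#                           "datasets/coco/test2015")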
def extract_grid_feature_argument_parser():
parser = argparse.ArgumentParser(description="Grid feature extraction")
parser.add_argument("--config-file", default="", metavar="FILE", help="path to config file")
parser.add_argument("--dataset", help="name of the dataset", default="coco_2014_train",
choices=['coco_2014_train', 'coco_2014_val', 'coco_2015_test', 'coco_2015_test-dev'])
parser.add_argument('--model_type', default='RN50', type=str, help='RN50, RN101, RN50x4, ViT-B/32, vit_base_patch32_224_in21k')
parser.add_argument(
"opts",
help="Modify config options using the command-line",
default=None,
nargs=argparse.REMAINDER,
)
return parser
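# Typical invocation (the script name, config path and output dir are assumptions):
#   python extract_clip_features.py --config-file configs/R-50-grid.yaml \
#       --dataset coco_2014_train --model_type RN50 OUTPUT_DIR ./output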
def extract_grid_feature_on_dataset(model, data_loader, dump_folder):
for idx, inputs in enumerate(tqdm.tqdm(data_loader)):
with torch.no_grad():
image_id = inputs[0]['image_id']
file_name = '%d.pth' % image_id
# compute features
images = model.preprocess_image(inputs)
features = model.backbone(images.tensor)
outputs = model.roi_heads.get_conv5_features(features)
# modify the filename
file_name = inputs[0]['file_name'].split("/")[-1].replace("jpg", "npy")
outputs = outputs.permute(0, 2, 3, 1)
with PathManager.open(os.path.join(dump_folder, file_name), "wb") as f:
# save as CPU tensors
np.save(f, outputs.cpu().numpy())
def do_feature_extraction(cfg, model, dataset_name, args):
with inference_context(model):
dump_folder = os.path.join(cfg.OUTPUT_DIR, "features", dataset_to_folder_mapper[dataset_name])
PathManager.mkdirs(dump_folder)
        data_loader = build_detection_test_loader_with_attributes(cfg, dataset_name)
extract_clip_feature_on_dataset(model, data_loader, dump_folder, args)
def setup(args):
"""
Create configs and perform basic setups.
"""
cfg = get_cfg()
add_attribute_config(cfg)
cfg.merge_from_file(args.config_file)
cfg.merge_from_list(args.opts)
# force the final residual block to have dilations 1
cfg.MODEL.RESNETS.RES5_DILATION = 1
cfg.freeze()
default_setup(cfg, args)
return cfg
def extract_clip_feature_on_dataset(model, data_loader, dump_folder, args):
    # Work on a copy of the parsed arguments so that rewriting model_type below
    # does not clobber the value parsed from the command line.
    save_args = argparse.Namespace(**vars(args))
    if args.model_type != 'vit_base_patch32_224_in21k':
save_args.model_type = args.model_type.split("-")[0]
mean = torch.Tensor([0.48145466, 0.4578275, 0.40821073]).to("cuda").reshape(3, 1, 1)
std = torch.Tensor([0.26862954, 0.26130258, 0.27577711]).to("cuda").reshape(3, 1, 1)
dump_folder = f"clip/{save_args.model_type}/" + dump_folder.split("/")[-1]
else:
save_args.model_type = 'vit_base'
mean = torch.Tensor([0.5, 0.5, 0.5]).to("cuda").reshape(3, 1, 1)
std = torch.Tensor([0.5, 0.5, 0.5]).to("cuda").reshape(3, 1, 1)
dump_folder = f"clip/{save_args.model_type}/" + dump_folder.split("/")[-1]
print(model.pos_embed.shape)
num_patches = 558 #600 * 1000 // 32 // 32
print(num_patches)
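        # NOTE: the resized embedding below is attached to a '.weight' attribute of the
        # Parameter, which nothing reads afterwards, so the zero-initialised tensor is
        # what the model actually uses; the ViT-B/32 branch further down follows the
        # same pattern.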
pos_embed = nn.Parameter(torch.zeros(1, num_patches + 1, 768, device='cuda'),)
pos_embed.weight = resize_pos_embed(model.pos_embed, pos_embed)
model.pos_embed = pos_embed
if args.model_type == "ViT-B/32":
num_patches = 558 #600 * 1000 // 32 // 32
print(num_patches)
pos_embed = nn.Parameter(torch.zeros(num_patches + 1, 768, device='cuda'),)
pos_embed.weight = resize_pos_embed(model.visual.positional_embedding.unsqueeze(0), pos_embed.unsqueeze(0))
model.visual.positional_embedding = pos_embed
if not os.path.exists(dump_folder):
os.makedirs(dump_folder)
for idx, inputs in enumerate(tqdm.tqdm(data_loader)):
with torch.no_grad():
image_id = inputs[0]['image_id']
file_name = inputs[0]['file_name'].split("/")[-1].replace("jpg", "npy")
# compute features
image = inputs[0]['image'].to("cuda").float() / 255.0
image = (image - mean) / std
image = image.unsqueeze(0)
if "RN" in args.model_type:
outputs = model.encode_image(image)
elif args.model_type == 'vit_base_patch32_224_in21k':
outputs = model(image)
else:
x = model.visual.conv1(image.half()) # shape = [*, width, grid, grid]
x = x.reshape(x.shape[0], x.shape[1], -1) # shape = [*, width, grid ** 2]
x = x.permute(0, 2, 1) # shape = [*, grid ** 2, width]
x = torch.cat([model.visual.class_embedding.to(x.dtype) + torch.zeros(x.shape[0], 1, x.shape[-1], dtype=x.dtype, device=x.device), x], dim=1) # shape = [*, grid ** 2 + 1, width]
x = x + model.visual.positional_embedding.to(x.dtype)[:x.shape[1], :]
x = model.visual.ln_pre(x)
x = x.permute(1, 0, 2) # NLD -> LND
for layer_idx, layer in enumerate(model.visual.transformer.resblocks):
if layer_idx != 11:
x = layer(x)
outputs = x.permute(1, 0, 2)
if "RN" in args.model_type:
outputs = outputs.permute(0, 2, 3, 1)
else:
outputs = outputs[:, 1:, :].reshape(1, 18, 31, 768)
with PathManager.open(os.path.join(dump_folder, file_name), "wb") as f:
# save as CPU tensors
np.save(f, outputs.float().cpu().numpy())
def main(args):
cfg = setup(args)
if args.model_type != 'vit_base_patch32_224_in21k':
model, transform = load(args.model_type, jit=False)
else:
model = timm.create_model(args.model_type, pretrained=True)
model = model.cuda()
do_feature_extraction(cfg, model, args.dataset, args)
if __name__ == "__main__":
args = extract_grid_feature_argument_parser().parse_args()
print("Command Line Args:", args)
main(args)
|
py | 1a2f78bddc95220e0649a236efd36e6f5f82c8ab | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Authors:
# - Wen Guan, [email protected], 2018
import json
import logging
import os
import socket
import sys
import time
from pilot.eventservice.communicationmanager.communicationmanager import CommunicationRequest, CommunicationResponse, CommunicationManager
from pilot.util.https import https_setup
from pilot.util.timing import time_stamp
if sys.version_info < (2, 7):
import unittest2 as unittest
else:
import unittest
logging.basicConfig(stream=sys.stderr, level=logging.DEBUG)
logger = logging.getLogger(__name__)
https_setup(None, None)
def check_env():
"""
Function to check whether cvmfs is available.
To be used to decide whether to skip some test functions.
:returns True: if cvmfs is available. Otherwise False.
"""
return os.path.exists('/cvmfs/atlas.cern.ch/repo/')
class TestESCommunicationrRequestResponse(unittest.TestCase):
"""
Unit tests for event service communicator Request and Response.
"""
def test_communicator_request(self):
"""
Make sure that es message thread works as expected.
"""
req_attrs = {'request_type': CommunicationRequest.RequestType.RequestJobs,
'num_jobs': 1, 'post_hook': None, 'response': None}
req_job = CommunicationRequest(req_attrs)
self.assertEqual(req_job.request_type, CommunicationRequest.RequestType.RequestJobs)
req_attrs = {'request_type': CommunicationRequest.RequestType.RequestEvents,
'num_event_ranges': 1, 'post_hook': None, 'response': None}
req_events = CommunicationRequest(req_attrs)
self.assertEqual(req_events.request_type, CommunicationRequest.RequestType.RequestEvents)
req_attrs = {'request_type': CommunicationRequest.RequestType.UpdateEvents,
'output_files': None, 'post_hook': None, 'response': None}
req_output = CommunicationRequest(req_attrs)
self.assertEqual(req_output.request_type, CommunicationRequest.RequestType.UpdateEvents)
resp_attrs = {'status': 0, 'content': None, 'exception': None}
resp = CommunicationResponse(resp_attrs)
self.assertEqual(resp.status, 0)
class TestESCommunicationManagerPanda(unittest.TestCase):
"""
Unit tests for event service communicator manager.
"""
@unittest.skipIf(not check_env(), "No CVMFS")
def test_communicator_manager(self):
"""
Make sure that es communicator manager thread works as expected.
"""
communicator_manager = None
try:
args = {'workflow': 'eventservice_hpc',
'queue': 'BNL_CLOUD_MCORE',
'site': 'BNL_CLOUD_MCORE',
'port': 25443,
'url': 'https://aipanda007.cern.ch',
'job_label': 'ptest',
'pilot_user': 'ATLAS',
'node': socket.getfqdn(),
'mem': 16000,
'disk_space': 160000,
'working_group': '',
'cpu': 2601.0,
'info': None}
communicator_manager = CommunicationManager()
communicator_manager.start()
self.assertTrue(communicator_manager.is_alive())
jobs = communicator_manager.get_jobs(njobs=2, args=args)
self.assertEqual(len(jobs), 2)
jobs = communicator_manager.get_jobs(njobs=1, args=args)
self.assertEqual(len(jobs), 1)
job_list = []
for job in jobs:
job_data = {'node': socket.getfqdn(),
'pilotErrorCode': 0,
'startTime': time.time(),
'jobMetrics': 'coreCount=8',
'schedulerID': 'unknown',
'timestamp': time_stamp(),
'exeErrorCode': 0,
'pilotID': 'unknown|PR|2.0.0 (80)',
'transExitCode': 0,
'pilotErrorDiag': '',
'exeErrorDiag': ''}
job_data['jobId'] = job['PandaID']
job_data['siteName'] = 'BNL_CLOUD_MCORE'
job_data['state'] = 'running'
job_data['attemptNr'] = job['attemptNr'] + 1
job_list.append(job_data)
status = communicator_manager.update_jobs(jobs=job_list)
self.assertEqual(status[0], True)
events = communicator_manager.get_event_ranges(num_event_ranges=1, job=jobs[0])
self.assertEqual(len(events), 1)
for event in events:
event_range_status = {"errorCode": 1220, "eventRangeID": event['eventRangeID'], "eventStatus": 'failed'}
event_range_message = {'version': 0, 'eventRanges': json.dumps(event_range_status)}
res = communicator_manager.update_events(update_events=event_range_message)
self.assertEqual(res['StatusCode'], 0)
events = communicator_manager.get_event_ranges(num_event_ranges=2, job=jobs[0])
self.assertEqual(len(events), 2)
update_events = []
for event in events:
event_range = {"eventRangeID": event['eventRangeID'], "eventStatus": 'finished'}
update_events.append(event_range)
event_range_status = [{"zipFile": {"numEvents": len(update_events),
"objstoreID": 1318,
"adler32": '000000',
"lfn": 'test_file',
"fsize": 100,
"pathConvention": 1000},
"eventRanges": update_events}]
event_range_message = {'version': 1, 'eventRanges': json.dumps(event_range_status)}
res = communicator_manager.update_events(update_events=event_range_message)
self.assertEqual(res['StatusCode'], 0)
communicator_manager.stop()
time.sleep(2)
self.assertFalse(communicator_manager.is_alive())
except Exception as ex:
if communicator_manager:
communicator_manager.stop()
raise ex
|
py | 1a2f79211a981df605f777d54bf91b6cdc9b7585 | """
swat-s1 topology
"""
from mininet.topo import Topo as TopoBase
from srve import Srve
from clie import Clie
class Topoe(TopoBase):
NETMASK = '/24'
NODES = [Srve, Clie]
def build(self):
switch = self.addSwitch('s1')
for node in Topoe.NODES:
host = self.addHost(
node.NAME,
ip=node.IP + Topoe.NETMASK,
mac=node.MAC)
self.addLink(host, switch)
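# Minimal usage sketch (assumes this module and the Srve/Clie node definitions
# are importable on the Mininet host):
#   from mininet.net import Mininet
#   net = Mininet(topo=Topoe())
#   net.start()
#   net.pingAll()
#   net.stop()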
|
py | 1a2f7a212ac92f228456c65764e952ea192e27f2 | # type: ignore[attr-defined]
from solids import example_two_solid # pylint: disable=no-name-in-module
from dagster import pipeline
@pipeline
def example_two_pipeline():
example_two_solid()
|
py | 1a2f7a2694a13bd2fa3ec9c15e13fbabb9667170 | """
MSX SDK
MSX SDK client. # noqa: E501
The version of the OpenAPI document: 1.0.9
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from python_msx_sdk.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
class GenericEventSecurity(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
}
additional_properties_type = None
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
return {
'client_id': (str,), # noqa: E501
'user_id': (str,), # noqa: E501
'username': (str,), # noqa: E501
'tenant_id': (str,), # noqa: E501
'tenant_name': (str,), # noqa: E501
'provider_id': (str,), # noqa: E501
'original_username': (str,), # noqa: E501
}
@cached_property
def discriminator():
return None
attribute_map = {
'client_id': 'clientId', # noqa: E501
'user_id': 'userId', # noqa: E501
'username': 'username', # noqa: E501
'tenant_id': 'tenantId', # noqa: E501
'tenant_name': 'tenantName', # noqa: E501
'provider_id': 'providerId', # noqa: E501
'original_username': 'originalUsername', # noqa: E501
}
_composed_schemas = {}
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, *args, **kwargs): # noqa: E501
"""GenericEventSecurity - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
client_id (str): [optional] # noqa: E501
user_id (str): [optional] # noqa: E501
username (str): [optional] # noqa: E501
tenant_id (str): [optional] # noqa: E501
tenant_name (str): [optional] # noqa: E501
provider_id (str): [optional] # noqa: E501
original_username (str): [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
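# Usage sketch (all field values below are illustrative only):
#   event_security = GenericEventSecurity(
#       client_id="example-client",
#       user_id="3f6c0000-0000-0000-0000-000000000000",
#       username="jdoe",
#       tenant_id="tenant-1",
#   )
#   event_security.tenant_name = "Example Tenant"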
|
py | 1a2f7a2dd3ba13c02f9753f956bad38c6cb3d724 | import typing
from sqlalchemy import create_engine
from sqlalchemy.exc import OperationalError
from db_workers import DatabaseWorker
from db_workers import MySQLWorker
from db_workers import PostgreWorker
from fields import DATABASE_NAME_FIELD_NAME
from fields import FOLDER_NAME_FIELD_NAME
from fields import LOCAL_TABLE_NAME_FILED_NAME
from fields import TABLE_NAME_FIELD_NAME
def validate_get_data_request_body(
required_fields: list, request_body: dict, local_db_worker
) -> dict:
# Check if all fields defined
for f in required_fields:
if f not in request_body:
raise KeyError(f"Field {f} not defined in request")
# If columns defined - check if all exits in database
if "columns" in request_body:
table_data = local_db_worker.get_table(local_table_name=request_body["table"])
for col in request_body["columns"]:
if col not in table_data.columns:
                raise KeyError(f"Column {col} not found in table {request_body['table']}")
    # If limit defined - check that it can be converted to float
if "limit" in request_body:
request_body["limit"] = float(request_body["limit"])
return request_body
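# Example of the expected request shape (keys and values are illustrative):
#   validate_get_data_request_body(
#       required_fields=["table"],
#       request_body={"table": "users", "columns": ["id", "name"], "limit": "10"},
#       local_db_worker=local_worker,  # any LocalBaseWorker-like object
#   )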
def get_existing_data(
sql_session, table_class_object, target_attr: str = None
) -> typing.List:
if not getattr(table_class_object, "__tablename__"):
        raise ValueError("Invalid table_class_object received.")
try:
data = sql_session.query(table_class_object).all()
except: # noqa: E722
sql_session.rollback()
return [getattr(i, target_attr) for i in data] if target_attr else data
def get_local_table_name_from_request(request_body: dict, local_worker):
return (
request_body[LOCAL_TABLE_NAME_FILED_NAME]
if LOCAL_TABLE_NAME_FILED_NAME in request_body
else local_worker.get_local_table_name(
database_name=request_body[DATABASE_NAME_FIELD_NAME],
folder_name=request_body[FOLDER_NAME_FIELD_NAME],
table_name=request_body[TABLE_NAME_FIELD_NAME],
)
)
def get_worker(db_type: str) -> typing.Type[DatabaseWorker]: # noqa: TYP006
if db_type == "postgres":
return PostgreWorker
elif db_type == "mysql":
return MySQLWorker
def database_health_check(engine) -> bool:
try:
engine.connect()
return True
except OperationalError:
return False
def get_db_engine(db_type: str, **con_params):
if db_type == "postgres":
return create_engine(
f"postgresql://{con_params.get('username')}:{con_params.get('password')}@"
f"{con_params.get('ip')}:{con_params.get('port')}/{con_params.get('database')}"
)
elif db_type == "mysql":
return create_engine(
f"mysql://{con_params.get('username')}:{con_params.get('password')}@"
f"{con_params.get('ip')}:{con_params.get('port')}/{con_params.get('database')}"
)
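# Usage sketch (connection parameters are placeholders):
#   engine = get_db_engine(
#       "postgres",
#       username="demo", password="secret",
#       ip="127.0.0.1", port=5432, database="demo_db",
#   )
#   if not database_health_check(engine):
#       raise RuntimeError("database is unreachable")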
def get_bad_request_answer() -> list:
return [{"status": "error", "message": "Incorrect request"}, 400]
def update_data_about_db_structure(local_worker):
"""
    This function iterates through all databases and refreshes their structure data (tables/schemas)
:param local_worker: object of LocalBaseWorker
"""
for local_base_name in local_worker.get_db_name_list():
db_type = local_worker.get_database_object(local_base_name).type
worker = get_worker(db_type)(local_base_name, local_worker)
tables = worker.download_table_list()
        # After the rescan, grant admin tokens access to any newly discovered tables
for token_obj in local_worker.get_admin_tokens_objects_list():
token = token_obj.token
for local_table_name in tables:
if local_table_name not in list(token_obj.granted_tables):
local_worker.add_table_for_token(
token, local_table_name=local_table_name
)
|
py | 1a2f7a63cc38b8b547f26d1bba451999cce801a8 | from . import losses, layers
|
py | 1a2f7c0f84f644a7c275dcc3f9d9319e214a6e7d | import os
import unittest
from datetime import datetime
import requests_mock
from dateutil.tz import tzutc
from august.api import API_GET_DOORBELLS_URL, Api, API_GET_LOCKS_URL, \
API_GET_LOCK_STATUS_URL, API_LOCK_URL, API_UNLOCK_URL, API_GET_LOCK_URL, \
API_GET_DOORBELL_URL, API_GET_PINS_URL
from august.lock import LockStatus, LockDoorStatus
ACCESS_TOKEN = "eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9"
def load_fixture(filename):
"""Load a fixture."""
path = os.path.join(os.path.dirname(__file__), 'fixtures', filename)
with open(path) as fptr:
return fptr.read()
def utc_of(year, month, day, hour, minute, second, microsecond):
return datetime(year, month, day, hour, minute, second, microsecond,
tzinfo=tzutc())
class TestApi(unittest.TestCase):
@requests_mock.Mocker()
def test_get_doorbells(self, mock):
mock.register_uri(
"get",
API_GET_DOORBELLS_URL,
text=load_fixture("get_doorbells.json"))
api = Api()
doorbells = sorted(api.get_doorbells(ACCESS_TOKEN),
key=lambda d: d.device_id)
self.assertEqual(2, len(doorbells))
first = doorbells[0]
self.assertEqual("1KDAbJH89XYZ", first.device_id)
self.assertEqual("aaaaR08888", first.serial_number)
self.assertEqual("Back Door", first.device_name)
self.assertEqual("doorbell_call_status_offline", first.status)
self.assertEqual(False, first.has_subscription)
self.assertEqual(None, first.image_url)
self.assertEqual("3dd2accadddd", first.house_id)
second = doorbells[1]
self.assertEqual("K98GiDT45GUL", second.device_id)
self.assertEqual("tBXZR0Z35E", second.serial_number)
self.assertEqual("Front Door", second.device_name)
self.assertEqual("doorbell_call_status_online", second.status)
self.assertEqual(True, second.has_subscription)
self.assertEqual("https://image.com/vmk16naaaa7ibuey7sar.jpg",
second.image_url)
self.assertEqual("3dd2accaea08", second.house_id)
@requests_mock.Mocker()
def test_get_doorbell_detail(self, mock):
mock.register_uri(
"get",
API_GET_DOORBELL_URL.format(doorbell_id="K98GiDT45GUL"),
text=load_fixture("get_doorbell.json"))
api = Api()
doorbell = api.get_doorbell_detail(ACCESS_TOKEN, "K98GiDT45GUL")
self.assertEqual("K98GiDT45GUL", doorbell.device_id)
self.assertEqual("Front Door", doorbell.device_name)
self.assertEqual("3dd2accaea08", doorbell.house_id)
self.assertEqual("tBXZR0Z35E", doorbell.serial_number)
self.assertEqual("2.3.0-RC153+201711151527", doorbell.firmware_version)
self.assertEqual("doorbell_call_status_online", doorbell.status)
self.assertEqual(True, doorbell.is_online)
self.assertEqual(True, doorbell.has_subscription)
self.assertEqual("https://image.com/vmk16naaaa7ibuey7sar.jpg",
doorbell.image_url)
@requests_mock.Mocker()
def test_get_locks(self, mock):
mock.register_uri(
"get",
API_GET_LOCKS_URL,
text=load_fixture("get_locks.json"))
api = Api()
locks = sorted(api.get_locks(ACCESS_TOKEN), key=lambda d: d.device_id)
self.assertEqual(2, len(locks))
first = locks[0]
self.assertEqual("A6697750D607098BAE8D6BAA11EF8063", first.device_id)
self.assertEqual("Front Door Lock", first.device_name)
self.assertEqual("000000000000", first.house_id)
self.assertEqual(True, first.is_operable)
second = locks[1]
self.assertEqual("A6697750D607098BAE8D6BAA11EF9999", second.device_id)
self.assertEqual("Back Door Lock", second.device_name)
self.assertEqual("000000000011", second.house_id)
self.assertEqual(False, second.is_operable)
@requests_mock.Mocker()
def test_get_operable_locks(self, mock):
mock.register_uri(
"get",
API_GET_LOCKS_URL,
text=load_fixture("get_locks.json"))
api = Api()
locks = api.get_operable_locks(ACCESS_TOKEN)
self.assertEqual(1, len(locks))
first = locks[0]
self.assertEqual("A6697750D607098BAE8D6BAA11EF8063", first.device_id)
self.assertEqual("Front Door Lock", first.device_name)
self.assertEqual("000000000000", first.house_id)
self.assertEqual(True, first.is_operable)
@requests_mock.Mocker()
def test_get_lock_detail(self, mock):
mock.register_uri(
"get",
API_GET_LOCK_URL.format(
lock_id="A6697750D607098BAE8D6BAA11EF8063"),
text=load_fixture("get_lock.json"))
api = Api()
lock = api.get_lock_detail(ACCESS_TOKEN,
"A6697750D607098BAE8D6BAA11EF8063")
self.assertEqual("A6697750D607098BAE8D6BAA11EF8063", lock.device_id)
self.assertEqual("Front Door Lock", lock.device_name)
self.assertEqual("000000000000", lock.house_id)
self.assertEqual("X2FSW05DGA", lock.serial_number)
self.assertEqual("109717e9-3.0.44-3.0.30", lock.firmware_version)
self.assertEqual(88, lock.battery_level)
self.assertEqual("Medium", lock.keypad.battery_level)
self.assertEqual("5bc65c24e6ef2a263e1450a8", lock.keypad.device_id)
@requests_mock.Mocker()
def test_get_lock_status_with_locked_response(self, mock):
lock_id = 1234
mock.register_uri(
"get",
API_GET_LOCK_STATUS_URL.format(lock_id=lock_id),
text="{\"status\": \"kAugLockState_Locked\"}")
api = Api()
status = api.get_lock_status(ACCESS_TOKEN, lock_id)
self.assertEqual(LockStatus.LOCKED, status)
@requests_mock.Mocker()
def test_get_lock_and_door_status_with_locked_response(self, mock):
lock_id = 1234
mock.register_uri(
"get",
API_GET_LOCK_STATUS_URL.format(lock_id=lock_id),
text="{\"status\": \"kAugLockState_Locked\""
",\"doorState\": \"kAugLockDoorState_Closed\"}")
api = Api()
status, door_status = api.get_lock_status(ACCESS_TOKEN, lock_id, True)
self.assertEqual(LockStatus.LOCKED, status)
self.assertEqual(LockDoorStatus.CLOSED, door_status)
@requests_mock.Mocker()
def test_get_lock_status_with_unlocked_response(self, mock):
lock_id = 1234
mock.register_uri(
"get",
API_GET_LOCK_STATUS_URL.format(lock_id=lock_id),
text="{\"status\": \"kAugLockState_Unlocked\"}")
api = Api()
status = api.get_lock_status(ACCESS_TOKEN, lock_id)
self.assertEqual(LockStatus.UNLOCKED, status)
@requests_mock.Mocker()
def test_get_lock_status_with_unknown_status_response(self, mock):
lock_id = 1234
mock.register_uri(
"get",
API_GET_LOCK_STATUS_URL.format(lock_id=lock_id),
text="{\"status\": \"not_advertising\"}")
api = Api()
status = api.get_lock_status(ACCESS_TOKEN, lock_id)
self.assertEqual(LockStatus.UNKNOWN, status)
@requests_mock.Mocker()
def test_get_lock_door_status_with_closed_response(self, mock):
lock_id = 1234
mock.register_uri(
"get",
API_GET_LOCK_STATUS_URL.format(lock_id=lock_id),
text="{\"doorState\": \"kAugLockDoorState_Closed\"}")
api = Api()
door_status = api.get_lock_door_status(ACCESS_TOKEN, lock_id)
self.assertEqual(LockDoorStatus.CLOSED, door_status)
@requests_mock.Mocker()
def test_get_lock_door_status_with_open_response(self, mock):
lock_id = 1234
mock.register_uri(
"get",
API_GET_LOCK_STATUS_URL.format(lock_id=lock_id),
text="{\"doorState\": \"kAugLockDoorState_Open\"}")
api = Api()
door_status = api.get_lock_door_status(ACCESS_TOKEN, lock_id)
self.assertEqual(LockDoorStatus.OPEN, door_status)
@requests_mock.Mocker()
def test_get_lock_and_door_status_with_open_response(self, mock):
lock_id = 1234
mock.register_uri(
"get",
API_GET_LOCK_STATUS_URL.format(lock_id=lock_id),
text="{\"status\": \"kAugLockState_Unlocked\""
",\"doorState\": \"kAugLockDoorState_Open\"}")
api = Api()
door_status, status = api.get_lock_door_status(ACCESS_TOKEN, lock_id,
True)
self.assertEqual(LockDoorStatus.OPEN, door_status)
self.assertEqual(LockStatus.UNLOCKED, status)
@requests_mock.Mocker()
def test_get_lock_door_status_with_unknown_response(self, mock):
lock_id = 1234
mock.register_uri(
"get",
API_GET_LOCK_STATUS_URL.format(lock_id=lock_id),
text="{\"doorState\": \"not_advertising\"}")
api = Api()
door_status = api.get_lock_door_status(ACCESS_TOKEN, lock_id)
self.assertEqual(LockDoorStatus.UNKNOWN, door_status)
@requests_mock.Mocker()
def test_lock(self, mock):
lock_id = 1234
mock.register_uri(
"put",
API_LOCK_URL.format(lock_id=lock_id),
text="{\"status\":\"locked\","
"\"dateTime\":\"2017-12-10T07:43:39.056Z\","
"\"isLockStatusChanged\":false,"
"\"valid\":true}")
api = Api()
status = api.lock(ACCESS_TOKEN, lock_id)
self.assertEqual(LockStatus.LOCKED, status)
@requests_mock.Mocker()
def test_unlock(self, mock):
lock_id = 1234
mock.register_uri(
"put",
API_UNLOCK_URL.format(lock_id=lock_id),
text="{\"status\": \"unlocked\"}")
api = Api()
status = api.unlock(ACCESS_TOKEN, lock_id)
self.assertEqual(LockStatus.UNLOCKED, status)
@requests_mock.Mocker()
def test_get_pins(self, mock):
lock_id = 1234
mock.register_uri(
"get",
API_GET_PINS_URL.format(lock_id=lock_id),
text=load_fixture("get_pins.json"))
api = Api()
pins = api.get_pins(ACCESS_TOKEN, lock_id)
self.assertEqual(1, len(pins))
first = pins[0]
self.assertEqual("epoZ87XSPqxlFdsaYyJiRRVR", first.pin_id)
self.assertEqual("A6697750D607098BAE8D6BAA11EF8063", first.lock_id)
self.assertEqual("c3b3a94f-473z-61a3-a8d1-a6e99482787a", first.user_id)
self.assertEqual("in-use", first.state)
self.assertEqual("123456", first.pin)
self.assertEqual(646545456465161, first.slot)
self.assertEqual("one-time", first.access_type)
self.assertEqual("John", first.first_name)
self.assertEqual("Doe", first.last_name)
self.assertEqual(True, first.unverified)
self.assertEqual(utc_of(2016, 11, 26, 22, 27, 11, 176000),
first.created_at)
self.assertEqual(utc_of(2017, 11, 23, 00, 42, 19, 470000),
first.updated_at)
self.assertEqual(utc_of(2017, 12, 10, 3, 12, 55, 563000),
first.loaded_date)
self.assertEqual(utc_of(2018, 1, 1, 1, 1, 1, 563000),
first.access_start_time)
self.assertEqual(utc_of(2018, 12, 1, 1, 1, 1, 563000),
first.access_end_time)
self.assertEqual(utc_of(2018, 11, 5, 10, 2, 41, 684000),
first.access_times)
|
py | 1a2f7c408e191d644b96e8c0d1b6b78823f95403 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class SsoUri(Model):
"""The URI required to login to the supplemental portal from the Azure portal.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar sso_uri_value: The URI used to login to the supplemental portal.
:vartype sso_uri_value: str
"""
_validation = {
'sso_uri_value': {'readonly': True},
}
_attribute_map = {
'sso_uri_value': {'key': 'ssoUriValue', 'type': 'str'},
}
def __init__(self, **kwargs):
super(SsoUri, self).__init__(**kwargs)
self.sso_uri_value = None
|
py | 1a2f7e4e526e3e92e2a43901f135e68cba2e1455 | #------------------------------------------------------------------------------
# Copyright (c) 2005, Enthought, Inc.
# All rights reserved.
#
# This software is provided without warranty under the terms of the BSD
# license included in enthought/LICENSE.txt and may be redistributed only
# under the conditions described in the aforementioned license. The license
# is also available online at http://www.enthought.com/licenses/BSD.txt
# Thanks for using Enthought open source!
#
# Author: David C. Morrill
# Date: 12/02/2004
#  Description: Defines a wxPython ImageControl widget that is used by various
# trait editors to display trait values iconically.
#
# Symbols defined: ImageControl
#
#------------------------------------------------------------------------------
#-------------------------------------------------------------------------------
# Imports:
#-------------------------------------------------------------------------------
import wx
#-------------------------------------------------------------------------------
# 'ImageControl' class:
#-------------------------------------------------------------------------------
class ImageControl ( wx.Window ):
# Pens used to draw the 'selection' marker:
_selectedPenDark = wx.Pen(
wx.SystemSettings_GetColour( wx.SYS_COLOUR_3DSHADOW ), 1,
wx.SOLID )
_selectedPenLight = wx.Pen(
wx.SystemSettings_GetColour( wx.SYS_COLOUR_3DHIGHLIGHT ), 1,
wx.SOLID )
#---------------------------------------------------------------------------
# Initializes the object:
#---------------------------------------------------------------------------
def __init__ ( self, parent, bitmap, selected = None, handler = None ):
""" Initializes the object.
"""
wx.Window.__init__( self, parent, -1,
size = wx.Size( bitmap.GetWidth() + 10,
bitmap.GetHeight() + 10 ) )
self._bitmap = bitmap
self._selected = selected
self._handler = handler
self._mouse_over = False
self._button_down = False
# Set up the 'paint' event handler:
wx.EVT_PAINT( self, self._on_paint )
# Set up mouse event handlers:
wx.EVT_LEFT_DOWN( self, self._on_left_down )
wx.EVT_LEFT_UP( self, self._on_left_up )
wx.EVT_ENTER_WINDOW( self, self._on_enter )
wx.EVT_LEAVE_WINDOW( self, self._on_leave )
#---------------------------------------------------------------------------
# Gets/Sets the current selection state of the image:
#---------------------------------------------------------------------------
def Selected ( self, selected = None ):
""" Gets/Sets the current selection state of the image.
"""
if selected is not None:
selected = (selected != 0)
if selected != self._selected:
if selected:
for control in self.GetParent().GetChildren():
if (isinstance( control, ImageControl ) and
control.Selected()):
control.Selected( False )
break
self._selected = selected
self.Refresh()
return self._selected
#---------------------------------------------------------------------------
# Gets/Sets the current bitmap image:
#---------------------------------------------------------------------------
def Bitmap ( self, bitmap = None ):
if bitmap is not None:
if bitmap != self._bitmap:
self._bitmap = bitmap
self.Refresh()
return self._bitmap
#---------------------------------------------------------------------------
# Gets/Sets the current click handler:
#---------------------------------------------------------------------------
def Handler ( self, handler = None ):
""" Gets/Sets the current click handler.
"""
if handler is not None:
if handler != self._handler:
self._handler = handler
self.Refresh()
return self._handler
#---------------------------------------------------------------------------
# Handles the mouse entering the control:
#---------------------------------------------------------------------------
def _on_enter ( self, event = None ):
""" Handles the mouse entering the control.
"""
if self._selected is not None:
self._mouse_over = True
self.Refresh()
#---------------------------------------------------------------------------
# Handles the mouse leaving the control:
#---------------------------------------------------------------------------
def _on_leave ( self, event = None ):
""" Handles the mouse leaving the control.
"""
if self._mouse_over:
self._mouse_over = False
self.Refresh()
#---------------------------------------------------------------------------
# Handles the user pressing the mouse button:
#---------------------------------------------------------------------------
def _on_left_down ( self, event = None ):
""" Handles the user pressing the mouse button.
"""
if self._selected is not None:
self.CaptureMouse()
self._button_down = True
self.Refresh()
#---------------------------------------------------------------------------
# Handles the user clicking the control:
#---------------------------------------------------------------------------
def _on_left_up ( self, event = None ):
""" Handles the user clicking the control.
"""
need_refresh = self._button_down
if need_refresh:
self.ReleaseMouse()
self._button_down = False
if self._selected is not None:
wdx, wdy = self.GetClientSizeTuple()
x = event.GetX()
y = event.GetY()
if (0 <= x < wdx) and (0 <= y < wdy):
if self._selected != -1:
self.Selected( True )
elif need_refresh:
self.Refresh()
if self._handler is not None:
self._handler( self )
return
if need_refresh:
self.Refresh()
#---------------------------------------------------------------------------
# Handles the control being re-painted:
#---------------------------------------------------------------------------
def _on_paint ( self, event = None ):
""" Handles the control being re-painted.
"""
wdc = wx.PaintDC( self )
wdx, wdy = self.GetClientSizeTuple()
bitmap = self._bitmap
bdx = bitmap.GetWidth()
bdy = bitmap.GetHeight()
wdc.DrawBitmap( bitmap, (wdx - bdx) / 2, (wdy - bdy) / 2, True )
pens = [ self._selectedPenLight, self._selectedPenDark ]
bd = self._button_down
if self._mouse_over:
wdc.SetBrush( wx.TRANSPARENT_BRUSH )
wdc.SetPen( pens[ bd ] )
wdc.DrawLine( 0, 0, wdx, 0 )
wdc.DrawLine( 0, 1, 0, wdy )
wdc.SetPen( pens[ 1 - bd ] )
wdc.DrawLine( wdx - 1, 1, wdx - 1, wdy )
wdc.DrawLine( 1, wdy - 1, wdx - 1, wdy - 1 )
if self._selected == True:
wdc.SetBrush( wx.TRANSPARENT_BRUSH )
wdc.SetPen( pens[ bd ] )
wdc.DrawLine( 1, 1, wdx - 1, 1 )
wdc.DrawLine( 1, 1, 1, wdy - 1 )
wdc.DrawLine( 2, 2, wdx - 2, 2 )
wdc.DrawLine( 2, 2, 2, wdy - 2 )
wdc.SetPen( pens[ 1 - bd ] )
wdc.DrawLine( wdx - 2, 2, wdx - 2, wdy - 1 )
wdc.DrawLine( 2, wdy - 2, wdx - 2, wdy - 2 )
wdc.DrawLine( wdx - 3, 3, wdx - 3, wdy - 2 )
wdc.DrawLine( 3, wdy - 3, wdx - 3, wdy - 3 )
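# --- Illustrative usage sketch (not part of the original module) ---
# Note: a wx.App must exist before this module is imported, because the pens
# above query wx.SystemSettings at class-definition time. A typical embedding,
# using the classic (pre-Phoenix) wxPython API assumed throughout this file,
# looks roughly like this; the bitmap and handler are placeholders:
#
#     app = wx.App(False)
#     frame = wx.Frame(None, -1, 'ImageControl demo')
#     bmp = wx.EmptyBitmap(32, 32)                      # normally a real icon bitmap
#     ctrl = ImageControl(frame, bmp, selected=False,
#                         handler=lambda control: None)  # hypothetical no-op click handler
#     frame.Show(True)
#     app.MainLoop()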
|
py | 1a2f7e6f28d7911cef3cc3f506e871b8705a0374 | # stdlib
import dataclasses
from uuid import UUID
# third party
import sympc
from sympc.config import Config
from sympc.tensor import ShareTensor
# syft absolute
import syft
# syft relative
from ...generate_wrapper import GenerateWrapper
from ...proto.lib.sympc.share_tensor_pb2 import ShareTensor as ShareTensor_PB
from ..python.primitive_factory import PrimitiveFactory
def object2proto(obj: object) -> ShareTensor_PB:
share: ShareTensor = obj
session_uuid = ""
config = {}
if share.session_uuid is not None:
session_uuid = str(share.session_uuid)
config = dataclasses.asdict(share.config)
session_uuid_syft = session_uuid
conf_syft = syft.serialize(
PrimitiveFactory.generate_primitive(value=config), to_proto=True
)
proto = ShareTensor_PB(session_uuid=session_uuid_syft, config=conf_syft)
tensor_data = getattr(share.tensor, "data", None)
if tensor_data is not None:
proto.tensor.CopyFrom(syft.serialize(share.tensor, to_proto=True))
return proto
def proto2object(proto: ShareTensor_PB) -> ShareTensor:
if proto.session_uuid:
session = sympc.session.get_session(proto.session_uuid)
if session is None:
raise ValueError(f"The session {proto.session_uuid} could not be found")
config = dataclasses.asdict(session.config)
else:
config = syft.deserialize(proto.config, from_proto=True)
tensor = syft.deserialize(proto.tensor, from_proto=True)
share = ShareTensor(data=None, config=Config(**config))
if proto.session_uuid:
share.session_uuid = UUID(proto.session_uuid)
# Manually put the tensor since we do not want to re-encode it
share.tensor = tensor
return share
GenerateWrapper(
wrapped_type=ShareTensor,
import_path="sympc.tensor.ShareTensor",
protobuf_scheme=ShareTensor_PB,
type_object2proto=object2proto,
type_proto2object=proto2object,
)
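# --- Illustrative round-trip sketch (not part of the original module) ---
# Once the wrapper above is registered, (de)serialization goes through syft's
# generic entry points; the tensor value below is hypothetical.
#
#     import torch
#     share = ShareTensor(data=torch.tensor([1, 2, 3]), config=Config())
#     blob = syft.serialize(share, to_proto=True)          # -> ShareTensor_PB message
#     restored = syft.deserialize(blob, from_proto=True)   # -> ShareTensor with the same tensor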
|
py | 1a2f7f4898f84c6a6a06274e0fa55c407c5cce85 | # -= ml_breakdown.py =-
# __ by Morgan Loomis
# ____ ___ / / http://morganloomis.com
# / __ `__ \/ / Revision 4
# / / / / / / / 2018-05-13
# /_/ /_/ /_/_/ _________
# /_________/
#
# ______________
# - -/__ License __/- - - - - - - - - - - - - - - - - - - - - - - - - - - - -
#
# Copyright 2018 Morgan Loomis
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the
# Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
# ___________________
# - -/__ Installation __/- - - - - - - - - - - - - - - - - - - - - - - - - -
#
# Copy this file into your maya scripts directory, for example:
# C:/Documents and Settings/user/My Documents/maya/scripts/ml_breakdown.py
#
# Run the tool in a python shell or shelf button by importing the module,
# and then calling the primary function:
#
# import ml_breakdown
# ml_breakdown.ui()
#
#
# __________________
# - -/__ Description __/- - - - - - - - - - - - - - - - - - - - - - - - - - -
#
# Blend a keyframe or pose with the next or previous keys, essentially creating a
# breakdown pose that is weighted one way or the other.
#
# ____________
# - -/__ Usage __/- - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
#
# Press the "Breakdown Dragger" button to enter the dragger, and the cursor will
# turn into a hand. Left-click and hold in the viewport, and then drag either left
# or right to weight the key to the next or previous key. Press and hold the
# middle mouse button to weight the key toward or away from the average of the
# surrounding keys. Alternatively, set the slider to the desired weight, and press
# the Next, Previous or Average buttons to increment the breakdown. Right-click
# the buttons to assign them to hotkeys. If you have no keys selected, the tool will
# act only on curves that are visible in the graph editor. If there are no keys
# at the current frame, keys will be set.
#
# ____________
# - -/__ Video __/- - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
#
# http://www.youtube.com/watch?v=D8yD4zbHTP8
#
# _________
# - -/__ Ui __/- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
#
# [Breakdown Dragger] : Drag in the viewport to weight a breakdown toward the next or previous frame.
# [<<] : Weight toward the previous frame.
# [Average] : Weight toward the average of the next and previous frame.
# [>>] : Weight toward the next frame.
#
# ___________________
# - -/__ Requirements __/- - - - - - - - - - - - - - - - - - - - - - - - - -
#
# This script requires the ml_utilities module, which can be downloaded here:
# https://raw.githubusercontent.com/morganloomis/ml_tools/master/ml_utilities.py
#
# __________
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - /_ Enjoy! _/- - -
__author__ = 'Morgan Loomis'
__license__ = 'MIT'
__revision__ = 4
__category__ = 'animation'
shelfButton = {'annotation': 'Click to weight keys by dragging, double click to open UI.',
'command': 'import ml_breakdown;ml_breakdown.drag()',
'doubleClickCommand': 'import ml_breakdown;ml_breakdown.ui()',
'imageOverlayLabel': 'BD',
'menuItem': [['Breakdown UI', 'import ml_breakdown;ml_breakdown.ui()'],
['<< Previous', 'import ml_breakdown;ml_breakdown.weightPrevious()'],
['>> Next', 'import ml_breakdown;ml_breakdown.weightNext()'],
['Average', 'import ml_breakdown;ml_breakdown.weightAverage()']],
'order': 12}
import maya.cmds as mc
from maya import OpenMaya
from functools import partial
try:
import ml_utilities as utl
utl.upToDateCheck(32)
except ImportError:
result = mc.confirmDialog( title='Module Not Found',
message='This tool requires the ml_utilities module. Once downloaded you will need to restart Maya.',
button=['Download Module','Cancel'],
defaultButton='Cancel', cancelButton='Cancel', dismissString='Cancel' )
if result == 'Download Module':
mc.showHelp('http://morganloomis.com/tool/ml_utilities/',absolute=True)
def ui():
'''
User interface for breakdown
'''
with utl.MlUi('ml_breakdown', 'Breakdown Tools', width=400, height=180, info='''Select objects.
Press Breakdown Dragger to create a new key and weight it by dragging in the viewport.
Otherwise use the increment buttons to nudge a key's value toward the next or previous key.''') as win:
win.buttonWithPopup(label='Breakdown Dragger', command=drag, annotation='Drag in the viewport to weight a breakdown toward the next or previous frame.',
shelfLabel='BDD')
mc.separator(height=20)
mc.floatSliderGrp('ml_breakdown_value_floatSlider', value=0.2, field=True, minValue=0, maxValue=2)
mc.paneLayout(configuration='vertical3',separatorThickness=1)
win.ButtonWithPopup(label='<<', command=weightPrevious, annotation='Weight toward the previous frame.', shelfLabel='<', shelfIcon='defaultTwoStackedLayout',
readUI_toArgs={'weight':'ml_breakdown_value_floatSlider'})
win.ButtonWithPopup(label='Average', command=weightAverage, annotation='Weight toward the average of the next and previous frame.', shelfLabel='><', shelfIcon='defaultTwoStackedLayout',
readUI_toArgs={'weight':'ml_breakdown_value_floatSlider'})
win.ButtonWithPopup(label='>>', command=weightNext, annotation='Weight toward the next frame.', shelfLabel='>', shelfIcon='defaultTwoStackedLayout',
readUI_toArgs={'weight':'ml_breakdown_value_floatSlider'})
def quickBreakDownUI():
winName = 'ml_quickBreakdownWin'
if mc.window(winName, exists=True):
mc.deleteUI(winName)
mc.window(winName, title='ml :: QBD', iconName='Quick Breakdown', width=100, height=500)
mc.columnLayout(adj=True)
mc.paneLayout(configuration='vertical2', separatorThickness=1)
mc.text('<<')
mc.text('>>')
mc.setParent('..')
for v in (10,20,50,80,90,100,110,120,150):
mc.paneLayout(configuration='vertical2',separatorThickness=1)
mc.button(label=str(v)+' %', command=partial(weightPrevious,v/100.0))
mc.button(label=str(v)+' %', command=partial(weightNext,v/100.0))
mc.setParent('..')
mc.showWindow(winName)
mc.window(winName, edit=True, width=100, height=250)
def drag(*args):
'''The primary command to run the tool'''
BreakdownDragger()
def weightPrevious(weight=0.2, *args):
weightBreakdownStep(direction='previous', weight=weight)
def weightAverage(weight=0.2, *args):
weightBreakdownStep(direction='average', weight=weight)
def weightNext(weight=0.2, *args):
weightBreakdownStep(direction='next', weight=weight)
def weightBreakdownStep(direction='next', weight=0.2):
keySel = utl.KeySelection()
if keySel.selectedKeys():
pass
elif keySel.visibleInGraphEditor():
keySel.setKeyframe()
elif keySel.keyedChannels():
keySel.setKeyframe()
if not keySel.curves:
return
times = list()
values = list()
data = list()
for curve in keySel.curves:
if keySel.selected:
times = mc.keyframe(curve, query=True, timeChange=True, sl=True)
values = mc.keyframe(curve, query=True, valueChange=True, sl=True)
else:
times = [keySel.time]
values = mc.keyframe(curve, time=keySel.time, query=True, valueChange=True)
for i,v in zip(times,values):
nextTime = mc.findKeyframe(curve, time=(i,), which='next')
n = mc.keyframe(curve, time=(nextTime,), query=True, valueChange=True)[0]
prevTime = mc.findKeyframe(curve, time=(i,), which='previous')
p = mc.keyframe(curve, time=(prevTime,), query=True, valueChange=True)[0]
data.append([curve,i,v,n,p])
for d in data:
value = None
if direction == 'next':
value = d[2]+((d[3]-d[2])*weight)
elif direction == 'previous':
value = d[2]+((d[4]-d[2])*weight)
elif direction == 'average':
value = d[2]+(((d[3]+d[4])/2-d[2])*weight)
else: break
mc.keyframe(d[0], time=(d[1],), valueChange=value)
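# Illustrative arithmetic (not part of the original tool): the update above is a
# plain linear interpolation toward the neighbouring key. With a current value of
# 0.0, a next-key value of 10.0 and weight=0.2:
#     value = 0.0 + ((10.0 - 0.0) * 0.2)   # -> 2.0, i.e. 20% of the way to the next key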
class BreakdownDragger(utl.Dragger):
'''Creates the tool and manages the data'''
def __init__(self,
name='mlBreakdownDraggerContext',
minValue=None,
maxValue=None,
defaultValue=0,
title = 'Breakdown'):
self.keySel = utl.KeySelection()
if self.keySel.selectedKeys():
pass
elif self.keySel.visibleInGraphEditor():
self.keySel.setKeyframe()
elif self.keySel.keyedChannels():
self.keySel.setKeyframe()
if not self.keySel.curves:
return
utl.Dragger.__init__(self, defaultValue=defaultValue, minValue=minValue, maxValue=maxValue, name=name, title=title)
#setup tangent type
itt,ott = utl.getHoldTangentType()
self.time = dict()
self.value = dict()
self.next = dict()
self.prev = dict()
self.average = dict()
for curve in self.keySel.curves:
if self.keySel.selected:
self.time[curve] = mc.keyframe(curve, query=True, timeChange=True, sl=True)
self.value[curve] = mc.keyframe(curve, query=True, valueChange=True, sl=True)
else:
self.time[curve] = self.keySel.time
self.value[curve] = mc.keyframe(curve, time=self.keySel.time, query=True, valueChange=True)
self.next[curve] = list()
self.prev[curve] = list()
self.average[curve] = list()
for i in self.time[curve]:
next = mc.findKeyframe(curve, time=(i,), which='next')
prev = mc.findKeyframe(curve, time=(i,), which='previous')
n = mc.keyframe(curve, time=(next,), query=True, valueChange=True)[0]
p = mc.keyframe(curve, time=(prev,), query=True, valueChange=True)[0]
self.next[curve].append(n)
self.prev[curve].append(p)
self.average[curve].append((n+p)/2)
#set the tangents on this key, and the next and previous, so they flatten properly
mc.keyTangent(curve, time=(i,), itt=itt, ott=ott)
mc.keyTangent(curve, time=(next,), itt=itt)
mc.keyTangent(curve, time=(prev,), ott=ott)
self.setTool()
self.drawString('Left: Weight Prev/Next, Middle: Weight Average')
OpenMaya.MGlobal.displayWarning('Left: Weight Prev/Next, Middle: Weight Average')
def dragLeft(self):
'''This is activated by the left mouse button, and weights to the next or previous keys.'''
#clamp it
if self.x < -1:
self.x = -1
if self.x > 1:
self.x = 1
if self.x > 0:
self.drawString('>> '+str(int(self.x*100))+' %')
for curve in self.keySel.curves:
for i,v,n in zip(self.time[curve],self.value[curve],self.next[curve]):
mc.keyframe(curve, time=(i,), valueChange=v+((n-v)*self.x))
elif self.x <0:
self.drawString('<< '+str(int(self.x*-100))+' %')
for curve in self.keySel.curves:
for i,v,p in zip(self.time[curve],self.value[curve],self.prev[curve]):
mc.keyframe(curve, time=(i,), valueChange=v+((p-v)*(-1*self.x)))
def dragMiddle(self):
'''This is activated by the middle mouse button, and weights to the average of the surrounding keys.'''
#clamp it
if self.x < -1:
self.x = -1
if self.x > 1:
self.x = 1
self.drawString('Average '+str(int(self.x*100))+' %')
for curve in self.keySel.curves:
for i,v,n in zip(self.time[curve],self.value[curve],self.average[curve]):
mc.keyframe(curve, time=(i,), valueChange=v+((n-v)*self.x))
def dragShiftLeft(self):
'''This is activated by Shift and the left mouse button, and weights to the next or previous keys, without clamping.'''
if self.x > 0:
self.drawString('>> '+str(int(self.x*100))+' %')
for curve in self.keySel.curves:
for i,v,n in zip(self.time[curve],self.value[curve],self.next[curve]):
mc.keyframe(curve, time=(i,), valueChange=v+((n-v)*self.x))
elif self.x <0:
self.drawString('<< '+str(int(self.x*-100))+' %')
for curve in self.keySel.curves:
for i,v,p in zip(self.time[curve],self.value[curve],self.prev[curve]):
mc.keyframe(curve, time=(i,), valueChange=v+((p-v)*(-1*self.x)))
if __name__ == '__main__':
quickBreakDownUI()
# ______________________
# - -/__ Revision History __/- - - - - - - - - - - - - - - - - - - - - - - -
#
# Revision 1: 2015-05-13 : First publish.
#
# Revision 2: 2015-05-13 : Documentation updates.
#
# Revision 3: 2018-02-17 : Updating license to MIT.
#
# Revision 4: 2018-05-13 : shelf support |
py | 1a2f7fb634fc3850ddf798a231305847e25ec0e1 | import asyncio
import io
import userbot.plugins.sql_helper.no_log_pms_sql as no_log_pms_sql
from telethon import events, errors, functions, types
from userbot.utils import admin_cmd
from userbot.uniborgConfig import Config
@borg.on(admin_cmd(pattern="nccreatedch"))
async def create_dump_channel(event):
if Config.PM_LOGGR_BOT_API_ID is None:
result = await borg(functions.channels.CreateChannelRequest( # pylint:disable=E0602
title=f"UniBorg-{borg.uid}-PM_LOGGR_BOT_API_ID-data",
about="PM_LOGGR_BOT_API_ID // Do Not Touch",
megagroup=False
))
logger.info(result)
created_chat_id = result.chats[0].id
result = await borg.edit_admin( # pylint:disable=E0602
entity=created_chat_id,
user=Config.TG_BOT_USER_NAME_BF_HER,
is_admin=True,
title="Editor"
)
logger.info(result)
with io.BytesIO(str.encode(str(created_chat_id))) as out_file:
out_file.name = "PLEASE.IGNORE.dummy.file"
await borg.send_file(
created_chat_id,
out_file,
force_document=True,
allow_cache=False,
caption=f"Please set `PM_LOGGR_BOT_API_ID` to `{created_chat_id}`",
reply_to=1
)
await event.delete()
else:
await event.edit(f"**is configured**. [please do not touch](https://t.me/c/{Config.PM_LOGGR_BOT_API_ID}/2)")
@borg.on(admin_cmd(pattern="nolog ?(.*)"))
async def set_no_log_p_m(event):
if Config.PM_LOGGR_BOT_API_ID is not None:
reason = event.pattern_match.group(1)
chat = await event.get_chat()
if event.is_private:
if not no_log_pms_sql.is_approved(chat.id):
no_log_pms_sql.approve(chat.id)
await event.edit("Won't Log Messages from this chat")
await asyncio.sleep(3)
await event.delete()
@borg.on(admin_cmd(pattern="enlog ?(.*)"))
async def set_en_log_p_m(event):
if Config.PM_LOGGR_BOT_API_ID is not None:
reason = event.pattern_match.group(1)
chat = await event.get_chat()
if event.is_private:
if no_log_pms_sql.is_approved(chat.id):
no_log_pms_sql.disapprove(chat.id)
await event.edit("Will Log Messages from this chat")
await asyncio.sleep(3)
await event.delete()
@borg.on(events.NewMessage(incoming=True))
async def on_new_private_message(event):
if Config.PM_LOGGR_BOT_API_ID is None:
return
if not event.is_private:
return
message_text = event.message.message
message_media = event.message.media
message_id = event.message.id
message_to_id = event.message.to_id
chat_id = event.chat_id
# logger.info(chat_id)
sender = await borg.get_entity(chat_id)
if chat_id == borg.uid:
# don't log Saved Messages
return
if sender.bot:
# don't log bots
return
if sender.verified:
# don't log verified accounts
return
if not no_log_pms_sql.is_approved(chat_id):
# log pms
await do_log_pm_action(chat_id, message_text, message_media)
@borg.on(events.ChatAction(blacklist_chats=Config.UB_BLACK_LIST_CHAT))
async def on_new_chat_action_message(event):
if Config.PM_LOGGR_BOT_API_ID is None:
return
# logger.info(event.stringify())
chat_id = event.chat_id
message_id = event.action_message.id
if event.created or event.user_added:
added_by_users = event.action_message.action.users
if borg.uid in added_by_users:
added_by_user = event.action_message.from_id
# someone added me to chat
the_message = ""
the_message += "#MessageActionChatAddUser\n\n"
the_message += f"[User](tg://user?id={added_by_user}): `{added_by_user}`\n"
the_message += f"[Private Link](https://t.me/c/{chat_id}/{message_id})\n"
await borg.send_message(
entity=Config.PM_LOGGR_BOT_API_ID,
message=the_message,
# reply_to=,
# parse_mode="html",
link_preview=False,
# file=message_media,
silent=True
)
@borg.on(events.Raw())
async def on_new_channel_message(event):
if Config.PM_LOGGR_BOT_API_ID is None:
return
if tgbot is None:
return
# logger.info(event.stringify())
if isinstance(event, types.UpdateChannel):
channel_id = event.channel_id
message_id = 2
# someone added me to channel
# TODO: https://t.me/TelethonChat/153947
the_message = ""
the_message += "#MessageActionChatAddUser\n\n"
# the_message += f"[User](tg://user?id={added_by_user}): `{added_by_user}`\n"
the_message += f"[Private Link](https://t.me/c/{channel_id}/{message_id})\n"
await borg.send_message(
entity=Config.PM_LOGGR_BOT_API_ID,
message=the_message,
# reply_to=,
# parse_mode="html",
link_preview=False,
# file=message_media,
silent=True
)
"""@borg.on(events.Raw())
async def _(event):
if Config.PM_LOGGR_BOT_API_ID is None:
return
if tgbot is None:
return
logger.info(event.stringify())"""
"""if tgbot is not None:
@tgbot.on(events.Raw())
async def _(event):
if Config.PM_LOGGR_BOT_API_ID is None:
return
logger.info(event.stringify())"""
async def do_log_pm_action(chat_id, message_text, message_media):
the_message = ""
the_message += "#LOG_PMs\n\n"
the_message += f"[User](tg://user?id={chat_id}): {chat_id}\n"
the_message += f"Message: {message_text}\n"
# the_message += f"Media: {message_media}"
await borg.send_message(
entity=Config.PM_LOGGR_BOT_API_ID,
message=the_message,
# reply_to=,
# parse_mode="html",
link_preview=False,
file=message_media,
silent=True
)
|
py | 1a2f80da119b07d1f38e99b55b0888ac9ab9787b | # -*- coding: UTF-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import argparse
import logging
import warnings
from rasa_core.actions import Action
from rasa_core.agent import Agent
from rasa_core.channels.console import ConsoleInputChannel
from rasa_core.events import SlotSet
from rasa_core.interpreter import RasaNLUInterpreter
from rasa_core.policies.keras_policy import KerasPolicy
from rasa_core.policies.memoization import MemoizationPolicy
logger = logging.getLogger(__name__)
support_search = ["话费", "流量"]
def extract_item(item):
"""
    Check whether the queried item is supported; this helper only exists to work
    around the limited training data.
    :param item: item extracted from the tracker, e.g. "流量" or "查流量"
:return:
"""
if item is None:
return None
for name in support_search:
if name in item:
return name
return None
class ActionSearchConsume(Action):
def name(self):
return 'action_search_consume'
def run(self, dispatcher, tracker, domain):
item = tracker.get_slot("item")
item = extract_item(item)
if item is None:
dispatcher.utter_message("您好,我现在只会查话费和流量")
dispatcher.utter_message("你可以这样问我:“帮我查话费”")
return []
time = tracker.get_slot("time")
if time is None:
dispatcher.utter_message("您想查询哪个月的话费?")
return []
# query database here using item and time as key. but you may normalize time format first.
dispatcher.utter_message("好,请稍等")
if item == "流量":
dispatcher.utter_message("您好,您{}共使用{}二百八十兆,剩余三十兆。".format(time, item))
else:
dispatcher.utter_message("您好,您{}共消费二十八元。".format(time))
return []
class MobilePolicy(KerasPolicy):
def model_architecture(self, num_features, num_actions, max_history_len):
"""Build a Keras model and return a compiled model."""
from keras.layers import LSTM, Activation, Masking, Dense
from keras.models import Sequential
n_hidden = 32 # size of hidden layer in LSTM
# Build Model
batch_shape = (None, max_history_len, num_features)
model = Sequential()
model.add(Masking(-1, batch_input_shape=batch_shape))
model.add(LSTM(n_hidden, batch_input_shape=batch_shape))
model.add(Dense(input_dim=n_hidden, output_dim=num_actions))
model.add(Activation("softmax"))
model.compile(loss="categorical_crossentropy",
optimizer="adam",
metrics=["accuracy"])
logger.debug(model.summary())
return model
def train_nlu():
from rasa_nlu.training_data import load_data
from rasa_nlu.config import RasaNLUModelConfig
from rasa_nlu.model import Trainer
from rasa_nlu import config
training_data = load_data("data/nlu.json")
trainer = Trainer(config.load("data/nlu_model_config.json"))
trainer.train(training_data)
model_directory = trainer.persist("models/", project_name="ivr", fixed_model_name="demo")
return model_directory
def train_dialogue(domain_file="data/domain.yml",
model_path="models/dialogue",
training_data_file="data/stories.md"):
from rasa_core.featurizers import (MaxHistoryTrackerFeaturizer,
BinarySingleStateFeaturizer)
featurizer = MaxHistoryTrackerFeaturizer(BinarySingleStateFeaturizer(), max_history=5)
agent = Agent(domain_file,
policies=[MemoizationPolicy(max_history=5), KerasPolicy(featurizer)])
agent.train(
training_data_file,
epochs=200,
batch_size=16,
augmentation_factor=50,
validation_split=0.2
)
agent.persist(model_path)
return agent
def run_ivrbot_online(input_channel=ConsoleInputChannel(),
interpreter=RasaNLUInterpreter("models/ivr/demo"),
domain_file="data/domain.yml",
training_data_file="data/stories.md"):
from rasa_core.featurizers import (MaxHistoryTrackerFeaturizer,
BinarySingleStateFeaturizer)
featurizer = MaxHistoryTrackerFeaturizer(BinarySingleStateFeaturizer(), max_history=5)
agent = Agent(domain_file,
policies=[MemoizationPolicy(max_history=5), KerasPolicy(featurizer)],
interpreter=interpreter)
agent.train_online(training_data_file,
input_channel=input_channel,
batch_size=50,
epochs=200,
max_training_samples=300)
return agent
def run(serve_forever=True):
agent = Agent.load("models/dialogue",
interpreter=RasaNLUInterpreter("models/ivr/demo"))
if serve_forever:
agent.handle_channel(ConsoleInputChannel())
return agent
if __name__ == "__main__":
logging.basicConfig(level="INFO")
parser = argparse.ArgumentParser(
description="starts the bot")
parser.add_argument(
"task",
choices=["train-nlu", "train-dialogue", "run", "online-train"],
help="what the bot should do - e.g. run or train?")
task = parser.parse_args().task
# decide what to do based on first parameter of the script
if task == "train-nlu":
train_nlu()
elif task == "train-dialogue":
train_dialogue()
elif task == "run":
run()
elif task == "online-train":
run_ivrbot_online()
else:
warnings.warn("Need to pass either 'train-nlu', 'train-dialogue', 'run' or 'online-train' to use the script.")
exit(1)
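# Example invocations (illustrative; assumes this file is saved as bot.py):
#     python bot.py train-nlu
#     python bot.py train-dialogue
#     python bot.py run
#     python bot.py online-train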
|
py | 1a2f81832ea5fcfcc38240a0cac064b8938c36d2 | """
.. module: cloudaux.aws.decorators
:platform: Unix
:copyright: (c) 2018 by Netflix Inc., see AUTHORS for more
:license: Apache, see LICENSE for more details.
.. moduleauthor:: Patrick Kelley <[email protected]> @monkeysecurity
.. moduleauthor:: Mike Grima <[email protected]>
"""
import functools
import time
import boto
import botocore
RATE_LIMITING_ERRORS = ['Throttling', 'RequestLimitExceeded', 'SlowDown', 'RequestThrottled']
def rate_limited(max_attempts=None, max_delay=4):
def decorator(f):
metadata = {
'count': 0,
'delay': 0
}
@functools.wraps(f)
def decorated_function(*args, **kwargs):
def increase_delay(e):
if metadata['delay'] == 0:
metadata['delay'] = 1
elif metadata['delay'] < max_delay:
metadata['delay'] *= 2
if max_attempts and metadata['count'] > max_attempts:
raise e
metadata['count'] = 0
while True:
metadata['count'] += 1
if metadata['delay'] > 0:
time.sleep(metadata['delay'])
try:
retval = f(*args, **kwargs)
metadata['delay'] = 0
return retval
except botocore.exceptions.ClientError as e:
if e.response["Error"]["Code"] not in RATE_LIMITING_ERRORS:
raise e
increase_delay(e)
except boto.exception.BotoServerError as e:
if e.error_code not in RATE_LIMITING_ERRORS:
raise e
increase_delay(e)
return decorated_function
return decorator
def paginated(response_key, request_pagination_marker="Marker", response_pagination_marker="Marker"):
def decorator(func):
@functools.wraps(func)
def decorated_function(*args, **kwargs):
results = []
while True:
response = func(*args, **kwargs)
results.extend(response[response_key])
if ('NextMarker' in response) or ('IsTruncated' in response and response['IsTruncated']):
kwargs.update({request_pagination_marker: response[response_pagination_marker]})
else:
break
return results
return decorated_function
return decorator
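# --- Illustrative usage sketch (not part of the original module) ---
# The two decorators compose; `client` below is a hypothetical boto3/botocore
# client supplied by the caller, and 'Roles'/'list_roles' are just example values.
#
#     @paginated('Roles')
#     @rate_limited(max_attempts=5)
#     def list_roles(client, **kwargs):
#         return client.list_roles(**kwargs)
#
# A call to list_roles(client) then retries throttled requests with exponential
# backoff (capped at max_delay seconds) and keeps following pagination markers
# until the full 'Roles' list has been collected.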
|
py | 1a2f818a19975f04f66148cbe31157a7baa193a8 | # coding: utf-8
"""
FlashBlade REST API
A lightweight client for FlashBlade REST API 2.3, developed by Pure Storage, Inc. (http://www.purestorage.com/).
OpenAPI spec version: 2.3
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re
import six
import typing
from ....properties import Property
if typing.TYPE_CHECKING:
from pypureclient.flashblade.FB_2_3 import models
class ArrayConnectionResponse(object):
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'items': 'list[ArrayConnection]'
}
attribute_map = {
'items': 'items'
}
required_args = {
}
def __init__(
self,
items=None, # type: List[models.ArrayConnection]
):
"""
Keyword args:
items (list[ArrayConnection])
"""
if items is not None:
self.items = items
def __setattr__(self, key, value):
if key not in self.attribute_map:
raise KeyError("Invalid key `{}` for `ArrayConnectionResponse`".format(key))
self.__dict__[key] = value
def __getattribute__(self, item):
value = object.__getattribute__(self, item)
if isinstance(value, Property):
return None
else:
return value
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
if hasattr(self, attr):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(ArrayConnectionResponse, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ArrayConnectionResponse):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
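# Illustrative sketch (not part of the generated module):
#     resp = ArrayConnectionResponse(items=[])
#     resp.to_dict()   # -> {'items': []}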
|
py | 1a2f81c3e83be94b291073e1b5cb95eefa99a9ae | "this program runs on ngc and syncs data with a local master machine"
import time
import os
import ray
@ray.remote
def sync(agentparams):
master_datadir = agentparams['master_datadir']
master = agentparams.get('master', 'deepthought')
local_datadir = '/result'
while True:
print('transfer tfrecords to master')
cmd = 'rsync -a --update {} {}:{}'.format(local_datadir + '/', master, master_datadir)
print('executing: {}'.format(cmd))
os.system(cmd)
time.sleep(10)
if __name__ == '__main__':
conf = {}
conf['master_datadir'] = '/raid/ngc2/pushing_data/cartgripper/mj_multi_obj_push3_75step'
    ray.init()
    # a @ray.remote function must be invoked via .remote(); block on the (endless) sync loop
    ray.get(sync.remote(conf)) |
py | 1a2f832053fed1d3e402804f3c7c15ac914da468 | import tensorflow as tf
from build_model import embedded_neural_net, compile_model
def train_model(train_dataset: tf.data.Dataset, validation_dataset: tf.data.Dataset, max_features, patience=4,
epochs=10):
callback = tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=patience)
model_structure = embedded_neural_net(max_features)
model = compile_model(model_structure)
history = model.fit(train_dataset, validation_data=validation_dataset, epochs=epochs, callbacks=[callback])
return model, history
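# Illustrative usage sketch (not part of the original module): the directory
# layout and vocabulary size below are hypothetical, and depending on how
# build_model.embedded_neural_net is defined the text may need a vectorization
# step before being fed to the model.
#
#     train_ds = tf.keras.utils.text_dataset_from_directory("data/train", batch_size=32)
#     val_ds = tf.keras.utils.text_dataset_from_directory("data/val", batch_size=32)
#     model, history = train_model(train_ds, val_ds, max_features=10000)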
|
py | 1a2f8358cd2da66b6bd0cb458a18ee8b8b9a3c91 | import redis
r = redis.Redis()
from datetime import date
today = str(date.today())
import datetime
import pickle
existed = False
stand = [460, 1.3, .7]
def gas_lvl(gas):
status = 'normal'
gas = gas.replace(' ','')
gases = gas.split('|')
ox = float(gases[0])
red = float(gases[1])
nh = float(gases[2])
ox_diff = (abs(ox-stand[0]) / stand[0] ) * 100
red_diff = (abs(red-stand[1]) / stand[1] ) * 100
nh_diff = (abs(nh-stand[2]) / stand[2] ) * 100
if (ox_diff > 30 or red_diff > 30 or nh_diff > 30):
status = 'abnormal'
return status
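# Illustrative values (not part of the original module): readings are compared
# against the `stand` baseline [460, 1.3, .7]; any channel more than 30% off is flagged.
#     gas_lvl("460 | 1.3 | 0.7")   # -> 'normal'
#     gas_lvl("700 | 1.3 | 0.7")   # -> 'abnormal' (oxidising channel ~52% off baseline)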
class RedisHelper:
def __init__(self):
self.r = redis.Redis()
self.existed = False
self.dev_key = 'devices'
def read(self, span=1800):
current = {
"temp": -1,
"humidity" : -1,
"gas" : "abs",
"alerts" : -2,
"messages" : -3
}
msg, msgC = self.messages()
currentTime = datetime.datetime.now()
day = currentTime.strftime("%d/%m/%Y")
key = day
#print(key)
if (self.r.exists(key)):
persisted = pickle.loads(self.r.get(key))
self.existed = True
self.dev_key = 'devices'
#print(persisted)
else:
persisted = {}
timeHM = datetime.datetime.now()
temp = 0
humidity = 0
pressure = 0
count = 0
for keys in persisted:
date_time_obj = datetime.datetime.strptime(keys, '%d/%m/%Y@%H:%M:%S')
diff = timeHM - date_time_obj
#print(diff.seconds, span)
if (diff.seconds <= span) :
count = count + 1
temp = temp + persisted[keys]['temp']
humidity = humidity + persisted[keys]['humidity']
pressure = pressure + persisted[keys]['pressure']
#print(keys, persisted[keys], diff)
if (count > 0):
#print(f"averages are {temp/count} {humidity/count} {pressure/count} {count} ")
last = list(persisted.keys())
last_one = len(last) - 1
gases = persisted[last[last_one]]['gas']
if (gas_lvl(gases) != 'normal'):
alert_message = 'Alert!'
else:
alert_message = 'Normal'
current = {
"temp": round(temp/count,2),
"humidity" : round(humidity/count,2),
"pressure" : round(pressure/count,2),
"gas" : gas_lvl(gases),
"alerts" : alert_message,
"messages" : msgC,
"count" : count
}
return current
def devices_read(self):
if (r.exists(self.dev_key)):
devices = pickle.loads(self.r.get(self.dev_key))
else:
devices = {}
docs = []
for dev in devices:
docs.append(devices[dev])
return docs
    def devices_update(self, dev):
        # devices_read() returns a list view, so re-load the stored dict (keyed by
        # device id) in order to drop the entry by key before persisting it again
        if self.r.exists(self.dev_key):
            devices = pickle.loads(self.r.get(self.dev_key))
        else:
            devices = {}
        devices.pop(dev, None)
        self.r.set(self.dev_key, pickle.dumps(devices))
        return devices
def messages(self):
if (r.exists('messages')):
messages = pickle.loads(self.r.get('messages'))
else:
messages = {}
return messages, len(messages)
|
py | 1a2f8500def564faf471bf2a79af3c8ccc1af5de | # import pygame
import pygame
# import the system library
import sys
# import our own classes
from Model.class_Hero import Hero
from Model.class_Platform import Platform
from Model.class_Atk import Atk
from Model.class_SacDeSable import SacDeSable
from utils import load_imgs
def exit_game(key):
from Model.class_Menu import run
if key == pygame.K_RETURN:
run()
# pygame initialization
def main(self):
pygame.init()
WIDTH = 1280
HEIGHT = 720
fenetre = pygame.display.set_mode((WIDTH, HEIGHT), pygame.RESIZABLE)
fond_e = pygame.transform.scale(
pygame.image.load("Images/Background/niveauRecurciforce.png").convert(), (1280, 720)
)
blanchonAa1 = pygame.image.load("Images/Spell/aa1.png").convert()
blanchonAa2 = pygame.image.load("Images/Spell/aa2.png").convert()
blanchonAa3 = pygame.image.load("Images/Spell/aa3.png").convert()
blanchonAaMidAir = pygame.image.load("Images/Spell/aaMidAir.png").convert()
blanchonVector = pygame.image.load("Images/Spell/vector.png").convert()
imagesBlanchonList = {
"Ridle": ["b_idle_1", "b_idle_2"],
"Rmove": ["b_move_0", "b_move_1", "b_move_2", "b_move_1"],
"Ffall": ["b_jumpdown_1", "b_jumpdown_2"],
"Fcrouch": ["b_crouch_1", "b_crouch_2"],
"Rslide": ["b_slide"],
"Fjump": ["b_jumpup_1", "b_jumpup_2", "b_jumpup_3"],
"Oaa1": ["b_aa1_1", "b_aa1_2", "b_aa1_3", "b_aa1_3"],
"Oaa2": ["b_aa2_1", "b_aa2_2", "b_aa2_3", "b_aa2_4", "b_aa2_5", "b_aa2_5"],
"Oaa3": ["b_aa3_1", "b_aa3_2", "b_aa3_3", "b_aa3_4", "b_aa3_5", "b_aa3_6", "b_aa3_6", "b_aa3_6"],
"Oaaa": ["b_aa2_2", "b_atkjumpdown", "b_atkjumpdown"],
"Odmg": ["b_dmg_2", "b_dmg_2"],
"D": ["b_gameover", "b_gameover"],
}
path = "Images/Blanchon"
imagesBlanchon = load_imgs(imagesBlanchonList, path)
blanchon_atkList = [
Atk("autoHit1", 0.5, 32, 32, load_imgs({"idle": ["particlehit"]}, path), 10, 5, -1, 0, 0, 0, 225),
Atk("autoHit2", 0.7, 32, 32, load_imgs({"idle": ["particlehit"]}, path), 15, 5, -2, 0, 0, 0, 300),
Atk("autoHit3", 0.7, 32, 32, load_imgs({"idle": ["particlehit"]}, path), 15, 6, -16, 0, 0, 0, 500),
Atk("EOF", 4, 32, 17, load_imgs({"idle": ["vector"]}, path), 15, 4, -1, 0, 4, 0, 2000),
Atk("airAutoHit", 1, 64, 32, load_imgs({"idle": ["particlehit"]}, path), 10, 5, 5, 0, 0, 0, 300)
]
blanchon = Hero(200, 200, 64, 64, imagesBlanchon, 0.3, 0.7, 8, 6, WIDTH, 100.0, blanchon_atkList)
sol = Platform(0, HEIGHT-70, WIDTH, 10, pygame.image.load("Images/plateformtest.png").convert_alpha(), 0.4)
    # INIT PLATFORMS
platforms = [
Platform(100, HEIGHT - 180, 100, 10, pygame.image.load("Images/plateform.png").convert_alpha(), 1),
Platform(350, HEIGHT - 280, 100, 10, pygame.image.load("Images/plateform.png").convert_alpha(), 1)
]
    # INIT ENEMIES
foes = [SacDeSable(600, 500, WIDTH, 1)]
# INIT SYSTEM CLOCK
clock = pygame.time.Clock()
fps = 60
Mult = pygame.font.Font("Polices/Lady Radical.ttf", 25)
Mult.set_bold(False)
MultB = pygame.font.Font("Polices/Lady Radical.ttf", 40)
MultB.set_bold(True)
damageFont = pygame.font.Font("Polices/Lady Radical.ttf", 30)
# damageFont.set_bold(True)
damageArray = []
timerDamage = 300
    # TUTORIAL TEXT--------------------------------------------------------------------
self.myfontMini = pygame.font.Font("Polices/Lady Radical.ttf", 15)
self.myfont = pygame.font.Font("Polices/Lady Radical.ttf", 25)
fleches = self.myfont.render("Les fleches directionnelles servent a se deplacer", 1, (200, 200, 0))
atkDeBase = self.myfont.render("'A' (Q sous Windows) permet de donner des coups au corps a corps", 1, (200, 200, 0))
atkDistance = self.myfont.render("'Z' (W sous Windows) permet de lancer des projectiles", 1, (200, 200, 0))
combol = self.myfont.render("Un combo est possible en realisant 3 attaques basiques successives", 1, (200, 200, 0))
dbSaut = self.myfont.render("Le double saut est possible", 1, (200, 200, 0))
quit1 = self.myfontMini.render("Appuyer sur 'Entree' pour ", 1, (200, 200, 0))
quit2 = self.myfontMini.render("retourner au menu principal ", 1, (200, 200, 0))
while 1:
clock.tick(fps)
        # EVENT HANDLING-----------------------------------------------------------------
for event in pygame.event.get():
            if event.type == pygame.QUIT:  # if the user clicks the window's close button
                sys.exit()  # close the window
if event.type == pygame.KEYDOWN:
exit_game(event.key)
blanchon.key_down(event)
if event.type == pygame.KEYUP:
blanchon.key_up(event)
        # SCENERY HANDLING---------------------------------------------------------------
        # Background
fenetre.blit(fond_e, (0, 0))
self.screen.blit(fleches, (600, 50))
self.screen.blit(atkDeBase, (600, 80))
self.screen.blit(atkDistance, (600, 110))
self.screen.blit(combol, (600, 140))
self.screen.blit(dbSaut, (600, 170))
self.screen.blit(quit1, (1100, 600))
self.screen.blit(quit2, (1100, 620))
        # Platforms
nbPlatf = len(platforms)
for i in range(0, nbPlatf):
fenetre.blit(platforms[i].get_img(), platforms[i].get_rect())
        # HERO HANDLING------------------------------------------------------------------
        # Display the damage multiplier
Multipl = Mult.render("Mult : ", 1, (255, 255, 0))
combo = blanchon.get_combo()
if combo < 2:
MultiplCombo = MultB.render(f"{combo:.2f}", 1, (255, 255, 0))
elif combo < 3:
MultiplCombo = MultB.render(f"{combo:.2f}", 1, (0, 0, 255))
elif combo < 4:
MultiplCombo = MultB.render(f"{combo:.2f}", 1, (255, 0, 255))
else:
MultiplCombo = MultB.render(f"{combo:.2f}", 1, (255, 0, 0))
fenetre.blit(Multipl, (700, 680))
fenetre.blit(MultiplCombo, (800, 670))
        # Cooldown of Blanchon's attacks
colorRect = (125, 125, 125, 128)
if not blanchon.get_onGround():
cd = blanchon_atkList[4].get_cd()
if cd > 0:
pygame.draw.rect(fenetre, (0, 0, 0), (95, 655, 60, 60))
else:
pygame.draw.rect(fenetre, (200, 200, 50), (95, 655, 60, 60))
tailleRect1 = 60 * cd / blanchon_atkList[4].get_maxCd()
posRect1 = 715 - tailleRect1
fenetre.blit(blanchonAaMidAir, (100, 660))
CdAH = damageFont.render(f"{cd:.1f}", 1, (255, 0, 0))
elif blanchon.get_autoHitTimer3() > 0:
pygame.draw.rect(fenetre, (200, 200, 50), (95, 655, 60, 60))
fenetre.blit(blanchonAa3, (100, 660))
tailleRect1 = 60 * blanchon.get_autoHitTimer3() / 3000
posRect1 = 715 - tailleRect1
CdAH = damageFont.render(f"{blanchon.get_autoHitTimer3()/1000:.1f}", 1, (255, 0, 0))
elif blanchon.get_autoHitTimer2() > 0:
pygame.draw.rect(fenetre, (200, 200, 50), (95, 655, 60, 60))
fenetre.blit(blanchonAa2, (100, 660))
tailleRect1 = 60 * blanchon.get_autoHitTimer2() / 3000
posRect1 = 715 - tailleRect1
CdAH = damageFont.render(f"{blanchon.get_autoHitTimer2()/1000:.1f}", 1, (255, 0, 0))
else:
cd = blanchon_atkList[0].get_cd()
if cd > 0:
pygame.draw.rect(fenetre, (0, 0, 0), (95, 655, 60, 60))
else:
pygame.draw.rect(fenetre, (200, 200, 50), (95, 655, 60, 60))
fenetre.blit(blanchonAa1, (100, 660))
tailleRect1 = 60 * cd / blanchon_atkList[0].get_maxCd()
posRect1 = 715 - tailleRect1
CdAH = damageFont.render(f"{cd:.1f}", 1, (255, 0, 0))
CaseAa = pygame.Surface((60, tailleRect1), pygame.SRCALPHA)
CaseAa.fill(colorRect)
fenetre.blit(CaseAa, (95, posRect1))
if cd > 0:
fenetre.blit(CdAH, (110, 670))
if blanchon_atkList[3].get_cd() > 0:
pygame.draw.rect(fenetre, (0, 0, 0), (175, 655, 60, 60))
pygame.draw.rect(fenetre, (255, 255, 255), (180, 660, 50, 50))
else:
pygame.draw.rect(fenetre, (200, 200, 50), (175, 655, 60, 60))
pygame.draw.rect(fenetre, (255, 255, 255), (180, 660, 50, 50))
fenetre.blit(blanchonVector, (189, 677))
tailleRect2 = 60 * blanchon_atkList[3].get_cd() / blanchon_atkList[3].get_maxCd()
posRect2 = 715 - tailleRect2
CaseAa = pygame.Surface((60, tailleRect2), pygame.SRCALPHA)
CaseAa.fill((125, 125, 125, 128))
fenetre.blit(CaseAa, (175, posRect2))
CdProj = damageFont.render(f"{blanchon_atkList[3].get_cd():.1f}", 1, (255, 0, 0))
if blanchon_atkList[3].get_cd() > 0:
fenetre.blit(CdProj, (190, 670))
        # Test Hero => Platform
heroOnGround = blanchon.isOnGround()
blanchon.setOnAir()
blanchon.testPlatform(sol)
for i in range(0, nbPlatf):
blanchon.testPlatform(platforms[i])
        # The hero walked off a platform
        if heroOnGround and not blanchon.isOnGround():
            blanchon.giveDoubleJump() # give him back a jump
blanchon.update(blanchon, fps)
        # DAMAGE DISPLAY-----------------------------------------------------------------
i = 0
while i < len(damageArray):
if damageArray[i][2] > 0:
fenetre.blit(damageArray[i][0], damageArray[i][1])
damageArray[i][2] = damageArray[i][2] - (1000/fps)
i += 1
else:
damageArray.pop(i)
        # MOB HANDLING-------------------------------------------------------------------
        # Test Mob => Platform && Hero Atk => Mob
nbAtkHero = len(blanchon.get_AtkEffectList())
i = 0
while i < len(foes):
foes[i].nextImg(fps)
fenetre.blit(foes[i].get_img(), foes[i].get_rect())
pygame.draw.rect(
fenetre, (0, 0, 0), (foes[i].get_rect().x, foes[i].get_rect().y - 10, 60, 6)
)
pygame.draw.rect(
fenetre, (255, 0, 0), (
foes[i].get_rect().x, foes[i].get_rect().y - 10,
int(max(min(foes[i].get_hp()/float(foes[i].get_hpMax())*60, 60), 0)), 6
)
)
foes[i].setOnAir()
foes[i].testPlatform(sol)
for j in range(0, nbPlatf):
foes[i].testPlatform(platforms[j])
            # Check whether mob i is hit by hero attack k
for k in range(0, nbAtkHero):
hpBefore = foes[i].get_hp()
foes[i].testAtkEffect(blanchon.get_AtkEffectList()[k])
degats = foes[i].get_hp() - hpBefore
foes[i].set_hp(degats)
if degats < 0.0:
damageArray.append([
damageFont.render(f"{degats:.1f}", 1, (50, 150, 255)),
(foes[i].get_x(), foes[i].get_y()-40), timerDamage
])
nbAtkFoe = len(foes[i].get_AtkEffectList())
for l in range(0, nbAtkFoe):
hpBefore = blanchon.get_hp()
blanchon.testAtkEffect(foes[i].get_AtkEffectList()[l])
degats = blanchon.get_hp() - hpBefore
if degats < 0:
damageArray.append([
damageFont.render(f"{degats:.1f}", 1, (255, 0, 0)),
(blanchon.get_x(), blanchon.get_y()-40), timerDamage
])
fenetre.blit(
foes[i].get_AtkEffectList()[l].get_img(),
foes[i].get_AtkEffectList()[l].get_rect()
)
foes[i].update(blanchon, fps)
if foes[i].get_hp() <= 0:
foes.pop(i)
else:
i += 1
for i in range(0, nbAtkHero):
            fenetre.blit(blanchon.get_AtkEffectList()[i].get_img(), blanchon.get_AtkEffectList()[i].get_rect())
        # Display the hero
blanchon.nextImg(fps)
fenetre.blit(blanchon.get_img(), blanchon.get_rect())
pygame.draw.rect(fenetre, (0, 0, 0), (blanchon.get_rect().x, blanchon.get_rect().y - 10, 60, 6))
pygame.draw.rect(
fenetre, (0, 255, 0), (
blanchon.get_rect().x, blanchon.get_rect().y - 10,
int(max(min(blanchon.get_hp()/float(blanchon.get_hpMax()) * 60, 60), 0)), 6
)
)
pygame.display.flip()
|
py | 1a2f850141c540cc0f646cba5238e1b9ff313898 | # -*- coding: utf-8 -*-
# Spearmint
#
# Academic and Non-Commercial Research Use Software License and Terms
# of Use
#
# Spearmint is a software package to perform Bayesian optimization
# according to specific algorithms (the “Software”). The Software is
# designed to automatically run experiments (thus the code name
# 'spearmint') in a manner that iteratively adjusts a number of
# parameters so as to minimize some objective in as few runs as
# possible.
#
# The Software was developed by Ryan P. Adams, Michael Gelbart, and
# Jasper Snoek at Harvard University, Kevin Swersky at the
# University of Toronto (“Toronto”), and Hugo Larochelle at the
# Université de Sherbrooke (“Sherbrooke”), which assigned its rights
# in the Software to Socpra Sciences et Génie
# S.E.C. (“Socpra”). Pursuant to an inter-institutional agreement
# between the parties, it is distributed for free academic and
# non-commercial research use by the President and Fellows of Harvard
# College (“Harvard”).
#
# Using the Software indicates your agreement to be bound by the terms
# of this Software Use Agreement (“Agreement”). Absent your agreement
# to the terms below, you (the “End User”) have no rights to hold or
# use the Software whatsoever.
#
# Harvard agrees to grant hereunder the limited non-exclusive license
# to End User for the use of the Software in the performance of End
# User’s internal, non-commercial research and academic use at End
# User’s academic or not-for-profit research institution
# (“Institution”) on the following terms and conditions:
#
# 1. NO REDISTRIBUTION. The Software remains the property Harvard,
# Toronto and Socpra, and except as set forth in Section 4, End User
# shall not publish, distribute, or otherwise transfer or make
# available the Software to any other party.
#
# 2. NO COMMERCIAL USE. End User shall not use the Software for
# commercial purposes and any such use of the Software is expressly
# prohibited. This includes, but is not limited to, use of the
# Software in fee-for-service arrangements, core facilities or
# laboratories or to provide research services to (or in collaboration
# with) third parties for a fee, and in industry-sponsored
# collaborative research projects where any commercial rights are
# granted to the sponsor. If End User wishes to use the Software for
# commercial purposes or for any other restricted purpose, End User
# must execute a separate license agreement with Harvard.
#
# Requests for use of the Software for commercial purposes, please
# contact:
#
# Office of Technology Development
# Harvard University
# Smith Campus Center, Suite 727E
# 1350 Massachusetts Avenue
# Cambridge, MA 02138 USA
# Telephone: (617) 495-3067
# Facsimile: (617) 495-9568
# E-mail: [email protected]
#
# 3. OWNERSHIP AND COPYRIGHT NOTICE. Harvard, Toronto and Socpra own
# all intellectual property in the Software. End User shall gain no
# ownership to the Software. End User shall not remove or delete and
# shall retain in the Software, in any modifications to Software and
# in any Derivative Works, the copyright, trademark, or other notices
# pertaining to Software as provided with the Software.
#
# 4. DERIVATIVE WORKS. End User may create and use Derivative Works,
# as such term is defined under U.S. copyright laws, provided that any
# such Derivative Works shall be restricted to non-commercial,
# internal research and academic use at End User’s Institution. End
# User may distribute Derivative Works to other Institutions solely
# for the performance of non-commercial, internal research and
# academic use on terms substantially similar to this License and
# Terms of Use.
#
# 5. FEEDBACK. In order to improve the Software, comments from End
# Users may be useful. End User agrees to provide Harvard with
# feedback on the End User’s use of the Software (e.g., any bugs in
# the Software, the user experience, etc.). Harvard is permitted to
# use such information provided by End User in making changes and
# improvements to the Software without compensation or an accounting
# to End User.
#
# 6. NON ASSERT. End User acknowledges that Harvard, Toronto and/or
# Sherbrooke or Socpra may develop modifications to the Software that
# may be based on the feedback provided by End User under Section 5
# above. Harvard, Toronto and Sherbrooke/Socpra shall not be
# restricted in any way by End User regarding their use of such
# information. End User acknowledges the right of Harvard, Toronto
# and Sherbrooke/Socpra to prepare, publish, display, reproduce,
# transmit and or use modifications to the Software that may be
# substantially similar or functionally equivalent to End User’s
# modifications and/or improvements if any. In the event that End
# User obtains patent protection for any modification or improvement
# to Software, End User agrees not to allege or enjoin infringement of
# End User’s patent against Harvard, Toronto or Sherbrooke or Socpra,
# or any of the researchers, medical or research staff, officers,
# directors and employees of those institutions.
#
# 7. PUBLICATION & ATTRIBUTION. End User has the right to publish,
# present, or share results from the use of the Software. In
# accordance with customary academic practice, End User will
# acknowledge Harvard, Toronto and Sherbrooke/Socpra as the providers
# of the Software and may cite the relevant reference(s) from the
# following list of publications:
#
# Practical Bayesian Optimization of Machine Learning Algorithms
# Jasper Snoek, Hugo Larochelle and Ryan Prescott Adams
# Neural Information Processing Systems, 2012
#
# Multi-Task Bayesian Optimization
# Kevin Swersky, Jasper Snoek and Ryan Prescott Adams
# Advances in Neural Information Processing Systems, 2013
#
# Input Warping for Bayesian Optimization of Non-stationary Functions
# Jasper Snoek, Kevin Swersky, Richard Zemel and Ryan Prescott Adams
# Preprint, arXiv:1402.0929, http://arxiv.org/abs/1402.0929, 2013
#
# Bayesian Optimization and Semiparametric Models with Applications to
# Assistive Technology Jasper Snoek, PhD Thesis, University of
# Toronto, 2013
#
# 8. NO WARRANTIES. THE SOFTWARE IS PROVIDED "AS IS." TO THE FULLEST
# EXTENT PERMITTED BY LAW, HARVARD, TORONTO AND SHERBROOKE AND SOCPRA
# HEREBY DISCLAIM ALL WARRANTIES OF ANY KIND (EXPRESS, IMPLIED OR
# OTHERWISE) REGARDING THE SOFTWARE, INCLUDING BUT NOT LIMITED TO ANY
# IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
# PURPOSE, OWNERSHIP, AND NON-INFRINGEMENT. HARVARD, TORONTO AND
# SHERBROOKE AND SOCPRA MAKE NO WARRANTY ABOUT THE ACCURACY,
# RELIABILITY, COMPLETENESS, TIMELINESS, SUFFICIENCY OR QUALITY OF THE
# SOFTWARE. HARVARD, TORONTO AND SHERBROOKE AND SOCPRA DO NOT WARRANT
# THAT THE SOFTWARE WILL OPERATE WITHOUT ERROR OR INTERRUPTION.
#
# 9. LIMITATIONS OF LIABILITY AND REMEDIES. USE OF THE SOFTWARE IS AT
# END USER’S OWN RISK. IF END USER IS DISSATISFIED WITH THE SOFTWARE,
# ITS EXCLUSIVE REMEDY IS TO STOP USING IT. IN NO EVENT SHALL
# HARVARD, TORONTO OR SHERBROOKE OR SOCPRA BE LIABLE TO END USER OR
# ITS INSTITUTION, IN CONTRACT, TORT OR OTHERWISE, FOR ANY DIRECT,
# INDIRECT, SPECIAL, INCIDENTAL, CONSEQUENTIAL, PUNITIVE OR OTHER
# DAMAGES OF ANY KIND WHATSOEVER ARISING OUT OF OR IN CONNECTION WITH
# THE SOFTWARE, EVEN IF HARVARD, TORONTO OR SHERBROOKE OR SOCPRA IS
# NEGLIGENT OR OTHERWISE AT FAULT, AND REGARDLESS OF WHETHER HARVARD,
# TORONTO OR SHERBROOKE OR SOCPRA IS ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGES.
#
# 10. INDEMNIFICATION. To the extent permitted by law, End User shall
# indemnify, defend and hold harmless Harvard, Toronto and Sherbrooke
# and Socpra, their corporate affiliates, current or future directors,
# trustees, officers, faculty, medical and professional staff,
# employees, students and agents and their respective successors,
# heirs and assigns (the "Indemnitees"), against any liability,
# damage, loss or expense (including reasonable attorney's fees and
# expenses of litigation) incurred by or imposed upon the Indemnitees
# or any one of them in connection with any claims, suits, actions,
# demands or judgments arising from End User’s breach of this
# Agreement or its Institution’s use of the Software except to the
# extent caused by the gross negligence or willful misconduct of
# Harvard, Toronto or Sherbrooke or Socpra. This indemnification
# provision shall survive expiration or termination of this Agreement.
#
# 11. GOVERNING LAW. This Agreement shall be construed and governed by
# the laws of the Commonwealth of Massachusetts regardless of
# otherwise applicable choice of law standards.
#
# 12. NON-USE OF NAME. Nothing in this License and Terms of Use shall
# be construed as granting End Users or their Institutions any rights
# or licenses to use any trademarks, service marks or logos associated
# with the Software. You may not use the terms “Harvard” or
# “University of Toronto” or “Université de Sherbrooke” or “Socpra
# Sciences et Génie S.E.C.” (or a substantially similar term) in any
# way that is inconsistent with the permitted uses described
# herein. You agree not to use any name or emblem of Harvard, Toronto
# or Sherbrooke, or any of their subdivisions for any purpose, or to
# falsely suggest any relationship between End User (or its
# Institution) and Harvard, Toronto and/or Sherbrooke, or in any
# manner that would infringe or violate any of their rights.
#
# 13. End User represents and warrants that it has the legal authority
# to enter into this License and Terms of Use on behalf of itself and
# its Institution.
import numpy as np
#import scipy.weave
from scipy.spatial.distance import cdist
def dist2(ls, x1, x2=None):
# Assumes NxD and MxD matrices.
# Compute the squared distance matrix, given length scales.
if x2 is None:
# Find distance with self for x1.
# Rescale.
xx1 = x1 / ls
xx2 = xx1
else:
# Rescale.
xx1 = x1 / ls
xx2 = x2 / ls
r2 = cdist(xx1,xx2,'sqeuclidean')
return r2
def grad_dist2(ls, x1, x2=None):
if x2 is None:
x2 = x1
# Rescale.
x1 = x1 / ls
x2 = x2 / ls
N = x1.shape[0]
M = x2.shape[0]
D = x1.shape[1]
gX = np.zeros((x1.shape[0],x2.shape[0],x1.shape[1]))
code = \
"""
for (int i=0; i<N; i++)
for (int j=0; j<M; j++)
for (int d=0; d<D; d++)
gX(i,j,d) = (2/ls(d))*(x1(i,d) - x2(j,d));
"""
    try:
        # NOTE: scipy.weave is Python 2 only and its import is commented out at
        # the top of this module, so this call raises NameError at runtime and
        # the pure-NumPy fallback below is what actually runs.
        weave.inline(code, ['x1','x2','gX','ls','M','N','D'], \
                     type_converters=weave.converters.blitz, \
                     compiler='gcc')
    except Exception:
# The C code weave above is 10x faster than this:
for i in range(0,x1.shape[0]):
gX[i,:,:] = 2*(x1[i,:] - x2[:,:])*(1/ls)
return gX
def dist_Mahalanobis(U, x1, x2=None):
    # Hedged completion of a truncated body: with W = U U^T, the Mahalanobis
    # squared distance (x1-x2)^T W (x1-x2) equals the squared Euclidean
    # distance between the points projected onto U.
    W = np.dot(U,U.T)
    x2 = x1 if x2 is None else x2
    return cdist(np.dot(x1, U), np.dot(x2, U), 'sqeuclidean')
# This function is useful if the data can appear in multiple forms
# but the code is being changed so that the data will always be an array.
# def extract_data(func):
# """
# Decorator function.
# If the input arguments are dicts instead of ndarrays then this extracts
# the ndarrays at the key 'inputs'. It makes the rest of the kernel cleaner
# since they don't have to do any bookkeeping.
# """
# def inner(cls_instance, *args):
# new_args = []
# for data in args:
# if isinstance(data, dict):
# if not data.has_key('inputs'):
# raise Exception('Data dict must have key "inputs".')
# new_args.append(data['inputs'])
# elif isinstance(data, np.ndarray):
# new_args.append(data)
# else:
# raise Exception('Data of type %s not supported in kernels.' % data.__class__)
# return func(cls_instance, *new_args)
# return inner
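# Hedged usage sketch (not part of the original module): dist2 gives pairwise
# squared distances of length-scale-rescaled points and grad_dist2 its gradient
# with respect to x1. The guarded check below compares one analytic gradient
# entry against a central finite difference; shapes, seed and tolerance are
# illustrative choices only.
if __name__ == "__main__":
    rng = np.random.RandomState(0)
    ls = np.array([0.5, 2.0, 1.0])
    x1 = rng.rand(4, 3)
    x2 = rng.rand(5, 3)
    g = grad_dist2(ls, x1, x2)              # shape (4, 5, 3)
    eps = 1e-6
    x1p, x1m = x1.copy(), x1.copy()
    x1p[0, 1] += eps
    x1m[0, 1] -= eps
    fd = (dist2(ls, x1p, x2)[0, 2] - dist2(ls, x1m, x2)[0, 2]) / (2 * eps)
    assert abs(g[0, 2, 1] - fd) < 1e-5
    print("grad_dist2 matches finite differences:", g[0, 2, 1], fd)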
|
py | 1a2f85fd6a00795a6baed8c6590d0fdfa79bc20a | # Generated by Django 3.0.8 on 2020-07-06 10:58
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('measurements', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='measurement',
name='diastolic_pressure',
field=models.SmallIntegerField(default=80, verbose_name='Ciśnienie rozkurczowe'),
),
migrations.AlterField(
model_name='measurement',
name='pulse',
field=models.SmallIntegerField(default=60, verbose_name='Tętno'),
),
migrations.AlterField(
model_name='measurement',
name='systolic_pressure',
field=models.SmallIntegerField(default=120, verbose_name='Ciśnienie skurczowe'),
),
]
|
py | 1a2f863fc01e3b2a33e989df01e82626774574dd | import os
import mlflow
import random
import hashlib
import numpy as np
import pandas as pd
from sklearn import datasets
from sklearn.model_selection import train_test_split
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import classification_report
from sklearn.ensemble import RandomForestClassifier
from src.git_autocommit import autocommit
SEED = 0
TRACKING_URI = 'http://localhost:5000'
EXPERIMENT_NAME = 'mnist'
random.seed(SEED)
np.random.seed(SEED)
def train(cfg):
os.system("conda env export > environment.yaml")
autocommit(file_paths=['./'], message='Another version of random forest')
mlflow.set_tracking_uri(TRACKING_URI)
mlflow.set_experiment(EXPERIMENT_NAME)
digits = datasets.load_digits()
n_samples = len(digits.images)
X = digits.images.reshape((n_samples, -1))
y = digits.target
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.8, random_state=SEED)
# Track hash of data & split
data_hash = hashlib.md5()
for df in [X_train, X_test, y_train, y_test]:
data_hash.update(df)
data_hash = data_hash.hexdigest()
clf = RandomForestClassifier(**cfg, random_state=SEED)
clf.fit(X_train, y_train)
preds = clf.predict(X_test)
scores = classification_report(y_test, preds, output_dict=True)
df = pd.json_normalize(scores, sep='_')
df = df.to_dict(orient='records')[0]
with mlflow.start_run():
mlflow.log_params(cfg)
mlflow.log_param('data_hash', data_hash)
mlflow.log_metrics(df)
print(df['macro avg_f1-score'])
if __name__ == '__main__':
cfg = {'n_estimators': 500,
'max_depth': 25,
'min_samples_split': 2,
'min_samples_leaf': 1,
}
train(cfg)
|
py | 1a2f869ec2c812dfb0a9d1d33b95a16c250ea5fe | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class VpnGatewaysOperations(object):
"""VpnGatewaysOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2020_03_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def get(
self,
resource_group_name, # type: str
gateway_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "models.VpnGateway"
"""Retrieves the details of a virtual wan vpn gateway.
:param resource_group_name: The resource group name of the VpnGateway.
:type resource_group_name: str
:param gateway_name: The name of the gateway.
:type gateway_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: VpnGateway, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2020_03_01.models.VpnGateway
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.VpnGateway"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-03-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'gatewayName': self._serialize.url("gateway_name", gateway_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('VpnGateway', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/vpnGateways/{gatewayName}'} # type: ignore
def _create_or_update_initial(
self,
resource_group_name, # type: str
gateway_name, # type: str
vpn_gateway_parameters, # type: "models.VpnGateway"
**kwargs # type: Any
):
# type: (...) -> "models.VpnGateway"
cls = kwargs.pop('cls', None) # type: ClsType["models.VpnGateway"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-03-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'gatewayName': self._serialize.url("gateway_name", gateway_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(vpn_gateway_parameters, 'VpnGateway')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('VpnGateway', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('VpnGateway', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/vpnGateways/{gatewayName}'} # type: ignore
def begin_create_or_update(
self,
resource_group_name, # type: str
gateway_name, # type: str
vpn_gateway_parameters, # type: "models.VpnGateway"
**kwargs # type: Any
):
# type: (...) -> LROPoller["models.VpnGateway"]
"""Creates a virtual wan vpn gateway if it doesn't exist else updates the existing gateway.
:param resource_group_name: The resource group name of the VpnGateway.
:type resource_group_name: str
:param gateway_name: The name of the gateway.
:type gateway_name: str
:param vpn_gateway_parameters: Parameters supplied to create or Update a virtual wan vpn
gateway.
:type vpn_gateway_parameters: ~azure.mgmt.network.v2020_03_01.models.VpnGateway
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either VpnGateway or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2020_03_01.models.VpnGateway]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["models.VpnGateway"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._create_or_update_initial(
resource_group_name=resource_group_name,
gateway_name=gateway_name,
vpn_gateway_parameters=vpn_gateway_parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('VpnGateway', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/vpnGateways/{gatewayName}'} # type: ignore
def update_tags(
self,
resource_group_name, # type: str
gateway_name, # type: str
vpn_gateway_parameters, # type: "models.TagsObject"
**kwargs # type: Any
):
# type: (...) -> "models.VpnGateway"
"""Updates virtual wan vpn gateway tags.
:param resource_group_name: The resource group name of the VpnGateway.
:type resource_group_name: str
:param gateway_name: The name of the gateway.
:type gateway_name: str
:param vpn_gateway_parameters: Parameters supplied to update a virtual wan vpn gateway tags.
:type vpn_gateway_parameters: ~azure.mgmt.network.v2020_03_01.models.TagsObject
:keyword callable cls: A custom type or function that will be passed the direct response
:return: VpnGateway, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2020_03_01.models.VpnGateway
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.VpnGateway"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-03-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.update_tags.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'gatewayName': self._serialize.url("gateway_name", gateway_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(vpn_gateway_parameters, 'TagsObject')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('VpnGateway', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
update_tags.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/vpnGateways/{gatewayName}'} # type: ignore
def _delete_initial(
self,
resource_group_name, # type: str
gateway_name, # type: str
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-03-01"
accept = "application/json"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'gatewayName': self._serialize.url("gateway_name", gateway_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/vpnGateways/{gatewayName}'} # type: ignore
def begin_delete(
self,
resource_group_name, # type: str
gateway_name, # type: str
**kwargs # type: Any
):
# type: (...) -> LROPoller[None]
"""Deletes a virtual wan vpn gateway.
:param resource_group_name: The resource group name of the VpnGateway.
:type resource_group_name: str
:param gateway_name: The name of the gateway.
:type gateway_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._delete_initial(
resource_group_name=resource_group_name,
gateway_name=gateway_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/vpnGateways/{gatewayName}'} # type: ignore
def _reset_initial(
self,
resource_group_name, # type: str
gateway_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Optional["models.VpnGateway"]
cls = kwargs.pop('cls', None) # type: ClsType[Optional["models.VpnGateway"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-03-01"
accept = "application/json"
# Construct URL
url = self._reset_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'gatewayName': self._serialize.url("gateway_name", gateway_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('VpnGateway', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_reset_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/vpnGateways/{gatewayName}/reset'} # type: ignore
def begin_reset(
self,
resource_group_name, # type: str
gateway_name, # type: str
**kwargs # type: Any
):
# type: (...) -> LROPoller["models.VpnGateway"]
"""Resets the primary of the vpn gateway in the specified resource group.
:param resource_group_name: The resource group name of the VpnGateway.
:type resource_group_name: str
:param gateway_name: The name of the gateway.
:type gateway_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either VpnGateway or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2020_03_01.models.VpnGateway]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["models.VpnGateway"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._reset_initial(
resource_group_name=resource_group_name,
gateway_name=gateway_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('VpnGateway', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_reset.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/vpnGateways/{gatewayName}/reset'} # type: ignore
def list_by_resource_group(
self,
resource_group_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Iterable["models.ListVpnGatewaysResult"]
"""Lists all the VpnGateways in a resource group.
:param resource_group_name: The resource group name of the VpnGateway.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ListVpnGatewaysResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2020_03_01.models.ListVpnGatewaysResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.ListVpnGatewaysResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-03-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_by_resource_group.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('ListVpnGatewaysResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/vpnGateways'} # type: ignore
def list(
self,
**kwargs # type: Any
):
# type: (...) -> Iterable["models.ListVpnGatewaysResult"]
"""Lists all the VpnGateways in a subscription.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ListVpnGatewaysResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2020_03_01.models.ListVpnGatewaysResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.ListVpnGatewaysResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-03-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('ListVpnGatewaysResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/vpnGateways'} # type: ignore
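# Hedged usage sketch (not part of the generated file): these operations are
# normally reached through the generated client, e.g.
# azure.mgmt.network.NetworkManagementClient, which exposes this class as its
# `vpn_gateways` attribute; the credential type, resource names and client
# constructor below are assumptions for illustration only.
if __name__ == "__main__":
    from azure.identity import DefaultAzureCredential
    from azure.mgmt.network import NetworkManagementClient
    network_client = NetworkManagementClient(DefaultAzureCredential(), "<subscription-id>")
    gateway = network_client.vpn_gateways.get("my-resource-group", "my-vpn-gateway")
    print(gateway.name, gateway.location)
    # Long-running calls return an LROPoller; block on completion with .result().
    poller = network_client.vpn_gateways.begin_reset("my-resource-group", "my-vpn-gateway")
    print(poller.result().provisioning_state)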
|
py | 1a2f86cf170f003f6765598b13c57e6737eec197 | import os
import shutil
from nose.tools import assert_raises
from paradrop.lib.utils import pdos, pdosq
from paradrop.base import nexus, settings
class TestingNexus(nexus.NexusBase):
pass
def setup():
settings.loadSettings(mode="unittest")
def teardown():
pdos.remove(settings.CONFIG_HOME_DIR)
###############################################################################
# Settings Assignment, Paths
###############################################################################
def testMetaAssignment():
nex = TestingNexus()
assert nex.info.version == 1
def testConfigLoadingEmpty():
nex = TestingNexus()
assert nex.info.pdid == None
def testConfigLoadingExisting():
contents = dict(pdid='pd.damouse.aardvark', version=1, pdserver='http://paradrop.org', wampRouter='ws://paradrop.org:9080/ws')
nexus.writeYaml(contents, settings.CONFIG_FILE)
nex = TestingNexus()
assert nex.info.pdid == 'pd.damouse.aardvark'
assert nex.info.pdserver == 'http://paradrop.org'
assert nex.info.wampRouter == 'ws://paradrop.org:9080/ws'
pdos.remove(settings.CONFIG_FILE)
###############################################################################
# AttrWrapper
###############################################################################
def testWrapperDoesntAllowChanges():
wrapper = nexus.AttrWrapper()
wrapper.a = 1
assert wrapper.a == 1
def s():
wrapper._lock()
wrapper.a = 2
assert_raises(AttributeError, s)
###############################################################################
# Setings Changes
###############################################################################
def testSaveCallbackTriggered():
class Receiver:
def __init__(self):
self.received = False
def onChange(self, k, v):
self.received = True
rec = Receiver()
wrapper = nexus.AttrWrapper()
wrapper.a = 1
wrapper.setOnChange(rec.onChange)
wrapper.a = 2
assert wrapper.a == 2
assert rec.received == True
def testSaveUpdatesYaml():
nex = TestingNexus()
nex.info.a = 1
dic = pdosq.read_yaml_file(settings.CONFIG_FILE)
assert dic['a'] == 1
|
py | 1a2f8873b884c3ec3c3e0535fbe94b19ca1bf26e | from django.core.exceptions import ValidationError
from django.test.client import RequestFactory
from mock import patch
from nose.tools import assert_raises, eq_, ok_
from waffle import Flag
from flicks.base.regions import NORTH_AMERICA
from flicks.base.tests import TestCase
from flicks.videos.forms import VideoSearchForm
from flicks.videos.search import AUTOCOMPLETE_FIELDS
class VideoSearchFormTests(TestCase):
def setUp(self):
super(VideoSearchFormTests, self).setUp()
self.factory = RequestFactory()
self.request = self.factory.get('/')
def test_popular_sort_include(self):
"""If the voting-end waffle flag is not set, include the popular option for sorting."""
Flag.objects.create(name='voting-end', everyone=False)
form = VideoSearchForm(self.request)
ok_('popular' in [c[0] for c in form.fields['sort'].choices])
def test_popular_sort_exclude(self):
"""If the voting-end waffle flag is set, do not include the popular option for sorting."""
Flag.objects.create(name='voting-end', everyone=True)
form = VideoSearchForm(self.request)
ok_('popular' not in [c[0] for c in form.fields['sort'].choices])
@patch('flicks.videos.forms.search_videos')
def test_valid_search(self, search_videos):
form = VideoSearchForm(self.request, {
'query': 'asdf',
'field': 'title',
'region': NORTH_AMERICA,
'sort': 'popular'
})
eq_(form.perform_search(), search_videos.return_value)
search_videos.assert_called_with(
query='asdf',
fields=AUTOCOMPLETE_FIELDS['title'],
region=NORTH_AMERICA,
sort='popular'
)
@patch('flicks.videos.forms.search_videos')
def test_empty_field_passes_none(self, search_videos):
"""If the field isn't specified, pass None to the fields parameter."""
form = VideoSearchForm(self.request, {
'query': 'asdf',
'region': NORTH_AMERICA,
'sort': 'popular'
})
eq_(form.perform_search(), search_videos.return_value)
search_videos.assert_called_with(query='asdf', fields=None,
region=NORTH_AMERICA, sort='popular')
def test_invalid_form(self):
"""If the form fails validation, throw a ValidationError."""
form = VideoSearchForm(self.request, {
'region': -5,
'sort': 'invalid'
})
with assert_raises(ValidationError):
form.perform_search()
def test_clean_no_query(self):
"""
If no search query is specified, do not alter the sort value or
choices.
"""
form = VideoSearchForm(self.request, {'region': NORTH_AMERICA, 'sort': 'title'})
form.full_clean()
eq_(form.cleaned_data['sort'], 'title')
choice_values = zip(*form.fields['sort'].choices)[0]
ok_('' in choice_values)
def test_clean_query(self):
"""
If a search query is specified, remove the random option from the sort
choices and, if the sort is currently set to random, switch to title
sort.
"""
form = VideoSearchForm(self.request, {'query': 'blah', 'sort': ''})
form.full_clean()
eq_(form.cleaned_data['sort'], 'title')
choice_values = zip(*form.fields['sort'].choices)[0]
ok_('' not in choice_values)
# Check that sort is preserved if it is not random.
form = VideoSearchForm(self.request, {'query': 'blah', 'sort': 'popular'})
form.full_clean()
eq_(form.cleaned_data['sort'], 'popular')
choice_values = zip(*form.fields['sort'].choices)[0]
ok_('' not in choice_values)
def test_invalid_sort(self):
"""
An invalid value for sort should not break clean.
Regression test for an issue where a user was attempting to break Flicks by submitting a
bunch of invalid values for sort.
"""
form = VideoSearchForm(self.request, {'query': 'blah', 'sort': 'invalid'})
form.full_clean()
eq_(form.is_valid(), False)
|
py | 1a2f88a032b687e865973285e2c7feb45eb8f216 | from abc import ABC, abstractmethod
import os.path
import logging
from exceptions import UnknownCommandError, FailedCommandError
from sym_api_client_python.processors.sym_elements_parser import SymElementsParser
from sym_api_client_python.processors.sym_message_parser import SymMessageParser
# responses and views
class IResponse(ABC):
@abstractmethod
def update(self, action):
pass
class NullResponse(IResponse):
def update(self, action):
pass
class IView(ABC):
@abstractmethod
def render(self, message):
pass
# Controller interfaces and classes
class IResponds(ABC):
@property
@abstractmethod
def responds_to(self):
pass
class IController(ABC):
@abstractmethod
def update(self, action):
pass
@abstractmethod
def render(self, message):
pass
class GeneralController(IController, IResponds):
def __init__(self, response, view):
self._response = response
self._view = view
def update(self, action):
self._response.update(action)
def render(self, message):
return self._view.render(message)
@staticmethod
def make_form_id(cmd):
return cmd.strip().lower().replace('/', '').replace(' ', '_')
class Controllers(IController):
def __init__(self, controllers=None):
self._controllers = controllers if controllers is not None else {}
self._msg_parser = SymMessageParser()
self._elements_parser = SymElementsParser()
def update(self, action):
ky = self._elements_parser.get_form_id(action)
try:
c = self._controllers[ky]
except KeyError:
raise UnknownCommandError(ky)
c.update(action)
def render(self, message):
msg = ' '.join(self._msg_parser.get_text(message))
ky = GeneralController.make_form_id(msg)
try:
c = self._controllers[ky]
except KeyError:
raise UnknownCommandError(msg)
return c.render(message)
def add(self, controller):
ky = controller.make_form_id(controller.responds_to)
self._controllers[ky] = controller
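# Hedged usage sketch (not part of the original module): GeneralController is
# abstract until `responds_to` is supplied, so a bot command is declared as a
# small subclass and registered with Controllers, which dispatches by the form
# id derived from the command text. _EchoView is a stand-in for a real view.
class _EchoView(IView):
    def render(self, message):
        return "echo"
class _HelpController(GeneralController):
    responds_to = '/help'
def _example_registry():
    registry = Controllers()
    registry.add(_HelpController(NullResponse(), _EchoView()))
    return registry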
|
py | 1a2f88ad4bd07d4bb77a5be1ed690861b63df3d2 | import pdf_to_json as p2j
import json
url = "file:data/multilingual/Latn.FON/Serif_16/udhr_Latn.FON_Serif_16.pdf"
lConverter = p2j.pdf_to_json.pdf_to_json_converter()
lConverter.mImageHashOnly = True
lDict = lConverter.convert(url)
print(json.dumps(lDict, indent=4, ensure_ascii=False, sort_keys=True))
|
py | 1a2f894cf9aa445e7a5b19f18dc604e3c60b91be | from flask_wtf import FlaskForm
from wtforms import StringField,TextAreaField,SubmitField
from wtforms.validators import Required
class GroupForm(FlaskForm):
'''
Class to create a wtf form for creating a pitch
'''
name = StringField('Category Name', validators=[Required()])
submit = SubmitField('Create')
class LineForm(FlaskForm):
'''
Class to create a wtf form for creating a pitch
'''
line_content = StringField('One Minute Pitch', validators=[Required()])
submit = SubmitField('Submit')
class CommentForm(FlaskForm):
'''
Class to create a wtf form for creating a feedback on a pitch
'''
comment_content = TextAreaField('Comment', validators=[Required()])
submit = SubmitField('Submit')
class UpvoteForm(FlaskForm):
'''
Class to create a wtf form for upvoting a pitch
'''
submit = SubmitField('Upvote')
|
py | 1a2f89fc276fc1f9faa2cecf0ff4c04a0e765139 | from ..Qt import QtGui, QtCore, QtWidgets
__all__ = ['BusyCursor']
class BusyCursor(object):
"""Class for displaying a busy mouse cursor during long operations.
Usage::
with pyqtgraph.BusyCursor():
doLongOperation()
May be nested.
"""
active = []
def __enter__(self):
QtWidgets.QApplication.setOverrideCursor(QtGui.QCursor(QtCore.Qt.WaitCursor))
BusyCursor.active.append(self)
def __exit__(self, *args):
BusyCursor.active.pop(-1)
if len(BusyCursor.active) == 0:
QtWidgets.QApplication.restoreOverrideCursor()
|
py | 1a2f8a4d3eab132a3ab8045df770fc8506f5d491 | """
MIT License
Copyright (c) 2020 Airbyte
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from typing import Any, Mapping, Tuple
from base_python import BaseClient
from .api import (
API,
AgentsAPI,
CompaniesAPI,
ContactsAPI,
ConversationsAPI,
FreshdeskError,
FreshdeskNotFound,
FreshdeskUnauthorized,
GroupsAPI,
RolesAPI,
SatisfactionRatingsAPI,
SkillsAPI,
SurveysAPI,
TicketsAPI,
TimeEntriesAPI,
)
class Client(BaseClient):
def __init__(self, domain, api_key, requests_per_minute: int = None):
self._api = API(domain=domain, api_key=api_key, requests_per_minute=requests_per_minute)
self._apis = {
"agents": AgentsAPI(self._api),
"companies": CompaniesAPI(self._api),
"contacts": ContactsAPI(self._api),
"conversations": ConversationsAPI(self._api),
"groups": GroupsAPI(self._api),
"roles": RolesAPI(self._api),
"skills": SkillsAPI(self._api),
"surveys": SurveysAPI(self._api),
"tickets": TicketsAPI(self._api),
"time_entries": TimeEntriesAPI(self._api),
"satisfaction_ratings": SatisfactionRatingsAPI(self._api),
}
super().__init__()
def settings(self):
url = "settings/helpdesk"
return self._api.get(url)
def stream_has_state(self, name: str) -> bool:
"""Tell if stream supports incremental sync"""
return hasattr(self._apis[name], "state")
def get_stream_state(self, name: str) -> Any:
"""Get state of stream with corresponding name"""
return self._apis[name].state
def set_stream_state(self, name: str, state: Any):
"""Set state of stream with corresponding name"""
self._apis[name].state = state
def _enumerate_methods(self) -> Mapping[str, callable]:
return {name: api.list for name, api in self._apis.items()}
def health_check(self) -> Tuple[bool, str]:
alive = True
error_msg = None
try:
self.settings()
except (FreshdeskUnauthorized, FreshdeskNotFound):
alive = False
error_msg = "Invalid credentials"
except FreshdeskError as error:
alive = False
error_msg = repr(error)
return alive, error_msg
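# Hedged usage sketch (not part of the original connector); the domain and API
# key are placeholders. health_check() exercises the settings endpoint, and
# stream_has_state() tells whether a stream supports incremental sync.
if __name__ == "__main__":
    client = Client(domain="yourcompany.freshdesk.com", api_key="YOUR_API_KEY")
    alive, error = client.health_check()
    print("connected:", alive, error or "")
    if alive:
        print("tickets incremental:", client.stream_has_state("tickets"))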
|
py | 1a2f8a4d46289498c1b7c99de898aaad359f4233 | from adder.full_adder import FullAdder
from comparator.comparator import Comparator
from decoder.decoder_mxn import Decoder_nxm
from flipflop.d import D_FlipFlop
from gate.and_gate import And
from gate.input_gate import Input
from gate.one_gate import One
from gate.or_gate import Or
from gate.xor_gate import Xor
from gate.zero_gate import Zero
from latch.d import D_Latch
from multiplexer.mux2x1 import Mux2x1
from multiplexer.mux_mxn import Mux_mxn
from multiplexer.mux4x2 import Mux4x2
from runner.circuit_runner import CircuitRunner
from signals.signal import Signal
from gate.not_gate import Not
import sys
sys.setrecursionlimit(1000) # default is 1000
def turn_off_debug(every_thing=False):
And.DEBUGMODE = every_thing
Or.DEBUGMODE = every_thing
Xor.DEBUGMODE = every_thing
D_FlipFlop.DEBUGMODE = every_thing
D_Latch.DEBUGMODE = every_thing
Not.DEBUGMODE = every_thing
Mux2x1.DEBUGMODE = every_thing
Mux4x2.DEBUGMODE = every_thing
Signal.DEBUGMODE = every_thing
def test1():
clock = Signal()
l1 = D_Latch(clock, None, "l1")
l1.set_input(l1)
l1.set()
CircuitRunner.run([l1], clock, 4, [[l1]])
def test2():
clock = Signal()
d1 = D_FlipFlop(clock, None, "d1")
not1 = Not(d1, "not")
d1.set_input(not1)
d1.set()
for _ in range(20):
clock.pulse()
d1.logic()
print(d1)
def johnson_counter(n=100):
clock = Signal()
bits = [D_FlipFlop(clock, None, f"d{i}") for i in range(n)]
for i in range(1, n):
bits[i].set_input(bits[i - 1])
bits[i].reset()
bits[0].set_input(Not(bits[-1], "not"))
bits[0].reset()
for _ in range(4 * n):
clock.pulse()
bits[0].logic()
print("".join([str(b.q()) for b in bits]))
def multiplexer_test():
mux = Mux4x2((One(), Zero(), One(), Zero()), (One(), Zero()), "my_mux")
CircuitRunner.run([mux], None, None, [[mux]])
def n_bit_adder():
clock = Signal()
n = 200
a, b = "01001" * 40, "01110" * 40
d1 = [D_FlipFlop(clock, None, f"a{i}") for i in range(n)]
d2 = [D_FlipFlop(clock, None, f"b{i}") for i in range(n)]
adder = [FullAdder(None, None, f"adder{i}") for i in range(n)]
res = [D_FlipFlop(clock, None, f"r{i}") for i in range(n)]
for i in range(n):
d1[i].set_input(d1[i])
d2[i].set_input(d2[i])
adder[i].set_input((d1[i], d2[i]))
adder[i].set_cin(Zero() if i == 0 else adder[i - 1].cout)
res[i].set_input(adder[i].sum)
res[i].reset()
if a[n - i - 1] == '0':
d1[i].reset()
else:
d1[i].set()
if b[n - 1 - i] == '0':
d2[i].reset()
else:
d2[i].set()
CircuitRunner.run(res, clock, 3, [res])
def bitsToGates(bitString, inputs):
for i in range(len(bitString)):
inputs[i].output = 0 if bitString[i] == "0" else 1
def n_multiplexer_test():
inputs = [Input() for _ in range(32)]
selectors = [Input() for _ in range(5)]
mux = Mux_mxn(inputs, selectors, 5)
bitsToGates("11001110011100111001110011100101", inputs)
for i in range(32):
i_bin = bin(i)[2:].zfill(5)
bitsToGates(i_bin, selectors)
CircuitRunner.run([mux], display=[[mux]])
def decoder_test():
inputs = [Input() for _ in range(5)]
dec = Decoder_nxm(inputs, 5)
bitsToGates("11101", inputs)
CircuitRunner.run([dec], display=[dec.outputs])
def comparator_test():
i1 = [Input() for _ in range(5)]
i2 = [Input() for _ in range(5)]
comp = Comparator((i1, i2), 5)
bitsToGates("11101", i1)
bitsToGates("11101", i2)
CircuitRunner.run([comp], display=[[comp]])
turn_off_debug(False)
johnson_counter(800)
|
py | 1a2f8b5f650d7adc8c1626bf5f2481e3ee7bd6d3 | import re
import gevent
from gevent.pywsgi import WSGIHandler
from socketio import transports
from geventwebsocket.handler import WebSocketHandler
class SocketIOHandler(WSGIHandler):
path_re = re.compile(r"^/(?P<resource>[^/]+)/(?P<transport>[^/]+)(/(?P<session_id>[^/]*)/?(?P<rest>.*))?$")
handler_types = {
'websocket': transports.WebsocketTransport,
'flashsocket': transports.FlashSocketTransport,
'htmlfile': transports.HTMLFileTransport,
'xhr-multipart': transports.XHRMultipartTransport,
'xhr-polling': transports.XHRPollingTransport,
'jsonp-polling': transports.JSONPolling,
}
def __init__(self, *args, **kwargs):
self.socketio_connection = False
self.allowed_paths = None
super(SocketIOHandler, self).__init__(*args, **kwargs)
def handle_one_response(self):
self.status = None
self.headers_sent = False
self.result = None
self.response_length = 0
self.response_use_chunked = False
path = self.environ.get('PATH_INFO')
parts = SocketIOHandler.path_re.match(path)
# Is this a valid SocketIO path?
if parts:
parts = parts.groupdict()
else:
return super(SocketIOHandler, self).handle_one_response()
resource = parts['resource']
if resource != self.server.resource:
return super(SocketIOHandler, self).handle_one_response()
transport_name = parts['transport']
transport = SocketIOHandler.handler_types.get(transport_name)
if transport is None:
return super(SocketIOHandler, self).handle_one_response()
session_id = parts.get('session_id')
request_method = self.environ.get("REQUEST_METHOD")
# In case this is WebSocket request, switch to the WebSocketHandler
if transport in (transports.WebsocketTransport, \
transports.FlashSocketTransport):
self.__class__ = WebSocketHandler
self.handle_one_response(call_wsgi_app=False)
session = self.server.get_session()
else:
session = self.server.get_session(session_id)
# Make the session object available for WSGI apps
self.environ['socketio'].session = session
# Create a transport and handle the request likewise
self.transport = transport(self)
jobs = self.transport.connect(session, request_method)
if not session.wsgi_app_greenlet or not bool(session.wsgi_app_greenlet):
# Call the WSGI application, and let it run until the Socket.IO
# is *disconnected*, even though many POST/polling requests
# come through.
session.wsgi_app_greenlet = gevent.getcurrent()
session.connected = True
self.application(self.environ,
lambda status, headers, exc=None: None)
session.connected = False
gevent.joinall(jobs)
|
py | 1a2f8b6f6aaf7c574a736be44599e469d44fbe2a | from django.test import TestCase
from .models import Foto,Comment,Follower,Profile
class ProfileTestClass(TestCase):
# Set up method
def setUp(self):
self.profile= Profile(image = 'Jam.jpeg', name ='Muriuki', email ='[email protected]',bio = 'hdeydfedf')
# Testing instance
def test_instance(self):
        self.assertIsInstance(self.profile, Profile)
def tearDown(self):
Follower.objects.all().delete()
def test_save(self):
self.profile.save_profile()
profile= Profile.objects.all()
self.assertTrue(len(profile)>=1)
def test_upd(self):
profile = Profile.objects.filter(id=1)
profile.update(image = 'Kam.jpeg', name ='Murki', email ='[email protected]',bio = 'hdefedf')
search = Profile.objects.filter(id=1)
self.assertNotEqual(search,'Kam.jpeg')
def test_dele(self):
self.profile.save_profile()
profi = Profile.objects.all()
self.assertTrue(len(profi)>=0)
class CommentTestClass(TestCase):
# Set up method
def setUp(self):
self.comment= Comment(comment = 'Fun')
# Testing instance
def test_instance(self):
        self.assertIsInstance(self.comment, Comment)
# Testing Save Method
def test_save(self):
self.comment.save_comment()
comments = Comment.objects.all()
self.assertTrue(len(comments) >= 1)
def test_upd(self):
comment = Comment.objects.filter(id=1)
comment.update(comment ='Art')
search = Comment.objects.filter(id=1)
self.assertNotEqual(search,'Art')
def test_del(self):
self.comment.save_comment()
comments = self.comment.dele_comment()
comment = Comment.objects.all()
self.assertTrue(len(comment)<=0)
def tearDown(self):
Comment.objects.all().delete()
class FotoTestClass(TestCase):
def setUp(self):
self.profile= Profile(image = 'Jam.jpeg', name ='Muriuki', email ='[email protected]',bio = 'hdeydfedf')
self.profile.save_profile()
self.new_comment = Comment(comment = 'Fun')
self.new_comment.save_comment()
self.new_photos= Foto(image = 'Jam.jpeg', name ='Muriuki', caption ='jamesmoringaschoolcom',like = "2", comments=self.new_comment)
self.new_photos.save_pic()
def tearDown(self):
Profile.objects.all().delete()
Comment.objects.all().delete()
Foto.objects.all().delete()
def test_save_pick(self):
self.new_photos= Foto(image = 'Jam.jpeg', name ='Muriuki', caption ='jamesmoringaschoolcom',like = "2", comments=self.new_comment)
self.new_photos.save_pic()
picture = Foto.objects.all()
self.assertTrue(len(picture)>=1)
def test_dele_pick(self):
self.new_photos= Foto(image = 'Jam.jpeg', name ='Muriuki', caption ='jamesmoringaschoolcom',like = "2", comments=self.new_comment)
self.new_photos.save_pic()
picture = self.new_photos.dele_pic()
delete = Foto.objects.all()
self.assertTrue(len(delete)>=0)
def test_upd_pic(self):
image = Foto.objects.filter(id=1)
image.update(name ='lez.jpeg')
search = Foto.objects.filter(id=1)
self.assertNotEqual(search,'lez.jpeg')
def test_pic_id(self):
self.image = Foto(image = 'Jam.jpeg', name ='Muriuki', caption ='jamesmoringaschoolcom',like = "2", comments=self.new_comment)
self.image.save_pic()
search = Foto.image_by_id(self.image.id)
self.assertNotEqual(search,self.image)
|
py | 1a2f8bcccfe71e30a7de12aa0c698428913e294e | def get_set():
return set(map(int, input().split()))
def is_super_set(main, sets):
    for candidate in sets:  # avoid shadowing the built-in name `set`
        if not main.issuperset(candidate):
            return False
return True
A = get_set()
queries = int(input())
sets = []
for _ in range(queries):
sets.append(get_set())
print(is_super_set(A, sets))
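# Example session (editor's sketch, assuming whitespace-separated integers on
# stdin: the main set, then the number of query sets, then one set per line):
#
#   1 2 3 5 6 8
#   2
#   1 2 3
#   5 6
#
# prints True, because {1, 2, 3, 5, 6, 8} is a superset of both query sets.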
|
py | 1a2f8dcb8f9b4d277dcf0b89bd2d577aaf5bd104 | # SPDX-License-Identifier: Apache-2.0
import os
from distutils.version import StrictVersion
import numpy as np
import onnx
from onnxruntime import __version__ as ort_version
from skl2onnx import __max_supported_opset__ as max_opset
from skl2onnx.common._topology import OPSET_TO_IR_VERSION
from .tests_helper import dump_data_and_model # noqa
from .tests_helper import ( # noqa
dump_one_class_classification,
dump_binary_classification,
dump_multilabel_classification,
dump_multiple_classification)
from .tests_helper import ( # noqa
dump_multiple_regression,
dump_single_regression,
convert_model,
fit_classification_model,
fit_multilabel_classification_model,
fit_clustering_model,
fit_regression_model,
binary_array_to_string,
path_to_leaf
)
def create_tensor(N, C, H=None, W=None):
if H is None and W is None:
return np.random.rand(N, C).astype(np.float32, copy=False)
elif H is not None and W is not None:
return np.random.rand(N, C, H, W).astype(np.float32, copy=False)
else:
        raise ValueError('This function only produces 2-D or 4-D tensors.')
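# Usage sketch (editor's illustration): create_tensor(2, 3) returns a float32
# array of shape (2, 3), create_tensor(2, 3, 4, 5) returns shape (2, 3, 4, 5),
# and any other H/W combination raises the ValueError above.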
def _get_ir_version(opv):
if opv >= 15:
return 8
if opv >= 12:
return 7
if opv >= 11:
return 6
if opv >= 10:
return 5
if opv >= 9:
return 4
if opv >= 8:
return 4
return 3
def max_onnxruntime_opset():
"""
See `Versioning.md
<https://github.com/microsoft/onnxruntime/blob/
master/docs/Versioning.md>`_.
"""
vi = StrictVersion(ort_version.split('+')[0])
if vi >= StrictVersion("1.9.0"):
return 15
if vi >= StrictVersion("1.8.0"):
return 14
if vi >= StrictVersion("1.6.0"):
return 13
if vi >= StrictVersion("1.3.0"):
return 12
if vi >= StrictVersion("1.0.0"):
return 11
if vi >= StrictVersion("0.4.0"):
return 10
if vi >= StrictVersion("0.3.0"):
return 9
return 8
TARGET_OPSET = int(
os.environ.get(
'TEST_TARGET_OPSET',
min(max_onnxruntime_opset(),
min(max_opset,
onnx.defs.onnx_opset_version()))))
TARGET_IR = int(
os.environ.get(
'TEST_TARGET_IR',
min(OPSET_TO_IR_VERSION[TARGET_OPSET],
_get_ir_version(TARGET_OPSET))))
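# Worked example (editor's sketch, assuming onnxruntime 1.9.x is installed and
# neither TEST_TARGET_OPSET nor TEST_TARGET_IR is set): max_onnxruntime_opset()
# returns 15, so TARGET_OPSET = min(15, max_opset, onnx.defs.onnx_opset_version());
# if that resolves to 15, TARGET_IR = min(OPSET_TO_IR_VERSION[15],
# _get_ir_version(15)), where _get_ir_version(15) is 8.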
|
py | 1a2f906f97152222149dfc9b95e0e3885a19b936 | """
Base and utility classes for pandas objects.
"""
import builtins
from collections import OrderedDict
import textwrap
from typing import Dict, FrozenSet, Optional
import warnings
import numpy as np
import pandas._libs.lib as lib
from pandas.compat import PYPY
from pandas.compat.numpy import function as nv
from pandas.errors import AbstractMethodError
from pandas.util._decorators import Appender, Substitution, cache_readonly
from pandas.util._validators import validate_bool_kwarg
from pandas.core.dtypes.cast import is_nested_object
from pandas.core.dtypes.common import (
is_categorical_dtype,
is_datetime64_ns_dtype,
is_datetime64tz_dtype,
is_datetimelike,
is_extension_array_dtype,
is_extension_type,
is_list_like,
is_object_dtype,
is_scalar,
is_timedelta64_ns_dtype,
)
from pandas.core.dtypes.generic import ABCDataFrame, ABCIndexClass, ABCSeries
from pandas.core.dtypes.missing import isna
from pandas.core import algorithms, common as com
from pandas.core.accessor import DirNamesMixin
from pandas.core.algorithms import duplicated, unique1d, value_counts
from pandas.core.arrays import ExtensionArray
import pandas.core.nanops as nanops
_shared_docs = dict() # type: Dict[str, str]
_indexops_doc_kwargs = dict(
klass="IndexOpsMixin",
inplace="",
unique="IndexOpsMixin",
duplicated="IndexOpsMixin",
)
class PandasObject(DirNamesMixin):
"""baseclass for various pandas objects"""
@property
def _constructor(self):
"""class constructor (for this class it's just `__class__`"""
return self.__class__
def __repr__(self):
"""
Return a string representation for a particular object.
"""
# Should be overwritten by base classes
return object.__repr__(self)
def _reset_cache(self, key=None):
"""
Reset cached properties. If ``key`` is passed, only clears that key.
"""
if getattr(self, "_cache", None) is None:
return
if key is None:
self._cache.clear()
else:
self._cache.pop(key, None)
def __sizeof__(self):
"""
Generates the total memory usage for an object that returns
either a value or Series of values
"""
if hasattr(self, "memory_usage"):
mem = self.memory_usage(deep=True)
if not is_scalar(mem):
mem = mem.sum()
return int(mem)
# no memory_usage attribute, so fall back to
# object's 'sizeof'
return super().__sizeof__()
class NoNewAttributesMixin:
"""Mixin which prevents adding new attributes.
Prevents additional attributes via xxx.attribute = "something" after a
call to `self.__freeze()`. Mainly used to prevent the user from using
wrong attributes on a accessor (`Series.cat/.str/.dt`).
If you really want to add a new attribute at a later time, you need to use
`object.__setattr__(self, key, value)`.
"""
def _freeze(self):
"""Prevents setting additional attributes"""
object.__setattr__(self, "__frozen", True)
# prevent adding any attribute via s.xxx.new_attribute = ...
def __setattr__(self, key, value):
# _cache is used by a decorator
# We need to check both 1.) cls.__dict__ and 2.) getattr(self, key)
# because
# 1.) getattr is false for attributes that raise errors
# 2.) cls.__dict__ doesn't traverse into base classes
if getattr(self, "__frozen", False) and not (
key == "_cache"
or key in type(self).__dict__
or getattr(self, key, None) is not None
):
raise AttributeError(
"You cannot add any new attribute '{key}'".format(key=key)
)
object.__setattr__(self, key, value)
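    # Illustrative behaviour (editor's sketch): after calling obj._freeze(),
    #     obj.brand_new_attr = 1
    # raises AttributeError, while
    #     object.__setattr__(obj, "brand_new_attr", 1)
    # is still allowed, as described in the class docstring above.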
class GroupByError(Exception):
pass
class DataError(GroupByError):
pass
class SpecificationError(GroupByError):
pass
class SelectionMixin:
"""
mixin implementing the selection & aggregation interface on a group-like
object sub-classes need to define: obj, exclusions
"""
_selection = None
_internal_names = ["_cache", "__setstate__"]
_internal_names_set = set(_internal_names)
_builtin_table = OrderedDict(
((builtins.sum, np.sum), (builtins.max, np.max), (builtins.min, np.min))
)
_cython_table = OrderedDict(
(
(builtins.sum, "sum"),
(builtins.max, "max"),
(builtins.min, "min"),
(np.all, "all"),
(np.any, "any"),
(np.sum, "sum"),
(np.nansum, "sum"),
(np.mean, "mean"),
(np.nanmean, "mean"),
(np.prod, "prod"),
(np.nanprod, "prod"),
(np.std, "std"),
(np.nanstd, "std"),
(np.var, "var"),
(np.nanvar, "var"),
(np.median, "median"),
(np.nanmedian, "median"),
(np.max, "max"),
(np.nanmax, "max"),
(np.min, "min"),
(np.nanmin, "min"),
(np.cumprod, "cumprod"),
(np.nancumprod, "cumprod"),
(np.cumsum, "cumsum"),
(np.nancumsum, "cumsum"),
)
)
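    # Editor's note: these two tables route builtin/NumPy aggregation requests
    # to pandas' optimized implementations, e.g. _get_cython_func(np.nanmean)
    # returns "mean" and _is_builtin_func(builtins.sum) returns np.sum (see the
    # helper methods defined at the end of this class).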
@property
def _selection_name(self):
"""
return a name for myself; this would ideally be called
the 'name' property, but we cannot conflict with the
Series.name property which can be set
"""
if self._selection is None:
return None # 'result'
else:
return self._selection
@property
def _selection_list(self):
if not isinstance(
self._selection, (list, tuple, ABCSeries, ABCIndexClass, np.ndarray)
):
return [self._selection]
return self._selection
@cache_readonly
def _selected_obj(self):
if self._selection is None or isinstance(self.obj, ABCSeries):
return self.obj
else:
return self.obj[self._selection]
@cache_readonly
def ndim(self):
return self._selected_obj.ndim
@cache_readonly
def _obj_with_exclusions(self):
if self._selection is not None and isinstance(self.obj, ABCDataFrame):
return self.obj.reindex(columns=self._selection_list)
if len(self.exclusions) > 0:
return self.obj.drop(self.exclusions, axis=1)
else:
return self.obj
def __getitem__(self, key):
if self._selection is not None:
raise IndexError(
"Column(s) {selection} already selected".format(
selection=self._selection
)
)
if isinstance(key, (list, tuple, ABCSeries, ABCIndexClass, np.ndarray)):
if len(self.obj.columns.intersection(key)) != len(key):
bad_keys = list(set(key).difference(self.obj.columns))
raise KeyError(
"Columns not found: {missing}".format(missing=str(bad_keys)[1:-1])
)
return self._gotitem(list(key), ndim=2)
elif not getattr(self, "as_index", False):
if key not in self.obj.columns:
raise KeyError("Column not found: {key}".format(key=key))
return self._gotitem(key, ndim=2)
else:
if key not in self.obj:
raise KeyError("Column not found: {key}".format(key=key))
return self._gotitem(key, ndim=1)
def _gotitem(self, key, ndim, subset=None):
"""
sub-classes to define
return a sliced object
Parameters
----------
key : string / list of selections
ndim : 1,2
requested ndim of result
subset : object, default None
subset to act on
"""
raise AbstractMethodError(self)
def aggregate(self, func, *args, **kwargs):
raise AbstractMethodError(self)
agg = aggregate
def _try_aggregate_string_function(self, arg: str, *args, **kwargs):
"""
if arg is a string, then try to operate on it:
- try to find a function (or attribute) on ourselves
- try to find a numpy function
- raise
"""
assert isinstance(arg, str)
f = getattr(self, arg, None)
if f is not None:
if callable(f):
return f(*args, **kwargs)
# people may try to aggregate on a non-callable attribute
# but don't let them think they can pass args to it
assert len(args) == 0
assert (
len([kwarg for kwarg in kwargs if kwarg not in ["axis", "_level"]]) == 0
)
return f
f = getattr(np, arg, None)
if f is not None:
if hasattr(self, "__array__"):
# in particular exclude Window
return f(self, *args, **kwargs)
raise AttributeError(
"'{arg}' is not a valid function for "
"'{cls}' object".format(arg=arg, cls=type(self).__name__)
)
def _aggregate(self, arg, *args, **kwargs):
"""
provide an implementation for the aggregators
Parameters
----------
arg : string, dict, function
*args : args to pass on to the function
**kwargs : kwargs to pass on to the function
Returns
-------
tuple of result, how
Notes
-----
        how can be a string describing the required post-processing, or
None if not required
"""
is_aggregator = lambda x: isinstance(x, (list, tuple, dict))
is_nested_renamer = False
_axis = kwargs.pop("_axis", None)
if _axis is None:
_axis = getattr(self, "axis", 0)
_level = kwargs.pop("_level", None)
if isinstance(arg, str):
return self._try_aggregate_string_function(arg, *args, **kwargs), None
if isinstance(arg, dict):
# aggregate based on the passed dict
if _axis != 0: # pragma: no cover
raise ValueError("Can only pass dict with axis=0")
obj = self._selected_obj
def nested_renaming_depr(level=4):
# deprecation of nested renaming
# GH 15931
msg = textwrap.dedent(
"""\
using a dict with renaming is deprecated and will be removed
in a future version.
For column-specific groupby renaming, use named aggregation
>>> df.groupby(...).agg(name=('column', aggfunc))
"""
)
warnings.warn(msg, FutureWarning, stacklevel=level)
# if we have a dict of any non-scalars
# eg. {'A' : ['mean']}, normalize all to
# be list-likes
if any(is_aggregator(x) for x in arg.values()):
new_arg = OrderedDict()
for k, v in arg.items():
if not isinstance(v, (tuple, list, dict)):
new_arg[k] = [v]
else:
new_arg[k] = v
# the keys must be in the columns
# for ndim=2, or renamers for ndim=1
# ok for now, but deprecated
# {'A': { 'ra': 'mean' }}
# {'A': { 'ra': ['mean'] }}
# {'ra': ['mean']}
# not ok
# {'ra' : { 'A' : 'mean' }}
if isinstance(v, dict):
is_nested_renamer = True
if k not in obj.columns:
msg = (
"cannot perform renaming for {key} with a "
"nested dictionary"
).format(key=k)
raise SpecificationError(msg)
nested_renaming_depr(4 + (_level or 0))
elif isinstance(obj, ABCSeries):
nested_renaming_depr()
elif isinstance(obj, ABCDataFrame) and k not in obj.columns:
raise KeyError("Column '{col}' does not exist!".format(col=k))
arg = new_arg
else:
# deprecation of renaming keys
# GH 15931
keys = list(arg.keys())
if isinstance(obj, ABCDataFrame) and len(
obj.columns.intersection(keys)
) != len(keys):
nested_renaming_depr()
from pandas.core.reshape.concat import concat
def _agg_1dim(name, how, subset=None):
"""
aggregate a 1-dim with how
"""
colg = self._gotitem(name, ndim=1, subset=subset)
if colg.ndim != 1:
raise SpecificationError(
"nested dictionary is ambiguous in aggregation"
)
return colg.aggregate(how, _level=(_level or 0) + 1)
def _agg_2dim(name, how):
"""
aggregate a 2-dim with how
"""
colg = self._gotitem(self._selection, ndim=2, subset=obj)
return colg.aggregate(how, _level=None)
def _agg(arg, func):
"""
run the aggregations over the arg with func
return an OrderedDict
"""
result = OrderedDict()
for fname, agg_how in arg.items():
result[fname] = func(fname, agg_how)
return result
# set the final keys
keys = list(arg.keys())
result = OrderedDict()
# nested renamer
if is_nested_renamer:
result = list(_agg(arg, _agg_1dim).values())
if all(isinstance(r, dict) for r in result):
result, results = OrderedDict(), result
for r in results:
result.update(r)
keys = list(result.keys())
else:
if self._selection is not None:
keys = None
# some selection on the object
elif self._selection is not None:
sl = set(self._selection_list)
# we are a Series like object,
# but may have multiple aggregations
if len(sl) == 1:
result = _agg(
arg, lambda fname, agg_how: _agg_1dim(self._selection, agg_how)
)
# we are selecting the same set as we are aggregating
elif not len(sl - set(keys)):
result = _agg(arg, _agg_1dim)
# we are a DataFrame, with possibly multiple aggregations
else:
result = _agg(arg, _agg_2dim)
# no selection
else:
try:
result = _agg(arg, _agg_1dim)
except SpecificationError:
# we are aggregating expecting all 1d-returns
# but we have 2d
result = _agg(arg, _agg_2dim)
# combine results
def is_any_series():
# return a boolean if we have *any* nested series
return any(isinstance(r, ABCSeries) for r in result.values())
def is_any_frame():
                # return a boolean if we have *any* nested frames
return any(isinstance(r, ABCDataFrame) for r in result.values())
if isinstance(result, list):
return concat(result, keys=keys, axis=1, sort=True), True
elif is_any_frame():
# we have a dict of DataFrames
# return a MI DataFrame
return concat([result[k] for k in keys], keys=keys, axis=1), True
elif isinstance(self, ABCSeries) and is_any_series():
# we have a dict of Series
# return a MI Series
try:
result = concat(result)
except TypeError:
# we want to give a nice error here if
# we have non-same sized objects, so
# we don't automatically broadcast
raise ValueError(
"cannot perform both aggregation "
"and transformation operations "
"simultaneously"
)
return result, True
# fall thru
from pandas import DataFrame, Series
try:
result = DataFrame(result)
except ValueError:
# we have a dict of scalars
result = Series(result, name=getattr(self, "name", None))
return result, True
elif is_list_like(arg):
# we require a list, but not an 'str'
return self._aggregate_multiple_funcs(arg, _level=_level, _axis=_axis), None
else:
result = None
f = self._get_cython_func(arg)
if f and not args and not kwargs:
return getattr(self, f)(), None
# caller can react
return result, True
def _aggregate_multiple_funcs(self, arg, _level, _axis):
from pandas.core.reshape.concat import concat
if _axis != 0:
raise NotImplementedError("axis other than 0 is not supported")
if self._selected_obj.ndim == 1:
obj = self._selected_obj
else:
obj = self._obj_with_exclusions
results = []
keys = []
# degenerate case
if obj.ndim == 1:
for a in arg:
colg = self._gotitem(obj.name, ndim=1, subset=obj)
try:
new_res = colg.aggregate(a)
except (TypeError, DataError):
pass
else:
results.append(new_res)
# make sure we find a good name
name = com.get_callable_name(a) or a
keys.append(name)
# multiples
else:
for index, col in enumerate(obj):
colg = self._gotitem(col, ndim=1, subset=obj.iloc[:, index])
try:
new_res = colg.aggregate(arg)
except (TypeError, DataError):
pass
except ValueError as err:
# cannot aggregate
if "Must produce aggregated value" in str(err):
# raised directly in _aggregate_named
pass
elif "no results" in str(err):
                        # raised directly in _aggregate_multiple_funcs
pass
else:
raise
else:
results.append(new_res)
keys.append(col)
# if we are empty
if not len(results):
raise ValueError("no results")
try:
return concat(results, keys=keys, axis=1, sort=False)
except TypeError:
# we are concatting non-NDFrame objects,
# e.g. a list of scalars
from pandas import Series
result = Series(results, index=keys, name=self.name)
if is_nested_object(result):
raise ValueError("cannot combine transform and aggregation operations")
return result
def _shallow_copy(self, obj=None, obj_type=None, **kwargs):
"""
return a new object with the replacement attributes
"""
if obj is None:
obj = self._selected_obj.copy()
if obj_type is None:
obj_type = self._constructor
if isinstance(obj, obj_type):
obj = obj.obj
for attr in self._attributes:
if attr not in kwargs:
kwargs[attr] = getattr(self, attr)
return obj_type(obj, **kwargs)
def _get_cython_func(self, arg: str) -> Optional[str]:
"""
if we define an internal function for this argument, return it
"""
return self._cython_table.get(arg)
def _is_builtin_func(self, arg):
"""
if we define an builtin function for this argument, return it,
otherwise return the arg
"""
return self._builtin_table.get(arg, arg)
class IndexOpsMixin:
"""
Common ops mixin to support a unified interface / docs for Series / Index
"""
# ndarray compatibility
__array_priority__ = 1000
_deprecations = frozenset(
[
"tolist", # tolist is not deprecated, just suppressed in the __dir__
"base",
"data",
"item",
"itemsize",
"flags",
"strides",
]
) # type: FrozenSet[str]
def transpose(self, *args, **kwargs):
"""
Return the transpose, which is by definition self.
Returns
-------
%(klass)s
"""
nv.validate_transpose(args, kwargs)
return self
T = property(
transpose,
doc="""
Return the transpose, which is by definition self.
""",
)
@property
def _is_homogeneous_type(self):
"""
Whether the object has a single dtype.
By definition, Series and Index are always considered homogeneous.
A MultiIndex may or may not be homogeneous, depending on the
dtypes of the levels.
See Also
--------
DataFrame._is_homogeneous_type : Whether all the columns in a
DataFrame have the same dtype.
MultiIndex._is_homogeneous_type : Whether all the levels of a
MultiIndex have the same dtype.
"""
return True
@property
def shape(self):
"""
Return a tuple of the shape of the underlying data.
"""
return self._values.shape
@property
def ndim(self):
"""
Number of dimensions of the underlying data, by definition 1.
"""
return 1
def item(self):
"""
Return the first element of the underlying data as a python scalar.
.. deprecated:: 0.25.0
Returns
-------
scalar
The first element of %(klass)s.
"""
warnings.warn(
"`item` has been deprecated and will be removed in a future version",
FutureWarning,
stacklevel=2,
)
return self.values.item()
@property
def data(self):
"""
Return the data pointer of the underlying data.
.. deprecated:: 0.23.0
"""
warnings.warn(
"{obj}.data is deprecated and will be removed "
"in a future version".format(obj=type(self).__name__),
FutureWarning,
stacklevel=2,
)
return self.values.data
@property
def itemsize(self):
"""
Return the size of the dtype of the item of the underlying data.
.. deprecated:: 0.23.0
"""
warnings.warn(
"{obj}.itemsize is deprecated and will be removed "
"in a future version".format(obj=type(self).__name__),
FutureWarning,
stacklevel=2,
)
return self._ndarray_values.itemsize
@property
def nbytes(self):
"""
Return the number of bytes in the underlying data.
"""
return self._values.nbytes
@property
def strides(self):
"""
Return the strides of the underlying data.
.. deprecated:: 0.23.0
"""
warnings.warn(
"{obj}.strides is deprecated and will be removed "
"in a future version".format(obj=type(self).__name__),
FutureWarning,
stacklevel=2,
)
return self._ndarray_values.strides
@property
def size(self):
"""
Return the number of elements in the underlying data.
"""
return len(self._values)
@property
def flags(self):
"""
Return the ndarray.flags for the underlying data.
.. deprecated:: 0.23.0
"""
warnings.warn(
"{obj}.flags is deprecated and will be removed "
"in a future version".format(obj=type(self).__name__),
FutureWarning,
stacklevel=2,
)
return self.values.flags
@property
def base(self):
"""
Return the base object if the memory of the underlying data is shared.
.. deprecated:: 0.23.0
"""
warnings.warn(
"{obj}.base is deprecated and will be removed "
"in a future version".format(obj=type(self).__name__),
FutureWarning,
stacklevel=2,
)
return self.values.base
@property
def array(self) -> ExtensionArray:
"""
The ExtensionArray of the data backing this Series or Index.
.. versionadded:: 0.24.0
Returns
-------
ExtensionArray
An ExtensionArray of the values stored within. For extension
types, this is the actual array. For NumPy native types, this
is a thin (no copy) wrapper around :class:`numpy.ndarray`.
        ``.array`` differs from ``.values``, which may require converting
        the data to a different form.
See Also
--------
Index.to_numpy : Similar method that always returns a NumPy array.
Series.to_numpy : Similar method that always returns a NumPy array.
Notes
-----
This table lays out the different array types for each extension
dtype within pandas.
================== =============================
dtype array type
================== =============================
category Categorical
period PeriodArray
interval IntervalArray
IntegerNA IntegerArray
datetime64[ns, tz] DatetimeArray
================== =============================
For any 3rd-party extension types, the array type will be an
ExtensionArray.
For all remaining dtypes ``.array`` will be a
:class:`arrays.NumpyExtensionArray` wrapping the actual ndarray
stored within. If you absolutely need a NumPy array (possibly with
copying / coercing data), then use :meth:`Series.to_numpy` instead.
Examples
--------
For regular NumPy types like int, and float, a PandasArray
is returned.
>>> pd.Series([1, 2, 3]).array
<PandasArray>
[1, 2, 3]
Length: 3, dtype: int64
For extension types, like Categorical, the actual ExtensionArray
is returned
>>> ser = pd.Series(pd.Categorical(['a', 'b', 'a']))
>>> ser.array
[a, b, a]
Categories (2, object): [a, b]
"""
# As a mixin, we depend on the mixing class having _values.
# Special mixin syntax may be developed in the future:
# https://github.com/python/typing/issues/246
result = self._values # type: ignore
if is_datetime64_ns_dtype(result.dtype):
from pandas.arrays import DatetimeArray
result = DatetimeArray(result)
elif is_timedelta64_ns_dtype(result.dtype):
from pandas.arrays import TimedeltaArray
result = TimedeltaArray(result)
elif not is_extension_array_dtype(result.dtype):
from pandas.core.arrays.numpy_ import PandasArray
result = PandasArray(result)
return result
def to_numpy(self, dtype=None, copy=False):
"""
A NumPy ndarray representing the values in this Series or Index.
.. versionadded:: 0.24.0
Parameters
----------
dtype : str or numpy.dtype, optional
The dtype to pass to :meth:`numpy.asarray`.
copy : bool, default False
Whether to ensure that the returned value is a not a view on
another array. Note that ``copy=False`` does not *ensure* that
            ``to_numpy()`` is no-copy. Rather, ``copy=True`` ensures that
a copy is made, even if not strictly necessary.
Returns
-------
numpy.ndarray
See Also
--------
Series.array : Get the actual data stored within.
Index.array : Get the actual data stored within.
DataFrame.to_numpy : Similar method for DataFrame.
Notes
-----
The returned array will be the same up to equality (values equal
in `self` will be equal in the returned array; likewise for values
that are not equal). When `self` contains an ExtensionArray, the
dtype may be different. For example, for a category-dtype Series,
``to_numpy()`` will return a NumPy array and the categorical dtype
will be lost.
For NumPy dtypes, this will be a reference to the actual data stored
in this Series or Index (assuming ``copy=False``). Modifying the result
in place will modify the data stored in the Series or Index (not that
we recommend doing that).
For extension types, ``to_numpy()`` *may* require copying data and
coercing the result to a NumPy type (possibly object), which may be
expensive. When you need a no-copy reference to the underlying data,
:attr:`Series.array` should be used instead.
This table lays out the different dtypes and default return types of
``to_numpy()`` for various dtypes within pandas.
================== ================================
dtype array type
================== ================================
category[T] ndarray[T] (same dtype as input)
period ndarray[object] (Periods)
interval ndarray[object] (Intervals)
IntegerNA ndarray[object]
datetime64[ns] datetime64[ns]
datetime64[ns, tz] ndarray[object] (Timestamps)
================== ================================
Examples
--------
>>> ser = pd.Series(pd.Categorical(['a', 'b', 'a']))
>>> ser.to_numpy()
array(['a', 'b', 'a'], dtype=object)
Specify the `dtype` to control how datetime-aware data is represented.
Use ``dtype=object`` to return an ndarray of pandas :class:`Timestamp`
objects, each with the correct ``tz``.
>>> ser = pd.Series(pd.date_range('2000', periods=2, tz="CET"))
>>> ser.to_numpy(dtype=object)
array([Timestamp('2000-01-01 00:00:00+0100', tz='CET', freq='D'),
Timestamp('2000-01-02 00:00:00+0100', tz='CET', freq='D')],
dtype=object)
Or ``dtype='datetime64[ns]'`` to return an ndarray of native
datetime64 values. The values are converted to UTC and the timezone
info is dropped.
>>> ser.to_numpy(dtype="datetime64[ns]")
... # doctest: +ELLIPSIS
array(['1999-12-31T23:00:00.000000000', '2000-01-01T23:00:00...'],
dtype='datetime64[ns]')
"""
if is_datetime64tz_dtype(self.dtype) and dtype is None:
# note: this is going to change very soon.
# I have a WIP PR making this unnecessary, but it's
# a bit out of scope for the DatetimeArray PR.
dtype = "object"
result = np.asarray(self._values, dtype=dtype)
# TODO(GH-24345): Avoid potential double copy
if copy:
result = result.copy()
return result
@property
def _ndarray_values(self) -> np.ndarray:
"""
The data as an ndarray, possibly losing information.
The expectation is that this is cheap to compute, and is primarily
used for interacting with our indexers.
- categorical -> codes
"""
if is_extension_array_dtype(self):
return self.array._ndarray_values
# As a mixin, we depend on the mixing class having values.
# Special mixin syntax may be developed in the future:
# https://github.com/python/typing/issues/246
return self.values # type: ignore
@property
def empty(self):
return not self.size
def max(self, axis=None, skipna=True, *args, **kwargs):
"""
Return the maximum value of the Index.
Parameters
----------
axis : int, optional
For compatibility with NumPy. Only 0 or None are allowed.
skipna : bool, default True
Returns
-------
scalar
Maximum value.
See Also
--------
Index.min : Return the minimum value in an Index.
Series.max : Return the maximum value in a Series.
DataFrame.max : Return the maximum values in a DataFrame.
Examples
--------
>>> idx = pd.Index([3, 2, 1])
>>> idx.max()
3
>>> idx = pd.Index(['c', 'b', 'a'])
>>> idx.max()
'c'
For a MultiIndex, the maximum is determined lexicographically.
>>> idx = pd.MultiIndex.from_product([('a', 'b'), (2, 1)])
>>> idx.max()
('b', 2)
"""
nv.validate_minmax_axis(axis)
nv.validate_max(args, kwargs)
return nanops.nanmax(self._values, skipna=skipna)
def argmax(self, axis=None, skipna=True, *args, **kwargs):
"""
Return an ndarray of the maximum argument indexer.
Parameters
----------
axis : {None}
Dummy argument for consistency with Series.
skipna : bool, default True
Returns
-------
numpy.ndarray
Indices of the maximum values.
See Also
--------
numpy.ndarray.argmax
"""
nv.validate_minmax_axis(axis)
nv.validate_argmax_with_skipna(skipna, args, kwargs)
return nanops.nanargmax(self._values, skipna=skipna)
def min(self, axis=None, skipna=True, *args, **kwargs):
"""
Return the minimum value of the Index.
Parameters
----------
axis : {None}
Dummy argument for consistency with Series.
skipna : bool, default True
Returns
-------
scalar
Minimum value.
See Also
--------
Index.max : Return the maximum value of the object.
Series.min : Return the minimum value in a Series.
DataFrame.min : Return the minimum values in a DataFrame.
Examples
--------
>>> idx = pd.Index([3, 2, 1])
>>> idx.min()
1
>>> idx = pd.Index(['c', 'b', 'a'])
>>> idx.min()
'a'
For a MultiIndex, the minimum is determined lexicographically.
>>> idx = pd.MultiIndex.from_product([('a', 'b'), (2, 1)])
>>> idx.min()
('a', 1)
"""
nv.validate_minmax_axis(axis)
nv.validate_min(args, kwargs)
return nanops.nanmin(self._values, skipna=skipna)
def argmin(self, axis=None, skipna=True, *args, **kwargs):
"""
Return a ndarray of the minimum argument indexer.
Parameters
----------
axis : {None}
Dummy argument for consistency with Series.
skipna : bool, default True
Returns
-------
numpy.ndarray
See Also
--------
numpy.ndarray.argmin
"""
nv.validate_minmax_axis(axis)
nv.validate_argmax_with_skipna(skipna, args, kwargs)
return nanops.nanargmin(self._values, skipna=skipna)
def tolist(self):
"""
Return a list of the values.
These are each a scalar type, which is a Python scalar
(for str, int, float) or a pandas scalar
(for Timestamp/Timedelta/Interval/Period)
Returns
-------
list
See Also
--------
numpy.ndarray.tolist
"""
if is_datetimelike(self._values):
return [com.maybe_box_datetimelike(x) for x in self._values]
elif is_extension_array_dtype(self._values):
return list(self._values)
else:
return self._values.tolist()
to_list = tolist
def __iter__(self):
"""
Return an iterator of the values.
These are each a scalar type, which is a Python scalar
(for str, int, float) or a pandas scalar
(for Timestamp/Timedelta/Interval/Period)
Returns
-------
iterator
"""
# We are explicitly making element iterators.
if is_datetimelike(self._values):
return map(com.maybe_box_datetimelike, self._values)
elif is_extension_array_dtype(self._values):
return iter(self._values)
else:
return map(self._values.item, range(self._values.size))
@cache_readonly
def hasnans(self):
"""
Return if I have any nans; enables various perf speedups.
"""
return bool(isna(self).any())
def _reduce(
self, op, name, axis=0, skipna=True, numeric_only=None, filter_type=None, **kwds
):
""" perform the reduction type operation if we can """
func = getattr(self, name, None)
if func is None:
raise TypeError(
"{klass} cannot perform the operation {op}".format(
klass=self.__class__.__name__, op=name
)
)
return func(skipna=skipna, **kwds)
def _map_values(self, mapper, na_action=None):
"""
An internal function that maps values using the input
correspondence (which can be a dict, Series, or function).
Parameters
----------
mapper : function, dict, or Series
The input correspondence object
na_action : {None, 'ignore'}
If 'ignore', propagate NA values, without passing them to the
mapping function
Returns
-------
Union[Index, MultiIndex], inferred
The output of the mapping function applied to the index.
If the function returns a tuple with more than one element
a MultiIndex will be returned.
"""
# we can fastpath dict/Series to an efficient map
# as we know that we are not going to have to yield
# python types
if isinstance(mapper, dict):
if hasattr(mapper, "__missing__"):
# If a dictionary subclass defines a default value method,
# convert mapper to a lookup function (GH #15999).
dict_with_default = mapper
mapper = lambda x: dict_with_default[x]
else:
# Dictionary does not have a default. Thus it's safe to
# convert to an Series for efficiency.
# we specify the keys here to handle the
# possibility that they are tuples
from pandas import Series
mapper = Series(mapper)
if isinstance(mapper, ABCSeries):
# Since values were input this means we came from either
# a dict or a series and mapper should be an index
if is_categorical_dtype(self._values):
# use the built in categorical series mapper which saves
# time by mapping the categories instead of all values
return self._values.map(mapper)
if is_extension_type(self.dtype):
values = self._values
else:
values = self.values
indexer = mapper.index.get_indexer(values)
new_values = algorithms.take_1d(mapper._values, indexer)
return new_values
# we must convert to python types
if is_extension_type(self.dtype):
values = self._values
if na_action is not None:
raise NotImplementedError
map_f = lambda values, f: values.map(f)
else:
values = self.astype(object)
values = getattr(values, "values", values)
if na_action == "ignore":
def map_f(values, f):
return lib.map_infer_mask(values, f, isna(values).view(np.uint8))
else:
map_f = lib.map_infer
# mapper is a function
new_values = map_f(values, mapper)
return new_values
def value_counts(
self, normalize=False, sort=True, ascending=False, bins=None, dropna=True
):
"""
Return a Series containing counts of unique values.
The resulting object will be in descending order so that the
first element is the most frequently-occurring element.
Excludes NA values by default.
Parameters
----------
normalize : bool, default False
If True then the object returned will contain the relative
frequencies of the unique values.
sort : bool, default True
Sort by frequencies.
ascending : bool, default False
Sort in ascending order.
bins : int, optional
Rather than count values, group them into half-open bins,
a convenience for ``pd.cut``, only works with numeric data.
dropna : bool, default True
Don't include counts of NaN.
Returns
-------
Series
See Also
--------
Series.count: Number of non-NA elements in a Series.
DataFrame.count: Number of non-NA elements in a DataFrame.
Examples
--------
>>> index = pd.Index([3, 1, 2, 3, 4, np.nan])
>>> index.value_counts()
3.0 2
4.0 1
2.0 1
1.0 1
dtype: int64
With `normalize` set to `True`, returns the relative frequency by
dividing all values by the sum of values.
>>> s = pd.Series([3, 1, 2, 3, 4, np.nan])
>>> s.value_counts(normalize=True)
3.0 0.4
4.0 0.2
2.0 0.2
1.0 0.2
dtype: float64
**bins**
Bins can be useful for going from a continuous variable to a
categorical variable; instead of counting unique
apparitions of values, divide the index in the specified
number of half-open bins.
>>> s.value_counts(bins=3)
(2.0, 3.0] 2
(0.996, 2.0] 2
(3.0, 4.0] 1
dtype: int64
**dropna**
With `dropna` set to `False` we can also see NaN index values.
>>> s.value_counts(dropna=False)
3.0 2
NaN 1
4.0 1
2.0 1
1.0 1
dtype: int64
"""
result = value_counts(
self,
sort=sort,
ascending=ascending,
normalize=normalize,
bins=bins,
dropna=dropna,
)
return result
def unique(self):
values = self._values
if hasattr(values, "unique"):
result = values.unique()
else:
result = unique1d(values)
return result
def nunique(self, dropna=True):
"""
Return number of unique elements in the object.
Excludes NA values by default.
Parameters
----------
dropna : bool, default True
Don't include NaN in the count.
Returns
-------
int
See Also
--------
DataFrame.nunique: Method nunique for DataFrame.
Series.count: Count non-NA/null observations in the Series.
Examples
--------
>>> s = pd.Series([1, 3, 5, 7, 7])
>>> s
0 1
1 3
2 5
3 7
4 7
dtype: int64
>>> s.nunique()
4
"""
uniqs = self.unique()
n = len(uniqs)
if dropna and isna(uniqs).any():
n -= 1
return n
@property
def is_unique(self):
"""
Return boolean if values in the object are unique.
Returns
-------
bool
"""
return self.nunique(dropna=False) == len(self)
@property
def is_monotonic(self):
"""
Return boolean if values in the object are
monotonic_increasing.
Returns
-------
bool
"""
from pandas import Index
return Index(self).is_monotonic
is_monotonic_increasing = is_monotonic
@property
def is_monotonic_decreasing(self):
"""
Return boolean if values in the object are
monotonic_decreasing.
Returns
-------
bool
"""
from pandas import Index
return Index(self).is_monotonic_decreasing
def memory_usage(self, deep=False):
"""
Memory usage of the values.
Parameters
----------
deep : bool
Introspect the data deeply, interrogate
`object` dtypes for system-level memory consumption.
Returns
-------
bytes used
See Also
--------
numpy.ndarray.nbytes
Notes
-----
Memory usage does not include memory consumed by elements that
are not components of the array if deep=False or if used on PyPy
"""
if hasattr(self.array, "memory_usage"):
return self.array.memory_usage(deep=deep)
v = self.array.nbytes
if deep and is_object_dtype(self) and not PYPY:
v += lib.memory_usage_of_objects(self.array)
return v
@Substitution(
values="",
order="",
size_hint="",
sort=textwrap.dedent(
"""\
sort : bool, default False
Sort `uniques` and shuffle `labels` to maintain the
relationship.
"""
),
)
@Appender(algorithms._shared_docs["factorize"])
def factorize(self, sort=False, na_sentinel=-1):
return algorithms.factorize(self, sort=sort, na_sentinel=na_sentinel)
_shared_docs[
"searchsorted"
] = """
Find indices where elements should be inserted to maintain order.
Find the indices into a sorted %(klass)s `self` such that, if the
corresponding elements in `value` were inserted before the indices,
the order of `self` would be preserved.
.. note::
The %(klass)s *must* be monotonically sorted, otherwise
wrong locations will likely be returned. Pandas does *not*
check this for you.
Parameters
----------
value : array_like
Values to insert into `self`.
side : {'left', 'right'}, optional
If 'left', the index of the first suitable location found is given.
If 'right', return the last such index. If there is no suitable
index, return either 0 or N (where N is the length of `self`).
sorter : 1-D array_like, optional
Optional array of integer indices that sort `self` into ascending
order. They are typically the result of ``np.argsort``.
Returns
-------
int or array of int
A scalar or array of insertion points with the
same shape as `value`.
.. versionchanged:: 0.24.0
If `value` is a scalar, an int is now always returned.
Previously, scalar inputs returned an 1-item array for
:class:`Series` and :class:`Categorical`.
See Also
--------
sort_values
numpy.searchsorted
Notes
-----
Binary search is used to find the required insertion points.
Examples
--------
>>> x = pd.Series([1, 2, 3])
>>> x
0 1
1 2
2 3
dtype: int64
>>> x.searchsorted(4)
3
>>> x.searchsorted([0, 4])
array([0, 3])
>>> x.searchsorted([1, 3], side='left')
array([0, 2])
>>> x.searchsorted([1, 3], side='right')
array([1, 3])
>>> x = pd.Categorical(['apple', 'bread', 'bread',
                           'cheese', 'milk'], ordered=True)
    >>> x
    [apple, bread, bread, cheese, milk]
Categories (4, object): [apple < bread < cheese < milk]
>>> x.searchsorted('bread')
1
>>> x.searchsorted(['bread'], side='right')
array([3])
If the values are not monotonically sorted, wrong locations
may be returned:
>>> x = pd.Series([2, 1, 3])
>>> x.searchsorted(1)
0 # wrong result, correct would be 1
"""
@Substitution(klass="Index")
@Appender(_shared_docs["searchsorted"])
def searchsorted(self, value, side="left", sorter=None):
return algorithms.searchsorted(self._values, value, side=side, sorter=sorter)
def drop_duplicates(self, keep="first", inplace=False):
inplace = validate_bool_kwarg(inplace, "inplace")
if isinstance(self, ABCIndexClass):
if self.is_unique:
return self._shallow_copy()
duplicated = self.duplicated(keep=keep)
result = self[np.logical_not(duplicated)]
if inplace:
return self._update_inplace(result)
else:
return result
def duplicated(self, keep="first"):
if isinstance(self, ABCIndexClass):
if self.is_unique:
return np.zeros(len(self), dtype=np.bool)
return duplicated(self, keep=keep)
else:
return self._constructor(
duplicated(self, keep=keep), index=self.index
).__finalize__(self)
# ----------------------------------------------------------------------
# abstracts
def _update_inplace(self, result, verify_is_copy=True, **kwargs):
raise AbstractMethodError(self)
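# Editor's usage sketch (not part of pandas itself): the IndexOpsMixin methods
# above back the public Series/Index API, for example
#
#     >>> s = pd.Series([3, 1, 2, 3, 4, np.nan])
#     >>> s.nunique()                       # unique() plus NaN handling above
#     4
#     >>> s.value_counts(normalize=True)    # wraps algorithms.value_counts
#     3.0    0.4
#     4.0    0.2
#     2.0    0.2
#     1.0    0.2
#     dtype: float64
#     >>> pd.Series([1, 2, 3]).searchsorted(4)   # algorithms.searchsorted
#     3
#
# (expected values taken from the docstring examples above).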
|
py | 1a2f9090d50fcfe7a07ab9ca7cee5c030f101b59 | # Lint as: python3
# Copyright 2018, The TensorFlow Federated Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for learning.federated_averaging."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from absl.testing import parameterized
import numpy as np
from six.moves import range
import tensorflow as tf
from tensorflow_federated.python.common_libs import test
from tensorflow_federated.python.learning import federated_averaging
from tensorflow_federated.python.learning import keras_utils
from tensorflow_federated.python.learning import model_examples
from tensorflow_federated.python.learning import model_utils
class FederatedAveragingClientTest(test.TestCase, parameterized.TestCase):
"""Tests of ClientFedAvg that use a common model and data."""
def dataset(self):
# Create a dataset with 4 examples:
dataset = tf.data.Dataset.from_tensor_slices(
model_examples.TrainableLinearRegression.make_batch(
x=[[0.0, 0.0], [1.0, 0.0], [2.0, 0.0], [3.0, 0.0]],
y=[[0.0], [0.0], [1.0], [1.0]]))
# Repeat the dataset 2 times with batches of 3 examples,
# producing 3 minibatches (the last one with only 2 examples).
# Note that `batch` is required for this dataset to be useable,
# as it adds the batch dimension which is expected by the model.
return dataset.repeat(2).batch(3)
def model(self):
return model_examples.TrainableLinearRegression(feature_dim=2)
def initial_weights(self):
return model_utils.ModelWeights(
trainable={
'a': tf.constant([[0.0], [0.0]]),
'b': tf.constant(0.0)
},
non_trainable={'c': 0.0})
@test.graph_mode_test
def test_client_tf(self):
model = self.model()
dataset = self.dataset()
client_tf = federated_averaging.ClientFedAvg(model)
init_op = tf.group(
model_utils.model_initializer(model),
tf.compat.v1.initializers.variables(client_tf.variables),
name='fedavg_initializer')
client_outputs = client_tf(dataset, self.initial_weights())
tf.compat.v1.get_default_graph().finalize()
with self.session() as sess:
sess.run(init_op)
out = sess.run(client_outputs)
# Both trainable parameters should have been updated,
# and we don't return the non-trainable 'c'.
self.assertCountEqual(['a', 'b'], list(out.weights_delta.keys()))
self.assertGreater(np.linalg.norm(out.weights_delta['a']), 0.1)
self.assertGreater(np.linalg.norm(out.weights_delta['b']), 0.1)
self.assertEqual(out.weights_delta_weight, 8.0)
self.assertEqual(out.optimizer_output['num_examples'], 8)
self.assertEqual(out.optimizer_output['has_non_finite_delta'], 0)
self.assertEqual(out.model_output['num_examples'], 8)
self.assertEqual(out.model_output['num_batches'], 3)
self.assertBetween(out.model_output['loss'],
np.finfo(np.float32).eps, 10.0)
def test_client_tf_custom_delta_weight(self):
model = self.model()
dataset = self.dataset()
client_tf = federated_averaging.ClientFedAvg(
model, client_weight_fn=lambda _: tf.constant(1.5))
out = client_tf(dataset, self.initial_weights())
self.assertEqual(self.evaluate(out.weights_delta_weight), 1.5)
@parameterized.named_parameters(('_inf', np.inf), ('_nan', np.nan))
def test_non_finite_aggregation(self, bad_value):
model = self.model()
dataset = self.dataset()
client_tf = federated_averaging.ClientFedAvg(model)
init_weights = self.initial_weights()
init_weights.trainable['b'] = bad_value
out = client_tf(dataset, init_weights)
self.assertEqual(self.evaluate(out.weights_delta_weight), 0.0)
self.assertAllClose(
self.evaluate(out.weights_delta['a']), np.array([[0.0], [0.0]]))
self.assertAllClose(self.evaluate(out.weights_delta['b']), 0.0)
self.assertEqual(
self.evaluate(out.optimizer_output['has_non_finite_delta']), 1)
class FederatedAveragingTffTest(test.TestCase, parameterized.TestCase):
def test_orchestration_execute(self):
iterative_process = federated_averaging.build_federated_averaging_process(
model_fn=model_examples.TrainableLinearRegression)
ds = tf.data.Dataset.from_tensor_slices({
'x': [[1., 2.], [3., 4.]],
'y': [[5.], [6.]]
}).batch(2)
federated_ds = [ds] * 3
server_state = iterative_process.initialize()
prev_loss = np.inf
for _ in range(3):
server_state, metric_outputs = iterative_process.next(
server_state, federated_ds)
self.assertEqual(metric_outputs.num_examples, 2 * len(federated_ds))
self.assertLess(metric_outputs.loss, prev_loss)
prev_loss = metric_outputs.loss
@parameterized.named_parameters([
('functional_model',
model_examples.build_linear_regresion_keras_functional_model),
('sequential_model',
model_examples.build_linear_regresion_keras_sequential_model),
('subclass_model',
model_examples.build_linear_regresion_keras_subclass_model),
])
def test_orchestration_execute_from_keras(self, build_keras_model_fn):
dummy_batch = collections.OrderedDict([
('x', np.zeros([1, 2], np.float32)),
('y', np.zeros([1, 1], np.float32)),
])
def model_fn():
keras_model = build_keras_model_fn(feature_dims=2)
keras_model.compile(
optimizer=tf.keras.optimizers.SGD(learning_rate=0.01),
loss=tf.keras.losses.MeanSquaredError(),
metrics=[])
return keras_utils.from_compiled_keras_model(keras_model, dummy_batch)
iterative_process = federated_averaging.build_federated_averaging_process(
model_fn=model_fn)
ds = tf.data.Dataset.from_tensor_slices({
'x': [[1., 2.], [3., 4.]],
'y': [[5.], [6.]]
}).batch(2)
federated_ds = [ds] * 3
server_state = iterative_process.initialize()
prev_loss = np.inf
for _ in range(3):
server_state, metrics = iterative_process.next(server_state, federated_ds)
self.assertLess(metrics.loss, prev_loss)
prev_loss = metrics.loss
def test_execute_empty_data(self):
iterative_process = federated_averaging.build_federated_averaging_process(
model_fn=model_examples.TrainableLinearRegression)
# Results in empty dataset with correct types and shapes.
ds = tf.data.Dataset.from_tensor_slices({
'x': [[1., 2.]],
'y': [[5.]]
}).batch(
5, drop_remainder=True)
federated_ds = [ds] * 2
server_state = iterative_process.initialize()
first_state, metric_outputs = iterative_process.next(
server_state, federated_ds)
self.assertEqual(
self.evaluate(tf.reduce_sum(first_state.model.trainable.a)) +
self.evaluate(tf.reduce_sum(first_state.model.trainable.b)), 0)
self.assertEqual(metric_outputs.num_examples, 0)
self.assertTrue(tf.is_nan(metric_outputs.loss))
if __name__ == '__main__':
test.main()
|
py | 1a2f909b91141d5fec10bc94b3f409d2e950c3dd | import argparse
import torch
torch.cuda.current_device()
import torch.optim as optim
from painter import *
# settings
parser = argparse.ArgumentParser(description='STYLIZED NEURAL PAINTING')
parser.add_argument('--img_path', type=str, default='./test_images/sunflowers.jpg', metavar='str',
help='path to test image (default: ./test_images/sunflowers.jpg)')
parser.add_argument('--renderer', type=str, default='rectangle', metavar='str',
                    help='renderer: [watercolor, markerpen, oilpaintbrush, rectangle] (default: rectangle)')
parser.add_argument('--canvas_color', type=str, default='black', metavar='str',
help='canvas_color: [black, white] (default black)')
parser.add_argument('--canvas_size', type=int, default=512, metavar='str',
help='size ( max(w, h) ) of the canvas for stroke rendering')
parser.add_argument('--max_m_strokes', type=int, default=500, metavar='str',
help='max number of strokes (default 500)')
parser.add_argument('--max_divide', type=int, default=5, metavar='N',
help='divide an image up-to max_divide x max_divide patches (default 5)')
parser.add_argument('--beta_L1', type=float, default=1.0,
help='weight for L1 loss (default: 1.0)')
parser.add_argument('--with_ot_loss', action='store_true', default=False,
                    help='improve the convergence by using optimal transportation loss')
parser.add_argument('--beta_ot', type=float, default=0.1,
help='weight for optimal transportation loss (default: 0.1)')
parser.add_argument('--net_G', type=str, default='zou-fusion-net', metavar='str',
help='net_G: plain-dcgan, plain-unet, huang-net, or zou-fusion-net (default: zou-fusion-net)')
parser.add_argument('--renderer_checkpoint_dir', type=str, default=r'./checkpoints_G_rectangle', metavar='str',
help='dir to load neu-renderer (default: ./checkpoints_G_rectangle)')
parser.add_argument('--lr', type=float, default=0.005,
help='learning rate for stroke searching (default: 0.005)')
parser.add_argument('--output_dir', type=str, default=r'./output', metavar='str',
help='dir to save painting results (default: ./output)')
parser.add_argument('--disable_preview', action='store_true', default=False,
help='disable cv2.imshow, for running remotely without x-display')
args = parser.parse_args()
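# Example invocation (editor's sketch; the script file name below is an
# assumption, the flags themselves are the ones defined above):
#
#   python demo_prog.py --img_path ./test_images/sunflowers.jpg \
#       --canvas_color black --canvas_size 512 --max_m_strokes 500 \
#       --max_divide 5 --renderer rectangle \
#       --renderer_checkpoint_dir ./checkpoints_G_rectangle \
#       --net_G zou-fusion-net --output_dir ./output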
# Decide which device we want to run on
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
def optimize_x(pt):
pt._load_checkpoint()
pt.net_G.eval()
print('begin drawing...')
PARAMS = np.zeros([1, 0, pt.rderr.d], np.float32)
if pt.rderr.canvas_color == 'white':
CANVAS_tmp = torch.ones([1, 3, 128, 128]).to(device)
else:
CANVAS_tmp = torch.zeros([1, 3, 128, 128]).to(device)
for pt.m_grid in range(1, pt.max_divide + 1):
pt.img_batch = utils.img2patches(pt.img_, pt.m_grid).to(device)
pt.G_final_pred_canvas = CANVAS_tmp
pt.initialize_params()
pt.x_ctt.requires_grad = True
pt.x_color.requires_grad = True
pt.x_alpha.requires_grad = True
utils.set_requires_grad(pt.net_G, False)
pt.optimizer_x = optim.RMSprop([pt.x_ctt, pt.x_color, pt.x_alpha], lr=pt.lr, centered=True)
pt.step_id = 0
for pt.anchor_id in range(0, pt.m_strokes_per_block):
pt.stroke_sampler(pt.anchor_id)
iters_per_stroke = 20
for i in range(iters_per_stroke):
pt.G_pred_canvas = CANVAS_tmp
# update x
pt.optimizer_x.zero_grad()
pt.x_ctt.data = torch.clamp(pt.x_ctt.data, 0, 1)
pt.x_ctt.data[:, :, -1] = torch.clamp(pt.x_ctt.data[:, :, -1], 0, 0)
pt.x_color.data = torch.clamp(pt.x_color.data, 0, 1)
pt.x_alpha.data = torch.clamp(pt.x_alpha.data, 1, 1)
pt._forward_pass()
pt._backward_x()
pt.x_ctt.data = torch.clamp(pt.x_ctt.data, 0, 1)
pt.x_ctt.data[:, :, -1] = torch.clamp(pt.x_ctt.data[:, :, -1], 0, 0)
pt.x_color.data = torch.clamp(pt.x_color.data, 0, 1)
pt.x_alpha.data = torch.clamp(pt.x_alpha.data, 1, 1)
pt._drawing_step_states()
pt.optimizer_x.step()
pt.step_id += 1
v = pt._normalize_strokes(pt.x)
PARAMS = np.concatenate([PARAMS, np.reshape(v, [1, -1, pt.rderr.d])], axis=1)
CANVAS_tmp = pt._render(PARAMS)[-1]
CANVAS_tmp = utils.img2patches(CANVAS_tmp, pt.m_grid + 1, to_tensor=True).to(device)
pt._save_stroke_params(PARAMS)
pt.final_rendered_images = pt._render(PARAMS)
pt._save_rendered_images()
if __name__ == '__main__':
pt = ProgressivePainter(args=args)
optimize_x(pt)
|
py | 1a2f91338b1ffe8f2c40042e4495be5c90c79ac7 | import base64
import json
import os
import sys
import re
from logging import getLogger, StreamHandler, INFO
from google.cloud import storage
age = os.environ.get('LIFECYCLE_EXPIRE')
ignorePatterns = os.environ.get('IGNORE_PATTERNS')
logger = getLogger(__name__)
handler = StreamHandler()
handler.setLevel(INFO)
logger.setLevel(INFO)
logger.addHandler(handler)
logger.propagate = False
def get_gcs_bucket_name(pubsub_message):
proto_payload = pubsub_message.get(u'protoPayload')
if proto_payload is None or len(proto_payload) == 0:
return None
resource_name = proto_payload.get(u'resourceName')
if resource_name is None or len(resource_name) == 0:
return None
return resource_name.split('/')[3]
def get_project_id(pubsub_message):
resource = pubsub_message.get(u'resource')
if resource is None or len(resource) == 0:
return None
labels = resource.get(u'labels')
if labels is None or len(labels) == 0:
return None
project_id = labels.get(u'project_id')
if project_id is None or len(project_id) == 0:
return None
return project_id
# Add lifecycle rule which deletes object after 365 days
def enable_bucket_lifecycle(bucket_name):
client = storage.Client()
bucket = client.get_bucket(bucket_name)
    bucket.add_lifecycle_delete_rule(age=int(age))  # env var arrives as a string
bucket.patch()
logger.info("Lifecycle addition is complete.")
def main_handler(event, context):
pubsub_message = json.loads(base64.b64decode(event['data']).decode('utf-8'))
bucket_name = get_gcs_bucket_name(pubsub_message)
if bucket_name is None:
logger.error("Could not get the bucket name from the event data.")
return
logger.info("Bucket: %s" % bucket_name)
project_id = get_project_id(pubsub_message)
if project_id is None:
logger.warning("Could not get the project id from the event data.")
logger.info("Project id: %s" % project_id)
for ignorePattern in ignorePatterns.split('###'):
try:
if re.match(ignorePattern, bucket_name):
logger.info("Since it is included in ignorePattern '%s', it does not set the life cycle." % ignorePattern)
return
except re.error as regex_error:
logger.warning("The grammar expression '%s' has an error : %s" % (ignorePattern, regex_error))
enable_bucket_lifecycle(bucket_name)
# debug
if __name__ == '__main__':
f = open("event_sample.json", "r", encoding="utf-8")
event = json.load(f)
f.close()
context = ''
age = '365'
ignorePatterns = '.*.appspot.com###gcf-sources*'
main_handler(event, context)
|
py | 1a2f91c7e1dddad4cd9de82ccc1d958cc1266038 | # Copyright 2018 Changan Wang
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
import tensorflow as tf
from model import ssd_net_resnet34_large
from dataset import dataset_common
from utils import ssd_preprocessing
from utils import anchor_manipulator
from utils import scaffolds
tf.app.flags.DEFINE_integer(
'num_readers', 8,
'The number of parallel readers that read data from the dataset.')
tf.app.flags.DEFINE_integer(
'num_preprocessing_threads', 24,
'The number of threads used to create the batches.')
tf.app.flags.DEFINE_integer(
'num_cpu_threads', 0,
'The number of cpu cores used to train.')
tf.app.flags.DEFINE_float(
'gpu_memory_fraction', 1., 'GPU memory fraction to use.')
tf.app.flags.DEFINE_string(
'data_dir', './tfrecords/',
'The directory where the dataset input data is stored.')
tf.app.flags.DEFINE_integer(
'num_classes', 81, 'Number of classes to use in the dataset.')
tf.app.flags.DEFINE_string(
'model_dir', './logs_mine_sec.ssd_resnet34_pretrain.no-bn_in_ssd_block_3*3_map/',
'The directory where the model will be stored.')
tf.app.flags.DEFINE_integer(
'log_every_n_steps', 10,
'The frequency with which logs are printed.')
tf.app.flags.DEFINE_integer(
'save_summary_steps', 500,
    'The frequency with which summaries are saved, in steps.')
tf.app.flags.DEFINE_integer(
'save_checkpoints_secs', 3600,
'The frequency with which the model is saved, in seconds.')
tf.app.flags.DEFINE_integer(
'train_image_size', 1200,
'The size of the input image for the model to use.')
tf.app.flags.DEFINE_integer(
'train_epochs', None,
'The number of epochs to use for training.')
tf.app.flags.DEFINE_integer(
'max_number_of_steps', 840000,
'The max number of steps to use for training.')
tf.app.flags.DEFINE_integer(
'batch_size', 48,
'Batch size for training and evaluation.')
tf.app.flags.DEFINE_string(
'data_format', 'channels_first',
'A flag to override the data format used in the model. channels_first '
'provides a performance boost on GPU but is not always compatible '
'with CPU. If left unspecified, the data format will be chosen '
'automatically based on whether TensorFlow was built for CPU or GPU.')
tf.app.flags.DEFINE_float(
'negative_ratio', 3., 'Negative ratio in the loss function.')
tf.app.flags.DEFINE_float(
'match_threshold', 0.5, 'Matching threshold in the loss function.')
tf.app.flags.DEFINE_float(
    'neg_threshold', 0.5, 'Matching threshold for the negative examples in the loss function.')
tf.app.flags.DEFINE_integer(
'tf_random_seed', 20180503, 'Random seed for TensorFlow initializers.')
tf.app.flags.DEFINE_float(
'weight_decay', 5e-4, 'The weight decay on the model weights.')
tf.app.flags.DEFINE_float(
'momentum', 0.9,
'The momentum for the MomentumOptimizer and RMSPropOptimizer.')
tf.app.flags.DEFINE_float('learning_rate', 4e-3, 'Initial learning rate.')
tf.app.flags.DEFINE_float(
'end_learning_rate', 0.000001,
'The minimal end learning rate used by a polynomial decay learning rate.')
tf.app.flags.DEFINE_string(
'decay_boundaries', '6000, 26000, 40000, 60000, 79000, 795000, 815000',
'Learning rate decay boundaries by global_step (comma-separated list).')
tf.app.flags.DEFINE_string(
'lr_decay_factors', '0.001, 0.01, 0.04, 0.001, 0.001, 0.001, 0.01, 0.001',
'The values of learning_rate decay factor for each segment between boundaries (comma-separated list).')
tf.app.flags.DEFINE_string(
'checkpoint_path', './logs_mine_sec.ssd_resnet34_pretrain.no-bn_in_ssd_block.21.1/model.ckpt-99590',
'The path to a checkpoint from which to fine-tune.')
tf.app.flags.DEFINE_string(
'checkpoint_model_scope', 'ssd1200',
'Model scope in the checkpoint. None if the same as the trained model.')
tf.app.flags.DEFINE_string(
'model_scope', 'ssd1200',
'Model scope name used to replace the name_scope in checkpoint.')
tf.app.flags.DEFINE_string(
'checkpoint_exclude_scopes', '',
'Comma-separated list of scopes of variables to exclude when restoring from a checkpoint.')
tf.app.flags.DEFINE_boolean(
'ignore_missing_vars', True,
    'Whether to ignore variables that are missing when restoring from the checkpoint.')
tf.app.flags.DEFINE_boolean(
'multi_gpu', True,
    'Whether to use multiple GPUs for training.')
FLAGS = tf.app.flags.FLAGS
def validate_batch_size_for_multi_gpu(batch_size):
"""For multi-gpu, batch-size must be a multiple of the number of
available GPUs.
Note that this should eventually be handled by replicate_model_fn
directly. Multi-GPU support is currently experimental, however,
so doing the work here until that feature is in place.
"""
if FLAGS.multi_gpu:
from tensorflow.python.client import device_lib
local_device_protos = device_lib.list_local_devices()
num_gpus = sum([1 for d in local_device_protos if d.device_type == 'GPU'])
if not num_gpus:
raise ValueError('Multi-GPU mode was specified, but no GPUs '
'were found. To use CPU, run --multi_gpu=False.')
remainder = batch_size % num_gpus
if remainder:
err = ('When running with multiple GPUs, batch size '
'must be a multiple of the number of available GPUs. '
'Found {} GPUs with a batch size of {}; try --batch_size={} instead.'
).format(num_gpus, batch_size, batch_size - remainder)
raise ValueError(err)
return num_gpus
return 0
def get_init_fn():
return scaffolds.get_init_fn_for_scaffold(FLAGS.model_dir, FLAGS.checkpoint_path,
FLAGS.model_scope, FLAGS.checkpoint_model_scope,
FLAGS.checkpoint_exclude_scopes, FLAGS.ignore_missing_vars,
name_remap=None)#{'/kernel': '/weights', '/bias': '/biases'})
global_anchor_info = dict()
def input_pipeline(dataset_pattern='pascalvoc_0712_train_*', is_training=True, batch_size=FLAGS.batch_size):
def input_fn():
out_shape = [FLAGS.train_image_size] * 2
anchor_creator = anchor_manipulator.AnchorCreator(out_shape,
layers_shapes = [(50, 50), (25, 25), (13, 13), (7, 7), (3, 3), (3, 3)],
anchor_scales = [(0.1,), (0.2,), (0.375,), (0.55,), (0.725,), (0.9,)],
extra_anchor_scales = [(0.1414,), (0.2739,), (0.4541,), (0.6315,), (0.8078,), (0.9836,)],
anchor_ratios = [(1., 2., .5), (1., 2., 3., .5, 0.3333), (1., 2., 3., .5, 0.3333), (1., 2., 3., .5, 0.3333), (1., 2., .5), (1., 2., .5)],
layer_steps = [24, 48, 92, 171, 400, 400])
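        # Six feature maps (50x50 down to 3x3), each with a base scale, an extra scale
        # (the geometric mean of adjacent base scales) and per-layer aspect ratios;
        # layer_steps give each map's stride in input pixels.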
all_anchors, all_num_anchors_depth, all_num_anchors_spatial = anchor_creator.get_all_anchors()
num_anchors_per_layer = []
for ind in range(len(all_anchors)):
num_anchors_per_layer.append(all_num_anchors_depth[ind] * all_num_anchors_spatial[ind])
anchor_encoder_decoder = anchor_manipulator.AnchorEncoder(allowed_borders = [1.0] * 6,
positive_threshold = FLAGS.match_threshold,
ignore_threshold = FLAGS.neg_threshold,
prior_scaling=[0.1, 0.1, 0.2, 0.2])
image_preprocessing_fn = lambda image_, labels_, bboxes_ : ssd_preprocessing.preprocess_image(image_, labels_, bboxes_, out_shape, is_training=is_training, data_format=FLAGS.data_format, output_rgb=False)
anchor_encoder_fn = lambda glabels_, gbboxes_: anchor_encoder_decoder.encode_all_anchors(glabels_, gbboxes_, all_anchors, all_num_anchors_depth, all_num_anchors_spatial)
image, _, shape, loc_targets, cls_targets, match_scores = dataset_common.slim_get_batch(FLAGS.num_classes,
batch_size,
('train' if is_training else 'val'),
os.path.join(FLAGS.data_dir, dataset_pattern),
FLAGS.num_readers,
FLAGS.num_preprocessing_threads,
image_preprocessing_fn,
anchor_encoder_fn,
num_epochs=FLAGS.train_epochs,
is_training=is_training)
global global_anchor_info
global_anchor_info = {'decode_fn': lambda pred : anchor_encoder_decoder.decode_all_anchors(pred, num_anchors_per_layer),
'num_anchors_per_layer': num_anchors_per_layer,
'all_num_anchors_depth': all_num_anchors_depth }
return image, {'shape': shape, 'loc_targets': loc_targets, 'cls_targets': cls_targets, 'match_scores': match_scores}
return input_fn
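# Smooth L1 (Huber-style) loss used for box regression below. With x = bbox_pred - bbox_targets
# (masked by bbox_inside_weights):
#   loss(x) = 0.5 * (sigma * x)^2      if |x| < 1 / sigma^2
#           = |x| - 0.5 / sigma^2      otherwise
# bbox_outside_weights rescales the result.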
def modified_smooth_l1(bbox_pred, bbox_targets, bbox_inside_weights=1., bbox_outside_weights=1., sigma=1.):
with tf.name_scope('smooth_l1', [bbox_pred, bbox_targets]):
sigma2 = sigma * sigma
inside_mul = tf.multiply(bbox_inside_weights, tf.subtract(bbox_pred, bbox_targets))
smooth_l1_sign = tf.cast(tf.less(tf.abs(inside_mul), 1.0 / sigma2), tf.float32)
smooth_l1_option1 = tf.multiply(tf.multiply(inside_mul, inside_mul), 0.5 * sigma2)
smooth_l1_option2 = tf.subtract(tf.abs(inside_mul), 0.5 / sigma2)
smooth_l1_result = tf.add(tf.multiply(smooth_l1_option1, smooth_l1_sign),
tf.multiply(smooth_l1_option2, tf.abs(tf.subtract(smooth_l1_sign, 1.0))))
outside_mul = tf.multiply(bbox_outside_weights, smooth_l1_result)
return outside_mul
def ssd_model_fn(features, labels, mode, params):
shape = labels['shape']
loc_targets = labels['loc_targets']
cls_targets = labels['cls_targets']
match_scores = labels['match_scores']
print('loc_targets:', loc_targets)
print('cls_targets:', cls_targets)
global global_anchor_info
decode_fn = global_anchor_info['decode_fn']
num_anchors_per_layer = global_anchor_info['num_anchors_per_layer']
all_num_anchors_depth = global_anchor_info['all_num_anchors_depth']
with tf.variable_scope(params['model_scope'], default_name=None, values=[features], reuse=tf.AUTO_REUSE):
backbone = ssd_net_resnet34_large.Resnet34Backbone(params['data_format'])
feature_layers = backbone.forward(features, training=(mode == tf.estimator.ModeKeys.TRAIN))
location_pred, cls_pred = ssd_net_resnet34_large.multibox_head(feature_layers, params['num_classes'], all_num_anchors_depth, data_format=params['data_format'], strides=(3, 3))
print(location_pred, cls_pred)
if params['data_format'] == 'channels_first':
cls_pred = [tf.transpose(pred, [0, 2, 3, 1]) for pred in cls_pred]
location_pred = [tf.transpose(pred, [0, 2, 3, 1]) for pred in location_pred]
cls_pred = [tf.reshape(pred, [tf.shape(features)[0], -1, params['num_classes']]) for pred in cls_pred]
location_pred = [tf.reshape(pred, [tf.shape(features)[0], -1, 4]) for pred in location_pred]
cls_pred = tf.concat(cls_pred, axis=1)
location_pred = tf.concat(location_pred, axis=1)
cls_pred = tf.reshape(cls_pred, [-1, params['num_classes']])
location_pred = tf.reshape(location_pred, [-1, 4])
with tf.device('/cpu:0'):
with tf.control_dependencies([cls_pred, location_pred]):
with tf.name_scope('post_forward'):
#bboxes_pred = decode_fn(location_pred)
bboxes_pred = tf.map_fn(lambda _preds : decode_fn(_preds),
tf.reshape(location_pred, [tf.shape(features)[0], -1, 4]),
dtype=[tf.float32] * len(num_anchors_per_layer), back_prop=False)
#cls_targets = tf.Print(cls_targets, [tf.shape(bboxes_pred[0]),tf.shape(bboxes_pred[1]),tf.shape(bboxes_pred[2]),tf.shape(bboxes_pred[3])])
bboxes_pred = [tf.reshape(preds, [-1, 4]) for preds in bboxes_pred]
bboxes_pred = tf.concat(bboxes_pred, axis=0)
flaten_cls_targets = tf.reshape(cls_targets, [-1])
flaten_match_scores = tf.reshape(match_scores, [-1])
flaten_loc_targets = tf.reshape(loc_targets, [-1, 4])
                # each positive example has one label
positive_mask = flaten_cls_targets > 0
n_positives = tf.count_nonzero(positive_mask)
batch_n_positives = tf.count_nonzero(cls_targets, -1)
batch_negtive_mask = tf.equal(cls_targets, 0)#tf.logical_and(tf.equal(cls_targets, 0), match_scores > 0.)
batch_n_negtives = tf.count_nonzero(batch_negtive_mask, -1)
batch_n_neg_select = tf.cast(params['negative_ratio'] * tf.cast(batch_n_positives, tf.float32), tf.int32)
batch_n_neg_select = tf.minimum(batch_n_neg_select, tf.cast(batch_n_negtives, tf.int32))
# hard negative mining for classification
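                # The background probability of each anchor is negated so that tf.nn.top_k
                # (descending) ranks the hardest negatives (least confident background) first,
                # while positives are set to -1 so they sort last. score_at_k is each image's
                # threshold at its batch_n_neg_select-th hardest negative; selected_neg_mask keeps
                # anchors at or above it and is later ANDed with the negative mask in final_mask.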
predictions_for_bg = tf.nn.softmax(tf.reshape(cls_pred, [tf.shape(features)[0], -1, params['num_classes']]))[:, :, 0]
prob_for_negtives = tf.where(batch_negtive_mask,
0. - predictions_for_bg,
# ignore all the positives
0. - tf.ones_like(predictions_for_bg))
topk_prob_for_bg, _ = tf.nn.top_k(prob_for_negtives, k=tf.shape(prob_for_negtives)[1])
score_at_k = tf.gather_nd(topk_prob_for_bg, tf.stack([tf.range(tf.shape(features)[0]), batch_n_neg_select - 1], axis=-1))
selected_neg_mask = prob_for_negtives >= tf.expand_dims(score_at_k, axis=-1)
                # include both the selected negatives and all positive examples
final_mask = tf.stop_gradient(tf.logical_or(tf.reshape(tf.logical_and(batch_negtive_mask, selected_neg_mask), [-1]), positive_mask))
total_examples = tf.count_nonzero(final_mask)
cls_pred = tf.boolean_mask(cls_pred, final_mask)
location_pred = tf.boolean_mask(location_pred, tf.stop_gradient(positive_mask))
flaten_cls_targets = tf.boolean_mask(tf.clip_by_value(flaten_cls_targets, 0, params['num_classes']), final_mask)
flaten_loc_targets = tf.stop_gradient(tf.boolean_mask(flaten_loc_targets, positive_mask))
predictions = {
'classes': tf.argmax(cls_pred, axis=-1),
'probabilities': tf.reduce_max(tf.nn.softmax(cls_pred, name='softmax_tensor'), axis=-1),
'loc_predict': bboxes_pred }
cls_accuracy = tf.metrics.accuracy(flaten_cls_targets, predictions['classes'])
metrics = {'cls_accuracy': cls_accuracy}
# Create a tensor named train_accuracy for logging purposes.
tf.identity(cls_accuracy[1], name='cls_accuracy')
tf.summary.scalar('cls_accuracy', cls_accuracy[1])
if mode == tf.estimator.ModeKeys.PREDICT:
return tf.estimator.EstimatorSpec(mode=mode, predictions=predictions)
#flaten_cls_targets=tf.Print(flaten_cls_targets, [flaten_loc_targets],summarize=50000)
cross_entropy = tf.losses.sparse_softmax_cross_entropy(labels=flaten_cls_targets, logits=cls_pred) * (params['negative_ratio'] + 1.)
# Create a tensor named cross_entropy for logging purposes.
tf.identity(cross_entropy, name='cross_entropy_loss')
tf.summary.scalar('cross_entropy_loss', cross_entropy)
#loc_loss = tf.cond(n_positives > 0, lambda: modified_smooth_l1(location_pred, tf.stop_gradient(flaten_loc_targets), sigma=1.), lambda: tf.zeros_like(location_pred))
loc_loss = modified_smooth_l1(location_pred, flaten_loc_targets, sigma=1.)
#loc_loss = modified_smooth_l1(location_pred, tf.stop_gradient(gtargets))
loc_loss = tf.reduce_mean(tf.reduce_sum(loc_loss, axis=-1), name='location_loss')
tf.summary.scalar('location_loss', loc_loss)
tf.losses.add_loss(loc_loss)
    l2_loss_vars = []
    for trainable_var in tf.trainable_variables():
        if '_bn' not in trainable_var.name:
            l2_loss_vars.append(tf.nn.l2_loss(trainable_var) * 0.1)
# Add weight decay to the loss. We exclude the batch norm variables because
# doing so leads to a small improvement in accuracy.
total_loss = tf.add(cross_entropy + loc_loss, tf.multiply(params['weight_decay'], tf.add_n(l2_loss_vars), name='l2_loss'), name='total_loss')
if mode == tf.estimator.ModeKeys.TRAIN:
global_step = tf.train.get_or_create_global_step()
lr_values = [params['learning_rate'] * decay for decay in params['lr_decay_factors']]
learning_rate = tf.train.piecewise_constant(tf.cast(global_step, tf.int32),
[int(_) for _ in params['decay_boundaries']],
lr_values)
truncated_learning_rate = tf.maximum(learning_rate, tf.constant(params['end_learning_rate'], dtype=learning_rate.dtype), name='learning_rate')
# Create a tensor named learning_rate for logging purposes.
tf.summary.scalar('learning_rate', truncated_learning_rate)
optimizer = tf.train.MomentumOptimizer(learning_rate=truncated_learning_rate,
momentum=params['momentum'])
optimizer = tf.contrib.estimator.TowerOptimizer(optimizer)
# Batch norm requires update_ops to be added as a train_op dependency.
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
train_op = optimizer.minimize(total_loss, global_step)
else:
train_op = None
return tf.estimator.EstimatorSpec(
mode=mode,
predictions=predictions,
loss=total_loss,
train_op=train_op,
eval_metric_ops=metrics,
#scaffold=None)
scaffold=tf.train.Scaffold(init_fn=get_init_fn()))
def parse_comma_list(args):
return [float(s.strip()) for s in args.split(',')]
def main(_):
os.environ['CUDA_VISIBLE_DEVICES'] = '4,5,6,7'
#tf.set_pruning_mode()
gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=FLAGS.gpu_memory_fraction)
config = tf.ConfigProto(allow_soft_placement=True, log_device_placement=False, intra_op_parallelism_threads=FLAGS.num_cpu_threads, inter_op_parallelism_threads=FLAGS.num_cpu_threads, gpu_options=gpu_options)
num_gpus = validate_batch_size_for_multi_gpu(FLAGS.batch_size)
run_config = tf.estimator.RunConfig().replace(
save_checkpoints_secs=FLAGS.save_checkpoints_secs).replace(
save_checkpoints_steps=None).replace(
save_summary_steps=FLAGS.save_summary_steps).replace(
keep_checkpoint_max=5).replace(
tf_random_seed=FLAGS.tf_random_seed).replace(
log_step_count_steps=FLAGS.log_every_n_steps).replace(
session_config=config)
replicate_ssd_model_fn = tf.contrib.estimator.replicate_model_fn(ssd_model_fn, loss_reduction=tf.losses.Reduction.MEAN)
ssd_detector = tf.estimator.Estimator(
model_fn=replicate_ssd_model_fn, model_dir=FLAGS.model_dir, config=run_config,
params={
'num_gpus': num_gpus,
'data_format': FLAGS.data_format,
'batch_size': FLAGS.batch_size,
'model_scope': FLAGS.model_scope,
'num_classes': FLAGS.num_classes,
'negative_ratio': FLAGS.negative_ratio,
'match_threshold': FLAGS.match_threshold,
'neg_threshold': FLAGS.neg_threshold,
'weight_decay': FLAGS.weight_decay,
'momentum': FLAGS.momentum,
'learning_rate': FLAGS.learning_rate,
'end_learning_rate': FLAGS.end_learning_rate,
'decay_boundaries': parse_comma_list(FLAGS.decay_boundaries),
'lr_decay_factors': parse_comma_list(FLAGS.lr_decay_factors),
})
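    # The values below are graph names of tensors created earlier via tf.identity()/named ops;
    # cls_accuracy was created inside the 'post_forward' name scope, hence its prefixed name.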
tensors_to_log = {
'lr': 'learning_rate',
'ce': 'cross_entropy_loss',
'loc': 'location_loss',
'loss': 'total_loss',
'l2': 'l2_loss',
'acc': 'post_forward/cls_accuracy',
}
logging_hook = tf.train.LoggingTensorHook(tensors=tensors_to_log, every_n_iter=FLAGS.log_every_n_steps,
formatter=lambda dicts: (', '.join(['%s=%.6f' % (k, v) for k, v in dicts.items()])))
print('Starting a training cycle.')
ssd_detector.train(input_fn=input_pipeline(dataset_pattern='coco_2017_train-*', is_training=True, batch_size=FLAGS.batch_size),
hooks=[logging_hook], max_steps=FLAGS.max_number_of_steps)
if __name__ == '__main__':
tf.logging.set_verbosity(tf.logging.INFO)
tf.app.run()
|
py | 1a2f9248def5cb5197b1f16ae85cb555e8aac1b1 | from openpyxl import Workbook
from django.http import HttpResponse
from openpyxl.styles import Font,Alignment
from .models import *
def export_wo_xls(request,wo_id):
my_wo = work_order.objects.all().filter(id=wo_id).first()
response = HttpResponse(
content_type='application/vnd.openxmlformats-officedocument.spreadsheetml.sheet',
)
    response['Content-Disposition'] = f'attachment; filename="{my_wo} Indents.xlsx"'
workbook = Workbook()
# Get active worksheet/tab
worksheet = workbook.active
worksheet.title = f'{my_wo} Indents'
# Define the titles for columns
col_dict = {
'Indent ID':'id',
'Description':'description',
'Material Type':'material_type',
'Quantity':'quantity',
'Weight':'get_weight',
'Unit value':'value',
'Tax (in %)':'tax',
'Tax Value':'tax_amount',
        'Other Expenses':'other_expanses',
'Discount':'discounted_total',
'Gross Value':'gross_value',
}
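    # Keys are the spreadsheet column titles; values are attribute names on the indent model.
    # Attributes that resolve to bound methods (e.g. get_weight) are called when rows are built below.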
row_num = 1
all_indents = indent.objects.all().filter(WO=my_wo)
# Assign the titles for each cell of the header
for col_num, column_title in enumerate(col_dict, 1):
cell = worksheet.cell(row=row_num, column=col_num)
cell.font = Font(name='Calibri', bold=True, size=12)
cell.alignment=Alignment(horizontal='left')
cell.value = column_title
    # Iterate through all indents
for my_indent in all_indents:
row_num += 1
# Define the data for each cell in the row
row = []
for i in col_dict:
temp = getattr(my_indent, col_dict[i])
# print(temp)
            # Bound methods (e.g. get_weight) are called to obtain their value; plain attributes are used as-is
            if callable(temp):
                row.append(temp())
            else:
                row.append(temp)
# Assign the data for each cell of the row
for col_num, cell_value in enumerate(row, 1):
cell = worksheet.cell(row=row_num, column=col_num)
cell.alignment=Alignment(horizontal='left')
cell.value = cell_value
workbook.save(response)
return response
def export_po_xls(request,po_id):
my_po = purchase_order.objects.all().filter(id=po_id).first()
response = HttpResponse(
content_type='application/vnd.openxmlformats-officedocument.spreadsheetml.sheet',
)
    response['Content-Disposition'] = f'attachment; filename="{my_po.po_number} Indents.xlsx"'
workbook = Workbook()
# Get active worksheet/tab
worksheet = workbook.active
worksheet.title = f'{my_po.po_number} Indents'
# Define the titles for columns
col_dict = {
'Indent ID':'id',
'Description':'description',
'Material Type':'material_type',
'Quantity':'quantity',
'Weight':'get_weight',
'Unit value':'value',
'Tax (in %)':'tax',
'Tax Value':'tax_amount',
        'Other Expenses':'other_expanses',
'Discount':'discounted_total',
'Gross Value':'gross_value',
}
row_num = 1
all_indents = indent.objects.all().filter(PO=my_po)
# Assign the titles for each cell of the header
for col_num, column_title in enumerate(col_dict, 1):
cell = worksheet.cell(row=row_num, column=col_num)
cell.font = Font(name='Calibri', bold=True, size=12)
cell.alignment=Alignment(horizontal='left')
cell.value = column_title
    # Iterate through all indents
for my_indent in all_indents:
row_num += 1
# Define the data for each cell in the row
row = []
for i in col_dict:
temp = getattr(my_indent, col_dict[i])
# print(temp)
            # Bound methods (e.g. get_weight) are called to obtain their value; plain attributes are used as-is
            if callable(temp):
                row.append(temp())
            else:
                row.append(temp)
# Assign the data for each cell of the row
for col_num, cell_value in enumerate(row, 1):
cell = worksheet.cell(row=row_num, column=col_num)
cell.alignment=Alignment(horizontal='left')
cell.value = cell_value
workbook.save(response)
return response
|
py | 1a2f9278bb9e4a8fb5bda1d5b89aa3b45d4a9b13 | ##########################################################################
#
# Copyright (c) 2011-2012, John Haddon. All rights reserved.
# Copyright (c) 2011-2015, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
# Work around a bug which causes segfaults if uuid is imported after
# PyQt. See here for details :
#
# https://bugs.gentoo.org/show_bug.cgi?id=317557
# http://www.riverbankcomputing.com/pipermail/pyqt/2010-December/028773.html
#
# Using __import__ rather than import so that we don't pollute the GafferUI
# namespace.
__import__( "uuid" )
##########################################################################
# Function to return the C++ address of a wrapped Qt object. This can
# be useful if needing to implement part of the UI in C++ and the rest
# in Python.
##########################################################################
def _qtAddress( o ) :
import Qt
if "PyQt" in Qt.__binding__ :
import sip
return sip.unwrapinstance( o )
else :
return __shiboken().getCppPointer( o )[0]
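# As an illustrative example only, the address of the Qt widget wrapped by a GafferUI.Widget
# could be obtained with `_qtAddress( widget._qtWidget() )` and then recast on the C++ side.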
##########################################################################
# Function to return a wrapped Qt object from the given C++ address.
# This can be useful if needing to implement part of the UI in C++ and
# the rest in Python.
##########################################################################
def _qtObject( address, type ) :
import Qt
if "PyQt" in Qt.__binding__ :
import sip
return sip.wrapinstance( address, type )
else :
return __shiboken().wrapInstance( address, type )
##########################################################################
# Determines if the wrapped Qt object is still valid
# Useful when having to deal with the consequences of C++/Python deletion
# order challenges, see:
# https://github.com/GafferHQ/gaffer/pull/3179
##########################################################################
def _qtObjectIsValid( o ) :
import Qt
if "PyQt" in Qt.__binding__ :
import sip
return not sip.isdeleted( o )
else :
return __shiboken().isValid( o )
##########################################################################
# Shiboken lives in a variety of places depending on which PySide it is.
##########################################################################
def __shiboken() :
import Qt
assert( "PyQt" not in Qt.__binding__ )
if Qt.__binding__ == "PySide2" :
try :
import PySide2.shiboken2 as shiboken
except ImportError :
import shiboken2 as shiboken
else :
try :
import PySide.shiboken
except ImportError :
import shiboken
return shiboken
##########################################################################
# now import our actual functionality
##########################################################################
# Import modules that must be imported before _GafferUI, using __import__
# to avoid polluting the GafferUI namespace.
__import__( "IECore" )
__import__( "Gaffer" )
from ._GafferUI import *
# general ui stuff first
from .Enums import *
from .Widget import Widget
from .LazyMethod import LazyMethod
from .Menu import Menu
from .ContainerWidget import ContainerWidget
from .Window import Window
from .SplitContainer import SplitContainer
from .ListContainer import ListContainer
from .GridContainer import GridContainer
from .MenuBar import MenuBar
from .EventLoop import EventLoop
from .TabbedContainer import TabbedContainer
from .TextWidget import TextWidget
from .NumericWidget import NumericWidget
from .Button import Button
from .MultiLineTextWidget import MultiLineTextWidget
from .Label import Label
from .GLWidget import GLWidget
from .ScrolledContainer import ScrolledContainer
from .PathWidget import PathWidget
from .PathListingWidget import PathListingWidget
from .PathChooserWidget import PathChooserWidget
from .Dialogue import Dialogue
from .PathChooserDialogue import PathChooserDialogue
from .TextInputDialogue import TextInputDialogue
from .Collapsible import Collapsible
from .ColorSwatch import ColorSwatch
from .Slider import Slider
from .ShowURL import showURL
from .Spacer import Spacer
from .BoolWidget import BoolWidget, CheckBox
from .Image import Image
from .ErrorDialogue import ErrorDialogue
from ._Variant import _Variant
from .VectorDataWidget import VectorDataWidget
from .PathVectorDataWidget import PathVectorDataWidget
from .ProgressBar import ProgressBar
from .SelectionMenu import SelectionMenu
from .PathFilterWidget import PathFilterWidget
from .CompoundPathFilterWidget import CompoundPathFilterWidget
from .InfoPathFilterWidget import InfoPathFilterWidget
from .MatchPatternPathFilterWidget import MatchPatternPathFilterWidget
from .FileSequencePathFilterWidget import FileSequencePathFilterWidget
from .BusyWidget import BusyWidget
from .NumericSlider import NumericSlider
from .ColorChooser import ColorChooser
from .ColorChooserDialogue import ColorChooserDialogue
from .MessageWidget import MessageWidget, MessageSummaryWidget
from .NotificationMessageHandler import NotificationMessageHandler
from .MenuButton import MenuButton
from .MultiSelectionMenu import MultiSelectionMenu
from .PopupWindow import PopupWindow
from .ConfirmationDialogue import ConfirmationDialogue
from .DisplayTransform import DisplayTransform
from .Divider import Divider
from . import _Pointer
from .SplineWidget import SplineWidget
from .Bookmarks import Bookmarks
from . import WidgetAlgo
# then all the PathPreviewWidgets. note that the order
# of import controls the order of display.
from .PathPreviewWidget import PathPreviewWidget
from .CompoundPathPreview import CompoundPathPreview
from .DeferredPathPreview import DeferredPathPreview
from .InfoPathPreview import InfoPathPreview
from .HeaderPathPreview import HeaderPathPreview
from .DataPathPreview import DataPathPreview
# then stuff specific to graph uis
from .BackgroundMethod import BackgroundMethod
from .PlugValueWidget import PlugValueWidget
from .StringPlugValueWidget import StringPlugValueWidget
from .NumericPlugValueWidget import NumericPlugValueWidget
from .BoolPlugValueWidget import BoolPlugValueWidget
from .PathPlugValueWidget import PathPlugValueWidget
from .FileSystemPathPlugValueWidget import FileSystemPathPlugValueWidget
from .VectorDataPlugValueWidget import VectorDataPlugValueWidget
from .PathVectorDataPlugValueWidget import PathVectorDataPlugValueWidget
from .FileSystemPathVectorDataPlugValueWidget import FileSystemPathVectorDataPlugValueWidget
from .PlugWidget import PlugWidget
from .PlugLayout import PlugLayout
from .Editor import Editor
from .PythonEditor import PythonEditor
from .GadgetWidget import GadgetWidget
from .GraphEditor import GraphEditor
from .ScriptWindow import ScriptWindow
from .CompoundEditor import CompoundEditor
from .NameWidget import NameWidget
from .NameLabel import NameLabel
from .NodeSetEditor import NodeSetEditor
from .NodeEditor import NodeEditor
from .Layouts import Layouts
from .NodeMenu import NodeMenu
from . import FileMenu
from . import LayoutMenu
from . import EditMenu
from . import UserPlugs
from .Frame import Frame
from .CompoundNumericPlugValueWidget import CompoundNumericPlugValueWidget
from .BoxPlugValueWidget import BoxPlugValueWidget
from .NodeUI import NodeUI
from .StandardNodeUI import StandardNodeUI
from .NodeToolbar import NodeToolbar
from .StandardNodeToolbar import StandardNodeToolbar
from .Viewer import Viewer
from .ColorSwatchPlugValueWidget import ColorSwatchPlugValueWidget
from .ColorPlugValueWidget import ColorPlugValueWidget
from .AboutWindow import AboutWindow
from . import ApplicationMenu
from .BrowserEditor import BrowserEditor
from .Timeline import Timeline
from .MultiLineStringPlugValueWidget import MultiLineStringPlugValueWidget
from .PresetsPlugValueWidget import PresetsPlugValueWidget
from .GraphComponentBrowserMode import GraphComponentBrowserMode
from .ToolPlugValueWidget import ToolPlugValueWidget
from .LabelPlugValueWidget import LabelPlugValueWidget
from .CompoundDataPlugValueWidget import CompoundDataPlugValueWidget
from .LayoutPlugValueWidget import LayoutPlugValueWidget
from . import ScriptNodeUI
from .RefreshPlugValueWidget import RefreshPlugValueWidget
from . import PreferencesUI
from .SplinePlugValueWidget import SplinePlugValueWidget
from .RampPlugValueWidget import RampPlugValueWidget
from .NodeFinderDialogue import NodeFinderDialogue
from .ConnectionPlugValueWidget import ConnectionPlugValueWidget
from .ButtonPlugValueWidget import ButtonPlugValueWidget
from . import ViewUI
from . import ToolUI
from .Playback import Playback
from . import MetadataWidget
from .UIEditor import UIEditor
from . import GraphBookmarksUI
from . import DocumentationAlgo
from . import _PlugAdder
from .Backups import Backups
from .AnimationEditor import AnimationEditor
from . import CompoundNumericNoduleUI
from . import Examples
from .NameValuePlugValueWidget import NameValuePlugValueWidget
from .ShufflePlugValueWidget import ShufflePlugValueWidget
from .ShufflePlugValueWidget import ShufflesPlugValueWidget
# and then specific node uis
from . import DependencyNodeUI
from . import ComputeNodeUI
from . import RandomUI
from . import SpreadsheetUI
from . import ExpressionUI
from . import BoxUI
from . import ReferenceUI
from . import BackdropUI
from . import DotUI
from . import SubGraphUI
from . import SwitchUI
from . import ContextProcessorUI
from . import ContextVariablesUI
from . import DeleteContextVariablesUI
from . import TimeWarpUI
from . import LoopUI
from . import AnimationUI
from . import BoxIOUI
from . import BoxInUI
from . import BoxOutUI
from . import NameSwitchUI
from . import EditScopeUI
# backwards compatibility
## \todo Remove me
Metadata = __import__( "Gaffer" ).Metadata
__import__( "IECore" ).loadConfig( "GAFFER_STARTUP_PATHS", subdirectory = "GafferUI" )
|
py | 1a2f9389e687aa056509a03a24247493b399b439 | import pdf_to_json as p2j
import json
url = "file:data/multilingual/Arab.URD/Serif_12/udhr_Arab.URD_Serif_12.pdf"
lConverter = p2j.pdf_to_json.pdf_to_json_converter()
lConverter.mImageHashOnly = True
lDict = lConverter.convert(url)
print(json.dumps(lDict, indent=4, ensure_ascii=False, sort_keys=True))
|
py | 1a2f93a1abbf760f4bfdd75bbf26fa50bdde49ac | # -*- coding: utf-8 -*-
#
# python-bplsqlparse documentation build configuration file, created by
# sphinx-quickstart on Thu Feb 26 08:19:28 2009.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import datetime
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.append(os.path.abspath('.'))
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '../../'))
import bplsqlparse
# -- General configuration -----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.todo', 'sphinx.ext.coverage',
'sphinx.ext.autosummary']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'python-bplsqlparse'
copyright = '{:%Y}, Andi Albrecht'.format(datetime.date.today())
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = bplsqlparse.__version__
# The full version, including alpha/beta/rc tags.
release = bplsqlparse.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'tango'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
#html_theme = 'agogo'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = [os.path.abspath('../')]
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
#html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_use_modindex = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'python-sqlparsedoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'python-bplsqlparse.tex', 'python-bplsqlparse Documentation',
'Andi Albrecht', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_use_modindex = True
todo_include_todos = True
|
py | 1a2f945b414e7c4e58577b3059af0dc459d6abff | #!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoinold Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
assumevalid.py
Test logic for skipping signature validation on blocks which we've assumed
valid (https://github.com/bitcoinold/bitcoinold/pull/9484)
We build a chain that includes an invalid signature for one of the
transactions:
0: genesis block
1: block 1 with coinbase transaction output.
2-101: bury that block with 100 blocks so the coinbase transaction
output can be spent
102: a block containing a transaction spending the coinbase
transaction output. The transaction has an invalid signature.
103-2202: bury the bad block with just over two weeks' worth of blocks
(2100 blocks)
Start three nodes:
- node0 has no -assumevalid parameter. Try to sync to block 2202. It will
reject block 102 and only sync as far as block 101
- node1 has -assumevalid set to the hash of block 102. Try to sync to
block 2202. node1 will sync all the way to block 2202.
- node2 has -assumevalid set to the hash of block 102. Try to sync to
block 200. node2 will reject block 102 since it's assumed valid, but it
isn't buried by at least two weeks' work.
'''
from test_framework.mininode import *
from test_framework.test_framework import BitcoinoldTestFramework
from test_framework.util import *
from test_framework.blocktools import create_block, create_coinbase
from test_framework.key import CECKey
from test_framework.script import *
class BaseNode(SingleNodeConnCB):
def __init__(self):
SingleNodeConnCB.__init__(self)
self.last_inv = None
self.last_headers = None
self.last_block = None
self.last_getdata = None
self.block_announced = False
self.last_getheaders = None
self.disconnected = False
self.last_blockhash_announced = None
def on_close(self, conn):
self.disconnected = True
def wait_for_disconnect(self, timeout=60):
test_function = lambda: self.disconnected
assert(wait_until(test_function, timeout=timeout))
return
def send_header_for_blocks(self, new_blocks):
headers_message = msg_headers()
headers_message.headers = [ CBlockHeader(b) for b in new_blocks ]
self.send_message(headers_message)
class AssumeValidTest(BitcoinoldTestFramework):
def __init__(self):
super().__init__()
self.setup_clean_chain = True
self.num_nodes = 3
def setup_network(self):
# Start node0. We don't start the other nodes yet since
# we need to pre-mine a block with an invalid transaction
# signature so we can pass in the block hash as assumevalid.
self.nodes = []
self.nodes.append(start_node(0, self.options.tmpdir, ["-debug"]))
def run_test(self):
# Connect to node0
node0 = BaseNode()
connections = []
connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], node0))
node0.add_connection(connections[0])
NetworkThread().start() # Start up network handling in another thread
node0.wait_for_verack()
# Build the blockchain
self.tip = int(self.nodes[0].getbestblockhash(), 16)
self.block_time = self.nodes[0].getblock(self.nodes[0].getbestblockhash())['time'] + 1
self.blocks = []
# Get a pubkey for the coinbase TXO
coinbase_key = CECKey()
coinbase_key.set_secretbytes(b"horsebattery")
coinbase_pubkey = coinbase_key.get_pubkey()
# Create the first block with a coinbase output to our key
height = 1
block = create_block(self.tip, create_coinbase(height, coinbase_pubkey), self.block_time)
self.blocks.append(block)
self.block_time += 1
block.solve()
# Save the coinbase for later
self.block1 = block
self.tip = block.sha256
height += 1
# Bury the block 100 deep so the coinbase output is spendable
for i in range(100):
block = create_block(self.tip, create_coinbase(height), self.block_time)
block.solve()
self.blocks.append(block)
self.tip = block.sha256
self.block_time += 1
height += 1
# Create a transaction spending the coinbase output with an invalid (null) signature
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.block1.vtx[0].sha256, 0), scriptSig=b""))
tx.vout.append(CTxOut(49*100000000, CScript([OP_TRUE])))
tx.calc_sha256()
block102 = create_block(self.tip, create_coinbase(height), self.block_time)
self.block_time += 1
block102.vtx.extend([tx])
block102.hashMerkleRoot = block102.calc_merkle_root()
block102.rehash()
block102.solve()
self.blocks.append(block102)
self.tip = block102.sha256
self.block_time += 1
height += 1
# Bury the assumed valid block 2100 deep
for i in range(2100):
block = create_block(self.tip, create_coinbase(height), self.block_time)
block.nVersion = 4
block.solve()
self.blocks.append(block)
self.tip = block.sha256
self.block_time += 1
height += 1
# Start node1 and node2 with assumevalid so they accept a block with a bad signature.
self.nodes.append(start_node(1, self.options.tmpdir,
["-debug", "-assumevalid=" + hex(block102.sha256)]))
node1 = BaseNode() # connects to node1
connections.append(NodeConn('127.0.0.1', p2p_port(1), self.nodes[1], node1))
node1.add_connection(connections[1])
node1.wait_for_verack()
self.nodes.append(start_node(2, self.options.tmpdir,
["-debug", "-assumevalid=" + hex(block102.sha256)]))
node2 = BaseNode() # connects to node2
connections.append(NodeConn('127.0.0.1', p2p_port(2), self.nodes[2], node2))
node2.add_connection(connections[2])
node2.wait_for_verack()
# send header lists to all three nodes
node0.send_header_for_blocks(self.blocks[0:2000])
node0.send_header_for_blocks(self.blocks[2000:])
node1.send_header_for_blocks(self.blocks[0:2000])
node1.send_header_for_blocks(self.blocks[2000:])
node2.send_header_for_blocks(self.blocks[0:200])
# Send 102 blocks to node0. Block 102 will be rejected.
for i in range(101):
node0.send_message(msg_block(self.blocks[i]))
node0.sync_with_ping() # make sure the most recent block is synced
node0.send_message(msg_block(self.blocks[101]))
assert_equal(self.nodes[0].getblock(self.nodes[0].getbestblockhash())['height'], 101)
        # Send all 2202 blocks to node1. All blocks will be accepted.
for i in range(2202):
node1.send_message(msg_block(self.blocks[i]))
node1.sync_with_ping() # make sure the most recent block is synced
assert_equal(self.nodes[1].getblock(self.nodes[1].getbestblockhash())['height'], 2202)
# Send 102 blocks to node2. Block 102 will be rejected.
for i in range(101):
node2.send_message(msg_block(self.blocks[i]))
node2.sync_with_ping() # make sure the most recent block is synced
node2.send_message(msg_block(self.blocks[101]))
assert_equal(self.nodes[2].getblock(self.nodes[2].getbestblockhash())['height'], 101)
if __name__ == '__main__':
    AssumeValidTest().main()
|
py | 1a2f94c20bfca4a4a80aa32e7f144f758c9ffa37 |
from setuptools import find_packages, setup
from pathlib import Path
this_directory = Path(__file__).parent
readme = (this_directory / "README.md").read_text()
setup(
name='sentencesimilarity',
packages=find_packages(),
version='0.1.1',
description='Calculates semantic similarity between given sentences.',
long_description= readme,
long_description_content_type='text/markdown',
author='osahin',
author_email = "[email protected]",
license='MIT',
install_requires=['transformers==4.9.2','scikit_learn==0.24.2','torch==1.9.0'],
setup_requires=['pytest-runner'],
tests_require=['pytest==4.4.1'],
test_suite='tests',
)
|
py | 1a2f94dba5ba24353eccf5534e2f85dec025ac23 | # -*- coding: utf-8 -*-
import math,string,itertools,fractions,heapq,collections,re,array,bisect
class PublicTransit:
def distRaw(self, R, C, i1, j1, i2, j2):
# q = [(i1, j1, 0)]
# deltas = [(1, 0), (-1, 0), (0, -1), (0, 1)]
# while q:
# i, j, d = q.pop(0)
# if i == i2 and j == j2:
# return d
# for delta in deltas:
# ni = i + delta[0]
# nj = j + delta[1]
# if 0 <= ni < R and 0 <= nj < C:
# q.append((ni, nj, d+1))
# return 1000
return abs(i1-i2)+abs(j1-j2)
def distAfterConnect(self, R, C, connect, i1, j1, i2, j2):
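        # The connection behaves as a zero-cost link: take the direct Manhattan walk, or walk to
        # one endpoint of the connection and continue from the other, whichever is shortest.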
if i1 == i2 and j1 == j2:
return 0
return min(self.distRaw(R, C, i1, j1, i2, j2), \
self.distRaw(R, C, i1, j1, connect[0], connect[1]) + self.distRaw(R, C, connect[2], connect[3], i2, j2), \
self.distRaw(R, C, i1, j1, connect[2], connect[3]) + self.distRaw(R, C, connect[0], connect[1], i2, j2))
def maxDist(self, R, C, connect):
res = 1
for i1 in range(R):
for j1 in range(C):
for i2 in range(R-1, -1, -1):
for j2 in range(C-1, -1, -1):
if abs(i1-i2) + abs(j1-j2) <= res:
continue
res = max(res, self.distAfterConnect(R, C, connect, i1, j1, i2, j2))
return res
def minimumLongestDistance(self, R, C):
if R <= 0 or C <= 0:
return 0
if R*C <= 2:
return 1
res = 1000
for i1 in range(R):
for j1 in range(C):
for i2 in range(R):
for j2 in range(C):
if i1 == i2 and j1 == j2:
continue
# connect (i, j) and (i2, j2)
res = min(res, self.maxDist(R, C, (i1, j1, i2, j2)))
return res
# CUT begin
# TEST CODE FOR PYTHON {{{
import sys, time, math
def tc_equal(expected, received):
try:
_t = type(expected)
received = _t(received)
if _t == list or _t == tuple:
if len(expected) != len(received): return False
return all(tc_equal(e, r) for (e, r) in zip(expected, received))
elif _t == float:
eps = 1e-9
d = abs(received - expected)
return not math.isnan(received) and not math.isnan(expected) and d <= eps * max(1.0, abs(expected))
else:
return expected == received
except:
return False
def pretty_str(x):
if type(x) == str:
return '"%s"' % x
elif type(x) == tuple:
return '(%s)' % (','.join( (pretty_str(y) for y in x) ) )
else:
return str(x)
def do_test(R, C, __expected):
startTime = time.time()
instance = PublicTransit()
exception = None
try:
__result = instance.minimumLongestDistance(R, C);
except:
import traceback
exception = traceback.format_exc()
elapsed = time.time() - startTime # in sec
if exception is not None:
sys.stdout.write("RUNTIME ERROR: \n")
sys.stdout.write(exception + "\n")
return 0
if tc_equal(__expected, __result):
sys.stdout.write("PASSED! " + ("(%.3f seconds)" % elapsed) + "\n")
return 1
else:
sys.stdout.write("FAILED! " + ("(%.3f seconds)" % elapsed) + "\n")
sys.stdout.write(" Expected: " + pretty_str(__expected) + "\n")
sys.stdout.write(" Received: " + pretty_str(__result) + "\n")
return 0
def run_tests():
sys.stdout.write("PublicTransit (500 Points)\n\n")
passed = cases = 0
case_set = set()
for arg in sys.argv[1:]:
case_set.add(int(arg))
with open("PublicTransit.sample", "r") as f:
while True:
label = f.readline()
if not label.startswith("--"): break
R = int(f.readline().rstrip())
C = int(f.readline().rstrip())
f.readline()
__answer = int(f.readline().rstrip())
cases += 1
if len(case_set) > 0 and (cases - 1) in case_set: continue
sys.stdout.write(" Testcase #%d ... " % (cases - 1))
passed += do_test(R, C, __answer)
sys.stdout.write("\nPassed : %d / %d cases\n" % (passed, cases))
T = time.time() - 1431783977
PT, TT = (T / 60.0, 75.0)
points = 500 * (0.3 + (0.7 * TT * TT) / (10.0 * PT * PT + TT * TT))
sys.stdout.write("Time : %d minutes %d secs\n" % (int(T/60), T%60))
sys.stdout.write("Score : %.2f points\n" % points)
if __name__ == '__main__':
run_tests()
# }}}
# CUT end
|
py | 1a2f9570c702e00f26b1a895b9224a12e8a3d770 | # coding: utf-8
# Copyright (c) Max-Planck-Institut für Eisenforschung GmbH - Computational Materials Design (CM) Department
# Distributed under the terms of "New BSD License", see the LICENSE file.
import json
import re
import sys
import yaml
environment_file = '.ci_support/environment.yml'
name_mapping_file = '.ci_support/pypi_vs_conda_names.json'
class EnvironmentUpdater:
def __init__(self, package_name, from_version, to_version):
"""
Updates the version of a package in the conda environment file.
Parameters:
package_name: Name of the package to update as available on PyPI
from_version: Version the package is before the update
to_version: Version to which the package should be updated
"""
self.from_version = from_version
self.to_version = to_version
with open(name_mapping_file, 'r') as f:
self._name_conversion_dict = json.load(f)
with open(environment_file, 'r') as f:
self.environment = yaml.safe_load(f)
self.package_name = self._convert_package_name(package_name)
def _convert_package_name(self, name):
if name in self._name_conversion_dict.keys():
result = self._name_conversion_dict[name]
else:
result = name
return result
def _update_dependencies(self):
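        # Capture everything from the package name up to the old version string and re-emit it
        # with the new version, e.g. (illustrative) 'numpy =1.21.1' -> 'numpy =1.21.2'.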
updated_dependencies = []
for dep in self.environment['dependencies']:
updated_dependencies.append(re.sub(
r'(' + self.package_name + '.*)' + self.from_version,
r'\g<1>' + self.to_version,
dep
))
self.environment['dependencies'] = updated_dependencies
def _write(self):
with open(environment_file, 'w') as f:
yaml.safe_dump(self.environment, f)
def update_dependencies(self):
"""Update the version of the requested dependency in the environment file"""
self._update_dependencies()
self._write()
if len(sys.argv) != 7 or not (sys.argv[1] == 'Bump' and sys.argv[3] == 'from' and sys.argv[5] == 'to'):
raise ValueError(f"Title of a dependabot PR 'Bump <package> from <version> to <version>' expected, "
f"but got {' '.join(sys.argv[1:])}")
package_to_update = sys.argv[2]
from_version = sys.argv[4]
to_version = sys.argv[6]
updater = EnvironmentUpdater(package_to_update, from_version, to_version)
updater.update_dependencies()
|
py | 1a2f95d0e6cd940fbc2c94792038478aa5b02563 | # coding=utf-8
# Copyright 2022 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Testing suite for the PyTorch ViLT model. """
import unittest
from datasets import load_dataset
from packaging import version
from transformers import ViltConfig, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_torch_available():
import torch
from transformers import (
MODEL_MAPPING,
ViltForImageAndTextRetrieval,
ViltForImagesAndTextClassification,
ViltForMaskedLM,
ViltForQuestionAnswering,
ViltModel,
)
from transformers.models.vilt.modeling_vilt import VILT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
import PIL
from PIL import Image
from transformers import ViltProcessor
class ViltModelTester:
def __init__(
self,
parent,
batch_size=13,
seq_length=7,
image_size=30,
patch_size=2,
num_channels=3,
is_training=True,
use_input_mask=True,
use_token_type_ids=True,
use_labels=True,
vocab_size=99,
hidden_size=32,
num_hidden_layers=5,
num_attention_heads=4,
intermediate_size=37,
hidden_act="gelu",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
max_position_embeddings=512,
type_vocab_size=16,
type_sequence_label_size=2,
initializer_range=0.02,
num_labels=3,
scope=None,
modality_type_vocab_size=2,
add_multiple_images=False,
num_images=-1,
):
self.parent = parent
self.batch_size = batch_size
self.seq_length = seq_length
self.image_size = image_size
self.patch_size = patch_size
self.num_channels = num_channels
self.is_training = is_training
self.use_input_mask = use_input_mask
self.use_token_type_ids = use_token_type_ids
self.use_labels = use_labels
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.type_sequence_label_size = type_sequence_label_size
self.initializer_range = initializer_range
self.num_labels = num_labels
self.scope = scope
self.modality_type_vocab_size = modality_type_vocab_size
self.add_multiple_images = add_multiple_images
self.num_images = num_images
# we set the expected sequence length (which is used in several tests)
# this is equal to the seq length of the text tokens + number of image patches + 1 for the CLS token
self.expected_seq_len = self.seq_length + (self.image_size // self.patch_size) ** 2 + 1
def prepare_config_and_inputs(self):
input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
if self.add_multiple_images:
pixel_values = floats_tensor([self.batch_size, 2, self.num_channels, self.image_size, self.image_size])
else:
pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
input_mask = None
if self.use_input_mask:
input_mask = random_attention_mask([self.batch_size, self.seq_length])
token_type_ids = None
if self.use_token_type_ids:
token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
if self.use_labels:
token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
config = self.get_config()
return (config, input_ids, token_type_ids, input_mask, pixel_values, token_labels)
def get_config(self):
return ViltConfig(
image_size=self.image_size,
patch_size=self.patch_size,
num_channels=self.num_channels,
vocab_size=self.vocab_size,
hidden_size=self.hidden_size,
num_hidden_layers=self.num_hidden_layers,
num_attention_heads=self.num_attention_heads,
intermediate_size=self.intermediate_size,
hidden_act=self.hidden_act,
hidden_dropout_prob=self.hidden_dropout_prob,
attention_probs_dropout_prob=self.attention_probs_dropout_prob,
max_position_embeddings=self.max_position_embeddings,
type_vocab_size=self.type_vocab_size,
is_decoder=False,
initializer_range=self.initializer_range,
num_labels=self.num_labels,
modality_type_vocab_size=self.modality_type_vocab_size,
num_images=self.num_images,
)
def create_and_check_model(
self,
config,
input_ids,
token_type_ids,
input_mask,
pixel_values,
token_labels,
):
model = ViltModel(config=config)
model.to(torch_device)
model.eval()
result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, pixel_values=pixel_values)
result = model(input_ids, token_type_ids=token_type_ids, pixel_values=pixel_values)
result = model(input_ids, pixel_values=pixel_values)
self.parent.assertEqual(
result.last_hidden_state.shape, (self.batch_size, self.expected_seq_len, self.hidden_size)
)
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
(
config,
input_ids,
token_type_ids,
input_mask,
pixel_values,
token_labels,
) = config_and_inputs
inputs_dict = {
"input_ids": input_ids,
"token_type_ids": token_type_ids,
"attention_mask": input_mask,
"pixel_values": pixel_values,
}
return config, inputs_dict
def prepare_pixel_values(self):
return floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
@require_torch
class ViltModelTest(ModelTesterMixin, unittest.TestCase):
all_model_classes = (
(
ViltModel,
ViltForQuestionAnswering,
ViltForImageAndTextRetrieval,
ViltForMaskedLM,
)
if is_torch_available()
else ()
)
test_pruning = False
test_headmasking = False
test_torchscript = False
# ViltForMaskedLM, ViltForQuestionAnswering and ViltForImagesAndTextClassification require special treatment
def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
# if model_class.__name__ == "ViltForNaturalLanguageVisualReasonining":
# inputs_dict["pixel_values"] = floats_tensor([self.model_tester.batch_size, self.model_tester.num_images, self.model_tester.num_channels, self.model_tester.image_size, self.model_tester.image_size])
if return_labels:
if model_class.__name__ == "ViltForQuestionAnswering":
inputs_dict["labels"] = torch.zeros(
self.model_tester.batch_size, self.model_tester.num_labels, device=torch_device
)
elif model_class.__name__ == "ViltForMaskedLM":
inputs_dict["labels"] = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
)
elif model_class.__name__ == "ViltForImagesAndTextClassification":
inputs_dict["labels"] = torch.zeros(
self.model_tester.batch_size, dtype=torch.long, device=torch_device
)
return inputs_dict
def setUp(self):
self.model_tester = ViltModelTester(self)
self.config_tester = ConfigTester(self, config_class=ViltConfig, hidden_size=37)
def test_config(self):
self.config_tester.run_common_tests()
def test_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs)
def test_training(self):
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.return_dict = True
if model_class.__name__ == "ViltForImagesAndTextClassification":
config.modality_type_vocab_size = 3
# ViltForImageAndTextRetrieval doesn't support training for now
if model_class in [*get_values(MODEL_MAPPING), ViltForImageAndTextRetrieval]:
continue
model = model_class(config)
model.to(torch_device)
model.train()
inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
loss = model(**inputs).loss
loss.backward()
def test_training_gradient_checkpointing(self):
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.use_cache = False
config.return_dict = True
# ViltForImageAndTextRetrieval doesn't support training for now
if (
model_class in [*get_values(MODEL_MAPPING), ViltForImageAndTextRetrieval]
or not model_class.supports_gradient_checkpointing
):
continue
model = model_class(config)
model.to(torch_device)
model.gradient_checkpointing_enable()
model.train()
inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
loss = model(**inputs).loss
loss.backward()
@unittest.skip(
reason="""VilT samples image tokens from a multinomial distribution, resulting in not deterministic
hidden states"""
)
def test_save_load(self):
pass
@unittest.skip(
reason="""VilT samples image tokens from a multinomial distribution, resulting in not deterministic
hidden states"""
)
def test_determinism(self):
pass
@unittest.skip(
reason="""VilT samples image tokens from a multinomial distribution, resulting in not deterministic
hidden states"""
)
def test_model_outputs_equivalence(self):
pass
def test_attention_outputs(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.return_dict = True
seq_len = getattr(self.model_tester, "expected_seq_len", None)
for model_class in self.all_model_classes:
inputs_dict["output_attentions"] = True
inputs_dict["output_hidden_states"] = False
config.return_dict = True
model = model_class(config)
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
attentions = outputs.attentions
if model_class.__name__ == "ViltForImagesAndTextClassification":
# attentions are a list of length num_images
# each element contains the attentions of a particular image index
self.assertEqual(len(attentions), self.model_tester.num_images)
self.assertEqual(len(attentions[0]), self.model_tester.num_hidden_layers)
else:
self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
config.output_attentions = True
model = model_class(config)
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
attentions = outputs.attentions
if model_class.__name__ == "ViltForImagesAndTextClassification":
# attentions are a list of length num_images
# each element contains the attentions of a particular image index
self.assertEqual(len(attentions), self.model_tester.num_images)
self.assertEqual(len(attentions[0]), self.model_tester.num_hidden_layers)
else:
self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
if model_class.__name__ == "ViltForImagesAndTextClassification":
self.assertListEqual(
list(attentions[0][0].shape[-3:]),
[self.model_tester.num_attention_heads, seq_len, seq_len],
)
else:
self.assertListEqual(
list(attentions[0].shape[-3:]),
[self.model_tester.num_attention_heads, seq_len, seq_len],
)
out_len = len(outputs)
# Check attention is always last and order is fine
inputs_dict["output_attentions"] = True
inputs_dict["output_hidden_states"] = True
model = model_class(config)
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
self.assertEqual(out_len + 1, len(outputs))
self_attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
if model_class.__name__ == "ViltForImagesAndTextClassification":
self.assertEqual(len(self_attentions), self.model_tester.num_images)
self.assertEqual(len(self_attentions[0]), self.model_tester.num_hidden_layers)
self.assertListEqual(
list(self_attentions[0][0].shape[-3:]),
[self.model_tester.num_attention_heads, seq_len, seq_len],
)
else:
self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)
self.assertListEqual(
list(self_attentions[0].shape[-3:]),
[self.model_tester.num_attention_heads, seq_len, seq_len],
)
def test_hidden_states_output(self):
def check_hidden_states_output(inputs_dict, config, model_class):
model = model_class(config)
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
expected_num_layers = getattr(
self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1
)
if model_class.__name__ == "ViltForImagesAndTextClassification":
# hidden_states are a list of length num_images
# each element contains the hidden states of a particular image index
self.assertEqual(len(hidden_states), self.model_tester.num_images)
self.assertEqual(len(hidden_states[0]), expected_num_layers)
else:
self.assertEqual(len(hidden_states), expected_num_layers)
seq_length = self.model_tester.expected_seq_len
if model_class.__name__ == "ViltForImagesAndTextClassification":
self.assertListEqual(
list(hidden_states[0][0].shape[-2:]),
[seq_length, self.model_tester.hidden_size],
)
else:
self.assertListEqual(
list(hidden_states[0].shape[-2:]),
[seq_length, self.model_tester.hidden_size],
)
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
print("Model class:", model_class)
inputs_dict["output_hidden_states"] = True
check_hidden_states_output(inputs_dict, config, model_class)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
config.output_hidden_states = True
check_hidden_states_output(inputs_dict, config, model_class)
def test_retain_grad_hidden_states_attentions(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.output_hidden_states = True
config.output_attentions = True
# no need to test all models as different heads yield the same functionality
model_class = self.all_model_classes[0]
model = model_class(config)
model.to(torch_device)
inputs = self._prepare_for_class(inputs_dict, model_class)
outputs = model(**inputs)
output = outputs[0]
# Encoder-/Decoder-only models
hidden_states = outputs.hidden_states[0]
attentions = outputs.attentions[0]
if model_class.__name__ == "ViltForImagesAndTextClassification":
# hidden_states are a list of length num_images
# each element contains the hidden states of a particular image index
hidden_states[0].retain_grad()
attentions[0].retain_grad()
else:
hidden_states.retain_grad()
attentions.retain_grad()
output.flatten()[0].backward(retain_graph=True)
if model_class.__name__ == "ViltForImagesAndTextClassification":
# hidden_states are a list of length num_images
# each element contains the hidden states of a particular image index
self.assertIsNotNone(hidden_states[0].grad)
self.assertIsNotNone(attentions[0].grad)
else:
self.assertIsNotNone(hidden_states.grad)
self.assertIsNotNone(attentions.grad)
@slow
def test_model_from_pretrained(self):
for model_name in VILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
model = ViltModel.from_pretrained(model_name)
self.assertIsNotNone(model)
@require_torch
class ViltForImagesAndTextClassificationModelTest(ViltModelTest, unittest.TestCase):
all_model_classes = (ViltForImagesAndTextClassification,) if is_torch_available() else ()
def setUp(self):
self.model_tester = ViltModelTester(self, modality_type_vocab_size=3, add_multiple_images=True, num_images=2)
self.config_tester = ConfigTester(self, config_class=ViltConfig, hidden_size=37)
@unittest.skip("We only test the model that takes in multiple images")
def test_model(self):
pass
# We will verify our results on an image of cute cats
def prepare_img():
image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
return image
@require_torch
@require_vision
class ViltModelIntegrationTest(unittest.TestCase):
@cached_property
def default_processor(self):
return ViltProcessor.from_pretrained("dandelin/vilt-b32-finetuned-vqa") if is_vision_available() else None
@slow
def test_inference_masked_lm(self):
model = ViltForMaskedLM.from_pretrained("dandelin/vilt-b32-mlm").to(torch_device)
processor = self.default_processor
image = prepare_img()
text = "a bunch of [MASK] laying on a [MASK]."
inputs = processor(image, text, return_tensors="pt").to(torch_device)
# forward pass
with torch.no_grad():
outputs = model(**inputs)
# verify the logits
expected_shape = torch.Size([1, 11, 30522])
self.assertEqual(outputs.logits.shape, expected_shape)
expected_slice = torch.tensor([-12.5061, -12.5123, -12.5174]).to(torch_device)
self.assertTrue(torch.allclose(outputs.logits[0, 0, :3], expected_slice, atol=1e-4))
# verify masked token prediction equals "cats"
predicted_id = outputs.logits[0, 4, :].argmax(-1).item()
assert processor.decode([predicted_id]) == "cats"
@slow
def test_inference_visual_question_answering(self):
model = ViltForQuestionAnswering.from_pretrained("dandelin/vilt-b32-finetuned-vqa").to(torch_device)
processor = self.default_processor
image = prepare_img()
text = "How many cats are there?"
inputs = processor(image, text, return_tensors="pt").to(torch_device)
# forward pass
with torch.no_grad():
outputs = model(**inputs)
# verify the logits
expected_shape = torch.Size((1, 3129))
self.assertEqual(outputs.logits.shape, expected_shape)
expected_slice = torch.tensor([-15.9495, -18.1472, -10.3041]).to(torch_device)
self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
# compute loss
vqa_labels = [[2, 3, 155, 800]]
vqa_scores = [[1.0, 0.3, 0.3, 0.3]]
labels = torch.zeros(1, model.config.num_labels).to(torch_device)
for i, (labels_example, scores_example) in enumerate(zip(vqa_labels, vqa_scores)):
for l, s in zip(labels_example, scores_example):
labels[i, l] = s
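# With the lists above this sets labels[0, 2] = 1.0 and
# labels[0, 3] = labels[0, 155] = labels[0, 800] = 0.3; all other entries stay 0.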
# forward pass
outputs = model(**inputs, labels=labels)
# verify we have a positive loss
self.assertTrue(outputs.loss > 0)
@slow
def test_inference_natural_language_visual_reasoning(self):
model = ViltForImagesAndTextClassification.from_pretrained("dandelin/vilt-b32-finetuned-nlvr2").to(
torch_device
)
processor = self.default_processor
dataset = load_dataset("hf-internal-testing/fixtures_nlvr2", split="test")
image1 = Image.open(dataset[0]["file"]).convert("RGB")
image2 = Image.open(dataset[1]["file"]).convert("RGB")
text = (
"The left image contains twice the number of dogs as the right image, and at least two dogs in total are"
" standing."
)
encoding_1 = processor(image1, text, return_tensors="pt")
encoding_2 = processor(image2, text, return_tensors="pt")
pixel_values = torch.stack([encoding_1.pixel_values, encoding_2.pixel_values], dim=1)
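# Each encoding carries pixel_values of shape (1, num_channels, height, width), so
# stacking along dim=1 gives (1, 2, num_channels, height, width), one slot per image.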
# forward pass
outputs = model(
input_ids=encoding_1.input_ids.to(torch_device),
pixel_values=pixel_values.to(torch_device),
)
# verify the logits
expected_shape = torch.Size([1, 2])
self.assertEqual(outputs.logits.shape, expected_shape)
is_pillow_less_than_9 = version.parse(PIL.__version__) < version.parse("9.0.0")
if is_pillow_less_than_9:
expected_slice = torch.tensor(
[-2.4013, 2.9342],
device=torch_device,
)
else:
expected_slice = torch.tensor(
[-2.3713, 2.9168],
device=torch_device,
)
self.assertTrue(torch.allclose(outputs.logits[0, :2], expected_slice, atol=1e-4))
|
py | 1a2f967b054e32f7b45c3c4250d576604c6a17e5 | import os
import sys
module_path = os.path.abspath(os.path.join('../models/'))
print(module_path)
if module_path not in sys.path:
sys.path.append(module_path)
import torch
import torch.nn as nn
import torch.backends.cudnn as cudnn
from torch.autograd import Variable
import numpy as np
import cv2
import time
if torch.cuda.is_available():
torch.set_default_tensor_type('torch.cuda.FloatTensor')
os.environ["CUDA_VISIBLE_DEVICES"] = "1"
size = 320
# from refinedet import build_refinedet
# from models.multitrident_refinedet_v2 import build_multitridentrefinedet
from models.multitrident_refinedet import build_multitridentrefinedet
net = build_multitridentrefinedet('test', size, 21) # initialize SSD
# net = build_refinedet('test', 512, 21)
# net.load_weights('../weights/RefineDet512_VOC_final.pth')
# net.load_weights('../weights/experiment/320*320/exp_4_[256relufpn][0.3_0.6][mAP_0.77][dilate:11111-12333-12555]/RefineDet320_VOC_275000.pth')
net.load_weights('../weights/experiment/320*320/RefineDet320_VOC_315000.pth')
"""000210 000111 000144 009539 009589 000069 009539 001275 002333 002338 002341
002695 002713 003681 003874 003673 003740"""
im_names = "002695.jpg"
image_file = '/home/yiling/data/VOCdevkit/VOC2007/JPEGImages/' + im_names
image = cv2.imread(image_file, cv2.IMREAD_COLOR) # read the test image from the VOC2007 JPEGImages folder
#%matplotlib inline
from matplotlib import pyplot as plt
from data import VOCDetection, VOC_ROOT, VOCAnnotationTransform
# here we specify year (07 or 12) and dataset ('test', 'val', 'train')
testset = VOCDetection(VOC_ROOT, [('2007', 'val')], None, VOCAnnotationTransform())
img_id = 62
# image = testset.pull_image(img_id)
rgb_image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
# View the sampled input image before transform
plt.figure(figsize=(10,10))
# plt.imshow(rgb_image)
# plt.show()
x = cv2.resize(image, (size, size)).astype(np.float32)
x -= (104.0, 117.0, 123.0)
x = x.astype(np.float32)
x = x[:, :, ::-1].copy()
# plt.imshow(x)
x = torch.from_numpy(x).permute(2, 0, 1)
xx = Variable(x.unsqueeze(0)) # wrap tensor in Variable
if torch.cuda.is_available():
xx = xx.cuda()
start = time.time()
y = net(xx)
end = time.time()
print(end-start)
from data import VOC_CLASSES as labels
top_k=100
plt.figure(figsize=(10,10))
colors = plt.cm.hsv(np.linspace(0, 1, 21)).tolist()
plt.imshow(rgb_image) # plot the image for matplotlib
currentAxis = plt.gca()
detections = y.data
# scale each detection back up to the image
scale = torch.Tensor(rgb_image.shape[1::-1]).repeat(2)
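# rgb_image.shape is (height, width, channels); shape[1::-1] picks (width, height),
# and repeat(2) turns it into [w, h, w, h] so the normalized box corners can be scaled.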
for i in range(detections.size(1)):
for j in range(detections.size(2)):
if detections[0,i,j,0] > 0.05:
score = detections[0, i, j, 0]
label_name = labels[i - 1]
display_txt = '%s: %.2f' % (label_name, score)
pt = (detections[0, i, j, 1:] * scale).cpu().numpy()
coords = (pt[0], pt[1]), pt[2] - pt[0] + 1, pt[3] - pt[1] + 1
color = colors[i]
currentAxis.add_patch(plt.Rectangle(*coords, fill=False, edgecolor=color, linewidth=2))
currentAxis.text(pt[0], pt[1], display_txt, bbox={'facecolor': color, 'alpha': 0.5})
else:
continue
# j = 0
# while detections[0,i,j,0] >= -1:
# score = detections[0,i,j,0]
# label_name = labels[i-1]
# display_txt = '%s: %.2f'%(label_name, score)
# pt = (detections[0,i,j,1:]*scale).cpu().numpy()
# coords = (pt[0], pt[1]), pt[2]-pt[0]+1, pt[3]-pt[1]+1
# color = colors[i]
# currentAxis.add_patch(plt.Rectangle(*coords, fill=False, edgecolor=color, linewidth=2))
# currentAxis.text(pt[0], pt[1], display_txt, bbox={'facecolor':color, 'alpha':0.5})
# j+=1
plt.show()
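# A minimal sketch, not part of the original script, of how the raw `detections`
# tensor above could be collected into a plain list of (label, score, box) tuples.
# The helper name and the 0.05 threshold are illustrative choices.
def extract_detections(detections, scale, labels, threshold=0.05):
    results = []
    for i in range(detections.size(1)):      # class index (0 is the background class)
        for j in range(detections.size(2)):  # detection slot for this class
            score = detections[0, i, j, 0]
            if score <= threshold:
                continue
            box = (detections[0, i, j, 1:] * scale).cpu().numpy()
            results.append((labels[i - 1], float(score), box))
    return results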
|
py | 1a2f97cb5a5f68eb19cc8985425d3d2948e80c3c | # -*- coding: utf-8 -*-
"""
Python Slack Bot class for use with the pythOnBoarding app
"""
import os
from slackclient import SlackClient
# To remember which teams have authorized your app and what tokens are
# associated with each team, we can store this information in memory on
# as a global object. When your bot is out of development, it's best to
# save this in a more persistent memory store.
authed_teams = {}
class Bot(object):
""" Instanciates a Bot object to handle Slack onboarding interactions."""
def __init__(self):
super(Bot, self).__init__()
self.name = "come_back_here"
self.emoji = ":robot_face:"
# When we instantiate a new bot object, we can access the app
# credentials we set earlier in our local development environment.
self.oauth = {"client_id": os.environ.get("CLIENT_ID"),
"client_secret": os.environ.get("CLIENT_SECRET"),
# Scopes provide and limit permissions to what our app
# can access. It's important to use the most restricted
# scope that your app will need.
"scope": "users.profile:read"}
self.verification = os.environ.get("VERIFICATION_TOKEN")
# NOTE: Python-slack requires a client connection to generate
# an oauth token. We can connect to the client without authenticating
# by passing an empty string as a token and then reinstantiating the
# client with a valid OAuth token once we have one.
self.client = SlackClient("")
# We'll use this dictionary to store the state of each message object.
# In a production environment you'll likely want to store this more
# persistently in a database.
self.messages = {}
def auth(self, code):
"""
Authenticate with OAuth and assign correct scopes.
Save a dictionary of authed team information in memory on the bot
object.
Parameters
----------
code : str
temporary authorization code sent by Slack to be exchanged for an
OAuth token
"""
# After the user has authorized this app for use in their Slack team,
# Slack returns a temporary authorization code that we'll exchange for
# an OAuth token using the oauth.access endpoint
auth_response = self.client.api_call(
"oauth.access",
client_id=self.oauth["client_id"],
client_secret=self.oauth["client_secret"],
code=code
)
# To keep track of authorized teams and their associated OAuth tokens,
# we will save the team ID and bot tokens to the global
# authed_teams object
team_id = auth_response["team_id"]
authed_teams[team_id] = {"bot_token":
auth_response["access_token"]}
# Then we'll reconnect to the Slack Client with the correct team's
# bot token
self.client = SlackClient(authed_teams[team_id]["bot_token"])
def bring_back_user(self, user_id, channel, token):
"""
Create and send an onboarding welcome message to new users. Save the
time stamp of this message on the message object for updating in the
future.
Parameters
----------
team_id : str
id of the Slack team associated with the incoming event
user_id : str
id of the Slack user associated with the incoming event
"""
# Build the text of the call-back message and post it to the channel the
# user left; afterwards the API call below re-invites the user to it.
text = "Hey... get back here <@" + str(user_id) + ">"
self.client.api_call(
"chat.postMessage",
channel=channel,
token=token,
username=self.name,
icon_emoji=self.emoji,
text=text
)
self.client.api_call("channels.invite", token=token, channel=channel, user=user_id)
|
py | 1a2f981b3f5cd8e4906825f0c625333ed02f4993 | # -*- coding: utf-8 -*-
#########################################################################
#
# Copyright (C) 2016 OSGeo
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#########################################################################
import errno
import logging
from geoserver.layer import Layer as GsLayer
from django.conf import settings
from django.dispatch import receiver, Signal
from django.forms.models import model_to_dict
from django.contrib.staticfiles.templatetags import staticfiles
# use different name to avoid module clash
from geonode.utils import (
is_monochromatic_image,
json_serializer_producer)
from geonode.decorators import on_ogc_backend
from geonode.geoserver.helpers import (
gs_catalog,
ogc_server_settings)
from geonode.geoserver.tasks import geoserver_create_thumbnail
from geonode.layers.models import Layer
from geonode.services.enumerations import CASCADED
from . import BACKEND_PACKAGE
from .tasks import geoserver_cascading_delete, geoserver_post_save_layers
logger = logging.getLogger("geonode.geoserver.signals")
geoserver_post_save_complete = Signal(providing_args=['instance'])
def geoserver_delete(typename):
# cascading_delete should only be called if
# ogc_server_settings.BACKEND_WRITE_ENABLED == True
if getattr(ogc_server_settings, "BACKEND_WRITE_ENABLED", True):
geoserver_cascading_delete.apply_async((typename,))
@on_ogc_backend(BACKEND_PACKAGE)
def geoserver_pre_delete(instance, sender, **kwargs):
"""Removes the layer from GeoServer
"""
# cascading_delete should only be called if
# ogc_server_settings.BACKEND_WRITE_ENABLED == True
if getattr(ogc_server_settings, "BACKEND_WRITE_ENABLED", True):
if instance.remote_service is None or instance.remote_service.method == CASCADED:
if instance.alternate:
geoserver_cascading_delete.apply_async((instance.alternate,))
@on_ogc_backend(BACKEND_PACKAGE)
def geoserver_pre_save(*args, **kwargs):
# nothing to do here, processing is pushed to post-save
pass
@on_ogc_backend(BACKEND_PACKAGE)
def geoserver_post_save(instance, sender, created, **kwargs):
from geonode.messaging import producer
# this is attached to various models, (ResourceBase, Document)
# so we should select what will be handled here
if isinstance(instance, Layer):
instance_dict = model_to_dict(instance)
payload = json_serializer_producer(instance_dict)
try:
producer.geoserver_upload_layer(payload)
except Exception as e:
logger.error(e)
if getattr(settings, 'DELAYED_SECURITY_SIGNALS', False):
instance.set_dirty_state()
@on_ogc_backend(BACKEND_PACKAGE)
def geoserver_post_save_local(instance, *args, **kwargs):
"""Send information to geoserver.
The attributes sent include:
* Title
* Abstract
* Name
* Keywords
* Metadata Links,
* Point of Contact name and url
"""
geoserver_post_save_layers.apply_async(
(instance.id, args, kwargs))
@on_ogc_backend(BACKEND_PACKAGE)
def geoserver_pre_save_maplayer(instance, sender, **kwargs):
# If this object was saved via fixtures,
# do not do post processing.
if kwargs.get('raw', False):
return
try:
instance.local = isinstance(
gs_catalog.get_layer(
instance.name),
GsLayer)
except EnvironmentError as e:
if e.errno == errno.ECONNREFUSED:
msg = f'Could not connect to catalog to verify if layer {instance.name} was local'
logger.warn(msg)
else:
raise e
@on_ogc_backend(BACKEND_PACKAGE)
def geoserver_post_save_map(instance, sender, created, **kwargs):
instance.set_missing_info()
if not created:
if not instance.thumbnail_url or \
instance.thumbnail_url == staticfiles.static(settings.MISSING_THUMBNAIL):
logger.debug(f"... Creating Thumbnail for Map [{instance.title}]")
# create_gs_thumbnail(instance, overwrite=False, check_bbox=True)
geoserver_create_thumbnail.apply_async(((instance.id, False, True, )))
@receiver(geoserver_post_save_complete)
def geoserver_post_save_thumbnail(sender, instance, **kwargs):
# Creating Layer Thumbnail
# some thumbnail generators will update thumbnail_url. If so, don't
# immediately re-generate the thumbnail here. use layer#save(update_fields=['thumbnail_url'])
try:
instance.refresh_from_db()
logger.debug(f"... Creating Thumbnail for Layer {instance.title}")
_recreate_thumbnail = False
if 'update_fields' in kwargs and kwargs['update_fields'] is not None and \
'thumbnail_url' in kwargs['update_fields']:
_recreate_thumbnail = True
if not instance.thumbnail_url or \
instance.thumbnail_url == staticfiles.static(settings.MISSING_THUMBNAIL) or \
is_monochromatic_image(instance.thumbnail_url):
_recreate_thumbnail = True
if _recreate_thumbnail:
geoserver_create_thumbnail.apply_async(((instance.id, False, True, )))
else:
logger.debug(f"... Thumbnail for Layer {instance.title} already exists: {instance.thumbnail_url}")
except Exception as e:
logger.exception(e)
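# Illustrative only: code elsewhere in GeoNode that finishes its own post-save work
# could notify the receiver above with something along the lines of
#
#     geoserver_post_save_complete.send(
#         sender=type(instance), instance=instance, update_fields=['thumbnail_url'])
#
# The exact sender and keyword arguments used by the real callers are assumptions here.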
|
py | 1a2f982e46600735ad81eb22afcab7796311f9c0 | # -*- coding: utf-8 -*-
#
# dist_fit documentation build configuration file, created by
# sphinx-quickstart on Sat Nov 10 11:16:37 2012.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import matplotlib
matplotlib.use('Agg')
import sys
from os.path import dirname, join
sys.path.insert(0, join(dirname(__file__), '../'))
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['matplotlib.sphinxext.only_directives',
'matplotlib.sphinxext.plot_directive',
'matplotlib.sphinxext.ipython_directive',
'sphinx.ext.autodoc',
'sphinx.ext.mathjax',
'sphinx.ext.autosummary']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'probfit'
copyright = u'2012, Piti Ongmongkolkul'
autoclass_content = 'both'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
import probfit.info
version = probfit.info.__version__
# The full version, including alpha/beta/rc tags.
release = probfit.info.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build', '_themes']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'armstrong'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ['_themes', ]
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
#html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'probfitdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'probfit.tex', u'probfit Documentation',
u'Piti Ongmongkolkul', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'probfit', u'probfit Documentation',
[u'Piti Ongmongkolkul'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'probfit', u'probfit Documentation',
u'Piti Ongmongkolkul', 'probfit', 'Fitting Stuff',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
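# Building the docs with this configuration is typically done with something like
# `sphinx-build -b html . _build/html` run from this directory; the exact output
# path is only a conventional choice and not mandated by this file.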
|
py | 1a2f998340ee16e43ab660980c196eaa7a6ef5de | """
"Beacon" (c) by Ignacio Slater M.
"Beacon" is licensed under a
Creative Commons Attribution 4.0 International License.
You should have received a copy of the license along with this
work. If not, see <http://creativecommons.org/licenses/by/4.0/>.
""" |
py | 1a2f99fe9e7de73aadee46f02c068cf4dc5697ed | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Benchmarks for low-level eager execution primitives.
Packaged as a test to ensure that this code is exercised by continuous
integration tests. To get numbers:
bazel build -c opt :benchmarks_test &&
./bazel-bin/tensorflow/python/eager/benchmarks_test --iters=0
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import contextlib
import sys
import time
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python import pywrap_tensorflow
from tensorflow.python.eager import backprop # pylint: disable=unused-import
from tensorflow.python.eager import context
from tensorflow.python.eager import function
from tensorflow.python.eager import test
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import gen_math_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
FLAGS = None
@contextlib.contextmanager
def timer(label, iters=30000):
start = time.time()
yield xrange(iters)
end = time.time()
t = (end - start) * 1e6 / iters
print("%-40s took %.2fus (%d iterations)" % (label, t, iters))
def benchmark_create_tensor(n):
"""Benchmark overheads of creating a Tensor object."""
def label(s):
return "{:20s}".format(s)
with timer(label("np.array([[3.0]])"), iters=n) as iters:
for _ in iters:
np.array([[3.0]])
ctx = context.context()
handle = ctx._handle
device = ctx.device_name
# This may warm up the GPU.
ops.EagerTensor([[3.0]], context=handle, device=device)
# float32
dtype = dtypes.float32.as_datatype_enum
three = [[3.0]]
with timer(label("EagerTensor([[3.0]])"), iters=n) as iters:
for _ in iters:
ops.EagerTensor(three, context=handle, device=device, dtype=dtype)
np_3 = np.array([[3.0]], dtype=np.float32)
with timer(label("EagerTensor(np.array([[3.0]]))"), iters=n) as iters:
for _ in iters:
ops.EagerTensor(np_3, context=handle, device=device, dtype=dtype)
# int32.
# This is interesting since int32 will be kept on host memory for the GPU
# case.
dtype = dtypes.int32.as_datatype_enum
three = [[3]]
with timer(label("EagerTensor([[3]])"), iters=n) as iters:
for _ in iters:
ops.EagerTensor(three, context=handle, device=device, dtype=dtype)
np_3 = np.array([[3]], dtype=np.int32)
with timer(label("EagerTensor(np.array([[3]]))"), iters=n) as iters:
for _ in iters:
ops.EagerTensor(np_3, context=handle, device=device, dtype=dtype)
def benchmark_matmul(shape, n, use_gpu=False):
"""Benchmark for matrix multiplication using tf.matmul."""
transpose_b = (shape[0] != shape[1])
m = random_ops.random_uniform(shape)
if use_gpu:
m = m.gpu()
# Warm up the GPU - the very first kernel invocation
# seems to require a bunch of setup.
math_ops.matmul(m, m, transpose_b=transpose_b)
def label(s):
return "MatMul {}: {:30s}".format(shape, s)
if not use_gpu:
a = m.cpu().numpy()
b = a.T if transpose_b else a
with timer(label("np.dot"), iters=n) as iters:
for _ in iters:
np.dot(a, b)
with timer(label("tf.matmul"), iters=n) as iters:
for _ in iters:
math_ops.matmul(m, m, transpose_b=transpose_b)
with timer(label("gen_math_ops.mat_mul"), iters=n) as iters:
for _ in iters:
gen_math_ops._mat_mul(m, m, transpose_b=transpose_b)
inputs = [m, m]
# pylint: disable=protected-access
ctx_handle = context.context()._handle
# pylint: enable=protected-access
attrs = ("transpose_a", False, "transpose_b", transpose_b, "T",
m.dtype.as_datatype_enum)
with timer(label("TFE_Py_Execute"), iters=n) as iters:
for _ in iters:
pywrap_tensorflow.TFE_Py_Execute(ctx_handle, None, "MatMul",
inputs, attrs, 1)
f = function.defun(math_ops.matmul)
with timer(label("defun(tf.matmul)"), iters=n) as iters:
for _ in iters:
f(m, m, transpose_b=transpose_b)
def benchmark_multiply(shape, n, use_gpu=False):
m = random_ops.random_uniform(shape)
if use_gpu:
m = m.gpu()
# Warm up the GPU - the very first kernel invocation
# seems to require a bunch of setup.
_ = m * m
def label(s):
return "Multiply {}: {:30s}".format(shape, s)
if not use_gpu:
a = m.cpu().numpy()
with timer(label("np.multiply"), iters=n) as iters:
for _ in iters:
_ = a * a
with timer(label("tf.multiply"), iters=n) as iters:
for _ in iters:
_ = m * m
class BenchmarksTest(test_util.TensorFlowTestCase):
def testBenchmarks(self):
# This isn't actually a test, but benchmarks packaged as a test
# so that continuous integration runs catch any breakages.
print(context.context())
benchmark_create_tensor(FLAGS.iters or 30000)
benchmark_matmul([2, 2], FLAGS.iters or 30000)
benchmark_matmul([100, 28 * 28], FLAGS.iters or 1000)
benchmark_multiply([2], FLAGS.iters or 30000)
if context.context().num_gpus() > 0:
print("---- RUNNING ON GPU NOW ----")
with context.device("/device:GPU:0"):
benchmark_create_tensor(FLAGS.iters or 30000)
benchmark_matmul([2, 2], FLAGS.iters or 30000, use_gpu=True)
benchmark_matmul([100, 28 * 28], FLAGS.iters or 1000, use_gpu=True)
benchmark_multiply([2], FLAGS.iters or 30000, use_gpu=True)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Default iterations to 1 to keep continuous integration test times low.
parser.add_argument(
"--iters",
type=int,
default=1,
help="Number of iterators for each test. None or 0 for auto-selection")
FLAGS, unparsed = parser.parse_known_args()
sys.argv = [sys.argv[0]] + unparsed
test.main()
|
py | 1a2f9caa414793b1785cf0cee2e5c50908fe5161 | # -*- coding: utf-8 -*-
import os
import interface.session_events as se
################################################################################
def use_GUI_windows(vip):
"""Let the windows pop up that contain all the vip widgets.
"""
message_cwd = "Current working directory:\n{0}\n".format(os.getcwd())
message_welcome = "Welcome to the Virtual Instrument Panel!\n"
vip.GUI_feedback([message_cwd, message_welcome])
se.bn_open_GUI_feedback(vip)
se.bn_open_plots_12(vip)
vip.show()
print "\n/(use_GUI_windows)\n"
def customize_paths(vip, DIR_PATH_data):
"""Use the vip's .set method to set the path relevant to the user interface.
"""
### this sets several line edits to initial sensible values
FILE_PATH_session = DIR_PATH_data+os.sep+"session_init.txt"
FILE_PATH_notes = "K:\\_Computing\\MeasurementSoftware\\VIP_notes.txt"
FILE_PATH_waveform = "C:/Users/Public/Documents/Signadyne/Examples/Waveforms/Gaussian.csv"
### The format is: vip.set(SESSION_KEY, REPLACEMENT_DICTIONARY)
### Note that we could also save those settings to a .txt file and load it.
vip.set('Results', {'DIR_PATH_results' : DIR_PATH_data})
vip.set('Session', {'FILE_PATH_session' : FILE_PATH_session})
vip.set('Options', {'FILE_PATH_notes' : FILE_PATH_notes})
vip.set('H3344_1', {'FILE_PATH_waveform' : FILE_PATH_waveform})
for index in vip._sessions_local.keys():
vip.set('Session', {'F_dict_index' : str(index)}) # why is this not setting the index??/
se.bn_vip_to_list_session(vip)
vip.set('Session', {'F_dict_index' : 'default'})
### Unless it already exists, create a results data file directory.
if not os.path.isdir(DIR_PATH_data):
os.makedirs(DIR_PATH_data)
### Finally, make the Data folder the working directory for our session.
os.chdir(DIR_PATH_data)
print "\n/(customize_DIR_and_FILE_paths)\n"
|
py | 1a2f9eaa432ecda298d5d7d079f83268aba0a32a | from time import sleep
from threading import Event, Thread
from zmq import (
Context,
HWM,
NOBLOCK,
Poller,
POLLIN,
PUB,
PAIR,
SUB,
SUBSCRIBE,
)
shutdown = Event()
class KillThread(Exception):
"""Raised when we want threads to die"""
class Heartbeat(Thread):
def __init__(self, context, *args, **kw):
self.context = context
self.pub = self.context.socket(PAIR)
self.pub.bind("inproc://#1")
super(Heartbeat, self).__init__(*args, **kw)
def cleanup(self):
self.pub.send("DIE")
self.pub.close()
def run(self):
try:
x = 0
while not shutdown.is_set():
self.pub.send("BEAT.FOO.* %d, %s" % (x + 1, self.name))
x += 1
sleep(1)
finally:
print "%s exiting..." % self.name
self.cleanup()
class Stethoscope(Thread):
def __init__(self, context, *args, **kw):
self.context = context
self.recv = self.context.socket(PAIR)
self.recv.connect("inproc://#1")
self.pub = self.context.socket(PUB)
self.pub.connect('tcp://localhost:7003')
self.pub.setsockopt(HWM, 1000)
self.poller = Poller()
self.poller.register(self.recv, POLLIN)
super(Stethoscope, self).__init__(*args, **kw)
def cleanup(self):
self.recv.close()
self.pub.close()
def run(self):
try:
while not shutdown.is_set():
socks = dict(self.poller.poll())
if socks.get(self.recv) == POLLIN:
msg = self.recv.recv()
self.pub.send(msg, flags=NOBLOCK)
if msg == "DIE":
raise KillThread
except KillThread:
print "%s exiting..." % self.name
finally:
self.cleanup()
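# A minimal sketch, not part of the original script, of an external listener for the
# PUB socket above: it binds the port that Stethoscope connects to and subscribes to
# the "BEAT" prefix. The function name and loop shape are illustrative only.
def listen_for_beats(port=7003):
    ctx = Context()
    sub = ctx.socket(SUB)
    sub.bind("tcp://*:%d" % port)
    sub.setsockopt(SUBSCRIBE, "BEAT")
    while not shutdown.is_set():
        print sub.recv()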
context = Context()
heart = Heartbeat(context, name="Heartbeat Thread")
stethoscope = Stethoscope(context, name="Stethoscope Thread")
for t in (heart, stethoscope):
t.start()
while True:
try:
# call thread.join to keep some control in the main thread
while (heart.is_alive() or
stethoscope.is_alive()):
heart.join(timeout=0.1)
stethoscope.join(timeout=0.1)
except KeyboardInterrupt:
shutdown.set()
while (heart.is_alive() or
stethoscope.is_alive()):
heart.join(timeout=0.1)
stethoscope.join(timeout=0.1)
context.term()
break
|
py | 1a2f9f9a333372189788bbe40fb4fda6f65e8a24 | # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
""" Define the Image class and functions to work with Image instances
* fromarray : create an Image instance from an ndarray (deprecated in favor of
using the Image constructor)
* subsample : slice an Image instance (deprecated in favor of image slicing)
* rollaxis : roll an image axis backwards
* synchronized_order : match coordinate systems between images
* iter_axis : make iterator to iterate over an image axis
* is_image : test for an object obeying the Image API
"""
import warnings
from copy import copy
import numpy as np
from nibabel.onetime import setattr_on_read
# These imports are used in the fromarray and subsample functions only, not in
# Image
from ..reference.coordinate_map import (AffineTransform, CoordinateSystem,
input_axis_index)
from ..reference.array_coords import ArrayCoordMap
class Image(object):
""" The `Image` class provides the core object type used in nipy.
An `Image` represents a volumetric brain image and provides means
for manipulating the image data. Most functions in the image module
operate on `Image` objects.
Notes
-----
Images can be created through the module functions. See nipy.io for
image IO such as ``load`` and ``save``
Examples
--------
Load an image from disk
>>> from nipy.testing import anatfile
>>> from nipy.io.api import load_image
>>> img = load_image(anatfile)
Make an image from an array. We need to make a meaningful coordinate map
for the image.
>>> arr = np.zeros((21,64,64), dtype=np.int16)
>>> cmap = AffineTransform('kji', 'zxy', np.eye(4))
>>> img = Image(arr, cmap)
"""
_doc = {}
# Dictionary to store docs for attributes that are properties. We
# want these docs to conform with our documentation standard, but
# they need to be passed into the property function. Defining
# them separately allows us to do this without a lot of clutter
# in the property line.
###################################################################
#
# Attributes
#
###################################################################
metadata = {}
_doc['metadata'] = "Dictionary containing additional information."
coordmap = AffineTransform(CoordinateSystem('ijk'),
CoordinateSystem('xyz'),
np.diag([3,5,7,1]))
_doc['coordmap'] = "Affine transform mapping from axes coordinates to reference coordinates."
@setattr_on_read
def shape(self):
return self._data.shape
_doc['shape'] = "Shape of data array."
@setattr_on_read
def ndim(self):
return len(self._data.shape)
_doc['ndim'] = "Number of data dimensions."
@setattr_on_read
def reference(self):
return self.coordmap.function_range
_doc['reference'] = "Reference coordinate system."
@setattr_on_read
def axes(self):
return self.coordmap.function_domain
_doc['axes'] = "Axes of image."
@setattr_on_read
def affine(self):
if hasattr(self.coordmap, "affine"):
return self.coordmap.affine
raise AttributeError, 'Nonlinear transform does not have an affine.'
_doc['affine'] = "Affine transformation if one exists."
###################################################################
#
# Properties
#
###################################################################
def _getheader(self):
# data loaded from a file may have a header
warnings.warn("Please don't use ``img.header``; use"
"``img.metadata['header'] instead",
DeprecationWarning,
stacklevel=2)
hdr = self.metadata.get('header')
if hdr is None:
raise AttributeError('Image created from arrays '
'may not have headers.')
return hdr
def _setheader(self, header):
warnings.warn("Please don't use ``img.header``; use"
"``img.metadata['header'] instead",
DeprecationWarning,
stacklevel=2)
self.metadata['header'] = header
_doc['header'] = \
"""The file header structure for this image, if available. This interface
will soon go away - you should use ``img.metadata['header']`` instead.
"""
header = property(_getheader, _setheader, doc=_doc['header'])
###################################################################
#
# Constructor
#
###################################################################
def __init__(self, data, coordmap, metadata=None):
"""Create an `Image` object from array and `CoordinateMap` object.
Images are often created through the ``load_image`` function in the nipy
base namespace.
Parameters
----------
data : array-like
object that as attribute ``shape`` and returns an array from
``np.asarray(data)``
coordmap : `AffineTransform` object
coordmap mapping the domain (input) voxel axes of the image to the
range (reference, output) axes - usually mm in real world space
metadata : dict, optional
Freeform metadata for image. Most common contents is ``header``
from nifti etc loaded images.
See Also
--------
load_image : load ``Image`` from a file
save_image : save ``Image`` to a file
"""
if metadata is None:
metadata = {}
else: # Shallow copy
metadata = copy(metadata)
ndim = len(data.shape)
if not isinstance(coordmap, AffineTransform):
raise ValueError('coordmap must be an AffineTransform')
# self._data is an array-like object. It must have a shape attribute
# (see above) and return an array from np.array(data)
self._data = data
self.coordmap = coordmap
if coordmap.function_domain.ndim != ndim:
raise ValueError('the number of axes implied by the coordmap do '
'not match the number of axes of the data')
self.metadata = metadata
###################################################################
#
# Methods
#
###################################################################
def reordered_reference(self, order=None):
""" Return new Image with reordered output coordinates
New Image coordmap has reordered output coordinates. This does
not transpose the data.
Parameters
----------
order : None, sequence, optional
sequence of int (giving indices) or str (giving names) - expressing
new order of coordmap output coordinates. None (the default)
results in reversed ordering.
Returns
-------
r_img : object
Image of same class as `self`, with reordered output coordinates.
Examples
--------
>>> cmap = AffineTransform.from_start_step(
... 'ijk', 'xyz', [1, 2, 3], [4, 5, 6], 'domain', 'range')
>>> im = Image(np.empty((30,40,50)), cmap)
>>> im_reordered = im.reordered_reference([2,0,1])
>>> im_reordered.shape
(30, 40, 50)
>>> im_reordered.coordmap
AffineTransform(
function_domain=CoordinateSystem(coord_names=('i', 'j', 'k'), name='domain', coord_dtype=float64),
function_range=CoordinateSystem(coord_names=('z', 'x', 'y'), name='range', coord_dtype=float64),
affine=array([[ 0., 0., 6., 3.],
[ 4., 0., 0., 1.],
[ 0., 5., 0., 2.],
[ 0., 0., 0., 1.]])
)
"""
if order is None:
order = range(self.ndim)[::-1]
elif type(order[0]) == type(''):
order = [self.reference.index(s) for s in order]
new_cmap = self.coordmap.reordered_range(order)
return self.__class__.from_image(self, coordmap=new_cmap)
def reordered_axes(self, order=None):
""" Return a new Image with reordered input coordinates.
This transposes the data as well.
Parameters
----------
order : None, sequence, optional
Sequence of int (giving indices) or str (giving names) - expressing
new order of coordmap output coordinates. None (the default)
results in reversed ordering.
Returns
-------
r_img : object
Image of same class as `self`, with reordered output coordinates.
Examples
--------
>>> cmap = AffineTransform.from_start_step(
... 'ijk', 'xyz', [1, 2, 3], [4, 5, 6], 'domain', 'range')
>>> cmap
AffineTransform(
function_domain=CoordinateSystem(coord_names=('i', 'j', 'k'), name='domain', coord_dtype=float64),
function_range=CoordinateSystem(coord_names=('x', 'y', 'z'), name='range', coord_dtype=float64),
affine=array([[ 4., 0., 0., 1.],
[ 0., 5., 0., 2.],
[ 0., 0., 6., 3.],
[ 0., 0., 0., 1.]])
)
>>> im = Image(np.empty((30,40,50)), cmap)
>>> im_reordered = im.reordered_axes([2,0,1])
>>> im_reordered.shape
(50, 30, 40)
>>> im_reordered.coordmap
AffineTransform(
function_domain=CoordinateSystem(coord_names=('k', 'i', 'j'), name='domain', coord_dtype=float64),
function_range=CoordinateSystem(coord_names=('x', 'y', 'z'), name='range', coord_dtype=float64),
affine=array([[ 0., 4., 0., 1.],
[ 0., 0., 5., 2.],
[ 6., 0., 0., 3.],
[ 0., 0., 0., 1.]])
)
"""
if order is None:
order = range(self.ndim)[::-1]
elif type(order[0]) == type(''):
order = [self.axes.index(s) for s in order]
new_cmap = self.coordmap.reordered_domain(order)
# Only transpose if we have to so as to avoid calling
# self.get_data
if order != range(self.ndim):
new_data = np.transpose(self.get_data(), order)
else:
new_data = self._data
return self.__class__.from_image(self,
data=new_data,
coordmap=new_cmap)
def renamed_axes(self, **names_dict):
""" Return a new image with input (domain) axes renamed
Axes renamed according to the input dictionary.
Parameters
----------
\*\*names_dict : dict
with keys being old names, and values being new names
Returns
-------
newimg : Image
An Image with the same data, having its axes renamed.
Examples
--------
>>> data = np.random.standard_normal((11,9,4))
>>> im = Image(data, AffineTransform.from_params('ijk', 'xyz', np.identity(4), 'domain', 'range'))
>>> im_renamed = im.renamed_axes(i='slice')
>>> print im_renamed.axes
CoordinateSystem(coord_names=('slice', 'j', 'k'), name='domain', coord_dtype=float64)
"""
new_cmap = self.coordmap.renamed_domain(names_dict)
return self.__class__.from_image(self, coordmap=new_cmap)
def renamed_reference(self, **names_dict):
""" Return new image with renamed output (range) coordinates
Coordinates renamed according to the dictionary
Parameters
----------
\*\*names_dict : dict
with keys being old names, and values being new names
Returns
-------
newimg : Image
An Image with the same data, having its output coordinates renamed.
Examples
--------
>>> data = np.random.standard_normal((11,9,4))
>>> im = Image(data, AffineTransform.from_params('ijk', 'xyz', np.identity(4), 'domain', 'range'))
>>> im_renamed_reference = im.renamed_reference(x='newx', y='newy')
>>> print im_renamed_reference.reference
CoordinateSystem(coord_names=('newx', 'newy', 'z'), name='range', coord_dtype=float64)
"""
new_cmap = self.coordmap.renamed_range(names_dict)
return self.__class__.from_image(self, coordmap=new_cmap)
def __setitem__(self, index, value):
"""Setting values of an image, set values in the data array."""
warnings.warn("Please don't use ``img[x] = y``; use "
"``img.get_data()[x] = y`` instead",
DeprecationWarning,
stacklevel=2)
self._data[index] = value
def __array__(self):
"""Return data as a numpy array."""
warnings.warn('Please use get_data instead - will be deprecated',
DeprecationWarning,
stacklevel=2)
return self.get_data()
def get_data(self):
"""Return data as a numpy array."""
return np.asanyarray(self._data)
def __getitem__(self, slice_object):
""" Slicing an image returns an Image.
Parameters
----------
slice_object: int, slice or sequence of slice
An object representing a numpy 'slice'.
Returns
-------
img_subsampled: Image
An Image with data self.get_data()[slice_object] and an
appropriately corrected CoordinateMap.
Examples
--------
>>> from nipy.io.api import load_image
>>> from nipy.testing import funcfile
>>> im = load_image(funcfile)
>>> frame3 = im[:,:,:,3]
>>> np.allclose(frame3.get_data(), im.get_data()[:,:,:,3])
True
"""
data = self.get_data()[slice_object]
g = ArrayCoordMap(self.coordmap, self.shape)[slice_object]
coordmap = g.coordmap
if coordmap.function_domain.ndim > 0:
return self.__class__.from_image(self,
data=data,
coordmap=coordmap)
else:
return data
def __iter__(self):
""" Images do not have default iteration
This is because it's not obvious that axis 0 is the right axis to
iterate over. For example, we often want to iterate over the time or
volume axis, and this is more likely to be axis 3
"""
raise TypeError("Images do not have default iteration; "
"you can use ``iter_axis(img, axis)`` instead.")
def __eq__(self, other):
return (isinstance(other, self.__class__)
and np.all(self.get_data() == other.get_data())
and np.all(self.affine == other.affine)
and (self.axes.coord_names == other.axes.coord_names))
def __ne__(self, other):
return not self.__eq__(other)
def __repr__(self):
options = np.get_printoptions()
np.set_printoptions(precision=6, threshold=64, edgeitems=2)
representation = \
'Image(\n data=%s,\n coordmap=%s)' % (
'\n '.join(repr(self._data).split('\n')),
'\n '.join(repr(self.coordmap).split('\n')))
np.set_printoptions(**options)
return representation
@classmethod
def from_image(klass, img, data=None, coordmap=None, metadata=None):
""" Classmethod makes new instance of this `klass` from instance `img`
Parameters
----------
data : array-like
object that has attribute ``shape`` and returns an array from
``np.asarray(data)``
coordmap : `AffineTransform` object
coordmap mapping the domain (input) voxel axes of the image to the
range (reference, output) axes - usually mm in real world space
metadata : dict, optional
Freeform metadata for image. Most common contents is ``header``
from nifti etc loaded images.
Returns
-------
img : `klass` instance
New image with data from `data`, coordmap from `coordmap`, and maybe
metadata from `metadata`
Notes
-----
Subclasses of ``Image`` with different semantics for ``__init__`` will
need to override this classmethod.
Examples
--------
>>> from nipy import load_image
>>> from nipy.core.api import Image
>>> from nipy.testing import anatfile
>>> aimg = load_image(anatfile)
>>> arr = np.arange(24).reshape((2,3,4))
>>> img = Image.from_image(aimg, data=arr)
"""
if data is None:
data = img._data
if coordmap is None:
coordmap = copy(img.coordmap)
if metadata is None:
metadata = copy(img.metadata)
return klass(data, coordmap, metadata)
class SliceMaker(object):
""" This class just creates slice objects for image resampling
It only has a __getitem__ method that returns its argument.
XXX Wouldn't need this if there was a way
XXX to do this
XXX subsample(img, [::2,::3,10:1:-1])
XXX
XXX Could be something like this Subsample(img)[::2,::3,10:1:-1]
"""
def __getitem__(self, index):
return index
slice_maker = SliceMaker()
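# Usage sketch: ``slice_maker[:, :, 3]`` simply returns the index tuple
# ``(slice(None), slice(None), 3)``, which ``subsample`` below then applies
# to an image.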
def subsample(img, slice_object):
""" Subsample an image
Please don't use this function, but use direct image slicing instead. That
is, replace::
frame3 = subsample(im, slice_maker[:,:,:,3])
with::
frame3 = im[:,:,:,3]
Parameters
----------
img : Image
slice_object: int, slice or sequence of slice
An object representing a numpy 'slice'.
Returns
-------
img_subsampled: Image
An Image with data img.get_data()[slice_object] and an appropriately
corrected CoordinateMap.
Examples
--------
>>> from nipy.io.api import load_image
>>> from nipy.testing import funcfile
>>> from nipy.core.api import subsample, slice_maker
>>> im = load_image(funcfile)
>>> frame3 = subsample(im, slice_maker[:,:,:,3])
>>> np.allclose(frame3.get_data(), im.get_data()[:,:,:,3])
True
"""
warnings.warn('subsample is deprecated, please use image '
'slicing instead (e.g. img[:,:,1])',
DeprecationWarning,
stacklevel=2)
return img.__getitem__(slice_object)
def fromarray(data, innames, outnames):
"""Create an image from array `data`, and input/output coordinate names
The mapping between the input and output coordinate names is the identity
matrix.
Please don't use this routine, but instead prefer::
from nipy.core.api import Image, AffineTransform
img = Image(data, AffineTransform(innames, outnames, np.eye(4)))
where ``4`` is ``len(innames) + 1``.
Parameters
----------
data : numpy array
A numpy array of three dimensions.
innames : sequence
a list of input axis names
outnames : sequence
a list of output axis names
Returns
-------
image : An `Image` object
See Also
--------
load : function for loading images
save : function for saving images
Examples
--------
>>> img = fromarray(np.zeros((2,3,4)), 'ijk', 'xyz')
>>> img.coordmap
AffineTransform(
function_domain=CoordinateSystem(coord_names=('i', 'j', 'k'), name='', coord_dtype=float64),
function_range=CoordinateSystem(coord_names=('x', 'y', 'z'), name='', coord_dtype=float64),
affine=array([[ 1., 0., 0., 0.],
[ 0., 1., 0., 0.],
[ 0., 0., 1., 0.],
[ 0., 0., 0., 1.]])
)
"""
warnings.warn('fromarray is deprecated, please use the Image '
'constructor instead',
DeprecationWarning,
stacklevel=2)
ndim = len(data.shape)
coordmap = AffineTransform.from_start_step(innames,
outnames,
(0.,)*ndim,
(1.,)*ndim)
return Image(data, coordmap)
@np.deprecate_with_doc('Please use rollimg instead')
def rollaxis(img, axis, inverse=False):
""" Roll `axis` backwards, until it lies in the first position.
It also reorders the reference coordinates by the same ordering.
This is done to preserve a diagonal affine matrix if image.affine
is diagonal. It also makes it possible to unambiguously specify
an axis to roll along in terms of either a reference name (i.e. 'z')
or an axis name (i.e. 'slice').
This function is deprecated; please use ``rollimg`` instead.
Parameters
----------
img : Image
Image whose axes and reference coordinates are to be reordered
by rolling.
axis : str or int
Axis to be rolled, can be specified by name or as an integer.
inverse : bool, optional
If inverse is True, then axis must be an integer and the first axis is
returned to position `axis`. This keyword is deprecated and we'll
remove it in a future version of nipy.
Returns
-------
newimg : Image
Image with reordered axes and reference coordinates.
Examples
--------
>>> data = np.zeros((30,40,50,5))
>>> affine_transform = AffineTransform.from_params('ijkl', 'xyzt', np.diag([1,2,3,4,1]))
>>> im = Image(data, affine_transform)
>>> im.coordmap
AffineTransform(
function_domain=CoordinateSystem(coord_names=('i', 'j', 'k', 'l'), name='', coord_dtype=float64),
function_range=CoordinateSystem(coord_names=('x', 'y', 'z', 't'), name='', coord_dtype=float64),
affine=array([[ 1., 0., 0., 0., 0.],
[ 0., 2., 0., 0., 0.],
[ 0., 0., 3., 0., 0.],
[ 0., 0., 0., 4., 0.],
[ 0., 0., 0., 0., 1.]])
)
>>> im_t_first = rollaxis(im, 't')
>>> np.diag(im_t_first.affine)
array([ 4., 1., 2., 3., 1.])
>>> im_t_first.shape
(5, 30, 40, 50)
>>> im_t_first.coordmap
AffineTransform(
function_domain=CoordinateSystem(coord_names=('l', 'i', 'j', 'k'), name='', coord_dtype=float64),
function_range=CoordinateSystem(coord_names=('t', 'x', 'y', 'z'), name='', coord_dtype=float64),
affine=array([[ 4., 0., 0., 0., 0.],
[ 0., 1., 0., 0., 0.],
[ 0., 0., 2., 0., 0.],
[ 0., 0., 0., 3., 0.],
[ 0., 0., 0., 0., 1.]])
)
"""
if inverse not in (True, False):
raise ValueError('Inverse should be True or False; did you mean to '
'use the ``rollimg`` function instead?')
if isinstance(axis, int) and axis < 0:
axis = img.ndim + axis
if inverse:
if type(axis) != type(0):
raise ValueError('If carrying out inverse rolling, '
'axis must be an integer')
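# Undo a previous forward roll: build [1, 2, ..., ndim-1] and re-insert the
# original first axis (0) at position `axis`.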
order = range(1, img.ndim)
order.insert(axis, 0)
return img.reordered_axes(order).reordered_reference(order)
if axis not in (range(img.axes.ndim) +
list(img.axes.coord_names) +
list(img.reference.coord_names)):
raise ValueError('axis must be an axis number, '
'an axis name or a reference name')
# Find out which index the axis corresponds to
in_index = out_index = -1
if type(axis) == type(''):
try:
in_index = img.axes.index(axis)
except:
pass
try:
out_index = img.reference.index(axis)
except:
pass
if in_index > 0 and out_index > 0 and in_index != out_index:
raise ValueError('ambiguous choice of axis -- it exists '
'both in as an axis name and a '
'reference name')
if in_index >= 0:
axis = in_index
else:
axis = out_index
if axis == -1:
axis += img.axes.ndim
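# Move the resolved axis to the front, keeping the relative order of the
# remaining axes, and apply the same permutation to the reference coordinates.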
order = range(img.ndim)
order.remove(axis)
order.insert(0, axis)
return img.reordered_axes(order).reordered_reference(order)
def rollimg(img, axis, start=0, fix0=True):
""" Roll `axis` backwards in the inputs, until it lies before `start`
Parameters
----------
img : Image
Image whose axes and reference coordinates are to be reordered by
rollimg.
axis : str or int
Axis to be rolled, can be specified by name or as an integer. If an
integer, axis is an input axis. If a name, can be name of input or
output axis. If an output axis, we search for the closest matching
input axis, and raise an AxisError if this fails.
start : str or int, optional
position before which to roll axis `axis`. Defaults to 0. Can again be
an integer (input axis) or name of input or output axis.
fix0 : bool, optional
Whether to allow for zero scaling when searching for an input axis
matching an output axis. Useful for images where time scaling is 0.
Returns
-------
newimg : Image
Image with reordered input axes and corresponding data.
Examples
--------
>>> data = np.zeros((30,40,50,5))
>>> affine_transform = AffineTransform('ijkl', 'xyzt', np.diag([1,2,3,4,1]))
>>> im = Image(data, affine_transform)
>>> im.coordmap
AffineTransform(
function_domain=CoordinateSystem(coord_names=('i', 'j', 'k', 'l'), name='', coord_dtype=float64),
function_range=CoordinateSystem(coord_names=('x', 'y', 'z', 't'), name='', coord_dtype=float64),
affine=array([[ 1., 0., 0., 0., 0.],
[ 0., 2., 0., 0., 0.],
[ 0., 0., 3., 0., 0.],
[ 0., 0., 0., 4., 0.],
[ 0., 0., 0., 0., 1.]])
)
>>> im_t_first = rollimg(im, 't')
>>> im_t_first.shape
(5, 30, 40, 50)
>>> im_t_first.coordmap
AffineTransform(
function_domain=CoordinateSystem(coord_names=('l', 'i', 'j', 'k'), name='', coord_dtype=float64),
function_range=CoordinateSystem(coord_names=('x', 'y', 'z', 't'), name='', coord_dtype=float64),
affine=array([[ 0., 1., 0., 0., 0.],
[ 0., 0., 2., 0., 0.],
[ 0., 0., 0., 3., 0.],
[ 4., 0., 0., 0., 0.],
[ 0., 0., 0., 0., 1.]])
)
"""
axis = input_axis_index(img.coordmap, axis, fix0)
start = input_axis_index(img.coordmap, start, fix0)
order = range(img.ndim)
order.remove(axis)
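# Removing `axis` shifts every later position down by one, so a `start` that
# came after `axis` has to be decremented before re-inserting (the same
# adjustment numpy.rollaxis makes).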
if axis < start:
start -= 1
order.insert(start, axis)
return img.reordered_axes(order)
def iter_axis(img, axis, asarray=False):
""" Return generator to slice an image `img` over `axis`
Parameters
----------
img : ``Image`` instance
axis : int or str
axis identifier, either name or axis number
asarray : {False, True}, optional
Returns
-------
g : generator
such that list(g) returns a list of slices over `axis`. If `asarray` is
`False` the slices are images. If `asarray` is True, slices are the
data from the images.
Examples
--------
>>> data = np.arange(24).reshape((4,3,2))
>>> img = Image(data, AffineTransform('ijk', 'xyz', np.eye(4)))
>>> slices = list(iter_axis(img, 'j'))
>>> len(slices)
3
>>> slices[0].shape
(4, 2)
>>> slices = list(iter_axis(img, 'k', asarray=True))
>>> slices[1].sum() == data[:,:,1].sum()
True
"""
rimg = rollimg(img, axis)
for i in range(rimg.shape[0]):
if asarray:
yield rimg[i].get_data()
else:
yield rimg[i]
def synchronized_order(img, target_img,
axes=True,
reference=True):
""" Reorder reference and axes of `img` to match target_img.
Parameters
----------
img : Image
target_img : Image
axes : bool, optional
If True, synchronize the order of the axes.
reference : bool, optional
If True, synchronize the order of the reference coordinates.
Returns
-------
newimg : Image
An Image satisfying newimg.axes == target.axes (if axes == True),
newimg.reference == target.reference (if reference == True).
Examples
--------
>>> data = np.random.standard_normal((3,4,7,5))
>>> im = Image(data, AffineTransform.from_params('ijkl', 'xyzt', np.diag([1,2,3,4,1])))
>>> im_scrambled = im.reordered_axes('iljk').reordered_reference('txyz')
>>> im == im_scrambled
False
>>> im_unscrambled = synchronized_order(im_scrambled, im)
>>> im == im_unscrambled
True
The images don't have to be the same shape
>>> data2 = np.random.standard_normal((3,11,9,4))
>>> im2 = Image(data, AffineTransform.from_params('ijkl', 'xyzt', np.diag([1,2,3,4,1])))
>>> im_scrambled2 = im2.reordered_axes('iljk').reordered_reference('xtyz')
>>> im_unscrambled2 = synchronized_order(im_scrambled2, im)
>>> im_unscrambled2.coordmap == im.coordmap
True
or have the same coordmap
>>> data3 = np.random.standard_normal((3,11,9,4))
>>> im3 = Image(data3, AffineTransform.from_params('ijkl', 'xyzt', np.diag([1,9,3,-2,1])))
>>> im_scrambled3 = im3.reordered_axes('iljk').reordered_reference('xtyz')
>>> im_unscrambled3 = synchronized_order(im_scrambled3, im)
>>> im_unscrambled3.axes == im.axes
True
>>> im_unscrambled3.reference == im.reference
True
>>> im_unscrambled4 = synchronized_order(im_scrambled3, im, axes=False)
>>> im_unscrambled4.axes == im.axes
False
>>> im_unscrambled4.axes == im_scrambled3.axes
True
>>> im_unscrambled4.reference == im.reference
True
"""
# Caution, we can't just use target_img.reference because other subclasses
# of Image may not have all axes in the .reference attribute.
target_axes = target_img.axes # = target_img.coordmap.function_domain
# the below not necessarily == target_image.reference
target_reference = target_img.coordmap.function_range
if axes:
img = img.reordered_axes(target_axes.coord_names)
if reference:
img = img.reordered_reference(target_reference.coord_names)
return img
def is_image(obj):
''' Returns true if this object obeys the Image API
This allows us to test for something that is duck-typing an image.
For now an object must have a 'coordmap' attribute, a 'metadata' attribute,
and a callable 'get_data' method.
Parameters
----------
obj : object
object for which to test API
Returns
-------
is_img : bool
True if object obeys image API
Examples
--------
>>> from nipy.testing import anatfile
>>> from nipy.io.api import load_image
>>> img = load_image(anatfile)
>>> is_image(img)
True
>>> class C(object): pass
>>> c = C()
>>> is_image(c)
False
'''
if not hasattr(obj, 'coordmap') or not hasattr(obj, 'metadata'):
return False
return callable(getattr(obj, 'get_data'))
|
py | 1a2fa0d94235cb305d4326941f062b6b4474b851 | #!/usr/bin/env ambari-python-wrap
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import re
import os
import sys
import socket
from math import ceil, floor
from resource_management.core.logger import Logger
from resource_management.libraries.functions.mounted_dirs_helper import get_mounts_with_multiple_data_dirs
from stack_advisor import DefaultStackAdvisor
class ODPi20StackAdvisor(DefaultStackAdvisor):
def __init__(self):
super(ODPi20StackAdvisor, self).__init__()
Logger.initialize_logger()
def getComponentLayoutValidations(self, services, hosts):
"""Returns array of Validation objects about issues with hostnames components assigned to"""
items = super(ODPi20StackAdvisor, self).getComponentLayoutValidations(services, hosts)
# Validating NAMENODE and SECONDARY_NAMENODE are on different hosts if possible
# Use a set for fast lookup
hostsSet = set(super(ODPi20StackAdvisor, self).getActiveHosts([host["Hosts"] for host in hosts["items"]])) #[host["Hosts"]["host_name"] for host in hosts["items"]]
hostsCount = len(hostsSet)
componentsListList = [service["components"] for service in services["services"]]
componentsList = [item for sublist in componentsListList for item in sublist]
nameNodeHosts = [component["StackServiceComponents"]["hostnames"] for component in componentsList if component["StackServiceComponents"]["component_name"] == "NAMENODE"]
secondaryNameNodeHosts = [component["StackServiceComponents"]["hostnames"] for component in componentsList if component["StackServiceComponents"]["component_name"] == "SECONDARY_NAMENODE"]
# Validating cardinality
for component in componentsList:
if component["StackServiceComponents"]["cardinality"] is not None:
componentName = component["StackServiceComponents"]["component_name"]
componentDisplayName = component["StackServiceComponents"]["display_name"]
componentHosts = []
if component["StackServiceComponents"]["hostnames"] is not None:
componentHosts = [componentHost for componentHost in component["StackServiceComponents"]["hostnames"] if componentHost in hostsSet]
componentHostsCount = len(componentHosts)
cardinality = str(component["StackServiceComponents"]["cardinality"])
# cardinality types: null, 1+, 1-2, 1, ALL
message = None
if "+" in cardinality:
hostsMin = int(cardinality[:-1])
if componentHostsCount < hostsMin:
message = "At least {0} {1} components should be installed in cluster.".format(hostsMin, componentDisplayName)
elif "-" in cardinality:
nums = cardinality.split("-")
hostsMin = int(nums[0])
hostsMax = int(nums[1])
if componentHostsCount > hostsMax or componentHostsCount < hostsMin:
message = "Between {0} and {1} {2} components should be installed in cluster.".format(hostsMin, hostsMax, componentDisplayName)
elif "ALL" == cardinality:
if componentHostsCount != hostsCount:
message = "{0} component should be installed on all hosts in cluster.".format(componentDisplayName)
else:
if componentHostsCount != int(cardinality):
message = "Exactly {0} {1} components should be installed in cluster.".format(int(cardinality), componentDisplayName)
if message is not None:
items.append({"type": 'host-component', "level": 'ERROR', "message": message, "component-name": componentName})
# Validating host-usage
usedHostsListList = [component["StackServiceComponents"]["hostnames"] for component in componentsList if not self.isComponentNotValuable(component)]
usedHostsList = [item for sublist in usedHostsListList for item in sublist]
nonUsedHostsList = [item for item in hostsSet if item not in usedHostsList]
for host in nonUsedHostsList:
items.append( { "type": 'host-component', "level": 'ERROR', "message": 'Host is not used', "host": str(host) } )
return items
def getServiceConfigurationRecommenderDict(self):
return {
"YARN": self.recommendYARNConfigurations,
"MAPREDUCE2": self.recommendMapReduce2Configurations,
"HDFS": self.recommendHDFSConfigurations,
"HBASE": self.recommendHbaseConfigurations,
"STORM": self.recommendStormConfigurations,
"AMBARI_METRICS": self.recommendAmsConfigurations,
"RANGER": self.recommendRangerConfigurations
}
def recommendYARNConfigurations(self, configurations, clusterData, services, hosts):
putYarnProperty = self.putProperty(configurations, "yarn-site", services)
putYarnPropertyAttribute = self.putPropertyAttribute(configurations, "yarn-site")
putYarnEnvProperty = self.putProperty(configurations, "yarn-env", services)
nodemanagerMinRam = 1048576 # 1TB in mb
if "referenceNodeManagerHost" in clusterData:
nodemanagerMinRam = min(clusterData["referenceNodeManagerHost"]["total_mem"]/1024, nodemanagerMinRam)
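# total_mem is reported in kB (getConfigurationClusterSummary divides it by
# 1024*1024 to get GB), so the /1024 above yields MB to match nodemanagerMinRam.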
putYarnProperty('yarn.nodemanager.resource.memory-mb', int(round(min(clusterData['containers'] * clusterData['ramPerContainer'], nodemanagerMinRam))))
putYarnProperty('yarn.scheduler.minimum-allocation-mb', int(clusterData['ramPerContainer']))
putYarnProperty('yarn.scheduler.maximum-allocation-mb', int(configurations["yarn-site"]["properties"]["yarn.nodemanager.resource.memory-mb"]))
putYarnEnvProperty('min_user_id', self.get_system_min_uid())
sc_queue_name = self.recommendYarnQueue(services, "yarn-env", "service_check.queue.name")
if sc_queue_name is not None:
putYarnEnvProperty("service_check.queue.name", sc_queue_name)
containerExecutorGroup = 'hadoop'
if 'cluster-env' in services['configurations'] and 'user_group' in services['configurations']['cluster-env']['properties']:
containerExecutorGroup = services['configurations']['cluster-env']['properties']['user_group']
putYarnProperty("yarn.nodemanager.linux-container-executor.group", containerExecutorGroup)
servicesList = [service["StackServices"]["service_name"] for service in services["services"]]
if "TEZ" in servicesList:
ambari_user = self.getAmbariUser(services)
ambariHostName = socket.getfqdn()
putYarnProperty("yarn.timeline-service.http-authentication.proxyuser.{0}.hosts".format(ambari_user), ambariHostName)
putYarnProperty("yarn.timeline-service.http-authentication.proxyuser.{0}.groups".format(ambari_user), "*")
old_ambari_user = self.getOldAmbariUser(services)
if old_ambari_user is not None:
putYarnPropertyAttribute("yarn.timeline-service.http-authentication.proxyuser.{0}.hosts".format(old_ambari_user), 'delete', 'true')
putYarnPropertyAttribute("yarn.timeline-service.http-authentication.proxyuser.{0}.groups".format(old_ambari_user), 'delete', 'true')
def recommendMapReduce2Configurations(self, configurations, clusterData, services, hosts):
putMapredProperty = self.putProperty(configurations, "mapred-site", services)
putMapredProperty('yarn.app.mapreduce.am.resource.mb', int(clusterData['amMemory']))
putMapredProperty('yarn.app.mapreduce.am.command-opts', "-Xmx" + str(int(round(0.8 * clusterData['amMemory']))) + "m")
putMapredProperty('mapreduce.map.memory.mb', clusterData['mapMemory'])
putMapredProperty('mapreduce.reduce.memory.mb', int(clusterData['reduceMemory']))
putMapredProperty('mapreduce.map.java.opts', "-Xmx" + str(int(round(0.8 * clusterData['mapMemory']))) + "m")
putMapredProperty('mapreduce.reduce.java.opts', "-Xmx" + str(int(round(0.8 * clusterData['reduceMemory']))) + "m")
putMapredProperty('mapreduce.task.io.sort.mb', min(int(round(0.4 * clusterData['mapMemory'])), 1024))
mr_queue = self.recommendYarnQueue(services, "mapred-site", "mapreduce.job.queuename")
if mr_queue is not None:
putMapredProperty("mapreduce.job.queuename", mr_queue)
def getAmbariUser(self, services):
ambari_user = services['ambari-server-properties']['ambari-server.user']
if "cluster-env" in services["configurations"] \
and "ambari_principal_name" in services["configurations"]["cluster-env"]["properties"] \
and "security_enabled" in services["configurations"]["cluster-env"]["properties"] \
and services["configurations"]["cluster-env"]["properties"]["security_enabled"].lower() == "true":
ambari_user = services["configurations"]["cluster-env"]["properties"]["ambari_principal_name"]
ambari_user = ambari_user.split('@')[0]
return ambari_user
def getOldAmbariUser(self, services):
ambari_user = None
if "cluster-env" in services["configurations"]:
if "security_enabled" in services["configurations"]["cluster-env"]["properties"] \
and services["configurations"]["cluster-env"]["properties"]["security_enabled"].lower() == "true":
ambari_user = services['ambari-server-properties']['ambari-server.user']
elif "ambari_principal_name" in services["configurations"]["cluster-env"]["properties"]:
ambari_user = services["configurations"]["cluster-env"]["properties"]["ambari_principal_name"]
ambari_user = ambari_user.split('@')[0]
return ambari_user
def recommendAmbariProxyUsersForHDFS(self, services, servicesList, putCoreSiteProperty, putCoreSitePropertyAttribute):
if "HDFS" in servicesList:
ambari_user = self.getAmbariUser(services)
ambariHostName = socket.getfqdn()
putCoreSiteProperty("hadoop.proxyuser.{0}.hosts".format(ambari_user), ambariHostName)
putCoreSiteProperty("hadoop.proxyuser.{0}.groups".format(ambari_user), "*")
old_ambari_user = self.getOldAmbariUser(services)
if old_ambari_user is not None:
putCoreSitePropertyAttribute("hadoop.proxyuser.{0}.hosts".format(old_ambari_user), 'delete', 'true')
putCoreSitePropertyAttribute("hadoop.proxyuser.{0}.groups".format(old_ambari_user), 'delete', 'true')
def recommendHadoopProxyUsers (self, configurations, services, hosts):
servicesList = [service["StackServices"]["service_name"] for service in services["services"]]
users = {}
if 'forced-configurations' not in services:
services["forced-configurations"] = []
if "HDFS" in servicesList:
hdfs_user = None
if "hadoop-env" in services["configurations"] and "hdfs_user" in services["configurations"]["hadoop-env"]["properties"]:
hdfs_user = services["configurations"]["hadoop-env"]["properties"]["hdfs_user"]
if not hdfs_user in users and hdfs_user is not None:
users[hdfs_user] = {"propertyHosts" : "*","propertyGroups" : "*", "config" : "hadoop-env", "propertyName" : "hdfs_user"}
if "OOZIE" in servicesList:
oozie_user = None
if "oozie-env" in services["configurations"] and "oozie_user" in services["configurations"]["oozie-env"]["properties"]:
oozie_user = services["configurations"]["oozie-env"]["properties"]["oozie_user"]
oozieServerHosts = self.getHostsWithComponent("OOZIE", "OOZIE_SERVER", services, hosts)
if oozieServerHosts is not None:
oozieServerHostsNameList = []
for oozieServerHost in oozieServerHosts:
oozieServerHostsNameList.append(oozieServerHost["Hosts"]["host_name"])
oozieServerHostsNames = ",".join(oozieServerHostsNameList)
if not oozie_user in users and oozie_user is not None:
users[oozie_user] = {"propertyHosts" : oozieServerHostsNames,"propertyGroups" : "*", "config" : "oozie-env", "propertyName" : "oozie_user"}
hive_user = None
if "HIVE" in servicesList:
webhcat_user = None
if "hive-env" in services["configurations"] and "hive_user" in services["configurations"]["hive-env"]["properties"] \
and "webhcat_user" in services["configurations"]["hive-env"]["properties"]:
hive_user = services["configurations"]["hive-env"]["properties"]["hive_user"]
webhcat_user = services["configurations"]["hive-env"]["properties"]["webhcat_user"]
hiveServerHosts = self.getHostsWithComponent("HIVE", "HIVE_SERVER", services, hosts)
hiveServerInteractiveHosts = self.getHostsWithComponent("HIVE", "HIVE_SERVER_INTERACTIVE", services, hosts)
webHcatServerHosts = self.getHostsWithComponent("HIVE", "WEBHCAT_SERVER", services, hosts)
if hiveServerHosts is not None:
hiveServerHostsNameList = []
for hiveServerHost in hiveServerHosts:
hiveServerHostsNameList.append(hiveServerHost["Hosts"]["host_name"])
# Append Hive Server Interactive host as well, as it is Hive2/HiveServer2 component.
if hiveServerInteractiveHosts:
for hiveServerInteractiveHost in hiveServerInteractiveHosts:
hiveServerInteractiveHostName = hiveServerInteractiveHost["Hosts"]["host_name"]
if hiveServerInteractiveHostName not in hiveServerHostsNameList:
hiveServerHostsNameList.append(hiveServerInteractiveHostName)
Logger.info("Appended (if not exiting), Hive Server Interactive Host : '{0}', to Hive Server Host List : '{1}'".format(hiveServerInteractiveHostName, hiveServerHostsNameList))
hiveServerHostsNames = ",".join(hiveServerHostsNameList) # includes Hive Server interactive host also.
Logger.info("Hive Server and Hive Server Interactive (if enabled) Host List : {0}".format(hiveServerHostsNameList))
if not hive_user in users and hive_user is not None:
users[hive_user] = {"propertyHosts" : hiveServerHostsNames,"propertyGroups" : "*", "config" : "hive-env", "propertyName" : "hive_user"}
if webHcatServerHosts is not None:
webHcatServerHostsNameList = []
for webHcatServerHost in webHcatServerHosts:
webHcatServerHostsNameList.append(webHcatServerHost["Hosts"]["host_name"])
webHcatServerHostsNames = ",".join(webHcatServerHostsNameList)
if not webhcat_user in users and webhcat_user is not None:
users[webhcat_user] = {"propertyHosts" : webHcatServerHostsNames,"propertyGroups" : "*", "config" : "hive-env", "propertyName" : "webhcat_user"}
if "YARN" in servicesList:
yarn_user = None
if "yarn-env" in services["configurations"] and "yarn_user" in services["configurations"]["yarn-env"]["properties"]:
yarn_user = services["configurations"]["yarn-env"]["properties"]["yarn_user"]
rmHosts = self.getHostsWithComponent("YARN", "RESOURCEMANAGER", services, hosts)
if len(rmHosts) > 1:
rmHostsNameList = []
for rmHost in rmHosts:
rmHostsNameList.append(rmHost["Hosts"]["host_name"])
rmHostsNames = ",".join(rmHostsNameList)
if not yarn_user in users and yarn_user is not None:
users[yarn_user] = {"propertyHosts" : rmHostsNames, "config" : "yarn-env", "propertyName" : "yarn_user"}
if "FALCON" in servicesList:
falconUser = None
if "falcon-env" in services["configurations"] and "falcon_user" in services["configurations"]["falcon-env"]["properties"]:
falconUser = services["configurations"]["falcon-env"]["properties"]["falcon_user"]
if not falconUser in users and falconUser is not None:
users[falconUser] = {"propertyHosts" : "*","propertyGroups" : "*", "config" : "falcon-env", "propertyName" : "falcon_user"}
if "SPARK" in servicesList:
livyUser = None
if "livy-env" in services["configurations"] and "livy_user" in services["configurations"]["livy-env"]["properties"]:
livyUser = services["configurations"]["livy-env"]["properties"]["livy_user"]
if not livyUser in users and livyUser is not None:
users[livyUser] = {"propertyHosts" : "*","propertyGroups" : "*", "config" : "livy-env", "propertyName" : "livy_user"}
if "SPARK2" in servicesList:
livyUser = None
if "livy2-env" in services["configurations"] and "livy_user" in services["configurations"]["livy2-env"]["properties"]:
livyUser = services["configurations"]["livy2-env"]["properties"]["livy_user"]
if not livyUser in users and livyUser is not None:
users[livyUser] = {"propertyHosts" : "*","propertyGroups" : "*", "config" : "livy2-env", "propertyName" : "livy_user"}
putCoreSiteProperty = self.putProperty(configurations, "core-site", services)
putCoreSitePropertyAttribute = self.putPropertyAttribute(configurations, "core-site")
for user_name, user_properties in users.iteritems():
if hive_user and hive_user == user_name:
if "propertyHosts" in user_properties:
services["forced-configurations"].append({"type" : "core-site", "name" : "hadoop.proxyuser.{0}.hosts".format(hive_user)})
# Add properties "hadoop.proxyuser.*.hosts", "hadoop.proxyuser.*.groups" to core-site for all users
putCoreSiteProperty("hadoop.proxyuser.{0}.hosts".format(user_name) , user_properties["propertyHosts"])
Logger.info("Updated hadoop.proxyuser.{0}.hosts as : {1}".format(hive_user, user_properties["propertyHosts"]))
if "propertyGroups" in user_properties:
putCoreSiteProperty("hadoop.proxyuser.{0}.groups".format(user_name) , user_properties["propertyGroups"])
# Remove old properties if user was renamed
userOldValue = getOldValue(self, services, user_properties["config"], user_properties["propertyName"])
if userOldValue is not None and userOldValue != user_name:
putCoreSitePropertyAttribute("hadoop.proxyuser.{0}.hosts".format(userOldValue), 'delete', 'true')
services["forced-configurations"].append({"type" : "core-site", "name" : "hadoop.proxyuser.{0}.hosts".format(userOldValue)})
services["forced-configurations"].append({"type" : "core-site", "name" : "hadoop.proxyuser.{0}.hosts".format(user_name)})
if "propertyGroups" in user_properties:
putCoreSitePropertyAttribute("hadoop.proxyuser.{0}.groups".format(userOldValue), 'delete', 'true')
services["forced-configurations"].append({"type" : "core-site", "name" : "hadoop.proxyuser.{0}.groups".format(userOldValue)})
services["forced-configurations"].append({"type" : "core-site", "name" : "hadoop.proxyuser.{0}.groups".format(user_name)})
self.recommendAmbariProxyUsersForHDFS(services, servicesList, putCoreSiteProperty, putCoreSitePropertyAttribute)
def recommendHDFSConfigurations(self, configurations, clusterData, services, hosts):
putHDFSProperty = self.putProperty(configurations, "hadoop-env", services)
putHDFSSiteProperty = self.putProperty(configurations, "hdfs-site", services)
putHDFSSitePropertyAttributes = self.putPropertyAttribute(configurations, "hdfs-site")
putHDFSProperty('namenode_heapsize', max(int(clusterData['totalAvailableRam'] / 2), 1024))
putHDFSProperty = self.putProperty(configurations, "hadoop-env", services)
putHDFSProperty('namenode_opt_newsize', max(int(clusterData['totalAvailableRam'] / 8), 128))
putHDFSProperty = self.putProperty(configurations, "hadoop-env", services)
putHDFSProperty('namenode_opt_maxnewsize', max(int(clusterData['totalAvailableRam'] / 8), 256))
# Check if NN HA is enabled and recommend removing dfs.namenode.rpc-address
hdfsSiteProperties = getServicesSiteProperties(services, "hdfs-site")
nameServices = None
if hdfsSiteProperties and 'dfs.internal.nameservices' in hdfsSiteProperties:
nameServices = hdfsSiteProperties['dfs.internal.nameservices']
if nameServices is None and hdfsSiteProperties and 'dfs.nameservices' in hdfsSiteProperties:
nameServices = hdfsSiteProperties['dfs.nameservices']
if nameServices and "dfs.ha.namenodes.%s" % nameServices in hdfsSiteProperties:
namenodes = hdfsSiteProperties["dfs.ha.namenodes.%s" % nameServices]
if len(namenodes.split(',')) > 1:
putHDFSSitePropertyAttributes("dfs.namenode.rpc-address", "delete", "true")
#Initialize default 'dfs.datanode.data.dir' if needed
if (not hdfsSiteProperties) or ('dfs.datanode.data.dir' not in hdfsSiteProperties):
dataDirs = '/hadoop/hdfs/data'
putHDFSSiteProperty('dfs.datanode.data.dir', dataDirs)
else:
dataDirs = hdfsSiteProperties['dfs.datanode.data.dir'].split(",")
# dfs.datanode.du.reserved should be set to 10-15% of volume size
# For each host, select the maximum volume size; then take the minimum across all hosts.
# This ensures that each host will have at least one data dir with available space.
reservedSizeRecommendation = 0l #kBytes
for host in hosts["items"]:
mountPoints = []
mountPointDiskAvailableSpace = [] #kBytes
for diskInfo in host["Hosts"]["disk_info"]:
mountPoints.append(diskInfo["mountpoint"])
mountPointDiskAvailableSpace.append(long(diskInfo["size"]))
maxFreeVolumeSizeForHost = 0l #kBytes
for dataDir in dataDirs:
mp = getMountPointForDir(dataDir, mountPoints)
for i in range(len(mountPoints)):
if mp == mountPoints[i]:
if mountPointDiskAvailableSpace[i] > maxFreeVolumeSizeForHost:
maxFreeVolumeSizeForHost = mountPointDiskAvailableSpace[i]
if not reservedSizeRecommendation or maxFreeVolumeSizeForHost and maxFreeVolumeSizeForHost < reservedSizeRecommendation:
reservedSizeRecommendation = maxFreeVolumeSizeForHost
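# reservedSizeRecommendation is still in kB here: the * 1024 below converts it
# to bytes and the / 8 keeps 12.5% of the volume, in line with the 10-15%
# guideline above, with a 1 GB floor.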
if reservedSizeRecommendation:
reservedSizeRecommendation = max(reservedSizeRecommendation * 1024 / 8, 1073741824) # At least 1Gb is reserved
putHDFSSiteProperty('dfs.datanode.du.reserved', reservedSizeRecommendation) #Bytes
# recommendations for "hadoop.proxyuser.*.hosts", "hadoop.proxyuser.*.groups" properties in core-site
self.recommendHadoopProxyUsers(configurations, services, hosts)
def recommendHbaseConfigurations(self, configurations, clusterData, services, hosts):
# recommendations for HBase env config
# If cluster size is < 100, hbase master heap = 2G
# else If cluster size is < 500, hbase master heap = 4G
# else hbase master heap = 8G
# for small test clusters use 1 gb
hostsCount = 0
if hosts and "items" in hosts:
hostsCount = len(hosts["items"])
hbaseMasterRam = {
hostsCount < 20: 1,
20 <= hostsCount < 100: 2,
100 <= hostsCount < 500: 4,
500 <= hostsCount: 8
}[True]
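# The boolean-keyed dict above is a compact range lookup: exactly one key is
# True for a given hostsCount, and indexing with [True] picks that bucket's
# master heap size in GB.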
putHbaseProperty = self.putProperty(configurations, "hbase-env", services)
putHbaseProperty('hbase_regionserver_heapsize', int(clusterData['hbaseRam']) * 1024)
putHbaseProperty('hbase_master_heapsize', hbaseMasterRam * 1024)
# recommendations for HBase site config
putHbaseSiteProperty = self.putProperty(configurations, "hbase-site", services)
if 'hbase-site' in services['configurations'] and 'hbase.superuser' in services['configurations']['hbase-site']['properties'] \
and 'hbase-env' in services['configurations'] and 'hbase_user' in services['configurations']['hbase-env']['properties'] \
and services['configurations']['hbase-env']['properties']['hbase_user'] != services['configurations']['hbase-site']['properties']['hbase.superuser']:
putHbaseSiteProperty("hbase.superuser", services['configurations']['hbase-env']['properties']['hbase_user'])
def recommendRangerConfigurations(self, configurations, clusterData, services, hosts):
putRangerAdminProperty = self.putProperty(configurations, "admin-properties", services)
# Build policymgr_external_url
protocol = 'http'
ranger_admin_host = 'localhost'
port = '6080'
# Check if http is disabled. For HDP-2.3 this can be checked in ranger-admin-site/ranger.service.http.enabled
# For Ranger-0.4.0 this can be checked in ranger-site/http.enabled
if ('ranger-site' in services['configurations'] and 'http.enabled' in services['configurations']['ranger-site']['properties'] \
and services['configurations']['ranger-site']['properties']['http.enabled'].lower() == 'false') or \
('ranger-admin-site' in services['configurations'] and 'ranger.service.http.enabled' in services['configurations']['ranger-admin-site']['properties'] \
and services['configurations']['ranger-admin-site']['properties']['ranger.service.http.enabled'].lower() == 'false'):
# HTTPS protocol is used
protocol = 'https'
# Starting Ranger-0.5.0.2.3 port stored in ranger-admin-site ranger.service.https.port
if 'ranger-admin-site' in services['configurations'] and \
'ranger.service.https.port' in services['configurations']['ranger-admin-site']['properties']:
port = services['configurations']['ranger-admin-site']['properties']['ranger.service.https.port']
# In Ranger-0.4.0 port stored in ranger-site https.service.port
elif 'ranger-site' in services['configurations'] and \
'https.service.port' in services['configurations']['ranger-site']['properties']:
port = services['configurations']['ranger-site']['properties']['https.service.port']
else:
# HTTP protocol is used
# Starting Ranger-0.5.0.2.3 port stored in ranger-admin-site ranger.service.http.port
if 'ranger-admin-site' in services['configurations'] and \
'ranger.service.http.port' in services['configurations']['ranger-admin-site']['properties']:
port = services['configurations']['ranger-admin-site']['properties']['ranger.service.http.port']
# In Ranger-0.4.0 port stored in ranger-site http.service.port
elif 'ranger-site' in services['configurations'] and \
'http.service.port' in services['configurations']['ranger-site']['properties']:
port = services['configurations']['ranger-site']['properties']['http.service.port']
ranger_admin_hosts = self.getComponentHostNames(services, "RANGER", "RANGER_ADMIN")
if ranger_admin_hosts:
if len(ranger_admin_hosts) > 1 \
and services['configurations'] \
and 'admin-properties' in services['configurations'] and 'policymgr_external_url' in services['configurations']['admin-properties']['properties'] \
and services['configurations']['admin-properties']['properties']['policymgr_external_url'] \
and services['configurations']['admin-properties']['properties']['policymgr_external_url'].strip():
# in case of HA deployment keep the policymgr_external_url specified in the config
policymgr_external_url = services['configurations']['admin-properties']['properties']['policymgr_external_url']
else:
ranger_admin_host = ranger_admin_hosts[0]
policymgr_external_url = "%s://%s:%s" % (protocol, ranger_admin_host, port)
putRangerAdminProperty('policymgr_external_url', policymgr_external_url)
rangerServiceVersion = [service['StackServices']['service_version'] for service in services["services"] if service['StackServices']['service_name'] == 'RANGER'][0]
if rangerServiceVersion == '0.4.0':
# Recommend ldap settings based on ambari.properties configuration
# If 'ambari.ldap.isConfigured' == true
# For Ranger version 0.4.0
if 'ambari-server-properties' in services and \
'ambari.ldap.isConfigured' in services['ambari-server-properties'] and \
services['ambari-server-properties']['ambari.ldap.isConfigured'].lower() == "true":
putUserSyncProperty = self.putProperty(configurations, "usersync-properties", services)
serverProperties = services['ambari-server-properties']
if 'authentication.ldap.managerDn' in serverProperties:
putUserSyncProperty('SYNC_LDAP_BIND_DN', serverProperties['authentication.ldap.managerDn'])
if 'authentication.ldap.primaryUrl' in serverProperties:
ldap_protocol = 'ldap://'
if 'authentication.ldap.useSSL' in serverProperties and serverProperties['authentication.ldap.useSSL'] == 'true':
ldap_protocol = 'ldaps://'
ldapUrl = ldap_protocol + serverProperties['authentication.ldap.primaryUrl'] if serverProperties['authentication.ldap.primaryUrl'] else serverProperties['authentication.ldap.primaryUrl']
putUserSyncProperty('SYNC_LDAP_URL', ldapUrl)
if 'authentication.ldap.userObjectClass' in serverProperties:
putUserSyncProperty('SYNC_LDAP_USER_OBJECT_CLASS', serverProperties['authentication.ldap.userObjectClass'])
if 'authentication.ldap.usernameAttribute' in serverProperties:
putUserSyncProperty('SYNC_LDAP_USER_NAME_ATTRIBUTE', serverProperties['authentication.ldap.usernameAttribute'])
# Set Ranger Admin Authentication method
if 'admin-properties' in services['configurations'] and 'usersync-properties' in services['configurations'] and \
'SYNC_SOURCE' in services['configurations']['usersync-properties']['properties']:
rangerUserSyncSource = services['configurations']['usersync-properties']['properties']['SYNC_SOURCE']
authenticationMethod = rangerUserSyncSource.upper()
if authenticationMethod != 'FILE':
putRangerAdminProperty('authentication_method', authenticationMethod)
# Recommend xasecure.audit.destination.hdfs.dir
# For Ranger version 0.4.0
servicesList = [service["StackServices"]["service_name"] for service in services["services"]]
putRangerEnvProperty = self.putProperty(configurations, "ranger-env", services)
include_hdfs = "HDFS" in servicesList
if include_hdfs:
if 'core-site' in services['configurations'] and ('fs.defaultFS' in services['configurations']['core-site']['properties']):
default_fs = services['configurations']['core-site']['properties']['fs.defaultFS']
default_fs += '/ranger/audit/%app-type%/%time:yyyyMMdd%'
putRangerEnvProperty('xasecure.audit.destination.hdfs.dir', default_fs)
# Recommend Ranger Audit properties for ranger supported services
# For Ranger version 0.4.0
ranger_services = [
{'service_name': 'HDFS', 'audit_file': 'ranger-hdfs-plugin-properties'},
{'service_name': 'HBASE', 'audit_file': 'ranger-hbase-plugin-properties'},
{'service_name': 'HIVE', 'audit_file': 'ranger-hive-plugin-properties'},
{'service_name': 'KNOX', 'audit_file': 'ranger-knox-plugin-properties'},
{'service_name': 'STORM', 'audit_file': 'ranger-storm-plugin-properties'}
]
for item in range(len(ranger_services)):
if ranger_services[item]['service_name'] in servicesList:
component_audit_file = ranger_services[item]['audit_file']
if component_audit_file in services["configurations"]:
ranger_audit_dict = [
{'filename': 'ranger-env', 'configname': 'xasecure.audit.destination.db', 'target_configname': 'XAAUDIT.DB.IS_ENABLED'},
{'filename': 'ranger-env', 'configname': 'xasecure.audit.destination.hdfs', 'target_configname': 'XAAUDIT.HDFS.IS_ENABLED'},
{'filename': 'ranger-env', 'configname': 'xasecure.audit.destination.hdfs.dir', 'target_configname': 'XAAUDIT.HDFS.DESTINATION_DIRECTORY'}
]
putRangerAuditProperty = self.putProperty(configurations, component_audit_file, services)
for item in ranger_audit_dict:
if item['filename'] in services["configurations"] and item['configname'] in services["configurations"][item['filename']]["properties"]:
if item['filename'] in configurations and item['configname'] in configurations[item['filename']]["properties"]:
rangerAuditProperty = configurations[item['filename']]["properties"][item['configname']]
else:
rangerAuditProperty = services["configurations"][item['filename']]["properties"][item['configname']]
putRangerAuditProperty(item['target_configname'], rangerAuditProperty)
def getAmsMemoryRecommendation(self, services, hosts):
# MB per sink in hbase heapsize
HEAP_PER_MASTER_COMPONENT = 50
HEAP_PER_SLAVE_COMPONENT = 10
schMemoryMap = {
"HDFS": {
"NAMENODE": HEAP_PER_MASTER_COMPONENT,
"DATANODE": HEAP_PER_SLAVE_COMPONENT
},
"YARN": {
"RESOURCEMANAGER": HEAP_PER_MASTER_COMPONENT,
},
"HBASE": {
"HBASE_MASTER": HEAP_PER_MASTER_COMPONENT,
"HBASE_REGIONSERVER": HEAP_PER_SLAVE_COMPONENT
},
"ACCUMULO": {
"ACCUMULO_MASTER": HEAP_PER_MASTER_COMPONENT,
"ACCUMULO_TSERVER": HEAP_PER_SLAVE_COMPONENT
},
"KAFKA": {
"KAFKA_BROKER": HEAP_PER_MASTER_COMPONENT
},
"FLUME": {
"FLUME_HANDLER": HEAP_PER_SLAVE_COMPONENT
},
"STORM": {
"NIMBUS": HEAP_PER_MASTER_COMPONENT,
},
"AMBARI_METRICS": {
"METRICS_COLLECTOR": HEAP_PER_MASTER_COMPONENT,
"METRICS_MONITOR": HEAP_PER_SLAVE_COMPONENT
}
}
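# Heap sizing is driven by the number of metric sinks: each master component
# contributes HEAP_PER_MASTER_COMPONENT MB and each slave component
# HEAP_PER_SLAVE_COMPONENT MB, damped by the ** 0.9 exponent below, on top of
# a 500 MB floor; the collector heap is a quarter of the HBase heap once that
# exceeds 2048 MB, otherwise 512 MB.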
total_sinks_count = 0
# minimum heap size
hbase_heapsize = 500
for serviceName, componentsDict in schMemoryMap.items():
for componentName, multiplier in componentsDict.items():
schCount = len(
self.getHostsWithComponent(serviceName, componentName, services,
hosts))
hbase_heapsize += int((schCount * multiplier) ** 0.9)
total_sinks_count += schCount
collector_heapsize = int(hbase_heapsize/4 if hbase_heapsize > 2048 else 512)
return round_to_n(collector_heapsize), round_to_n(hbase_heapsize), total_sinks_count
def recommendStormConfigurations(self, configurations, clusterData, services, hosts):
putStormSiteProperty = self.putProperty(configurations, "storm-site", services)
servicesList = [service["StackServices"]["service_name"] for service in services["services"]]
# Storm AMS integration
if 'AMBARI_METRICS' in servicesList:
putStormSiteProperty('metrics.reporter.register', 'org.apache.hadoop.metrics2.sink.storm.StormTimelineMetricsReporter')
def recommendAmsConfigurations(self, configurations, clusterData, services, hosts):
putAmsEnvProperty = self.putProperty(configurations, "ams-env", services)
putAmsHbaseSiteProperty = self.putProperty(configurations, "ams-hbase-site", services)
putAmsSiteProperty = self.putProperty(configurations, "ams-site", services)
putHbaseEnvProperty = self.putProperty(configurations, "ams-hbase-env", services)
putGrafanaProperty = self.putProperty(configurations, "ams-grafana-env", services)
putGrafanaPropertyAttribute = self.putPropertyAttribute(configurations, "ams-grafana-env")
amsCollectorHosts = self.getComponentHostNames(services, "AMBARI_METRICS", "METRICS_COLLECTOR")
if 'cluster-env' in services['configurations'] and \
'metrics_collector_vip_host' in services['configurations']['cluster-env']['properties']:
metric_collector_host = services['configurations']['cluster-env']['properties']['metrics_collector_vip_host']
else:
metric_collector_host = 'localhost' if len(amsCollectorHosts) == 0 else amsCollectorHosts[0]
putAmsSiteProperty("timeline.metrics.service.webapp.address", str(metric_collector_host) + ":6188")
log_dir = "/var/log/ambari-metrics-collector"
if "ams-env" in services["configurations"]:
if "metrics_collector_log_dir" in services["configurations"]["ams-env"]["properties"]:
log_dir = services["configurations"]["ams-env"]["properties"]["metrics_collector_log_dir"]
putHbaseEnvProperty("hbase_log_dir", log_dir)
defaultFs = 'file:///'
if "core-site" in services["configurations"] and \
"fs.defaultFS" in services["configurations"]["core-site"]["properties"]:
defaultFs = services["configurations"]["core-site"]["properties"]["fs.defaultFS"]
operatingMode = "embedded"
if "ams-site" in services["configurations"]:
if "timeline.metrics.service.operation.mode" in services["configurations"]["ams-site"]["properties"]:
operatingMode = services["configurations"]["ams-site"]["properties"]["timeline.metrics.service.operation.mode"]
if operatingMode == "distributed":
putAmsSiteProperty("timeline.metrics.service.watcher.disabled", 'true')
putAmsHbaseSiteProperty("hbase.cluster.distributed", 'true')
else:
putAmsSiteProperty("timeline.metrics.service.watcher.disabled", 'false')
putAmsHbaseSiteProperty("hbase.cluster.distributed", 'false')
rootDir = "file:///var/lib/ambari-metrics-collector/hbase"
tmpDir = "/var/lib/ambari-metrics-collector/hbase-tmp"
zk_port_default = []
if "ams-hbase-site" in services["configurations"]:
if "hbase.rootdir" in services["configurations"]["ams-hbase-site"]["properties"]:
rootDir = services["configurations"]["ams-hbase-site"]["properties"]["hbase.rootdir"]
if "hbase.tmp.dir" in services["configurations"]["ams-hbase-site"]["properties"]:
tmpDir = services["configurations"]["ams-hbase-site"]["properties"]["hbase.tmp.dir"]
if "hbase.zookeeper.property.clientPort" in services["configurations"]["ams-hbase-site"]["properties"]:
zk_port_default = services["configurations"]["ams-hbase-site"]["properties"]["hbase.zookeeper.property.clientPort"]
# Skip recommendation item if default value is present
if operatingMode == "distributed" and not "{{zookeeper_clientPort}}" in zk_port_default:
zkPort = self.getZKPort(services)
putAmsHbaseSiteProperty("hbase.zookeeper.property.clientPort", zkPort)
elif operatingMode == "embedded" and not "{{zookeeper_clientPort}}" in zk_port_default:
putAmsHbaseSiteProperty("hbase.zookeeper.property.clientPort", "61181")
mountpoints = ["/"]
for collectorHostName in amsCollectorHosts:
for host in hosts["items"]:
if host["Hosts"]["host_name"] == collectorHostName:
mountpoints = self.getPreferredMountPoints(host["Hosts"])
break
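# For embedded mode the HBase root/tmp dirs are relocated onto the preferred
# mount points of the collector host; re.sub with count=1 strips a leading
# "file:///" scheme (or a leading "/") so os.path.join can re-root the path.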
isLocalRootDir = rootDir.startswith("file://") or (defaultFs.startswith("file://") and rootDir.startswith("/"))
if isLocalRootDir:
rootDir = re.sub("^file:///|/", "", rootDir, count=1)
rootDir = "file://" + os.path.join(mountpoints[0], rootDir)
tmpDir = re.sub("^file:///|/", "", tmpDir, count=1)
if len(mountpoints) > 1 and isLocalRootDir:
tmpDir = os.path.join(mountpoints[1], tmpDir)
else:
tmpDir = os.path.join(mountpoints[0], tmpDir)
putAmsHbaseSiteProperty("hbase.tmp.dir", tmpDir)
if operatingMode == "distributed":
putAmsHbaseSiteProperty("hbase.rootdir", defaultFs + "/user/ams/hbase")
if operatingMode == "embedded":
if isLocalRootDir:
putAmsHbaseSiteProperty("hbase.rootdir", rootDir)
else:
putAmsHbaseSiteProperty("hbase.rootdir", "file:///var/lib/ambari-metrics-collector/hbase")
collector_heapsize, hbase_heapsize, total_sinks_count = self.getAmsMemoryRecommendation(services, hosts)
putAmsEnvProperty("metrics_collector_heapsize", collector_heapsize)
# blockCache = 0.3, memstore = 0.35, phoenix-server = 0.15, phoenix-client = 0.25
putAmsHbaseSiteProperty("hfile.block.cache.size", 0.3)
putAmsHbaseSiteProperty("hbase.hregion.memstore.flush.size", 134217728)
putAmsHbaseSiteProperty("hbase.regionserver.global.memstore.upperLimit", 0.35)
putAmsHbaseSiteProperty("hbase.regionserver.global.memstore.lowerLimit", 0.3)
if len(amsCollectorHosts) > 1:
pass
else:
# blockCache = 0.3, memstore = 0.3, phoenix-server = 0.2, phoenix-client = 0.3
if total_sinks_count >= 2000:
putAmsHbaseSiteProperty("hbase.regionserver.handler.count", 60)
putAmsHbaseSiteProperty("hbase.regionserver.hlog.blocksize", 134217728)
putAmsHbaseSiteProperty("hbase.regionserver.maxlogs", 64)
putAmsHbaseSiteProperty("hbase.hregion.memstore.flush.size", 268435456)
putAmsHbaseSiteProperty("hbase.regionserver.global.memstore.upperLimit", 0.3)
putAmsHbaseSiteProperty("hbase.regionserver.global.memstore.lowerLimit", 0.25)
putAmsHbaseSiteProperty("phoenix.query.maxGlobalMemoryPercentage", 20)
putAmsHbaseSiteProperty("phoenix.coprocessor.maxMetaDataCacheSize", 81920000)
putAmsSiteProperty("phoenix.query.maxGlobalMemoryPercentage", 30)
putAmsSiteProperty("timeline.metrics.service.resultset.fetchSize", 10000)
elif total_sinks_count >= 500:
putAmsHbaseSiteProperty("hbase.regionserver.handler.count", 60)
putAmsHbaseSiteProperty("hbase.regionserver.hlog.blocksize", 134217728)
putAmsHbaseSiteProperty("hbase.regionserver.maxlogs", 64)
putAmsHbaseSiteProperty("hbase.hregion.memstore.flush.size", 268435456)
putAmsHbaseSiteProperty("phoenix.coprocessor.maxMetaDataCacheSize", 40960000)
putAmsSiteProperty("timeline.metrics.service.resultset.fetchSize", 5000)
else:
putAmsHbaseSiteProperty("phoenix.coprocessor.maxMetaDataCacheSize", 20480000)
pass
metrics_api_handlers = min(50, max(20, int(total_sinks_count / 100)))
putAmsSiteProperty("timeline.metrics.service.handler.thread.count", metrics_api_handlers)
# Distributed mode heap size
if operatingMode == "distributed":
hbase_heapsize = max(hbase_heapsize, 768)
putHbaseEnvProperty("hbase_master_heapsize", "512")
putHbaseEnvProperty("hbase_master_xmn_size", "102") #20% of 512 heap size
putHbaseEnvProperty("hbase_regionserver_heapsize", hbase_heapsize)
putHbaseEnvProperty("regionserver_xmn_size", round_to_n(0.15*hbase_heapsize,64))
else:
# Embedded mode heap size : master + regionserver
hbase_rs_heapsize = 768
putHbaseEnvProperty("hbase_regionserver_heapsize", hbase_rs_heapsize)
putHbaseEnvProperty("hbase_master_heapsize", hbase_heapsize)
putHbaseEnvProperty("hbase_master_xmn_size", round_to_n(0.15*(hbase_heapsize+hbase_rs_heapsize),64))
# If no local DN in distributed mode
if operatingMode == "distributed":
dn_hosts = self.getComponentHostNames(services, "HDFS", "DATANODE")
# calls by the Kerberos wizard send only the service being affected,
# so it is possible for dn_hosts to be None but not amsCollectorHosts
if dn_hosts and len(dn_hosts) > 0:
if set(amsCollectorHosts).intersection(dn_hosts):
collector_cohosted_with_dn = "true"
else:
collector_cohosted_with_dn = "false"
putAmsHbaseSiteProperty("dfs.client.read.shortcircuit", collector_cohosted_with_dn)
#split points
scriptDir = os.path.dirname(os.path.abspath(__file__))
metricsDir = os.path.join(scriptDir, '../../../../common-services/AMBARI_METRICS/0.1.0/package')
serviceMetricsDir = os.path.join(metricsDir, 'files', 'service-metrics')
sys.path.append(os.path.join(metricsDir, 'scripts'))
servicesList = [service["StackServices"]["service_name"] for service in services["services"]]
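# split_points ships with the AMBARI_METRICS service package rather than on
# the default import path, hence the sys.path.append above before importing it.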
from split_points import FindSplitPointsForAMSRegions
ams_hbase_site = None
ams_hbase_env = None
# Overridden properties from the UI
if "ams-hbase-site" in services["configurations"]:
ams_hbase_site = services["configurations"]["ams-hbase-site"]["properties"]
if "ams-hbase-env" in services["configurations"]:
ams_hbase_env = services["configurations"]["ams-hbase-env"]["properties"]
# Recommendations
if not ams_hbase_site:
ams_hbase_site = configurations["ams-hbase-site"]["properties"]
if not ams_hbase_env:
ams_hbase_env = configurations["ams-hbase-env"]["properties"]
split_point_finder = FindSplitPointsForAMSRegions(
ams_hbase_site, ams_hbase_env, serviceMetricsDir, operatingMode, servicesList)
result = split_point_finder.get_split_points()
precision_splits = ' '
aggregate_splits = ' '
if result.precision:
precision_splits = result.precision
if result.aggregate:
aggregate_splits = result.aggregate
putAmsSiteProperty("timeline.metrics.host.aggregate.splitpoints", ','.join(precision_splits))
putAmsSiteProperty("timeline.metrics.cluster.aggregate.splitpoints", ','.join(aggregate_splits))
component_grafana_exists = False
for service in services['services']:
if 'components' in service:
for component in service['components']:
if 'StackServiceComponents' in component:
# If Grafana is installed the hostnames would indicate its location
if 'METRICS_GRAFANA' in component['StackServiceComponents']['component_name'] and\
len(component['StackServiceComponents']['hostnames']) != 0:
component_grafana_exists = True
break
pass
if not component_grafana_exists:
putGrafanaPropertyAttribute("metrics_grafana_password", "visible", "false")
pass
def getHostNamesWithComponent(self, serviceName, componentName, services):
"""
Returns the list of hostnames on which service component is installed
"""
if services is not None and serviceName in [service["StackServices"]["service_name"] for service in services["services"]]:
service = [serviceEntry for serviceEntry in services["services"] if serviceEntry["StackServices"]["service_name"] == serviceName][0]
components = [componentEntry for componentEntry in service["components"] if componentEntry["StackServiceComponents"]["component_name"] == componentName]
if (len(components) > 0 and len(components[0]["StackServiceComponents"]["hostnames"]) > 0):
componentHostnames = components[0]["StackServiceComponents"]["hostnames"]
return componentHostnames
return []
def getHostsWithComponent(self, serviceName, componentName, services, hosts):
if services is not None and hosts is not None and serviceName in [service["StackServices"]["service_name"] for service in services["services"]]:
service = [serviceEntry for serviceEntry in services["services"] if serviceEntry["StackServices"]["service_name"] == serviceName][0]
components = [componentEntry for componentEntry in service["components"] if componentEntry["StackServiceComponents"]["component_name"] == componentName]
if (len(components) > 0 and len(components[0]["StackServiceComponents"]["hostnames"]) > 0):
componentHostnames = components[0]["StackServiceComponents"]["hostnames"]
componentHosts = [host for host in hosts["items"] if host["Hosts"]["host_name"] in componentHostnames]
return componentHosts
return []
def getHostWithComponent(self, serviceName, componentName, services, hosts):
componentHosts = self.getHostsWithComponent(serviceName, componentName, services, hosts)
if (len(componentHosts) > 0):
return componentHosts[0]
return None
def getHostComponentsByCategories(self, hostname, categories, services, hosts):
components = []
if services is not None and hosts is not None:
for service in services["services"]:
components.extend([componentEntry for componentEntry in service["components"]
if componentEntry["StackServiceComponents"]["component_category"] in categories
and hostname in componentEntry["StackServiceComponents"]["hostnames"]])
return components
def getZKHostPortString(self, services, include_port=True):
"""
    Returns a comma-delimited string of the ZooKeeper server hosts installed in the cluster, optionally with the configured client port.
Example: zk.host1.org:2181,zk.host2.org:2181,zk.host3.org:2181
    include_port boolean param -> whether the client port should be appended to each host.
"""
servicesList = [service["StackServices"]["service_name"] for service in services["services"]]
include_zookeeper = "ZOOKEEPER" in servicesList
zookeeper_host_port = ''
if include_zookeeper:
zookeeper_hosts = self.getHostNamesWithComponent("ZOOKEEPER", "ZOOKEEPER_SERVER", services)
zookeeper_host_port_arr = []
if include_port:
zookeeper_port = self.getZKPort(services)
for i in range(len(zookeeper_hosts)):
zookeeper_host_port_arr.append(zookeeper_hosts[i] + ':' + zookeeper_port)
else:
for i in range(len(zookeeper_hosts)):
zookeeper_host_port_arr.append(zookeeper_hosts[i])
zookeeper_host_port = ",".join(zookeeper_host_port_arr)
return zookeeper_host_port
def getZKPort(self, services):
zookeeper_port = '2181' #default port
if 'zoo.cfg' in services['configurations'] and ('clientPort' in services['configurations']['zoo.cfg']['properties']):
zookeeper_port = services['configurations']['zoo.cfg']['properties']['clientPort']
return zookeeper_port
def getConfigurationClusterSummary(self, servicesList, hosts, components, services):
hBaseInstalled = False
if 'HBASE' in servicesList:
hBaseInstalled = True
cluster = {
"cpu": 0,
"disk": 0,
"ram": 0,
"hBaseInstalled": hBaseInstalled,
"components": components
}
if len(hosts["items"]) > 0:
nodeManagerHosts = self.getHostsWithComponent("YARN", "NODEMANAGER", services, hosts)
      # The NodeManager host with the least memory is used as the reference, since recommendations derived from it will also work on larger hosts.
if nodeManagerHosts is not None and len(nodeManagerHosts) > 0:
        nodeManagerHost = nodeManagerHosts[0]
for nmHost in nodeManagerHosts:
if nmHost["Hosts"]["total_mem"] < nodeManagerHost["Hosts"]["total_mem"]:
nodeManagerHost = nmHost
host = nodeManagerHost["Hosts"]
cluster["referenceNodeManagerHost"] = host
else:
host = hosts["items"][0]["Hosts"]
cluster["referenceHost"] = host
cluster["cpu"] = host["cpu_count"]
cluster["disk"] = len(host["disk_info"])
cluster["ram"] = int(host["total_mem"] / (1024 * 1024))
ramRecommendations = [
{"os":1, "hbase":1},
{"os":2, "hbase":1},
{"os":2, "hbase":2},
{"os":4, "hbase":4},
{"os":6, "hbase":8},
{"os":8, "hbase":8},
{"os":8, "hbase":8},
{"os":12, "hbase":16},
{"os":24, "hbase":24},
{"os":32, "hbase":32},
{"os":64, "hbase":32}
]
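    # Note: exactly one of the range checks in the dict below evaluates to True; since
    # True == 1, indexing the dict with [1] selects the bucket whose condition holds
    # (all False conditions collapse onto the single False key).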
index = {
cluster["ram"] <= 4: 0,
4 < cluster["ram"] <= 8: 1,
8 < cluster["ram"] <= 16: 2,
16 < cluster["ram"] <= 24: 3,
24 < cluster["ram"] <= 48: 4,
48 < cluster["ram"] <= 64: 5,
64 < cluster["ram"] <= 72: 6,
72 < cluster["ram"] <= 96: 7,
96 < cluster["ram"] <= 128: 8,
128 < cluster["ram"] <= 256: 9,
256 < cluster["ram"]: 10
}[1]
cluster["reservedRam"] = ramRecommendations[index]["os"]
cluster["hbaseRam"] = ramRecommendations[index]["hbase"]
cluster["minContainerSize"] = {
cluster["ram"] <= 4: 256,
4 < cluster["ram"] <= 8: 512,
8 < cluster["ram"] <= 24: 1024,
24 < cluster["ram"]: 2048
}[1]
totalAvailableRam = cluster["ram"] - cluster["reservedRam"]
if cluster["hBaseInstalled"]:
totalAvailableRam -= cluster["hbaseRam"]
cluster["totalAvailableRam"] = max(512, totalAvailableRam * 1024)
    '''containers = max(3, min(2 * CORES, min(1.8 * DISKS, totalAvailableRam / minContainerSize)))'''
cluster["containers"] = round(max(3,
min(2 * cluster["cpu"],
min(ceil(1.8 * cluster["disk"]),
cluster["totalAvailableRam"] / cluster["minContainerSize"]))))
    '''ramPerContainer = totalAvailableRam / containers'''
cluster["ramPerContainer"] = abs(cluster["totalAvailableRam"] / cluster["containers"])
'''If greater than 1GB, value will be in multiples of 512.'''
if cluster["ramPerContainer"] > 1024:
cluster["ramPerContainer"] = int(cluster["ramPerContainer"] / 512) * 512
cluster["mapMemory"] = int(cluster["ramPerContainer"])
cluster["reduceMemory"] = cluster["ramPerContainer"]
cluster["amMemory"] = max(cluster["mapMemory"], cluster["reduceMemory"])
return cluster
def getServiceConfigurationValidators(self):
return {
"HDFS": { "hdfs-site": self.validateHDFSConfigurations,
"hadoop-env": self.validateHDFSConfigurationsEnv},
"MAPREDUCE2": {"mapred-site": self.validateMapReduce2Configurations},
"YARN": {"yarn-site": self.validateYARNConfigurations,
"yarn-env": self.validateYARNEnvConfigurations},
"HBASE": {"hbase-env": self.validateHbaseEnvConfigurations},
"STORM": {"storm-site": self.validateStormConfigurations},
"AMBARI_METRICS": {"ams-hbase-site": self.validateAmsHbaseSiteConfigurations,
"ams-hbase-env": self.validateAmsHbaseEnvConfigurations,
"ams-site": self.validateAmsSiteConfigurations}
}
def validateMinMax(self, items, recommendedDefaults, configurations):
# required for casting to the proper numeric type before comparison
def convertToNumber(number):
try:
return int(number)
except ValueError:
return float(number)
for configName in configurations:
validationItems = []
if configName in recommendedDefaults and "property_attributes" in recommendedDefaults[configName]:
for propertyName in recommendedDefaults[configName]["property_attributes"]:
if propertyName in configurations[configName]["properties"]:
if "maximum" in recommendedDefaults[configName]["property_attributes"][propertyName] and \
propertyName in recommendedDefaults[configName]["properties"]:
userValue = convertToNumber(configurations[configName]["properties"][propertyName])
maxValue = convertToNumber(recommendedDefaults[configName]["property_attributes"][propertyName]["maximum"])
if userValue > maxValue:
validationItems.extend([{"config-name": propertyName, "item": self.getWarnItem("Value is greater than the recommended maximum of {0} ".format(maxValue))}])
if "minimum" in recommendedDefaults[configName]["property_attributes"][propertyName] and \
propertyName in recommendedDefaults[configName]["properties"]:
userValue = convertToNumber(configurations[configName]["properties"][propertyName])
minValue = convertToNumber(recommendedDefaults[configName]["property_attributes"][propertyName]["minimum"])
if userValue < minValue:
validationItems.extend([{"config-name": propertyName, "item": self.getWarnItem("Value is less than the recommended minimum of {0} ".format(minValue))}])
items.extend(self.toConfigurationValidationProblems(validationItems, configName))
pass
def validateAmsSiteConfigurations(self, properties, recommendedDefaults, configurations, services, hosts):
validationItems = []
op_mode = properties.get("timeline.metrics.service.operation.mode")
correct_op_mode_item = None
if op_mode not in ("embedded", "distributed"):
      correct_op_mode_item = self.getErrorItem("Correct value should be set (embedded or distributed).")
pass
validationItems.extend([{"config-name":'timeline.metrics.service.operation.mode', "item": correct_op_mode_item }])
return self.toConfigurationValidationProblems(validationItems, "ams-site")
def validateAmsHbaseSiteConfigurations(self, properties, recommendedDefaults, configurations, services, hosts):
amsCollectorHosts = self.getComponentHostNames(services, "AMBARI_METRICS", "METRICS_COLLECTOR")
ams_site = getSiteProperties(configurations, "ams-site")
core_site = getSiteProperties(configurations, "core-site")
collector_heapsize, hbase_heapsize, total_sinks_count = self.getAmsMemoryRecommendation(services, hosts)
recommendedDiskSpace = 10485760
# TODO validate configuration for multiple AMBARI_METRICS collectors
if len(amsCollectorHosts) > 1:
pass
else:
if total_sinks_count > 2000:
recommendedDiskSpace = 104857600 # * 1k == 100 Gb
elif total_sinks_count > 500:
recommendedDiskSpace = 52428800 # * 1k == 50 Gb
elif total_sinks_count > 250:
recommendedDiskSpace = 20971520 # * 1k == 20 Gb
validationItems = []
rootdir_item = None
op_mode = ams_site.get("timeline.metrics.service.operation.mode")
default_fs = core_site.get("fs.defaultFS") if core_site else "file:///"
hbase_rootdir = properties.get("hbase.rootdir")
hbase_tmpdir = properties.get("hbase.tmp.dir")
distributed = properties.get("hbase.cluster.distributed")
is_local_root_dir = hbase_rootdir.startswith("file://") or (default_fs.startswith("file://") and hbase_rootdir.startswith("/"))
if op_mode == "distributed" and is_local_root_dir:
rootdir_item = self.getWarnItem("In distributed mode hbase.rootdir should point to HDFS.")
elif op_mode == "embedded":
      if distributed.lower() == "false" and (hbase_rootdir.startswith('/') or hbase_rootdir.startswith("hdfs://")):
        rootdir_item = self.getWarnItem("In embedded mode hbase.rootdir should not point to a schemaless path or HDFS; "
                                        "use a file:// URI for the local filesystem.")
pass
distributed_item = None
if op_mode == "distributed" and not distributed.lower() == "true":
distributed_item = self.getErrorItem("hbase.cluster.distributed property should be set to true for "
"distributed mode")
if op_mode == "embedded" and distributed.lower() == "true":
distributed_item = self.getErrorItem("hbase.cluster.distributed property should be set to false for embedded mode")
hbase_zk_client_port = properties.get("hbase.zookeeper.property.clientPort")
zkPort = self.getZKPort(services)
hbase_zk_client_port_item = None
if distributed.lower() == "true" and op_mode == "distributed" and \
hbase_zk_client_port != zkPort and hbase_zk_client_port != "{{zookeeper_clientPort}}":
hbase_zk_client_port_item = self.getErrorItem("In AMS distributed mode, hbase.zookeeper.property.clientPort "
"should be the cluster zookeeper server port : {0}".format(zkPort))
if distributed.lower() == "false" and op_mode == "embedded" and \
hbase_zk_client_port == zkPort and hbase_zk_client_port != "{{zookeeper_clientPort}}":
hbase_zk_client_port_item = self.getErrorItem("In AMS embedded mode, hbase.zookeeper.property.clientPort "
"should be a different port than cluster zookeeper port."
"(default:61181)")
validationItems.extend([{"config-name":'hbase.rootdir', "item": rootdir_item },
{"config-name":'hbase.cluster.distributed', "item": distributed_item },
{"config-name":'hbase.zookeeper.property.clientPort', "item": hbase_zk_client_port_item }])
for collectorHostName in amsCollectorHosts:
for host in hosts["items"]:
if host["Hosts"]["host_name"] == collectorHostName:
if op_mode == 'embedded' or is_local_root_dir:
validationItems.extend([{"config-name": 'hbase.rootdir', "item": self.validatorEnoughDiskSpace(properties, 'hbase.rootdir', host["Hosts"], recommendedDiskSpace)}])
validationItems.extend([{"config-name": 'hbase.rootdir', "item": self.validatorNotRootFs(properties, recommendedDefaults, 'hbase.rootdir', host["Hosts"])}])
validationItems.extend([{"config-name": 'hbase.tmp.dir', "item": self.validatorNotRootFs(properties, recommendedDefaults, 'hbase.tmp.dir', host["Hosts"])}])
dn_hosts = self.getComponentHostNames(services, "HDFS", "DATANODE")
if is_local_root_dir:
mountPoints = []
for mountPoint in host["Hosts"]["disk_info"]:
mountPoints.append(mountPoint["mountpoint"])
hbase_rootdir_mountpoint = getMountPointForDir(hbase_rootdir, mountPoints)
hbase_tmpdir_mountpoint = getMountPointForDir(hbase_tmpdir, mountPoints)
preferred_mountpoints = self.getPreferredMountPoints(host['Hosts'])
# hbase.rootdir and hbase.tmp.dir shouldn't point to the same partition
# if multiple preferred_mountpoints exist
if hbase_rootdir_mountpoint == hbase_tmpdir_mountpoint and \
len(preferred_mountpoints) > 1:
item = self.getWarnItem("Consider not using {0} partition for storing metrics temporary data. "
"{0} partition is already used as hbase.rootdir to store metrics data".format(hbase_tmpdir_mountpoint))
validationItems.extend([{"config-name":'hbase.tmp.dir', "item": item}])
# if METRICS_COLLECTOR is co-hosted with DATANODE
# cross-check dfs.datanode.data.dir and hbase.rootdir
# they shouldn't share same disk partition IO
hdfs_site = getSiteProperties(configurations, "hdfs-site")
dfs_datadirs = hdfs_site.get("dfs.datanode.data.dir").split(",") if hdfs_site and "dfs.datanode.data.dir" in hdfs_site else []
if dn_hosts and collectorHostName in dn_hosts and ams_site and \
dfs_datadirs and len(preferred_mountpoints) > len(dfs_datadirs):
for dfs_datadir in dfs_datadirs:
dfs_datadir_mountpoint = getMountPointForDir(dfs_datadir, mountPoints)
if dfs_datadir_mountpoint == hbase_rootdir_mountpoint:
item = self.getWarnItem("Consider not using {0} partition for storing metrics data. "
"{0} is already used by datanode to store HDFS data".format(hbase_rootdir_mountpoint))
validationItems.extend([{"config-name": 'hbase.rootdir', "item": item}])
break
# If no local DN in distributed mode
elif collectorHostName not in dn_hosts and distributed.lower() == "true":
item = self.getWarnItem("It's recommended to install Datanode component on {0} "
"to speed up IO operations between HDFS and Metrics "
"Collector in distributed mode ".format(collectorHostName))
validationItems.extend([{"config-name": "hbase.cluster.distributed", "item": item}])
# Short circuit read should be enabled in distibuted mode
# if local DN installed
else:
validationItems.extend([{"config-name": "dfs.client.read.shortcircuit", "item": self.validatorEqualsToRecommendedItem(properties, recommendedDefaults, "dfs.client.read.shortcircuit")}])
return self.toConfigurationValidationProblems(validationItems, "ams-hbase-site")
def validateStormConfigurations(self, properties, recommendedDefaults, configurations, services, hosts):
validationItems = []
servicesList = [service["StackServices"]["service_name"] for service in services["services"]]
# Storm AMS integration
if 'AMBARI_METRICS' in servicesList and "metrics.reporter.register" in properties and \
"org.apache.hadoop.metrics2.sink.storm.StormTimelineMetricsReporter" not in properties.get("metrics.reporter.register"):
validationItems.append({"config-name": 'metrics.reporter.register',
"item": self.getWarnItem(
"Should be set to org.apache.hadoop.metrics2.sink.storm.StormTimelineMetricsReporter to report the metrics to Ambari Metrics service.")})
return self.toConfigurationValidationProblems(validationItems, "storm-site")
def validateAmsHbaseEnvConfigurations(self, properties, recommendedDefaults, configurations, services, hosts):
ams_env = getSiteProperties(configurations, "ams-env")
amsHbaseSite = getSiteProperties(configurations, "ams-hbase-site")
validationItems = []
mb = 1024 * 1024
gb = 1024 * mb
regionServerItem = self.validatorLessThenDefaultValue(properties, recommendedDefaults, "hbase_regionserver_heapsize") ## FIXME if new service added
if regionServerItem:
validationItems.extend([{"config-name": "hbase_regionserver_heapsize", "item": regionServerItem}])
hbaseMasterHeapsizeItem = self.validatorLessThenDefaultValue(properties, recommendedDefaults, "hbase_master_heapsize")
if hbaseMasterHeapsizeItem:
validationItems.extend([{"config-name": "hbase_master_heapsize", "item": hbaseMasterHeapsizeItem}])
logDirItem = self.validatorEqualsPropertyItem(properties, "hbase_log_dir", ams_env, "metrics_collector_log_dir")
if logDirItem:
validationItems.extend([{"config-name": "hbase_log_dir", "item": logDirItem}])
collector_heapsize = to_number(ams_env.get("metrics_collector_heapsize"))
hbase_master_heapsize = to_number(properties["hbase_master_heapsize"])
hbase_master_xmn_size = to_number(properties["hbase_master_xmn_size"])
hbase_regionserver_heapsize = to_number(properties["hbase_regionserver_heapsize"])
hbase_regionserver_xmn_size = to_number(properties["regionserver_xmn_size"])
# Validate Xmn settings.
masterXmnItem = None
regionServerXmnItem = None
is_hbase_distributed = amsHbaseSite.get("hbase.cluster.distributed").lower() == 'true'
if is_hbase_distributed:
minMasterXmn = 0.12 * hbase_master_heapsize
maxMasterXmn = 0.2 * hbase_master_heapsize
if hbase_master_xmn_size < minMasterXmn:
        masterXmnItem = self.getWarnItem("Value is less than the recommended minimum Xmn size of {0} "
"(12% of hbase_master_heapsize)".format(int(ceil(minMasterXmn))))
if hbase_master_xmn_size > maxMasterXmn:
masterXmnItem = self.getWarnItem("Value is greater than the recommended maximum Xmn size of {0} "
"(20% of hbase_master_heapsize)".format(int(floor(maxMasterXmn))))
minRegionServerXmn = 0.12 * hbase_regionserver_heapsize
maxRegionServerXmn = 0.2 * hbase_regionserver_heapsize
if hbase_regionserver_xmn_size < minRegionServerXmn:
        regionServerXmnItem = self.getWarnItem("Value is less than the recommended minimum Xmn size of {0} "
"(12% of hbase_regionserver_heapsize)"
.format(int(ceil(minRegionServerXmn))))
if hbase_regionserver_xmn_size > maxRegionServerXmn:
regionServerXmnItem = self.getWarnItem("Value is greater than the recommended maximum Xmn size of {0} "
"(20% of hbase_regionserver_heapsize)"
.format(int(floor(maxRegionServerXmn))))
else:
minMasterXmn = 0.12 * (hbase_master_heapsize + hbase_regionserver_heapsize)
maxMasterXmn = 0.2 * (hbase_master_heapsize + hbase_regionserver_heapsize)
if hbase_master_xmn_size < minMasterXmn:
        masterXmnItem = self.getWarnItem("Value is less than the recommended minimum Xmn size of {0} "
"(12% of hbase_master_heapsize + hbase_regionserver_heapsize)"
.format(int(ceil(minMasterXmn))))
if hbase_master_xmn_size > maxMasterXmn:
masterXmnItem = self.getWarnItem("Value is greater than the recommended maximum Xmn size of {0} "
"(20% of hbase_master_heapsize + hbase_regionserver_heapsize)"
.format(int(floor(maxMasterXmn))))
if masterXmnItem:
validationItems.extend([{"config-name": "hbase_master_xmn_size", "item": masterXmnItem}])
if regionServerXmnItem:
validationItems.extend([{"config-name": "regionserver_xmn_size", "item": regionServerXmnItem}])
if hbaseMasterHeapsizeItem is None:
hostMasterComponents = {}
for service in services["services"]:
for component in service["components"]:
if component["StackServiceComponents"]["hostnames"] is not None:
for hostName in component["StackServiceComponents"]["hostnames"]:
if self.isMasterComponent(component):
if hostName not in hostMasterComponents.keys():
hostMasterComponents[hostName] = []
hostMasterComponents[hostName].append(component["StackServiceComponents"]["component_name"])
amsCollectorHosts = self.getComponentHostNames(services, "AMBARI_METRICS", "METRICS_COLLECTOR")
for collectorHostName in amsCollectorHosts:
for host in hosts["items"]:
if host["Hosts"]["host_name"] == collectorHostName:
# AMS Collector co-hosted with other master components in bigger clusters
if len(hosts['items']) > 31 and \
len(hostMasterComponents[collectorHostName]) > 2 and \
host["Hosts"]["total_mem"] < 32*mb: # < 32Gb(total_mem in k)
masterHostMessage = "Host {0} is used by multiple master components ({1}). " \
"It is recommended to use a separate host for the " \
"Ambari Metrics Collector component and ensure " \
"the host has sufficient memory available."
hbaseMasterHeapsizeItem = self.getWarnItem(masterHostMessage.format(
collectorHostName, str(", ".join(hostMasterComponents[collectorHostName]))))
if hbaseMasterHeapsizeItem:
validationItems.extend([{"config-name": "hbase_master_heapsize", "item": hbaseMasterHeapsizeItem}])
# Check for unused RAM on AMS Collector node
hostComponents = []
for service in services["services"]:
for component in service["components"]:
if component["StackServiceComponents"]["hostnames"] is not None:
if collectorHostName in component["StackServiceComponents"]["hostnames"]:
hostComponents.append(component["StackServiceComponents"]["component_name"])
requiredMemory = getMemorySizeRequired(hostComponents, configurations)
unusedMemory = host["Hosts"]["total_mem"] * 1024 - requiredMemory # in bytes
if unusedMemory > 4*gb: # warn user, if more than 4GB RAM is unused
heapPropertyToIncrease = "hbase_regionserver_heapsize" if is_hbase_distributed else "hbase_master_heapsize"
xmnPropertyToIncrease = "regionserver_xmn_size" if is_hbase_distributed else "hbase_master_xmn_size"
recommended_collector_heapsize = int((unusedMemory - 4*gb)/5) + collector_heapsize*mb
recommended_hbase_heapsize = int((unusedMemory - 4*gb)*4/5) + to_number(properties.get(heapPropertyToIncrease))*mb
recommended_hbase_heapsize = min(32*gb, recommended_hbase_heapsize) #Make sure heapsize <= 32GB
recommended_xmn_size = round_to_n(0.12*recommended_hbase_heapsize/mb,128)
if collector_heapsize < recommended_collector_heapsize or \
to_number(properties[heapPropertyToIncrease]) < recommended_hbase_heapsize:
collectorHeapsizeItem = self.getWarnItem("{0} MB RAM is unused on the host {1} based on components " \
"assigned. Consider allocating {2} MB to " \
"metrics_collector_heapsize in ams-env, " \
"{3} MB to {4} in ams-hbase-env"
.format(unusedMemory/mb, collectorHostName,
recommended_collector_heapsize/mb,
recommended_hbase_heapsize/mb,
heapPropertyToIncrease))
validationItems.extend([{"config-name": heapPropertyToIncrease, "item": collectorHeapsizeItem}])
              if to_number(properties[xmnPropertyToIncrease]) < recommended_xmn_size:
xmnPropertyToIncreaseItem = self.getWarnItem("Consider allocating {0} MB to use up some unused memory "
"on host".format(recommended_xmn_size))
validationItems.extend([{"config-name": xmnPropertyToIncrease, "item": xmnPropertyToIncreaseItem}])
pass
return self.toConfigurationValidationProblems(validationItems, "ams-hbase-env")
def getPreferredMountPoints(self, hostInfo):
# '/etc/resolv.conf', '/etc/hostname', '/etc/hosts' are docker specific mount points
undesirableMountPoints = ["/", "/home", "/etc/resolv.conf", "/etc/hosts",
"/etc/hostname", "/tmp"]
undesirableFsTypes = ["devtmpfs", "tmpfs", "vboxsf", "CDFS"]
mountPoints = []
if hostInfo and "disk_info" in hostInfo:
mountPointsDict = {}
for mountpoint in hostInfo["disk_info"]:
if not (mountpoint["mountpoint"] in undesirableMountPoints or
mountpoint["mountpoint"].startswith(("/boot", "/mnt")) or
mountpoint["type"] in undesirableFsTypes or
mountpoint["available"] == str(0)):
mountPointsDict[mountpoint["mountpoint"]] = to_number(mountpoint["available"])
if mountPointsDict:
mountPoints = sorted(mountPointsDict, key=mountPointsDict.get, reverse=True)
mountPoints.append("/")
return mountPoints
def validatorNotRootFs(self, properties, recommendedDefaults, propertyName, hostInfo):
if not propertyName in properties:
return self.getErrorItem("Value should be set")
dir = properties[propertyName]
if not dir.startswith("file://") or dir == recommendedDefaults.get(propertyName):
return None
dir = re.sub("^file://", "", dir, count=1)
mountPoints = []
for mountPoint in hostInfo["disk_info"]:
mountPoints.append(mountPoint["mountpoint"])
mountPoint = getMountPointForDir(dir, mountPoints)
if "/" == mountPoint and self.getPreferredMountPoints(hostInfo)[0] != mountPoint:
return self.getWarnItem("It is not recommended to use root partition for {0}".format(propertyName))
return None
  def validatorEnoughDiskSpace(self, properties, propertyName, hostInfo, requiredDiskSpace):
if not propertyName in properties:
return self.getErrorItem("Value should be set")
dir = properties[propertyName]
if not dir.startswith("file://"):
return None
dir = re.sub("^file://", "", dir, count=1)
mountPoints = {}
for mountPoint in hostInfo["disk_info"]:
mountPoints[mountPoint["mountpoint"]] = to_number(mountPoint["available"])
mountPoint = getMountPointForDir(dir, mountPoints.keys())
if not mountPoints:
return self.getErrorItem("No disk info found on host %s" % hostInfo["host_name"])
    if mountPoints[mountPoint] < requiredDiskSpace:
msg = "Ambari Metrics disk space requirements not met. \n" \
"Recommended disk space for partition {0} is {1}G"
      return self.getWarnItem(msg.format(mountPoint, requiredDiskSpace/1048576)) # in Gb
return None
def validatorLessThenDefaultValue(self, properties, recommendedDefaults, propertyName):
if propertyName not in recommendedDefaults:
# If a property name exists in say hbase-env and hbase-site (which is allowed), then it will exist in the
      # "properties" dictionary, but not necessarily in the "recommendedDefaults" dictionary. In this case, ignore it.
return None
if not propertyName in properties:
return self.getErrorItem("Value should be set")
value = to_number(properties[propertyName])
if value is None:
return self.getErrorItem("Value should be integer")
defaultValue = to_number(recommendedDefaults[propertyName])
if defaultValue is None:
return None
if value < defaultValue:
return self.getWarnItem("Value is less than the recommended default of {0}".format(defaultValue))
return None
def validatorEqualsPropertyItem(self, properties1, propertyName1,
properties2, propertyName2,
emptyAllowed=False):
if not propertyName1 in properties1:
return self.getErrorItem("Value should be set for %s" % propertyName1)
if not propertyName2 in properties2:
return self.getErrorItem("Value should be set for %s" % propertyName2)
value1 = properties1.get(propertyName1)
if value1 is None and not emptyAllowed:
return self.getErrorItem("Empty value for %s" % propertyName1)
value2 = properties2.get(propertyName2)
if value2 is None and not emptyAllowed:
return self.getErrorItem("Empty value for %s" % propertyName2)
if value1 != value2:
return self.getWarnItem("It is recommended to set equal values "
"for properties {0} and {1}".format(propertyName1, propertyName2))
return None
def validatorEqualsToRecommendedItem(self, properties, recommendedDefaults,
propertyName):
if not propertyName in properties:
return self.getErrorItem("Value should be set for %s" % propertyName)
value = properties.get(propertyName)
if not propertyName in recommendedDefaults:
return self.getErrorItem("Value should be recommended for %s" % propertyName)
recommendedValue = recommendedDefaults.get(propertyName)
if value != recommendedValue:
return self.getWarnItem("It is recommended to set value {0} "
"for property {1}".format(recommendedValue, propertyName))
return None
def validateMinMemorySetting(self, properties, defaultValue, propertyName):
if not propertyName in properties:
return self.getErrorItem("Value should be set")
if defaultValue is None:
return self.getErrorItem("Config's default value can't be null or undefined")
value = properties[propertyName]
if value is None:
return self.getErrorItem("Value can't be null or undefined")
try:
valueInt = to_number(value)
# TODO: generify for other use cases
defaultValueInt = int(str(defaultValue).strip())
if valueInt < defaultValueInt:
return self.getWarnItem("Value is less than the minimum recommended default of -Xmx" + str(defaultValue))
except:
return None
return None
def validatorYarnQueue(self, properties, recommendedDefaults, propertyName, services):
if propertyName not in properties:
return self.getErrorItem("Value should be set")
capacity_scheduler_properties, _ = self.getCapacitySchedulerProperties(services)
leaf_queue_names = self.getAllYarnLeafQueues(capacity_scheduler_properties)
queue_name = properties[propertyName]
if len(leaf_queue_names) == 0:
return None
elif queue_name not in leaf_queue_names:
      return self.getErrorItem("Queue does not exist or does not correspond to an existing YARN leaf queue")
return None
def recommendYarnQueue(self, services, catalog_name=None, queue_property=None):
old_queue_name = None
if services and 'configurations' in services:
configurations = services["configurations"]
if catalog_name in configurations and queue_property in configurations[catalog_name]["properties"]:
old_queue_name = configurations[catalog_name]["properties"][queue_property]
capacity_scheduler_properties, _ = self.getCapacitySchedulerProperties(services)
leaf_queues = sorted(self.getAllYarnLeafQueues(capacity_scheduler_properties))
if leaf_queues and (old_queue_name is None or old_queue_name not in leaf_queues):
return leaf_queues.pop()
elif old_queue_name and old_queue_name in leaf_queues:
return None
return "default"
def validateXmxValue(self, properties, recommendedDefaults, propertyName):
if not propertyName in properties:
return self.getErrorItem("Value should be set")
value = properties[propertyName]
defaultValue = recommendedDefaults[propertyName]
if defaultValue is None:
return self.getErrorItem("Config's default value can't be null or undefined")
if not checkXmxValueFormat(value) and checkXmxValueFormat(defaultValue):
# Xmx is in the default-value but not the value, should be an error
return self.getErrorItem('Invalid value format')
if not checkXmxValueFormat(defaultValue):
# if default value does not contain Xmx, then there is no point in validating existing value
return None
valueInt = formatXmxSizeToBytes(getXmxSize(value))
defaultValueXmx = getXmxSize(defaultValue)
defaultValueInt = formatXmxSizeToBytes(defaultValueXmx)
if valueInt < defaultValueInt:
return self.getWarnItem("Value is less than the recommended default of -Xmx" + defaultValueXmx)
return None
def validateMapReduce2Configurations(self, properties, recommendedDefaults, configurations, services, hosts):
validationItems = [ {"config-name": 'mapreduce.map.java.opts', "item": self.validateXmxValue(properties, recommendedDefaults, 'mapreduce.map.java.opts')},
{"config-name": 'mapreduce.reduce.java.opts', "item": self.validateXmxValue(properties, recommendedDefaults, 'mapreduce.reduce.java.opts')},
{"config-name": 'mapreduce.task.io.sort.mb', "item": self.validatorLessThenDefaultValue(properties, recommendedDefaults, 'mapreduce.task.io.sort.mb')},
{"config-name": 'mapreduce.map.memory.mb', "item": self.validatorLessThenDefaultValue(properties, recommendedDefaults, 'mapreduce.map.memory.mb')},
{"config-name": 'mapreduce.reduce.memory.mb', "item": self.validatorLessThenDefaultValue(properties, recommendedDefaults, 'mapreduce.reduce.memory.mb')},
{"config-name": 'yarn.app.mapreduce.am.resource.mb', "item": self.validatorLessThenDefaultValue(properties, recommendedDefaults, 'yarn.app.mapreduce.am.resource.mb')},
{"config-name": 'yarn.app.mapreduce.am.command-opts', "item": self.validateXmxValue(properties, recommendedDefaults, 'yarn.app.mapreduce.am.command-opts')},
{"config-name": 'mapreduce.job.queuename', "item": self.validatorYarnQueue(properties, recommendedDefaults, 'mapreduce.job.queuename', services)} ]
return self.toConfigurationValidationProblems(validationItems, "mapred-site")
def validateYARNConfigurations(self, properties, recommendedDefaults, configurations, services, hosts):
clusterEnv = getSiteProperties(configurations, "cluster-env")
validationItems = [ {"config-name": 'yarn.nodemanager.resource.memory-mb', "item": self.validatorLessThenDefaultValue(properties, recommendedDefaults, 'yarn.nodemanager.resource.memory-mb')},
{"config-name": 'yarn.scheduler.minimum-allocation-mb', "item": self.validatorLessThenDefaultValue(properties, recommendedDefaults, 'yarn.scheduler.minimum-allocation-mb')},
{"config-name": 'yarn.nodemanager.linux-container-executor.group', "item": self.validatorEqualsPropertyItem(properties, "yarn.nodemanager.linux-container-executor.group", clusterEnv, "user_group")},
{"config-name": 'yarn.scheduler.maximum-allocation-mb', "item": self.validatorLessThenDefaultValue(properties, recommendedDefaults, 'yarn.scheduler.maximum-allocation-mb')} ]
return self.toConfigurationValidationProblems(validationItems, "yarn-site")
def validateYARNEnvConfigurations(self, properties, recommendedDefaults, configurations, services, hosts):
validationItems = [{"config-name": 'service_check.queue.name', "item": self.validatorYarnQueue(properties, recommendedDefaults, 'service_check.queue.name', services)} ]
return self.toConfigurationValidationProblems(validationItems, "yarn-env")
def validateHbaseEnvConfigurations(self, properties, recommendedDefaults, configurations, services, hosts):
hbase_site = getSiteProperties(configurations, "hbase-site")
validationItems = [ {"config-name": 'hbase_regionserver_heapsize', "item": self.validatorLessThenDefaultValue(properties, recommendedDefaults, 'hbase_regionserver_heapsize')},
{"config-name": 'hbase_master_heapsize', "item": self.validatorLessThenDefaultValue(properties, recommendedDefaults, 'hbase_master_heapsize')},
{"config-name": "hbase_user", "item": self.validatorEqualsPropertyItem(properties, "hbase_user", hbase_site, "hbase.superuser")} ]
return self.toConfigurationValidationProblems(validationItems, "hbase-env")
def validateHDFSConfigurations(self, properties, recommendedDefaults, configurations, services, hosts):
clusterEnv = getSiteProperties(configurations, "cluster-env")
validationItems = [{"config-name": 'dfs.datanode.du.reserved', "item": self.validatorLessThenDefaultValue(properties, recommendedDefaults, 'dfs.datanode.du.reserved')},
{"config-name": 'dfs.datanode.data.dir', "item": self.validatorOneDataDirPerPartition(properties, 'dfs.datanode.data.dir', services, hosts, clusterEnv)}]
return self.toConfigurationValidationProblems(validationItems, "hdfs-site")
def validateHDFSConfigurationsEnv(self, properties, recommendedDefaults, configurations, services, hosts):
validationItems = [ {"config-name": 'namenode_heapsize', "item": self.validatorLessThenDefaultValue(properties, recommendedDefaults, 'namenode_heapsize')},
{"config-name": 'namenode_opt_newsize', "item": self.validatorLessThenDefaultValue(properties, recommendedDefaults, 'namenode_opt_newsize')},
{"config-name": 'namenode_opt_maxnewsize', "item": self.validatorLessThenDefaultValue(properties, recommendedDefaults, 'namenode_opt_maxnewsize')}]
return self.toConfigurationValidationProblems(validationItems, "hadoop-env")
def validatorOneDataDirPerPartition(self, properties, propertyName, services, hosts, clusterEnv):
if not propertyName in properties:
return self.getErrorItem("Value should be set")
dirs = properties[propertyName]
if not (clusterEnv and "one_dir_per_partition" in clusterEnv and clusterEnv["one_dir_per_partition"].lower() == "true"):
return None
dataNodeHosts = self.getDataNodeHosts(services, hosts)
warnings = set()
for host in dataNodeHosts:
hostName = host["Hosts"]["host_name"]
mountPoints = []
for diskInfo in host["Hosts"]["disk_info"]:
mountPoints.append(diskInfo["mountpoint"])
if get_mounts_with_multiple_data_dirs(mountPoints, dirs):
# A detailed message can be too long on large clusters:
# warnings.append("Host: " + hostName + "; Mount: " + mountPoint + "; Data directories: " + ", ".join(dirList))
warnings.add(hostName)
        break
if len(warnings) > 0:
return self.getWarnItem("cluster-env/one_dir_per_partition is enabled but there are multiple data directories on the same mount. Affected hosts: {0}".format(", ".join(sorted(warnings))))
return None
"""
Returns the list of Data Node hosts.
"""
def getDataNodeHosts(self, services, hosts):
if len(hosts["items"]) > 0:
dataNodeHosts = self.getHostsWithComponent("HDFS", "DATANODE", services, hosts)
if dataNodeHosts is not None:
return dataNodeHosts
return []
def getMastersWithMultipleInstances(self):
return ['ZOOKEEPER_SERVER', 'HBASE_MASTER']
def getNotValuableComponents(self):
return ['JOURNALNODE', 'ZKFC', 'GANGLIA_MONITOR']
def getNotPreferableOnServerComponents(self):
return ['GANGLIA_SERVER', 'METRICS_COLLECTOR']
def getCardinalitiesDict(self,host):
return {
'ZOOKEEPER_SERVER': {"min": 3},
'HBASE_MASTER': {"min": 1},
}
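  # Note: the layout scheme below is consumed by the parent stack advisor; the numeric keys
  # are assumed to be cluster-size thresholds mapped to a preferred host index, with "else"
  # covering larger clusters.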
def getComponentLayoutSchemes(self):
return {
'NAMENODE': {"else": 0},
'SECONDARY_NAMENODE': {"else": 1},
'HBASE_MASTER': {6: 0, 31: 2, "else": 3},
'HISTORYSERVER': {31: 1, "else": 2},
'RESOURCEMANAGER': {31: 1, "else": 2},
'OOZIE_SERVER': {6: 1, 31: 2, "else": 3},
'HIVE_SERVER': {6: 1, 31: 2, "else": 4},
'HIVE_METASTORE': {6: 1, 31: 2, "else": 4},
'WEBHCAT_SERVER': {6: 1, 31: 2, "else": 4},
'METRICS_COLLECTOR': {3: 2, 6: 2, 31: 3, "else": 5},
}
def get_system_min_uid(self):
login_defs = '/etc/login.defs'
uid_min_tag = 'UID_MIN'
comment_tag = '#'
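    # A typical /etc/login.defs entry this parser targets (illustrative sample):
    #   UID_MIN          1000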
uid_min = uid_default = '1000'
uid = None
if os.path.exists(login_defs):
with open(login_defs, 'r') as f:
data = f.read().split('\n')
# look for uid_min_tag in file
uid = filter(lambda x: uid_min_tag in x, data)
# filter all lines, where uid_min_tag was found in comments
uid = filter(lambda x: x.find(comment_tag) > x.find(uid_min_tag) or x.find(comment_tag) == -1, uid)
if uid is not None and len(uid) > 0:
uid = uid[0]
comment = uid.find(comment_tag)
tag = uid.find(uid_min_tag)
if comment == -1:
uid_tag = tag + len(uid_min_tag)
uid_min = uid[uid_tag:].strip()
elif comment > tag:
uid_tag = tag + len(uid_min_tag)
uid_min = uid[uid_tag:comment].strip()
# check result for value
try:
int(uid_min)
except ValueError:
return uid_default
return uid_min
def mergeValidators(self, parentValidators, childValidators):
for service, configsDict in childValidators.iteritems():
if service not in parentValidators:
parentValidators[service] = {}
parentValidators[service].update(configsDict)
def checkSiteProperties(self, siteProperties, *propertyNames):
"""
Check if properties defined in site properties.
:param siteProperties: config properties dict
:param *propertyNames: property names to validate
    :returns: True if all properties are defined, otherwise False
"""
if siteProperties is None:
return False
for name in propertyNames:
if not (name in siteProperties):
return False
return True
"""
Returns the dictionary of configs for 'capacity-scheduler'.
"""
def getCapacitySchedulerProperties(self, services):
capacity_scheduler_properties = dict()
received_as_key_value_pair = True
if "capacity-scheduler" in services['configurations']:
if "capacity-scheduler" in services['configurations']["capacity-scheduler"]["properties"]:
cap_sched_props_as_str = services['configurations']["capacity-scheduler"]["properties"]["capacity-scheduler"]
if cap_sched_props_as_str:
cap_sched_props_as_str = str(cap_sched_props_as_str).split('\n')
if len(cap_sched_props_as_str) > 0 and cap_sched_props_as_str[0] != 'null':
            # Received configs as one "\n" separated string
for property in cap_sched_props_as_str:
key, sep, value = property.partition("=")
capacity_scheduler_properties[key] = value
Logger.info("'capacity-scheduler' configs is passed-in as a single '\\n' separated string. "
"count(services['configurations']['capacity-scheduler']['properties']['capacity-scheduler']) = "
"{0}".format(len(capacity_scheduler_properties)))
received_as_key_value_pair = False
else:
Logger.info("Passed-in services['configurations']['capacity-scheduler']['properties']['capacity-scheduler'] is 'null'.")
else:
        Logger.info("'capacity-scheduler' configs not passed-in as single '\\n' string in "
"services['configurations']['capacity-scheduler']['properties']['capacity-scheduler'].")
if not capacity_scheduler_properties:
# Received configs as a dictionary (Generally on 1st invocation).
capacity_scheduler_properties = services['configurations']["capacity-scheduler"]["properties"]
Logger.info("'capacity-scheduler' configs is passed-in as a dictionary. "
"count(services['configurations']['capacity-scheduler']['properties']) = {0}".format(len(capacity_scheduler_properties)))
else:
Logger.error("Couldn't retrieve 'capacity-scheduler' from services.")
Logger.info("Retrieved 'capacity-scheduler' received as dictionary : '{0}'. configs : {1}" \
.format(received_as_key_value_pair, capacity_scheduler_properties.items()))
return capacity_scheduler_properties, received_as_key_value_pair
"""
Gets all YARN leaf queues.
"""
def getAllYarnLeafQueues(self, capacitySchedulerProperties):
config_list = capacitySchedulerProperties.keys()
yarn_queues = None
leafQueueNames = set()
if 'yarn.scheduler.capacity.root.queues' in config_list:
yarn_queues = capacitySchedulerProperties.get('yarn.scheduler.capacity.root.queues')
if yarn_queues:
toProcessQueues = yarn_queues.split(",")
while len(toProcessQueues) > 0:
queue = toProcessQueues.pop()
queueKey = "yarn.scheduler.capacity.root." + queue + ".queues"
if queueKey in capacitySchedulerProperties:
# If parent queue, add children
subQueues = capacitySchedulerProperties[queueKey].split(",")
for subQueue in subQueues:
toProcessQueues.append(queue + "." + subQueue)
else:
# Leaf queues
# We only take the leaf queue name instead of the complete path, as leaf queue names are unique in YARN.
# Eg: If YARN queues are like :
# (1). 'yarn.scheduler.capacity.root.a1.b1.c1.d1',
# (2). 'yarn.scheduler.capacity.root.a1.b1.c2',
# (3). 'yarn.scheduler.capacity.root.default,
# Added leaf queues names are as : d1, c2 and default for the 3 leaf queues.
leafQueuePathSplits = queue.split(".")
          if len(leafQueuePathSplits) > 0:
leafQueueName = leafQueuePathSplits[-1]
leafQueueNames.add(leafQueueName)
return leafQueueNames
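  # Illustrative example for getAllYarnLeafQueues (hypothetical capacity-scheduler properties):
  # given yarn.scheduler.capacity.root.queues = "default,a1" and
  # yarn.scheduler.capacity.root.a1.queues = "b1,b2",
  # the returned leaf queue names are {"default", "b1", "b2"}.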
def get_service_component_meta(self, service, component, services):
"""
    Retrieves service component meta information as a dict from services.json.
    If no service or component is found, an empty dict is returned.
Return value example:
"advertise_version" : true,
"bulk_commands_display_name" : "",
"bulk_commands_master_component_name" : "",
"cardinality" : "1+",
"component_category" : "CLIENT",
"component_name" : "HBASE_CLIENT",
"custom_commands" : [ ],
"decommission_allowed" : false,
"display_name" : "HBase Client",
"has_bulk_commands_definition" : false,
"is_client" : true,
"is_master" : false,
"reassign_allowed" : false,
"recovery_enabled" : false,
"service_name" : "HBASE",
"stack_name" : "HDP",
"stack_version" : "2.5",
"hostnames" : [ "host1", "host2" ]
:type service str
:type component str
:type services dict
:rtype dict
"""
__stack_services = "StackServices"
__stack_service_components = "StackServiceComponents"
if not services:
return {}
service_meta = [item for item in services["services"] if item[__stack_services]["service_name"] == service]
if len(service_meta) == 0:
return {}
service_meta = service_meta[0]
component_meta = [item for item in service_meta["components"] if item[__stack_service_components]["component_name"] == component]
if len(component_meta) == 0:
return {}
return component_meta[0][__stack_service_components]
def is_secured_cluster(self, services):
"""
Detects if cluster is secured or not
:type services dict
:rtype bool
"""
return services and "cluster-env" in services["configurations"] and\
"security_enabled" in services["configurations"]["cluster-env"]["properties"] and\
services["configurations"]["cluster-env"]["properties"]["security_enabled"].lower() == "true"
def get_services_list(self, services):
"""
Returns available services as list
:type services dict
:rtype list
"""
if not services:
return []
return [service["StackServices"]["service_name"] for service in services["services"]]
def get_components_list(self, service, services):
"""
Return list of components for specific service
:type service str
:type services dict
:rtype list
"""
__stack_services = "StackServices"
__stack_service_components = "StackServiceComponents"
if not services:
return []
service_meta = [item for item in services["services"] if item[__stack_services]["service_name"] == service]
if len(service_meta) == 0:
return []
service_meta = service_meta[0]
return [item[__stack_service_components]["component_name"] for item in service_meta["components"]]
def getOldValue(self, services, configType, propertyName):
if services:
if 'changed-configurations' in services.keys():
changedConfigs = services["changed-configurations"]
for changedConfig in changedConfigs:
if changedConfig["type"] == configType and changedConfig["name"]== propertyName and "old_value" in changedConfig:
return changedConfig["old_value"]
return None
# Validation helper methods
def getSiteProperties(configurations, siteName):
siteConfig = configurations.get(siteName)
if siteConfig is None:
return None
return siteConfig.get("properties")
def getServicesSiteProperties(services, siteName):
configurations = services.get("configurations")
if not configurations:
return None
siteConfig = configurations.get(siteName)
if siteConfig is None:
return None
return siteConfig.get("properties")
def to_number(s):
try:
    return int(re.sub(r"\D", "", s))
except ValueError:
return None
def checkXmxValueFormat(value):
  p = re.compile(r'-Xmx(\d+)(b|k|m|g|p|t|B|K|M|G|P|T)?')
matches = p.findall(value)
return len(matches) == 1
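# Illustrative examples: checkXmxValueFormat("-Xmx2048m") returns True, while a value without
# an -Xmx flag, e.g. "2048m", returns False.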
def getXmxSize(value):
  p = re.compile(r"-Xmx(\d+)(.?)")
result = p.findall(value)[0]
if len(result) > 1:
# result[1] - is a space or size formatter (b|k|m|g etc)
return result[0] + result[1].lower()
return result[0]
def formatXmxSizeToBytes(value):
value = value.lower()
if len(value) == 0:
return 0
modifier = value[-1]
if modifier == ' ' or modifier in "0123456789":
modifier = 'b'
m = {
modifier == 'b': 1,
modifier == 'k': 1024,
modifier == 'm': 1024 * 1024,
modifier == 'g': 1024 * 1024 * 1024,
modifier == 't': 1024 * 1024 * 1024 * 1024,
modifier == 'p': 1024 * 1024 * 1024 * 1024 * 1024
}[1]
return to_number(value) * m
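# Illustrative examples: formatXmxSizeToBytes("2g") returns 2 * 1024 ** 3 bytes, and
# formatXmxSizeToBytes("512") treats the value as plain bytes and returns 512.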
def getPort(address):
"""
Extracts port from the address like 0.0.0.0:1019
"""
if address is None:
return None
m = re.search(r'(?:http(?:s)?://)?([\w\d.]*):(\d{1,5})', address)
if m is not None:
return int(m.group(2))
else:
return None
def isSecurePort(port):
"""
Returns True if port is root-owned at *nix systems
"""
if port is not None:
return port < 1024
else:
return False
def getMountPointForDir(dir, mountPoints):
"""
:param dir: Directory to check, even if it doesn't exist.
:return: Returns the closest mount point as a string for the directory.
if the "dir" variable is None, will return None.
If the directory does not exist, will return "/".
"""
bestMountFound = None
if dir:
dir = re.sub("^file://", "", dir, count=1).strip().lower()
# If the path is "/hadoop/hdfs/data", then possible matches for mounts could be
# "/", "/hadoop/hdfs", and "/hadoop/hdfs/data".
# So take the one with the greatest number of segments.
for mountPoint in mountPoints:
# Ensure that the mount path and the dir path ends with "/"
# The mount point "/hadoop" should not match with the path "/hadoop1"
if os.path.join(dir, "").startswith(os.path.join(mountPoint, "")):
if bestMountFound is None:
bestMountFound = mountPoint
elif os.path.join(bestMountFound, "").count(os.path.sep) < os.path.join(mountPoint, "").count(os.path.sep):
bestMountFound = mountPoint
return bestMountFound
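# Illustrative example: getMountPointForDir("/hadoop/hdfs/data", ["/", "/hadoop", "/hadoop/hdfs"])
# returns "/hadoop/hdfs", the candidate that shares the most path segments with the directory.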
def getHeapsizeProperties():
return { "NAMENODE": [{"config-name": "hadoop-env",
"property": "namenode_heapsize",
"default": "1024m"}],
"DATANODE": [{"config-name": "hadoop-env",
"property": "dtnode_heapsize",
"default": "1024m"}],
"REGIONSERVER": [{"config-name": "hbase-env",
"property": "hbase_regionserver_heapsize",
"default": "1024m"}],
"HBASE_MASTER": [{"config-name": "hbase-env",
"property": "hbase_master_heapsize",
"default": "1024m"}],
"HIVE_CLIENT": [{"config-name": "hive-site",
"property": "hive.heapsize",
"default": "1024m"}],
"HISTORYSERVER": [{"config-name": "mapred-env",
"property": "jobhistory_heapsize",
"default": "1024m"}],
"OOZIE_SERVER": [{"config-name": "oozie-env",
"property": "oozie_heapsize",
"default": "1024m"}],
"RESOURCEMANAGER": [{"config-name": "yarn-env",
"property": "resourcemanager_heapsize",
"default": "1024m"}],
"NODEMANAGER": [{"config-name": "yarn-env",
"property": "nodemanager_heapsize",
"default": "1024m"}],
"APP_TIMELINE_SERVER": [{"config-name": "yarn-env",
"property": "apptimelineserver_heapsize",
"default": "1024m"}],
"ZOOKEEPER_SERVER": [{"config-name": "zookeeper-env",
"property": "zookeeper_heapsize",
"default": "1024m"}],
"METRICS_COLLECTOR": [{"config-name": "ams-hbase-env",
"property": "hbase_master_heapsize",
"default": "1024"},
{"config-name": "ams-hbase-env",
"property": "hbase_regionserver_heapsize",
"default": "1024"},
{"config-name": "ams-env",
"property": "metrics_collector_heapsize",
"default": "512"}],
"ATLAS_SERVER": [{"config-name": "atlas-env",
"property": "atlas_server_xmx",
"default": "2048"}]
}
def getMemorySizeRequired(components, configurations):
totalMemoryRequired = 512*1024*1024 # 512Mb for OS needs
for component in components:
if component in getHeapsizeProperties().keys():
heapSizeProperties = getHeapsizeProperties()[component]
for heapSizeProperty in heapSizeProperties:
try:
properties = configurations[heapSizeProperty["config-name"]]["properties"]
heapsize = properties[heapSizeProperty["property"]]
except KeyError:
heapsize = heapSizeProperty["default"]
# Assume Mb if no modifier
if len(heapsize) > 1 and heapsize[-1] in '0123456789':
heapsize = str(heapsize) + "m"
totalMemoryRequired += formatXmxSizeToBytes(heapsize)
return totalMemoryRequired
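# Illustrative example: getMemorySizeRequired(["NAMENODE"], {}) falls back to the 1024m default
# heapsize and returns 512 MB (OS allowance) + 1024 MB = 1610612736 bytes.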
def round_to_n(mem_size, n=128):
return int(round(mem_size / float(n))) * int(n)
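# Illustrative example: round_to_n(1000) returns 1024, the nearest multiple of 128.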
|
py | 1a2fa11f07440aff970ba75f83f5d80a1d284b01 | # -*- encoding: utf-8 -*-
from __future__ import division, print_function, absolute_import, unicode_literals
import itertools
import h2o
from h2o.job import H2OJob
from h2o.frame import H2OFrame
from h2o.exceptions import H2OValueError
from h2o.estimators.estimator_base import H2OEstimator
from h2o.two_dim_table import H2OTwoDimTable
from h2o.display import H2ODisplay
from h2o.grid.metrics import * # NOQA
from h2o.utils.backward_compatibility import backwards_compatible
from h2o.utils.shared_utils import deprecated, quoted
from h2o.utils.compatibility import * # NOQA
from h2o.utils.typechecks import assert_is_type, is_type
class H2OGridSearch(backwards_compatible()):
"""
Grid Search of a Hyper-Parameter Space for a Model
:param model: The type of model to be explored initialized with optional parameters that will be
unchanged across explored models.
:param hyper_params: A dictionary of string parameters (keys) and a list of values to be explored by grid
search (values).
:param str grid_id: The unique id assigned to the resulting grid object. If none is given, an id will
automatically be generated.
:param search_criteria: A dictionary of directives which control the search of the hyperparameter space.
The default strategy "Cartesian" covers the entire space of hyperparameter combinations. Specify the
"RandomDiscrete" strategy to get random search of all the combinations of your hyperparameters.
RandomDiscrete should usually be combined with at least one early stopping criterion: max_models
and/or max_runtime_secs, e.g::
>>> criteria = {"strategy": "RandomDiscrete", "max_models": 42,
... "max_runtime_secs": 28800, "seed": 1234}
>>> criteria = {"strategy": "RandomDiscrete", "stopping_metric": "AUTO",
... "stopping_tolerance": 0.001, "stopping_rounds": 10}
>>> criteria = {"strategy": "RandomDiscrete", "stopping_rounds": 5,
... "stopping_metric": "misclassification",
... "stopping_tolerance": 0.00001}
:returns: a new H2OGridSearch instance
Examples
--------
>>> from h2o.grid.grid_search import H2OGridSearch
>>> from h2o.estimators.glm import H2OGeneralizedLinearEstimator
>>> hyper_parameters = {'alpha': [0.01,0.5], 'lambda': [1e-5,1e-6]}
>>> gs = H2OGridSearch(H2OGeneralizedLinearEstimator(family='binomial'), hyper_parameters)
>>> training_data = h2o.import_file("smalldata/logreg/benign.csv")
>>> gs.train(x=range(3) + range(4,11),y=3, training_frame=training_data)
>>> gs.show()
"""
def __init__(self, model, hyper_params, grid_id=None, search_criteria=None, export_checkpoints_dir=None):
super(H2OGridSearch, self).__init__()
assert_is_type(model, None, H2OEstimator, lambda mdl: issubclass(mdl, H2OEstimator))
assert_is_type(hyper_params, dict)
assert_is_type(grid_id, None, str)
assert_is_type(search_criteria, None, dict)
if not (model is None or is_type(model, H2OEstimator)): model = model()
self._id = grid_id
self.model = model
self.hyper_params = dict(hyper_params)
self.search_criteria = None if search_criteria is None else dict(search_criteria)
self.export_checkpoints_dir = export_checkpoints_dir
self._grid_json = None
self.models = None # list of H2O Estimator instances
self._parms = {} # internal, for object recycle #
self.parms = {} # external#
self._future = False # used by __repr__/show to query job state#
self._job = None # used when _future is True#
@property
def grid_id(self):
"""A key that identifies this grid search object in H2O."""
return self._id
@grid_id.setter
def grid_id(self, value):
oldname = self.grid_id
self._id = value
h2o.rapids('(rename "{}" "{}")'.format(oldname, value))
@property
def model_ids(self):
return [i['name'] for i in self._grid_json["model_ids"]]
@property
def hyper_names(self):
return self._grid_json["hyper_names"]
@property
def failed_params(self):
return self._grid_json.get("failed_params", None)
@property
def failure_details(self):
return self._grid_json.get("failure_details", None)
@property
def failure_stack_traces(self):
return self._grid_json.get("failure_stack_traces", None)
@property
def failed_raw_params(self):
return self._grid_json.get("failed_raw_params", None)
def start(self, x, y=None, training_frame=None, offset_column=None, fold_column=None, weights_column=None,
validation_frame=None, **params):
"""
Asynchronous model build by specifying the predictor columns, response column, and any
additional frame-specific values.
To block for results, call :meth:`join`.
:param x: A list of column names or indices indicating the predictor columns.
:param y: An index or a column name indicating the response column.
:param training_frame: The H2OFrame having the columns indicated by x and y (as well as any
additional columns specified by fold, offset, and weights).
:param offset_column: The name or index of the column in training_frame that holds the offsets.
:param fold_column: The name or index of the column in training_frame that holds the per-row fold
assignments.
:param weights_column: The name or index of the column in training_frame that holds the per-row weights.
:param validation_frame: H2OFrame with validation data to be scored on while training.
"""
self._future = True
self.train(x=x,
y=y,
training_frame=training_frame,
offset_column=offset_column,
fold_column=fold_column,
weights_column=weights_column,
validation_frame=validation_frame,
**params)
def join(self):
"""Wait until grid finishes computing."""
self._future = False
self._job.poll()
self._job = None
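    # Illustrative asynchronous usage (assuming `gs`, `predictors`, `response` and `train_frame`
    # are already defined):
    #   gs.start(x=predictors, y=response, training_frame=train_frame)
    #   ...do other work while the grid builds...
    #   gs.join()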
def train(self, x=None, y=None, training_frame=None, offset_column=None, fold_column=None, weights_column=None,
validation_frame=None, **params):
"""
Train the model synchronously (i.e. do not return until the model finishes training).
To train asynchronously call :meth:`start`.
:param x: A list of column names or indices indicating the predictor columns.
:param y: An index or a column name indicating the response column.
:param training_frame: The H2OFrame having the columns indicated by x and y (as well as any
additional columns specified by fold, offset, and weights).
:param offset_column: The name or index of the column in training_frame that holds the offsets.
:param fold_column: The name or index of the column in training_frame that holds the per-row fold
assignments.
:param weights_column: The name or index of the column in training_frame that holds the per-row weights.
:param validation_frame: H2OFrame with validation data to be scored on while training.
"""
algo_params = locals()
parms = self._parms.copy()
parms.update({k: v for k, v in algo_params.items() if k not in ["self", "params", "algo_params", "parms"]})
# dictionaries have special handling in grid search, avoid the implicit conversion
parms["search_criteria"] = None if self.search_criteria is None else str(self.search_criteria)
parms["export_checkpoints_dir"] = self.export_checkpoints_dir
parms["hyper_parameters"] = None if self.hyper_params is None else str(self.hyper_params) # unique to grid search
parms.update({k: v for k, v in list(self.model._parms.items()) if v is not None}) # unique to grid search
parms.update(params)
        if '__class__' in parms: # FIXME: hack for PY3
del parms['__class__']
y = algo_params["y"]
tframe = algo_params["training_frame"]
if tframe is None: raise ValueError("Missing training_frame")
if y is not None:
if is_type(y, list, tuple):
if len(y) == 1:
parms["y"] = y[0]
else:
raise ValueError('y must be a single column reference')
if x is None:
            if isinstance(y, int):
xset = set(range(training_frame.ncols)) - {y}
else:
xset = set(training_frame.names) - {y}
else:
xset = set()
if is_type(x, int, str): x = [x]
for xi in x:
if is_type(xi, int):
if not (-training_frame.ncols <= xi < training_frame.ncols):
raise H2OValueError("Column %d does not exist in the training frame" % xi)
xset.add(training_frame.names[xi])
else:
if xi not in training_frame.names:
raise H2OValueError("Column %s not in the training frame" % xi)
xset.add(xi)
x = list(xset)
parms["x"] = x
self.build_model(parms)
def build_model(self, algo_params):
"""(internal)"""
if algo_params["training_frame"] is None: raise ValueError("Missing training_frame")
x = algo_params.pop("x")
y = algo_params.pop("y", None)
training_frame = algo_params.pop("training_frame")
validation_frame = algo_params.pop("validation_frame", None)
is_auto_encoder = (algo_params is not None) and ("autoencoder" in algo_params and algo_params["autoencoder"])
algo = self.model._compute_algo() # unique to grid search
is_unsupervised = is_auto_encoder or algo == "pca" or algo == "svd" or algo == "kmeans" or algo == "glrm"
if is_auto_encoder and y is not None: raise ValueError("y should not be specified for autoencoder.")
if not is_unsupervised and y is None: raise ValueError("Missing response")
if not is_unsupervised:
y = y if y in training_frame.names else training_frame.names[y]
self.model._estimator_type = "classifier" if training_frame.types[y] == "enum" else "regressor"
self._model_build(x, y, training_frame, validation_frame, algo_params)
def _model_build(self, x, y, tframe, vframe, kwargs):
kwargs['training_frame'] = tframe
if vframe is not None: kwargs["validation_frame"] = vframe
if is_type(y, int): y = tframe.names[y]
if y is not None: kwargs['response_column'] = y
if not is_type(x, list, tuple): x = [x]
if is_type(x[0], int):
x = [tframe.names[i] for i in x]
offset = kwargs["offset_column"]
folds = kwargs["fold_column"]
weights = kwargs["weights_column"]
ignored_columns = list(set(tframe.names) - set(x + [y, offset, folds, weights]))
kwargs["ignored_columns"] = None if not ignored_columns else [quoted(col) for col in ignored_columns]
kwargs = dict([(k, kwargs[k].frame_id if isinstance(kwargs[k], H2OFrame) else kwargs[k]) for k in kwargs if
kwargs[k] is not None]) # gruesome one-liner
algo = self.model._compute_algo() # unique to grid search
if self.grid_id is not None: kwargs["grid_id"] = self.grid_id
rest_ver = kwargs.pop("_rest_version") if "_rest_version" in kwargs else None
grid = H2OJob(h2o.api("POST /99/Grid/%s" % algo, data=kwargs), job_type=(algo + " Grid Build"))
if self._future:
self._job = grid
return
grid.poll()
grid_json = h2o.api("GET /99/Grids/%s" % (grid.dest_key))
failure_messages_stacks = ""
error_index = 0
if len(grid_json["failure_details"]) > 0:
print("Errors/Warnings building gridsearch model\n")
# will raise error if no grid model is returned, store error messages here
for error_message in grid_json["failure_details"]:
if isinstance(grid_json["failed_params"][error_index], dict):
for h_name in grid_json['hyper_names']:
print("Hyper-parameter: {0}, {1}".format(h_name,
grid_json['failed_params'][error_index][h_name]))
if len(grid_json["failure_stack_traces"]) > error_index:
print("failure_details: {0}\nfailure_stack_traces: "
"{1}\n".format(error_message, grid_json['failure_stack_traces'][error_index]))
failure_messages_stacks += error_message+'\n'
error_index += 1
self.models = [h2o.get_model(key['name']) for key in grid_json['model_ids']]
for model in self.models:
model._estimator_type = self.model._estimator_type
# get first model returned in list of models from grid search to get model class (binomial, multinomial, etc)
# sometimes no model is returned due to bad parameter values provided by the user.
if len(grid_json['model_ids']) > 0:
first_model_json = h2o.api("GET /%d/Models/%s" %
(rest_ver or 3, grid_json['model_ids'][0]['name']))['models'][0]
self._resolve_grid(grid.dest_key, grid_json, first_model_json)
else:
            if len(failure_messages_stacks) > 0:
                raise ValueError(failure_messages_stacks)
            else:
                raise ValueError("Grid search returned no model due to bad parameter values or some other reason.")
def _resolve_grid(self, grid_id, grid_json, first_model_json):
model_class = H2OGridSearch._metrics_class(first_model_json)
m = model_class()
m._id = grid_id
m._grid_json = grid_json
# m._metrics_class = metrics_class
m._parms = self._parms
self.export_checkpoints_dir = m._grid_json["export_checkpoints_dir"]
H2OEstimator.mixin(self, model_class)
self.__dict__.update(m.__dict__.copy())
def __getitem__(self, item):
return self.models[item]
def __iter__(self):
nmodels = len(self.models)
return (self[i] for i in range(nmodels))
def __len__(self):
return len(self.models)
def __repr__(self):
self.show()
return ""
def predict(self, test_data):
"""
Predict on a dataset.
:param H2OFrame test_data: Data to be predicted on.
:returns: H2OFrame filled with predictions.
"""
return {model.model_id: model.predict(test_data) for model in self.models}
def is_cross_validated(self):
"""Return True if the model was cross-validated."""
return {model.model_id: model.is_cross_validated() for model in self.models}
def xval_keys(self):
"""Model keys for the cross-validated model."""
return {model.model_id: model.xval_keys() for model in self.models}
def get_xval_models(self, key=None):
"""
Return a Model object.
:param str key: If None, return all cross-validated models; otherwise return the model
specified by the key.
:returns: A model or a list of models.
"""
return {model.model_id: model.get_xval_models(key) for model in self.models}
def xvals(self):
"""Return the list of cross-validated models."""
return {model.model_id: model.xvals for model in self.models}
def deepfeatures(self, test_data, layer):
"""
Obtain a hidden layer's details on a dataset.
:param test_data: Data to create a feature space on.
:param int layer: Index of the hidden layer.
:returns: A dictionary of hidden layer details for each model.
"""
return {model.model_id: model.deepfeatures(test_data, layer) for model in self.models}
def weights(self, matrix_id=0):
"""
Return the frame for the respective weight matrix.
        :param matrix_id: an integer, ranging from 0 to the number of layers, that specifies the weight matrix to return.
:returns: an H2OFrame which represents the weight matrix identified by matrix_id
"""
return {model.model_id: model.weights(matrix_id) for model in self.models}
def biases(self, vector_id=0):
"""
Return the frame for the respective bias vector.
        :param vector_id: an integer, ranging from 0 to the number of layers, that specifies the bias vector to return.
:returns: an H2OFrame which represents the bias vector identified by vector_id
"""
return {model.model_id: model.biases(vector_id) for model in self.models}
def normmul(self):
"""Normalization/Standardization multipliers for numeric predictors."""
return {model.model_id: model.normmul() for model in self.models}
def normsub(self):
"""Normalization/Standardization offsets for numeric predictors."""
return {model.model_id: model.normsub() for model in self.models}
def respmul(self):
"""Normalization/Standardization multipliers for numeric response."""
return {model.model_id: model.respmul() for model in self.models}
def respsub(self):
"""Normalization/Standardization offsets for numeric response."""
return {model.model_id: model.respsub() for model in self.models}
def catoffsets(self):
"""
Categorical offsets for one-hot encoding
"""
return {model.model_id: model.catoffsets() for model in self.models}
def model_performance(self, test_data=None, train=False, valid=False, xval=False):
"""
Generate model metrics for this model on test_data.
:param test_data: Data set for which model metrics shall be computed against. All three of train, valid
and xval arguments are ignored if test_data is not None.
:param train: Report the training metrics for the model.
:param valid: Report the validation metrics for the model.
        :param xval: Report the cross-validation metrics for the model.
:return: An object of class H2OModelMetrics.
"""
return {model.model_id: model.model_performance(test_data, train, valid, xval) for model in self.models}
def scoring_history(self):
"""
Retrieve model scoring history.
:returns: Score history (H2OTwoDimTable)
"""
return {model.model_id: model.scoring_history() for model in self.models}
def summary(self, header=True):
"""Print a detailed summary of the explored models."""
table = []
for model in self.models:
model_summary = model._model_json["output"]["model_summary"]
r_values = list(model_summary.cell_values[0])
r_values[0] = model.model_id
table.append(r_values)
# if h2o.can_use_pandas():
# import pandas
# pandas.options.display.max_rows = 20
# print pandas.DataFrame(table,columns=self.col_header)
# return
print()
if header:
print('Grid Summary:')
print()
H2ODisplay(table, header=['Model Id'] + model_summary.col_header[1:], numalign="left", stralign="left")
def show(self):
"""Print models sorted by metric."""
hyper_combos = itertools.product(*list(self.hyper_params.values()))
if not self.models:
c_values = [[idx + 1, list(val)] for idx, val in enumerate(hyper_combos)]
print(H2OTwoDimTable(
col_header=['Model', 'Hyperparameters: [' + ', '.join(list(self.hyper_params.keys())) + ']'],
table_header='Grid Search of Model ' + self.model.__class__.__name__, cell_values=c_values))
else:
print(self.sorted_metric_table())
def varimp(self, use_pandas=False):
"""
Pretty print the variable importances, or return them in a list/pandas DataFrame.
:param bool use_pandas: If True, then the variable importances will be returned as a pandas data frame.
:returns: A dictionary of lists or Pandas DataFrame instances.
"""
return {model.model_id: model.varimp(use_pandas) for model in self.models}
def residual_deviance(self, train=False, valid=False, xval=False):
"""
        Retrieve the residual deviance if this model has the attribute, or None otherwise.
:param bool train: Get the residual deviance for the training set. If both train and valid are False,
then train is selected by default.
:param bool valid: Get the residual deviance for the validation set. If both train and valid are True,
then train is selected by default.
:param bool xval: Get the residual deviance for the cross-validated models.
:returns: the residual deviance, or None if it is not present.
"""
return {model.model_id: model.residual_deviance(train, valid, xval) for model in self.models}
def residual_degrees_of_freedom(self, train=False, valid=False, xval=False):
"""
        Retrieve the residual degrees of freedom if this model has the attribute, or None otherwise.
:param bool train: Get the residual dof for the training set. If both train and valid are False, then
train is selected by default.
:param bool valid: Get the residual dof for the validation set. If both train and valid are True, then
train is selected by default.
:param bool xval: Get the residual dof for the cross-validated models.
:returns: the residual degrees of freedom, or None if they are not present.
"""
return {model.model_id: model.residual_degrees_of_freedom(train, valid, xval) for model in self.models}
def null_deviance(self, train=False, valid=False, xval=False):
"""
        Retrieve the null deviance if this model has the attribute, or None otherwise.
:param bool train: Get the null deviance for the training set. If both train and valid are False, then
train is selected by default.
:param bool valid: Get the null deviance for the validation set. If both train and valid are True, then
train is selected by default.
:param bool xval: Get the null deviance for the cross-validated models.
:returns: the null deviance, or None if it is not present.
"""
return {model.model_id: model.null_deviance(train, valid, xval) for model in self.models}
def null_degrees_of_freedom(self, train=False, valid=False, xval=False):
"""
        Retrieve the null degrees of freedom if this model has the attribute, or None otherwise.
:param bool train: Get the null dof for the training set. If both train and valid are False, then train is
selected by default.
:param bool valid: Get the null dof for the validation set. If both train and valid are True, then train is
selected by default.
:param bool xval: Get the null dof for the cross-validated models.
:returns: the null dof, or None if it is not present.
"""
return {model.model_id: model.null_degrees_of_freedom(train, valid, xval) for model in self.models}
def pprint_coef(self):
"""Pretty print the coefficents table (includes normalized coefficients)."""
for i, model in enumerate(self.models):
print('Model', i)
model.pprint_coef()
print()
def coef(self):
"""Return the coefficients that can be applied to the non-standardized data.
Note: standardize = True by default. If set to False, then coef() returns the coefficients that are fit directly.
"""
return {model.model_id: model.coef() for model in self.models}
def coef_norm(self):
"""Return coefficients fitted on the standardized data (requires standardize = True, which is on by default). These coefficients can be used to evaluate variable importance.
"""
return {model.model_id: model.coef_norm() for model in self.models}
def r2(self, train=False, valid=False, xval=False):
"""
Return the R^2 for this regression model.
The R^2 value is defined to be ``1 - MSE/var``, where ``var`` is computed as ``sigma^2``.
If all are False (default), then return the training metric value.
        If more than one option is set to True, then return a dictionary of metrics where the keys are "train",
"valid", and "xval".
:param bool train: If train is True, then return the R^2 value for the training data.
:param bool valid: If valid is True, then return the R^2 value for the validation data.
:param bool xval: If xval is True, then return the R^2 value for the cross validation data.
:returns: The R^2 for this regression model.
"""
return {model.model_id: model.r2(train, valid, xval) for model in self.models}
def mse(self, train=False, valid=False, xval=False):
"""
Get the MSE(s).
If all are False (default), then return the training metric value.
        If more than one option is set to True, then return a dictionary of metrics where the keys are "train",
"valid", and "xval".
:param bool train: If train is True, then return the MSE value for the training data.
:param bool valid: If valid is True, then return the MSE value for the validation data.
:param bool xval: If xval is True, then return the MSE value for the cross validation data.
:returns: The MSE for this regression model.
"""
return {model.model_id: model.mse(train, valid, xval) for model in self.models}
def logloss(self, train=False, valid=False, xval=False):
"""
Get the Log Loss(s).
If all are False (default), then return the training metric value.
        If more than one option is set to True, then return a dictionary of metrics where the keys are "train",
"valid", and "xval".
:param bool train: If train is True, then return the Log Loss value for the training data.
:param bool valid: If valid is True, then return the Log Loss value for the validation data.
:param bool xval: If xval is True, then return the Log Loss value for the cross validation data.
:returns: The Log Loss for this binomial model.
"""
return {model.model_id: model.logloss(train, valid, xval) for model in self.models}
def mean_residual_deviance(self, train=False, valid=False, xval=False):
"""
        Get the Mean Residual Deviance(s).
        If all are False (default), then return the training metric value.
        If more than one option is set to True, then return a dictionary of metrics where the keys are "train",
"valid", and "xval".
:param bool train: If train is True, then return the Mean Residual Deviance value for the training data.
:param bool valid: If valid is True, then return the Mean Residual Deviance value for the validation data.
:param bool xval: If xval is True, then return the Mean Residual Deviance value for the cross validation data.
:returns: The Mean Residual Deviance for this regression model.
"""
return {model.model_id: model.mean_residual_deviance(train, valid, xval) for model in self.models}
def auc(self, train=False, valid=False, xval=False):
"""
Get the AUC(s).
If all are False (default), then return the training metric value.
        If more than one option is set to True, then return a dictionary of metrics where the keys are "train",
        "valid", and "xval".
        :param bool train: If train is True, then return the AUC value for the training data.
        :param bool valid: If valid is True, then return the AUC value for the validation data.
        :param bool xval: If xval is True, then return the AUC value for the cross validation data.
:returns: The AUC.
"""
return {model.model_id: model.auc(train, valid, xval) for model in self.models}
def aic(self, train=False, valid=False, xval=False):
"""
Get the AIC(s).
If all are False (default), then return the training metric value.
        If more than one option is set to True, then return a dictionary of metrics where the keys are "train",
        "valid", and "xval".
        :param bool train: If train is True, then return the AIC value for the training data.
        :param bool valid: If valid is True, then return the AIC value for the validation data.
        :param bool xval: If xval is True, then return the AIC value for the cross validation data.
:returns: The AIC.
"""
return {model.model_id: model.aic(train, valid, xval) for model in self.models}
def gini(self, train=False, valid=False, xval=False):
"""
Get the Gini Coefficient(s).
If all are False (default), then return the training metric value.
        If more than one option is set to True, then return a dictionary of metrics where the keys are "train",
"valid", and "xval".
:param bool train: If train is True, then return the Gini Coefficient value for the training data.
:param bool valid: If valid is True, then return the Gini Coefficient value for the validation data.
:param bool xval: If xval is True, then return the Gini Coefficient value for the cross validation data.
:returns: The Gini Coefficient for this binomial model.
"""
return {model.model_id: model.gini(train, valid, xval) for model in self.models}
def get_hyperparams(self, id, display=True):
"""
Get the hyperparameters of a model explored by grid search.
:param str id: The model id of the model with hyperparameters of interest.
:param bool display: Flag to indicate whether to display the hyperparameter names.
:returns: A list of the hyperparameters for the specified model.
"""
idx = id if is_type(id, int) else self.model_ids.index(id)
model = self[idx]
        # if cross-validation is turned on, one of the fold models (and not the main model that is returned)
        # actually contains the max_runtime_secs parameter
if model._is_xvalidated:
model = h2o.get_model(model._xval_keys[0])
res = [model.params[h]['actual'][0] if isinstance(model.params[h]['actual'], list)
else model.params[h]['actual']
for h in self.hyper_params]
if display: print('Hyperparameters: [' + ', '.join(list(self.hyper_params.keys())) + ']')
return res
def get_hyperparams_dict(self, id, display=True):
"""
        Derive and return the model parameters used to train this particular grid search model.
:param str id: The model id of the model with hyperparameters of interest.
:param bool display: Flag to indicate whether to display the hyperparameter names.
        :returns: A dict of model parameters derived from the hyper-parameters used to train this particular model.
"""
idx = id if is_type(id, int) else self.model_ids.index(id)
model = self[idx]
model_params = dict()
        # if cross-validation is turned on, one of the fold models (and not the main model that is returned)
        # actually contains the max_runtime_secs parameter
if model._is_xvalidated:
model = h2o.get_model(model._xval_keys[0])
for param_name in self.hyper_names:
model_params[param_name] = model.params[param_name]['actual'][0] if \
isinstance(model.params[param_name]['actual'], list) else model.params[param_name]['actual']
if display: print('Hyperparameters: [' + ', '.join(list(self.hyper_params.keys())) + ']')
return model_params
def sorted_metric_table(self):
"""
Retrieve summary table of an H2O Grid Search.
:returns: The summary table as an H2OTwoDimTable or a Pandas DataFrame.
"""
summary = self._grid_json["summary_table"]
if summary is not None: return summary.as_data_frame()
print("No sorted metric table for this grid search")
@staticmethod
def _metrics_class(model_json):
model_type = model_json["output"]["model_category"]
if model_type == "Binomial":
model_class = H2OBinomialGridSearch
elif model_type == "Clustering":
model_class = H2OClusteringGridSearch
elif model_type == "Regression":
model_class = H2ORegressionGridSearch
elif model_type == "Multinomial":
model_class = H2OMultinomialGridSearch
elif model_type == "Ordinal":
model_class = H2OOrdinalGridSearch
elif model_type == "AutoEncoder":
model_class = H2OAutoEncoderGridSearch
elif model_type == "DimReduction":
model_class = H2ODimReductionGridSearch
else:
raise NotImplementedError(model_type)
return model_class
def get_grid(self, sort_by=None, decreasing=None):
"""
Retrieve an H2OGridSearch instance.
Optionally specify a metric by which to sort models and a sort order.
Note that if neither cross-validation nor a validation frame is used in the grid search, then the
training metrics will display in the "get grid" output. If a validation frame is passed to the grid, and
``nfolds = 0``, then the validation metrics will display. However, if ``nfolds`` > 1, then cross-validation
metrics will display even if a validation frame is provided.
:param str sort_by: A metric by which to sort the models in the grid space. Choices are: ``"logloss"``,
``"residual_deviance"``, ``"mse"``, ``"auc"``, ``"r2"``, ``"accuracy"``, ``"precision"``, ``"recall"``,
``"f1"``, etc.
:param bool decreasing: Sort the models in decreasing order of metric if true, otherwise sort in increasing
order (default).
:returns: A new H2OGridSearch instance optionally sorted on the specified metric.
"""
if sort_by is None and decreasing is None: return self
grid_json = h2o.api("GET /99/Grids/%s" % self._id, data={"sort_by": sort_by, "decreasing": decreasing})
grid = H2OGridSearch(self.model, self.hyper_params, self._id)
grid.models = [h2o.get_model(key['name']) for key in grid_json['model_ids']] # reordered
first_model_json = h2o.api("GET /99/Models/%s" % grid_json['model_ids'][0]['name'])['models'][0]
model_class = H2OGridSearch._metrics_class(first_model_json)
m = model_class()
m._id = self._id
m._grid_json = grid_json
# m._metrics_class = metrics_class
m._parms = grid._parms
H2OEstimator.mixin(grid, model_class)
grid.__dict__.update(m.__dict__.copy())
return grid
# Deprecated functions; left here for backward compatibility
_bcim = {
"giniCoef": lambda self, *args, **kwargs: self.gini(*args, **kwargs)
}
@deprecated("grid.sort_by() is deprecated; use grid.get_grid() instead")
def sort_by(self, metric, increasing=True):
"""Deprecated since 2016-12-12, use grid.get_grid() instead."""
if metric[-1] != ')': metric += '()'
c_values = [list(x) for x in zip(*sorted(eval('self.' + metric + '.items()'), key=lambda k_v: k_v[1]))]
c_values.insert(1, [self.get_hyperparams(model_id, display=False) for model_id in c_values[0]])
if not increasing:
for col in c_values: col.reverse()
if metric[-2] == '(': metric = metric[:-2]
return H2OTwoDimTable(
col_header=['Model Id', 'Hyperparameters: [' + ', '.join(list(self.hyper_params.keys())) + ']', metric],
table_header='Grid Search Results for ' + self.model.__class__.__name__,
cell_values=[list(x) for x in zip(*c_values)])
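# The block below is a minimal, hedged usage sketch and is not part of the library itself:
# it assumes a reachable H2O cluster and the H2OGradientBoostingEstimator; the dataset URL,
# column names and hyper-parameter values are illustrative only.
if __name__ == "__main__":
    from h2o.estimators.gbm import H2OGradientBoostingEstimator
    h2o.init()
    iris = h2o.import_file("https://h2o-public-test-data.s3.amazonaws.com/smalldata/iris/iris_wheader.csv")
    hyper_params = {"ntrees": [10, 50], "max_depth": [3, 5]}
    grid = H2OGridSearch(H2OGradientBoostingEstimator, hyper_params)
    grid.train(x=["sepal_len", "sepal_wid", "petal_len", "petal_wid"], y="class", training_frame=iris)
    # Sort the explored models by log loss before inspecting them.
    print(grid.get_grid(sort_by="logloss", decreasing=False))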
|
py | 1a2fa1bf108cc114e4925cc6529c2d4f8504e65a | # -*- coding: utf-8 -*-
import os
from nbformat.v4.nbbase import new_notebook, new_code_cell, new_markdown_cell, new_raw_cell
from jupytext.compare import compare, compare_notebooks
import jupytext
def test_read_simple_file(script="""# ---
# title: Simple file
# ---
# %% [markdown]
# This is a markdown cell
# %% [md]
# This is also a markdown cell
# %% [raw]
# This is a raw cell
# %%% sub-cell title
# This is a sub-cell
# %%%% sub-sub-cell title
# This is a sub-sub-cell
# %% And now a code cell
1 + 2 + 3 + 4
5
6
# %%magic # this is a commented magic, not a cell
7
"""):
nb = jupytext.reads(script, 'py:percent')
compare_notebooks(new_notebook(cells=[
new_raw_cell('---\ntitle: Simple file\n---'),
new_markdown_cell('This is a markdown cell'),
new_markdown_cell('This is also a markdown cell', metadata={'region_name': 'md'}),
new_raw_cell('This is a raw cell'),
new_code_cell('# This is a sub-cell', metadata={'title': 'sub-cell title', 'cell_depth': 1}),
new_code_cell('# This is a sub-sub-cell', metadata={'title': 'sub-sub-cell title', 'cell_depth': 2}),
new_code_cell('''1 + 2 + 3 + 4
5
6
%%magic # this is a commented magic, not a cell
7''', metadata={'title': 'And now a code cell'})]), nb)
script2 = jupytext.writes(nb, 'py:percent')
compare(script2, script)
def test_read_cell_with_metadata(
script="""# %% a code cell with parameters {"tags": ["parameters"]}
a = 3
"""):
nb = jupytext.reads(script, 'py:percent')
assert len(nb.cells) == 1
assert nb.cells[0].cell_type == 'code'
assert nb.cells[0].source == 'a = 3'
assert nb.cells[0].metadata == {
'title': 'a code cell with parameters',
'tags': ['parameters']}
script2 = jupytext.writes(nb, 'py:percent')
compare(script2, script)
def test_read_nbconvert_script(script="""
# coding: utf-8
# A markdown cell
# In[1]:
import pandas as pd
pd.options.display.max_rows = 6
pd.options.display.max_columns = 20
# Another markdown cell
# In[2]:
1 + 1
# Again, a markdown cell
# In[33]:
2 + 2
# <codecell>
3 + 3
"""):
assert jupytext.formats.guess_format(script, '.py')[0] == 'percent'
nb = jupytext.reads(script, '.py')
assert len(nb.cells) == 5
def test_read_remove_blank_lines(script="""# %%
import pandas as pd
# %% Display a data frame
df = pd.DataFrame({'A': [1, 2], 'B': [3, 4]},
index=pd.Index(['x0', 'x1'], name='x'))
df
# %% Pandas plot {"tags": ["parameters"]}
df.plot(kind='bar')
# %% sample class
class MyClass:
pass
# %% a function
def f(x):
return 42 * x
"""):
nb = jupytext.reads(script, 'py')
assert len(nb.cells) == 5
for i in range(5):
assert nb.cells[i].cell_type == 'code'
assert not nb.cells[i].source.startswith('\n')
assert not nb.cells[i].source.endswith('\n')
script2 = jupytext.writes(nb, 'py:percent')
compare(script2, script)
def test_no_crash_on_square_bracket(script="""# %% In [2]
print('Hello')
"""):
nb = jupytext.reads(script, 'py')
script2 = jupytext.writes(nb, 'py:percent')
compare(script2, script)
def test_nbconvert_cell(script="""# In[2]:
print('Hello')
"""):
nb = jupytext.reads(script, 'py')
script2 = jupytext.writes(nb, 'py:percent')
expected = """# %%
print('Hello')
"""
compare(script2, expected)
def test_nbformat_v3_nbpy_cell(script="""# <codecell>
print('Hello')
"""):
nb = jupytext.reads(script, 'py')
script2 = jupytext.writes(nb, 'py:percent')
expected = """# %%
print('Hello')
"""
compare(script2, expected)
def test_multiple_empty_cells():
nb = new_notebook(cells=[new_code_cell(), new_code_cell(), new_code_cell()],
metadata={'jupytext': {'notebook_metadata_filter': '-all'}})
text = jupytext.writes(nb, 'py:percent')
expected = """# %%
# %%
# %%
"""
compare(text, expected)
nb2 = jupytext.reads(text, 'py:percent')
nb2.metadata = nb.metadata
compare(nb2, nb)
def test_first_cell_markdown_191():
text = """# %% [markdown]
# Docstring
# %%
from math import pi
# %% [markdown]
# Another markdown cell
"""
nb = jupytext.reads(text, 'py')
assert nb.cells[0].cell_type == 'markdown'
assert nb.cells[1].cell_type == 'code'
assert nb.cells[2].cell_type == 'markdown'
def test_multiline_comments_in_markdown_1():
text = """# %% [markdown]
'''
a
long
cell
'''
"""
nb = jupytext.reads(text, 'py')
assert len(nb.cells) == 1
assert nb.cells[0].cell_type == 'markdown'
assert nb.cells[0].source == "a\nlong\ncell"
py = jupytext.writes(nb, 'py')
compare(py, text)
def test_multiline_comments_in_markdown_2():
text = '''# %% [markdown]
"""
a
long
cell
"""
'''
nb = jupytext.reads(text, 'py')
assert len(nb.cells) == 1
assert nb.cells[0].cell_type == 'markdown'
assert nb.cells[0].source == "a\nlong\ncell"
py = jupytext.writes(nb, 'py')
compare(py, text)
def test_multiline_comments_format_option():
text = '''# %% [markdown]
"""
a
long
cell
"""
'''
nb = new_notebook(cells=[new_markdown_cell("a\nlong\ncell")],
metadata={'jupytext': {'cell_markers': '"""',
'notebook_metadata_filter': '-all'}})
py = jupytext.writes(nb, 'py:percent')
compare(py, text)
def test_multiline_comments_in_raw_cell():
text = '''# %% [raw]
"""
some
text
"""
'''
nb = jupytext.reads(text, 'py')
assert len(nb.cells) == 1
assert nb.cells[0].cell_type == 'raw'
assert nb.cells[0].source == "some\ntext"
py = jupytext.writes(nb, 'py')
compare(py, text)
def test_multiline_comments_in_markdown_cell_no_line_return():
text = '''# %% [markdown]
"""a
long
cell"""
'''
nb = jupytext.reads(text, 'py')
assert len(nb.cells) == 1
assert nb.cells[0].cell_type == 'markdown'
assert nb.cells[0].source == "a\nlong\ncell"
def test_multiline_comments_in_markdown_cell_is_robust_to_additional_cell_marker():
text = '''# %% [markdown]
"""
some text, and a fake cell marker
# %% [raw]
"""
'''
nb = jupytext.reads(text, 'py')
assert len(nb.cells) == 1
assert nb.cells[0].cell_type == 'markdown'
assert nb.cells[0].source == "some text, and a fake cell marker\n# %% [raw]"
py = jupytext.writes(nb, 'py')
compare(py, text)
def test_cell_markers_option_in_contents_manager(tmpdir):
tmp_ipynb = str(tmpdir.join('notebook.ipynb'))
tmp_py = str(tmpdir.join('notebook.py'))
cm = jupytext.TextFileContentsManager()
cm.root_dir = str(tmpdir)
nb = new_notebook(cells=[new_code_cell('1 + 1'), new_markdown_cell('a\nlong\ncell')],
metadata={'jupytext': {'formats': 'ipynb,py:percent',
'notebook_metadata_filter': '-all',
'cell_markers': "'''"}})
cm.save(model=dict(type='notebook', content=nb), path='notebook.ipynb')
assert os.path.isfile(tmp_ipynb)
assert os.path.isfile(tmp_py)
with open(tmp_py) as fp:
text = fp.read()
compare(text, """# %%
1 + 1
# %% [markdown]
'''
a
long
cell
'''
""")
nb2 = jupytext.read(tmp_py)
compare_notebooks(nb, nb2)
def test_default_cell_markers_in_contents_manager(tmpdir):
tmp_ipynb = str(tmpdir.join('notebook.ipynb'))
tmp_py = str(tmpdir.join('notebook.py'))
cm = jupytext.TextFileContentsManager()
cm.root_dir = str(tmpdir)
cm.default_cell_markers = "'''"
nb = new_notebook(cells=[new_code_cell('1 + 1'), new_markdown_cell('a\nlong\ncell')],
metadata={'jupytext': {'formats': 'ipynb,py:percent',
'notebook_metadata_filter': '-all'}})
cm.save(model=dict(type='notebook', content=nb), path='notebook.ipynb')
assert os.path.isfile(tmp_ipynb)
assert os.path.isfile(tmp_py)
with open(tmp_py) as fp:
text = fp.read()
compare(text, """# %%
1 + 1
# %% [markdown]
'''
a
long
cell
'''
""")
nb2 = jupytext.read(tmp_py)
compare_notebooks(nb, nb2)
def test_default_cell_markers_in_contents_manager_does_not_impact_light_format(tmpdir):
tmp_ipynb = str(tmpdir.join('notebook.ipynb'))
tmp_py = str(tmpdir.join('notebook.py'))
cm = jupytext.TextFileContentsManager()
cm.root_dir = str(tmpdir)
cm.default_cell_markers = "'''"
nb = new_notebook(cells=[new_code_cell('1 + 1'), new_markdown_cell('a\nlong\ncell')],
metadata={'jupytext': {'formats': 'ipynb,py',
'notebook_metadata_filter': '-all'}})
cm.save(model=dict(type='notebook', content=nb), path='notebook.ipynb')
assert os.path.isfile(tmp_ipynb)
assert os.path.isfile(tmp_py)
with open(tmp_py) as fp:
text = fp.read()
compare(text, """1 + 1
# a
# long
# cell
""")
nb2 = jupytext.read(tmp_py)
compare_notebooks(nb, nb2)
def test_single_triple_quote_works(no_jupytext_version_number, text='''# ---
# jupyter:
# jupytext:
# cell_markers: '"""'
# formats: ipynb,py:percent
# text_representation:
# extension: .py
# format_name: percent
# ---
# %%
print("hello")
''', notebook=new_notebook(cells=[new_code_cell('print("hello")')])):
compare_notebooks(jupytext.reads(text, 'py'), notebook)
def test_docstring_with_quadruple_quote(nb=new_notebook(cells=[
new_code_cell('''def fun_1(df):
""""
docstring starting with 4 double quotes and ending with 3
"""
return df'''),
new_code_cell('''def fun_2(df):
"""
docstring
"""
return df''')
])):
"""Reproduces https://github.com/mwouts/jupytext/issues/460"""
py = jupytext.writes(nb, 'py:percent')
nb2 = jupytext.reads(py, 'py')
compare_notebooks(nb2, nb)
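# A small additional check, included here only as a hedged sketch (it is not part of the
# original test suite): reading a minimal percent script should yield one code cell and one
# markdown cell with the expected sources.
def test_minimal_percent_script_sketch():
    text = "# %%\nx = 1\n\n# %% [markdown]\n# A note\n"
    nb = jupytext.reads(text, 'py:percent')
    assert [cell.cell_type for cell in nb.cells] == ['code', 'markdown']
    assert nb.cells[0].source == 'x = 1'
    assert nb.cells[1].source == 'A note'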
|
py | 1a2fa20ce8699c0d89d64e49ca36b015ea10ffd0 | # Configuration file for the skipper script
intro_duration=60 # Duration in seconds, change this value
|
py | 1a2fa219503a17c62a8de6cd825611b4b91aa4eb | all_item_types = {
'cards': 'createCard',
'boards': 'createBoard',
'lists': 'createList',
'comments': 'commentCard',
'createChecklist': 'addChecklistToCard',
'updateCheck': 'updateCheckItemStateOnCard',
'moveCard': 'updateCard'
} |
py | 1a2fa29986fe3882f57c423208672f82034bd1e3 | if __name__ == "__main__":
import os
import sys
sys.path.append(os.getcwd() + "/../../")
import pandas as pd
import itertools
from kge_from_text import folder_definitions as fd
import kge_from_text.models.term_embeddings as tt
import kge_from_text.bridges.clean_bridge as bridge
from kge_from_text.evaluators.evaluator_handler import EvaluatorHandler
from kge_from_text.evaluators.analogy_evaluator import AnalogyEvaluator
import kge_from_text.models.tee_embeddings as tee
combinations = [(5, 400), (5, 500)]
entity_vector_name = "2016_data/entity_vectors"
type_vector_name = "2016_data/type_vectors"
conactenated_name = "2016_data/concatenated_vectors"
conactenated_name_time = "2016_data/concatenated_vectors_time"
temporal_csv = "2016_data/temporal_vectors.csv"
annotated_entity_file = "2016_data/annotated_text_with_entities"
annotated_type_file = "2016_data/annotated_text_with_types"
type_of_entity_file = "2016_data/type_to_entity_data.ttl"
pure_text_model = "2016_data/text_with_words"
# Declare An Evaluator
evalu = EvaluatorHandler(fd.EVALUATION_RESULTS_ROOT, name="word_base")
for w_e, s_e in combinations:
# ENTITY
model_w = tt.TermEmbedding("text")
model_w.fit(input_text=fd.STARTING_DATA_ROOT + pure_text_model,
output_file_path=fd.PRODUCED_MODELS_ROOT + "2016_data/", _size=s_e, _window=w_e, load_model_if_exits = True)
analogies = pd.read_csv(fd.GOLD_STANDARDS + "mikolov", names=["First", "Second", "Third", "Fourth"],
sep=" ")
br = bridge.CleanBridge()
analogy_eval = AnalogyEvaluator(br, model_w, analogies)
evalu.run_evaluation(analogy_eval)
analogies = pd.read_csv(fd.GOLD_STANDARDS + "currency", names=["First", "Second", "Third", "Fourth"],
sep=" ")
analogy_eval = AnalogyEvaluator(br, model_w, analogies)
evalu.run_evaluation(analogy_eval)
|
py | 1a2fa2c8737687b828f51c7e64396aca36a6f1fc | # Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import six
import st2common.content.utils as content_utils
from st2common import log as logging
from st2common.constants.meta import ALLOWED_EXTS
from st2common.bootstrap.base import ResourceRegistrar
from st2common.models.api.action import ActionAliasAPI
from st2common.persistence.actionalias import ActionAlias
__all__ = [
'AliasesRegistrar',
'register_aliases'
]
LOG = logging.getLogger(__name__)
class AliasesRegistrar(ResourceRegistrar):
ALLOWED_EXTENSIONS = ALLOWED_EXTS
def register_aliases_from_packs(self, base_dirs):
"""
Discover all the packs in the provided directory and register aliases from all of the
discovered packs.
:return: Number of aliases registered.
:rtype: ``int``
"""
registered_count = 0
content = self._pack_loader.get_content(base_dirs=base_dirs,
content_type='aliases')
for pack, aliases_dir in six.iteritems(content):
try:
LOG.debug('Registering aliases from pack %s:, dir: %s', pack, aliases_dir)
aliases = self._get_aliases_from_pack(aliases_dir)
count = self._register_aliases_from_pack(pack=pack, aliases=aliases)
registered_count += count
except:
LOG.exception('Failed registering all aliases from pack: %s', aliases_dir)
return registered_count
def register_aliases_from_pack(self, pack_dir):
"""
Register all the aliases from the provided pack.
:return: Number of aliases registered.
:rtype: ``int``
"""
pack_dir = pack_dir[:-1] if pack_dir.endswith('/') else pack_dir
_, pack = os.path.split(pack_dir)
aliases_dir = self._pack_loader.get_content_from_pack(pack_dir=pack_dir,
content_type='aliases')
registered_count = 0
if not aliases_dir:
return registered_count
LOG.debug('Registering aliases from pack %s:, dir: %s', pack, aliases_dir)
try:
aliases = self._get_aliases_from_pack(aliases_dir=aliases_dir)
registered_count = self._register_aliases_from_pack(pack=pack, aliases=aliases)
except:
LOG.exception('Failed registering all aliases from pack: %s', aliases_dir)
return 0
return registered_count
def _get_aliases_from_pack(self, aliases_dir):
return self.get_resources_from_pack(resources_dir=aliases_dir)
def _register_action_alias(self, pack, action_alias):
content = self._meta_loader.load(action_alias)
pack_field = content.get('pack', None)
if not pack_field:
content['pack'] = pack
pack_field = pack
if pack_field != pack:
raise Exception('Model is in pack "%s" but field "pack" is different: %s' %
(pack, pack_field))
action_alias_api = ActionAliasAPI(**content)
action_alias_api.validate()
action_alias_db = ActionAliasAPI.to_model(action_alias_api)
try:
action_alias_db.id = ActionAlias.get_by_name(action_alias_api.name).id
except ValueError:
LOG.info('ActionAlias %s not found. Creating new one.', action_alias)
try:
action_alias_db = ActionAlias.add_or_update(action_alias_db)
extra = {'action_alias_db': action_alias_db}
LOG.audit('Action alias updated. Action alias %s from %s.', action_alias_db,
action_alias, extra=extra)
except Exception:
LOG.exception('Failed to create action alias %s.', action_alias_api.name)
raise
def _register_aliases_from_pack(self, pack, aliases):
registered_count = 0
for alias in aliases:
try:
LOG.debug('Loading alias from %s.', alias)
self._register_action_alias(pack, alias)
except Exception:
LOG.exception('Unable to register alias: %s', alias)
continue
else:
registered_count += 1
return registered_count
def register_aliases(packs_base_paths=None, pack_dir=None):
if packs_base_paths:
assert(isinstance(packs_base_paths, list))
if not packs_base_paths:
packs_base_paths = content_utils.get_packs_base_paths()
registrar = AliasesRegistrar()
if pack_dir:
result = registrar.register_aliases_from_pack(pack_dir=pack_dir)
else:
result = registrar.register_aliases_from_packs(base_dirs=packs_base_paths)
return result
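# Hedged usage sketch (not part of st2): registering the aliases of a single pack. It
# assumes the st2 configuration has already been parsed and that the pack directory below
# exists; the path is purely illustrative.
if __name__ == '__main__':
    count = register_aliases(pack_dir='/opt/stackstorm/packs/examples')
    print('Registered %d action aliases' % count)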
|
py | 1a2fa31a8570b50309787abff5676b29d6214a61 | import sys
import firebase_admin
from firebase_admin import credentials
from firebase_admin import firestore
import firebase_admin
from firebase_admin import credentials
import pandas as pd
import numpy as np
import random
# import google_cloud_firestore
from google.cloud import firestore as fs
cred = credentials.Certificate("./keys/firebaseAdminAuth.json")
def getRestaurants():
db = firestore.client()
rst_ref = db.collection(u'root/restaurants/rstList')
docs = rst_ref.stream()
rst_list = []
for doc in docs:
rst_list.append(doc.id)
# print(f'{doc.id} => {doc.to_dict()}')
return rst_list
def checkDocument(docPath):
db = firestore.client()
doc_ref = db.document(docPath)
return doc_ref.get().exists
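# Hedged usage sketch: the document id below is hypothetical and only illustrates the
# collection/document structure expected by checkDocument().
if __name__ == "__main__":
    print(getRestaurants())
    print(checkDocument(u'root/restaurants/rstList/exampleRestaurantId'))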
|
py | 1a2fa3427bc865b29d503ed11ce2365f0c5d78c4 | # Copyright (c) 2020 Graphcore Ltd. All rights reserved.
import tensorflow as tf
from tensorflow.python import ipu
from ipu_tensorflow_addons.keras.layers import Embedding, LSTM
from tensorflow.keras.layers import Dense
from tensorflow.keras.datasets import imdb
from tensorflow.keras.preprocessing import sequence
from tensorflow.keras.optimizers import Adam
if tf.__version__[0] != '2':
raise ImportError("TensorFlow 2 is required for this example")
max_features = 20000
minibatch_size = 32
# Define the dataset.
def get_dataset():
(x_train, y_train), (_, _) = imdb.load_data(num_words=max_features)
x_train = sequence.pad_sequences(x_train, maxlen=80)
ds = tf.data.Dataset.from_tensor_slices((x_train, y_train))
ds = ds.repeat()
ds = ds.map(lambda x, y: (x, tf.cast(y, tf.int32)))
ds = ds.batch(minibatch_size, drop_remainder=True)
return ds
# Define the model.
def get_model():
return tf.keras.Sequential(
[Embedding(max_features, 128),
LSTM(128, dropout=0.2),
Dense(1, activation='sigmoid')])
def main():
# Configure IPUs.
cfg = ipu.config.IPUConfig()
cfg.auto_select_ipus = 1
cfg.configure_ipu_system()
# Set up IPU strategy.
strategy = ipu.ipu_strategy.IPUStrategy()
with strategy.scope():
model = get_model()
model.compile(steps_per_execution=384, loss='binary_crossentropy', optimizer=Adam(0.005))
model.fit(get_dataset(), steps_per_epoch=768, epochs=3)
if __name__ == '__main__':
main()
|
py | 1a2fa34abd33c56738177c4aa8448cf67fd8e4b0 | # Unless explicitly stated otherwise all files in this repository are licensed under the Apache-2.0 License.
# This product includes software developed at Datadog (https://www.datadoghq.com/).
# Copyright 2019-Present Datadog, Inc.
import re # noqa: F401
import sys # noqa: F401
from datadog_api_client.v2.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
def lazy_import():
from datadog_api_client.v2.model.dashboard_list_item_request import DashboardListItemRequest
globals()["DashboardListItemRequest"] = DashboardListItemRequest
class DashboardListDeleteItemsRequest(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {}
validations = {}
additional_properties_type = None
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
lazy_import()
return {
"dashboards": ([DashboardListItemRequest],), # noqa: E501
}
@cached_property
def discriminator():
return None
attribute_map = {
"dashboards": "dashboards", # noqa: E501
}
_composed_schemas = {}
required_properties = set(
[
"_data_store",
"_check_type",
"_spec_property_naming",
"_path_to_item",
"_configuration",
"_visited_composed_classes",
]
)
@convert_js_args_to_python_args
def __init__(self, *args, **kwargs): # noqa: E501
"""DashboardListDeleteItemsRequest - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
dashboards ([DashboardListItemRequest]): List of dashboards to delete from the dashboard list.. [optional] # noqa: E501
"""
_check_type = kwargs.pop("_check_type", True)
_spec_property_naming = kwargs.pop("_spec_property_naming", False)
_path_to_item = kwargs.pop("_path_to_item", ())
_configuration = kwargs.pop("_configuration", None)
_visited_composed_classes = kwargs.pop("_visited_composed_classes", ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments."
% (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
for var_name, var_value in kwargs.items():
if (
var_name not in self.attribute_map
and self._configuration is not None
and self._configuration.discard_unknown_keys
and self.additional_properties_type is None
):
# discard variable.
continue
setattr(self, var_name, var_value)
|
py | 1a2fa3ede57f67addf10c8752e0b455a74edc0de | from .claims import Claims
from .cose import COSE
from .cose_key import COSEKey
from .cwt import (
CWT,
decode,
encode,
encode_and_encrypt,
encode_and_mac,
encode_and_sign,
set_private_claim_names,
)
from .encrypted_cose_key import EncryptedCOSEKey
from .exceptions import CWTError, DecodeError, EncodeError, VerifyError
from .helpers.hcert import load_pem_hcert_dsc
from .recipient import Recipient
from .signer import Signer
__version__ = "1.3.2"
__title__ = "cwt"
__description__ = "A Python implementation of CWT/COSE"
__url__ = "https://python-cwt.readthedocs.io"
__uri__ = __url__
__doc__ = __description__ + " <" + __uri__ + ">"
__author__ = "AJITOMI Daisuke"
__email__ = "[email protected]"
__license__ = "MIT"
__copyright__ = "Copyright 2021 AJITOMI Daisuke"
__all__ = [
"encode",
"encode_and_mac",
"encode_and_sign",
"encode_and_encrypt",
"decode",
"set_private_claim_names",
"CWT",
"COSE",
"COSEKey",
"EncryptedCOSEKey",
"Claims",
"Recipient",
"Signer",
"load_pem_hcert_dsc",
"CWTError",
"EncodeError",
"DecodeError",
"VerifyError",
]
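# A hedged usage sketch (kept as a comment since this is the package __init__). It assumes
# COSEKey.from_symmetric_key is available, as described in the project documentation; the
# claim values are illustrative only:
#
#     from cwt import COSEKey, encode, decode
#
#     key = COSEKey.from_symmetric_key(alg="HS256", kid="01")
#     token = encode({"iss": "coaps://as.example", "sub": "dajiaji"}, key)
#     claims = decode(token, key)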
|
py | 1a2fa456371772b9300572a23d9fc92c7cc9be8a | # Copyright 2013-2018 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Pacvim(MakefilePackage):
"""Pacvim is a command-line-based game based off of Pacman.
The main purpose of this software is to familiarize individuals
with Vim."""
homepage = "https://github.com/jmoon018/PacVim"
url = "https://github.com/jmoon018/PacVim/archive/v1.1.1.tar.gz"
version('1.1.1', sha256='c869c5450fbafdfe8ba8a8a9bba3718775926f276f0552052dcfa090d21acb28')
depends_on('ncurses')
def edit(self, stage, prefix):
makefile = FileFilter('Makefile')
makefile.filter(r'PREFIX = /usr/local',
'PREFIX={0}'.format(self.prefix))
|
py | 1a2fa50945f1a6c08082b410417c53dee9e84e86 | #!/usr/bin/python3.6
activate_this = '/home/ubuntu/flaskapp/venv/bin/activate_this.py'
with open(activate_this) as f:
exec(f.read(), dict(__file__=activate_this))
import sys
import logging
logging.basicConfig(stream=sys.stderr)
sys.path.insert(0,"/home/ubuntu/flaskapp/flaskapp/")
from manage import app as application
if __name__ == "__main__":
application.run()
|
py | 1a2fa5e57cb4657488deed52fa03fa55c4854aa3 | # -*- coding: utf-8 -*-
from __future__ import absolute_import
import theano
import theano.tensor as T
import numpy as np
from .. import activations, initializations
from ..utils.theano_utils import shared_zeros, alloc_zeros_matrix
from ..layers.core import Layer
from .. import regularizers
from six.moves import range
class BLSTM(Layer):
def __init__(self, input_dim, output_dim,
init='glorot_uniform', inner_init='orthogonal',
activation='tanh', inner_activation='hard_sigmoid',
weights=None, truncate_gradient=-1, return_sequences=False,
is_entity=False, regularize=False):
self.is_entity = is_entity
self.input_dim = input_dim
self.output_dim = output_dim
self.truncate_gradient = truncate_gradient
self.return_sequences = return_sequences
self.init = initializations.get(init)
self.inner_init = initializations.get(inner_init)
self.activation = activations.get(activation)
self.inner_activation = activations.get(inner_activation)
self.input = T.tensor3()
self.W_if = self.init((self.input_dim, self.output_dim))
self.W_ib = self.init((self.input_dim, self.output_dim))
self.U_if = self.inner_init((self.output_dim, self.output_dim))
self.U_ib = self.inner_init((self.output_dim, self.output_dim))
self.b_if = shared_zeros((self.output_dim))
self.b_ib = shared_zeros((self.output_dim))
self.W_ff = self.init((self.input_dim, self.output_dim))
self.W_fb = self.init((self.input_dim, self.output_dim))
self.U_ff = self.inner_init((self.output_dim, self.output_dim))
self.U_fb = self.inner_init((self.output_dim, self.output_dim))
self.b_ff = shared_zeros((self.output_dim))
self.b_fb = shared_zeros((self.output_dim))
self.W_cf = self.init((self.input_dim, self.output_dim))
self.W_cb = self.init((self.input_dim, self.output_dim))
self.U_cf = self.inner_init((self.output_dim, self.output_dim))
self.U_cb = self.inner_init((self.output_dim, self.output_dim))
self.b_cf = shared_zeros((self.output_dim))
self.b_cb = shared_zeros((self.output_dim))
self.W_of = self.init((self.input_dim, self.output_dim))
self.W_ob = self.init((self.input_dim, self.output_dim))
self.U_of = self.inner_init((self.output_dim, self.output_dim))
self.U_ob = self.inner_init((self.output_dim, self.output_dim))
self.b_of = shared_zeros((self.output_dim))
self.b_ob = shared_zeros((self.output_dim))
self.W_yf = self.init((self.output_dim, self.output_dim))
self.W_yb = self.init((self.output_dim, self.output_dim))
#self.W_y = self.init((self.output_dim, self.output_dim))
self.b_y = shared_zeros((self.output_dim))
self.params = [
self.W_if, self.U_if, self.b_if,
self.W_ib, self.U_ib, self.b_ib,
self.W_cf, self.U_cf, self.b_cf,
self.W_cb, self.U_cb, self.b_cb,
self.W_ff, self.U_ff, self.b_ff,
self.W_fb, self.U_fb, self.b_fb,
self.W_of, self.U_of, self.b_of,
self.W_ob, self.U_ob, self.b_ob,
self.W_yf, self.W_yb, self.b_y
#self.W_y, self.b_y
]
if regularize:
self.regularizers = []
for i in self.params:
self.regularizers.append(regularizers.my_l2)
if weights is not None:
self.set_weights(weights)
def _step(self,
xi_t, xf_t, xo_t, xc_t,
h_tm1, c_tm1,
u_i, u_f, u_o, u_c):
i_t = self.inner_activation(xi_t + T.dot(h_tm1, u_i))
f_t = self.inner_activation(xf_t + T.dot(h_tm1, u_f))
c_t = f_t * c_tm1 + i_t * self.activation(xc_t + T.dot(h_tm1, u_c))
o_t = self.inner_activation(xo_t + T.dot(h_tm1, u_o))
h_t = o_t * self.activation(c_t)
return h_t, c_t
def output(self, train):
X = self.get_input(train)
X = X.dimshuffle((1,0,2))
if self.is_entity:
Entity = X[-1:].dimshuffle(1,0,2)
X = X[:-1]
b_y = self.b_y
b_yn = T.repeat(T.repeat(b_y.reshape((1,self.output_dim)),X.shape[0],axis=0).reshape((1,X.shape[0],self.output_dim)), X.shape[1], axis=0)
xif = T.dot(X, self.W_if) + self.b_if
xib = T.dot(X, self.W_ib) + self.b_ib
xff = T.dot(X, self.W_ff) + self.b_ff
xfb = T.dot(X, self.W_fb) + self.b_fb
xcf = T.dot(X, self.W_cf) + self.b_cf
xcb = T.dot(X, self.W_cb) + self.b_cb
xof = T.dot(X, self.W_of) + self.b_of
xob = T.dot(X, self.W_ob) + self.b_ob
[outputs_f, memories_f], updates_f = theano.scan(
self._step,
sequences=[xif, xff, xof, xcf],
outputs_info=[
alloc_zeros_matrix(X.shape[1], self.output_dim),
alloc_zeros_matrix(X.shape[1], self.output_dim)
],
non_sequences=[self.U_if, self.U_ff, self.U_of, self.U_cf],
truncate_gradient=self.truncate_gradient
)
[outputs_b, memories_b], updates_b = theano.scan(
self._step,
sequences=[xib, xfb, xob, xcb],
outputs_info=[
alloc_zeros_matrix(X.shape[1], self.output_dim),
alloc_zeros_matrix(X.shape[1], self.output_dim)
],
non_sequences=[self.U_ib, self.U_fb, self.U_ob, self.U_cb],
truncate_gradient=self.truncate_gradient
)
if self.return_sequences:
y = T.add(T.add(
T.tensordot(outputs_f.dimshuffle((1,0,2)), self.W_yf, [[2],[0]]),
T.tensordot(outputs_b[::-1].dimshuffle((1,0,2)), self.W_yb, [[2],[0]])),
b_yn)
# y = T.add(T.tensordot(
# T.add(outputs_f.dimshuffle((1, 0, 2)),
# outputs_b[::-1].dimshuffle((1,0,2))),
# self.W_y,[[2],[0]]),b_yn)
if self.is_entity:
return T.concatenate([y, Entity], axis=1)
else:
return y
return T.concatenate((outputs_f[-1], outputs_b[0]))
def get_config(self):
return {"name":self.__class__.__name__,
"input_dim":self.input_dim,
"output_dim":self.output_dim,
"init":self.init.__name__,
"inner_init":self.inner_init.__name__,
"activation":self.activation.__name__,
"truncate_gradient":self.truncate_gradient,
"return_sequences":self.return_sequences}
class BRNN(Layer):
'''
Fully connected Bi-directional RNN where:
Output at time=t is fed back to input for time=t+1 in a forward pass
Output at time=t is fed back to input for time=t-1 in a backward pass
'''
def __init__(self, input_dim, output_dim,
init='uniform', inner_init='orthogonal', activation='sigmoid', weights=None,
truncate_gradient=-1, return_sequences=False, is_entity=False, regularize=False):
#whyjay
self.is_entity = is_entity
self.init = initializations.get(init)
self.inner_init = initializations.get(inner_init)
self.input_dim = input_dim
self.output_dim = output_dim
self.truncate_gradient = truncate_gradient
self.activation = activations.get(activation)
self.return_sequences = return_sequences
self.input = T.tensor3()
self.W_o = self.init((self.input_dim, self.output_dim))
self.W_if = self.init((self.input_dim, self.output_dim)) # Input -> Forward
self.W_ib = self.init((self.input_dim, self.output_dim)) # Input -> Backward
self.W_ff = self.init((self.output_dim, self.output_dim)) # Forward tm1 -> Forward t
self.W_bb = self.init((self.output_dim, self.output_dim)) # Backward t -> Backward tm1
self.b_if = shared_zeros((self.output_dim))
self.b_ib = shared_zeros((self.output_dim))
self.b_f = shared_zeros((self.output_dim))
self.b_b = shared_zeros((self.output_dim))
self.b_o = shared_zeros((self.output_dim))
self.params = [self.W_o,self.W_if,self.W_ib, self.W_ff, self.W_bb,self.b_if,self.b_ib, self.b_f, self.b_b, self.b_o]
if regularize:
self.regularizers = []
for i in self.params:
self.regularizers.append(regularizers.my_l2)
if weights is not None:
self.set_weights(weights)
def _step(self, x_t, h_tm1, u,b):
return self.activation(x_t + T.dot(h_tm1, u)+b)
def output(self, train):
X = self.get_input(train) # shape: (nb_samples, time (padded with zeros at the end), input_dim)
# new shape: (time, nb_samples, input_dim) -> because theano.scan iterates over main dimension
X = X.dimshuffle((1, 0, 2))
if self.is_entity:
lenX=X.shape[0]
Entity=X[lenX-1:].dimshuffle(1,0,2)
X=X[:lenX-1]
xf = self.activation(T.dot(X, self.W_if) + self.b_if)
xb = self.activation(T.dot(X, self.W_ib) + self.b_ib)
b_o=self.b_o
b_on= T.repeat(T.repeat(b_o.reshape((1,self.output_dim)),X.shape[0],axis=0).reshape((1,X.shape[0],self.output_dim)),X.shape[1],axis=0)
# Iterate forward over the first dimension of the x array (=time).
outputs_f, updates_f = theano.scan(
self._step, # this will be called with arguments (sequences[i], outputs[i-1], non_sequences[i])
sequences=xf, # tensors to iterate over, inputs to _step
# initialization of the output. Input to _step with default tap=-1.
outputs_info=alloc_zeros_matrix(X.shape[1], self.output_dim),
non_sequences=[self.W_ff,self.b_f], # static inputs to _step
truncate_gradient=self.truncate_gradient
)
# Iterate backward over the first dimension of the x array (=time).
outputs_b, updates_b = theano.scan(
self._step, # this will be called with arguments (sequences[i], outputs[i-1], non_sequences[i])
sequences=xb, # tensors to iterate over, inputs to _step
# initialization of the output. Input to _step with default tap=-1.
outputs_info=alloc_zeros_matrix(X.shape[1], self.output_dim),
non_sequences=[self.W_bb,self.b_b], # static inputs to _step
truncate_gradient=self.truncate_gradient,
go_backwards=True # Iterate backwards through time
)
#return outputs_f.dimshuffle((1, 0, 2))
if self.return_sequences:
if self.is_entity:
return T.concatenate([T.add(T.tensordot(T.add(outputs_f.dimshuffle((1, 0, 2)), outputs_b[::-1].dimshuffle((1,0,2))),self.W_o,[[2],[0]]),b_on),Entity],axis=1)
else:
return T.add(T.tensordot(T.add(outputs_f.dimshuffle((1, 0, 2)), outputs_b[::-1].dimshuffle((1,0,2))),self.W_o,[[2],[0]]),b_on)
return T.concatenate((outputs_f[-1], outputs_b[0]))
def get_config(self):
return {"name":self.__class__.__name__,
"input_dim":self.input_dim,
"output_dim":self.output_dim,
"init":self.init.__name__,
"inner_init":self.inner_init.__name__,
"activation":self.activation.__name__,
"truncate_gradient":self.truncate_gradient,
"return_sequences":self.return_sequences}
|
py | 1a2fa736ea02f74aecb934a198adf2cbb76fca03 | import _plotly_utils.basevalidators
class SmoothingValidator(_plotly_utils.basevalidators.NumberValidator):
def __init__(self, plotly_name="smoothing", parent_name="carpet.aaxis", **kwargs):
super(SmoothingValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
max=kwargs.pop("max", 1.3),
min=kwargs.pop("min", 0),
**kwargs,
)
|
py | 1a2fa73a21c9d5f030cc677eaa4d27eaf9c8c2e2 | import numpy as np
from finitewave.core.model import CardiacModel
from finitewave.cpuwave2D.model.aliev_panfilov_2d.aliev_panfilov_kernels_2d \
import AlievPanfilovKernels2D
_npfloat = "float64"
class AlievPanfilov2D(CardiacModel):
def __init__(self):
CardiacModel.__init__(self)
self.v = np.ndarray
self.w = np.ndarray
self.state_vars = ["u", "v"]
self.npfloat = 'float64'
def initialize(self):
super().initialize()
weights_shape = self.cardiac_tissue.weights.shape
shape = self.cardiac_tissue.mesh.shape
self.diffuse_kernel = AlievPanfilovKernels2D().get_diffuse_kernel(weights_shape)
self.ionic_kernel = AlievPanfilovKernels2D().get_ionic_kernel()
self.v = np.zeros(shape, dtype=self.npfloat)
def run_ionic_kernel(self):
self.ionic_kernel(self.u_new, self.u, self.v, self.cardiac_tissue.mesh,
self.dt)
|