ext (string, 9 classes) | sha (string, 40 chars) | content (string, 3 to 1.04M chars)
---|---|---|
py | 1a5126c435572d5ce4c4a1315dc59f841c9446d6 | from graphbrain.meaning.corefs import main_coref
def is_actor(hg, edge):
"""Checks if the edge is a coreference to an actor."""
if edge.type()[0] == 'c':
return hg.exists(('actor/p/.', main_coref(hg, edge)))
else:
return False
def find_actors(hg, edge):
"""Returns set of all coreferences to actors found in the edge."""
actors = set()
if is_actor(hg, edge):
actors.add(main_coref(hg, edge))
if not edge.is_atom():
for item in edge:
actors |= find_actors(hg, item)
return actors
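# Note: find_actors recurses into nested sub-edges, and the set union keeps each actor's
# main coreference only once even if it appears in the edge several times.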
|
py | 1a51276dba6c30c7bbcadc7180a0d7891c8b3359 | '''
Kattis - jackpot
Simply get the LCM of all numbers. Note the property that LCM(a, b, c, ...) = LCM(LCM(a, b), c, ...)
GCD also has this property.
Time: O(n * log(INT_MAX)), Space: O(n), assuming the Euclidean algorithm (gcd) is O(log(INT_MAX))
'''
from math import gcd
def lcm(a, b):
return a * b // gcd(a, b)
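# Illustrative check (not part of the submitted solution): gcd(4, 6) == 2, so lcm(4, 6) == 12;
# folding lcm over the whole array below yields LCM(arr[0], ..., arr[n-1]).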
num_tc = int(input())
for _ in range(num_tc):
n = int(input())
arr = list(map(int, input().split()))
cur = arr[0]
if (n == 1):
print(cur)
continue
for i in range(1, n):
cur = lcm(cur, arr[i])
if (cur > 1e9):
print("More than a billion.")
break
if (i == n-1):
print(cur)
|
py | 1a5127ea5e476e7e5b7497124fad502e5dbaff12 | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for coverage_data_utils.py"""
from unittest import mock
import pandas as pd
import pandas.testing as pd_test
from analysis import coverage_data_utils
FUZZER = 'afl'
BENCHMARK = 'libpng-1.2.56'
EXPERIMENT_FILESTORE_PATH = 'gs://fuzzbench-data/myexperiment'
SAMPLE_DF = pd.DataFrame([{
'experiment_filestore': 'gs://fuzzbench-data',
'experiment': 'exp1',
'fuzzer': FUZZER,
'benchmark': BENCHMARK
}, {
'experiment_filestore': 'gs://fuzzbench-data2',
'experiment': 'exp2',
'fuzzer': 'libfuzzer',
'benchmark': BENCHMARK
}])
def create_coverage_data():
"""Utility function to create test data."""
return {
'afl libpng-1.2.56': [[0, 0, 1, 1], [0, 0, 2, 2], [0, 0, 3, 3]],
'libfuzzer libpng-1.2.56': [[0, 0, 1, 1], [0, 0, 2, 3], [0, 0, 3, 3],
[0, 0, 4, 4]]
}
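# The keys above follow the "<fuzzer> <benchmark>" convention used by fuzzer_and_benchmark_to_key,
# and each region is a small list of four integers; the overlap between the afl and libfuzzer
# lists is what the expected values in the tests below are built from.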
def test_get_unique_region_dict():
"""Tests get_unique_region_dict() function."""
coverage_dict = create_coverage_data()
benchmark_coverage_dict = coverage_data_utils.get_benchmark_cov_dict(
coverage_dict, 'libpng-1.2.56')
unique_region_dict = coverage_data_utils.get_unique_region_dict(
benchmark_coverage_dict)
expected_dict = {
(0, 0, 2, 2): ['afl'],
(0, 0, 2, 3): ['libfuzzer'],
(0, 0, 4, 4): ['libfuzzer']
}
assert expected_dict == unique_region_dict
def test_get_unique_region_cov_df():
"""Tests get_unique_region_cov_df() function."""
coverage_dict = create_coverage_data()
benchmark_coverage_dict = coverage_data_utils.get_benchmark_cov_dict(
coverage_dict, 'libpng-1.2.56')
unique_region_dict = coverage_data_utils.get_unique_region_dict(
benchmark_coverage_dict)
fuzzer_names = ['afl', 'libfuzzer']
unique_region_df = coverage_data_utils.get_unique_region_cov_df(
unique_region_dict, fuzzer_names)
unique_region_df = unique_region_df.sort_values(by=['fuzzer']).reset_index(
drop=True)
expected_df = pd.DataFrame([{
'fuzzer': 'afl',
'unique_regions_covered': 1
}, {
'fuzzer': 'libfuzzer',
'unique_regions_covered': 2
}])
assert unique_region_df.equals(expected_df)
def test_get_benchmark_cov_dict():
"""Tests that get_benchmark_cov_dict() returns correct dictionary."""
coverage_dict = create_coverage_data()
benchmark = 'libpng-1.2.56'
benchmark_cov_dict = coverage_data_utils.get_benchmark_cov_dict(
coverage_dict, benchmark)
expected_cov_dict = {
"afl": {(0, 0, 3, 3), (0, 0, 2, 2), (0, 0, 1, 1)},
"libfuzzer": {(0, 0, 4, 4), (0, 0, 3, 3), (0, 0, 2, 3), (0, 0, 1, 1)}
}
assert expected_cov_dict == benchmark_cov_dict
def test_get_pairwise_unique_coverage_table():
"""Tests that get_pairwise_unique_coverage_table() gives the
correct dataframe."""
coverage_dict = create_coverage_data()
benchmark_coverage_dict = coverage_data_utils.get_benchmark_cov_dict(
coverage_dict, 'libpng-1.2.56')
fuzzers = ['libfuzzer', 'afl']
table = coverage_data_utils.get_pairwise_unique_coverage_table(
benchmark_coverage_dict, fuzzers)
expected_table = pd.DataFrame([[0, 1], [2, 0]],
index=fuzzers,
columns=fuzzers)
pd_test.assert_frame_equal(table, expected_table)
def test_get_fuzzer_benchmark_covered_regions_filestore_path():
"""Tests that get_fuzzer_benchmark_covered_regions_filestore_path returns
the correct result."""
assert (
coverage_data_utils.get_fuzzer_benchmark_covered_regions_filestore_path(
FUZZER, BENCHMARK,
EXPERIMENT_FILESTORE_PATH) == ('gs://fuzzbench-data/myexperiment/'
'coverage/data/libpng-1.2.56/afl/'
'covered_regions.json'))
def test_fuzzer_and_benchmark_to_key():
"""Tests that fuzzer_and_benchmark_to_key returns the correct result."""
assert (coverage_data_utils.fuzzer_and_benchmark_to_key(
FUZZER, BENCHMARK) == 'afl libpng-1.2.56')
def test_key_to_fuzzer_and_benchmark():
"""Tests that key_to_fuzzer_and_benchmark returns the correct result."""
assert (coverage_data_utils.key_to_fuzzer_and_benchmark('afl libpng-1.2.56')
== (FUZZER, BENCHMARK))
def test_fuzzer_benchmark_key_roundtrip():
"""Tests that key_to_fuzzer_and_benchmark(fuzzer_and_benchmark_to_key(X, Y))
returns (X, Y)."""
assert (coverage_data_utils.key_to_fuzzer_and_benchmark(
coverage_data_utils.fuzzer_and_benchmark_to_key(
FUZZER, BENCHMARK)) == (FUZZER, BENCHMARK))
def test_get_experiment_filestore_path_for_fuzzer_benchmark():
"""Tests that get_experiment_filestore_path_for_fuzzer_benchmark returns the
right result."""
filestore_path = (
coverage_data_utils.get_experiment_filestore_path_for_fuzzer_benchmark(
FUZZER, BENCHMARK, SAMPLE_DF))
assert filestore_path == 'gs://fuzzbench-data/exp1'
@mock.patch('analysis.coverage_data_utils.logger.warning')
def test_get_experiment_filestore_path_for_fuzzer_benchmark_multiple(
mocked_warning):
"""Tests that get_experiment_filestore_path_for_fuzzer_benchmark returns the
right result when there are multiple filestores for a single pair and that a
    warning is logged."""
df = pd.DataFrame([{
'experiment_filestore': 'gs://fuzzbench-data',
'experiment': 'exp1',
'fuzzer': FUZZER,
'benchmark': BENCHMARK
}, {
'experiment_filestore': 'gs://fuzzbench-data2',
'experiment': 'exp2',
'fuzzer': FUZZER,
'benchmark': BENCHMARK
}])
filestore_path = (
coverage_data_utils.get_experiment_filestore_path_for_fuzzer_benchmark(
FUZZER, BENCHMARK, df))
assert filestore_path in ('gs://fuzzbench-data/exp1',
'gs://fuzzbench-data2/exp2')
assert mocked_warning.call_count == 1
def test_get_experiment_filestore_paths():
"""Tests that get_experiment_filestore_paths returns the right result."""
df = pd.DataFrame([{
'experiment_filestore': 'gs://fuzzbench-data',
'experiment': 'exp1'
}, {
'experiment_filestore': 'gs://fuzzbench-data2',
'experiment': 'exp2'
}])
assert sorted(coverage_data_utils.get_experiment_filestore_paths(df)) == [
'gs://fuzzbench-data/exp1', 'gs://fuzzbench-data2/exp2'
]
def test_coverage_report_filestore_path():
"""Tests that get_coverage_report_filestore_path returns the correct
result."""
expected_cov_report_url = ('gs://fuzzbench-data/exp1/coverage/reports/'
'libpng-1.2.56/afl/index.html')
assert coverage_data_utils.get_coverage_report_filestore_path(
FUZZER, BENCHMARK, SAMPLE_DF) == expected_cov_report_url
|
py | 1a5128cb582687dfedd982955440f032013ecde3 | import requests
print('Hello, World!') |
py | 1a5128f63761cad1cb2f08d17b4a29ba97960ffe | import torch
from torch.nn.functional import softmax
import json
import nltk
nltk.download('punkt')
from nltk.tokenize import sent_tokenize
from transformers import AutoTokenizer, AutoModelForMaskedLM
import numpy as np
import sys
import random
import os
from os import path
def get_abstract(article):
return ' '.join([x['text'] for x in article['abstract']])
def get_pls(article):
return article['pls'] if article['pls_type'] == 'long' else ' '.join([x['text'] for x in article['pls']])
def mask_batch(tokens, tokenizer, num_mask):
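    # Builds 10 independently masked copies of the token list: each copy masks `num_mask`
    # randomly chosen positions (never [CLS]/[SEP]), records which positions were masked,
    # and restores the original tokens afterwards so the next copy starts from clean input.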
indexed_tokens = []
mask_indices = []
for i in range(10):
cur_mask_indices = random.sample(list(range(1,len(tokens)-1)), num_mask)
masked_tokens = [tokens[index] for index in cur_mask_indices]
for index in cur_mask_indices:
tokens[index] = '[MASK]'
indexed_tokens.append(tokenizer.convert_tokens_to_ids(tokens))
mask_indices.append(cur_mask_indices)
for j in range(num_mask):
index = cur_mask_indices[j]
tokens[index] = masked_tokens[j]
return indexed_tokens, mask_indices
def run_model_sentence(tokens, tokenizer, model, num_mask=5, batch_size=1, device='cuda'):
(indexed_tokens, mask_indices) = mask_batch(tokens, tokenizer, num_mask)
predictions = []
model.eval()
model.to(device)
start_index = 0
while start_index < len(indexed_tokens):
end_index = min(start_index + batch_size, len(indexed_tokens))
cur_indexed_tokens = torch.tensor(indexed_tokens[start_index:end_index], dtype=torch.long).to(device)
segment_ids = torch.ones((end_index-start_index, len(tokens)), dtype=torch.long).to(device)
with torch.no_grad():
outputs = model.forward(cur_indexed_tokens, token_type_ids=segment_ids)
predictions.append(outputs[0].to('cpu'))
start_index = end_index
predictions = torch.cat(predictions, dim=0)
return predictions, mask_indices
def eval_sentence(sentence, tokenizer, model, num_mask=5, batch_size=1, device='cuda'):
tokens = tokenizer.tokenize(sentence)
if len(tokens) > 510:
tokens = tokens[:510]
tokens = ['[CLS]'] + tokens + ['[SEP]']
#if num_mask is a float, treat as a percentage of tokens to mask,
#of course excluding the CLS and SEP tokens
if type(num_mask) == float:
num_mask = int(num_mask * (len(tokens)-2))
(predictions, mask_indices) = run_model_sentence(tokens, tokenizer, model, num_mask, batch_size, device)
probabilities = []
for i in range(len(predictions)):
for mask_index in mask_indices[i]:
distribution = softmax(predictions[i, mask_index], dim=0)
masked_token_index = tokenizer.convert_tokens_to_ids(tokens[mask_index])
prob = distribution[masked_token_index].item()
probabilities.append(prob)
return probabilities
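# The values returned above are the model's probabilities for the true tokens at the masked
# positions, i.e. a masked-LM pseudo-likelihood style signal: a higher average probability
# roughly means the text looks more "expected" to the model.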
def eval_paragraph(paragraph, tokenizer, model, num_mask=5, batch_size=1, device='cuda'):
probabilities = []
for sent in sent_tokenize(paragraph):
if type(num_mask) == int and len(tokenizer.tokenize(sent)) < num_mask:
print('skipping sentence...')
continue
probabilities += eval_sentence(sent, tokenizer, model, num_mask, batch_size, device)
return probabilities
def eval_article(article, tokenizer, model, num_mask=5, batch_size=1, device='cuda'):
abstract_probs = eval_paragraph(article['abstract'], tokenizer, model, num_mask, batch_size, device)
pls_probs = eval_paragraph(article['pls'], tokenizer, model, num_mask, batch_size, device)
gen_probs = eval_paragraph(article['gen'], tokenizer, model, num_mask, batch_size, device)
return abstract_probs, pls_probs, gen_probs
def probability_results(data, input_file_name, tokenizer, model, file_name, num_mask=5, batch_size=1, device='cuda'):
prefix = path.split(input_file_name)[-2]
#read in the dois already processed (if the file_name exists) so that they
#can be ignored in this run
already = set()
if path.isfile(path.join(prefix, file_name)):
with open(path.join(prefix, file_name)) as f:
for l in f.readlines():
if len(l) > 0:
                    already.add(l.split()[0])
for index,article in enumerate(data):
if article['doi'] in already:
continue
print(index)
(abstract_probs, pls_probs, gen_probs) = eval_article(article, tokenizer, model, num_mask, batch_size, device)
abstract_avg = np.mean(abstract_probs)
pls_avg = np.mean(pls_probs)
gen_avg = np.mean(gen_probs)
with open(path.join(prefix, file_name), 'a+', 1) as f:
f.write(f'{article["doi"]} {abstract_avg} {pls_avg} {gen_avg}\n')
f.flush()
model_name = sys.argv[1]
input_file_name = sys.argv[2]
file_name = sys.argv[3]
num_mask = float(sys.argv[4]) if '.' in sys.argv[4] else int(sys.argv[4])
batch_size = int(sys.argv[5])
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForMaskedLM.from_pretrained(model_name)
start_index = int(sys.argv[6])
end_index = int(sys.argv[7])
print(input_file_name)
data = json.load(open(input_file_name))
probability_results(data[start_index:end_index], input_file_name, tokenizer, model, file_name, num_mask, batch_size, device='cuda')
|
py | 1a512a61018e69e597d41ad89791d54861209bf8 | """Binary for running Mosquitto MQTT bridge with credential rotation."""
import datetime
import os
import signal
import subprocess
import time
import jwt
import psutil
def create_jwt_token():
"""Creates a JWT (https://jwt.io) to establish an MQTT connection.
Returns:
An MQTT auth token generated from the given project_id and private
key
Raises:
ValueError: If the private_key_file does not contain a known key.
"""
token = {
# The time that the token was issued at
'iat':
datetime.datetime.utcnow(),
# The time the token expires.
'exp':
datetime.datetime.utcnow() +
datetime.timedelta(minutes=int(os.environ["JWT_EXPIRATION_MINUTES"])),
# The audience field should always be set to the GCP project id.
'aud':
os.environ["PROJECT_ID"]
}
# Read the private key file.
with open("/etc/mqtt/ec_private.pem", 'r') as f:
private_key = f.read()
print ('Creating JWT from private key file')
return jwt.encode(token, private_key, algorithm='ES256').decode('utf-8')
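# The JWT above is used as the MQTT password for Google Cloud IoT Core; it has to be re-issued
# before the 'exp' claim passes, which is why main() periodically rewrites the config and
# signals the running mosquitto process below.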
def update_config(config):
    """Updates the mosquitto config file on disk.
Args:
config: path to the Mosquitto config
"""
# TODO proper template tooling?
config = config.replace('GOOGLE_CLOUD_JWT', create_jwt_token()).replace(
'CONFIG_DEVICE_ID', os.environ["BRIDGE_DEVICE_ID"]).replace(
'CONFIG_PROJECT_ID', os.environ["PROJECT_ID"]).replace(
'CONFIG_LOCATION', os.environ["CLOUD_REGION"]).replace(
'CONFIG_REGISTRY', os.environ["REGISTRY_ID"]).replace(
'BASE_DIR', '/etc/mqtt')
with open("/mosquitto/config/mosquitto.conf", 'w') as dst_conf:
dst_conf.write(config)
print ('Wrote config to {}'.format("/mosquitto/config/mosquitto.conf"))
def main():
with open("/etc/mqtt/bridge.conf.tmpl", 'r') as src_conf:
config_template = src_conf.read()
update_config(config_template)
while True:
time.sleep(int(os.environ["CONFIG_REFRESH_MINUTES"]) * 60)
print ('Restarting MQTT Bridge')
update_config(config_template)
procs = [procObj for procObj in psutil.process_iter() if 'mosquitto' in procObj.name().lower() ]
if len(procs):
procs[0].send_signal(signal.SIGTERM)
if __name__ == '__main__':
main() |
py | 1a512ab8ef03baf240d77260f2d1cf0dff32e573 | #-*- coding: utf-8 -*-
import re
import pickle
# say-as basic rules - reference paper: "Writing Numbers in Korean in Technical Documents" (Kwon Seong-gyu)
_mandarin_num = {"0": "공", "1": "일", "2": "이", "3": "삼", "4": "사", "5": "오", "6": "육", "7": "칠",
                 "8": "팔", "9": "구", "10": "십", "100": "백", "1000": "천", "10000": "만", "100000000": "억",
                 "1000000000000": "조"}
_korean_num = {"1": "한", "2": "두", "3": "세", "4": "네", "5": "다섯", "6": "여섯", "7": "일곱",
               '8': "여덟", "9": "아홉", "10": "열", "20": "스물", "30": "서른", "40": "마흔", "50": "쉰",
               "60": "예순", "70": "일흔", "80": "여든", "90": "아흔"}
_korean_end_word = ['๊ฐ', '๋', '๋ง๋ฆฌ', '๋ฒ', '์ด', '์', '์๋ฃจ', '๋ฐ',
                    '์ฃฝ', '์ฑ', '์ผค๋ ', '์พ', '์', 'ํฌ๊ธฐ', '๋ฒ์งธ', '๊ฐ์ง', '๊ณณ',
                    '์ด', '์ฒ', '์บ', '๋ฐฐ', '๊ทธ๋ฃจ', '๋ช', '๋ฒ', '๋ฌ', '๊ฒน', '๊ฑด', '๋']
_exception_korean_end_word = ['개국', '달러', '개월']
_english_word = {'a': '에이', 'b': '비', 'c': '씨', 'd': '디', 'e': '이', 'f': '에프', 'g': '쥐', 'h': '에이치',
                 'i': '아이', 'j': '제이', 'k': '케이', 'l': '엘', 'n': '엔', 'm': '엠', 'o': '오', 'p': '피',
                 'q': '큐', 'r': '얼', 's': '에스', 't': '티', 'u': '유', 'v': '브이', 'w': '더블유', 'x': '엑스',
                 'y': '와이', 'z': '지'}
_special_num_sub = {'A4': '에이포', 'G20': '지이십', 'G2': '지투', 'U2': '유투',
                    '2PM': '투피엠', '88올림픽': '팔팔올림픽',
                    '119에': '일일구에', '112신고': '일일이신고', '빅3': '빅쓰리', '4대강': '사대강'}
# lexicon basic rules - reference dictionary: Standard Korean Language Dictionary (National Institute of Korean Language)
with open('./tts/text/dictionary/lexicon.pickle', 'rb') as handle:
_lexicon = pickle.load(handle)
# sub basic rules
with open('./tts/text/dictionary/sub.pickle', 'rb') as handle:
_sub = pickle.load(handle)
with open('./tts/text/dictionary/num_sub.pickle', 'rb') as handle:
_num_sub = pickle.load(handle)
_num_sub['㎜'] = '밀리미터'
def read1to999(n):
    units = [''] + list('십백천')
    nums = '일이삼사오육칠팔구'
result = []
i = 0
while n > 0:
n, r = divmod(n, 10)
if r > 0:
if units[i] == '':
result.append(nums[r - 1] + units[i])
else:
if r == 1:
result.append(units[i])
else:
result.append(nums[r - 1] + units[i])
i += 1
return ''.join(result[::-1])
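# For example, with the tables above, read1to999(123) returns '백이십삼' and read1to999(10) returns '십'.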
def readNumM(n):
    """
    Read a number in Sino-Korean (hanja-style) pronunciation.
    """
    result = ''
    if n >= 1000000000000:
        r, n = divmod(n, 1000000000000)
        tmp = read1to999(r)
        if len(tmp) == 1 and tmp[-1] == '일':
            result += '조'
        else:
            result += tmp + "조"
    if n >= 100000000:
        r, n = divmod(n, 100000000)
        tmp = read1to999(r)
        if len(tmp) == 1 and tmp[-1] == '일':
            result += '억'
        else:
            result += tmp + "억"
    if n >= 10000:
        r, n = divmod(n, 10000)
        tmp = read1to999(r)
        if len(tmp) == 1 and tmp[-1] == '일':
            result += '만'
        else:
            result += tmp + "만"
    result += read1to999(n)
    return result
def readNumK(intNum):
    """
    Read a number in native Korean (hangul) pronunciation.
    """
tmp_list = list(_korean_num.keys())
num_list = list()
for num in tmp_list:
num_list.append(int(num))
num_list.sort(reverse=True)
result = ""
for num in num_list:
if intNum >= num:
intNum -= num
result += _korean_num[str(num)]
return result
def txt_preprocessing(txt):
word_list = txt.split(' ') # for tts
for k, word in enumerate(word_list):
        # replace pronunciations using lexicon & sub
english = re.sub('[^a-zA-Z]', '', word)
not_checked = 1
if english != '' and not re.findall('\d', word):
            # lexicon handling
for key, value in _lexicon.items():
if key.lower() == english.lower():
word_list[k] = word_list[k].replace(english, value)
not_checked = 0
            # sub handling
for key, value in _sub.items():
if key.lower() == english.lower():
word_list[k] = word_list[k].replace(english, value)
not_checked = 0
elif re.findall('\d+', word):
            # num_sub handling
for key, value in _num_sub.items():
if key in word:
word_list[k] = word_list[k].replace(key, value)
not_checked = 0
        # say-as pronunciation replacement
seperated_num = 0
if '-' in word:
seperated_num = 1
if '.' in word:
if word[-1] != '.':
word_list[k].replace('.', '์ ')
if ',' in word:
if word[-1] != ',':
word_list[k].replace(',', '')
word.replace(',', '')
strNum_list = re.findall('\d+', word) # ๊ฐ ์ค๋ณต ์ ์ ๊ฑฐํด ๋๊ฐ๋ฉด์ ์ฒ๋ฆฌ ํ์
prev = -1
for strNum in strNum_list:
pos = word.index(strNum)
if prev == pos: # ์ฝ์ ๊ฐ ์ค๋ณต ์ฒ๋ฆฌ
continue
wList = [word[0:pos], word[pos: pos + len(strNum)], word[pos + len(strNum):]]
wList = [w for w in wList if not w == '']
check = ""
# ์ฒ์์ด 0์ผ๋ก ์์ํ๋ฉด ํ๋ฌธ-๋ถ๋ฆฌ
if strNum[0] == '0' or seperated_num:
check = "ํ๋ฌธ-๋ถ๋ฆฌ"
if word_list[k-1] == '์นด๋๋ฒํธ๋':
word_list[k]= word_list[k].replace('-', '๋ค์')
else:
word_list[k]= word_list[k].replace('-', '์')
else:
for i, w in enumerate(wList):
# ์ซ์ ๋ค์ ๋ถ๋ ๊ฒ์ด ์์ ๋, ํ๋ฌธ
if len(wList) == (i + 1):
if k > 1:
if word_list[k - 1][0] == '-':
check = "ํ๋ฌธ-๋ถ๋ฆฌ"
break
if k + 1 < len(word_list):
if word_list[k + 1] == '':
check = "ํ๋ฌธ"
elif word_list[k + 1][0] == '-':
check = "ํ๋ฌธ-๋ถ๋ฆฌ"
elif word_list[k + 1][0] in _korean_end_word:
check = "ํ๊ธ"
else:
check = "ํ๋ฌธ"
else:
check = "ํ๋ฌธ"
break
elif w == strNum:
# ์ซ์ ๋ค์ ๋ถ๋ ๊ฒ์ ๋ฐ๋ผ ํ๊ธ, ํ๋ฌธ ์ ํ
if wList[i + 1][0] in _korean_end_word:
check = "ํ๊ธ"
else:
check = "ํ๋ฌธ"
break
tmpNum = ""
intNum = int(strNum)
if check == "ํ๋ฌธ-๋ถ๋ฆฌ":
for s in strNum:
# ํ๊ธ์์ฉ ์ฝ๊ธฐ (0 == ๊ณต)
tmpNum += _mandarin_num[s]
elif check == "ํ๋ฌธ":
# ์ซ์ ํ๋ฌธ ์ฝ๊ธฐ
tmpNum = readNumM(intNum)
else: # check == "ํ๊ธ"
# 100์ด์ ํ๋ฌธ ์ฝ๊ธฐ + ์ดํ ํ๊ธ ์ฝ๊ธฐ
tmpNum = readNumM(intNum // 100 * 100) + readNumK(intNum % 100)
word_list[k] = word_list[k].replace(strNum, tmpNum)
elif '-' in word:
word_list[k] = word_list[k].replace('-', '์')
if not_checked:
tmp = ''
for char in word_list[k]:
if char.lower() in _english_word.keys():
not_checked = 0
tmp += _english_word[char.lower()]
else:
tmp += char
word_list[k] = tmp
tts_sentence = word_list[0]
for word in word_list[1:]: # ๊ธธ์ด 1 ์์ธ์ฒ๋ฆฌ ํ์
tts_sentence += ' ' + word
return tts_sentence
def txt_preprocessing_only_num(txt):
word_list = txt.split(' ')
for k, word in enumerate(word_list):
strNum_list = re.findall('\d+', word)
not_special_case = True
for key, value in _special_num_sub.items():
if key in word:
not_special_case = False
word_list[k] = word_list[k].replace(key, value)
if not_special_case and strNum_list:
# num_sub ์ฒ๋ฆฌ
for key, value in _num_sub.items():
if key in word:
if 'k' + key in word:
key = 'k' + key
value = 'ํฌ๋ก' + value
elif 'm' + key in word:
key = 'm' + key
value = '๋ฐ๋ฆฌ' + value
elif 'c' + key in word:
key = 'c' + key
value = '์ผํฐ' + value
word_list[k] = word_list[k].replace(key, value)
break
# say-as ๋ฐ์ ๊ต์ฒด
seperated_num = 0
if '-' in word:
seperated_num = 1
if '.' in word:
if word[-1] != '.':
word_list[k] = word_list[k].replace('.', '์ ')
if ',' in word:
if word[-1] != ',':
word_list[k] = word_list[k].replace(',', '')
if 'ยท' in word:
word_list[k] = word_list[k].replace('ยท', '')
prev = -1
for strNum in sorted(strNum_list, key=lambda x:len(x), reverse=True):
pos = word.index(strNum)
if prev == pos: # ์ฝ์ ๊ฐ ์ค๋ณต ์ฒ๋ฆฌ
continue
wList = [word[0:pos], word[pos: pos + len(strNum)], word[pos + len(strNum):]]
wList = [w for w in wList if not w == '']
check = ""
one_change = False
if 'ยท' in word:
check = 'ํ๋ฌธ-๋ถ๋ฆฌ'
one_change = True
elif re.findall('(\d+)-(\d+)', word):
check = "ํ๋ฌธ-๋ถ๋ฆฌ"
if word_list[k-1] == '์นด๋๋ฒํธ๋':
word_list[k] = word_list[k].replace('-','๋ค์')
else:
word_list[k] = word_list[k].replace('-','์')
elif strNum[0] == '0': # ์ฒ์์ด 0์ผ๋ก ์์ํ๋ฉด ํ๋ฌธ-๋ถ๋ฆฌ
if len(strNum) == 1:
word_list[k] = word_list[k].replace('0', '์')
continue
elif '00' in strNum:
key = ''
value = ''
for _ in range(strNum.count('0')):
key += '0'
value += '๋ก'
word_list[k] = word_list[k].replace(key, value)
continue
check = "ํ๋ฌธ-๋ถ๋ฆฌ"
else:
for i, w in enumerate(wList):
# ์ซ์ ๋ค์ ๋ถ๋ ๊ฒ์ด ์์ ๋, ํ๋ฌธ
if len(wList) == (i + 1):
if k > 1:
if word_list[k - 1][0] == '-':
check = "ํ๋ฌธ-๋ถ๋ฆฌ"
break
if k + 1 < len(word_list):
if word_list[k + 1][0] == '-':
check = "ํ๋ฌธ-๋ถ๋ฆฌ"
elif len(word_list[k+1]) >= 2:
if word_list[k+1][:2] in _korean_end_word:
check = "ํ๊ธ"
break
elif word_list[k + 1][0] in _korean_end_word:
check = "ํ๊ธ"
for e in _exception_korean_end_word:
if e in word_list[k+1]:
check = 'ํ๋ฌธ'
break
else:
check = "ํ๋ฌธ"
else:
check = "ํ๋ฌธ"
break
elif w == strNum:
# ์ซ์ ๋ค์ ๋ถ๋ ๊ฒ์ ๋ฐ๋ผ ํ๊ธ, ํ๋ฌธ ์ ํ
if len(wList[i+1]) >= 2:
if wList[i+1][:2] in _korean_end_word:
check = 'ํ๊ธ'
break
if wList[i + 1][0] in _korean_end_word:
check = "ํ๊ธ"
for e in _exception_korean_end_word:
if e in wList[i+1]:
check = 'ํ๋ฌธ'
break
else:
check = "ํ๋ฌธ"
break
tmpNum = ""
intNum = int(strNum)
if check == "ํ๋ฌธ-๋ถ๋ฆฌ":
for s in strNum:
# ํ๊ธ์์ฉ ์ฝ๊ธฐ (0 == ๊ณต)
tmpNum += _mandarin_num[s]
elif check == "ํ๋ฌธ":
# ์ซ์ ํ๋ฌธ ์ฝ๊ธฐ
tmpNum = readNumM(intNum)
else: # check == "ํ๊ธ"
# 100์ด์ ํ๋ฌธ ์ฝ๊ธฐ + ์ดํ ํ๊ธ ์ฝ๊ธฐ
if intNum > 99:
tmpNum = readNumM(intNum)
else:
tmpNum = readNumK(intNum)
# tmpNum = readNumM(intNum // 100 * 100) + readNumK(intNum % 100)
word_list[k] = word_list[k].replace(strNum, tmpNum)
if word_list:
word_list = [' ' + w for w in word_list]
tts_sentence = ''.join(word_list)
tts_sentence = tts_sentence[1:]
return tts_sentence
else:
return ' ' |
py | 1a512ada26a67444caf04ce3a7020c10077be53b | import pathlib
import random
import sys
import panel as pn
import param
_COLORS = [
("#00A170", "white"),
("#DAA520", "white"),
("#2F4F4F", "white"),
("#F08080", "white"),
("#4099da", "white"), # lightblue
]
_LOGOS = {
"default": "https://panel.holoviz.org/_static/logo_stacked.png",
"dark": "https://raw.githubusercontent.com/holoviz/panel/98389a8dead125bcb7c60dc2c1564e112d89d3fa/doc/_static/logo_stacked_dark_theme.png",
}
_MENU_FILE = pathlib.Path(__file__).parent / "menu.html"
_MENU_TEXT = _MENU_FILE.read_text()
_ACE_THEMES={
"default": "chrome",
"dark": "tomorrow_night_eighties"
}
RAW_CSS = """
.sidenav .menu-item-active a {
background: var(--accent-fill-active);
color: white;
}
"""
if RAW_CSS not in pn.config.raw_css:
pn.config.raw_css.append(RAW_CSS)
def _mock_panel():
def _reload(module=None):
if module is not None:
for module in pn.io.reload._modules:
if module in sys.modules:
del sys.modules[module]
for cb in pn.io.reload._callbacks.values():
cb.stop()
pn.io.reload._callbacks.clear()
if pn.state.location:
pn.state.location.reload = True
for loc in pn.state._locations.values():
loc.reload = True
pn.io.reload._reload = _reload
_mock_panel()
#tests
class Configuration(param.Parameterized):
theme = param.String()
site = param.String(default="Panel@PyData 2021")
title = param.String()
url = param.String()
logo = param.String()
accent_base_color = param.Color()
header_color = param.Color()
header_accent_base_color = param.Color("white")
header_background = param.Color()
main_max_width = param.String("95%")
sidebar_width = param.Integer(400)
ace_theme=param.String()
def __init__(self, random=False, **params):
"""Configuration for your (Fast) Template
Args:
random (bool, optional): Whether or not to provide randomized values. Defaults to False.
"""
super().__init__(**params)
self.theme = self._get_theme()
if random:
color_index = self._get_random_color_index()
else:
color_index=0
self.accent_base_color = _COLORS[color_index][0]
self.header_color = _COLORS[color_index][1]
self.header_background = self.accent_base_color
self.logo=_LOGOS[self.theme]
self.ace_theme=_ACE_THEMES[self.theme]
def _get_theme(self):
if pn.template.FastListTemplate().theme==pn.template.DarkTheme:
return "dark"
return "default"
def _get_random_color_index(self):
        if "color" not in pn.state.cache:
pn.state.cache["color"]=-1
color = pn.state.cache["color"]+1
if color==len(_COLORS):
color=0
pn.state.cache["color"]=color
return color
@property
def _collapsed_icon(self) -> str:
return f"""<svg style="stroke: { self.accent_base_color }" width="18" height="18" viewBox="0 0 18 18" fill="none" xmlns="http://www.w3.org/2000/svg" slot="collapsed-icon">
<path d="M15.2222 1H2.77778C1.79594 1 1 1.79594 1 2.77778V15.2222C1 16.2041 1.79594 17 2.77778 17H15.2222C16.2041 17 17 16.2041 17 15.2222V2.77778C17 1.79594 16.2041 1 15.2222 1Z" stroke-linecap="round" stroke-linejoin="round"></path>
<path d="M9 5.44446V12.5556" stroke-linecap="round" stroke-linejoin="round"></path>
<path d="M5.44446 9H12.5556" stroke-linecap="round" stroke-linejoin="round"></path>
</svg>"""
@property
def _expanded_icon(self) -> str:
return f"""<svg style="stroke: { self.accent_base_color }" width="18" height="18" viewBox="0 0 18 18" fill="none" xmlns="http://www.w3.org/2000/svg" slot="expanded-icon">
<path d="M15.2222 1H2.77778C1.79594 1 1 1.79594 1 2.77778V15.2222C1 16.2041 1.79594 17 2.77778 17H15.2222C16.2041 17 17 16.2041 17 15.2222V2.77778C17 1.79594 16.2041 1 15.2222 1Z" stroke-linecap="round" stroke-linejoin="round"></path>
<path d="M5.44446 9H12.5556" stroke-linecap="round" stroke-linejoin="round"></path>
</svg>
"""
@property
def menu(self) -> str:
"""Returns a HTML Menu"""
test=f'<li><a href="{ self.url }">{ self.title }</a></li>'
return (
_MENU_TEXT
.replace("{ COLLAPSED_ICON }", self._collapsed_icon)
.replace("{ EXPANDED_ICON }", self._expanded_icon)
.replace(f'<li><a href="{ self.url }">{ self.title }</a></li>', f'<li class="menu-item-active"><a href="{ self.url }">{ self.title }</a></li>')
)
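    # The replace() chain above swaps the plain <li> entry whose href matches this page for a
    # copy tagged with the menu-item-active class, which the RAW_CSS rule at module level styles.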
def get_logo_pane(self, **params):
return pn.pane.PNG(
self.logo,
link_url="https://panel.holoviz.org",
embed=False,
sizing_mode="fixed",
align="center",
**params
)
if __name__.startswith("bokeh"):
config = Configuration(title="Works in your Notebook and IDE", url="works_in_your_notebook_and_ide", random=True)
pn.template.FastListTemplate(
title="Test Configuration",
site=config.site,
header_accent_base_color=config.header_accent_base_color,
header_background=config.header_background,
header_color=config.header_color,
sidebar_footer=config.menu,
accent_base_color=config.accent_base_color,
main=[pn.pane.PNG(config.logo)],
).servable()
|
py | 1a512afa955db9ae343b7bc3f8415930970c515a | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Utility functions for dealing with files"""
import pkg_resources
import pathlib
EXAMPLE_AUDIO = "example_data/Kevin_MacLeod_-_Vibe_Ace.ogg"
__all__ = ["example_audio_file", "find_files"]
def example_audio_file():
"""Get the path to an included audio example file.
.. raw:: html
<div xmlns:cc="http://creativecommons.org/ns#"
xmlns:dct="http://purl.org/dc/terms/"
about="http://freemusicarchive.org/music/Kevin_MacLeod/Jazz_Sampler/Vibe_Ace_1278">
<span property="dct:title">Vibe Ace</span>
(<a rel="cc:attributionURL" property="cc:attributionName"
href="http://freemusicarchive.org/music/Kevin_MacLeod/">Kevin MacLeod</a>)
/ <a rel="license" href="http://creativecommons.org/licenses/by/3.0/">CC BY 3.0</a>
</div>
Examples
--------
>>> # Load the waveform from the example track
>>> y, sr = librosa.load(librosa.util.example_audio_file())
Returns
-------
filename : str
Path to the audio example file included with librosa
"""
return pkg_resources.resource_filename(__name__, EXAMPLE_AUDIO)
def find_files(
directory, ext=None, recurse=True, case_sensitive=False, limit=None, offset=0
):
"""Get a sorted list of (audio) files in a directory or directory sub-tree.
Examples
--------
>>> # Get all audio files in a directory sub-tree
>>> files = librosa.util.find_files('~/Music')
>>> # Look only within a specific directory, not the sub-tree
>>> files = librosa.util.find_files('~/Music', recurse=False)
>>> # Only look for mp3 files
>>> files = librosa.util.find_files('~/Music', ext='mp3')
>>> # Or just mp3 and ogg
>>> files = librosa.util.find_files('~/Music', ext=['mp3', 'ogg'])
>>> # Only get the first 10 files
>>> files = librosa.util.find_files('~/Music', limit=10)
>>> # Or last 10 files
>>> files = librosa.util.find_files('~/Music', offset=-10)
Parameters
----------
directory : Path object
Path to look for files
ext : str or list of str
A file extension or list of file extensions to include in the search.
Default: `['aac', 'au', 'flac', 'm4a', 'mp3', 'ogg', 'wav']`
recurse : boolean
If `True`, then all subfolders of `directory` will be searched.
Otherwise, only `directory` will be searched.
case_sensitive : boolean
If `False`, files matching upper-case version of
extensions will be included.
limit : int > 0 or None
Return at most `limit` files. If `None`, all files are returned.
offset : int
Return files starting at `offset` within the list.
Use negative values to offset from the end of the list.
Returns
-------
files : list of str
The list of audio files.
"""
directory = pathlib.Path(directory)
if ext is None:
ext = ["aac", "au", "flac", "m4a", "mp3", "ogg", "wav"]
elif isinstance(ext, str):
ext = [ext]
# Cast into a set
ext = set(ext)
# Generate upper-case versions
if not case_sensitive:
# Force to lower-case
ext = set([e.lower() for e in ext])
# Add in upper-case versions
ext |= set([e.upper() for e in ext])
files = set()
if recurse:
files = __get_files(directory, ext, True)
else:
files = __get_files(directory, ext, False)
files = list(files)
files.sort()
files = files[offset:]
if limit is not None:
files = files[:limit]
return files
def __get_files(dir_name: pathlib.Path, extensions: set, recur: bool):
"""Helper function to get files in a single directory"""
# Expand out the directory
dir_name = dir_name.expanduser().absolute()
my_files = set()
if recur:
for sub_ext in extensions:
my_files |= set(dir_name.rglob("*." + sub_ext))
else:
for sub_ext in extensions:
my_files |= set(dir_name.glob("*." + sub_ext))
return my_files
|
py | 1a512b264fca96694fd6b631bf3c75b364b65451 | # -*- coding: utf-8 -*-
"""
webapp2_extras.sessions_memcache
================================
Extended sessions stored in memcache.
App Engine-specific modules were moved to webapp2_extras.appengine.
This module is here for compatibility purposes.
:copyright: 2011 by tipfy.org.
:license: Apache Sotware License, see LICENSE for details.
"""
import warnings
warnings.warn(DeprecationWarning(
'webapp2_extras.sessions_memcache is deprecated. '
'App Engine-specific modules were moved to webapp2_extras.appengine.'),
stacklevel=1)
from webapp2_extras.appengine.sessions_memcache import *
|
py | 1a512b838a3af229edc9db52ec5b9d9662f5c037 | import tkinter as tk
root=tk.Tk()
root.geometry('350x100')
lb_rgb=tk.Label(text='rgb',fg='#000',bg='#fff')
lb_rrggbb=tk.Label(text='rrggbb',fg='#abcdef',bg='#123456')
lb_rrrgggbbb=tk.Label(text='rrrgggbbb',fg='#123456789',bg='#987abcdef')
lb_colorname=tk.Label(text='colorname',fg='magenta',bg='yellow')
[widget.pack() for widget in (lb_rgb,lb_rrggbb,lb_rrrgggbbb,lb_colorname)]
root.mainloop() |
py | 1a512beb264cebcb4947baa1873b09e99b650cfe | '''
This module was downloaded from the pycroscopy github page: https://github.com/pycroscopy/pycroscopy
It was edited slightly by contributor Jessica Kong (@kongjy) to accommodate the new format
in which PiFM data is taken with a polarizer installed.
'''
import os
import numpy as np
from pyUSID.io.translator import Translator
from pyUSID.io import write_utils
from pyUSID import USIDataset
import pyUSID as usid
import h5py
class PiFMTranslator(Translator):
"""
Class that writes images, spectrograms, point spectra and associated ancillary data sets to h5 file in pyUSID data
structure.
"""
def translate(self, path, append_path='', grp_name='Measurement'):
"""
Parameters
----------
        path : String / unicode
            Absolute path of the ANFATEC parameter .txt file
        append_path : string (Optional)
            h5 file to add these data to; must be a path to an existing h5 file on disk
        grp_name : string (Optional)
            Name of the HDF5 group the measurement is written under. Default 'Measurement'
        Returns
        -------
        h5_f : h5py.File
            Open h5py File object containing the translated data
"""
self.get_path(path)
self.read_anfatec_params()
self.read_file_desc()
self.read_spectrograms()
self.read_imgs()
self.read_spectra()
self.make_pos_vals_inds_dims()
self.create_hdf5_file(append_path, grp_name)
self.write_spectrograms()
self.write_images()
self.write_spectra()
self.write_ps_spectra()
return self.h5_f
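    # Typical use (illustrative; the file path is hypothetical):
    #   h5_file = PiFMTranslator().translate('/path/to/anfatec_parameters.txt')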
def get_path(self, path):
"""writes full path, directory, and file name as attributes to class"""
# get paths/get params dictionary, img/spectrogram/spectrum descriptions
self.path = path
full_path = os.path.realpath(self.path)
directory = os.path.dirname(full_path)
# file name
basename = os.path.basename(self.path)
self.full_path = full_path
self.directory = directory
self.basename = basename
#these dictionary parameters will be written to hdf5 file under measurement attributes
def read_anfatec_params(self):
"""reads the scan parameters and writes them to a dictionary"""
params_dictionary = {}
params = True
with open(self.path, 'r', encoding="ISO-8859-1") as f:
for line in f:
if params:
sline = [val.strip() for val in line.split(':')]
if len(sline) == 2 and sline[0][0] != ';':
params_dictionary[sline[0]] = sline[1]
#in ANFATEC parameter files, all attributes are written before file references.
if sline[0].startswith('FileDesc'):
params = False
f.close()
self.params_dictionary = params_dictionary
self.x_len, self.y_len = int(params_dictionary['xPixel']), int(params_dictionary['yPixel'])
def read_file_desc(self):
"""reads spectrogram, image, and spectra file descriptions and stores all to dictionary where
the key:value pairs are filename:[all descriptors]"""
spectrogram_desc = {}
img_desc = {}
spectrum_desc = {}
pspectrum_desc = {}
with open(self.path,'r', encoding="ISO-8859-1") as f:
## can be made more concise...by incorporating conditons with loop control
lines = f.readlines()
for index, line in enumerate(lines):
sline = [val.strip() for val in line.split(':')]
#if true, then file describes image.
if sline[0].startswith('FileDescBegin'):
no_descriptors = 5
file_desc = []
for i in range(no_descriptors):
line_desc = [val.strip() for val in lines[index+i+1].split(':')]
file_desc.append(line_desc[1])
#img_desc['filename'] = caption, scale, physical unit, offset
img_desc[file_desc[0]] = file_desc[1:]
#if true, file describes spectrogram (ie hyperspectral image)
if sline[0].startswith('FileDesc2Begin'):
no_descriptors = 10
file_desc = []
for i in range(no_descriptors):
line_desc = [val.strip() for val in lines[index+i+1].split(':')]
file_desc.append(line_desc[1])
#caption, bytes perpixel, scale, physical unit, offset, offset, datatype, bytes per reading
#filename wavelengths, phys units wavelengths.
spectrogram_desc[file_desc[0]] = file_desc[1:]
if sline[0].startswith('AFMSpectrumDescBegin'):
file_desc = []
line_desc = [val.strip() for val in lines[index+1].split(':')][1]
if 'powerspectrum' in line_desc:
no_descriptors = 2
for i in range(no_descriptors):
line_desc = [val.strip() for val in lines[index+i+1].split(':')]
file_desc.append(line_desc[1])
#file name, position x, position y
pspectrum_desc[file_desc[0]] = file_desc[1:]
else:
no_descriptors = 7
for i in range(no_descriptors):
line_desc = [val.strip() for val in lines[index+i+1].split(':')]
file_desc.append(line_desc[1])
#file name, position x, position y
spectrum_desc[file_desc[0]] = file_desc[1:]
f.close()
self.img_desc = img_desc
self.spectrogram_desc = spectrogram_desc
self.spectrum_desc = spectrum_desc
self.pspectrum_desc = pspectrum_desc
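    # At this point the four dicts map each data file name to its descriptor list; the write_*
    # methods below consume those descriptors as captions, scales, physical units and offsets.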
def read_spectrograms(self):
"""reads spectrograms, associated spectral values, and saves them in two dictionaries"""
spectrograms = {}
spectrogram_spec_vals = {}
for file_name, descriptors in self.spectrogram_desc.items():
# load and save spectroscopic values
spec_vals_i = np.loadtxt(os.path.join(self.directory, file_name.strip('.int') + 'Wavelengths.txt'))
#if true, data is acquired with polarizer, with an attenuation data column
if np.array(spec_vals_i).ndim == 2:
spectrogram_spec_vals[file_name] = spec_vals_i[:,0]
attenuation = {}
attenuation[file_name] = spec_vals_i[:,1]
self.attenuation = attenuation
else:
spectrogram_spec_vals[file_name] = spec_vals_i
# load and save spectrograms
spectrogram_i = np.fromfile(os.path.join(self.directory, file_name), dtype='i4')
spectrograms[file_name] = np.zeros((self.x_len, self.y_len, len(spec_vals_i)))
for y, line in enumerate(np.split(spectrogram_i, self.y_len)):
for x, pt_spectrum in enumerate(np.split(line, self.x_len)):
spectrograms[file_name][x, y, :] = pt_spectrum * float(descriptors[2])
self.spectrograms = spectrograms
self.spectrogram_spec_vals = spectrogram_spec_vals
def read_imgs(self):
"""reads images and saves to dictionary"""
imgs = {}
for file_name, descriptors in self.img_desc.items():
img_i = np.fromfile(os.path.join(self.directory, file_name), dtype='i4')
imgs[file_name] = np.zeros((self.x_len, self.y_len))
for y, line in enumerate(np.split(img_i, self.y_len)):
for x, pixel in enumerate(np.split(line, self.x_len)):
imgs[file_name][x, y] = pixel * float(descriptors[1])
self.imgs = imgs
def read_spectra(self):
"""reads all point spectra and saves to dictionary"""
spectra = {}
spectra_spec_vals = {}
spectra_x_y_dim_name = {}
for file_name, descriptors in self.spectrum_desc.items():
spectrum_f = np.loadtxt(os.path.join(self.directory, file_name), skiprows=1)
spectra_spec_vals[file_name] = spectrum_f[:, 0]
spectra[file_name] = spectrum_f[:,1]
with open(os.path.join(self.directory, file_name)) as f:
spectra_x_y_dim_name[file_name] = f.readline().strip('\n').split('\t')
for file_name, descriptors in self.pspectrum_desc.items():
spectrum_f = np.loadtxt(os.path.join(self.directory, file_name), skiprows=1)
spectra_spec_vals[file_name] = spectrum_f[:, 0]
spectra[file_name] = spectrum_f[:,1]
with open(os.path.join(self.directory, file_name)) as f:
spectra_x_y_dim_name[file_name] = f.readline().strip('\n').split('\t')
self.spectra = spectra
self.spectra_spec_vals = spectra_spec_vals
self.spectra_x_y_dim_name = spectra_x_y_dim_name
def make_pos_vals_inds_dims(self):
x_range = float(self.params_dictionary['XScanRange'])
y_range = float(self.params_dictionary['YScanRange'])
x_center = float(self.params_dictionary['xCenter'])
y_center = float(self.params_dictionary['yCenter'])
x_start = x_center-(x_range/2); x_end = x_center+(x_range/2)
y_start = y_center-(y_range/2); y_end = y_center+(y_range/2)
dx = x_range/self.x_len
dy = y_range/self.y_len
#assumes y scan direction:down; scan angle: 0 deg
y_linspace = -np.arange(y_start, y_end, step=dy)
x_linspace = np.arange(x_start, x_end, step=dx)
pos_ind, pos_val = write_utils.build_ind_val_matrices(unit_values=(x_linspace, y_linspace), is_spectral=False)
#usid.write_utils.Dimension uses ascii encoding, which can not encode
# micron symbol, so we replace it, if present, with the letter u.
pos_dims = [usid.write_utils.Dimension('X', self.params_dictionary['XPhysUnit'].replace('\xb5', 'u'), self.x_len),
usid.write_utils.Dimension('Y', self.params_dictionary['YPhysUnit'].replace('\xb5', 'u'), self.y_len)]
self.pos_ind, self.pos_val, self.pos_dims = pos_ind, pos_val, pos_dims
def create_hdf5_file(self, append_path='', grp_name='Measurement'):
if not append_path:
h5_path = os.path.join(self.directory, self.basename.replace('.txt', '.h5'))
if os.path.exists(h5_path):
raise FileExistsError
#if file already exists. (maybe there is a better way to check for this)
else:
self.h5_f = h5py.File(h5_path, mode='w')
else:
if not os.path.exists(append_path):
raise Exception('File does not exist. Check pathname.')
self.h5_f = h5py.File(append_path, mode='r+')
self.h5_meas_grp = usid.hdf_utils.create_indexed_group(self.h5_f, grp_name)
usid.hdf_utils.write_simple_attrs(self.h5_meas_grp, self.params_dictionary)
return
def write_spectrograms(self):
if bool(self.spectrogram_desc):
for spectrogram_f, descriptors in self.spectrogram_desc.items():
channel_i = usid.hdf_utils.create_indexed_group(self.h5_meas_grp, 'Channel_')
spec_vals_i = self.spectrogram_spec_vals[spectrogram_f]
spectrogram_spec_dims = usid.write_utils.Dimension('Wavelength', descriptors[8], spec_vals_i)
h5_raw = usid.hdf_utils.write_main_dataset(channel_i, # parent HDF5 group
(self.x_len *
self.y_len, len(spec_vals_i)), # shape of Main dataset
'Raw_Data', # Name of main dataset
'Spectrogram', # Physical quantity contained in Main dataset
descriptors[3], # Units for the physical quantity
self.pos_dims, # Position dimensions
spectrogram_spec_dims, # Spectroscopic dimensions
dtype=np.float32, # data type / precision
main_dset_attrs={'Caption': descriptors[0],
'Bytes_Per_Pixel': descriptors[1],
'Scale': descriptors[2],
'Physical_Units': descriptors[3],
'Offset': descriptors[4],
'Datatype': descriptors[5],
'Bytes_Per_Reading': descriptors[6],
'Wavelength_File': descriptors[7],
'Wavelength_Units': descriptors[8]})
h5_raw.h5_pos_vals[:, :] = self.pos_val
h5_raw[:, :] = self.spectrograms[spectrogram_f].reshape(h5_raw.shape)
def write_images(self):
if bool(self.img_desc):
for img_f, descriptors in self.img_desc.items():
#check for existing spectrogram or image and link position/spec inds/vals
#at most two channels worth of need to be checked
try:
str_main = str(usid.hdf_utils.get_all_main(self.h5_f['Measurement_000/Channel_000']))
i_beg = str_main.find('located at: \n\t') + 14
i_end = str_main.find('\nData contains') - 1
data_loc = str_main[i_beg:i_end]
channel_data = USIDataset(self.h5_f[data_loc])
h5_pos_inds = channel_data.h5_pos_inds
h5_pos_vals = channel_data.h5_pos_vals
pos_dims = None
write_pos_vals = False
if channel_data.spec_dim_sizes[0] == 1:
h5_spec_inds = channel_data.h5_spec_inds
h5_spec_vals = channel_data.h5_spec_vals
spec_dims = None
#if channel 000 is spectrogram, check next dataset
elif channel_data.spec_dim_sizes[0] !=1:
str_main = str(usid.hdf_utils.get_all_main(self.h5_f['Measurement_000/Channel_001']))
i_beg = str_main.find('located at: \n\t') + 14
i_end = str_main.find('\nData contains') - 1
data_loc = str_main[i_beg:i_end]
channel_data = USIDataset(self.h5_f[data_loc])
#channel data is an image, & we link their spec inds/vals
if channel_data.spec_dim_sizes[0] == 1:
h5_spec_inds = channel_data.h5_spec_inds
h5_spec_vals = channel_data.h5_spec_vals
spec_dims = None
#in case where channel does not exist, we make new spec/pos inds/vals
except KeyError:
#pos dims
h5_pos_inds = None
h5_pos_vals = None
pos_dims = self.pos_dims
write_pos_vals = True
#spec dims
h5_spec_inds = None
h5_spec_vals = None
spec_dims = usid.write_utils.Dimension('arb', 'a.u', 1)
channel_i = usid.hdf_utils.create_indexed_group(self.h5_meas_grp,'Channel_')
h5_raw = usid.hdf_utils.write_main_dataset(channel_i, #parent HDF5 group
(self.x_len * self.y_len, 1), # shape of Main dataset
'Raw_' + descriptors[0].replace('-', '_'),
# Name of main dataset
descriptors[0],
# Physical quantity contained in Main dataset
descriptors[2], # Units for the physical quantity
h5_pos_inds=h5_pos_inds,
h5_pos_vals=h5_pos_vals,
# Position dimensions
pos_dims=pos_dims,
# Spectroscopic dimensions
h5_spec_inds=h5_spec_inds,
h5_spec_vals=h5_spec_vals,
spec_dims=spec_dims,
dtype=np.float32, # data type / precision
main_dset_attrs={'Caption': descriptors[0],
'Scale': descriptors[1],
'Physical_Units': descriptors[2],
'Offset': descriptors[3]})
h5_raw[:, :] = self.imgs[img_f].reshape(h5_raw.shape)
if write_pos_vals:
h5_raw.h5_pos_vals[:, :] = self.pos_val
def write_spectra(self):
if bool(self.spectrum_desc):
for spec_f, descriptors in self.spectrum_desc.items():
#create new measurement group for ea spectrum
self.h5_meas_grp = usid.hdf_utils.create_indexed_group(self.h5_f, 'Measurement_')
x_name = self.spectra_x_y_dim_name[spec_f][0].split(' ')[0]
x_unit = self.spectra_x_y_dim_name[spec_f][0].split(' ')[1]
y_name = self.spectra_x_y_dim_name[spec_f][1].split(' ')[0]
y_unit = self.spectra_x_y_dim_name[spec_f][1].split(' ')[1]
spec_i_spec_dims = usid.write_utils.Dimension(x_name, x_unit, self.spectra_spec_vals[spec_f])
spec_i_pos_dims = [usid.write_utils.Dimension('X',
self.params_dictionary['XPhysUnit'].replace('\xb5','u'),
np.array([float(descriptors[1])])),
usid.write_utils.Dimension('Y',
self.params_dictionary['YPhysUnit'].replace('\xb5','u'),
np.array([float(descriptors[1])]))]
#write data to a channel in the measurement group
spec_i_ch = usid.hdf_utils.create_indexed_group(self.h5_meas_grp, 'Spectrum_')
h5_raw = usid.hdf_utils.write_main_dataset(spec_i_ch, # parent HDF5 group
(1, len(self.spectra_spec_vals[spec_f])), # shape of Main dataset
'Raw_Spectrum',
# Name of main dataset
y_name,
# Physical quantity contained in Main dataset
y_unit, # Units for the physical quantity
# Position dimensions
pos_dims=spec_i_pos_dims, spec_dims=spec_i_spec_dims,
# Spectroscopic dimensions
dtype=np.float32, # data type / precision
main_dset_attrs={'XLoc': descriptors[0],
'YLoc': descriptors[1]})
h5_raw[:, :] = self.spectra[spec_f].reshape(h5_raw.shape)
def write_ps_spectra(self):
if bool(self.pspectrum_desc):
for spec_f, descriptors in self.pspectrum_desc.items():
# create new measurement group for ea spectrum
self.h5_meas_grp = usid.hdf_utils.create_indexed_group(self.h5_f, 'Measurement_')
x_name = self.spectra_x_y_dim_name[spec_f][0].split(' ')[0]
x_unit = self.spectra_x_y_dim_name[spec_f][0].split(' ')[1]
y_name = self.spectra_x_y_dim_name[spec_f][1].split(' ')[0]
y_unit = self.spectra_x_y_dim_name[spec_f][1].split(' ')[1]
spec_i_spec_dims = usid.write_utils.Dimension(x_name, x_unit, self.spectra_spec_vals[spec_f])
spec_i_pos_dims = [usid.write_utils.Dimension('X',
self.params_dictionary['XPhysUnit'].replace(
'\xb5', 'u'),
np.array([0])),
usid.write_utils.Dimension('Y',
self.params_dictionary['YPhysUnit'].replace(
'\xb5', 'u'),
np.array([0]))]
# write data to a channel in the measurement group
spec_i_ch = usid.hdf_utils.create_indexed_group(self.h5_meas_grp, 'PowerSpectrum_')
h5_raw = usid.hdf_utils.write_main_dataset(spec_i_ch, # parent HDF5 group
(1, len(self.spectra_spec_vals[spec_f])),
# shape of Main dataset
'Raw_Spectrum',
# Name of main dataset
y_name,
# Physical quantity contained in Main dataset
y_unit, # Units for the physical quantity
# Position dimensions
pos_dims=spec_i_pos_dims, spec_dims=spec_i_spec_dims,
# Spectroscopic dimensions
dtype=np.float32, # data type / precision
main_dset_attrs={'XLoc': 0,
'YLoc': 0})
h5_raw[:, :] = self.spectra[spec_f].reshape(h5_raw.shape)
|
py | 1a512ceecd4d63da59ec6e4910518820be7fea1c | from django.urls import path,re_path
from measure import views
from rest_framework.routers import DefaultRouter
app_name = 'measure'
router = DefaultRouter()
urlpatterns = [
re_path(r'api/MeasureRecordCreate/', views.MeasureRecordCreate.as_view()),
re_path(r'api/MeasureRecordList/(?P<userid>[a-zA-Z0-9]+)/(?P<incident_category>[0-9]+)$', views.MeasureRecordList.as_view()),
re_path(r'api/MeasureRecordListByUser/(?P<userid>[a-zA-Z0-9]+)$', views.MeasureRecordListByUser.as_view())
]
|
py | 1a512de630c61faf09776d4a8bc853b03ddb05ef | from django.shortcuts import render
from .models import student_info
from .form import (student_info, studentform)
def st_info(request):
if request.method == 'POST':
        if request.POST.get('first_name') and request.POST.get('second_name') and request.POST.get('date_birth') and request.POST.get('admin_no') and request.POST.get('telephone'):
post = student_info()
first_name = request.POST.get('fname', None)
second_name = request.POST.get('sname', None)
date_birth = request.POST.get('dateofbirth', None)
admin_no = request.POST.get('admission', None)
telephone = request.POST.get('tel', None)
Title = 'STUDENT INFORMATION'
form = student_info(request.POST)
# form.save()
# form = student(request.POST or None, request.FILES or None)
student_context = {
'fname': first_name,
'sname': second_name,
'dateofbirth': date_birth,
'admission':admin_no,
'tel':telephone
}
return render(request, 'student.html', student_context)
else:
return render(request,'student.html')
|
py | 1a512e2e118f0b0406fc39d422b32773bc7e468f | from colorama import Fore
from time import sleep
from config import token
from codecs import open
from requests import get
import os
logo = """
__ _ _ _ _
/ _| | | | | (_) |
__ _| |_ ___| |_ ___| |__ _| |_
\ \/ / _/ _ \ __/ __| '_ \| | __|
> <| || __/ || (__| | | | | |_
/_/\_\_| \___|\__\___|_| |_|_|\__|
v1.3 xstratumm
"""
intro = """
xfetchit uses public VKontakte API (https://vk.com/dev/methods).
Only you are responsible for your actions.
"""
LINK = "https://api.vk.com/method/"
def cls():
os.system("cls" if os.name == "nt" else "clear")
def donate():
wallets = """
I will be very grateful for any crypto from you,
thx! :3
BTC 1HzA8mZxksDGNuTMu5sKUottp9S8bv9NKA
ETH 0xe9a30E9c2aa2D72c224e771c316aE9a7F4fdE36A
LTC LKeWWBWSN7JQxBKDx32WQnJYPD77LdNSrx
ZEC t1HvDeXHFtoTBYHbzNpVH5ocLgnannmdhhc
Dash XrFaQBuBK7GKtPyWWEy8vsTguer4qRqNCX
ETC 0x6d5644C78CBB78542c6219E3815ffE7EbEBd88bf
QTUM QeQ9SaJEHJ9uR2Apa9ymonfpAudnamBUuY
TRX TKojkeYBDY74ghqrFrj9dTWziw6y2Mh1CN
"""
cls()
print(wallets)
def fetch(offset, group_id):
r = get(LINK + "groups.getMembers",
params={"access_token": token, "v": 5.9, "group_id": group_id, "offset": offset, "fields": "contacts"}).json()
return r
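# VK's groups.getMembers returns at most 1000 members per call, so groupParse() below fetches
# the first page directly and then walks the remainder with offset-based calls to fetch().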
def parse(user, parsed):
if not "mobile_phone" in user or not user["mobile_phone"]:
pass
else:
user = user["mobile_phone"]
if user[0] in ["7", "8", "+"]:
parsed.append(user)
def groupParse(group_id):
r = get(LINK + "groups.getMembers",
params={"access_token": token, "v": 5.9, "group_id": group_id, "fields": "contacts"}).json()
if not "response" in r:
print("\nInvalid group ID or screen name (or group is private).")
print("Please check it and try one more time.")
else:
cls()
print("Number of members: " + str(r["response"]["count"]))
print("\nStarting parsing in 5 seconds.")
sleep(5)
cls()
print("Parsing started.")
print("It can take some time according to amount of group members.\n")
print("Wait...")
users = r["response"]["items"]
count = r["response"]["count"]
parsed = []
for user in users:
parse(user, parsed)
if count >= 1000:
left = count - len(users)
if left <= 1000:
r = get(LINK + "groups.getMembers",
params={"access_token": token, "v": 5.9, "group_id": group_id, "offset": 1000, "fields": "contacts"}).json()
for user in r["response"]["items"]:
parse(user, parsed)
else:
offset = 0
while left >= 1000:
offset += 1000
left -= 1000
r = fetch(offset, group_id)
for user in r["response"]["items"]:
parse(user, parsed)
offset += left
r = fetch(offset, group_id)
for user in r["response"]["items"]:
parse(user, parsed)
cls()
if len(parsed) == 0:
print("Parsing ended, but " + Fore.RED + "nothing found" + Fore.RESET + ".\nTry another group.")
else:
print("Parsing ended. Found: " + str(len(parsed)) + " numbers")
print("\nSaving results to \"found.txt\"")
if os.path.isfile("found.txt"):
f = open("found.txt", 'a', "utf-8")
else:
f = open("found.txt", "w", "utf-8")
for user in parsed:
f.write(user + "\r\n")
f.close()
def main():
cls()
print(Fore.RED + logo + Fore.RESET + intro + "\n")
print("Choose:\n\n1) Parse phone numbers\n" + "2) Exit\n" +
Fore.YELLOW + "3) Donate\n" + Fore.RESET)
choice = input("> ")
if choice == "1":
cls()
print("Choose:\n\n" + Fore.BLUE + "1) Group" + Fore.RESET + "\n*parses" +
" all users' phone numbers from specified group\n\n" +
"2) Exit\n")
choice = input("> ")
if choice == "1":
cls()
group_id = input(Fore.BLUE + "Enter group ID or its screen name\n" + Fore.RESET + "> ")
groupParse(group_id)
elif choice == "2":
exit(0)
else:
print("\nInvalid choice.\nPlease read one more time.")
elif choice == "2":
exit(0)
elif choice == "3":
donate()
exit(0)
else:
print("\nInvalid choice.\nPlease read one more time.")
if __name__ == "__main__":
if len(token) < 85:
print("\nInvalid token.\n\nPlease configure it in\n\"config.py\"")
else:
main()
|
py | 1a512e5c8b3b064743410ebb0efff9d3a37b26ee | """Output primitives for the binding generator classes.
This should really be a class, but then everybody would be passing
the output object to each other. I chose for the simpler approach
of a module with a global variable. Use SetOutputFile() or
SetOutputFileName() to change the output file.
"""
_NeedClose = 0
def SetOutputFile(file = None, needclose = 0):
"""Call this with an open file object to make it the output file.
Call it without arguments to close the current file (if necessary)
and reset it to sys.stdout.
If the second argument is true, the new file will be explicitly closed
on a subsequent call.
"""
global _File, _NeedClose
if _NeedClose:
tmp = _File
_NeedClose = 0
_File = None
tmp.close()
if file is None:
import sys
file = sys.stdout
_File = file
_NeedClose = file and needclose
def SetOutputFileName(filename = None):
"""Call this with a filename to make it the output file.
Call it without arguments to close the current file (if necessary)
and reset it to sys.stdout.
"""
SetOutputFile()
if filename:
SetOutputFile(open(filename, 'w'), 1)
SetOutputFile() # Initialize _File
_Level = 0 # Indentation level
def GetLevel():
    """Return the current indentation level."""
return _Level
def SetLevel(level):
"""Set the current indentation level.
This does no type or range checking -- use at own risk.
"""
global _Level
_Level = level
def Output(format = "", *args):
VaOutput(format, args)
def VaOutput(format, args):
    """Call this with a format string and an argument tuple for the format.
A newline is always added. Each line in the output is indented
to the proper indentation level -- even if the result of the
format expansion contains embedded newlines. Exception: lines
beginning with '#' are not indented -- these are assumed to be
    C preprocessor lines.
"""
text = format % args
if _Level > 0:
indent = '\t' * _Level
import string
lines = string.splitfields(text, '\n')
for i in range(len(lines)):
if lines[i] and lines[i][0] != '#':
lines[i] = indent + lines[i]
text = string.joinfields(lines, '\n')
_File.write(text + '\n')
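# For instance, at indentation level 1, Output("x = %d;", 3) writes "\tx = 3;" (plus a newline)
# to the current output file.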
def IndentLevel(by = 1):
"""Increment the indentation level by one.
When called with an argument, adds it to the indentation level.
"""
global _Level
if _Level+by < 0:
        raise Exception, "indentation underflow (internal error)"
_Level = _Level + by
def DedentLevel(by = 1):
"""Decrement the indentation level by one.
When called with an argument, subtracts it from the indentation level.
"""
IndentLevel(-by)
def OutIndent(format = "", *args):
"""Combine Output() followed by IndentLevel().
If no text is given, acts like lone IndentLevel().
"""
if format: VaOutput(format, args)
IndentLevel()
def OutDedent(format = "", *args):
"""Combine Output() followed by DedentLevel().
If no text is given, acts like loneDedentLevel().
"""
if format: VaOutput(format, args)
DedentLevel()
def OutLbrace(format = "", *args):
"""Like Output, but add a '{' and increase the indentation level.
If no text is given a lone '{' is output.
"""
if format:
format = format + " {"
else:
format = "{"
VaOutput(format, args)
IndentLevel()
def OutRbrace():
"""Decrease the indentation level and output a '}' on a line by itself."""
DedentLevel()
Output("}")
def OutHeader(text, dash):
"""Output a header comment using a given dash character."""
n = 64 - len(text)
Output()
Output("/* %s %s %s */", dash * (n/2), text, dash * (n - n/2))
Output()
def OutHeader1(text):
"""Output a level 1 header comment (uses '=' dashes)."""
OutHeader(text, "=")
def OutHeader2(text):
"""Output a level 2 header comment (uses '-' dashes)."""
OutHeader(text, "-")
def Out(text):
"""Output multiline text that's internally indented.
Pass this a multiline character string. The whitespace before the
first nonblank line of the string will be subtracted from all lines.
The lines are then output using Output(), but without interpretation
of formatting (if you need formatting you can do it before the call).
Recommended use:
Out('''
int main(argc, argv)
int argc;
            char **argv;
{
printf("Hello, world\\n");
exit(0);
}
''')
Caveat: the indentation must be consistent -- if you use three tabs
in the first line, (up to) three tabs are removed from following lines,
but a line beginning with 24 spaces is not trimmed at all. Don't use
this as a feature.
"""
# (Don't you love using triple quotes *inside* triple quotes? :-)
import string
lines = string.splitfields(text, '\n')
indent = ""
for line in lines:
if string.strip(line):
for c in line:
if c not in string.whitespace:
break
indent = indent + c
break
n = len(indent)
for line in lines:
if line[:n] == indent:
line = line[n:]
else:
for c in indent:
                if line[:1] != c: break
line = line[1:]
VaOutput("%s", line)
def _test():
"""Test program. Run when the module is run as a script."""
OutHeader1("test bgenOutput")
Out("""
#include <Python.h>
#include <stdio.h>
main(argc, argv)
int argc;
char **argv;
{
int i;
""")
IndentLevel()
Output("""\
/* Here are a few comment lines.
Just to test indenting multiple lines.
End of the comment lines. */
""")
Output("for (i = 0; i < argc; i++)")
OutLbrace()
Output('printf("argv[%%d] = %%s\\n", i, argv[i]);')
OutRbrace()
Output("exit(0)")
OutRbrace()
OutHeader2("end test")
if __name__ == '__main__':
_test()
|
py | 1a512ec1948106a842cad67d1cffbb02aafa0db8 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
from ..doctools import document
from .stat import stat
@document
class stat_identity(stat):
"""
Identity (do nothing) statistic
{usage}
Parameters
----------
{common_parameters}
"""
DEFAULT_PARAMS = {'geom': 'point', 'position': 'identity',
'na_rm': False}
@classmethod
def compute_panel(cls, data, scales, **params):
return data
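# A minimal usage sketch, assuming plotnine is installed and `df` is a pandas
# DataFrame with "x" and "y" columns; stat_identity passes the data through
# unchanged, which is what most geoms use by default.
#
#     from plotnine import ggplot, aes, geom_point
#     p = ggplot(df, aes("x", "y")) + geom_point(stat="identity")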
|
py | 1a5130269261a6bccca29952d887826f117ebecf | import import_target, import_target as aliased
from import_target import func, other_func
from import_target import func as aliased_func, other_func as aliased_other_func
from import_star import *
assert import_target.X == import_target.func()
assert import_target.X == func()
assert import_target.Y == other_func()
assert import_target.X == aliased.X
assert import_target.Y == aliased.Y
assert import_target.X == aliased_func()
assert import_target.Y == aliased_other_func()
assert STAR_IMPORT == '123'
# TODO: Once we can determine current directory, use that to construct this
# path:
#import sys
#sys.path.append("snippets/import_directory")
#import nested_target
#try:
# X
#except NameError:
# pass
#else:
# raise AssertionError('X should not be imported')
|
py | 1a5130edac396d3698385e8aacff0cd65b1fa5af | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
# Copyright 2011-2018, Nigel Small
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from collections import deque
from datetime import datetime
from time import sleep
from warnings import warn
from py2neo.cypher import cypher_escape
from py2neo.data import Table, Record
from py2neo.internal.addressing import get_connection_data
from py2neo.internal.caching import ThreadLocalEntityCache
from py2neo.internal.compat import string_types, xstr
from py2neo.internal.util import version_tuple, title_case, snake_case
from py2neo.matching import NodeMatcher, RelationshipMatcher
update_stats_keys = [
"constraints_added",
"constraints_removed",
"indexes_added",
"indexes_removed",
"labels_added",
"labels_removed",
"nodes_created",
"nodes_deleted",
"properties_set",
"relationships_deleted",
"relationships_created",
]
class Database(object):
""" Accessor for an entire Neo4j graph database installation over
Bolt or HTTP. Within the py2neo object hierarchy, a :class:`.Database`
contains a :class:`.Graph` in which most activity occurs. Currently,
Neo4j only supports one `Graph` per `Database`.
An explicit URI can be passed to the constructor::
>>> from py2neo import Database
>>> db = Database("bolt://camelot.example.com:7687")
Alternatively, the default value of ``bolt://localhost:7687`` is
used::
>>> default_db = Database()
>>> default_db
<Database uri='bolt://localhost:7687'>
"""
_instances = {}
_driver = None
_graphs = None
@classmethod
def forget_all(cls):
""" Forget all cached :class:`.Database` details.
"""
for _, db in cls._instances.items():
db._driver.close()
db._driver = None
cls._instances.clear()
def __new__(cls, uri=None, **settings):
connection_data = get_connection_data(uri, **settings)
key = connection_data["hash"]
try:
inst = cls._instances[key]
except KeyError:
inst = super(Database, cls).__new__(cls)
inst._connection_data = connection_data
from py2neo.internal.http import HTTPDriver, HTTPSDriver
from neo4j.v1 import Driver
inst._driver = Driver(connection_data["uri"],
auth=connection_data["auth"],
encrypted=connection_data["secure"],
user_agent=connection_data["user_agent"])
inst._graphs = {}
cls._instances[key] = inst
return inst
def __repr__(self):
class_name = self.__class__.__name__
data = self._connection_data
return "<%s uri=%r secure=%r user_agent=%r>" % (
class_name, data["uri"], data["secure"], data["user_agent"])
def __eq__(self, other):
try:
return self.uri == other.uri
except AttributeError:
return False
def __ne__(self, other):
return not self.__eq__(other)
def __hash__(self):
return hash(self._connection_data["hash"])
def __contains__(self, database):
return database in self._graphs
def __getitem__(self, database):
if database == "data" and database not in self._graphs:
self._graphs[database] = Graph(**self._connection_data)
return self._graphs[database]
def __setitem__(self, database, graph):
self._graphs[database] = graph
def __iter__(self):
yield "data"
@property
def driver(self):
return self._driver
@property
def uri(self):
""" The URI to which this `Database` is connected.
"""
return self._connection_data["uri"]
@property
def default_graph(self):
""" The default graph exposed by this database.
:rtype: :class:`.Graph`
"""
return self["data"]
def keys(self):
return list(self)
def query_jmx(self, namespace, instance=None, name=None, type=None):
""" Query the JMX service attached to this database.
"""
d = {}
for nom, _, attributes in self.default_graph.run("CALL dbms.queryJmx('')"):
ns, _, terms = nom.partition(":")
if ns != namespace:
continue
terms = dict(tuple(term.partition("=")[0::2]) for term in terms.split(","))
if instance is not None and instance != terms["instance"]:
continue
if name is not None and name != terms["name"]:
continue
if type is not None and type != terms["type"]:
continue
for attr_name, attr_data in attributes.items():
attr_value = attr_data.get("value")
if attr_value == "true":
d[attr_name] = True
elif attr_value == "false":
d[attr_name] = False
elif isinstance(attr_value, string_types) and "." in attr_value:
try:
d[attr_name] = float(attr_value)
except (TypeError, ValueError):
d[attr_name] = attr_value
else:
try:
d[attr_name] = int(attr_value)
except (TypeError, ValueError):
d[attr_name] = attr_value
return d
@property
def name(self):
""" Return the name of the active Neo4j database.
"""
info = self.query_jmx("org.neo4j", name="Kernel")
return info.get("DatabaseName")
@property
def kernel_start_time(self):
""" Return the time from which this Neo4j instance was in operational mode.
"""
info = self.query_jmx("org.neo4j", name="Kernel")
return datetime.fromtimestamp(info["KernelStartTime"] / 1000.0)
@property
def kernel_version(self):
""" Return the version of Neo4j.
"""
info = self.query_jmx("org.neo4j", name="Kernel")
version_string = info["KernelVersion"].partition("version:")[-1].partition(",")[0].strip()
return version_tuple(version_string)
@property
def product(self):
""" Return the product name.
"""
info = self.query_jmx("org.neo4j", name="Kernel")
return info["KernelVersion"]
@property
def store_creation_time(self):
""" Return the time when this Neo4j graph store was created.
"""
info = self.query_jmx("org.neo4j", name="Kernel")
return datetime.fromtimestamp(info["StoreCreationDate"] / 1000.0)
@property
def store_id(self):
""" Return an identifier that, together with store creation time,
uniquely identifies this Neo4j graph store.
"""
info = self.query_jmx("org.neo4j", name="Kernel")
return info["StoreId"]
@property
def primitive_counts(self):
""" Return a dictionary of estimates of the numbers of different
kinds of Neo4j primitives.
"""
return self.query_jmx("org.neo4j", name="Primitive count")
@property
def store_file_sizes(self):
""" Return a dictionary of file sizes for each file in the Neo4j
graph store.
"""
return self.query_jmx("org.neo4j", name="Store file sizes")
@property
def config(self):
""" Return a dictionary of the configuration parameters used to
configure Neo4j.
"""
return self.query_jmx("org.neo4j", name="Configuration")
class Graph(object):
""" The `Graph` class represents the graph data storage space within
a Neo4j graph database. Connection details are provided using URIs
and/or individual settings.
Supported URI schemes are:
- ``http``
- ``https``
- ``bolt``
- ``bolt+routing``
The full set of supported `settings` are:
============== ============================================= ============== =============
Keyword Description Type Default
============== ============================================= ============== =============
``auth`` A 2-tuple of (user, password) tuple ``('neo4j', 'password')``
``host`` Database server host name str ``'localhost'``
``password`` Password to use for authentication str ``'password'``
``port`` Database server port int ``7687``
``scheme`` Use a specific URI scheme str ``'bolt'``
``secure`` Use a secure connection (TLS) bool ``False``
``user`` User to authenticate as str ``'neo4j'``
``user_agent`` User agent to send for all connections str `(depends on URI scheme)`
============== ============================================= ============== =============
Each setting can be provided as a keyword argument or as part of
an ``http:``, ``https:``, ``bolt:`` or ``bolt+routing:`` URI. Therefore, the examples
below are equivalent::
>>> from py2neo import Graph
>>> graph_1 = Graph()
>>> graph_2 = Graph(host="localhost")
>>> graph_3 = Graph("bolt://localhost:7687")
Once obtained, the `Graph` instance provides direct or indirect
access to most of the functionality available within py2neo.
"""
#: The :class:`.Database` to which this :class:`.Graph` belongs.
database = None
#: The :class:`.Schema` resource for this :class:`.Graph`.
schema = None
node_cache = ThreadLocalEntityCache()
relationship_cache = ThreadLocalEntityCache()
def __new__(cls, uri=None, **settings):
name = settings.pop("name", "data")
database = Database(uri, **settings)
if name in database:
inst = database[name]
else:
inst = object.__new__(cls)
inst.database = database
inst.schema = Schema(inst)
inst.__name__ = name
database[name] = inst
return inst
def __repr__(self):
return "<Graph database=%r name=%r>" % (self.database, self.__name__)
def __eq__(self, other):
try:
return self.database == other.database and self.__name__ == other.__name__
except (AttributeError, TypeError):
return False
def __ne__(self, other):
return not self.__eq__(other)
def __len__(self):
return len(self.relationships)
def __bool__(self):
return True
__nonzero__ = __bool__
def begin(self, autocommit=False):
""" Begin a new :class:`.Transaction`.
:param autocommit: if :py:const:`True`, the transaction will
automatically commit after the first operation
"""
return Transaction(self, autocommit)
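    # A minimal sketch of an explicit transaction, assuming a reachable Neo4j
    # server with default settings; the Cypher and parameter are illustrative.
    #
    #     tx = graph.begin()
    #     tx.run("CREATE (a:Person {name: {n}})", n="Alice")
    #     tx.commit()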
def create(self, subgraph):
""" Run a :meth:`.Transaction.create` operation within a
:class:`.Transaction`.
:param subgraph: a :class:`.Node`, :class:`.Relationship` or other
:class:`.Subgraph`
"""
with self.begin() as tx:
tx.create(subgraph)
def delete(self, subgraph):
""" Run a :meth:`.Transaction.delete` operation within an
`autocommit` :class:`.Transaction`. To delete only the
relationships, use the :meth:`.separate` method.
:param subgraph: a :class:`.Node`, :class:`.Relationship` or other
:class:`.Subgraph` object
"""
self.begin(autocommit=True).delete(subgraph)
def delete_all(self):
""" Delete all nodes and relationships from this :class:`.Graph`.
.. warning::
This method will permanently remove **all** nodes and relationships
from the graph and cannot be undone.
"""
self.run("MATCH (a) DETACH DELETE a")
self.node_cache.clear()
self.relationship_cache.clear()
def evaluate(self, cypher, parameters=None, **kwparameters):
""" Run a :meth:`.Transaction.evaluate` operation within an
`autocommit` :class:`.Transaction`.
:param cypher: Cypher statement
:param parameters: dictionary of parameters
:return: first value from the first record returned or
:py:const:`None`.
"""
return self.begin(autocommit=True).evaluate(cypher, parameters, **kwparameters)
def exists(self, subgraph):
""" Run a :meth:`.Transaction.exists` operation within an
`autocommit` :class:`.Transaction`.
:param subgraph: a :class:`.Node`, :class:`.Relationship` or other
:class:`.Subgraph` object
:return:
"""
return self.begin(autocommit=True).exists(subgraph)
def match(self, nodes=None, r_type=None, limit=None):
""" Match and return all relationships with specific criteria.
For example, to find all of Alice's friends::
for rel in graph.match((alice, ), r_type="FRIEND"):
print(rel.end_node["name"])
:param nodes: Sequence or Set of start and end nodes (:const:`None` means any node);
a Set implies a match in any direction
:param r_type: type of relationships to match (:const:`None` means any type)
:param limit: maximum number of relationships to match (:const:`None` means unlimited)
"""
return RelationshipMatcher(self).match(nodes=nodes, r_type=r_type).limit(limit)
def match_one(self, nodes=None, r_type=None):
""" Match and return one relationship with specific criteria.
:param nodes: Sequence or Set of start and end nodes (:const:`None` means any node);
a Set implies a match in any direction
:param r_type: type of relationships to match (:const:`None` means any type)
"""
rels = list(self.match(nodes=nodes, r_type=r_type, limit=1))
if rels:
return rels[0]
else:
return None
def merge(self, subgraph, label=None, *property_keys):
""" Run a :meth:`.Transaction.merge` operation within an
`autocommit` :class:`.Transaction`.
:param subgraph: a :class:`.Node`, :class:`.Relationship` or other
:class:`.Subgraph` object
:param label: label on which to match any existing nodes
:param property_keys: property keys on which to match any existing nodes
"""
with self.begin() as tx:
tx.merge(subgraph, label, *property_keys)
@property
def name(self):
return self.__name__
@property
def nodes(self):
""" Obtain a :class:`.NodeMatcher` for this graph.
This can be used to find nodes that match given criteria:
>>> graph = Graph()
>>> graph.nodes[1234]
(_1234:Person {name: 'Alice'})
>>> graph.nodes.get(1234)
(_1234:Person {name: 'Alice'})
>>> graph.nodes.match("Person", name="Alice").first()
(_1234:Person {name: 'Alice'})
Nodes can also be efficiently counted using this attribute:
>>> len(graph.nodes)
55691
>>> len(graph.nodes.match("Person", age=33))
12
"""
return NodeMatcher(self)
def pull(self, subgraph):
""" Pull data to one or more entities from their remote counterparts.
:param subgraph: the collection of nodes and relationships to pull
"""
with self.begin() as tx:
tx.pull(subgraph)
def push(self, subgraph):
""" Push data from one or more entities to their remote counterparts.
:param subgraph: the collection of nodes and relationships to push
"""
with self.begin() as tx:
tx.push(subgraph)
@property
def relationships(self):
""" Obtain a :class:`.RelationshipMatcher` for this graph.
This can be used to find relationships that match given criteria
as well as efficiently count relationships.
"""
return RelationshipMatcher(self)
def run(self, cypher, parameters=None, **kwparameters):
""" Run a :meth:`.Transaction.run` operation within an
`autocommit` :class:`.Transaction`.
:param cypher: Cypher statement
:param parameters: dictionary of parameters
:param kwparameters: extra keyword parameters
:return:
"""
return self.begin(autocommit=True).run(cypher, parameters, **kwparameters)
def separate(self, subgraph):
""" Run a :meth:`.Transaction.separate` operation within an
`autocommit` :class:`.Transaction`.
:param subgraph: a :class:`.Node`, :class:`.Relationship` or other
:class:`.Subgraph`
"""
self.begin(autocommit=True).separate(subgraph)
class Schema(object):
""" The schema resource attached to a `Graph` instance.
"""
def __init__(self, graph):
self.graph = graph
@property
def node_labels(self):
""" The set of node labels currently defined within the graph.
"""
return frozenset(record[0] for record in self.graph.run("CALL db.labels"))
@property
def relationship_types(self):
""" The set of relationship types currently defined within the graph.
"""
return frozenset(record[0] for record in self.graph.run("CALL db.relationshipTypes"))
def create_index(self, label, *property_keys):
""" Create a schema index for a label and property
key combination.
"""
self.graph.run("CREATE INDEX ON :%s(%s)" %
(cypher_escape(label), ",".join(map(cypher_escape, property_keys)))).close()
while property_keys not in self.get_indexes(label):
sleep(0.1)
def create_uniqueness_constraint(self, label, *property_keys):
""" Create a uniqueness constraint for a label.
"""
self.graph.run("CREATE CONSTRAINT ON (a:%s) "
"ASSERT a.%s IS UNIQUE" %
(cypher_escape(label), ",".join(map(cypher_escape, property_keys)))).close()
while property_keys not in self.get_uniqueness_constraints(label):
sleep(0.1)
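    # A minimal sketch, with an illustrative label/property pair; the call
    # blocks until the server reports the new constraint.
    #
    #     graph.schema.create_uniqueness_constraint("Person", "email")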
def drop_index(self, label, *property_keys):
""" Remove label index for a given property key.
"""
self.graph.run("DROP INDEX ON :%s(%s)" %
(cypher_escape(label), ",".join(map(cypher_escape, property_keys)))).close()
def drop_uniqueness_constraint(self, label, *property_keys):
""" Remove the uniqueness constraint for a given property key.
"""
self.graph.run("DROP CONSTRAINT ON (a:%s) "
"ASSERT a.%s IS UNIQUE" %
(cypher_escape(label), ",".join(map(cypher_escape, property_keys)))).close()
def _get_indexes(self, label, t=None):
indexes = []
for record in self.graph.run("CALL db.indexes"):
description = record['description'] if 'description' in record.keys() else None
lbl = record['label'] if 'label' in record.keys() else None
properties = record['properties'] if 'properties' in record.keys() else []
state = record['state'] if 'state' in record.keys() else None
typ = record['type'] if 'type' in record.keys() else None
provider = record['provider'] if 'provider' in record.keys() else None
# minimum requirements are values for description, state, and type
if description is None or state is None or typ is None:
raise RuntimeError("Unexpected response from procedure "
"db.indexes (%d fields)" % len(record))
if state not in (u"ONLINE", u"online"):
continue
if t and typ != t:
continue
if not lbl or not properties:
from py2neo.cypher.lexer import CypherLexer
from pygments.token import Token
tokens = list(CypherLexer().get_tokens(description))
for token_type, token_value in tokens:
if token_type is Token.Name.Label:
lbl = token_value.strip("`")
elif token_type is Token.Name.Variable:
properties.append(token_value.strip("`"))
if not lbl or not properties:
continue
if lbl == label:
indexes.append(tuple(properties))
return indexes
def get_indexes(self, label):
""" Fetch a list of indexed property keys for a label.
"""
return self._get_indexes(label)
def get_uniqueness_constraints(self, label):
""" Fetch a list of unique constraints for a label.
"""
return self._get_indexes(label, "node_unique_property")
class Result(object):
""" Wraps a BoltStatementResult
"""
def __init__(self, graph, entities, result):
from neo4j.v1 import BoltStatementResult
from py2neo.internal.http import HTTPStatementResult
from py2neo.internal.packstream import PackStreamHydrator
self.result = result
self.result.error_class = GraphError.hydrate
# TODO: un-yuk this
if isinstance(result, HTTPStatementResult):
self.result._hydrant.entities = entities
self.result_iterator = iter(self.result)
elif isinstance(result, BoltStatementResult):
self.result._hydrant = PackStreamHydrator(graph, result.keys(), entities)
self.result_iterator = iter(map(Record, self.result))
else:
raise RuntimeError("Unexpected statement result class %r" % result.__class__.__name__)
def keys(self):
""" Return the keys for the whole data set.
"""
return self.result.keys()
def summary(self):
""" Return the summary.
"""
return self.result.summary()
def plan(self):
""" Return the query plan, if available.
"""
metadata = self.result.summary().metadata
plan = {}
if "plan" in metadata:
plan.update(metadata["plan"])
if "profile" in metadata:
plan.update(metadata["profile"])
if "http_plan" in metadata:
plan.update(metadata["http_plan"]["root"])
def collapse_args(data):
if "args" in data:
for key in data["args"]:
data[key] = data["args"][key]
del data["args"]
if "children" in data:
for child in data["children"]:
collapse_args(child)
def snake_keys(data):
if isinstance(data, list):
for item in data:
snake_keys(item)
return
if not isinstance(data, dict):
return
for key, value in list(data.items()):
new_key = snake_case(key)
if new_key != key:
data[new_key] = value
del data[key]
if isinstance(value, (list, dict)):
snake_keys(value)
collapse_args(plan)
snake_keys(plan)
return plan
def stats(self):
""" Return the query statistics.
"""
return vars(self.result.summary().counters)
def fetch(self):
""" Fetch and return the next item.
"""
try:
return next(self.result_iterator)
except StopIteration:
return None
class GraphError(Exception):
"""
"""
__cause__ = None
http_status_code = None
code = None
message = None
@classmethod
def hydrate(cls, data):
code = data["code"]
message = data["message"]
_, classification, category, title = code.split(".")
if classification == "ClientError":
try:
error_cls = ClientError.get_mapped_class(code)
except KeyError:
error_cls = ClientError
message = "%s: %s" % (title_case(title), message)
elif classification == "DatabaseError":
error_cls = DatabaseError
elif classification == "TransientError":
error_cls = TransientError
else:
error_cls = cls
inst = error_cls(message)
inst.code = code
inst.message = message
return inst
def __new__(cls, *args, **kwargs):
try:
exception = kwargs["exception"]
error_cls = type(xstr(exception), (cls,), {})
except KeyError:
error_cls = cls
return Exception.__new__(error_cls, *args)
def __init__(self, *args, **kwargs):
Exception.__init__(self, *args)
for key, value in kwargs.items():
setattr(self, key.lower(), value)
class ClientError(GraphError):
""" The Client sent a bad request - changing the request might yield a successful outcome.
"""
@classmethod
def get_mapped_class(cls, status):
from neo4j.exceptions import ConstraintError, CypherSyntaxError, CypherTypeError, Forbidden, AuthError
return {
# ConstraintError
"Neo.ClientError.Schema.ConstraintValidationFailed": ConstraintError,
"Neo.ClientError.Schema.ConstraintViolation": ConstraintError,
"Neo.ClientError.Statement.ConstraintVerificationFailed": ConstraintError,
"Neo.ClientError.Statement.ConstraintViolation": ConstraintError,
# CypherSyntaxError
"Neo.ClientError.Statement.InvalidSyntax": CypherSyntaxError,
"Neo.ClientError.Statement.SyntaxError": CypherSyntaxError,
# CypherTypeError
"Neo.ClientError.Procedure.TypeError": CypherTypeError,
"Neo.ClientError.Statement.InvalidType": CypherTypeError,
"Neo.ClientError.Statement.TypeError": CypherTypeError,
# Forbidden
"Neo.ClientError.General.ForbiddenOnReadOnlyDatabase": Forbidden,
"Neo.ClientError.General.ReadOnly": Forbidden,
"Neo.ClientError.Schema.ForbiddenOnConstraintIndex": Forbidden,
"Neo.ClientError.Schema.IndexBelongsToConstrain": Forbidden,
"Neo.ClientError.Security.Forbidden": Forbidden,
"Neo.ClientError.Transaction.ForbiddenDueToTransactionType": Forbidden,
# Unauthorized
"Neo.ClientError.Security.AuthorizationFailed": AuthError,
"Neo.ClientError.Security.Unauthorized": AuthError,
}[status]
class DatabaseError(GraphError):
""" The database failed to service the request.
"""
class TransientError(GraphError):
""" The database cannot service the request right now, retrying later might yield a successful outcome.
"""
class TransactionFinished(GraphError):
""" Raised when actions are attempted against a :class:`.Transaction`
that is no longer available for use.
"""
class Transaction(object):
""" A transaction is a logical container for multiple Cypher statements.
"""
session = None
_finished = False
def __init__(self, graph, autocommit=False):
self.graph = graph
self.autocommit = autocommit
self.entities = deque()
self.driver = driver = self.graph.database.driver
self.session = driver.session()
self.results = []
if autocommit:
self.transaction = None
else:
self.transaction = self.session.begin_transaction()
def __del__(self):
if self.session:
self.session.close()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
if exc_type is None:
self.commit()
else:
self.rollback()
def _assert_unfinished(self):
if self._finished:
raise TransactionFinished(self)
def finished(self):
""" Indicates whether or not this transaction has been completed
or is still open.
"""
return self._finished
def run(self, cypher, parameters=None, **kwparameters):
""" Send a Cypher statement to the server for execution and return
a :py:class:`.Cursor` for navigating its result.
:param cypher: Cypher statement
:param parameters: dictionary of parameters
:returns: :py:class:`.Cursor` object
"""
from neo4j.v1 import CypherError
self._assert_unfinished()
try:
entities = self.entities.popleft()
except IndexError:
entities = {}
try:
if self.transaction:
result = self.transaction.run(cypher, parameters, **kwparameters)
else:
result = self.session.run(cypher, parameters, **kwparameters)
except CypherError as error:
raise GraphError.hydrate({"code": error.code, "message": error.message})
else:
r = Result(self.graph, entities, result)
self.results.append(r)
return Cursor(r)
finally:
if not self.transaction:
self.finish()
def process(self):
""" Send all pending statements to the server for processing.
"""
self._assert_unfinished()
self.session.sync()
def finish(self):
self.process()
if self.transaction:
self.transaction.close()
self._assert_unfinished()
self._finished = True
self.session.close()
self.session = None
def commit(self):
""" Commit the transaction.
"""
if self.transaction:
self.transaction.success = True
self.finish()
def rollback(self):
""" Roll back the current transaction, undoing all actions previously taken.
"""
self._assert_unfinished()
if self.transaction:
self.transaction.success = False
self.finish()
def evaluate(self, cypher, parameters=None, **kwparameters):
""" Execute a single Cypher statement and return the value from
the first column of the first record.
:param cypher: Cypher statement
:param parameters: dictionary of parameters
:returns: single return value or :const:`None`
"""
return self.run(cypher, parameters, **kwparameters).evaluate(0)
def create(self, subgraph):
""" Create remote nodes and relationships that correspond to those in a
local subgraph. Any entities in *subgraph* that are already bound to
remote entities will remain unchanged, those which are not will become
bound to their newly-created counterparts.
For example::
>>> from py2neo import Graph, Node, Relationship
>>> g = Graph()
>>> tx = g.begin()
>>> a = Node("Person", name="Alice")
>>> tx.create(a)
>>> b = Node("Person", name="Bob")
>>> ab = Relationship(a, "KNOWS", b)
>>> tx.create(ab)
>>> tx.commit()
>>> g.exists(ab)
True
:param subgraph: a :class:`.Node`, :class:`.Relationship` or other
creatable object
"""
try:
create = subgraph.__db_create__
except AttributeError:
raise TypeError("No method defined to create object %r" % subgraph)
else:
create(self)
def delete(self, subgraph):
""" Delete the remote nodes and relationships that correspond to
those in a local subgraph. To delete only the relationships, use
the :meth:`.separate` method.
:param subgraph: a :class:`.Node`, :class:`.Relationship` or other
:class:`.Subgraph`
"""
try:
delete = subgraph.__db_delete__
except AttributeError:
raise TypeError("No method defined to delete object %r" % subgraph)
else:
delete(self)
def exists(self, subgraph):
""" Determine whether one or more graph entities all exist within the
database. Note that if any nodes or relationships in *subgraph* are not
bound to remote counterparts, this method will return ``False``.
:param subgraph: a :class:`.Node`, :class:`.Relationship` or other
:class:`.Subgraph`
:returns: ``True`` if all entities exist remotely, ``False`` otherwise
"""
try:
exists = subgraph.__db_exists__
except AttributeError:
raise TypeError("No method defined to check existence of object %r" % subgraph)
else:
return exists(self)
def merge(self, subgraph, primary_label=None, primary_key=None):
""" Merge nodes and relationships from a local subgraph into the
database. Each node and relationship is merged independently, with
nodes merged first and relationships merged second.
For each node, the merge is carried out by comparing that node with a
potential remote equivalent on the basis of a label and property value.
If no remote match is found, a new node is created. The label and
property to use for comparison are determined by `primary_label` and
`primary_key` but may be overridden for individual nodes by the
presence of `__primarylabel__` and `__primarykey__` attributes on
the node itself. Note that multiple property keys may be specified by
using a tuple.
For each relationship, the merge is carried out by comparing that
relationship with a potential remote equivalent on the basis of matching
start and end nodes plus relationship type. If no remote match is found,
a new relationship is created.
:param subgraph: a :class:`.Node`, :class:`.Relationship` or other
:class:`.Subgraph` object
:param primary_label: label on which to match any existing nodes
:param primary_key: property key(s) on which to match any existing
nodes
"""
try:
merge = subgraph.__db_merge__
except AttributeError:
raise TypeError("No method defined to merge object %r" % subgraph)
else:
merge(self, primary_label, primary_key)
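    # A minimal sketch of merging a node, assuming a reachable server; the
    # "Person" label and "name" key are illustrative choices, not API defaults.
    #
    #     from py2neo import Node
    #     a = Node("Person", name="Alice")
    #     tx = graph.begin()
    #     tx.merge(a, primary_label="Person", primary_key="name")
    #     tx.commit()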
def pull(self, subgraph):
""" Update local entities from their remote counterparts.
For any nodes and relationships that exist in both the local
:class:`.Subgraph` and the remote :class:`.Graph`, pull properties
and node labels into the local copies. This operation does not
create or delete any entities.
:param subgraph: a :class:`.Node`, :class:`.Relationship` or other
:class:`.Subgraph`
"""
try:
pull = subgraph.__db_pull__
except AttributeError:
raise TypeError("No method defined to pull object %r" % subgraph)
else:
return pull(self)
def push(self, subgraph):
""" Update remote entities from their local counterparts.
For any nodes and relationships that exist in both the local
:class:`.Subgraph` and the remote :class:`.Graph`, push properties
and node labels into the remote copies. This operation does not
create or delete any entities.
:param subgraph: a :class:`.Node`, :class:`.Relationship` or other
:class:`.Subgraph`
"""
try:
push = subgraph.__db_push__
except AttributeError:
raise TypeError("No method defined to push object %r" % subgraph)
else:
return push(self)
def separate(self, subgraph):
""" Delete the remote relationships that correspond to those in a local
subgraph. This leaves any nodes untouched.
:param subgraph: a :class:`.Node`, :class:`.Relationship` or other
:class:`.Subgraph`
"""
try:
separate = subgraph.__db_separate__
except AttributeError:
raise TypeError("No method defined to separate object %r" % subgraph)
else:
separate(self)
class Cursor(object):
""" A `Cursor` is a navigator for a stream of records.
A cursor can be thought of as a window onto an underlying data
stream. All cursors in py2neo are "forward-only", meaning that
navigation starts before the first record and may proceed only in a
forward direction.
It is not generally necessary for application code to instantiate a
cursor directly as one will be returned by any Cypher execution method.
However, cursor creation requires only a :class:`.DataSource` object
which contains the logic for how to access the source data that the
cursor navigates.
Many simple cursor use cases require only the :meth:`.forward` method
and the :attr:`.current` attribute. To navigate through all available
records, a `while` loop can be used::
while cursor.forward():
print(cursor.current["name"])
If only the first record is of interest, a similar `if` structure will
do the job::
if cursor.forward():
print(cursor.current["name"])
To combine `forward` and `current` into a single step, use the built-in
    :py:func:`next` function::
print(next(cursor)["name"])
Cursors are also iterable, so can be used in a loop::
for record in cursor:
print(record["name"])
For queries that are expected to return only a single value within a
single record, use the :meth:`.evaluate` method. This will return the
first value from the next record or :py:const:`None` if neither the
field nor the record are present::
print(cursor.evaluate())
"""
def __init__(self, result):
self._result = result
self._current = None
def __next__(self):
if self.forward():
return self._current
else:
raise StopIteration()
# Exists only for Python 2 iteration compatibility
next = __next__
def __iter__(self):
while self.forward():
yield self._current
def __getitem__(self, key):
return self._current[key]
@property
def current(self):
""" Returns the current record or :py:const:`None` if no record
has yet been selected.
"""
return self._current
def close(self):
""" Close this cursor and free up all associated resources.
"""
self._result = None
self._current = None
def keys(self):
""" Return the field names for the records in the stream.
"""
return self._result.keys()
def summary(self):
""" Return the result summary.
"""
return self._result.summary()
def plan(self):
""" Return the plan returned with this result, if any.
"""
return self._result.plan()
def stats(self):
""" Return the query statistics.
"""
s = dict.fromkeys(update_stats_keys, 0)
s.update(self._result.stats())
s["contains_updates"] = bool(sum(s.get(k, 0) for k in update_stats_keys))
return s
def forward(self, amount=1):
""" Attempt to move the cursor one position forward (or by
another amount if explicitly specified). The cursor will move
position by up to, but never more than, the amount specified.
If not enough scope for movement remains, only that remainder
will be consumed. The total amount moved is returned.
:param amount: the amount to move the cursor
:returns: the amount that the cursor was able to move
"""
if amount == 0:
return 0
assert amount > 0
amount = int(amount)
moved = 0
fetch = self._result.fetch
while moved != amount:
new_current = fetch()
if new_current is None:
break
else:
self._current = new_current
moved += 1
return moved
def evaluate(self, field=0):
""" Return the value of the first field from the next record
(or the value of another field if explicitly specified).
This method attempts to move the cursor one step forward and,
if successful, selects and returns an individual value from
the new current record. By default, this value will be taken
from the first value in that record but this can be overridden
with the `field` argument, which can represent either a
positional index or a textual key.
If the cursor cannot be moved forward or if the record contains
no values, :py:const:`None` will be returned instead.
This method is particularly useful when it is known that a
Cypher query returns only a single value.
:param field: field to select value from (optional)
:returns: value of the field or :py:const:`None`
Example:
>>> from py2neo import Graph
>>> g = Graph()
>>> g.run("MATCH (a) WHERE a.email={x} RETURN a.name", x="[email protected]").evaluate()
'Bob Robertson'
"""
if self.forward():
try:
return self[field]
except IndexError:
return None
else:
return None
def data(self):
""" Consume and extract the entire result as a list of
dictionaries.
::
>>> from py2neo import Graph
>>> graph = Graph()
>>> graph.run("MATCH (a:Person) RETURN a.name, a.born LIMIT 4").data()
[{'a.born': 1964, 'a.name': 'Keanu Reeves'},
{'a.born': 1967, 'a.name': 'Carrie-Anne Moss'},
{'a.born': 1961, 'a.name': 'Laurence Fishburne'},
{'a.born': 1960, 'a.name': 'Hugo Weaving'}]
:return: the full query result
:rtype: `list` of `dict`
"""
return [record.data() for record in self]
def to_table(self):
""" Consume and extract the entire result as a :class:`.Table`
object.
:return: the full query result
"""
return Table(self)
def to_subgraph(self):
""" Consume and extract the entire result as a :class:`.Subgraph`
containing the union of all the graph structures within.
:return: :class:`.Subgraph` object
"""
s = None
for record in self:
s_ = record.to_subgraph()
if s_ is not None:
if s is None:
s = s_
else:
s |= s_
return s
def to_ndarray(self, dtype=None, order='K'):
""" Consume and extract the entire result as a
`numpy.ndarray <https://docs.scipy.org/doc/numpy/reference/generated/numpy.ndarray.html>`_.
.. note::
This method requires `numpy` to be installed.
:param dtype:
:param order:
:warns: If `numpy` is not installed
:returns: `ndarray <https://docs.scipy.org/doc/numpy/reference/generated/numpy.ndarray.html>`__ object.
"""
try:
from numpy import array
except ImportError:
warn("Numpy is not installed.")
raise
else:
return array(list(map(list, self)), dtype=dtype, order=order)
def to_series(self, field=0, index=None, dtype=None):
""" Consume and extract one field of the entire result as a
`pandas.Series <http://pandas.pydata.org/pandas-docs/stable/dsintro.html#series>`_.
.. note::
This method requires `pandas` to be installed.
:param field:
:param index:
:param dtype:
:warns: If `pandas` is not installed
:returns: `Series <http://pandas.pydata.org/pandas-docs/stable/dsintro.html#series>`__ object.
"""
try:
from pandas import Series
except ImportError:
warn("Pandas is not installed.")
raise
else:
return Series([record[field] for record in self], index=index, dtype=dtype)
def to_data_frame(self, index=None, columns=None, dtype=None):
""" Consume and extract the entire result as a
`pandas.DataFrame <http://pandas.pydata.org/pandas-docs/stable/dsintro.html#dataframe>`_.
::
>>> from py2neo import Graph
>>> graph = Graph()
>>> graph.run("MATCH (a:Person) RETURN a.name, a.born LIMIT 4").to_data_frame()
a.born a.name
0 1964 Keanu Reeves
1 1967 Carrie-Anne Moss
2 1961 Laurence Fishburne
3 1960 Hugo Weaving
.. note::
This method requires `pandas` to be installed.
:param index: Index to use for resulting frame.
:param columns: Column labels to use for resulting frame.
:param dtype: Data type to force.
:warns: If `pandas` is not installed
:returns: `DataFrame <http://pandas.pydata.org/pandas-docs/stable/dsintro.html#series>`__ object.
"""
try:
from pandas import DataFrame
except ImportError:
warn("Pandas is not installed.")
raise
else:
return DataFrame(list(map(dict, self)), index=index, columns=columns, dtype=dtype)
def to_matrix(self, mutable=False):
""" Consume and extract the entire result as a
`sympy.Matrix <http://docs.sympy.org/latest/tutorial/matrices.html>`_.
.. note::
This method requires `sympy` to be installed.
:param mutable:
:returns: `Matrix <http://docs.sympy.org/latest/tutorial/matrices.html>`_ object.
"""
try:
from sympy import MutableMatrix, ImmutableMatrix
except ImportError:
warn("Sympy is not installed.")
raise
else:
if mutable:
return MutableMatrix(list(map(list, self)))
else:
return ImmutableMatrix(list(map(list, self)))
|
py | 1a51321a05bc6d5853a09e6b5f0eb734ab006599 | """
Display number of ongoing tickets from RT queues.
Configuration parameters:
cache_timeout: how often we refresh this module in seconds (default 300)
db: database to use (default '')
format: see placeholders below (default 'general: {General}')
host: database host to connect to (default '')
password: login password (default '')
threshold_critical: set bad color above this threshold (default 20)
threshold_warning: set degraded color above this threshold (default 10)
timeout: timeout for database connection (default 5)
user: login user (default '')
Format placeholders:
{YOUR_QUEUE_NAME} number of ongoing RT tickets (open+new+stalled)
Color options:
color_bad: Exceeded threshold_critical
color_degraded: Exceeded threshold_warning
Requires:
PyMySQL: https://pypi.org/project/PyMySQL/
or
MySQL-python: https://pypi.org/project/MySQL-python/
It features thresholds to colorize the output and forces a low timeout to
limit the impact of a server connectivity problem on your i3bar freshness.
@author ultrabug
SAMPLE OUTPUT
{'full_text': 'general: 24'}
"""
try:
import pymysql as mysql
except: # noqa e722 // (ImportError, ModuleNotFoundError): # py2/py3
import MySQLdb as mysql
class Py3status:
"""
"""
# available configuration parameters
cache_timeout = 300
db = ""
format = "general: {General}"
host = ""
password = ""
threshold_critical = 20
threshold_warning = 10
timeout = 5
user = ""
def rt(self):
has_one_queue_formatted = False
response = {"full_text": ""}
tickets = {}
mydb = mysql.connect(
host=self.host,
user=self.user,
passwd=self.password,
db=self.db,
connect_timeout=self.timeout,
)
mycr = mydb.cursor()
mycr.execute(
"""select q.Name as queue, coalesce(total,0) as total
from Queues as q
left join (
select t.Queue as queue, count(t.id) as total
from Tickets as t
where Status = 'new' or Status = 'open' or Status = 'stalled'
group by t.Queue)
as s on s.Queue = q.id
group by q.Name;"""
)
for row in mycr.fetchall():
queue, nb_tickets = row
if queue == "___Approvals":
continue
tickets[queue] = nb_tickets
if queue in self.format:
has_one_queue_formatted = True
if nb_tickets > self.threshold_critical:
response.update({"color": self.py3.COLOR_BAD})
elif nb_tickets > self.threshold_warning and "color" not in response:
response.update({"color": self.py3.COLOR_DEGRADED})
if has_one_queue_formatted:
response["full_text"] = self.py3.safe_format(self.format, tickets)
else:
response["full_text"] = f"queue(s) not found ({self.format})"
mydb.close()
response["cached_until"] = self.py3.time_in(self.cache_timeout)
return response
if __name__ == "__main__":
"""
Run module in test mode.
"""
from py3status.module_test import module_test
module_test(Py3status)
|
py | 1a5133065d4d5e2a0349dafa4de346672b98e595 | from __future__ import division
import inspect
import os
from collections import OrderedDict, namedtuple
from copy import copy
from distutils.version import LooseVersion
from itertools import product
import corner
import json
import matplotlib
import matplotlib.pyplot as plt
from matplotlib import lines as mpllines
import numpy as np
import pandas as pd
import scipy.stats
from scipy.special import logsumexp
from . import utils
from .utils import (
logger, infer_parameters_from_function,
check_directory_exists_and_if_not_mkdir,
latex_plot_format, safe_save_figure,
BilbyJsonEncoder, load_json,
move_old_file, get_version_information,
decode_bilby_json,
)
from .prior import Prior, PriorDict, DeltaFunction
def result_file_name(outdir, label, extension='json', gzip=False):
""" Returns the standard filename used for a result file
Parameters
----------
outdir: str
Name of the output directory
label: str
Naming scheme of the output file
extension: str, optional
Whether to save as `hdf5` or `json`
gzip: bool, optional
Set to True to append `.gz` to the extension for saving in gzipped format
Returns
-------
str: File name of the output file
"""
if extension in ['json', 'hdf5']:
if extension == 'json' and gzip:
return os.path.join(outdir, '{}_result.{}.gz'.format(label, extension))
else:
return os.path.join(outdir, '{}_result.{}'.format(label, extension))
else:
raise ValueError("Extension type {} not understood".format(extension))
def _determine_file_name(filename, outdir, label, extension, gzip):
""" Helper method to determine the filename """
if filename is not None:
return filename
else:
if (outdir is None) and (label is None):
raise ValueError("No information given to load file")
else:
return result_file_name(outdir, label, extension, gzip)
def read_in_result(filename=None, outdir=None, label=None, extension='json', gzip=False):
""" Reads in a stored bilby result object
Parameters
----------
filename: str
Path to the file to be read (alternative to giving the outdir and label)
outdir, label, extension: str
Name of the output directory, label and extension used for the default
naming scheme.
"""
filename = _determine_file_name(filename, outdir, label, extension, gzip)
# Get the actual extension (may differ from the default extension if the filename is given)
extension = os.path.splitext(filename)[1].lstrip('.')
if extension == 'gz': # gzipped file
extension = os.path.splitext(os.path.splitext(filename)[0])[1].lstrip('.')
if 'json' in extension:
result = Result.from_json(filename=filename)
elif ('hdf5' in extension) or ('h5' in extension):
result = Result.from_hdf5(filename=filename)
elif extension is None:
raise ValueError("No filetype extension provided")
else:
raise ValueError("Filetype {} not understood".format(extension))
return result
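# A minimal sketch of loading a stored result, assuming an earlier run wrote
# "outdir/label_result.json" (both names are illustrative).
#
#     result = read_in_result(outdir="outdir", label="label")
#     print(result)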
def get_weights_for_reweighting(
result, new_likelihood=None, new_prior=None, old_likelihood=None,
old_prior=None):
""" Calculate the weights for reweight()
See bilby.core.result.reweight() for help with the inputs
Returns
-------
ln_weights: array
An array of the natural-log weights
new_log_likelihood_array: array
An array of the natural-log likelihoods
new_log_prior_array: array
An array of the natural-log priors
"""
nposterior = len(result.posterior)
old_log_likelihood_array = np.zeros(nposterior)
old_log_prior_array = np.zeros(nposterior)
new_log_likelihood_array = np.zeros(nposterior)
new_log_prior_array = np.zeros(nposterior)
for ii, sample in result.posterior.iterrows():
# Convert sample to dictionary
par_sample = {key: sample[key] for key in result.search_parameter_keys}
if old_likelihood is not None:
old_likelihood.parameters.update(par_sample)
old_log_likelihood_array[ii] = old_likelihood.log_likelihood()
else:
old_log_likelihood_array[ii] = sample["log_likelihood"]
if new_likelihood is not None:
new_likelihood.parameters.update(par_sample)
new_log_likelihood_array[ii] = new_likelihood.log_likelihood()
else:
# Don't perform likelihood reweighting (i.e. likelihood isn't updated)
new_log_likelihood_array[ii] = old_log_likelihood_array[ii]
if old_prior is not None:
old_log_prior_array[ii] = old_prior.ln_prob(par_sample)
else:
old_log_prior_array[ii] = sample["log_prior"]
if new_prior is not None:
new_log_prior_array[ii] = new_prior.ln_prob(par_sample)
else:
# Don't perform prior reweighting (i.e. prior isn't updated)
new_log_prior_array[ii] = old_log_prior_array[ii]
ln_weights = (
new_log_likelihood_array + new_log_prior_array - old_log_likelihood_array - old_log_prior_array)
return ln_weights, new_log_likelihood_array, new_log_prior_array
def rejection_sample(posterior, weights):
""" Perform rejection sampling on a posterior using weights
Parameters
----------
posterior: pd.DataFrame or np.ndarray of shape (nsamples, nparameters)
The dataframe or array containing posterior samples
weights: np.ndarray
An array of weights
Returns
-------
reweighted_posterior: pd.DataFrame
The posterior resampled using rejection sampling
"""
keep = weights > np.random.uniform(0, max(weights), weights.shape)
return posterior[keep]
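# A toy illustration of rejection sampling; the column name and the weight
# function are arbitrary assumptions, not part of the bilby workflow.
#
#     import numpy as np
#     import pandas as pd
#     toy = pd.DataFrame({"x": np.random.normal(size=1000)})
#     weights = np.exp(-0.5 * toy["x"].values ** 2)
#     kept = rejection_sample(toy, weights)    # random-length subset of rows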
def reweight(result, label=None, new_likelihood=None, new_prior=None,
old_likelihood=None, old_prior=None):
""" Reweight a result to a new likelihood/prior using rejection sampling
Parameters
----------
label: str, optional
An updated label to apply to the result object
    new_likelihood: bilby.core.likelihood.Likelihood, (optional)
        If given, the new likelihood to reweight to. If not given, likelihood
        reweighting is not applied
    new_prior: bilby.core.prior.PriorDict, (optional)
        If given, the new prior to reweight to. If not given, prior
        reweighting is not applied
old_likelihood: bilby.core.likelihood.Likelihood, (optional)
If given, calculate the old likelihoods from this object. If not given,
the values stored in the posterior are used.
old_prior: bilby.core.prior.PriorDict, (optional)
If given, calculate the old prior from this object. If not given,
the values stored in the posterior are used.
Returns
-------
result: bilby.core.result.Result
A copy of the result object with a reweighted posterior
"""
result = copy(result)
nposterior = len(result.posterior)
logger.info("Reweighting posterior with {} samples".format(nposterior))
ln_weights, new_log_likelihood_array, new_log_prior_array = get_weights_for_reweighting(
result, new_likelihood=new_likelihood, new_prior=new_prior,
old_likelihood=old_likelihood, old_prior=old_prior)
# Overwrite the likelihood and prior evaluations
result.posterior["log_likelihood"] = new_log_likelihood_array
result.posterior["log_prior"] = new_log_prior_array
weights = np.exp(ln_weights)
result.posterior = rejection_sample(result.posterior, weights=weights)
logger.info("Rejection sampling resulted in {} samples".format(len(result.posterior)))
result.meta_data["reweighted_using_rejection_sampling"] = True
result.log_evidence += logsumexp(ln_weights) - np.log(nposterior)
result.priors = new_prior
if label:
result.label = label
else:
result.label += "_reweighted"
return result
class Result(object):
def __init__(self, label='no_label', outdir='.', sampler=None,
search_parameter_keys=None, fixed_parameter_keys=None,
constraint_parameter_keys=None, priors=None,
sampler_kwargs=None, injection_parameters=None,
meta_data=None, posterior=None, samples=None,
nested_samples=None, log_evidence=np.nan,
log_evidence_err=np.nan, log_noise_evidence=np.nan,
log_bayes_factor=np.nan, log_likelihood_evaluations=None,
log_prior_evaluations=None, sampling_time=None, nburn=None,
num_likelihood_evaluations=None, walkers=None,
max_autocorrelation_time=None, use_ratio=None,
parameter_labels=None, parameter_labels_with_unit=None,
gzip=False, version=None):
""" A class to store the results of the sampling run
Parameters
----------
label, outdir, sampler: str
The label, output directory, and sampler used
search_parameter_keys, fixed_parameter_keys, constraint_parameter_keys: list
Lists of the search, constraint, and fixed parameter keys.
Elements of the list should be of type `str` and match the keys
of the `prior`
priors: dict, bilby.core.prior.PriorDict
A dictionary of the priors used in the run
sampler_kwargs: dict
Key word arguments passed to the sampler
injection_parameters: dict
A dictionary of the injection parameters
meta_data: dict
A dictionary of meta data to store about the run
posterior: pandas.DataFrame
A pandas data frame of the posterior
samples, nested_samples: array_like
An array of the output posterior samples and the unweighted samples
log_evidence, log_evidence_err, log_noise_evidence, log_bayes_factor: float
Natural log evidences
log_likelihood_evaluations: array_like
The evaluations of the likelihood for each sample point
num_likelihood_evaluations: int
The number of times the likelihood function is called
log_prior_evaluations: array_like
The evaluations of the prior for each sample point
sampling_time: float
The time taken to complete the sampling
nburn: int
The number of burn-in steps discarded for MCMC samplers
walkers: array_like
The samplers taken by a ensemble MCMC samplers
max_autocorrelation_time: float
The estimated maximum autocorrelation time for MCMC samplers
use_ratio: bool
A boolean stating whether the likelihood ratio, as opposed to the
likelihood was used during sampling
parameter_labels, parameter_labels_with_unit: list
Lists of the latex-formatted parameter labels
gzip: bool
Set to True to gzip the results file (if using json format)
version: str,
Version information for software used to generate the result. Note,
this information is generated when the result object is initialized
Note
---------
        All sampling output parameters, e.g. the samples themselves, are
typically not given at initialisation, but set at a later stage.
"""
self.label = label
self.outdir = os.path.abspath(outdir)
self.sampler = sampler
self.search_parameter_keys = search_parameter_keys
self.fixed_parameter_keys = fixed_parameter_keys
self.constraint_parameter_keys = constraint_parameter_keys
self.parameter_labels = parameter_labels
self.parameter_labels_with_unit = parameter_labels_with_unit
self.priors = priors
self.sampler_kwargs = sampler_kwargs
self.meta_data = meta_data
self.injection_parameters = injection_parameters
self.posterior = posterior
self.samples = samples
self.nested_samples = nested_samples
self.walkers = walkers
self.nburn = nburn
self.use_ratio = use_ratio
self.log_evidence = log_evidence
self.log_evidence_err = log_evidence_err
self.log_noise_evidence = log_noise_evidence
self.log_bayes_factor = log_bayes_factor
self.log_likelihood_evaluations = log_likelihood_evaluations
self.log_prior_evaluations = log_prior_evaluations
self.num_likelihood_evaluations = num_likelihood_evaluations
self.sampling_time = sampling_time
self.version = version
self.max_autocorrelation_time = max_autocorrelation_time
self.prior_values = None
self._kde = None
@classmethod
def from_hdf5(cls, filename=None, outdir=None, label=None):
""" Read in a saved .h5 data file
Parameters
----------
filename: str
If given, try to load from this filename
outdir, label: str
If given, use the default naming convention for saved results file
Returns
-------
result: bilby.core.result.Result
Raises
-------
ValueError: If no filename is given and either outdir or label is None
If no bilby.core.result.Result is found in the path
"""
import deepdish
filename = _determine_file_name(filename, outdir, label, 'hdf5', False)
if os.path.isfile(filename):
dictionary = deepdish.io.load(filename)
# Some versions of deepdish/pytables return the dictionary as
# a dictionary with a key 'data'
if len(dictionary) == 1 and 'data' in dictionary:
dictionary = dictionary['data']
if "priors" in dictionary:
# parse priors from JSON string (allowing for backwards
# compatibility)
if not isinstance(dictionary["priors"], PriorDict):
try:
priordict = PriorDict()
for key, value in dictionary["priors"].items():
if key not in ["__module__", "__name__", "__prior_dict__"]:
priordict[key] = decode_bilby_json(value)
dictionary["priors"] = priordict
except Exception as e:
raise IOError(
"Unable to parse priors from '{}':\n{}".format(
filename, e,
)
)
try:
if isinstance(dictionary.get('posterior', None), dict):
dictionary['posterior'] = pd.DataFrame(dictionary['posterior'])
return cls(**dictionary)
except TypeError as e:
raise IOError("Unable to load dictionary, error={}".format(e))
else:
raise IOError("No result '{}' found".format(filename))
@classmethod
def from_json(cls, filename=None, outdir=None, label=None, gzip=False):
""" Read in a saved .json data file
Parameters
----------
filename: str
If given, try to load from this filename
outdir, label: str
If given, use the default naming convention for saved results file
Returns
-------
result: bilby.core.result.Result
Raises
-------
ValueError: If no filename is given and either outdir or label is None
If no bilby.core.result.Result is found in the path
"""
filename = _determine_file_name(filename, outdir, label, 'json', gzip)
if os.path.isfile(filename):
dictionary = load_json(filename, gzip)
try:
return cls(**dictionary)
except TypeError as e:
raise IOError("Unable to load dictionary, error={}".format(e))
else:
raise IOError("No result '{}' found".format(filename))
def __str__(self):
"""Print a summary """
if getattr(self, 'posterior', None) is not None:
if getattr(self, 'log_noise_evidence', None) is not None:
return ("nsamples: {:d}\n"
"ln_noise_evidence: {:6.3f}\n"
"ln_evidence: {:6.3f} +/- {:6.3f}\n"
"ln_bayes_factor: {:6.3f} +/- {:6.3f}\n"
.format(len(self.posterior), self.log_noise_evidence, self.log_evidence,
self.log_evidence_err, self.log_bayes_factor,
self.log_evidence_err))
else:
return ("nsamples: {:d}\n"
"ln_evidence: {:6.3f} +/- {:6.3f}\n"
.format(len(self.posterior), self.log_evidence, self.log_evidence_err))
else:
return ''
@property
def priors(self):
if self._priors is not None:
return self._priors
else:
raise ValueError('Result object has no priors')
@priors.setter
def priors(self, priors):
if isinstance(priors, dict):
if isinstance(priors, PriorDict):
self._priors = priors
else:
self._priors = PriorDict(priors)
if self.parameter_labels is None:
if 'H_eff5' in self.search_parameter_keys:
self.priors['H_eff5'].latex_label = '$H_{eff5}$'
if 'chi_1' in self.search_parameter_keys:
self.priors['chi_1'].latex_label = '$\\chi_1$'
if 'chi_2' in self.search_parameter_keys:
self.priors['chi_2'].latex_label = '$\\chi_2$'
if 'Q_tilde' in self.search_parameter_keys:
self.priors['Q_tilde'].latex_label = '$\\tilde{Q}$'
self.parameter_labels = [self.priors[k].latex_label for k in
self.search_parameter_keys]
if self.parameter_labels_with_unit is None:
self.parameter_labels_with_unit = [
self.priors[k].latex_label_with_unit for k in
self.search_parameter_keys]
elif priors is None:
self._priors = priors
self.parameter_labels = self.search_parameter_keys
self.parameter_labels_with_unit = self.search_parameter_keys
else:
raise ValueError("Input priors not understood")
@property
def samples(self):
""" An array of samples """
if self._samples is not None:
return self._samples
else:
raise ValueError("Result object has no stored samples")
@samples.setter
def samples(self, samples):
self._samples = samples
@property
def num_likelihood_evaluations(self):
""" number of likelihood evaluations """
if self._num_likelihood_evaluations is not None:
return self._num_likelihood_evaluations
else:
raise ValueError("Result object has no stored likelihood evaluations")
@num_likelihood_evaluations.setter
def num_likelihood_evaluations(self, num_likelihood_evaluations):
self._num_likelihood_evaluations = num_likelihood_evaluations
@property
    def nested_samples(self):
        """ An array of unweighted samples """
if self._nested_samples is not None:
return self._nested_samples
else:
raise ValueError("Result object has no stored nested samples")
@nested_samples.setter
def nested_samples(self, nested_samples):
self._nested_samples = nested_samples
@property
    def walkers(self):
        """ An array of the ensemble walkers """
if self._walkers is not None:
return self._walkers
else:
raise ValueError("Result object has no stored walkers")
@walkers.setter
def walkers(self, walkers):
self._walkers = walkers
@property
    def nburn(self):
        """ The number of burn-in steps discarded from the walkers """
if self._nburn is not None:
return self._nburn
else:
raise ValueError("Result object has no stored nburn")
@nburn.setter
def nburn(self, nburn):
self._nburn = nburn
@property
def posterior(self):
""" A pandas data frame of the posterior """
if self._posterior is not None:
return self._posterior
else:
raise ValueError("Result object has no stored posterior")
@posterior.setter
def posterior(self, posterior):
self._posterior = posterior
@property
def log_10_bayes_factor(self):
return self.log_bayes_factor / np.log(10)
@property
def log_10_evidence(self):
return self.log_evidence / np.log(10)
@property
def log_10_evidence_err(self):
return self.log_evidence_err / np.log(10)
@property
def log_10_noise_evidence(self):
return self.log_noise_evidence / np.log(10)
@property
def version(self):
return self._version
@version.setter
def version(self, version):
if version is None:
self._version = 'bilby={}'.format(utils.get_version_information())
else:
self._version = version
def _get_save_data_dictionary(self):
# This list defines all the parameters saved in the result object
save_attrs = [
'label', 'outdir', 'sampler', 'log_evidence', 'log_evidence_err',
'log_noise_evidence', 'log_bayes_factor', 'priors', 'posterior',
'injection_parameters', 'meta_data', 'search_parameter_keys',
'fixed_parameter_keys', 'constraint_parameter_keys',
'sampling_time', 'sampler_kwargs', 'use_ratio',
'log_likelihood_evaluations', 'log_prior_evaluations',
'num_likelihood_evaluations', 'samples', 'nested_samples',
'walkers', 'nburn', 'parameter_labels', 'parameter_labels_with_unit',
'version']
dictionary = OrderedDict()
for attr in save_attrs:
try:
dictionary[attr] = getattr(self, attr)
except ValueError as e:
logger.debug("Unable to save {}, message: {}".format(attr, e))
pass
return dictionary
def save_to_file(self, filename=None, overwrite=False, outdir=None,
extension='json', gzip=False):
"""
Writes the Result to a json or deepdish h5 file
Parameters
----------
filename: optional,
Filename to write to (overwrites the default)
overwrite: bool, optional
Whether or not to overwrite an existing result file.
default=False
outdir: str, optional
Path to the outdir. Default is the one stored in the result object.
extension: str, optional {json, hdf5, True}
Determines the method to use to store the data (if True defaults
to json)
gzip: bool, optional
            If true, and outputting to a json file, this will gzip the resulting
file and add '.gz' to the file extension.
"""
if extension is True:
extension = "json"
outdir = self._safe_outdir_creation(outdir, self.save_to_file)
if filename is None:
filename = result_file_name(outdir, self.label, extension, gzip)
move_old_file(filename, overwrite)
# Convert the prior to a string representation for saving on disk
dictionary = self._get_save_data_dictionary()
# Convert callable sampler_kwargs to strings
if dictionary.get('sampler_kwargs', None) is not None:
for key in dictionary['sampler_kwargs']:
if hasattr(dictionary['sampler_kwargs'][key], '__call__'):
                    dictionary['sampler_kwargs'][key] = str(dictionary['sampler_kwargs'][key])
try:
# convert priors to JSON dictionary for both JSON and hdf5 files
dictionary["priors"] = dictionary["priors"]._get_json_dict()
if extension == 'json':
if gzip:
import gzip
# encode to a string
json_str = json.dumps(dictionary, cls=BilbyJsonEncoder).encode('utf-8')
with gzip.GzipFile(filename, 'w') as file:
file.write(json_str)
else:
with open(filename, 'w') as file:
json.dump(dictionary, file, indent=2, cls=BilbyJsonEncoder)
elif extension == 'hdf5':
import deepdish
for key in dictionary:
if isinstance(dictionary[key], pd.DataFrame):
dictionary[key] = dictionary[key].to_dict()
deepdish.io.save(filename, dictionary)
else:
raise ValueError("Extension type {} not understood".format(extension))
except Exception as e:
logger.error("\n\n Saving the data has failed with the "
"following message:\n {} \n\n".format(e))
def save_posterior_samples(self, filename=None, outdir=None, label=None):
""" Saves posterior samples to a file
        Generates a .dat file containing the posterior samples and auxiliary
        data saved in the posterior. Note, strings in the posterior are
        removed while complex numbers will be given as absolute values and
        angles, with '_abs' and '_angle' appended to the column name
Parameters
----------
filename: str
Alternative filename to use. Defaults to
outdir/label_posterior_samples.dat
outdir, label: str
Alternative outdir and label to use
"""
if filename is None:
if label is None:
label = self.label
outdir = self._safe_outdir_creation(outdir, self.save_posterior_samples)
filename = '{}/{}_posterior_samples.dat'.format(outdir, label)
else:
outdir = os.path.dirname(filename)
self._safe_outdir_creation(outdir, self.save_posterior_samples)
# Drop non-numeric columns
df = self.posterior.select_dtypes([np.number]).copy()
# Convert complex columns to abs
for key in df.keys():
if np.any(np.iscomplex(df[key])):
complex_term = df.pop(key)
df.loc[:, key + "_abs"] = np.abs(complex_term)
df.loc[:, key + "_angle"] = np.angle(complex_term)
logger.info("Writing samples file to {}".format(filename))
df.to_csv(filename, index=False, header=True, sep=' ')
def get_latex_labels_from_parameter_keys(self, keys):
""" Returns a list of latex_labels corresponding to the given keys
Parameters
----------
keys: list
List of strings corresponding to the desired latex_labels
Returns
-------
list: The desired latex_labels
"""
latex_labels = []
for key in keys:
if key in self.search_parameter_keys:
idx = self.search_parameter_keys.index(key)
label = self.parameter_labels_with_unit[idx]
elif key in self.parameter_labels:
label = key
else:
label = None
logger.debug(
'key {} not a parameter label or latex label'.format(key)
)
if label is None:
label = key.replace("_", " ")
latex_labels.append(label)
return latex_labels
@property
    def covariance_matrix(self):
        """ The covariance matrix of the samples of the posterior """
samples = self.posterior[self.search_parameter_keys].values
return np.cov(samples.T)
@property
def posterior_volume(self):
""" The posterior volume """
if self.covariance_matrix.ndim == 0:
return np.sqrt(self.covariance_matrix)
else:
return 1 / np.sqrt(np.abs(np.linalg.det(
1 / self.covariance_matrix)))
@staticmethod
def prior_volume(priors):
""" The prior volume, given a set of priors """
return np.prod([priors[k].maximum - priors[k].minimum for k in priors])
def occam_factor(self, priors):
""" The Occam factor,
        See Chapter 28, `MacKay "Information Theory, Inference, and Learning
Algorithms" <http://www.inference.org.uk/itprnn/book.html>`_ Cambridge
University Press (2003).
"""
return self.posterior_volume / self.prior_volume(priors)
@property
    def bayesian_model_dimensionality(self):
        """ Characterises how many parameters are effectively constrained by the data
See <https://arxiv.org/abs/1903.06682>
Returns
-------
float: The model dimensionality
"""
return 2 * (np.mean(self.posterior['log_likelihood']**2) -
np.mean(self.posterior['log_likelihood'])**2)
def get_one_dimensional_median_and_error_bar(self, key, fmt='.2f',
quantiles=(0.16, 0.84)):
""" Calculate the median and error bar for a given key
Parameters
----------
key: str
The parameter key for which to calculate the median and error bar
fmt: str, ('.2f')
A format string
quantiles: list, tuple
            A length-2 tuple of the lower and upper quantiles to calculate
            the error bars for.
Returns
-------
summary: namedtuple
An object with attributes, median, lower, upper and string
"""
summary = namedtuple('summary', ['median', 'lower', 'upper', 'string'])
if len(quantiles) != 2:
raise ValueError("quantiles must be of length 2")
quants_to_compute = np.array([quantiles[0], 0.5, quantiles[1]])
quants = np.percentile(self.posterior[key], quants_to_compute * 100)
summary.median = quants[1]
summary.plus = quants[2] - summary.median
summary.minus = summary.median - quants[0]
fmt = "{{0:{0}}}".format(fmt).format
string_template = r"${{{0}}}_{{-{1}}}^{{+{2}}}$"
summary.string = string_template.format(
fmt(summary.median), fmt(summary.minus), fmt(summary.plus))
return summary
@latex_plot_format
def plot_single_density(self, key, prior=None, cumulative=False,
title=None, truth=None, save=True,
file_base_name=None, bins=50, label_fontsize=16,
title_fontsize=16, quantiles=(0.16, 0.84), dpi=300):
""" Plot a 1D marginal density, either probability or cumulative.
Parameters
----------
key: str
Name of the parameter to plot
prior: {bool (True), bilby.core.prior.Prior}
If true, add the stored prior probability density function to the
one-dimensional marginal distributions. If instead a Prior
is provided, this will be plotted.
cumulative: bool
If true plot the CDF
title: bool
If true, add 1D title of the median and (by default 1-sigma)
error bars. To change the error bars, pass in the quantiles kwarg.
            See method `get_one_dimensional_median_and_error_bar` for further
            details. If `quantiles=None` is passed in, no title is added.
truth: {bool, float}
If true, plot self.injection_parameters[parameter].
If float, plot this value.
save: bool:
If true, save plot to disk.
file_base_name: str, optional
If given, the base file name to use (by default `outdir/label_` is
used)
bins: int
The number of histogram bins
label_fontsize, title_fontsize: int
The fontsizes for the labels and titles
quantiles: tuple
            A length-2 tuple of the lower and upper quantiles to calculate
            the error bars for.
dpi: int
Dots per inch resolution of the plot
Returns
-------
figure: matplotlib.pyplot.figure
A matplotlib figure object
"""
logger.info('Plotting {} marginal distribution'.format(key))
label = self.get_latex_labels_from_parameter_keys([key])[0]
fig, ax = plt.subplots()
try:
ax.hist(self.posterior[key].values, bins=bins, density=True,
histtype='step', cumulative=cumulative)
except ValueError as e:
logger.info(
'Failed to generate 1d plot for {}, error message: {}'
.format(key, e))
return
ax.set_xlabel(label, fontsize=label_fontsize)
if truth is not None:
ax.axvline(truth, ls='-', color='orange')
summary = self.get_one_dimensional_median_and_error_bar(
key, quantiles=quantiles)
ax.axvline(summary.median - summary.minus, ls='--', color='C0')
ax.axvline(summary.median + summary.plus, ls='--', color='C0')
if title:
ax.set_title(summary.string, fontsize=title_fontsize)
if isinstance(prior, Prior):
theta = np.linspace(ax.get_xlim()[0], ax.get_xlim()[1], 300)
if cumulative is False:
ax.plot(theta, prior.prob(theta), color='C2')
else:
ax.plot(theta, prior.cdf(theta), color='C2')
if save:
fig.tight_layout()
if cumulative:
file_name = file_base_name + key + '_cdf'
else:
file_name = file_base_name + key + '_pdf'
safe_save_figure(fig=fig, filename=file_name, dpi=dpi)
plt.close(fig)
else:
return fig
def plot_marginals(self, parameters=None, priors=None, titles=True,
file_base_name=None, bins=50, label_fontsize=16,
title_fontsize=16, quantiles=(0.16, 0.84), dpi=300,
outdir=None):
""" Plot 1D marginal distributions
Parameters
----------
parameters: (list, dict), optional
If given, either a list of the parameter names to include, or a
dictionary of parameter names and their "true" values to plot.
priors: {bool (False), bilby.core.prior.PriorDict}
If true, add the stored prior probability density functions to the
one-dimensional marginal distributions. If instead a PriorDict
is provided, this will be plotted.
titles: bool
If true, add 1D titles of the median and (by default 1-sigma)
error bars. To change the error bars, pass in the quantiles kwarg.
            See method `get_one_dimensional_median_and_error_bar` for further
            details. If `quantiles=None` is passed in, no title is added.
file_base_name: str, optional
If given, the base file name to use (by default `outdir/label_` is
used)
bins: int
The number of histogram bins
label_fontsize, title_fontsize: int
The font sizes for the labels and titles
quantiles: tuple
            A length-2 tuple of the lower and upper quantiles to calculate
            the error bars for.
dpi: int
Dots per inch resolution of the plot
outdir: str, optional
            Path to the outdir. Default is the one stored in the result object.
Returns
-------
"""
if isinstance(parameters, dict):
plot_parameter_keys = list(parameters.keys())
truths = parameters
elif parameters is None:
plot_parameter_keys = self.posterior.keys()
if self.injection_parameters is None:
truths = dict()
else:
truths = self.injection_parameters
else:
plot_parameter_keys = list(parameters)
if self.injection_parameters is None:
truths = dict()
else:
truths = self.injection_parameters
if file_base_name is None:
outdir = self._safe_outdir_creation(outdir, self.plot_marginals)
file_base_name = '{}/{}_1d/'.format(outdir, self.label)
check_directory_exists_and_if_not_mkdir(file_base_name)
if priors is True:
priors = getattr(self, 'priors', dict())
elif isinstance(priors, dict):
pass
elif priors in [False, None]:
priors = dict()
else:
raise ValueError('Input priors={} not understood'.format(priors))
for i, key in enumerate(plot_parameter_keys):
if not isinstance(self.posterior[key].values[0], float):
continue
prior = priors.get(key, None)
truth = truths.get(key, None)
for cumulative in [False, True]:
self.plot_single_density(
key, prior=prior, cumulative=cumulative, title=titles,
truth=truth, save=True, file_base_name=file_base_name,
bins=bins, label_fontsize=label_fontsize, dpi=dpi,
title_fontsize=title_fontsize, quantiles=quantiles)
@latex_plot_format
def plot_corner(self, parameters=None, priors=None, titles=True, save=True,
filename=None, dpi=300, **kwargs):
""" Plot a corner-plot
Parameters
----------
parameters: (list, dict), optional
If given, either a list of the parameter names to include, or a
dictionary of parameter names and their "true" values to plot.
priors: {bool (False), bilby.core.prior.PriorDict}
If true, add the stored prior probability density functions to the
one-dimensional marginal distributions. If instead a PriorDict
is provided, this will be plotted.
titles: bool
If true, add 1D titles of the median and (by default 1-sigma)
error bars. To change the error bars, pass in the quantiles kwarg.
            See method `get_one_dimensional_median_and_error_bar` for further
            details. If `quantiles=None` is passed in, no title is added.
save: bool, optional
If true, save the image using the given label and outdir
filename: str, optional
If given, overwrite the default filename
dpi: int, optional
Dots per inch resolution of the plot
**kwargs:
Other keyword arguments are passed to `corner.corner`. We set some
defaults to improve the basic look and feel, but these can all be
            overridden. An optional 'outdir' keyword argument can also be
            passed to override the outdir stored in the result object.
Notes
-----
The generation of the corner plot themselves is done by the corner
python module, see https://corner.readthedocs.io for more
information.
Truth-lines can be passed in in several ways. Either as the values
of the parameters dict, or a list via the `truths` kwarg. If
        injection_parameters were given to run_sampler, these will auto-
matically be added to the plot. This behaviour can be stopped by
adding truths=False.
Returns
-------
fig:
A matplotlib figure instance
"""
        # If in testing mode, no corner plots are generated
if utils.command_line_args.bilby_test_mode:
return
# bilby default corner kwargs. Overwritten by anything passed to kwargs
defaults_kwargs = dict(
bins=50, smooth=0.9, label_kwargs=dict(fontsize=16),
title_kwargs=dict(fontsize=16), color='#0072C1',
truth_color='tab:orange', quantiles=[0.16, 0.84],
levels=(1 - np.exp(-0.5), 1 - np.exp(-2), 1 - np.exp(-9 / 2.)),
plot_density=False, plot_datapoints=True, fill_contours=True,
max_n_ticks=3)
if LooseVersion(matplotlib.__version__) < "2.1":
defaults_kwargs['hist_kwargs'] = dict(normed=True)
else:
defaults_kwargs['hist_kwargs'] = dict(density=True)
if 'lionize' in kwargs and kwargs['lionize'] is True:
defaults_kwargs['truth_color'] = 'tab:blue'
defaults_kwargs['color'] = '#FF8C00'
defaults_kwargs.update(kwargs)
kwargs = defaults_kwargs
# Handle if truths was passed in
if 'truth' in kwargs:
kwargs['truths'] = kwargs.pop('truth')
if "truths" in kwargs:
truths = kwargs.get('truths')
if isinstance(parameters, list) and isinstance(truths, list):
if len(parameters) != len(truths):
raise ValueError(
"Length of parameters and truths don't match")
elif isinstance(truths, dict) and parameters is None:
parameters = kwargs.pop('truths')
elif isinstance(truths, bool):
pass
elif truths is None:
kwargs["truths"] = False
else:
raise ValueError(
"Combination of parameters and truths not understood")
        # If injection parameters were stored, use these as parameter values
# but do not overwrite input parameters (or truths)
cond1 = getattr(self, 'injection_parameters', None) is not None
cond2 = parameters is None
cond3 = bool(kwargs.get("truths", True))
if cond1 and cond2 and cond3:
parameters = {
key: self.injection_parameters.get(key, np.nan)
for key in self.search_parameter_keys
}
# If parameters is a dictionary, use the keys to determine which
# parameters to plot and the values as truths.
if isinstance(parameters, dict):
plot_parameter_keys = list(parameters.keys())
kwargs['truths'] = list(parameters.values())
elif parameters is None:
plot_parameter_keys = self.search_parameter_keys
else:
plot_parameter_keys = list(parameters)
# Get latex formatted strings for the plot labels
kwargs['labels'] = kwargs.get(
'labels', self.get_latex_labels_from_parameter_keys(
plot_parameter_keys))
kwargs["labels"] = sanity_check_labels(kwargs["labels"])
# Unless already set, set the range to include all samples
# This prevents ValueErrors being raised for parameters with no range
kwargs['range'] = kwargs.get('range', [1] * len(plot_parameter_keys))
# Remove truths if it is a bool
if isinstance(kwargs.get('truths'), bool):
kwargs.pop('truths')
# Create the data array to plot and pass everything to corner
xs = self.posterior[plot_parameter_keys].values
if len(plot_parameter_keys) > 1:
fig = corner.corner(xs, **kwargs)
else:
ax = kwargs.get("ax", plt.subplot())
ax.hist(xs, bins=kwargs["bins"], color=kwargs["color"],
histtype="step", **kwargs["hist_kwargs"])
ax.set_xlabel(kwargs["labels"][0])
fig = plt.gcf()
axes = fig.get_axes()
# Add the titles
if titles and kwargs.get('quantiles', None) is not None:
for i, par in enumerate(plot_parameter_keys):
ax = axes[i + i * len(plot_parameter_keys)]
if ax.title.get_text() == '':
ax.set_title(self.get_one_dimensional_median_and_error_bar(
par, quantiles=kwargs['quantiles']).string,
**kwargs['title_kwargs'])
# Add priors to the 1D plots
if priors is True:
priors = getattr(self, 'priors', False)
if isinstance(priors, dict):
for i, par in enumerate(plot_parameter_keys):
ax = axes[i + i * len(plot_parameter_keys)]
theta = np.linspace(ax.get_xlim()[0], ax.get_xlim()[1], 300)
ax.plot(theta, priors[par].prob(theta), color='C2')
elif priors in [False, None]:
pass
else:
raise ValueError('Input priors={} not understood'.format(priors))
if save:
if filename is None:
outdir = self._safe_outdir_creation(kwargs.get('outdir'), self.plot_corner)
filename = '{}/{}_corner.png'.format(outdir, self.label)
logger.debug('Saving corner plot to {}'.format(filename))
safe_save_figure(fig=fig, filename=filename, dpi=dpi)
plt.close(fig)
return fig
@latex_plot_format
def plot_walkers(self, **kwargs):
""" Method to plot the trace of the walkers in an ensemble MCMC plot """
if hasattr(self, 'walkers') is False:
logger.warning("Cannot plot_walkers as no walkers are saved")
return
if utils.command_line_args.bilby_test_mode:
return
nwalkers, nsteps, ndim = self.walkers.shape
idxs = np.arange(nsteps)
fig, axes = plt.subplots(nrows=ndim, figsize=(6, 3 * ndim))
walkers = self.walkers[:, :, :]
parameter_labels = sanity_check_labels(self.parameter_labels)
for i, ax in enumerate(axes):
ax.plot(idxs[:self.nburn + 1], walkers[:, :self.nburn + 1, i].T,
lw=0.1, color='r')
ax.set_ylabel(parameter_labels[i])
for i, ax in enumerate(axes):
ax.plot(idxs[self.nburn:], walkers[:, self.nburn:, i].T, lw=0.1,
color='k')
ax.set_ylabel(parameter_labels[i])
fig.tight_layout()
outdir = self._safe_outdir_creation(kwargs.get('outdir'), self.plot_walkers)
filename = '{}/{}_walkers.png'.format(outdir, self.label)
        logger.debug('Saving walkers plot to {}'.format(filename))
safe_save_figure(fig=fig, filename=filename)
plt.close(fig)
@latex_plot_format
def plot_with_data(self, model, x, y, ndraws=1000, npoints=1000,
xlabel=None, ylabel=None, data_label='data',
data_fmt='o', draws_label=None, filename=None,
maxl_label='max likelihood', dpi=300, outdir=None):
""" Generate a figure showing the data and fits to the data
Parameters
----------
model: function
A python function which when called as `model(x, **kwargs)` returns
the model prediction (here `kwargs` is a dictionary of key-value
            pairs of the model parameters).
x, y: np.ndarray
The independent and dependent data to plot
ndraws: int
Number of draws from the posterior to plot
npoints: int
Number of points used to plot the smoothed fit to the data
xlabel, ylabel: str
Labels for the axes
data_label, draws_label, maxl_label: str
Label for the data, draws, and max likelihood legend
data_fmt: str
            Matplotlib fmt code, defaults to `'o'`
dpi: int
Passed to `plt.savefig`
filename: str
If given, the filename to use. Otherwise, the filename is generated
from the outdir and label attributes.
outdir: str, optional
            Path to the outdir. Default is the one stored in the result object.
"""
# Determine model_posterior, the subset of the full posterior which
# should be passed into the model
model_keys = infer_parameters_from_function(model)
model_posterior = self.posterior[model_keys]
xsmooth = np.linspace(np.min(x), np.max(x), npoints)
fig, ax = plt.subplots()
logger.info('Plotting {} draws'.format(ndraws))
for _ in range(ndraws):
s = model_posterior.sample().to_dict('records')[0]
ax.plot(xsmooth, model(xsmooth, **s), alpha=0.25, lw=0.1, color='r',
label=draws_label)
try:
if all(~np.isnan(self.posterior.log_likelihood)):
logger.info('Plotting maximum likelihood')
s = model_posterior.iloc[self.posterior.log_likelihood.idxmax()]
ax.plot(xsmooth, model(xsmooth, **s), lw=1, color='k',
label=maxl_label)
except (AttributeError, TypeError):
logger.debug(
"No log likelihood values stored, unable to plot max")
ax.plot(x, y, data_fmt, markersize=2, label=data_label)
if xlabel is not None:
ax.set_xlabel(xlabel)
if ylabel is not None:
ax.set_ylabel(ylabel)
handles, labels = plt.gca().get_legend_handles_labels()
by_label = OrderedDict(zip(labels, handles))
plt.legend(by_label.values(), by_label.keys())
ax.legend(numpoints=3)
fig.tight_layout()
if filename is None:
outdir = self._safe_outdir_creation(outdir, self.plot_with_data)
filename = '{}/{}_plot_with_data'.format(outdir, self.label)
safe_save_figure(fig=fig, filename=filename, dpi=dpi)
plt.close(fig)
@staticmethod
def _add_prior_fixed_values_to_posterior(posterior, priors):
if priors is None:
return posterior
for key in priors:
if isinstance(priors[key], DeltaFunction):
posterior[key] = priors[key].peak
elif isinstance(priors[key], float):
posterior[key] = priors[key]
return posterior
def samples_to_posterior(self, likelihood=None, priors=None,
conversion_function=None, npool=1):
"""
Convert array of samples to posterior (a Pandas data frame)
Also applies the conversion function to any stored posterior
Parameters
----------
likelihood: bilby.likelihood.GravitationalWaveTransient, optional
GravitationalWaveTransient likelihood used for sampling.
priors: bilby.prior.PriorDict, optional
Dictionary of prior object, used to fill in delta function priors.
conversion_function: function, optional
Function which adds in extra parameters to the data frame,
should take the data_frame, likelihood and prior as arguments.
"""
try:
data_frame = self.posterior
except ValueError:
data_frame = pd.DataFrame(
self.samples, columns=self.search_parameter_keys)
data_frame = self._add_prior_fixed_values_to_posterior(
data_frame, priors)
data_frame['log_likelihood'] = getattr(
self, 'log_likelihood_evaluations', np.nan)
if self.log_prior_evaluations is None and priors is not None:
data_frame['log_prior'] = priors.ln_prob(
dict(data_frame[self.search_parameter_keys]), axis=0)
else:
data_frame['log_prior'] = self.log_prior_evaluations
if conversion_function is not None:
            if "npool" in inspect.getfullargspec(conversion_function).args:
data_frame = conversion_function(data_frame, likelihood, priors, npool=npool)
else:
data_frame = conversion_function(data_frame, likelihood, priors)
self.posterior = data_frame
def calculate_prior_values(self, priors):
"""
Evaluate prior probability for each parameter for each sample.
Parameters
----------
priors: dict, PriorDict
Prior distributions
"""
self.prior_values = pd.DataFrame()
for key in priors:
if key in self.posterior.keys():
if isinstance(priors[key], DeltaFunction):
continue
else:
self.prior_values[key]\
= priors[key].prob(self.posterior[key].values)
def get_all_injection_credible_levels(self, keys=None):
"""
Get credible levels for all parameters
Parameters
----------
keys: list, optional
A list of keys for which return the credible levels, if None,
defaults to search_parameter_keys
Returns
-------
credible_levels: dict
The credible levels at which the injected parameters are found.
"""
if keys is None:
keys = self.search_parameter_keys
if self.injection_parameters is None:
            raise TypeError("Result object has no 'injection_parameters'. "
                            "Cannot compute credible levels.")
credible_levels = {key: self.get_injection_credible_level(key)
for key in keys
if isinstance(self.injection_parameters.get(key, None), float)}
return credible_levels
def get_injection_credible_level(self, parameter):
"""
Get the credible level of the injected parameter
Calculated as CDF(injection value)
Parameters
----------
parameter: str
Parameter to get credible level for
Returns
-------
float: credible level
"""
if self.injection_parameters is None:
            raise TypeError("Result object has no 'injection_parameters'. "
                            "Cannot compute credible levels.")
if parameter in self.posterior and\
parameter in self.injection_parameters:
credible_level =\
sum(self.posterior[parameter].values <
self.injection_parameters[parameter]) / len(self.posterior)
return credible_level
else:
return np.nan
def _check_attribute_match_to_other_object(self, name, other_object):
""" Check attribute name exists in other_object and is the same
Parameters
----------
name: str
Name of the attribute in this instance
other_object: object
Other object with attributes to compare with
Returns
-------
bool: True if attribute name matches with an attribute of other_object, False otherwise
"""
a = getattr(self, name, False)
b = getattr(other_object, name, False)
logger.debug('Checking {} value: {}=={}'.format(name, a, b))
if (a is not False) and (b is not False):
type_a = type(a)
type_b = type(b)
if type_a == type_b:
if type_a in [str, float, int, dict, list]:
try:
return a == b
except ValueError:
return False
elif type_a in [np.ndarray]:
return np.all(a == b)
return False
@property
def kde(self):
""" Kernel density estimate built from the stored posterior
Uses `scipy.stats.gaussian_kde` to generate the kernel density
"""
if self._kde:
return self._kde
else:
self._kde = scipy.stats.gaussian_kde(
self.posterior[self.search_parameter_keys].values.T)
return self._kde
def posterior_probability(self, sample):
""" Calculate the posterior probability for a new sample
This queries a Kernel Density Estimate of the posterior to calculate
the posterior probability density for the new sample.
Parameters
----------
sample: dict, or list of dictionaries
A dictionary containing all the keys from
self.search_parameter_keys and corresponding values at which to
calculate the posterior probability
Returns
-------
p: array-like,
The posterior probability of the sample
"""
if isinstance(sample, dict):
sample = [sample]
ordered_sample = [[s[key] for key in self.search_parameter_keys]
for s in sample]
return self.kde(ordered_sample)
def _safe_outdir_creation(self, outdir=None, caller_func=None):
if outdir is None:
outdir = self.outdir
try:
utils.check_directory_exists_and_if_not_mkdir(outdir)
except PermissionError:
            raise FileMovedError("Cannot write in the out directory.\n"
                                 "Did you move the file here from another system?\n"
"Try calling " + caller_func.__name__ + " with the 'outdir' "
"keyword argument, e.g. " + caller_func.__name__ + "(outdir='.')")
return outdir
def get_weights_by_new_prior(self, old_prior, new_prior, prior_names=None):
""" Calculate a list of sample weights based on the ratio of new to old priors
Parameters
----------
old_prior: PriorDict,
The prior used in the generation of the original samples.
new_prior: PriorDict,
The prior to use to reweight the samples.
prior_names: list
A list of the priors to include in the ratio during reweighting.
Returns
-------
weights: array-like,
A list of sample weights.
"""
weights = []
# Shared priors - these will form a ratio
if prior_names is not None:
shared_parameters = {key: self.posterior[key] for key in new_prior if
key in old_prior and key in prior_names}
else:
shared_parameters = {key: self.posterior[key] for key in new_prior if key in old_prior}
parameters = [{key: self.posterior[key][i] for key in shared_parameters.keys()}
for i in range(len(self.posterior))]
for i in range(len(self.posterior)):
weight = 1
for prior_key in shared_parameters.keys():
val = self.posterior[prior_key][i]
weight *= new_prior.evaluate_constraints(parameters[i])
weight *= new_prior[prior_key].prob(val) / old_prior[prior_key].prob(val)
weights.append(weight)
return weights
def to_arviz(self, prior=None):
""" Convert the Result object to an ArviZ InferenceData object.
Parameters
----------
prior: int
If a positive integer is given then that number of prior
samples will be drawn and stored in the ArviZ InferenceData
object.
Returns
-------
azdata: InferenceData
The ArviZ InferenceData object.
"""
try:
import arviz as az
        except ImportError:
            logger.debug(
                "ArviZ is not installed, so cannot convert to InferenceData"
            )
            return None
posdict = {}
for key in self.posterior:
posdict[key] = self.posterior[key].values
if "log_likelihood" in posdict:
loglikedict = {
"log_likelihood": posdict.pop("log_likelihood")
}
else:
if self.log_likelihood_evaluations is not None:
loglikedict = {
"log_likelihood": self.log_likelihood_evaluations
}
else:
loglikedict = None
priorsamples = None
if prior is not None:
if self.priors is None:
logger.warning(
"No priors are in the Result object, so prior samples "
"will not be included in the output."
)
else:
priorsamples = self.priors.sample(size=prior)
azdata = az.from_dict(
posterior=posdict,
log_likelihood=loglikedict,
prior=priorsamples,
)
# add attributes
version = {
"inference_library": "bilby: {}".format(self.sampler),
"inference_library_version": get_version_information()
}
azdata.posterior.attrs.update(version)
if "log_likelihood" in azdata._groups:
azdata.log_likelihood.attrs.update(version)
if "prior" in azdata._groups:
azdata.prior.attrs.update(version)
return azdata
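# --- Added illustrative sketch (not part of the original module) ---
# A minimal, hedged example of how the Result class above is typically used:
# load a saved result file and query a one-dimensional summary. The outdir,
# label and the 'mass_1' parameter name are assumptions for illustration only.
def _example_result_usage(outdir='outdir', label='my_run'):
    """Load a saved result and print a short summary (illustrative sketch)."""
    result = read_in_result(outdir=outdir, label=label)
    print(result)  # nsamples and log-evidence summary via Result.__str__
    if 'mass_1' in result.posterior:
        summary = result.get_one_dimensional_median_and_error_bar('mass_1')
        print(summary.string)  # latex string: median with upper/lower errors
    return result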
class ResultList(list):
def __init__(self, results=None):
""" A class to store a list of :class:`bilby.core.result.Result` objects
from equivalent runs on the same data. This provides methods for
        outputting combined results.
Parameters
----------
results: list
A list of `:class:`bilby.core.result.Result`.
"""
super(ResultList, self).__init__()
        if results is not None:
            for result in results:
                self.append(result)
def append(self, result):
"""
Append a :class:`bilby.core.result.Result`, or set of results, to the
list.
Parameters
----------
result: :class:`bilby.core.result.Result` or filename
pointing to a result object, to append to the list.
"""
if isinstance(result, Result):
super(ResultList, self).append(result)
elif isinstance(result, str):
super(ResultList, self).append(read_in_result(result))
else:
raise TypeError("Could not append a non-Result type")
def combine(self):
"""
        Return the combined results in a :class:`bilby.core.result.Result`
object.
"""
if len(self) == 0:
return Result()
elif len(self) == 1:
return copy(self[0])
else:
result = copy(self[0])
if result.label is not None:
result.label += '_combined'
self.check_consistent_sampler()
self.check_consistent_data()
self.check_consistent_parameters()
self.check_consistent_priors()
# check which kind of sampler was used: MCMC or Nested Sampling
if result._nested_samples is not None:
posteriors, result = self._combine_nested_sampled_runs(result)
else:
posteriors = [res.posterior for res in self]
combined_posteriors = pd.concat(posteriors, ignore_index=True)
result.posterior = combined_posteriors.sample(len(combined_posteriors)) # shuffle
return result
def _combine_nested_sampled_runs(self, result):
"""
Combine multiple nested sampling runs.
Currently this keeps posterior samples from each run in proportion with
the evidence for each individual run
Parameters
----------
result: bilby.core.result.Result
The result object to put the new samples in.
Returns
-------
posteriors: list
A list of pandas DataFrames containing the reduced sample set from
each run.
result: bilby.core.result.Result
The result object with the combined evidences.
"""
self.check_nested_samples()
# Combine evidences
log_evidences = np.array([res.log_evidence for res in self])
result.log_evidence = logsumexp(log_evidences, b=1. / len(self))
result.log_bayes_factor = result.log_evidence - result.log_noise_evidence
        # Propagate uncertainty in combined evidence
log_errs = [res.log_evidence_err for res in self if np.isfinite(res.log_evidence_err)]
if len(log_errs) > 0:
result.log_evidence_err = 0.5 * logsumexp(2 * np.array(log_errs), b=1. / len(self))
else:
result.log_evidence_err = np.nan
# Combined posteriors with a weighting
result_weights = np.exp(log_evidences - np.max(log_evidences))
posteriors = list()
for res, frac in zip(self, result_weights):
selected_samples = (np.random.uniform(size=len(res.posterior)) < frac)
posteriors.append(res.posterior[selected_samples])
# remove original nested_samples
result.nested_samples = None
result.sampler_kwargs = None
return posteriors, result
def check_nested_samples(self):
for res in self:
try:
res.nested_samples
except ValueError:
raise ResultListError("Not all results contain nested samples")
def check_consistent_priors(self):
for res in self:
for p in self[0].priors.keys():
if not self[0].priors[p] == res.priors[p] or len(self[0].priors) != len(res.priors):
raise ResultListError("Inconsistent priors between results")
def check_consistent_parameters(self):
if not np.all([set(self[0].search_parameter_keys) == set(res.search_parameter_keys) for res in self]):
raise ResultListError("Inconsistent parameters between results")
def check_consistent_data(self):
if not np.all([res.log_noise_evidence == self[0].log_noise_evidence for res in self])\
and not np.all([np.isnan(res.log_noise_evidence) for res in self]):
raise ResultListError("Inconsistent data between results")
def check_consistent_sampler(self):
if not np.all([res.sampler == self[0].sampler for res in self]):
raise ResultListError("Inconsistent samplers between results")
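# --- Added illustrative sketch (not part of the original module) ---
# Hedged example of combining several equivalent runs with ResultList; the
# file names are placeholders and assumed to point at saved result files.
def _example_combine_runs(result_files=('run0_result.json', 'run1_result.json')):
    """Combine equivalent runs into a single Result (illustrative sketch)."""
    results = ResultList(list(result_files))  # strings are loaded via read_in_result
    combined = results.combine()  # consistency checks, then merged posterior/evidence
    return combined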
@latex_plot_format
def plot_multiple(results, filename=None, labels=None, colours=None,
                  save=True, evidences=False, corner_labels=None, **kwargs):
    """ Generate a corner plot overlaying several sets of results
Parameters
----------
results: list
A list of `bilby.core.result.Result` objects containing the samples to
plot.
filename: str
File name to save the figure to. If None (default), a filename is
constructed from the outdir of the first element of results and then
the labels for all the result files.
labels: list
List of strings to use when generating a legend. If None (default), the
`label` attribute of each result in `results` is used.
colours: list
The colours for each result. If None, default styles are applied.
save: bool
If true, save the figure
kwargs: dict
All other keyword arguments are passed to `result.plot_corner` (except
for the keyword `labels` for which you should use the dedicated
`corner_labels` input).
However, `show_titles` and `truths` are ignored since they would be
ambiguous on such a plot.
evidences: bool, optional
Add the log-evidence calculations to the legend. If available, the
Bayes factor will be used instead.
corner_labels: list, optional
List of strings to be passed to the input `labels` to `result.plot_corner`.
Returns
-------
fig:
A matplotlib figure instance
"""
kwargs['show_titles'] = False
kwargs['truths'] = None
if corner_labels is not None:
kwargs['labels'] = corner_labels
fig = results[0].plot_corner(save=False, **kwargs)
default_filename = '{}/{}'.format(results[0].outdir, 'combined')
lines = []
default_labels = []
for i, result in enumerate(results):
if colours:
c = colours[i]
else:
c = 'C{}'.format(i)
hist_kwargs = kwargs.get('hist_kwargs', dict())
hist_kwargs['color'] = c
fig = result.plot_corner(fig=fig, save=False, color=c, **kwargs)
default_filename += '_{}'.format(result.label)
lines.append(mpllines.Line2D([0], [0], color=c))
default_labels.append(result.label)
# Rescale the axes
for i, ax in enumerate(fig.axes):
ax.autoscale()
plt.draw()
if labels is None:
labels = default_labels
labels = sanity_check_labels(labels)
if evidences:
if np.isnan(results[0].log_bayes_factor):
            template = r' $\mathrm{{ln}}(Z)={lnz:1.3g}$'
        else:
            template = r' $\mathrm{{ln}}(B)={lnbf:1.3g}$'
        labels = [label + template.format(lnz=result.log_evidence,
                                          lnbf=result.log_bayes_factor)
                  for label, result in zip(labels, results)]
axes = fig.get_axes()
ndim = int(np.sqrt(len(axes)))
axes[ndim - 1].legend(lines, labels)
if filename is None:
filename = default_filename
if save:
safe_save_figure(fig=fig, filename=filename)
return fig
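# --- Added illustrative sketch (not part of the original module) ---
# Hedged example of overlaying several results on one corner plot with
# plot_multiple; the file names and the evidence-legend flag are assumptions.
def _example_plot_multiple(result_files=('run0_result.json', 'run1_result.json')):
    """Overlay corner plots from several runs (illustrative sketch)."""
    results = [read_in_result(f) for f in result_files]
    fig = plot_multiple(results, evidences=True,
                        labels=[res.label for res in results])
    return fig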
@latex_plot_format
def make_pp_plot(results, filename=None, save=True, confidence_interval=[0.68, 0.95, 0.997],
lines=None, legend_fontsize='x-small', keys=None, title=True,
confidence_interval_alpha=0.1,
**kwargs):
"""
Make a P-P plot for a set of runs with injected signals.
Parameters
----------
results: list
        A list of Result objects, each of which should have injection_parameters
filename: str, optional
The name of the file to save, the default is "outdir/pp.png"
save: bool, optional
Whether to save the file, default=True
confidence_interval: (float, list), optional
The confidence interval to be plotted, defaulting to 1-2-3 sigma
lines: list
        If given, a list of matplotlib line formats to use; it must contain at
        least as many entries as there are parameters.
legend_fontsize: float
The font size for the legend
keys: list
A list of keys to use, if None defaults to search_parameter_keys
confidence_interval_alpha: float, list, optional
        The transparency for the background confidence interval
kwargs:
Additional kwargs to pass to matplotlib.pyplot.plot
Returns
-------
fig, pvals:
matplotlib figure and a NamedTuple with attributes `combined_pvalue`,
`pvalues`, and `names`.
"""
if keys is None:
keys = results[0].search_parameter_keys
credible_levels = pd.DataFrame()
for result in results:
credible_levels = credible_levels.append(
result.get_all_injection_credible_levels(keys), ignore_index=True)
if lines is None:
colors = ["C{}".format(i) for i in range(8)]
linestyles = ["-", "--", ":"]
lines = ["{}{}".format(a, b) for a, b in product(linestyles, colors)]
if len(lines) < len(credible_levels.keys()):
raise ValueError("Larger number of parameters than unique linestyles")
x_values = np.linspace(0, 1, 1001)
N = len(credible_levels)
fig, ax = plt.subplots()
if isinstance(confidence_interval, float):
confidence_interval = [confidence_interval]
if isinstance(confidence_interval_alpha, float):
confidence_interval_alpha = [confidence_interval_alpha] * len(confidence_interval)
elif len(confidence_interval_alpha) != len(confidence_interval):
raise ValueError(
"confidence_interval_alpha must have the same length as confidence_interval")
for ci, alpha in zip(confidence_interval, confidence_interval_alpha):
edge_of_bound = (1. - ci) / 2.
lower = scipy.stats.binom.ppf(1 - edge_of_bound, N, x_values) / N
upper = scipy.stats.binom.ppf(edge_of_bound, N, x_values) / N
# The binomial point percent function doesn't always return 0 @ 0,
# so set those bounds explicitly to be sure
lower[0] = 0
upper[0] = 0
ax.fill_between(x_values, lower, upper, alpha=alpha, color='k')
pvalues = []
logger.info("Key: KS-test p-value")
for ii, key in enumerate(credible_levels):
pp = np.array([sum(credible_levels[key].values < xx) /
len(credible_levels) for xx in x_values])
pvalue = scipy.stats.kstest(credible_levels[key], 'uniform').pvalue
pvalues.append(pvalue)
logger.info("{}: {}".format(key, pvalue))
try:
name = results[0].priors[key].latex_label
except AttributeError:
name = key
if name == 'H_eff5':
name = '$H_{eff5}$'
if name == 'chi_1':
name = '$\\chi_1$'
if name == 'chi_2':
name = '$\\chi_2$'
if name == 'Q_tilde':
name = '$\\tilde{Q}$'
label = "{} ({:2.3f})".format(name, pvalue)
plt.plot(x_values, pp, lines[ii], label=label, **kwargs)
Pvals = namedtuple('pvals', ['combined_pvalue', 'pvalues', 'names'])
pvals = Pvals(combined_pvalue=scipy.stats.combine_pvalues(pvalues)[1],
pvalues=pvalues,
names=list(credible_levels.keys()))
logger.info(
"Combined p-value: {}".format(pvals.combined_pvalue))
if title:
ax.set_title("N={}, p-value={:2.4f}".format(
len(results), pvals.combined_pvalue))
ax.set_xlabel("C.I.")
ax.set_ylabel("Fraction of events in C.I.")
ax.legend(handlelength=2, labelspacing=0.25, fontsize=legend_fontsize)
ax.set_xlim(0, 1)
ax.set_ylim(0, 1)
fig.tight_layout()
if save:
if filename is None:
filename = 'outdir/pp.png'
safe_save_figure(fig=fig, filename=filename, dpi=500)
return fig, pvals
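# --- Added illustrative sketch (not part of the original module) ---
# Hedged example of producing a P-P plot from a set of injection runs; the
# list of result files is an assumed input.
def _example_pp_plot(result_files):
    """Make a P-P plot from results containing injections (illustrative sketch)."""
    results = [read_in_result(f) for f in result_files]
    fig, pvals = make_pp_plot(results, filename='outdir/pp.png')
    return fig, pvals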
def sanity_check_labels(labels):
""" Check labels for plotting to remove matplotlib errors """
for ii, lab in enumerate(labels):
if "_" in lab and "$" not in lab:
labels[ii] = lab.replace("_", "-")
return labels
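# --- Added illustrative sketch (not part of the original module) ---
# Hedged example of importance-reweighting posterior samples to a new prior
# using Result.get_weights_by_new_prior; `result`, `old_priors` and
# `new_priors` are assumed to be a populated Result and two PriorDicts.
def _example_reweight_to_new_prior(result, old_priors, new_priors):
    """Resample the stored posterior under a new prior (illustrative sketch)."""
    weights = np.asarray(result.get_weights_by_new_prior(old_priors, new_priors))
    probabilities = weights / weights.sum()
    idx = np.random.choice(len(result.posterior), size=len(result.posterior),
                           replace=True, p=probabilities)
    return result.posterior.iloc[idx].reset_index(drop=True)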
class ResultError(Exception):
""" Base exception for all Result related errors """
class ResultListError(ResultError):
    """ For errors occurring during combining results. """
class FileMovedError(ResultError):
""" Exceptions that occur when files have been moved """
|
py | 1a513373add8250dfa28479a4b2652e1520684a3 | # -*- coding: utf-8 -*-
from __future__ import division
from __future__ import print_function
import datetime
import json
import math
import os
import time
import sys
import unittest
from artists.miro.constraints import (
DatasetConstraints,
Fields,
FieldConstraints,
MinConstraint,
MaxConstraint,
SignConstraint,
TypeConstraint,
MaxNullsConstraint,
NoDuplicatesConstraint,
AllowedValuesConstraint,
MinLengthConstraint,
MaxLengthConstraint,
constraint_class,
strip_lines,
)
TESTDATA_DIR = os.path.join(os.path.abspath(os.path.dirname(__file__)),
'testdata')
class TestConstraints(unittest.TestCase):
def test_constraint_repr(self):
self.assertEqual(repr(MinConstraint(7)),
'MinConstraint(value=7, precision=None)')
self.assertEqual(repr(MinConstraint('a')),
"MinConstraint(value='a', precision=None)")
self.assertEqual(repr(MinConstraint('a', precision='closed')),
"MinConstraint(value='a', precision='closed')")
self.assertEqual(repr(MinLengthConstraint(3)),
"MinLengthConstraint(value=3)")
self.assertEqual(repr(MaxConstraint(-3)),
'MaxConstraint(value=-3, precision=None)')
self.assertEqual(repr(MaxConstraint('KJ')),
"MaxConstraint(value='KJ', precision=None)")
self.assertEqual(repr(MaxConstraint(4.2, precision='closed')),
"MaxConstraint(value=4.2, precision='closed')")
self.assertEqual(repr(MaxLengthConstraint(0)),
"MaxLengthConstraint(value=0)")
self.assertEqual(repr(SignConstraint('positive')),
"SignConstraint(value='positive')")
self.assertEqual(repr(MaxNullsConstraint(0)),
"MaxNullsConstraint(value=0)")
self.assertEqual(repr(NoDuplicatesConstraint()),
"NoDuplicatesConstraint(value=True)")
self.assertEqual(repr(TypeConstraint('int')),
"TypeConstraint(value='int')")
self.assertEqual(repr(TypeConstraint(['int', 'real'])),
"TypeConstraint(value=['int', 'real'])")
self.assertEqual(repr(AllowedValuesConstraint(['a', 'b'])),
"AllowedValuesConstraint(value=['a', 'b'])")
def test_constraint_class(self):
goods = {
'type': 'TypeConstraint',
'min': 'MinConstraint',
'min_length': 'MinLengthConstraint',
'max': 'MaxConstraint',
'max_length': 'MaxLengthConstraint',
'sign': 'SignConstraint',
'max_nulls': 'MaxNullsConstraint',
'no_duplicates': 'NoDuplicatesConstraint',
'allowed_values': 'AllowedValuesConstraint',
}
for k,v in goods.items():
self.assertEqual(constraint_class(k), v)
def testBadConstraints(self):
self.assertRaisesRegex(TypeError, 'unexpected keyword',
SignConstraint, precision='closed')
self.assertRaises(AssertionError,
MinConstraint, 3, precision='unknown')
self.assertRaises(AssertionError,
SignConstraint, 'not too positive')
self.assertRaises(AssertionError,
TypeConstraint, 'float')
self.assertRaises(AssertionError,
TypeConstraint, ['int', 'float'])
self.assertRaises(AssertionError,
TypeConstraint, ['int', None])
def testFieldConstraintsDict(self):
c = FieldConstraints('one', [TypeConstraint('int'),
MinConstraint(3),
MaxConstraint(7),
SignConstraint('positive'),
MaxNullsConstraint(0),
NoDuplicatesConstraint()])
dfc = Fields([c])
self.assertEqual(strip_lines(json.dumps(dfc.to_dict_value(),
indent=4)),
'''{
"one": {
"type": "int",
"min": 3,
"max": 7,
"sign": "positive",
"max_nulls": 0,
"no_duplicates": true
}
}''')
c = FieldConstraints('one', [TypeConstraint('int'),
MinConstraint(3, precision='closed'),
MaxConstraint(7, precision='fuzzy'),
SignConstraint('positive'),
MaxNullsConstraint(0),
NoDuplicatesConstraint()])
dfc = Fields([c])
self.assertEqual(strip_lines(json.dumps(dfc.to_dict_value(),
indent=4)),
'''{
"one": {
"type": "int",
"min": {
"value": 3,
"precision": "closed"
},
"max": {
"value": 7,
"precision": "fuzzy"
},
"sign": "positive",
"max_nulls": 0,
"no_duplicates": true
}
}''')
def testload(self):
path = os.path.join(TESTDATA_DIR, 'ddd.tdda')
constraints = DatasetConstraints(loadpath=path)
# print(constraints)
if sys.version_info.major < 3:
# Quieten down Python3's vexatious complaining
TestConstraints.assertRaisesRegex = TestConstraints.assertRaisesRegexp
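# --- Added illustrative sketch (not part of the original test suite) ---
# Hedged example of building a small constraints object programmatically and
# turning it into a plain dict; the field name 'age' is an assumption.
def _example_build_constraints():
    fc = FieldConstraints('age', [TypeConstraint('int'),
                                  MinConstraint(0),
                                  MaxNullsConstraint(0)])
    return Fields([fc]).to_dict_value()  # plain dict, suitable for json.dumps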
if __name__ == '__main__':
unittest.main()
|
py | 1a5136559244ff3bee06a9c97afe68a8d155d407 |
def _is_displayed_filter(item):
    """Return True if item.is_displayed() can be called without raising.

    The visibility result itself is deliberately ignored (see the commented-out
    alternative below); only elements that raise, e.g. stale element
    references, are filtered out.
    """
    try:
        item.is_displayed()
        return True
        # return item.is_displayed()  # alternative: also filter on visibility
    except Exception as e:
        print(e)
        return False
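# --- Added illustrative sketch (not part of the original module) ---
# Hedged example of how the filter above is typically applied to a list of
# webdriver elements; `elements` is an assumed input.
def visible_elements(elements):
    """Return only the elements for which is_displayed() can be called safely."""
    return [el for el in elements if _is_displayed_filter(el)]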
|
py | 1a51366ca291f2414312722836609ca0ca5737af | # python
from chempy import io
from chempy import protein
from chempy import protein_mmff
from chempy import bond_mmff
#
#print 'normal'
#protein_mmff.check_sum(protein_mmff.normal)
#print 'n_terminal'
#protein_mmff.check_sum(protein_mmff.n_terminal)
#print 'c_terminal'
#protein_mmff.check_sum(protein_mmff.c_terminal)
model= io.pdb.fromFile("../../test/dat/pept.pdb")
model= protein.generate(model,forcefield=protein_mmff,bondfield=bond_mmff)
for a in model.atom:
a.numeric_type = protein_mmff.alpha_map[a.text_type]
sm = 0
for a in model.atom:
sm = sm + a.partial_charge
print " prot: net partial charge on protein is %8.3f" % sm
print " prot: (this should be integral)!"
io.pkl.toFile(model,"generate_mmff.pkl")
|
py | 1a5139b933b9dcdbf9bb161fb4a9a5e3a33ec728 | # -*- coding: utf-8 -*-
#
# Testing for server caching.
#
# ------------------------------------------------
# imports
# -------
import unittest
import pytest
import requests
from xml.etree import ElementTree
from . import config
# tests
# -----
@pytest.mark.usefixtures("server")
class TestLocalMock(unittest.TestCase):
def test_no_file(self):
response = requests.get(config.URL + '/missing')
self.assertEqual(response.status_code, 404)
return
def test_status(self):
response = requests.get(config.URL + '/_/status')
self.assertEqual(response.status_code, 200)
data = response.json()
self.assertEqual(data['status'], 'ok')
return
def test_get_root(self):
# json
response = requests.get(config.URL + '/json')
self.assertEqual(response.status_code, 200)
data = response.json()
self.assertEqual(data['status'], 'ok')
self.assertEqual(data['type'], 'json')
# xml
response = requests.get(config.URL + '/xml')
self.assertEqual(response.status_code, 200)
data = ElementTree.fromstring(response.content)
self.assertEqual(data.find('status').text, 'ok')
self.assertEqual(data.find('type').text, 'xml')
# text
response = requests.get(config.URL + '/txt')
self.assertEqual(response.status_code, 200)
data = str(response.text.rstrip())
self.assertEqual(data, 'test')
return
def test_get_query(self):
# json
response = requests.get(config.URL + '/query/json')
self.assertEqual(response.status_code, 200)
data = response.json()
self.assertEqual(data['status'], 'ok')
self.assertEqual(data['type'], 'json')
# xml
response = requests.get(config.URL + '/query/xml')
self.assertEqual(response.status_code, 200)
data = ElementTree.fromstring(response.content)
self.assertEqual(data.find('status').text, 'ok')
self.assertEqual(data.find('type').text, 'xml')
# text
response = requests.get(config.URL + '/query/txt')
self.assertEqual(response.status_code, 200)
data = str(response.text.rstrip())
self.assertEqual(data, 'test')
return
def test_get_params_single(self):
response = requests.get(config.URL + '?arg=test')
self.assertEqual(response.status_code, 200)
data = response.json()
self.assertEqual(data['status'], 'ok')
self.assertEqual(data['param'], 'test')
self.assertEqual(data['arg'], 'test')
return
def test_get_params_multiple(self):
response = requests.get(config.URL + '?narg=ntest&arg=test')
self.assertEqual(response.status_code, 200)
data = response.json()
self.assertEqual(data['status'], 'ok')
self.assertEqual(data['param'], 'test')
self.assertEqual(data['arg'], 'test')
return
def test_get_query_params_single(self):
response = requests.get(config.URL + '/query?arg=test')
self.assertEqual(response.status_code, 200)
data = response.json()
self.assertEqual(data['status'], 'ok')
self.assertEqual(data['param'], 'test')
self.assertEqual(data['arg'], 'test')
return
def test_get_query_params_multiple(self):
response = requests.get(config.URL + '/query?narg=ntest&arg=test')
self.assertEqual(response.status_code, 200)
data = response.json()
self.assertEqual(data['status'], 'ok')
self.assertEqual(data['param'], 'test')
self.assertEqual(data['arg'], 'test')
return
def test_post_payload(self):
# test resource: 1906fde
response = requests.post(config.URL, json={'data': 'test'})
self.assertEqual(response.status_code, 200)
data = response.json()
self.assertEqual(data['status'], 'ok')
self.assertEqual(data['param'], 'test')
return
def test_post_params_payload(self):
# test resource: arg=test1906fde
response = requests.post(config.URL + '?arg=test', json={'data': 'test'})
self.assertEqual(response.status_code, 200)
data = response.json()
self.assertEqual(data['status'], 'ok')
self.assertEqual(data['param'], 'test')
self.assertEqual(data['arg'], 'test')
return
def test_post_query_payload(self):
# test resource: query/1906fde
response = requests.post(config.URL + '/query', json={'data': 'test'})
self.assertEqual(response.status_code, 200)
data = response.json()
self.assertEqual(data['status'], 'ok')
self.assertEqual(data['param'], 'test')
return
def test_post_query_params_payload(self):
# test resource: query/arg=test1906fde
response = requests.post(config.URL + '/query?arg=test', json={'data': 'test'})
self.assertEqual(response.status_code, 200)
data = response.json()
self.assertEqual(data['status'], 'ok')
self.assertEqual(data['param'], 'test')
self.assertEqual(data['arg'], 'test')
return
def test_get_named_param(self):
import uuid
uu = str(uuid.uuid4())
# root
response = requests.get(config.URL + '/' + uu)
self.assertEqual(response.status_code, 200)
data = response.json()
self.assertEqual(data['status'], 'ok')
self.assertEqual(data['param'], 'test')
self.assertTrue('uuid' in data)
# query
response = requests.get(config.URL + '/query/' + uu)
self.assertEqual(response.status_code, 200)
data = response.json()
self.assertEqual(data['status'], 'ok')
self.assertEqual(data['param'], 'test')
self.assertTrue('uuid' in data)
return
def test_resolve_with_post_method(self):
# GET
response = requests.get(config.URL + '/json')
self.assertEqual(response.status_code, 200)
data = response.json()
self.assertEqual(data['status'], 'ok')
self.assertEqual(data['type'], 'json')
# POST
response = requests.post(config.URL + '/json')
self.assertEqual(response.status_code, 200)
data = response.json()
self.assertEqual(data['status'], 'ok')
self.assertEqual(data['type'], 'json')
# PUT
response = requests.put(config.URL + '/json')
self.assertEqual(response.status_code, 200)
data = response.json()
self.assertEqual(data['status'], 'ok')
self.assertEqual(data['type'], 'json')
return
@pytest.mark.usefixtures("server")
class TestDynamicMock(unittest.TestCase):
# GET
def test_get_simple(self):
response = requests.get(config.URL + '/simple')
self.assertEqual(response.status_code, 200)
data = response.json()
self.assertEqual(data['status'], 'ok')
return
def test_get_simple_args(self):
response = requests.get(config.URL + '/simple?arg=test')
self.assertEqual(response.status_code, 200)
data = response.json()
self.assertEqual(data['status'], 'ok')
self.assertEqual(data['arg'], 'test')
return
def test_get_nested(self):
response = requests.get(config.URL + '/nested/test')
self.assertEqual(response.status_code, 200)
data = response.json()
self.assertEqual(data['status'], 'ok')
self.assertEqual(data['param'], 'test')
return
def test_get_nested_args(self):
response = requests.get(config.URL + '/nested/test?arg=test')
self.assertEqual(response.status_code, 200)
data = response.json()
self.assertEqual(data['status'], 'ok')
self.assertEqual(data['param'], 'test')
self.assertEqual(data['arg'], 'test')
return
# POST
def test_post_simple(self):
response = requests.post(config.URL + '/simple', json={'param': 'test'})
self.assertEqual(response.status_code, 200)
data = response.json()
self.assertEqual(data['status'], 'ok')
self.assertEqual(data['param'], 'test')
return
def test_post_simple_args(self):
response = requests.post(config.URL + '/simple?arg=test', json={'param': 'test'})
self.assertEqual(response.status_code, 200)
data = response.json()
self.assertEqual(data['status'], 'ok')
self.assertEqual(data['arg'], 'test')
self.assertEqual(data['param'], 'test')
return
def test_post_nested(self):
response = requests.post(config.URL + '/nested/test', json={'payload': 'test'})
self.assertEqual(response.status_code, 200)
data = response.json()
self.assertEqual(data['status'], 'ok')
self.assertEqual(data['param'], 'test')
self.assertEqual(data['payload'], 'test')
return
def test_post_nested_args(self):
response = requests.post(config.URL + '/nested/test?arg=test', json={'payload': 'test'})
self.assertEqual(response.status_code, 200)
data = response.json()
self.assertEqual(data['status'], 'ok')
self.assertEqual(data['arg'], 'test')
self.assertEqual(data['param'], 'test')
self.assertEqual(data['payload'], 'test')
return
# PUT
def test_put_simple(self):
response = requests.put(config.URL + '/simple', json={'param': 'test'})
self.assertEqual(response.status_code, 200)
data = response.json()
self.assertEqual(data['status'], 'ok')
self.assertEqual(data['param'], 'test')
return
def test_put_simple_args(self):
response = requests.put(config.URL + '/simple?arg=test', json={'param': 'test'})
self.assertEqual(response.status_code, 200)
data = response.json()
self.assertEqual(data['status'], 'ok')
self.assertEqual(data['arg'], 'test')
self.assertEqual(data['param'], 'test')
return
def test_put_nested(self):
response = requests.put(config.URL + '/nested/test', json={'payload': 'test'})
self.assertEqual(response.status_code, 200)
data = response.json()
self.assertEqual(data['status'], 'ok')
self.assertEqual(data['param'], 'test')
self.assertEqual(data['payload'], 'test')
return
def test_put_nested_args(self):
response = requests.put(config.URL + '/nested/test?arg=test', json={'payload': 'test'})
self.assertEqual(response.status_code, 200)
data = response.json()
self.assertEqual(data['status'], 'ok')
self.assertEqual(data['arg'], 'test')
self.assertEqual(data['param'], 'test')
self.assertEqual(data['payload'], 'test')
return
|
py | 1a5139eab8c37e6abf96e51703246fbb16c912d0 | """
Open3d visualization tool box
Written by Jihan YANG
All rights reserved from 2021 - present.
"""
import open3d
import torch
import matplotlib
import numpy as np
box_colormap = [
[1, 1, 1],
[0, 1, 0],
[0, 1, 1],
[1, 1, 0],
]
def get_coor_colors(obj_labels):
"""
Args:
obj_labels: 1 is ground, labels > 1 indicates different instance cluster
Returns:
rgb: [N, 3]. color for each point.
"""
colors = matplotlib.colors.XKCD_COLORS.values()
max_color_num = obj_labels.max()
color_list = list(colors)[:max_color_num+1]
colors_rgba = [matplotlib.colors.to_rgba_array(color) for color in color_list]
label_rgba = np.array(colors_rgba)[obj_labels]
label_rgba = label_rgba.squeeze()[:, :3]
return label_rgba
def draw_scenes(points, gt_boxes=None, ref_boxes=None, ref_labels=None, ref_scores=None, point_colors=None, draw_origin=True):
"""
็ปๅถ็นไบ
Args:
points:็นไบ
gt_boxes:็ๅผbox ๏ผN, 7๏ผ
ref_boxes:้ขๆตbox ๏ผM, 7๏ผ
ref_scores:้ขๆตๅๆฐ (M,)
ref_labels:้ขๆต็ฑปๅซ (M,)
"""
    # 1. Check the input types and convert torch tensors to numpy arrays
if isinstance(points, torch.Tensor):
points = points.cpu().numpy()
if isinstance(gt_boxes, torch.Tensor):
gt_boxes = gt_boxes.cpu().numpy()
if isinstance(ref_boxes, torch.Tensor):
ref_boxes = ref_boxes.cpu().numpy()
vis = open3d.visualization.Visualizer()
vis.create_window()
vis.get_render_option().point_size = 1.0
vis.get_render_option().background_color = np.zeros(3)
# draw origin
if draw_origin:
axis_pcd = open3d.geometry.TriangleMesh.create_coordinate_frame(size=1.0, origin=[0, 0, 0])
vis.add_geometry(axis_pcd)
pts = open3d.geometry.PointCloud()
pts.points = open3d.utility.Vector3dVector(points[:, :3])
vis.add_geometry(pts)
if point_colors is None:
pts.colors = open3d.utility.Vector3dVector(np.ones((points.shape[0], 3)))
else:
pts.colors = open3d.utility.Vector3dVector(point_colors)
if gt_boxes is not None:
vis = draw_box(vis, gt_boxes, (0, 0, 1))
if ref_boxes is not None:
vis = draw_box(vis, ref_boxes, (0, 1, 0), ref_labels, ref_scores)
vis.run()
vis.destroy_window()
def translate_boxes_to_open3d_instance(gt_boxes):
"""
4-------- 6
/| /|
5 -------- 3 .
| | | |
. 7 -------- 1
|/ |/
2 -------- 0
"""
center = gt_boxes[0:3]
lwh = gt_boxes[3:6]
axis_angles = np.array([0, 0, gt_boxes[6] + 1e-10])
rot = open3d.geometry.get_rotation_matrix_from_axis_angle(axis_angles)
box3d = open3d.geometry.OrientedBoundingBox(center, rot, lwh)
line_set = open3d.geometry.LineSet.create_from_oriented_bounding_box(box3d)
# import ipdb; ipdb.set_trace(context=20)
lines = np.asarray(line_set.lines)
lines = np.concatenate([lines, np.array([[1, 4], [7, 6]])], axis=0)
line_set.lines = open3d.utility.Vector2iVector(lines)
return line_set, box3d
def draw_box(vis, gt_boxes, color=(0, 1, 0), ref_labels=None, score=None):
for i in range(gt_boxes.shape[0]):
line_set, box3d = translate_boxes_to_open3d_instance(gt_boxes[i])
if ref_labels is None:
line_set.paint_uniform_color(color)
else:
line_set.paint_uniform_color(box_colormap[ref_labels[i]])
vis.add_geometry(line_set)
# if score is not None:
# corners = box3d.get_box_points()
# vis.add_3d_label(corners[5], '%.2f' % score[i])
return vis
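# A minimal usage sketch (hypothetical inputs; boxes follow the
# [x, y, z, dx, dy, dz, heading] layout consumed by translate_boxes_to_open3d_instance):
#   points = np.random.rand(1000, 3)
#   gt = np.array([[0.0, 0.0, 0.0, 4.0, 2.0, 1.5, 0.3]])
#   draw_scenes(points, gt_boxes=gt)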
|
py | 1a513a88a7314c8bd656186af8247e5f7717cdb5 | import torch
from torch import nn
from MedicalNet.models import resnet
def generate_model(opt):
assert opt.model in [
'resnet'
]
if opt.model == 'resnet':
assert opt.model_depth in [10, 18, 34, 50, 101, 152, 200]
if opt.model_depth == 10:
model = resnet.resnet10(
sample_input_W=opt.input_W,
sample_input_H=opt.input_H,
sample_input_D=opt.input_D,
shortcut_type=opt.resnet_shortcut,
no_cuda=opt.no_cuda,
num_seg_classes=opt.n_seg_classes)
elif opt.model_depth == 18:
model = resnet.resnet18(
sample_input_W=opt.input_W,
sample_input_H=opt.input_H,
sample_input_D=opt.input_D,
shortcut_type=opt.resnet_shortcut,
no_cuda=opt.no_cuda,
num_seg_classes=opt.n_seg_classes)
elif opt.model_depth == 34:
model = resnet.resnet34(
sample_input_W=opt.input_W,
sample_input_H=opt.input_H,
sample_input_D=opt.input_D,
shortcut_type=opt.resnet_shortcut,
no_cuda=opt.no_cuda,
num_seg_classes=opt.n_seg_classes)
elif opt.model_depth == 50:
model = resnet.resnet50(
sample_input_W=opt.input_W,
sample_input_H=opt.input_H,
sample_input_D=opt.input_D,
shortcut_type=opt.resnet_shortcut,
no_cuda=opt.no_cuda,
num_seg_classes=opt.n_seg_classes)
elif opt.model_depth == 101:
model = resnet.resnet101(
sample_input_W=opt.input_W,
sample_input_H=opt.input_H,
sample_input_D=opt.input_D,
shortcut_type=opt.resnet_shortcut,
no_cuda=opt.no_cuda,
num_seg_classes=opt.n_seg_classes)
elif opt.model_depth == 152:
model = resnet.resnet152(
sample_input_W=opt.input_W,
sample_input_H=opt.input_H,
sample_input_D=opt.input_D,
shortcut_type=opt.resnet_shortcut,
no_cuda=opt.no_cuda,
num_seg_classes=opt.n_seg_classes)
elif opt.model_depth == 200:
model = resnet.resnet200(
sample_input_W=opt.input_W,
sample_input_H=opt.input_H,
sample_input_D=opt.input_D,
shortcut_type=opt.resnet_shortcut,
no_cuda=opt.no_cuda,
num_seg_classes=opt.n_seg_classes)
if not opt.no_cuda:
if len(opt.gpu_id) > 1:
model = model.cuda()
model = nn.DataParallel(model, device_ids=opt.gpu_id)
net_dict = model.state_dict()
else:
import os
os.environ["CUDA_VISIBLE_DEVICES"] = str(opt.gpu_id[0])
model = model.cuda()
model = nn.DataParallel(model, device_ids=None)
net_dict = model.state_dict()
else:
net_dict = model.state_dict()
# load pretrain
if opt.pretrain_path:
print('loading pretrained model {}'.format(opt.pretrain_path))
pretrain = torch.load(opt.pretrain_path)
pretrain_dict = {k: v for k, v in pretrain['state_dict'].items() if k in net_dict.keys()}
net_dict.update(pretrain_dict)
model.load_state_dict(net_dict)
new_parameters = []
for pname, p in model.named_parameters():
for layer_name in opt.new_layer_names:
if pname.find(layer_name) >= 0:
new_parameters.append(p)
break
new_parameters_id = list(map(id, new_parameters))
base_parameters = list(filter(lambda p: id(p) not in new_parameters_id, model.parameters()))
parameters = {'base_parameters': base_parameters,
'new_parameters': new_parameters}
return model, parameters
return model, model.parameters()
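# Minimal usage sketch (the attribute values below are illustrative assumptions;
# only the attribute names match what generate_model reads):
#   from argparse import Namespace
#   opt = Namespace(model='resnet', model_depth=18, input_W=112, input_H=112, input_D=56,
#                   resnet_shortcut='A', no_cuda=True, n_seg_classes=2, gpu_id=[0],
#                   pretrain_path='', new_layer_names=['conv_seg'])
#   net, params = generate_model(opt)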
|
py | 1a513b4f2b01274207b9937e18a8fb16db770fb2 | # vim:ts=4:sts=4:sw=4:expandtab
import copy
import datetime
import dateutil.parser
import glob
import json
import logging
import math
from multiprocessing import Process
import os
import random
import shutil
import subprocess
import sys
import tempfile
import traceback
from threading import Thread
import time
import uuid
from kolejka.common import kolejka_config, foreman_config
from kolejka.common import KolejkaTask, KolejkaResult, KolejkaLimits
from kolejka.common import MemoryAction, TimeAction, parse_memory
from kolejka.client import KolejkaClient
from kolejka.common.gpu import gpu_stats
from kolejka.common.images import (
pull_docker_image,
get_docker_image_size,
check_docker_image_existance,
list_docker_images,
remove_docker_image
)
from kolejka.worker.stage0 import stage0
from kolejka.worker.volume import check_python_volume
def manage_images(pull, size, necessary_images, priority_images):
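    """Keep the local docker image cache within the `size` budget (summary inferred
    from the code below): necessary images are always kept and pulled when missing,
    priority and remaining images are kept only while they fit, the rest are removed."""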
necessary_size = sum(necessary_images.values(), 0)
free_size = size - necessary_size
assert free_size >= 0
docker_images = list_docker_images()
p_images = dict()
for image in priority_images:
if image in docker_images:
p_images[image] = docker_images[image]
priority_images = p_images
keep_images = set()
for image in necessary_images:
keep_images.add(image)
list_images = list(priority_images.items())
random.shuffle(list_images)
li = list(docker_images.items())
random.shuffle(li)
list_images += li
for image,size in list_images:
if image in keep_images:
continue
if size <= free_size:
free_size -= size
keep_images.add(image)
for image in docker_images:
if image not in keep_images:
remove_docker_image(image)
for image, size in necessary_images.items():
pull_image = pull
if not pull_image:
if not check_docker_image_existance(image):
pull_image = True
if pull_image:
pull_docker_image(image)
image_size = get_docker_image_size(image)
assert image_size <= size
def foreman_single(temp_path, task):
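    """Run one dequeued task in a throwaway jail directory (summary of the code below):
    optionally back the jail with a size-limited tmpfs, fetch the task from the server,
    execute it via stage0 and upload the resulting KolejkaResult."""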
config = foreman_config()
    with tempfile.TemporaryDirectory(dir=temp_path) as jailed_path:
if task.limits.workspace is not None:
subprocess.run(['mount', '-t', 'tmpfs', '-o', 'size='+str(task.limits.workspace), 'none', jailed_path], check=True)
try:
task_path = os.path.join(jailed_path, 'task')
result_path = os.path.join(jailed_path, 'result')
temp_path = os.path.join(jailed_path, 'temp')
os.makedirs(task_path, exist_ok=True)
os.makedirs(result_path, exist_ok=True)
os.makedirs(temp_path, exist_ok=True)
task.path = task_path
client = KolejkaClient()
client.task_get(task.id, task_path)
for k,f in task.files.items():
f.path = k
task.commit()
stage0(task.path, result_path, temp_path=temp_path, consume_task_folder=True)
result = KolejkaResult(result_path)
result.tags = config.tags
client.result_put(result)
except:
traceback.print_exc()
finally:
            if task.limits.workspace is not None:
subprocess.run(['umount', '-l', jailed_path])
def foreman():
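    """Main worker loop (summary of the code below): build the local resource limits,
    repeatedly dequeue tasks that fit them, pack as many tasks as concurrency and the
    remaining resources allow, prepare their images and run each batch in child processes."""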
config = foreman_config()
gstats = gpu_stats().gpus
limits = KolejkaLimits()
limits.cpus = config.cpus
limits.memory = config.memory
limits.swap = config.swap
limits.pids = config.pids
limits.storage = config.storage
limits.image = config.image
limits.workspace = config.workspace
limits.time = config.time
limits.network = config.network
limits.gpus = config.gpus
if limits.gpus is None:
limits.gpus = len(gstats)
limits.gpu_memory = config.gpu_memory
for k,v in gstats.items():
if limits.gpu_memory is None:
limits.gpu_memory = v.memory_total
elif v.memory_total is not None:
limits.gpu_memory = min(limits.gpu_memory, v.memory_total)
client = KolejkaClient()
logging.debug('Foreman tags: {}, limits: {}'.format(config.tags, limits.dump()))
while True:
try:
tasks = client.dequeue(config.concurency, limits, config.tags)
if len(tasks) == 0:
time.sleep(config.interval)
else:
check_python_volume()
while len(tasks) > 0:
resources = KolejkaLimits()
resources.copy(limits)
image_usage = dict()
processes = list()
cpus_offset = 0
gpus_offset = 0
for task in tasks:
if len(processes) >= config.concurency:
break
if task.exclusive and len(processes) > 0:
break
task.limits.update(limits)
task.limits.cpus_offset = cpus_offset
task.limits.gpus_offset = gpus_offset
ok = True
if resources.cpus is not None and task.limits.cpus > resources.cpus:
ok = False
if task.limits.gpus is not None and task.limits.gpus > 0:
if resources.gpus is None or task.limits.gpus > resources.gpus:
ok = False
if resources.gpu_memory is not None and task.limits.gpu_memory > resources.gpu_memory:
ok = False
if resources.memory is not None and task.limits.memory > resources.memory:
ok = False
if resources.swap is not None and task.limits.swap > resources.swap:
ok = False
if resources.pids is not None and task.limits.pids > resources.pids:
ok = False
if resources.storage is not None and task.limits.storage > resources.storage:
ok = False
if resources.image is not None:
image_usage_add = max(image_usage.get(task.image, 0), task.limits.image) - image_usage.get(task.image, 0)
if image_usage_add > resources.image:
ok = False
if resources.workspace is not None and task.limits.workspace > resources.workspace:
ok = False
if ok:
proc = Process(target=foreman_single, args=(config.temp_path, task))
processes.append(proc)
cpus_offset += task.limits.cpus
if resources.cpus is not None:
resources.cpus -= task.limits.cpus
if resources.gpus is not None and task.limits.gpus is not None:
resources.gpus -= task.limits.gpus
gpus_offset += task.limits.gpus
if resources.memory is not None:
resources.memory -= task.limits.memory
if resources.swap is not None:
resources.swap -= task.limits.swap
if resources.pids is not None:
resources.pids -= task.limits.pids
if resources.storage is not None:
resources.storage -= task.limits.storage
if resources.image is not None:
resources.image -= image_usage_add
image_usage[task.image] = max(image_usage.get(task.image, 0), task.limits.image)
if resources.workspace is not None:
resources.workspace -= task.limits.workspace
tasks = tasks[1:]
if task.exclusive:
break
else:
break
if config.image is not None:
manage_images(
config.pull,
config.image,
image_usage,
[task.image for task in tasks]
)
for proc in processes:
proc.start()
for proc in processes:
proc.join()
except KeyboardInterrupt:
raise
except:
traceback.print_exc()
time.sleep(config.interval)
def config_parser(parser):
parser.add_argument('--auto-tags', type=bool, help='add automatically generated machine tags', default=True)
parser.add_argument('--pull', action='store_true', help='always pull images, even if local version is present', default=False)
parser.add_argument('--tags', type=str, help='comma separated list of machine tags')
parser.add_argument('--temp', type=str, help='temp folder')
parser.add_argument('--interval', type=float, help='dequeue interval (in seconds)')
parser.add_argument('--concurency', type=int, help='number of simultaneous tasks')
parser.add_argument('--cpus', type=int, help='cpus limit')
parser.add_argument('--memory', action=MemoryAction, help='memory limit')
parser.add_argument('--swap', action=MemoryAction, help='swap limit')
parser.add_argument('--pids', type=int, help='pids limit')
parser.add_argument('--storage', action=MemoryAction, help='storage limit')
parser.add_argument('--image', action=MemoryAction, help='image size limit')
parser.add_argument('--workspace', action=MemoryAction, help='workspace size limit')
parser.add_argument('--time', action=TimeAction, help='time limit')
    parser.add_argument('--network', type=bool, help='allow networking')
parser.add_argument('--gpus', type=int, help='gpus limit')
    parser.add_argument('--gpu-memory', action=MemoryAction, help='gpu memory limit')
def execute(args):
kolejka_config(args=args)
foreman()
parser.set_defaults(execute=execute)
|
py | 1a513e9386324d0c4e8586c26aa3d520518132f4 | """Helper functions for beam search."""
import numpy as np
from queue import PriorityQueue
from future.utils import implements_iterator
def InitBeam(phrase, user_id, m):
# Need to find the hidden state for the last char in the prefix.
prev_hidden = np.zeros((1, 2 * m.params.num_units))
for word in phrase[:-1]:
feed_dict = {
m.model.prev_hidden_state: prev_hidden,
m.model.prev_word: [m.char_vocab[word]],
m.model.beam_size: 4
}
prev_hidden = m.session.run(m.model.next_hidden_state, feed_dict)
return prev_hidden
class BeamItem(object):
"""This is a node in the beam search tree.
  Each node holds three things: a log probability, a list of previous words, and
  the previous hidden state vector.
"""
def __init__(self, prev_word, prev_hidden, log_prob=0.0):
self.log_probs = log_prob
if type(prev_word) == list:
self.words = prev_word
else:
self.words = [prev_word]
self.prev_hidden = prev_hidden
def __le__(self, other):
return self.log_probs <= other.log_probs
def __lt__(self, other):
return self.log_probs < other.log_probs
def __ge__(self, other):
return self.log_probs >= other.log_probs
def __gt__(self, other):
return self.log_probs > other.log_probs
def __eq__(self, other):
return self.log_probs == other.log_probs
def __str__(self):
return 'beam {0:.3f}: '.format(self.log_probs) + ''.join(self.words)
class BeamQueue(object):
"""Bounded priority queue."""
def __init__(self, max_size=10):
self.max_size = max_size
self.size = 0
self.bound = None
self.q = PriorityQueue()
def Insert(self, item):
self.size += 1
self.q.put((-item.log_probs, item))
if self.size > self.max_size:
self.Eject()
def CheckBound(self, val):
# If the queue is full then we know that there is no chance of a new item
    # being accepted if its priority is worse than the last thing that got
# ejected.
return self.size < self.max_size or self.bound is None or val < self.bound
def Eject(self):
score, _ = self.q.get()
self.bound = -score
self.size -= 1
def __iter__(self):
return self
def __next__(self):
if not self.q.empty():
_, item = self.q.get()
return item
raise StopIteration
def next(self):
return self.__next__()
def GetCompletions(prefix, user_id, m, branching_factor=8, beam_size=300,
stop='</S>'):
""" Find top completions for a given prefix, user and model."""
m.Lock(user_id) # pre-compute the adaptive recurrent matrix
prev_state = InitBeam(prefix, user_id, m)
nodes = [BeamItem(prefix, prev_state)]
for i in range(36):
new_nodes = BeamQueue(max_size=beam_size)
current_nodes = []
for node in nodes:
if i > 0 and node.words[-1] == stop: # don't extend past the stop token
new_nodes.Insert(node) # copy over finished beams
else:
current_nodes.append(node) # these ones will get extended
if len(current_nodes) == 0:
return new_nodes # all beams have finished
# group together all the nodes in the queue for efficient computation
prev_hidden = np.vstack([item.prev_hidden for item in current_nodes])
prev_words = np.array([m.char_vocab[item.words[-1]] for item in current_nodes])
feed_dict = {
m.model.prev_word: prev_words,
m.model.prev_hidden_state: prev_hidden,
m.model.beam_size: branching_factor
}
current_char, current_char_p, prev_hidden = m.session.run(
[m.beam_chars, m.model.selected_p, m.model.next_hidden_state],
feed_dict)
for i, node in enumerate(current_nodes):
for new_word, top_value in zip(current_char[i, :], current_char_p[i, :]):
new_cost = top_value + node.log_probs
if new_nodes.CheckBound(new_cost): # only create a new object if it fits in beam
new_beam = BeamItem(node.words + [new_word], prev_hidden[i, :],
log_prob=new_cost)
new_nodes.Insert(new_beam)
nodes = new_nodes
return nodes
def FirstNonMatch(s1, s2, start=0):
# returns the position of the first non-matching character
min_len = min(len(s1), len(s2))
  for i in range(start, min_len):
if s1[i] != s2[i]:
return i
return min_len
def GetSavedKeystrokes(m, query, branching_factor=4, beam_size=100):
"""Find the shortest prefix that gets the right completion.
Uses binary search.
"""
left = 1
right = len(query)
while left <= right:
    midpoint = (left + right) // 2
prefix = ['<S>'] + list(query[:midpoint])
completions = GetCompletions(
prefix, 0, m, branching_factor=branching_factor, beam_size=beam_size)
top_completion = list(completions)[-1]
top_completion = ''.join(top_completion.words[1:-1])
if top_completion == query:
right = midpoint - 1
else:
left = midpoint + 1
return left
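# Usage sketch (hypothetical: `m` stands for a loaded model wrapper exposing the
# char_vocab, session, model, beam_chars and Lock members referenced above):
#   completions = GetCompletions(['<S>', 'h', 'e'], user_id=0, m=m)
#   best = ''.join(list(completions)[-1].words[1:-1])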
|
py | 1a513fa6fc6875f73565686b07d96b3014f71980 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Keras metrics."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.distribute import combinations as ds_combinations
from tensorflow.python.distribute import strategy_combinations
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_combinations as combinations
from tensorflow.python.keras import metrics
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
def _labeled_dataset_fn():
# First four batches of x: labels, predictions -> (labels == predictions)
# 0: 0, 0 -> True; 1: 1, 1 -> True; 2: 2, 2 -> True; 3: 3, 0 -> False
# 4: 4, 1 -> False; 5: 0, 2 -> False; 6: 1, 0 -> False; 7: 2, 1 -> False
# 8: 3, 2 -> False; 9: 4, 0 -> False; 10: 0, 1 -> False; 11: 1, 2 -> False
# 12: 2, 0 -> False; 13: 3, 1 -> False; 14: 4, 2 -> False; 15: 0, 0 -> True
return dataset_ops.Dataset.range(1000).map(
lambda x: {"labels": x % 5, "predictions": x % 3}).batch(
4, drop_remainder=True)
def _boolean_dataset_fn():
# First four batches of labels, predictions: {TP, FP, TN, FN}
# with a threshold of 0.5:
# T, T -> TP; F, T -> FP; T, F -> FN
# F, F -> TN; T, T -> TP; F, T -> FP
# T, F -> FN; F, F -> TN; T, T -> TP
# F, T -> FP; T, F -> FN; F, F -> TN
return dataset_ops.Dataset.from_tensor_slices({
"labels": [True, False, True, False],
"predictions": [True, True, False, False]}).repeat().batch(
3, drop_remainder=True)
def _threshold_dataset_fn():
# First four batches of labels, predictions: {TP, FP, TN, FN}
# with a threshold of 0.5:
# True, 1.0 -> TP; False, .75 -> FP; True, .25 -> FN
# False, 0.0 -> TN; True, 1.0 -> TP; False, .75 -> FP
# True, .25 -> FN; False, 0.0 -> TN; True, 1.0 -> TP
# False, .75 -> FP; True, .25 -> FN; False, 0.0 -> TN
return dataset_ops.Dataset.from_tensor_slices({
"labels": [True, False, True, False],
"predictions": [1.0, 0.75, 0.25, 0.]}).repeat().batch(
3, drop_remainder=True)
def _regression_dataset_fn():
return dataset_ops.Dataset.from_tensor_slices({
"labels": [1., .5, 1., 0.],
"predictions": [1., .75, .25, 0.]}).repeat()
def all_combinations():
return combinations.combine(
distribution=[
strategy_combinations.default_strategy,
strategy_combinations.one_device_strategy,
strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
strategy_combinations.mirrored_strategy_with_two_gpus,
strategy_combinations.mirrored_strategy_with_two_gpus_no_merge_call,
],
mode=["graph"])
def tpu_combinations():
return combinations.combine(
distribution=[strategy_combinations.tpu_strategy,],
mode=["graph"])
class KerasMetricsTest(test.TestCase, parameterized.TestCase):
def _test_metric(self, distribution, dataset_fn, metric_init_fn, expected_fn):
with ops.Graph().as_default(), distribution.scope():
metric = metric_init_fn()
iterator = distribution.make_input_fn_iterator(lambda _: dataset_fn())
updates = distribution.experimental_local_results(
distribution.run(metric, args=(iterator.get_next(),)))
batches_per_update = distribution.num_replicas_in_sync
self.evaluate(iterator.initializer)
self.evaluate([v.initializer for v in metric.variables])
batches_consumed = 0
for i in range(4):
batches_consumed += batches_per_update
self.evaluate(updates)
self.assertAllClose(expected_fn(batches_consumed),
self.evaluate(metric.result()),
0.001,
msg="After update #" + str(i+1))
if batches_consumed >= 4: # Consume 4 input batches in total.
break
@ds_combinations.generate(all_combinations() + tpu_combinations())
def testMean(self, distribution):
def _dataset_fn():
return dataset_ops.Dataset.range(1000).map(math_ops.to_float).batch(
4, drop_remainder=True)
def _expected_fn(num_batches):
# Mean(0..3) = 1.5, Mean(0..7) = 3.5, Mean(0..11) = 5.5, etc.
return num_batches * 2 - 0.5
self._test_metric(distribution, _dataset_fn, metrics.Mean, _expected_fn)
if __name__ == "__main__":
test.main()
|
py | 1a513fd1df262cf2f1aaa46b98dea606cec603b1 | from django.test import TestCase
from l10n.validators import aupostcode, capostcode, uspostcode
from l10n import l10n_settings
from l10n.utils import moneyfmt
from decimal import Decimal
class AUPostCodeTest(TestCase):
def test_valid(self):
code = aupostcode.validate("2000")
self.assertEqual('2000', code)
code = aupostcode.validate(" 2000 ")
self.assertEqual('2000', code)
def test_invalid(self):
try:
code = capostcode.validate("")
self.fail('Invalid blank postal code not caught')
except:
pass
try:
code = capostcode.validate("no")
self.fail('Invalid postal code "no" not caught')
except:
pass
class CAPostCodeTest(TestCase):
def test_valid(self):
code = capostcode.validate("M5V2T6")
self.assertEqual('M5V2T6', code)
code = capostcode.validate("m5v2t6")
self.assertEqual('M5V2T6', code)
def test_invalid(self):
try:
code = capostcode.validate("")
self.fail('Invalid blank postal code not caught')
except:
pass
try:
code = capostcode.validate("no")
self.fail('Invalid postal code "no" not caught')
except:
pass
try:
code = capostcode.validate("M5V M5V")
self.fail('Invalid postal code "M5V M5V" not caught')
except:
pass
try:
code = capostcode.validate("D5V 2T6")
self.fail('Invalid postal code "D5V 2T6" not caught -- "D" is not a valid major geographic area or province.')
except:
pass
class USPostCodeTest(TestCase):
def test_five_digit(self):
zipcode = uspostcode.validate("66044")
self.assertEqual('66044', zipcode)
try:
zipcode = uspostcode.validate(" 66044 ")
self.fail("Invalid postal code not caught")
except:
pass
def test_nine_digit(self):
zipcode = uspostcode.validate("94043-1351")
self.assertEqual('94043-1351', zipcode)
try:
zipcode = uspostcode.validate(" 94043-1351 ")
            self.fail('Invalid postal code not caught')
except:
pass
def test_invalid(self):
try:
code = uspostcode.validate("")
self.fail('Invalid blank postal code not caught')
except:
pass
try:
zipcode = uspostcode.validate("no")
self.fail('Invalid ZIP code "no" not caught')
except:
pass
class MoneyFmtTest(TestCase):
def testUSD(self):
l10n_settings.set_l10n_setting('default_currency', 'USD')
val = Decimal('10.00')
self.assertEqual(moneyfmt(val), '$10.00')
self.assertEqual(moneyfmt(val, currency_code='USD'), '$10.00')
def testGBP(self):
l10n_settings.set_l10n_setting('default_currency', 'GBP')
val = Decimal('10.00')
        self.assertEqual(moneyfmt(val), '£10.00')
        self.assertEqual(moneyfmt(val, currency_code='GBP'), '£10.00')
self.assertEqual(moneyfmt(val, currency_code='USD'), '$10.00')
val = Decimal('-100.00')
        self.assertEqual(moneyfmt(val), '-£100.00')
def testFake(self):
currencies = l10n_settings.get_l10n_setting('currency_formats')
currencies['FAKE'] = {'symbol': '^ ', 'positive': "%(val)0.2f ^", 'negative': "(%(val)0.2f) ^", 'decimal': ','}
l10n_settings.set_l10n_setting('currency_formats', currencies)
val = Decimal('10.00')
self.assertEqual(moneyfmt(val, currency_code='FAKE'), '10,00 ^')
val = Decimal('-50.00')
self.assertEqual(moneyfmt(val, currency_code='FAKE'), '(50,00) ^')
|
py | 1a5141b1615fd021b6bee8e978f8e7b355ae6847 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RoaRequest
class ListMergeRequestsRequest(RoaRequest):
def __init__(self):
RoaRequest.__init__(self, 'codeup', '2020-04-14', 'ListMergeRequests')
self.set_uri_pattern('/api/v4/merge_requests/advanced_search')
self.set_method('GET')
def get_BeforeDate(self):
return self.get_query_params().get('BeforeDate')
def set_BeforeDate(self,BeforeDate):
self.add_query_param('BeforeDate',BeforeDate)
def get_AssigneeIdList(self):
return self.get_query_params().get('AssigneeIdList')
def set_AssigneeIdList(self,AssigneeIdList):
self.add_query_param('AssigneeIdList',AssigneeIdList)
def get_AccessToken(self):
return self.get_query_params().get('AccessToken')
def set_AccessToken(self,AccessToken):
self.add_query_param('AccessToken',AccessToken)
def get_SubscriberCodeupIdList(self):
return self.get_query_params().get('SubscriberCodeupIdList')
def set_SubscriberCodeupIdList(self,SubscriberCodeupIdList):
self.add_query_param('SubscriberCodeupIdList',SubscriberCodeupIdList)
def get_AfterDate(self):
return self.get_query_params().get('AfterDate')
def set_AfterDate(self,AfterDate):
self.add_query_param('AfterDate',AfterDate)
def get_OrganizationId(self):
return self.get_query_params().get('OrganizationId')
def set_OrganizationId(self,OrganizationId):
self.add_query_param('OrganizationId',OrganizationId)
def get_GroupIdList(self):
return self.get_query_params().get('GroupIdList')
def set_GroupIdList(self,GroupIdList):
self.add_query_param('GroupIdList',GroupIdList)
def get_Search(self):
return self.get_query_params().get('Search')
def set_Search(self,Search):
self.add_query_param('Search',Search)
def get_AuthorCodeupIdList(self):
return self.get_query_params().get('AuthorCodeupIdList')
def set_AuthorCodeupIdList(self,AuthorCodeupIdList):
self.add_query_param('AuthorCodeupIdList',AuthorCodeupIdList)
def get_AuthorIdList(self):
return self.get_query_params().get('AuthorIdList')
def set_AuthorIdList(self,AuthorIdList):
self.add_query_param('AuthorIdList',AuthorIdList)
def get_PageSize(self):
return self.get_query_params().get('PageSize')
def set_PageSize(self,PageSize):
self.add_query_param('PageSize',PageSize)
def get_ProjectIdList(self):
return self.get_query_params().get('ProjectIdList')
def set_ProjectIdList(self,ProjectIdList):
self.add_query_param('ProjectIdList',ProjectIdList)
def get_Page(self):
return self.get_query_params().get('Page')
def set_Page(self,Page):
self.add_query_param('Page',Page)
def get_AssigneeCodeupIdList(self):
return self.get_query_params().get('AssigneeCodeupIdList')
def set_AssigneeCodeupIdList(self,AssigneeCodeupIdList):
self.add_query_param('AssigneeCodeupIdList',AssigneeCodeupIdList)
def get_State(self):
return self.get_query_params().get('State')
def set_State(self,State):
self.add_query_param('State',State)
def get_Order(self):
return self.get_query_params().get('Order')
def set_Order(self,Order):
self.add_query_param('Order',Order) |
py | 1a51423d85c9374c50bef1603c9ef810b380cd00 | from django.urls import path
from .consumers import AnalysisConsumer, ServiceConsumer
websocket_urlpatterns = [
path(r"ws/service/", ServiceConsumer),
path(r"ws/analyses/", AnalysisConsumer),
path(r"ws/analyses/<uuid:analysis_id>/", AnalysisConsumer),
]
|
py | 1a5143f0b8bb49d0a84397a46ddcad0a35409169 | # Generated by Django 3.0.5 on 2020-04-30 23:57
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('weblog', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Category',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255)),
],
),
migrations.AddField(
model_name='post',
name='category',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='weblog.Category'),
),
]
|
py | 1a51454f84341f65aced0452196c5e6a35b9d324 | # -*- coding: utf-8 -*-
"""
Created on Tue Jun 26 11:50:02 2018
@author: Andrija Master
"""
import numpy as np
import pandas as pd
import warnings
warnings.filterwarnings('ignore')
from scipy.optimize import minimize
import scipy as sp
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split
from sklearn.metrics import roc_auc_score
import math
""" CLASS GCRFCNB """
class GCRFCNB:
def __init__(self):
pass
def muKov(alfa, R, Precison, Noinst, NodeNo):
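        # Per-instance GCRF mean, as computed below: b = 2 * R.dot(alfa) reshaped to
        # (Noinst, NodeNo), Kov_m = inv(Precision_m) and mu_m = Kov_m.dot(b_m).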
mu = np.zeros([Noinst,NodeNo])
bv = 2*np.matmul(R,alfa)
bv = bv.reshape([Noinst,NodeNo])
Kov = np.linalg.inv(Precison)
for m in range(Noinst):
mu[m,:] = Kov[m,:,:].dot(bv[m,:])
return mu,Kov
def Prec(alfa,beta,NodeNo,Se,Noinst):
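        # Precision assembled below as 2 * (Q2 + diag(-colsum(Q2)) + Q1), with
        # Q1 = I * sum(alfa) and Q2 = -sum_j beta_j * Se[:, j] (a Laplacian-like structure).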
alfasum = np.sum(alfa)
Q1 = np.identity(NodeNo)*alfasum
Q2 = np.zeros([Noinst,NodeNo,NodeNo])
Prec = np.zeros([Noinst,NodeNo,NodeNo])
pomocna = np.zeros(Se.shape)
for j in range(Se.shape[1]):
pomocna[:,j,:,:] = Se[:,j,:,:] * beta[j]
Q2 = -np.sum(pomocna,axis = 1)
for m in range(Noinst):
Prec[m,:,:] = 2*(Q2[m,:,:]+np.diag(-Q2[m,:,:].sum(axis=0))+Q1)
return Prec
def sigmaCal(ceta): # Provereno
Sigma=1/(1 + np.exp(-ceta))
Sigma[Sigma>0.99999999] = 0.99999999
Sigma[Sigma<1e-10] = 1e-10
return Sigma
""" PREDICT """
def predict(self,R,Se):
NodeNo = Se.shape[3]
Noinst = Se.shape[0]
Precison = GCRFCNB.Prec(self.alfa, self.beta, NodeNo, Se, Noinst)
mu, Kovmat = GCRFCNB.muKov(self.alfa, R, Precison, Noinst, NodeNo)
Prob = GCRFCNB.sigmaCal(mu)
Class = np.round(Prob,0)
self.Prob = Prob
self.Class = Class
return self.Prob, self.Class
""" FIT """
def fit(self,R,Se,Y,x0 = None, learn = 'SLSQP', maxiter = 1000, learnrate = 0.1):
def L(x,Y,ModelUNNo,ModelSTNo,NodeNo,Noinst,R):
alfa=x[:ModelUNNo]
beta=x[-ModelSTNo:]
print(alfa)
Precison = GCRFCNB.Prec(alfa, beta, NodeNo, Se, Noinst)
mu,kovMat = GCRFCNB.muKov(alfa,R,Precison,Noinst,NodeNo)
sigma = GCRFCNB.sigmaCal(mu)
L = np.sum(Y*np.log(sigma)+(1-Y)*np.log(1-sigma))
            print('score is {}'.format(L))
return -1*L
def DLdx(x,Y,ModelUNNo,ModelSTNo,NodeNo,Noinst,R):
def sigmaFUN(Y,mu):
sigma = GCRFCNB.sigmaCal(mu)
sigmafun=Y-sigma
return sigmafun
            def dPrecdbeta(Noinst,ModelSTNo,NodeNo,Se): # verified
dPrecdbeta = np.zeros([Noinst,ModelSTNo,NodeNo,NodeNo])
dPrecdbeta = -Se
for m in range(Noinst):
for L in range(ModelSTNo):
dPrecdbeta[m,L,:,:]=2*(dPrecdbeta[m,L,:,:] + np.diag(-dPrecdbeta[m,L,:,:].sum(axis=1)))
return dPrecdbeta
def dLdalfadbeta(sigmafun,dmudalfa,dmudbeta,ModelUNNo,ModelSTNo):
dLdalfa = np.zeros(ModelUNNo)
dLdbeta = np.zeros(ModelSTNo)
for i in range(ModelUNNo):
dLdalfa[i] = np.sum(sigmafun*dmudalfa[:,i,:])
for i in range(ModelSTNo):
dLdbeta[i] = np.sum(sigmafun*dmudbeta[:,i,:])
return dLdalfa,dLdbeta
            def dPrecdalfa(NodeNo,ModelUNNo): # verified
dPrecdalfa=np.zeros([ModelUNNo,NodeNo,NodeNo])
dQ1dalfa=np.identity(NodeNo)
for p in range(ModelUNNo):
dPrecdalfa[p,:,:]=dQ1dalfa*2
return dPrecdalfa
            def dbdalfa(ModelUNNo,Noinst,R,NodeNo): # verified 1
dbdalfa = np.zeros([Noinst,ModelUNNo,NodeNo])
for m in range(ModelUNNo):
dbdalfa[:,m,:] = 2*R[:,m].reshape([Noinst, NodeNo])
return dbdalfa
            def dmutdalfa(dbdalfa,DPrecdalfa,Kov,ModelUNNo,Noinst,mu): # verified
dmutdalfa=np.zeros([Noinst,ModelUNNo,NodeNo])
for m in range(Noinst):
for p in range(ModelUNNo):
dmutdalfa[m,p,:]=(dbdalfa[m,p,:]-DPrecdalfa[p,:,:].dot(mu[m,:])).T.dot(Kov[m,:,:])
return dmutdalfa
            def dmutdbeta(dPrecdbeta,mu,Kov,Noinst,ModelSTNo,NodeNo): # verified
dmutdbeta=np.zeros([Noinst,ModelSTNo,NodeNo])
for m in range(0,Noinst):
for p in range(0,ModelSTNo):
dmutdbeta[m,p,:]=(-dPrecdbeta[m,p,:,:].dot(mu[m,:])).T.dot(Kov[m,:,:])
return dmutdbeta
alfa=x[:ModelUNNo]
beta=x[-ModelSTNo:]
            DPrecdalfa=dPrecdalfa(NodeNo,ModelUNNo) # independent of alfa and of the iterations
Precison = GCRFCNB.Prec(alfa, beta, NodeNo, Se, Noinst)
DPrecdbeta = dPrecdbeta(Noinst,ModelSTNo,NodeNo,Se)
mu,kovMat = GCRFCNB.muKov(alfa,R,Precison,Noinst,NodeNo)
mu[np.isnan(mu)] = 0
Dbdalfa = dbdalfa(ModelUNNo,Noinst,R,NodeNo)
# Dbdalfa[Dbdalfa == -np.inf] = -1e12
Dmudalfa = dmutdalfa(Dbdalfa,DPrecdalfa,kovMat,ModelUNNo,Noinst,mu)
Dmudbeta = dmutdbeta(DPrecdbeta,mu,kovMat,Noinst,ModelSTNo,NodeNo)
sigmafun = sigmaFUN(Y,mu)
DLdalfa,DLdbeta = dLdalfadbeta(sigmafun,Dmudalfa,Dmudbeta,ModelUNNo,ModelSTNo)
DLdx = -np.concatenate((DLdalfa,DLdbeta))
print(DLdx)
return DLdx
ModelUNNo = R.shape[1]
NodeNo = Se.shape[2]
Noinst = Se.shape[0]
ModelSTNo = Se.shape[1]
bnd = ((1e-8,None),)*(ModelSTNo+ModelUNNo)
if x0 is None:
x0 = np.abs(np.random.randn(ModelUNNo + ModelSTNo))*100
if learn == 'SLSQP':
res = minimize(L, x0, method='SLSQP', jac=DLdx, args=(Y,ModelUNNo,ModelSTNo,NodeNo,Noinst,R),\
options={'disp': True,'maxiter': maxiter,'ftol': 1e-8},bounds=bnd)
self.alfa = res.x[:ModelUNNo]
self.beta = res.x[ModelUNNo:ModelSTNo+ModelUNNo]
elif learn == 'TNC':
bnd = ((1e-6,None),)*(ModelSTNo+ModelUNNo)
res = sp.optimize.fmin_tnc(L, x0, fprime = DLdx, \
args=(Y,ModelUNNo,ModelSTNo,NodeNo,Noinst,R),\
bounds = bnd)
self.alfa = res[0][:ModelUNNo]
self.beta = res[0][ModelUNNo:ModelSTNo+ModelUNNo]
elif learn == 'EXP':
x = x0
u1 = np.log(x0)
for i in range(maxiter):
dLdx = -DLdx(x,Y,ModelUNNo,ModelSTNo,NodeNo,Noinst,R)
u1 = u1 + learnrate*x*dLdx
x = np.exp(u1)
L1 = -L(x,Y,ModelUNNo,ModelSTNo,NodeNo,Noinst,R)
                print('In iteration {} DLDX is {}'.format(i,dLdx))
                print('In iteration {} L is {}'.format(i,L1))
self.alfa = x[:ModelUNNo]
self.beta = x[ModelUNNo:ModelSTNo+ModelUNNo]
self.x = x
#""" Proba na SIN podacima """
#import time
#start_time = time.time()
#def S(connect,Se,Xst):
# for j in range(NoGraph):
# for k,l in connect[j]:
# if j == 0:
# Se[:,j,k,l] = np.exp(np.abs(Xst.iloc[:,j].unstack().values[:,k] -
# Xst.iloc[:,j].unstack().values[:,l]))*0.1
# Se[:,j,l,k] = Se[:,j,k,l]
# elif j == 1:
# Se[:,j,k,l] = np.exp(np.abs(Xst.iloc[:,j].unstack().values[:,k] -
# Xst.iloc[:,j].unstack().values[:,l]))*0.3
# Se[:,j,l,k] = Se[:,j,k,l]
# return Se
#
#path = 'D:\Dokumenti\Programi Python\Proba.xlsx'
#df = pd.read_excel(path)
##R = df.iloc[:,:2].values
##R=np.random.rand(5200,2)*2-1
#R = np.load('R_sinteticki.npy')
#NodeNo = 4
#Nopoint = R.shape[0]
#Noinst = np.round(Nopoint/NodeNo).astype(int)
#i1 = np.arange(NodeNo)
#i2 = np.arange(Noinst)
#Xst = np.load('Xst.npy')
#Xst =pd.DataFrame(data=Xst)
#Xst['Node'] = np.tile(i1, Noinst)
#Xst['Inst'] = np.repeat(i2,NodeNo)
#Xst = Xst.set_index(['Inst','Node'])
#connect1=np.array([[0,1],[1,2]])
#connect2=np.array([[0,1],[2,3]])
#connect=[connect1,connect2]
#NoGraph = len(connect)
##Se = np.zeros([Noinst,NoGraph,NodeNo,NodeNo])
##Se = S(connect,Se,Xst)
#Se = np.load('Se.npy')
#
#Notrain = (Noinst*0.8).astype(int)
#Notest = (Noinst*0.2).astype(int)
#
#
#mod1 = GCRFCNB()
#mod1.alfa = np.array([1,18])
#mod1.beta = np.array([0.2,0.2])
#prob, Y = mod1.predict(R,Se)
#Se_train = Se[:Notrain,:,:,:]
#R_train = R[:Notrain*NodeNo,:]
#Y_test = Y[Notrain:Noinst,:]
#Y_train = Y[:Notrain,:]
#
#mod1.fit(R_train, Se_train, Y_train, learn = 'TNC')
#
#R_test = R[Notrain*NodeNo:Noinst*NodeNo,:]
#Se_test = Se[Notrain:Noinst,:,:,:]
#prob2, Y2, Var = mod1.predict(R_test,Se_test)
#Prob1 = prob2.copy()
#Prob1[Y2==0] = 1 - Prob1[Y2==0]
#Y21 = Y2.reshape([Y2.shape[0]*Y2.shape[1]])
#Y_test1 = Y_test.reshape([Y_test.shape[0]*Y_test.shape[1]])
#probr = prob2.reshape([prob2.shape[0]*prob2.shape[1]])
#probr1 = Prob1.reshape([Prob1.shape[0]*Prob1.shape[1]])
#print('AUC je {}'.format(roc_auc_score(Y_test1,probr)))
##print('Skor je {}'.format(accuracy_score(Y21,Y_test1)))
#print('LogPRob je {}'.format(np.sum(np.log(probr1))))
#print("--- %s seconds ---" % (time.time() - start_time))
#""" Stvarni podaci Skijasi """
#Spom = np.load('Se.npy')
#R_train = np.load('Z_train_com.npy')
#R_test = np.load('Z_test_com.npy')
#Y_train = np.load('Y_train.npy')
#Y_test = np.load('Y_test.npy')
#Se_train_inst = np.load('Se_train.npy')
#Se_test_inst = np.load('Se_test.npy')
#
#NodeNo = 7
#Noinst_train = np.round(R_train.shape[0]/NodeNo).astype(int)
#Noinst_test = np.round(R_test.shape[0]/NodeNo).astype(int)
#
#ModelSTNo = 6
#Se_train = np.zeros([Noinst_train,ModelSTNo,NodeNo,NodeNo])
#Se_test = np.zeros([Noinst_test,ModelSTNo,NodeNo,NodeNo])
#
#for i in range(Noinst_train):
# Se_train[i,:5,:,:] = Spom
#
#for i in range(Noinst_test):
# Se_test[i,:5,:,:] = Spom
#
#Se_train[:,5,:,:] = np.squeeze(Se_train_inst)
#Se_test[:,5,:,:] = np.squeeze(Se_test_inst)
#
#
#mod1 = GCRFCNB()
#
#
#mod1.fit(R_train, Se_train, Y_train, learn = 'SLSQP', learnrate = 6e-4, maxiter = 300)
#
##mod1.alfa = np.array([0.1043126 , 0.06905401, 0.08689079])
##mod1.beta = np.array([1.00008728e-08, 2.88191498e+02, 1.00000563e-08, 1.00000000e-08,
## 8.74943190e+01, 3.48984028e-03])
#
#prob2, Y2 = mod1.predict(R_test,Se_test)
#Y2 = Y2.reshape([Y2.shape[0]*Y2.shape[1]])
#Y_test = Y_test.reshape([Y_test.shape[0]*Y_test.shape[1]])
#prob2 = prob2.reshape([prob2.shape[0]*prob2.shape[1]])
#
#print('AUC GCRFCNB prediktora je {}'.format(roc_auc_score(Y_test,prob2)))
#print('Skor GCRFCNB prediktora je {}'.format(accuracy_score(Y2,Y_test)))
##Skor_com = np.load('Skor_com.npy')
#Skor_com_AUC = np.load('Skor_com_AUC.npy')
#print('AUC nestruktuiranih prediktora je {}'.format(Skor_com_AUC))
##print('Skor nestruktuiranih prediktora je {}'.format(Skor_com))
#print('Logprob je {}'.format(np.sum(np.log(prob2))))
#""" Stvarni podaci Debeli """
#
#import time
#Spom = np.load('Se.npy')
#R_train = np.load('Z_train_com.npy')
#R_train[R_train == -np.inf] = -10
#R_train[R_train == -np.inf] = np.min(R_train)-100
#R_test = np.load('Z_test_com.npy')
#R_test[R_test == -np.inf] = -10
#R_test[R_test == -np.inf] = np.min(R_test)-100
#Y_train = np.load('Y_train.npy')
#Y_test = np.load('Y_test.npy')
#for i in range(R_train.shape[1]):
# Range = np.abs(np.max(R_train[:,i]) + np.min(R_train[:,i]))
# faktor = int(math.log10(Range))
# R_train[:,i] = R_train[:,i]*10**(-faktor)
# R_test[:,i] = R_test[:,i]*10**(-faktor)
#
#NodeNo = 10
#Noinst_train = np.round(R_train.shape[0]/NodeNo).astype(int)
#Noinst_test = np.round(R_test.shape[0]/NodeNo).astype(int)
#
#ModelSTNo = 4
#Se_train = np.zeros([Noinst_train,ModelSTNo,NodeNo,NodeNo])
#Se_test = np.zeros([Noinst_test,ModelSTNo,NodeNo,NodeNo])
#
#for i in range(Noinst_train):
# Se_train[i,:,:,:] = Spom
#
#for i in range(Noinst_test):
# Se_test[i,:,:,:] = Spom
#
#mod1 = GCRFCNB()
#
#start_time = time.time()
#mod1.fit(R_train, Se_train, Y_train, learn = 'SLSQP', learnrate = 6e-4, maxiter = 5000)
#
#
##mod1.alfa = np.array([1-10, 1e-10, 1e-10, 3000])
##mod1.beta = np.array([1.0000000e-10, 1.0000000e-10, 1e-10, 1e-10])
#
#prob2, Y2 = mod1.predict(R_test,Se_test)
#Y2 = Y2.reshape([Y2.shape[0]*Y2.shape[1]])
#Y_test = Y_test.reshape([Y_test.shape[0]*Y_test.shape[1]])
#prob2 = prob2.reshape([prob2.shape[0]*prob2.shape[1]])
#
##Y_train = Y_train.reshape([Y_train.shape[0]*Y_train.shape[1]])
#print('AUC GCRFCNB prediktora je {}'.format(roc_auc_score(Y_test,prob2)))
##print('Skor GCRFCNB prediktora je {}'.format(accuracy_score(Y2,Y_test)))
##Skor_com = np.load('Skor_com.npy')
#Skor_com_AUC = np.load('Skor_com_AUC.npy')
#print('AUC nestruktuiranih prediktora je {}'.format(Skor_com_AUC))
##print('Skor nestruktuiranih prediktora je {}'.format(Skor_com))
#print('Logprob je {}'.format(np.sum(np.log(prob2))))
#print("--- %s seconds ---" % (time.time() - start_time)) |
py | 1a51457df9621ba280d1136ceb609f143a19ec9f | """The tests for the Switch component."""
# pylint: disable=protected-access
import unittest
from homeassistant.setup import setup_component, async_setup_component
from homeassistant import core, loader
from homeassistant.components import switch
from homeassistant.const import STATE_ON, STATE_OFF, CONF_PLATFORM
from tests.common import get_test_home_assistant
from tests.components.switch import common
class TestSwitch(unittest.TestCase):
"""Test the switch module."""
# pylint: disable=invalid-name
def setUp(self):
"""Set up things to be run when tests are started."""
self.hass = get_test_home_assistant()
platform = loader.get_component(self.hass, 'switch.test')
platform.init()
# Switch 1 is ON, switch 2 is OFF
self.switch_1, self.switch_2, self.switch_3 = \
platform.DEVICES
# pylint: disable=invalid-name
def tearDown(self):
"""Stop everything that was started."""
self.hass.stop()
def test_methods(self):
"""Test is_on, turn_on, turn_off methods."""
assert setup_component(
self.hass, switch.DOMAIN, {switch.DOMAIN: {CONF_PLATFORM: 'test'}}
)
assert switch.is_on(self.hass)
assert STATE_ON == \
self.hass.states.get(switch.ENTITY_ID_ALL_SWITCHES).state
assert switch.is_on(self.hass, self.switch_1.entity_id)
assert not switch.is_on(self.hass, self.switch_2.entity_id)
assert not switch.is_on(self.hass, self.switch_3.entity_id)
common.turn_off(self.hass, self.switch_1.entity_id)
common.turn_on(self.hass, self.switch_2.entity_id)
self.hass.block_till_done()
assert switch.is_on(self.hass)
assert not switch.is_on(self.hass, self.switch_1.entity_id)
assert switch.is_on(self.hass, self.switch_2.entity_id)
# Turn all off
common.turn_off(self.hass)
self.hass.block_till_done()
assert not switch.is_on(self.hass)
assert STATE_OFF == \
self.hass.states.get(switch.ENTITY_ID_ALL_SWITCHES).state
assert not switch.is_on(self.hass, self.switch_1.entity_id)
assert not switch.is_on(self.hass, self.switch_2.entity_id)
assert not switch.is_on(self.hass, self.switch_3.entity_id)
# Turn all on
common.turn_on(self.hass)
self.hass.block_till_done()
assert switch.is_on(self.hass)
assert STATE_ON == \
self.hass.states.get(switch.ENTITY_ID_ALL_SWITCHES).state
assert switch.is_on(self.hass, self.switch_1.entity_id)
assert switch.is_on(self.hass, self.switch_2.entity_id)
assert switch.is_on(self.hass, self.switch_3.entity_id)
def test_setup_two_platforms(self):
"""Test with bad configuration."""
# Test if switch component returns 0 switches
test_platform = loader.get_component(self.hass, 'switch.test')
test_platform.init(True)
loader.set_component(self.hass, 'switch.test2', test_platform)
test_platform.init(False)
assert setup_component(
self.hass, switch.DOMAIN, {
switch.DOMAIN: {CONF_PLATFORM: 'test'},
'{} 2'.format(switch.DOMAIN): {CONF_PLATFORM: 'test2'},
}
)
async def test_switch_context(hass):
"""Test that switch context works."""
assert await async_setup_component(hass, 'switch', {
'switch': {
'platform': 'test'
}
})
state = hass.states.get('switch.ac')
assert state is not None
await hass.services.async_call('switch', 'toggle', {
'entity_id': state.entity_id,
}, True, core.Context(user_id='abcd'))
state2 = hass.states.get('switch.ac')
assert state2 is not None
assert state.state != state2.state
assert state2.context.user_id == 'abcd'
|
py | 1a5145b57a2c50b8cd5caf6dd3588ca0eab7b127 | from .siammask import SiamMask
def get_tracker_class():
return SiamMask
|
py | 1a5145d29c78d73316a45feccab5cd777784329a | from io import StringIO
from .dvexpansion import *
class YMLConfigPP:
def __init__(self, pathes):
self.out_fd = StringIO()
self.include_files = set()
self.pp_pathes = []
for p in pathes:
self.pp_pathes.append(evaluate_dollar_var_expr(p))
def find_yml_file(self, yml):
ret = None
for pp_path in self.pp_pathes:
print("pp_path: ", pp_path)
yml_fn = os.path.join(pp_path, yml)
if os.path.exists(yml_fn):
ret = yml_fn
break
return ret
def get_pp_content(self):
return self.out_fd.getvalue()
def run_pp(self, yml_fn):
fd = open(yml_fn, "r")
self.out_fd.write(f"# {yml_fn}\n")
while 1:
l = fd.readline()
if not l:
break
if l.find("!include") != -1:
self.out_fd.write(f"# {l}")
self.process_include(l)
else:
self.out_fd.write(l)
self.out_fd.write("\n#\n")
self.out_fd.write(f"# end of file {yml_fn}\n")
def process_include(self, include_line):
include_re = r"!include\s+<([\w+/\.\-]+)>"
m = re.match(include_re, include_line)
if m == None or len(m.groups()) != 1:
raise Exception(f"YMLConfigPP::process_include: malformed line {include_line}")
include_file = self.find_yml_file(m.groups()[0])
if include_file == None:
raise Exception(f"YMLConfigPP::process_include: can't resolve {include_line}")
if not include_file in self.include_files:
self.include_files.add(include_file)
self.run_pp(include_file)
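# Usage sketch (the search pathes and file name are assumptions):
#   pp = YMLConfigPP(["$HOME/configs", "/etc/myapp/yml"])
#   top_yml = pp.find_yml_file("main.yml")
#   if top_yml:
#       pp.run_pp(top_yml)
#       print(pp.get_pp_content())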
|
py | 1a514602721a586fd848443cae6d91b496b5b642 | """Metrics related to messages."""
from prometheus_client import Counter, Gauge
msgs_sent = Counter("msg_sent",
"Number of messages sent between nodes",
["node_id", "msg_type"])
msg_rtt = Gauge("msg_rtt",
"Time taken to send a message to a node and get an ACK",
["node_id", "receiver_id", "receiver_hostname"])
msgs_in_queue = Gauge("msgs_in_queue",
"The amount of messages waiting to be sent over channel",
["node_id", "receiver_id", "receiver_hostname"])
allow_service_rtt = Gauge("allow_service_rtt",
"Time taken from declining service to allowing",
["node_id", "view_from"])
msg_sent_size = Gauge("msg_sent_size",
"Size of a message sent from node over com_mod of type \
msg_type",
["node_id", "msg_type", "com_mod"])
bytes_sent = Counter("bytes_sent",
"Number of bytes sent from node over com_mod",
["node_id", "com_mod"])
run_method_time = Gauge("run_method_time",
"Time taken to run the run-forever-loop",
["node_id", "module"])
msgs_during_exp = Gauge("msgs_during_exp",
"Number of messages sent during an experiment",
["node_id", "exp_param", "view_est_msgs",
"rep_msgs", "prim_mon_msgs", "fd_msgs"])
bytes_during_exp = Gauge("bytes_during_exp",
"Number of bytes sent during an experiment",
["node_id", "exp_param", "view_est_bytes",
"rep_bytes", "prim_mon_bytes", "fd_bytes"])
|
py | 1a51463e2f5587d16ec27c5aed93eb03320fed72 | import sys
import studentdirectory as sd
import gui
def main_for_command_line():
stud_dir = sd.StudentDirectory()
while(1):
print("\nSTUDENT DIRECTORY MENU")
print(" [a] Add New Student")
print(" [b] View Student Details")
print(" [c] Show Student Directory")
print(" [d] Edit Student Details")
print(" [e] Delete Student")
print(" [f] Clear Student Directory")
print(" [g] Exit")
choice = input("Enter choice: ")
if choice == "a":
print("\nADD NEW Student")
key = input(" Enter new student's student number: ")
detail0 = input(" Enter new student's name: ")
detail1 = input(" Enter new student's course and year: ")
detail2 = input(" Enter new student's age: ")
detail3 = input(" Enter new student's email address: ")
detail4 = input(" Enter new student's contact number: ")
value = [detail0, detail1, detail2, detail3, detail4]
stud_dir.add_new_student(key, value)
print("\nNew student added to directory successfully.\n")
elif choice == "b":
print("\nVIEW STUDENT DETAILS")
key = input(" Enter student number: ")
stud_dir.view_student_details(key)
print(" ")
elif choice == "c":
print("\nSHOW STUDENT DIRECTORY")
stud_dir.show_student_directory()
print(" ")
elif choice == "d":
print("\nEDIT STUDENT DETAILS")
key = input(" Enter student number: ")
if stud_dir.check_if_student_exist(key):
detail0 = input(" Enter student's new name: ")
detail1 = input(" Enter student's new course and year: ")
detail2 = input(" Enter student's new age: ")
detail3 = input(" Enter student's new email address: ")
detail4 = input(" Enter student's new contact number: ")
value = [detail0, detail1, detail2, detail3, detail4]
stud_dir.edit_student_details(key, value)
print("\nStudent's details edited successfully.\n")
else:
print("Student number does not exist in "
+ "the student directory.")
elif choice == "e":
print("\nDELETE STUDENT")
key = input(" Enter student number: ")
if stud_dir.check_if_student_exist(key):
stud_dir.delete_student(key)
print("\nStudent removed from the student "
+ "directory successfully.\n")
else:
print("Student number does not exist in "
+ "the student directory.")
elif choice == "f":
print("\nCLEAR STUDENT DIRECTORY")
print(" WARNING! This will delete all entries in the "
+ "student directory. Do you really want to proceed?")
decision = input(" [y]es or [n]o: ")
if decision == "y":
print("\nClearing student directory...")
stud_dir.clear_student_directory()
print("Clearing student directory successful.\n")
elif decision == "n":
print("\n Good call. Going back to the menu...")
else:
print("\nNonexistent decision. Going back to the menu...")
elif choice == "g":
print("\nSaving student directory changes to JSON file.")
stud_dir.save_changes()
print("Done. Bye.")
sys.exit(0)
else:
print("\nNonexistent choice.")
def main():
# main_for_command_line()
gui.GUI()
if __name__ == "__main__": main() |
py | 1a51473887ae8165ee792b8146895262fec46c33 | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# Generated file, DO NOT EDIT
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------------------------
from msrest.serialization import Model
class InputFilterCondition(Model):
"""InputFilterCondition.
:param case_sensitive: Whether or not to do a case sensitive match
:type case_sensitive: bool
:param input_id: The Id of the input to filter on
:type input_id: str
:param input_value: The "expected" input value to compare with the actual input value
:type input_value: str
:param operator: The operator applied between the expected and actual input value
:type operator: object
"""
_attribute_map = {
'case_sensitive': {'key': 'caseSensitive', 'type': 'bool'},
'input_id': {'key': 'inputId', 'type': 'str'},
'input_value': {'key': 'inputValue', 'type': 'str'},
'operator': {'key': 'operator', 'type': 'object'}
}
def __init__(self, case_sensitive=None, input_id=None, input_value=None, operator=None):
super(InputFilterCondition, self).__init__()
self.case_sensitive = case_sensitive
self.input_id = input_id
self.input_value = input_value
self.operator = operator
|
py | 1a51481358b4bbfcf4af11dbfecaa6ae6d9388af | from __future__ import absolute_import, division, print_function, unicode_literals
class context(object):
'''
Autograd context object to wrap forward and backward passes when using
distributed autograd. The context_id generated in the 'with' is required
to uniquely identify a distributed autograd pass on all workers. Each
worker stores metadata associated with this context_id, which is required
to correctly execute a distributed autograd pass.
This is only needed in the "FAST" mode for distributed autograd, where we
assume all RPC communication is would also be part of the backward pass.
Example::
>> import torch.distributed.autograd as dist_autograd
>> with dist_autograd.context() as context_id:
>> forward pass...
>> backward pass...
>> optimizer step...
'''
# TODO: Update the above example to a working solution.
def __enter__(self):
self.autograd_context = _new_context()
return self.autograd_context._context_id()
def __exit__(self, type, value, traceback):
_release_context(self.autograd_context._context_id())
|
py | 1a5148606bb63dd0b47cf26a3ea745cde3845755 | """Test cases for symbolic_expressions module."""
from pyquil import quil, quilatom
import sympy
import pytest
from .sympy_expressions import expression_from_sympy, SYMPY_DIALECT
from .translations import translate_expression
from .pyquil_expressions import QUIL_DIALECT, expression_from_pyquil
@pytest.mark.parametrize(
"sympy_expression, quil_expression",
[
(sympy.Symbol("theta"), quil.Parameter("theta")),
(
sympy.Mul(sympy.Symbol("theta"), sympy.Symbol("gamma"), evaluate=False),
quil.Parameter("theta") * quil.Parameter("gamma"),
),
(sympy.cos(sympy.Symbol("theta")), quilatom.quil_cos(quil.Parameter("theta"))),
(
sympy.cos(2 * sympy.Symbol("theta")),
quilatom.quil_cos(2 * quil.Parameter("theta")),
),
(
sympy.exp(sympy.Symbol("x") - sympy.Symbol("y")),
quilatom.quil_exp(quil.Parameter("x") - quil.Parameter("y")),
),
(
sympy.Add(
sympy.cos(sympy.Symbol("phi")),
sympy.I * sympy.sin(sympy.Symbol("phi")),
evaluate=False,
),
quilatom.quil_cos(quil.Parameter("phi"))
+ 1j * quilatom.quil_sin(quil.Parameter("phi")),
),
(
sympy.Add(
sympy.Symbol("x"),
sympy.Mul(sympy.Symbol("y"), (2 + 3j), evaluate=False),
evaluate=False,
),
quil.Parameter("x") + quil.Parameter("y") * (2 + 3j),
),
(
sympy.cos(sympy.sin(sympy.Symbol("tau"))),
quilatom.quil_cos(quilatom.quil_sin(quil.Parameter("tau"))),
),
(
sympy.Symbol("x") / sympy.Symbol("y"),
quil.Parameter("x") / quil.Parameter("y"),
),
(
sympy.tan(sympy.Symbol("theta")),
quilatom.quil_sin(quil.Parameter("theta"))
/ quilatom.quil_cos(quil.Parameter("theta")),
),
(2 ** sympy.Symbol("x"), 2 ** quil.Parameter("x")),
(
sympy.Symbol("y") ** sympy.Symbol("x"),
quil.Parameter("y") ** quil.Parameter("x"),
),
(sympy.Symbol("x") ** 2, quil.Parameter("x") ** 2),
(
sympy.sqrt(sympy.Symbol("x") - sympy.Symbol("y")),
quilatom.quil_sqrt(quil.Parameter("x") - quil.Parameter("y")),
),
(-5 * sympy.Symbol("x") * sympy.Symbol("y"), -5 * quil.Parameter("x") * quil.Parameter("y")),
],
)
def test_translating_tree_from_sympy_to_quil_gives_expected_result(
sympy_expression, quil_expression
):
expression = expression_from_sympy(sympy_expression)
assert translate_expression(expression, QUIL_DIALECT) == quil_expression
@pytest.mark.parametrize(
"quil_expression, sympy_expression",
[
(quil.Parameter("theta"), sympy.Symbol("theta")),
(
quil.Parameter("theta") * quil.Parameter("gamma"),
sympy.Symbol("theta") * sympy.Symbol("gamma"),
),
(
quilatom.quil_cos(quil.Parameter("theta")),
sympy.cos(sympy.Symbol("theta")),
),
(
quilatom.quil_cos(2 * quil.Parameter("theta")),
sympy.cos(2 * sympy.Symbol("theta")),
),
(
quilatom.quil_exp(quil.Parameter("x") - quil.Parameter("y")),
sympy.exp(sympy.Symbol("x") - sympy.Symbol("y")),
),
(
quilatom.quil_cos(quil.Parameter("phi"))
+ 1j * quilatom.quil_sin(quil.Parameter("phi")),
(sympy.cos(sympy.Symbol("phi")) + sympy.I * sympy.sin(sympy.Symbol("phi"))),
),
(
quil.Parameter("x") + quil.Parameter("y") * (2 + 3j),
sympy.Symbol("x") + sympy.Symbol("y") * (2 + 3j),
),
(
quilatom.quil_cos(quilatom.quil_sin(quil.Parameter("tau"))),
sympy.cos(sympy.sin(sympy.Symbol("tau"))),
),
(
quil.Parameter("x") / quil.Parameter("y"),
sympy.Symbol("x") / sympy.Symbol("y"),
),
(2 ** quil.Parameter("x"), 2 ** sympy.Symbol("x")),
(
quil.Parameter("y") ** quil.Parameter("x"),
sympy.Symbol("y") ** sympy.Symbol("x"),
),
(quil.Parameter("x") ** 2, sympy.Symbol("x") ** 2),
(
quilatom.quil_sqrt(quil.Parameter("x") - quil.Parameter("y")),
sympy.sqrt(sympy.Symbol("x") - sympy.Symbol("y")),
),
],
)
def test_translating_tree_from_quil_to_sympy_gives_expected_result(
quil_expression, sympy_expression
):
expression = expression_from_pyquil(quil_expression)
assert translate_expression(expression, SYMPY_DIALECT) - sympy_expression == 0
|
py | 1a51494c79c2b9bd89af44c07939f26a24820170 | #!/usr/bin/env python2.7
# -*- coding: utf-8 -*-
# Copyright (C) Canux CHENG <[email protected]>
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
# OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""Plugin that checks the XML validity."""
import plugin
from plugin.xml import PluginXMLValidity
PluginXMLValidity(
version=plugin.version,
description='Check XML validity.'
).run()
|
py | 1a5149ff4a14d8f820f222159244ce4018c3bd52 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.proto import caffe2_pb2
from caffe2.python import core
from hypothesis import assume, given
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.serialized_test.serialized_test_util as serial
import hypothesis.strategies as st
import numpy as np
class TestReductionOps(serial.SerializedTestCase):
@serial.given(n=st.integers(5, 8), **hu.gcs)
def test_elementwise_sum(self, n, gc, dc):
X = np.random.rand(n).astype(np.float32)
def sum_op(X):
return [np.sum(X)]
op = core.CreateOperator(
"SumElements",
["X"],
["y"]
)
self.assertReferenceChecks(
device_option=gc,
op=op,
inputs=[X],
reference=sum_op,
)
self.assertGradientChecks(
device_option=gc,
op=op,
inputs=[X],
outputs_to_check=0,
outputs_with_grads=[0],
)
@serial.given(n=st.integers(5, 8), **hu.gcs)
def test_elementwise_int_sum(self, n, gc, dc):
X = np.random.rand(n).astype(np.int32)
def sum_op(X):
return [np.sum(X)]
op = core.CreateOperator(
"SumElementsInt",
["X"],
["y"]
)
self.assertReferenceChecks(
device_option=gc,
op=op,
inputs=[X],
reference=sum_op,
)
@serial.given(n=st.integers(1, 65536),
dtype=st.sampled_from([np.float32, np.float16]),
**hu.gcs)
def test_elementwise_sqrsum(self, n, dtype, gc, dc):
if dtype == np.float16:
# fp16 is only supported with CUDA
assume(gc.device_type == caffe2_pb2.CUDA)
dc = [d for d in dc if d.device_type == caffe2_pb2.CUDA]
X = np.random.rand(n).astype(dtype)
def sumsqr_op(X):
return [np.sum(X * X)]
op = core.CreateOperator(
"SumSqrElements",
["X"],
["y"]
)
threshold = 0.01 if dtype == np.float16 else 0.005
self.assertReferenceChecks(
device_option=gc,
op=op,
inputs=[X],
reference=sumsqr_op,
threshold=threshold,
)
@given(n=st.integers(5, 8), **hu.gcs)
def test_elementwise_avg(self, n, gc, dc):
X = np.random.rand(n).astype(np.float32)
def avg_op(X):
return [np.mean(X)]
op = core.CreateOperator(
"SumElements",
["X"],
["y"],
average=1
)
self.assertReferenceChecks(
device_option=gc,
op=op,
inputs=[X],
reference=avg_op,
)
self.assertGradientChecks(
device_option=gc,
op=op,
inputs=[X],
outputs_to_check=0,
outputs_with_grads=[0],
)
@serial.given(batch_size=st.integers(1, 3),
m=st.integers(1, 3),
n=st.integers(1, 4),
**hu.gcs)
def test_rowwise_max(self, batch_size, m, n, gc, dc):
X = np.random.rand(batch_size, m, n).astype(np.float32)
def rowwise_max(X):
return [np.max(X, axis=2)]
op = core.CreateOperator(
"RowwiseMax",
["x"],
["y"]
)
self.assertReferenceChecks(
device_option=gc,
op=op,
inputs=[X],
reference=rowwise_max,
)
@serial.given(batch_size=st.integers(1, 3),
m=st.integers(1, 3),
n=st.integers(1, 4),
**hu.gcs)
def test_columnwise_max(self, batch_size, m, n, gc, dc):
X = np.random.rand(batch_size, m, n).astype(np.float32)
def columnwise_max(X):
return [np.max(X, axis=1)]
op = core.CreateOperator(
"ColwiseMax",
["x"],
["y"]
)
self.assertReferenceChecks(
device_option=gc,
op=op,
inputs=[X],
reference=columnwise_max,
)
|
py | 1a514a9ead659e3dc203e8f8a33e830c544d6722 | # Copyright (c) OpenMMLab. All rights reserved.
import torch.nn as nn
from mmcls.models.builder import NECKS
from mmcls.models.necks import GlobalAveragePooling as _GlobalAveragePooling
@NECKS.register_module(force=True)
class GlobalAveragePooling(_GlobalAveragePooling):
"""Global Average Pooling neck.
Note that we use `view` to remove extra channel after pooling. We do not
use `squeeze` as it will also remove the batch dimension when the tensor
has a batch dimension of size 1, which can lead to unexpected errors.
"""
def __init__(self, kernel_size=None, stride=None):
super(GlobalAveragePooling, self).__init__()
if kernel_size is None and stride is None:
self.gap = nn.AdaptiveAvgPool2d((1, 1))
else:
self.gap = nn.AvgPool2d(kernel_size, stride)
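# Hedged shape sketch mirroring what the parent class's forward does with the
# `gap` module defined above; torch and the feature-map sizes are assumptions.
def _example_gap_shapes():
    import torch
    neck = GlobalAveragePooling()
    feats = torch.randn(1, 256, 7, 7)
    # pooling gives (1, 256, 1, 1); `view` flattens it to (1, 256) without
    # dropping the batch dimension the way `squeeze` would for batch size 1
    return neck.gap(feats).view(feats.size(0), -1).shape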
|
py | 1a514acece7677392e86c3f26e32cf93351c6fc0 | # Copyright 2019 Capital One Services, LLC
# Copyright The Cloud Custodian Authors.
# SPDX-License-Identifier: Apache-2.0
from gcp_common import BaseTest, event_data
class MLModelTest(BaseTest):
def test_models_query(self):
project_id = "cloud-custodian"
session_factory = self.replay_flight_data(
'ml-models-query', project_id)
policy = self.load_policy(
{
'name': 'ml-models-query',
'resource': 'gcp.ml-model'
},
session_factory=session_factory)
resources = policy.run()
self.assertEqual(len(resources), 1)
def test_models_get(self):
project_id = 'cloud-custodian'
name = "test_model"
factory = self.replay_flight_data('ml-model-get', project_id=project_id)
p = self.load_policy({
'name': 'ml-model-get',
'resource': 'gcp.ml-model',
'mode': {
'type': 'gcp-audit',
'methods': ['google.cloud.ml.v1.ModelService.CreateModel']
}
}, session_factory=factory)
exec_mode = p.get_execution_mode()
event = event_data('ml-model-create.json')
models = exec_mode.run(event, None)
self.assertIn(name, models[0]['name'])
class MLJobTest(BaseTest):
def test_jobs_query(self):
project_id = 'cloud-custodian'
session_factory = self.replay_flight_data(
'ml-jobs-query', project_id)
policy = self.load_policy(
{
'name': 'ml-jobs-query',
'resource': 'gcp.ml-job'
},
session_factory=session_factory)
resources = policy.run()
self.assertEqual(len(resources), 1)
def test_jobs_get(self):
project_id = 'cloud-custodian'
name = "test_job"
factory = self.replay_flight_data('ml-job-get', project_id=project_id)
p = self.load_policy({
'name': 'ml-job-get',
'resource': 'gcp.ml-job',
'mode': {
'type': 'gcp-audit',
'methods': ['google.cloud.ml.v1.JobService.CreateJob']
}
}, session_factory=factory)
exec_mode = p.get_execution_mode()
event = event_data('ml-job-create.json')
jobs = exec_mode.run(event, None)
self.assertIn(name, jobs[0]['jobId'])
|
py | 1a514afc1395fc91f516352eb75b2f8685867817 | #!/usr/bin/env python3
# coding=utf-8
# date 2021-10-23 13:27:29
# author calllivecn <[email protected]>
# Handled keys: volume adjustment (volume up / volume down)
# type 01 EV_KEY code 073 KEY_VOLUMEUP
# type 01 EV_KEY code 072 KEY_VOLUMEDOWN
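# Hedged sketch of how these key codes might be consumed with python-evdev
# (an assumed dependency); the device path is a placeholder.
def watch_volume_keys(device_path="/dev/input/event0"):
    from evdev import InputDevice, ecodes
    dev = InputDevice(device_path)
    for event in dev.read_loop():
        if event.type == ecodes.EV_KEY and event.value == 1:  # key press
            if event.code == ecodes.KEY_VOLUMEUP:
                print("volume up")
            elif event.code == ecodes.KEY_VOLUMEDOWN:
                print("volume down")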
|
py | 1a514b32789dff3133032364ea11f5c9b3738f5e | # -*- coding: utf-8 -*-
"""
Django settings
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os, sys
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'vqje&(s$kn!osyitq#y%y1)g7-63#ia#+45(d&c%7x7u)d!pn3'
# SECURITY WARNING: don't run with debug turned on in production!
# PYTHON version
PYTHON_VERSION = sys.version_info
TESTING = sys.argv[1:2] == ['test']
if TESTING:
DEBUG = False
else:
DEBUG = True
DEBUG_PROPAGATE_EXCEPTIONS = True
DEBUG = DEBUG_PROPAGATE_EXCEPTIONS
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
# 'django_admin_bootstrapped',
'bedjango_master',
'base',
'users',
'cachalot',
'debug_toolbar',
'material',
'material.admin',
'material.frontend',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django_countries',
'rosetta',
'cookielaw',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.locale.LocaleMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
'debug_toolbar.middleware.DebugToolbarMiddleware',
)
ROOT_URLCONF = 'base.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
# 'DIRS': ['../templates'],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'base.context_processors.breadcrumbs',
'base.context_processors.add_login_form'
],
'debug': DEBUG,
},
},
]
# WSGI_APPLICATION = 'base.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DATABASES = {
# 'default': {
# 'ENGINE': 'django.db.backends.postgresql',
# 'NAME': 'skeleton_database',
# 'USER': 'project',
# 'PASSWORD': 'project',
# 'HOST': '127.0.0.1',
# 'PORT': '5432',
# }
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Cache: http://django-cachalot.readthedocs.io/en/latest/quickstart.html
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
'LOCATION': '127.0.0.1:11211',
}
}
CACHALOT_ENABLED = True
CACHALOT_CACHE = 'default'
# Django toolbar https://django-debug-toolbar.readthedocs.io/en/stable/configuration.html
DEBUG_TOOLBAR_PANELS = [
'debug_toolbar.panels.versions.VersionsPanel',
'debug_toolbar.panels.timer.TimerPanel',
'debug_toolbar.panels.settings.SettingsPanel',
'debug_toolbar.panels.headers.HeadersPanel',
'debug_toolbar.panels.request.RequestPanel',
'debug_toolbar.panels.sql.SQLPanel',
'debug_toolbar.panels.staticfiles.StaticFilesPanel',
'debug_toolbar.panels.templates.TemplatesPanel',
'debug_toolbar.panels.cache.CachePanel',
'debug_toolbar.panels.signals.SignalsPanel',
'debug_toolbar.panels.logging.LoggingPanel',
'debug_toolbar.panels.redirects.RedirectsPanel',
'cachalot.panels.CachalotPanel',
]
def show_toolbar(request):
return DEBUG
DEBUG_TOOLBAR_CONFIG = {
"SHOW_TOOLBAR_CALLBACK": show_toolbar,
}
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LOCALE_PATHS = [
# '/django-app/locale',
os.path.join(BASE_DIR, 'locale')
]
AUTH_USER_MODEL = 'users.User'
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'base.validators.CustomPasswordValidator',
},
]
LANGUAGE_CODE = 'es-es'
LANGUAGES = (
('es', 'Espanol (Espana)'),
('en', 'English'),
)
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static/')
MEDIA_ROOT = os.path.join(BASE_DIR, 'media/')
MEDIA_URL = '/media/'
LOGIN_URL = '/'
PROJECT_ROOT = os.path.dirname(os.path.realpath(__file__))
FIXTURE_DIRS = ('bedjango_master/fixtures',)
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
# Django registration
# To indicate if the register its open.
REGISTRATION_OPEN = True
# https://django-registration.readthedocs.io/en/2.1.1/settings.html#django.conf.settings.REGISTRATION_OPEN
# Configuring the HMAC activation workflow
# To specify the days that the user must be activate his register.
ACCOUNT_ACTIVATION_DAYS = 1
# https://django-registration.readthedocs.io/en/2.1.1/settings.html#django.conf.settings.ACCOUNT_ACTIVATION_DAYS
# To specify a str to construct the activation code
REGISTRATION_SALT = 'registration'
# https://django-registration.readthedocs.io/en/2.1.1/settings.html#django.conf.settings.REGISTRATION_SALT
# Email config
EMAIL_FROM = '[email protected]'
EMAIL_TO = '[email protected]'
|
py | 1a514d23ebaf813ddee749d7e4824207327e9b03 | """
Provides install path information.
"""
import os
from esys.lsm.util.pathSearcher import PathSearcher
installDir = "/home/daniel/Documents/fing/esys-particle/src/danielfrascarelli-git/esys-particle"
binDir = os.path.join(installDir, "bin")
libDir = os.path.join(installDir, "lib")
pythonPkgDir = "/home/daniel/Documents/fing/esys-particle/src/danielfrascarelli-git/esys-particle/lib/python2.7/site-packages"
esysPkgDir = os.path.join(pythonPkgDir, "esys")
lsmPkgDir = os.path.join(esysPkgDir, "lsm")
pkgName = "ESyS-Particle"
version = "2.3.5"
pkgHomePageUrl = "https://launchpad.net/esys-particle/"
pkgDataDir = "/home/daniel/Documents/fing/esys-particle/src/danielfrascarelli-git/esys-particle/share/esys-particle"
povrayExe = "no"
_haveVtk = False
_havePovray = False
def getPovrayExePath():
"""
Attempts to return the absolute path of the "povray" executable
using the "PATH" environment variable. If the exe can't be found
on the "PATH" then this function returns the "povray" path which
    was found during installation. This function is a workaround
    for the SGI MPT mpirun, which seems to alter the user "PATH"
environment.
"""
absPath=PathSearcher().find("povray")
if ((absPath == None) or (absPath == "")):
absPath = povrayExe
return absPath
def getDataFilePath(dataFileName):
"""
Returns path for specified data file. Looks on path
C{L{pkgDataDir}:Data:.}
"""
return PathSearcher(pkgDataDir+":Data:.").which(dataFileName)
def haveVtk():
return _haveVtk
def havePovray():
return _havePovray
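# Hedged usage sketch: resolves the povray executable and a data file using the
# helpers above; the data file name is an illustrative placeholder.
if __name__ == "__main__":
    print("povray executable: %s" % getPovrayExePath())
    print("example data file: %s" % getDataFilePath("example.geo"))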
|
py | 1a514e64b225e638152d71b2c9960a550e5ffeb6 | from raptiformica.actions.slave import assimilate_machine
from tests.testcase import TestCase
class TestAssimilateMachine(TestCase):
def setUp(self):
self.log = self.set_up_patch('raptiformica.actions.slave.log')
self.download_artifacts = self.set_up_patch('raptiformica.actions.slave.download_artifacts')
self.advertise = self.set_up_patch('raptiformica.actions.slave.advertise')
self.ensure_route_to_new_neighbour = self.set_up_patch(
'raptiformica.actions.slave.ensure_route_to_new_neighbour'
)
def test_assimilate_machine_logs_assimilating_machine_message(self):
assimilate_machine('1.2.3.4', port=2222)
self.assertTrue(self.log.info.called)
def test_assimilate_machine_downloads_artifacts(self):
assimilate_machine('1.2.3.4', port=2222)
self.download_artifacts.assert_called_once_with('1.2.3.4', port=2222)
def test_assimilate_machine_sets_advertised_host_and_port_on_remote_machine(self):
assimilate_machine('1.2.3.4', port=2222)
self.advertise.assert_called_once_with('1.2.3.4', port=2222)
def test_assimilate_machine_ensures_route_to_new_neighbour(self):
assimilate_machine('1.2.3.4', port=2222)
self.ensure_route_to_new_neighbour.assert_called_once_with(
'1.2.3.4', port=2222,
compute_checkout_uuid=None
)
def test_assimilate_machine_update_ensures_route_to_new_neighbour_with_optional_uuid(self):
assimilate_machine('1.2.3.4', port=2222, uuid='some_uuid_1234')
self.ensure_route_to_new_neighbour.assert_called_once_with(
'1.2.3.4', port=2222,
compute_checkout_uuid='some_uuid_1234'
)
|
py | 1a514ea075ee795f945b6c8b6ee0bd96df05ed18 | import unittest
from six.moves import StringIO
import time
from robot import utils
from robot.utils.asserts import *
from robot.output.filelogger import FileLogger
from robot.utils.robottime import TimestampCache
class _FakeTimeCache(TimestampCache):
def __init__(self):
self.fake = time.mktime((2006, 6, 13, 8, 37, 42, 0, 0, 1)) + 0.123
TimestampCache.__init__(self)
def _get_epoch(self):
return self.fake
class TestFileLogger(unittest.TestCase):
def setUp(self):
utils.robottime.TIMESTAMP_CACHE = _FakeTimeCache()
FileLogger._get_writer = lambda *args: StringIO()
self.logger = FileLogger('whatever', 'INFO')
def tearDown(self):
utils.robottime.TIMESTAMP_CACHE = TimestampCache()
def test_write(self):
self.logger.write('my message', 'INFO')
expected = '20060613 08:37:42.123 | INFO | my message\n'
self._verify_message(expected)
self.logger.write('my 2nd msg\nwith 2 lines', 'ERROR')
expected += '20060613 08:37:42.123 | ERROR | my 2nd msg\nwith 2 lines\n'
self._verify_message(expected)
def test_write_helpers(self):
self.logger.info('my message')
expected = '20060613 08:37:42.123 | INFO | my message\n'
self._verify_message(expected)
self.logger.warn('my 2nd msg\nwith 2 lines')
expected += '20060613 08:37:42.123 | WARN | my 2nd msg\nwith 2 lines\n'
self._verify_message(expected)
def test_set_level(self):
self.logger.write('msg', 'DEBUG')
self._verify_message('')
self.logger.set_level('DEBUG')
self.logger.write('msg', 'DEBUG')
self._verify_message('20060613 08:37:42.123 | DEBUG | msg\n')
def _verify_message(self, expected):
assert_equals(self.logger._writer.getvalue(), expected)
if __name__ == "__main__":
unittest.main()
|
py | 1a51516b5ea8f70c8cab9732abc98557d1eafc0b | #!/usr/bin/env python3
_DEFAULT_DEPENDENCIES = [
"packages/data/**/*",
"packages/common/**/*",
"packages/course-landing/**/*",
"packages/{{site}}/**/*",
"yarn.lock",
]
_COURSE_LANDING_DEPENDENCIES = [
"packages/data/training/sessions.yml",
"packages/data/training/recommendations/**/*",
"packages/data/training/recommendations/**/*",
"packages/data/training/pictures/**/*",
"packages/common/**/*",
"packages/course-landing/**/*",
"packages/{{site}}/**/*",
"yarn.lock",
]
_ONDREJSIKA_THEME_DEPENDENCIES = [
"packages/data/**/*",
"packages/common/**/*",
"packages/ondrejsika-theme/**/*",
"packages/{{site}}/**/*",
"yarn.lock",
]
_ONDREJSIKA_SINGLEPAGE_DEPENDENCIES = _ONDREJSIKA_THEME_DEPENDENCIES + [
"packages/ondrejsika-singlepage/**/*",
]
PROD_SITES = {
"trainera.de": {
"dependencies": _ONDREJSIKA_THEME_DEPENDENCIES,
"cloudflare_workers": True,
},
"ondrej-sika.com": {
"dependencies": _ONDREJSIKA_THEME_DEPENDENCIES,
"cloudflare_workers": True,
},
"ondrej-sika.cz": {
"dependencies": _ONDREJSIKA_THEME_DEPENDENCIES,
"cloudflare_workers": True,
},
"ondrej-sika.de": {
"dependencies": _ONDREJSIKA_SINGLEPAGE_DEPENDENCIES,
"cloudflare_workers": True,
},
"trainera.cz": {
"dependencies": _ONDREJSIKA_THEME_DEPENDENCIES,
"cloudflare_workers": True,
},
"skolenie.kubernetes.sk": {
"dependencies": _COURSE_LANDING_DEPENDENCIES,
},
"training.kubernetes.is": {
"dependencies": _COURSE_LANDING_DEPENDENCIES,
},
"training.kubernetes.lu": {
"dependencies": _COURSE_LANDING_DEPENDENCIES,
},
"cal-api.sika.io": {
"dependencies": _DEFAULT_DEPENDENCIES,
},
"ccc.oxs.cz": {
"dependencies": _DEFAULT_DEPENDENCIES,
},
"sika.blog": {
"dependencies": _DEFAULT_DEPENDENCIES,
},
"static.sika.io": {
"dependencies": _DEFAULT_DEPENDENCIES,
},
"sikahq.com": {
"dependencies": _DEFAULT_DEPENDENCIES,
},
"ondrejsika.is": {
"dependencies": _ONDREJSIKA_SINGLEPAGE_DEPENDENCIES,
"cloudflare_workers": True,
},
"skoleni.io": {
"dependencies": _DEFAULT_DEPENDENCIES,
"cloudflare_workers": True,
},
}
ALL_SITES = {}
ALL_SITES.update(PROD_SITES)
PRIORITY_SITES = (
"ondrej-sika.cz",
"ondrej-sika.com",
"trainera.cz",
"skoleni.io",
"trainera.de",
)
SUFFIX = ".panda.k8s.oxs.cz"
SITES = ALL_SITES.keys()
out = []
out.append(
"""# Don't edit this file maually
# This file is generated by ./generate-gitlab-ci.py
image: sikalabs/ci
stages:
- start
- build_docker_priority
- deploy_dev_priority
- deploy_prod_priority
- build_docker
- deploy_dev
- deploy_prod
variables:
DOCKER_BUILDKIT: '1'
GIT_CLEAN_FLAGS: "-ffdx -e node_modules -e .yarn-cache"
start:
stage: start
script: echo "start job - you can't create empty child pipeline"
"""
)
def generate_dependencies(site):
if site not in ALL_SITES:
return """ - packages/data/**/*
- packages/common/**/*
- packages/course-landing/**/*
- packages/{{site}}/**/*
- yarn.lock""".replace(
"{{site}}", site
)
return "\n".join(
(" - " + line).replace("{{site}}", site)
for line in ALL_SITES[site]["dependencies"]
)
for site in SITES:
if site in ALL_SITES and ALL_SITES[site].get("cloudflare_workers"):
pass
else:
out.append(
"""
%(site)s build docker:
stage: build_docker%(priority_suffix)s
image: sikalabs/ci-node
needs: []
variables:
GIT_CLEAN_FLAGS: -ffdx -e node_modules -e .yarn-cache
script:
- yarn --cache-folder .yarn-cache
- rm -rf packages/%(site)s/out
- mkdir -p packages/%(site)s/public/api
- slu static-api version --set-git-clean --set-git-ref $CI_COMMIT_REF_NAME -e CI_PIPELINE_ID=$CI_PIPELINE_ID -e "GITLAB_USER_LOGIN=$GITLAB_USER_LOGIN" -e "CI_COMMIT_TITLE=$CI_COMMIT_TITLE" > packages/%(site)s/public/api/version.json
- yarn run static-%(site)s
- docker login $CI_REGISTRY -u $CI_REGISTRY_USER -p $CI_REGISTRY_PASSWORD
- cp ci/docker/* packages/%(site)s/
- docker build -t $CI_REGISTRY_IMAGE/%(site)s:$CI_COMMIT_SHORT_SHA packages/%(site)s
- rm packages/%(site)s/Dockerfile
- rm packages/%(site)s/nginx-site.conf
- docker push $CI_REGISTRY_IMAGE/%(site)s:$CI_COMMIT_SHORT_SHA
except:
variables:
- $EXCEPT_BUILD
- $EXCEPT_BUILD_DOCKER
only:
changes:
%(dependencies)s
"""
% {
"site": site,
"priority_suffix": "_priority" if site in PRIORITY_SITES else "",
"dependencies": generate_dependencies(site),
}
)
if site in PROD_SITES:
if PROD_SITES[site].get("cloudflare_workers"):
out.append(
"""
%(site)s prod deploy cloudflare:
image: sikalabs/ci-node
stage: deploy_prod%(priority_suffix)s
script:
- yarn --cache-folder .yarn-cache
- yarn --cache-folder .yarn-cache add @cloudflare/wrangler -W
- rm -rf packages/%(site)s/out
- mkdir -p packages/%(site)s/public/api
- git status
- slu static-api version --set-git-clean --set-git-ref $CI_COMMIT_REF_NAME -e CI_PIPELINE_ID=$CI_PIPELINE_ID -e "GITLAB_USER_LOGIN=$GITLAB_USER_LOGIN" -e "CI_COMMIT_TITLE=$CI_COMMIT_TITLE" > packages/%(site)s/public/api/version.json
- yarn run deploy-%(site)s
except:
variables:
- $EXCEPT_DEPLOY
- $EXCEPT_DEPLOY_CLOUDFLARE
- $EXCEPT_DEPLOY_K8S
- $EXCEPT_DEPLOY_PROD
- $EXCEPT_DEPLOY_PROD_K8S
only:
refs:
- master
changes:
%(dependencies)s
environment:
name: k8s/prod/%(site)s
url: https://%(site)s
dependencies: []
"""
% {
"site": site,
"suffix": SUFFIX,
"name": site.replace(".", "-"),
"priority_suffix": "_priority" if site in PRIORITY_SITES else "",
"dependencies": generate_dependencies(site),
}
)
else:
out.append(
"""
%(site)s prod deploy k8s:
needs:
- %(site)s build docker
stage: deploy_prod%(priority_suffix)s
variables:
GIT_STRATEGY: none
KUBECONFIG: .kubeconfig
script:
- docker login $CI_REGISTRY -u $CI_REGISTRY_USER -p $CI_REGISTRY_PASSWORD
- docker pull $CI_REGISTRY_IMAGE/%(site)s:$CI_COMMIT_SHORT_SHA
- docker tag $CI_REGISTRY_IMAGE/%(site)s:$CI_COMMIT_SHORT_SHA $CI_REGISTRY_IMAGE/%(site)s
- docker push $CI_REGISTRY_IMAGE/%(site)s
except:
variables:
- $EXCEPT_DEPLOY
- $EXCEPT_DEPLOY_K8S
- $EXCEPT_DEPLOY_PROD
- $EXCEPT_DEPLOY_PROD_K8S
only:
refs:
- master
changes:
%(dependencies)s
environment:
name: k8s/prod/%(site)s
url: https://%(site)s
kubernetes:
namespace: default
dependencies: []
"""
% {
"site": site,
"suffix": SUFFIX,
"name": site.replace(".", "-"),
"priority_suffix": "_priority" if site in PRIORITY_SITES else "",
"dependencies": generate_dependencies(site),
}
)
with open(".gitlab-ci.generated.yml", "w") as f:
f.write("".join(out))
|
py | 1a5151ce0ccff335ed9a958889c9e08273b0fa12 | # Copyright (C) 2003-2007 Robey Pointer <[email protected]>
#
# This file is part of paramiko.
#
# Paramiko is free software; you can redistribute it and/or modify it under the
# terms of the GNU Lesser General Public License as published by the Free
# Software Foundation; either version 2.1 of the License, or (at your option)
# any later version.
#
# Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Paramiko; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
"""
Implementation of an SSH2 "message".
"""
import struct
import cStringIO
from paramiko import util
class Message (object):
"""
An SSH2 I{Message} is a stream of bytes that encodes some combination of
strings, integers, bools, and infinite-precision integers (known in python
as I{long}s). This class builds or breaks down such a byte stream.
Normally you don't need to deal with anything this low-level, but it's
exposed for people implementing custom extensions, or features that
paramiko doesn't support yet.
"""
def __init__(self, content=None):
"""
Create a new SSH2 Message.
@param content: the byte stream to use as the Message content (passed
in only when decomposing a Message).
@type content: string
"""
if content != None:
self.packet = cStringIO.StringIO(content)
else:
self.packet = cStringIO.StringIO()
def __str__(self):
"""
Return the byte stream content of this Message, as a string.
@return: the contents of this Message.
@rtype: string
"""
return self.packet.getvalue()
def __repr__(self):
"""
Returns a string representation of this object, for debugging.
@rtype: string
"""
return 'paramiko.Message(' + repr(self.packet.getvalue()) + ')'
def rewind(self):
"""
Rewind the message to the beginning as if no items had been parsed
out of it yet.
"""
self.packet.seek(0)
def get_remainder(self):
"""
Return the bytes of this Message that haven't already been parsed and
returned.
@return: a string of the bytes not parsed yet.
@rtype: string
"""
position = self.packet.tell()
remainder = self.packet.read()
self.packet.seek(position)
return remainder
def get_so_far(self):
"""
Returns the bytes of this Message that have been parsed and returned.
The string passed into a Message's constructor can be regenerated by
concatenating C{get_so_far} and L{get_remainder}.
@return: a string of the bytes parsed so far.
@rtype: string
"""
position = self.packet.tell()
self.rewind()
return self.packet.read(position)
def get_bytes(self, n):
"""
Return the next C{n} bytes of the Message, without decomposing into
an int, string, etc. Just the raw bytes are returned.
@return: a string of the next C{n} bytes of the Message, or a string
of C{n} zero bytes, if there aren't C{n} bytes remaining.
@rtype: string
"""
b = self.packet.read(n)
if len(b) < n:
return '\x00'*n
return b
def get_byte(self):
"""
Return the next byte of the Message, without decomposing it. This
is equivalent to L{get_bytes(1)<get_bytes>}.
@return: the next byte of the Message, or C{'\000'} if there aren't
any bytes remaining.
@rtype: string
"""
return self.get_bytes(1)
def get_boolean(self):
"""
Fetch a boolean from the stream.
@return: C{True} or C{False} (from the Message).
@rtype: bool
"""
b = self.get_bytes(1)
return b != '\x00'
def get_int(self):
"""
Fetch an int from the stream.
@return: a 32-bit unsigned integer.
@rtype: int
"""
return struct.unpack('>I', self.get_bytes(4))[0]
def get_int64(self):
"""
Fetch a 64-bit int from the stream.
@return: a 64-bit unsigned integer.
@rtype: long
"""
return struct.unpack('>Q', self.get_bytes(8))[0]
def get_mpint(self):
"""
Fetch a long int (mpint) from the stream.
@return: an arbitrary-length integer.
@rtype: long
"""
return util.inflate_long(self.get_string())
def get_string(self):
"""
Fetch a string from the stream. This could be a byte string and may
contain unprintable characters. (It's not unheard of for a string to
contain another byte-stream Message.)
@return: a string.
@rtype: string
"""
return self.get_bytes(self.get_int())
def get_list(self):
"""
Fetch a list of strings from the stream. These are trivially encoded
as comma-separated values in a string.
@return: a list of strings.
@rtype: list of strings
"""
return self.get_string().split(',')
def add_bytes(self, b):
"""
Write bytes to the stream, without any formatting.
@param b: bytes to add
@type b: str
"""
self.packet.write(b)
return self
def add_byte(self, b):
"""
Write a single byte to the stream, without any formatting.
@param b: byte to add
@type b: str
"""
self.packet.write(b)
return self
def add_boolean(self, b):
"""
Add a boolean value to the stream.
@param b: boolean value to add
@type b: bool
"""
if b:
self.add_byte('\x01')
else:
self.add_byte('\x00')
return self
def add_int(self, n):
"""
Add an integer to the stream.
@param n: integer to add
@type n: int
"""
self.packet.write(struct.pack('>I', n))
return self
def add_int64(self, n):
"""
Add a 64-bit int to the stream.
@param n: long int to add
@type n: long
"""
self.packet.write(struct.pack('>Q', n))
return self
def add_mpint(self, z):
"""
Add a long int to the stream, encoded as an infinite-precision
integer. This method only works on positive numbers.
@param z: long int to add
@type z: long
"""
self.add_string(util.deflate_long(z))
return self
def add_string(self, s):
"""
Add a string to the stream.
@param s: string to add
@type s: str
"""
self.add_int(len(s))
self.packet.write(s)
return self
def add_list(self, l):
"""
Add a list of strings to the stream. They are encoded identically to
a single string of values separated by commas. (Yes, really, that's
how SSH2 does it.)
@param l: list of strings to add
@type l: list(str)
"""
self.add_string(','.join(l))
return self
def _add(self, i):
if type(i) is str:
return self.add_string(i)
elif type(i) is int:
return self.add_int(i)
elif type(i) is long:
if i > 0xffffffffL:
return self.add_mpint(i)
else:
return self.add_int(i)
elif type(i) is bool:
return self.add_boolean(i)
elif type(i) is list:
return self.add_list(i)
else:
raise Exception('Unknown type')
def add(self, *seq):
"""
Add a sequence of items to the stream. The values are encoded based
on their type: str, int, bool, list, or long.
@param seq: the sequence of items
@type seq: sequence
@bug: longs are encoded non-deterministically. Don't use this method.
"""
for item in seq:
self._add(item)
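# Hedged round-trip sketch using only the methods defined above; the field
# values are arbitrary examples, not part of any particular SSH2 packet format.
if __name__ == '__main__':
    m = Message()
    m.add_byte(chr(30))
    m.add_int(0xcafe)
    m.add_string('example')
    m2 = Message(str(m))
    assert m2.get_byte() == chr(30)
    assert m2.get_int() == 0xcafe
    assert m2.get_string() == 'example'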
|
py | 1a5151e388ea281bd20a69712c137040005a9b9c | #!/usr/bin/env python3.4
"""
kill_python.py,
copyright (c) 2015 by Stefan Lehmann
"""
import os
import psutil
PROC = "python.exe"
my_pid = os.getpid()
i = 0
for p in psutil.process_iter():
if p.name() == PROC and p.pid != my_pid:
i += 1
p.kill()
print("Killed {} instances of process '{}'.".format(i, PROC))
|
py | 1a51526cb67accf3b7913712410b52b529e69465 | """Common configure functions for vlan"""
# Python
import logging
# Unicon
from unicon.core.errors import SubCommandFailure
# Genie
from genie.metaparser.util.exceptions import SchemaEmptyParserError
log = logging.getLogger(__name__)
def config_vlan(device, vlanid):
""" Configures a VLAN on Interface or Device
e.g.
vlan 666
Args:
device (`obj`): Device object
vlanid (`str`): Vlan id
Return:
None
Raise:
SubCommandFailure: Failed configuring interface
"""
configs = []
configs.append("vlan {vlanid}".format(vlanid=vlanid))
configs.append("no shutdown")
try:
device.configure(configs)
except SubCommandFailure as e:
raise SubCommandFailure(
'Could not configure vlan {vlanid}, Error: {error}'.format(
vlanid=vlanid, error=e)
)
def config_ip_on_vlan(device, vlan_id, ipv4_address=None,
subnetmask=None, ipv6_address=None,
ipv6_prefix_len=None):
"""Configure an IPv4/IPv6 address on a vlan
Args:
device (`obj`): Device object
vlanid (`str`): Vlan id
ipv4_address (`str`): IPv4 address
subnetmask (`str`): Subnet mask to be used for IPv4 address
ipv6_address (`str`): Ipv6 address
ipv6_prefix_len (`int`): length of IPv6 prefix
Return:
None
Raise:
SubCommandFailure: Failed to configure Ipv4/Ipv6 address on vlan
"""
try:
if ipv4_address and subnetmask:
device.configure([f'interface vlan {vlan_id}',
f'ip address {ipv4_address} {subnetmask}'])
if ipv6_address and ipv6_prefix_len:
device.configure([f'interface vlan {vlan_id}',
'ipv6 enable',
f'ipv6 address {ipv6_address}/{ipv6_prefix_len}'])
except SubCommandFailure as e:
raise SubCommandFailure(
f'Could not configure Ipv4/Ipv6 address on vlan {vlan_id}, '
f'Error: {e}'
)
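# Hedged usage sketch: `device` is assumed to be a connected pyATS device
# object; the VLAN id and addresses are arbitrary examples. Not executed at
# import time.
def _example_config_vlan(device):
    # create VLAN 100 and bring it up on the device
    config_vlan(device, "100")
    # assign an illustrative IPv4 address to the matching SVI
    config_ip_on_vlan(device, "100", ipv4_address="10.0.100.1",
                      subnetmask="255.255.255.0")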
def unconfig_vlan(device, vlanid):
""" vlan on Interface or Device configuration removal
Args:
device (`obj`): Device object
vlanid (`str`): Vlan id
Return:
None
Raise:
SubCommandFailure: Failed configuring interface
"""
try:
device.configure("no vlan {vlanid}".format(vlanid=vlanid))
except SubCommandFailure as e:
raise SubCommandFailure(
'Could not remove vlan {vlanid}, Error: {error}'.format(
vlanid=vlanid, error=e)
)
def config_vlan_tag_native(device):
""" Configure vlan dot1q tag native
Args:
device (`obj`): Device object
Return:
None
Raise:
SubCommandFailure: Failed configuring device
"""
try:
device.configure("vlan dot1q tag native")
except SubCommandFailure as e:
raise SubCommandFailure(
'Could not configure vlan dot1q tag native, Error: {error}'.format(
error=e)
)
def configure_vlan_vpls(device, vlanid):
""" Config vpls on vlan
Args:
device (`obj`): Device object
vlanid (`str`): Vlan id
Return:
None
Raise:
SubCommandFailure: Failed configuring interface
"""
try:
device.configure(
[
"vlan configuration {vlanid}".format(vlanid=vlanid),
"member vfi vpls",
"vlan dot1q tag native",
]
)
except SubCommandFailure as e:
raise SubCommandFailure(
'Could not configure vpls on vlan {vlanid}, Error: {error}'.format(
vlanid=vlanid, error=e)
)
def configure_vtp_mode(device,mode):
""" Configures global VTP mode
Args:
device ('obj'): device to use
mode ('str'): VTP mode (i.e transparent, client, server)
Returns:
None
Raises:
SubCommandFailure
"""
try:
device.configure('vtp mode {mode}'.format(mode=mode))
except SubCommandFailure:
raise SubCommandFailure(
'Could not configure VTP mode'
)
def configure_pvlan_svi_mapping(device, svi_vlan, mapping_vlan):
""" Configures Private Vlan Mapping on SVI
Args:
device ('obj'): device to use
svi_vlan ('str'): SVI interface
mapping_vlan ('str'): Private vlan to map to
Returns:
None
Raises:
SubCommandFailure
"""
# Initialize list variable
config_list = []
config_list.append("interface {svi_vlan}".format(svi_vlan=svi_vlan))
config_list.append("private-vlan mapping {mapping_vlan}".format(mapping_vlan=mapping_vlan))
try:
device.configure(config_list)
except SubCommandFailure:
raise SubCommandFailure(
'Could not configure PVLAN-mapping'
)
def configure_pvlan_primary(device, primary_vlan, secondary_vlan=None):
""" Configures Primary Private Vlan
Args:
device ('obj'): device to use
primary_vlan ('str'): Primary private vlan
secondary_vlan ('str',optional): Secondary isolated/community vlan
Returns:
None
Raises:
SubCommandFailure
"""
config_list = []
# vlan 100
# private-vlan primary
config_list.append("vlan {primary_vlan} \n"
"private-vlan primary".format(primary_vlan=primary_vlan))
# private-vlan association 101
if secondary_vlan != None:
config_list.append("private-vlan association {secondary_vlan}".format(secondary_vlan=secondary_vlan))
try:
device.configure(config_list)
except SubCommandFailure:
raise SubCommandFailure(
'Could not configure Primary Pvlan'
)
def configure_pvlan_type(device,vlan,pvlan_type):
""" Configures Isolated Private Vlan
Args:
device ('obj'): device to use
vlan ('str'): Vlan id
pvlan_type ('str'): Private vlan type (i.e isolated, primary, community)
Returns:
None
Raises:
SubCommandFailure
"""
# Initialize list variable
config_list = []
config_list.append("vlan {vlan}".format(vlan=vlan))
config_list.append("private-vlan {pvlan_type}".format(pvlan_type=pvlan_type))
try:
device.configure(config_list)
except SubCommandFailure:
raise SubCommandFailure(
'Could not configure Primary Pvlan'
) |
py | 1a5153335984538f4c53cb74eacbb21a24bc1761 | # Copyright 2016 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Creates a VM with user specified disks attached to it."""
COMPUTE_URL_BASE = 'https://www.googleapis.com/compute/v1/'
def DiskName(context, diskobj):
return context.env['deployment'] + '-disk-' + diskobj['name']
def GenerateConfig(context):
"""Creates configuration."""
resources = []
project = context.env['project']
# create disks resources
for disk_obj in context.properties['disks']:
resources.append({'name': DiskName(context, disk_obj),
'type': 'compute.v1.disk',
'properties': {
'zone': context.properties['zone'],
'sizeGb': str(disk_obj['sizeGb']),
'type': ''.join([COMPUTE_URL_BASE,
'projects/', project, '/zones/',
context.properties['zone'],
'/diskTypes/', disk_obj['diskType']])
}
})
disks = []
disks.append({'deviceName': 'boot',
'type': 'PERSISTENT',
'boot': True,
'autoDelete': True,
'initializeParams': {
'diskName': project + '-boot',
'sourceImage': ''.join([COMPUTE_URL_BASE, 'projects/',
'debian-cloud/global/images/',
'family/debian-8'])
}
})
for disk_obj in context.properties['disks']:
disks.append({'deviceName': DiskName(context, disk_obj),
'type': 'PERSISTENT',
'source': ''.join(['$(ref.', DiskName(context, disk_obj),
'.selfLink)']),
'autoDelete': True})
# create vm with disks
resources.append({'name': context.env['deployment'] + '-vm',
'type': 'compute.v1.instance',
'properties': {
'zone': context.properties['zone'],
'machineType': ''.join([COMPUTE_URL_BASE, 'projects/',
project, '/zones/',
context.properties['zone'],
'/machineTypes/f1-micro']),
'networkInterfaces': [{
'network': ''.join([COMPUTE_URL_BASE,
'projects/', project,
'/global/networks/default']),
'accessConfigs': [{
'name': 'External NAT',
'type': 'ONE_TO_ONE_NAT'}],
}],
'disks': disks
}
})
return {'resources': resources}
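# Hedged local sketch: exercises GenerateConfig with a stand-in for the
# Deployment Manager context object; all property values are illustrative.
if __name__ == '__main__':
    class _FakeContext(object):
        env = {'project': 'my-project', 'deployment': 'demo'}
        properties = {'zone': 'us-central1-f',
                      'disks': [{'name': 'backup', 'sizeGb': 10,
                                 'diskType': 'pd-standard'}]}
    print(GenerateConfig(_FakeContext()))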
|
py | 1a51537b06b9f225a554fa99fb7823584832c85c | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
import keras
import numpy as np
from model.skin_resnet import skin_resnet
from keras.preprocessing import image
from keras.preprocessing.image import transform_matrix_offset_center, apply_transform
def img_rotation(x, theta,row_axis=0, col_axis=1, channel_axis=2,
fill_mode='nearest', cval=0.):
"""modifed from keras random_rotation
# Arguments
x: Input tensor. Must be 3D.
theta: Rotation range, in degrees.
row_axis: Index of axis for rows in the input tensor.
col_axis: Index of axis for columns in the input tensor.
channel_axis: Index of axis for channels in the input tensor.
fill_mode: Points outside the boundaries of the input
are filled according to the given mode
(one of `{'constant', 'nearest', 'reflect', 'wrap'}`).
cval: Value used for points outside the boundaries
of the input if `mode='constant'`.
# Returns
Rotated Numpy image tensor.
"""
# theta = np.pi / 180 * np.random.uniform(-rg, rg)
rotation_matrix = np.array([[np.cos(theta), -np.sin(theta), 0],
[np.sin(theta), np.cos(theta), 0],
[0, 0, 1]])
h, w = x.shape[row_axis], x.shape[col_axis]
transform_matrix = transform_matrix_offset_center(rotation_matrix, h, w)
x = apply_transform(x, transform_matrix, channel_axis, fill_mode, cval)
return x
def img_norm(img_array):
"""tensorflow tensor form
"""
img_array = img_array.reshape((1,) + img_array.shape)
# normalization:
for i in range(img_array.shape[0]):
for k in range(3):
img_array[i,::,::,k] -= np.mean(img_array[i,::,::,k])
img_array[i,::,::,k] /= np.std(img_array[i,::,::,k]) + 1e-7
return img_array
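# Hedged sketch: rotates and normalises a synthetic image array with the two
# helpers above; the random input and the angle value are arbitrary stand-ins.
def _example_preprocess():
    dummy = np.random.rand(224, 224, 3).astype("float32")
    rotated = img_rotation(dummy, np.pi / 2)  # quarter-turn rotation
    return img_norm(rotated)  # shape (1, 224, 224, 3), per-channel normalised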
def pred(image_path):
sz = 224 # resize image into (224,224,3)
theta = [0,-90,90,120,270]
preds = np.ndarray(shape=(5), dtype=float)
    model = skin_resnet()  # load the network
    model.load_weights("model_weight/resnet50_turnable_152_d2_3.h5")  # load the trained weights
img = image.load_img(image_path, target_size = (sz,sz))
img_array = image.img_to_array(img)
i = 0
for thet in theta:
img_array2 = img_rotation(img_array, thet)
img_array2 = img_norm(img_array2)
results = model.predict(img_array2) * 100
preds[i] = results[0,0]
i = i + 1
    pred_f = np.median(preds)  # pred_f is the final prediction result
    print ("*"*50)
    print "Skin similarity: " + str("%.2f") % pred_f + "%"
print ("*"*50)
return pred_f
if __name__ == '__main__':
print ("่ฏท่พๅ
ฅๅพๅๆๅจไฝ็ฝฎ:")
image_path = raw_input() # ่พๅ
ฅๅพ็
pred(image_path)
|
py | 1a5153d903f64f05201f1f9e962b80998744aaa1 | """
-*- coding: utf-8 -*-
@github{
title = {KoSpeech: Open Source Project for Korean End-to-End Automatic Speech Recognition in PyTorch},
author = {Soohwan Kim, Seyoung Bae, Cheolhwang Won, Suwon Park},
link = {https://github.com/sooftware/KoSpeech},
year = {2020}
}
"""
import sys
import argparse
import random
import warnings
import torch
from torch import optim, nn
sys.path.append('..')
from kospeech.data.data_loader import split_dataset, load_data_list
from kospeech.optim.loss import CrossEntrypyLoss
from kospeech.optim.lr_scheduler import RampUpLR
from kospeech.optim.optimizer import Optimizer
from kospeech.trainer.supervised_trainer import SupervisedTrainer
from kospeech.model_builder import build_model
from kospeech.opts import print_opts, build_train_opts, build_model_opts, build_preprocess_opts
from kospeech.utils import PAD_token, char2id, check_envirionment
def train(opt):
random.seed(opt.seed)
torch.manual_seed(opt.seed)
torch.cuda.manual_seed_all(opt.seed)
device = check_envirionment(opt.use_cuda)
if not opt.resume:
audio_paths, script_paths = load_data_list(opt.data_list_path, opt.dataset_path)
epoch_time_step, trainset_list, validset = split_dataset(opt, audio_paths, script_paths)
model = build_model(opt, device)
optimizer = optim.Adam(model.module.parameters(), lr=opt.init_lr, weight_decay=1e-05)
if opt.rampup_period > 0:
scheduler = RampUpLR(optimizer, opt.init_lr, opt.high_plateau_lr, opt.rampup_period)
optimizer = Optimizer(optimizer, scheduler, opt.rampup_period, opt.max_grad_norm)
else:
optimizer = Optimizer(optimizer, None, 0, opt.max_grad_norm)
criterion = CrossEntrypyLoss(len(char2id), PAD_token, opt.label_smoothing, dim=-1).to(device)
else:
trainset_list = None
validset = None
model = None
optimizer = None
criterion = None
epoch_time_step = None
trainer = SupervisedTrainer(optimizer=optimizer, criterion=criterion, trainset_list=trainset_list,
validset=validset, num_workers=opt.num_workers,
high_plateau_lr=opt.high_plateau_lr, low_plateau_lr=opt.low_plateau_lr,
decay_threshold=opt.decay_threshold, exp_decay_period=opt.exp_decay_period,
device=device, teacher_forcing_step=opt.teacher_forcing_step,
min_teacher_forcing_ratio=opt.min_teacher_forcing_ratio, print_every=opt.print_every,
save_result_every=opt.save_result_every, checkpoint_every=opt.checkpoint_every,
architecture=opt.architecture)
model = trainer.train(model=model, batch_size=opt.batch_size, epoch_time_step=epoch_time_step,
num_epochs=opt.num_epochs, teacher_forcing_ratio=opt.teacher_forcing_ratio, resume=opt.resume)
return model
def _get_parser():
""" Get arguments parser """
parser = argparse.ArgumentParser(description='KoSpeech')
parser.add_argument('--mode', type=str, default='train')
build_preprocess_opts(parser)
build_model_opts(parser)
build_train_opts(parser)
return parser
def main():
warnings.filterwarnings('ignore')
parser = _get_parser()
opt = parser.parse_args()
print_opts(opt, opt.mode)
train(opt)
if __name__ == '__main__':
main()
|
py | 1a51544cda139c5fc9e21e09682b8a3773281835 | #!/usr/bin/env python
# -*- coding: iso-8859-15 -*-
from google.appengine.api import users
from google.appengine.ext import webapp
from google.appengine.api.urlfetch import DownloadError
from library import login
from boto.ec2.connection import *
class AlleVolumesLoeschenDefinitiv(webapp.RequestHandler):
def get(self):
mobile = self.request.get('mobile')
if mobile != "true":
mobile = "false"
        # Determine the username
username = users.get_current_user()
conn_region, regionname = login(username)
try:
            # List of the volumes
liste_volumes = conn_region.get_all_volumes()
except EC2ResponseError:
            # If it fails...
fehlermeldung = "10"
self.redirect('/volumes?mobile='+str(mobile)+'&message='+fehlermeldung)
except DownloadError:
            # This exception guards against both of these errors:
# DownloadError: ApplicationError: 2 timed out
# DownloadError: ApplicationError: 5
fehlermeldung = "8"
self.redirect('/volumes?mobile='+str(mobile)+'&message='+fehlermeldung)
else:
            # If it succeeded...
            # Number of volumes in the list
laenge_liste_volumes = len(liste_volumes)
for i in range(laenge_liste_volumes):
try:
                    # Delete the volume
conn_region.delete_volume(liste_volumes[i].id)
except EC2ResponseError:
                    # If it fails...
fehlermeldung = "26"
self.redirect('/volumes?mobile='+str(mobile)+'&message='+fehlermeldung)
except DownloadError:
                    # This exception guards against both of these errors:
# DownloadError: ApplicationError: 2 timed out
# DownloadError: ApplicationError: 5
fehlermeldung = "8"
self.redirect('/volumes?mobile='+str(mobile)+'&message='+fehlermeldung)
fehlermeldung = "27"
self.redirect('/volumes?mobile='+str(mobile)+'&message='+fehlermeldung)
|
py | 1a515464769cb988dca585fcc3dd4765dda8bc8e | from __future__ import annotations
from enum import Enum, auto as _auto
import typing
from dataclasses import dataclass
from dianascript.chexpr import *
from dianascript.chlhs import *
@dataclass
class SFunc:
name:str
args:list[str]
body:list[Chstmt]
loc: tuple[int, int] | None = None
def __or__(self, loc):
self.loc = loc
return self
@dataclass
class SDecl:
vars:list[str]
loc: tuple[int, int] | None = None
def __or__(self, loc):
self.loc = loc
return self
@dataclass
class SAssign:
targets:list[Chlhs]
value:Chexpr
loc: tuple[int, int] | None = None
def __or__(self, loc):
self.loc = loc
return self
@dataclass
class SExpr:
expr:Chexpr
loc: tuple[int, int] | None = None
def __or__(self, loc):
self.loc = loc
return self
@dataclass
class SFor:
target:Chlhs|None
iter:Chexpr
body:list[Chstmt]
loc: tuple[int, int] | None = None
def __or__(self, loc):
self.loc = loc
return self
@dataclass
class SLoop:
block:list[Chstmt]
loc: tuple[int, int] | None = None
def __or__(self, loc):
self.loc = loc
return self
@dataclass
class SIf:
cond:Chexpr
then:list[Chstmt]
orelse:list[Chstmt]|None
loc: tuple[int, int] | None = None
def __or__(self, loc):
self.loc = loc
return self
@dataclass
class SBreak:
loc: tuple[int, int] | None = None
def __or__(self, loc):
self.loc = loc
return self
@dataclass
class SContinue:
loc: tuple[int, int] | None = None
def __or__(self, loc):
self.loc = loc
return self
@dataclass
class SReturn:
val:Chexpr|None
loc: tuple[int, int] | None = None
def __or__(self, loc):
self.loc = loc
return self
Chstmt = SFunc | SDecl | SAssign | SExpr | SFor | SLoop | SIf | SBreak | SContinue | SReturn |
py | 1a5154a2bf609a261dbfd43b0acb8557f5b0cdc2 | import json
from collections import defaultdict
from typing import List
from sqlalchemy import desc
from sqlalchemy.future import select
from app.crud.test_case.ConstructorDao import ConstructorDao
from app.crud.test_case.TestCaseAssertsDao import TestCaseAssertsDao
from app.crud.test_case.TestCaseDirectory import PityTestcaseDirectoryDao
from app.crud.test_case.TestcaseDataDao import PityTestcaseDataDao
from app.models import Session, DatabaseHelper, async_session
from app.models.constructor import Constructor
from app.models.schema.testcase_schema import TestCaseForm
from app.models.test_case import TestCase
from app.utils.logger import Log
class TestCaseDao(object):
log = Log("TestCaseDao")
@staticmethod
async def list_test_case(directory_id: int = None, name: str = "", create_user: str = None):
try:
filters = [TestCase.deleted_at == None]
if directory_id:
parents = await PityTestcaseDirectoryDao.get_directory_son(directory_id)
filters = [TestCase.deleted_at == None, TestCase.directory_id.in_(parents)]
if name:
filters.append(TestCase.name.like(f"%{name}%"))
if create_user:
filters.append(TestCase.create_user == create_user)
async with async_session() as session:
sql = select(TestCase).where(*filters).order_by(TestCase.name.asc())
result = await session.execute(sql)
return result.scalars().all()
except Exception as e:
            TestCaseDao.log.error(f"Failed to get test cases: {str(e)}")
            raise Exception(f"Failed to get test cases: {str(e)}")
@staticmethod
async def get_test_case_by_directory_id(directory_id: int):
try:
async with async_session() as session:
sql = select(TestCase).where(TestCase.deleted_at == None,
TestCase.directory_id == directory_id).order_by(TestCase.name.asc())
result = await session.execute(sql)
ans = []
case_map = dict()
for item in result.scalars():
ans.append({"title": item.name, "key": "testcase_{}".format(item.id), "children": []})
case_map[item.id]=item.name
return ans, case_map
except Exception as e:
            TestCaseDao.log.error(f"Failed to get test cases: {str(e)}")
            raise Exception(f"Failed to get test cases: {str(e)}")
@staticmethod
def get_tree(case_list):
        result = defaultdict(list)
        # Build the directory -> test case mapping
for cs in case_list:
result[cs.catalogue].append(cs)
keys = sorted(result.keys())
tree = [dict(key=f"cat_{key}",
children=[{"key": f"case_{child.id}", "title": child.name,
"total": TestCaseDao.get_case_children_length(child.id),
"children": TestCaseDao.get_case_children(child.id)} for child in result[key]],
title=key, total=len(result[key])) for key in keys]
return tree
@staticmethod
def get_case_children(case_id: int):
data, err = TestCaseAssertsDao.list_test_case_asserts(case_id)
if err:
raise err
return [dict(key=f"asserts_{d.id}", title=d.name, case_id=case_id) for d in data]
@staticmethod
def get_case_children_length(case_id: int):
data, err = TestCaseAssertsDao.list_test_case_asserts(case_id)
if err:
raise err
return len(data)
@staticmethod
def insert_test_case(test_case, user):
"""
        :param user: creator of the test case
        :param test_case: test case data
:return:
"""
try:
with Session() as session:
data = session.query(TestCase).filter_by(name=test_case.get("name"),
directory_id=test_case.get("directory_id"),
deleted_at=None).first()
if data is not None:
                    raise Exception("Test case already exists")
cs = TestCase(**test_case, create_user=user)
session.add(cs)
session.commit()
session.refresh(cs)
return cs.id
except Exception as e:
            TestCaseDao.log.error(f"Failed to add test case: {str(e)}")
            raise Exception(f"Failed to add test case: {str(e)}")
@staticmethod
def update_test_case(test_case: TestCaseForm, user):
"""
        :param user: user who modifies the test case
        :param test_case: test case data
:return:
"""
try:
with Session() as session:
data = session.query(TestCase).filter_by(id=test_case.id, deleted_at=None).first()
if data is None:
                    raise Exception("Test case does not exist")
DatabaseHelper.update_model(data, test_case, user)
session.commit()
session.refresh(data)
return data
except Exception as e:
            TestCaseDao.log.error(f"Failed to edit test case: {str(e)}")
            raise Exception(f"Failed to edit test case: {str(e)}")
@staticmethod
async def query_test_case(case_id: int) -> dict:
try:
async with async_session() as session:
sql = select(TestCase).where(TestCase.id == case_id, TestCase.deleted_at == None)
result = await session.execute(sql)
data = result.scalars().first()
if data is None:
                    raise Exception("Test case does not exist")
                # Get the assertions
                asserts, _ = await TestCaseAssertsDao.async_list_test_case_asserts(data.id)
                # Get the data constructors
constructors = await ConstructorDao.list_constructor(case_id)
constructors_case = await TestCaseDao.query_test_case_by_constructors(constructors)
test_data = await PityTestcaseDataDao.list_testcase_data(case_id)
return dict(asserts=asserts, constructors=constructors, case=data, constructors_case=constructors_case,
test_data=test_data)
except Exception as e:
            TestCaseDao.log.error(f"Failed to query test case: {str(e)}")
            raise Exception(f"Failed to query test case: {str(e)}")
@staticmethod
async def query_test_case_by_constructors(constructors: List[Constructor]):
        try:
            # Collect the case ids referenced by case-type constructors
            constructors = [json.loads(x.constructor_json).get("case_id") for x in constructors if x.type == 0]
async with async_session() as session:
sql = select(TestCase).where(TestCase.id.in_(constructors), TestCase.deleted_at == None)
result = await session.execute(sql)
data = result.scalars().all()
return {x.id: x for x in data}
except Exception as e:
            TestCaseDao.log.error(f"Failed to query test case: {str(e)}")
            raise Exception(f"Failed to query test case: {str(e)}")
@staticmethod
async def async_query_test_case(case_id) -> [TestCase, str]:
try:
async with async_session() as session:
result = await session.execute(
select(TestCase).where(TestCase.id == case_id, TestCase.deleted_at == None))
data = result.scalars().first()
if data is None:
return None, "็จไพไธๅญๅจ"
return data, None
except Exception as e:
            TestCaseDao.log.error(f"Failed to query test case: {str(e)}")
            return None, f"Failed to query test case: {str(e)}"
@staticmethod
def list_testcase_tree(projects) -> [List, dict]:
try:
result = []
project_map = {}
project_index = {}
for p in projects:
project_map[p.id] = p.name
result.append({
"label": p.name,
"value": p.id,
"key": p.id,
"children": [],
})
project_index[p.id] = len(result) - 1
with Session() as session:
data = session.query(TestCase).filter(TestCase.project_id.in_(project_map.keys()),
TestCase.deleted_at == None).all()
for d in data:
result[project_index[d.project_id]]["children"].append({
"label": d.name,
"value": d.id,
"key": d.id,
})
return result
except Exception as e:
            TestCaseDao.log.error(f"Failed to get test case list: {str(e)}")
            raise Exception("Failed to get test case list")
@staticmethod
def select_constructor(case_id: int):
"""
้่ฟcase_id่ทๅ็จไพๆ้ ๆฐๆฎ
:param case_id:
:return:
"""
try:
with Session() as session:
data = session.query(Constructor).filter_by(case_id=case_id, deleted_at=None).order_by(
desc(Constructor.created_at)).all()
return data
except Exception as e:
            TestCaseDao.log.error(f"Failed to query constructor data: {str(e)}")
@staticmethod
async def async_select_constructor(case_id: int) -> List[Constructor]:
"""
ๅผๆญฅ่ทๅ็จไพๆ้ ๆฐๆฎ
:param case_id:
:return:
"""
try:
async with async_session() as session:
sql = select(Constructor).where(Constructor.case_id == case_id,
Constructor.deleted_at == None).order_by(Constructor.created_at)
data = await session.execute(sql)
return data.scalars().all()
except Exception as e:
            TestCaseDao.log.error(f"Failed to query constructor data: {str(e)}")
@staticmethod
async def collect_data(case_id: int, data: List):
"""
        Collect the data that uses case_id as a precondition (postconditions are not supported yet)
:param data:
:param case_id:
:return:
"""
        # First, get the data constructors (preconditions)
        pre = dict(id=f"pre_{case_id}", label="Preconditions", children=list())
await TestCaseDao.collect_constructor(case_id, pre)
data.append(pre)
        # Get the assertions
        asserts = dict(id=f"asserts_{case_id}", label="Assertions", children=list())
await TestCaseDao.collect_asserts(case_id, asserts)
data.append(asserts)
@staticmethod
async def collect_constructor(case_id, parent):
constructors = await TestCaseDao.async_select_constructor(case_id)
for c in constructors:
temp = dict(id=f"constructor_{c.id}", label=f"{c.name}", children=list())
if c.type == 0:
                # This constructor is itself a test case, so recurse into it
temp["label"] = "[CASE]: " + temp["label"]
json_data = json.loads(c.constructor_json)
await TestCaseDao.collect_data(json_data.get("case_id"), temp.get("children"))
elif c.type == 1:
temp["label"] = "[SQL]: " + temp["label"]
elif c.type == 2:
temp["label"] = "[REDIS]: " + temp["label"]
            # Otherwise just append the data as-is
parent.get("children").append(temp)
@staticmethod
async def collect_asserts(case_id, parent):
asserts, err = await TestCaseAssertsDao.async_list_test_case_asserts(case_id)
if err:
raise Exception("่ทๅๆญ่จๆฐๆฎๅคฑ่ดฅ")
for a in asserts:
temp = dict(id=f"assert_{a.id}", label=f"{a.name}", children=list())
parent.get("children").append(temp)
@staticmethod
async def get_xmind_data(case_id: int):
result = dict()
data = await TestCaseDao.query_test_case(case_id)
cs = data.get("case")
        # Start parsing the test data
result.update(dict(id=f"case_{case_id}", label=f"{cs.name}({cs.id})"))
children = list()
await TestCaseDao.collect_data(case_id, children)
result["children"] = children
return result
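# --- Editor's illustrative sketch (added, not part of the original DAO) ---
# A minimal, hedged example of how the xmind tree built above might be fetched
# from an asyncio entry point. The case id is a made-up value, and a reachable
# database configured for `async_session` is assumed.
if __name__ == "__main__":
    import asyncio

    async def _demo(case_id: int = 1):  # hypothetical case id
        tree = await TestCaseDao.get_xmind_data(case_id)
        # The root node carries the case label; children hold preconditions and assertions.
        print(tree["label"], [child["label"] for child in tree["children"]])

    asyncio.run(_demo())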
|
py | 1a5155bd82fb2fb85a5117acd1f270e6f2f5b424 | from django.conf.urls.defaults import *
# Uncomment the next two lines to enable the admin:
#from django.contrib import admin
#admin.autodiscover()
urlpatterns = patterns('',
# Example:
# (r'^blog/', include('blog.foo.urls')),
(r'^$', 'Account.views.index'),
(r'^test/$', 'Account.views.test'),
(r'^random_number/$', 'Account.views.random_number'),
# Uncomment the admin/doc line below and add 'django.contrib.admindocs'
# to INSTALLED_APPS to enable admin documentation:
# (r'^admin/doc/', include('django.contrib.admindocs.urls')),
# Uncomment the next line to enable the admin:
# (r'^admin/(.*)', admin.site.root),
)
|
py | 1a5156203cc10b2d002654070a8d01f796da1d1a | import pandas as pd
import numpy as np
import os
import json
from datetime import date
def getFilename(subject_data):
"""
Given the subject_data field from a row of one of our SpaceFluff dataframes, extract the name of the object being classified
    by extracting the 'Filename'|'image'|'IMAGE' field.
To be used with df[column].apply()
@returns {string} filename of the object being classified, including the extension '_insp.png'
"""
keys = list(subject_data.values())[0].keys()
accessKey = (
"Filename" if "Filename" in keys else "image" if "image" in keys else "IMAGE" if "IMAGE" in keys else None)
if accessKey:
return list(subject_data.values())[0][accessKey][:-9]
else:
print("No filename found!")
def getMetadataValue(metadata, field):
'''
@param metadata metadata column from a row in a SpaceFluff dataframe
@param {string} field: 'retired' | 'already_seen'
@returns {boolean} value of `field` within the row's metadata column
'''
return metadata['subject_selection_state'][field]
def parseTime(created_at):
'''
@param {df column} created_at: df['created_at'] column
'''
return pd.to_datetime(created_at, format="%Y-%m-%d %H:%M:%S %Z")
def getGroupSize(group):
'''
@param {pd.core.frame.DataFrame} pandas dataframe group
@returns number of rows in group (corresponds to number of columns in case of parsed SpaceFluff dataframe)
'''
return group.shape[0]
def extract_task_value(task_index, row):
try:
return row[task_index]['value']
except:
return
def percentageVotesForAnswer(counts, answer):
'''
@param counts: a df column like {galaxy: 15, group of objects (cluster): 10, something else/empty center: 2}
    @param answer: one of the keys of `counts`
'''
totalVotes = sum(counts.values())
if not answer in counts.keys():
return 0
votesForAnswer = counts[answer]
return round(100*votesForAnswer/totalVotes, 1)
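# Editor's illustrative sketch (added): a quick check of percentageVotesForAnswer
# on a hand-made counts dictionary that mirrors the docstring above; the values
# are invented for illustration only.
if __name__ == "__main__":
    _example_counts = {"galaxy": 15, "group of objects (cluster)": 10, "something else/empty center": 2}
    print(percentageVotesForAnswer(_example_counts, "galaxy"))         # 55.6 (15 of 27 votes)
    print(percentageVotesForAnswer(_example_counts, "not an answer"))  # 0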
def extractTaskValue(annotations, task):
'''
@param {list} annotations: annotations column for a row in a SpaceFluff dataframe
@param {string} task: one of 'Ti', where i \in 0,2,1,3,4,5,9
@returns {string | None} value the user provided for the given task, or None
'''
filtered = list(filter(lambda x: x['task'] == task, annotations))
if len(filtered) > 0:
return filtered[0]['value']
def extract_retired_info(subject_data):
'''
@param subject_data: (dataframe 'subject_data' column)
'''
return list(subject_data.values())[0]["retired"]
def get_power_users(df, vote_count_threshold):
"""
@param df: parsed dataframe where each row is a single classification
@param {int} vote_count_threshold: return only users that made at least this many valid classifications
"""
groupby_username = df[['user_name']].groupby(['user_name'])
groupby_username_filtered = groupby_username.filter(lambda x: x.shape[0] >= vote_count_threshold)
grouped = groupby_username_filtered.groupby(['user_name'])
filtered_usernames_and_votes = []
for username, vote_count in grouped:
filtered_usernames_and_votes.append({
"username": username,
"votes": len(vote_count)
})
return filtered_usernames_and_votes
def get_task_0_value_counts(row):
"Get task 0 value counts for one row of a group of classifications"
row = list(row)
# value_counts = {answer: 0 for answer in answer_types}
value_counts = {}
for vote in row:
if value_counts.get(vote):
value_counts[vote] += 1
else:
value_counts[vote] = 1
return value_counts, len(row)
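# Editor's illustrative sketch (added): get_task_0_value_counts simply tallies the
# votes it is given, so a plain list is enough to exercise it (values are made up).
if __name__ == "__main__":
    _votes = ["galaxy", "galaxy", "something else/empty center"]
    _counts, _n = get_task_0_value_counts(_votes)
    print(_counts, _n)  # {'galaxy': 2, 'something else/empty center': 1} 3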
def as_array(lst):
'Turn a Python list into a NumPy array'
if type(lst) == np.ndarray:
return lst
return np.array(lst)
def get_running_vote_fraction(df):
"""
Returns a list of
    (% votes by users that cast <= n votes)/total votes
as a function of n
@param df: `df`-like dataframe, where each row corresponds to a single classification made by a single user
"""
users_and_classification_counts = []
for k, v in df.groupby('user_name').groups.items():
users_and_classification_counts.append({
'username': k,
'classifications': len(v)
})
cls_per_user = [entry['classifications'] for entry in users_and_classification_counts]
total_votes = sum(cls_per_user) # total number of votes made
sorted_vote_counts = sorted(cls_per_user) # sorted list of number of classifications per user
# create dictionary with keys: # votes per user, values: # users that cast that amount of votes
countDict = {}
for entry in sorted_vote_counts:
countDict[entry] = countDict.get(entry, 0) + 1
fractions = []
for vote_count, occurrence_rate in countDict.items():
fractions.append([vote_count, vote_count*occurrence_rate/total_votes, occurrence_rate])
counts, fractions, users_included = as_array(fractions).T
# create a running fraction of total votes cast in a single loop
running_fraction = []
for i, fr in enumerate(fractions):
if i == 0:
val = fr
else:
val = fr+running_fraction[i-1]
running_fraction.append(val)
return [
users_and_classification_counts,
cls_per_user,
counts,
running_fraction
] |
py | 1a51565e052250be0e4cd95b7260ddc458534b44 | from __future__ import absolute_import, print_function, unicode_literals
import re
import sys
from django.conf import settings as django_settings
from django.http import Http404, HttpResponseRedirect
from django.utils.cache import add_never_cache_headers
def redirect_request_processor(page, request):
"""
Returns a ``HttpResponseRedirect`` instance if the current page says
a redirect should happen.
"""
target = page.get_redirect_to_target(request)
if target:
if request._feincms_extra_context.get('extra_path', '/') == '/':
return HttpResponseRedirect(target)
raise Http404()
def extra_context_request_processor(page, request):
"""
Fills ``request._feincms_extra_context`` with a few useful variables.
"""
request._feincms_extra_context.update({
# XXX This variable name isn't accurate anymore.
'in_appcontent_subpage': False,
'extra_path': '/',
})
url = page.get_absolute_url()
if request.path != url:
request._feincms_extra_context.update({
'in_appcontent_subpage': True,
'extra_path': re.sub(
'^' + re.escape(url.rstrip('/')),
'',
request.path,
),
})
def frontendediting_request_processor(page, request):
"""
Sets the frontend editing state in the cookie depending on the
``frontend_editing`` GET parameter and the user's permissions.
"""
if not 'frontend_editing' in request.GET:
return
response = HttpResponseRedirect(request.path)
if request.user.has_module_perms('page'):
try:
enable_fe = int(request.GET['frontend_editing']) > 0
except ValueError:
enable_fe = False
if enable_fe:
response.set_cookie(str('frontend_editing'), enable_fe)
else:
response.delete_cookie(str('frontend_editing'))
# Redirect to cleanup URLs
return response
def frontendediting_response_processor(page, request, response):
# Add never cache headers in case frontend editing is active
if (hasattr(request, 'COOKIES')
and request.COOKIES.get('frontend_editing', False)):
if hasattr(response, 'add_post_render_callback'):
response.add_post_render_callback(add_never_cache_headers)
else:
add_never_cache_headers(response)
def etag_request_processor(page, request):
"""
Short-circuits the request-response cycle if the ETag matches.
"""
# XXX is this a performance concern? Does it create a new class
# every time the processor is called or is this optimized to a static
# class??
class DummyResponse(dict):
"""
This is a dummy class with enough behaviour of HttpResponse so we
can use the condition decorator without too much pain.
"""
def has_header(page, what):
return False
def dummy_response_handler(*args, **kwargs):
return DummyResponse()
def etagger(request, page, *args, **kwargs):
etag = page.etag(request)
return etag
def lastmodifier(request, page, *args, **kwargs):
lm = page.last_modified()
return lm
# Unavailable in Django 1.0 -- the current implementation of ETag support
# requires Django 1.1 unfortunately.
from django.views.decorators.http import condition
# Now wrap the condition decorator around our dummy handler:
# the net effect is that we will be getting a DummyResponse from
# the handler if processing is to continue and a non-DummyResponse
# (should be a "304 not modified") if the etag matches.
rsp = condition(etag_func=etagger, last_modified_func=lastmodifier)(
dummy_response_handler)(request, page)
# If dummy then don't do anything, if a real response, return and
# thus shortcut the request processing.
if not isinstance(rsp, DummyResponse):
return rsp
def etag_response_processor(page, request, response):
"""
Response processor to set an etag header on outgoing responses.
The Page.etag() method must return something valid as etag content
whenever you want an etag header generated.
"""
etag = page.etag(request)
if etag is not None:
response['ETag'] = '"' + etag + '"'
def debug_sql_queries_response_processor(verbose=False, file=sys.stderr):
"""
Attaches a handler which prints the query count (and optionally all
individual queries which have been executed) on the console. Does nothing
if ``DEBUG = False``.
Example::
from feincms.module.page import models, processors
models.Page.register_response_processor(
processors.debug_sql_queries_response_processor(verbose=True),
)
"""
if not django_settings.DEBUG:
return lambda page, request, response: None
def processor(page, request, response):
from django.db import connection
print_sql = lambda x: x
try:
import sqlparse
print_sql = lambda x: sqlparse.format(
x, reindent=True, keyword_case='upper')
except:
pass
if verbose:
print("-" * 60, file=file)
time = 0.0
i = 0
for q in connection.queries:
i += 1
if verbose:
print("%d : [%s]\n%s\n" % (
i, q['time'], print_sql(q['sql'])), file=file)
time += float(q['time'])
print("-" * 60, file=file)
print("Total: %d queries, %.3f ms" % (i, time), file=file)
print("-" * 60, file=file)
return processor
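# Editor's illustrative sketch (added, hedged): how processors like the ones above
# are typically wired up, modelled on the registration call shown in the
# debug_sql_queries_response_processor docstring. The import path of the Page
# model is an assumption about the surrounding project; guarded so it never runs here.
if False:  # illustration only
    from feincms.module.page.models import Page
    Page.register_request_processor(redirect_request_processor)
    Page.register_request_processor(frontendediting_request_processor)
    Page.register_response_processor(frontendediting_response_processor)
    Page.register_response_processor(
        debug_sql_queries_response_processor(verbose=True))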
|
py | 1a5156e83a6f34b8e56723f9e38845d74dc9bdc3 | import requests
from data import ui
def consultar(token='25d800a8b8e8b99d77c809567aa291b8',self=0):
Sair = False
while(Sair == False):
if self == 1:
ip_input = ''
else:
ip_input = ui.input_dialog()
if len(ip_input) < 1:
            ui.error_dialog('Enter something to look up.');break
try:
api=requests.get('http://ipwhois.app/json/'+ip_input).json()
#lat = api['latitude']
#lon = api['longitude']
#api2 = requests.get('http://api.openweathermap.org/data/2.5/weather?lat={lat}&lon={lon}&appid={token[2]}')
except:
msg = "erro no servidor"
try:
            msg=f'''
            IP: {api['ip']}
            TYPE: {api['type']}
            CONTINENT: {api['continent']}
            CONTINENT CODE: {api['continent_code']}
            COUNTRY: {api['country']}
            COUNTRY CODE: {api['country_code']}
            COUNTRY CAPITAL: {api['country_capital']}
            COUNTRY CALLING CODE: {api['country_phone']}
            NEIGHBOURING COUNTRIES: {api['country_neighbours']}
            REGION: {api['region']}
            CITY: {api['city']}
            LATITUDE: {api['latitude']}
            LONGITUDE: {api['longitude']}
            ASN: {api['asn']}
            ORG: {api['org']}
            ISP: {api['isp']}
            TIMEZONE: {api['timezone']}
            TIMEZONE NAME: {api['timezone_name']}
            GMT: {api['timezone_gmt']}
            CURRENCY: {api['currency']}
            CURRENCY CODE: {api['currency_code']}
            CURRENCY SYMBOL: {api['currency_symbol']}
            '''
#TEMPERATURA: {api2["weather"][0]["main"]}
except:
            msg = 'Invalid IP.'
choice = int(ui.dialog_choice(msg))
if choice == 1:
pass
elif choice == 2:
Sair = True
else:
ui.error_dialog() |
py | 1a515735cc20a640744e25d5e8617faccc8edd43 | from typing import List, Tuple
from chiabip158 import PyBIP158
from cryptodoge.types.blockchain_format.coin import Coin
from cryptodoge.types.blockchain_format.sized_bytes import bytes32
from cryptodoge.types.full_block import FullBlock
from cryptodoge.types.header_block import HeaderBlock
from cryptodoge.types.name_puzzle_condition import NPC
from cryptodoge.util.condition_tools import created_outputs_for_conditions_dict
def get_block_header(block: FullBlock, tx_addition_coins: List[Coin], removals_names: List[bytes32]) -> HeaderBlock:
# Create filter
byte_array_tx: List[bytes32] = []
addition_coins = tx_addition_coins + list(block.get_included_reward_coins())
if block.is_transaction_block():
for coin in addition_coins:
byte_array_tx.append(bytearray(coin.puzzle_hash))
for name in removals_names:
byte_array_tx.append(bytearray(name))
bip158: PyBIP158 = PyBIP158(byte_array_tx)
encoded_filter: bytes = bytes(bip158.GetEncoded())
return HeaderBlock(
block.finished_sub_slots,
block.reward_chain_block,
block.challenge_chain_sp_proof,
block.challenge_chain_ip_proof,
block.reward_chain_sp_proof,
block.reward_chain_ip_proof,
block.infused_challenge_chain_ip_proof,
block.foliage,
block.foliage_transaction_block,
encoded_filter,
block.transactions_info,
)
def additions_for_npc(npc_list: List[NPC]) -> List[Coin]:
additions: List[Coin] = []
for npc in npc_list:
for coin in created_outputs_for_conditions_dict(npc.condition_dict, npc.coin_name):
additions.append(coin)
return additions
def tx_removals_and_additions(npc_list: List[NPC]) -> Tuple[List[bytes32], List[Coin]]:
"""
Doesn't return farmer and pool reward.
"""
removals: List[bytes32] = []
additions: List[Coin] = []
# build removals list
if npc_list is None:
return [], []
for npc in npc_list:
removals.append(npc.coin_name)
additions.extend(additions_for_npc(npc_list))
return removals, additions
|
py | 1a51579014857a4090840548239cd7866dd37dd0 | class A:
def __init__(self, gpioPort):
self.gpioPort = gpioPort
def p(self):
print(self.gpioPort)
class B(A):
pass
B(12).p()
C = type('C', (A,), dict({}))
print(C)
C(14).p()
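# Editor's note (added): the type() call above is the dynamic spelling of an
# ordinary class statement; the lines below build the same kind of subclass.
class C2(A):  # equivalent to type('C2', (A,), {})
    pass
C2(16).p()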
def value(value=None):
if value == None:
return 'get_value'
else:
print(value)
def m(a, b, c=None, d=None):
print(a, b, c, d)
m(1, 2)
print(value('set_value'))
print(value())
class A1:
@property
def p(self):
return {"a": 10}
class AA(A1):
@property
def p(self):
return super().p
a = AA()
print(a.p) |
py | 1a515897bcef9f45a6c36d6464be10bfcec0bd47 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Basic classes to contain rstWeb objects and methods to calculate their attributes
Author: Amir Zeldes
"""
class NODE:
def __init__(self, id, left, right, parent, depth, kind, text, relname, relkind):
"""Basic class to hold all nodes (EDU, span and multinuc) in structure.py and while importing"""
self.id = id
self.parent = parent
self.left = left
self.right = right
self.depth = depth
self.kind = kind #edu, multinuc or span node
self.text = text #text of an edu node; empty for spans/multinucs
self.relname = relname
self.relkind = relkind #rst (a.k.a. satellite), multinuc or span relation
self.sortdepth = depth
class SEGMENT:
def __init__(self, id, text):
""" Class used by segment.py to represent EDUs, NOT used by the structurer in structure.py"""
self.id = id
self.text = text
self.tokens = text.split(" ")
def get_depth(orig_node, probe_node, nodes, doc=None, project=None, user=None):
"""
Calculate graphical nesting depth of a node based on the node list graph.
Note that RST parentage without span/multinuc does NOT increase depth.
"""
if probe_node.parent != "0":
try:
parent = nodes[probe_node.parent]
except KeyError:
# Parent node does not exist, set parent to 0
from modules.rstweb_sql import update_parent
if doc is not None and project is not None and user is not None:
update_parent(probe_node.id,"0",doc,project,user)
return
else:
raise KeyError("Node ID " + probe_node.id + " has non existing parent " + probe_node.parent + " and user not set in function\n")
if parent.kind != "edu" and (probe_node.relname == "span" or parent.kind == "multinuc" and probe_node.relkind =="multinuc"):
orig_node.depth += 1
orig_node.sortdepth +=1
elif parent.kind == "edu":
orig_node.sortdepth += 1
get_depth(orig_node, parent, nodes, doc=doc, project=project, user=user)
def get_left_right(node_id, nodes, min_left, max_right, rel_hash):
"""
Calculate leftmost and rightmost EDU covered by a NODE object. For EDUs this is the number of the EDU
itself. For spans and multinucs, the leftmost and rightmost child dominated by the NODE is found recursively.
"""
if nodes[node_id].parent != "0" and node_id != "0":
parent = nodes[nodes[node_id].parent]
if min_left > nodes[node_id].left or min_left == 0:
if nodes[node_id].left != 0:
min_left = nodes[node_id].left
if max_right < nodes[node_id].right or max_right == 0:
max_right = nodes[node_id].right
if nodes[node_id].relname == "span":
if parent.left > min_left or parent.left == 0:
parent.left = min_left
if parent.right < max_right:
parent.right = max_right
elif nodes[node_id].relname in rel_hash:
if parent.kind == "multinuc" and rel_hash[nodes[node_id].relname] =="multinuc":
if parent.left > min_left or parent.left == 0:
parent.left = min_left
if parent.right < max_right:
parent.right = max_right
get_left_right(parent.id, nodes, min_left, max_right, rel_hash)
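# Editor's illustrative sketch (added): a two-node fragment showing how get_depth
# increments depth when an EDU attaches to a span parent via a "span" relation.
# The ids and text are invented for illustration only.
if __name__ == "__main__":
    _nodes = {
        "1": NODE("1", 1, 1, "2", 0, "edu", "Hello world.", "span", "span"),
        "2": NODE("2", 1, 1, "0", 0, "span", "", "span", "span"),
    }
    get_depth(_nodes["1"], _nodes["1"], _nodes)
    print(_nodes["1"].depth)  # 1 -- nested one level under the span node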
|
py | 1a51590ae35f35828c066cdfd788a2c894ff813e | import json
import argparse
import os
import re
import sys
import time
import commands
import tarfile
import urllib2
import cStringIO
from tar_utils import Local_Tarutils
from tar_utils import Http_Tarutils
#from eutester.machine import Machine
'''
class file():
@classmethod
def from_json(self, json):
parse json here?
def to_json:
output json here?
def get_path(self):
tbd
def print_path(self):
tbd
def verify_md5(self):
tbd
def get_md5(self):
tbd
def download(self):
tbd
type
uri
name
checksum
date
version
size
class users():
@classmethod
def from_json(self, json):
parse json here?
def to_json:
output json here?
name
login
groups
homedir
class packages():
@classmethod
def from_json(self, json):
parse json here?
def to_json:
output json here?
list
file
class hypervisors():
@classmethod
def from_json(self, json):
parse json here?
def to_json:
output json here?
xen
kvm
vmware
class image_set():
@classmethod
def from_json(self, json):
parse json here?
def to_json:
output json here?
roofs = file()
ramdisk = file()
kernel = file()
class os_info():
xen = image_set()
kvm = image_set()
'''
class emi_image_set():
def __init__(self, uri, headersize=512, manifestname='manifest.json'):
self.uri = uri
self.manifestname=manifestname
self.tar = self.get_tar_file(self.uri, headersize=headersize)
def sys(self, cmd, listformat=True):
status,output = commands.getstatusoutput(cmd)
if status != 0:
raise Exception('sys, cmd"'+str(cmd)+'" failed, code:'+str(status)+', output:'+str(output))
else:
if listformat:
return str(output).splitlines()
else:
return str(output)
def found(self,cmd,string):
out = self.sys(cmd)
if re.search(string, out):
return True
else:
return False
def get_tar_file(self,uri, headersize=512):
if re.search('http://', uri):
return Http_Tarutils(uri, headersize=headersize)
else:
return Local_Tarutils(uri, headersize=headersize)
def extract_file(self, fpath, destpath=None):
return self.tar.extract_member(fpath, destpath=destpath)
def list_tar_contents(self):
return self.tar.list()
def extract_all(self, destdir=None):
return self.tar.extract_member(destpath=destdir)
def get_manifest(self):
print "getting manifest"
|
py | 1a51592593fd953fbde1219bcf653869b9935674 | """recipe_app URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
urlpatterns = [
path('admin/', admin.site.urls),
]
|
py | 1a515afcf9104b33ec9d7873c541e4a7eabb6c9a | n, m = list(map(int, input().split()))
def find_out(gg):
nn = len(gg)
mm = len(gg[0])
x, y, tx, ty = None, None, None, None
need_keys = set()
D = {'A': 'a', 'B': 'b', 'C': 'c', 'D': 'd', 'E': 'e'}
doors = []
for i in range(nn):
for j in range(mm):
if gg[i][j] == 'S':
x, y = i, j
if gg[i][j] == 'X':
tx, ty = i, j
if gg[i][j] in D:
doors.append([i, j])
need_keys.add(D[gg[i][j]])
direcs = [(-1, 0), (1, 0), (0, -1), (0, 1)]
dp = [[0 for _ in range(mm)] for _ in range(nn)]
ac = {(x, y)}
have_keys = set()
while ac:
cx, cy = ac.pop()
for dx, dy in direcs:
nx, ny = cx + dx, cy + dy
if 0 <= nx < nn and 0 <= ny < mm:
                if nx == tx and ny == ty:
return 'YES'
else:
if gg[nx][ny] == 'X' or gg[nx][ny] in D:
pass
else:
if dp[nx][ny] == 1:
pass
else:
if gg[nx][ny] in need_keys:
have_keys.add(gg[nx][ny])
if have_keys == need_keys:
for i, j in doors:
gg[i][j] = '.'
for i in range(nn):
for j in range(mm):
dp[i][j] = 0
if gg[nx][ny] == 1:
pass
else:
gg[nx][ny] = 1
ac.add((nx, ny))
return 'NO'
while n + m != 0:
print(n, m)
grid = []
input_str = input()
while len(input_str.split()) == 1:
grid.append(list(input_str))
input_str = input()
print(find_out(grid))
n, m = list(map(int, input_str.split()))
print(n, m)
"""
7 20
......G..X...dX.X...
.b.......bXX.X..Xb.D
dX.X.....X.aXe..c...
X...D..XX..X..D....c
a..X.Xc.c..bXDXac...
.......C..X.X.c...Xb
b.....eXA..SA..X.dX.
15 11
Xd...E.XaE.
D.E..X..DXB
..A.E...DBb
DX.X..DX.ED
..XX.DXAc..
..XE.X..X.X
c...B...X.B
.X.DX..Xa.b
GcXE.B....B
..A..A..Xea
BX.EdEa..ab
e..XSE.B...
.XA...X.X.C
Cc.X..XeBcb
CXXCC...bX.
0 0
"""
|
py | 1a515b9444087aa4e5cddb2c3ebc247c1e7d1af5 | import jax
from jax import numpy as jnp
# import numpy as np
from tabcorr.tabcorr import *
class JaxTabCorr(TabCorr):
def predict(self, model, separate_gal_type=False, **occ_kwargs):
"""
Predicts the number density and correlation function for a certain
model.
Parameters
----------
model : HodModelFactory
Instance of ``halotools.empirical_models.HodModelFactory``
describing the model for which predictions are made.
separate_gal_type : boolean, optional
If True, the return values are dictionaries divided by each galaxy
types contribution to the output result.
**occ_kwargs : dict, optional
Keyword arguments passed to the ``mean_occupation`` functions
of the model.
Returns
-------
ngal : numpy.array or dict
Array or dictionary of arrays containing the number densities for
each galaxy type stored in self.gal_type. The total galaxy number
density is the sum of all elements of this array.
xi : numpy.array or dict
Array or dictionary of arrays storing the prediction for the
correlation function.
"""
try:
assert (sorted(model.gal_types) == sorted(
['centrals', 'satellites']))
except AssertionError:
raise RuntimeError('The model instance must only have centrals ' +
'and satellites as galaxy types. Check the ' +
'gal_types attribute of the model instance.')
try:
assert (model._input_model_dictionary['centrals_occupation']
.prim_haloprop_key == self.attrs['prim_haloprop_key'])
assert (model._input_model_dictionary['satellites_occupation']
.prim_haloprop_key == self.attrs['prim_haloprop_key'])
except AssertionError:
raise RuntimeError('Mismatch in the primary halo properties of ' +
'the model and the TabCorr instance.')
try:
if hasattr(model._input_model_dictionary['centrals_occupation'],
'sec_haloprop_key'):
assert (model._input_model_dictionary['centrals_occupation']
.sec_haloprop_key == self.attrs['sec_haloprop_key'])
if hasattr(model._input_model_dictionary['satellites_occupation'],
'sec_haloprop_key'):
assert (model._input_model_dictionary['satellites_occupation']
.sec_haloprop_key == self.attrs['sec_haloprop_key'])
except AssertionError:
raise RuntimeError('Mismatch in the secondary halo properties ' +
'of the model and the TabCorr instance.')
# TODO: Figure out how to add back in the redshift sanity check in a JAX-friendly way?
# ====================================================================================
# try:
# assert abs(model.redshift - self.attrs['redshift']) < 0.05
# except AssertionError:
# raise RuntimeError('Mismatch in the redshift of the model and ' +
# 'the TabCorr instance.')
mean_occupation = jnp.zeros(len(self.gal_type["gal_type"]))
mask = self.gal_type["gal_type"] == "centrals"
mean_occupation = jax.ops.index_update(
mean_occupation, mask,
model.mean_occupation_centrals(
prim_haloprop=self.gal_type["prim_haloprop"][mask],
sec_haloprop_percentile=self.gal_type["sec_haloprop_percentile"][mask],
**occ_kwargs)
)
mean_occupation = jax.ops.index_update(
mean_occupation, ~mask,
model.mean_occupation_satellites(
prim_haloprop=self.gal_type["prim_haloprop"][~mask],
sec_haloprop_percentile=self.gal_type["sec_haloprop_percentile"][~mask],
**occ_kwargs)
)
return jaxtabcorr_predict(
mean_occupation,
self.gal_type["gal_type"] == "centrals",
self.gal_type["prim_haloprop"].data,
self.gal_type["sec_haloprop_percentile"].data,
self.gal_type["n_h"].data, self.tpcf_matrix,
self.tpcf_shape, self.attrs["mode"] == "cross",
separate_gal_type)
def jaxtabcorr_predict(mean_occupation, is_centrals, prim_haloprop,
sec_haloprop_percentile, n_h, tpcf_matrix,
tpcf_shape, do_cross, separate_gal_type):
ngal = mean_occupation * n_h
if not do_cross:
ngal_sq = jnp.outer(ngal, ngal)
ngal_sq = 2 * ngal_sq - jnp.diag(jnp.diag(ngal_sq))
ngal_sq = jax_symmetric_matrix_to_array(ngal_sq)
xi = tpcf_matrix * ngal_sq / jnp.sum(ngal_sq)
else:
xi = tpcf_matrix * ngal / jnp.sum(ngal)
if not separate_gal_type:
ngal = jnp.sum(ngal)
xi = jnp.sum(xi, axis=1).reshape(tpcf_shape)
return ngal, xi
else:
ngal_dict = {}
xi_dict = {}
for gal_type, key in [(True, "centrals"), (False, "satellites")]:
mask = is_centrals == gal_type
ngal_type = jnp.where(mask, ngal, 0)
ngal_dict[key] = jnp.sum(ngal_type) # <-- TODO: this will break
if not do_cross:
for gal_type_1, gal_type_2, name in [(True, True, "centrals-centrals"),
(True, False, "centrals-satellites"),
(False, False, "satellites-satellites")]:
mask = jax_symmetric_matrix_to_array(jnp.outer(
gal_type_1 == is_centrals,
gal_type_2 == is_centrals) |
jnp.outer(
gal_type_2 == is_centrals,
gal_type_1 == is_centrals))
xi_dict[name] = jnp.sum(xi * mask, axis=1).reshape(tpcf_shape)
else:
for gal_type, key in [(True, "centrals"), (False, "satellites")]:
mask = is_centrals == gal_type
                xi_dict[key] = jnp.sum(
                    xi * mask, axis=1).reshape(tpcf_shape)
return ngal_dict, xi_dict
static_args = ["tpcf_shape", "do_cross", "separate_gal_type"]
jaxtabcorr_predict = jax.jit(jaxtabcorr_predict,
static_argnames=static_args)
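# Editor's note (added, hedged): tpcf_shape, do_cross and separate_gal_type are
# marked static above, so jax.jit re-traces jaxtabcorr_predict whenever their
# Python values change; they must therefore be hashable (a tuple for tpcf_shape,
# plain bools for the two flags), not lists or arrays.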
def jax_symmetric_matrix_to_array(matrix):
# Assertions not allowed by jit :(
# try:
# assert matrix.shape[0] == matrix.shape[1]
# assert np.all(matrix == matrix.T)
# except AssertionError:
# raise RuntimeError('The matrix you provided is not symmetric.')
n_dim = matrix.shape[0]
sel = jnp.zeros((n_dim**2 + n_dim) // 2, dtype=int)
for i in range(matrix.shape[0]):
sel = jax.ops.index_update(
sel, slice((i*(i+1))//2, (i*(i+1))//2+(i+1)),
jnp.arange(i*n_dim, i*n_dim + i + 1))
# sel[(i*(i+1))//2:(i*(i+1))//2+(i+1)] = jnp.arange(
# i*n_dim, i*n_dim + i + 1)
return matrix.ravel()[sel] |
py | 1a515bd983d713b5eb65e4330fd062dba57d1082 | import datetime
class Employee:
raise_amount = 1.04 # Class variable
num_of_employees = 0
def __init__(self, first, last, pay):
self.first = first
self.last = last
self.pay = pay
self.email = first + '.' + last + '@company.com'
Employee.num_of_employees += 1
def fullname(self):
return self.first + ' ' + self.last
def apply_raise(self):
# Can also use Employee.raise_amount
self.pay = int(self.pay * self.raise_amount)
@classmethod
def set_raise_amount(cls, amount):
cls.raise_amount = amount
# class method as alternative constructor
@classmethod
def from_string(cls, emp_str):
"construct a new employee from string kebab case"
first_name, last_name, pay = emp_str.split('-')
# calling constructor with cls keyword, denoting class
return cls(first_name, last_name, pay)
@staticmethod
def is_work_day_with_string(day):
work_days = ['monday', 'tuesday', 'wednesday', 'thursday', 'friday']
for d in work_days:
if d.upper() == day.upper():
print('work day')
return True
print('not a work day')
return False
@staticmethod
def is_work_day(day):
if day.weekday() == 5 or day.weekday() == 6:
print('not a work day')
return False
else:
print('work day')
return True
# creating employees by parsing kebab-case string
emp_str1 = 'John-Doe-7000'
# parsing
emp1 = Employee.from_string(emp_str1)
emp2 = Employee('Burak', 'Aksoy', 3000)
print(emp1.__dict__)
print(emp2.__dict__)
# regular methods pass self def my_func(self),
# class methods pass cls def class_func(cls),
# However, static methods do not pass anyhting
print(Employee.is_work_day_with_string('monday'))
print(Employee.is_work_day_with_string('saturday'))
my_date = datetime.date.today()
print(my_date)
print(Employee.is_work_day(my_date))
|
py | 1a515c5fa8e43f634f08e9b3264157af83371433 | a.b -= 2
a[0] += 1
a[0:2] += 1
|
py | 1a515c624523fbb8bdc6d6bf4f73767813676918 | #
# Handler library for Linux IaaS
#
# Copyright 2014 Microsoft Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
JSON def:
HandlerEnvironment.json
[{
"name": "ExampleHandlerLinux",
"seqNo": "seqNo",
"version": "1.0",
"handlerEnvironment": {
"logFolder": "<your log folder location>",
"configFolder": "<your config folder location>",
"statusFolder": "<your status folder location>",
"heartbeatFile": "<your heartbeat file location>",
}
}]
Example ./config/1.settings
"{"runtimeSettings":[{"handlerSettings":{"protectedSettingsCertThumbprint":"1BE9A13AA1321C7C515EF109746998BAB6D86FD1","protectedSettings":
"MIIByAYJKoZIhvcNAQcDoIIBuTCCAbUCAQAxggFxMIIBbQIBADBVMEExPzA9BgoJkiaJk/IsZAEZFi9XaW5kb3dzIEF6dXJlIFNlcnZpY2UgTWFuYWdlbWVudCBmb3IgR+nhc6VHQTQpCiiV2zANBgkqhkiG9w0BAQEFAASCAQCKr09QKMGhwYe+O4/a8td+vpB4eTR+BQso84cV5KCAnD6iUIMcSYTrn9aveY6v6ykRLEw8GRKfri2d6tvVDggUrBqDwIgzejGTlCstcMJItWa8Je8gHZVSDfoN80AEOTws9Fp+wNXAbSuMJNb8EnpkpvigAWU2v6pGLEFvSKC0MCjDTkjpjqciGMcbe/r85RG3Zo21HLl0xNOpjDs/qqikc/ri43Y76E/Xv1vBSHEGMFprPy/Hwo3PqZCnulcbVzNnaXN3qi/kxV897xGMPPC3IrO7Nc++AT9qRLFI0841JLcLTlnoVG1okPzK9w6ttksDQmKBSHt3mfYV+skqs+EOMDsGCSqGSIb3DQEHATAUBggqhkiG9w0DBwQITgu0Nu3iFPuAGD6/QzKdtrnCI5425fIUy7LtpXJGmpWDUA==","publicSettings":{"port":"3000"}}}]}"
Example HeartBeat
{
"version": 1.0,
"heartbeat" : {
"status": "ready",
"code": 0,
"Message": "Sample Handler running. Waiting for a new configuration from user."
}
}
Example Status Report:
[{"version":"1.0","timestampUTC":"2014-05-29T04:20:13Z","status":{"name":"Chef Extension Handler","operation":"chef-client-run","status":"success","code":0,"formattedMessage":{"lang":"en-US","message":"Chef-client run success"}}}]
"""
import os
import os.path
import sys
import re
import imp
import base64
import json
import tempfile
import time
from os.path import join
import Utils.WAAgentUtil
from Utils.WAAgentUtil import waagent
import logging
import logging.handlers
try:
import ConfigParser as ConfigParsers
except ImportError:
import configparser as ConfigParsers
from common import CommonVariables
import platform
import subprocess
import datetime
import Utils.Status
from MachineIdentity import MachineIdentity
import ExtensionErrorCodeHelper
import traceback
DateTimeFormat = "%Y-%m-%dT%H:%M:%SZ"
class HandlerContext:
def __init__(self,name):
self._name = name
self._version = '0.0'
return
class HandlerUtility:
telemetry_data = {}
serializable_telemetry_data = []
ExtErrorCode = ExtensionErrorCodeHelper.ExtensionErrorCodeEnum.success
SnapshotConsistency = Utils.Status.SnapshotConsistencyType.none
HealthStatusCode = -1
def __init__(self, log, error, short_name):
self._log = log
self._error = error
self.log_message = ""
self._short_name = short_name
self.patching = None
self.storageDetailsObj = None
self.partitioncount = 0
self.logging_file = None
def _get_log_prefix(self):
return '[%s-%s]' % (self._context._name, self._context._version)
def _get_current_seq_no(self, config_folder):
seq_no = -1
cur_seq_no = -1
freshest_time = None
for subdir, dirs, files in os.walk(config_folder):
for file in files:
try:
if(file.endswith('.settings')):
cur_seq_no = int(os.path.basename(file).split('.')[0])
if(freshest_time == None):
freshest_time = os.path.getmtime(join(config_folder,file))
seq_no = cur_seq_no
else:
current_file_m_time = os.path.getmtime(join(config_folder,file))
if(current_file_m_time > freshest_time):
freshest_time = current_file_m_time
seq_no = cur_seq_no
except ValueError:
continue
return seq_no
def get_last_seq(self):
if(os.path.isfile('mrseq')):
seq = waagent.GetFileContents('mrseq')
if(seq):
return int(seq)
return -1
def exit_if_same_seq(self):
current_seq = int(self._context._seq_no)
last_seq = self.get_last_seq()
if(current_seq == last_seq):
self.log("the sequence number are same, so skip, current:" + str(current_seq) + "== last:" + str(last_seq))
self.update_settings_file()
sys.exit(0)
def log(self, message,level='Info'):
try:
self.log_with_no_try_except(message, level)
except IOError:
pass
except Exception as e:
try:
errMsg='Exception in hutil.log'
self.log_with_no_try_except(errMsg, 'Warning')
except Exception as e:
pass
def log_with_no_try_except(self, message, level='Info'):
WriteLog = self.get_strvalue_from_configfile('WriteLog','True')
if (WriteLog == None or WriteLog == 'True'):
if sys.version_info > (3,):
if self.logging_file is not None:
self.log_py3(message)
else:
pass
else:
self._log(self._get_log_prefix() + message)
message = "{0} {1} {2} \n".format(str(datetime.datetime.now()) , level , message)
self.log_message = self.log_message + message
def log_py3(self, msg):
if type(msg) is not str:
msg = str(msg, errors="backslashreplace")
msg = str(datetime.datetime.now()) + " " + str(self._get_log_prefix()) + msg + "\n"
try:
with open(self.logging_file, "a+") as C :
C.write(msg)
except IOError:
pass
def error(self, message):
self._error(self._get_log_prefix() + message)
def fetch_log_message(self):
return self.log_message
def _parse_config(self, ctxt):
config = None
try:
config = json.loads(ctxt)
except:
self.error('JSON exception decoding ' + ctxt)
if config == None:
self.error("JSON error processing settings file:" + ctxt)
else:
handlerSettings = config['runtimeSettings'][0]['handlerSettings']
if 'protectedSettings' in handlerSettings and \
"protectedSettingsCertThumbprint" in handlerSettings and \
handlerSettings['protectedSettings'] is not None and \
handlerSettings["protectedSettingsCertThumbprint"] is not None:
protectedSettings = handlerSettings['protectedSettings']
thumb = handlerSettings['protectedSettingsCertThumbprint']
cert = waagent.LibDir + '/' + thumb + '.crt'
pkey = waagent.LibDir + '/' + thumb + '.prv'
f = tempfile.NamedTemporaryFile(delete=False)
f.close()
waagent.SetFileContents(f.name,config['runtimeSettings'][0]['handlerSettings']['protectedSettings'])
cleartxt = None
if 'NS-BSD' in platform.system():
# base64 tool is not available with NSBSD, use openssl
cleartxt = waagent.RunGetOutput(self.patching.openssl_path + " base64 -d -A -in " + f.name + " | " + self.patching.openssl_path + " smime -inform DER -decrypt -recip " + cert + " -inkey " + pkey)[1]
else:
cleartxt = waagent.RunGetOutput(self.patching.base64_path + " -d " + f.name + " | " + self.patching.openssl_path + " smime -inform DER -decrypt -recip " + cert + " -inkey " + pkey)[1]
jctxt = {}
try:
jctxt = json.loads(cleartxt)
except:
self.error('JSON exception decoding ' + cleartxt)
handlerSettings['protectedSettings'] = jctxt
self.log('Config decoded correctly.')
return config
def do_parse_context(self, operation):
self.operation = operation
_context = self.try_parse_context()
getWaagentPathUsed = Utils.WAAgentUtil.GetPathUsed()
if(getWaagentPathUsed == 0):
self.log("waagent old path is used")
else:
self.log("waagent new path is used")
if not _context:
self.log("maybe no new settings file found")
sys.exit(0)
return _context
def try_parse_context(self):
self._context = HandlerContext(self._short_name)
handler_env = None
config = None
ctxt = None
code = 0
try:
# get the HandlerEnvironment.json. According to the extension handler
# spec, it is always in the ./ directory
self.log('cwd is ' + os.path.realpath(os.path.curdir))
handler_env_file = './HandlerEnvironment.json'
if not os.path.isfile(handler_env_file):
self.error("Unable to locate " + handler_env_file)
return None
ctxt = waagent.GetFileContents(handler_env_file)
if ctxt == None :
self.error("Unable to read " + handler_env_file)
try:
handler_env = json.loads(ctxt)
except:
pass
if handler_env == None :
self.log("JSON error processing " + handler_env_file)
return None
if type(handler_env) == list:
handler_env = handler_env[0]
self._context._name = handler_env['name']
self._context._version = str(handler_env['version'])
self._context._config_dir = handler_env['handlerEnvironment']['configFolder']
self._context._log_dir = handler_env['handlerEnvironment']['logFolder']
self._context._log_file = os.path.join(handler_env['handlerEnvironment']['logFolder'],'extension.log')
self.logging_file=self._context._log_file
self._context._shell_log_file = os.path.join(handler_env['handlerEnvironment']['logFolder'],'shell.log')
self._change_log_file()
self._context._status_dir = handler_env['handlerEnvironment']['statusFolder']
self._context._heartbeat_file = handler_env['handlerEnvironment']['heartbeatFile']
self._context._seq_no = self._get_current_seq_no(self._context._config_dir)
if self._context._seq_no < 0:
self.error("Unable to locate a .settings file!")
return None
self._context._seq_no = str(self._context._seq_no)
self.log('sequence number is ' + self._context._seq_no)
self._context._status_file = os.path.join(self._context._status_dir, self._context._seq_no + '.status')
self._context._settings_file = os.path.join(self._context._config_dir, self._context._seq_no + '.settings')
self.log("setting file path is" + self._context._settings_file)
ctxt = None
ctxt = waagent.GetFileContents(self._context._settings_file)
if ctxt == None :
error_msg = 'Unable to read ' + self._context._settings_file + '. '
self.error(error_msg)
return None
else:
if(self.operation is not None and self.operation.lower() == "enable"):
# we should keep the current status file
self.backup_settings_status_file(self._context._seq_no)
self._context._config = self._parse_config(ctxt)
except Exception as e:
errorMsg = "Unable to parse context, error: %s, stack trace: %s" % (str(e), traceback.format_exc())
self.log(errorMsg, 'Error')
raise
return self._context
def _change_log_file(self):
self.log("Change log file to " + self._context._log_file)
waagent.LoggerInit(self._context._log_file,'/dev/stdout')
self._log = waagent.Log
self._error = waagent.Error
def save_seq(self):
self.set_last_seq(self._context._seq_no)
self.log("set most recent sequence number to " + self._context._seq_no)
def set_last_seq(self,seq):
waagent.SetFileContents('mrseq', str(seq))
'''
Sample /etc/azure/vmbackup.conf
[SnapshotThread]
seqsnapshot = 1
isanysnapshotfailed = False
UploadStatusAndLog = True
WriteLog = True
    seqsnapshot valid values (0 -> parallel snapshot, 1 -> programmatically set sequential snapshot, 2 -> customer set it for sequential snapshot)
'''
def get_value_from_configfile(self, key):
global backup_logger
value = None
configfile = '/etc/azure/vmbackup.conf'
try :
if os.path.exists(configfile):
config = ConfigParsers.ConfigParser()
config.read(configfile)
if config.has_option('SnapshotThread',key):
value = config.get('SnapshotThread',key)
except Exception as e:
pass
return value
def get_strvalue_from_configfile(self, key, default):
value = self.get_value_from_configfile(key)
if value == None or value == '':
value = default
try :
value_str = str(value)
except ValueError :
            self.log('Not able to parse the read value as string, falling back to default value', 'Warning')
value = default
return value
def get_intvalue_from_configfile(self, key, default):
value = default
value = self.get_value_from_configfile(key)
if value == None or value == '':
value = default
try :
value_int = int(value)
except ValueError :
            self.log('Not able to parse the read value as int, falling back to default value', 'Warning')
value = default
return int(value)
def set_value_to_configfile(self, key, value):
configfile = '/etc/azure/vmbackup.conf'
try :
self.log('setting ' + str(key) + 'in config file to ' + str(value) , 'Info')
if not os.path.exists(os.path.dirname(configfile)):
os.makedirs(os.path.dirname(configfile))
config = ConfigParsers.RawConfigParser()
if os.path.exists(configfile):
config.read(configfile)
if config.has_section('SnapshotThread'):
if config.has_option('SnapshotThread', key):
config.remove_option('SnapshotThread', key)
else:
config.add_section('SnapshotThread')
else:
config.add_section('SnapshotThread')
config.set('SnapshotThread', key, value)
with open(configfile, 'w') as config_file:
config.write(config_file)
except Exception as e:
errorMsg = " Unable to set config file.key is "+ key +"with error: %s, stack trace: %s" % (str(e), traceback.format_exc())
self.log(errorMsg, 'Warning')
return value
def get_machine_id(self):
machine_id_file = "/etc/azure/machine_identity_FD76C85E-406F-4CFA-8EB0-CF18B123358B"
machine_id = ""
try:
if not os.path.exists(os.path.dirname(machine_id_file)):
os.makedirs(os.path.dirname(machine_id_file))
if os.path.exists(machine_id_file):
file_pointer = open(machine_id_file, "r")
machine_id = file_pointer.readline()
file_pointer.close()
else:
mi = MachineIdentity()
machine_id = mi.stored_identity()[1:-1]
file_pointer = open(machine_id_file, "w")
file_pointer.write(machine_id)
file_pointer.close()
except Exception as e:
errMsg = 'Failed to retrieve the unique machine id with error: %s, stack trace: %s' % (str(e), traceback.format_exc())
self.log(errMsg, 'Error')
self.log("Unique Machine Id : {0}".format(machine_id))
return machine_id
def get_total_used_size(self):
try:
df = subprocess.Popen(["df" , "-k" , "--output=source,fstype,size,used,avail,pcent,target"], stdout=subprocess.PIPE)
'''
Sample output of the df command
Filesystem Type 1K-blocks Used Avail Use% Mounted on
/dev/sda2 xfs 52155392 3487652 48667740 7% /
devtmpfs devtmpfs 7170976 0 7170976 0% /dev
tmpfs tmpfs 7180624 0 7180624 0% /dev/shm
tmpfs tmpfs 7180624 760496 6420128 11% /run
tmpfs tmpfs 7180624 0 7180624 0% /sys/fs/cgroup
/dev/sda1 ext4 245679 151545 76931 67% /boot
/dev/sdb1 ext4 28767204 2142240 25140628 8% /mnt/resource
/dev/mapper/mygroup-thinv1 xfs 1041644 33520 1008124 4% /bricks/brick1
/dev/mapper/mygroup-85197c258a54493da7880206251f5e37_0 xfs 1041644 33520 1008124 4% /run/gluster/snaps/85197c258a54493da7880206251f5e37/brick2
/dev/mapper/mygroup2-thinv2 xfs 15717376 5276944 10440432 34% /tmp/test
/dev/mapper/mygroup2-63a858543baf4e40a3480a38a2f232a0_0 xfs 15717376 5276944 10440432 34% /run/gluster/snaps/63a858543baf4e40a3480a38a2f232a0/brick2
tmpfs tmpfs 1436128 0 1436128 0% /run/user/1000
//Centos72test/cifs_test cifs 52155392 4884620 47270772 10% /mnt/cifs_test2
'''
process_wait_time = 30
while(process_wait_time >0 and df.poll() is None):
time.sleep(1)
process_wait_time -= 1
output = df.stdout.read()
output = output.split("\n")
total_used = 0
total_used_network_shares = 0
total_used_gluster = 0
network_fs_types = []
for i in range(1,len(output)-1):
device, fstype, size, used, available, percent, mountpoint = output[i].split()
self.log("Device name : {0} fstype : {1} size : {2} used space in KB : {3} available space : {4} mountpoint : {5}".format(device,fstype,size,used,available,mountpoint))
if "fuse" in fstype.lower() or "nfs" in fstype.lower() or "cifs" in fstype.lower():
if fstype not in network_fs_types :
network_fs_types.append(fstype)
self.log("Not Adding as network-drive, Device name : {0} used space in KB : {1} fstype : {2}".format(device,used,fstype))
total_used_network_shares = total_used_network_shares + int(used)
elif (mountpoint.startswith('/run/gluster/snaps/')):
self.log("Not Adding Device name : {0} used space in KB : {1} mount point : {2}".format(device,used,mountpoint))
total_used_gluster = total_used_gluster + int(used)
else:
self.log("Adding Device name : {0} used space in KB : {1} mount point : {2}".format(device,used,mountpoint))
total_used = total_used + int(used) #return in KB
if not len(network_fs_types) == 0:
HandlerUtility.add_to_telemetery_data("networkFSTypeInDf",str(network_fs_types))
HandlerUtility.add_to_telemetery_data("totalUsedNetworkShare",str(total_used_network_shares))
self.log("Total used space in Bytes of network shares : {0}".format(total_used_network_shares * 1024))
if total_used_gluster !=0 :
HandlerUtility.add_to_telemetery_data("glusterFSSize",str(total_used_gluster))
self.log("Total used space in Bytes : {0}".format(total_used * 1024))
return total_used * 1024,False #Converting into Bytes
except Exception as e:
errMsg = 'Unable to fetch total used space with error: %s, stack trace: %s' % (str(e), traceback.format_exc())
self.log(errMsg)
return 0,True
def get_storage_details(self,total_size,failure_flag):
self.storageDetailsObj = Utils.Status.StorageDetails(self.partitioncount, total_size, False, failure_flag)
self.log("partition count : {0}, total used size : {1}, is storage space present : {2}, is size computation failed : {3}".format(self.storageDetailsObj.partitionCount, self.storageDetailsObj.totalUsedSizeInBytes, self.storageDetailsObj.isStoragespacePresent, self.storageDetailsObj.isSizeComputationFailed))
return self.storageDetailsObj
def SetExtErrorCode(self, extErrorCode):
if self.ExtErrorCode == ExtensionErrorCodeHelper.ExtensionErrorCodeEnum.success :
self.ExtErrorCode = extErrorCode
def SetSnapshotConsistencyType(self, snapshotConsistency):
self.SnapshotConsistency = snapshotConsistency
def SetHealthStatusCode(self, healthStatusCode):
self.HealthStatusCode = healthStatusCode
def do_status_json(self, operation, status, sub_status, status_code, message, telemetrydata, taskId, commandStartTimeUTCTicks, snapshot_info, vm_health_obj,total_size,failure_flag):
tstamp = time.strftime(DateTimeFormat, time.gmtime())
formattedMessage = Utils.Status.FormattedMessage("en-US",message)
stat_obj = Utils.Status.StatusObj(self._context._name, operation, status, sub_status, status_code, formattedMessage, telemetrydata, self.get_storage_details(total_size,failure_flag), self.get_machine_id(), taskId, commandStartTimeUTCTicks, snapshot_info, vm_health_obj)
top_stat_obj = Utils.Status.TopLevelStatus(self._context._version, tstamp, stat_obj)
return top_stat_obj
def get_extension_version(self):
try:
cur_dir = os.getcwd()
cur_extension = cur_dir.split("/")[-1]
extension_version = cur_extension.split("-")[-1]
return extension_version
except Exception as e:
errMsg = 'Failed to retrieve the Extension version with error: %s, stack trace: %s' % (str(e), traceback.format_exc())
self.log(errMsg)
extension_version="Unknown"
return extension_version
def get_wala_version(self):
try:
file_pointer = open('/var/log/waagent.log','r')
waagent_version = ''
for line in file_pointer:
if 'Azure Linux Agent Version' in line:
waagent_version = line.split(':')[-1]
if waagent_version[:-1]=="": #for removing the trailing '\n' character
waagent_version = self.get_wala_version_from_command()
return waagent_version
else:
waagent_version = waagent_version[:-1].split("-")[-1] #getting only version number
return waagent_version
except Exception as e:
errMsg = 'Failed to retrieve the wala version with error: %s, stack trace: %s' % (str(e), traceback.format_exc())
self.log(errMsg)
waagent_version="Unknown"
return waagent_version
def get_wala_version_from_command(self):
try:
cur_dir = os.getcwd()
os.chdir("..")
p = subprocess.Popen(['/usr/sbin/waagent', '-version'], stdout=subprocess.PIPE)
process_wait_time = 30
while(process_wait_time > 0 and p.poll() is None):
time.sleep(1)
process_wait_time -= 1
out = p.stdout.read()
out = str(out)
if "Goal state agent: " in out:
waagent_version = out.split("Goal state agent: ")[1].strip()
else:
out = out.split(" ")
waagent = out[0]
waagent_version = waagent.split("-")[-1] #getting only version number
os.chdir(cur_dir)
return waagent_version
except Exception as e:
errMsg = 'Failed to retrieve the wala version with error: %s, stack trace: %s' % (str(e), traceback.format_exc())
self.log(errMsg)
waagent_version="Unknown"
return waagent_version
def get_dist_info(self):
try:
if 'FreeBSD' in platform.system():
release = re.sub('\-.*\Z', '', str(platform.release()))
return "FreeBSD",release
if 'NS-BSD' in platform.system():
release = re.sub('\-.*\Z', '', str(platform.release()))
return "NS-BSD", release
if 'linux_distribution' in dir(platform):
distinfo = list(platform.linux_distribution(full_distribution_name=0))
# remove trailing whitespace in distro name
if(distinfo[0] == ''):
osfile= open("/etc/os-release", "r")
for line in osfile:
lists=str(line).split("=")
if(lists[0]== "NAME"):
distroname = lists[1].split("\"")
if(lists[0]=="VERSION"):
distroversion = lists[1].split("\"")
osfile.close()
return distroname[1]+"-"+distroversion[1],platform.release()
distinfo[0] = distinfo[0].strip()
return distinfo[0]+"-"+distinfo[1],platform.release()
else:
distinfo = platform.dist()
return distinfo[0]+"-"+distinfo[1],platform.release()
except Exception as e:
errMsg = 'Failed to retrieve the distinfo with error: %s, stack trace: %s' % (str(e), traceback.format_exc())
self.log(errMsg)
return "Unkonwn","Unkonwn"
def substat_new_entry(self,sub_status,code,name,status,formattedmessage):
sub_status_obj = Utils.Status.SubstatusObj(code,name,status,formattedmessage)
sub_status.append(sub_status_obj)
return sub_status
def timedelta_total_seconds(self, delta):
if not hasattr(datetime.timedelta, 'total_seconds'):
return delta.days * 86400 + delta.seconds
else:
return delta.total_seconds()
@staticmethod
def add_to_telemetery_data(key,value):
HandlerUtility.telemetry_data[key]=value
def add_telemetry_data(self):
os_version,kernel_version = self.get_dist_info()
HandlerUtility.add_to_telemetery_data("guestAgentVersion",self.get_wala_version_from_command())
HandlerUtility.add_to_telemetery_data("extensionVersion",self.get_extension_version())
HandlerUtility.add_to_telemetery_data("osVersion",os_version)
HandlerUtility.add_to_telemetery_data("kernelVersion",kernel_version)
def convert_telemetery_data_to_bcm_serializable_format(self):
HandlerUtility.serializable_telemetry_data = []
for k,v in HandlerUtility.telemetry_data.items():
each_telemetry_data = {}
each_telemetry_data["Value"] = v
each_telemetry_data["Key"] = k
HandlerUtility.serializable_telemetry_data.append(each_telemetry_data)
def do_status_report(self, operation, status, status_code, message, taskId = None, commandStartTimeUTCTicks = None, snapshot_info = None,total_size = 0,failure_flag = True ):
self.log("{0},{1},{2},{3}".format(operation, status, status_code, message))
sub_stat = []
stat_rept = []
self.add_telemetry_data()
snapshotTelemetry = ""
if CommonVariables.snapshotCreator in HandlerUtility.telemetry_data.keys():
snapshotTelemetry = "{0}{1}={2}, ".format(snapshotTelemetry , CommonVariables.snapshotCreator , HandlerUtility.telemetry_data[CommonVariables.snapshotCreator])
if CommonVariables.hostStatusCodePreSnapshot in HandlerUtility.telemetry_data.keys():
snapshotTelemetry = "{0}{1}={2}, ".format(snapshotTelemetry , CommonVariables.hostStatusCodePreSnapshot , HandlerUtility.telemetry_data[CommonVariables.hostStatusCodePreSnapshot])
if CommonVariables.hostStatusCodeDoSnapshot in HandlerUtility.telemetry_data.keys():
snapshotTelemetry = "{0}{1}={2}, ".format(snapshotTelemetry , CommonVariables.hostStatusCodeDoSnapshot , HandlerUtility.telemetry_data[CommonVariables.hostStatusCodeDoSnapshot])
if CommonVariables.statusBlobUploadError in HandlerUtility.telemetry_data.keys():
message = "{0} {1}={2}, ".format(message , CommonVariables.statusBlobUploadError , HandlerUtility.telemetry_data[CommonVariables.statusBlobUploadError])
message = message + snapshotTelemetry
vm_health_obj = Utils.Status.VmHealthInfoObj(ExtensionErrorCodeHelper.ExtensionErrorCodeHelper.ExtensionErrorCodeDict[self.ExtErrorCode], int(self.ExtErrorCode))
consistencyTypeStr = CommonVariables.consistency_crashConsistent
if (self.SnapshotConsistency != Utils.Status.SnapshotConsistencyType.crashConsistent):
if (status_code == CommonVariables.success_appconsistent):
self.SnapshotConsistency = Utils.Status.SnapshotConsistencyType.applicationConsistent
consistencyTypeStr = CommonVariables.consistency_applicationConsistent
elif (status_code == CommonVariables.success):
self.SnapshotConsistency = Utils.Status.SnapshotConsistencyType.fileSystemConsistent
consistencyTypeStr = CommonVariables.consistency_fileSystemConsistent
else:
self.SnapshotConsistency = Utils.Status.SnapshotConsistencyType.none
consistencyTypeStr = CommonVariables.consistency_none
HandlerUtility.add_to_telemetery_data("consistencyType", consistencyTypeStr)
extensionResponseObj = Utils.Status.ExtensionResponse(message, self.SnapshotConsistency, "")
message = str(json.dumps(extensionResponseObj, cls = ComplexEncoder))
self.convert_telemetery_data_to_bcm_serializable_format()
stat_rept = self.do_status_json(operation, status, sub_stat, status_code, message, HandlerUtility.serializable_telemetry_data, taskId, commandStartTimeUTCTicks, snapshot_info, vm_health_obj, total_size,failure_flag)
time_delta = datetime.datetime.utcnow() - datetime.datetime(1970, 1, 1)
time_span = self.timedelta_total_seconds(time_delta) * 1000
date_place_holder = 'e2794170-c93d-4178-a8da-9bc7fd91ecc0'
stat_rept.timestampUTC = date_place_holder
date_string = r'\/Date(' + str((int)(time_span)) + r')\/'
stat_rept = "[" + json.dumps(stat_rept, cls = ComplexEncoder) + "]"
stat_rept = stat_rept.replace(date_place_holder,date_string)
# Add Status as sub-status for Status to be written on Status-File
sub_stat = self.substat_new_entry(sub_stat,'0',stat_rept,'success',None)
if self.get_public_settings()[CommonVariables.vmType].lower() == CommonVariables.VmTypeV2.lower() and CommonVariables.isTerminalStatus(status) :
status = CommonVariables.status_success
stat_rept_file = self.do_status_json(operation, status, sub_stat, status_code, message, None, taskId, commandStartTimeUTCTicks, None, None,total_size,failure_flag)
stat_rept_file = "[" + json.dumps(stat_rept_file, cls = ComplexEncoder) + "]"
# rename all other status files, or the WALA would report the wrong
# status file.
# because the wala choose the status file with the highest sequence
# number to report.
return stat_rept, stat_rept_file
def write_to_status_file(self, stat_rept_file):
try:
if self._context._status_file:
with open(self._context._status_file,'w+') as f:
f.write(stat_rept_file)
except Exception as e:
errMsg = 'Status file creation failed with error: %s, stack trace: %s' % (str(e), traceback.format_exc())
self.log(errMsg)
def is_status_file_exists(self):
try:
if os.path.exists(self._context._status_file):
return True
else:
return False
except Exception as e:
self.log("exception is getting status file" + traceback.format_exc())
return False
def backup_settings_status_file(self, _seq_no):
self.log("current seq no is " + _seq_no)
for subdir, dirs, files in os.walk(self._context._config_dir):
for file in files:
try:
if(file.endswith('.settings') and file != (_seq_no + ".settings")):
new_file_name = file.replace(".","_")
os.rename(join(self._context._config_dir,file), join(self._context._config_dir,new_file_name))
except Exception as e:
self.log("failed to rename the status file.")
for subdir, dirs, files in os.walk(self._context._status_dir):
for file in files:
try:
if(file.endswith('.status') and file != (_seq_no + ".status")):
new_file_name = file.replace(".","_")
os.rename(join(self._context._status_dir,file), join(self._context._status_dir, new_file_name))
except Exception as e:
self.log("failed to rename the status file.")
def do_exit(self, exit_code, operation,status,code,message):
try:
HandlerUtility.add_to_telemetery_data("extErrorCode", str(ExtensionErrorCodeHelper.ExtensionErrorCodeHelper.ExtensionErrorCodeDict[self.ExtErrorCode]))
self.do_status_report(operation, status,code,message)
except Exception as e:
self.log("Can't update status: " + str(e))
sys.exit(exit_code)
def get_handler_settings(self):
return self._context._config['runtimeSettings'][0]['handlerSettings']
def get_protected_settings(self):
return self.get_handler_settings().get('protectedSettings')
def get_public_settings(self):
return self.get_handler_settings().get('publicSettings')
def is_prev_in_transition(self):
curr_seq = self.get_last_seq()
last_seq = curr_seq - 1
if last_seq >= 0:
self.log("previous status and path: " + str(last_seq) + " " + str(self._context._status_dir))
status_file_prev = os.path.join(self._context._status_dir, str(last_seq) + '_status')
if os.path.isfile(status_file_prev) and os.access(status_file_prev, os.R_OK):
searchfile = open(status_file_prev, "r")
for line in searchfile:
if "Transition" in line:
self.log("transitioning found in the previous status file")
searchfile.close()
return True
searchfile.close()
return False
def get_prev_log(self):
with open(self._context._log_file, "r") as f:
lines = f.readlines()
if(len(lines) > 300):
lines = lines[-300:]
return ''.join(str(x) for x in lines)
else:
return ''.join(str(x) for x in lines)
def get_shell_script_log(self):
lines = ""
try:
with open(self._context._shell_log_file, "r") as f:
lines = f.readlines()
if(len(lines) > 10):
lines = lines[-10:]
return ''.join(str(x) for x in lines)
except Exception as e:
self.log("Can't receive shell log file: " + str(e))
return lines
def update_settings_file(self):
if(self._context._config['runtimeSettings'][0]['handlerSettings'].get('protectedSettings') != None):
del self._context._config['runtimeSettings'][0]['handlerSettings']['protectedSettings']
self.log("removing the protected settings")
waagent.SetFileContents(self._context._settings_file,json.dumps(self._context._config))
def UriHasSpecialCharacters(self, blobs):
uriHasSpecialCharacters = False
if blobs is not None:
for blob in blobs:
blobUri = str(blob.split("?")[0])
if '%' in blobUri:
self.log(blobUri + " URI has special characters")
uriHasSpecialCharacters = True
return uriHasSpecialCharacters
class ComplexEncoder(json.JSONEncoder):
def default(self, obj):
if hasattr(obj,'convertToDictionary'):
return obj.convertToDictionary()
else:
return obj.__dict__
|
py | 1a515c6692cec976409fb224f4dafa5b084ffe2d | """
Contains all the config for the Flask App
"""
import os
class BaseConfig:
"""
Base configuration
"""
TESTING = False
SQLALCHEMY_TRACK_MODIFICATIONS = False
SECRET_KEY = os.environ.get('SECRET_KEY')
DEBUG_TB_ENABLED = False
DEBUG_TB_INTERCEPT_REDIRECTS = False
BCRYPT_LOG_ROUNDS = 13
TOKEN_EXPIRATION_DAYS = 1
TOKEN_EXPIRATION_SECONDS = 0
class DevelopmentConfig(BaseConfig):
"""
Development configuration
"""
SQLALCHEMY_DATABASE_URI = os.environ.get("DATABASE_URL")
DEBUG_TB_ENABLED = True
BCRYPT_LOG_ROUNDS = 4
class TestingConfig(BaseConfig):
"""
Testing Configuration
"""
TESTING = True
SQLALCHEMY_DATABASE_URI = os.environ.get("DATABASE_TEST_URL")
DEBUG_TB_ENABLED = True
BCRYPT_LOG_ROUNDS = 4
TOKEN_EXPIRATION_DAYS = 0
TOKEN_EXPIRATION_SECONDS = 3
class ProductionConfig(BaseConfig):
"""
Production configuration
"""
SQLALCHEMY_DATABASE_URI = os.environ.get("DATABASE_URL")
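# A minimal usage sketch, not part of the original module: how a Flask app factory
# would typically select one of the classes above. The module path "project.config"
# and the APP_SETTINGS environment variable are assumptions for illustration only.
# import os
# from flask import Flask
# def create_app():
#     app = Flask(__name__)
#     app.config.from_object(os.environ.get("APP_SETTINGS", "project.config.DevelopmentConfig"))
#     return app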
|
py | 1a515e9634dbf020ec90926f8f8fa0f15a427763 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.keras import backend as K
from tensorflow.keras import initializers, regularizers, constraints
from tensorflow.keras.layers import Layer, InputSpec
from tensorflow.python.keras.utils import conv_utils
from tensorflow_addons.layers import InstanceNormalization, SpectralNormalization
import tensorflow as tf
from tensorflow.keras.layers import *
from tensorflow.keras.models import *
from tensorflow.keras.optimizers import *
from tensorflow.keras.initializers import *
from tensorflow.keras.utils import to_categorical
import matplotlib.pyplot as plt
import os
import numpy as np
import time
from PIL import Image
from math import log2
import random
from datagen import dataGenerator, printProgressBar
from models import makeGen, makeDisc
class GAN:
def __init__(self, data, test_data, image_size, model_name = "StarGAN", channels=16, size="normal", verbose=False, batch_size = 6, learning_rate = 0.0001):
#data: A list of folder names inside of the data folder to generate data from for training.
#test_data: A list of folder names inside of the data folder to generate data for testing. Can be the same as data. Should be different so that you can more easily see collapse.
        #image_size: A tuple or int (if square) giving the desired size of the images for training. The data generator will resize the images to this size, and the model will train on this size.
#model_name: A name for the model. Used for the folder in which results and checkpoints are saved.
#channels: The number of channels to be used at each step in the model before multiplication. Recommended 16.
#verbose: Whether or not the data generators will create output showing their status while generating the data.
#batch_Size: The batch size for the model.
#learning_rate: The learning rate for the model. The discriminator's will be double this value.
self.MODELNAME = model_name
self.CKPT = os.path.dirname(os.path.realpath(__file__)) + "\\" + self.MODELNAME + "\\checkpoints\\"
self.imagedir = os.path.dirname(os.path.realpath(__file__)) + "\\" + self.MODELNAME
self.verbose = verbose
#Converts an integer into a tuple, ensures an integer or tuple is given.
if((type(image_size) is not tuple)):
if(type(image_size) is int):
self.image_size = (image_size, image_size)
else:
print("Expected tuple (x,y) or int for image size.")
exit()
else:
self.image_size = image_size
#Prints an error message if the dimensions are incompatible with the training of the model.
if((self.image_size[0] % 8 != 0) or (self.image_size[1] % 8 != 0)):
print("Image dimensions must be divisible by 8 for the model to train! Please adjust your image sizes.")
exit()
#Try making each directory, if it fails it generally means the folder already exists, so continue regardless.
try:
os.makedirs(self.imagedir)
except OSError as error:
pass
try:
os.makedirs(self.CKPT)
except OSError as error:
pass
#Create both the training and testing datagenerators using a list of strings
#containing the folder names inside of the data folder.
#The first string will have the first label, and so on.
self.datagens = []
for item in data:
self.datagens.append(dataGenerator(item, self.image_size, verbose = self.verbose, resize=True))
self.testData= []
for item in test_data:
self.testData.append(dataGenerator(item, self.image_size, verbose = self.verbose, resize=True))
#Determine the number of labels in the model.
self.NUMLABELS = len(self.datagens)
#Make the generator and discriminator as specified in models.py either normal or large sized.
self.cha = channels
self.BATCH_SIZE = batch_size
self.gen = makeGen(self.cha, self.NUMLABELS, self.image_size)
self.disc = makeDisc(self.cha, self.NUMLABELS, self.image_size)
#Setup the optimizers
        self.discOpt = tf.keras.optimizers.Adam(learning_rate=learning_rate*2, beta_1=0.0, beta_2=0.99)
        self.genOpt = tf.keras.optimizers.Adam(learning_rate=learning_rate, beta_1=0.0, beta_2=0.99)
@tf.function
def trainStep(self, images, labels):
#function to train the model.
def grad_loss(samples, output, k=1.0):
#An implementation of a two-sided local gradient penalty.
#Helps to smooth out gradients in the discriminator.
            #Not strictly necessary; used to improve the stability of the discriminator.
init_grad = tf.gradients(output, samples)[0]
squared_grad = tf.square(init_grad)
sum_grad = tf.sqrt(K.sum(squared_grad, axis=[1,2,3]))
penalty = tf.maximum(sum_grad-k, tf.keras.backend.zeros_like(sum_grad))
return tf.reduce_mean(penalty)
with tf.GradientTape() as genTape, tf.GradientTape() as discTape:
#Running data through models
generatedImage = self.gen([images,labels[1]],training=True)
restoredImage = self.gen([generatedImage,labels[0]], training=True)
genfakeOut = K.sum(self.disc([generatedImage],training=True) * labels[1], axis=1) #Multiply by label due to multi-task discriminator.
discRealOut = K.sum(self.disc([images],training=True) * labels[0], axis=1) #Multiply by label due to multi-task discriminator.
#Loss functions
cycleLoss = K.mean(K.abs(images - restoredImage)) * 5
genLoss = K.mean(genfakeOut) + cycleLoss #Due to multi-task discriminator, label comparison and real/fake are done in one with genfakeout.
            discLoss = K.mean(K.relu(1.0 - genfakeOut) + K.relu(1.0 + discRealOut)) + K.mean(grad_loss(images, discRealOut)) * 10 #Hinge loss plus gradient penalty.
#Calculate and apply gradients.
genGradients = genTape.gradient(genLoss,self.gen.trainable_variables)
discGradients = discTape.gradient(discLoss,self.disc.trainable_variables)
self.genOpt.apply_gradients(zip(genGradients,self.gen.trainable_variables))
self.discOpt.apply_gradients(zip(discGradients,self.disc.trainable_variables))
return (genLoss, discLoss)
def labelMaker(self, index, maxSize=None, batch=None):
#Creates a one hot vector for the label of the image.
#Index: the index for where the value will be one.
#maxSize: typically the number of labels. How long to make the vector.
#batch: the batch size, or how many labels to produce.
if maxSize == None:
maxSize = self.NUMLABELS
if batch == None:
batch = self.BATCH_SIZE
labels = np.ones([batch]) * index
return to_categorical(labels, num_classes = maxSize)
def train(self, steps = 100000, curStep = 1):
#The train function repeats the training step to train the model.
#steps: The number of steps to train.
#curStep: The step to begin training on. (e.g. In case you load weights from 50000 steps and want to retrain from 50000 steps onward.)
#Setup some variables to compute train time and store loss values.
genLoss = 0
discLoss = 0
trainTime = time.time()
start = time.time()
for step in range(curStep,steps+1):
#Randomly select a source and target label and batch.
randInt = random.randint(0, self.NUMLABELS-1)
batch = self.datagens[randInt].get_batch(self.BATCH_SIZE)
smalllabelsReal = self.labelMaker(randInt)
#Selects a class to convert to.
randInt = (random.randint(1,self.NUMLABELS-1)+randInt) % self.NUMLABELS
smalllabelsNew = self.labelMaker(randInt)
labels = [smalllabelsReal,smalllabelsNew]
stepGenLoss, stepDiscLoss = self.trainStep(batch, labels)
#Print the progress bar so that you may see how far along the model is.
printProgressBar(step % 1000, 999, decimals=2)
#Save variables to compute the average.
genLoss += stepGenLoss
discLoss += stepDiscLoss
if (step) % 1000 == 0:
#At every 1000 steps, generate an image containing all possible conversions, and show the average loss values for the previous 1000 steps.
self.makeImages(step)
print("At step {}. Time for 1000 steps was {} seconds".format(step,time.time()-start))
print("Average generator loss: {}, Average discriminator loss: {}".format((genLoss / 1000.0),(discLoss / 1000.0)))
genLoss = 0
discLoss = 0
start = time.time()
if (step) % 10000 == 0:
#At every 10000 steps, save the weights of the generator and discriminator so they may be loaded.
self.gen.save_weights(self.CKPT + f"{step}GEN.ckpt")
self.disc.save_weights(self.CKPT + f"{step}DISC.ckpt")
print("Gan saved!")
#At the end of training, show the total amount of time it took.
print(f"Time for training was {(time.time() - trainTime) / 60.0} minutes")
return
def makeImages(self, step, numEx = 5):
#A function to create an array of images. The first row is real, the second row is converted to the first class, the third row is converted to the second class, and so on.
#Step: A only used in naming. Typically used to show what step the image was generated on.
imageRows = []
#For each class, translate to each other class.
#Original images will be on the top row in order of label.
#Every row beneath will be a different label.
for i in range(self.NUMLABELS):
batch = self.testData[i].get_batch(numEx)
#Place all of the original images on one row.
rowstart = batch[0]
for k in range(1,numEx):
rowstart = np.append(rowstart, batch[k], 1)
for j in range(self.NUMLABELS):
results = self.gen([batch, self.labelMaker(j, self.NUMLABELS, numEx)], training=False)
if i == j: #Don't convert to your own class! Instead show a black box.
results = np.zeros_like(results)
rowAdd = results[0]
for k in range(1,numEx):
rowAdd = np.append(rowAdd, results[k], 1)
rowstart = np.append(rowstart, rowAdd, 0)
imageRows.append(rowstart)
output = imageRows[0]
for i in range(1,len(imageRows)):
            output = np.append(output, imageRows[i], 1) #Concatenate each class's block side by side; originals remain on the top row.
output = np.clip(np.squeeze(output), 0.0, 1.0)
plt.imsave(self.imagedir + f"\\{step}.png", output)
def loadSave(self, step):
#A function to load the weights of a trained model. Must be the same size and channels as initialized model.
#Step: What step to laod from.
self.gen.load_weights(self.CKPT + f"{step}GEN.ckpt")
self.disc.load_weights(self.CKPT + f"{step}DISC.ckpt")
def translate(self, image, target):
#Converts a single image to the target class. Returns the translated image.
#image: an array of (imageSize1,imageSize2,channels).
#target: an index for what class to convert to (starting at 0)
image = np.expand_dims(image, 0)
label = self.labelMaker(target, batch=1)
return np.squeeze(self.gen([image, label], training=False), axis=0)
#An example of how you could run the model using class folders "/data/classA_folder/", etc image size 256, model name "StarGAN", channel coefficient of 16, and normal size.
# if __name__ == "__main__":
# data = ["classA_folder", "classB_folder", "classC_folder"] #In this case, class A has an index of 0, B 1, C 2.
# testdata = ["classA_testfolder", "classB_testfolder", "classC_testfolder"]
# starGAN = GAN(data, testdata, 128, "StarGAN", 24)
# starGAN.makeImages(-999)
# starGAN.train(200000)
# exit()
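# A further hedged sketch, also not from the original project: restoring a checkpoint and
# translating a single image with loadSave/translate. The step number, file name and target
# class index are placeholders, and the image is assumed to already match the configured image_size.
# starGAN.loadSave(200000)
# img = plt.imread("example.png")[:, :, :3] # keep RGB channels only
# translated = starGAN.translate(img, 1) # convert to class index 1 (classB in the example above)
# plt.imsave("translated.png", np.clip(translated, 0.0, 1.0))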
|
py | 1a515eb72e0a9db57520e6963df3af843799f2ea | import numpy as np
from keras_cn_parser_and_analyzer.library.classifiers.cnn_lstm import WordVecCnnLstm
from keras_cn_parser_and_analyzer.library.utility.simple_data_loader import load_text_label_pairs
from keras_cn_parser_and_analyzer.library.utility.text_fit import fit_text
def main():
random_state = 42
np.random.seed(random_state)
output_dir_path = './models'
data_file_path = '../data/training_data'
text_data_model = fit_text(data_file_path, label_type='line_label')
text_label_pairs = load_text_label_pairs(data_file_path, label_type='line_label')
classifier = WordVecCnnLstm()
batch_size = 64
epochs = 20
history = classifier.fit(text_data_model=text_data_model,
model_dir_path=output_dir_path,
text_label_pairs=text_label_pairs,
batch_size=batch_size, epochs=epochs,
test_size=0.3,
random_state=random_state)
if __name__ == '__main__':
main()
|
py | 1a515ec9344220e518ea9d8e8660d6851503bd2a | # https://www.hackerrank.com/challenges/xor-se/problem
# An array, A, is defined as follows:
# A[0] = 0
# A[x] = A[x-1]^x
# for x > 0, where ^ is the symbol for XOR
# You will be given a left and right index, l and r. You must determine the XOR sum of the segment of A as
# A[l]^A[l+1]...^A[r].
# For example, A = [0,1,3,0,4,1,7,0,8]. The segment from l=1 to r=4 sums to 1^3^0^4 = 6.
# Print the answer to each question.
# Function Description
# Complete the xorSequence function in the editor below. It should return the integer value calculated.
# xorSequence has the following parameter(s):
# l: the lower index of the range to sum
# r: the higher index of the range to sum
# Input Format
# The first line contains an integer q, the number of questions.
# Each of the next q lines contains two space-separated integers, l and r, the inclusive left and right indexes of the
# segment to query.
# Constraints
# 1 <= q <= 10^5
# 1 < l[i] < r[i] < 10^15
# Output Format
# On a new line for each test case, print the XOR-sum of A's elements in the inclusive range between indices l and r.
# Sample Input 0
# 3
# 2 4
# 2 8
# 5 9
# Sample Output 0
# 7
# 9
# 15
# Explanation 0
# The beginning of our array looks like this: [0,1,3,0,4,1,7,0,8,...]
import math
import os
import random
import re
import sys
# Complete the xorSequence function below.
def xorSequence(l, r):
def A(x):
a = x%8
if(a == 0 or a == 1):
return x
if(a == 2 or a == 3):
return 2
if(a == 4 or a == 5):
return x+2
if(a == 6 or a == 7):
            return 0
    ans = A(l-1)^A(r)
    return ans
if __name__ == '__main__':
fptr = open(os.environ['OUTPUT_PATH'], 'w')
q = int(input())
for q_itr in range(q):
lr = input().split()
l = int(lr[0])
r = int(lr[1])
result = xorSequence(l, r)
fptr.write(str(result) + '\n')
fptr.close()
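# A small self-check sketch, not part of the submission: brute-force the prefix XOR
# G(x) = A[0]^A[1]^...^A[x] and compare it with the mod-8 closed form used in A(x) above.
# def brute_prefix(n):
#     seq, acc, pref = 0, 0, []
#     for x in range(n):
#         seq = seq ^ x # A[x] = A[x-1] ^ x (and A[0] = 0)
#         acc = acc ^ seq # running XOR of A[0..x]
#         pref.append(acc)
#     return pref
# def closed_form(x):
#     a = x % 8
#     return x if a in (0, 1) else 2 if a in (2, 3) else x + 2 if a in (4, 5) else 0
# assert all(brute_prefix(500)[x] == closed_form(x) for x in range(500))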
|
py | 1a515ecbe268dc4c4da7a3313541eba01bddd073 | from . import DATABASE, log
import os
from flask import Blueprint, render_template, flash
from flask_login import login_required, current_user
views = Blueprint("views", __name__)
@views.route("/")
@views.route("/home")
@login_required
def home():
log.debug("Received a GET request at `/home`")
return render_template("home.html", user=current_user)
@views.route("/files/username=<username>")
@login_required
def get_files(username: str):
# check whether the folder already exists
files = {}
if os.path.isdir(f"{DATABASE}\\{username}"):
os.chdir(f"{DATABASE}\\{username}")
files = {i: f"{DATABASE}\\{username}\\{i}" for i in os.listdir()}
else:
# creating the folder
os.mkdir(f"{DATABASE}/{username}")
return files
@views.route("/get_file/filename=<filename>&username=<username>")
@login_required
def get_specific_file(filename: str, username: str):
if os.path.isdir(f"{DATABASE}\\{username}"):
os.chdir(f"{DATABASE}\\{username}")
if os.path.isfile(filename):
with open(filename, "r") as file:
return file.read()
    else:
        flash("File not found!", category="error")
    return ""
|
py | 1a51606879e13ba77a0d88ef2d3e27c1c546e6b3 | from django.contrib import admin
from django.urls import path, include
urlpatterns = [
path('admin/', admin.site.urls),
path('', include('userauth.urls')),
]
|
py | 1a516089e76e2ed2431f2ee9a70acf667f1c74b1 | """MLP Merge Model.
A model composed only of a multi-layer perceptron (MLP), which maps
real-valued inputs to real-valued outputs. This model is called an
MLP Merge Model because it takes two inputs and concatenates the second
input with the layer at a specified index. It can be merged with any layer
from the input layer to the last hidden layer.
"""
import tensorflow as tf
from garage.tf.models.mlp import mlp
from garage.tf.models.model import Model
class MLPMergeModel(Model):
"""MLP Merge Model.
Args:
output_dim (int): Dimension of the network output.
name (str): Model name, also the variable scope.
hidden_sizes (list[int]): Output dimension of dense layer(s).
For example, (32, 32) means this MLP consists of two
hidden layers, each with 32 hidden units.
concat_layer (int): The index of layers at which to concatenate
input_var2 with the network. The indexing works like standard
python list indexing. Index of 0 refers to the input layer
(input_var1) while an index of -1 points to the last hidden
layer. Default parameter points to second layer from the end.
hidden_nonlinearity (callable): Activation function for intermediate
dense layer(s). It should return a tf.Tensor. Set it to
None to maintain a linear activation.
hidden_w_init (callable): Initializer function for the weight
of intermediate dense layer(s). The function should return a
tf.Tensor.
hidden_b_init (callable): Initializer function for the bias
of intermediate dense layer(s). The function should return a
tf.Tensor.
output_nonlinearity (callable): Activation function for output dense
layer. It should return a tf.Tensor. Set it to None to
maintain a linear activation.
output_w_init (callable): Initializer function for the weight
of output dense layer(s). The function should return a
tf.Tensor.
output_b_init (callable): Initializer function for the bias
of output dense layer(s). The function should return a
tf.Tensor.
layer_normalization (bool): Bool for using layer normalization or not.
"""
def __init__(self,
output_dim,
name='MLPMergeModel',
hidden_sizes=(32, 32),
concat_layer=-2,
hidden_nonlinearity=tf.nn.relu,
hidden_w_init=tf.initializers.glorot_uniform(),
hidden_b_init=tf.zeros_initializer(),
output_nonlinearity=None,
output_w_init=tf.initializers.glorot_uniform(),
output_b_init=tf.zeros_initializer(),
layer_normalization=False):
super().__init__(name)
self._output_dim = output_dim
self._hidden_sizes = hidden_sizes
self._concat_layer = concat_layer
self._hidden_nonlinearity = hidden_nonlinearity
self._hidden_w_init = hidden_w_init
self._hidden_b_init = hidden_b_init
self._output_nonlinearity = output_nonlinearity
self._output_w_init = output_w_init
self._output_b_init = output_b_init
self._layer_normalization = layer_normalization
def network_input_spec(self):
"""Network input spec.
Return:
list[str]: List of key(str) for the network outputs.
"""
return ['input_var1', 'input_var2']
# pylint: disable=arguments-differ
def _build(self, state_input, action_input, name=None):
"""Build model given input placeholder(s).
Args:
state_input (tf.Tensor): Tensor input for state.
action_input (tf.Tensor): Tensor input for action.
name (str): Inner model name, also the variable scope of the
inner model, if exist. One example is
garage.tf.models.Sequential.
Return:
tf.Tensor: Tensor output of the model.
"""
del name
return mlp(input_var=state_input,
output_dim=self._output_dim,
hidden_sizes=self._hidden_sizes,
input_var2=action_input,
concat_layer=self._concat_layer,
name='mlp_concat',
hidden_nonlinearity=self._hidden_nonlinearity,
hidden_w_init=self._hidden_w_init,
hidden_b_init=self._hidden_b_init,
output_nonlinearity=self._output_nonlinearity,
output_w_init=self._output_w_init,
output_b_init=self._output_b_init,
layer_normalization=self._layer_normalization)
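# A hedged usage sketch, not part of the original module: constructing the model for a
# Q-function-style input pair, following the way garage's continuous Q-functions use
# MLPMergeModel. The observation/action dimensions, the v1 placeholder setup and the
# .build(...).outputs access are assumptions for illustration only.
# import tensorflow as tf
# tf.compat.v1.disable_eager_execution()
# obs_ph = tf.compat.v1.placeholder(tf.float32, shape=(None, 4), name='obs')
# act_ph = tf.compat.v1.placeholder(tf.float32, shape=(None, 2), name='act')
# qf_model = MLPMergeModel(output_dim=1, hidden_sizes=(32, 32), concat_layer=-2)
# q_value = qf_model.build(obs_ph, act_ph).outputs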
|
py | 1a5161061c03cdea64f46cba1c246a8f60cb521a | """A setuptools based setup module.
See:
https://packaging.python.org/en/latest/distributing.html
https://github.com/pypa/sampleproject
"""
# Always prefer setuptools over distutils
from setuptools import setup, find_packages
# To use a consistent encoding
from codecs import open
from os import path
import os
import sys
# Which CoAPthon version to use
if sys.version_info.major == 2:
COAPTHON="CoAPThon"
ZEROCONF="zeroconf2"
else:
COAPTHON="CoAPThon3"
ZEROCONF="zeroconf"
here = path.abspath(path.dirname(__file__))
long_description = """
Control program (and module) for iotsa devices. Allows finding of iotsa devices on the local
WiFi network or in the physical vicinity, inspecting and changing configuration
of those devices and uploading new firmware over the air.
"""
# Get the version number from the iotsa module
VERSION="2.1"
setup(
name='iotsaControl',
# Versions should comply with PEP440. For a discussion on single-sourcing
# the version across setup.py and the project code, see
# https://packaging.python.org/en/latest/single_source_version.html
version=VERSION,
description='Control iotsa devices',
long_description=long_description,
# The project's main homepage.
url='http://www.cwi.nl',
# Author details
author='Jack Jansen',
author_email='[email protected]',
# Choose your license
license='MIT',
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
'Development Status :: 3 - Alpha',
# Indicate who your project is intended for
'Intended Audience :: End Users/Desktop',
'Intended Audience :: Developers',
'Topic :: Communications',
'Topic :: Home Automation',
        'Topic :: Internet',
# Pick your license as you wish (should match "license" above)
'License :: OSI Approved :: MIT License',
# Specify the Python versions you support here. In particular, ensure
# that you indicate whether you support Python 2, Python 3 or both.
'Programming Language :: Python :: 2.7',
],
# What does your project relate to?
#keywords='sample setuptools development',
# You can just specify the packages manually here if your project is
# simple. Or you can use find_packages().
packages=["iotsaControl"],
# List run-time dependencies here. These will be installed by pip when
# your project is installed. For an analysis of "install_requires" vs pip's
# requirements files see:
# https://packaging.python.org/en/latest/requirements.html
install_requires=["future", "requests", "esptool", ZEROCONF, COAPTHON],
# List additional groups of dependencies here (e.g. development
# dependencies). You can install these using the following syntax,
# for example:
# $ pip install -e .[dev,test]
#extras_require={
# 'dev': ['check-manifest'],
# 'test': ['coverage'],
#},
# If there are data files included in your packages that need to be
# installed, specify them here. If using Python 2.6 or less, then these
# have to be included in MANIFEST.in as well.
#package_data=package_data,
#include_package_data=True,
# Although 'package_data' is the preferred approach, in some case you may
# need to place data files outside of your packages. See:
# http://docs.python.org/3.4/distutils/setupscript.html#installing-additional-files # noqa
# In this case, 'data_file' will be installed into '<sys.prefix>/my_data'
#data_files=[('my_data', ['data/data_file'])],
# To provide executable scripts, use entry points in preference to the
# "scripts" keyword. Entry points provide cross-platform support and allow
# pip to create the appropriate form of executable for the target platform.
entry_points={
'console_scripts': [
'iotsaControl=iotsaControl.__main__:main',
],
},
)
|
py | 1a51611ebb27b011d5d1df75a82d7962eca753fa | import random
import pytest
import numpy as np
import os
from ray import cloudpickle as pickle
from ray import ray_constants
from ray.actor import ActorClassInheritanceException
try:
import pytest_timeout
except ImportError:
pytest_timeout = None
import sys
import tempfile
import datetime
from ray._private.test_utils import (
client_test_enabled,
wait_for_condition,
wait_for_pid_to_exit,
)
from ray.tests.client_test_utils import create_remote_signal_actor
import ray
# NOTE: We have to import setproctitle after ray because we bundle setproctitle
# with ray.
import setproctitle # noqa
@pytest.mark.parametrize("set_enable_auto_connect", ["1", "0"], indirect=True)
def test_caching_actors(shutdown_only, set_enable_auto_connect):
# Test defining actors before ray.init() has been called.
@ray.remote
class Foo:
def __init__(self):
pass
def get_val(self):
return 3
if set_enable_auto_connect == "0":
# Check that we can't actually create actors before ray.init() has
# been called.
with pytest.raises(Exception):
f = Foo.remote()
ray.init(num_cpus=1)
else:
# Actor creation should succeed here because ray.init() auto connection
# is (by default) enabled.
f = Foo.remote()
f = Foo.remote()
assert ray.get(f.get_val.remote()) == 3
# https://github.com/ray-project/ray/issues/20554
def test_not_reusing_task_workers(shutdown_only):
@ray.remote
def create_ref():
ref = ray.put(np.zeros(100_000_000))
return ref
@ray.remote
class Actor:
def __init__(self):
return
def foo(self):
return
ray.init(num_cpus=1, object_store_memory=1000_000_000)
wrapped_ref = create_ref.remote()
print(ray.get(ray.get(wrapped_ref)))
# create_ref worker gets reused as an actor.
a = Actor.remote()
ray.get(a.foo.remote())
# Actor will get force-killed.
del a
# Flush the object store.
for _ in range(10):
ray.put(np.zeros(100_000_000))
# Object has been evicted and owner has died. Throws OwnerDiedError.
print(ray.get(ray.get(wrapped_ref)))
def test_remote_function_within_actor(ray_start_10_cpus):
    # Make sure we can use remote functions within actors.
# Create some values to close over.
val1 = 1
val2 = 2
@ray.remote
def f(x):
return val1 + x
@ray.remote
def g(x):
return ray.get(f.remote(x))
@ray.remote
class Actor:
def __init__(self, x):
self.x = x
self.y = val2
self.object_refs = [f.remote(i) for i in range(5)]
self.values2 = ray.get([f.remote(i) for i in range(5)])
def get_values(self):
return self.x, self.y, self.object_refs, self.values2
def f(self):
return [f.remote(i) for i in range(5)]
def g(self):
return ray.get([g.remote(i) for i in range(5)])
def h(self, object_refs):
return ray.get(object_refs)
actor = Actor.remote(1)
values = ray.get(actor.get_values.remote())
assert values[0] == 1
assert values[1] == val2
assert ray.get(values[2]) == list(range(1, 6))
assert values[3] == list(range(1, 6))
assert ray.get(ray.get(actor.f.remote())) == list(range(1, 6))
assert ray.get(actor.g.remote()) == list(range(1, 6))
assert ray.get(actor.h.remote([f.remote(i) for i in range(5)])) == list(range(1, 6))
def test_define_actor_within_actor(ray_start_10_cpus):
    # Make sure we can use remote functions within actors.
@ray.remote
class Actor1:
def __init__(self, x):
self.x = x
def new_actor(self, z):
@ray.remote
class Actor2:
def __init__(self, x):
self.x = x
def get_value(self):
return self.x
self.actor2 = Actor2.remote(z)
def get_values(self, z):
self.new_actor(z)
return self.x, ray.get(self.actor2.get_value.remote())
actor1 = Actor1.remote(3)
assert ray.get(actor1.get_values.remote(5)) == (3, 5)
def test_use_actor_within_actor(ray_start_10_cpus):
# Make sure we can use actors within actors.
@ray.remote
class Actor1:
def __init__(self, x):
self.x = x
def get_val(self):
return self.x
@ray.remote
class Actor2:
def __init__(self, x, y):
self.x = x
self.actor1 = Actor1.remote(y)
def get_values(self, z):
return self.x, ray.get(self.actor1.get_val.remote())
actor2 = Actor2.remote(3, 4)
assert ray.get(actor2.get_values.remote(5)) == (3, 4)
def test_use_actor_twice(ray_start_10_cpus):
# Make sure we can call the same actor using different refs.
@ray.remote
class Actor1:
def __init__(self):
self.count = 0
def inc(self):
self.count += 1
return self.count
@ray.remote
class Actor2:
def __init__(self):
pass
def inc(self, handle):
return ray.get(handle.inc.remote())
a = Actor1.remote()
a2 = Actor2.remote()
assert ray.get(a2.inc.remote(a)) == 1
assert ray.get(a2.inc.remote(a)) == 2
def test_define_actor_within_remote_function(ray_start_10_cpus):
    # Make sure we can define and use actors within remote functions.
@ray.remote
def f(x, n):
@ray.remote
class Actor1:
def __init__(self, x):
self.x = x
def get_value(self):
return self.x
actor = Actor1.remote(x)
return ray.get([actor.get_value.remote() for _ in range(n)])
assert ray.get(f.remote(3, 1)) == [3]
assert ray.get([f.remote(i, 20) for i in range(10)]) == [
20 * [i] for i in range(10)
]
def test_use_actor_within_remote_function(ray_start_10_cpus):
    # Make sure we can create and use actors within remote functions.
@ray.remote
class Actor1:
def __init__(self, x):
self.x = x
def get_values(self):
return self.x
@ray.remote
def f(x):
actor = Actor1.remote(x)
return ray.get(actor.get_values.remote())
assert ray.get(f.remote(3)) == 3
def test_actor_import_counter(ray_start_10_cpus):
# This is mostly a test of the export counters to make sure that when
# an actor is imported, all of the necessary remote functions have been
# imported.
# Export a bunch of remote functions.
num_remote_functions = 50
for i in range(num_remote_functions):
@ray.remote
def f():
return i
@ray.remote
def g():
@ray.remote
class Actor:
def __init__(self):
# This should use the last version of f.
self.x = ray.get(f.remote())
def get_val(self):
return self.x
actor = Actor.remote()
return ray.get(actor.get_val.remote())
assert ray.get(g.remote()) == num_remote_functions - 1
@pytest.mark.skipif(client_test_enabled(), reason="internal api")
def test_actor_method_metadata_cache(ray_start_regular):
class Actor(object):
pass
# The cache of ActorClassMethodMetadata.
cache = ray.actor.ActorClassMethodMetadata._cache
cache.clear()
# Check cache hit during ActorHandle deserialization.
A1 = ray.remote(Actor)
a = A1.remote()
assert len(cache) == 1
cached_data_id = [id(x) for x in list(cache.items())[0]]
for x in range(10):
a = pickle.loads(pickle.dumps(a))
assert len(ray.actor.ActorClassMethodMetadata._cache) == 1
assert [id(x) for x in list(cache.items())[0]] == cached_data_id
@pytest.mark.skipif(client_test_enabled(), reason="internal api")
def test_actor_class_name(ray_start_regular):
@ray.remote
class Foo:
def __init__(self):
pass
Foo.remote()
g = ray.worker.global_worker.gcs_client
actor_keys = g.internal_kv_keys(
b"ActorClass", ray_constants.KV_NAMESPACE_FUNCTION_TABLE
)
assert len(actor_keys) == 1
actor_class_info = pickle.loads(
g.internal_kv_get(actor_keys[0], ray_constants.KV_NAMESPACE_FUNCTION_TABLE)
)
assert actor_class_info["class_name"] == "Foo"
assert "test_actor" in actor_class_info["module"]
def test_actor_exit_from_task(ray_start_regular_shared):
@ray.remote
class Actor:
def __init__(self):
print("Actor created")
def f(self):
return 0
@ray.remote
def f():
a = Actor.remote()
x_id = a.f.remote()
return [x_id]
x_id = ray.get(f.remote())[0]
print(ray.get(x_id)) # This should not hang.
def test_actor_init_error_propagated(ray_start_regular_shared):
@ray.remote
class Actor:
def __init__(self, error=False):
if error:
raise Exception("oops")
def foo(self):
return "OK"
actor = Actor.remote(error=False)
ray.get(actor.foo.remote())
actor = Actor.remote(error=True)
with pytest.raises(Exception, match=".*oops.*"):
ray.get(actor.foo.remote())
def test_keyword_args(ray_start_regular_shared):
@ray.remote
class Actor:
def __init__(self, arg0, arg1=1, arg2="a"):
self.arg0 = arg0
self.arg1 = arg1
self.arg2 = arg2
def get_values(self, arg0, arg1=2, arg2="b"):
return self.arg0 + arg0, self.arg1 + arg1, self.arg2 + arg2
actor = Actor.remote(0)
assert ray.get(actor.get_values.remote(1)) == (1, 3, "ab")
actor = Actor.remote(1, 2)
assert ray.get(actor.get_values.remote(2, 3)) == (3, 5, "ab")
actor = Actor.remote(1, 2, "c")
assert ray.get(actor.get_values.remote(2, 3, "d")) == (3, 5, "cd")
actor = Actor.remote(1, arg2="c")
assert ray.get(actor.get_values.remote(0, arg2="d")) == (1, 3, "cd")
assert ray.get(actor.get_values.remote(0, arg2="d", arg1=0)) == (1, 1, "cd")
actor = Actor.remote(1, arg2="c", arg1=2)
assert ray.get(actor.get_values.remote(0, arg2="d")) == (1, 4, "cd")
assert ray.get(actor.get_values.remote(0, arg2="d", arg1=0)) == (1, 2, "cd")
assert ray.get(actor.get_values.remote(arg2="d", arg1=0, arg0=2)) == (3, 2, "cd")
# Make sure we get an exception if the constructor is called
# incorrectly.
with pytest.raises(TypeError):
actor = Actor.remote()
with pytest.raises(TypeError):
actor = Actor.remote(0, 1, 2, arg3=3)
with pytest.raises(TypeError):
actor = Actor.remote(0, arg0=1)
# Make sure we get an exception if the method is called incorrectly.
actor = Actor.remote(1)
with pytest.raises(Exception):
ray.get(actor.get_values.remote())
def test_actor_name_conflict(ray_start_regular_shared):
@ray.remote
class A(object):
def foo(self):
return 100000
a = A.remote()
r = a.foo.remote()
results = [r]
for x in range(10):
@ray.remote
class A(object):
def foo(self):
return x
a = A.remote()
r = a.foo.remote()
results.append(r)
assert ray.get(results) == [100000, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
def test_variable_number_of_args(ray_start_regular_shared):
@ray.remote
class Actor:
def __init__(self, arg0, arg1=1, *args):
self.arg0 = arg0
self.arg1 = arg1
self.args = args
def get_values(self, arg0, arg1=2, *args):
return self.arg0 + arg0, self.arg1 + arg1, self.args, args
actor = Actor.remote(0)
assert ray.get(actor.get_values.remote(1)) == (1, 3, (), ())
actor = Actor.remote(1, 2)
assert ray.get(actor.get_values.remote(2, 3)) == (3, 5, (), ())
actor = Actor.remote(1, 2, "c")
assert ray.get(actor.get_values.remote(2, 3, "d")) == (3, 5, ("c",), ("d",))
actor = Actor.remote(1, 2, "a", "b", "c", "d")
assert ray.get(actor.get_values.remote(2, 3, 1, 2, 3, 4)) == (
3,
5,
("a", "b", "c", "d"),
(1, 2, 3, 4),
)
@ray.remote
class Actor:
def __init__(self, *args):
self.args = args
def get_values(self, *args):
return self.args, args
a = Actor.remote()
assert ray.get(a.get_values.remote()) == ((), ())
a = Actor.remote(1)
assert ray.get(a.get_values.remote(2)) == ((1,), (2,))
a = Actor.remote(1, 2)
assert ray.get(a.get_values.remote(3, 4)) == ((1, 2), (3, 4))
def test_no_args(ray_start_regular_shared):
@ray.remote
class Actor:
def __init__(self):
pass
def get_values(self):
pass
actor = Actor.remote()
assert ray.get(actor.get_values.remote()) is None
def test_no_constructor(ray_start_regular_shared):
@ray.remote
class Actor:
def get_values(self):
pass
actor = Actor.remote()
assert ray.get(actor.get_values.remote()) is None
def test_custom_classes(ray_start_regular_shared):
class Foo:
def __init__(self, x):
self.x = x
@ray.remote
class Actor:
def __init__(self, f2):
self.f1 = Foo(1)
self.f2 = f2
def get_values1(self):
return self.f1, self.f2
def get_values2(self, f3):
return self.f1, self.f2, f3
actor = Actor.remote(Foo(2))
results1 = ray.get(actor.get_values1.remote())
assert results1[0].x == 1
assert results1[1].x == 2
results2 = ray.get(actor.get_values2.remote(Foo(3)))
assert results2[0].x == 1
assert results2[1].x == 2
assert results2[2].x == 3
def test_actor_class_attributes(ray_start_regular_shared):
class Grandparent:
GRANDPARENT = 2
class Parent1(Grandparent):
PARENT1 = 6
class Parent2:
PARENT2 = 7
@ray.remote
class TestActor(Parent1, Parent2):
X = 3
@classmethod
def f(cls):
assert TestActor.GRANDPARENT == 2
assert TestActor.PARENT1 == 6
assert TestActor.PARENT2 == 7
assert TestActor.X == 3
return 4
def g(self):
assert TestActor.GRANDPARENT == 2
assert TestActor.PARENT1 == 6
assert TestActor.PARENT2 == 7
assert TestActor.f() == 4
return TestActor.X
t = TestActor.remote()
assert ray.get(t.g.remote()) == 3
def test_actor_static_attributes(ray_start_regular_shared):
class Grandparent:
GRANDPARENT = 2
@staticmethod
def grandparent_static():
assert Grandparent.GRANDPARENT == 2
return 1
class Parent1(Grandparent):
PARENT1 = 6
@staticmethod
def parent1_static():
assert Parent1.PARENT1 == 6
return 2
def parent1(self):
assert Parent1.PARENT1 == 6
class Parent2:
PARENT2 = 7
def parent2(self):
assert Parent2.PARENT2 == 7
@ray.remote
class TestActor(Parent1, Parent2):
X = 3
@staticmethod
def f():
assert TestActor.GRANDPARENT == 2
assert TestActor.PARENT1 == 6
assert TestActor.PARENT2 == 7
assert TestActor.X == 3
return 4
def g(self):
assert TestActor.GRANDPARENT == 2
assert TestActor.PARENT1 == 6
assert TestActor.PARENT2 == 7
assert TestActor.f() == 4
return TestActor.X
t = TestActor.remote()
assert ray.get(t.g.remote()) == 3
def test_decorator_args(ray_start_regular_shared):
# This is an invalid way of using the actor decorator.
with pytest.raises(Exception):
@ray.remote()
class Actor:
def __init__(self):
pass
# This is an invalid way of using the actor decorator.
with pytest.raises(Exception):
@ray.remote(invalid_kwarg=0) # noqa: F811
class Actor: # noqa: F811
def __init__(self):
pass
# This is an invalid way of using the actor decorator.
with pytest.raises(Exception):
@ray.remote(num_cpus=0, invalid_kwarg=0) # noqa: F811
class Actor: # noqa: F811
def __init__(self):
pass
# This is a valid way of using the decorator.
@ray.remote(num_cpus=1) # noqa: F811
class Actor: # noqa: F811
def __init__(self):
pass
# This is a valid way of using the decorator.
@ray.remote(num_gpus=1) # noqa: F811
class Actor: # noqa: F811
def __init__(self):
pass
# This is a valid way of using the decorator.
@ray.remote(num_cpus=1, num_gpus=1) # noqa: F811
class Actor: # noqa: F811
def __init__(self):
pass
def test_random_id_generation(ray_start_regular_shared):
@ray.remote
class Foo:
def __init__(self):
pass
# Make sure that seeding numpy does not interfere with the generation
# of actor IDs.
np.random.seed(1234)
random.seed(1234)
f1 = Foo.remote()
np.random.seed(1234)
random.seed(1234)
f2 = Foo.remote()
assert f1._actor_id != f2._actor_id
@pytest.mark.skipif(client_test_enabled(), reason="differing inheritence structure")
def test_actor_inheritance(ray_start_regular_shared):
class NonActorBase:
def __init__(self):
pass
# Test that an actor class can inherit from a non-actor class.
@ray.remote
class ActorBase(NonActorBase):
def __init__(self):
pass
# Test that you can't instantiate an actor class directly.
with pytest.raises(Exception, match="cannot be instantiated directly"):
ActorBase()
# Test that you can't inherit from an actor class.
with pytest.raises(
ActorClassInheritanceException,
match="Inheriting from actor classes is not " "currently supported.",
):
class Derived(ActorBase):
def __init__(self):
pass
def test_multiple_return_values(ray_start_regular_shared):
@ray.remote
class Foo:
def method0(self):
return 1
@ray.method(num_returns=1)
def method1(self):
return 1
@ray.method(num_returns=2)
def method2(self):
return 1, 2
@ray.method(num_returns=3)
def method3(self):
return 1, 2, 3
f = Foo.remote()
id0 = f.method0.remote()
assert ray.get(id0) == 1
id1 = f.method1.remote()
assert ray.get(id1) == 1
id2a, id2b = f.method2.remote()
assert ray.get([id2a, id2b]) == [1, 2]
id3a, id3b, id3c = f.method3.remote()
assert ray.get([id3a, id3b, id3c]) == [1, 2, 3]
def test_options_num_returns(ray_start_regular_shared):
@ray.remote
class Foo:
def method(self):
return 1, 2
f = Foo.remote()
obj = f.method.remote()
assert ray.get(obj) == (1, 2)
obj1, obj2 = f.method.options(num_returns=2).remote()
assert ray.get([obj1, obj2]) == [1, 2]
def test_options_name(ray_start_regular_shared):
@ray.remote
class Foo:
def method(self, name):
assert setproctitle.getproctitle() == f"ray::{name}"
f = Foo.remote()
ray.get(f.method.options(name="foo").remote("foo"))
ray.get(f.method.options(name="bar").remote("bar"))
def test_define_actor(ray_start_regular_shared):
@ray.remote
class Test:
def __init__(self, x):
self.x = x
def f(self, y):
return self.x + y
t = Test.remote(2)
assert ray.get(t.f.remote(1)) == 3
# Make sure that calling an actor method directly raises an exception.
with pytest.raises(Exception):
t.f(1)
def test_actor_deletion(ray_start_regular_shared):
    # Make sure that when an actor handle goes out of scope, the actor
    # destructor is called.
@ray.remote
class Actor:
def getpid(self):
return os.getpid()
a = Actor.remote()
pid = ray.get(a.getpid.remote())
a = None
wait_for_pid_to_exit(pid)
actors = [Actor.remote() for _ in range(10)]
pids = ray.get([a.getpid.remote() for a in actors])
a = None
actors = None
[wait_for_pid_to_exit(pid) for pid in pids]
def test_actor_method_deletion(ray_start_regular_shared):
@ray.remote
class Actor:
def method(self):
return 1
# Make sure that if we create an actor and call a method on it
# immediately, the actor doesn't get killed before the method is
# called.
assert ray.get(Actor.remote().method.remote()) == 1
def test_distributed_actor_handle_deletion(ray_start_regular_shared):
@ray.remote
class Actor:
def method(self):
return 1
def getpid(self):
return os.getpid()
@ray.remote
def f(actor, signal):
ray.get(signal.wait.remote())
return ray.get(actor.method.remote())
SignalActor = create_remote_signal_actor(ray)
signal = SignalActor.remote()
a = Actor.remote()
pid = ray.get(a.getpid.remote())
# Pass the handle to another task that cannot run yet.
x_id = f.remote(a, signal)
# Delete the original handle. The actor should not get killed yet.
del a
# Once the task finishes, the actor process should get killed.
ray.get(signal.send.remote())
assert ray.get(x_id) == 1
wait_for_pid_to_exit(pid)
def test_multiple_actors(ray_start_regular_shared):
@ray.remote
class Counter:
def __init__(self, value):
self.value = value
def increase(self):
self.value += 1
return self.value
def reset(self):
self.value = 0
num_actors = 5
num_increases = 50
# Create multiple actors.
actors = [Counter.remote(i) for i in range(num_actors)]
results = []
# Call each actor's method a bunch of times.
for i in range(num_actors):
results += [actors[i].increase.remote() for _ in range(num_increases)]
result_values = ray.get(results)
for i in range(num_actors):
v = result_values[(num_increases * i) : (num_increases * (i + 1))]
assert v == list(range(i + 1, num_increases + i + 1))
# Reset the actor values.
[actor.reset.remote() for actor in actors]
# Interweave the method calls on the different actors.
results = []
for j in range(num_increases):
results += [actor.increase.remote() for actor in actors]
result_values = ray.get(results)
for j in range(num_increases):
v = result_values[(num_actors * j) : (num_actors * (j + 1))]
assert v == num_actors * [j + 1]
def test_inherit_actor_from_class(ray_start_regular_shared):
# Make sure we can define an actor by inheriting from a regular class.
# Note that actors cannot inherit from other actors.
class Foo:
def __init__(self, x):
self.x = x
def f(self):
return self.x
def g(self, y):
return self.x + y
@ray.remote
class Actor(Foo):
def __init__(self, x):
Foo.__init__(self, x)
def get_value(self):
return self.f()
actor = Actor.remote(1)
assert ray.get(actor.get_value.remote()) == 1
assert ray.get(actor.g.remote(5)) == 6
def test_get_non_existing_named_actor(ray_start_regular_shared):
with pytest.raises(ValueError):
_ = ray.get_actor("non_existing_actor")
# https://github.com/ray-project/ray/issues/17843
def test_actor_namespace(ray_start_regular_shared):
@ray.remote
class Actor:
def f(self):
return "ok"
a = Actor.options(name="foo", namespace="f1").remote()
with pytest.raises(ValueError):
ray.get_actor(name="foo", namespace="f2")
a1 = ray.get_actor(name="foo", namespace="f1")
assert ray.get(a1.f.remote()) == "ok"
del a
def test_named_actor_cache(ray_start_regular_shared):
"""Verify that named actor cache works well."""
@ray.remote(max_restarts=-1)
class Counter:
def __init__(self):
self.count = 0
def inc_and_get(self):
self.count += 1
return self.count
a = Counter.options(name="hi").remote()
first_get = ray.get_actor("hi")
assert ray.get(first_get.inc_and_get.remote()) == 1
second_get = ray.get_actor("hi")
assert ray.get(second_get.inc_and_get.remote()) == 2
ray.kill(a, no_restart=True)
def actor_removed():
try:
ray.get_actor("hi")
return False
except ValueError:
return True
wait_for_condition(actor_removed)
get_after_restart = Counter.options(name="hi").remote()
assert ray.get(get_after_restart.inc_and_get.remote()) == 1
get_by_name = ray.get_actor("hi")
assert ray.get(get_by_name.inc_and_get.remote()) == 2
def test_named_actor_cache_via_another_actor(ray_start_regular_shared):
"""Verify that named actor cache works well with another actor."""
@ray.remote(max_restarts=0)
class Counter:
def __init__(self):
self.count = 0
def inc_and_get(self):
self.count += 1
return self.count
    # A third actor gets the named actor, to show that the cache doesn't
    # break lookups from a third party.
@ray.remote(max_restarts=0)
class ActorGetter:
def get_actor_count(self, name):
actor = ray.get_actor(name)
return ray.get(actor.inc_and_get.remote())
# Start a actor and get it by name in driver.
a = Counter.options(name="foo").remote()
first_get = ray.get_actor("foo")
assert ray.get(first_get.inc_and_get.remote()) == 1
# Start another actor as the third actor to get named actor.
actor_getter = ActorGetter.remote()
assert ray.get(actor_getter.get_actor_count.remote("foo")) == 2
ray.kill(a, no_restart=True)
def actor_removed():
try:
ray.get_actor("foo")
return False
except ValueError:
return True
wait_for_condition(actor_removed)
# Restart the named actor.
get_after_restart = Counter.options(name="foo").remote()
assert ray.get(get_after_restart.inc_and_get.remote()) == 1
# Get the named actor from the third actor again.
assert ray.get(actor_getter.get_actor_count.remote("foo")) == 2
# Get the named actor by name in driver again.
get_by_name = ray.get_actor("foo")
assert ray.get(get_by_name.inc_and_get.remote()) == 3
def test_wrapped_actor_handle(ray_start_regular_shared):
@ray.remote
class B:
def doit(self):
return 2
@ray.remote
class A:
def __init__(self):
self.b = B.remote()
def get_actor_ref(self):
return [self.b]
a = A.remote()
b_list = ray.get(a.get_actor_ref.remote())
assert ray.get(b_list[0].doit.remote()) == 2
@pytest.mark.skip("This test is just used to print the latency of creating 100 actors.")
def test_actor_creation_latency(ray_start_regular_shared):
# This test is just used to test the latency of actor creation.
@ray.remote
class Actor:
def get_value(self):
return 1
start = datetime.datetime.now()
actor_handles = [Actor.remote() for _ in range(100)]
actor_create_time = datetime.datetime.now()
for actor_handle in actor_handles:
ray.get(actor_handle.get_value.remote())
end = datetime.datetime.now()
print(
"actor_create_time_consume = {}, total_time_consume = {}".format(
actor_create_time - start, end - start
)
)
@pytest.mark.parametrize(
"exit_condition",
[
# "out_of_scope", TODO(edoakes): enable this once fixed.
"__ray_terminate__",
"ray.actor.exit_actor",
"ray.kill",
],
)
def test_atexit_handler(ray_start_regular_shared, exit_condition):
@ray.remote
class A:
def __init__(self, tmpfile, data):
import atexit
def f(*args, **kwargs):
with open(tmpfile, "w") as f:
f.write(data)
f.flush()
atexit.register(f)
def ready(self):
pass
def exit(self):
ray.actor.exit_actor()
data = "hello"
tmpfile = tempfile.NamedTemporaryFile("w+", suffix=".tmp", delete=False)
tmpfile.close()
a = A.remote(tmpfile.name, data)
ray.get(a.ready.remote())
if exit_condition == "out_of_scope":
del a
elif exit_condition == "__ray_terminate__":
ray.wait([a.__ray_terminate__.remote()])
elif exit_condition == "ray.actor.exit_actor":
ray.wait([a.exit.remote()])
elif exit_condition == "ray.kill":
ray.kill(a)
else:
assert False, "Unrecognized condition"
def check_file_written():
with open(tmpfile.name, "r") as f:
if f.read() == data:
return True
return False
# ray.kill() should not trigger atexit handlers, all other methods should.
if exit_condition == "ray.kill":
assert not check_file_written()
else:
wait_for_condition(check_file_written)
os.unlink(tmpfile.name)
def test_return_actor_handle_from_actor(ray_start_regular_shared):
@ray.remote
class Inner:
def ping(self):
return "pong"
@ray.remote
class Outer:
def __init__(self):
self.inner = Inner.remote()
def get_ref(self):
return self.inner
outer = Outer.remote()
inner = ray.get(outer.get_ref.remote())
assert ray.get(inner.ping.remote()) == "pong"
def test_actor_autocomplete(ray_start_regular_shared):
"""
Test that autocomplete works with actors by checking that the builtin dir()
function works as expected.
"""
@ray.remote
class Foo:
def method_one(self) -> None:
pass
class_calls = [fn for fn in dir(Foo) if not fn.startswith("_")]
assert set(class_calls) == {"method_one", "options", "remote"}
f = Foo.remote()
methods = [fn for fn in dir(f) if not fn.startswith("_")]
assert methods == ["method_one"]
all_methods = set(dir(f))
assert all_methods == {"__init__", "method_one", "__ray_terminate__"}
method_options = [fn for fn in dir(f.method_one) if not fn.startswith("_")]
assert set(method_options) == {"options", "remote"}
def test_actor_mro(ray_start_regular_shared):
@ray.remote
class Foo:
def __init__(self, x):
self.x = x
@classmethod
def factory_f(cls, x):
return cls(x)
def get_x(self):
return self.x
obj = Foo.factory_f(1)
assert obj.get_x() == 1
if __name__ == "__main__":
sys.exit(pytest.main(["-v", __file__]))
|
py | 1a516147b256e8d494ba43ccab974f64c95e329d | #!/usr/bin/env python
# ---------------------------------------------------------------------------------------
# configure.py: Athena++ configuration script in python. Original version by CJW.
#
# When configure.py is run, it uses the command line options and default settings to
# create custom versions of the files Makefile and src/defs.hpp from the template files
# Makefile.in and src/defs.hpp.in respectively.
#
# The following options are implememted:
# -h --help help message
# --prob=name use src/pgen/name.cpp as the problem generator
# --coord=xxx use xxx as the coordinate system
# --eos=xxx use xxx as the equation of state
# --flux=xxx use xxx as the Riemann solver
# --nghost=xxx set NGHOST=xxx
# --nscalars=xxx set NSCALARS=xxx
# -eos_table enable EOS table
# -b enable magnetic fields
# -s enable special relativity
# -g enable general relativity
# -t enable interface frame transformations for GR
# -debug enable debug flags (-g -O0); override other compiler options
# -coverage enable compiler-dependent code coverage flags
# -float enable single precision (default is double)
# -mpi enable parallelization with MPI
# -omp enable parallelization with OpenMP
# -hdf5 enable HDF5 output (requires the HDF5 library)
# --hdf5_path=path path to HDF5 libraries (requires the HDF5 library)
# -fft enable FFT (requires the FFTW library)
# --fftw_path=path path to FFTW libraries (requires the FFTW library)
# --grav=xxx use xxx as the self-gravity solver
# --cxx=xxx use xxx as the C++ compiler (works w/ or w/o -mpi)
# --ccmd=name use name as the command to call the (non-MPI) C++ compiler
# --mpiccmd=name use name as the command to call the MPI C++ compiler
# --gcovcmd=name use name as the command to call the gcov utility
# --cflag=string append string whenever invoking compiler/linker
# --include=path use -Ipath when compiling
# --lib_path=path use -Lpath when linking
# --lib=xxx use -lxxx when linking
# ----------------------------------------------------------------------------------------
# Modules
import argparse
import glob
import re
# Set template and output filenames
makefile_input = 'Makefile.in'
makefile_output = 'Makefile'
defsfile_input = 'src/defs.hpp.in'
defsfile_output = 'src/defs.hpp'
# --- Step 1. Prepare parser, add each of the arguments ------------------
athena_description = (
"Prepare custom Makefile and defs.hpp for compiling Athena++ solver"
)
athena_epilog = (
"Full documentation of options available at "
"https://github.com/PrincetonUniversity/athena-public-version/wiki/Configuring"
)
parser = argparse.ArgumentParser(description=athena_description, epilog=athena_epilog)
# --prob=[name] argument
pgen_directory = 'src/pgen/'
# set pgen_choices to list of .cpp files in src/pgen/
pgen_choices = glob.glob(pgen_directory + '*.cpp')
# remove 'src/pgen/' prefix and '.cpp' extension from each filename
pgen_choices = [choice[len(pgen_directory):-4] for choice in pgen_choices]
parser.add_argument('--prob',
default='shock_tube',
choices=pgen_choices,
help='select problem generator')
# --coord=[name] argument
parser.add_argument(
'--coord',
default='cartesian',
choices=[
'cartesian',
'cylindrical',
'spherical_polar',
'minkowski',
'sinusoidal',
'tilted',
'schwarzschild',
'kerr-schild',
'gr_user'],
help='select coordinate system')
# --eos=[name] argument
parser.add_argument('--eos',
default='adiabatic',
choices=['adiabatic', 'isothermal', 'general/eos_table',
'general/hydrogen', 'general/ideal'],
help='select equation of state')
# --flux=[name] argument
parser.add_argument('--flux',
default='default',
choices=['default', 'hlle', 'hllc', 'lhllc', 'hlld', 'lhlld', 'roe', 'llf'], # noqa
help='select Riemann solver')
# --nghost=[value] argument
parser.add_argument('--nghost',
default='2',
help='set number of ghost zones')
# --nscalars=[value] argument
parser.add_argument('--nscalars',
default='0',
help='set number of passive scalars')
# -b argument
parser.add_argument('-b',
action='store_true',
default=False,
help='enable magnetic field')
# -sts argument
parser.add_argument('-sts',
action='store_true',
default=False,
help='enable super-time-stepping')
# -s argument
parser.add_argument('-s',
action='store_true',
default=False,
help='enable special relativity')
# -g argument
parser.add_argument('-g',
action='store_true',
default=False,
help='enable general relativity')
# -t argument
parser.add_argument('-t',
action='store_true',
default=False,
help='enable interface frame transformations for GR')
# -debug argument
parser.add_argument('-debug',
action='store_true',
default=False,
help='enable debug flags; override other compiler options')
# -coverage argument
parser.add_argument('-coverage',
action='store_true',
default=False,
help='enable compiler-dependent code coverage flag')
# -float argument
parser.add_argument('-float',
action='store_true',
default=False,
help='enable single precision')
# -mpi argument
parser.add_argument('-mpi',
action='store_true',
default=False,
help='enable parallelization with MPI')
# -omp argument
parser.add_argument('-omp',
action='store_true',
default=False,
help='enable parallelization with OpenMP')
# --grav=[name] argument
parser.add_argument('--grav',
default='none',
choices=['none', 'fft'],
help='select self-gravity solver')
# -fft argument
parser.add_argument('-fft',
action='store_true',
default=False,
help='enable FFT')
# --fftw_path argument
parser.add_argument('--fftw_path',
default='',
help='path to FFTW libraries')
# -hdf5 argument
parser.add_argument('-hdf5',
action='store_true',
default=False,
help='enable HDF5 Output')
# -h5double argument
parser.add_argument('-h5double',
action='store_true',
default=False,
help='enable double precision HDF5 output')
# --hdf5_path argument
parser.add_argument('--hdf5_path',
default='',
help='path to HDF5 libraries')
# The main choices for --cxx flag, using "ctype[-suffix]" formatting, where "ctype" is the
# major family/suite/group of compilers and "suffix" may represent variants of the
# compiler version and/or predefined sets of compiler options. The C++ compiler front ends
# are the main supported/documented options and are invoked on the command line, but the C
# front ends are also acceptable selections and are mapped to the matching C++ front end:
# gcc -> g++, clang -> clang++, icc -> icpc
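# (suffixed variants are mapped as well, e.g. --cxx=gcc-simd becomes g++-simd and
# --cxx=clang-simd becomes clang++-simd; see c_to_cpp() below)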
cxx_choices = [
'g++',
'g++-simd',
'icpx',
'icpc',
'icpc-debug',
'icpc-phi',
'cray',
'bgxlc++',
'clang++',
'clang++-simd',
'clang++-apple',
]
def c_to_cpp(arg):
arg = arg.replace('gcc', 'g++', 1)
arg = arg.replace('icc', 'icpc', 1)
arg = arg.replace('icx', 'icpx', 1)
if arg == 'bgxl' or arg == 'bgxlc':
arg = 'bgxlc++'
if arg == 'clang':
arg = 'clang++'
else:
arg = arg.replace('clang-', 'clang++-', 1)
return arg
# --cxx=[name] argument
parser.add_argument(
'--cxx',
default='g++',
type=c_to_cpp,
choices=cxx_choices,
help='select C++ compiler and default set of flags (works w/ or w/o -mpi)')
# --ccmd=[name] argument
parser.add_argument('--ccmd',
default=None,
help='override for command to use to call (non-MPI) C++ compiler')
# --mpiccmd=[name] argument
parser.add_argument('--mpiccmd',
default=None,
help='override for command to use to call MPI C++ compiler')
# --gcovcmd=[name] argument
parser.add_argument('--gcovcmd',
default=None,
help='override for command to use to call Gcov utility in Makefile')
# --cflag=[string] argument
parser.add_argument('--cflag',
default=None,
help='additional string of flags to append to compiler/linker calls')
# --include=[name] arguments
parser.add_argument(
'--include',
default=[],
action='append',
help=('extra path for included header files (-I<path>); can be specified multiple '
'times'))
# --lib_path=[name] arguments
parser.add_argument(
'--lib_path',
default=[],
action='append',
help=('extra path for linked library files (-L<path>); can be specified multiple '
'times'))
# --lib=[name] arguments
parser.add_argument(
'--lib',
default=[],
action='append',
help='name of library to link against (-l<lib>); can be specified multiple times')
# Parse command-line inputs
args = vars(parser.parse_args())
# --- Step 2. Test for incompatible arguments ----------------------------
# Set default flux; HLLD for MHD, HLLC for hydro, HLLE for isothermal hydro or any GR
if args['flux'] == 'default':
if args['g']:
args['flux'] = 'hlle'
elif args['b']:
args['flux'] = 'hlld'
elif args['eos'] == 'isothermal':
args['flux'] = 'hlle'
else:
args['flux'] = 'hllc'
# Check Riemann solver compatibility
if args['flux'] == 'hllc' and args['eos'] == 'isothermal':
raise SystemExit('### CONFIGURE ERROR: HLLC flux cannot be used with isothermal EOS')
if args['flux'] == 'hllc' and args['b']:
raise SystemExit('### CONFIGURE ERROR: HLLC flux cannot be used with MHD')
if args['flux'] == 'lhllc' and args['eos'] == 'isothermal':
raise SystemExit('### CONFIGURE ERROR: LHLLC flux cannot be used with isothermal EOS') # noqa
if args['flux'] == 'lhllc' and args['b']:
raise SystemExit('### CONFIGURE ERROR: LHLLC flux cannot be used with MHD')
if args['flux'] == 'hlld' and not args['b']:
raise SystemExit('### CONFIGURE ERROR: HLLD flux can only be used with MHD')
if args['flux'] == 'lhlld' and args['eos'] == 'isothermal':
raise SystemExit('### CONFIGURE ERROR: LHLLD flux cannot be used with isothermal EOS') # noqa
if args['flux'] == 'lhlld' and not args['b']:
raise SystemExit('### CONFIGURE ERROR: LHLLD flux can only be used with MHD')
# Check relativity
if args['s'] and args['g']:
raise SystemExit('### CONFIGURE ERROR: '
+ 'GR implies SR; the -s option is restricted to pure SR')
if args['t'] and not args['g']:
raise SystemExit('### CONFIGURE ERROR: Frame transformations only apply to GR')
if args['g'] and not args['t'] and args['flux'] not in ('llf', 'hlle'):
raise SystemExit('### CONFIGURE ERROR: Frame transformations required for {0}'
.format(args['flux']))
if args['g'] and args['coord'] in ('cartesian', 'cylindrical', 'spherical_polar'):
raise SystemExit('### CONFIGURE ERROR: GR cannot be used with {0} coordinates'
.format(args['coord']))
if not args['g'] and args['coord'] not in ('cartesian', 'cylindrical', 'spherical_polar'):
raise SystemExit('### CONFIGURE ERROR: '
+ args['coord'] + ' coordinates only apply to GR')
if args['eos'] == 'isothermal':
if args['s'] or args['g']:
raise SystemExit('### CONFIGURE ERROR: '
+ 'Isothermal EOS is incompatible with relativity')
if args['eos'][:8] == 'general/':
if args['s'] or args['g']:
raise SystemExit('### CONFIGURE ERROR: '
+ 'General EOS is incompatible with relativity')
if args['flux'] not in ['hllc', 'hlld']:
raise SystemExit('### CONFIGURE ERROR: '
+ 'General EOS is incompatible with flux ' + args['flux'])
# --- Step 3. Set definitions and Makefile options based on above arguments --------------
# Prepare dictionaries of substitutions to be made
definitions = {}
makefile_options = {}
makefile_options['LOADER_FLAGS'] = ''
# --prob=[name] argument
definitions['PROBLEM'] = makefile_options['PROBLEM_FILE'] = args['prob']
# --coord=[name] argument
definitions['COORDINATE_SYSTEM'] = makefile_options['COORDINATES_FILE'] = args['coord']
# --eos=[name] argument
definitions['NON_BAROTROPIC_EOS'] = '0' if args['eos'] == 'isothermal' else '1'
makefile_options['EOS_FILE'] = args['eos']
definitions['EQUATION_OF_STATE'] = args['eos']
# set number of hydro variables for adiabatic/isothermal
definitions['GENERAL_EOS'] = '0'
makefile_options['GENERAL_EOS_FILE'] = 'noop'
definitions['EOS_TABLE_ENABLED'] = '0'
if args['eos'] == 'isothermal':
definitions['NHYDRO_VARIABLES'] = '4'
elif args['eos'] == 'adiabatic':
definitions['NHYDRO_VARIABLES'] = '5'
else:
definitions['GENERAL_EOS'] = '1'
makefile_options['GENERAL_EOS_FILE'] = 'general'
definitions['NHYDRO_VARIABLES'] = '5'
if args['eos'] == 'general/eos_table':
definitions['EOS_TABLE_ENABLED'] = '1'
# --flux=[name] argument
definitions['RSOLVER'] = makefile_options['RSOLVER_FILE'] = args['flux']
# --nghost=[value] argument
definitions['NUMBER_GHOST_CELLS'] = args['nghost']
# --nscalars=[value] argument
definitions['NUMBER_PASSIVE_SCALARS'] = args['nscalars']
# -b argument
# set variety of macros based on whether MHD/hydro or adi/iso are defined
if args['b']:
definitions['MAGNETIC_FIELDS_ENABLED'] = '1'
if definitions['GENERAL_EOS'] != '0':
makefile_options['GENERAL_EOS_FILE'] += '_mhd'
else:
makefile_options['EOS_FILE'] += '_mhd'
definitions['NFIELD_VARIABLES'] = '3'
makefile_options['RSOLVER_DIR'] = 'mhd/'
if args['flux'] == 'hlle' or args['flux'] == 'llf' or args['flux'] == 'roe':
makefile_options['RSOLVER_FILE'] += '_mhd'
if args['eos'] == 'isothermal':
definitions['NWAVE_VALUE'] = '6'
if args['flux'] == 'hlld':
makefile_options['RSOLVER_FILE'] += '_iso'
else:
definitions['NWAVE_VALUE'] = '7'
else:
definitions['MAGNETIC_FIELDS_ENABLED'] = '0'
if definitions['GENERAL_EOS'] != '0':
makefile_options['GENERAL_EOS_FILE'] += '_hydro'
else:
makefile_options['EOS_FILE'] += '_hydro'
definitions['NFIELD_VARIABLES'] = '0'
makefile_options['RSOLVER_DIR'] = 'hydro/'
if args['eos'] == 'isothermal':
definitions['NWAVE_VALUE'] = '4'
else:
definitions['NWAVE_VALUE'] = '5'
# -sts argument
if args['sts']:
definitions['STS_ENABLED'] = '1'
else:
definitions['STS_ENABLED'] = '0'
# -s, -g, and -t arguments
definitions['RELATIVISTIC_DYNAMICS'] = '1' if args['s'] or args['g'] else '0'
definitions['GENERAL_RELATIVITY'] = '1' if args['g'] else '0'
definitions['FRAME_TRANSFORMATIONS'] = '1' if args['t'] else '0'
if args['s']:
makefile_options['EOS_FILE'] += '_sr'
if definitions['GENERAL_EOS'] != '0':
makefile_options['GENERAL_EOS_FILE'] += '_sr'
makefile_options['RSOLVER_FILE'] += '_rel'
if args['g']:
makefile_options['EOS_FILE'] += '_gr'
if definitions['GENERAL_EOS'] != '0':
makefile_options['GENERAL_EOS_FILE'] += '_gr'
makefile_options['RSOLVER_FILE'] += '_rel'
if not args['t']:
makefile_options['RSOLVER_FILE'] += '_no_transform'
# --cxx=[name] argument
if args['cxx'] == 'g++':
# GCC is C++11 feature-complete since v4.8.1 (2013-05-31)
definitions['COMPILER_CHOICE'] = 'g++'
definitions['COMPILER_COMMAND'] = makefile_options['COMPILER_COMMAND'] = 'g++'
makefile_options['PREPROCESSOR_FLAGS'] = ''
makefile_options['COMPILER_FLAGS'] = '-O3 -std=c++11'
makefile_options['LINKER_FLAGS'] = ''
makefile_options['LIBRARY_FLAGS'] = ''
if args['cxx'] == 'g++-simd':
# GCC version >= 4.9, for OpenMP 4.0; version >= 6.1 for OpenMP 4.5 support
definitions['COMPILER_CHOICE'] = 'g++-simd'
definitions['COMPILER_COMMAND'] = makefile_options['COMPILER_COMMAND'] = 'g++'
makefile_options['PREPROCESSOR_FLAGS'] = ''
makefile_options['COMPILER_FLAGS'] = (
'-O3 -std=c++11 -fopenmp-simd -fwhole-program -flto -ffast-math '
'-march=native -fprefetch-loop-arrays'
# -march=skylake-avx512, skylake, core-avx2
# -mprefer-vector-width=128 # available in gcc-8, but not gcc-7
# -mtune=native, generic, broadwell
# -mprefer-avx128
# -m64 (default)
)
makefile_options['LINKER_FLAGS'] = ''
makefile_options['LIBRARY_FLAGS'] = ''
if args['cxx'] == 'icpx':
# Next-gen LLVM-based Intel oneAPI DPC++/C++ Compiler
definitions['COMPILER_CHOICE'] = 'icpx'
definitions['COMPILER_COMMAND'] = makefile_options['COMPILER_COMMAND'] = 'icpx'
makefile_options['PREPROCESSOR_FLAGS'] = ''
# ICX drivers icx and icpx will accept ICC Classic Compiler options or Clang*/LLVM
# Compiler options
makefile_options['COMPILER_FLAGS'] = (
'-O3 -std=c++11 -ipo -xhost -qopenmp-simd '
)
# Currently unsupported, but "options to be supported" according to icpx
# -qnextgen-diag: '-inline-forceinline -qopt-prefetch=4 '
makefile_options['LINKER_FLAGS'] = ''
makefile_options['LIBRARY_FLAGS'] = ''
if args['cxx'] == 'icpc':
# ICC is C++11 feature-complete since v15.0 (2014-08-26)
definitions['COMPILER_CHOICE'] = 'icpc'
definitions['COMPILER_COMMAND'] = makefile_options['COMPILER_COMMAND'] = 'icpc'
makefile_options['PREPROCESSOR_FLAGS'] = ''
makefile_options['COMPILER_FLAGS'] = (
'-O3 -std=c++11 -ipo -xhost -inline-forceinline -qopenmp-simd -qopt-prefetch=4 '
'-qoverride-limits' # -qopt-report-phase=ipo (does nothing without -ipo)
)
# -qopt-zmm-usage=high' # typically harms multi-core performance on Skylake Xeon
makefile_options['LINKER_FLAGS'] = ''
makefile_options['LIBRARY_FLAGS'] = ''
if args['cxx'] == 'icpc-debug':
# Disable IPO, forced inlining, and fast math. Enable vectorization reporting.
# Useful for testing symmetry, SIMD-enabled functions and loops with OpenMP 4.5
definitions['COMPILER_CHOICE'] = 'icpc'
definitions['COMPILER_COMMAND'] = makefile_options['COMPILER_COMMAND'] = 'icpc'
makefile_options['PREPROCESSOR_FLAGS'] = ''
makefile_options['COMPILER_FLAGS'] = (
'-O3 -std=c++11 -xhost -qopenmp-simd -fp-model precise -qopt-prefetch=4 '
'-qopt-report=5 -qopt-report-phase=openmp,vec -g -qoverride-limits'
)
makefile_options['LINKER_FLAGS'] = ''
makefile_options['LIBRARY_FLAGS'] = ''
if args['cxx'] == 'icpc-phi':
    # Cross-compile for Intel Xeon Phi x200 KNL series (unique AVX-512ER and AVX-512PF)
    # -xMIC-AVX512: generate AVX-512F, AVX-512CD, AVX-512ER and AVX-512PF
definitions['COMPILER_CHOICE'] = 'icpc'
definitions['COMPILER_COMMAND'] = makefile_options['COMPILER_COMMAND'] = 'icpc'
makefile_options['PREPROCESSOR_FLAGS'] = ''
makefile_options['COMPILER_FLAGS'] = (
'-O3 -std=c++11 -ipo -xMIC-AVX512 -inline-forceinline -qopenmp-simd '
'-qopt-prefetch=4 -qoverride-limits'
)
makefile_options['LINKER_FLAGS'] = ''
makefile_options['LIBRARY_FLAGS'] = ''
if args['cxx'] == 'cray':
# Cray Compiling Environment 8.4 (2015-09-24) introduces C++11 feature completeness
# (except "alignas"). v8.6 is C++14 feature-complete
definitions['COMPILER_CHOICE'] = 'cray'
definitions['COMPILER_COMMAND'] = makefile_options['COMPILER_COMMAND'] = 'CC'
makefile_options['PREPROCESSOR_FLAGS'] = ''
makefile_options['COMPILER_FLAGS'] = '-O3 -h std=c++11 -h aggress -h vector3 -hfp3'
makefile_options['LINKER_FLAGS'] = '-hwp -hpl=obj/lib'
makefile_options['LIBRARY_FLAGS'] = '-lm'
if args['cxx'] == 'bgxlc++':
# IBM XL C/C++ for BG/Q is NOT C++11 feature-complete as of v12.1.0.15 (2017-12-22)
# suppressed messages:
# 1500-036: The NOSTRICT option has the potential to alter the program's semantics
# 1540-1401: An unknown "pragma simd" is specified
# 1586-083: ld option ignored by IPA
# 1586-233: Duplicate definition of symbol ignored
# 1586-267: Inlining of specified subprogram failed due to the presence of a C++
# exception handler
definitions['COMPILER_CHOICE'] = 'bgxlc++'
definitions['COMPILER_COMMAND'] = makefile_options['COMPILER_COMMAND'] = 'bgxlc++'
makefile_options['PREPROCESSOR_FLAGS'] = ''
makefile_options['COMPILER_FLAGS'] = (
'-O3 -qhot=level=1:vector -qinline=level=5:auto -qipa=level=1:noobject'
' -qstrict=subnormals -qmaxmem=150000 -qlanglvl=extended0x -qsuppress=1500-036'
' -qsuppress=1540-1401 -qsuppress=1586-083 -qsuppress=1586-233'
' -qsuppress=1586-267'
)
makefile_options['LINKER_FLAGS'] = makefile_options['COMPILER_FLAGS']
makefile_options['LIBRARY_FLAGS'] = ''
if args['cxx'] == 'clang++':
# Clang is C++11 feature-complete since v3.3 (2013-06-17)
definitions['COMPILER_CHOICE'] = 'clang++'
definitions['COMPILER_COMMAND'] = makefile_options['COMPILER_COMMAND'] = 'clang++'
makefile_options['PREPROCESSOR_FLAGS'] = ''
makefile_options['COMPILER_FLAGS'] = '-O3 -std=c++11'
makefile_options['LINKER_FLAGS'] = ''
makefile_options['LIBRARY_FLAGS'] = ''
if args['cxx'] == 'clang++-simd':
# LLVM/Clang version >= 3.9 for most of OpenMP 4.0 and 4.5 (still incomplete; no
# offloading, target/declare simd directives). OpenMP 3.1 fully supported in LLVM 3.7
definitions['COMPILER_CHOICE'] = 'clang++-simd'
definitions['COMPILER_COMMAND'] = makefile_options['COMPILER_COMMAND'] = 'clang++'
makefile_options['PREPROCESSOR_FLAGS'] = ''
makefile_options['COMPILER_FLAGS'] = '-O3 -std=c++11 -fopenmp-simd'
makefile_options['LINKER_FLAGS'] = ''
makefile_options['LIBRARY_FLAGS'] = ''
if args['cxx'] == 'clang++-apple':
# Apple LLVM/Clang: forked version of the open-source LLVM project bundled in macOS
definitions['COMPILER_CHOICE'] = 'clang++-apple'
definitions['COMPILER_COMMAND'] = makefile_options['COMPILER_COMMAND'] = 'clang++'
makefile_options['PREPROCESSOR_FLAGS'] = ''
makefile_options['COMPILER_FLAGS'] = '-O3 -std=c++11'
makefile_options['LINKER_FLAGS'] = ''
makefile_options['LIBRARY_FLAGS'] = ''
# -float argument
if args['float']:
definitions['SINGLE_PRECISION_ENABLED'] = '1'
else:
definitions['SINGLE_PRECISION_ENABLED'] = '0'
# -debug argument
if args['debug']:
definitions['DEBUG_OPTION'] = 'DEBUG'
# Completely replace the --cxx= sets of default compiler flags, disable optimization,
# and emit debug symbols in the compiled binaries
if (args['cxx'] == 'g++' or args['cxx'] == 'g++-simd'
or args['cxx'] == 'icpx'
or args['cxx'] == 'icpc' or args['cxx'] == 'icpc-debug'
or args['cxx'] == 'clang++' or args['cxx'] == 'clang++-simd'
or args['cxx'] == 'clang++-apple'):
makefile_options['COMPILER_FLAGS'] = '-O0 --std=c++11 -g' # -Og
if args['cxx'] == 'cray':
makefile_options['COMPILER_FLAGS'] = '-O0 -h std=c++11'
if args['cxx'] == 'bgxlc++':
makefile_options['COMPILER_FLAGS'] = '-O0 -g -qlanglvl=extended0x'
if args['cxx'] == 'icpc-phi':
makefile_options['COMPILER_FLAGS'] = '-O0 --std=c++11 -g -xMIC-AVX512'
else:
definitions['DEBUG_OPTION'] = 'NOT_DEBUG'
# -coverage argument
if args['coverage']:
definitions['EXCEPTION_HANDLING_OPTION'] = 'DISABLE_EXCEPTIONS'
# For now, append new compiler flags and don't override --cxx set, but set code to be
# unoptimized (-O0 instead of -O3) to get useful statement annotations. Should we add
# '-g -fopenmp-simd', by default? Don't combine lines when writing source code!
if (args['cxx'] == 'g++' or args['cxx'] == 'g++-simd'):
makefile_options['COMPILER_FLAGS'] += (
' -O0 -fprofile-arcs -ftest-coverage'
' -fno-inline -fno-exceptions -fno-elide-constructors'
)
if (args['cxx'] == 'icpc' or args['cxx'] == 'icpc-debug'
or args['cxx'] == 'icpx'
or args['cxx'] == 'icpc-phi'):
makefile_options['COMPILER_FLAGS'] += ' -O0 -prof-gen=srcpos'
if (args['cxx'] == 'clang++' or args['cxx'] == 'clang++-simd'
or args['cxx'] == 'clang++-apple'):
        # Clang's "source-based" code coverage feature produces .profraw output
makefile_options['COMPILER_FLAGS'] += (
' -O0 -fprofile-instr-generate -fcoverage-mapping'
) # use --coverage to produce GCC-compatible .gcno, .gcda output for gcov
if (args['cxx'] == 'cray' or args['cxx'] == 'bgxlc++'):
raise SystemExit(
            '### CONFIGURE ERROR: No code coverage available for selected compiler!')
else:
# Enable C++ try/throw/catch exception handling, by default. Disable only when testing
# code coverage, since it causes Gcov and other tools to report misleadingly low
    # branch coverage statistics due to untested throwing of exceptions from function calls
definitions['EXCEPTION_HANDLING_OPTION'] = 'ENABLE_EXCEPTIONS'
# --ccmd=[name] argument
if args['ccmd'] is not None:
definitions['COMPILER_COMMAND'] = makefile_options['COMPILER_COMMAND'] = args['ccmd']
# --gcovcmd=[name] argument (only modifies Makefile target)
if args['gcovcmd'] is not None:
makefile_options['GCOV_COMMAND'] = args['gcovcmd']
else:
makefile_options['GCOV_COMMAND'] = 'gcov'
# -mpi argument
if args['mpi']:
definitions['MPI_OPTION'] = 'MPI_PARALLEL'
if (args['cxx'] == 'g++' or args['cxx'] == 'icpc' or args['cxx'] == 'icpc-debug'
or args['cxx'] == 'icpx'
or args['cxx'] == 'icpc-phi' or args['cxx'] == 'g++-simd'
or args['cxx'] == 'clang++' or args['cxx'] == 'clang++-simd'
or args['cxx'] == 'clang++-apple'):
definitions['COMPILER_COMMAND'] = makefile_options['COMPILER_COMMAND'] = 'mpicxx'
if args['cxx'] == 'cray':
makefile_options['COMPILER_FLAGS'] += ' -h mpi1'
if args['cxx'] == 'bgxlc++':
definitions['COMPILER_COMMAND'] = makefile_options['COMPILER_COMMAND'] = 'mpixlcxx' # noqa
# --mpiccmd=[name] argument
if args['mpiccmd'] is not None:
definitions['COMPILER_COMMAND'] = makefile_options['COMPILER_COMMAND'] = args['mpiccmd'] # noqa
else:
definitions['MPI_OPTION'] = 'NOT_MPI_PARALLEL'
# -omp argument
if args['omp']:
definitions['OPENMP_OPTION'] = 'OPENMP_PARALLEL'
if (args['cxx'] == 'g++' or args['cxx'] == 'g++-simd' or args['cxx'] == 'clang++'
or args['cxx'] == 'clang++-simd'):
makefile_options['COMPILER_FLAGS'] += ' -fopenmp'
if (args['cxx'] == 'clang++-apple'):
# Apple Clang disables the front end OpenMP driver interface; enable it via the
# preprocessor. Must install LLVM's OpenMP runtime library libomp beforehand
makefile_options['COMPILER_FLAGS'] += ' -Xpreprocessor -fopenmp'
makefile_options['LIBRARY_FLAGS'] += ' -lomp'
if (args['cxx'] == 'icpc' or args['cxx'] == 'icpc-debug' or args['cxx'] == 'icpc-phi'
or args['cxx'] == 'icpx'):
makefile_options['COMPILER_FLAGS'] += ' -qopenmp'
if args['cxx'] == 'cray':
makefile_options['COMPILER_FLAGS'] += ' -homp'
if args['cxx'] == 'bgxlc++':
# use thread-safe version of compiler
definitions['COMPILER_COMMAND'] += '_r'
makefile_options['COMPILER_COMMAND'] += '_r'
makefile_options['COMPILER_FLAGS'] += ' -qsmp'
else:
definitions['OPENMP_OPTION'] = 'NOT_OPENMP_PARALLEL'
if args['cxx'] == 'cray':
makefile_options['COMPILER_FLAGS'] += ' -hnoomp'
if (args['cxx'] == 'icpc' or args['cxx'] == 'icpc-debug' or args['cxx'] == 'icpc-phi'
or args['cxx'] == 'icpx'):
# suppressed messages:
# 3180: pragma omp not recognized
makefile_options['COMPILER_FLAGS'] += ' -diag-disable 3180'
# --grav argument
if args['grav'] == "none":
definitions['SELF_GRAVITY_ENABLED'] = '0'
else:
if args['grav'] == "fft":
definitions['SELF_GRAVITY_ENABLED'] = '1'
if not args['fft']:
raise SystemExit(
                '### CONFIGURE ERROR: FFT Poisson solver can only be used with FFT')
# -fft argument
makefile_options['MPIFFT_FILE'] = ' '
definitions['FFT_OPTION'] = 'NO_FFT'
if args['fft']:
definitions['FFT_OPTION'] = 'FFT'
if args['fftw_path'] != '':
makefile_options['PREPROCESSOR_FLAGS'] += ' -I{0}/include'.format(
args['fftw_path'])
makefile_options['LINKER_FLAGS'] += ' -L{0}/lib'.format(args['fftw_path'])
if args['omp']:
makefile_options['LIBRARY_FLAGS'] += ' -lfftw3_omp'
if args['mpi']:
makefile_options['MPIFFT_FILE'] = ' $(wildcard src/fft/plimpton/*.cpp)'
makefile_options['LIBRARY_FLAGS'] += ' -lfftw3'
# -hdf5 argument
if args['hdf5']:
definitions['HDF5_OPTION'] = 'HDF5OUTPUT'
if args['hdf5_path'] != '':
makefile_options['PREPROCESSOR_FLAGS'] += ' -I{0}/include'.format(
args['hdf5_path'])
makefile_options['LINKER_FLAGS'] += ' -L{0}/lib'.format(args['hdf5_path'])
if (args['cxx'] == 'g++' or args['cxx'] == 'g++-simd'
or args['cxx'] == 'cray' or args['cxx'] == 'icpc'
or args['cxx'] == 'icpx'
or args['cxx'] == 'icpc-debug' or args['cxx'] == 'icpc-phi'
or args['cxx'] == 'clang++' or args['cxx'] == 'clang++-simd'
or args['cxx'] == 'clang++-apple'):
makefile_options['LIBRARY_FLAGS'] += ' -lhdf5'
if args['cxx'] == 'bgxlc++':
makefile_options['PREPROCESSOR_FLAGS'] += (
' -D_LARGEFILE_SOURCE -D_LARGEFILE64_SOURCE -D_BSD_SOURCE'
' -I/soft/libraries/hdf5/1.10.0/cnk-xl/current/include'
' -I/bgsys/drivers/ppcfloor/comm/include')
makefile_options['LINKER_FLAGS'] += (
' -L/soft/libraries/hdf5/1.10.0/cnk-xl/current/lib'
' -L/soft/libraries/alcf/current/xl/ZLIB/lib')
makefile_options['LIBRARY_FLAGS'] += ' -lhdf5 -lz -lm'
else:
definitions['HDF5_OPTION'] = 'NO_HDF5OUTPUT'
# -h5double argument (does nothing if no -hdf5)
if args['h5double']:
definitions['H5_DOUBLE_PRECISION_ENABLED'] = '1'
else:
definitions['H5_DOUBLE_PRECISION_ENABLED'] = '0'
# --cflag=[string] argument
if args['cflag'] is not None:
makefile_options['COMPILER_FLAGS'] += ' '+args['cflag']
# --include=[name] arguments
for include_path in args['include']:
makefile_options['COMPILER_FLAGS'] += ' -I'+include_path
# --lib_path=[name] arguments
for library_path in args['lib_path']:
makefile_options['LINKER_FLAGS'] += ' -L'+library_path
# --lib=[name] arguments
for library_name in args['lib']:
makefile_options['LIBRARY_FLAGS'] += ' -l'+library_name
# Assemble all flags of any sort given to compiler
definitions['COMPILER_FLAGS'] = ' '.join(
[makefile_options[opt+'_FLAGS'] for opt in
['PREPROCESSOR', 'COMPILER', 'LINKER', 'LIBRARY']])
# --- Step 4. Create new files, finish up --------------------------------
# Terminate all filenames with .cpp extension
makefile_options['PROBLEM_FILE'] += '.cpp'
makefile_options['COORDINATES_FILE'] += '.cpp'
makefile_options['EOS_FILE'] += '.cpp'
makefile_options['GENERAL_EOS_FILE'] += '.cpp'
makefile_options['RSOLVER_FILE'] += '.cpp'
# Read templates
with open(defsfile_input, 'r') as current_file:
defsfile_template = current_file.read()
with open(makefile_input, 'r') as current_file:
makefile_template = current_file.read()
# Make substitutions
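# (Illustratively, a placeholder such as @RSOLVER_FILE@ in the Makefile template would be
# replaced below by the configured value, e.g. 'hlld.cpp' for an MHD build.)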
for key, val in definitions.items():
defsfile_template = re.sub(r'@{0}@'.format(key), val, defsfile_template)
for key, val in makefile_options.items():
makefile_template = re.sub(r'@{0}@'.format(key), val, makefile_template)
# Write output files
with open(defsfile_output, 'w') as current_file:
current_file.write(defsfile_template)
with open(makefile_output, 'w') as current_file:
current_file.write(makefile_template)
# Finish with diagnostic output
# To match show_config.cpp output: use 2 space indent for option, value string starts on
# column 30
self_grav_string = 'OFF'
if args['grav'] == 'fft':
self_grav_string = 'FFT'
print('Your Athena++ distribution has now been configured with the following options:')
print(' Problem generator: ' + args['prob'])
print(' Coordinate system: ' + args['coord'])
print(' Equation of state: ' + args['eos'])
print(' Riemann solver: ' + args['flux'])
print(' Magnetic fields: ' + ('ON' if args['b'] else 'OFF'))
print(' Number of scalars: ' + args['nscalars'])
print(' Special relativity: ' + ('ON' if args['s'] else 'OFF'))
print(' General relativity: ' + ('ON' if args['g'] else 'OFF'))
print(' Frame transformations: ' + ('ON' if args['t'] else 'OFF'))
print(' Self-Gravity: ' + self_grav_string)
print(' Super-Time-Stepping: ' + ('ON' if args['sts'] else 'OFF'))
print(' Debug flags: ' + ('ON' if args['debug'] else 'OFF'))
print(' Code coverage flags: ' + ('ON' if args['coverage'] else 'OFF'))
print(' Linker flags: ' + makefile_options['LINKER_FLAGS'] + ' '
+ makefile_options['LIBRARY_FLAGS'])
print(' Floating-point precision: ' + ('single' if args['float'] else 'double'))
print(' Number of ghost cells: ' + args['nghost'])
print(' MPI parallelism: ' + ('ON' if args['mpi'] else 'OFF'))
print(' OpenMP parallelism: ' + ('ON' if args['omp'] else 'OFF'))
print(' FFT: ' + ('ON' if args['fft'] else 'OFF'))
print(' HDF5 output: ' + ('ON' if args['hdf5'] else 'OFF'))
if args['hdf5']:
print(' HDF5 precision: ' + ('double' if args['h5double'] else 'single'))
print(' Compiler: ' + args['cxx'])
print(' Compilation command: ' + makefile_options['COMPILER_COMMAND'] + ' '
+ makefile_options['PREPROCESSOR_FLAGS'] + ' ' + makefile_options['COMPILER_FLAGS'])
|
py | 1a51616f16a3b439b4bccf7548d36ae9771752c0 | # coding=utf8
import getpass
import re
import requests
import sys
import time
def retry(n=1):
def _file_retry(fn):
def __file_retry(*args, **kwargs):
for i in range(n):
try:
return fn(*args, **kwargs)
except:
pass
return __file_retry
return _file_retry
class Oauth(object):
def __init__(self, sandbox=True, isInternational=False):
if sandbox:
self.host = 'https://sandbox.evernote.com'
elif isInternational:
self.host = 'https://www.evernote.com'
else:
self.host = 'https://app.yinxiang.com'
self.s = requests.session()
def oauth(self):
preloadContent = self._pre_load()
loginContent = self._login(preloadContent)
        if 'Developer Tokens' not in loginContent: return False, False
return self.get_token(loginContent), time.time() + 31535000
@retry(3)
def _pre_load(self):
return self.s.get(self.host + '/Login.action?targetUrl=%2Fapi%2FDeveloperToken.action').content
@retry(3)
def _login(self, preloadContent):
data = {
'username': raw_input('Username: ').decode(sys.stdin.encoding),
'password': getpass.getpass(),
            'login': '登录',
'showSwitchService': 'true',
'targetUrl': "/api/DeveloperToken.action",
'_sourcePage': "MbulSL0GgBDiMUD9T65RG_YvRLZ-1eYO3fqfqRu0fynRL_1nukNa4gH1t86pc1SP",
'__fp': "ZtDCgDFj-IY3yWPvuidLz-TPR6I9Jhx8",
}
for eid in ('hpts', 'hptsh'):
data[eid] = re.compile('getElementById\("%s"\).value = "(.*?)";' % eid).search(preloadContent).groups()[0]
return self.s.post(self.host + '/Login.action', data).content
@retry(3)
def get_token(self, loginContent):
def _change_token(c, isCreated=False):
dataTuple = ('secret', 'csrfBusterToken', '_sourcePage', '__fp')
if isCreated:
data = {'remove': 'Revoke your developer token', }
dataTuple += ('noteStoreUrl',)
else:
data = {'create': 'Create a developer token', }
for name in dataTuple:
data[name] = re.compile('<input[^>]*?name="%s"[^>]*?value="(.*?)"' % name).search(c).groups()[0]
return self.s.post(self.host + '/api/DeveloperToken.action', data).content
if 'Your Developer Token has already been created' in loginContent: _change_token(loginContent, True)
c = _change_token(loginContent)
return re.compile('<input[^>]*?name="%s"[^>]*?value="(.*?)"' % 'accessToken').search(c).groups()[0]
if __name__ == '__main__':
print Oauth().oauth()
|
py | 1a5161a66234d01d4bf4f9579a4fddbd3b321393 | """
Revision ID: e17533d246ab
Revises: 373ca3da3cfe
Create Date: 2021-03-10 12:23:39.706675
"""
from alembic import op
from datetime import datetime
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
import uuid
revision = 'e17533d246ab'
down_revision = '373ca3da3cfe'
def upgrade():
op.get_bind()
op.execute("update users set platform_admin = True where id = '6af522d0-2915-4e52-83a3-3690455a5fe6'")
    permissions = ['manage_users', 'manage_templates', 'manage_settings', 'send_texts',
                   'send_emails', 'send_letters', 'manage_api_keys', 'view_activity']
    for permission in permissions:
        op.execute("INSERT INTO permissions (id, user_id, service_id, permission, created_at) values ('{}', '6af522d0-2915-4e52-83a3-3690455a5fe6', 'd6aa2c68-a2d9-4437-ab19-3ae8eb202553', '{}', '{}')".format(str(uuid.uuid4()), permission, datetime.utcnow()))
op.execute("update users set platform_admin = True where id = '6af522d0-2915-4e52-83a3-3690455a5fe6'")
def downgrade():
op.execute("update users set platform_admin = False where id = '6af522d0-2915-4e52-83a3-3690455a5fe6'")
    permissions = ['manage_users', 'manage_templates', 'manage_settings', 'send_texts',
                   'send_emails', 'send_letters', 'manage_api_keys', 'view_activity']
    for permission in permissions:
        op.execute("DELETE from permissions where user_id = '6af522d0-2915-4e52-83a3-3690455a5fe6' AND permission = '{}'".format(permission))
|
py | 1a51633dc22bc39794c5d643e66c37bec761f293 | import tensorflow as tf
from tensorflow.python.ops.array_ops import fake_quant_with_min_max_vars
a = tf.Variable([0.0, 0.1, 0.3, 0.49, 0.5, 0.8, 1.1, 1.23, 1.49, 1.5, 1.51, 2.0])
qa = fake_quant_with_min_max_vars(a, tf.reduce_min(a), tf.reduce_max(a), num_bits=3, narrow_range=False)
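# With num_bits=3 and narrow_range=False the values are snapped to 2**3 = 8 evenly spaced
# quantization levels between min(a) = 0.0 and max(a) = 2.0, i.e. a step of 2/7.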
sess = tf.Session()
sess.run(tf.global_variables_initializer())
print(sess.run(a))
print(sess.run(qa))
|
py | 1a51641589c95c4013da7eadfddddd0cfb2415de | """Test module for get_nth_even_number."""
import pytest
nth_even_number = [
(1, 0), (2, 2), (3, 4), (100, 198), (1298734, 2597466),
(1298, 2594), (12, 22), (9999, 19996), (800, 1598)
]
@pytest.mark.parametrize('n, result', nth_even_number)
def test_nth_even(n, result):
"""Test nth_even function to return nth even number."""
from get_nth_even_number import nth_even
assert nth_even(n) == result
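# get_nth_even_number.py itself is not included in this module; a minimal implementation
# consistent with the parametrized cases above (the 1st even number is 0, the 2nd is 2,
# and so on) could be the following sketch:
#
#     def nth_even(n):
#         """Return the nth even number, counting 0 as the first."""
#         return 2 * (n - 1)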
|
py | 1a51658e7995600bbea6eb3c9f3a3a35f4a6c501 | from src.cubis import main
if __name__ == "__main__":
main()
|
py | 1a516686b9ad6647f780d54c78157baca0096efb | import monster.deployments.base as base
class Deployment(base.Deployment):
"""Deployment mechanisms specific to a RPCS deployment using Chef as
configuration management.
"""
def __init__(self, name, environment, status=None, clients=None):
"""Initializes a RPCS deployment object.
:type name: str
:type environment: monster.environments.chef.environment.Environment
:type status: str
"""
raise NotImplementedError()
def __str__(self):
return str(self.to_dict)
def build(self):
"""Saves deployment for restore after build."""
raise NotImplementedError()
def save_to_environment(self):
"""Save deployment restore attributes to chef environment."""
raise NotImplementedError()
def get_upgrade(self, branch_name):
"""This will return an instance of the correct upgrade class.
:param branch_name: The name of the provisioner
:type branch_name: str
:rtype: monster.deployments.base.Deployment
"""
raise NotImplementedError()
def upgrade(self, branch_name):
"""Upgrades the deployment."""
raise NotImplementedError()
def update_environment(self):
"""Saves deployment for restore after update environment."""
raise NotImplementedError()
def destroy(self):
"""Destroys Chef Deployment."""
raise NotImplementedError()
def horizon(self):
raise NotImplementedError()
def openrc(self):
"""Opens a new shell with variables loaded for nova-client."""
raise NotImplementedError()
@property
def to_dict(self):
raise NotImplementedError()
@property
def openstack_clients(self):
"""Setup OpenStack clients generator for deployment."""
raise NotImplementedError()
@property
def rabbitmq_mgmt_client(self):
"""Return rabbitmq management client."""
raise NotImplementedError()
@property
def horizon_ip(self):
"""Returns IP of Horizon.
:rtype: str
"""
raise NotImplementedError()
def wrap_node(self, node):
raise NotImplementedError()
|
py | 1a51669f90f20fd3fd57cec901b2d739a9137f08 | from kapteyn import maputils
from matplotlib import pyplot as plt
from kapteyn import tabarray
import numpy
# Get a header and change some values
f = maputils.FITSimage("m101.fits")
header = f.hdr
header['CDELT1'] = 0.1
header['CDELT2'] = 0.1
header['CRVAL1'] = 285
header['CRVAL2'] = 20
# Use the changed header as external source for new object
f = maputils.FITSimage(externalheader=header)
fig = plt.figure()
frame = fig.add_subplot(1,1,1)
annim = f.Annotatedimage(frame)
grat = annim.Graticule()
fn = 'WDB/smallworld.txt'
# Note that in this file the latitudes are in the first column
# (column 0) and the longitudes in the second (column 1)
xp, yp = annim.positionsfromfile(fn, 's', cols=[1,0])
annim.Marker(x=xp, y=yp, mode='pixels', marker=',', color='b')
annim.plot()
frame.set_title("Markers in the Caribbean")
plt.show() |
py | 1a51675820cd6eec5c073494813b3dd559fb02d0 | #Simple Linear Regression
#import libraries
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
#Read data
dataset = pd.read_csv('Salary_Data.csv')
x = dataset.iloc[:,:-1].values
y = dataset.iloc[:,1].values
#Splitting data
from sklearn.model_selection import train_test_split
X_train, X_test, Y_train, Y_test = train_test_split(x,y, test_size=1/3, random_state=0)
#Feature Scaling
'''from sklearn.preprocessing import StandardScaler
sc_X = StandardScaler()
X_train = sc_X.fit_transform(X_train)
X_test = sc_X.transform(X_test)'''
#Fitting Simple Linear Regression to the Training set
from sklearn.linear_model import LinearRegression
regressor = LinearRegression()
regressor.fit(X_train,Y_train)
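#Inspect the fitted line (optional): regressor.coef_[0] is the slope and
#regressor.intercept_ the intercept (standard sklearn LinearRegression attributes)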
#Predict the test set results
y_pred = regressor.predict(X_test)
#Visualising the training set
plt.scatter(X_train,Y_train,color='red')
plt.plot(X_train, regressor.predict(X_train),color='blue')
plt.title('Salary vs Experience(Training set)')
plt.xlabel('Years of Experience')
plt.ylabel('Salary')
plt.show()
#Visualising the Test set
plt.scatter(X_test,Y_test,color='red')
plt.plot(X_train, regressor.predict(X_train),color='blue')
plt.title('Salary vs Experience(Test set)')
plt.xlabel('Years of Experience')
plt.ylabel('Salary')
plt.show() |
py | 1a51676dfd8435a0cc10702d56dc12b02e6d1d43 | # Generated by Django 3.2 on 2021-04-29 21:27
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Baby',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255)),
('born', models.DateField()),
],
options={
'verbose_name_plural': 'babies',
},
),
migrations.CreateModel(
name='Nap',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('startTime', models.DateTimeField()),
('endTime', models.DateTimeField()),
('baby', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='api.baby')),
],
),
migrations.CreateModel(
name='Feed',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('startTime', models.DateTimeField()),
('quantity', models.IntegerField()),
('unit', models.CharField(max_length=255)),
('foodType', models.CharField(max_length=255)),
('baby', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='api.baby')),
],
),
migrations.CreateModel(
name='Diaper',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('poop', models.BooleanField()),
('wet', models.BooleanField()),
('time', models.DateTimeField()),
('baby', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='api.baby')),
],
),
]
|
py | 1a5169159519640ba5620da013bae406be011027 | #!/usr/bin/python
import sc_config
from sc_logger import sc_logger
class RedBalloon(object):
def __init__(self,args):
#load config file
sc_config.config.get_file(self.name())
#get camera index
self.camera_index = int(args.camera)
def name(self):
return "Red_Balloon_Finder"
def run(self):
sc_logger.text(sc_logger.GENERAL, 'running {0}'.format(self.name()))
|
py | 1a5169920c0452072c78502e23d3486d19b0c06a | #!/usr/bin/env python
"""Strictly for loading agents to inspect. Based on `main.py`."""
import datetime
import os
import time
import argparse
import cv2
import pickle
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
from os.path import join
from ravens import Dataset, Environment, cameras, agents, tasks
from ravens import utils as U
# Of critical importance! See the top of main.py for details.
MAX_ORDER = 4
# See Task().
PIXEL_SIZE = 0.003125
CAMERA_CONFIG = cameras.RealSenseD415.CONFIG
BOUNDS = np.array([[0.25, 0.75], [-0.5, 0.5], [0, 0.28]])
def goal_similarity(obs, goal):
"""For goal-conditioning, measure how close current image is to goal.
Metrics: L2 and SSIM for now. The `obs` and `goal` should be of the same
format as in rollout(), where they have color/depth keys, with 3 camera
viewpoints. However, `obs` will be a list and `goal a np.array. For the
pose metrics, use the task reward.
"""
# Requires pip install scikit-image
from skimage.metrics import structural_similarity
colormap_o, _ = get_heightmap(obs=obs)
colormap_g, _ = get_heightmap(obs=goal)
L2 = np.linalg.norm(colormap_o - colormap_g) / np.prod(colormap_o.shape)
SSIM = structural_similarity(colormap_o, colormap_g, multichannel=True)
metrics = {}
metrics['L2'] = round(L2, 4)
metrics['SSIM'] = round(SSIM, 4)
return metrics
def get_heightmap(obs):
"""Reconstruct orthographic heightmaps with segmentation masks.
Here, `obs` could be current or goal, either will work.
See transporter.py, regression.py, task.py, dummy.py, and dataset.py.
We use this pattern quite a lot. Copy from transporter.py version.
"""
heightmaps, colormaps = U.reconstruct_heightmaps(
obs['color'], obs['depth'], CAMERA_CONFIG, BOUNDS, PIXEL_SIZE)
colormaps = np.float32(colormaps)
heightmaps = np.float32(heightmaps)
# Fuse maps from different views.
valid = np.sum(colormaps, axis=3) > 0
repeat = np.sum(valid, axis=0)
repeat[repeat == 0] = 1
colormap = np.sum(colormaps, axis=0) / repeat[..., None]
colormap = np.uint8(np.round(colormap))
heightmap = np.max(heightmaps, axis=0)
return colormap, heightmap
def load(path, iepisode, field):
"""Adapted from `dataset.py` so we can sample goal images. Just including
some logic to extract the episode automatically based on the index
`iepisode`, so we don't need to know the length in advance.
"""
field_path = os.path.join(path, field)
data_list = [os.path.join(field_path, x) for x in os.listdir(field_path)]
fname = [x for x in data_list if f'{iepisode:06d}' in x]
assert len(fname) == 1, fname
fname = fname[0]
return pickle.load(open(fname, 'rb'))
def debug_time_step(t, epidx, obs, act, extras, goal=None):
"""Save images and other stuff from time `t` in episode `epidx`."""
pth = 'tmp'
tt = str(t).zfill(2)
# Convert from BGR to RGB to match what we see in the GUI.
def save(fname, c_img):
cv2.imwrite(fname, img=cv2.cvtColor(c_img, cv2.COLOR_BGR2RGB))
# Save current color images from camera angles and the fused version.
for img_idx, c_img in enumerate(obs['color']):
fname = join(pth, f'ep_{epidx}_t{tt}_cimg_{img_idx}.png')
save(fname, c_img)
colormap_o, _ = get_heightmap(obs=obs)
fname = join(pth, f'ep_{epidx}_t{tt}_cimg_fused.png')
save(fname, colormap_o)
# (If applicable) save the goal color images.
if (goal is not None) and t == 1:
for img_idx, c_img in enumerate(goal['color']):
fname = join(pth, f'ep_{epidx}_t{tt}_cimg_{img_idx}_goal.png')
save(fname, c_img)
colormap_g, _ = get_heightmap(obs=goal)
fname = join(pth, f'ep_{epidx}_t{tt}_cimg_fused_goal.png')
save(fname, colormap_g)
# Print the action.
pose0 = act['params']['pose0']
pose1 = act['params']['pose1']
print(f" pose0, pose1: {U.round_pose(pose0)}, {U.round_pose(pose1)}")
# Attention. (Well, attn_input.png is also input to Transport...)
fname1 = join(pth, f'ep_{epidx}_t{tt}_attn_input.png')
fname2 = join(pth, f'ep_{epidx}_t{tt}_attn_heat_bgr.png')
cv2.imwrite(fname1, extras['input_c'])
cv2.imwrite(fname2, extras['attn_heat_bgr'])
# Transport
for idx, tran_heat in enumerate(extras['tran_heat_bgr']):
idxstr = str(idx).zfill(2)
fname = join(pth, f'ep_{epidx}_t{tt}_tran_rot_{idxstr}.png')
if idx == extras['tran_rot_argmax']:
fname = fname.replace('.png', '_rot_chosen.png')
cv2.imwrite(fname, tran_heat)
def rollout(agent, env, task, goal_conditioned, args, num_finished, debug=False):
"""Standard gym environment rollout.
Adding more debugging options (enable with debug=True), such as printing
the pose and saving the images and heatmaps. We can also run `dataset.py`
and see goal images in the `goals_out` directory.
:goal_conditioned: a boolean to check if we have goal-conditioning.
:num_finished: to track how many episodes we have finished. Ignores any
episodes drawn and then discarded due to initial states that were
already done. Also used to sample the goal states for
goal-conditioned policies. We have a fixed number of testing episodes
(characterized by goal images), so `num_finished` is the identifier.
Returns `t` to track episode length. Update (21 Aug 2020): also returns
last_stuff=(obs,info), consistent with main.py and generate_goals.py.
(13 Oct 2020): fixing so that we will always append stuff in the episode
list for gt_state agents. The problem is that the first time step (start_t=1)
wasn't saving because len(obs) = 0, but in gt_state we actually want to save.
Otherwise, a length 1 episode will have len(episode)==0 later. It's not a huge
deal because we still save the final info correctly, so that we can report
correct stats, but it helps to have the initial info because that gives us the
deltas over the starting state.
"""
if debug:
if not os.path.exists('tmp/'):
os.makedirs('tmp/')
print('')
start_t = 0
if args.agent in ['gt_state', 'gt_state_2_step']:
start_t = 1
episode = []
total_reward = 0
# Before task.reset(), need goal info for goal episode at idx `num_finished`.
if goal_conditioned:
task.goal_cond_testing = True
path = os.path.join('goals', args.task)
goal = {}
goal['color'] = load(path, num_finished, 'last_color')
goal['depth'] = load(path, num_finished, 'last_depth')
goal['info'] = load(path, num_finished, 'last_info')
goal_imgs = goal if goal_conditioned else None
# Reset env and call task.reset(), len(obs)=0 but info will have stuff for gt_state.
if goal_conditioned:
obs = env.reset(task, last_info=goal['info'])
else:
obs = env.reset(task)
info = env.info
for t in range(start_t, task.max_steps):
if debug and t > 0:
act, extras = agent.act(obs, info, goal=goal_imgs, debug_imgs=True)
else:
act = agent.act(obs, info, goal=goal_imgs)
# Optional debugging to save images, etc. Do before we get new obs.
if debug and 'params' in act:
debug_time_step(t, num_finished, obs, act, extras, goal=goal_imgs)
# (13 Oct 2020) Ah, if gt_state, we won't save at start_t=1, so let's fix that!
if (len(obs) > 0 and act['primitive']) or (args.agent in ['gt_state', 'gt_state_2_step']):
episode.append((act, info)) # don't save obs
(obs, reward, done, info) = env.step(act)
# If goal-conditioning, additionally compute image-based metrics.
if goal_conditioned and ('color' in obs and 'depth' in obs):
info['image_metrics'] = goal_similarity(obs, goal_imgs)
else:
info['image_metrics'] = None
if debug:
print(' {}/{}, rew: {:0.3f}, len(epis): {}, act: {}, info: {}'.format(t,
task.max_steps, reward, len(episode), act['primitive'], info['extras']))
if goal_conditioned:
print(' goal-conditioning image metrics: {}'.format(info['image_metrics']))
total_reward += reward
last_obs_info = (obs, info)
if done:
break
return total_reward, episode, t, last_obs_info
def is_goal_conditioned(args):
"""
Be careful with checking this condition. See `generate_goals.py`. Here,
though, we check the task name and as an extra safety measure, check that
the agent is also named with 'goal'.
    Update: all right, let's modify this to incorporate gt_state w/out too much
extra work. :(
"""
goal_tasks = ['insertion-goal', 'cable-shape-notarget', 'cable-line-notarget',
'cloth-flat-notarget', 'bag-color-goal']
goal_task = (args.task in goal_tasks)
if goal_task:
assert 'goal' in args.agent or 'gt_state' in args.agent, \
'Agent should be a goal-based agent, or gt_state agent.'
return goal_task
def ignore_this_demo(args, reward, t, last_extras):
"""In some cases, we should filter out demonstrations.
Filter for if t == 0, which means the initial state was a success, and
also if we have exit_gracefully, which means for the bag-items tasks, it
may not have had visible item(s) at the start, for some reason.
"""
ignore = (t == 0)
if 'exit_gracefully' in last_extras:
assert last_extras['exit_gracefully']
return True
return ignore
if __name__ == '__main__':
# Parse command line arguments.
parser = argparse.ArgumentParser()
parser.add_argument('--gpu', default='0')
parser.add_argument('--disp', action='store_true')
parser.add_argument('--task', default='hanoi')
parser.add_argument('--agent', default='transporter')
parser.add_argument('--num_demos', default=1000, type=int)
parser.add_argument('--train_run', default=0, type=int)
parser.add_argument('--num_test_eps', default=20, type=int)
parser.add_argument('--num_rots', default=24, type=int,
help='Transporter rotations used from the trained model, usually 24')
parser.add_argument('--num_rots_inf', default=24, type=int,
help='Transporter rotations we want FOR INFERENCE time; it can be 1')
parser.add_argument('--hz', default=240.0, type=float)
parser.add_argument('--crop_bef_q', default=0, type=int, help='CoRL paper used 1')
parser.add_argument('--gpu_mem_limit', default=None)
parser.add_argument('--subsamp_g', action='store_true')
args = parser.parse_args()
# Configure which GPU to use.
cfg = tf.config.experimental
gpus = cfg.list_physical_devices('GPU')
if len(gpus) == 0:
print('No GPUs detected. Running with CPU.')
else:
cfg.set_visible_devices(gpus[int(args.gpu)], 'GPU')
# Configure how much GPU to use.
if args.gpu_mem_limit is not None:
MEM_LIMIT = int(1024 * float(args.gpu_mem_limit))
print(args.gpu_mem_limit)
dev_cfg = [cfg.VirtualDeviceConfiguration(memory_limit=MEM_LIMIT)]
cfg.set_virtual_device_configuration(gpus[0], dev_cfg)
# Initialize task, set to 'test,' but I think this only matters for kitting.
task = tasks.names[args.task]()
task.mode = 'test'
# Evaluate on saved snapshots. Go backwards to get better results first.
snapshot_itrs = [i*2000 for i in range(1,10+1)] # Do 10 snapshots to save on compute.
snapshot_itrs = snapshot_itrs[::-1]
if not os.path.exists('test_results'):
os.makedirs('test_results')
# Make environment once, due to issues with deformables + multiple calls.
env = Environment(args.disp, hz=args.hz)
# Check if it's goal-conditioned.
goal_conditioned = is_goal_conditioned(args)
for snapshot_itr in snapshot_itrs:
# Set random seeds, so different snapshots test on same starting states.
tf.random.set_seed(args.train_run)
np.random.seed(args.train_run)
# Set the beginning of the agent name.
name = f'{args.task}-{args.agent}-{args.num_demos}-{args.train_run}'
# Initialize agent and load from snapshot. NOTE: main difference from
# main.py is to use num_rots_inf (not args.num_rots) for inference time.
# Also, `self.name` must match what's in main.py, to load correct weights.
if args.agent == 'transporter':
name = f'{name}-rots-{args.num_rots}-crop_bef_q-{args.crop_bef_q}'
agent = agents.names[args.agent](name,
args.task,
num_rotations=args.num_rots_inf,
crop_bef_q=(args.crop_bef_q == 1))
elif 'transporter-goal' in args.agent:
assert goal_conditioned
name = f'{name}-rots-{args.num_rots}'
if args.subsamp_g:
name += '-sub_g'
else:
name += '-fin_g'
agent = agents.names[args.agent](name,
args.task,
num_rotations=args.num_rots_inf)
elif 'gt_state' in args.agent:
agent = agents.names[args.agent](name,
args.task,
one_rot_inf=(args.num_rots_inf==1),
goal_conditioned=goal_conditioned)
else:
agent = agents.names[args.agent](name, args.task)
agent.load(snapshot_itr)
print(f'\nFinished loading snapshot: {snapshot_itr}, for: {name}.')
# Hacky. Works for transporter and gt-state(2step) agents.
agent.real_task = task
# Evaluate agent. Save as list of (iter, episode_list, results(dict)).
# List `episode_list` has all the `info`s BEFORE the last one (gives
# starting state material), and the last one is `results['final_info']`.
performance = []
episode = 0
finished = 0
while finished < args.num_test_eps:
seed = 10**MAX_ORDER + episode
np.random.seed(seed)
total_reward, episode_list, length, last_obs_info = rollout(
agent, env, task, goal_conditioned, args, num_finished=finished)
_, info = last_obs_info # ignore obs
last_extras = info['extras']
if ignore_this_demo(args, total_reward, t=length, last_extras=last_extras):
print(f' Ignoring demo, {last_extras}, not counting episode {episode}')
else:
result = {'reward': total_reward, 'length': length}
result['final_info'] = info['extras']
if goal_conditioned:
result['image_metrics'] = info['image_metrics']
print(f' Test (seed {seed}): {finished}. Results: {result}')
performance.append((agent.total_iter, episode_list, result))
finished += 1
episode += 1
# Save results.
ss = str(snapshot_itr).zfill(5)
rots_inf = str(args.num_rots_inf).zfill(2)
base1 = f'{name}-rotsinf-{rots_inf}'
base2 = f'snapshot-{ss}-eps-{args.num_test_eps}.pkl'
head = os.path.join('test_results', base1)
if not os.path.exists(head):
os.makedirs(head)
fpath = os.path.join(head, base2)
with open(fpath, 'wb') as fh:
pickle.dump(performance, fh)
|
py | 1a516a96d71567e89dc8ef5b50dfb4e9391143ca | """Test zipfile compat.
"""
import inspect
import sys
import zipfile
import pytest
import rarfile
# don't fail on new Python by default
_VERS = [(3, 6), (3, 7), (3, 8)]
_UNSUPPORTED = sys.version_info[:2] not in _VERS
_ignore = set([
"detach",
"peek",
"read1",
"readinto1",
"seek",
# no kwargs
"readinto",
"readline",
"truncate",
"write",
# random
"FileHeader",
"from_file",
"testzip",
"writestr",
])
def load_cls_names(maincls):
assert inspect.isclass(maincls)
res = {}
for cls in inspect.getmro(maincls):
for name, val in inspect.getmembers(cls):
if name not in res:
res[name] = val
return res
def cleansig(sig):
res = str(sig).replace(", /", "")
if "*" in res:
res = res.split(", *")[0] + ")"
return res
def compare(rmaincls, zmaincls):
znames = load_cls_names(zmaincls)
rnames = load_cls_names(rmaincls)
for name, zval in znames.items():
if not inspect.isroutine(zval) or name[0] == "_" or name in _ignore:
continue
assert name in rnames, "member not found: \"%s\"" % name
rval = rnames[name]
zsig = inspect.signature(zval)
rsig = inspect.signature(rval)
zsigstr = cleansig(zsig)
rsigstr = cleansig(rsig)
assert zsigstr == rsigstr, "sig differs: %s.%s%s != %s.%s%s" % (
rmaincls.__name__, name, rsigstr,
zmaincls.__name__, name, zsigstr)
@pytest.mark.skipif(_UNSUPPORTED, reason="Unsupported for sig checks")
def test_cmp_zipfile():
compare(rarfile.RarFile, zipfile.ZipFile)
@pytest.mark.skipif(_UNSUPPORTED, reason="Unsupported for sig checks")
def test_cmp_zipextfile():
compare(rarfile.RarExtFile, zipfile.ZipExtFile)
@pytest.mark.skipif(_UNSUPPORTED, reason="Unsupported for sig checks")
def test_cmp_zipinfo():
compare(rarfile.RarInfo, zipfile.ZipInfo)
|
py | 1a516bd07f658a57dfdd2109d505b7279f1fbb55 | # ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# Original Author = Jacob Morris
# URL = blendingjacob.blogspot.com
# Note: scene properties are moved into __init__ together with the 3 update functions
# for properties search for the name patterns adv_obj and advanced_objects
bl_info = {
"name": "CubeSter",
"author": "Jacob Morris",
"version": (0, 7, 1),
"blender": (2, 78, 0),
"location": "View 3D > Toolbar > CubeSter",
"description": "Takes image, image sequence, or audio file and converts it "
"into a height map based on pixel color and alpha values",
"category": "Add Mesh"
}
import bpy
import bmesh
from bpy.types import (
Operator,
Panel,
)
import timeit
from random import uniform
from math import radians
from os import (
path,
listdir,
)
# create a block centered at position x, y with width and depth 2 * hw and height h
def create_block(x, y, hw, h, verts: list, faces: list):
if bpy.context.scene.advanced_objects.cubester_block_style == "size":
z = 0.0
else:
z = h
h = 2 * hw
p = len(verts)
verts += [(x - hw, y - hw, z), (x + hw, y - hw, z), (x + hw, y + hw, z), (x - hw, y + hw, z)]
verts += [(x - hw, y - hw, z + h), (x + hw, y - hw, z + h),
(x + hw, y + hw, z + h), (x - hw, y + hw, z + h)]
faces += [(p, p + 1, p + 5, p + 4), (p + 1, p + 2, p + 6, p + 5),
(p + 2, p + 3, p + 7, p + 6), (p, p + 4, p + 7, p + 3),
(p + 4, p + 5, p + 6, p + 7), (p, p + 3, p + 2, p + 1)]
# go through all frames in len(frames), adjusting values at frames[x][y]
def create_f_curves(mesh, frames, frame_step_size, style):
# use data to animate mesh
action = bpy.data.actions.new("CubeSterAnimation")
mesh.animation_data_create()
mesh.animation_data.action = action
data_path = "vertices[%d].co"
vert_index = 4 if style == "blocks" else 0 # index of first vertex
# loop for every face height value
for frame_start_vert in range(len(frames[0])):
# only go once if plane, otherwise do all four vertices that are in top plane if blocks
end_point = frame_start_vert + 4 if style == "blocks" else frame_start_vert + 1
# loop through to get the four vertices that compose the face
for frame_vert in range(frame_start_vert, end_point):
# fcurves for x, y, z
fcurves = [action.fcurves.new(data_path % vert_index, i) for i in range(3)]
frame_counter = 0 # go through each frame and add position
temp_v = mesh.vertices[vert_index].co
# loop through frames
for frame in frames:
# new x, y, z positions
vals = [temp_v[0], temp_v[1], frame[frame_start_vert]]
for i in range(3): # for each x, y, z set each corresponding fcurve
fcurves[i].keyframe_points.insert(frame_counter, vals[i], {'FAST'})
frame_counter += frame_step_size # skip frames for smoother animation
vert_index += 1
# only skip vertices if made of blocks
if style == "blocks":
vert_index += 4
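# Illustrative note: three f-curves (x, y, z) are created per animated vertex
# and a keyframe is inserted every frame_step_size frames; x and y are keyed to
# their static positions while z follows the per-face height in `frames`. For
# the "blocks" style only the four top vertices of each cube are animated,
# which is why vert_index starts at 4 and skips 4 extra indices per face.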
# create material with given name, apply to object
def create_material(scene, ob, name):
mat = bpy.data.materials.new("CubeSter_" + name)
adv_obj = scene.advanced_objects
image = None
# image
if not adv_obj.cubester_use_image_color and adv_obj.cubester_color_image in bpy.data.images:
try:
image = bpy.data.images[adv_obj.cubester_color_image]
except:
pass
else:
try:
image = bpy.data.images[adv_obj.cubester_image]
except:
pass
if scene.render.engine == "CYCLES":
mat.use_nodes = True
nodes = mat.node_tree.nodes
att = nodes.new("ShaderNodeAttribute")
att.attribute_name = "Col"
att.location = (-200, 300)
att = nodes.new("ShaderNodeTexImage")
if image:
att.image = image
if adv_obj.cubester_load_type == "multiple":
att.image.source = "SEQUENCE"
att.location = (-200, 700)
att = nodes.new("ShaderNodeTexCoord")
att.location = (-450, 600)
if adv_obj.cubester_materials == "image":
mat.node_tree.links.new(
nodes["Image Texture"].outputs[0],
nodes["Diffuse BSDF"].inputs[0]
)
mat.node_tree.links.new(
nodes["Texture Coordinate"].outputs[2],
nodes["Image Texture"].inputs[0]
)
else:
mat.node_tree.links.new(
nodes["Attribute"].outputs[0],
nodes["Diffuse BSDF"].inputs[0]
)
else:
if adv_obj.cubester_materials == "image" or scene.render.engine != "BLENDER_RENDER":
tex = bpy.data.textures.new("CubeSter_" + name, "IMAGE")
if image:
tex.image = image
slot = mat.texture_slots.add()
slot.texture = tex
else:
mat.use_vertex_color_paint = True
ob.data.materials.append(mat)
# generate mesh from audio
def create_mesh_from_audio(self, scene, verts, faces):
adv_obj = scene.advanced_objects
audio_filepath = adv_obj.cubester_audio_path
width = adv_obj.cubester_audio_width_blocks
length = adv_obj.cubester_audio_length_blocks
size_per_hundred = adv_obj.cubester_size_per_hundred_pixels
size = size_per_hundred / 100
# create all blocks
y = -(width / 2) * size + (size / 2)
for r in range(width):
x = -(length / 2) * size + (size / 2)
for c in range(length):
create_block(x, y, size / 2, 1, verts, faces)
x += size
y += size
# create object
mesh = bpy.data.meshes.new("cubed")
mesh.from_pydata(verts, [], faces)
ob = bpy.data.objects.new("cubed", mesh)
bpy.context.scene.objects.link(ob)
bpy.context.scene.objects.active = ob
ob.select = True
# initial vertex colors
if adv_obj.cubester_materials == "image" and adv_obj.cubester_color_image != "":
picture = bpy.data.images[adv_obj.cubester_color_image]
pixels = list(picture.pixels)
vert_colors = []
skip_y = int(picture.size[1] / width)
skip_x = int(picture.size[0] / length)
for row in range(0, picture.size[1], skip_y + 1):
# go through each column, step by appropriate amount
for column in range(0, picture.size[0] * 4, 4 + skip_x * 4):
r, g, b, a = get_pixel_values(picture, pixels, row, column)
vert_colors += [(r, g, b) for i in range(24)]
bpy.ops.mesh.vertex_color_add()
i = 0
vert_colors_size = len(vert_colors)
for c in ob.data.vertex_colors[0].data:
if i < vert_colors_size:
c.color = vert_colors[i]
i += 1
# image sequence handling
if adv_obj.cubester_load_type == "multiple":
images = find_sequence_images(self, bpy.context)
frames_vert_colors = []
max_images = adv_obj.cubester_max_images + 1 if \
len(images[0]) > adv_obj.cubester_max_images else len(images[0])
# for each image in the sequence, collect the vertex colors for every block
for image_index in range(0, max_images, adv_obj.cubester_skip_images):
filepath = images[0][image_index]
name = images[1][image_index]
picture = fetch_image(self, name, filepath)
pixels = list(picture.pixels)
frame_colors = []
for row in range(0, picture.size[1], skip_y + 1):
for column in range(0, picture.size[0] * 4, 4 + skip_x * 4):
r, g, b, a = get_pixel_values(picture, pixels, row, column)
frame_colors += [(r, g, b) for i in range(24)]
frames_vert_colors.append(frame_colors)
adv_obj.cubester_vertex_colors[ob.name] = \
{"type": "vertex", "frames": frames_vert_colors,
"frame_skip": adv_obj.cubester_frame_step,
"total_images": max_images}
# either add material or create
if ("CubeSter_" + "Vertex") in bpy.data.materials:
ob.data.materials.append(bpy.data.materials["CubeSter_" + "Vertex"])
else:
create_material(scene, ob, "Vertex")
# set keyframe for each object as initial point
frame = [1 for i in range(int(len(verts) / 8))]
frames = [frame]
area = bpy.context.area
old_type = area.type
area.type = "GRAPH_EDITOR"
scene.frame_current = 0
create_f_curves(mesh, frames, 1, "blocks")
# deselect all fcurves
fcurves = ob.data.animation_data.action.fcurves.data.fcurves
for i in fcurves:
i.select = False
max_freq = adv_obj.cubester_audio_max_freq
min_freq = adv_obj.cubester_audio_min_freq
freq_frame = adv_obj.cubester_audio_offset_type
freq_step = (max_freq - min_freq) / length
freq_sub_step = freq_step / width
frame_step = adv_obj.cubester_audio_frame_offset
# animate each block with a portion of the frequency
for c in range(length):
frame_off = 0
for r in range(width):
if freq_frame == "frame":
scene.frame_current = frame_off
l = c * freq_step
h = (c + 1) * freq_step
frame_off += frame_step
else:
l = c * freq_step + (r * freq_sub_step)
h = c * freq_step + ((r + 1) * freq_sub_step)
pos = c + (r * length) # block number
index = pos * 4 # first index for vertex
# select curves
for i in range(index, index + 4):
curve = i * 3 + 2 # fcurve location
fcurves[curve].select = True
try:
bpy.ops.graph.sound_bake(filepath=bpy.path.abspath(audio_filepath), low=l, high=h)
except:
pass
# deselect curves
for i in range(index, index + 4):
curve = i * 3 + 2 # fcurve location
fcurves[curve].select = False
area.type = old_type
# UV unwrap
create_uv_map(bpy.context, width, length)
# if radial apply needed modifiers
if adv_obj.cubester_audio_block_layout == "radial":
# add bezier curve of correct width
bpy.ops.curve.primitive_bezier_circle_add()
curve = bpy.context.object
# slope determined from collected data
curve_size = (0.319 * (width * (size * 100)) - 0.0169) / 100
curve.dimensions = (curve_size, curve_size, 0.0)
# correct for z height
curve.scale = (curve.scale[0], curve.scale[0], curve.scale[0])
ob.select = True
curve.select = False
scene.objects.active = ob
# data was collected and then multi-variable regression was done in Excel
# influence of width and length
width_infl, length_infl, intercept = -0.159125, 0.49996, 0.007637
x_offset = ((width * (size * 100) * width_infl) +
(length * (size * 100) * length_infl) + intercept) / 100
ob.location = (ob.location[0] + x_offset, ob.location[1], ob.location[2])
ob.rotation_euler = (radians(-90), 0.0, 0.0)
bpy.ops.object.modifier_add(type="CURVE")
ob.modifiers["Curve"].object = curve
ob.modifiers["Curve"].deform_axis = "POS_Z"
# generate mesh from image(s)
def create_mesh_from_image(self, scene, verts, faces):
context = bpy.context
adv_obj = scene.advanced_objects
picture = bpy.data.images[adv_obj.cubester_image]
pixels = list(picture.pixels)
x_pixels = picture.size[0] / (adv_obj.cubester_skip_pixels + 1)
y_pixels = picture.size[1] / (adv_obj.cubester_skip_pixels + 1)
width = x_pixels / 100 * adv_obj.cubester_size_per_hundred_pixels
height = y_pixels / 100 * adv_obj.cubester_size_per_hundred_pixels
step = width / x_pixels
half_width = step / 2
y = -height / 2 + half_width
vert_colors = []
weights = [uniform(0.0, 1.0) for i in range(4)] # random weights
rows = 0
# go through each row of pixels stepping by adv_obj.cubester_skip_pixels + 1
for row in range(0, picture.size[1], adv_obj.cubester_skip_pixels + 1):
rows += 1
x = -width / 2 + half_width # reset to left edge of mesh
# go through each column, step by appropriate amount
for column in range(0, picture.size[0] * 4, 4 + adv_obj.cubester_skip_pixels * 4):
r, g, b, a = get_pixel_values(picture, pixels, row, column)
h = find_point_height(r, g, b, a, scene)
# if not transparent
if h != -1:
if adv_obj.cubester_mesh_style == "blocks":
create_block(x, y, half_width, h, verts, faces)
vert_colors += [(r, g, b) for i in range(24)]
else:
verts += [(x, y, h)]
vert_colors += [(r, g, b) for i in range(4)]
x += step
y += step
# if plane not blocks, then remove last 4 items from vertex_colors
# as the faces have already wrapped around
if adv_obj.cubester_mesh_style == "plane":
del vert_colors[len(vert_colors) - 4:len(vert_colors)]
# create faces if plane based and not block based
if adv_obj.cubester_mesh_style == "plane":
off = int(len(verts) / rows)
for r in range(rows - 1):
for c in range(off - 1):
faces += [(r * off + c, r * off + c + 1, (r + 1) * off + c + 1, (r + 1) * off + c)]
mesh = bpy.data.meshes.new("cubed")
mesh.from_pydata(verts, [], faces)
ob = bpy.data.objects.new("cubed", mesh)
context.scene.objects.link(ob)
context.scene.objects.active = ob
ob.select = True
# uv unwrap
if adv_obj.cubester_mesh_style == "blocks":
create_uv_map(context, rows, int(len(faces) / 6 / rows))
else:
create_uv_map(context, rows - 1, int(len(faces) / (rows - 1)))
# material
# determine name and if already created
if adv_obj.cubester_materials == "vertex": # vertex color
image_name = "Vertex"
elif not adv_obj.cubester_use_image_color and \
adv_obj.cubester_color_image in bpy.data.images and \
adv_obj.cubester_materials == "image": # replaced image
image_name = adv_obj.cubester_color_image
else: # normal image
image_name = adv_obj.cubester_image
# either add material or create
if ("CubeSter_" + image_name) in bpy.data.materials:
ob.data.materials.append(bpy.data.materials["CubeSter_" + image_name])
# create material
else:
create_material(scene, ob, image_name)
# vertex colors
bpy.ops.mesh.vertex_color_add()
i = 0
for c in ob.data.vertex_colors[0].data:
c.color = vert_colors[i]
i += 1
frames = []
# image sequence handling
if adv_obj.cubester_load_type == "multiple":
images = find_sequence_images(self, context)
frames_vert_colors = []
max_images = adv_obj.cubester_max_images + 1 if \
len(images[0]) > adv_obj.cubester_max_images else len(images[0])
# for each image in the sequence, find the new height and color for every block
for image_index in range(0, max_images, adv_obj.cubester_skip_images):
filepath = images[0][image_index]
name = images[1][image_index]
picture = fetch_image(self, name, filepath)
pixels = list(picture.pixels)
frame_heights = []
frame_colors = []
for row in range(0, picture.size[1], adv_obj.cubester_skip_pixels + 1):
for column in range(0, picture.size[0] * 4, 4 + adv_obj.cubester_skip_pixels * 4):
r, g, b, a = get_pixel_values(picture, pixels, row, column)
h = find_point_height(r, g, b, a, scene)
if h != -1:
frame_heights.append(h)
if adv_obj.cubester_mesh_style == "blocks":
frame_colors += [(r, g, b) for i in range(24)]
else:
frame_colors += [(r, g, b) for i in range(4)]
if adv_obj.cubester_mesh_style == "plane":
del vert_colors[len(vert_colors) - 4:len(vert_colors)]
frames.append(frame_heights)
frames_vert_colors.append(frame_colors)
# determine what data to use
if adv_obj.cubester_materials == "vertex" or scene.render.engine == "BLENDER_ENGINE":
adv_obj.cubester_vertex_colors[ob.name] = {
"type": "vertex", "frames": frames_vert_colors,
"frame_skip": adv_obj.cubester_frame_step,
"total_images": max_images
}
else:
adv_obj.cubester_vertex_colors[ob.name] = {
"type": "image", "frame_skip": scene.cubester_frame_step,
"total_images": max_images
}
att = get_image_node(ob.data.materials[0])
att.image_user.frame_duration = len(frames) * adv_obj.cubester_frame_step
# animate mesh
create_f_curves(
mesh, frames,
adv_obj.cubester_frame_step,
adv_obj.cubester_mesh_style
)
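# Illustrative note: an image of W x H pixels sampled every (skip_pixels + 1)
# pixels produces roughly (W / (skip_pixels + 1)) * (H / (skip_pixels + 1))
# blocks or plane points, which is what the panel reports as the approximate
# cube / point count.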
# generate uv map for object
def create_uv_map(context, rows, columns):
adv_obj = context.scene.advanced_objects
mesh = context.object.data
mesh.uv_textures.new("cubester")
bm = bmesh.new()
bm.from_mesh(mesh)
uv_layer = bm.loops.layers.uv[0]
bm.faces.ensure_lookup_table()
x_scale = 1 / columns
y_scale = 1 / rows
y_pos = 0.0
x_pos = 0.0
count = columns - 1  # face index threshold used to know when to wrap to the next row
# if blocks
if adv_obj.cubester_mesh_style == "blocks":
for fa in range(int(len(bm.faces) / 6)):
for i in range(6):
pos = (fa * 6) + i
bm.faces[pos].loops[0][uv_layer].uv = (x_pos, y_pos)
bm.faces[pos].loops[1][uv_layer].uv = (x_pos + x_scale, y_pos)
bm.faces[pos].loops[2][uv_layer].uv = (x_pos + x_scale, y_pos + y_scale)
bm.faces[pos].loops[3][uv_layer].uv = (x_pos, y_pos + y_scale)
x_pos += x_scale
if fa >= count:
y_pos += y_scale
x_pos = 0.0
count += columns
# if planes
else:
for fa in range(len(bm.faces)):
bm.faces[fa].loops[0][uv_layer].uv = (x_pos, y_pos)
bm.faces[fa].loops[1][uv_layer].uv = (x_pos + x_scale, y_pos)
bm.faces[fa].loops[2][uv_layer].uv = (x_pos + x_scale, y_pos + y_scale)
bm.faces[fa].loops[3][uv_layer].uv = (x_pos, y_pos + y_scale)
x_pos += x_scale
if fa >= count:
y_pos += y_scale
x_pos = 0.0
count += columns
bm.to_mesh(mesh)
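# Illustrative note: the UV map assigns each block (or plane face) a cell of
# size (1 / columns) x (1 / rows), walking left to right and wrapping to the
# next row once `count` faces have been placed; for blocks, all six faces of a
# cube share the same cell.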
# if already loaded return image, else load and return
def fetch_image(self, name, load_path):
if name in bpy.data.images:
return bpy.data.images[name]
else:
try:
image = bpy.data.images.load(load_path)
return image
except RuntimeError:
self.report({"ERROR"}, "CubeSter: '{}' could not be loaded".format(load_path))
return None
# find height for point
def find_point_height(r, g, b, a, scene):
adv_obj = scene.advanced_objects
if a: # if not completely transparent
normalize = 1
# channel weighting
if not adv_obj.cubester_advanced:
composed = 0.25 * r + 0.25 * g + 0.25 * b + 0.25 * a
else:
# user defined weighting
if not adv_obj.cubester_random_weights:
composed = adv_obj.cubester_weight_r * r + adv_obj.cubester_weight_g * g + \
adv_obj.cubester_weight_b * b + adv_obj.cubester_weight_a * a
total = adv_obj.cubester_weight_r + adv_obj.cubester_weight_g + adv_obj.cubester_weight_b + \
adv_obj.cubester_weight_a
normalize = 1 / total
# random weighting
else:
weights = [uniform(0.0, 1.0) for i in range(4)]
composed = weights[0] * r + weights[1] * g + weights[2] * b + weights[3] * a
total = weights[0] + weights[1] + weights[2] + weights[3]
normalize = 1 / total
if adv_obj.cubester_invert:
h = (1 - composed) * adv_obj.cubester_height_scale * normalize
else:
h = composed * adv_obj.cubester_height_scale * normalize
return h
else:
return -1
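# Worked example (assuming cubester_advanced is off, i.e. equal 0.25 weights):
# a fully opaque white pixel gives composed = 0.25 * (1 + 1 + 1 + 1) = 1.0, so
# the height is exactly cubester_height_scale (or 0.0 if "invert" is enabled);
# a fully transparent pixel (a == 0) returns -1 and the point is skipped.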
# find all images that would belong to sequence
def find_sequence_images(self, context):
scene = context.scene
images = [[], []]
if scene.advanced_objects.cubester_image in bpy.data.images:
image = bpy.data.images[scene.advanced_objects.cubester_image]
main = image.name.split(".")[0]
# first part of name to check against other files
length = len(main)
keep_going = True
for i in range(length - 1, -1, -1):
if main[i].isdigit() and keep_going:
length -= 1
else:
keep_going = not keep_going
name = main[0:length]
dir_name = path.dirname(bpy.path.abspath(image.filepath))
try:
for file in listdir(dir_name):
if path.isfile(path.join(dir_name, file)) and file.startswith(name):
images[0].append(path.join(dir_name, file))
images[1].append(file)
except FileNotFoundError:
self.report({"ERROR"}, "CubeSter: '{}' directory not found".format(dir_name))
return images
# get image node
def get_image_node(mat):
nodes = mat.node_tree.nodes
att = nodes["Image Texture"]
return att
# get the RGBA values from pixel
def get_pixel_values(picture, pixels, row, column):
# determine i position to start at based on row and column position
i = (row * picture.size[0] * 4) + column
pixs = pixels[i: i + 4]
r = pixs[0]
g = pixs[1]
b = pixs[2]
a = pixs[3]
return r, g, b, a
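# Illustrative note: `pixels` is the flat RGBA list from Image.pixels and
# `column` is already a channel offset (a multiple of 4), so the first channel
# of the sampled pixel sits at i = row * width * 4 + column; e.g. for a
# 100-pixel-wide image, row 2 with column 8 starts at 2 * 100 * 4 + 8 = 808.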
# frame change handler for materials
def material_frame_handler(scene):
frame = scene.frame_current
adv_obj = scene.advanced_objects
keys = list(adv_obj.cubester_vertex_colors.keys())
# get keys and see if object is still in scene
for i in keys:
# if object is in scene then update information
if i in bpy.data.objects:
ob = bpy.data.objects[i]
data = adv_obj.cubester_vertex_colors[ob.name]
skip_frames = data["frame_skip"]
# update materials using vertex colors
if data['type'] == "vertex":
colors = data["frames"]
if frame % skip_frames == 0 and 0 <= frame < (data['total_images'] - 1) * skip_frames:
use_frame = int(frame / skip_frames)
color = colors[use_frame]
i = 0
for c in ob.data.vertex_colors[0].data:
c.color = color[i]
i += 1
else:
att = get_image_node(ob.data.materials[0])
offset = frame - int(frame / skip_frames)
att.image_user.frame_offset = -offset
# if the object is no longer in the scene then delete the entry
else:
del adv_obj.cubester_vertex_colors[i]
class CubeSterPanel(Panel):
bl_idname = "OBJECT_PT.cubester"
bl_label = "CubeSter"
bl_space_type = "VIEW_3D"
bl_region_type = "TOOLS"
bl_category = "Create"
bl_options = {"DEFAULT_CLOSED"}
bl_context = "objectmode"
def draw(self, context):
layout = self.layout.box()
scene = bpy.context.scene
adv_obj = scene.advanced_objects
images_found = 0
rows = 0
columns = 0
layout.prop(adv_obj, "cubester_audio_image")
if adv_obj.cubester_audio_image == "image":
box = layout.box()
box.prop(adv_obj, "cubester_load_type")
box.label("Image To Convert:")
box.prop_search(adv_obj, "cubester_image", bpy.data, "images")
box.prop(adv_obj, "cubester_load_image")
# find number of appropriate images if sequence
if adv_obj.cubester_load_type == "multiple":
box = layout.box()
# display number of images found there
images = find_sequence_images(self, context)
images_found = len(images[0]) if len(images[0]) <= adv_obj.cubester_max_images \
else adv_obj.cubester_max_images
if len(images[0]):
box.label(str(len(images[0])) + " Images Found", icon="PACKAGE")
box.prop(adv_obj, "cubester_max_images")
box.prop(adv_obj, "cubester_skip_images")
box.prop(adv_obj, "cubester_frame_step")
box = layout.box()
col = box.column(align=True)
col.prop(adv_obj, "cubester_skip_pixels")
col.prop(adv_obj, "cubester_size_per_hundred_pixels")
col.prop(adv_obj, "cubester_height_scale")
box.prop(adv_obj, "cubester_invert", icon="FILE_REFRESH")
box = layout.box()
box.prop(adv_obj, "cubester_mesh_style", icon="MESH_GRID")
if adv_obj.cubester_mesh_style == "blocks":
box.prop(adv_obj, "cubester_block_style")
else:
# audio file
layout.prop(adv_obj, "cubester_audio_path")
box = layout.box()
col = box.column(align=True)
col.prop(adv_obj, "cubester_audio_min_freq")
col.prop(adv_obj, "cubester_audio_max_freq")
box.separator()
box.prop(adv_obj, "cubester_audio_offset_type")
if adv_obj.cubester_audio_offset_type == "frame":
box.prop(adv_obj, "cubester_audio_frame_offset")
box.prop(adv_obj, "cubester_audio_block_layout")
box.separator()
col = box.column(align=True)
col.prop(adv_obj, "cubester_audio_width_blocks")
col.prop(adv_obj, "cubester_audio_length_blocks")
rows = adv_obj.cubester_audio_width_blocks
columns = adv_obj.cubester_audio_length_blocks
col.prop(adv_obj, "cubester_size_per_hundred_pixels")
# materials
box = layout.box()
box.prop(adv_obj, "cubester_materials", icon="MATERIAL")
if adv_obj.cubester_materials == "image":
box.prop(adv_obj, "cubester_load_type")
# find number of appropriate images if sequence
if adv_obj.cubester_load_type == "multiple":
# display number of images found there
images = find_sequence_images(self, context)
images_found = len(images[0]) if len(images[0]) <= adv_obj.cubester_max_images \
else adv_obj.cubester_max_images
if len(images[0]):
box.label(str(len(images[0])) + " Images Found", icon="PACKAGE")
box.prop(adv_obj, "cubester_max_images")
box.prop(adv_obj, "cubester_skip_images")
box.prop(adv_obj, "cubester_frame_step")
box.separator()
if adv_obj.cubester_audio_image == "image":
box.prop(adv_obj, "cubester_use_image_color", icon="COLOR")
if not adv_obj.cubester_use_image_color or adv_obj.cubester_audio_image == "audio":
box.label("Image To Use For Colors:")
box.prop_search(adv_obj, "cubester_color_image", bpy.data, "images")
box.prop(adv_obj, "cubester_load_color_image")
if adv_obj.cubester_image in bpy.data.images:
rows = int(bpy.data.images[adv_obj.cubester_image].size[1] /
(adv_obj.cubester_skip_pixels + 1))
columns = int(bpy.data.images[adv_obj.cubester_image].size[0] /
(adv_obj.cubester_skip_pixels + 1))
box = layout.box()
if adv_obj.cubester_mesh_style == "blocks":
box.label("Approximate Cube Count: " + str(rows * columns))
box.label("Expected Verts/Faces: " + str(rows * columns * 8) + " / " + str(rows * columns * 6))
else:
box.label("Approximate Point Count: " + str(rows * columns))
box.label("Expected Verts/Faces: " + str(rows * columns) + " / " + str(rows * (columns - 1)))
# blocks and plane generation time values
if adv_obj.cubester_mesh_style == "blocks":
slope = 0.0000876958
intercept = 0.02501
block_infl, frame_infl, intercept2 = 0.0025934, 0.38507, -0.5840189
else:
slope = 0.000017753
intercept = 0.04201
block_infl, frame_infl, intercept2 = 0.000619, 0.344636, -0.272759
# if creating image based mesh
points = rows * columns
if adv_obj.cubester_audio_image == "image":
if adv_obj.cubester_load_type == "single":
time = rows * columns * slope + intercept # approximate time count for mesh
else:
time = (points * slope) + intercept + (points * block_infl) + \
(images_found / adv_obj.cubester_skip_images * frame_infl) + intercept2
box.label("Images To Be Used: " + str(int(images_found / adv_obj.cubester_skip_images)))
else:
# audio based mesh
box.label("Audio Track Length: " + str(adv_obj.cubester_audio_file_length) + " frames")
block_infl, frame_infl, intercept = 0.0948, 0.0687566, -25.85985
time = (points * block_infl) + (adv_obj.cubester_audio_file_length * frame_infl) + intercept
if time < 0.0: # usually no audio loaded
time = 0.0
time_mod = "s"
if time > 60: # convert to minutes if needed
time /= 60
time_mod = "min"
time = round(time, 3)
box.label("Expected Time: " + str(time) + " " + time_mod)
# advanced
if adv_obj.cubester_audio_image == "image":
icon_1 = "TRIA_DOWN" if adv_obj.cubester_advanced else "TRIA_RIGHT"
# layout.separator()
box = layout.box()
box.prop(adv_obj, "cubester_advanced", icon=icon_1)
if adv_obj.cubester_advanced:
box.prop(adv_obj, "cubester_random_weights", icon="RNDCURVE")
if not adv_obj.cubester_random_weights:
box.label("RGBA Channel Weights", icon="COLOR")
col = box.column(align=True)
col.prop(adv_obj, "cubester_weight_r")
col.prop(adv_obj, "cubester_weight_g")
col.prop(adv_obj, "cubester_weight_b")
col.prop(adv_obj, "cubester_weight_a")
# generate mesh
layout.operator("mesh.cubester", icon="OBJECT_DATA")
class CubeSter(Operator):
bl_idname = "mesh.cubester"
bl_label = "Generate Mesh"
bl_description = "Generate a mesh from an Image or Sound File"
bl_options = {"REGISTER", "UNDO"}
def execute(self, context):
verts, faces = [], []
start = timeit.default_timer()
scene = bpy.context.scene
adv_obj = scene.advanced_objects
if adv_obj.cubester_audio_image == "image":
if adv_obj.cubester_image != "":
create_mesh_from_image(self, scene, verts, faces)
frames = find_sequence_images(self, context)
created = len(frames[0])
else:
self.report({'WARNING'},
"Please add an Image for Object generation. Operation Cancelled")
return {"CANCELLED"}
else:
if (adv_obj.cubester_audio_path != "" and
path.isfile(adv_obj.cubester_audio_path) and adv_obj.cubester_check_audio is True):
create_mesh_from_audio(self, scene, verts, faces)
created = adv_obj.cubester_audio_file_length
else:
self.report({'WARNING'},
"Please add an Sound File for Object generation. Operation Cancelled")
return {"CANCELLED"}
stop = timeit.default_timer()
if adv_obj.cubester_mesh_style == "blocks" or adv_obj.cubester_audio_image == "audio":
self.report({"INFO"},
"CubeSter: {} blocks and {} frame(s) "
"in {}s".format(str(int(len(verts) / 8)),
str(created),
str(round(stop - start, 4)))
)
else:
self.report({"INFO"},
"CubeSter: {} points and {} frame(s) "
"in {}s" .format(str(len(verts)),
str(created),
str(round(stop - start, 4)))
)
return {"FINISHED"}
def register():
bpy.utils.register_module(__name__)
bpy.app.handlers.frame_change_pre.append(material_frame_handler)
def unregister():
bpy.utils.unregister_module(__name__)
bpy.app.handlers.frame_change_pre.remove(material_frame_handler)
if __name__ == "__main__":
register()
|
py | 1a516cefa233fd819b30f29a5d2ef951f25ed479 | """Redis cache backend."""
import random
import re
from django.core.cache.backends.base import DEFAULT_TIMEOUT, BaseCache
from django.core.serializers.base import PickleSerializer
from django.utils.functional import cached_property
from django.utils.module_loading import import_string
class RedisSerializer(PickleSerializer):
"""
Similar to PickleSerializer, except integers are serialized as native Redis
integers for better incr() and decr() atomicity.
"""
def dumps(self, obj):
# Only skip pickling for integers; an int subclass such as bool should
# still be pickled.
if type(obj) is int:
return obj
return super().dumps(obj)
def loads(self, data):
try:
return int(data)
except ValueError:
return super().loads(data)
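# Illustrative example (assumption, not part of the original module):
#   s = RedisSerializer()
#   s.dumps(3)       # -> 3, stored as a native Redis integer (INCR/DECR work)
#   s.dumps(True)    # -> pickled bytes, since bool is only an int subclass
#   s.loads(b"3")    # -> 3; non-integer payloads fall back to unpickling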
class RedisCacheClient:
def __init__(
self,
servers,
serializer=None,
db=None,
pool_class=None,
parser_class=None,
):
import redis
self._lib = redis
self._servers = servers
self._pools = {}
self._client = self._lib.Redis
if isinstance(pool_class, str):
pool_class = import_string(pool_class)
self._pool_class = pool_class or self._lib.ConnectionPool
if isinstance(serializer, str):
serializer = import_string(serializer)
if callable(serializer):
serializer = serializer()
self._serializer = serializer or RedisSerializer()
if isinstance(parser_class, str):
parser_class = import_string(parser_class)
parser_class = parser_class or self._lib.connection.DefaultParser
self._pool_options = {"parser_class": parser_class, "db": db}
def _get_connection_pool_index(self, write):
# Write to the first server. Read from other servers if there are more,
# otherwise read from the first server.
if write or len(self._servers) == 1:
return 0
return random.randint(1, len(self._servers) - 1)
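# Illustrative note: with servers = [primary, replica1, replica2], writes
# always use index 0 while reads pick a random index in 1..2; with a single
# server every operation uses index 0.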
def _get_connection_pool(self, write):
index = self._get_connection_pool_index(write)
if index not in self._pools:
self._pools[index] = self._pool_class.from_url(
self._servers[index],
**self._pool_options,
)
return self._pools[index]
def get_client(self, key=None, *, write=False):
# key is used so that the method signature remains the same and custom
# cache client can be implemented which might require the key to select
# the server, e.g. sharding.
pool = self._get_connection_pool(write)
return self._client(connection_pool=pool)
def add(self, key, value, timeout):
client = self.get_client(key, write=True)
value = self._serializer.dumps(value)
if timeout == 0:
if ret := bool(client.set(key, value, nx=True)):
client.delete(key)
return ret
else:
return bool(client.set(key, value, ex=timeout, nx=True))
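# Note: a timeout of 0 means "expire immediately", so add() still issues the
# NX set to learn whether the key was absent and then deletes it right away.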
def get(self, key, default):
client = self.get_client(key)
value = client.get(key)
return default if value is None else self._serializer.loads(value)
def set(self, key, value, timeout):
client = self.get_client(key, write=True)
value = self._serializer.dumps(value)
if timeout == 0:
client.delete(key)
else:
client.set(key, value, ex=timeout)
def touch(self, key, timeout):
client = self.get_client(key, write=True)
if timeout is None:
return bool(client.persist(key))
else:
return bool(client.expire(key, timeout))
def delete(self, key):
client = self.get_client(key, write=True)
return bool(client.delete(key))
def get_many(self, keys):
client = self.get_client(None)
ret = client.mget(keys)
return {
k: self._serializer.loads(v) for k, v in zip(keys, ret) if v is not None
}
def has_key(self, key):
client = self.get_client(key)
return bool(client.exists(key))
def incr(self, key, delta):
client = self.get_client(key)
if not client.exists(key):
raise ValueError("Key '%s' not found." % key)
return client.incr(key, delta)
def set_many(self, data, timeout):
client = self.get_client(None, write=True)
pipeline = client.pipeline()
pipeline.mset({k: self._serializer.dumps(v) for k, v in data.items()})
if timeout is not None:
# Setting timeout for each key as redis does not support timeout
# with mset().
for key in data:
pipeline.expire(key, timeout)
pipeline.execute()
def delete_many(self, keys):
client = self.get_client(None, write=True)
client.delete(*keys)
def clear(self):
client = self.get_client(None, write=True)
return bool(client.flushdb())
class RedisCache(BaseCache):
def __init__(self, server, params):
super().__init__(params)
if isinstance(server, str):
self._servers = re.split("[;,]", server)
else:
self._servers = server
self._class = RedisCacheClient
self._options = params.get("OPTIONS", {})
@cached_property
def _cache(self):
return self._class(self._servers, **self._options)
def get_backend_timeout(self, timeout=DEFAULT_TIMEOUT):
if timeout == DEFAULT_TIMEOUT:
timeout = self.default_timeout
# The key will be made persistent if None is used as a timeout.
# Non-positive values will cause the key to be deleted.
return None if timeout is None else max(0, int(timeout))
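# Worked examples: get_backend_timeout(None) -> None (persistent key),
# get_backend_timeout(-5) -> 0 (key deleted), get_backend_timeout(2.9) -> 2.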
def add(self, key, value, timeout=DEFAULT_TIMEOUT, version=None):
key = self.make_and_validate_key(key, version=version)
return self._cache.add(key, value, self.get_backend_timeout(timeout))
def get(self, key, default=None, version=None):
key = self.make_and_validate_key(key, version=version)
return self._cache.get(key, default)
def set(self, key, value, timeout=DEFAULT_TIMEOUT, version=None):
key = self.make_and_validate_key(key, version=version)
self._cache.set(key, value, self.get_backend_timeout(timeout))
def touch(self, key, timeout=DEFAULT_TIMEOUT, version=None):
key = self.make_and_validate_key(key, version=version)
return self._cache.touch(key, self.get_backend_timeout(timeout))
def delete(self, key, version=None):
key = self.make_and_validate_key(key, version=version)
return self._cache.delete(key)
def get_many(self, keys, version=None):
key_map = {
self.make_and_validate_key(key, version=version): key for key in keys
}
ret = self._cache.get_many(key_map.keys())
return {key_map[k]: v for k, v in ret.items()}
def has_key(self, key, version=None):
key = self.make_and_validate_key(key, version=version)
return self._cache.has_key(key)
def incr(self, key, delta=1, version=None):
key = self.make_and_validate_key(key, version=version)
return self._cache.incr(key, delta)
def set_many(self, data, timeout=DEFAULT_TIMEOUT, version=None):
safe_data = {}
for key, value in data.items():
key = self.make_and_validate_key(key, version=version)
safe_data[key] = value
self._cache.set_many(safe_data, self.get_backend_timeout(timeout))
return []
def delete_many(self, keys, version=None):
safe_keys = []
for key in keys:
key = self.make_and_validate_key(key, version=version)
safe_keys.append(key)
self._cache.delete_many(safe_keys)
def clear(self):
return self._cache.clear()
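# Illustrative configuration (assumed settings.py snippet, not part of this
# module) for enabling the backend:
#   CACHES = {
#       "default": {
#           "BACKEND": "django.core.cache.backends.redis.RedisCache",
#           "LOCATION": "redis://127.0.0.1:6379",
#       }
#   }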
|