repo_name | path | copies | size | content | license | hash | line_mean | line_max | alpha_frac | autogenerated
---|---|---|---|---|---|---|---|---|---|---
deepmind/deepmind-research | ogb_lsc/mag/losses.py | 1 | 6654 | # Copyright 2021 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Losses and related utilities."""
from typing import Mapping, Tuple, Sequence, NamedTuple, Dict, Optional
import jax
import jax.numpy as jnp
import jraph
import numpy as np
# pylint: disable=g-bad-import-order
import datasets
LogsDict = Mapping[str, jnp.ndarray]
class Predictions(NamedTuple):
node_indices: np.ndarray
labels: np.ndarray
predictions: np.ndarray
logits: np.ndarray
def node_classification_loss(
logits: jnp.ndarray,
batch: datasets.Batch,
extra_stats: bool = False,
) -> Tuple[jnp.ndarray, LogsDict]:
"""Gets node-wise classification loss and statistics."""
log_probs = jax.nn.log_softmax(logits)
loss = -jnp.sum(log_probs * batch.node_labels, axis=-1)
num_valid = jnp.sum(batch.label_mask)
labels = jnp.argmax(batch.node_labels, axis=-1)
is_correct = (jnp.argmax(log_probs, axis=-1) == labels)
num_correct = jnp.sum(is_correct * batch.label_mask)
loss = jnp.sum(loss * batch.label_mask) / (num_valid + 1e-8)
accuracy = num_correct / (num_valid + 1e-8)
entropy = -jnp.mean(jnp.sum(jax.nn.softmax(logits) * log_probs, axis=-1))
stats = {
'classification_loss': loss,
'prediction_entropy': entropy,
'accuracy': accuracy,
'num_valid': num_valid,
'num_correct': num_correct,
}
if extra_stats:
for k in range(1, 6):
stats[f'top_{k}_correct'] = topk_correct(logits, labels,
batch.label_mask, k)
return loss, stats
def get_predictions_labels_and_logits(
logits: jnp.ndarray,
batch: datasets.Batch,
) -> Tuple[jnp.ndarray, jnp.ndarray, jnp.ndarray, jnp.ndarray]:
"""Gets prediction labels and logits."""
mask = batch.label_mask > 0.
indices = batch.node_indices[mask]
logits = logits[mask]
predictions = jnp.argmax(logits, axis=-1)
labels = jnp.argmax(batch.node_labels[mask], axis=-1)
return indices, predictions, labels, logits
def topk_correct(
logits: jnp.ndarray,
labels: jnp.ndarray,
valid_mask: jnp.ndarray,
topk: int,
) -> jnp.ndarray:
"""Calculates top-k accuracy."""
pred_ranking = jnp.argsort(logits, axis=1)[:, ::-1]
pred_ranking = pred_ranking[:, :topk]
is_correct = jnp.any(pred_ranking == labels[:, jnp.newaxis], axis=1)
return (is_correct * valid_mask).sum()
def ensemble_predictions_by_probability_average(
predictions_list: Sequence[Predictions]) -> Predictions:
"""Ensemble predictions by ensembling the probabilities."""
_assert_consistent_predictions(predictions_list)
all_probs = np.stack([
jax.nn.softmax(predictions.logits, axis=-1)
for predictions in predictions_list
],
axis=0)
ensembled_logits = np.log(all_probs.mean(0))
return predictions_list[0]._replace(
logits=ensembled_logits, predictions=np.argmax(ensembled_logits, axis=-1))
def get_accuracy_dict(predictions: Predictions) -> Dict[str, float]:
"""Returns the accuracy dict."""
output_dict = {}
output_dict['num_valid'] = predictions.predictions.shape[0]
matches = (predictions.labels == predictions.predictions)
output_dict['accuracy'] = matches.mean()
pred_ranking = jnp.argsort(predictions.logits, axis=1)[:, ::-1]
for k in range(1, 6):
matches = jnp.any(
pred_ranking[:, :k] == predictions.labels[:, None], axis=1)
output_dict[f'top_{k}_correct'] = matches.mean()
return output_dict
def bgrl_loss(
first_online_predictions: jnp.ndarray,
second_target_projections: jnp.ndarray,
second_online_predictions: jnp.ndarray,
first_target_projections: jnp.ndarray,
symmetrize: bool,
valid_mask: jnp.ndarray,
) -> Tuple[jnp.ndarray, LogsDict]:
"""Implements BGRL loss."""
first_side_node_loss = jnp.sum(
jnp.square(
_l2_normalize(first_online_predictions, axis=-1) -
_l2_normalize(second_target_projections, axis=-1)),
axis=-1)
if symmetrize:
second_side_node_loss = jnp.sum(
jnp.square(
_l2_normalize(second_online_predictions, axis=-1) -
_l2_normalize(first_target_projections, axis=-1)),
axis=-1)
node_loss = first_side_node_loss + second_side_node_loss
else:
node_loss = first_side_node_loss
loss = (node_loss * valid_mask).sum() / (valid_mask.sum() + 1e-6)
return loss, dict(bgrl_loss=loss)
def get_corrupted_view(
graph: jraph.GraphsTuple,
feature_drop_prob: float,
edge_drop_prob: float,
rng_key: jnp.ndarray,
) -> jraph.GraphsTuple:
"""Returns corrupted graph view."""
node_key, edge_key = jax.random.split(rng_key)
def mask_feature(x):
mask = jax.random.bernoulli(node_key, 1 - feature_drop_prob, x.shape)
return x * mask
# Randomly mask features with fixed probability.
nodes = jax.tree_map(mask_feature, graph.nodes)
# Simulate dropping of edges by changing genuine edges to self-loops on
# the padded node.
num_edges = graph.senders.shape[0]
last_node_idx = graph.n_node.sum() - 1
edge_mask = jax.random.bernoulli(edge_key, 1 - edge_drop_prob, [num_edges])
senders = jnp.where(edge_mask, graph.senders, last_node_idx)
receivers = jnp.where(edge_mask, graph.receivers, last_node_idx)
# Note that n_edge will now be invalid since edges in the middle of the list
# will correspond to the final graph. Set n_edge to None to ensure we do not
# accidentally use this.
return graph._replace(
nodes=nodes,
senders=senders,
receivers=receivers,
n_edge=None,
)
def _assert_consistent_predictions(predictions_list: Sequence[Predictions]):
first_predictions = predictions_list[0]
for predictions in predictions_list:
assert np.all(predictions.node_indices == first_predictions.node_indices)
assert np.all(predictions.labels == first_predictions.labels)
assert np.all(
predictions.predictions == np.argmax(predictions.logits, axis=-1))
def _l2_normalize(
x: jnp.ndarray,
axis: Optional[int] = None,
epsilon: float = 1e-6,
) -> jnp.ndarray:
return x * jax.lax.rsqrt(
jnp.sum(jnp.square(x), axis=axis, keepdims=True) + epsilon)
| apache-2.0 | -7,726,497,317,545,474,000 | 32.437186 | 80 | 0.681996 | false |
adhocish/MELEEDB | meleedb/main.py | 1 | 1570 | import logging
import datetime
import os
from scrapy.crawler import CrawlerProcess
from scrapy.settings import Settings
from scrapy.utils.log import configure_logging
import spiders
def run():
# Logging settings
configure_logging(install_root_handler=False)
logging.basicConfig(
datefmt='%Y-%m-%d %H:%M:%S',
filemode='w',
filename='output/' + datetime.datetime.utcnow().strftime("%Y%m%d%H%M%S") + '.log',
format='%(asctime)s %(levelname)s: %(message)s',
level=logging.INFO
)
# Project settings
settings = Settings()
settings.setmodule('settings', priority='project')
# Class to run parallel spiders
process = CrawlerProcess(settings)
process.crawl(spiders.LiquipediaSpider)
# Block until crawling is complete
process.start()
def handle_cmdline_arguments():
# Output to JSON file
# Create new database
# Update database?
# Logging
# Specify specific tournament(s)
# parser = argparse.ArgumentParser(description='Creates an .m3u playlist from given media files.')
# parser.add_argument('-u', '--upload', help='Attempt to upload files to remote.', action='store_true')
# parser.add_argument('-r', '--recursive', help='Process subdirectories as well.', action='store_true')
# parser.add_argument('-n', '--name', type=str, help='Name of playlist.')
# parser.add_argument('files', type=str, nargs='+', help='Absolute paths to files.')
# args = parser.parse_args()
run()
if __name__ == "__main__":
handle_cmdline_arguments() | gpl-3.0 | 334,612,834,064,739,840 | 29.803922 | 107 | 0.664968 | false |
uwescience/pulse2percept | pulse2percept/implants/bvt.py | 1 | 4694 | """`BVT24`"""
import numpy as np
from .base import ProsthesisSystem
from .electrodes import DiskElectrode
from .electrode_arrays import ElectrodeArray
class BVT24(ProsthesisSystem):
"""24-channel suprachoroidal retinal prosthesis
This class creates a 24-channel suprachoroidal retinal prosthesis
[Layton2014]_, which was developed by the Bionic Vision Australia
Consortium and commercialized by Bionic Vision Technologies (BVT).
The center of the array is located at (x,y,z), given in microns, and the
array is rotated by rotation angle ``rot``, given in radians.
The array consists of:
- 33 platinum stimulating electrodes:
- 30 electrodes with 600um diameter (Electrodes 1-20 (except
9, 17, 19) and Electrodes 21a-m),
- 3 electrodes with 400um diameter (Electrodes 9, 17, 19)
- 2 return electrodes with 2000um diameter (Electrodes 22, 23)
    Electrodes 21a-m are typically ganged to provide an external
ring for common ground. The center of the array is assumed to lie
between Electrodes 7, 8, 9, and 13.
.. note::
Column order for electrode numbering is reversed in a left-eye
implant.
.. versionadded:: 0.6
Parameters
----------
x : float
x coordinate of the array center (um)
y : float
y coordinate of the array center (um)
    z : float or array_like
        Distance of the array to the retinal surface (um). Either a list
        with 35 entries (one per electrode) or a scalar.
rot : float
Rotation angle of the array (rad). Positive values denote
counter-clock-wise (CCW) rotations in the retinal coordinate
system.
eye : {'RE', 'LE'}, optional
Eye in which array is implanted.
"""
# Frozen class: User cannot add more class attributes
__slots__ = ()
def __init__(self, x=0, y=0, z=0, rot=0, eye='RE', stim=None):
self.eye = eye
self.earray = ElectrodeArray([])
n_elecs = 35
# the positions of the electrodes 1-20, 21a-21m, R1-R2
x_arr = [-1275.0, -850.0, -1275.0, -850.0, -1275.0,
-425.0, 0, -425.0, 0, -425.0,
425.0, 850.0, 425.0, 850.0, 425.0,
1275.0, 1700.0, 1275.0, 1700.0, 1275.0,
-850.0, 0, 850.0, 1700.0, 2125.0,
2550.0, 2125.0, 2550.0, 2125.0, 1700.0,
850.0, 0, -850.0, 7000.0, 9370.0]
y_arr = [1520.0, 760.0, 0, -760.0, -1520.0,
1520.0, 760.0, 0, -760.0, -1520.0,
1520.0, 760.0, 0, -760.0, -1520.0,
1520.0, 760.0, 0, -760.0, -1520.0,
2280.0, 2280.0, 2280.0, 2280.0, 1520.0,
760.0, 0.0, -760.0, -1520.0, -2280.0,
-2280.0, -2280.0, -2280.0, 0, 0]
if isinstance(z, (list, np.ndarray)):
# Specify different height for every electrode in a list:
            z_arr = np.asarray(z).flatten()
if z_arr.size != n_elecs:
raise ValueError("If `z` is a list, it must have %d entries, "
"not %d." % (n_elecs, len(z)))
else:
# If `z` is a scalar, choose same height for all electrodes:
z_arr = np.ones(n_elecs, dtype=float) * z
# the position of the electrodes 1-20, 21a-21m, R1-R2 for left eye
if eye == 'LE':
x_arr = np.negative(x_arr)
# the radius of all the electrodes in the implants
r_arr = [300.0] * n_elecs
# the radius of electrodes 9, 17, 19 is 200.0 um
r_arr[8] = r_arr[16] = r_arr[18] = 200.0
# the radius of the return electrodes is 1000.0 um
r_arr[33] = r_arr[34] = 1000.0
# the names of the electrodes 1-20, 21a-21m, R1 and R2
names = [str(name) for name in range(1, 21)]
names.extend(['21a', '21b', '21c', '21d', '21e',
'21f', '21g', '21h', '21i', '21j',
'21k', '21l', '21m'])
names.extend(['R1', 'R2'])
# Rotate the grid:
rotmat = np.array([np.cos(rot), -np.sin(rot),
np.sin(rot), np.cos(rot)]).reshape((2, 2))
xy = np.matmul(rotmat, np.vstack((x_arr, y_arr)))
x_arr = xy[0, :]
y_arr = xy[1, :]
# Apply offset to make the grid centered at (x, y):
x_arr += x
y_arr += y
for x, y, z, r, name in zip(x_arr, y_arr, z_arr, r_arr, names):
self.earray.add_electrode(name, DiskElectrode(x, y, z, r))
# Beware of race condition: Stim must be set last, because it requires
# indexing into self.electrodes:
self.stim = stim
| bsd-3-clause | -6,385,752,689,116,946,000 | 37.162602 | 78 | 0.553473 | false |
wanderknight/tushare | tushare/__init__.py | 1 | 2582 | __version__ = '0.4.1'
__author__ = 'Jimmy Liu'
"""
for trading data
"""
from tushare.stock.trading import (get_hist_data, get_tick_data,
get_today_all, get_realtime_quotes,
get_h_data, get_today_ticks,
get_index, get_hists,
get_sina_dd)
"""
for fundamental data
"""
from tushare.stock.fundamental import (get_stock_basics, get_report_data,
get_profit_data,
get_operation_data, get_growth_data,
get_debtpaying_data, get_cashflow_data)
"""
for macro data
"""
from tushare.stock.macro import (get_gdp_year, get_gdp_quarter,
get_gdp_for, get_gdp_pull,
get_gdp_contrib, get_cpi,
get_ppi, get_deposit_rate,
get_loan_rate, get_rrr,
get_money_supply, get_money_supply_bal)
"""
for classifying data
"""
from tushare.stock.classifying import (get_industry_classified, get_concept_classified,
get_area_classified, get_gem_classified,
get_sme_classified, get_st_classified,
get_hs300s, get_sz50s, get_zz500s,
get_terminated, get_suspended)
"""
for news and event data
"""
from tushare.stock.newsevent import (get_latest_news, latest_content,
get_notices, notice_content,
guba_sina)
"""
for reference
"""
from tushare.stock.reference import (profit_data, forecast_data,
xsg_data, fund_holdings,
new_stocks, sh_margins,
sh_margin_details,
sz_margins, sz_margin_details)
"""
for shibor
"""
from tushare.stock.shibor import (shibor_data, shibor_quote_data,
shibor_ma_data, lpr_data,
lpr_ma_data)
"""
for LHB
"""
from tushare.stock.billboard import (top_list, cap_tops, broker_tops,
inst_tops, inst_detail)
"""
for DataYes Token
"""
from tushare.util.upass import (set_token, get_token)
from tushare.datayes.api import *
| bsd-3-clause | 8,037,513,311,149,606,000 | 32.426667 | 87 | 0.45701 | false |
belemizz/mimic2_tools | clinical_db/classify_patients.py | 1 | 3203 | """
classify patients based on lab tests
"""
import get_sample.mimic2
from mutil import Graph
import mutil.mycsv
import time
import datetime
import random
import numpy as np
import theano
import theano.tensor as T
import alg.classification
def main( max_id = 2000, target_codes = ['428.0'], show_flag = True):
mimic2db = get_sample.mimic2.Mimic2()
graph = Graph()
## Get Subject ID ##
id_list = mimic2db.subject_with_icd9_codes(target_codes)
subject_ids = [item for item in id_list if item < max_id]
print "Number of Candidates : %d"%len(subject_ids)
## Get Data ##
days_before_discharge = [0]
recover_values = [[], [], [], []]
expire_values = [[], [], [], []]
start_time = time.clock()
algo_num = 0
time_diff = 4
cr_id = 50090
bun_id = 50177
for str_id in subject_ids:
sid = int(str_id)
print sid
patient = mimic2db.get_subject(sid)
if patient:
final_adm = patient.get_final_admission()
if len(final_adm.icd9)>0 and final_adm.icd9[0][3] == target_codes[0]:
for index, dbd in enumerate(days_before_discharge):
if algo_num == 0:
# bun_and_creatinine
time_of_interest = final_adm.disch_dt + datetime.timedelta(1-dbd)
lab_result = final_adm.get_newest_lab_at_time(time_of_interest)
value1 = [item[4] for item in lab_result if item[0] == cr_id]
value2 = [item[4] for item in lab_result if item[0] == bun_id]
else:
# trend of BUN
time_of_interest1 = final_adm.disch_dt + datetime.timedelta(1-dbd)
time_of_interest2 = final_adm.disch_dt + datetime.timedelta(1-dbd-time_diff)
lab_result1 = final_adm.get_newest_lab_at_time(time_of_interest1)
lab_result2 = final_adm.get_newest_lab_at_time(time_of_interest2)
value1 = [item[4] for item in lab_result1 if item[0] == bun_id]
value2 = [item[4] for item in lab_result2 if item[0] == bun_id]
if patient.hospital_expire_flg == 'Y':
expire_values[index].append([value1, value2])
else:
recover_values[index].append([value1, value2])
end_time = time.clock()
print "data_retrieving_time: %f sec"%(end_time - start_time)
def transform_values(input_values):
""" transform to numpy format """
temp = []
for item in input_values:
if len(item[0])>0 and len(item[1])>0:
temp.append([float(item[0][0]), float(item[1][0])])
return np.array(temp)
positive_x = transform_values(expire_values[0])
negative_x = transform_values(recover_values[0])
data = [[item, 1] for item in positive_x]
data.extend([[item, 0] for item in negative_x])
random.shuffle(data)
x = np.array([item[0] for item in data])
y = np.array([item[1] for item in data])
if __name__ == '__main__':
main()
| mit | -5,518,623,470,133,790,000 | 33.074468 | 100 | 0.547924 | false |
Erotemic/utool | utool/util_logging.py | 1 | 20540 | # -*- coding: utf-8 -*-
"""
If logging is on, utool will overwrite the print function with a logging function
This is a special module which will not get injected into (should it be internal?)
References:
# maybe we can do something like this Queue to try fixing error when
# when using injected print statments with Qt signals and slots
http://stackoverflow.com/questions/21071448/redirecting-stdout-and-stderr-to-a-pyqt4-qtextedit-from-a-secondary-thread
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import six
from six.moves import builtins, map, zip, range # NOQA
from os.path import exists, join, realpath
import logging
import logging.config
import multiprocessing
import os
import sys
from utool._internal import meta_util_arg, meta_util_six
VERBOSE = meta_util_arg.VERBOSE
VERYVERBOSE = meta_util_arg.VERYVERBOSE
PRINT_ALL_CALLERS = meta_util_arg.PRINT_ALL_CALLERS
LOGGING_VERBOSE = meta_util_arg.LOGGING_VERBOSE # --verb-logging
PRINT_INJECT_ORDER = meta_util_arg.PRINT_INJECT_ORDER
def __inside_doctest(original_stdout=sys.stdout):
return original_stdout != sys.stdout
__IN_MAIN_PROCESS__ = multiprocessing.current_process().name == 'MainProcess'
__UTOOL_ROOT_LOGGER__ = None
__CURRENT_LOG_FPATH__ = None
# Remember original python values
# __PYTHON_STDOUT__ = sys.stdout
# __PYTHON_PRINT__ = builtins.print
# __PYTHON_WRITE__ = __PYTHON_STDOUT__.write
# __PYTHON_FLUSH__ = __PYTHON_STDOUT__.flush
# Initialize utool values
__UTOOL_STDOUT__ = None
__UTOOL_PRINT__ = None
# TODO: Allow write and flush to have a logging equivalent
__UTOOL_WRITE__ = None
__UTOOL_FLUSH__ = None
__UTOOL_WRITE_BUFFER__ = []
def _utool_stdout():
if __UTOOL_STDOUT__ is not None:
return __UTOOL_STDOUT__
else:
return sys.stdout
def _utool_write():
if __UTOOL_WRITE__ is not None:
return __UTOOL_WRITE__
else:
return sys.stdout.write
def _utool_flush():
if __UTOOL_FLUSH__ is not None:
return __UTOOL_FLUSH__
else:
return sys.stdout.flush
def _utool_print():
if __UTOOL_PRINT__ is not None:
return __UTOOL_PRINT__
else:
return builtins.print
__STR__ = six.text_type
logdir_cacheid = 'log_dpath'
def testlogprog():
r"""
    Test to ensure that all progress lines are written to the file logger
    while only a few progress lines are written to stdout.
(if backspace is specified)
CommandLine:
python -m utool.util_logging testlogprog --show --verb-logging
python -m utool.util_logging testlogprog --show
Example:
>>> # DISABLE_DOCTEST
>>> from utool.util_logging import * # NOQA
>>> import utool as ut
>>> result = testlogprog()
>>> print(result)
"""
import utool as ut
print('Starting test log function')
def test_body(count, logmode, backspace):
ut.colorprint('\n---- count = %r -----' % (count,), 'yellow')
ut.colorprint('backspace = %r' % (backspace,), 'yellow')
ut.colorprint('logmode = %r' % (logmode,), 'yellow')
if logmode:
ut.delete('test.log')
ut.start_logging('test.log')
print('Start main loop')
import time
for count in ut.ProgressIter(range(20), freq=3, backspace=backspace):
time.sleep(.01)
print('Done with main loop work')
print('Exiting main body')
if logmode:
ut.stop_logging()
#print('-----DONE LOGGING----')
testlog_text = ut.readfrom('test.log')
print(ut.indent(testlog_text.replace('\r', '\n'), ' '))
def test_body2(count, logmode, backspace):
ut.colorprint('\n---- count = %r -----' % (count,), 'yellow')
ut.colorprint('backspace = %r' % (backspace,), 'yellow')
ut.colorprint('logmode = %r' % (logmode,), 'yellow')
if logmode:
ut.delete('test.log')
ut.start_logging('test.log')
print('Start main loop')
import time
for count in ut.ProgressIter(range(2), freq=1, backspace=backspace):
for count in ut.ProgressIter(range(50), freq=1, backspace=backspace):
time.sleep(.01)
print('Done with main loop work')
print('Exiting main body')
if logmode:
ut.stop_logging()
#print('-----DONE LOGGING----')
#testlog_text = ut.readfrom('test.log')
#print(ut.indent(testlog_text.replace('\r', '\n'), ' '))
#test_body(0, False, True)
#test_body(1, False, False)
#test_body(2, True, True)
#test_body(3, True, False)
test_body2(4, True, True)
test_body2(5, False, True)
def ensure_logging():
flag = is_logging()
if not flag:
start_logging()
return flag
def is_logging():
global __UTOOL_ROOT_LOGGER__
flag = __UTOOL_ROOT_LOGGER__ is not None
return flag
def debug_logging_iostreams():
print(' --- <DEBUG IOSTREAMS> --')
print('__STR__ = %r' % (__STR__,))
print('__IN_MAIN_PROCESS__ = %r' % (__IN_MAIN_PROCESS__,))
print('__UTOOL_ROOT_LOGGER__ = %r' % (__UTOOL_ROOT_LOGGER__,))
print('__CURRENT_LOG_FPATH__ = %r' % (__CURRENT_LOG_FPATH__,))
# print('__PYTHON_STDOUT__ = %r' % (__PYTHON_STDOUT__,))
# print('__PYTHON_PRINT__ = %r' % (__PYTHON_PRINT__,))
# print('__PYTHON_WRITE__ = %r' % (__PYTHON_WRITE__,))
# print('__PYTHON_FLUSH__ = %r' % (__PYTHON_FLUSH__,))
print('__UTOOL_STDOUT__ = %r' % (__UTOOL_STDOUT__,))
print('__UTOOL_PRINT__ = %r' % (__UTOOL_PRINT__,))
print('__UTOOL_FLUSH__ = %r' % (__UTOOL_FLUSH__,))
print('__UTOOL_WRITE__ = %r' % (__UTOOL_WRITE__,))
print(' --- </DEBUG IOSTREAMS> --')
def get_logging_dir(appname='default'):
"""
    The default log dir is in the system resource directory.
But the utool global cache allows for the user to override
where the logs for a specific app should be stored.
Returns:
log_dir_realpath (str): real path to logging directory
"""
from utool._internal import meta_util_cache
from utool._internal import meta_util_cplat
from utool import util_cache
if appname is None or appname == 'default':
appname = util_cache.get_default_appname()
resource_dpath = meta_util_cplat.get_resource_dir()
default = join(resource_dpath, appname, 'logs')
# Check global cache for a custom logging dir otherwise
# use the default.
log_dir = meta_util_cache.global_cache_read(logdir_cacheid,
appname=appname,
default=default)
log_dir_realpath = realpath(log_dir)
return log_dir_realpath
def get_shelves_dir(appname='default'):
"""
    The default shelf dir is in the system resource directory.
But the utool global cache allows for the user to override
where the shelf for a specific app should be stored.
Returns:
log_dir_realpath (str): real path to shelves directory
"""
from utool._internal import meta_util_cache
from utool._internal import meta_util_cplat
from utool import util_cache
if appname is None or appname == 'default':
appname = util_cache.get_default_appname()
resource_dpath = meta_util_cplat.get_resource_dir()
default = join(resource_dpath, appname, 'shelves')
# Check global cache for a custom logging dir otherwise
# use the default.
log_dir = meta_util_cache.global_cache_read(logdir_cacheid,
appname=appname,
default=default)
log_dir_realpath = realpath(log_dir)
return log_dir_realpath
def get_current_log_fpath():
global __CURRENT_LOG_FPATH__
return __CURRENT_LOG_FPATH__
def get_current_log_text():
fpath = get_current_log_fpath()
if fpath is None:
text = None
else:
with open(fpath, 'r') as file_:
text = file_.read()
return text
def get_log_fpath(num='next', appname=None, log_dir=None):
"""
Returns:
log_fpath (str): path to log file
"""
if log_dir is None:
log_dir = get_logging_dir(appname=appname)
if not exists(log_dir):
os.makedirs(log_dir)
if appname is not None:
log_fname = appname + '_logs_%04d.out'
else:
log_fname = 'utool_logs_%04d.out'
if isinstance(num, six.string_types):
if num == 'next':
count = 0
log_fpath = join(log_dir, log_fname % count)
while exists(log_fpath):
log_fpath = join(log_dir, log_fname % count)
count += 1
else:
log_fpath = join(log_dir, log_fname % num)
return log_fpath
def get_utool_logger():
return __UTOOL_ROOT_LOGGER__
def add_logging_handler(handler, format_='file'):
"""
mostly for util_logging internals
"""
global __UTOOL_ROOT_LOGGER__
if __UTOOL_ROOT_LOGGER__ is None:
builtins.print('[WARNING] logger not started, cannot add handler')
return
# create formatter and add it to the handlers
#logformat = '%Y-%m-%d %H:%M:%S'
#logformat = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
timeformat = '%H:%M:%S'
if format_ == 'file':
logformat = '[%(asctime)s]%(message)s'
elif format_ == 'stdout':
logformat = '%(message)s'
else:
raise AssertionError('unknown logging format_: %r' % format_)
# Create formatter for handlers
formatter = logging.Formatter(logformat, timeformat)
handler.setLevel(logging.DEBUG)
handler.setFormatter(formatter)
__UTOOL_ROOT_LOGGER__.addHandler(handler)
class CustomStreamHandler(logging.Handler):
"""
Modified from logging.py
"""
def __init__(self, stream=None):
"""
Initialize the handler.
If stream is not specified, sys.stderr is used.
"""
self.terminator = "\n"
logging.Handler.__init__(self)
if stream is None:
stream = sys.stderr
self.stream = stream
def flush(self):
"""
Flushes the stream.
"""
self.acquire()
try:
if self.stream and hasattr(self.stream, "flush"):
self.stream.flush()
finally:
self.release()
def emit(self, record):
"""
Emit a record.
If a formatter is specified, it is used to format the record.
The record is then written to the stream with a trailing newline. If
exception information is present, it is formatted using
traceback.print_exception and appended to the stream. If the stream
has an 'encoding' attribute, it is used to determine how to do the
output to the stream.
"""
try:
msg = self.format(record)
stream = self.stream
fs = "%s%s"
if six.PY3 or not logging._unicode: # if no unicode support...
stream.write(fs % (msg, self.terminator))
else:
try:
if (isinstance(msg, unicode) and getattr(stream, 'encoding', None)):
ufs = u'%s%s'
try:
stream.write(ufs % (msg, self.terminator))
except UnicodeEncodeError:
#Printing to terminals sometimes fails. For example,
#with an encoding of 'cp1251', the above write will
#work if written to a stream opened or wrapped by
#the codecs module, but fail when writing to a
#terminal even when the codepage is set to cp1251.
#An extra encoding step seems to be needed.
stream.write((ufs % (msg, self.terminator)).encode(stream.encoding))
else:
stream.write(fs % (msg, self.terminator))
except UnicodeError:
stream.write(fs % (msg.encode("UTF-8"), self.terminator.encode("UTF-8")))
#self.flush()
except (KeyboardInterrupt, SystemExit):
raise
except:
self.handleError(record)
def start_logging(log_fpath=None, mode='a', appname='default', log_dir=None):
r"""
Overwrites utool print functions to use a logger
CommandLine:
python -m utool.util_logging --test-start_logging:0
python -m utool.util_logging --test-start_logging:1
Example0:
>>> # DISABLE_DOCTEST
>>> import sys
>>> sys.argv.append('--verb-logging')
>>> import utool as ut
>>> ut.start_logging()
>>> ut.util_logging._utool_print()('hello world')
>>> ut.util_logging._utool_write()('writing1')
>>> ut.util_logging._utool_write()('writing2\n')
>>> ut.util_logging._utool_write()('writing3')
>>> ut.util_logging._utool_flush()()
>>> handler = ut.util_logging.__UTOOL_ROOT_LOGGER__.handlers[0]
>>> current_log_fpath = handler.stream.name
>>> current_log_text = ut.read_from(current_log_fpath)
>>> print('current_log_text =\n%s' % (current_log_text,))
>>> assert current_log_text.find('hello world') > 0, 'cant hello world'
>>> assert current_log_text.find('writing1writing2') > 0, 'cant find writing1writing2'
>>> assert current_log_text.find('writing3') > 0, 'cant find writing3'
Example1:
>>> # DISABLE_DOCTEST
>>> # Ensure that progress is logged
>>> import sys
>>> sys.argv.append('--verb-logging')
>>> import utool as ut
>>> ut.start_logging()
>>> [x for x in ut.ProgressIter(range(0, 1000), freq=4)]
>>> handler = ut.util_logging.__UTOOL_ROOT_LOGGER__.handlers[0]
>>> current_log_fpath = handler.stream.name
>>> current_log_text = ut.read_from(current_log_fpath)
>>> assert current_log_text.find('rate') > 0, 'progress was not logged'
>>> print(current_log_text)
"""
global __UTOOL_ROOT_LOGGER__
global __UTOOL_PRINT__
global __UTOOL_WRITE__
global __UTOOL_FLUSH__
global __CURRENT_LOG_FPATH__
if LOGGING_VERBOSE:
print('[utool] start_logging()')
# FIXME: The test for doctest may not work
if __UTOOL_ROOT_LOGGER__ is None and __IN_MAIN_PROCESS__ and not __inside_doctest():
if LOGGING_VERBOSE:
print('[utool] start_logging()... rootcheck OK')
#logging.config.dictConfig(LOGGING)
if log_fpath is None:
log_fpath = get_log_fpath(num='next', appname=appname, log_dir=log_dir)
__CURRENT_LOG_FPATH__ = log_fpath
# Print what is about to happen
if VERBOSE or LOGGING_VERBOSE:
startmsg = ('logging to log_fpath=%r' % log_fpath)
_utool_print()(startmsg)
# Create root logger
__UTOOL_ROOT_LOGGER__ = logging.getLogger('root')
__UTOOL_ROOT_LOGGER__.setLevel('DEBUG')
# create file handler which logs even debug messages
#fh = logging.handlers.WatchedFileHandler(log_fpath)
logfile_handler = logging.FileHandler(log_fpath, mode=mode)
#stdout_handler = logging.StreamHandler(__UTOOL_STDOUT__)
stdout_handler = CustomStreamHandler(__UTOOL_STDOUT__)
stdout_handler.terminator = ''
# http://stackoverflow.com/questions/7168790/suppress-newline-in-python-logging-module
#stdout_handler.terminator = ''
add_logging_handler(logfile_handler, format_='file')
add_logging_handler(stdout_handler, format_='stdout')
__UTOOL_ROOT_LOGGER__.propagate = False
__UTOOL_ROOT_LOGGER__.setLevel(logging.DEBUG)
# Overwrite utool functions with the logging functions
def utool_flush(*args):
""" flushes whatever is in the current utool write buffer """
# Flushes only the stdout handler
stdout_handler.flush()
#__UTOOL_ROOT_LOGGER__.flush()
#global __UTOOL_WRITE_BUFFER__
#if len(__UTOOL_WRITE_BUFFER__) > 0:
# msg = ''.join(__UTOOL_WRITE_BUFFER__)
# #sys.stdout.write('FLUSHING %r\n' % (len(__UTOOL_WRITE_BUFFER__)))
# __UTOOL_WRITE_BUFFER__ = []
# return __UTOOL_ROOT_LOGGER__.info(msg)
#__PYTHON_FLUSH__()
def utool_write(*args):
""" writes to current utool logs and to sys.stdout.write """
#global __UTOOL_WRITE_BUFFER__
#sys.stdout.write('WRITEING\n')
msg = ', '.join(map(six.text_type, args))
#__UTOOL_WRITE_BUFFER__.append(msg)
__UTOOL_ROOT_LOGGER__.info(msg)
#if msg.endswith('\n'):
# # Flush on newline, and remove newline
# __UTOOL_WRITE_BUFFER__[-1] = __UTOOL_WRITE_BUFFER__[-1][:-1]
# utool_flush()
#elif len(__UTOOL_WRITE_BUFFER__) > 32:
# # Flush if buffer is too large
# utool_flush()
if not PRINT_ALL_CALLERS:
def utool_print(*args):
""" standard utool print function """
#sys.stdout.write('PRINT\n')
endline = '\n'
try:
msg = ', '.join(map(six.text_type, args))
return __UTOOL_ROOT_LOGGER__.info(msg + endline)
except UnicodeDecodeError:
new_msg = ', '.join(map(meta_util_six.ensure_unicode, args))
#print(new_msg)
return __UTOOL_ROOT_LOGGER__.info(new_msg + endline)
else:
def utool_print(*args):
""" debugging utool print function """
import utool as ut
utool_flush()
endline = '\n'
__UTOOL_ROOT_LOGGER__.info('\n\n----------')
__UTOOL_ROOT_LOGGER__.info(ut.get_caller_name(range(0, 20)))
return __UTOOL_ROOT_LOGGER__.info(', '.join(map(six.text_type, args)) + endline)
def utool_printdbg(*args):
""" DRPRICATE standard utool print debug function """
return __UTOOL_ROOT_LOGGER__.debug(', '.join(map(six.text_type, args)))
# overwrite the utool printers
__UTOOL_WRITE__ = utool_write
__UTOOL_FLUSH__ = utool_flush
__UTOOL_PRINT__ = utool_print
# Test out our shiney new logger
if VERBOSE or LOGGING_VERBOSE:
__UTOOL_PRINT__('<__LOG_START__>')
__UTOOL_PRINT__(startmsg)
else:
if LOGGING_VERBOSE:
print('[utool] start_logging()... FAILED TO START')
print('DEBUG INFO')
print('__inside_doctest() = %r' % (__inside_doctest(),))
print('__IN_MAIN_PROCESS__ = %r' % (__IN_MAIN_PROCESS__,))
print('__UTOOL_ROOT_LOGGER__ = %r' % (__UTOOL_ROOT_LOGGER__,))
def stop_logging():
"""
Restores utool print functions to python defaults
"""
global __UTOOL_ROOT_LOGGER__
global __UTOOL_PRINT__
global __UTOOL_WRITE__
global __UTOOL_FLUSH__
if __UTOOL_ROOT_LOGGER__ is not None:
# Flush remaining buffer
if VERBOSE or LOGGING_VERBOSE:
            _utool_print()('<__LOG_STOP__>')
_utool_flush()()
# Remove handlers
for h in __UTOOL_ROOT_LOGGER__.handlers[:]:
__UTOOL_ROOT_LOGGER__.removeHandler(h)
# Reset objects
__UTOOL_ROOT_LOGGER__ = None
__UTOOL_PRINT__ = None
__UTOOL_WRITE__ = None
__UTOOL_FLUSH__ = None
# HAVE TO HACK THIS IN FOR UTOOL SPECIAL CASE ONLY
# OTHER MODULE CAN USE NOINJECT
if PRINT_INJECT_ORDER:
from utool._internal import meta_util_dbg
callername = meta_util_dbg.get_caller_name(N=1, strict=False)
fmtdict = dict(callername=callername, modname='utool.util_logging')
msg = '[util_inject] {modname} is imported by {callername}'.format(**fmtdict)
builtins.print(msg)
if __name__ == '__main__':
"""
CommandLine:
python -m utool.util_logging
python -m utool.util_logging --allexamples
python -m utool.util_logging --allexamples --noface --nosrc
"""
import multiprocessing
multiprocessing.freeze_support() # for win32
import utool as ut # NOQA
ut.doctest_funcs()
| apache-2.0 | -7,436,012,474,006,777,000 | 35.353982 | 122 | 0.574099 | false |
Stymphalian/sublime-plugins | KeyMapQuery/KeyMapQuery.py | 1 | 8716 | import sublime, sublime_plugin
import re
from collections import namedtuple
import os.path
"""
KeyMapQueryCommand allows you to quickly query if a key-binding is bound.
A combo-box will appear displaying a list of bound key-bindings. Type a key-combination
into the input box to narrow the results (i.e. ctrl+k,ctrl+i).
If there is a conflict in key-bindings, by default, the highest precedence match
is shown lower in the list.
i.e. if ctrl+o is bound in two files.
["ctrl+o" : command 1]
["ctrl+o" : command 2] <-- this is the one which actually gets used.
"""
class KeyMapQueryCommand(sublime_plugin.WindowCommand):
"""
InternalObject holds state during the execution of the command.
"""
class InternalObject(object):
KeyMap = namedtuple("KeyMap",["filename","bindings"])
def __init__(self):
self.keymaps = []
self.single_array = []
self.settings = sublime.load_settings("KeyMapQuery.sublime-settings")
def get_key_binding(self,index):
s = self.single_array[index]
return s.split(":")[0]
def get_relative_index(self,index):
count = 0
for d in self.keymaps:
if count <= index < count + len(d.bindings):
return index - count
else:
count += len(d.bindings)
raise IndexError("Index out of range")
# given an index from the on_select() callback
# determine the sublime-keymap filename which it belongs to.
def get_filename(self,index):
count = 0
for d in self.keymaps:
if count <= index < count + len(d.bindings):
return d.filename
else:
count += len(d.bindings)
raise IndexError("Index out of range")
# Given the keymap files we loaded in, flatten them into
# a single array of strings to be used by the window.show_quick_panel()
def get_string_list_of_keymaps(self):
            # flatten the keymaps into a single array that contains only the key-binding objects
rs = []
for d in self.keymaps:
rs.extend(d.bindings)
# convert each key-binding into a string
# The format should look like
# ["ctrl+i","ctrl+j"] : command_to_be_run_1
# ["ctrl+i","ctrl+k"] : command_to_be_run_2
# ["ctrl+i","ctrl+l"] : command_to_be_run_3
def str_format(obj):
objs = map(lambda x: '"' + x +'"', obj["keys"])
return "{:30s} : {}".format("["+ ",".join(objs) + "]",obj["command"])
self.single_array = list(map(str_format,rs))
return self.single_array
# Load all the sublime-keymap files that are known to sublime.
# This includes keymap files zipped inside sublime-package directories.
def load_keymaps(self,file_re):
# Get all the keymap filenames
all_keymap_files = sublime.find_resources("*.sublime-keymap")
# sort them, such as described by the merging/precedence rules defined
# http://docs.sublimetext.info/en/latest/extensibility/packages.html?highlight=precedence
all_keymap_files.sort()
if self.settings.get("reverse_sort_order"):
all_keymap_files.reverse()
filtered_files = list(filter(lambda x : re.match(file_re,x) != None,all_keymap_files))
# Load the keymap files; decode them into pyobjects;
# and then convert them into KeyMap tuples
def mapToPythonObj(filename):
res = sublime.load_resource(filename)
# assumption is that the decoded json is represented as
# a python array of dictionaries.
return self.KeyMap(filename,sublime.decode_value(res))
self.keymaps = list(map(mapToPythonObj,filtered_files))
# --------------------------------------------------------------------------
# --------------------------------------------------------------------------
def __init__(self,window):
self.window = window
self.file_re = self._get_keymap_regex(sublime.platform())
self.state = None
def run(self):
self.state = self.InternalObject()
self.state.load_keymaps(self.file_re)
input_array = self.state.get_string_list_of_keymaps()
view = self.window.show_quick_panel(
input_array,
flags=sublime.MONOSPACE_FONT,
selected_index=0,
on_select= self.on_select,
on_highlight=None)
# on_highlight=self.on_highlight)
def _get_keymap_regex(self,platform):
if( platform == "windows"):
file_re = re.compile(r'(.*(Default \(Windows\)|Default)\.sublime-keymap)')
elif (platform == "linux"):
file_re = re.compile(r'(.*(Default \(Linux\)|Default)\.sublime-keymap)')
else:
file_re = re.compile(r'(.*(Default \(OSX\)|Default)\.sublime-keymap)')
return file_re
def on_highlight(self,value):
if value == -1:
return
def on_select(self,value):
if value == -1:
return
# open the keymap file.
filename = self.state.get_filename(value)
split_filename = "/".join(filename.split("/")[1:])
# This fucking sucks. I would really like to use the sublime API open_file()
# directly. This would get me a direct ref to the View object and allow me
# to set the cursor to the proper position to show the hotkey binding.
# There are a few problems with this:
        #  i) sublime-packages are compressed (zip).
        #  ii) packages are stored in different folders (packages, packages/user, etc)
        #      and they are all different on different architectures.
        #  iii) Changing the sel() on a view doesn't update the cursor position
        #       on the screen. Not sure if this is a bug, but I think it is
        #       because we aren't making edits using an Edit object. Therefore
# any changes that we make aren't known/shown until some user
# interaction
# Because of these problems I use the following hack.
# 1.) Open the file using the window.run_command, and use the ${packages}
# variables substitution.The internal sublime api automatically finds and
        #    variable substitution. The internal sublime api automatically finds and
# and the proper files gets opened (assuming it exists).
# 2.) The pit-fall to this is that I don't get a direct ref to the View
# that is created/opened. This means that I can't set the cursor position.
# Additinally,because the run_command is async, I don't even know when
# the View gets fully loaded (i.e. I can't use window.active_view())
        #     Additionally, because the run_command is async, I don't even know when
        #     the View gets fully loaded (i.e. I can't use window.active_view())
        # 3.) To get around this problem, I create a helper TextCommand class.
        #     The purpose of this class is to position the cursor on the view.
# set the cursor position. I still have to use a set_timeout() in order
# to "ensure" the file is opened before I issue the command.
self.window.run_command("open_file",{"file":"${packages}/"+split_filename})
def inner():
self.window.run_command("move_cursor_to_pattern",
{"pattern":r'"keys"\s*:\s*\[',
"index":self.state.get_relative_index(value)})
        # TODO: extract settings into my own class, which allows you to specify defaults
delay= self.state.settings.get("timeout_delay")
if(delay == None):
delay = 250
sublime.set_timeout(inner,delay)
# A Helper command used to move the cursor to the beginning/end of
# a regex pattern in the view.
class MoveCursorToPatternCommand(sublime_plugin.TextCommand):
def run(self,edit,pattern,index=0):
r = self.view.find_all(pattern)
if index < 0 or index >= len(r):
print("Pattern not found \"{}\"".format(pattern))
return
r = r[index]
self.view.show_at_center(r)
sel = self.view.sel()
sel.clear()
sel.add(sublime.Region(r.b,r.b)) | mit | -5,049,045,572,851,911,000 | 44.37234 | 101 | 0.572855 | false |
alfredodeza/execnet | execnet/gateway_io.py | 1 | 7538 | # -*- coding: utf-8 -*-
"""
execnet io initialization code
creates io instances used for gateway io
"""
import os
import shlex
import sys
try:
from execnet.gateway_base import Popen2IO, Message
except ImportError:
from __main__ import Popen2IO, Message
from functools import partial
class Popen2IOMaster(Popen2IO):
def __init__(self, args, execmodel):
self.popen = p = execmodel.PopenPiped(args)
Popen2IO.__init__(self, p.stdin, p.stdout, execmodel=execmodel)
def wait(self):
try:
return self.popen.wait()
except OSError:
pass # subprocess probably dead already
def kill(self):
killpopen(self.popen)
def killpopen(popen):
try:
if hasattr(popen, "kill"):
popen.kill()
else:
killpid(popen.pid)
except EnvironmentError:
sys.stderr.write("ERROR killing: %s\n" % (sys.exc_info()[1]))
sys.stderr.flush()
def killpid(pid):
if hasattr(os, "kill"):
os.kill(pid, 15)
elif sys.platform == "win32" or getattr(os, "_name", None) == "nt":
import ctypes
PROCESS_TERMINATE = 1
handle = ctypes.windll.kernel32.OpenProcess(PROCESS_TERMINATE, False, pid)
ctypes.windll.kernel32.TerminateProcess(handle, -1)
ctypes.windll.kernel32.CloseHandle(handle)
else:
raise EnvironmentError("no method to kill {}".format(pid))
popen_bootstrapline = "import sys;exec(eval(sys.stdin.readline()))"
def shell_split_path(path):
"""
Use shell lexer to split the given path into a list of components,
taking care to handle Windows' '\' correctly.
"""
if sys.platform.startswith("win"):
# replace \\ by / otherwise shlex will strip them out
path = path.replace("\\", "/")
return shlex.split(path)
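# For illustration (hypothetical input, not from the source): on Windows,
#   shell_split_path(r'C:\Python27\pythonw.exe -O')
# returns ['C:/Python27/pythonw.exe', '-O'], because the backslashes are
# normalized to forward slashes before shlex.split() runs.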
def popen_args(spec):
args = shell_split_path(spec.python) if spec.python else [sys.executable]
args.append("-u")
if spec is not None and spec.dont_write_bytecode:
args.append("-B")
# Slight gymnastics in ordering these arguments because CPython (as of
# 2.7.1) ignores -B if you provide `python -c "something" -B`
args.extend(["-c", popen_bootstrapline])
return args
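# For a spec with dont_write_bytecode set and no custom python, popen_args()
# yields something like the following (illustrative; the first entry is
# whatever sys.executable points at):
#   [sys.executable, '-u', '-B', '-c', popen_bootstrapline]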
def ssh_args(spec):
# NOTE: If changing this, you need to sync those changes to vagrant_args
# as well, or, take some time to further refactor the commonalities of
# ssh_args and vagrant_args.
remotepython = spec.python or "python"
args = ["ssh", "-C"]
if spec.ssh_config is not None:
args.extend(["-F", str(spec.ssh_config)])
args.extend(spec.ssh.split())
remotecmd = '{} -c "{}"'.format(remotepython, popen_bootstrapline)
args.append(remotecmd)
return args
def vagrant_ssh_args(spec):
# This is the vagrant-wrapped version of SSH. Unfortunately the
# command lines are incompatible to just channel through ssh_args
# due to ordering/templating issues.
# NOTE: This should be kept in sync with the ssh_args behaviour.
# spec.vagrant is identical to spec.ssh in that they both carry
# the remote host "address".
remotepython = spec.python or "python"
args = ["vagrant", "ssh", spec.vagrant_ssh, "--", "-C"]
if spec.ssh_config is not None:
args.extend(["-F", str(spec.ssh_config)])
remotecmd = '{} -c "{}"'.format(remotepython, popen_bootstrapline)
args.extend([remotecmd])
return args
def create_io(spec, execmodel):
if spec.popen:
args = popen_args(spec)
return Popen2IOMaster(args, execmodel)
if spec.ssh:
args = ssh_args(spec)
io = Popen2IOMaster(args, execmodel)
io.remoteaddress = spec.ssh
return io
if spec.vagrant_ssh:
args = vagrant_ssh_args(spec)
io = Popen2IOMaster(args, execmodel)
io.remoteaddress = spec.vagrant_ssh
return io
#
# Proxy Gateway handling code
#
# master: proxy initiator
# forwarder: forwards between master and sub
# sub: sub process that is proxied to the initiator
RIO_KILL = 1
RIO_WAIT = 2
RIO_REMOTEADDRESS = 3
RIO_CLOSE_WRITE = 4
class ProxyIO(object):
""" A Proxy IO object allows to instantiate a Gateway
through another "via" gateway. A master:ProxyIO object
provides an IO object effectively connected to the sub
via the forwarder. To achieve this, master:ProxyIO interacts
with forwarder:serve_proxy_io() which itself
instantiates and interacts with the sub.
"""
def __init__(self, proxy_channel, execmodel):
# after exchanging the control channel we use proxy_channel
# for messaging IO
self.controlchan = proxy_channel.gateway.newchannel()
proxy_channel.send(self.controlchan)
self.iochan = proxy_channel
self.iochan_file = self.iochan.makefile("r")
self.execmodel = execmodel
def read(self, nbytes):
return self.iochan_file.read(nbytes)
def write(self, data):
return self.iochan.send(data)
def _controll(self, event):
self.controlchan.send(event)
return self.controlchan.receive()
def close_write(self):
self._controll(RIO_CLOSE_WRITE)
def kill(self):
self._controll(RIO_KILL)
def wait(self):
return self._controll(RIO_WAIT)
@property
def remoteaddress(self):
return self._controll(RIO_REMOTEADDRESS)
def __repr__(self):
return "<RemoteIO via {}>".format(self.iochan.gateway.id)
class PseudoSpec:
def __init__(self, vars):
self.__dict__.update(vars)
def __getattr__(self, name):
return None
def serve_proxy_io(proxy_channelX):
execmodel = proxy_channelX.gateway.execmodel
log = partial(
proxy_channelX.gateway._trace, "serve_proxy_io:%s" % proxy_channelX.id
)
spec = PseudoSpec(proxy_channelX.receive())
# create sub IO object which we will proxy back to our proxy initiator
sub_io = create_io(spec, execmodel)
control_chan = proxy_channelX.receive()
log("got control chan", control_chan)
# read data from master, forward it to the sub
# XXX writing might block, thus blocking the receiver thread
def forward_to_sub(data):
log("forward data to sub, size %s" % len(data))
sub_io.write(data)
proxy_channelX.setcallback(forward_to_sub)
def controll(data):
if data == RIO_WAIT:
control_chan.send(sub_io.wait())
elif data == RIO_KILL:
control_chan.send(sub_io.kill())
elif data == RIO_REMOTEADDRESS:
control_chan.send(sub_io.remoteaddress)
elif data == RIO_CLOSE_WRITE:
control_chan.send(sub_io.close_write())
control_chan.setcallback(controll)
# write data to the master coming from the sub
forward_to_master_file = proxy_channelX.makefile("w")
# read bootstrap byte from sub, send it on to master
log("reading bootstrap byte from sub", spec.id)
initial = sub_io.read(1)
assert initial == "1".encode("ascii"), initial
log("forwarding bootstrap byte from sub", spec.id)
forward_to_master_file.write(initial)
# enter message forwarding loop
while True:
try:
message = Message.from_io(sub_io)
except EOFError:
log("EOF from sub, terminating proxying loop", spec.id)
break
message.to_io(forward_to_master_file)
# proxy_channelX will be closed from remote_exec's finalization code
if __name__ == "__channelexec__":
serve_proxy_io(channel) # noqa
| mit | 4,103,531,356,347,174,000 | 29.152 | 82 | 0.645795 | false |
wpjesus/codematch | ietf/secr/proceedings/forms.py | 1 | 6833 | import os
from django import forms
from django.conf import settings
from django.template.defaultfilters import filesizeformat
from ietf.doc.models import Document
from ietf.group.models import Group
from ietf.name.models import DocTypeName
from ietf.meeting.models import Meeting, Session
# ---------------------------------------------
# Globals
# ---------------------------------------------
VALID_SLIDE_EXTENSIONS = ('.doc','.docx','.pdf','.ppt','.pptx','.txt','.zip')
VALID_MINUTES_EXTENSIONS = ('.txt','.html','.htm','.pdf')
VALID_AGENDA_EXTENSIONS = ('.txt','.html','.htm')
VALID_BLUESHEET_EXTENSIONS = ('.pdf','.jpg','.jpeg')
#----------------------------------------------------------
# Forms
#----------------------------------------------------------
class AjaxChoiceField(forms.ChoiceField):
'''
Special ChoiceField to use when populating options with Ajax. The submitted value
is not in the initial choices list so we need to override valid_value().
'''
def valid_value(self, value):
return True
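    # e.g. (illustrative): session = AjaxChoiceField(choices=(('', '----'),))
    # accepts whatever session id the browser posts, because the real options
    # are filled in client-side after the page loads.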
class EditSlideForm(forms.ModelForm):
class Meta:
model = Document
fields = ('title',)
class InterimMeetingForm(forms.Form):
date = forms.DateField(help_text="(YYYY-MM-DD Format, please)")
group_acronym_id = forms.CharField(widget=forms.HiddenInput())
def clean(self):
super(InterimMeetingForm, self).clean()
cleaned_data = self.cleaned_data
# need to use get() here, if the date field isn't valid it won't exist
date = cleaned_data.get('date','')
group_acronym_id = cleaned_data["group_acronym_id"]
qs = Meeting.objects.filter(type='interim',date=date,session__group__acronym=group_acronym_id)
if qs:
raise forms.ValidationError('A meeting already exists for this date.')
return cleaned_data
class RecordingForm(forms.Form):
group = forms.CharField(max_length=40)
external_url = forms.URLField(label='Url')
session = AjaxChoiceField(choices=(('','----'),))
def clean_session(self):
'''
Emulate ModelChoiceField functionality
'''
id = self.cleaned_data.get('session')
try:
return Session.objects.get(id=id)
except Session.DoesNotExist:
raise forms.ValidationError('Invalid Session')
def clean_group(self):
acronym = self.cleaned_data.get('group')
try:
return Group.objects.get(acronym=acronym)
except Group.DoesNotExist:
raise forms.ValidationError('Invalid group name')
class RecordingEditForm(forms.ModelForm):
class Meta:
model = Document
fields = ['external_url']
def __init__(self, *args, **kwargs):
super(RecordingEditForm, self).__init__(*args, **kwargs)
self.fields['external_url'].label='Url'
class ReplaceSlideForm(forms.ModelForm):
file = forms.FileField(label='Select File')
class Meta:
model = Document
fields = ('title',)
def clean_file(self):
file = self.cleaned_data.get('file')
ext = os.path.splitext(file.name)[1].lower()
if ext not in VALID_SLIDE_EXTENSIONS:
raise forms.ValidationError('Only these file types supported for presentation slides: %s' % ','.join(VALID_SLIDE_EXTENSIONS))
if file._size > settings.SECR_MAX_UPLOAD_SIZE:
raise forms.ValidationError('Please keep filesize under %s. Current filesize %s' % (filesizeformat(settings.SECR_MAX_UPLOAD_SIZE), filesizeformat(file._size)))
return file
class UnifiedUploadForm(forms.Form):
acronym = forms.CharField(widget=forms.HiddenInput())
meeting_id = forms.CharField(widget=forms.HiddenInput())
material_type = forms.ModelChoiceField(queryset=DocTypeName.objects.filter(slug__in=('minutes','agenda','slides','bluesheets')),empty_label=None)
slide_name = forms.CharField(label='Name of Presentation',max_length=255,required=False,help_text="For presentations only")
file = forms.FileField(label='Select File',help_text='<div id="id_file_help">Note 1: You can only upload a presentation file in txt, pdf, doc, or ppt/pptx. System will not accept presentation files in any other format.<br><br>Note 2: All uploaded files will be available to the public immediately on the Preliminary Page. However, for the Proceedings, ppt/pptx files will be converted to html format and doc files will be converted to pdf format manually by the Secretariat staff.</div>')
def clean_file(self):
file = self.cleaned_data['file']
if file._size > settings.SECR_MAX_UPLOAD_SIZE:
raise forms.ValidationError('Please keep filesize under %s. Current filesize %s' % (filesizeformat(settings.SECR_MAX_UPLOAD_SIZE), filesizeformat(file._size)))
return file
def clean(self):
super(UnifiedUploadForm, self).clean()
# if an invalid file type is supplied no file attribute will exist
if self.errors:
return self.cleaned_data
cleaned_data = self.cleaned_data
material_type = cleaned_data['material_type']
slide_name = cleaned_data['slide_name']
file = cleaned_data['file']
ext = os.path.splitext(file.name)[1].lower()
if material_type.slug == 'slides' and not slide_name:
            raise forms.ValidationError('ERROR: Name of Presentation cannot be blank')
# only supporting PDFs per Alexa 04-05-2011
#if material_type == 1 and not file_ext[1] == '.pdf':
# raise forms.ValidationError('Presentations must be a PDF file')
# validate file extensions based on material type (slides,agenda,minutes,bluesheets)
# valid extensions per online documentation: meeting-materials.html
# 09-14-11 added ppt, pdf per Alexa
# 04-19-12 txt/html for agenda, +pdf for minutes per Russ
if material_type.slug == 'slides' and ext not in VALID_SLIDE_EXTENSIONS:
raise forms.ValidationError('Only these file types supported for presentation slides: %s' % ','.join(VALID_SLIDE_EXTENSIONS))
if material_type.slug == 'agenda' and ext not in VALID_AGENDA_EXTENSIONS:
raise forms.ValidationError('Only these file types supported for agendas: %s' % ','.join(VALID_AGENDA_EXTENSIONS))
if material_type.slug == 'minutes' and ext not in VALID_MINUTES_EXTENSIONS:
raise forms.ValidationError('Only these file types supported for minutes: %s' % ','.join(VALID_MINUTES_EXTENSIONS))
if material_type.slug == 'bluesheets' and ext not in VALID_BLUESHEET_EXTENSIONS:
raise forms.ValidationError('Only these file types supported for bluesheets: %s' % ','.join(VALID_BLUESHEET_EXTENSIONS))
return cleaned_data
| bsd-3-clause | 3,905,673,467,625,157,000 | 45.482993 | 492 | 0.647007 | false |
alaeddine10/ggrc-core | src/ggrc/models/relationship_types.py | 1 | 21783 |
class RelationshipTypes(object):
@classmethod
def types(cls):
types = {}
for k, rt in RELATIONSHIP_TYPES.items():
types[k] = rt.copy()
types[k].update({ 'relationship_type': k })
return types
@classmethod
def get_type(cls, relationship_type_id):
return cls.types().get(relationship_type_id, None)
@classmethod
def valid_relationship_hash(cls, relationship_type, related_model, endpoint):
return dict(
relationship_type=relationship_type,
related_model=related_model,
related_model_endpoint=endpoint)
@classmethod
def valid_relationship(cls, obj_type, name, rel):
if 'symmetric' in rel and rel['symmetric']:
if rel['source_type'] == obj_type and rel['target_type'] == obj_type:
return cls.valid_relationship_hash(name, obj_type, 'both')
else:
if rel['source_type'] == obj_type:
return cls.valid_relationship_hash(
name, rel['target_type'], 'destination')
if rel['target_type'] == obj_type:
return cls.valid_relationship_hash(
name, rel['source_type'], 'source')
@classmethod
def valid_relationship_helper(cls, obj_type):
return [
cls.valid_relationship(obj_type, name, rel)
for name, rel in cls.types().items()]
@classmethod
def valid_relationships(cls, obj_type):
if not isinstance(obj_type, (str, unicode)):
if not isinstance(obj_type, type):
obj_type = obj_type.__class__
obj_type = obj_type.__name__
return [vr for vr in cls.valid_relationship_helper(obj_type) if vr]
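# For illustration (derived from the table below, not present in the source):
# RelationshipTypes.valid_relationships('Process') would include entries such as
#   {'relationship_type': 'data_asset_has_process',
#    'related_model': 'DataAsset',
#    'related_model_endpoint': 'source'}
# because 'Process' is the target_type of 'data_asset_has_process'.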
RELATIONSHIP_TYPES = {
'data_asset_has_process': {
'source_type': "DataAsset",
'target_type': "Process",
'forward_phrase': "has",
'reverse_phrase': "is a process for",
'forward_description': "This data asset relies upon the following processes.",
'reverse_description': "This process supports the following data assets."
},
'data_asset_relies_upon_data_asset': {
'source_type': "DataAsset",
'target_type': "DataAsset",
'forward_phrase': "relies upon",
'reverse_phrase': "supports",
'forward_description': "This data asset relies upon the following data assets.",
'reverse_description': "This data asset supports the following data assets."
},
'data_asset_relies_upon_facility': {
'source_type': "DataAsset",
'target_type': "Facility",
'forward_phrase': "relies upon",
'reverse_phrase': "supports",
'forward_description': "This data asset relies upon the following facilities.",
'reverse_description': "This facility supports the following data assets."
},
'data_asset_relies_upon_system': {
'source_type': "DataAsset",
'target_type': "System",
'forward_phrase': "relies upon",
'reverse_phrase': "supports",
'forward_description': "This data asset relies upon the following systems.",
'reverse_description': "This system supports the following data assets."
},
'facility_has_process': {
'source_type': "Facility",
'target_type': "Process",
'forward_phrase': "has",
'reverse_phrase': "is a process for",
'forward_description': "This facility relies upon the following processes.",
'reverse_description': "This process supports the following facilities."
},
'facility_relies_upon_data_asset': {
'source_type': "Facility",
'target_type': "DataAsset",
'forward_phrase': "relies upon",
'reverse_phrase': "supports",
'forward_description': "This facility relies upon the following data assets.",
'reverse_description': "This data asset supports the following facilities."
},
'facility_relies_upon_facility': {
'source_type': "Facility",
'target_type': "Facility",
'forward_phrase': "relies upon",
'reverse_phrase': "supports",
'forward_description': "This facility relies upon the following facilities.",
'reverse_description': "This facility supports the following facilities."
},
'facility_relies_upon_system': {
'source_type': "Facility",
'target_type': "System",
'forward_phrase': "relies upon",
'reverse_phrase': "supports",
'forward_description': "This facility relies upon the following systems.",
'reverse_description': "This system supports the following facilities."
},
'market_has_process': {
'source_type': "Market",
'target_type': "Process",
'forward_phrase': "has",
'reverse_phrase': "is a process for",
'forward_description': "This market relies upon the following processes.",
'reverse_description': "This process supports the following markets."
},
'market_includes_market': {
'source_type': "Market",
'target_type': "Market",
'forward_phrase': "includes",
'reverse_phrase': "is included in",
'forward_description': "This market includes the following markets.",
'reverse_description': "This market is included in the following markets."
},
'market_relies_upon_data_asset': {
'source_type': "Market",
'target_type': "DataAsset",
'forward_phrase': "relies upon",
'reverse_phrase': "supports",
'forward_description': "This market relies upon the following data assets.",
'reverse_description': "This data asset supports the following markets."
},
'market_relies_upon_facility': {
'source_type': "Market",
'target_type': "Facility",
'forward_phrase': "relies upon",
'reverse_phrase': "supports",
'forward_description': "This market relies upon the following facilities.",
'reverse_description': "This facility supports the following markets."
},
'market_relies_upon_system': {
'source_type': "Market",
'target_type': "System",
'forward_phrase': "relies upon",
'reverse_phrase': "supports",
'forward_description': "This market relies upon the following systems.",
'reverse_description': "This system supports the following markets."
},
'org_group_has_process': {
'source_type': "OrgGroup",
'target_type': "Process",
'forward_phrase': "has",
'reverse_phrase': "is a process for",
'forward_description': "This org group relies upon the following processes.",
'reverse_description': "This process supports the following org groups."
},
'org_group_is_affiliated_with_org_group': {
'source_type': "OrgGroup",
'target_type': "OrgGroup",
'symmetric': True,
'forward_phrase': "is affiliated/collaborates with",
'reverse_phrase': "is affiliated/collaborates with",
'forward_description': "This org group is affiliated/collaborates with the following org groups.",
'reverse_description': "This org group is affiliated/collaborates with the following org groups."
},
'org_group_is_responsible_for_data_asset': {
'source_type': "OrgGroup",
'target_type': "DataAsset",
'forward_phrase': "is responsible for",
'reverse_phrase': "is overseen by",
'forward_description': "This org group is responsible for the following data assets.",
'reverse_description': "This data asset is overseen by the following org groups."
},
'org_group_is_responsible_for_facility': {
'source_type': "OrgGroup",
'target_type': "Facility",
'forward_phrase': "is responsible for",
'reverse_phrase': "is overseen by",
'forward_description': "This org group is responsible for the following facilities.",
'reverse_description': "This facility is overseen by the following org groups."
},
'org_group_is_responsible_for_market': {
'source_type': "OrgGroup",
'target_type': "Market",
'forward_phrase': "is responsible for",
'reverse_phrase': "is overseen by",
'forward_description': "This org group is responsible for the following markets.",
'reverse_description': "This market is overseen by the following org groups."
},
'org_group_is_responsible_for_org_group': {
'source_type': "OrgGroup",
'target_type': "OrgGroup",
'forward_phrase': "is responsible for",
'reverse_phrase': "is overseen by",
'forward_description': "This org group is responsible for the following org groups.",
'reverse_description': "This org group is overseen by the following org groups."
},
'org_group_is_responsible_for_process': {
'source_type': "OrgGroup",
'target_type': "Process",
'forward_phrase': "is responsible for",
'reverse_phrase': "is overseen by",
'forward_description': "This org group is responsible for the following processes.",
'reverse_description': "This process is overseen by the following org groups."
},
'org_group_is_responsible_for_product': {
'source_type': "OrgGroup",
'target_type': "Product",
'forward_phrase': "is responsible for",
'reverse_phrase': "is overseen by",
'forward_description': "This org group is responsible for the following products.",
'reverse_description': "This product is overseen by the following org groups."
},
'org_group_is_responsible_for_project': {
'source_type': "OrgGroup",
'target_type': "Project",
'forward_phrase': "is responsible for",
'reverse_phrase': "is overseen by",
'forward_description': "This org group is responsible for the following projects.",
'reverse_description': "This project is overseen by the following org groups."
},
'org_group_is_responsible_for_system': {
'source_type': "OrgGroup",
'target_type': "System",
'forward_phrase': "is responsible for",
'reverse_phrase': "is overseen by",
'forward_description': "This org group is responsible for the following systems.",
'reverse_description': "This system is overseen by the following org groups."
},
'org_group_relies_upon_data_asset': {
'source_type': "OrgGroup",
'target_type': "DataAsset",
'forward_phrase': "relies upon",
'reverse_phrase': "supports",
'forward_description': "This org group relies upon the following data assets.",
'reverse_description': "This data asset supports the following org groups."
},
'org_group_relies_upon_facility': {
'source_type': "OrgGroup",
'target_type': "Facility",
'forward_phrase': "relies upon",
'reverse_phrase': "supports",
'forward_description': "This org group relies upon the following facilities.",
'reverse_description': "This facility supports the following org groups."
},
'org_group_relies_upon_org_group': {
'source_type': "OrgGroup",
'target_type': "OrgGroup",
'forward_phrase': "relies upon",
'reverse_phrase': "supports",
'forward_description': "This org group relies upon the following org groups.",
'reverse_description': "This org group supports the following org groups."
},
'org_group_relies_upon_system': {
'source_type': "OrgGroup",
'target_type': "System",
'forward_phrase': "relies upon",
'reverse_phrase': "supports",
'forward_description': "This org group relies upon the following systems.",
'reverse_description': "This system supports the following org groups."
},
'product_has_process': {
'source_type': "Product",
'target_type': "Process",
'forward_phrase': "has",
'reverse_phrase': "is a process for",
'forward_description': "This product relies upon the following processes.",
'reverse_description': "This process supports the following products."
},
'product_is_affiliated_with_product': {
'source_type': "Product",
'target_type': "Product",
'symmetric': True,
'forward_phrase': "is affiliated/collaborates with",
'reverse_phrase': "is affiliated/collaborates with",
'forward_description': "This product is affiliated/collaborates with the following products.",
'reverse_description': "This product is affiliated/collaborates with the following products."
},
'product_is_sold_into_market': {
'source_type': "Product",
'target_type': "Market",
'forward_phrase': "is sold into",
'reverse_phrase': "is a market for",
'forward_description': "This product is sold into the following markets.",
'reverse_description': "This market is a market for the following products."
},
'product_relies_upon_data_asset': {
'source_type': "Product",
'target_type': "DataAsset",
'forward_phrase': "relies upon",
'reverse_phrase': "supports",
'forward_description': "This product relies upon the following data assets.",
'reverse_description': "This data asset supports the following products."
},
'product_relies_upon_facility': {
'source_type': "Product",
'target_type': "Facility",
'forward_phrase': "relies upon",
'reverse_phrase': "supports",
'forward_description': "This product relies upon the following facilities.",
'reverse_description': "This facility supports the following products."
},
'product_relies_upon_product': {
'source_type': "Product",
'target_type': "Product",
'forward_phrase': "relies upon",
'reverse_phrase': "supports",
'forward_description': "This product relies upon the following products.",
'reverse_description': "This product supports the following products."
},
'product_relies_upon_system': {
'source_type': "Product",
'target_type': "System",
'forward_phrase': "relies upon",
'reverse_phrase': "supports",
'forward_description': "This product relies upon the following systems.",
'reverse_description': "This system supports the following products."
},
'program_applies_to_data_asset': {
'source_type': "Program",
'target_type': "DataAsset",
'forward_phrase': "applies to",
'reverse_phrase': "is within scope of",
'forward_description': "This program applies to the following data assets.",
'reverse_description': "This data asset is within scope of the following programs."
},
'program_applies_to_facility': {
'source_type': "Program",
'target_type': "Facility",
'forward_phrase': "applies to",
'reverse_phrase': "is within scope of",
'forward_description': "This program applies to the following facilities.",
'reverse_description': "This facility is within scope of the following programs."
},
'program_applies_to_market': {
'source_type': "Program",
'target_type': "Market",
'forward_phrase': "applies to",
'reverse_phrase': "is within scope of",
'forward_description': "This program applies to the following markets.",
'reverse_description': "This market is within scope of the following programs."
},
'program_applies_to_org_group': {
'source_type': "Program",
'target_type': "OrgGroup",
'forward_phrase': "applies to",
'reverse_phrase': "is within scope of",
'forward_description': "This program applies to the following org groups.",
'reverse_description': "This org group is within scope of the following programs."
},
'program_applies_to_process': {
'source_type': "Program",
'target_type': "Process",
'forward_phrase': "applies to",
'reverse_phrase': "is within scope of",
'forward_description': "This program applies to the following processes.",
'reverse_description': "This process is within scope of the following programs."
},
'program_applies_to_product': {
'source_type': "Program",
'target_type': "Product",
'forward_phrase': "applies to",
'reverse_phrase': "is within scope of",
'forward_description': "This program applies to the following products.",
'reverse_description': "This product is within scope of the following programs."
},
'program_applies_to_project': {
'source_type': "Program",
'target_type': "Project",
'forward_phrase': "applies to",
'reverse_phrase': "is within scope of",
'forward_description': "This program applies to the following projects.",
'reverse_description': "This project is within scope of the following programs."
},
'program_applies_to_system': {
'source_type': "Program",
'target_type': "System",
'forward_phrase': "applies to",
'reverse_phrase': "is within scope of",
'forward_description': "This program applies to the following systems.",
'reverse_description': "This system is within scope of the following programs."
},
'project_has_process': {
'source_type': "Project",
'target_type': "Process",
'forward_phrase': "has",
'reverse_phrase': "is a process for",
'forward_description': "This project relies upon the following processes.",
'reverse_description': "This process supports the following projects."
},
'project_relies_upon_data_asset': {
'source_type': "Project",
'target_type': "DataAsset",
'forward_phrase': "relies upon",
'reverse_phrase': "supports",
'forward_description': "This project relies upon the following data assets.",
'reverse_description': "This data asset supports the following projects."
},
'project_relies_upon_facility': {
'source_type': "Project",
'target_type': "Facility",
'forward_phrase': "relies upon",
'reverse_phrase': "supports",
'forward_description': "This project relies upon the following facilities.",
'reverse_description': "This facility supports the following projects."
},
'project_relies_upon_system': {
'source_type': "Project",
'target_type': "System",
'forward_phrase': "relies upon",
'reverse_phrase': "supports",
'forward_description': "This project relies upon the following systems.",
'reverse_description': "This system supports the following projects."
},
'project_targets_data_asset': {
'source_type': "Project",
'target_type': "DataAsset",
'forward_phrase': "targets",
'reverse_phrase': "is targeted by",
'forward_description': "This project targets the following data assets.",
'reverse_description': "This data asset is targeted by the following projects."
},
'project_targets_facility': {
'source_type': "Project",
'target_type': "Facility",
'forward_phrase': "targets",
'reverse_phrase': "is targeted by",
'forward_description': "This project targets the following facilities.",
'reverse_description': "This facility is targeted by the following projects."
},
'project_targets_market': {
'source_type': "Project",
'target_type': "Market",
'forward_phrase': "targets",
'reverse_phrase': "is targeted by",
'forward_description': "This project targets the following markets.",
'reverse_description': "This market is targeted by the following projects."
},
'project_targets_org_group': {
'source_type': "Project",
'target_type': "OrgGroup",
'forward_phrase': "targets",
'reverse_phrase': "is targeted by",
'forward_description': "This project targets the following org groups.",
'reverse_description': "This org group is targeted by the following projects."
},
'project_targets_product': {
'source_type': "Project",
'target_type': "Product",
'forward_phrase': "targets",
'reverse_phrase': "is targeted by",
'forward_description': "This project targets the following products.",
'reverse_description': "This product is targeted by the following projects."
},
'risk_is_a_threat_to_data_asset': {
'source_type': "Risk",
'target_type': "DataAsset",
'forward_phrase': "is a threat to",
'reverse_phrase': "is vulnerable to",
'forward_description': "This risk is a threat to the following data assets.",
'reverse_description': "This data asset is vulnerable to the following risks."
},
'risk_is_a_threat_to_facility': {
'source_type': "Risk",
'target_type': "Facility",
'forward_phrase': "is a threat to",
'reverse_phrase': "is vulnerable to",
'forward_description': "This risk is a threat to the following facilities.",
    'reverse_description': "This facility is vulnerable to the following risks."
},
'risk_is_a_threat_to_market': {
'source_type': "Risk",
'target_type': "Market",
'forward_phrase': "is a threat to",
'reverse_phrase': "is vulnerable to",
'forward_description': "This risk is a threat to the following markets.",
'reverse_description': "This market is vulnerable to the following risks."
},
'risk_is_a_threat_to_org_group': {
'source_type': "Risk",
'target_type': "OrgGroup",
'forward_phrase': "is a threat to",
'reverse_phrase': "is vulnerable to",
    'forward_description': "This risk is a threat to the following org groups.",
'reverse_description': "This org group is vulnerable to the following risks."
},
'risk_is_a_threat_to_process': {
'source_type': "Risk",
'target_type': "Process",
'forward_phrase': "is a threat to",
'reverse_phrase': "is vulnerable to",
'forward_description': "This risk is a threat to the following processes.",
'reverse_description': "This process is vulnerable to the following risks."
},
'risk_is_a_threat_to_product': {
'source_type': "Risk",
'target_type': "Product",
'forward_phrase': "is a threat to",
'reverse_phrase': "is vulnerable to",
'forward_description': "This risk is a threat to the following products.",
'reverse_description': "This product is vulnerable to the following risks."
},
'risk_is_a_threat_to_project': {
'source_type': "Risk",
'target_type': "Project",
'forward_phrase': "is a threat to",
'reverse_phrase': "is vulnerable to",
'forward_description': "This risk is a threat to the following projects.",
'reverse_description': "This project is vulnerable to the following risks."
},
'risk_is_a_threat_to_system': {
'source_type': "Risk",
'target_type': "System",
'forward_phrase': "is a threat to",
'reverse_phrase': "is vulnerable to",
'forward_description': "This risk is a threat to the following systems.",
'reverse_description': "This system is vulnerable to the following risks."
},
}
| apache-2.0 | -5,848,448,770,137,311,000 | 40.333966 | 102 | 0.66786 | false |
3liz/qgis-wps4server | filters/PyWPS/tests/process_inits.py | 1 | 3027 | import os
import sys
pywpsPath = os.path.abspath(os.path.join(
os.path.split(os.path.abspath(__file__))[0], ".."))
sys.path.append(pywpsPath)
import pywps
import pywps.Process
import unittest
from xml.dom import minidom
class RequestGetTestCase(unittest.TestCase):
inputs = None
getcapabilitiesrequest = "service=wps&request=getcapabilities"
getdescribeprocessrequest = "service=wps&request=describeprocess&version=1.0.0&identifier=dummyprocess"
getexecuterequest = "service=wps&request=execute&version=1.0.0&identifier=dummyprocess&datainputs=[input1=20;input2=10]"
wfsurl = "http://www2.dmsolutions.ca/cgi-bin/mswfs_gmap?version=1.0.0&request=getfeature&service=wfs&typename=park"
wpsns = "http://www.opengis.net/wps/1.0.0"
xmldom = None
def setUp(self):
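        # Silence anything PyWPS writes to stderr while the tests run.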
sys.stderr = open("/dev/null", "w")
def testLoadProcessesFromClass(self):
"""Test, if we can load process as classes"""
class newClassProcess(pywps.Process.WPSProcess):
def __init__(self):
pywps.Process.WPSProcess.__init__(
self, identifier="foo", title="bar")
mypywps = pywps.Pywps(pywps.METHOD_GET)
inputs = mypywps.parseRequest(self.getcapabilitiesrequest)
mypywps.performRequest(self.inputs, [newClassProcess])
xmldom = minidom.parseString(mypywps.response)
self.assertTrue(
len(xmldom.getElementsByTagNameNS(self.wpsns, "Process")) > 0)
def testLoadProcessesAsInstance(self):
"""Test, if we can load process as instances"""
class newClassProcess(pywps.Process.WPSProcess):
def __init__(self):
pywps.Process.WPSProcess.__init__(
self, identifier="foo", title="bar")
mypywps = pywps.Pywps(pywps.METHOD_GET)
inputs = mypywps.parseRequest(self.getcapabilitiesrequest)
mypywps.performRequest(self.inputs, [newClassProcess()])
xmldom = minidom.parseString(mypywps.response)
self.assertTrue(
len(xmldom.getElementsByTagNameNS(self.wpsns, "Process")) > 0)
def testLoadProcessesFromEnvVar(self):
"""Test, if we can load processes set from PYWPS_PROCESSES
environment variable"""
self._setFromEnv()
mypywps = pywps.Pywps(pywps.METHOD_GET)
inputs = mypywps.parseRequest(self.getcapabilitiesrequest)
mypywps.performRequest(inputs)
xmldom = minidom.parseString(mypywps.response)
        self.assertEqual(len(mypywps.request.processes), 14)
self.assertTrue(mypywps.request.getProcess("dummyprocess"))
def _setFromEnv(self):
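        # Point PYWPS_PROCESSES at the bundled test processes directory, both
        # via putenv() (for child processes) and os.environ (for this process).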
os.putenv("PYWPS_PROCESSES", os.path.join(
pywpsPath, "tests", "processes"))
os.environ["PYWPS_PROCESSES"] = os.path.join(
pywpsPath, "tests", "processes")
if __name__ == "__main__":
# unittest.main()
suite = unittest.TestLoader().loadTestsFromTestCase(RequestGetTestCase)
unittest.TextTestRunner(verbosity=2).run(suite)
| gpl-3.0 | -3,480,720,783,210,404,000 | 37.807692 | 124 | 0.670631 | false |
benjaminrigaud/django | tests/migrations/test_autodetector.py | 1 | 81958 | # -*- coding: utf-8 -*-
from django.conf import settings
from django.test import TestCase, override_settings
from django.db.migrations.autodetector import MigrationAutodetector
from django.db.migrations.questioner import MigrationQuestioner
from django.db.migrations.state import ProjectState, ModelState
from django.db.migrations.graph import MigrationGraph
from django.db.migrations.loader import MigrationLoader
from django.db import models, connection
from django.contrib.auth.models import AbstractBaseUser
class DeconstructableObject(object):
"""
A custom deconstructable object.
"""
def deconstruct(self):
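        # Return the (dotted path, args, kwargs) triple that the migration
        # serializer uses to recreate this object.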
return self.__module__ + '.' + self.__class__.__name__, [], {}
class AutodetectorTests(TestCase):
"""
Tests the migration autodetector.
"""
author_empty = ModelState("testapp", "Author", [("id", models.AutoField(primary_key=True))])
author_name = ModelState("testapp", "Author", [("id", models.AutoField(primary_key=True)), ("name", models.CharField(max_length=200))])
author_name_null = ModelState("testapp", "Author", [("id", models.AutoField(primary_key=True)), ("name", models.CharField(max_length=200, null=True))])
author_name_longer = ModelState("testapp", "Author", [("id", models.AutoField(primary_key=True)), ("name", models.CharField(max_length=400))])
author_name_renamed = ModelState("testapp", "Author", [("id", models.AutoField(primary_key=True)), ("names", models.CharField(max_length=200))])
author_name_default = ModelState("testapp", "Author", [("id", models.AutoField(primary_key=True)), ("name", models.CharField(max_length=200, default='Ada Lovelace'))])
author_name_deconstructable_1 = ModelState("testapp", "Author", [("id", models.AutoField(primary_key=True)), ("name", models.CharField(max_length=200, default=DeconstructableObject()))])
author_name_deconstructable_2 = ModelState("testapp", "Author", [("id", models.AutoField(primary_key=True)), ("name", models.CharField(max_length=200, default=DeconstructableObject()))])
author_name_deconstructable_3 = ModelState("testapp", "Author", [("id", models.AutoField(primary_key=True)), ("name", models.CharField(max_length=200, default=models.IntegerField()))])
author_name_deconstructable_4 = ModelState("testapp", "Author", [("id", models.AutoField(primary_key=True)), ("name", models.CharField(max_length=200, default=models.IntegerField()))])
author_custom_pk = ModelState("testapp", "Author", [("pk_field", models.IntegerField(primary_key=True))])
author_with_book = ModelState("testapp", "Author", [("id", models.AutoField(primary_key=True)), ("name", models.CharField(max_length=200)), ("book", models.ForeignKey("otherapp.Book"))])
author_with_book_order_wrt = ModelState("testapp", "Author", [("id", models.AutoField(primary_key=True)), ("name", models.CharField(max_length=200)), ("book", models.ForeignKey("otherapp.Book"))], options={"order_with_respect_to": "book"})
author_renamed_with_book = ModelState("testapp", "Writer", [("id", models.AutoField(primary_key=True)), ("name", models.CharField(max_length=200)), ("book", models.ForeignKey("otherapp.Book"))])
author_with_publisher_string = ModelState("testapp", "Author", [("id", models.AutoField(primary_key=True)), ("name", models.CharField(max_length=200)), ("publisher_name", models.CharField(max_length=200))])
author_with_publisher = ModelState("testapp", "Author", [("id", models.AutoField(primary_key=True)), ("name", models.CharField(max_length=200)), ("publisher", models.ForeignKey("testapp.Publisher"))])
author_with_custom_user = ModelState("testapp", "Author", [("id", models.AutoField(primary_key=True)), ("name", models.CharField(max_length=200)), ("user", models.ForeignKey("thirdapp.CustomUser"))])
author_proxy = ModelState("testapp", "AuthorProxy", [], {"proxy": True}, ("testapp.author", ))
author_proxy_options = ModelState("testapp", "AuthorProxy", [], {"proxy": True, "verbose_name": "Super Author"}, ("testapp.author", ))
author_proxy_notproxy = ModelState("testapp", "AuthorProxy", [], {}, ("testapp.author", ))
author_proxy_third = ModelState("thirdapp", "AuthorProxy", [], {"proxy": True}, ("testapp.author", ))
author_proxy_proxy = ModelState("testapp", "AAuthorProxyProxy", [], {"proxy": True}, ("testapp.authorproxy", ))
author_unmanaged = ModelState("testapp", "AuthorUnmanaged", [], {"managed": False}, ("testapp.author", ))
author_unmanaged_managed = ModelState("testapp", "AuthorUnmanaged", [], {}, ("testapp.author", ))
author_unmanaged_default_pk = ModelState("testapp", "Author", [("id", models.AutoField(primary_key=True))])
author_unmanaged_custom_pk = ModelState("testapp", "Author", [
("pk_field", models.IntegerField(primary_key=True)),
])
author_with_m2m = ModelState("testapp", "Author", [
("id", models.AutoField(primary_key=True)),
("publishers", models.ManyToManyField("testapp.Publisher")),
])
author_with_m2m_through = ModelState("testapp", "Author", [("id", models.AutoField(primary_key=True)), ("publishers", models.ManyToManyField("testapp.Publisher", through="testapp.Contract"))])
author_with_options = ModelState("testapp", "Author", [("id", models.AutoField(primary_key=True))], {"verbose_name": "Authi", "permissions": [('can_hire', 'Can hire')]})
author_with_db_table_options = ModelState("testapp", "Author", [
("id", models.AutoField(primary_key=True))
], {"db_table": "author_one"})
author_with_new_db_table_options = ModelState("testapp", "Author", [
("id", models.AutoField(primary_key=True))
], {"db_table": "author_two"})
author_renamed_with_db_table_options = ModelState("testapp", "NewAuthor", [
("id", models.AutoField(primary_key=True))
], {"db_table": "author_one"})
contract = ModelState("testapp", "Contract", [("id", models.AutoField(primary_key=True)), ("author", models.ForeignKey("testapp.Author")), ("publisher", models.ForeignKey("testapp.Publisher"))])
publisher = ModelState("testapp", "Publisher", [("id", models.AutoField(primary_key=True)), ("name", models.CharField(max_length=100))])
publisher_with_author = ModelState("testapp", "Publisher", [("id", models.AutoField(primary_key=True)), ("author", models.ForeignKey("testapp.Author")), ("name", models.CharField(max_length=100))])
publisher_with_aardvark_author = ModelState("testapp", "Publisher", [("id", models.AutoField(primary_key=True)), ("author", models.ForeignKey("testapp.Aardvark")), ("name", models.CharField(max_length=100))])
publisher_with_book = ModelState("testapp", "Publisher", [("id", models.AutoField(primary_key=True)), ("author", models.ForeignKey("otherapp.Book")), ("name", models.CharField(max_length=100))])
other_pony = ModelState("otherapp", "Pony", [("id", models.AutoField(primary_key=True))])
other_stable = ModelState("otherapp", "Stable", [("id", models.AutoField(primary_key=True))])
third_thing = ModelState("thirdapp", "Thing", [("id", models.AutoField(primary_key=True))])
book = ModelState("otherapp", "Book", [("id", models.AutoField(primary_key=True)), ("author", models.ForeignKey("testapp.Author")), ("title", models.CharField(max_length=200))])
book_proxy_fk = ModelState("otherapp", "Book", [("id", models.AutoField(primary_key=True)), ("author", models.ForeignKey("thirdapp.AuthorProxy")), ("title", models.CharField(max_length=200))])
book_migrations_fk = ModelState("otherapp", "Book", [("id", models.AutoField(primary_key=True)), ("author", models.ForeignKey("migrations.UnmigratedModel")), ("title", models.CharField(max_length=200))])
book_with_no_author = ModelState("otherapp", "Book", [("id", models.AutoField(primary_key=True)), ("title", models.CharField(max_length=200))])
book_with_author_renamed = ModelState("otherapp", "Book", [("id", models.AutoField(primary_key=True)), ("author", models.ForeignKey("testapp.Writer")), ("title", models.CharField(max_length=200))])
book_with_field_and_author_renamed = ModelState("otherapp", "Book", [("id", models.AutoField(primary_key=True)), ("writer", models.ForeignKey("testapp.Writer")), ("title", models.CharField(max_length=200))])
book_with_multiple_authors = ModelState("otherapp", "Book", [("id", models.AutoField(primary_key=True)), ("authors", models.ManyToManyField("testapp.Author")), ("title", models.CharField(max_length=200))])
book_with_multiple_authors_through_attribution = ModelState("otherapp", "Book", [("id", models.AutoField(primary_key=True)), ("authors", models.ManyToManyField("testapp.Author", through="otherapp.Attribution")), ("title", models.CharField(max_length=200))])
book_unique = ModelState("otherapp", "Book", [("id", models.AutoField(primary_key=True)), ("author", models.ForeignKey("testapp.Author")), ("title", models.CharField(max_length=200))], {"unique_together": {("author", "title")}})
book_unique_2 = ModelState("otherapp", "Book", [("id", models.AutoField(primary_key=True)), ("author", models.ForeignKey("testapp.Author")), ("title", models.CharField(max_length=200))], {"unique_together": {("title", "author")}})
book_unique_3 = ModelState("otherapp", "Book", [("id", models.AutoField(primary_key=True)), ("newfield", models.IntegerField()), ("author", models.ForeignKey("testapp.Author")), ("title", models.CharField(max_length=200))], {"unique_together": {("title", "newfield")}})
book_unique_4 = ModelState("otherapp", "Book", [
("id", models.AutoField(primary_key=True)),
("newfield2", models.IntegerField()),
("author", models.ForeignKey("testapp.Author")),
("title", models.CharField(max_length=200)),
], {"unique_together": {("title", "newfield2")}})
attribution = ModelState("otherapp", "Attribution", [("id", models.AutoField(primary_key=True)), ("author", models.ForeignKey("testapp.Author")), ("book", models.ForeignKey("otherapp.Book"))])
edition = ModelState("thirdapp", "Edition", [("id", models.AutoField(primary_key=True)), ("book", models.ForeignKey("otherapp.Book"))])
custom_user = ModelState("thirdapp", "CustomUser", [("id", models.AutoField(primary_key=True)), ("username", models.CharField(max_length=255))], bases=(AbstractBaseUser, ))
custom_user_no_inherit = ModelState("thirdapp", "CustomUser", [("id", models.AutoField(primary_key=True)), ("username", models.CharField(max_length=255))])
aardvark = ModelState("thirdapp", "Aardvark", [("id", models.AutoField(primary_key=True))])
aardvark_testapp = ModelState("testapp", "Aardvark", [("id", models.AutoField(primary_key=True))])
aardvark_based_on_author = ModelState("testapp", "Aardvark", [], bases=("testapp.Author", ))
aardvark_pk_fk_author = ModelState("testapp", "Aardvark", [("id", models.OneToOneField("testapp.Author", primary_key=True))])
knight = ModelState("eggs", "Knight", [("id", models.AutoField(primary_key=True))])
rabbit = ModelState("eggs", "Rabbit", [("id", models.AutoField(primary_key=True)), ("knight", models.ForeignKey("eggs.Knight")), ("parent", models.ForeignKey("eggs.Rabbit"))], {"unique_together": {("parent", "knight")}})
def repr_changes(self, changes):
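        "Returns an indented, human-readable summary of the changes, used in failure messages."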
output = ""
for app_label, migrations in sorted(changes.items()):
output += " %s:\n" % app_label
for migration in migrations:
output += " %s\n" % migration.name
for operation in migration.operations:
output += " %s\n" % operation
return output
def assertNumberMigrations(self, changes, app_label, number):
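        "Asserts that the expected number of migrations was generated for the given app."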
if len(changes.get(app_label, [])) != number:
self.fail("Incorrect number of migrations (%s) for %s (expected %s)\n%s" % (
len(changes.get(app_label, [])),
app_label,
number,
self.repr_changes(changes),
))
def assertOperationTypes(self, changes, app_label, index, types):
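        "Asserts that the migration at the given index has exactly these operation types, in order."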
if not changes.get(app_label, None):
self.fail("No migrations found for %s\n%s" % (app_label, self.repr_changes(changes)))
if len(changes[app_label]) < index + 1:
self.fail("No migration at index %s for %s\n%s" % (index, app_label, self.repr_changes(changes)))
migration = changes[app_label][index]
real_types = [operation.__class__.__name__ for operation in migration.operations]
if types != real_types:
self.fail("Operation type mismatch for %s.%s (expected %s):\n%s" % (
app_label,
migration.name,
types,
self.repr_changes(changes),
))
def assertOperationAttributes(self, changes, app_label, index, operation_index, **attrs):
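        "Asserts attribute values on a single operation of the migration at the given index."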
if not changes.get(app_label, None):
self.fail("No migrations found for %s\n%s" % (app_label, self.repr_changes(changes)))
if len(changes[app_label]) < index + 1:
self.fail("No migration at index %s for %s\n%s" % (index, app_label, self.repr_changes(changes)))
migration = changes[app_label][index]
if len(changes[app_label]) < index + 1:
self.fail("No operation at index %s for %s.%s\n%s" % (
operation_index,
app_label,
migration.name,
self.repr_changes(changes),
))
operation = migration.operations[operation_index]
for attr, value in attrs.items():
if getattr(operation, attr, None) != value:
self.fail("Attribute mismatch for %s.%s op #%s, %s (expected %r, got %r):\n%s" % (
app_label,
migration.name,
operation_index,
attr,
value,
getattr(operation, attr, None),
self.repr_changes(changes),
))
def make_project_state(self, model_states):
"Shortcut to make ProjectStates from lists of predefined models"
project_state = ProjectState()
for model_state in model_states:
project_state.add_model_state(model_state.clone())
return project_state
def test_arrange_for_graph(self):
"Tests auto-naming of migrations for graph matching."
# Make a fake graph
graph = MigrationGraph()
graph.add_node(("testapp", "0001_initial"), None)
graph.add_node(("testapp", "0002_foobar"), None)
graph.add_node(("otherapp", "0001_initial"), None)
graph.add_dependency("testapp.0002_foobar", ("testapp", "0002_foobar"), ("testapp", "0001_initial"))
graph.add_dependency("testapp.0002_foobar", ("testapp", "0002_foobar"), ("otherapp", "0001_initial"))
# Use project state to make a new migration change set
before = self.make_project_state([])
after = self.make_project_state([self.author_empty, self.other_pony, self.other_stable])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
# Run through arrange_for_graph
changes = autodetector.arrange_for_graph(changes, graph)
# Make sure there's a new name, deps match, etc.
self.assertEqual(changes["testapp"][0].name, "0003_author")
self.assertEqual(changes["testapp"][0].dependencies, [("testapp", "0002_foobar")])
self.assertEqual(changes["otherapp"][0].name, "0002_pony_stable")
self.assertEqual(changes["otherapp"][0].dependencies, [("otherapp", "0001_initial")])
def test_trim_apps(self):
"Tests that trim does not remove dependencies but does remove unwanted apps"
# Use project state to make a new migration change set
before = self.make_project_state([])
after = self.make_project_state([self.author_empty, self.other_pony, self.other_stable, self.third_thing])
autodetector = MigrationAutodetector(before, after, MigrationQuestioner(defaults={"ask_initial": True}))
changes = autodetector._detect_changes()
# Run through arrange_for_graph
graph = MigrationGraph()
changes = autodetector.arrange_for_graph(changes, graph)
changes["testapp"][0].dependencies.append(("otherapp", "0001_initial"))
changes = autodetector._trim_to_apps(changes, {"testapp"})
# Make sure there's the right set of migrations
self.assertEqual(changes["testapp"][0].name, "0001_initial")
self.assertEqual(changes["otherapp"][0].name, "0001_initial")
self.assertNotIn("thirdapp", changes)
def test_custom_migration_name(self):
"Tests custom naming of migrations for graph matching."
# Make a fake graph
graph = MigrationGraph()
graph.add_node(("testapp", "0001_initial"), None)
graph.add_node(("testapp", "0002_foobar"), None)
graph.add_node(("otherapp", "0001_initial"), None)
graph.add_dependency("testapp.0002_foobar", ("testapp", "0002_foobar"), ("testapp", "0001_initial"))
# Use project state to make a new migration change set
before = self.make_project_state([])
after = self.make_project_state([self.author_empty, self.other_pony, self.other_stable])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
# Run through arrange_for_graph
migration_name = 'custom_name'
changes = autodetector.arrange_for_graph(changes, graph, migration_name)
# Make sure there's a new name, deps match, etc.
self.assertEqual(changes["testapp"][0].name, "0003_%s" % migration_name)
self.assertEqual(changes["testapp"][0].dependencies, [("testapp", "0002_foobar")])
self.assertEqual(changes["otherapp"][0].name, "0002_%s" % migration_name)
self.assertEqual(changes["otherapp"][0].dependencies, [("otherapp", "0001_initial")])
def test_new_model(self):
"Tests autodetection of new models"
# Make state
before = self.make_project_state([])
after = self.make_project_state([self.author_empty])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
# Right number of migrations?
self.assertEqual(len(changes['testapp']), 1)
# Right number of actions?
migration = changes['testapp'][0]
self.assertEqual(len(migration.operations), 1)
# Right action?
action = migration.operations[0]
self.assertEqual(action.__class__.__name__, "CreateModel")
self.assertEqual(action.name, "Author")
def test_old_model(self):
"Tests deletion of old models"
# Make state
before = self.make_project_state([self.author_empty])
after = self.make_project_state([])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
# Right number of migrations?
self.assertEqual(len(changes['testapp']), 1)
# Right number of actions?
migration = changes['testapp'][0]
self.assertEqual(len(migration.operations), 1)
# Right action?
action = migration.operations[0]
self.assertEqual(action.__class__.__name__, "DeleteModel")
self.assertEqual(action.name, "Author")
def test_add_field(self):
"Tests autodetection of new fields"
# Make state
before = self.make_project_state([self.author_empty])
after = self.make_project_state([self.author_name])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
# Right number of migrations?
self.assertEqual(len(changes['testapp']), 1)
# Right number of actions?
migration = changes['testapp'][0]
self.assertEqual(len(migration.operations), 1)
# Right action?
action = migration.operations[0]
self.assertEqual(action.__class__.__name__, "AddField")
self.assertEqual(action.name, "name")
def test_remove_field(self):
"Tests autodetection of removed fields"
# Make state
before = self.make_project_state([self.author_name])
after = self.make_project_state([self.author_empty])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
# Right number of migrations?
self.assertEqual(len(changes['testapp']), 1)
# Right number of actions?
migration = changes['testapp'][0]
self.assertEqual(len(migration.operations), 1)
# Right action?
action = migration.operations[0]
self.assertEqual(action.__class__.__name__, "RemoveField")
self.assertEqual(action.name, "name")
def test_alter_field(self):
"Tests autodetection of new fields"
# Make state
before = self.make_project_state([self.author_name])
after = self.make_project_state([self.author_name_longer])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
# Right number of migrations?
self.assertEqual(len(changes['testapp']), 1)
# Right number of actions?
migration = changes['testapp'][0]
self.assertEqual(len(migration.operations), 1)
# Right action?
action = migration.operations[0]
self.assertEqual(action.__class__.__name__, "AlterField")
self.assertEqual(action.name, "name")
self.assertTrue(action.preserve_default)
def test_alter_field_to_not_null_with_default(self):
"#23609 - Tests autodetection of nullable to non-nullable alterations"
class CustomQuestioner(MigrationQuestioner):
def ask_not_null_alteration(self, field_name, model_name):
raise Exception("Should not have prompted for not null addition")
# Make state
before = self.make_project_state([self.author_name_null])
after = self.make_project_state([self.author_name_default])
autodetector = MigrationAutodetector(before, after, CustomQuestioner())
changes = autodetector._detect_changes()
# Right number of migrations?
self.assertEqual(len(changes['testapp']), 1)
# Right number of actions?
migration = changes['testapp'][0]
self.assertEqual(len(migration.operations), 1)
# Right action?
action = migration.operations[0]
self.assertEqual(action.__class__.__name__, "AlterField")
self.assertEqual(action.name, "name")
self.assertTrue(action.preserve_default)
self.assertEqual(action.field.default, 'Ada Lovelace')
def test_alter_field_to_not_null_without_default(self):
"#23609 - Tests autodetection of nullable to non-nullable alterations"
class CustomQuestioner(MigrationQuestioner):
def ask_not_null_alteration(self, field_name, model_name):
# Ignore for now, and let me handle existing rows with NULL
# myself (e.g. adding a RunPython or RunSQL operation in the new
# migration file before the AlterField operation)
return models.NOT_PROVIDED
# Make state
before = self.make_project_state([self.author_name_null])
after = self.make_project_state([self.author_name])
autodetector = MigrationAutodetector(before, after, CustomQuestioner())
changes = autodetector._detect_changes()
# Right number of migrations?
self.assertEqual(len(changes['testapp']), 1)
# Right number of actions?
migration = changes['testapp'][0]
self.assertEqual(len(migration.operations), 1)
# Right action?
action = migration.operations[0]
self.assertEqual(action.__class__.__name__, "AlterField")
self.assertEqual(action.name, "name")
self.assertTrue(action.preserve_default)
self.assertIs(action.field.default, models.NOT_PROVIDED)
def test_alter_field_to_not_null_oneoff_default(self):
"#23609 - Tests autodetection of nullable to non-nullable alterations"
class CustomQuestioner(MigrationQuestioner):
def ask_not_null_alteration(self, field_name, model_name):
# Provide a one-off default now (will be set on all existing rows)
return 'Some Name'
# Make state
before = self.make_project_state([self.author_name_null])
after = self.make_project_state([self.author_name])
autodetector = MigrationAutodetector(before, after, CustomQuestioner())
changes = autodetector._detect_changes()
# Right number of migrations?
self.assertEqual(len(changes['testapp']), 1)
# Right number of actions?
migration = changes['testapp'][0]
self.assertEqual(len(migration.operations), 1)
# Right action?
action = migration.operations[0]
self.assertEqual(action.__class__.__name__, "AlterField")
self.assertEqual(action.name, "name")
self.assertFalse(action.preserve_default)
self.assertEqual(action.field.default, "Some Name")
def test_rename_field(self):
"Tests autodetection of renamed fields"
# Make state
before = self.make_project_state([self.author_name])
after = self.make_project_state([self.author_name_renamed])
autodetector = MigrationAutodetector(before, after, MigrationQuestioner({"ask_rename": True}))
changes = autodetector._detect_changes()
# Check
self.assertNumberMigrations(changes, 'testapp', 1)
self.assertOperationTypes(changes, 'testapp', 0, ["RenameField"])
self.assertOperationAttributes(changes, 'testapp', 0, 0, old_name="name", new_name="names")
def test_rename_model(self):
"Tests autodetection of renamed models"
# Make state
before = self.make_project_state([self.author_with_book, self.book])
after = self.make_project_state([self.author_renamed_with_book, self.book_with_author_renamed])
autodetector = MigrationAutodetector(before, after, MigrationQuestioner({"ask_rename_model": True}))
changes = autodetector._detect_changes()
# Right number of migrations for model rename?
self.assertNumberMigrations(changes, 'testapp', 1)
# Right number of actions?
migration = changes['testapp'][0]
self.assertEqual(len(migration.operations), 1)
# Right action?
action = migration.operations[0]
self.assertEqual(action.__class__.__name__, "RenameModel")
self.assertEqual(action.old_name, "Author")
self.assertEqual(action.new_name, "Writer")
# Now that RenameModel handles related fields too, there should be
# no AlterField for the related field.
self.assertNumberMigrations(changes, 'otherapp', 0)
def test_rename_model_with_renamed_rel_field(self):
"""
Tests autodetection of renamed models while simultaneously renaming one
of the fields that relate to the renamed model.
"""
# Make state
before = self.make_project_state([self.author_with_book, self.book])
after = self.make_project_state([self.author_renamed_with_book, self.book_with_field_and_author_renamed])
autodetector = MigrationAutodetector(before, after, MigrationQuestioner({"ask_rename_model": True, "ask_rename": True}))
changes = autodetector._detect_changes()
# Right number of migrations for model rename?
self.assertNumberMigrations(changes, 'testapp', 1)
# Right number of actions?
migration = changes['testapp'][0]
self.assertEqual(len(migration.operations), 1)
# Right actions?
action = migration.operations[0]
self.assertEqual(action.__class__.__name__, "RenameModel")
self.assertEqual(action.old_name, "Author")
self.assertEqual(action.new_name, "Writer")
# Right number of migrations for related field rename?
# Alter is already taken care of.
self.assertNumberMigrations(changes, 'otherapp', 1)
# Right number of actions?
migration = changes['otherapp'][0]
self.assertEqual(len(migration.operations), 1)
# Right actions?
action = migration.operations[0]
self.assertEqual(action.__class__.__name__, "RenameField")
self.assertEqual(action.old_name, "author")
self.assertEqual(action.new_name, "writer")
def test_fk_dependency(self):
"Tests that having a ForeignKey automatically adds a dependency"
# Make state
# Note that testapp (author) has no dependencies,
# otherapp (book) depends on testapp (author),
# thirdapp (edition) depends on otherapp (book)
before = self.make_project_state([])
after = self.make_project_state([self.author_name, self.book, self.edition])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
# Right number of migrations?
self.assertEqual(len(changes['testapp']), 1)
self.assertEqual(len(changes['otherapp']), 1)
self.assertEqual(len(changes['thirdapp']), 1)
# Right number of actions?
migration1 = changes['testapp'][0]
self.assertEqual(len(migration1.operations), 1)
migration2 = changes['otherapp'][0]
self.assertEqual(len(migration2.operations), 1)
migration3 = changes['thirdapp'][0]
self.assertEqual(len(migration3.operations), 1)
# Right actions?
action = migration1.operations[0]
self.assertEqual(action.__class__.__name__, "CreateModel")
action = migration2.operations[0]
self.assertEqual(action.__class__.__name__, "CreateModel")
action = migration3.operations[0]
self.assertEqual(action.__class__.__name__, "CreateModel")
# Right dependencies?
self.assertEqual(migration1.dependencies, [])
self.assertEqual(migration2.dependencies, [("testapp", "auto_1")])
self.assertEqual(migration3.dependencies, [("otherapp", "auto_1")])
def test_proxy_fk_dependency(self):
"Tests that FK dependencies still work on proxy models"
# Make state
# Note that testapp (author) has no dependencies,
# otherapp (book) depends on testapp (authorproxy)
before = self.make_project_state([])
after = self.make_project_state([self.author_empty, self.author_proxy_third, self.book_proxy_fk])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
# Right number of migrations?
self.assertNumberMigrations(changes, 'testapp', 1)
self.assertNumberMigrations(changes, 'otherapp', 1)
self.assertNumberMigrations(changes, 'thirdapp', 1)
# Right number of actions?
# Right actions?
self.assertOperationTypes(changes, 'otherapp', 0, ["CreateModel"])
self.assertOperationTypes(changes, 'testapp', 0, ["CreateModel"])
self.assertOperationTypes(changes, 'thirdapp', 0, ["CreateModel"])
# Right dependencies?
self.assertEqual(changes['testapp'][0].dependencies, [])
self.assertEqual(changes['otherapp'][0].dependencies, [("thirdapp", "auto_1")])
self.assertEqual(changes['thirdapp'][0].dependencies, [("testapp", "auto_1")])
def test_same_app_no_fk_dependency(self):
"""
Tests that a migration with a FK between two models of the same app
does not have a dependency to itself.
"""
# Make state
before = self.make_project_state([])
after = self.make_project_state([self.author_with_publisher, self.publisher])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
# Right number/type of migrations?
self.assertNumberMigrations(changes, 'testapp', 1)
self.assertOperationTypes(changes, 'testapp', 0, ["CreateModel", "CreateModel", "AddField"])
self.assertOperationAttributes(changes, "testapp", 0, 0, name="Author")
self.assertOperationAttributes(changes, "testapp", 0, 1, name="Publisher")
self.assertOperationAttributes(changes, "testapp", 0, 2, name="publisher")
# Right dependencies?
self.assertEqual(changes['testapp'][0].dependencies, [])
def test_circular_fk_dependency(self):
"""
Tests that having a circular ForeignKey dependency automatically
resolves the situation into 2 migrations on one side and 1 on the other.
"""
# Make state
before = self.make_project_state([])
after = self.make_project_state([self.author_with_book, self.book, self.publisher_with_book])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
# Right number of migrations?
self.assertNumberMigrations(changes, 'testapp', 1)
self.assertNumberMigrations(changes, 'otherapp', 2)
# Right types?
self.assertOperationTypes(changes, 'testapp', 0, ["CreateModel", "CreateModel"])
self.assertOperationTypes(changes, 'otherapp', 0, ["CreateModel"])
self.assertOperationTypes(changes, 'otherapp', 1, ["AddField"])
self.assertOperationAttributes(changes, "testapp", 0, 0, name="Author")
self.assertOperationAttributes(changes, "testapp", 0, 1, name="Publisher")
# Right dependencies?
self.assertEqual(changes['testapp'][0].dependencies, [("otherapp", "auto_1")])
self.assertEqual(changes['otherapp'][0].dependencies, [])
self.assertEqual(set(changes['otherapp'][1].dependencies), {("otherapp", "auto_1"), ("testapp", "auto_1")})
def test_same_app_circular_fk_dependency(self):
"""
        Tests that a migration with a circular FK between two models of the
        same app does not have a dependency on itself.
"""
# Make state
before = self.make_project_state([])
after = self.make_project_state([self.author_with_publisher, self.publisher_with_author])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
# Right number/type of migrations?
self.assertNumberMigrations(changes, 'testapp', 1)
self.assertOperationTypes(changes, 'testapp', 0, ["CreateModel", "CreateModel", "AddField"])
self.assertOperationAttributes(changes, "testapp", 0, 0, name="Author")
self.assertOperationAttributes(changes, "testapp", 0, 1, name="Publisher")
self.assertOperationAttributes(changes, "testapp", 0, 2, name="publisher")
# Right dependencies?
self.assertEqual(changes['testapp'][0].dependencies, [])
def test_same_app_circular_fk_dependency_and_unique_together(self):
"""
        Tests that a migration with a circular FK dependency does not try to
        create a unique_together constraint before all required fields exist.
See ticket #22275.
"""
# Make state
before = self.make_project_state([])
after = self.make_project_state([self.knight, self.rabbit])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
# Right number/type of migrations?
self.assertNumberMigrations(changes, 'eggs', 1)
self.assertOperationTypes(changes, 'eggs', 0, ["CreateModel", "CreateModel", "AlterUniqueTogether"])
self.assertFalse("unique_together" in changes['eggs'][0].operations[0].options)
self.assertFalse("unique_together" in changes['eggs'][0].operations[1].options)
# Right dependencies?
self.assertEqual(changes['eggs'][0].dependencies, [])
def test_unique_together(self):
"Tests unique_together detection"
# Make state
before = self.make_project_state([self.author_empty, self.book])
after = self.make_project_state([self.author_empty, self.book_unique])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
# Right number of migrations?
self.assertEqual(len(changes['otherapp']), 1)
# Right number of actions?
migration = changes['otherapp'][0]
self.assertEqual(len(migration.operations), 1)
# Right action?
action = migration.operations[0]
self.assertEqual(action.__class__.__name__, "AlterUniqueTogether")
self.assertEqual(action.name, "book")
self.assertEqual(action.unique_together, {("author", "title")})
def test_unique_together_no_changes(self):
"Tests that unique_togther doesn't generate a migration if no changes have been made"
# Make state
before = self.make_project_state([self.author_empty, self.book_unique])
after = self.make_project_state([self.author_empty, self.book_unique])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
# Right number of migrations?
self.assertEqual(len(changes), 0)
def test_alter_db_table_add(self):
"""Tests detection for adding db_table in model's options"""
# Make state
before = self.make_project_state([self.author_empty])
after = self.make_project_state([self.author_with_db_table_options])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
# Right number of migrations?
self.assertEqual(len(changes), 1)
# Right number of actions?
migration = changes['testapp'][0]
self.assertEqual(len(migration.operations), 1)
action = migration.operations[0]
self.assertEqual(action.__class__.__name__, "AlterModelTable")
self.assertEqual(action.name, "author")
self.assertEqual(action.table, "author_one")
def test_alter_db_table_change(self):
"Tests detection for changing db_table in model's options'"
# Make state
before = self.make_project_state([self.author_with_db_table_options])
after = self.make_project_state([self.author_with_new_db_table_options])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
# Right number of migrations?
self.assertEqual(len(changes), 1)
# Right number of actions?
migration = changes['testapp'][0]
self.assertEqual(len(migration.operations), 1)
action = migration.operations[0]
self.assertEqual(action.__class__.__name__, "AlterModelTable")
self.assertEqual(action.name, "author")
self.assertEqual(action.table, "author_two")
def test_alter_db_table_remove(self):
"""Tests detection for removing db_table in model's options"""
# Make state
before = self.make_project_state([self.author_with_db_table_options])
after = self.make_project_state([self.author_empty])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
# Right number of migrations?
self.assertEqual(len(changes), 1)
# Right number of actions?
migration = changes['testapp'][0]
self.assertEqual(len(migration.operations), 1)
action = migration.operations[0]
self.assertEqual(action.__class__.__name__, "AlterModelTable")
self.assertEqual(action.name, "author")
self.assertEqual(action.table, None)
def test_alter_db_table_no_changes(self):
"""
Tests that alter_db_table doesn't generate a migration if no changes
have been made.
"""
# Make state
before = self.make_project_state([self.author_with_db_table_options])
after = self.make_project_state([self.author_with_db_table_options])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
# Right number of migrations?
self.assertEqual(len(changes), 0)
def test_alter_db_table_with_model_change(self):
"""
        Tests that when a model is renamed, the autodetector does not create
        more than one operation.
"""
# Make state
before = self.make_project_state([self.author_with_db_table_options])
after = self.make_project_state([self.author_renamed_with_db_table_options])
autodetector = MigrationAutodetector(
before, after, MigrationQuestioner({"ask_rename_model": True})
)
changes = autodetector._detect_changes()
# Right number of migrations?
self.assertEqual(len(changes), 1)
migration = changes['testapp'][0]
self.assertEqual(len(migration.operations), 1)
def test_empty_foo_together(self):
"#23452 - Empty unique/index_togther shouldn't generate a migration."
# Explicitly testing for not specified, since this is the case after
# a CreateModel operation w/o any definition on the original model
        model_state_not_specified = ModelState("a", "model",
[("id", models.AutoField(primary_key=True))]
)
# Explicitly testing for None, since this was the issue in #23452 after
        # an AlterFooTogether operation with e.g. () as value
model_state_none = ModelState("a", "model",
[("id", models.AutoField(primary_key=True))],
{"unique_together": None, "index_together": None}
)
# Explicitly testing for the empty set, since we now always have sets.
# During removal (('col1', 'col2'),) --> () this becomes set([])
model_state_empty = ModelState("a", "model",
[("id", models.AutoField(primary_key=True))],
{"unique_together": set(), "index_together": set()}
)
def test(from_state, to_state, msg):
before = self.make_project_state([from_state])
after = self.make_project_state([to_state])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
if len(changes) > 0:
ops = ', '.join(o.__class__.__name__ for o in changes['a'][0].operations)
self.fail('Created operation(s) %s from %s' % (ops, msg))
tests = (
            (model_state_not_specified, model_state_not_specified, '"not specified" to "not specified"'),
            (model_state_not_specified, model_state_none, '"not specified" to "None"'),
            (model_state_not_specified, model_state_empty, '"not specified" to "empty"'),
            (model_state_none, model_state_not_specified, '"None" to "not specified"'),
(model_state_none, model_state_none, '"None" to "None"'),
(model_state_none, model_state_empty, '"None" to "empty"'),
            (model_state_empty, model_state_not_specified, '"empty" to "not specified"'),
(model_state_empty, model_state_none, '"empty" to "None"'),
(model_state_empty, model_state_empty, '"empty" to "empty"'),
)
for t in tests:
test(*t)
def test_unique_together_ordering(self):
"Tests that unique_together also triggers on ordering changes"
# Make state
before = self.make_project_state([self.author_empty, self.book_unique])
after = self.make_project_state([self.author_empty, self.book_unique_2])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
self.assertNumberMigrations(changes, "otherapp", 1)
self.assertOperationTypes(changes, "otherapp", 0, ["AlterUniqueTogether"])
self.assertOperationAttributes(changes, "otherapp", 0, 0, name="book", unique_together={("title", "author")})
def test_add_field_and_unique_together(self):
"Tests that added fields will be created before using them in unique together"
before = self.make_project_state([self.author_empty, self.book])
after = self.make_project_state([self.author_empty, self.book_unique_3])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
self.assertNumberMigrations(changes, "otherapp", 1)
self.assertOperationTypes(changes, "otherapp", 0, ["AddField", "AlterUniqueTogether"])
self.assertOperationAttributes(changes, "otherapp", 0, 1, name="book", unique_together={("title", "newfield")})
def test_remove_field_and_unique_together(self):
"Tests that removed fields will be removed after updating unique_together"
before = self.make_project_state([self.author_empty, self.book_unique_3])
after = self.make_project_state([self.author_empty, self.book_unique])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
self.assertNumberMigrations(changes, "otherapp", 1)
self.assertOperationTypes(changes, "otherapp", 0, ["AlterUniqueTogether", "RemoveField"])
self.assertOperationAttributes(changes, "otherapp", 0, 0, name="book", unique_together={("author", "title")})
def test_rename_field_and_unique_together(self):
"Tests that removed fields will be removed after updating unique together"
before = self.make_project_state([self.author_empty, self.book_unique_3])
after = self.make_project_state([self.author_empty, self.book_unique_4])
autodetector = MigrationAutodetector(before, after, MigrationQuestioner({"ask_rename": True}))
changes = autodetector._detect_changes()
self.assertNumberMigrations(changes, "otherapp", 1)
self.assertOperationTypes(changes, "otherapp", 0, ["RenameField", "AlterUniqueTogether"])
self.assertOperationAttributes(changes, "otherapp", 0, 1, name="book", unique_together={("title", "newfield2")})
def test_remove_index_together(self):
author_index_together = ModelState("testapp", "Author", [
("id", models.AutoField(primary_key=True)), ("name", models.CharField(max_length=200))
], {"index_together": {("id", "name")}})
before = self.make_project_state([author_index_together])
after = self.make_project_state([self.author_name])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
self.assertNumberMigrations(changes, "testapp", 1)
self.assertOperationTypes(changes, "testapp", 0, ["AlterIndexTogether"])
self.assertOperationAttributes(changes, "testapp", 0, 0, name="author", index_together=set())
def test_remove_unique_together(self):
author_unique_together = ModelState("testapp", "Author", [
("id", models.AutoField(primary_key=True)), ("name", models.CharField(max_length=200))
], {"unique_together": {("id", "name")}})
before = self.make_project_state([author_unique_together])
after = self.make_project_state([self.author_name])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
self.assertNumberMigrations(changes, "testapp", 1)
self.assertOperationTypes(changes, "testapp", 0, ["AlterUniqueTogether"])
self.assertOperationAttributes(changes, "testapp", 0, 0, name="author", unique_together=set())
def test_proxy(self):
"Tests that the autodetector correctly deals with proxy models"
# First, we test adding a proxy model
before = self.make_project_state([self.author_empty])
after = self.make_project_state([self.author_empty, self.author_proxy])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
# Right number of migrations?
self.assertNumberMigrations(changes, "testapp", 1)
self.assertOperationTypes(changes, "testapp", 0, ["CreateModel"])
self.assertOperationAttributes(changes, "testapp", 0, 0, name="AuthorProxy", options={"proxy": True})
# Now, we test turning a proxy model into a non-proxy model
# It should delete the proxy then make the real one
before = self.make_project_state([self.author_empty, self.author_proxy])
after = self.make_project_state([self.author_empty, self.author_proxy_notproxy])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
# Right number of migrations?
self.assertNumberMigrations(changes, "testapp", 1)
self.assertOperationTypes(changes, "testapp", 0, ["DeleteModel", "CreateModel"])
self.assertOperationAttributes(changes, "testapp", 0, 0, name="AuthorProxy")
self.assertOperationAttributes(changes, "testapp", 0, 1, name="AuthorProxy", options={})
def test_proxy_custom_pk(self):
"#23415 - The autodetector must correctly deal with custom FK on proxy models."
# First, we test the default pk field name
before = self.make_project_state([])
after = self.make_project_state([self.author_empty, self.author_proxy_third, self.book_proxy_fk])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
# The field name the FK on the book model points to
self.assertEqual(changes['otherapp'][0].operations[0].fields[2][1].rel.field_name, 'id')
# Now, we test the custom pk field name
before = self.make_project_state([])
after = self.make_project_state([self.author_custom_pk, self.author_proxy_third, self.book_proxy_fk])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
# The field name the FK on the book model points to
self.assertEqual(changes['otherapp'][0].operations[0].fields[2][1].rel.field_name, 'pk_field')
def test_unmanaged(self):
"Tests that the autodetector correctly deals with managed models"
# First, we test adding an unmanaged model
before = self.make_project_state([self.author_empty])
after = self.make_project_state([self.author_empty, self.author_unmanaged])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
# Right number of migrations?
self.assertNumberMigrations(changes, 'testapp', 1)
self.assertOperationTypes(changes, 'testapp', 0, ["CreateModel"])
self.assertOperationAttributes(changes, 'testapp', 0, 0, name="AuthorUnmanaged")
self.assertEqual(changes['testapp'][0].operations[0].options['managed'], False)
# Now, we test turning an unmanaged model into a managed model
before = self.make_project_state([self.author_empty, self.author_unmanaged])
after = self.make_project_state([self.author_empty, self.author_unmanaged_managed])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
# Right number of migrations?
self.assertNumberMigrations(changes, 'testapp', 1)
self.assertOperationTypes(changes, 'testapp', 0, ["DeleteModel", "CreateModel"])
self.assertOperationAttributes(changes, 'testapp', 0, 0, name="AuthorUnmanaged")
self.assertOperationAttributes(changes, 'testapp', 0, 1, name="AuthorUnmanaged")
def test_unmanaged_custom_pk(self):
"#23415 - The autodetector must correctly deal with custom FK on unmanaged models."
# First, we test the default pk field name
before = self.make_project_state([])
after = self.make_project_state([self.author_unmanaged_default_pk, self.book])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
# The field name the FK on the book model points to
self.assertEqual(changes['otherapp'][0].operations[0].fields[2][1].rel.field_name, 'id')
# Now, we test the custom pk field name
before = self.make_project_state([])
after = self.make_project_state([self.author_unmanaged_custom_pk, self.book])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
# The field name the FK on the book model points to
self.assertEqual(changes['otherapp'][0].operations[0].fields[2][1].rel.field_name, 'pk_field')
@override_settings(AUTH_USER_MODEL="thirdapp.CustomUser")
def test_swappable(self):
before = self.make_project_state([self.custom_user])
after = self.make_project_state([self.custom_user, self.author_with_custom_user])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
# Right number of migrations?
self.assertEqual(len(changes), 1)
# Check the dependency is correct
migration = changes['testapp'][0]
self.assertEqual(migration.dependencies, [("__setting__", "AUTH_USER_MODEL")])
def test_add_field_with_default(self):
"""
Adding a field with a default should work (#22030).
"""
# Make state
before = self.make_project_state([self.author_empty])
after = self.make_project_state([self.author_name_default])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
# Right number of migrations?
self.assertEqual(len(changes['testapp']), 1)
# Right number of actions?
migration = changes['testapp'][0]
self.assertEqual(len(migration.operations), 1)
# Right action?
action = migration.operations[0]
self.assertEqual(action.__class__.__name__, "AddField")
self.assertEqual(action.name, "name")
def test_custom_deconstructable(self):
"""
Two instances which deconstruct to the same value aren't considered a
change.
"""
before = self.make_project_state([self.author_name_deconstructable_1])
after = self.make_project_state([self.author_name_deconstructable_2])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
self.assertEqual(changes, {})
def test_deconstruct_field_kwarg(self):
"""
Field instances are handled correctly by nested deconstruction.
"""
before = self.make_project_state([self.author_name_deconstructable_3])
after = self.make_project_state([self.author_name_deconstructable_4])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
self.assertEqual(changes, {})
def test_deconstruct_type(self):
"""
		#22951 -- Uninstantiated classes with deconstruct() are correctly returned
by deep_deconstruct during serialization.
"""
author = ModelState(
"testapp",
"Author",
[
("id", models.AutoField(primary_key=True)),
("name", models.CharField(
max_length=200,
# IntegerField intentionally not instantiated.
default=models.IntegerField,
))
],
)
# Make state
before = self.make_project_state([])
after = self.make_project_state([author])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
self.assertNumberMigrations(changes, 'testapp', 1)
self.assertOperationTypes(changes, 'testapp', 0, ["CreateModel"])
def test_replace_string_with_foreignkey(self):
"""
		Adding an FK in the same "spot" as a deleted CharField should work (#22300).
"""
# Make state
before = self.make_project_state([self.author_with_publisher_string])
after = self.make_project_state([self.author_with_publisher, self.publisher])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
# Right result?
self.assertNumberMigrations(changes, 'testapp', 1)
self.assertOperationTypes(changes, 'testapp', 0, ["CreateModel", "RemoveField", "AddField"])
self.assertOperationAttributes(changes, 'testapp', 0, 0, name="Publisher")
self.assertOperationAttributes(changes, 'testapp', 0, 1, name="publisher_name")
self.assertOperationAttributes(changes, 'testapp', 0, 2, name="publisher")
def test_foreign_key_removed_before_target_model(self):
"""
Removing an FK and the model it targets in the same change must remove
the FK field before the model to maintain consistency.
"""
before = self.make_project_state([self.author_with_publisher, self.publisher])
after = self.make_project_state([self.author_name]) # removes both the model and FK
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
# Right number of migrations?
self.assertEqual(len(changes['testapp']), 1)
# Right number of actions?
migration = changes['testapp'][0]
self.assertEqual(len(migration.operations), 2)
# Right actions in right order?
action = migration.operations[0]
self.assertEqual(action.__class__.__name__, "RemoveField")
self.assertEqual(action.name, "publisher")
action = migration.operations[1]
self.assertEqual(action.__class__.__name__, "DeleteModel")
self.assertEqual(action.name, "Publisher")
def test_add_many_to_many(self):
"""
Adding a ManyToManyField should not prompt for a default (#22435).
"""
class CustomQuestioner(MigrationQuestioner):
def ask_not_null_addition(self, field_name, model_name):
raise Exception("Should not have prompted for not null addition")
before = self.make_project_state([self.author_empty, self.publisher])
# Add ManyToManyField to author model
after = self.make_project_state([self.author_with_m2m, self.publisher])
autodetector = MigrationAutodetector(before, after, CustomQuestioner())
changes = autodetector._detect_changes()
# Right number of migrations?
self.assertEqual(len(changes['testapp']), 1)
migration = changes['testapp'][0]
# Right actions in right order?
self.assertEqual(len(migration.operations), 1)
action = migration.operations[0]
self.assertEqual(action.__class__.__name__, "AddField")
self.assertEqual(action.name, "publishers")
def test_create_with_through_model(self):
"""
		Adding an m2m with a through model and the models that use it should
be ordered correctly.
"""
before = self.make_project_state([])
after = self.make_project_state([self.author_with_m2m_through, self.publisher, self.contract])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
# Right number of migrations?
self.assertNumberMigrations(changes, "testapp", 1)
# Right actions in right order?
self.assertOperationTypes(changes, "testapp", 0, ["CreateModel", "CreateModel", "CreateModel", "AddField", "AddField"])
def test_many_to_many_removed_before_through_model(self):
"""
Removing a ManyToManyField and the "through" model in the same change must remove
the field before the model to maintain consistency.
"""
before = self.make_project_state([self.book_with_multiple_authors_through_attribution, self.author_name, self.attribution])
after = self.make_project_state([self.book_with_no_author, self.author_name]) # removes both the through model and ManyToMany
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
# Right number of migrations?
self.assertEqual(len(changes['otherapp']), 1)
# Right number of actions?
migration = changes['otherapp'][0]
self.assertEqual(len(migration.operations), 4)
# Right actions in right order?
# The first two are because we can't optimise RemoveField
# into DeleteModel reliably.
action = migration.operations[0]
self.assertEqual(action.__class__.__name__, "RemoveField")
self.assertEqual(action.name, "author")
action = migration.operations[1]
self.assertEqual(action.__class__.__name__, "RemoveField")
self.assertEqual(action.name, "book")
action = migration.operations[2]
self.assertEqual(action.__class__.__name__, "RemoveField")
self.assertEqual(action.name, "authors")
action = migration.operations[3]
self.assertEqual(action.__class__.__name__, "DeleteModel")
self.assertEqual(action.name, "Attribution")
def test_many_to_many_removed_before_through_model_2(self):
"""
Removing a model that contains a ManyToManyField and the
"through" model in the same change must remove
the field before the model to maintain consistency.
"""
before = self.make_project_state([self.book_with_multiple_authors_through_attribution, self.author_name, self.attribution])
after = self.make_project_state([self.author_name]) # removes both the through model and ManyToMany
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
# Right number of migrations?
self.assertNumberMigrations(changes, 'otherapp', 1)
# Right number of actions?
self.assertOperationTypes(changes, 'otherapp', 0, ["RemoveField", "RemoveField", "RemoveField", "DeleteModel", "DeleteModel"])
def test_m2m_w_through_multistep_remove(self):
"""
		A model with an m2m field that specifies a "through" model cannot be removed in the same
		migration as that through model, as the schema would pass through an inconsistent state.
The autodetector should produce two migrations to avoid this issue.
"""
before = self.make_project_state([self.author_with_m2m_through, self.publisher, self.contract])
after = self.make_project_state([self.publisher])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
# Right number of migrations?
self.assertNumberMigrations(changes, "testapp", 1)
# Right actions in right order?
self.assertOperationTypes(changes, "testapp", 0, ["RemoveField", "RemoveField", "DeleteModel", "RemoveField", "DeleteModel"])
# Actions touching the right stuff?
self.assertOperationAttributes(changes, "testapp", 0, 0, name="publishers")
self.assertOperationAttributes(changes, "testapp", 0, 1, name="author")
self.assertOperationAttributes(changes, "testapp", 0, 2, name="Author")
self.assertOperationAttributes(changes, "testapp", 0, 3, name="publisher")
self.assertOperationAttributes(changes, "testapp", 0, 4, name="Contract")
def test_non_circular_foreignkey_dependency_removal(self):
"""
If two models with a ForeignKey from one to the other are removed at the same time,
the autodetector should remove them in the correct order.
"""
before = self.make_project_state([self.author_with_publisher, self.publisher_with_author])
after = self.make_project_state([])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
# Right number of migrations?
self.assertNumberMigrations(changes, "testapp", 1)
# Right actions in right order?
self.assertOperationTypes(changes, "testapp", 0, ["RemoveField", "RemoveField", "DeleteModel", "DeleteModel"])
def test_alter_model_options(self):
"""
Changing a model's options should make a change
"""
before = self.make_project_state([self.author_empty])
after = self.make_project_state([self.author_with_options])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
# Right number of migrations?
self.assertNumberMigrations(changes, "testapp", 1)
# Right actions in right order?
self.assertOperationTypes(changes, "testapp", 0, ["AlterModelOptions"])
# Changing them back to empty should also make a change
before = self.make_project_state([self.author_with_options])
after = self.make_project_state([self.author_empty])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
self.assertNumberMigrations(changes, "testapp", 1)
self.assertOperationTypes(changes, "testapp", 0, ["AlterModelOptions"])
def test_alter_model_options_proxy(self):
"""
Changing a proxy model's options should also make a change
"""
before = self.make_project_state([self.author_proxy, self.author_empty])
after = self.make_project_state([self.author_proxy_options, self.author_empty])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
# Right number of migrations?
self.assertNumberMigrations(changes, "testapp", 1)
# Right actions in right order?
self.assertOperationTypes(changes, "testapp", 0, ["AlterModelOptions"])
def test_set_alter_order_with_respect_to(self):
"Tests that setting order_with_respect_to adds a field"
# Make state
before = self.make_project_state([self.book, self.author_with_book])
after = self.make_project_state([self.book, self.author_with_book_order_wrt])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
# Right number of migrations?
self.assertNumberMigrations(changes, 'testapp', 1)
self.assertOperationTypes(changes, 'testapp', 0, ["AlterOrderWithRespectTo"])
self.assertOperationAttributes(changes, 'testapp', 0, 0, name="author", order_with_respect_to="book")
def test_add_alter_order_with_respect_to(self):
"""
Tests that setting order_with_respect_to when adding the FK too
does things in the right order.
"""
# Make state
before = self.make_project_state([self.author_name])
after = self.make_project_state([self.book, self.author_with_book_order_wrt])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
# Right number of migrations?
self.assertNumberMigrations(changes, 'testapp', 1)
self.assertOperationTypes(changes, 'testapp', 0, ["AddField", "AlterOrderWithRespectTo"])
self.assertOperationAttributes(changes, 'testapp', 0, 0, model_name="author", name="book")
self.assertOperationAttributes(changes, 'testapp', 0, 1, name="author", order_with_respect_to="book")
def test_remove_alter_order_with_respect_to(self):
"""
Tests that removing order_with_respect_to when removing the FK too
does things in the right order.
"""
# Make state
before = self.make_project_state([self.book, self.author_with_book_order_wrt])
after = self.make_project_state([self.author_name])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
# Right number of migrations?
self.assertNumberMigrations(changes, 'testapp', 1)
self.assertOperationTypes(changes, 'testapp', 0, ["AlterOrderWithRespectTo", "RemoveField"])
self.assertOperationAttributes(changes, 'testapp', 0, 0, name="author", order_with_respect_to=None)
self.assertOperationAttributes(changes, 'testapp', 0, 1, model_name="author", name="book")
def test_add_model_order_with_respect_to(self):
"""
Tests that setting order_with_respect_to when adding the whole model
does things in the right order.
"""
# Make state
before = self.make_project_state([])
after = self.make_project_state([self.book, self.author_with_book_order_wrt])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
# Right number of migrations?
self.assertNumberMigrations(changes, 'testapp', 1)
self.assertOperationTypes(changes, 'testapp', 0, ["CreateModel", "AlterOrderWithRespectTo"])
self.assertOperationAttributes(changes, 'testapp', 0, 1, name="author", order_with_respect_to="book")
# Make sure the _order field is not in the CreateModel fields
self.assertNotIn("_order", [name for name, field in changes['testapp'][0].operations[0].fields])
def test_swappable_first_inheritance(self):
"""
Tests that swappable models get their CreateModel first.
"""
# Make state
before = self.make_project_state([])
after = self.make_project_state([self.custom_user, self.aardvark])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
# Right number of migrations?
self.assertNumberMigrations(changes, 'thirdapp', 1)
self.assertOperationTypes(changes, 'thirdapp', 0, ["CreateModel", "CreateModel"])
self.assertOperationAttributes(changes, 'thirdapp', 0, 0, name="CustomUser")
self.assertOperationAttributes(changes, 'thirdapp', 0, 1, name="Aardvark")
@override_settings(AUTH_USER_MODEL="thirdapp.CustomUser")
def test_swappable_first_setting(self):
"""
Tests that swappable models get their CreateModel first.
"""
# Make state
before = self.make_project_state([])
after = self.make_project_state([self.custom_user_no_inherit, self.aardvark])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
# Right number of migrations?
self.assertNumberMigrations(changes, 'thirdapp', 1)
self.assertOperationTypes(changes, 'thirdapp', 0, ["CreateModel", "CreateModel"])
self.assertOperationAttributes(changes, 'thirdapp', 0, 0, name="CustomUser")
self.assertOperationAttributes(changes, 'thirdapp', 0, 1, name="Aardvark")
def test_bases_first(self):
"""
Tests that bases of other models come first.
"""
# Make state
before = self.make_project_state([])
after = self.make_project_state([self.aardvark_based_on_author, self.author_name])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
# Right number of migrations?
self.assertNumberMigrations(changes, 'testapp', 1)
self.assertOperationTypes(changes, 'testapp', 0, ["CreateModel", "CreateModel"])
self.assertOperationAttributes(changes, 'testapp', 0, 0, name="Author")
self.assertOperationAttributes(changes, 'testapp', 0, 1, name="Aardvark")
def test_proxy_bases_first(self):
"""
Tests that bases of proxies come first.
"""
# Make state
before = self.make_project_state([])
after = self.make_project_state([self.author_empty, self.author_proxy, self.author_proxy_proxy])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
# Right number of migrations?
self.assertNumberMigrations(changes, 'testapp', 1)
self.assertOperationTypes(changes, 'testapp', 0, ["CreateModel", "CreateModel", "CreateModel"])
self.assertOperationAttributes(changes, 'testapp', 0, 0, name="Author")
self.assertOperationAttributes(changes, 'testapp', 0, 1, name="AuthorProxy")
self.assertOperationAttributes(changes, 'testapp', 0, 2, name="AAuthorProxyProxy")
def test_pk_fk_included(self):
"""
Tests that a relation used as the primary key is kept as part of CreateModel.
"""
# Make state
before = self.make_project_state([])
after = self.make_project_state([self.aardvark_pk_fk_author, self.author_name])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
# Right number of migrations?
self.assertNumberMigrations(changes, 'testapp', 1)
self.assertOperationTypes(changes, 'testapp', 0, ["CreateModel", "CreateModel"])
self.assertOperationAttributes(changes, 'testapp', 0, 0, name="Author")
self.assertOperationAttributes(changes, 'testapp', 0, 1, name="Aardvark")
def test_first_dependency(self):
"""
Tests that a dependency to an app with no migrations uses __first__.
"""
# Load graph
loader = MigrationLoader(connection)
# Make state
before = self.make_project_state([])
after = self.make_project_state([self.book_migrations_fk])
after.real_apps = ["migrations"]
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes(graph=loader.graph)
# Right number of migrations?
self.assertNumberMigrations(changes, 'otherapp', 1)
self.assertOperationTypes(changes, 'otherapp', 0, ["CreateModel"])
self.assertOperationAttributes(changes, 'otherapp', 0, 0, name="Book")
# Right dependencies?
self.assertEqual(changes['otherapp'][0].dependencies, [("migrations", "__first__")])
@override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations"})
def test_last_dependency(self):
"""
Tests that a dependency to an app with existing migrations uses the
last migration of that app.
"""
# Load graph
loader = MigrationLoader(connection)
# Make state
before = self.make_project_state([])
after = self.make_project_state([self.book_migrations_fk])
after.real_apps = ["migrations"]
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes(graph=loader.graph)
# Right number of migrations?
self.assertNumberMigrations(changes, 'otherapp', 1)
self.assertOperationTypes(changes, 'otherapp', 0, ["CreateModel"])
self.assertOperationAttributes(changes, 'otherapp', 0, 0, name="Book")
# Right dependencies?
self.assertEqual(changes['otherapp'][0].dependencies, [("migrations", "0002_second")])
def test_alter_fk_before_model_deletion(self):
"""
Tests that ForeignKeys are altered _before_ the model they used to
refer to are deleted.
"""
# Make state
before = self.make_project_state([self.author_name, self.publisher_with_author])
after = self.make_project_state([self.aardvark_testapp, self.publisher_with_aardvark_author])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
# Right number of migrations?
self.assertNumberMigrations(changes, 'testapp', 1)
self.assertOperationTypes(changes, 'testapp', 0, ["CreateModel", "AlterField", "DeleteModel"])
self.assertOperationAttributes(changes, 'testapp', 0, 0, name="Aardvark")
self.assertOperationAttributes(changes, 'testapp', 0, 1, name="author")
self.assertOperationAttributes(changes, 'testapp', 0, 2, name="Author")
def test_fk_dependency_other_app(self):
"""
Tests that ForeignKeys correctly depend on other apps' models (#23100)
"""
# Make state
before = self.make_project_state([self.author_name, self.book])
after = self.make_project_state([self.author_with_book, self.book])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
# Right number of migrations?
self.assertNumberMigrations(changes, 'testapp', 1)
self.assertOperationTypes(changes, 'testapp', 0, ["AddField"])
self.assertOperationAttributes(changes, 'testapp', 0, 0, name="book")
self.assertEqual(changes['testapp'][0].dependencies, [("otherapp", "__first__")])
def test_circular_dependency_mixed_addcreate(self):
"""
Tests that the dependency resolver knows to put all CreateModel
before AddField and not become unsolvable (#23315)
"""
address = ModelState("a", "Address", [
("id", models.AutoField(primary_key=True)),
("country", models.ForeignKey("b.DeliveryCountry")),
])
person = ModelState("a", "Person", [
("id", models.AutoField(primary_key=True)),
])
apackage = ModelState("b", "APackage", [
("id", models.AutoField(primary_key=True)),
("person", models.ForeignKey("a.Person")),
])
country = ModelState("b", "DeliveryCountry", [
("id", models.AutoField(primary_key=True)),
])
# Make state
before = self.make_project_state([])
after = self.make_project_state([address, person, apackage, country])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
# Right number of migrations?
self.assertNumberMigrations(changes, 'a', 2)
self.assertNumberMigrations(changes, 'b', 1)
self.assertOperationTypes(changes, 'a', 0, ["CreateModel", "CreateModel"])
self.assertOperationTypes(changes, 'a', 1, ["AddField"])
self.assertOperationTypes(changes, 'b', 0, ["CreateModel", "CreateModel"])
@override_settings(AUTH_USER_MODEL="a.Tenant")
def test_circular_dependency_swappable(self):
"""
Tests that the dependency resolver knows to explicitly resolve
swappable models (#23322)
"""
tenant = ModelState("a", "Tenant", [
("id", models.AutoField(primary_key=True)),
("primary_address", models.ForeignKey("b.Address"))],
bases=(AbstractBaseUser, )
)
address = ModelState("b", "Address", [
("id", models.AutoField(primary_key=True)),
("tenant", models.ForeignKey(settings.AUTH_USER_MODEL)),
])
# Make state
before = self.make_project_state([])
after = self.make_project_state([address, tenant])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
# Right number of migrations?
self.assertNumberMigrations(changes, 'a', 2)
self.assertNumberMigrations(changes, 'b', 1)
self.assertOperationTypes(changes, 'a', 0, ["CreateModel"])
self.assertOperationTypes(changes, 'a', 1, ["AddField"])
self.assertOperationTypes(changes, 'b', 0, ["CreateModel"])
self.assertEqual(changes['a'][0].dependencies, [])
self.assertEqual(set(changes['a'][1].dependencies), {('a', 'auto_1'), ('b', 'auto_1')})
self.assertEqual(changes['b'][0].dependencies, [('__setting__', 'AUTH_USER_MODEL')])
@override_settings(AUTH_USER_MODEL="b.Tenant")
def test_circular_dependency_swappable2(self):
"""
Tests that the dependency resolver knows to explicitly resolve
swappable models but with the swappable not being the first migrated
model (#23322)
"""
address = ModelState("a", "Address", [
("id", models.AutoField(primary_key=True)),
("tenant", models.ForeignKey(settings.AUTH_USER_MODEL)),
])
tenant = ModelState("b", "Tenant", [
("id", models.AutoField(primary_key=True)),
("primary_address", models.ForeignKey("a.Address"))],
bases=(AbstractBaseUser, )
)
# Make state
before = self.make_project_state([])
after = self.make_project_state([address, tenant])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
# Right number of migrations?
self.assertNumberMigrations(changes, 'a', 2)
self.assertNumberMigrations(changes, 'b', 1)
self.assertOperationTypes(changes, 'a', 0, ["CreateModel"])
self.assertOperationTypes(changes, 'a', 1, ["AddField"])
self.assertOperationTypes(changes, 'b', 0, ["CreateModel"])
self.assertEqual(changes['a'][0].dependencies, [])
self.assertEqual(set(changes['a'][1].dependencies), {('__setting__', 'AUTH_USER_MODEL'), ('a', 'auto_1')})
self.assertEqual(changes['b'][0].dependencies, [('a', 'auto_1')])
@override_settings(AUTH_USER_MODEL="a.Person")
def test_circular_dependency_swappable_self(self):
"""
Tests that the dependency resolver knows to explicitly resolve
swappable models (#23322)
"""
person = ModelState("a", "Person", [
("id", models.AutoField(primary_key=True)),
("parent1", models.ForeignKey(settings.AUTH_USER_MODEL, related_name='children'))
])
# Make state
before = self.make_project_state([])
after = self.make_project_state([person])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
# Right number of migrations?
self.assertNumberMigrations(changes, 'a', 1)
self.assertOperationTypes(changes, 'a', 0, ["CreateModel"])
self.assertEqual(changes['a'][0].dependencies, [])
| bsd-3-clause | 1,305,360,902,814,706,000 | 54.190572 | 273 | 0.650724 | false |
bworrell/mixbox | mixbox/parser.py | 1 | 6500 | # Copyright (c) 2015, The MITRE Corporation. All rights reserved.
# See LICENSE.txt for complete terms.
from abc import ABCMeta, abstractmethod
from distutils.version import StrictVersion
from .exceptions import ignored
from .xml import get_etree_root, get_schemaloc_pairs
class UnknownVersionError(Exception):
"""A parsed document contains no version information."""
pass
class UnsupportedVersionError(Exception):
"""A parsed document is a version unsupported by the parser."""
def __init__(self, message, expected=None, found=None):
super(UnsupportedVersionError, self).__init__(message)
self.expected = expected
self.found = found
class UnsupportedRootElementError(Exception):
"""A parsed document contains an unsupported root element."""
def __init__(self, message, expected=None, found=None):
super(UnsupportedRootElementError, self).__init__(message)
self.expected = expected
self.found = found
class EntityParser(object):
__metaclass__ = ABCMeta
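	# Illustrative sketch (not part of the original mixbox module): a concrete parser
	# implements the abstract methods declared below. The names used here (PackageParser,
	# Package, the example namespace) are hypothetical placeholders.
	#
	#     class PackageParser(EntityParser):
	#         def supported_tags(self):
	#             return ["{http://example.com}Package"]
	#
	#         def get_version(self, root):
	#             return root.attrib.get("version")
	#
	#         def supported_versions(self, tag):
	#             return ["1.0", "1.1"]
	#
	#         def get_entity_class(self, tag):
	#             return Package  # an Entity subclass exposing _binding_class
	#
	#     entity = PackageParser().parse_xml("package.xml")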
@abstractmethod
def supported_tags(self):
"""Return an iterable of supported document root tags (strings)."""
@abstractmethod
def get_version(self, root):
"""Return as a string the schema version used by the document root."""
@abstractmethod
def supported_versions(self, tag):
"""Return all the supported versions for a given tag."""
@abstractmethod
def get_entity_class(self, tag):
"""Return the class to be returned as the result of parsing."""
def _get_version(self, root):
"""Return the version of the root element passed in.
Args:
root (etree.Element)
Returns:
distutils.StrictVersion
Raises:
UnknownVersionError
"""
# Note: STIX and MAEC use a "version" attribute. To support CybOX, a
# subclass will need to combine "cybox_major_version",
# "cybox_minor_version", and "cybox_update_version".
version = self.get_version(root)
if version:
return StrictVersion(version)
raise UnknownVersionError(
"Unable to determine the version of the input document. No "
"version information found on the root element."
)
def _check_version(self, root):
"""Ensure the root element is a supported version.
Args:
root (etree.Element)
Raises:
UnsupportedVersionError
"""
version = self._get_version(root)
supported = [StrictVersion(x) for x in
self.supported_versions(root.tag)]
if version in supported:
return
error = "Document version ({0}) not in supported versions ({1})"
raise UnsupportedVersionError(
message=error.format(version, supported),
expected=supported,
found=version
)
def _check_root_tag(self, root):
"""Check that the XML element tree has a supported root element.
Args:
root (etree.Element)
Raises:
UnsupportedRootElementError
"""
supported = self.supported_tags()
if root.tag in supported:
return
error = "Document root element ({0}) not one of ({1})"
raise UnsupportedRootElementError(
message=error.format(root.tag, supported),
expected=supported,
found=root.tag,
)
def parse_xml_to_obj(self, xml_file, check_version=True, check_root=True,
encoding=None):
"""Creates a STIX binding object from the supplied xml file.
Args:
xml_file: A filename/path or a file-like object representing a STIX
instance document
check_version: Inspect the version before parsing.
check_root: Inspect the root element before parsing.
encoding: The character encoding of the input `xml_file`.
Raises:
.UnknownVersionError: If `check_version` is ``True`` and `xml_file`
does not contain STIX version information.
.UnsupportedVersionError: If `check_version` is ``False`` and
`xml_file` contains an unsupported STIX version.
.UnsupportedRootElement: If `check_root` is ``True`` and `xml_file`
contains an invalid root element.
"""
root = get_etree_root(xml_file, encoding=encoding)
if check_root:
self._check_root_tag(root)
if check_version:
self._check_version(root)
entity_class = self.get_entity_class(root.tag)
entity_obj = entity_class._binding_class.factory()
entity_obj.build(root)
return entity_obj
def parse_xml(self, xml_file, check_version=True, check_root=True,
encoding=None):
"""Creates a python-stix STIXPackage object from the supplied xml_file.
Args:
xml_file: A filename/path or a file-like object representing a STIX
instance document
check_version: Inspect the version before parsing.
check_root: Inspect the root element before parsing.
encoding: The character encoding of the input `xml_file`. If
``None``, an attempt will be made to determine the input
character encoding.
Raises:
.UnknownVersionError: If `check_version` is ``True`` and `xml_file`
does not contain STIX version information.
.UnsupportedVersionError: If `check_version` is ``False`` and
`xml_file` contains an unsupported STIX version.
.UnsupportedRootElement: If `check_root` is ``True`` and `xml_file`
contains an invalid root element.
"""
entity_obj = self.parse_xml_to_obj(
xml_file=xml_file,
check_version=check_version,
check_root=check_root,
encoding=encoding
)
root = get_etree_root(xml_file, encoding=encoding)
entity = self.get_entity_class(root.tag).from_obj(entity_obj)
# Save the parsed nsmap and schemalocations onto the parsed Entity
entity.__input_namespaces__ = dict(root.nsmap.iteritems())
with ignored(KeyError):
pairs = get_schemaloc_pairs(root)
entity.__input_schemalocations__ = dict(pairs)
return entity
| bsd-3-clause | 6,696,311,111,732,359,000 | 32.333333 | 79 | 0.612154 | false |
urska19/LVR-sat | src/graphColoring.py | 1 | 1511 | #!/usr/bin/env python
from logConstructs import *
def graph_coloring(graph, colors):
if len(graph) < colors:
return False
variables=[[None for i in range(colors)] for j in range(len(graph))]
#construct variables
for i in range(len(graph)):
for j in range(colors):
variables[i][j] = Var("X" + str(i) + "" + str(j))
#construct first sub formula - node must be colored
main_formula = And(map(lambda x: Or(x), variables))
#construct second sub formula - node must be colored with one color
subformula = []
for k in range(colors - 1):
for l in range(k + 1, colors):
subformula += map(lambda x: Not(And([x[k], x[l]])), variables)
#construct third sub formula - connected nodes have different colors
for i in range(len(graph) - 1):
for j in range(i + 1, len(graph)):
if graph[i][j] == 1:
subformula += map(lambda x: Not(And([variables[i][x], variables[j][x]])), range(colors))
main_formula = And(subformula + main_formula.clause)
return main_formula.simplify()
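# Illustrative usage sketch (not part of the original module; it relies on the And/Or/Not/Var
# classes imported from logConstructs above): colour a triangle graph with 3 colours.
# The adjacency matrix is symmetric, with 1 marking an edge. The returned formula is
# satisfiable iff the graph can be coloured with the given number of colours; a satisfying
# assignment can be mapped back to node colours with processResult below.
#
#     triangle = [[0, 1, 1],
#                 [1, 0, 1],
#                 [1, 1, 0]]
#     formula = graph_coloring(triangle, 3)  # hand the formula to a SAT solver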
def printGraph(graph):
result = ""
for i in range(len(graph)):
for j in range(len(graph)):
result += " " + str(graph[i][j]) + " "
result += "\n"
return result
def processResult(result):
mappings = {}
for key in result:
node = key[1]
color = key[2]
if result[key]:
mappings[int(node)] = int(color)
return mappings
| bsd-3-clause | 3,202,995,941,548,603,000 | 24.610169 | 104 | 0.578425 | false |
dschien/PyExcelModelingHelper | excel_helper/__init__.py | 1 | 33092 | import csv
import datetime
import importlib
import sys
from abc import abstractmethod
from collections import defaultdict
from typing import Dict, List, Set
import numpy as np
import pandas as pd
from dateutil import relativedelta as rdelta
import logging
from functools import partial
from xlrd import xldate_as_tuple
import calendar
from scipy.interpolate import interp1d
import json
__author__ = 'schien'
import pkg_resources # part of setuptools
version = pkg_resources.require("excel-modelling-helper")[0].version
param_name_map_v1 = {'variable': 'name', 'scenario': 'source_scenarios_string', 'module': 'module_name',
'distribution': 'distribution_name', 'param 1': 'param_a', 'param 2': 'param_b',
'param 3': 'param_c',
'unit': '', 'CAGR': 'cagr', 'ref date': 'ref_date', 'label': '', 'tags': '', 'comment': '',
'source': ''}
param_name_map_v2 = {'CAGR': 'cagr',
'comment': '',
'label': '',
'mean growth': 'growth_factor',
'param': '',
'ref date': 'ref_date',
'ref value': '',
'scenario': 'source_scenarios_string',
'source': '',
'tags': '',
'type': '',
'unit': '',
'variability growth': 'ef_growth_factor',
'initial_value_proportional_variation': '',
'variable': 'name'}
param_name_maps = {1: param_name_map_v1, 2: param_name_map_v2}
# logger.basicConfig(level=logger.DEBUG)
logger = logging.getLogger(__name__)
class DistributionFunctionGenerator(object):
module: str
distribution: str
param_a: str
param_b: str
param_c: str
def __init__(self, module_name=None, distribution_name=None, param_a: float = None,
param_b: float = None, param_c: float = None, size=None, **kwargs):
"""
Instantiate a new object.
:param module_name:
:param distribution_name:
:param param_a:
:param param_b:
:param param_c:
:param size:
:param kwargs: can contain key "sample_mean_value" with bool value
"""
self.kwargs = kwargs
self.size = size
self.module_name = module_name
self.distribution_name = distribution_name
self.sample_mean_value = kwargs.get('sample_mean_value', False)
# prepare function arguments
if distribution_name == 'choice':
if type(param_a) == str:
tokens = param_a.split(',')
params = [float(token.strip()) for token in tokens]
self.random_function_params = [np.array(params, dtype=np.float)]
else:
self.random_function_params = [np.array([i for i in [param_a, param_b, param_c] if i], dtype=np.float)]
logger.debug(f'setting function params for choice distribution {self.random_function_params}')
else:
self.random_function_params = [i for i in [param_a, param_b, param_c] if i not in [None, ""]]
def get_mean(self, distribution_function):
"""Get the mean value for a distribution.
		If the distribution function is one of [normal, uniform, choice, triangular], the analytic value is calculated.
		Otherwise, a sample is drawn from the distribution and its mean is returned.
:param distribution_function:
:return: the mean as a scalar
"""
name = self.distribution_name
params = self.random_function_params
if name == 'normal':
return params[0]
if name == 'uniform':
return (params[0] + params[1]) / 2.
if name == 'choice':
return params[0].mean()
if name == 'triangular':
return (params[0] + params[1] + params[2]) / 3.
return distribution_function().mean()
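	# Illustrative sketch (assumption, not part of the original module): for a generator
	# built with module_name='numpy.random', distribution_name='uniform', param_a=2 and
	# param_b=4, get_mean returns (2 + 4) / 2 == 3.0 analytically, without drawing a sample.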
def generate_values(self, *args, **kwargs):
"""
		Generate a sample of values by sampling from a distribution. The size of the sample can be overridden with the 'size' kwarg.
		If `self.sample_mean_value == True` the sample will contain the mean value repeated "size" times.
:param args:
:param kwargs:
:return: sample as vector of given size
"""
sample_size = kwargs.get('size', self.size)
f = self.instantiate_distribution_function(self.module_name, self.distribution_name)
distribution_function = partial(f, *self.random_function_params, size=sample_size)
if self.sample_mean_value:
sample = np.full(sample_size, self.get_mean(distribution_function))
else:
sample = distribution_function()
return sample
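	# Illustrative usage sketch (assumption, not part of the original module):
	#
	#     gen = DistributionFunctionGenerator(module_name='numpy.random',
	#                                         distribution_name='normal',
	#                                         param_a=10, param_b=2, size=1000)
	#     sample = gen.generate_values()       # ndarray of 1000 draws, mean 10, std 2
	#     small = gen.generate_values(size=5)  # the 'size' kwarg overrides the default
	#
	# Constructing the generator with sample_mean_value=True instead yields an array
	# filled with the (analytic, where available) mean value.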
@staticmethod
def instantiate_distribution_function(module_name, distribution_name):
module = importlib.import_module(module_name)
func = getattr(module, distribution_name)
return func
class Parameter(object):
"""
A single parameter
"""
version: int
name: str
unit: str
comment: str
source: str
scenario: str
processes: Dict[str, List]
"optional comma-separated list of tags"
tags: str
def __init__(self, name, tags=None, source_scenarios_string: str = None, unit: str = None,
comment: str = None, source: str = None, version=None,
**kwargs):
# The source definition of scenarios. A comma-separated list
self.version = version
self.source = source
self.comment = comment
self.unit = unit
self.source_scenarios_string = source_scenarios_string
self.tags = tags
self.name = name
self.scenario = None
self.cache = None
# track the usages of this parameter per process as a list of process-specific variable names that are backed by this parameter
self.processes = defaultdict(list)
self.kwargs = kwargs
def __call__(self, settings=None, *args, **kwargs):
"""
		Samples from a parameter. Values are cached, so every subsequent call returns the same sample.
		@todo confusing interface that accepts 'settings' and kwargs at the same time;
		worse, 'use_time_series' must be present in the settings dict
:param args:
:param kwargs:
:return:
"""
if self.cache is None:
kwargs['name'] = self.name
kwargs['unit'] = self.unit
kwargs['tags'] = self.tags
kwargs['scenario'] = self.scenario
if not settings:
settings = {}
common_args = {'size': settings.get('sample_size', 1),
'sample_mean_value': settings.get('sample_mean_value', False)}
common_args.update(**self.kwargs)
if settings.get('use_time_series', False):
if self.version == 2:
generator = GrowthTimeSeriesGenerator(**common_args, times=settings['times'])
else:
generator = ConstantUncertaintyExponentialGrowthTimeSeriesGenerator(**common_args,
times=settings['times'])
else:
generator = DistributionFunctionGenerator(**common_args)
self.cache = generator.generate_values(*args, **kwargs)
return self.cache
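	# Illustrative usage sketch (assumption, not part of the original module):
	#
	#     p = Parameter('energy_intensity', unit='kWh',
	#                   module_name='numpy.random', distribution_name='uniform',
	#                   param_a=0.1, param_b=0.3)
	#     values = p({'sample_size': 100})  # 100 draws; repeated calls return the cache
	#
	# With settings={'use_time_series': True, 'times': <monthly DatetimeIndex>, ...}
	# a time-indexed pandas Series is produced by the time-series generators below.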
def add_usage(self, process_name, variable_name):
# add the name of a variable of a process model that is backed by this parameter
self.processes[process_name].append(variable_name)
class GrowthTimeSeriesGenerator(DistributionFunctionGenerator):
ref_date: str
# of the mean values
# the type of growth ['exp']
# growth_function_type: str
# of the error function
variance: str
# error function growth rate
ef_growth_factor: str
def __init__(self, times=None, size=None, index_names=None, ref_date=None, *args, **kwargs):
super().__init__(*args, **kwargs)
self.ref_date = ref_date if ref_date else None
self.times = times
self.size = size
iterables = [times, range(0, size)]
self._multi_index = pd.MultiIndex.from_product(iterables, names=index_names)
assert type(times.freq) == pd.tseries.offsets.MonthBegin, 'Time index must have monthly frequency'
def generate_values(self, *args, **kwargs):
"""
Instantiate a random variable and apply annual growth factors.
:return:
"""
assert 'ref value' in self.kwargs
# 1. Generate $\mu$
start_date = self.times[0].to_pydatetime()
end_date = self.times[-1].to_pydatetime()
ref_date = self.ref_date
if not ref_date:
raise Exception(f"Ref date not set for variable {kwargs['name']}")
mu = self.generate_mu(end_date, ref_date, start_date)
# 3. Generate $\sigma$
## Prepare array with growth values $\sigma$
if self.sample_mean_value:
sigma = np.zeros((len(self.times), self.size))
else:
if self.kwargs['type'] == 'interp':
def get_date(record):
return datetime.datetime.strptime(record[0], "%Y-%m-%d")
ref_value_ = sorted(json.loads(self.kwargs['ref value'].strip()).items(), key=get_date)
intial_value = ref_value_[0][1]
else:
intial_value = float(self.kwargs['ref value'])
variability_ = intial_value * self.kwargs['initial_value_proportional_variation']
logger.debug(f'sampling random distribution with parameters -{variability_}, 0, {variability_}')
sigma = np.random.triangular(-1 * variability_, 0, variability_, (len(self.times), self.size))
# logger.debug(ref_date.strftime("%b %d %Y"))
## 4. Prepare growth array for $\alpha_{sigma}$
alpha_sigma = growth_coefficients(start_date,
end_date,
ref_date,
self.kwargs['ef_growth_factor'], 1)
### 5. Prepare DataFrame
iterables = [self.times, range(self.size)]
index_names = ['time', 'samples']
_multi_index = pd.MultiIndex.from_product(iterables, names=index_names)
# logger.debug(start_date)
# logger.debug(end_date)
from dateutil import relativedelta
r = relativedelta.relativedelta(end_date, start_date)
months = r.years * 12 + r.months + 1
name = kwargs['name']
## Apply growth to $\sigma$ and add $\sigma$ to $\mu$
# logger.debug(sigma.size)
# logger.debug(alpha_sigma.shape)
# logger.debug(months)
unit_ = kwargs["unit"]
if not unit_:
unit_ = 'dimensionless'
series = pd.Series(((sigma * alpha_sigma) + mu.reshape(months, 1)).ravel(), index=_multi_index,
dtype=f'pint[{unit_}]')
## test if df has sub-zero values
df_sigma__dropna = series.where(series < 0).dropna()
if not df_sigma__dropna.pint.m.empty:
logger.warning(f"Negative values for parameter {name} from {df_sigma__dropna.index[0][0]}")
return series
def generate_mu(self, end_date, ref_date, start_date):
if self.kwargs['type'] == 'exp':
mu_bar = np.full(len(self.times), float(self.kwargs['ref value']))
# 2. Apply Growth to Mean Values $\alpha_{mu}$
alpha_mu = growth_coefficients(start_date,
end_date,
ref_date,
self.kwargs['growth_factor'], 1)
mu = mu_bar * alpha_mu.ravel()
mu = mu.reshape(len(self.times), 1)
return mu
if self.kwargs['type'] == 'interp':
def toTimestamp(d):
return calendar.timegm(d.timetuple())
def interpolate(growth_config: Dict[str, float], date_range, kind='linear'):
arr1 = np.array([toTimestamp(datetime.datetime.strptime(date_val, '%Y-%m-%d')) for date_val in
growth_config.keys()])
arr2 = np.array([val for val in growth_config.values()])
f = interp1d(arr1, arr2, kind=kind, fill_value='extrapolate')
return f([toTimestamp(date_val) for date_val in date_range])
ref_value_ = json.loads(self.kwargs['ref value'].strip())
return interpolate(ref_value_, self.times, self.kwargs['param'])
class ConstantUncertaintyExponentialGrowthTimeSeriesGenerator(DistributionFunctionGenerator):
cagr: str
ref_date: str
def __init__(self, cagr=None, times=None, size=None, index_names=None, ref_date=None, *args, **kwargs):
super().__init__(*args, **kwargs)
self.cagr = cagr if cagr else 0
self.ref_date = ref_date if ref_date else None
self.times = times
self.size = size
iterables = [times, range(0, size)]
self._multi_index = pd.MultiIndex.from_product(iterables, names=index_names)
assert type(times.freq) == pd.tseries.offsets.MonthBegin, 'Time index must have monthly frequency'
def generate_values(self, *args, **kwargs):
"""
Instantiate a random variable and apply annual growth factors.
:return:
"""
values = super().generate_values(*args, **kwargs, size=(len(self.times) * self.size,))
alpha = self.cagr
# @todo - fill to cover the entire time: define rules for filling first
ref_date = self.ref_date if self.ref_date else self.times[0].to_pydatetime()
# assert ref_date >= self.times[0].to_pydatetime(), 'Ref date must be within variable time span.'
# assert ref_date <= self.times[-1].to_pydatetime(), 'Ref date must be within variable time span.'
start_date = self.times[0].to_pydatetime()
end_date = self.times[-1].to_pydatetime()
a = growth_coefficients(start_date, end_date, ref_date, alpha, self.size)
values *= a.ravel()
# df = pd.DataFrame(values)
# df.columns = [kwargs['name']]
# df.set_index(self._multi_index, inplace=True)
# # @todo this is a hack to return a series with index as I don't know how to set an index and rename a series
# data_series = df.iloc[:, 0]
# data_series._metadata = kwargs
# data_series.index.rename(['time', 'samples'], inplace=True)
#
if not kwargs["unit"]:
series = pd.Series(values, index=self._multi_index, dtype='pint[dimensionless]')
else:
series = pd.Series(values, index=self._multi_index, dtype=f'pint[{kwargs["unit"]}]')
return series
def growth_coefficients(start_date, end_date, ref_date, alpha, samples):
"""
Build a matrix of growth factors according to the CAGR formula y'=y0 (1+a)^(t'-t0).
	a  - annual growth rate alpha
	t0 - reference date (the anchor for the growth)
	t' - target date (the exponent is measured in months / 12)
	y0 - reference value
	y' - grown output value
"""
start_offset = 0
if ref_date < start_date:
offset_delta = rdelta.relativedelta(start_date, ref_date)
start_offset = offset_delta.months + 12 * offset_delta.years
start_date = ref_date
end_offset = 0
if ref_date > end_date:
offset_delta = rdelta.relativedelta(ref_date, end_date)
end_offset = offset_delta.months + 12 * offset_delta.years
end_date = ref_date
delta_ar = rdelta.relativedelta(ref_date, start_date)
ar = delta_ar.months + 12 * delta_ar.years
delta_br = rdelta.relativedelta(end_date, ref_date)
br = delta_br.months + 12 * delta_br.years
# we place the ref point on the lower interval (delta_ar + 1) but let it start from 0
# in turn we let the upper interval start from 1
g = np.fromfunction(lambda i, j: np.power(1 - alpha, np.abs(i) / 12), (ar + 1, samples), dtype=float)
h = np.fromfunction(lambda i, j: np.power(1 + alpha, np.abs(i + 1) / 12), (br, samples), dtype=float)
g = np.flipud(g)
# now join the two arrays
a = np.vstack((g, h))
if start_offset > 0:
a = a[start_offset:]
if end_offset > 0:
a = a[:-end_offset]
return a
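# Illustrative worked example (assumption, not part of the original module): with a 10%
# annual growth rate and the reference date equal to the start date, the factor applied
# twelve months after the reference is (1 + 0.1)**(12 / 12) == 1.1.
#
#     import datetime
#     a = growth_coefficients(datetime.datetime(2020, 1, 1), datetime.datetime(2021, 1, 1),
#                             datetime.datetime(2020, 1, 1), 0.1, 1)
#     a.shape    # (13, 1): one row per month from start to end, inclusive
#     a[-1, 0]   # ~1.1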
class ParameterScenarioSet(object):
"""
	The set of all versions of a parameter across all scenarios.
"""
default_scenario = 'default'
"the name of the parameters in this set"
parameter_name: str
scenarios: Dict[str, Parameter]
def __init__(self):
self.scenarios = {}
def add_scenario(self, parameter: 'Parameter', scenario_name: str = default_scenario):
"""
Add a scenario for this parameter.
:param scenario_name:
:param parameter:
:return:
"""
self.scenarios[scenario_name] = parameter
def __getitem__(self, item):
return self.scenarios.__getitem__(item)
def __setitem__(self, key, value):
return self.scenarios.__setitem__(key, value)
class ParameterRepository(object):
"""
Contains all known parameter definitions (so that it is not necessary to re-read the excel file for repeat param accesses).
	The param definitions are independent of the sampling (the Parameter.__call__ method). Repeated access to __call__
	returns cached values until the cache is cleared (see clear_cache).
Internally, parameters are organised together with all the scenario variants in a single ParameterScenarioSet.
"""
parameter_sets: Dict[str, ParameterScenarioSet]
tags: Dict[str, Dict[str, Set[Parameter]]]
def __init__(self):
self.parameter_sets = defaultdict(ParameterScenarioSet)
self.tags = defaultdict(lambda: defaultdict(set))
def add_all(self, parameters: List[Parameter]):
for p in parameters:
self.add_parameter(p)
def clear_cache(self):
for p_sets in self.parameter_sets.values():
for param_name, param in p_sets.scenarios.items():
param.cache = None
def add_parameter(self, parameter: Parameter):
"""
A parameter can have several scenarios. They are specified as a comma-separated list in a string.
:param parameter:
:return:
"""
# try reading the scenarios from the function arg or from the parameter attribute
scenario_string = parameter.source_scenarios_string
if scenario_string:
_scenarios = [i.strip() for i in scenario_string.split(',')]
self.fill_missing_attributes_from_default_parameter(parameter)
else:
_scenarios = [ParameterScenarioSet.default_scenario]
for scenario in _scenarios:
parameter.scenario = scenario
self.parameter_sets[parameter.name][scenario] = parameter
# record all tags for this parameter
if parameter.tags:
_tags = [i.strip() for i in parameter.tags.split(',')]
for tag in _tags:
self.tags[tag][parameter.name].add(parameter)
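	# Illustrative usage sketch (assumption, not part of the original module):
	#
	#     repo = ParameterRepository()
	#     repo.add_parameter(Parameter('Power_TV', tags='TV',
	#                                  module_name='numpy.random',
	#                                  distribution_name='normal',
	#                                  param_a=60, param_b=5))
	#     param = repo['Power_TV']            # default scenario
	#     tv_params = repo.find_by_tag('TV')  # {'Power_TV': {<Parameter object>}}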
def fill_missing_attributes_from_default_parameter(self, param):
"""
Empty fields in Parameter definitions in scenarios are populated with default values.
E.g. in the example below, the source for the Power_TV variable in the 8K scenario would also be EnergyStar.
| name | scenario | val | tags | source |
|----------|----------|-----|--------|------------|
| Power_TV | | 60 | UD, TV | EnergyStar |
| Power_TV | 8K | 85 | new_tag| |
		**Note** tags must not differ. In the example above, the 8K scenario variable's tags value would be overwritten
		with the default value.
:param param:
:return:
"""
if not self.exists(param.name) or not ParameterScenarioSet.default_scenario in self.parameter_sets[
param.name].scenarios.keys():
logger.warning(
f'No default value for param {param.name} found.')
return
default = self.parameter_sets[param.name][ParameterScenarioSet.default_scenario]
for att_name, att_value in default.__dict__.items():
if att_name in ['unit', 'label', 'comment', 'source', 'tags']:
if att_name == 'tags' and default.tags != param.tags:
logger.warning(
f'For param {param.name} for scenarios {param.source_scenarios_string}, tags is different from default parameter tags. Overwriting with default values.')
setattr(param, att_name, att_value)
if not getattr(param, att_name):
logger.debug(
f'For param {param.name} for scenarios {param.source_scenarios_string}, populating attribute {att_name} with value {att_value} from default parameter.')
setattr(param, att_name, att_value)
def __getitem__(self, item) -> Parameter:
"""
Return the default scenario parameter for a given variable name
:param item: the name of the variable
:return:
"""
return self.get_parameter(item, scenario_name=ParameterScenarioSet.default_scenario)
def get_parameter(self, param_name, scenario_name=ParameterScenarioSet.default_scenario) -> Parameter:
if self.exists(param_name, scenario=scenario_name):
return self.parameter_sets[param_name][scenario_name]
try:
return self.parameter_sets[param_name][ParameterScenarioSet.default_scenario]
except KeyError:
raise KeyError(f"{param_name} not found")
def find_by_tag(self, tag) -> Dict[str, Set[Parameter]]:
"""
        Get all parameter entries registered for a tag
:param tag: str - single tag
:return: a dict of {param name: set[Parameter]} that contains all ParameterScenarioSets for
all parameter names with a given tag
"""
return self.tags[tag]
def exists(self, param, scenario=None) -> bool:
# if scenario is not None:
# return
present = param in self.parameter_sets.keys()
if not present:
return False
scenario = scenario if scenario else ParameterScenarioSet.default_scenario
return scenario in self.parameter_sets[param].scenarios.keys()
def list_scenarios(self, param):
if param in self.parameter_sets.keys():
return self.parameter_sets[param].scenarios.keys()
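# --- Illustrative usage sketch (added for clarity; not part of the original module).
# The Parameter keyword arguments below (tags, source, source_scenarios_string) are
# assumptions based on the attributes that add_parameter() reads; the real constructor
# signature may differ.
def _parameter_repository_usage_sketch():
    repo = ParameterRepository()
    default_p = Parameter('Power_TV', tags='UD, TV', source='EnergyStar')
    variant_p = Parameter('Power_TV', source_scenarios_string='8K')
    # register the default first so the 8K variant can inherit its empty fields
    # (see fill_missing_attributes_from_default_parameter above)
    repo.add_all([default_p, variant_p])
    repo['Power_TV']                                    # default scenario variant
    repo.get_parameter('Power_TV', scenario_name='8K')  # explicit scenario variant
    repo.find_by_tag('TV')                              # {'Power_TV': set of Parameter variants}
    return repo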
class ExcelHandler(object):
version: int
def __init__(self):
self.version = 1
@abstractmethod
def load_definitions(self, sheet_name, filename=None):
raise NotImplementedError()
class OpenpyxlExcelHandler(ExcelHandler):
def load_definitions(self, sheet_name, filename=None):
definitions = []
from openpyxl import load_workbook
wb = load_workbook(filename=filename, data_only=True)
_sheet_names = [sheet_name] if sheet_name else wb.sheetnames
for _sheet_name in _sheet_names:
sheet = wb.get_sheet_by_name(_sheet_name)
rows = list(sheet.rows)
header = [cell.value for cell in rows[0]]
if header[0] != 'variable':
continue
for row in rows[1:]:
values = {}
for key, cell in zip(header, row):
values[key] = cell.value
definitions.append(values)
return definitions
class Xlsx2CsvHandler(ExcelHandler):
def load_definitions(self, sheet_name, filename=None):
from xlsx2csv import Xlsx2csv
data = Xlsx2csv(filename, inmemory=True).convert(None, sheetid=0)
definitions = []
        _sheet_names = [sheet_name] if sheet_name else list(data.keys())
for _sheet_name in _sheet_names:
sheet = data[_sheet_name]
header = sheet.header
if header[0] != 'variable':
continue
for row in sheet.rows:
values = {}
for key, cell in zip(header, row):
values[key] = cell
definitions.append(values)
return definitions
class CSVHandler(ExcelHandler):
    def load_definitions(self, sheet_name, filename=None):
        # read the rows eagerly so the file handle can be closed right away
        with open(filename) as csv_file:
            return list(csv.DictReader(csv_file, delimiter=','))
class PandasCSVHandler(ExcelHandler):
def load_definitions(self, sheet_name, filename=None):
self.version = 2
import pandas as pd
df = pd.read_csv(filename, usecols=range(15), index_col=False, parse_dates=['ref date'],
dtype={'initial_value_proportional_variation': 'float64'},
dayfirst=True
# date_parser=lambda x: pd.datetime.strptime(x, '%d-%m-%Y')
)
df = df.dropna(subset=['variable', 'ref value'])
df.fillna("", inplace=True)
return df.to_dict(orient='records')
class XLRDExcelHandler(ExcelHandler):
version: int
@staticmethod
def get_sheet_range_bounds(filename, sheet_name):
import xlrd
wb = xlrd.open_workbook(filename)
sheet = wb.sheet_by_name(sheet_name)
rows = list(sheet.get_rows())
return len(rows)
def load_definitions(self, sheet_name, filename=None):
import xlrd
wb = xlrd.open_workbook(filename)
sh = None
definitions = []
_definition_tracking = defaultdict(dict)
_sheet_names = [sheet_name] if sheet_name else [sh.name for sh in wb.sheets()]
version = 1
try:
sheet = wb.sheet_by_name('metadata')
rows = list(sheet.get_rows())
for row in rows:
if row[0].value == 'version':
version = row[1].value
self.version = version
        except Exception:
            logger.info('could not find a sheet with name "metadata" in workbook. defaulting to version 1')
for _sheet_name in _sheet_names:
if _sheet_name == 'metadata':
continue
sheet = wb.sheet_by_name(_sheet_name)
rows = list(sheet.get_rows())
header = [cell.value for cell in rows[0]]
if header[0] != 'variable':
continue
for i, row in enumerate(rows[1:]):
values = {}
for key, cell in zip(header, row):
values[key] = cell.value
if not values['variable']:
# logger.debug(f'ignoring row {i}: {row}')
continue
if 'ref date' in values and values['ref date']:
if isinstance(values['ref date'], float):
values['ref date'] = datetime.datetime(*xldate_as_tuple(values['ref date'], wb.datemode))
if values['ref date'].day != 1:
logger.warning(f'ref date truncated to first of month for variable {values["variable"]}')
values['ref date'] = values['ref date'].replace(day=1)
else:
raise Exception(
f"{values['ref date']} for variable {values['variable']} is not a date - "
f"check spreadsheet value is a valid day of a month")
logger.debug(f'values for {values["variable"]}: {values}')
definitions.append(values)
scenario = values['scenario'] if values['scenario'] else "n/a"
if scenario in _definition_tracking[values['variable']]:
logger.error(
f"Duplicate entry for parameter "
f"with name <{values['variable']}> and <{scenario}> scenario in sheet {_sheet_name}")
raise ValueError(
f"Duplicate entry for parameter "
f"with name <{values['variable']}> and <{scenario}> scenario in sheet {_sheet_name}")
else:
_definition_tracking[values['variable']][scenario] = 1
return definitions
class XLWingsExcelHandler(ExcelHandler):
def load_definitions(self, sheet_name, filename=None):
import xlwings as xw
definitions = []
wb = xw.Book(fullname=filename)
_sheet_names = [sheet_name] if sheet_name else wb.sheets
for _sheet_name in _sheet_names:
sheet = wb.sheets[_sheet_name]
range = sheet.range('A1').expand()
rows = range.rows
header = [cell.value for cell in rows[0]]
            # check if this sheet contains parameters or if it is documentation
if header[0] != 'variable':
continue
total_rows = XLRDExcelHandler.get_sheet_range_bounds(filename, _sheet_name)
range = sheet.range((1, 1), (total_rows, len(header)))
rows = range.rows
for row in rows[1:]:
values = {}
for key, cell in zip(header, row):
values[key] = cell.value
definitions.append(values)
return definitions
class ExcelParameterLoader(object):
definition_version: int
"""Utility to populate ParameterRepository from spreadsheets.
The structure of the spreadsheets is:
| variable | ... |
|----------|-----|
| ... | ... |
    If the first row in a spreadsheet does not contain the keyword 'variable' the sheet is ignored.
"""
def __init__(self, filename, excel_handler='xlrd', **kwargs):
self.filename = filename
self.definition_version = 2
logger.info(f'Using {excel_handler} excel handler')
excel_handler_instance = None
if excel_handler == 'csv':
excel_handler_instance = CSVHandler()
if excel_handler == 'pandas':
excel_handler_instance = PandasCSVHandler()
if excel_handler == 'openpyxl':
excel_handler_instance = OpenpyxlExcelHandler()
if excel_handler == 'xlsx2csv':
excel_handler_instance = Xlsx2CsvHandler()
if excel_handler == 'xlwings':
excel_handler_instance = XLWingsExcelHandler()
if excel_handler == 'xlrd':
excel_handler_instance = XLRDExcelHandler()
self.excel_handler: ExcelHandler = excel_handler_instance
def load_parameter_definitions(self, sheet_name: str = None):
"""
Load variable text from rows in excel file.
If no spreadsheet arg is given, all spreadsheets are loaded.
The first cell in the first row in a spreadsheet must contain the keyword 'variable' or the sheet is ignored.
Any cells used as titles (with no associated value) are also added to the returned dictionary. However, the
        values associated with each header will be None. For example, given the spreadsheet:
| variable | A | B |
|----------|---|---|
| Title | | |
| Entry | 1 | 2 |
The following list of definitions would be returned:
[ { variable: 'Title', A: None, B: None }
, { variable: 'Entry', A: 1 , B: 2 }
]
:param sheet_name:
:return: list of dicts with {header col name : cell value} pairs
"""
definitions = self.excel_handler.load_definitions(sheet_name, filename=self.filename)
self.definition_version = self.excel_handler.version
return definitions
def load_into_repo(self, repository: ParameterRepository = None, sheet_name: str = None):
"""
Create a Repo from an excel file.
:param repository: the repository to load into
:param sheet_name:
:return:
"""
repository.add_all(self.load_parameters(sheet_name))
def load_parameters(self, sheet_name):
parameter_definitions = self.load_parameter_definitions(sheet_name=sheet_name)
params = []
param_name_map = param_name_maps[int(self.definition_version)]
for _def in parameter_definitions:
# substitute names from the headers with the kwargs names in the Parameter and Distributions classes
# e.g. 'variable' -> 'name', 'module' -> 'module_name', etc
parameter_kwargs_def = {}
for k, v in _def.items():
if k in param_name_map:
if param_name_map[k]:
parameter_kwargs_def[param_name_map[k]] = v
else:
parameter_kwargs_def[k] = v
name_ = parameter_kwargs_def['name']
del parameter_kwargs_def['name']
p = Parameter(name_, version=self.definition_version, **parameter_kwargs_def)
params.append(p)
return params
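# --- Illustrative usage sketch (added for clarity; not part of the original module).
# The workbook name, sheet name and parameter name below are assumptions for
# illustration only.
def _excel_loader_usage_sketch():
    repo = ParameterRepository()
    loader = ExcelParameterLoader('params.xlsx', excel_handler='xlrd')
    loader.load_into_repo(repository=repo, sheet_name='params')
    power_tv = repo['Power_TV']  # default-scenario Parameter
    return power_tv()            # Param.__call__ draws a fresh sample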
| mit | -7,804,508,916,194,763,000 | 36.434389 | 177 | 0.579808 | false |
Laufire/ec | tests/test_utils.py | 1 | 1168 | r"""
Test ec.utils.
"""
import unittest
from ec.utils import get, static, custom, walk
from support.helpers import RawInputHook as RIH, expect_exception
# Tests
class TestUtils(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_get(self):
Inputs = 'a', 1
RIH.values(*Inputs)
# test the call
assert(get('str') == Inputs[0])
assert(get('int', type=int) == Inputs[1])
def test_static(self):
@static
class cls: #pylint: disable=W0232
def method(val):
return val
assert(cls.method(1) == 1)
def test_custom(self):
_type = custom(lambda v: v%2 == 1, int, type_str='an odd number')
assert(_type(1) == 1)
assert(expect_exception(lambda: _type(2), ValueError))
assert(expect_exception(lambda: _type('a'), ValueError))
def test_walk(self):
from targets import simple
from ec import interface
interface.setBase(simple)
expected = set(['task1', 'group1', 'ex', 'hex'])
got = set()
for Member in walk(simple.__ec_member__):
got.add(Member.Config['name'])
assert(expected == got)
if __name__ == '__main__':
unittest.main()
| bsd-3-clause | 2,234,203,857,414,561,800 | 19.491228 | 69 | 0.621575 | false |
mike-perdide/gitbuster | gitbuster/q_git_delegate.py | 1 | 4118 | # q_git_delegate.py
# Copyright (C) 2010 Julien Miotte <[email protected]>
#
# This module is part of gitbuster and is released under the GPLv3
# License: http://www.gnu.org/licenses/gpl-3.0.txt
#
from PyQt4.QtCore import QDateTime, QVariant, Qt, SIGNAL, QRect
from PyQt4.QtGui import QDateTimeEdit, QItemDelegate, QLineEdit, QTextEdit
from gfbi_core import ACTOR_FIELDS, TEXT_FIELDS, TIME_FIELDS
class QGitDelegate(QItemDelegate):
def __init__(self, view):
QItemDelegate.__init__(self, None)
self._view = view
self._selected_indexes = None
def createEditor(self, parent, option, index):
if len(self._view.selectedIndexes()) > 1:
self._selected_indexes = self._view.selectedIndexes()
columns = index.model().get_git_model().get_columns()
field_name = columns[index.column()]
if field_name in TEXT_FIELDS:
editor = QTextEdit(parent)
elif field_name in ACTOR_FIELDS:
editor = QLineEdit(parent)
elif field_name in TIME_FIELDS:
editor = QDateTimeEdit(parent)
editor.setDisplayFormat("yyyy-MM-dd hh:mm:ss")
else:
return QItemDelegate.createEditor(self, parent, option,
index)
self.connect(editor, SIGNAL("returnPressed()"),
self.commitAndCloseEditor)
return editor
def updateEditorGeometry(self, editor, option, index):
"""
        Enlarge the editor for the message column so that multi-line commit messages remain visible.
"""
model = index.model()
columns = model.get_git_model().get_columns()
field_name = columns[index.column()]
if field_name != "message":
QItemDelegate.updateEditorGeometry(self, editor, option, index)
return
message = model.data(index, Qt.EditRole)
new_geometry = option.rect
new_height = 27 * message.toString().count("\n") or option.rect.height()
new_geometry.setHeight(new_height)
editor.setGeometry(new_geometry)
def commitAndCloseEditor(self):
editor = self.sender()
if isinstance(editor, (QTextEdit, QLineEdit)):
self.emit(SIGNAL("closeEditor(QWidget*)"), editor)
def setEditorData(self, editor, index):
columns = index.model().get_git_model().get_columns()
field_name = columns[index.column()]
if field_name in TEXT_FIELDS or field_name in ACTOR_FIELDS:
text = index.model().data(index, Qt.EditRole).toString()
editor.setText(text)
elif field_name in TIME_FIELDS:
timestamp, tz = index.model().data(index, Qt.EditRole)
_q_datetime = QDateTime()
_q_datetime.setTime_t(timestamp)
editor.setDateTime(_q_datetime)
def setModelData(self, editor, model, index, ignore_history=False):
model = index.model()
columns = model.get_git_model().get_columns()
field_name = columns[index.column()]
if field_name in TEXT_FIELDS:
data = QVariant(editor.toPlainText())
elif field_name in TIME_FIELDS:
data = (editor.dateTime().toTime_t(),
model.data(index, Qt.EditRole)[1])
elif field_name in ACTOR_FIELDS:
data = QVariant(editor.text())
if not ignore_history:
# Start a new history event, only for the first modified index.
# That way, an undo will undo all the selected indexes.
model.start_history_event()
model.setData(index, data)
if self._selected_indexes:
edited_column = index.column()
selected_indexes = list(self._selected_indexes)
self._selected_indexes = None
for selected_index in selected_indexes:
if model.is_first_commit(selected_index):
continue
if selected_index.column() == edited_column:
self.setModelData(editor, model, selected_index,
ignore_history=True)
| gpl-3.0 | -6,977,865,863,436,541,000 | 36.099099 | 80 | 0.602234 | false |
SetBased/py-stratum | pystratum/command/LoaderCommand.py | 1 | 3242 | """
PyStratum
"""
import configparser
from pydoc import locate
from cleo import Command, Input, Output
from pystratum.RoutineLoader import RoutineLoader
from pystratum.style.PyStratumStyle import PyStratumStyle
class LoaderCommand(Command):
"""
Command for loading stored routines into a MySQL/MsSQL/PgSQL instance from pseudo SQL files
loader
{config_file : The audit configuration file}
{file_names?* : Sources with stored routines}
"""
# ------------------------------------------------------------------------------------------------------------------
def execute(self, input_object: Input, output_object: Output) -> int:
"""
Executes this command.
"""
self.input = input_object
self.output = output_object
return self.handle()
# ------------------------------------------------------------------------------------------------------------------
def handle(self) -> int:
"""
Executes loader command.
"""
self.output = PyStratumStyle(self.input, self.output)
config_file = self.argument('config_file')
sources = self.argument('file_names')
return self.run_command(config_file, sources)
# ------------------------------------------------------------------------------------------------------------------
def run_command(self, config_file, sources) -> int:
"""
:param str config_file: The name of config file.
:param list sources: The list with source files.
"""
config = configparser.ConfigParser()
config.read(config_file)
rdbms = config.get('database', 'rdbms').lower()
loader = self.create_routine_loader(rdbms)
status = loader.main(config_file, sources)
return status
# ------------------------------------------------------------------------------------------------------------------
def create_routine_loader(self, rdbms: str) -> RoutineLoader:
"""
        Factory for creating Routine Loader objects (i.e. objects for loading stored routines into a RDBMS instance
        from (pseudo) SQL files).
:param str rdbms: The target RDBMS (i.e. mysql, mssql or pgsql).
:rtype: RoutineLoader
"""
# Note: We load modules and classes dynamically such that on the end user's system only the required modules
# and other dependencies for the targeted RDBMS must be installed (and required modules and other
# dependencies for the other RDBMSs are not required).
if rdbms == 'mysql':
module = locate('pystratum_mysql.MySqlRoutineLoader')
return module.MySqlRoutineLoader(self.output)
if rdbms == 'mssql':
module = locate('pystratum_mssql.MsSqlRoutineLoader')
return module.MsSqlRoutineLoader(self.output)
if rdbms == 'pgsql':
module = locate('pystratum_pgsql.PgSqlRoutineLoader')
return module.PgSqlRoutineLoader(self.output)
raise Exception("Unknown RDBMS '{0!s}'.".format(rdbms))
# ----------------------------------------------------------------------------------------------------------------------
| mit | -4,059,710,956,174,533,600 | 35.840909 | 120 | 0.520666 | false |
luaduck/suds | soapclient.py | 1 | 6841 |
###
# This file is part of Soap.
#
# Soap is free software; you can redistribute it and/or modify it under the
# terms of the GNU General Public License as published by the Free Software
# Foundation, version 2.
#
# Soap is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE.
#
# See the GNU General Public License for more details. You should have received
# a copy of the GNU General Public License along with Soap. If not, see
# <http://www.gnu.org/licenses/>.
###
import logging
import Queue
from libottdadmin2.trackingclient import TrackingAdminClient
from libottdadmin2.event import Event
from libottdadmin2.enums import UpdateType, UpdateFrequency
from enums import RconStatus, ConnectionState
class SoapEvents(object):
def __init__(self):
self.connected = Event()
self.disconnected = Event()
self.shutdown = Event()
self.new_game = Event()
self.new_map = Event()
# self.protocol = Event()
# self.datechanged = Event()
# self.clientinfo = Event()
self.clientjoin = Event()
self.clientupdate = Event()
self.clientquit = Event()
# self.companyinfo = Event()
# self.companynew = Event()
# self.companyupdate = Event()
# self.companyremove = Event()
# self.companystats = Event()
# self.companyeconomy = Event()
self.chat = Event()
self.rcon = Event()
self.rconend = Event()
self.console = Event()
self.cmdlogging = Event()
self.pong = Event()
class SoapClient(TrackingAdminClient):
# Initialization & miscellanious functions
def __init__(self, channel, serverid, events = None):
super(SoapClient, self).__init__(events)
self.channel = channel
self.ID = serverid
self.soapEvents = SoapEvents()
self._attachEvents()
self.logger = logging.getLogger('Soap-%s' % self.ID)
self.logger.setLevel(logging.INFO)
self.rconCommands = Queue.Queue()
self.rconNick = None
self.rconResults = {}
self.rconState = RconStatus.IDLE
self.connectionstate = ConnectionState.DISCONNECTED
self.registered = False
self.filenumber = None
self.clientPassword = None
def _attachEvents(self):
self.events.connected += self._rcvConnected
self.events.disconnected += self._rcvDisconnected
self.events.shutdown += self._rcvShutdown
self.events.new_game += self._rcvNewGame
self.events.new_map += self._rcvNewMap
self.events.clientjoin += self._rcvClientJoin
self.events.clientupdate += self._rcvClientUpdate
self.events.clientquit += self._rcvClientQuit
self.events.chat += self._rcvChat
self.events.rcon += self._rcvRcon
self.events.rconend += self._rcvRconEnd
self.events.console += self._rcvConsole
self.events.cmdlogging += self._rcvCmdLogging
self.events.pong += self._rcvPong
def copy(self):
obj = SoapClient(self._channel, self._ID, self.events)
for prop in self._settable_args:
setattr(obj, prop, getattr(self, prop, None))
return obj
# Insert connection info into parameters
def _rcvConnected(self):
self.registered = True
self.soapEvents.connected(self.channel)
def _rcvDisconnected(self, canRetry):
self.registered = False
self.soapEvents.disconnected(self.channel, canRetry)
def _rcvShutdown(self):
self.soapEvents.shutdown(self.channel)
def _rcvNewGame(self):
self.soapEvents.new_game(self.channel)
def _rcvNewMap(self, mapinfo, serverinfo):
self.soapEvents.new_map(self.channel, mapinfo, serverinfo)
def _rcvClientJoin(self, client):
self.soapEvents.clientjoin(self.channel, client)
def _rcvClientUpdate(self, old, client, changed):
self.soapEvents.clientupdate(self.channel, old, client, changed)
def _rcvClientQuit(self, client, errorcode):
self.soapEvents.clientquit(self.channel, client, errorcode)
def _rcvChat(self, **kwargs):
data = dict(kwargs.items())
data['connChan'] = self.channel
self.soapEvents.chat(**data)
def _rcvRcon(self, result, colour):
self.soapEvents.rcon(self.channel, result, colour)
def _rcvRconEnd(self, command):
self.soapEvents.rconend(self.channel, command)
def _rcvConsole(self, message, origin):
self.soapEvents.console(self.channel, origin, message)
def _rcvCmdLogging(self, **kwargs):
data = dict(kwargs.items())
data['connChan'] = self.channel
self.soapEvents.cmdlogging(**data)
def _rcvPong(self, start, end, delta):
self.soapEvents.pong(self.channel, start, end, delta)
# Store some extra info
_settable_args = TrackingAdminClient._settable_args + ['irc', 'ID', 'channel', 'debugLog']
_irc = None
_ID = 'Default'
_channel = None
_debugLog = False
@property
def channel(self):
return self._channel
@channel.setter
def channel(self, value):
self._channel = value.lower()
@property
def irc(self):
return self._irc
@irc.setter
def irc(self, value):
self._irc = value
@property
def ID(self):
return self._ID
@ID.setter
def ID(self, value):
self._ID = value.lower()
@property
def debugLog(self):
return self._debugLog
@debugLog.setter
def debugLog(self, value):
self._debugLog = value
if self._debugLog:
self.logger.setLevel(logging.DEBUG)
else:
self.logger.setLevel(logging.INFO)
update_types = [
(UpdateType.CLIENT_INFO, UpdateFrequency.AUTOMATIC),
(UpdateType.COMPANY_INFO, UpdateFrequency.AUTOMATIC),
(UpdateType.COMPANY_ECONOMY, UpdateFrequency.WEEKLY),
(UpdateType.COMPANY_STATS, UpdateFrequency.WEEKLY),
(UpdateType.CHAT, UpdateFrequency.AUTOMATIC),
(UpdateType.CONSOLE, UpdateFrequency.AUTOMATIC),
(UpdateType.LOGGING, UpdateFrequency.AUTOMATIC),
(UpdateType.DATE, UpdateFrequency.DAILY),
]
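# --- Illustrative usage sketch (added for clarity; not part of the original module).
# Shows how callers subscribe to the re-published soapEvents; the channel name,
# server id and handler signature below are assumptions for illustration only.
def _soap_client_usage_sketch():
    client = SoapClient('#openttd', 'myserver')

    def on_chat(**kwargs):
        # kwargs carries the original chat payload plus the originating channel
        # under 'connChan' (see _rcvChat above)
        client.logger.info('chat on %s: %r', kwargs.get('connChan'), kwargs)

    client.soapEvents.chat += on_chat
    return client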
| gpl-2.0 | -6,182,395,961,143,806,000 | 29.095455 | 94 | 0.597427 | false |
cloudnull/skylab | skylab/executable.py | 1 | 9051 | # =============================================================================
# Copyright [2013] [Kevin Carter]
# License Information :
# This software has no warranty, it is provided 'as is'. It is your
# responsibility to validate the behavior of the routines and its accuracy
# using the code provided. Consult the GNU General Public license for further
# details (see GNU General Public License).
# http://www.gnu.org/licenses/gpl.html
# =============================================================================
import os
import json
import skylab as sk
from skylab import arguments
from skylab import osclients
from skylab import service_module as sm
from skylab import utils
def execute():
"""Execute the Tribble Application."""
user_args = arguments.args()
# Load the local DB for rebuilding
user_args['db_path'] = sk.dbm_create(
db_path=user_args.get('db_path'),
db_name=user_args.get('db_name'),
db_key=user_args.get('name')
)
Runner(args=user_args).run_method()
class Runner(object):
"""Run the application."""
def __init__(self, args):
"""Run the application process from within the thread.
:param args: parsed cli arguments.
"""
self.client = None
self.args = args
def run_method(self):
"""Get action and run."""
action = getattr(self, self.args.get('method'))
if action is None:
raise SystemExit('Died because something bad happened.')
else:
action()
def build_lab(self):
"""Build the Openstack Lab."""
queue = None
# Load the Novaclient and Authenticate.
creds = osclients.Creds(
user=self.args.get('username'),
region=self.args.get('region'),
key=self.args.get('apikey'),
password=self.args.get('password'),
tenant=self.args.get('tenant_name'),
project_id=self.args.get('project_id'),
)
clients = osclients.Clients(creds=creds, args=self.args)
self.client = clients.nova()
self.client.authenticate()
# Set the tenant ID
with utils.IndicatorThread(debug=self.args.get('debug')):
tenant = self.client.client.tenant_id
# Set the controller Flavor ID
print('Finding Flavor Information')
controller_flavor = sm.flavor_find(
client=self.client, flavor_ram=self.args.get('controller_ram')
)
self.args['controller'] = {'flavor': controller_flavor}
# Set the compute Flavor ID
compute_flavor = sm.flavor_find(
client=self.client, flavor_ram=self.args.get('compute_ram')
)
self.args['compute'] = {'flavor': compute_flavor}
# Add up total purposed ram for the build
con_ram = self.args.get('controller_ram')
com_ram = self.args.get('compute_ram')
t_ram = con_ram + com_ram
print('Checking Build against Limits')
in_limits = sm.check_limits(
client=self.client,
tenant_id=tenant,
purposed_ram=t_ram
)
if in_limits is False:
raise sk.NotEnoughRam(
'This build is not possible, your account does not'
' have enough RAM available.'
)
print('Defining the Network')
network = sm.skylab_network(
client=self.client,
name=self.args.get('name'),
net_cidr=self.args.get('net_cidr'),
net_id=self.args.get('net_id')
)
print('Checking for Image')
image_id = sm.image_find(
client=self.client,
image=self.args.get('image')
)
nics = [
{'net-id': "00000000-0000-0000-0000-000000000000"},
{'net-id': network}
]
if self.args.get('no_private') is False:
nics.append(
{'net-id': "11111111-1111-1111-1111-111111111111"}
)
build_kwargs = {
'image': image_id,
'nics': nics
}
print('Defining the key')
if self.args.get('admin_pass') is not None:
build_kwargs['admin_pass'] = self.args['admin_pass']
if self.args.get('key_name'):
if not sm.client_key_find(self.client,
key_name=self.args['key_name']):
key_path = os.path.expanduser(self.args['key_location'])
if os.path.exists(key_path):
with open(key_path, 'rb') as key:
sm.client_key_create(
self.client,
key_name=self.args['key_name'],
public_key=key.read()
)
build_kwargs['key_name'] = self.args['key_name']
else:
build_kwargs['key_name'] = self.args['key_name']
print('Loading Work Queue')
if self.args['node_count'] < 3:
raise sk.NotEnoughNodes(
'The node count is too low. You have set "%s" but it needs'
' to be a minimum of "3".' % self.args['node_count']
)
else:
# Load our queue
queue = utils.basic_queue()
self.args['compute'].update(build_kwargs)
for c_node in range(self.args['node_count'] - 2):
c_node += 1
compute = {
'name': '%s_compute%s' % (self.args['name'], c_node)
}
compute.update(self.args['compute'])
queue.put(compute)
self.args['controller'].update(build_kwargs)
for c_node in range(2):
c_node += 1
controller = {
'name': '%s_controller%s' % (self.args['name'], c_node)
}
controller.update(self.args['controller'])
queue.put(controller)
# Prep the threader
proc_args = {'client': self.client,
'args': self.args,
'queue': queue,
'job_action': sm.bob_the_builder}
with utils.IndicatorThread(work_q=queue, debug=self.args.get('debug')):
print('Building "%s" nodes' % self.args['node_count'])
utils.worker_proc(
kwargs=proc_args
)
# Construct all the things.
with utils.IndicatorThread(work_q=queue, debug=self.args.get('debug')):
sm.construct_skylab(args=self.args)
def db_show(self):
with sk.Shelve(file_path=self.args['db_path']) as db:
print(json.dumps(dict(db), indent=4))
def lab_info(self):
def get_addr(server, net_name):
if 'addresses' in srv:
addresses = server['addresses'].get(net_name)
if addresses is not None:
for addr in addresses:
if addr.get('version') == 4:
return addr.get('addr')
else:
return None
name = self.args['name']
with sk.Shelve(file_path=self.args['db_path']) as db:
db_data = dict(db)
info = [db_data[name][srv] for srv in db_data[name].keys()
if srv.startswith(name)]
if self.args.get('server'):
pass
else:
print_data = []
skynet = '%s_address' % name
for srv in info:
print_data.append({
'id': srv.get('id'),
'name': srv.get('name'),
skynet: get_addr(server=srv, net_name=name),
'public_net': get_addr(server=srv, net_name='public')
})
sk.print_horiz_table(print_data)
def scuttle_lab(self):
with utils.IndicatorThread(debug=self.args.get('debug')):
servers = [
(server.id, server.name)
for server in sm.client_list(self.client)
if server.name.startswith(self.args['name'])
]
with sk.Shelve(file_path=self.args['db_path']) as db:
for server in servers:
if self.args['name'] in db:
lab_db = db[self.args['name']]
if lab_db.get(server[1]) is not None:
del lab_db[server[1]]
sm.client_delete(self.client, server_id=server[0])
if __name__ == '__main__':
execute()
| gpl-3.0 | -6,981,783,929,136,485,000 | 34.774704 | 79 | 0.481936 | false |
kmaglione/amo-validator | validator/testcases/javascript/obsolete.py | 1 | 3693 | from __future__ import absolute_import, print_function, unicode_literals
from .jstypes import Hook, Interfaces
OBSOLETE_EXTENSION_MANAGER = {
'on_get': 'This interface is part of the obsolete extension manager '
'interface, which is not available in any remotely modern '
'version of Firefox. It should not be referenced in any '
'code.'}
Interfaces.hook({
'nsIExtensionManager': OBSOLETE_EXTENSION_MANAGER,
'nsIUpdateItem': OBSOLETE_EXTENSION_MANAGER,
'nsIInstallLocation': OBSOLETE_EXTENSION_MANAGER,
'nsIAddonInstallListener': OBSOLETE_EXTENSION_MANAGER,
'nsIAddonUpdateCheckListener': OBSOLETE_EXTENSION_MANAGER,
})
# nsIJSON
NSIJSON_DEPRECATED = {
'err_id': ('testcases_javascript_calldefinitions', 'nsIJSON', 'deprec'),
'warning': 'Deprecated nsIJSON methods in use.',
'description':
'The `encode` and `decode` methods in nsIJSON have been '
'deprecated since Gecko 7. You should use the methods in the '
'global JSON object instead. See '
'https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference'
'/Global_Objects/JSON for more information.'}
@Interfaces.hook
class nsIJSON(Hook):
encode = {'on_call': NSIJSON_DEPRECATED}
decode = {'on_call': NSIJSON_DEPRECATED}
# nsIWebBrowserPersist
WEBBROWSERPERSIST_DEPRECATED = {
'err_id': ('testcases_javascript_call_definititions',
'webbrowserpersist'),
'warning': 'nsIWebBrowserPersist should no longer be used',
'description':
'Most nsIWebBrowserPersist methods have been '
'superseded by simpler methods in Downloads.jsm, namely '
'`Downloads.fetch` and `Downloads.createDownload`. See '
'http://mzl.la/downloads-jsm for more information.',
}
@Interfaces.hook
class nsIWebBrowserPersist(Hook):
saveChannel = {'on_call': WEBBROWSERPERSIST_DEPRECATED}
savePrivacyAwareURI = {'on_call': WEBBROWSERPERSIST_DEPRECATED}
@Hook.on_call
def saveURI(this, args, callee):
"""nsIWebBrowserPersist.saveURI requires a valid privacy context as
of Firefox 19."""
if len(args) >= 7:
load_context = args[6]
if load_context.as_primitive() is None:
this.traverser.warning(
err_id=('testcases_javascript_call_definititions',
'webbrowserpersist_saveuri'),
warning=('saveURI should not be called with a null load '
'context'),
description=(
'While nsIWebBrowserPersist.saveURI accepts null '
'in place of a privacy context, this usage is '
'acceptable only when no appropriate load '
'context exists.'))
return WEBBROWSERPERSIST_DEPRECATED
# nsITransferable
@Interfaces.hook
class nsITransferable(Hook):
@Hook.on_call
def init(this, args, callee):
if args and not args[0].as_primitive():
this.traverser.warning(
err_id=('js_entity_values', 'nsITransferable', 'init'),
warning=(
'`nsITransferable.init` should not be called with `null` '
'as its first argument'),
description=(
'Calling `nsITransferable.init()` with a null first '
'argument has the potential to leak data across '
'private browsing mode sessions. `null` is '
'appropriate only when reading data or writing data '
'which is not associated with a particular window.'))
| bsd-3-clause | -5,829,427,618,306,759,000 | 37.46875 | 78 | 0.620092 | false |
tchellomello/home-assistant | homeassistant/components/roomba/sensor.py | 1 | 1696 | """Sensor for checking the battery level of Roomba."""
import logging
from homeassistant.components.vacuum import STATE_DOCKED
from homeassistant.const import DEVICE_CLASS_BATTERY, PERCENTAGE
from homeassistant.helpers.icon import icon_for_battery_level
from .const import BLID, DOMAIN, ROOMBA_SESSION
from .irobot_base import IRobotEntity
_LOGGER = logging.getLogger(__name__)
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up the iRobot Roomba vacuum cleaner."""
domain_data = hass.data[DOMAIN][config_entry.entry_id]
roomba = domain_data[ROOMBA_SESSION]
blid = domain_data[BLID]
roomba_vac = RoombaBattery(roomba, blid)
async_add_entities([roomba_vac], True)
class RoombaBattery(IRobotEntity):
"""Class to hold Roomba Sensor basic info."""
@property
def name(self):
"""Return the name of the sensor."""
return f"{self._name} Battery Level"
@property
def unique_id(self):
"""Return the ID of this sensor."""
return f"battery_{self._blid}"
@property
def device_class(self):
"""Return the device class of the sensor."""
return DEVICE_CLASS_BATTERY
@property
def unit_of_measurement(self):
"""Return the unit_of_measurement of the device."""
return PERCENTAGE
@property
def icon(self):
"""Return the icon for the battery."""
charging = bool(self._robot_state == STATE_DOCKED)
return icon_for_battery_level(
battery_level=self._battery_level, charging=charging
)
@property
def state(self):
"""Return the state of the sensor."""
return self._battery_level
| apache-2.0 | -4,041,612,516,098,413,000 | 28.241379 | 68 | 0.665684 | false |
bcopeland/speccy | scanner.py | 1 | 9599 | #!/usr/bin/python
from multiprocessing import Value
from threading import Thread, Lock
import os
import time
class Scanner(object):
interface = None
freqlist = None
process = None
debugfs_dir = None
is_ath10k = False
lock = None
run = True
def dev_to_phy(self, dev):
f = open('/sys/class/net/%s/phy80211/name' % dev)
phy = f.read().strip()
f.close()
return phy
def freq_to_chan(self, freq):
chan = 0
if (freq >= 2412 and freq <= 2472):
chan = (freq - 2407) / 5
if (freq >= 5180 and freq <= 5900):
chan = (freq - 5000) / 5
return chan
def _find_debugfs_dir(self):
''' search debugfs for spectral_scan_ctl for this interface '''
for dirname, subd, files in os.walk('/sys/kernel/debug/ieee80211'):
if 'spectral_scan_ctl' in files:
phy = dirname.split(os.path.sep)[-2]
if phy == self.dev_to_phy(self.interface):
self.phy = phy
return dirname
return None
def _scan(self):
while self.run:
if self.is_ath10k:
self.cmd_trigger()
if self.mode.value == 1: # only in 'chanscan' mode
cmd = 'iw dev %s scan' % self.interface
self.lock.acquire()
if self.freqlist:
cmd = '%s freq %s' % (cmd, ' '.join(self.freqlist))
self.lock.release()
os.system('%s >/dev/null 2>/dev/null' % cmd)
time.sleep(.01)
def __init__(self, interface, idx=0):
self.interface = interface
self.lock = Lock()
self.phy = ""
self.idx = idx
        self.monitor_name = "ssmon%d" % self.idx  # just an arbitrary, but unique id
self.monitor_added = False
self.debugfs_dir = self._find_debugfs_dir()
if not self.debugfs_dir:
            raise Exception(
                'Unable to access spectral_scan_ctl file for interface %s' % interface)
self.is_ath10k = self.debugfs_dir.endswith("ath10k")
self.ctl_file = '%s/spectral_scan_ctl' % self.debugfs_dir
self.sample_count_file = '%s/spectral_count' % self.debugfs_dir
self.short_repeat_file = '%s/spectral_short_repeat' % self.debugfs_dir
self.cur_chan = 6
self.sample_count = 8
        self.mode = Value('i', -1)  # -1 = undef, 1 = 'chanscan', 2 = 'background scan' (also used for the noninvasive variant), 3 = 'manual'
self.channel_mode = "HT20"
self.thread = None
self.file_reader = None
self.noninvasive = False
self.set_freqs(2412, 2472, 5)
def set_freqs(self, minf, maxf, spacing):
self.lock.acquire()
self.freqlist = ['%s' % x for x in range(minf, maxf + spacing, spacing)]
self.lock.release()
# TODO restart scanner
self.freq_idx = 0;
print "freqlist: %s" % self.freqlist
def hw_setup_chanscan(self):
print "enter 'chanscan' mode: set dev type to 'managed'"
os.system("ip link set %s down" % self.interface)
os.system("iw dev %s set type managed" % self.interface)
os.system("ip link set %s up" % self.interface)
if self.is_ath10k:
self.cmd_background()
else:
self.cmd_chanscan()
def hw_setup_background(self):
if self.noninvasive:
self.dev_add_monitor()
else:
print "enter 'background' mode: set dev type to 'monitor'"
os.system("ip link set %s down" % self.interface)
os.system("iw dev %s set type monitor" % self.interface)
os.system("ip link set %s up" % self.interface)
self.cmd_setfreq(0)
self.cmd_background()
self.cmd_trigger()
def mode_chanscan(self):
if self.mode.value != 1:
self.hw_setup_chanscan()
self.mode.value = 1
def mode_background(self):
if self.mode.value != 2:
self.hw_setup_background()
self.mode.value = 2
def mode_manual(self):
self.mode.value = 3
def mode_noninvasive_background(self):
self.noninvasive = True
self.mode_background()
    def retune_up(self):  # FIXME: not safe for 5GHz / ath10k
if self.mode.value == 1: # tuning not possible in mode 'chanscan'
return
idx = (self.freq_idx + 1) % len(self.freqlist)
print "tune to freq %s" % self.freqlist[idx]
self.fix_ht40_mode()
self.cmd_setfreq(idx)
self.cmd_trigger()
    def retune_down(self):  # FIXME: not safe for 5GHz / ath10k
if self.mode.value == 1: # tuning not possible in mode 'chanscan'
return
idx = (self.freq_idx - 1) % len(self.freqlist)
print "tune to freq %s" % self.freqlist[idx]
self.fix_ht40_mode()
self.cmd_setfreq(idx)
self.cmd_trigger()
def cmd_samplecount_up(self):
self.sample_count *= 2
if self.sample_count == 256: # special case, 256 is not valid, set to last valid value
self.sample_count = 255
if self.sample_count > 255:
self.sample_count = 1
self.cmd_set_samplecount(self.sample_count)
def cmd_samplecount_down(self):
if self.sample_count == 255:
self.sample_count = 256 # undo special case, see above
self.sample_count /= 2
if self.sample_count < 1:
self.sample_count = 255
self.cmd_set_samplecount(self.sample_count)
def cmd_trigger(self):
f = open(self.ctl_file, 'w')
f.write("trigger")
f.close()
def cmd_background(self):
f = open(self.ctl_file, 'w')
f.write("background")
if self.is_ath10k:
f.write("trigger")
f.close()
def cmd_manual(self):
f = open(self.ctl_file, 'w')
f.write("manual")
f.close()
def cmd_chanscan(self):
f = open(self.ctl_file, 'w')
f.write("chanscan")
f.close()
def cmd_disable(self):
f = open(self.ctl_file, 'w')
f.write("disable")
f.close()
def cmd_set_samplecount(self, count):
print "set sample count to %d" % count
f = open(self.sample_count_file, 'w')
f.write("%s" % count)
f.close()
def set_short_repeat(self, short_repeat):
f = open(self.short_repeat_file, 'w')
f.write("%s" % short_repeat)
f.close()
def cmd_toggle_short_repeat(self):
f = open(self.short_repeat_file, 'r')
curval = int(f.read())
f.close()
if curval == 0:
curval = 1
else:
curval = 0
print "set short repeat to %d" % curval
self.set_short_repeat(curval)
def cmd_setchannel(self):
print "set channel to %d in mode %s" % (self.cur_chan, self.channel_mode)
if not self.noninvasive:
os.system("iw dev %s set channel %d %s" % (self.interface, self.cur_chan, self.channel_mode))
else: # this seems to be strange:
os.system("iw dev %s set channel %d %s" % (self.monitor_name, self.cur_chan, self.channel_mode))
def cmd_setfreq(self, idx):
freq = self.freqlist[idx]
chan = self.freq_to_chan(int(freq))
mode = self.channel_mode
print "set freq to %s (%d) in mode %s" % (freq, chan, mode)
if not self.noninvasive:
os.system("iw dev %s set freq %s %s" % (self.interface, freq, mode))
else: # this seems to be strange:
os.system("iw dev %s set freq %s %s" % (self.monitor_name, freq, mode))
self.freq_idx = idx
def fix_ht40_mode(self):
if self.channel_mode != "HT20":
# see https://wireless.wiki.kernel.org/en/developers/regulatory/processing_rules#mhz_channels1
if self.cur_chan < 8:
self.channel_mode = "HT40+"
else:
self.channel_mode = "HT40-"
def cmd_toggle_HTMode(self):
if self.channel_mode == "HT40+" or self.channel_mode == "HT40-":
self.channel_mode = "HT20"
else: # see https://wireless.wiki.kernel.org/en/developers/regulatory/processing_rules#mhz_channels1
if self.cur_chan < 8:
self.channel_mode = "HT40+"
else:
self.channel_mode = "HT40-"
self.cmd_setchannel()
self.cmd_trigger()
def dev_add_monitor(self):
if self.monitor_added:
return
print "add a monitor interface"
os.system("iw phy %s interface add %s type monitor" % (self.phy, self.monitor_name))
os.system("ip link set %s up" % self.monitor_name)
self.monitor_added = True
def dev_del_monitor(self):
if self.monitor_added:
os.system("ip link set %s down" % self.monitor_name)
os.system("iw dev %s del" % self.monitor_name)
self.monitor_added = False
def start(self):
if self.thread is None:
self.thread = Thread(target=self._scan, args=())
self.run = True
self.thread.start()
def stop(self):
if self.channel_mode != "HT20":
self.cmd_toggle_HTMode()
self.cmd_set_samplecount(8)
self.cmd_disable()
self.dev_del_monitor()
if self.thread is not None:
self.run = False
self.thread.join()
self.thread = None
self.mode.value = -1
def get_debugfs_dir(self):
return self.debugfs_dir
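# --- Illustrative usage sketch (added for clarity; not part of the original module).
# Requires root privileges and an ath9k/ath10k device that exposes spectral_scan_ctl
# in debugfs; the interface name below is an assumption for illustration only.
def _scanner_usage_sketch():
    scanner = Scanner('wlan0')
    scanner.set_freqs(2412, 2472, 5)  # 2.4 GHz channels 1..13, 5 MHz apart
    scanner.mode_background()         # tune and trigger background sampling
    scanner.start()                   # worker thread keeps re-triggering
    # ... read samples from the spectral_scan0 file in the same debugfs dir ...
    scanner.stop()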
| gpl-2.0 | 7,727,370,529,989,816,000 | 32.680702 | 114 | 0.551307 | false |
kretusmaximus/MicroFTP.py | microftp/common_view.py | 1 | 2633 | # -*- coding: utf-8 -*-
import Tkinter
import tkFont
class Window(Tkinter.Tk):
def __init__(self, parent):
Tkinter.Tk.__init__(self,parent)
self.parent = parent
def initialize(self):
pass
class Listbox(Tkinter.Listbox):
def autowidth(self, maxwidth):
f = tkFont.Font(font=self.cget("font"))
pixels = 0
for item in self.get(0, "end"):
pixels = max(pixels, f.measure(item))
# bump listbox size until all entries fit
pixels +=10
width = int(self.cget("width"))
for w in range(0, maxwidth+1, 5):
if self.winfo_reqwidth() >= pixels:
break
self.config(width=width+w)
class CommonFrame(Tkinter.Frame):
"""Common class for displaying UIs"""
def __init__(self, parent, core):
"""
:param parent: the containing Tkinter object
:param core: the MicroFTPCore object
:return:
"""
self.core = core
Tkinter.Frame.__init__(self, parent)
self.pack(expand=1, fill='both')
self.parent = parent
self.controls = {}
self.init_controls()
self.show_controls()
def init_controls(self):
pass
def show_controls(self):
pass
def hide_view(self, widgets_to_destroy):
"""Un-show widgets
:param widgets_to_destroy: list of widgets to .destroy()
:return:
"""
self.hide_controls(widgets_to_destroy)
self.pack_forget()
def hide_controls(self, widgets_to_destroy=None):
"""Hide visible widgets, leaving only blank frame
:param widgets_to_destroy: list of widgets that should be .destroy()ed rather than hidden
:return:
"""
for control in self.controls: # order isn't important
self.controls[control].pack_forget()
if widgets_to_destroy:
for widget in widgets_to_destroy:
widget.destroy()
def show_error(self, error_msg):
"""Hide everything, display an error message and a 'Back' button
:param error_msg: the message to show
:return:
"""
def reset_view(widgets_to_destroy):
self.hide_controls(widgets_to_destroy)
self.show_controls()
error_widgets = []
error_text = Tkinter.Label(self.parent, text=error_msg)
error_text.pack()
error_widgets.append(error_text)
back_button = Tkinter.Button(self.parent, text="Back", command=lambda: reset_view(error_widgets))
back_button.pack()
error_widgets.append(back_button)
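# --- Illustrative usage sketch (added for clarity; not part of the original module).
# The core argument stands in for the MicroFTPCore object and is an assumption for
# illustration only.
def _common_frame_usage_sketch(core):
    root = Window(None)
    frame = CommonFrame(root, core)
    frame.show_error("Connection refused")  # hides the controls, shows message + Back
    root.mainloop()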
| mit | 5,434,954,006,917,956,000 | 27.619565 | 105 | 0.582226 | false |
distributed-system-analysis/pbench | server/bin/pbench-server-prep-shim-002.py | 1 | 12572 | #!/usr/bin/env python3
# -*- mode: python -*-
# This script is used to prepare the tarballs that a version 002 client
# submits for further processing. It copies the tarballs and their MD5
# sums to the archive (after checking) and sets the state links, so
# that the dispatch script will pick them up and get the ball
# rolling. IOW, it does impedance matching between version 002 clients
# and the server scripts.
import os
import sys
import glob
import shutil
import selinux
import tempfile
from pathlib import Path
from pbench.common.exceptions import BadConfig
from pbench.common.logger import get_pbench_logger
from pbench.common.utils import md5sum
from pbench.server import PbenchServerConfig
from pbench.server.report import Report
from pbench.server.utils import quarantine
from pbench.server.database.models.tracker import Dataset, States, DatasetError
from pbench.server.database import init_db
_NAME_ = "pbench-server-prep-shim-002"
class Results:
def __init__(
self, nstatus="", ntotal=0, ntbs=0, nquarantined=0, ndups=0, nerrs=0,
):
self.nstatus = nstatus
self.ntotal = ntotal
self.ntbs = ntbs
self.nquarantined = nquarantined
self.ndups = ndups
self.nerrs = nerrs
def fetch_config_val(config, logger):
qdir = config.get("pbench-server", "pbench-quarantine-dir")
if not qdir:
logger.error("Failed: getconf.py pbench-quarantine-dir pbench-server")
return None, None
qdir = Path(qdir).resolve()
if not qdir.is_dir():
logger.error("Failed: {} does not exist, or is not a directory", qdir)
return None, None
# we are explicitly handling version-002 data in this shim
receive_dir_prefix = config.get("pbench-server", "pbench-receive-dir-prefix")
if not receive_dir_prefix:
logger.error("Failed: getconf.py pbench-receive-dir-prefix pbench-server")
return None, None
receive_dir = Path(f"{receive_dir_prefix}-002").resolve()
if not receive_dir.is_dir():
logger.error("Failed: {} does not exist, or is not a directory", receive_dir)
return None, None
return (qdir, receive_dir)
def qdirs_check(qdir_val, qdir, logger):
try:
os.makedirs(qdir)
except FileExistsError:
# directory already exists, ignore
pass
except Exception:
logger.exception(
"os.mkdir: Unable to create {} destination directory: {}", qdir_val, qdir,
)
return None
return qdir
def md5_check(tb, tbmd5, logger):
# read the md5sum from md5 file
try:
with tbmd5.open() as f:
archive_md5_hex_value = f.readline().split(" ")[0]
except Exception:
archive_md5_hex_value = None
logger.exception("Quarantine: Could not read {}", tbmd5)
# get hex value of the tarball's md5sum
try:
(_, archive_tar_hex_value) = md5sum(tb)
except Exception:
archive_tar_hex_value = None
logger.exception("Quarantine: Could not read {}", tb)
return (archive_md5_hex_value, archive_tar_hex_value)
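# Illustrative sketch (added for clarity; not part of the original script): md5_check()
# returns the digest recorded in the .md5 file alongside the freshly computed digest of
# the tar ball, and callers compare the two (see process_tb below). The path and logger
# arguments are assumptions for illustration only.
def _md5_check_sketch(logger):
    tb = Path("/srv/pbench/pbench-receive-dir-002/ctrl/result.tar.xz")
    recorded, computed = md5_check(tb, Path(f"{tb}.md5"), logger)
    return recorded is not None and recorded == computed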
def process_tb(config, logger, receive_dir, qdir_md5, duplicates, errors):
# Check for results that are ready for processing: version 002 agents
# upload the MD5 file as xxx.md5.check and they rename it to xxx.md5
# after they are done with MD5 checking so that's what we look for.
list_check = glob.glob(
os.path.join(receive_dir, "**", "*.tar.xz.md5"), recursive=True
)
archive = config.ARCHIVE
logger.info("{}", config.TS)
list_check.sort()
nstatus = ""
ntotal = ntbs = nerrs = nquarantined = ndups = 0
for tbmd5 in list_check:
ntotal += 1
# full pathname of tarball
tb = Path(tbmd5[0:-4])
tbmd5 = Path(tbmd5)
# directory
tbdir = tb.parent
# resultname: get the basename foo.tar.xz and then strip the .tar.xz
resultname = tb.name
controller = tbdir.name
dest = archive / controller
# Create a new dataset tracker in UPLOADING state, and add it to the
# database.
#
# NOTE: Technically, this particular workflow has no "UPLOADING" as
# the `pbench-server-prep-shim-002` command isn't invoked until the
# tarball and MD5 has been entirely uploaded by the agent via `ssh`;
# this method however can't be supported once we have authorized user
# ownership, and the model fits the server `PUT` method where an
# unexpected termination could leave a tarball in "Uploading" state.
#
# TODO: We have no way to identify an owner here, so assign it to
# the arbitrary "pbench" user. This will go away when we drop this
# component entirely in favor of PUT.
try:
dataset = Dataset.create(
controller=controller, path=resultname, owner="pbench"
)
except DatasetError as e:
logger.error(
"Unable to create dataset {}>{}: {}", controller, resultname, str(e)
)
# TODO: Should we quarantine over this? Note it's not quite
# straightforward, as quarantine() expects that the Dataset has
# been created, so we'll get a cascade failure. Since prep-shim's
# days are numbered, I'm inclined not to worry about it here.
dataset = None
if all([(dest / resultname).is_file(), (dest / tbmd5.name).is_file()]):
logger.error("{}: Duplicate: {} duplicate name", config.TS, tb)
quarantine((duplicates / controller), logger, tb, tbmd5)
ndups += 1
continue
        archive_md5_hex_value, archive_tar_hex_value = md5_check(tb, tbmd5, logger)
if any(
[
archive_tar_hex_value != archive_md5_hex_value,
archive_tar_hex_value is None,
archive_md5_hex_value is None,
]
):
logger.error("{}: Quarantined: {} failed MD5 check", config.TS, tb)
logger.info("{}: FAILED", tb.name)
logger.info("md5sum: WARNING: 1 computed checksum did NOT match")
quarantine((qdir_md5 / controller), logger, tb, tbmd5)
nquarantined += 1
continue
if dataset:
try:
dataset.md5 = archive_md5_hex_value
dataset.update()
except DatasetError as e:
                logger.warning(
"Unable to update dataset {} with md5: {}", str(dataset), str(e)
)
# make the destination directory and its TODO subdir if necessary.
try:
os.makedirs(dest / "TODO")
except FileExistsError:
# directory already exists, ignore
pass
except Exception:
logger.error("{}: Error in creating TODO directory.", config.TS)
quarantine(os.path.join(errors, controller), logger, tb, tbmd5)
nerrs += 1
continue
# First, copy the small .md5 file to the destination. That way, if
# that operation fails it will fail quickly since the file is small.
try:
shutil.copy2(tbmd5, dest)
except Exception:
logger.error(
"{}: Error in copying .md5 file to Destination path.", config.TS
)
try:
os.remove(dest / tbmd5.name)
except FileNotFoundError:
logger.error(
"{}: Warning: cleanup of copy failure failed itself.", config.TS
)
quarantine((errors / controller), logger, tb, tbmd5)
nerrs += 1
continue
# Next, mv the "large" tar ball to the destination. If the destination
# is on the same device, the move should be quick. If the destination is
# on a different device, the move will be a copy and delete, and will
# take a bit longer. If it fails, the file will NOT be at the
# destination.
try:
shutil.move(str(tb), str(dest))
except Exception:
logger.error(
"{}: Error in moving tarball file to Destination path.", config.TS
)
try:
os.remove(dest / resultname)
except FileNotFoundError:
logger.error(
"{}: Warning: cleanup of copy failure failed itself.", config.TS
)
quarantine((errors / controller), logger, tb, tbmd5)
nerrs += 1
continue
# Restore the SELinux context properly
try:
selinux.restorecon(dest / tb.name)
selinux.restorecon(dest / tbmd5.name)
except Exception as e:
# log it but do not abort
logger.error("{}: Error: 'restorecon {}', {}", config.TS, dest / tb.name, e)
# Now that we have successfully moved the tar ball and its .md5 to the
# destination, we can remove the original .md5 file.
try:
os.remove(tbmd5)
except Exception as exc:
logger.error(
"{}: Warning: cleanup of successful copy operation failed: '{}'",
config.TS,
exc,
)
try:
os.symlink((dest / resultname), (dest / "TODO" / resultname))
except Exception as exc:
logger.error("{}: Error in creation of symlink. '{}'", config.TS, exc)
# if we fail to make the link, we quarantine the (already moved)
# tarball and .md5.
quarantine(
(errors / controller), logger, (dest / tb), (dest / tbmd5),
)
nerrs += 1
continue
ntbs += 1
try:
if dataset:
dataset.advance(States.UPLOADED)
except Exception:
logger.exception("Unable to finalize {}", dataset)
nstatus = f"{nstatus}{config.TS}: processed {tb}\n"
logger.info(f"{tb.name}: OK")
return Results(
nstatus=nstatus,
ntotal=ntotal,
ntbs=ntbs,
nquarantined=nquarantined,
ndups=ndups,
nerrs=nerrs,
)
def main(cfg_name):
if not cfg_name:
print(
f"{_NAME_}: ERROR: No config file specified; set"
" _PBENCH_SERVER_CONFIG env variable or use --config <file> on the"
" command line",
file=sys.stderr,
)
return 2
try:
config = PbenchServerConfig(cfg_name)
except BadConfig as e:
print(f"{_NAME_}: {e} (config file {cfg_name})", file=sys.stderr)
return 1
logger = get_pbench_logger(_NAME_, config)
# We're going to need the Postgres DB to track dataset state, so setup
# DB access.
init_db(config, logger)
qdir, receive_dir = fetch_config_val(config, logger)
if qdir is None and receive_dir is None:
return 2
qdir_md5 = qdirs_check("quarantine", Path(qdir, "md5-002"), logger)
duplicates = qdirs_check("duplicates", Path(qdir, "duplicates-002"), logger)
# The following directory holds tarballs that are quarantined because
# of operational errors on the server. They should be retried after
# the problem is fixed: basically, move them back into the reception
# area for 002 agents and wait.
errors = qdirs_check("errors", Path(qdir, "errors-002"), logger)
if qdir_md5 is None or duplicates is None or errors is None:
return 1
counts = process_tb(config, logger, receive_dir, qdir_md5, duplicates, errors)
result_string = (
f"{config.TS}: Processed {counts.ntotal} entries,"
f" {counts.ntbs} tarballs successful,"
f" {counts.nquarantined} quarantined tarballs,"
f" {counts.ndups} duplicately-named tarballs,"
f" {counts.nerrs} errors."
)
logger.info(result_string)
# prepare and send report
with tempfile.NamedTemporaryFile(mode="w+t", dir=config.TMP) as reportfp:
reportfp.write(f"{counts.nstatus}{result_string}\n")
reportfp.seek(0)
report = Report(config, _NAME_)
report.init_report_template()
try:
report.post_status(config.timestamp(), "status", reportfp.name)
except Exception as exc:
logger.warning("Report post Unsuccesful: '{}'", exc)
return 0
if __name__ == "__main__":
cfg_name = os.environ.get("_PBENCH_SERVER_CONFIG")
status = main(cfg_name)
sys.exit(status)
| gpl-3.0 | -8,785,501,480,780,962,000 | 33.443836 | 88 | 0.595291 | false |
Donkyhotay/MoonPy | zope/rdb/tests/test_gadflyphantom.py | 1 | 2917 | ##############################################################################
#
# Copyright (c) 2002 Zope Corporation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Gadfly Database adapter phatom tests
$Id: $
"""
__docformat__ = 'restructuredtext'
import os, shutil
import tempfile, threading
from unittest import TestCase, TestSuite, main, makeSuite
from zope.rdb.gadflyda import GadflyAdapter, setGadflyRoot
class GadflyTestBase(TestCase):
def setUp(self):
TestCase.setUp(self)
self.tempdir = None
def tearDown(self):
TestCase.tearDown(self)
if self.tempdir:
shutil.rmtree(self.tempdir)
setGadflyRoot()
def getGadflyRoot(self):
if not self.tempdir:
self.tempdir = tempfile.mkdtemp('gadfly')
setGadflyRoot(self.tempdir)
return self.tempdir
def _create(self, *args):
return GadflyAdapter(*args)
def exec_sql(adapter, sql, args, fetch=False):
conn = adapter()
    cur = conn.cursor()
cur.execute(sql, args)
rows = []
if fetch:
rows = cur.fetchall()
conn.commit()
return rows
class TestPhantom(GadflyTestBase):
def setUp(self):
GadflyTestBase.setUp(self)
dir = self.getGadflyRoot()
os.mkdir(os.path.join(dir, "demo"))
self.adapter = self._create("dbi://demo")
conn = self.adapter()
cur = conn.cursor()
cur.execute("create table t1 (name varchar)")
conn.commit()
def test_Phantom(self):
adapter = self.adapter
insert = "insert into t1 values (?)"
select = "select name from t1"
delete = "delete from t1"
count = 0
for name in ('a', 'b', 'c'):
t = threading.Thread(target=exec_sql,
args=(adapter, insert, (name,)))
t.start()
t.join()
rows = exec_sql(adapter, select, args=(), fetch=True)
count += 1
self.assertEqual(len(rows), count)
exec_sql(adapter, delete, args=())
t = threading.Thread(target=exec_sql,
args=(adapter, delete, ()))
t.start()
t.join()
rows = exec_sql(adapter, select, args=(), fetch=True)
self.assertEqual(len(rows), 0)
def test_suite():
return TestSuite((
makeSuite(TestPhantom),
))
if __name__=='__main__':
main(defaultTest='test_suite')
| gpl-3.0 | -2,021,767,907,325,298,700 | 27.881188 | 78 | 0.57182 | false |
kaushik94/sympy | sympy/integrals/tests/test_integrals.py | 2 | 59772 | from sympy import (
Abs, acos, acosh, Add, And, asin, asinh, atan, Ci, cos, sinh, cosh,
tanh, Derivative, diff, DiracDelta, E, Ei, Eq, exp, erf, erfc, erfi,
EulerGamma, Expr, factor, Function, gamma, gammasimp, I, Idx, im, IndexedBase,
integrate, Interval, Lambda, LambertW, log, Matrix, Max, meijerg, Min, nan,
Ne, O, oo, pi, Piecewise, polar_lift, Poly, polygamma, Rational, re, S, Si, sign,
simplify, sin, sinc, SingularityFunction, sqrt, sstr, Sum, Symbol,
symbols, sympify, tan, trigsimp, Tuple, lerchphi, exp_polar, li, hyper
)
from sympy.core.compatibility import range
from sympy.core.expr import unchanged
from sympy.functions.elementary.complexes import periodic_argument
from sympy.functions.elementary.integers import floor
from sympy.integrals.integrals import Integral
from sympy.integrals.risch import NonElementaryIntegral
from sympy.physics import units
from sympy.utilities.pytest import raises, slow, skip, ON_TRAVIS
from sympy.utilities.randtest import verify_numerically
x, y, a, t, x_1, x_2, z, s, b = symbols('x y a t x_1 x_2 z s b')
n = Symbol('n', integer=True)
f = Function('f')
def NS(e, n=15, **options):
return sstr(sympify(e).evalf(n, **options), full_prec=True)
def test_principal_value():
g = 1 / x
assert Integral(g, (x, -oo, oo)).principal_value() == 0
assert Integral(g, (y, -oo, oo)).principal_value() == oo * sign(1 / x)
raises(ValueError, lambda: Integral(g, (x)).principal_value())
raises(ValueError, lambda: Integral(g).principal_value())
l = 1 / ((x ** 3) - 1)
assert Integral(l, (x, -oo, oo)).principal_value() == -sqrt(3)*pi/3
raises(ValueError, lambda: Integral(l, (x, -oo, 1)).principal_value())
d = 1 / (x ** 2 - 1)
assert Integral(d, (x, -oo, oo)).principal_value() == 0
assert Integral(d, (x, -2, 2)).principal_value() == -log(3)
v = x / (x ** 2 - 1)
assert Integral(v, (x, -oo, oo)).principal_value() == 0
assert Integral(v, (x, -2, 2)).principal_value() == 0
s = x ** 2 / (x ** 2 - 1)
assert Integral(s, (x, -oo, oo)).principal_value() is oo
assert Integral(s, (x, -2, 2)).principal_value() == -log(3) + 4
f = 1 / ((x ** 2 - 1) * (1 + x ** 2))
assert Integral(f, (x, -oo, oo)).principal_value() == -pi / 2
assert Integral(f, (x, -2, 2)).principal_value() == -atan(2) - log(3) / 2
def diff_test(i):
"""Return the set of symbols, s, which were used in testing that
i.diff(s) agrees with i.doit().diff(s). If there is an error then
the assertion will fail, causing the test to fail."""
syms = i.free_symbols
for s in syms:
assert (i.diff(s).doit() - i.doit().diff(s)).expand() == 0
return syms
def test_improper_integral():
assert integrate(log(x), (x, 0, 1)) == -1
assert integrate(x**(-2), (x, 1, oo)) == 1
assert integrate(1/(1 + exp(x)), (x, 0, oo)) == log(2)
def test_constructor():
# this is shared by Sum, so testing Integral's constructor
# is equivalent to testing Sum's
s1 = Integral(n, n)
assert s1.limits == (Tuple(n),)
s2 = Integral(n, (n,))
assert s2.limits == (Tuple(n),)
s3 = Integral(Sum(x, (x, 1, y)))
assert s3.limits == (Tuple(y),)
s4 = Integral(n, Tuple(n,))
assert s4.limits == (Tuple(n),)
s5 = Integral(n, (n, Interval(1, 2)))
assert s5.limits == (Tuple(n, 1, 2),)
# Testing constructor with inequalities:
s6 = Integral(n, n > 10)
assert s6.limits == (Tuple(n, 10, oo),)
s7 = Integral(n, (n > 2) & (n < 5))
assert s7.limits == (Tuple(n, 2, 5),)
def test_basics():
assert Integral(0, x) != 0
assert Integral(x, (x, 1, 1)) != 0
assert Integral(oo, x) != oo
assert Integral(S.NaN, x) is S.NaN
assert diff(Integral(y, y), x) == 0
assert diff(Integral(x, (x, 0, 1)), x) == 0
assert diff(Integral(x, x), x) == x
assert diff(Integral(t, (t, 0, x)), x) == x
e = (t + 1)**2
assert diff(integrate(e, (t, 0, x)), x) == \
diff(Integral(e, (t, 0, x)), x).doit().expand() == \
((1 + x)**2).expand()
assert diff(integrate(e, (t, 0, x)), t) == \
diff(Integral(e, (t, 0, x)), t) == 0
assert diff(integrate(e, (t, 0, x)), a) == \
diff(Integral(e, (t, 0, x)), a) == 0
assert diff(integrate(e, t), a) == diff(Integral(e, t), a) == 0
assert integrate(e, (t, a, x)).diff(x) == \
Integral(e, (t, a, x)).diff(x).doit().expand()
assert Integral(e, (t, a, x)).diff(x).doit() == ((1 + x)**2)
assert integrate(e, (t, x, a)).diff(x).doit() == (-(1 + x)**2).expand()
assert integrate(t**2, (t, x, 2*x)).diff(x) == 7*x**2
assert Integral(x, x).atoms() == {x}
assert Integral(f(x), (x, 0, 1)).atoms() == {S.Zero, S.One, x}
assert diff_test(Integral(x, (x, 3*y))) == {y}
assert diff_test(Integral(x, (a, 3*y))) == {x, y}
assert integrate(x, (x, oo, oo)) == 0 #issue 8171
assert integrate(x, (x, -oo, -oo)) == 0
# sum integral of terms
assert integrate(y + x + exp(x), x) == x*y + x**2/2 + exp(x)
assert Integral(x).is_commutative
n = Symbol('n', commutative=False)
assert Integral(n + x, x).is_commutative is False
def test_diff_wrt():
class Test(Expr):
_diff_wrt = True
is_commutative = True
t = Test()
assert integrate(t + 1, t) == t**2/2 + t
assert integrate(t + 1, (t, 0, 1)) == Rational(3, 2)
raises(ValueError, lambda: integrate(x + 1, x + 1))
raises(ValueError, lambda: integrate(x + 1, (x + 1, 0, 1)))
def test_basics_multiple():
assert diff_test(Integral(x, (x, 3*x, 5*y), (y, x, 2*x))) == {x}
assert diff_test(Integral(x, (x, 5*y), (y, x, 2*x))) == {x}
assert diff_test(Integral(x, (x, 5*y), (y, y, 2*x))) == {x, y}
assert diff_test(Integral(y, y, x)) == {x, y}
assert diff_test(Integral(y*x, x, y)) == {x, y}
assert diff_test(Integral(x + y, y, (y, 1, x))) == {x}
assert diff_test(Integral(x + y, (x, x, y), (y, y, x))) == {x, y}
def test_conjugate_transpose():
A, B = symbols("A B", commutative=False)
x = Symbol("x", complex=True)
p = Integral(A*B, (x,))
assert p.adjoint().doit() == p.doit().adjoint()
assert p.conjugate().doit() == p.doit().conjugate()
assert p.transpose().doit() == p.doit().transpose()
x = Symbol("x", real=True)
p = Integral(A*B, (x,))
assert p.adjoint().doit() == p.doit().adjoint()
assert p.conjugate().doit() == p.doit().conjugate()
assert p.transpose().doit() == p.doit().transpose()
def test_integration():
assert integrate(0, (t, 0, x)) == 0
assert integrate(3, (t, 0, x)) == 3*x
assert integrate(t, (t, 0, x)) == x**2/2
assert integrate(3*t, (t, 0, x)) == 3*x**2/2
assert integrate(3*t**2, (t, 0, x)) == x**3
assert integrate(1/t, (t, 1, x)) == log(x)
assert integrate(-1/t**2, (t, 1, x)) == 1/x - 1
assert integrate(t**2 + 5*t - 8, (t, 0, x)) == x**3/3 + 5*x**2/2 - 8*x
assert integrate(x**2, x) == x**3/3
assert integrate((3*t*x)**5, x) == (3*t)**5 * x**6 / 6
b = Symbol("b")
c = Symbol("c")
assert integrate(a*t, (t, 0, x)) == a*x**2/2
assert integrate(a*t**4, (t, 0, x)) == a*x**5/5
assert integrate(a*t**2 + b*t + c, (t, 0, x)) == a*x**3/3 + b*x**2/2 + c*x
def test_multiple_integration():
assert integrate((x**2)*(y**2), (x, 0, 1), (y, -1, 2)) == Rational(1)
assert integrate((y**2)*(x**2), x, y) == Rational(1, 9)*(x**3)*(y**3)
assert integrate(1/(x + 3)/(1 + x)**3, x) == \
log(3 + x)*Rational(-1, 8) + log(1 + x)*Rational(1, 8) + x/(4 + 8*x + 4*x**2)
assert integrate(sin(x*y)*y, (x, 0, 1), (y, 0, 1)) == -sin(1) + 1
def test_issue_3532():
assert integrate(exp(-x), (x, 0, oo)) == 1
def test_issue_3560():
assert integrate(sqrt(x)**3, x) == 2*sqrt(x)**5/5
assert integrate(sqrt(x), x) == 2*sqrt(x)**3/3
assert integrate(1/sqrt(x)**3, x) == -2/sqrt(x)
def test_integrate_poly():
p = Poly(x + x**2*y + y**3, x, y)
qx = integrate(p, x)
qy = integrate(p, y)
assert isinstance(qx, Poly) is True
assert isinstance(qy, Poly) is True
assert qx.gens == (x, y)
assert qy.gens == (x, y)
assert qx.as_expr() == x**2/2 + x**3*y/3 + x*y**3
assert qy.as_expr() == x*y + x**2*y**2/2 + y**4/4
def test_integrate_poly_defined():
p = Poly(x + x**2*y + y**3, x, y)
Qx = integrate(p, (x, 0, 1))
Qy = integrate(p, (y, 0, pi))
assert isinstance(Qx, Poly) is True
assert isinstance(Qy, Poly) is True
assert Qx.gens == (y,)
assert Qy.gens == (x,)
assert Qx.as_expr() == S.Half + y/3 + y**3
assert Qy.as_expr() == pi**4/4 + pi*x + pi**2*x**2/2
def test_integrate_omit_var():
y = Symbol('y')
assert integrate(x) == x**2/2
raises(ValueError, lambda: integrate(2))
raises(ValueError, lambda: integrate(x*y))
def test_integrate_poly_accurately():
y = Symbol('y')
assert integrate(x*sin(y), x) == x**2*sin(y)/2
    # when passed to risch_norman, this will be a CPU hog, so this really
    # checks that the integrated function is recognized as a polynomial
assert integrate(x**1000*sin(y), x) == x**1001*sin(y)/1001
def test_issue_3635():
y = Symbol('y')
assert integrate(x**2, y) == x**2*y
assert integrate(x**2, (y, -1, 1)) == 2*x**2
# works in sympy and py.test but hangs in `setup.py test`
def test_integrate_linearterm_pow():
# check integrate((a*x+b)^c, x) -- issue 3499
y = Symbol('y', positive=True)
# TODO: Remove conds='none' below, let the assumption take care of it.
assert integrate(x**y, x, conds='none') == x**(y + 1)/(y + 1)
assert integrate((exp(y)*x + 1/y)**(1 + sin(y)), x, conds='none') == \
exp(-y)*(exp(y)*x + 1/y)**(2 + sin(y)) / (2 + sin(y))
def test_issue_3618():
assert integrate(pi*sqrt(x), x) == 2*pi*sqrt(x)**3/3
assert integrate(pi*sqrt(x) + E*sqrt(x)**3, x) == \
2*pi*sqrt(x)**3/3 + 2*E *sqrt(x)**5/5
def test_issue_3623():
assert integrate(cos((n + 1)*x), x) == Piecewise(
(sin(x*(n + 1))/(n + 1), Ne(n + 1, 0)), (x, True))
assert integrate(cos((n - 1)*x), x) == Piecewise(
(sin(x*(n - 1))/(n - 1), Ne(n - 1, 0)), (x, True))
assert integrate(cos((n + 1)*x) + cos((n - 1)*x), x) == \
Piecewise((sin(x*(n - 1))/(n - 1), Ne(n - 1, 0)), (x, True)) + \
Piecewise((sin(x*(n + 1))/(n + 1), Ne(n + 1, 0)), (x, True))
def test_issue_3664():
n = Symbol('n', integer=True, nonzero=True)
assert integrate(-1./2 * x * sin(n * pi * x/2), [x, -2, 0]) == \
2.0*cos(pi*n)/(pi*n)
assert integrate(x * sin(n * pi * x/2) * Rational(-1, 2), [x, -2, 0]) == \
2*cos(pi*n)/(pi*n)
def test_issue_3679():
# definite integration of rational functions gives wrong answers
assert NS(Integral(1/(x**2 - 8*x + 17), (x, 2, 4))) == '1.10714871779409'
def test_issue_3686(): # remove this when fresnel integrals are implemented
from sympy import expand_func, fresnels
assert expand_func(integrate(sin(x**2), x)) == \
sqrt(2)*sqrt(pi)*fresnels(sqrt(2)*x/sqrt(pi))/2
def test_integrate_units():
m = units.m
s = units.s
assert integrate(x * m/s, (x, 1*s, 5*s)) == 12*m*s
def test_transcendental_functions():
assert integrate(LambertW(2*x), x) == \
-x + x*LambertW(2*x) + x/LambertW(2*x)
def test_log_polylog():
assert integrate(log(1 - x)/x, (x, 0, 1)) == -pi**2/6
assert integrate(log(x)*(1 - x)**(-1), (x, 0, 1)) == -pi**2/6
def test_issue_3740():
f = 4*log(x) - 2*log(x)**2
fid = diff(integrate(f, x), x)
assert abs(f.subs(x, 42).evalf() - fid.subs(x, 42).evalf()) < 1e-10
def test_issue_3788():
assert integrate(1/(1 + x**2), x) == atan(x)
def test_issue_3952():
f = sin(x)
assert integrate(f, x) == -cos(x)
raises(ValueError, lambda: integrate(f, 2*x))
def test_issue_4516():
assert integrate(2**x - 2*x, x) == 2**x/log(2) - x**2
def test_issue_7450():
ans = integrate(exp(-(1 + I)*x), (x, 0, oo))
assert re(ans) == S.Half and im(ans) == Rational(-1, 2)
def test_issue_8623():
assert integrate((1 + cos(2*x)) / (3 - 2*cos(2*x)), (x, 0, pi)) == -pi/2 + sqrt(5)*pi/2
assert integrate((1 + cos(2*x))/(3 - 2*cos(2*x))) == -x/2 + sqrt(5)*(atan(sqrt(5)*tan(x)) + \
pi*floor((x - pi/2)/pi))/2
def test_issue_9569():
assert integrate(1 / (2 - cos(x)), (x, 0, pi)) == pi/sqrt(3)
assert integrate(1/(2 - cos(x))) == 2*sqrt(3)*(atan(sqrt(3)*tan(x/2)) + pi*floor((x/2 - pi/2)/pi))/3
def test_issue_13749():
assert integrate(1 / (2 + cos(x)), (x, 0, pi)) == pi/sqrt(3)
assert integrate(1/(2 + cos(x))) == 2*sqrt(3)*(atan(sqrt(3)*tan(x/2)/3) + pi*floor((x/2 - pi/2)/pi))/3
def test_matrices():
M = Matrix(2, 2, lambda i, j: (i + j + 1)*sin((i + j + 1)*x))
assert integrate(M, x) == Matrix([
[-cos(x), -cos(2*x)],
[-cos(2*x), -cos(3*x)],
])
def test_integrate_functions():
# issue 4111
assert integrate(f(x), x) == Integral(f(x), x)
assert integrate(f(x), (x, 0, 1)) == Integral(f(x), (x, 0, 1))
assert integrate(f(x)*diff(f(x), x), x) == f(x)**2/2
assert integrate(diff(f(x), x) / f(x), x) == log(f(x))
def test_integrate_derivatives():
assert integrate(Derivative(f(x), x), x) == f(x)
assert integrate(Derivative(f(y), y), x) == x*Derivative(f(y), y)
assert integrate(Derivative(f(x), x)**2, x) == \
Integral(Derivative(f(x), x)**2, x)
def test_transform():
a = Integral(x**2 + 1, (x, -1, 2))
fx = x
fy = 3*y + 1
assert a.doit() == a.transform(fx, fy).doit()
assert a.transform(fx, fy).transform(fy, fx) == a
fx = 3*x + 1
fy = y
assert a.transform(fx, fy).transform(fy, fx) == a
a = Integral(sin(1/x), (x, 0, 1))
assert a.transform(x, 1/y) == Integral(sin(y)/y**2, (y, 1, oo))
assert a.transform(x, 1/y).transform(y, 1/x) == a
a = Integral(exp(-x**2), (x, -oo, oo))
assert a.transform(x, 2*y) == Integral(2*exp(-4*y**2), (y, -oo, oo))
# < 3 arg limit handled properly
assert Integral(x, x).transform(x, a*y).doit() == \
Integral(y*a**2, y).doit()
_3 = S(3)
assert Integral(x, (x, 0, -_3)).transform(x, 1/y).doit() == \
Integral(-1/x**3, (x, -oo, -1/_3)).doit()
assert Integral(x, (x, 0, _3)).transform(x, 1/y) == \
Integral(y**(-3), (y, 1/_3, oo))
# issue 8400
i = Integral(x + y, (x, 1, 2), (y, 1, 2))
assert i.transform(x, (x + 2*y, x)).doit() == \
i.transform(x, (x + 2*z, x)).doit() == 3
i = Integral(x, (x, a, b))
assert i.transform(x, 2*s) == Integral(4*s, (s, a/2, b/2))
raises(ValueError, lambda: i.transform(x, 1))
raises(ValueError, lambda: i.transform(x, s*t))
raises(ValueError, lambda: i.transform(x, -s))
raises(ValueError, lambda: i.transform(x, (s, t)))
raises(ValueError, lambda: i.transform(2*x, 2*s))
i = Integral(x**2, (x, 1, 2))
raises(ValueError, lambda: i.transform(x**2, s))
am = Symbol('a', negative=True)
bp = Symbol('b', positive=True)
i = Integral(x, (x, bp, am))
i.transform(x, 2*s)
assert i.transform(x, 2*s) == Integral(-4*s, (s, am/2, bp/2))
i = Integral(x, (x, a))
assert i.transform(x, 2*s) == Integral(4*s, (s, a/2))
def test_issue_4052():
f = S.Half*asin(x) + x*sqrt(1 - x**2)/2
assert integrate(cos(asin(x)), x) == f
assert integrate(sin(acos(x)), x) == f
@slow
def test_evalf_integrals():
assert NS(Integral(x, (x, 2, 5)), 15) == '10.5000000000000'
gauss = Integral(exp(-x**2), (x, -oo, oo))
assert NS(gauss, 15) == '1.77245385090552'
assert NS(gauss**2 - pi + E*Rational(
1, 10**20), 15) in ('2.71828182845904e-20', '2.71828182845905e-20')
# A monster of an integral from http://mathworld.wolfram.com/DefiniteIntegral.html
t = Symbol('t')
a = 8*sqrt(3)/(1 + 3*t**2)
b = 16*sqrt(2)*(3*t + 1)*sqrt(4*t**2 + t + 1)**3
c = (3*t**2 + 1)*(11*t**2 + 2*t + 3)**2
d = sqrt(2)*(249*t**2 + 54*t + 65)/(11*t**2 + 2*t + 3)**2
f = a - b/c - d
assert NS(Integral(f, (t, 0, 1)), 50) == \
NS((3*sqrt(2) - 49*pi + 162*atan(sqrt(2)))/12, 50)
# http://mathworld.wolfram.com/VardisIntegral.html
assert NS(Integral(log(log(1/x))/(1 + x + x**2), (x, 0, 1)), 15) == \
NS('pi/sqrt(3) * log(2*pi**(5/6) / gamma(1/6))', 15)
# http://mathworld.wolfram.com/AhmedsIntegral.html
assert NS(Integral(atan(sqrt(x**2 + 2))/(sqrt(x**2 + 2)*(x**2 + 1)), (x,
0, 1)), 15) == NS(5*pi**2/96, 15)
# http://mathworld.wolfram.com/AbelsIntegral.html
assert NS(Integral(x/((exp(pi*x) - exp(
-pi*x))*(x**2 + 1)), (x, 0, oo)), 15) == NS('log(2)/2-1/4', 15)
# Complex part trimming
# http://mathworld.wolfram.com/VardisIntegral.html
assert NS(Integral(log(log(sin(x)/cos(x))), (x, pi/4, pi/2)), 15, chop=True) == \
NS('pi/4*log(4*pi**3/gamma(1/4)**4)', 15)
#
# Endpoints causing trouble (rounding error in integration points -> complex log)
assert NS(
2 + Integral(log(2*cos(x/2)), (x, -pi, pi)), 17, chop=True) == NS(2, 17)
assert NS(
2 + Integral(log(2*cos(x/2)), (x, -pi, pi)), 20, chop=True) == NS(2, 20)
assert NS(
2 + Integral(log(2*cos(x/2)), (x, -pi, pi)), 22, chop=True) == NS(2, 22)
# Needs zero handling
assert NS(pi - 4*Integral(
'sqrt(1-x**2)', (x, 0, 1)), 15, maxn=30, chop=True) in ('0.0', '0')
# Oscillatory quadrature
a = Integral(sin(x)/x**2, (x, 1, oo)).evalf(maxn=15)
assert 0.49 < a < 0.51
assert NS(
Integral(sin(x)/x**2, (x, 1, oo)), quad='osc') == '0.504067061906928'
assert NS(Integral(
cos(pi*x + 1)/x, (x, -oo, -1)), quad='osc') == '0.276374705640365'
# indefinite integrals aren't evaluated
assert NS(Integral(x, x)) == 'Integral(x, x)'
assert NS(Integral(x, (x, y))) == 'Integral(x, (x, y))'
def test_evalf_issue_939():
# https://github.com/sympy/sympy/issues/4038
# The output form of an integral may differ by a step function between
# revisions, making this test a bit useless. This can't be said about
    # the other two tests. For now, all values of this evaluation are used here,
# but in future this should be reconsidered.
assert NS(integrate(1/(x**5 + 1), x).subs(x, 4), chop=True) in \
['-0.000976138910649103', '0.965906660135753', '1.93278945918216']
assert NS(Integral(1/(x**5 + 1), (x, 2, 4))) == '0.0144361088886740'
assert NS(
integrate(1/(x**5 + 1), (x, 2, 4)), chop=True) == '0.0144361088886740'
def test_double_previously_failing_integrals():
# Double integrals not implemented <- Sure it is!
res = integrate(sqrt(x) + x*y, (x, 1, 2), (y, -1, 1))
# Old numerical test
assert NS(res, 15) == '2.43790283299492'
# Symbolic test
assert res == Rational(-4, 3) + 8*sqrt(2)/3
# double integral + zero detection
assert integrate(sin(x + x*y), (x, -1, 1), (y, -1, 1)) is S.Zero
def test_integrate_SingularityFunction():
in_1 = SingularityFunction(x, a, 3) + SingularityFunction(x, 5, -1)
out_1 = SingularityFunction(x, a, 4)/4 + SingularityFunction(x, 5, 0)
assert integrate(in_1, x) == out_1
in_2 = 10*SingularityFunction(x, 4, 0) - 5*SingularityFunction(x, -6, -2)
out_2 = 10*SingularityFunction(x, 4, 1) - 5*SingularityFunction(x, -6, -1)
assert integrate(in_2, x) == out_2
in_3 = 2*x**2*y -10*SingularityFunction(x, -4, 7) - 2*SingularityFunction(y, 10, -2)
out_3_1 = 2*x**3*y/3 - 2*x*SingularityFunction(y, 10, -2) - 5*SingularityFunction(x, -4, 8)/4
out_3_2 = x**2*y**2 - 10*y*SingularityFunction(x, -4, 7) - 2*SingularityFunction(y, 10, -1)
assert integrate(in_3, x) == out_3_1
assert integrate(in_3, y) == out_3_2
assert unchanged(Integral, in_3, (x,))
assert Integral(in_3, x) == Integral(in_3, (x,))
assert Integral(in_3, x).doit() == out_3_1
in_4 = 10*SingularityFunction(x, -4, 7) - 2*SingularityFunction(x, 10, -2)
out_4 = 5*SingularityFunction(x, -4, 8)/4 - 2*SingularityFunction(x, 10, -1)
assert integrate(in_4, (x, -oo, x)) == out_4
assert integrate(SingularityFunction(x, 5, -1), x) == SingularityFunction(x, 5, 0)
assert integrate(SingularityFunction(x, 0, -1), (x, -oo, oo)) == 1
assert integrate(5*SingularityFunction(x, 5, -1), (x, -oo, oo)) == 5
assert integrate(SingularityFunction(x, 5, -1) * f(x), (x, -oo, oo)) == f(5)
def test_integrate_DiracDelta():
# This is here to check that deltaintegrate is being called, but also
# to test definite integrals. More tests are in test_deltafunctions.py
assert integrate(DiracDelta(x) * f(x), (x, -oo, oo)) == f(0)
assert integrate(DiracDelta(x)**2, (x, -oo, oo)) == DiracDelta(0)
# issue 4522
assert integrate(integrate((4 - 4*x + x*y - 4*y) * \
DiracDelta(x)*DiracDelta(y - 1), (x, 0, 1)), (y, 0, 1)) == 0
# issue 5729
p = exp(-(x**2 + y**2))/pi
assert integrate(p*DiracDelta(x - 10*y), (x, -oo, oo), (y, -oo, oo)) == \
integrate(p*DiracDelta(x - 10*y), (y, -oo, oo), (x, -oo, oo)) == \
integrate(p*DiracDelta(10*x - y), (x, -oo, oo), (y, -oo, oo)) == \
integrate(p*DiracDelta(10*x - y), (y, -oo, oo), (x, -oo, oo)) == \
1/sqrt(101*pi)
def test_integrate_returns_piecewise():
assert integrate(x**y, x) == Piecewise(
(x**(y + 1)/(y + 1), Ne(y, -1)), (log(x), True))
assert integrate(x**y, y) == Piecewise(
(x**y/log(x), Ne(log(x), 0)), (y, True))
assert integrate(exp(n*x), x) == Piecewise(
(exp(n*x)/n, Ne(n, 0)), (x, True))
assert integrate(x*exp(n*x), x) == Piecewise(
((n*x - 1)*exp(n*x)/n**2, Ne(n**2, 0)), (x**2/2, True))
assert integrate(x**(n*y), x) == Piecewise(
(x**(n*y + 1)/(n*y + 1), Ne(n*y, -1)), (log(x), True))
assert integrate(x**(n*y), y) == Piecewise(
(x**(n*y)/(n*log(x)), Ne(n*log(x), 0)), (y, True))
assert integrate(cos(n*x), x) == Piecewise(
(sin(n*x)/n, Ne(n, 0)), (x, True))
assert integrate(cos(n*x)**2, x) == Piecewise(
((n*x/2 + sin(n*x)*cos(n*x)/2)/n, Ne(n, 0)), (x, True))
assert integrate(x*cos(n*x), x) == Piecewise(
(x*sin(n*x)/n + cos(n*x)/n**2, Ne(n, 0)), (x**2/2, True))
assert integrate(sin(n*x), x) == Piecewise(
(-cos(n*x)/n, Ne(n, 0)), (0, True))
assert integrate(sin(n*x)**2, x) == Piecewise(
((n*x/2 - sin(n*x)*cos(n*x)/2)/n, Ne(n, 0)), (0, True))
assert integrate(x*sin(n*x), x) == Piecewise(
(-x*cos(n*x)/n + sin(n*x)/n**2, Ne(n, 0)), (0, True))
assert integrate(exp(x*y), (x, 0, z)) == Piecewise(
(exp(y*z)/y - 1/y, (y > -oo) & (y < oo) & Ne(y, 0)), (z, True))
def test_integrate_max_min():
x = symbols('x', real=True)
assert integrate(Min(x, 2), (x, 0, 3)) == 4
assert integrate(Max(x**2, x**3), (x, 0, 2)) == Rational(49, 12)
assert integrate(Min(exp(x), exp(-x))**2, x) == Piecewise( \
(exp(2*x)/2, x <= 0), (1 - exp(-2*x)/2, True))
# issue 7907
c = symbols('c', extended_real=True)
int1 = integrate(Max(c, x)*exp(-x**2), (x, -oo, oo))
int2 = integrate(c*exp(-x**2), (x, -oo, c))
int3 = integrate(x*exp(-x**2), (x, c, oo))
assert int1 == int2 + int3 == sqrt(pi)*c*erf(c)/2 + \
sqrt(pi)*c/2 + exp(-c**2)/2
def test_integrate_Abs_sign():
assert integrate(Abs(x), (x, -2, 1)) == Rational(5, 2)
assert integrate(Abs(x), (x, 0, 1)) == S.Half
assert integrate(Abs(x + 1), (x, 0, 1)) == Rational(3, 2)
assert integrate(Abs(x**2 - 1), (x, -2, 2)) == 4
assert integrate(Abs(x**2 - 3*x), (x, -15, 15)) == 2259
assert integrate(sign(x), (x, -1, 2)) == 1
assert integrate(sign(x)*sin(x), (x, -pi, pi)) == 4
assert integrate(sign(x - 2) * x**2, (x, 0, 3)) == Rational(11, 3)
t, s = symbols('t s', real=True)
assert integrate(Abs(t), t) == Piecewise(
(-t**2/2, t <= 0), (t**2/2, True))
assert integrate(Abs(2*t - 6), t) == Piecewise(
(-t**2 + 6*t, t <= 3), (t**2 - 6*t + 18, True))
assert (integrate(abs(t - s**2), (t, 0, 2)) ==
2*s**2*Min(2, s**2) - 2*s**2 - Min(2, s**2)**2 + 2)
assert integrate(exp(-Abs(t)), t) == Piecewise(
(exp(t), t <= 0), (2 - exp(-t), True))
assert integrate(sign(2*t - 6), t) == Piecewise(
(-t, t < 3), (t - 6, True))
assert integrate(2*t*sign(t**2 - 1), t) == Piecewise(
(t**2, t < -1), (-t**2 + 2, t < 1), (t**2, True))
assert integrate(sign(t), (t, s + 1)) == Piecewise(
(s + 1, s + 1 > 0), (-s - 1, s + 1 < 0), (0, True))
def test_subs1():
e = Integral(exp(x - y), x)
assert e.subs(y, 3) == Integral(exp(x - 3), x)
e = Integral(exp(x - y), (x, 0, 1))
assert e.subs(y, 3) == Integral(exp(x - 3), (x, 0, 1))
f = Lambda(x, exp(-x**2))
conv = Integral(f(x - y)*f(y), (y, -oo, oo))
assert conv.subs({x: 0}) == Integral(exp(-2*y**2), (y, -oo, oo))
def test_subs2():
e = Integral(exp(x - y), x, t)
assert e.subs(y, 3) == Integral(exp(x - 3), x, t)
e = Integral(exp(x - y), (x, 0, 1), (t, 0, 1))
assert e.subs(y, 3) == Integral(exp(x - 3), (x, 0, 1), (t, 0, 1))
f = Lambda(x, exp(-x**2))
conv = Integral(f(x - y)*f(y), (y, -oo, oo), (t, 0, 1))
assert conv.subs({x: 0}) == Integral(exp(-2*y**2), (y, -oo, oo), (t, 0, 1))
def test_subs3():
e = Integral(exp(x - y), (x, 0, y), (t, y, 1))
assert e.subs(y, 3) == Integral(exp(x - 3), (x, 0, 3), (t, 3, 1))
f = Lambda(x, exp(-x**2))
conv = Integral(f(x - y)*f(y), (y, -oo, oo), (t, x, 1))
assert conv.subs({x: 0}) == Integral(exp(-2*y**2), (y, -oo, oo), (t, 0, 1))
def test_subs4():
e = Integral(exp(x), (x, 0, y), (t, y, 1))
assert e.subs(y, 3) == Integral(exp(x), (x, 0, 3), (t, 3, 1))
f = Lambda(x, exp(-x**2))
conv = Integral(f(y)*f(y), (y, -oo, oo), (t, x, 1))
assert conv.subs({x: 0}) == Integral(exp(-2*y**2), (y, -oo, oo), (t, 0, 1))
def test_subs5():
e = Integral(exp(-x**2), (x, -oo, oo))
assert e.subs(x, 5) == e
e = Integral(exp(-x**2 + y), x)
assert e.subs(y, 5) == Integral(exp(-x**2 + 5), x)
e = Integral(exp(-x**2 + y), (x, x))
assert e.subs(x, 5) == Integral(exp(y - x**2), (x, 5))
assert e.subs(y, 5) == Integral(exp(-x**2 + 5), x)
e = Integral(exp(-x**2 + y), (y, -oo, oo), (x, -oo, oo))
assert e.subs(x, 5) == e
assert e.subs(y, 5) == e
# Test evaluation of antiderivatives
e = Integral(exp(-x**2), (x, x))
assert e.subs(x, 5) == Integral(exp(-x**2), (x, 5))
e = Integral(exp(x), x)
assert (e.subs(x,1) - e.subs(x,0) - Integral(exp(x), (x, 0, 1))
).doit().is_zero
def test_subs6():
a, b = symbols('a b')
e = Integral(x*y, (x, f(x), f(y)))
assert e.subs(x, 1) == Integral(x*y, (x, f(1), f(y)))
assert e.subs(y, 1) == Integral(x, (x, f(x), f(1)))
e = Integral(x*y, (x, f(x), f(y)), (y, f(x), f(y)))
assert e.subs(x, 1) == Integral(x*y, (x, f(1), f(y)), (y, f(1), f(y)))
assert e.subs(y, 1) == Integral(x*y, (x, f(x), f(y)), (y, f(x), f(1)))
e = Integral(x*y, (x, f(x), f(a)), (y, f(x), f(a)))
assert e.subs(a, 1) == Integral(x*y, (x, f(x), f(1)), (y, f(x), f(1)))
def test_subs7():
e = Integral(x, (x, 1, y), (y, 1, 2))
assert e.subs({x: 1, y: 2}) == e
e = Integral(sin(x) + sin(y), (x, sin(x), sin(y)),
(y, 1, 2))
assert e.subs(sin(y), 1) == e
assert e.subs(sin(x), 1) == Integral(sin(x) + sin(y), (x, 1, sin(y)),
(y, 1, 2))
def test_expand():
e = Integral(f(x)+f(x**2), (x, 1, y))
assert e.expand() == Integral(f(x), (x, 1, y)) + Integral(f(x**2), (x, 1, y))
def test_integration_variable():
raises(ValueError, lambda: Integral(exp(-x**2), 3))
raises(ValueError, lambda: Integral(exp(-x**2), (3, -oo, oo)))
def test_expand_integral():
assert Integral(cos(x**2)*(sin(x**2) + 1), (x, 0, 1)).expand() == \
Integral(cos(x**2)*sin(x**2), (x, 0, 1)) + \
Integral(cos(x**2), (x, 0, 1))
assert Integral(cos(x**2)*(sin(x**2) + 1), x).expand() == \
Integral(cos(x**2)*sin(x**2), x) + \
Integral(cos(x**2), x)
def test_as_sum_midpoint1():
e = Integral(sqrt(x**3 + 1), (x, 2, 10))
assert e.as_sum(1, method="midpoint") == 8*sqrt(217)
assert e.as_sum(2, method="midpoint") == 4*sqrt(65) + 12*sqrt(57)
assert e.as_sum(3, method="midpoint") == 8*sqrt(217)/3 + \
8*sqrt(3081)/27 + 8*sqrt(52809)/27
assert e.as_sum(4, method="midpoint") == 2*sqrt(730) + \
4*sqrt(7) + 4*sqrt(86) + 6*sqrt(14)
assert abs(e.as_sum(4, method="midpoint").n() - e.n()) < 0.5
e = Integral(sqrt(x**3 + y**3), (x, 2, 10), (y, 0, 10))
raises(NotImplementedError, lambda: e.as_sum(4))
def test_as_sum_midpoint2():
e = Integral((x + y)**2, (x, 0, 1))
n = Symbol('n', positive=True, integer=True)
assert e.as_sum(1, method="midpoint").expand() == Rational(1, 4) + y + y**2
assert e.as_sum(2, method="midpoint").expand() == Rational(5, 16) + y + y**2
assert e.as_sum(3, method="midpoint").expand() == Rational(35, 108) + y + y**2
assert e.as_sum(4, method="midpoint").expand() == Rational(21, 64) + y + y**2
assert e.as_sum(n, method="midpoint").expand() == \
y**2 + y + Rational(1, 3) - 1/(12*n**2)
def test_as_sum_left():
e = Integral((x + y)**2, (x, 0, 1))
assert e.as_sum(1, method="left").expand() == y**2
assert e.as_sum(2, method="left").expand() == Rational(1, 8) + y/2 + y**2
assert e.as_sum(3, method="left").expand() == Rational(5, 27) + y*Rational(2, 3) + y**2
assert e.as_sum(4, method="left").expand() == Rational(7, 32) + y*Rational(3, 4) + y**2
assert e.as_sum(n, method="left").expand() == \
y**2 + y + Rational(1, 3) - y/n - 1/(2*n) + 1/(6*n**2)
assert e.as_sum(10, method="left", evaluate=False).has(Sum)
def test_as_sum_right():
e = Integral((x + y)**2, (x, 0, 1))
assert e.as_sum(1, method="right").expand() == 1 + 2*y + y**2
assert e.as_sum(2, method="right").expand() == Rational(5, 8) + y*Rational(3, 2) + y**2
assert e.as_sum(3, method="right").expand() == Rational(14, 27) + y*Rational(4, 3) + y**2
assert e.as_sum(4, method="right").expand() == Rational(15, 32) + y*Rational(5, 4) + y**2
assert e.as_sum(n, method="right").expand() == \
y**2 + y + Rational(1, 3) + y/n + 1/(2*n) + 1/(6*n**2)
def test_as_sum_trapezoid():
e = Integral((x + y)**2, (x, 0, 1))
assert e.as_sum(1, method="trapezoid").expand() == y**2 + y + S.Half
assert e.as_sum(2, method="trapezoid").expand() == y**2 + y + Rational(3, 8)
assert e.as_sum(3, method="trapezoid").expand() == y**2 + y + Rational(19, 54)
assert e.as_sum(4, method="trapezoid").expand() == y**2 + y + Rational(11, 32)
assert e.as_sum(n, method="trapezoid").expand() == \
y**2 + y + Rational(1, 3) + 1/(6*n**2)
assert Integral(sign(x), (x, 0, 1)).as_sum(1, 'trapezoid') == S.Half
def test_as_sum_raises():
e = Integral((x + y)**2, (x, 0, 1))
raises(ValueError, lambda: e.as_sum(-1))
raises(ValueError, lambda: e.as_sum(0))
raises(ValueError, lambda: Integral(x).as_sum(3))
raises(ValueError, lambda: e.as_sum(oo))
raises(ValueError, lambda: e.as_sum(3, method='xxxx2'))
def test_nested_doit():
e = Integral(Integral(x, x), x)
f = Integral(x, x, x)
assert e.doit() == f.doit()
def test_issue_4665():
# Allow only upper or lower limit evaluation
e = Integral(x**2, (x, None, 1))
f = Integral(x**2, (x, 1, None))
assert e.doit() == Rational(1, 3)
assert f.doit() == Rational(-1, 3)
assert Integral(x*y, (x, None, y)).subs(y, t) == Integral(x*t, (x, None, t))
assert Integral(x*y, (x, y, None)).subs(y, t) == Integral(x*t, (x, t, None))
assert integrate(x**2, (x, None, 1)) == Rational(1, 3)
assert integrate(x**2, (x, 1, None)) == Rational(-1, 3)
assert integrate("x**2", ("x", "1", None)) == Rational(-1, 3)
def test_integral_reconstruct():
e = Integral(x**2, (x, -1, 1))
assert e == Integral(*e.args)
def test_doit_integrals():
e = Integral(Integral(2*x), (x, 0, 1))
assert e.doit() == Rational(1, 3)
assert e.doit(deep=False) == Rational(1, 3)
f = Function('f')
# doesn't matter if the integral can't be performed
assert Integral(f(x), (x, 1, 1)).doit() == 0
# doesn't matter if the limits can't be evaluated
assert Integral(0, (x, 1, Integral(f(x), x))).doit() == 0
assert Integral(x, (a, 0)).doit() == 0
limits = ((a, 1, exp(x)), (x, 0))
assert Integral(a, *limits).doit() == Rational(1, 4)
assert Integral(a, *list(reversed(limits))).doit() == 0
def test_issue_4884():
assert integrate(sqrt(x)*(1 + x)) == \
Piecewise(
(2*sqrt(x)*(x + 1)**2/5 - 2*sqrt(x)*(x + 1)/15 - 4*sqrt(x)/15,
Abs(x + 1) > 1),
(2*I*sqrt(-x)*(x + 1)**2/5 - 2*I*sqrt(-x)*(x + 1)/15 -
4*I*sqrt(-x)/15, True))
assert integrate(x**x*(1 + log(x))) == x**x
def test_is_number():
from sympy.abc import x, y, z
from sympy import cos, sin
assert Integral(x).is_number is False
assert Integral(1, x).is_number is False
assert Integral(1, (x, 1)).is_number is True
assert Integral(1, (x, 1, 2)).is_number is True
assert Integral(1, (x, 1, y)).is_number is False
assert Integral(1, (x, y)).is_number is False
assert Integral(x, y).is_number is False
assert Integral(x, (y, 1, x)).is_number is False
assert Integral(x, (y, 1, 2)).is_number is False
assert Integral(x, (x, 1, 2)).is_number is True
# `foo.is_number` should always be equivalent to `not foo.free_symbols`
# in each of these cases, there are pseudo-free symbols
i = Integral(x, (y, 1, 1))
assert i.is_number is False and i.n() == 0
i = Integral(x, (y, z, z))
assert i.is_number is False and i.n() == 0
i = Integral(1, (y, z, z + 2))
assert i.is_number is False and i.n() == 2
assert Integral(x*y, (x, 1, 2), (y, 1, 3)).is_number is True
assert Integral(x*y, (x, 1, 2), (y, 1, z)).is_number is False
assert Integral(x, (x, 1)).is_number is True
assert Integral(x, (x, 1, Integral(y, (y, 1, 2)))).is_number is True
assert Integral(Sum(z, (z, 1, 2)), (x, 1, 2)).is_number is True
# it is possible to get a false negative if the integrand is
# actually an unsimplified zero, but this is true of is_number in general.
assert Integral(sin(x)**2 + cos(x)**2 - 1, x).is_number is False
assert Integral(f(x), (x, 0, 1)).is_number is True
def test_symbols():
from sympy.abc import x, y, z
assert Integral(0, x).free_symbols == {x}
assert Integral(x).free_symbols == {x}
assert Integral(x, (x, None, y)).free_symbols == {y}
assert Integral(x, (x, y, None)).free_symbols == {y}
assert Integral(x, (x, 1, y)).free_symbols == {y}
assert Integral(x, (x, y, 1)).free_symbols == {y}
assert Integral(x, (x, x, y)).free_symbols == {x, y}
assert Integral(x, x, y).free_symbols == {x, y}
assert Integral(x, (x, 1, 2)).free_symbols == set()
assert Integral(x, (y, 1, 2)).free_symbols == {x}
# pseudo-free in this case
assert Integral(x, (y, z, z)).free_symbols == {x, z}
assert Integral(x, (y, 1, 2), (y, None, None)).free_symbols == {x, y}
assert Integral(x, (y, 1, 2), (x, 1, y)).free_symbols == {y}
assert Integral(2, (y, 1, 2), (y, 1, x), (x, 1, 2)).free_symbols == set()
assert Integral(2, (y, x, 2), (y, 1, x), (x, 1, 2)).free_symbols == set()
assert Integral(2, (x, 1, 2), (y, x, 2), (y, 1, 2)).free_symbols == \
{x}
def test_is_zero():
from sympy.abc import x, m
assert Integral(0, (x, 1, x)).is_zero
assert Integral(1, (x, 1, 1)).is_zero
assert Integral(1, (x, 1, 2), (y, 2)).is_zero is False
assert Integral(x, (m, 0)).is_zero
assert Integral(x + m, (m, 0)).is_zero is None
i = Integral(m, (m, 1, exp(x)), (x, 0))
assert i.is_zero is None
assert Integral(m, (x, 0), (m, 1, exp(x))).is_zero is True
assert Integral(x, (x, oo, oo)).is_zero # issue 8171
assert Integral(x, (x, -oo, -oo)).is_zero
# this is zero but is beyond the scope of what is_zero
# should be doing
assert Integral(sin(x), (x, 0, 2*pi)).is_zero is None
def test_series():
from sympy.abc import x
i = Integral(cos(x), (x, x))
e = i.lseries(x)
assert i.nseries(x, n=8).removeO() == Add(*[next(e) for j in range(4)])
def test_trig_nonelementary_integrals():
x = Symbol('x')
assert integrate((1 + sin(x))/x, x) == log(x) + Si(x)
# next one comes out as log(x) + log(x**2)/2 + Ci(x)
# so not hardcoding this log ugliness
assert integrate((cos(x) + 2)/x, x).has(Ci)
def test_issue_4403():
x = Symbol('x')
y = Symbol('y')
z = Symbol('z', positive=True)
assert integrate(sqrt(x**2 + z**2), x) == \
z**2*asinh(x/z)/2 + x*sqrt(x**2 + z**2)/2
assert integrate(sqrt(x**2 - z**2), x) == \
-z**2*acosh(x/z)/2 + x*sqrt(x**2 - z**2)/2
x = Symbol('x', real=True)
y = Symbol('y', positive=True)
assert integrate(1/(x**2 + y**2)**S('3/2'), x) == \
x/(y**2*sqrt(x**2 + y**2))
# If y is real and nonzero, we get x*Abs(y)/(y**3*sqrt(x**2 + y**2)),
# which results from sqrt(1 + x**2/y**2) = sqrt(x**2 + y**2)/|y|.
def test_issue_4403_2():
assert integrate(sqrt(-x**2 - 4), x) == \
-2*atan(x/sqrt(-4 - x**2)) + x*sqrt(-4 - x**2)/2
def test_issue_4100():
R = Symbol('R', positive=True)
assert integrate(sqrt(R**2 - x**2), (x, 0, R)) == pi*R**2/4
def test_issue_5167():
from sympy.abc import w, x, y, z
f = Function('f')
assert Integral(Integral(f(x), x), x) == Integral(f(x), x, x)
assert Integral(f(x)).args == (f(x), Tuple(x))
assert Integral(Integral(f(x))).args == (f(x), Tuple(x), Tuple(x))
assert Integral(Integral(f(x)), y).args == (f(x), Tuple(x), Tuple(y))
assert Integral(Integral(f(x), z), y).args == (f(x), Tuple(z), Tuple(y))
assert Integral(Integral(Integral(f(x), x), y), z).args == \
(f(x), Tuple(x), Tuple(y), Tuple(z))
assert integrate(Integral(f(x), x), x) == Integral(f(x), x, x)
assert integrate(Integral(f(x), y), x) == y*Integral(f(x), x)
assert integrate(Integral(f(x), x), y) in [Integral(y*f(x), x), y*Integral(f(x), x)]
assert integrate(Integral(2, x), x) == x**2
assert integrate(Integral(2, x), y) == 2*x*y
# don't re-order given limits
assert Integral(1, x, y).args != Integral(1, y, x).args
# do as many as possible
assert Integral(f(x), y, x, y, x).doit() == y**2*Integral(f(x), x, x)/2
assert Integral(f(x), (x, 1, 2), (w, 1, x), (z, 1, y)).doit() == \
y*(x - 1)*Integral(f(x), (x, 1, 2)) - (x - 1)*Integral(f(x), (x, 1, 2))
def test_issue_4890():
z = Symbol('z', positive=True)
assert integrate(exp(-log(x)**2), x) == \
sqrt(pi)*exp(Rational(1, 4))*erf(log(x) - S.Half)/2
assert integrate(exp(log(x)**2), x) == \
sqrt(pi)*exp(Rational(-1, 4))*erfi(log(x)+S.Half)/2
assert integrate(exp(-z*log(x)**2), x) == \
sqrt(pi)*exp(1/(4*z))*erf(sqrt(z)*log(x) - 1/(2*sqrt(z)))/(2*sqrt(z))
def test_issue_4551():
assert not integrate(1/(x*sqrt(1 - x**2)), x).has(Integral)
def test_issue_4376():
n = Symbol('n', integer=True, positive=True)
assert simplify(integrate(n*(x**(1/n) - 1), (x, 0, S.Half)) -
(n**2 - 2**(1/n)*n**2 - n*2**(1/n))/(2**(1 + 1/n) + n*2**(1 + 1/n))) == 0
def test_issue_4517():
assert integrate((sqrt(x) - x**3)/x**Rational(1, 3), x) == \
6*x**Rational(7, 6)/7 - 3*x**Rational(11, 3)/11
def test_issue_4527():
k, m = symbols('k m', integer=True)
assert integrate(sin(k*x)*sin(m*x), (x, 0, pi)).simplify() == \
Piecewise((0, Eq(k, 0) | Eq(m, 0)),
(-pi/2, Eq(k, -m) | (Eq(k, 0) & Eq(m, 0))),
(pi/2, Eq(k, m) | (Eq(k, 0) & Eq(m, 0))),
(0, True))
# Should be possible to further simplify to:
# Piecewise(
# (0, Eq(k, 0) | Eq(m, 0)),
# (-pi/2, Eq(k, -m)),
# (pi/2, Eq(k, m)),
# (0, True))
assert integrate(sin(k*x)*sin(m*x), (x,)) == Piecewise(
(0, And(Eq(k, 0), Eq(m, 0))),
(-x*sin(m*x)**2/2 - x*cos(m*x)**2/2 + sin(m*x)*cos(m*x)/(2*m), Eq(k, -m)),
(x*sin(m*x)**2/2 + x*cos(m*x)**2/2 - sin(m*x)*cos(m*x)/(2*m), Eq(k, m)),
(m*sin(k*x)*cos(m*x)/(k**2 - m**2) -
k*sin(m*x)*cos(k*x)/(k**2 - m**2), True))
def test_issue_4199():
ypos = Symbol('y', positive=True)
# TODO: Remove conds='none' below, let the assumption take care of it.
assert integrate(exp(-I*2*pi*ypos*x)*x, (x, -oo, oo), conds='none') == \
Integral(exp(-I*2*pi*ypos*x)*x, (x, -oo, oo))
@slow
def test_issue_3940():
a, b, c, d = symbols('a:d', positive=True, finite=True)
assert integrate(exp(-x**2 + I*c*x), x) == \
-sqrt(pi)*exp(-c**2/4)*erf(I*c/2 - x)/2
assert integrate(exp(a*x**2 + b*x + c), x) == \
sqrt(pi)*exp(c)*exp(-b**2/(4*a))*erfi(sqrt(a)*x + b/(2*sqrt(a)))/(2*sqrt(a))
from sympy import expand_mul
from sympy.abc import k
assert expand_mul(integrate(exp(-x**2)*exp(I*k*x), (x, -oo, oo))) == \
sqrt(pi)*exp(-k**2/4)
a, d = symbols('a d', positive=True)
assert expand_mul(integrate(exp(-a*x**2 + 2*d*x), (x, -oo, oo))) == \
sqrt(pi)*exp(d**2/a)/sqrt(a)
def test_issue_5413():
# Note that this is not the same as testing ratint() because integrate()
# pulls out the coefficient.
assert integrate(-a/(a**2 + x**2), x) == I*log(-I*a + x)/2 - I*log(I*a + x)/2
def test_issue_4892a():
A, z = symbols('A z')
c = Symbol('c', nonzero=True)
P1 = -A*exp(-z)
P2 = -A/(c*t)*(sin(x)**2 + cos(y)**2)
h1 = -sin(x)**2 - cos(y)**2
h2 = -sin(x)**2 + sin(y)**2 - 1
# there is still some non-deterministic behavior in integrate
# or trigsimp which permits one of the following
assert integrate(c*(P2 - P1), t) in [
c*(-A*(-h1)*log(c*t)/c + A*t*exp(-z)),
c*(-A*(-h2)*log(c*t)/c + A*t*exp(-z)),
c*( A* h1 *log(c*t)/c + A*t*exp(-z)),
c*( A* h2 *log(c*t)/c + A*t*exp(-z)),
(A*c*t - A*(-h1)*log(t)*exp(z))*exp(-z),
(A*c*t - A*(-h2)*log(t)*exp(z))*exp(-z),
]
def test_issue_4892b():
# Issues relating to issue 4596 are making the actual result of this hard
# to test. The answer should be something like
#
# (-sin(y) + sqrt(-72 + 48*cos(y) - 8*cos(y)**2)/2)*log(x + sqrt(-72 +
# 48*cos(y) - 8*cos(y)**2)/(2*(3 - cos(y)))) + (-sin(y) - sqrt(-72 +
# 48*cos(y) - 8*cos(y)**2)/2)*log(x - sqrt(-72 + 48*cos(y) -
# 8*cos(y)**2)/(2*(3 - cos(y)))) + x**2*sin(y)/2 + 2*x*cos(y)
expr = (sin(y)*x**3 + 2*cos(y)*x**2 + 12)/(x**2 + 2)
assert trigsimp(factor(integrate(expr, x).diff(x) - expr)) == 0
def test_issue_5178():
assert integrate(sin(x)*f(y, z), (x, 0, pi), (y, 0, pi), (z, 0, pi)) == \
2*Integral(f(y, z), (y, 0, pi), (z, 0, pi))
def test_integrate_series():
f = sin(x).series(x, 0, 10)
g = x**2/2 - x**4/24 + x**6/720 - x**8/40320 + x**10/3628800 + O(x**11)
assert integrate(f, x) == g
assert diff(integrate(f, x), x) == f
assert integrate(O(x**5), x) == O(x**6)
def test_atom_bug():
from sympy import meijerg
from sympy.integrals.heurisch import heurisch
assert heurisch(meijerg([], [], [1], [], x), x) is None
def test_limit_bug():
z = Symbol('z', zero=False)
assert integrate(sin(x*y*z), (x, 0, pi), (y, 0, pi)) == \
(log(z) + EulerGamma + log(pi))/z - Ci(pi**2*z)/z + log(pi)/z
def test_issue_4703():
g = Function('g')
assert integrate(exp(x)*g(x), x).has(Integral)
def test_issue_1888():
f = Function('f')
assert integrate(f(x).diff(x)**2, x).has(Integral)
# The following tests work using meijerint.
def test_issue_3558():
from sympy import Si
assert integrate(cos(x*y), (x, -pi/2, pi/2), (y, 0, pi)) == 2*Si(pi**2/2)
def test_issue_4422():
assert integrate(1/sqrt(16 + 4*x**2), x) == asinh(x/2) / 2
def test_issue_4493():
from sympy import simplify
assert simplify(integrate(x*sqrt(1 + 2*x), x)) == \
sqrt(2*x + 1)*(6*x**2 + x - 1)/15
def test_issue_4737():
assert integrate(sin(x)/x, (x, -oo, oo)) == pi
assert integrate(sin(x)/x, (x, 0, oo)) == pi/2
assert integrate(sin(x)/x, x) == Si(x)
def test_issue_4992():
# Note: psi in _check_antecedents becomes NaN.
from sympy import simplify, expand_func, polygamma, gamma
a = Symbol('a', positive=True)
assert simplify(expand_func(integrate(exp(-x)*log(x)*x**a, (x, 0, oo)))) == \
(a*polygamma(0, a) + 1)*gamma(a)
def test_issue_4487():
from sympy import lowergamma, simplify
assert simplify(integrate(exp(-x)*x**y, x)) == lowergamma(y + 1, x)
def test_issue_4215():
x = Symbol("x")
assert integrate(1/(x**2), (x, -1, 1)) is oo
def test_issue_4400():
n = Symbol('n', integer=True, positive=True)
assert integrate((x**n)*log(x), x) == \
n*x*x**n*log(x)/(n**2 + 2*n + 1) + x*x**n*log(x)/(n**2 + 2*n + 1) - \
x*x**n/(n**2 + 2*n + 1)
def test_issue_6253():
# Note: this used to raise NotImplementedError
# Note: psi in _check_antecedents becomes NaN.
assert integrate((sqrt(1 - x) + sqrt(1 + x))**2/x, x, meijerg=True) == \
Integral((sqrt(-x + 1) + sqrt(x + 1))**2/x, x)
def test_issue_4153():
assert integrate(1/(1 + x + y + z), (x, 0, 1), (y, 0, 1), (z, 0, 1)) in [
-12*log(3) - 3*log(6)/2 + 3*log(8)/2 + 5*log(2) + 7*log(4),
6*log(2) + 8*log(4) - 27*log(3)/2, 22*log(2) - 27*log(3)/2,
-12*log(3) - 3*log(6)/2 + 47*log(2)/2]
def test_issue_4326():
R, b, h = symbols('R b h')
# It doesn't matter if we can do the integral. Just make sure the result
# doesn't contain nan. This is really a test against _eval_interval.
e = integrate(((h*(x - R + b))/b)*sqrt(R**2 - x**2), (x, R - b, R))
assert not e.has(nan)
# See that it evaluates
assert not e.has(Integral)
def test_powers():
assert integrate(2**x + 3**x, x) == 2**x/log(2) + 3**x/log(3)
def test_manual_option():
raises(ValueError, lambda: integrate(1/x, x, manual=True, meijerg=True))
# an example of a function that manual integration cannot handle
assert integrate(log(1+x)/x, (x, 0, 1), manual=True).has(Integral)
def test_meijerg_option():
raises(ValueError, lambda: integrate(1/x, x, meijerg=True, risch=True))
# an example of a function that meijerg integration cannot handle
assert integrate(tan(x), x, meijerg=True) == Integral(tan(x), x)
def test_risch_option():
# risch=True only allowed on indefinite integrals
raises(ValueError, lambda: integrate(1/log(x), (x, 0, oo), risch=True))
assert integrate(exp(-x**2), x, risch=True) == NonElementaryIntegral(exp(-x**2), x)
assert integrate(log(1/x)*y, x, y, risch=True) == y**2*(x*log(1/x)/2 + x/2)
assert integrate(erf(x), x, risch=True) == Integral(erf(x), x)
# TODO: How to test risch=False?
def test_heurisch_option():
raises(ValueError, lambda: integrate(1/x, x, risch=True, heurisch=True))
# an integral that heurisch can handle
assert integrate(exp(x**2), x, heurisch=True) == sqrt(pi)*erfi(x)/2
# an integral that heurisch currently cannot handle
assert integrate(exp(x)/x, x, heurisch=True) == Integral(exp(x)/x, x)
# an integral where heurisch currently hangs, issue 15471
assert integrate(log(x)*cos(log(x))/x**Rational(3, 4), x, heurisch=False) == (
-128*x**Rational(1, 4)*sin(log(x))/289 + 240*x**Rational(1, 4)*cos(log(x))/289 +
(16*x**Rational(1, 4)*sin(log(x))/17 + 4*x**Rational(1, 4)*cos(log(x))/17)*log(x))
def test_issue_6828():
f = 1/(1.08*x**2 - 4.3)
g = integrate(f, x).diff(x)
assert verify_numerically(f, g, tol=1e-12)
def test_issue_4803():
x_max = Symbol("x_max")
assert integrate(y/pi*exp(-(x_max - x)/cos(a)), x) == \
y*exp((x - x_max)/cos(a))*cos(a)/pi
def test_issue_4234():
assert integrate(1/sqrt(1 + tan(x)**2)) == tan(x)/sqrt(1 + tan(x)**2)
def test_issue_4492():
assert simplify(integrate(x**2 * sqrt(5 - x**2), x)) == Piecewise(
(I*(2*x**5 - 15*x**3 + 25*x - 25*sqrt(x**2 - 5)*acosh(sqrt(5)*x/5)) /
(8*sqrt(x**2 - 5)), 1 < Abs(x**2)/5),
((-2*x**5 + 15*x**3 - 25*x + 25*sqrt(-x**2 + 5)*asin(sqrt(5)*x/5)) /
(8*sqrt(-x**2 + 5)), True))
def test_issue_2708():
# This test needs to use an integration function that can
# not be evaluated in closed form. Update as needed.
f = 1/(a + z + log(z))
integral_f = NonElementaryIntegral(f, (z, 2, 3))
assert Integral(f, (z, 2, 3)).doit() == integral_f
assert integrate(f + exp(z), (z, 2, 3)) == integral_f - exp(2) + exp(3)
assert integrate(2*f + exp(z), (z, 2, 3)) == \
2*integral_f - exp(2) + exp(3)
assert integrate(exp(1.2*n*s*z*(-t + z)/t), (z, 0, x)) == \
NonElementaryIntegral(exp(-1.2*n*s*z)*exp(1.2*n*s*z**2/t),
(z, 0, x))
def test_issue_2884():
f = (4.000002016020*x + 4.000002016020*y + 4.000006024032)*exp(10.0*x)
e = integrate(f, (x, 0.1, 0.2))
assert str(e) == '1.86831064982608*y + 2.16387491480008'
def test_issue_8368():
assert integrate(exp(-s*x)*cosh(x), (x, 0, oo)) == \
Piecewise(
( pi*Piecewise(
( -s/(pi*(-s**2 + 1)),
Abs(s**2) < 1),
( 1/(pi*s*(1 - 1/s**2)),
Abs(s**(-2)) < 1),
( meijerg(
((S.Half,), (0, 0)),
((0, S.Half), (0,)),
polar_lift(s)**2),
True)
),
And(
Abs(periodic_argument(polar_lift(s)**2, oo)) < pi,
cos(Abs(periodic_argument(polar_lift(s)**2, oo))/2)*sqrt(Abs(s**2)) - 1 > 0,
Ne(s**2, 1))
),
(
Integral(exp(-s*x)*cosh(x), (x, 0, oo)),
True))
assert integrate(exp(-s*x)*sinh(x), (x, 0, oo)) == \
Piecewise(
( -1/(s + 1)/2 - 1/(-s + 1)/2,
And(
Ne(1/s, 1),
Abs(periodic_argument(s, oo)) < pi/2,
Abs(periodic_argument(s, oo)) <= pi/2,
cos(Abs(periodic_argument(s, oo)))*Abs(s) - 1 > 0)),
( Integral(exp(-s*x)*sinh(x), (x, 0, oo)),
True))
def test_issue_8901():
assert integrate(sinh(1.0*x)) == 1.0*cosh(1.0*x)
assert integrate(tanh(1.0*x)) == 1.0*x - 1.0*log(tanh(1.0*x) + 1)
assert integrate(tanh(x)) == x - log(tanh(x) + 1)
@slow
def test_issue_8945():
assert integrate(sin(x)**3/x, (x, 0, 1)) == -Si(3)/4 + 3*Si(1)/4
assert integrate(sin(x)**3/x, (x, 0, oo)) == pi/4
assert integrate(cos(x)**2/x**2, x) == -Si(2*x) - cos(2*x)/(2*x) - 1/(2*x)
@slow
def test_issue_7130():
if ON_TRAVIS:
skip("Too slow for travis.")
i, L, a, b = symbols('i L a b')
integrand = (cos(pi*i*x/L)**2 / (a + b*x)).rewrite(exp)
assert x not in integrate(integrand, (x, 0, L)).free_symbols
def test_issue_10567():
a, b, c, t = symbols('a b c t')
vt = Matrix([a*t, b, c])
assert integrate(vt, t) == Integral(vt, t).doit()
assert integrate(vt, t) == Matrix([[a*t**2/2], [b*t], [c*t]])
def test_issue_11856():
t = symbols('t')
assert integrate(sinc(pi*t), t) == Si(pi*t)/pi
@slow
def test_issue_11876():
assert integrate(sqrt(log(1/x)), (x, 0, 1)) == sqrt(pi)/2
def test_issue_4950():
assert integrate((-60*exp(x) - 19.2*exp(4*x))*exp(4*x), x) ==\
-2.4*exp(8*x) - 12.0*exp(5*x)
def test_issue_4968():
assert integrate(sin(log(x**2))) == x*sin(2*log(x))/5 - 2*x*cos(2*log(x))/5
def test_singularities():
assert integrate(1/x**2, (x, -oo, oo)) is oo
assert integrate(1/x**2, (x, -1, 1)) is oo
assert integrate(1/(x - 1)**2, (x, -2, 2)) is oo
assert integrate(1/x**2, (x, 1, -1)) is -oo
assert integrate(1/(x - 1)**2, (x, 2, -2)) is -oo
def test_issue_12645():
x, y = symbols('x y', real=True)
assert (integrate(sin(x*x*x + y*y),
(x, -sqrt(pi - y*y), sqrt(pi - y*y)),
(y, -sqrt(pi), sqrt(pi)))
== Integral(sin(x**3 + y**2),
(x, -sqrt(-y**2 + pi), sqrt(-y**2 + pi)),
(y, -sqrt(pi), sqrt(pi))))
def test_issue_12677():
assert integrate(sin(x) / (cos(x)**3) , (x, 0, pi/6)) == Rational(1,6)
def test_issue_14078():
assert integrate((cos(3*x)-cos(x))/x, (x, 0, oo)) == -log(3)
def test_issue_14064():
assert integrate(1/cosh(x), (x, 0, oo)) == pi/2
def test_issue_14027():
assert integrate(1/(1 + exp(x - S.Half)/(1 + exp(x))), x) == \
x - exp(S.Half)*log(exp(x) + exp(S.Half)/(1 + exp(S.Half)))/(exp(S.Half) + E)
def test_issue_8170():
assert integrate(tan(x), (x, 0, pi/2)) is S.Infinity
def test_issue_8440_14040():
assert integrate(1/x, (x, -1, 1)) is S.NaN
assert integrate(1/(x + 1), (x, -2, 3)) is S.NaN
def test_issue_14096():
assert integrate(1/(x + y)**2, (x, 0, 1)) == -1/(y + 1) + 1/y
assert integrate(1/(1 + x + y + z)**2, (x, 0, 1), (y, 0, 1), (z, 0, 1)) == \
-4*log(4) - 6*log(2) + 9*log(3)
def test_issue_14144():
assert Abs(integrate(1/sqrt(1 - x**3), (x, 0, 1)).n() - 1.402182) < 1e-6
assert Abs(integrate(sqrt(1 - x**3), (x, 0, 1)).n() - 0.841309) < 1e-6
def test_issue_14375():
# This raised a TypeError. The antiderivative has exp_polar, which
# may be possible to unpolarify, so the exact output is not asserted here.
assert integrate(exp(I*x)*log(x), x).has(Ei)
def test_issue_14437():
f = Function('f')(x, y, z)
assert integrate(f, (x, 0, 1), (y, 0, 2), (z, 0, 3)) == \
Integral(f, (x, 0, 1), (y, 0, 2), (z, 0, 3))
def test_issue_14470():
assert integrate(1/sqrt(exp(x) + 1), x) == \
log(-1 + 1/sqrt(exp(x) + 1)) - log(1 + 1/sqrt(exp(x) + 1))
def test_issue_14877():
f = exp(1 - exp(x**2)*x + 2*x**2)*(2*x**3 + x)/(1 - exp(x**2)*x)**2
assert integrate(f, x) == \
-exp(2*x**2 - x*exp(x**2) + 1)/(x*exp(3*x**2) - exp(2*x**2))
def test_issue_14782():
f = sqrt(-x**2 + 1)*(-x**2 + x)
assert integrate(f, [x, -1, 1]) == - pi / 8
@slow
def test_issue_14782_slow():
f = sqrt(-x**2 + 1)*(-x**2 + x)
assert integrate(f, [x, 0, 1]) == S.One / 3 - pi / 16
def test_issue_12081():
f = x**(Rational(-3, 2))*exp(-x)
assert integrate(f, [x, 0, oo]) is oo
def test_issue_15285():
y = 1/x - 1
f = 4*y*exp(-2*y)/x**2
assert integrate(f, [x, 0, 1]) == 1
def test_issue_15432():
assert integrate(x**n * exp(-x) * log(x), (x, 0, oo)).gammasimp() == Piecewise(
(gamma(n + 1)*polygamma(0, n) + gamma(n + 1)/n, re(n) + 1 > 0),
(Integral(x**n*exp(-x)*log(x), (x, 0, oo)), True))
def test_issue_15124():
omega = IndexedBase('omega')
m, p = symbols('m p', cls=Idx)
assert integrate(exp(x*I*(omega[m] + omega[p])), x, conds='none') == \
-I*exp(I*x*omega[m])*exp(I*x*omega[p])/(omega[m] + omega[p])
def test_issue_15218():
assert Eq(x, y).integrate(x) == Eq(x**2/2, x*y)
assert Integral(Eq(x, y), x) == Eq(Integral(x, x), Integral(y, x))
assert Integral(Eq(x, y), x).doit() == Eq(x**2/2, x*y)
def test_issue_15292():
res = integrate(exp(-x**2*cos(2*t)) * cos(x**2*sin(2*t)), (x, 0, oo))
assert isinstance(res, Piecewise)
assert gammasimp((res - sqrt(pi)/2 * cos(t)).subs(t, pi/6)) == 0
def test_issue_4514():
assert integrate(sin(2*x)/sin(x), x) == 2*sin(x)
def test_issue_15457():
x, a, b = symbols('x a b', real=True)
definite = integrate(exp(Abs(x-2)), (x, a, b))
indefinite = integrate(exp(Abs(x-2)), x)
assert definite.subs({a: 1, b: 3}) == -2 + 2*E
assert indefinite.subs(x, 3) - indefinite.subs(x, 1) == -2 + 2*E
assert definite.subs({a: -3, b: -1}) == -exp(3) + exp(5)
assert indefinite.subs(x, -1) - indefinite.subs(x, -3) == -exp(3) + exp(5)
def test_issue_15431():
assert integrate(x*exp(x)*log(x), x) == \
(x*exp(x) - exp(x))*log(x) - exp(x) + Ei(x)
def test_issue_15640_log_substitutions():
f = x/log(x)
F = Ei(2*log(x))
assert integrate(f, x) == F and F.diff(x) == f
f = x**3/log(x)**2
F = -x**4/log(x) + 4*Ei(4*log(x))
assert integrate(f, x) == F and F.diff(x) == f
f = sqrt(log(x))/x**2
F = -sqrt(pi)*erfc(sqrt(log(x)))/2 - sqrt(log(x))/x
assert integrate(f, x) == F and F.diff(x) == f
def test_issue_15509():
from sympy.vector import CoordSys3D
N = CoordSys3D('N')
x = N.x
assert integrate(cos(a*x + b), (x, x_1, x_2), heurisch=True) == Piecewise(
(-sin(a*x_1 + b)/a + sin(a*x_2 + b)/a, (a > -oo) & (a < oo) & Ne(a, 0)), \
(-x_1*cos(b) + x_2*cos(b), True))
def test_issue_4311_fast():
x = symbols('x', real=True)
assert integrate(x*abs(9-x**2), x) == Piecewise(
(x**4/4 - 9*x**2/2, x <= -3),
(-x**4/4 + 9*x**2/2 - Rational(81, 2), x <= 3),
(x**4/4 - 9*x**2/2, True))
def test_integrate_with_complex_constants():
K = Symbol('K', real=True, positive=True)
x = Symbol('x', real=True)
m = Symbol('m', real=True)
assert integrate(exp(-I*K*x**2+m*x), x) == sqrt(I)*sqrt(pi)*exp(-I*m**2
/(4*K))*erfi((-2*I*K*x + m)/(2*sqrt(K)*sqrt(-I)))/(2*sqrt(K))
assert integrate(1/(1 + I*x**2), x) == -sqrt(I)*log(x - sqrt(I))/2 +\
sqrt(I)*log(x + sqrt(I))/2
assert integrate(exp(-I*x**2), x) == sqrt(pi)*erf(sqrt(I)*x)/(2*sqrt(I))
def test_issue_14241():
x = Symbol('x')
n = Symbol('n', positive=True, integer=True)
assert integrate(n * x ** (n - 1) / (x + 1), x) == \
n**2*x**n*lerchphi(x*exp_polar(I*pi), 1, n)*gamma(n)/gamma(n + 1)
def test_issue_13112():
assert integrate(sin(t)**2 / (5 - 4*cos(t)), [t, 0, 2*pi]) == pi / 4
def test_issue_14709b():
h = Symbol('h', positive=True)
i = integrate(x*acos(1 - 2*x/h), (x, 0, h))
assert i == 5*h**2*pi/16
def test_issue_8614():
x = Symbol('x')
t = Symbol('t')
assert integrate(exp(t)/t, (t, -oo, x)) == Ei(x)
assert integrate((exp(-x) - exp(-2*x))/x, (x, 0, oo)) == log(2)
def test_issue_15494():
s = symbols('s', real=True, positive=True)
integrand = (exp(s/2) - 2*exp(1.6*s) + exp(s))*exp(s)
solution = integrate(integrand, s)
assert solution != S.NaN
# Not sure how to test this properly as it is a symbolic expression with floats
# assert str(solution) == '0.666666666666667*exp(1.5*s) + 0.5*exp(2.0*s) - 0.769230769230769*exp(2.6*s)'
# Maybe
assert abs(solution.subs(s, 1) - (-3.67440080236188)) <= 1e-8
integrand = (exp(s/2) - 2*exp(S(8)/5*s) + exp(s))*exp(s)
assert integrate(integrand, s) == -10*exp(13*s/5)/13 + 2*exp(3*s/2)/3 + exp(2*s)/2
def test_li_integral():
y = Symbol('y')
assert Integral(li(y*x**2), x).doit() == Piecewise(
(x*li(x**2*y) - x*Ei(3*log(x) + 3*log(y)/2)/(sqrt(y)*sqrt(x**2)), Ne(y, 0)),
(0, True))
def test_issue_17473():
x = Symbol('x')
n = Symbol('n')
assert integrate(sin(x**n), x) == \
x*x**n*gamma(S(1)/2 + 1/(2*n))*hyper((S(1)/2 + 1/(2*n),),
(S(3)/2, S(3)/2 + 1/(2*n)),
-x**(2*n)/4)/(2*n*gamma(S(3)/2 + 1/(2*n)))
def test_issue_17671():
assert integrate(log(log(x)) / x**2, [x, 1, oo]) == -EulerGamma
assert integrate(log(log(x)) / x**3, [x, 1, oo]) == -log(2)/2 - EulerGamma/2
assert integrate(log(log(x)) / x**10, [x, 1, oo]) == -2*log(3)/9 - EulerGamma/9
| bsd-3-clause | -3,841,913,268,088,145,000 | 35.919086 | 108 | 0.53326 | false |
ArcherSys/ArcherSys | Lib/test/testcodec.py | 1 | 3278 | <<<<<<< HEAD
<<<<<<< HEAD
""" Test Codecs (used by test_charmapcodec)
Written by Marc-Andre Lemburg ([email protected]).
(c) Copyright 2000 Guido van Rossum.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_map)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_map)
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return (Codec().encode,Codec().decode,StreamReader,StreamWriter)
### Decoding Map
decoding_map = codecs.make_identity_dict(range(256))
decoding_map.update({
0x78: "abc", # 1-n decoding mapping
b"abc": 0x0078,# 1-n encoding mapping
0x01: None, # decoding mapping to <undefined>
0x79: "", # decoding mapping to <remove character>
})
### Encoding Map
encoding_map = {}
for k,v in decoding_map.items():
encoding_map[v] = k
| mit | 195,848,417,422,403,780 | 20.853333 | 68 | 0.680293 | false |
ebrelsford/django-moderation | setup.py | 1 | 1044 | from setuptools import setup, find_packages
import os
version = '0.3.2'
setup(name='django-moderation',
version=version,
description="Generic Django objects moderation application",
long_description=open("README.rst").read() + "\n" +
open(os.path.join("docs", "HISTORY.txt")).read(),
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Framework :: Django',
],
keywords='django moderation models',
author='Dominik Szopa',
author_email='[email protected]',
url='http://github.com/dominno/django-moderation',
license='BSD',
packages = find_packages('src'),
package_dir = {'': 'src'},
include_package_data=True,
install_requires=[
'setuptools',
],
zip_safe=False,
)
| bsd-3-clause | -987,593,874,246,280,200 | 30.636364 | 72 | 0.582375 | false |
akx/coffin | tests/test_env.py | 1 | 1798 | """Test construction of the implicitly provided JinjaEnvironment,
in the common.py module.
"""
from coffin.common import get_env
from django.test.utils import override_settings
def test_i18n():
with override_settings(USE_I18N=True):
assert get_env().from_string('{{ _("test") }}').render() == 'test'
class TestLoaders:
def test_django_loader_replace(self):
from coffin.template.loaders import jinja_loader_from_django_loader
from jinja2 import loaders
# Test replacement of filesystem loader
l = jinja_loader_from_django_loader('django.template.loaders.filesystem.Loader')
assert isinstance(l, loaders.FileSystemLoader)
# Since we don't do exact matches for the loader string, make sure we
# are not replacing loaders that are outside the Django namespace.
l = jinja_loader_from_django_loader('djangoaddon.template.loaders.filesystem.Loader')
assert not isinstance(l, loaders.FileSystemLoader)
def test_cached_loader(self):
from jinja2 import loaders
with override_settings(TEMPLATE_LOADERS=[
('django.template.loaders.cached.Loader', (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
)),]):
env = get_env()
assert len(env.loader.loaders) == 1
cached_loader = get_env().loader.loaders[0]
assert hasattr(cached_loader, 'template_cache')
assert len(cached_loader.loader.loaders) == 2
assert isinstance(cached_loader.loader.loaders[0], loaders.FileSystemLoader)
# the cached loader can find a template too.
assert env.loader.load(env, 'render-x.html').render({'x': 'foo'}) == 'foo'
| bsd-3-clause | 4,239,459,690,103,146,500 | 38.086957 | 93 | 0.657953 | false |
piyushroshan/tuxofwar2012 | questiondb.py | 1 | 1400 | # Model Defining Questions Database
import string
from google.appengine.ext import db
class questionm(db.Model):
questionNumber = db.IntegerProperty(required=True)
question = db.StringProperty(required=True, multiline=True)
qimage = db.StringProperty()
opt1 = db.StringProperty(required=True, multiline=True)
opt2 = db.StringProperty(required=True, multiline=True)
opt3 = db.StringProperty(required=True, multiline=True)
opt4 = db.StringProperty(required=True, multiline=True)
ans = db.StringProperty(required=True)
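# getQuestion(num, var) below returns a JSON-formatted string of the form
# {"num": "...", "question": "...", "image": "...", "options": [...]}, built by
# plain string concatenation; when the question number is not found it returns a
# placeholder object with an apology message instead.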
def getQuestion(num,var):
query = questionm.all()
q = query.filter('questionNumber =',num).get()
if q:
return ("{"+
"\"num\" : " + "\""+ str(var) +"\""+","+
"\"question\" : "+"\""+q.question.replace('\r\n','<br />')+"\""+","+
"\"image\" : "+"\""+q.qimage+"\""+","+
"\"options\" : " + "["+
"\""+q.opt1.replace('\r\n','<br />')+"\""+","+
"\""+q.opt2.replace('\r\n','<br />')+"\""+","+
"\""+q.opt3.replace('\r\n','<br />')+"\""+","+
"\""+q.opt4.replace('\r\n','<br />')+"\""+
"]"+
"}")
else:
return ("{"+
"\"num\" : " + "\""+"\""+","+
"\"question\" : "+"\""+"Sorry question not found. We'll fix it Soon"+"\""+","+
"\"image\" : "+"\""+"\""+","+
"\"options\" : " + "["+
"\""+""+"\""+","+
"\""+""+"\""+","+
"\""+""+"\""+","+
"\""+""+"\""+
"]"+
"}")
| gpl-2.0 | -2,501,730,747,631,768,600 | 32.333333 | 81 | 0.487857 | false |
playpauseandstop/setman | setman/utils/importlib.py | 1 | 1408 | """
Backported from `importlib <http://pypi.python.org/pypi/importlib>` library,
which was itself backported from the Python 3.x branch.
"""
# While not critical (and in no way guaranteed!), it would be nice to keep this
# code compatible with Python 2.3.
import sys
def _resolve_name(name, package, level):
"""Return the absolute name of the module to be imported."""
if not hasattr(package, 'rindex'):
raise ValueError("'package' not set to a string")
dot = len(package)
for x in xrange(level, 1, -1):
try:
dot = package.rindex('.', 0, dot)
except ValueError:
raise ValueError("attempted relative import beyond top-level "
"package")
return "%s.%s" % (package[:dot], name)
def import_module(name, package=None):
"""Import a module.
The 'package' argument is required when performing a relative import. It
specifies the package to use as the anchor point from which to resolve the
relative import to an absolute import.
"""
if name.startswith('.'):
if not package:
raise TypeError("relative imports require the 'package' argument")
level = 0
for character in name:
if character != '.':
break
level += 1
name = _resolve_name(name[level:], package, level)
__import__(name)
return sys.modules[name]
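# Illustrative usage (module names here are examples, not part of this backport):
#   import_module('json')               # absolute import
#   import_module('.utils', 'setman')   # relative import anchored at the 'setman' package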
| bsd-3-clause | -9,140,119,519,811,707,000 | 33.341463 | 79 | 0.617898 | false |
authman/Python201609 | Nguyen_Ken/Assignments/Flask/registration_form/server.py | 1 | 1721 | from flask import Flask, render_template, request, redirect, session, flash
import re
app = Flask(__name__)
app.secret_key = 'secretsquirrel'
EMAIL_REGEX = re.compile(r'^[a-zA-Z0-9.+_-]+@[a-zA-Z0-9._-]+\.[a-zA-Z]+$')
@app.route('/')
def index():
return render_template('index.html')
@app.route('/process', methods=['POST'])
def process():
session['email'] = request.form['email']
session['first_name'] = request.form['first_name']
session['last_name'] = request.form['last_name']
session['password'] = request.form['password1']
session['confirm_password'] = request.form['password2']
if len(session['email']) < 1:
flash('Please enter your email', 'error')
elif not EMAIL_REGEX.match(session['email']):
flash('That is not a valid email address', 'error')
elif len(session['first_name']) < 1:
flash('Please enter your first name', 'error')
elif not session['first_name'].isalpha():
flash('Your name cannot contain numbers or special characters', 'error')
elif len(session['last_name']) < 1:
flash('Please enter your last name', 'error')
elif not session['last_name'].isalpha():
flash('Your name cannot contain numbers or special characters', 'error')
elif len(session['password']) < 1:
flash('Please enter a password', 'error')
elif len(session['password']) < 8:
flash('Your password must be greater than 8 characters', 'error')
elif not session['confirm_password'] == session['password']:
flash('Your password does not match!', 'error')
else:
flash('Thanks for submitting your information', 'success')
return redirect('/')
app.run(debug=True)
| mit | 6,433,111,740,301,173,000 | 27.683333 | 80 | 0.632191 | false |
altova/sec-edgar-tools | sec_filing_to_xlsx.py | 1 | 11948 | # Copyright 2015 Altova GmbH
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__copyright__ = "Copyright 2015-2017 Altova GmbH"
__license__ = 'http://www.apache.org/licenses/LICENSE-2.0'
# This script generates Excel reports from a SEC EDGAR filing.
# NOTE: You must first download the source code of the 3rd party Python module xlsxwriter from https://pypi.python.org/pypi/XlsxWriter
# and extract the xlsxwriter folder in the archive to the lib/python3.4 subfolder of the RaptorXML server installation directory.
#
# Example invocation:
# raptorxmlxbrl valxbrl --script=sec_filing_to_xlsx.py nanonull.xbrl
import os, datetime, itertools
from altova import *
try:
import xlsxwriter
except:
    raise ImportError('Please install the 3rd party python module xlsxwriter from https://pypi.python.org/pypi/XlsxWriter')
lang='en-US'
formats = {}
def isPeriodStart(role):
return role in (
'http://www.xbrl.org/2003/role/periodStartLabel',
'http://xbrl.us/us-gaap/role/label/negatedPeriodStart',
'http://www.xbrl.org/2009/role/negatedPeriodStartLabel'
)
def isPeriodEnd(role):
return role in (
'http://www.xbrl.org/2003/role/periodEndLabel',
'http://xbrl.us/us-gaap/role/label/negatedPeriodEnd',
'http://www.xbrl.org/2009/role/negatedPeriodEndLabel'
)
def isTotal(role):
return role in (
'http://www.xbrl.org/2003/role/totalLabel',
'http://xbrl.us/us-gaap/role/label/negatedTotal',
'http://www.xbrl.org/2009/role/negatedTotalLabel'
)
def isNegated(role):
return role in (
'http://xbrl.us/us-gaap/role/label/negated',
'http://www.xbrl.org/2009/role/negatedLabel',
'http://www.xbrl.org/2009/role/negatedNetLabel',
'http://xbrl.us/us-gaap/role/label/negatedPeriodEnd',
'http://www.xbrl.org/2009/role/negatedPeriodEndLabel',
'http://xbrl.us/us-gaap/role/label/negatedPeriodStart',
'http://www.xbrl.org/2009/role/negatedPeriodStartLabel',
'http://www.xbrl.org/2009/role/negatedTerseLabel',
'http://xbrl.us/us-gaap/role/label/negatedTotal',
'http://www.xbrl.org/2009/role/negatedTotalLabel'
)
def domainMembersFromPresentationTreeRecursive(network,parent,domain_members):
for rel in network.relationships_from(parent):
domain_members.append(rel.target)
domainMembersFromPresentationTreeRecursive(network,rel.target,domain_members)
def conceptsFromPresentationTreeRecursive(network,parent,concepts):
for rel in network.relationships_from(parent):
if not rel.target.abstract:
concepts.append((rel.target,rel.preferred_label))
conceptsFromPresentationTreeRecursive(network,rel.target,concepts)
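# analyzePresentationTree: walks the relationships under the first root and splits
# them into (a) an ordered list of non-abstract line-item concepts paired with
# their preferred label roles and (b) a {dimension: [domain members]} map collected
# from any hypercube found below the root.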
def analyzePresentationTree(network,roots):
concepts = []
dimensions = {}
for rel in network.relationships_from(roots[0]):
if isinstance(rel.target,xbrl.xdt.Hypercube):
for rel2 in network.relationships_from(rel.target):
if isinstance(rel2.target,xbrl.xdt.Dimension):
domainMembersFromPresentationTreeRecursive(network,rel2.target,dimensions.setdefault(rel2.target,[]))
else:
conceptsFromPresentationTreeRecursive(network,rel2.target,concepts)
else:
conceptsFromPresentationTreeRecursive(network,rel.target,concepts)
return concepts, dimensions
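# calcTableData: builds one table column per context, eliminating contexts whose
# dimension values fall outside the collected domain members.  periodStart/periodEnd
# preferred labels are resolved to the start/end instant of a duration period, and
# (as a heuristic) cash-flow tables keep only columns that actually contain a
# cash-related duration concept.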
def calcTableData(instance,role,contexts,concepts,dimensions):
table = {'columns': [], 'height': len(concepts)}
bIsCashFlow = 'cash' in role[1].lower() and 'flow' in role[1].lower()
for context in contexts:
cs = xbrl.ConstraintSet(context)
period = cs[xbrl.Aspect.PERIOD]
dimension_aspects = [value for aspect,value in cs.items() if isinstance(aspect,xbrl.xdt.Dimension)]
bEliminate = False
for val in dimension_aspects:
domain = dimensions.get(val.dimension,None)
if not domain or val.value not in domain:
bEliminate = True
for dim in set(dimensions.keys())-set([value.dimension for value in dimension_aspects]):
if dim.default_member and dim.default_member not in dimensions[dim]:
bEliminate = True
if bEliminate:
continue
bEmpty = True
bHasCash = False
column = {'period': period, 'dimensions': dimension_aspects, 'rows': []}
for concept in concepts:
cs[xbrl.Aspect.CONCEPT] = concept[0]
if isPeriodStart(concept[1]):
if period.period_type == xbrl.PeriodType.START_END:
cs[xbrl.Aspect.PERIOD] = xbrl.PeriodAspectValue.from_instant(period.start)
else:
column['rows'].append({'concept': concept, 'facts': xbrl.FactSet()})
continue
elif isPeriodEnd(concept[1]):
if period.period_type == xbrl.PeriodType.START_END:
cs[xbrl.Aspect.PERIOD] = xbrl.PeriodAspectValue.from_instant(period.end)
else:
column['rows'].append({'concept': concept, 'facts': xbrl.FactSet()})
continue
else:
cs[xbrl.Aspect.PERIOD] = period
facts = instance.facts.filter(cs,allow_additional_dimensions=False)
if len(facts):
bEmpty = False
if bIsCashFlow and not bHasCash and concept[0].is_duration():
bHasCash = 'cash' in next(iter(concept[0].labels(label_role=concept[1],lang=lang))).text.lower()
column['rows'].append({'concept': concept, 'facts': facts})
if not bEmpty and (not bIsCashFlow or bHasCash):
table['columns'].append(column)
return table
def formatConcept(concept):
preferredLabel = concept[1] if concept[1] else 'http://www.xbrl.org/2003/role/label'
labels = list(concept[0].labels(label_role=preferredLabel,lang=lang))
if labels:
return labels[0].text
return str(concept[0].qname)
def formatPeriod(period):
if period.period_type == xbrl.PeriodType.INSTANT:
return period.instant.strftime('%d. %B %Y')
elif period.period_type == xbrl.PeriodType.START_END:
return '%s to %s' % (period.start.strftime('%d. %B %Y'), period.end.strftime('%d. %B %Y'))
elif period.period_type == xbrl.PeriodType.FOREVER:
return 'Forever'
return ''
def formatDimensionValue(dimValue):
return formatConcept((dimValue.value,'http://www.xbrl.org/2003/role/terseLabel'))
def formatFact(dts,fact,preferredLabel=None):
if fact.xsi_nil:
return ('#N/A',None)
elif fact.concept.is_numeric():
if fact.concept.is_fraction():
val = fact.effective_fraction_value
else:
val = fact.effective_numeric_value
if isNegated(preferredLabel):
val *= -1
if fact.concept.is_monetary():
if isTotal(preferredLabel):
return (val,formats['monetary_total'])
return (val,formats['monetary'])
return (val,None)
elif fact.concept.is_qname():
concept = dts.resolve_concept(fact.qname_value)
if concept:
for label in concept.labels():
return (label.text,None)
return (str(fact.qname_value),None)
else:
return (fact.normalized_value,None)
def getDuration(column):
p = column['period']
if p.period_type == xbrl.PeriodType.INSTANT:
return 0
return (p.end.year - p.start.year) * 12 + p.end.month - p.start.month
def getEndDate(column):
p = column['period']
if p.period_type == xbrl.PeriodType.INSTANT:
return p.instant
return p.end
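# generateTable: writes one worksheet per presentation role.  Columns are ordered by
# period duration (longest first) and end date; duration columns share a merged
# "<N> Months Ended" header, dimension member labels are written under the date row,
# and footnotes of a fact are attached to its cell as an Excel comment.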
def generateTable(workbook, dts, role, table):
columns = sorted(table['columns'],key=lambda x: (-getDuration(x),getEndDate(x)),reverse=True)
worksheet = workbook.add_worksheet(role[1].split(' - ')[0])
worksheet.set_column(0,0,70)
worksheet.set_column(1,1+len(table['columns']),20)
worksheet.write(0,0,role[1].split(' - ')[2],formats['caption'])
col = 1
row_start = 1
for duration, group in itertools.groupby(columns,key=getDuration):
cols = list(group)
if duration > 0:
if len(cols) > 1:
worksheet.merge_range(0,col,0,col+len(cols)-1,'%d Months Ended' % getDuration(cols[0]),formats['center'])
else:
worksheet.write(0,col,'%d Months Ended' % getDuration(cols[0]),formats['center'])
row = 1
else:
row = 0
for column in cols:
worksheet.write(row,col,getEndDate(column)-datetime.timedelta(days=1),formats['date'])
for i, dimValue in enumerate(column['dimensions']):
dimLabel = formatDimensionValue(dimValue)
if '[Domain]' not in dimLabel:
worksheet.write(row+1+i,col,dimLabel)
col += 1
row_start = max(row_start,row+2+len(column['dimensions']))
for row in range(table['height']):
concept = columns[0]['rows'][row]['concept']
worksheet.write(row_start+row,0,formatConcept(concept),formats['header'])
for col, column in enumerate(columns):
for fact in column['rows'][row]['facts']:
worksheet.write(row_start+row,1+col,*formatFact(dts,fact,concept[1]))
footnotes = [footnote.text for footnote in fact.footnotes(lang=lang)]
if footnotes:
worksheet.write_comment(row_start+row,1+col,'\n'.join(footnotes),{'x_scale':5,'y_scale':2})
def generateTables(path, dts, instance):
global formats
workbook = xlsxwriter.Workbook(path)
formats['center'] = workbook.add_format({'align':'center'})
formats['caption'] = workbook.add_format({'text_wrap':True,'bold':True})
formats['header'] = workbook.add_format({'text_wrap':True})
formats['date'] = workbook.add_format({'num_format':'mmm. d, yyyy','bold':True})
formats['monetary'] = workbook.add_format({'num_format': '#,##0_);[Red](#,##0)'})
formats['monetary_total'] = workbook.add_format({'num_format': '#,##0_);[Red](#,##0)', 'underline':33})
# Calculate table data
tables = {}
contexts = list(instance.contexts)
roles = [(role, dts.role_type(role).definition.value) for role in dts.presentation_link_roles()]
roles = sorted(roles, key=lambda role: role[1].split(' - ')[0])
for role in roles:
presentation_network = dts.presentation_base_set(role[0]).network_of_relationships()
roots = list(presentation_network.roots)
tables[role] = calcTableData(instance,role,contexts,*analyzePresentationTree(presentation_network,roots))
# Generate excel sheet for each non-empty table
for role in roles:
if tables[role]['columns']:
generateTable(workbook, dts, role, tables[role])
workbook.close()
# Main entry point, will be called by RaptorXML after the XBRL instance validation job has finished
def on_xbrl_finished(job, instance):
# instance object will be None if XBRL 2.1 validation was not successful
if instance:
path = os.path.join(job.output_dir,'table.xlsx')
generateTables(path, instance.dts, instance)
# Register new output file with RaptorXML engine
job.append_output_filename(path) | apache-2.0 | -8,564,099,729,764,443,000 | 42.286232 | 134 | 0.640214 | false |
Kyly/mustaske | test/selenium_src/leave_room.py | 1 | 3018 | # -*- coding: utf-8 -*-
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.ui import Select
from selenium.common.exceptions import NoSuchElementException
from selenium.common.exceptions import NoAlertPresentException
import unittest, time, re
class LeaveRoom(unittest.TestCase):
def setUp(self):
self.driver = webdriver.Firefox()
self.driver.implicitly_wait(30)
self.base_url = "http://localhost:3000/"
self.verificationErrors = []
self.accept_next_alert = True
def test_leave_room(self):
driver = self.driver
driver.get(self.base_url)
driver.find_element_by_css_selector("input.form-control").clear()
driver.find_element_by_css_selector("input.form-control").send_keys("Test_Room")
driver.find_element_by_id("make-room").click()
ownerRoomName = driver.find_element_by_css_selector("span.navbar-brand.room-name").text
self.assertEqual("Test_Room",ownerRoomName)
driver.find_element_by_css_selector("span.fa.fa-cogs").click()
ownerRoomID = driver.find_element_by_class_name("drop-down-room-id").text
driver.execute_script("$(window.open('"+self.base_url+"'))")
driver.switch_to_window(driver.window_handles[-1])
driver.find_element_by_css_selector("input.form-control").clear()
driver.find_element_by_css_selector("input.form-control").send_keys(ownerRoomID)
driver.find_element_by_id("join-room").click()
audienceRoomName = driver.find_element_by_css_selector("span.navbar-brand.room-name").text
self.assertEqual(ownerRoomName,audienceRoomName)
driver.find_element_by_css_selector("span.fa.fa-cogs").click()
audienceRoomID = driver.find_element_by_class_name("drop-down-room-id").text
self.assertEqual(ownerRoomID,audienceRoomID)
driver.find_element_by_xpath("//li/ul/li[4]/a/span").click()
try: self.assertTrue(self.is_element_present(By.ID, "join-create-room"))
except AssertionError as e: self.verificationErrors.append(str(e))
def is_element_present(self, how, what):
try: self.driver.find_element(by=how, value=what)
except NoSuchElementException as e: return False
return True
def is_alert_present(self):
try: self.driver.switch_to_alert()
except NoAlertPresentException as e: return False
return True
def close_alert_and_get_its_text(self):
try:
alert = self.driver.switch_to_alert()
alert_text = alert.text
if self.accept_next_alert:
alert.accept()
else:
alert.dismiss()
return alert_text
finally: self.accept_next_alert = True
def tearDown(self):
self.driver.quit()
self.assertEqual([], self.verificationErrors)
if __name__ == "__main__":
unittest.main()
| mit | 4,663,683,868,890,262,000 | 41.507042 | 98 | 0.661034 | false |
jayvdb/flake8-copyright | setup.py | 1 | 1860 | # -=- encoding: utf-8 -=-
#
# Copyright (C) 2014 Savoir-faire Linux Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import unicode_literals
from setuptools import setup
def get_version(fname='flake8_copyright.py'):
with open(fname) as f:
for line in f:
if line.startswith('__version__'):
return eval(line.split('=')[-1])
def get_long_description():
descr = []
for fname in ('README.rst',):
with open(fname) as f:
descr.append(f.read())
return '\n\n'.join(descr)
setup(
name='flake8-copyright',
version=get_version(),
description='Adds copyright checks to flake8',
long_description=get_long_description(),
classifiers=[
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 3',
],
author='Virgil Dupras',
author_email='[email protected]',
url='https://github.com/savoirfairelinux/flake8-copyright',
keywords='pep8 flake8 copyright',
py_modules=['flake8_copyright'],
install_requires=[
'setuptools',
],
entry_points={
'flake8.extension': ['flake8_copyright = flake8_copyright:CopyrightChecker'],
},
)
| gpl-3.0 | 3,025,203,135,656,725,500 | 30.525424 | 85 | 0.667204 | false |
berkmancenter/mediacloud | apps/common/tests/python/mediawords/languages/test_ro.py | 1 | 5631 | from unittest import TestCase
from mediawords.languages.ro import RomanianLanguage
# noinspection SpellCheckingInspection
class TestRomanianLanguage(TestCase):
def setUp(self):
self.__tokenizer = RomanianLanguage()
def test_language_code(self):
assert self.__tokenizer.language_code() == "ro"
def test_sample_sentence(self):
assert len(self.__tokenizer.sample_sentence())
def test_stop_words_map(self):
stop_words = self.__tokenizer.stop_words_map()
assert "acesta" in stop_words
assert "not_a_stopword" not in stop_words
def test_stem(self):
input_words = ["apropierea", "Splaiului"]
expected_stems = ["apropier", "splai"]
actual_stems = self.__tokenizer.stem_words(input_words)
assert expected_stems == actual_stems
def test_split_text_to_sentences(self):
input_text = """
În prezent, din întreg ansamblul mănăstirii s-a mai păstrat doar biserica și o clopotniță. Acestea se află
amplasate pe strada Sapienței din sectorul 5 al municipiului București, în spatele unor blocuri construite
în timpul regimului comunist, din apropierea Splaiului Independenței și a parcului Izvor. În 1813 Mănăstirea
Mihai-Vodă „era printre mănăstirile mari ale țării”.
"""
expected_sentences = [
'În prezent, din întreg ansamblul mănăstirii s-a mai păstrat doar biserica și o clopotniță.',
(
'Acestea se află amplasate pe strada Sapienței din sectorul 5 al municipiului București, în spatele '
'unor blocuri construite în timpul regimului comunist, din apropierea Splaiului Independenței și a '
'parcului Izvor.'
),
'În 1813 Mănăstirea Mihai-Vodă „era printre mănăstirile mari ale țării”.',
]
actual_sentences = self.__tokenizer.split_text_to_sentences(input_text)
assert expected_sentences == actual_sentences
def test_split_text_to_sentences_names(self):
"""Names ("Sf. Mc. Trifon" and others)."""
input_text = """
În prezent în interiorul bisericii există o raclă în care sunt păstrate moștele următorilor Sfinți: Sf. Ioan
Iacob Hozevitul, Sf. Xenia Petrovna, Sf. Teofil, Sf. Mc. Sevastiana, Sf. Mc. Ciprian, Sf. Mc. Iustina, Sf.
Mc. Clement, Sf. Mc. Trifon, Cuv. Auxenție, Sf. Dionisie Zakynthos, Sf. Mc. Anastasie, Sf. Mc. Panaghiotis,
Sf. Spiridon, Sf. Nifon II, Sf. Ignatie Zagorski, Sf. Prooroc Ioan Botezătorul, Cuv. Sava cel Sfințit, Sf.
Mc. Eustatie, Sf. Mc. Theodor Stratilat, Cuv. Paisie, Cuv. Stelian Paflagonul, Sf. Mc. Mercurie, Sf. Mc.
Arhidiacon Ștefan, Sf. Apostol Andrei, Sf. Mc. Dimitrie, Sf. Mc. Haralambie.
"""
expected_sentences = [
(
'În prezent în interiorul bisericii există o raclă în care sunt păstrate moștele următorilor Sfinți: '
'Sf. Ioan Iacob Hozevitul, Sf. Xenia Petrovna, Sf. Teofil, Sf. Mc. Sevastiana, Sf. Mc. Ciprian, Sf. '
'Mc. Iustina, Sf. Mc. Clement, Sf. Mc. Trifon, Cuv. Auxenție, Sf. Dionisie Zakynthos, Sf. Mc. '
'Anastasie, Sf. Mc. Panaghiotis, Sf. Spiridon, Sf. Nifon II, Sf. Ignatie Zagorski, Sf. Prooroc Ioan '
'Botezătorul, Cuv. Sava cel Sfințit, Sf. Mc. Eustatie, Sf. Mc. Theodor Stratilat, Cuv. Paisie, Cuv. '
'Stelian Paflagonul, Sf. Mc. Mercurie, Sf. Mc. Arhidiacon Ștefan, Sf. Apostol Andrei, Sf. Mc. '
'Dimitrie, Sf. Mc. Haralambie.'
),
]
actual_sentences = self.__tokenizer.split_text_to_sentences(input_text)
assert expected_sentences == actual_sentences
def test_split_text_to_sentences_abbreviation(self):
"""Abbreviation ("nr.4")."""
input_text = """
Translatarea în pantă a bisericii, pe o distanță de 289 m și coborâtă pe verticală cu 6,2 m, a avut loc în
anul 1985. Operațiune în sine de translatare a edificiului, de pe Dealul Mihai Vodă, fosta stradă a
Arhivelor nr.2 și până în locul în care se află și astăzi, Strada Sapienței nr.4, în apropierea malului
Dâmboviței, a fost considerată la vremea respectivă o performanță deosebită.
"""
expected_sentences = [
(
'Translatarea în pantă a bisericii, pe o distanță de 289 m și coborâtă pe verticală cu 6,2 m, a avut '
'loc în anul 1985.'
),
(
'Operațiune în sine de translatare a edificiului, de pe Dealul Mihai Vodă, fosta stradă a Arhivelor '
'nr.2 și până în locul în care se află și astăzi, Strada Sapienței nr.4, în apropierea malului '
'Dâmboviței, a fost considerată la vremea respectivă o performanță deosebită.'
),
]
actual_sentences = self.__tokenizer.split_text_to_sentences(input_text)
assert expected_sentences == actual_sentences
def test_split_sentence_to_words(self):
input_sentence = 'În 1813 Mănăstirea Mihai-Vodă „era printre mănăstirile mari ale țării”.'
expected_words = [
'în', '1813', 'mănăstirea', 'mihai-vodă', 'era', 'printre', 'mănăstirile', 'mari', 'ale', 'țării',
]
actual_words = self.__tokenizer.split_sentence_to_words(input_sentence)
assert expected_words == actual_words
| agpl-3.0 | 8,981,776,761,986,026,000 | 53.71 | 120 | 0.634984 | false |
mohitsethi/solum | solum/api/controllers/v1/extension.py | 1 | 2846 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import pecan
from pecan import rest
import six
import wsme
from wsme import types as wtypes
import wsmeext.pecan as wsme_pecan
from solum.api.controllers import common_types
from solum.api.controllers.v1 import types as api_types
from solum.openstack.common.gettextutils import _
class Extension(api_types.Base):
"""The Extension resource represents changes that the Provider has added
onto a Platform in addition to the ones supplied by Solum by default.
This may include additional protocol semantics, resource types,
application lifecycle states, resource attributes, etc. Anything may be
added, as long as it does not contradict the base functionality offered
by Solum.
"""
version = wtypes.text
"Version of the extension."
documentation = common_types.Uri
"Documentation URI to the extension."
@classmethod
def sample(cls):
return cls(uri='http://example.com/v1/extensions/mysql',
name='mysql',
type='extension',
tags=['large'],
project_id='1dae5a09ef2b4d8cbf3594b0eb4f6b94',
user_id='55f41cf46df74320b9486a35f5d28a11',
description='A mysql extension',
version='2.13',
documentation='http://example.com/docs/ext/mysql')
class ExtensionController(rest.RestController):
"""Manages operations on a single extension."""
def __init__(self, extension_id):
pecan.request.context['extension_id'] = extension_id
self._id = extension_id
@wsme_pecan.wsexpose(Extension, wtypes.text)
def get(self):
"""Return this extension."""
error = _("Not implemented")
pecan.response.translatable_error = error
raise wsme.exc.ClientSideError(six.text_type(error))
class ExtensionsController(rest.RestController):
"""Manages operations on the extensions collection."""
@pecan.expose()
def _lookup(self, extension_id, *remainder):
if remainder and not remainder[-1]:
remainder = remainder[:-1]
return ExtensionController(extension_id), remainder
@wsme_pecan.wsexpose([Extension])
def get_all(self):
"""Return all extensions, based on the query provided."""
return []
| apache-2.0 | -1,911,150,574,673,588,200 | 34.575 | 76 | 0.682713 | false |
amacd31/bom_data_parser | tests/test_hrs.py | 1 | 2066 | import os
import numpy as np
import pandas as pd
import unittest
from datetime import datetime
from bom_data_parser import read_hrs_csv
class HRSTest(unittest.TestCase):
def setUp(self):
self.test_cdo_file = os.path.join(os.path.dirname(__file__), 'data', 'HRS', '410730_daily_ts.csv')
def test_hrs(self):
data, attributes = read_hrs_csv(self.test_cdo_file)
self.assertTrue('Q' in data.columns)
self.assertTrue('QCode' in data.columns)
self.assertEqual(attributes['station_name'], 'Cotter River at Gingera (410730)')
self.assertEqual(attributes['catchment_area'], 130.0)
self.assertEqual(attributes['latitude'], 148.8212)
self.assertEqual(attributes['longitude'], -35.5917)
self.assertEqual(data.index[0], datetime(1963,7,5))
self.assertEqual(data.index[-1], datetime(2012,10,4))
self.assertAlmostEqual(data.Q.values[0], 127.312,3)
self.assertAlmostEqual(data.Q.values[-1], 186.238,3)
self.assertEqual(data.QCode.values[0], 10)
self.assertEqual(data.QCode.values[-1], 10)
def test_hrs_201510_format(self):
test_file = os.path.join(os.path.dirname(__file__), 'data', 'HRS', '410730_daily_ts_201510.csv')
data, attributes = read_hrs_csv(test_file)
self.assertTrue('Flow (ML)' in data.columns)
self.assertTrue('Bureau QCode' in data.columns)
self.assertEqual(attributes['station_name'], 'Cotter River at Gingera (410730)')
self.assertEqual(attributes['catchment_area'], 130.0)
self.assertEqual(attributes['latitude'], 148.8212)
self.assertEqual(attributes['longitude'], -35.5917)
self.assertEqual(data.index[0], datetime(1963,7,5))
self.assertEqual(data.index[-1], datetime(2014,12,31))
self.assertAlmostEqual(data['Flow (ML)'].values[0], 127.322,3)
self.assertAlmostEqual(data['Flow (ML)'].values[-1], 16.1915,4)
self.assertEqual(data['Bureau QCode'].values[0], 'A')
self.assertEqual(data['Bureau QCode'].values[-1], 'A')
| bsd-3-clause | 2,147,693,488,770,582,500 | 42.041667 | 106 | 0.659245 | false |
Microsoft/hummingbird | hummingbird/ml/exceptions.py | 1 | 1451 | # -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
"""
Common errors.
"""
_missing_converter = """
It usually means the pipeline being converted contains a
transformer or a predictor with no corresponding converter implemented.
Please file an issue at https://github.com/microsoft/hummingbird.
"""
_missing_backend = """
It usually means the backend is not currently supported.
Please check the spelling or file an issue at https://github.com/microsoft/hummingbird.
"""
_constant_error = """
It usually means a constant is not available or you are trying to override a constant value.
"""
class MissingConverter(RuntimeError):
"""
Raised when there is no registered converter for a machine learning operator.
"""
def __init__(self, msg):
super().__init__(msg + _missing_converter)
class MissingBackend(RuntimeError):
"""
Raised when the selected backend is not supported.
"""
def __init__(self, msg):
super().__init__(msg + _missing_backend)
class ConstantError(TypeError):
"""
Raised when a constant is not available or it get overwritten.
"""
def __init__(self, msg):
super().__init__(msg + _constant_error)
| mit | 3,006,908,187,354,725,400 | 29.229167 | 92 | 0.623019 | false |
zhaogaolong/oneFinger | openstack/api/opentack_ansible.py | 1 | 1743 | #!/usr/bin/env python
# coding:utf8
# import pdb
import ansible.runner
from one_finger.cloud_logging import cloud_logging as logging
log = logging.logger
class CmmAndRun():
def __init__(self, module_name='shell', host=None, cmd=None, timeout=20):
self.username = 'root'
self.module_name = module_name
self.host = host
self.cmd = cmd
self.timeout = timeout
self.update_ansible_hosts()
def update_ansible_hosts(self):
status = False
b = open('/etc/ansible/hosts')
for line in b.readlines():
if self.host in line:
status = True
b.close()
if not status:
b = open('/etc/ansible/hosts','a')
b.writelines(self.host)
b.writelines('\n')
b.close()
def start(self):
runner = ansible.runner.Runner(
module_name=self.module_name,
module_args=self.cmd,
pattern=self.host,
timeout=self.timeout,
)
log.debug('ansible %s RunCommand: %s' % (self.host, self.cmd))
# import pdb
# pdb.set_trace()
datastructure = runner.run()
# print datastructure
log.debug('ansible sttout %s' % datastructure)
# print datastructure
if datastructure['dark']:
pass
else:
if not datastructure['contacted'][self.host]['rc']:
data = datastructure['contacted'][self.host]['stdout']
return data
else:
return None
if __name__ == '__main__':
ac = CmmAndRun(host='172.16.254.1', cmd='date')
print ac.start() | apache-2.0 | -7,011,476,624,317,718,000 | 24.846154 | 77 | 0.522088 | false |
moiseshiraldo/inviMarket | inviMarket/views/register.py | 1 | 2814 | # -*- coding: utf-8 -*-
from django.shortcuts import render, redirect
from django.template.loader import render_to_string
from django.core.mail import send_mail
from django.conf import settings
from django.utils.http import urlsafe_base64_encode
from django.utils.encoding import force_bytes
from django.utils import timezone
from django.utils.translation import ugettext as _
import hashlib
import datetime
import random
from inviMarket.models import Profile
from inviMarket.forms import RegisterForm
def register(request):
"""
Display the user registration form and store the :model:`auth.User` and
his :model:`inviMarket.Profile` in the database.
**Context**
``form``
An instace of the user registration form.
``error``
A string variable containing any general error message.
**Template:**
:template:`inviMarket/register.html`
"""
error = None
if request.user.is_authenticated():
return redirect('index')
if request.method == 'POST':
form = RegisterForm(request.POST)
if 'terms' not in request.POST:
error= _("You must read and accept the terms and conditions.")
elif form.is_valid():
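            # last_name appears to act as a hidden honeypot field: if it is filled
            # in (typically by a bot), pretend success without creating an account.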
if form.cleaned_data['last_name'] != "":
return redirect('confirm')
new_user = form.save()
# Create a random activation key and store it in the user profile
salt = hashlib.sha1(str(random.random())).hexdigest()[:5]
activation_key = hashlib.sha1(salt+new_user.email).hexdigest()
key_expires = timezone.now() + datetime.timedelta(2)
lang = request.LANGUAGE_CODE
profile = Profile(user=new_user, activation_key=activation_key,
key_expires=key_expires, lang=lang, last_visit=timezone.now())
profile.save()
# Send the activation key to the user
text = render_to_string('email/activation.txt',
{'name': new_user.first_name,
'uidb64': urlsafe_base64_encode(force_bytes(new_user.id)),
'key': activation_key,
'domain': settings.DOMAIN,
})
html = render_to_string('email/activation.html',
{'name': new_user.first_name,
'uidb64': urlsafe_base64_encode(force_bytes(new_user.id)),
'key': activation_key,
'domain': settings.DOMAIN,
})
subject = "Account activation"
send_mail(subject, text, "inviMarket <[email protected]>",
[new_user.email], html_message=html,fail_silently=False)
return redirect('confirm')
else:
form = RegisterForm()
return render(request, 'register.html', {'form': form, 'error': error}) | agpl-3.0 | -5,880,959,783,419,259,000 | 36.533333 | 78 | 0.615849 | false |
scealiontach/cryptotrading | src/autotrader/hashnest.py | 1 | 4020 | import urllib,urllib.parse,urllib.request,urllib.error
import json
import time,datetime
import hashlib,hmac,base64
import logging
LOG=logging.getLogger(__name__)
#SELL='sale'
#BUY='purchase'
class hashnest(object):
URL = 'https://www.hashnest.com/api/v1/'
def __init__(self,username,key,secret):
self.username=username
self.key=key
self.secret=secret
def get_nonce(self):
self.utcnow=a=datetime.datetime.utcnow()
b=datetime.datetime(1970,1,1,0,0,0,0)
self.nonce= int((a-b).total_seconds()*1000)
return self.nonce
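    # Request signing: the signature field is HMAC-SHA256 of (nonce + username +
    # access key), keyed with the API secret, and is sent along with access_key
    # and nonce as URL-encoded form parameters.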
def signature(self,req):
nonce=self.get_nonce()
message = str(nonce) + self.username + self.key
req['access_key']=self.key
req['nonce']=nonce
req['signature']= hmac.new(self.secret.encode(), msg=message.encode(), digestmod=hashlib.sha256).hexdigest()
return urllib.parse.urlencode(req)
    def request(self,url,req=None):
        # Use None as the default: signature() mutates this dict, so a shared
        # mutable default argument would leak state between calls.
        if req is None:
            req = {}
url = self.URL + url
data= self.signature(req)
url=url+'?'+data
req = urllib.request.Request(url, method='POST')
retry=True
retry_count=0
while retry:
try:
with urllib.request.urlopen(req) as resp:
r=resp.read()
retObj=json.loads(r.decode())
time.sleep(1)
retry=False
except urllib.error.HTTPError as e:
if e.code!=401:
raise e
else:
if retry_count<10:
time.sleep(retry_count**2)
retry_count+=1
else:
raise e
pass
pass
pass
pass
return retObj
def get_account_info(self):
return self.request('account')
def get_account_balance(self):
return self.request('currency_accounts')
def get_account_hashrate(self):
return self.request('hash_accounts')
def get_account_orders(self,cmi):
param={'currency_market_id':cmi}
return self.request('orders/active',param)
def get_account_trade_history(self,cmi,page=1,page_amount=10):
param={'currency_market_id':cmi}
param['page']=page
param['page_per_amount']=page_amount
return self.request('orders/history',param)
def create_order(self,cmi,amount,ppc,category):
param={'currency_market_id':cmi}
param['amount']=amount
param['ppc']=ppc
param['category']=category
return self.request('orders',param)
def cancel_order(self,order_id):
param={'order_id':order_id}
return self.request('orders/revoke',param)
def cancel_all_orders(self,cmi,category):
param={'currency_market_id':cmi}
param['category']=category
return self.request('orders/quick_revoke',param)
def get_opened_markets(self):
return self.request('currency_markets')
def get_book(self,cmi):
param={'currency_market_id':cmi}
return self.request('currency_markets/orders',param)
def get_trade_history(self,cmi,page=1,page_amount=10):
param={'currency_market_id':cmi}
param['page']=page
param['page_per_amount']=page_amount
param['page_size']=page_amount
return self.request('currency_markets/order_history',param)
def pretty_print_POST(req):
"""
At this point it is completely built and ready
to be fired; it is "prepared".
However pay attention at the formatting used in
this function because it is programmed to be pretty
printed and may differ from the actual request.
"""
return '{}\n{}\n{}\n\n{}'.format(
'-----------START-----------',
req.get_method() + ' ' + req.get_full_url(),
'\n'.join('{}: {}'.format(k, v) for k, v in req.header_items()),
req.data,
)
| apache-2.0 | -3,889,870,154,564,766,000 | 29.687023 | 116 | 0.568159 | false |
sony/nnabla | python/test/utils/learning_rate_scheduler/test_cosine_scheduler.py | 1 | 1249 | # Copyright 2018,2019,2020,2021 Sony Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from learning_rate_scheduler_test_utils import scheduler_tester
import nnabla.utils.learning_rate_scheduler as lrs
import math
class RefCosine(object):
def __init__(self, init_lr, max_iter):
self.init_lr = init_lr
self.max_iter = max_iter
def get_learning_rate(self, iter):
return self.init_lr * ((math.cos(iter * 1.0 / self.max_iter * math.pi) + 1.0) * 0.5)
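# Reference schedule: plain cosine annealing from init_lr down to 0 over max_iter
# iterations, i.e. lr(t) = init_lr * (cos(pi * t / max_iter) + 1) / 2.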
@pytest.mark.parametrize("init_lr", [0.1, 0.01])
@pytest.mark.parametrize("max_iter", [1000, 10000])
def test_cosine_scheduler(init_lr, max_iter):
scheduler_tester(
lrs.CosineScheduler, RefCosine, max_iter, [init_lr, max_iter])
| apache-2.0 | 3,984,834,450,309,616,600 | 33.694444 | 92 | 0.717374 | false |
foxmask/django-th | django_th/html_entities.py | 1 | 1275 | # coding: utf-8
import html.entities as htmlentities
import re
class HtmlEntities:
def __init__(self, my_string):
self.my_string = my_string
def html_entity_decode_char(self, m, defs=htmlentities.entitydefs):
"""
decode html entity into one of the html char
"""
try:
char = defs[m.group(1)]
return "&{char};".format(char=char)
except ValueError:
return m.group(0)
except KeyError:
return m.group(0)
def html_entity_decode_codepoint(self, m,
defs=htmlentities.codepoint2name):
"""
decode html entity into one of the codepoint2name
"""
try:
char = defs[m.group(1)]
return "&{char};".format(char=char)
except ValueError:
return m.group(0)
except KeyError:
return m.group(0)
@property
def html_entity_decode(self):
"""
entry point of this set of tools
to decode html entities
"""
pattern = re.compile(r"&#(\w+?);")
string = pattern.sub(self.html_entity_decode_char, self.my_string)
return pattern.sub(self.html_entity_decode_codepoint, string)
| bsd-3-clause | 8,504,328,862,743,241,000 | 27.977273 | 74 | 0.545098 | false |
cihai/cihai-python | tests/test_conversion.py | 1 | 3543 | # -*- coding: utf-8 -*-
"""Tests for cihai.
test.conversion
~~~~~~~~~~~~~~~
"""
from __future__ import absolute_import, print_function, unicode_literals
from cihai import conversion
from cihai._compat import string_types, text_type
def test_text_type():
c1 = '(same as U+7A69 穩) firm; stable; secure'
c2 = text_type()
assert isinstance(c1, string_types)
assert isinstance(c2, text_type)
"""Return UCN character from Python Unicode character.
Converts a one character Python unicode string (e.g. u'\\u4e00') to the
corresponding Unicode UCN ('U+4E00').
U+369D kSemanticVariant U+595E<kMatthews U+594E<kMatthews
U+3CE2 kTraditionalVariant U+23FB7
U+3FF7 kSemanticVariant U+7CD9<kMatthews,kMeyerWempe
U+345A kDefinition (non-classical form of 那) that, there
U+349A kDefinition (same as U+7A69 穩) firm; stable; secure,
dependent upon others
U+34B5 kMandarin mào
U+356D kCantonese au3 jaau1
"""
def test_ucn_from_unicode():
text = '一'
python_unicode = u'\u4e00'
expected = "U+4E00"
bytes_expected = b"U+4E00"
assert conversion.python_to_ucn(python_unicode) == expected
assert isinstance(conversion.python_to_ucn(python_unicode), text_type)
assert isinstance(conversion.python_to_ucn(python_unicode, as_bytes=True), bytes)
assert conversion.python_to_ucn(text, as_bytes=True) == bytes_expected
def test_ucn_from_unicode_16():
text = '𦄀'
    python_unicode = u'\U00026100'
expected = "U+26100"
bytes_expected = b"U+26100"
assert conversion.python_to_ucn(python_unicode) == expected
assert isinstance(conversion.python_to_ucn(python_unicode), text_type)
assert isinstance(conversion.python_to_ucn(python_unicode, as_bytes=True), bytes)
assert conversion.python_to_ucn(text, as_bytes=True) == bytes_expected
def test_ucn_to_unicode():
before = 'U+4E00'
expected = '\u4e00'
result = conversion.ucn_to_unicode(before)
assert result == expected
assert isinstance(result, text_type)
# wide character
before = 'U+20001'
expected = '\U00020001'
result = conversion.ucn_to_unicode(before)
assert result == expected
assert isinstance(result, text_type)
before = '(same as U+7A69 穩) firm; stable; secure'
expected = '(same as 穩 穩) firm; stable; secure'
result = conversion.ucnstring_to_unicode(before)
assert result == expected
assert isinstance(result, text_type)
"""Return EUC character from a Python Unicode character.
Converts a one character Python unicode string (e.g. u'\\u4e00') to the
corresponding EUC hex ('d2bb').
"""
def test_hexd():
assert conversion.hexd(0xFFFF) == 'ffff'
def test_euc_from_unicode():
expected = '一' # u'\u4e00'
euc_bytestring = b'd2bb'
euc_unicode = 'd2bb'
result = conversion.python_to_euc(expected, as_bytes=True)
assert euc_bytestring == result
assert isinstance(result, bytes)
result = conversion.python_to_euc(expected)
assert euc_unicode == result
assert isinstance(result, text_type)
def test_euc_to_utf8():
expected = '一'
euc_bytestring = b'b0ec'
result = conversion.euc_to_utf8(euc_bytestring)
assert expected == result
def test_euc_to_unicode():
expected = '一'
expected_ustring = u'\u4e00'
euc_bytestring = b'd2bb'
result = conversion.euc_to_unicode(euc_bytestring)
assert expected == expected_ustring
assert isinstance(result, text_type)
assert expected == result
assert expected_ustring == result
| bsd-3-clause | 5,327,725,858,940,054,000 | 23.608392 | 85 | 0.687411 | false |
sajeeshcs/nested_projects_keystone | keystone/credential/controllers.py | 1 | 4503 | # Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import hashlib
from keystone.common import controller
from keystone.common import dependency
from keystone.common import validation
from keystone.credential import schema
from keystone import exception
from keystone.i18n import _
from keystone.openstack.common import jsonutils
@dependency.requires('credential_api')
class CredentialV3(controller.V3Controller):
collection_name = 'credentials'
member_name = 'credential'
def __init__(self):
super(CredentialV3, self).__init__()
self.get_member_from_driver = self.credential_api.get_credential
def _assign_unique_id(self, ref, trust_id=None):
# Generates and assigns a unique identifier to
# a credential reference.
if ref.get('type', '').lower() == 'ec2':
try:
blob = jsonutils.loads(ref.get('blob'))
except (ValueError, TypeError):
raise exception.ValidationError(
message=_('Invalid blob in credential'))
if not blob or not isinstance(blob, dict):
raise exception.ValidationError(attribute='blob',
target='credential')
if blob.get('access') is None:
raise exception.ValidationError(attribute='access',
target='blob')
ret_ref = ref.copy()
ret_ref['id'] = hashlib.sha256(blob['access']).hexdigest()
# Update the blob with the trust_id, so credentials created
# with a trust scoped token will result in trust scoped
# tokens when authentication via ec2tokens happens
if trust_id is not None:
blob['trust_id'] = trust_id
ret_ref['blob'] = jsonutils.dumps(blob)
return ret_ref
else:
return super(CredentialV3, self)._assign_unique_id(ref)
@controller.protected()
@validation.validated(schema.credential_create, 'credential')
def create_credential(self, context, credential):
trust_id = self._get_trust_id_for_request(context)
ref = self._assign_unique_id(self._normalize_dict(credential),
trust_id)
ref = self.credential_api.create_credential(ref['id'], ref)
return CredentialV3.wrap_member(context, ref)
@staticmethod
def _blob_to_json(ref):
# credentials stored via ec2tokens before the fix for #1259584
# need json serializing, as that's the documented API format
blob = ref.get('blob')
if isinstance(blob, dict):
new_ref = ref.copy()
new_ref['blob'] = jsonutils.dumps(blob)
return new_ref
else:
return ref
@controller.filterprotected('user_id')
def list_credentials(self, context, filters):
hints = CredentialV3.build_driver_hints(context, filters)
refs = self.credential_api.list_credentials(hints)
ret_refs = [self._blob_to_json(r) for r in refs]
return CredentialV3.wrap_collection(context, ret_refs,
hints=hints)
@controller.protected()
def get_credential(self, context, credential_id):
ref = self.credential_api.get_credential(credential_id)
ret_ref = self._blob_to_json(ref)
return CredentialV3.wrap_member(context, ret_ref)
@controller.protected()
@validation.validated(schema.credential_update, 'credential')
def update_credential(self, context, credential_id, credential):
self._require_matching_id(credential_id, credential)
ref = self.credential_api.update_credential(credential_id, credential)
return CredentialV3.wrap_member(context, ref)
@controller.protected()
def delete_credential(self, context, credential_id):
return self.credential_api.delete_credential(credential_id)
| apache-2.0 | -4,493,071,998,995,930,600 | 41.084112 | 78 | 0.642016 | false |
guoxuesong/deepstacks | deepstacks/lasagne/utils.py | 1 | 2721 | #!/usr/bin/env python
# coding:utf-8
# vi:tabstop=4:shiftwidth=4:expandtab:sts=4
import theano
import lasagne
from join import join_layer as JoinLayer
from ..utils.curry import curry
floatX = theano.config.floatX
def ordered_errors(errors, m=None, prefix='', deterministic=False):
res = []
for t in errors:
if m is None:
res += [[prefix+t, map(curry(lasagne.layers.get_output,deterministic=deterministic), errors[t])]]
else:
tmp = map(lambda x: JoinLayer(x, m), errors[t])
res += [[prefix+t, map(curry(lasagne.layers.get_output,deterministic=deterministic), tmp)]]
return sorted(res, key=lambda x: x[0])
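# get_loss: flattens the per-tag error lists into a single scalar loss (the mean of
# each error tensor, summed on top of loss0), while recording [tag, slice] pairs so
# per-tag means can be reported individually; watchpoints prefixed with 'train:' are
# tracked alongside the training loss, the rest as validation-only values.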
def get_loss(errors, watchpoints, loss0=None):
errors = ordered_errors(errors)
watch_errors = ordered_errors(watchpoints)
errors1 = []
watch_errors1 = []
train_watch_errors1 = []
tagslice = []
count = 0
valtagslice = []
valcount = 0
for tag, errs in errors:
errors1 += errs
tagslice += [[tag, slice(count, count+len(errs))]]
count += len(errs)
for tag, errs in watch_errors:
if tag.startswith('train:'):
train_watch_errors1 += errs
tagslice += [[tag, slice(count, count+len(errs))]]
count += len(errs)
else:
watch_errors1 += errs
valtagslice += [[tag, slice(valcount, valcount+len(errs))]]
valcount += len(errs)
errors1 = [errors1]
watch_errors1 = [watch_errors1]
train_watch_errors1 = [train_watch_errors1]
loss = loss0 if loss0 is not None else 0.0
losslist = []
vallosslist = []
tmp = 0.0
for ee in errors1:
for err in ee:
if err is not None:
tmp = err.mean(dtype=floatX)
losslist = losslist+[tmp]
loss = loss+tmp
for ee in watch_errors1:
for err in ee:
if err is not None:
tmp = err.mean(dtype=floatX)
vallosslist = vallosslist+[tmp]
# loss = loss+tmp
for ee in train_watch_errors1:
for err in ee:
if err is not None:
tmp = err.mean(dtype=floatX)
losslist = losslist+[tmp]
# loss = loss+tmp
return loss, losslist, tagslice
def get_watchslice(watchpoints):
trainwatch = {}
valwatch = {}
for tag, errs in watchpoints:
if tag.startswith('train:'):
trainwatch[tag] = errs
else:
valwatch[tag] = errs
ig, train_values, train_tagslice = get_loss(trainwatch, [])
ig, val_values, val_tagslice = get_loss(valwatch, [])
return train_values, train_tagslice, val_values, val_tagslice
| mit | 6,609,764,372,025,347,000 | 30.275862 | 109 | 0.575891 | false |
DevynCJohnson/Pybooster | pylib/convvolume.py | 1 | 120739 | #!/usr/bin/env python3
# -*- coding: utf-8; Mode: Python; indent-tabs-mode: nil; tab-width: 4 -*-
# vim: set fileencoding=utf-8 filetype=python syntax=python.doxygen fileformat=unix tabstop=4 expandtab :
# kate: encoding utf-8; bom off; syntax python; indent-mode python; eol unix; replace-tabs off; indent-width 4; tab-width 4; remove-trailing-space on;
"""@brief Volume measurement conversions.
@file convvolume.py
@package pybooster.convvolume
@version 2020.08.08
@author Devyn Collier Johnson <[email protected]>
@copyright LGPLv3
@section LICENSE
GNU Lesser General Public License v3
Copyright (c) Devyn Collier Johnson, All rights reserved.
This software is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This software is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public License
along with this software.
"""
from typing import Union
__all__: list = [
# CANADIAN CUPS #
r'cancup2cubicinch',
r'cancup2cubicmeter',
r'cancup2liter',
r'cancup2metriccup',
r'cancup2metrictablespoon',
r'cancup2metricteaspoon',
r'cancup2ukfluidounce',
r'cancup2ukgallon',
r'cancup2ukgill',
r'cancup2ukpint',
r'cancup2ukquart',
r'cancup2uktablespoon',
r'cancup2ukteaspoon',
r'cancup2uscup',
r'cancup2usdrygallon',
r'cancup2usdrypint',
r'cancup2usdryquart',
r'cancup2usfluidounce',
r'cancup2usgill',
r'cancup2usliquidgallon',
r'cancup2usliquidpint',
r'cancup2usliquidquart',
r'cancup2ustablespoon',
r'cancup2usteaspoon',
# CUBIC INCHES #
r'cubicinch2cancup',
r'cubicinch2cubicmeter',
r'cubicinch2liter',
r'cubicinch2metriccup',
r'cubicinch2metrictablespoon',
r'cubicinch2metricteaspoon',
r'cubicinch2ukfluidounce',
r'cubicinch2ukgallon',
r'cubicinch2ukgill',
r'cubicinch2ukpint',
r'cubicinch2ukquart',
r'cubicinch2uktablespoon',
r'cubicinch2ukteaspoon',
r'cubicinch2uscup',
r'cubicinch2usdrygallon',
r'cubicinch2usdrypint',
r'cubicinch2usdryquart',
r'cubicinch2usfluidounce',
r'cubicinch2usgill',
r'cubicinch2usliquidgallon',
r'cubicinch2usliquidpint',
r'cubicinch2usliquidquart',
r'cubicinch2ustablespoon',
r'cubicinch2usteaspoon',
# CUBIC METERS #
r'cubicmeter2cancup',
r'cubicmeter2cubicinch',
r'cubicmeter2liter',
r'cubicmeter2metriccup',
r'cubicmeter2metrictablespoon',
r'cubicmeter2metricteaspoon',
r'cubicmeter2ukfluidounce',
r'cubicmeter2ukgallon',
r'cubicmeter2ukgill',
r'cubicmeter2ukpint',
r'cubicmeter2ukquart',
r'cubicmeter2uktablespoon',
r'cubicmeter2ukteaspoon',
r'cubicmeter2uscup',
r'cubicmeter2usdrygallon',
r'cubicmeter2usdrypint',
r'cubicmeter2usdryquart',
r'cubicmeter2usfluidounce',
r'cubicmeter2usgill',
r'cubicmeter2usliquidgallon',
r'cubicmeter2usliquidpint',
r'cubicmeter2usliquidquart',
r'cubicmeter2ustablespoon',
r'cubicmeter2usteaspoon',
# LITERS #
r'liter2cancup',
r'liter2cubicinch',
r'liter2cubicmeter',
r'liter2metriccup',
r'liter2metrictablespoon',
r'liter2metricteaspoon',
r'liter2ukfluidounce',
r'liter2ukgallon',
r'liter2ukgill',
r'liter2ukpint',
r'liter2ukquart',
r'liter2uktablespoon',
r'liter2ukteaspoon',
r'liter2uscup',
r'liter2usdrygallon',
r'liter2usdrypint',
r'liter2usdryquart',
r'liter2usfluidounce',
r'liter2usgill',
r'liter2usliquidgallon',
r'liter2usliquidpint',
r'liter2usliquidquart',
r'liter2ustablespoon',
r'liter2usteaspoon',
# METRIC CUPS #
r'metriccup2cancup',
r'metriccup2cubicinch',
r'metriccup2cubicmeter',
r'metriccup2liter',
r'metriccup2metrictablespoon',
r'metriccup2metricteaspoon',
r'metriccup2ukfluidounce',
r'metriccup2ukgallon',
r'metriccup2ukgill',
r'metriccup2ukpint',
r'metriccup2ukquart',
r'metriccup2uktablespoon',
r'metriccup2ukteaspoon',
r'metriccup2uscup',
r'metriccup2usdrygallon',
r'metriccup2usdrypint',
r'metriccup2usdryquart',
r'metriccup2usfluidounce',
r'metriccup2usgill',
r'metriccup2usliquidgallon',
r'metriccup2usliquidpint',
r'metriccup2usliquidquart',
r'metriccup2ustablespoon',
r'metriccup2usteaspoon',
# METRIC TABLESPOONS #
r'metrictablespoon2cancup',
r'metrictablespoon2cubicinch',
r'metrictablespoon2cubicmeter',
r'metrictablespoon2liter',
r'metrictablespoon2metriccup',
r'metrictablespoon2metricteaspoon',
r'metrictablespoon2ukfluidounce',
r'metrictablespoon2ukgallon',
r'metrictablespoon2ukgill',
r'metrictablespoon2ukpint',
r'metrictablespoon2ukquart',
r'metrictablespoon2uktablespoon',
r'metrictablespoon2ukteaspoon',
r'metrictablespoon2uscup',
r'metrictablespoon2usdrygallon',
r'metrictablespoon2usdrypint',
r'metrictablespoon2usdryquart',
r'metrictablespoon2usfluidounce',
r'metrictablespoon2usgill',
r'metrictablespoon2usliquidgallon',
r'metrictablespoon2usliquidpint',
r'metrictablespoon2usliquidquart',
r'metrictablespoon2ustablespoon',
r'metrictablespoon2usteaspoon',
# METRIC TEASPOONS #
r'metricteaspoon2cancup',
r'metricteaspoon2cubicinch',
r'metricteaspoon2cubicmeter',
r'metricteaspoon2liter',
r'metricteaspoon2metriccup',
r'metricteaspoon2metrictablespoon',
r'metricteaspoon2ukfluidounce',
r'metricteaspoon2ukgallon',
r'metricteaspoon2ukgill',
r'metricteaspoon2ukpint',
r'metricteaspoon2ukquart',
r'metricteaspoon2uktablespoon',
r'metricteaspoon2ukteaspoon',
r'metricteaspoon2uscup',
r'metricteaspoon2usdrygallon',
r'metricteaspoon2usdrypint',
r'metricteaspoon2usdryquart',
r'metricteaspoon2usfluidounce',
r'metricteaspoon2usgill',
r'metricteaspoon2usliquidgallon',
r'metricteaspoon2usliquidpint',
r'metricteaspoon2usliquidquart',
r'metricteaspoon2ustablespoon',
r'metricteaspoon2usteaspoon',
# UK FLUID OUNCES #
r'ukfluidounce2cancup',
r'ukfluidounce2cubicinch',
r'ukfluidounce2cubicmeter',
r'ukfluidounce2liter',
r'ukfluidounce2metriccup',
r'ukfluidounce2metrictablespoon',
r'ukfluidounce2metricteaspoon',
r'ukfluidounce2ukgallon',
r'ukfluidounce2ukgill',
r'ukfluidounce2ukpint',
r'ukfluidounce2ukquart',
r'ukfluidounce2uktablespoon',
r'ukfluidounce2ukteaspoon',
r'ukfluidounce2uscup',
r'ukfluidounce2usdrygallon',
r'ukfluidounce2usdrypint',
r'ukfluidounce2usdryquart',
r'ukfluidounce2usfluidounce',
r'ukfluidounce2usgill',
r'ukfluidounce2usliquidgallon',
r'ukfluidounce2usliquidpint',
r'ukfluidounce2usliquidquart',
r'ukfluidounce2ustablespoon',
r'ukfluidounce2usteaspoon',
# UK GALLONS #
r'ukgallon2cancup',
r'ukgallon2cubicinch',
r'ukgallon2cubicmeter',
r'ukgallon2liter',
r'ukgallon2metriccup',
r'ukgallon2metrictablespoon',
r'ukgallon2metricteaspoon',
r'ukgallon2ukfluidounce',
r'ukgallon2ukgill',
r'ukgallon2ukpint',
r'ukgallon2ukquart',
r'ukgallon2uktablespoon',
r'ukgallon2ukteaspoon',
r'ukgallon2uscup',
r'ukgallon2usdrygallon',
r'ukgallon2usdrypint',
r'ukgallon2usdryquart',
r'ukgallon2usfluidounce',
r'ukgallon2usgill',
r'ukgallon2usliquidgallon',
r'ukgallon2usliquidpint',
r'ukgallon2usliquidquart',
r'ukgallon2ustablespoon',
r'ukgallon2usteaspoon',
# UK GILLS #
r'ukgill2cancup',
r'ukgill2cubicinch',
r'ukgill2cubicmeter',
r'ukgill2liter',
r'ukgill2metriccup',
r'ukgill2metrictablespoon',
r'ukgill2metricteaspoon',
r'ukgill2ukfluidounce',
r'ukgill2ukgallon',
r'ukgill2ukpint',
r'ukgill2ukquart',
r'ukgill2uktablespoon',
r'ukgill2ukteaspoon',
r'ukgill2uscup',
r'ukgill2usdrygallon',
r'ukgill2usdrypint',
r'ukgill2usdryquart',
r'ukgill2usfluidounce',
r'ukgill2usgill',
r'ukgill2usliquidgallon',
r'ukgill2usliquidpint',
r'ukgill2usliquidquart',
r'ukgill2ustablespoon',
r'ukgill2usteaspoon',
# UK PINTS #
r'ukpint2cancup',
r'ukpint2cubicinch',
r'ukpint2cubicmeter',
r'ukpint2liter',
r'ukpint2metriccup',
r'ukpint2metrictablespoon',
r'ukpint2metricteaspoon',
r'ukpint2ukfluidounce',
r'ukpint2ukgallon',
r'ukpint2ukgill',
r'ukpint2ukquart',
r'ukpint2uktablespoon',
r'ukpint2ukteaspoon',
r'ukpint2uscup',
r'ukpint2usdrygallon',
r'ukpint2usdrypint',
r'ukpint2usdryquart',
r'ukpint2usfluidounce',
r'ukpint2usgill',
r'ukpint2usliquidgallon',
r'ukpint2usliquidpint',
r'ukpint2usliquidquart',
r'ukpint2ustablespoon',
r'ukpint2usteaspoon',
# UK QUARTS #
r'ukquart2cancup',
r'ukquart2cubicinch',
r'ukquart2cubicmeter',
r'ukquart2liter',
r'ukquart2metriccup',
r'ukquart2metrictablespoon',
r'ukquart2metricteaspoon',
r'ukquart2ukfluidounce',
r'ukquart2ukgallon',
r'ukquart2ukgill',
r'ukquart2ukpint',
r'ukquart2uktablespoon',
r'ukquart2ukteaspoon',
r'ukquart2uscup',
r'ukquart2usdrygallon',
r'ukquart2usdrypint',
r'ukquart2usdryquart',
r'ukquart2usfluidounce',
r'ukquart2usgill',
r'ukquart2usliquidgallon',
r'ukquart2usliquidpint',
r'ukquart2usliquidquart',
r'ukquart2ustablespoon',
r'ukquart2usteaspoon',
# UK TABLESPOONS #
r'uktablespoon2cancup',
r'uktablespoon2cubicinch',
r'uktablespoon2cubicmeter',
r'uktablespoon2liter',
r'uktablespoon2metriccup',
r'uktablespoon2metrictablespoon',
r'uktablespoon2metricteaspoon',
r'uktablespoon2ukfluidounce',
r'uktablespoon2ukgallon',
r'uktablespoon2ukgill',
r'uktablespoon2ukpint',
r'uktablespoon2ukquart',
r'uktablespoon2ukteaspoon',
r'uktablespoon2uscup',
r'uktablespoon2usdrygallon',
r'uktablespoon2usdrypint',
r'uktablespoon2usdryquart',
r'uktablespoon2usfluidounce',
r'uktablespoon2usgill',
r'uktablespoon2usliquidgallon',
r'uktablespoon2usliquidpint',
r'uktablespoon2usliquidquart',
r'uktablespoon2ustablespoon',
r'uktablespoon2usteaspoon',
# UK TEASPOONS #
r'ukteaspoon2cancup',
r'ukteaspoon2cubicinch',
r'ukteaspoon2cubicmeter',
r'ukteaspoon2liter',
r'ukteaspoon2metriccup',
r'ukteaspoon2metrictablespoon',
r'ukteaspoon2metricteaspoon',
r'ukteaspoon2ukfluidounce',
r'ukteaspoon2ukgallon',
r'ukteaspoon2ukgill',
r'ukteaspoon2ukpint',
r'ukteaspoon2ukquart',
r'ukteaspoon2uktablespoon',
r'ukteaspoon2uscup',
r'ukteaspoon2usdrygallon',
r'ukteaspoon2usdrypint',
r'ukteaspoon2usdryquart',
r'ukteaspoon2usfluidounce',
r'ukteaspoon2usgill',
r'ukteaspoon2usliquidgallon',
r'ukteaspoon2usliquidpint',
r'ukteaspoon2usliquidquart',
r'ukteaspoon2ustablespoon',
r'ukteaspoon2usteaspoon',
# US CUPS #
r'uscup2cancup',
r'uscup2cubicinch',
r'uscup2cubicmeter',
r'uscup2liter',
r'uscup2metriccup',
r'uscup2metrictablespoon',
r'uscup2metricteaspoon',
r'uscup2ukfluidounce',
r'uscup2ukgallon',
r'uscup2ukgill',
r'uscup2ukpint',
r'uscup2ukquart',
r'uscup2uktablespoon',
r'uscup2ukteaspoon',
r'uscup2usdrygallon',
r'uscup2usdrypint',
r'uscup2usdryquart',
r'uscup2usfluidounce',
r'uscup2usgill',
r'uscup2usliquidgallon',
r'uscup2usliquidpint',
r'uscup2usliquidquart',
r'uscup2ustablespoon',
r'uscup2usteaspoon',
# US DRY GALLONS #
r'usdrygallon2cancup',
r'usdrygallon2cubicinch',
r'usdrygallon2cubicmeter',
r'usdrygallon2liter',
r'usdrygallon2metriccup',
r'usdrygallon2metrictablespoon',
r'usdrygallon2metricteaspoon',
r'usdrygallon2ukfluidounce',
r'usdrygallon2ukgallon',
r'usdrygallon2ukgill',
r'usdrygallon2ukpint',
r'usdrygallon2ukquart',
r'usdrygallon2uktablespoon',
r'usdrygallon2ukteaspoon',
r'usdrygallon2uscup',
r'usdrygallon2usdrypint',
r'usdrygallon2usdryquart',
r'usdrygallon2usfluidounce',
r'usdrygallon2usgill',
r'usdrygallon2usliquidgallon',
r'usdrygallon2usliquidpint',
r'usdrygallon2usliquidquart',
r'usdrygallon2ustablespoon',
r'usdrygallon2usteaspoon',
# US DRY PINTS #
r'usdrypint2cancup',
r'usdrypint2cubicinch',
r'usdrypint2cubicmeter',
r'usdrypint2liter',
r'usdrypint2metriccup',
r'usdrypint2metrictablespoon',
r'usdrypint2metricteaspoon',
r'usdrypint2ukfluidounce',
r'usdrypint2ukgallon',
r'usdrypint2ukgill',
r'usdrypint2ukpint',
r'usdrypint2ukquart',
r'usdrypint2uktablespoon',
r'usdrypint2ukteaspoon',
r'usdrypint2uscup',
r'usdrypint2usdrygallon',
r'usdrypint2usdryquart',
r'usdrypint2usfluidounce',
r'usdrypint2usgill',
r'usdrypint2usliquidgallon',
r'usdrypint2usliquidpint',
r'usdrypint2usliquidquart',
r'usdrypint2ustablespoon',
r'usdrypint2usteaspoon',
# US DRY QUARTS #
r'usdryquart2cancup',
r'usdryquart2cubicinch',
r'usdryquart2cubicmeter',
r'usdryquart2liter',
r'usdryquart2metriccup',
r'usdryquart2metrictablespoon',
r'usdryquart2metricteaspoon',
r'usdryquart2ukfluidounce',
r'usdryquart2ukgallon',
r'usdryquart2ukgill',
r'usdryquart2ukpint',
r'usdryquart2ukquart',
r'usdryquart2uktablespoon',
r'usdryquart2ukteaspoon',
r'usdryquart2uscup',
r'usdryquart2usdrygallon',
r'usdryquart2usdrypint',
r'usdryquart2usfluidounce',
r'usdryquart2usgill',
r'usdryquart2usliquidgallon',
r'usdryquart2usliquidpint',
r'usdryquart2usliquidquart',
r'usdryquart2ustablespoon',
r'usdryquart2usteaspoon',
# US FLUID OUNCES #
r'usfluidounce2cancup',
r'usfluidounce2cubicinch',
r'usfluidounce2cubicmeter',
r'usfluidounce2liter',
r'usfluidounce2metriccup',
r'usfluidounce2metrictablespoon',
r'usfluidounce2metricteaspoon',
r'usfluidounce2ukfluidounce',
r'usfluidounce2ukgallon',
r'usfluidounce2ukgill',
r'usfluidounce2ukpint',
r'usfluidounce2ukquart',
r'usfluidounce2uktablespoon',
r'usfluidounce2ukteaspoon',
r'usfluidounce2uscup',
r'usfluidounce2usdrygallon',
r'usfluidounce2usdrypint',
r'usfluidounce2usdryquart',
r'usfluidounce2usgill',
r'usfluidounce2usliquidgallon',
r'usfluidounce2usliquidpint',
r'usfluidounce2usliquidquart',
r'usfluidounce2ustablespoon',
r'usfluidounce2usteaspoon',
# US GILLS #
r'usgill2cancup',
r'usgill2cubicinch',
r'usgill2cubicmeter',
r'usgill2liter',
r'usgill2metriccup',
r'usgill2metrictablespoon',
r'usgill2metricteaspoon',
r'usgill2ukfluidounce',
r'usgill2ukgallon',
r'usgill2ukgill',
r'usgill2ukpint',
r'usgill2ukquart',
r'usgill2uktablespoon',
r'usgill2ukteaspoon',
r'usgill2uscup',
r'usgill2usdrygallon',
r'usgill2usdrypint',
r'usgill2usdryquart',
r'usgill2usfluidounce',
r'usgill2usliquidgallon',
r'usgill2usliquidpint',
r'usgill2usliquidquart',
r'usgill2ustablespoon',
r'usgill2usteaspoon',
# US LIQUID GALLONS #
r'usliquidgallon2cancup',
r'usliquidgallon2cubicinch',
r'usliquidgallon2cubicmeter',
r'usliquidgallon2liter',
r'usliquidgallon2metriccup',
r'usliquidgallon2metrictablespoon',
r'usliquidgallon2metricteaspoon',
r'usliquidgallon2ukfluidounce',
r'usliquidgallon2ukgallon',
r'usliquidgallon2ukgill',
r'usliquidgallon2ukpint',
r'usliquidgallon2ukquart',
r'usliquidgallon2uktablespoon',
r'usliquidgallon2ukteaspoon',
r'usliquidgallon2uscup',
r'usliquidgallon2usdrygallon',
r'usliquidgallon2usdrypint',
r'usliquidgallon2usdryquart',
r'usliquidgallon2usfluidounce',
r'usliquidgallon2usgill',
r'usliquidgallon2usliquidpint',
r'usliquidgallon2usliquidquart',
r'usliquidgallon2ustablespoon',
r'usliquidgallon2usteaspoon',
# US LIQUID PINTS #
r'usliquidpint2cancup',
r'usliquidpint2cubicinch',
r'usliquidpint2cubicmeter',
r'usliquidpint2liter',
r'usliquidpint2metriccup',
r'usliquidpint2metrictablespoon',
r'usliquidpint2metricteaspoon',
r'usliquidpint2ukfluidounce',
r'usliquidpint2ukgallon',
r'usliquidpint2ukgill',
r'usliquidpint2ukpint',
r'usliquidpint2ukquart',
r'usliquidpint2uktablespoon',
r'usliquidpint2ukteaspoon',
r'usliquidpint2uscup',
r'usliquidpint2usdrygallon',
r'usliquidpint2usdrypint',
r'usliquidpint2usdryquart',
r'usliquidpint2usfluidounce',
r'usliquidpint2usgill',
r'usliquidpint2usliquidgallon',
r'usliquidpint2usliquidquart',
r'usliquidpint2ustablespoon',
r'usliquidpint2usteaspoon',
# US LIQUID QUARTS #
r'usliquidquart2cancup',
r'usliquidquart2cubicinch',
r'usliquidquart2cubicmeter',
r'usliquidquart2liter',
r'usliquidquart2metriccup',
r'usliquidquart2metrictablespoon',
r'usliquidquart2metricteaspoon',
r'usliquidquart2ukfluidounce',
r'usliquidquart2ukgallon',
r'usliquidquart2ukgill',
r'usliquidquart2ukpint',
r'usliquidquart2ukquart',
r'usliquidquart2uktablespoon',
r'usliquidquart2ukteaspoon',
r'usliquidquart2uscup',
r'usliquidquart2usdrygallon',
r'usliquidquart2usdrypint',
r'usliquidquart2usdryquart',
r'usliquidquart2usfluidounce',
r'usliquidquart2usgill',
r'usliquidquart2usliquidgallon',
r'usliquidquart2usliquidpint',
r'usliquidquart2ustablespoon',
r'usliquidquart2usteaspoon',
# US TABLESPOONS #
r'ustablespoon2cancup',
r'ustablespoon2cubicinch',
r'ustablespoon2cubicmeter',
r'ustablespoon2liter',
r'ustablespoon2metriccup',
r'ustablespoon2metrictablespoon',
r'ustablespoon2metricteaspoon',
r'ustablespoon2ukfluidounce',
r'ustablespoon2ukgallon',
r'ustablespoon2ukgill',
r'ustablespoon2ukpint',
r'ustablespoon2ukquart',
r'ustablespoon2uktablespoon',
r'ustablespoon2ukteaspoon',
r'ustablespoon2uscup',
r'ustablespoon2usdrygallon',
r'ustablespoon2usdrypint',
r'ustablespoon2usdryquart',
r'ustablespoon2usfluidounce',
r'ustablespoon2usgill',
r'ustablespoon2usliquidgallon',
r'ustablespoon2usliquidpint',
r'ustablespoon2usliquidquart',
r'ustablespoon2usteaspoon',
# US TEASPOONS #
r'usteaspoon2cancup',
r'usteaspoon2cubicinch',
r'usteaspoon2cubicmeter',
r'usteaspoon2liter',
r'usteaspoon2metriccup',
r'usteaspoon2metrictablespoon',
r'usteaspoon2metricteaspoon',
r'usteaspoon2ukfluidounce',
r'usteaspoon2ukgallon',
r'usteaspoon2ukgill',
r'usteaspoon2ukpint',
r'usteaspoon2ukquart',
r'usteaspoon2uktablespoon',
r'usteaspoon2ukteaspoon',
r'usteaspoon2uscup',
r'usteaspoon2usdrygallon',
r'usteaspoon2usdrypint',
r'usteaspoon2usdryquart',
r'usteaspoon2usfluidounce',
r'usteaspoon2usgill',
r'usteaspoon2usliquidgallon',
r'usteaspoon2usliquidpint',
r'usteaspoon2usliquidquart',
r'usteaspoon2ustablespoon'
]
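# Illustrative sketch (not part of the original API): every converter listed
# above follows the '<source>2<target>' naming pattern, so callers can look the
# right function up by name at runtime. `_convert` is a hypothetical helper
# added here only as an example; the lookup resolves at call time, after the
# converters below have been defined.
def _convert(_source: str, _target: str, _volume: Union[float, int], _round: int = 3) -> float:
    """Dispatch to the module-level `<source>2<target>` converter (illustrative example)."""
    return globals()[_source + '2' + _target](_volume, _round)
# Example: _convert('liter', 'uscup', 2.0) -> 8.454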
# CANADIAN CUPS #
def cancup2cubicinch(_volume: Union[float, int], _round: int = 3) -> float:
"""Canadian Cups -> Cubic Inches."""
return round(_volume * 13.871, _round)
def cancup2cubicmeter(_volume: Union[float, int], _round: int = 3) -> float:
"""Canadian Cups -> Cubic Meters."""
return round(_volume * 0.00022730372323498659, _round)
def cancup2liter(_volume: Union[float, int], _round: int = 3) -> float:
"""Canadian Cups -> Liters."""
return round(_volume * 0.22730372323498658908, _round)
def cancup2metriccup(_volume: Union[float, int], _round: int = 3) -> float:
"""Canadian Cups -> Metric Cups."""
return round(_volume * 0.90922, _round)
def cancup2metrictablespoon(_volume: Union[float, int], _round: int = 3) -> float:
"""Canadian Cups -> Metric Tablespoons."""
return round(_volume * 15.154, _round)
def cancup2metricteaspoon(_volume: Union[float, int], _round: int = 3) -> float:
"""Canadian Cups -> Metric Teaspoons."""
return round(_volume * 45.461, _round)
def cancup2ukfluidounce(_volume: Union[float, int], _round: int = 3) -> float:
"""Canadian Cups -> UK Fluid Ounces."""
return round(_volume * 8.0, _round)
def cancup2ukgallon(_volume: Union[float, int], _round: int = 3) -> float:
"""Canadian Cups -> UK Gallons."""
return round(_volume * 0.050000, _round)
def cancup2ukgill(_volume: Union[float, int], _round: int = 3) -> float:
"""Canadian Cups -> UK Gills."""
return round(_volume * 1.6000, _round)
def cancup2ukpint(_volume: Union[float, int], _round: int = 3) -> float:
"""Canadian Cups -> UK Pints."""
return round(_volume * 0.40000, _round)
def cancup2ukquart(_volume: Union[float, int], _round: int = 3) -> float:
"""Canadian Cups -> UK Quarts."""
return round(_volume * 0.20000, _round)
def cancup2uktablespoon(_volume: Union[float, int], _round: int = 3) -> float:
"""Canadian Cups -> UK Tablespoons."""
return round(_volume * 16.0, _round)
def cancup2ukteaspoon(_volume: Union[float, int], _round: int = 3) -> float:
"""Canadian Cups -> UK Teaspoons."""
return round(_volume * 64.0, _round)
def cancup2uscup(_volume: Union[float, int], _round: int = 3) -> float:
"""Canadian Cups -> US Cups."""
return round(_volume * 0.96076, _round)
def cancup2usdrygallon(_volume: Union[float, int], _round: int = 3) -> float:
"""Canadian Cups -> US Dry Gallons."""
return round(_volume * 0.051603, _round)
def cancup2usdrypint(_volume: Union[float, int], _round: int = 3) -> float:
"""Canadian Cups -> US Dry Pints."""
return round(_volume * 0.41282, _round)
def cancup2usdryquart(_volume: Union[float, int], _round: int = 3) -> float:
"""Canadian Cups -> US Dry Quarts."""
return round(_volume * 0.20641, _round)
def cancup2usfluidounce(_volume: Union[float, int], _round: int = 3) -> float:
"""Canadian Cups -> US Fluid Ounces."""
return round(_volume * 7.6861, _round)
def cancup2usgill(_volume: Union[float, int], _round: int = 3) -> float:
"""Canadian Cups -> US Gills."""
return round(_volume * 1.9215, _round)
def cancup2usliquidgallon(_volume: Union[float, int], _round: int = 3) -> float:
"""Canadian Cups -> US Liquid Gallons."""
return round(_volume * 0.060047, _round)
def cancup2usliquidpint(_volume: Union[float, int], _round: int = 3) -> float:
"""Canadian Cups -> US Liquid Pints."""
return round(_volume * 0.48038, _round)
def cancup2usliquidquart(_volume: Union[float, int], _round: int = 3) -> float:
"""Canadian Cups -> US Liquid Quarts."""
return round(_volume * 0.24019, _round)
def cancup2ustablespoon(_volume: Union[float, int], _round: int = 3) -> float:
"""Canadian Cups -> US Tablespoons."""
return round(_volume * 15.372, _round)
def cancup2usteaspoon(_volume: Union[float, int], _round: int = 3) -> float:
"""Canadian Cups -> US Teaspoons."""
return round(_volume * 46.116, _round)
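# Example usage of the Canadian Cup converters above (illustrative): results are
# rounded to three decimal places by default; pass a second argument to change
# the precision, e.g.:
#   cancup2liter(2)        # -> 0.455
#   cancup2liter(2, 5)     # -> 0.45461
#   cancup2ukfluidounce(1) # -> 8.0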
# CUBIC INCHES #
def cubicinch2cancup(_volume: Union[float, int], _round: int = 3) -> float:
"""Cubic Inches -> Canadian Cups."""
return round(_volume * 0.072093, _round)
def cubicinch2cubicmeter(_volume: Union[float, int], _round: int = 3) -> float:
"""Cubic Inches -> Cubic Meters."""
return round(_volume * 0.000016386995, _round)
def cubicinch2liter(_volume: Union[float, int], _round: int = 3) -> float:
"""Cubic Inches -> Liters."""
return round(_volume * 0.016386995, _round)
def cubicinch2metriccup(_volume: Union[float, int], _round: int = 3) -> float:
"""Cubic Inches -> Metric Cups."""
return round(_volume * 0.065548, _round)
def cubicinch2metrictablespoon(_volume: Union[float, int], _round: int = 3) -> float:
"""Cubic Inches -> Metric Tablespoons."""
return round(_volume * 1.0925, _round)
def cubicinch2metricteaspoon(_volume: Union[float, int], _round: int = 3) -> float:
"""Cubic Inches -> Metric Teaspoons."""
return round(_volume * 3.2774, _round)
def cubicinch2ukfluidounce(_volume: Union[float, int], _round: int = 3) -> float:
"""Cubic Inches -> UK Fluid Ounces."""
return round(_volume * 0.57674, _round)
def cubicinch2ukgallon(_volume: Union[float, int], _round: int = 3) -> float:
"""Cubic Inches -> UK Gallons."""
return round(_volume * 0.0036047, _round)
def cubicinch2ukgill(_volume: Union[float, int], _round: int = 3) -> float:
"""Cubic Inches -> UK Gills."""
return round(_volume * 0.11535, _round)
def cubicinch2ukpint(_volume: Union[float, int], _round: int = 3) -> float:
"""Cubic Inches -> UK Pints."""
return round(_volume * 0.028837, _round)
def cubicinch2ukquart(_volume: Union[float, int], _round: int = 3) -> float:
"""Cubic Inches -> UK Quarts."""
return round(_volume * 0.014419, _round)
def cubicinch2uktablespoon(_volume: Union[float, int], _round: int = 3) -> float:
"""Cubic Inches -> UK Tablespoons."""
return round(_volume * 1.1535, _round)
def cubicinch2ukteaspoon(_volume: Union[float, int], _round: int = 3) -> float:
"""Cubic Inches -> UK Teaspoons."""
return round(_volume * 4.6139, _round)
def cubicinch2uscup(_volume: Union[float, int], _round: int = 3) -> float:
"""Cubic Inches -> US Cups."""
return round(_volume * 0.069264, _round)
def cubicinch2usdrygallon(_volume: Union[float, int], _round: int = 3) -> float:
"""Cubic Inches -> US Dry Gallons."""
return round(_volume * 0.0037202, _round)
def cubicinch2usdrypint(_volume: Union[float, int], _round: int = 3) -> float:
"""Cubic Inches -> US Dry Pints."""
return round(_volume * 0.029762, _round)
def cubicinch2usdryquart(_volume: Union[float, int], _round: int = 3) -> float:
"""Cubic Inches -> US Dry Quarts."""
return round(_volume * 0.014881, _round)
def cubicinch2usfluidounce(_volume: Union[float, int], _round: int = 3) -> float:
"""Cubic Inches -> US Fluid Ounces."""
return round(_volume * 0.55411, _round)
def cubicinch2usgill(_volume: Union[float, int], _round: int = 3) -> float:
"""Cubic Inches -> US Gills."""
return round(_volume * 0.13853, _round)
def cubicinch2usliquidgallon(_volume: Union[float, int], _round: int = 3) -> float:
"""Cubic Inches -> US Liquid Gallons."""
return round(_volume * 0.0043290, _round)
def cubicinch2usliquidpint(_volume: Union[float, int], _round: int = 3) -> float:
"""Cubic Inches -> US Liquid Pints."""
return round(_volume * 0.034632, _round)
def cubicinch2usliquidquart(_volume: Union[float, int], _round: int = 3) -> float:
"""Cubic Inches -> US Liquid Quarts."""
return round(_volume * 0.017316, _round)
def cubicinch2ustablespoon(_volume: Union[float, int], _round: int = 3) -> float:
"""Cubic Inches -> US Tablespoons."""
return round(_volume * 1.1082, _round)
def cubicinch2usteaspoon(_volume: Union[float, int], _round: int = 3) -> float:
"""Cubic Inches -> US Teaspoons."""
return round(_volume * 3.3247, _round)
# CUBIC METERS #
def cubicmeter2cancup(_volume: Union[float, int], _round: int = 3) -> float:
"""Cubic Meters -> Canadian Cups."""
return round(_volume * 4399.4, _round)
def cubicmeter2cubicinch(_volume: Union[float, int], _round: int = 3) -> float:
"""Cubic Meters -> Cubic Inches."""
return round(_volume * 61024.0, _round)
def cubicmeter2liter(_volume: Union[float, int], _round: int = 3) -> float:
"""Cubic Meters -> Liters."""
return round(_volume * 1000.0, _round)
def cubicmeter2metriccup(_volume: Union[float, int], _round: int = 3) -> float:
"""Cubic Meters -> Metric Cups."""
return round(_volume * 4000.0, _round)
def cubicmeter2metrictablespoon(_volume: Union[float, int], _round: int = 3) -> float:
"""Cubic Meters -> Metric Tablespoons."""
return round(_volume * 66667.0, _round)
def cubicmeter2metricteaspoon(_volume: Union[float, int], _round: int = 3) -> float:
"""Cubic Meters -> Metric Teaspoons."""
return round(_volume * 200000.0, _round)
def cubicmeter2ukfluidounce(_volume: Union[float, int], _round: int = 3) -> float:
"""Cubic Meters -> UK Fluid Ounces."""
return round(_volume * 35195.0, _round)
def cubicmeter2ukgallon(_volume: Union[float, int], _round: int = 3) -> float:
"""Cubic Meters -> UK Gallons."""
return round(_volume * 219.97, _round)
def cubicmeter2ukgill(_volume: Union[float, int], _round: int = 3) -> float:
"""Cubic Meters -> UK Gills."""
return round(_volume * 7039.0, _round)
def cubicmeter2ukpint(_volume: Union[float, int], _round: int = 3) -> float:
"""Cubic Meters -> UK Pints."""
return round(_volume * 1759.8, _round)
def cubicmeter2ukquart(_volume: Union[float, int], _round: int = 3) -> float:
"""Cubic Meters -> UK Quarts."""
return round(_volume * 879.88, _round)
def cubicmeter2uktablespoon(_volume: Union[float, int], _round: int = 3) -> float:
"""Cubic Meters -> UK Tablespoons."""
return round(_volume * 70390.0, _round)
def cubicmeter2ukteaspoon(_volume: Union[float, int], _round: int = 3) -> float:
"""Cubic Meters -> UK Teaspoons."""
return round(_volume * 281560.0, _round)
def cubicmeter2uscup(_volume: Union[float, int], _round: int = 3) -> float:
"""Cubic Meters -> US Cups."""
return round(_volume * 4226.8, _round)
def cubicmeter2usdrygallon(_volume: Union[float, int], _round: int = 3) -> float:
"""Cubic Meters -> US Dry Gallons."""
return round(_volume * 227.02, _round)
def cubicmeter2usdrypint(_volume: Union[float, int], _round: int = 3) -> float:
"""Cubic Meters -> US Dry Pints."""
return round(_volume * 1816.2, _round)
def cubicmeter2usdryquart(_volume: Union[float, int], _round: int = 3) -> float:
"""Cubic Meters -> US Dry Quarts."""
return round(_volume * 908.08, _round)
def cubicmeter2usfluidounce(_volume: Union[float, int], _round: int = 3) -> float:
"""Cubic Meters -> US Fluid Ounces."""
    return round(_volume * 33814.0, _round)
def cubicmeter2usgill(_volume: Union[float, int], _round: int = 3) -> float:
"""Cubic Meters -> US Gills."""
return round(_volume * 8453.5, _round)
def cubicmeter2usliquidgallon(_volume: Union[float, int], _round: int = 3) -> float:
"""Cubic Meters -> US Liquid Gallons."""
return round(_volume * 264.17, _round)
def cubicmeter2usliquidpint(_volume: Union[float, int], _round: int = 3) -> float:
"""Cubic Meters -> US Liquid Pints."""
return round(_volume * 2113.4, _round)
def cubicmeter2usliquidquart(_volume: Union[float, int], _round: int = 3) -> float:
"""Cubic Meters -> US Liquid Quarts."""
return round(_volume * 1056.7, _round)
def cubicmeter2ustablespoon(_volume: Union[float, int], _round: int = 3) -> float:
"""Cubic Meters -> US Tablespoons."""
return round(_volume * 67628.0, _round)
def cubicmeter2usteaspoon(_volume: Union[float, int], _round: int = 3) -> float:
"""Cubic Meters -> US Teaspoons."""
return round(_volume * 202880.0, _round)
# LITERS #
def liter2cancup(_volume: Union[float, int], _round: int = 3) -> float:
"""Liters -> Canadian Cups."""
return round(_volume * 4.3994, _round)
def liter2cubicinch(_volume: Union[float, int], _round: int = 3) -> float:
"""Liters -> Cubic Inches."""
return round(_volume * 61.024, _round)
def liter2cubicmeter(_volume: Union[float, int], _round: int = 3) -> float:
"""Liters -> Cubic Meters."""
return round(_volume * 0.001, _round)
def liter2metriccup(_volume: Union[float, int], _round: int = 3) -> float:
"""Liters -> Metric Cups."""
return round(_volume * 4.0, _round)
def liter2metrictablespoon(_volume: Union[float, int], _round: int = 3) -> float:
"""Liters -> Metric Tablespoons."""
return round(_volume * 66.667, _round)
def liter2metricteaspoon(_volume: Union[float, int], _round: int = 3) -> float:
"""Liters -> Metric Teaspoons."""
return round(_volume * 200.0, _round)
def liter2ukfluidounce(_volume: Union[float, int], _round: int = 3) -> float:
"""Liters -> UK Fluid Ounces."""
return round(_volume * 35.195, _round)
def liter2ukgallon(_volume: Union[float, int], _round: int = 3) -> float:
"""Liters -> UK Gallons."""
return round(_volume * 0.21997, _round)
def liter2ukgill(_volume: Union[float, int], _round: int = 3) -> float:
"""Liters -> UK Gills."""
return round(_volume * 7.039, _round)
def liter2ukpint(_volume: Union[float, int], _round: int = 3) -> float:
"""Liters -> UK Pints."""
return round(_volume * 1.7598, _round)
def liter2ukquart(_volume: Union[float, int], _round: int = 3) -> float:
"""Liters -> UK Quarts."""
return round(_volume * 0.87988, _round)
def liter2uktablespoon(_volume: Union[float, int], _round: int = 3) -> float:
"""Liters -> UK Tablespoons."""
return round(_volume * 70.39, _round)
def liter2ukteaspoon(_volume: Union[float, int], _round: int = 3) -> float:
"""Liters -> UK Teaspoons."""
return round(_volume * 281.560, _round)
def liter2uscup(_volume: Union[float, int], _round: int = 3) -> float:
"""Liters -> US Cups."""
return round(_volume * 4.2268, _round)
def liter2usdrygallon(_volume: Union[float, int], _round: int = 3) -> float:
"""Liters -> US Dry Gallons."""
return round(_volume * 0.22702, _round)
def liter2usdrypint(_volume: Union[float, int], _round: int = 3) -> float:
"""Liters -> US Dry Pints."""
return round(_volume * 1.8162, _round)
def liter2usdryquart(_volume: Union[float, int], _round: int = 3) -> float:
"""Liters -> US Dry Quarts."""
return round(_volume * 0.90808, _round)
def liter2usfluidounce(_volume: Union[float, int], _round: int = 3) -> float:
"""Liters -> US Fluid Ounces."""
return round(_volume * 33.814, _round)
def liter2usgill(_volume: Union[float, int], _round: int = 3) -> float:
"""Liters -> US Gills."""
return round(_volume * 8.4535, _round)
def liter2usliquidgallon(_volume: Union[float, int], _round: int = 3) -> float:
"""Liters -> US Liquid Gallons."""
return round(_volume * 0.26417, _round)
def liter2usliquidpint(_volume: Union[float, int], _round: int = 3) -> float:
"""Liters -> US Liquid Pints."""
return round(_volume * 2.1134, _round)
def liter2usliquidquart(_volume: Union[float, int], _round: int = 3) -> float:
"""Liters -> US Liquid Quarts."""
return round(_volume * 1.0567, _round)
def liter2ustablespoon(_volume: Union[float, int], _round: int = 3) -> float:
"""Liters -> US Tablespoons."""
return round(_volume * 67.628, _round)
def liter2usteaspoon(_volume: Union[float, int], _round: int = 3) -> float:
"""Liters -> US Teaspoons."""
return round(_volume * 202.8800, _round)
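# Round-trip sanity check (illustrative): liters <-> cubic meters use exact
# inverse factors (0.001 and 1000.0), so converting out and back recovers the
# original value, e.g.:
#   liter2cubicmeter(250.0)  # -> 0.25
#   cubicmeter2liter(0.25)   # -> 250.0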
# METRIC CUPS #
def metriccup2cancup(_volume: Union[float, int], _round: int = 3) -> float:
"""Metric Cups -> Canadian Cups."""
return round(_volume * 1.0998, _round)
def metriccup2cubicinch(_volume: Union[float, int], _round: int = 3) -> float:
"""Metric Cups -> Cubic Inches."""
return round(_volume * 15.256, _round)
def metriccup2cubicmeter(_volume: Union[float, int], _round: int = 3) -> float:
"""Metric Cups -> Cubic Meters."""
return round(_volume * 0.00025, _round)
def metriccup2liter(_volume: Union[float, int], _round: int = 3) -> float:
"""Metric Cups -> Liters."""
return round(_volume * 0.250, _round)
def metriccup2metrictablespoon(_volume: Union[float, int], _round: int = 3) -> float:
"""Metric Cups -> Metric Tablespoons."""
return round(_volume * 16.667, _round)
def metriccup2metricteaspoon(_volume: Union[float, int], _round: int = 3) -> float:
"""Metric Cups -> Metric Teaspoons."""
return round(_volume * 50.0, _round)
def metriccup2ukfluidounce(_volume: Union[float, int], _round: int = 3) -> float:
"""Metric Cups -> UK Fluid Ounces."""
return round(_volume * 8.7988, _round)
def metriccup2ukgallon(_volume: Union[float, int], _round: int = 3) -> float:
"""Metric Cups -> UK Gallons."""
return round(_volume * 0.054992, _round)
def metriccup2ukgill(_volume: Union[float, int], _round: int = 3) -> float:
"""Metric Cups -> UK Gills."""
return round(_volume * 1.7598, _round)
def metriccup2ukpint(_volume: Union[float, int], _round: int = 3) -> float:
"""Metric Cups -> UK Pints."""
return round(_volume * 0.43994, _round)
def metriccup2ukquart(_volume: Union[float, int], _round: int = 3) -> float:
"""Metric Cups -> UK Quarts."""
return round(_volume * 0.21997, _round)
def metriccup2uktablespoon(_volume: Union[float, int], _round: int = 3) -> float:
"""Metric Cups -> UK Tablespoons."""
return round(_volume * 17.598, _round)
def metriccup2ukteaspoon(_volume: Union[float, int], _round: int = 3) -> float:
"""Metric Cups -> UK Teaspoons."""
return round(_volume * 70.390, _round)
def metriccup2uscup(_volume: Union[float, int], _round: int = 3) -> float:
"""Metric Cups -> US Cups."""
return round(_volume * 1.0567, _round)
def metriccup2usdrygallon(_volume: Union[float, int], _round: int = 3) -> float:
"""Metric Cups -> US Dry Gallons."""
return round(_volume * 0.056755, _round)
def metriccup2usdrypint(_volume: Union[float, int], _round: int = 3) -> float:
"""Metric Cups -> US Dry Pints."""
return round(_volume * 0.45404, _round)
def metriccup2usdryquart(_volume: Union[float, int], _round: int = 3) -> float:
"""Metric Cups -> US Dry Quarts."""
return round(_volume * 0.22702, _round)
def metriccup2usfluidounce(_volume: Union[float, int], _round: int = 3) -> float:
"""Metric Cups -> US Fluid Ounces."""
return round(_volume * 8.4535, _round)
def metriccup2usgill(_volume: Union[float, int], _round: int = 3) -> float:
"""Metric Cups -> US Gills."""
return round(_volume * 2.1134, _round)
def metriccup2usliquidgallon(_volume: Union[float, int], _round: int = 3) -> float:
"""Metric Cups -> US Liquid Gallons."""
return round(_volume * 0.066043, _round)
def metriccup2usliquidpint(_volume: Union[float, int], _round: int = 3) -> float:
"""Metric Cups -> US Liquid Pints."""
return round(_volume * 0.52834, _round)
def metriccup2usliquidquart(_volume: Union[float, int], _round: int = 3) -> float:
"""Metric Cups -> US Liquid Quarts."""
return round(_volume * 0.26417, _round)
def metriccup2ustablespoon(_volume: Union[float, int], _round: int = 3) -> float:
"""Metric Cups -> US Tablespoons."""
return round(_volume * 16.907, _round)
def metriccup2usteaspoon(_volume: Union[float, int], _round: int = 3) -> float:
"""Metric Cups -> US Teaspoons."""
return round(_volume * 50.721, _round)
# METRIC TABLESPOONS #
def metrictablespoon2cancup(_volume: Union[float, int], _round: int = 3) -> float:
"""Metric Tablespoons -> Canadian Cups."""
return round(_volume * 0.065991, _round)
def metrictablespoon2cubicinch(_volume: Union[float, int], _round: int = 3) -> float:
"""Metric Tablespoons -> Cubic Inches."""
return round(_volume * 0.91536, _round)
def metrictablespoon2cubicmeter(_volume: Union[float, int], _round: int = 3) -> float:
"""Metric Tablespoons -> Cubic Meters."""
return round(_volume * 0.000014999925000375, _round)
def metrictablespoon2liter(_volume: Union[float, int], _round: int = 3) -> float:
"""Metric Tablespoons -> Liters."""
return round(_volume * 0.01499992500037499813, _round)
def metrictablespoon2metriccup(_volume: Union[float, int], _round: int = 3) -> float:
"""Metric Tablespoons -> Metric Cups."""
return round(_volume * 0.060, _round)
def metrictablespoon2metricteaspoon(_volume: Union[float, int], _round: int = 3) -> float:
"""Metric Tablespoons -> Metric Teaspoons."""
return round(_volume * 3.0, _round)
def metrictablespoon2ukfluidounce(_volume: Union[float, int], _round: int = 3) -> float:
"""Metric Tablespoons -> UK Fluid Ounces."""
return round(_volume * 0.52793, _round)
def metrictablespoon2ukgallon(_volume: Union[float, int], _round: int = 3) -> float:
"""Metric Tablespoons -> UK Gallons."""
return round(_volume * 0.0032995, _round)
def metrictablespoon2ukgill(_volume: Union[float, int], _round: int = 3) -> float:
"""Metric Tablespoons -> UK Gills."""
return round(_volume * 0.10559, _round)
def metrictablespoon2ukpint(_volume: Union[float, int], _round: int = 3) -> float:
"""Metric Tablespoons -> UK Pints."""
return round(_volume * 0.026396, _round)
def metrictablespoon2ukquart(_volume: Union[float, int], _round: int = 3) -> float:
"""Metric Tablespoons -> UK Quarts."""
return round(_volume * 0.013198, _round)
def metrictablespoon2uktablespoon(_volume: Union[float, int], _round: int = 3) -> float:
"""Metric Tablespoons -> UK Tablespoons."""
return round(_volume * 1.0559, _round)
def metrictablespoon2ukteaspoon(_volume: Union[float, int], _round: int = 3) -> float:
"""Metric Tablespoons -> UK Teaspoons."""
return round(_volume * 4.2234, _round)
def metrictablespoon2uscup(_volume: Union[float, int], _round: int = 3) -> float:
"""Metric Tablespoons -> US Cups."""
return round(_volume * 0.063401, _round)
def metrictablespoon2usdrygallon(_volume: Union[float, int], _round: int = 3) -> float:
"""Metric Tablespoons -> US Dry Gallons."""
return round(_volume * 0.0034053, _round)
def metrictablespoon2usdrypint(_volume: Union[float, int], _round: int = 3) -> float:
"""Metric Tablespoons -> US Dry Pints."""
return round(_volume * 0.027242, _round)
def metrictablespoon2usdryquart(_volume: Union[float, int], _round: int = 3) -> float:
"""Metric Tablespoons -> US Dry Quarts."""
return round(_volume * 0.013621, _round)
def metrictablespoon2usfluidounce(_volume: Union[float, int], _round: int = 3) -> float:
"""Metric Tablespoons -> US Fluid Ounces."""
return round(_volume * 0.50721, _round)
def metrictablespoon2usgill(_volume: Union[float, int], _round: int = 3) -> float:
"""Metric Tablespoons -> US Gills."""
return round(_volume * 0.12680, _round)
def metrictablespoon2usliquidgallon(_volume: Union[float, int], _round: int = 3) -> float:
"""Metric Tablespoons -> US Liquid Gallons."""
return round(_volume * 0.0039626, _round)
def metrictablespoon2usliquidpint(_volume: Union[float, int], _round: int = 3) -> float:
"""Metric Tablespoons -> US Liquid Pints."""
return round(_volume * 0.031701, _round)
def metrictablespoon2usliquidquart(_volume: Union[float, int], _round: int = 3) -> float:
"""Metric Tablespoons -> US Liquid Quarts."""
return round(_volume * 0.015850, _round)
def metrictablespoon2ustablespoon(_volume: Union[float, int], _round: int = 3) -> float:
"""Metric Tablespoons -> US Tablespoons."""
return round(_volume * 1.0144, _round)
def metrictablespoon2usteaspoon(_volume: Union[float, int], _round: int = 3) -> float:
"""Metric Tablespoons -> US Teaspoons."""
return round(_volume * 3.0433, _round)
# METRIC TEASPOONS #
def metricteaspoon2cancup(_volume: Union[float, int], _round: int = 3) -> float:
"""Metric Teaspoons -> Canadian Cups."""
return round(_volume * 0.021997, _round)
def metricteaspoon2cubicinch(_volume: Union[float, int], _round: int = 3) -> float:
"""Metric Teaspoons -> Cubic Inches."""
return round(_volume * 0.30512, _round)
def metricteaspoon2cubicmeter(_volume: Union[float, int], _round: int = 3) -> float:
"""Metric Teaspoons -> Cubic Meters."""
return round(_volume * 0.000005, _round)
def metricteaspoon2liter(_volume: Union[float, int], _round: int = 3) -> float:
"""Metric Teaspoons -> Liters."""
return round(_volume * 0.005, _round)
def metricteaspoon2metriccup(_volume: Union[float, int], _round: int = 3) -> float:
"""Metric Teaspoons -> Metric Cups."""
return round(_volume * 0.020000, _round)
def metricteaspoon2metrictablespoon(_volume: Union[float, int], _round: int = 3) -> float:
"""Metric Teaspoons -> Metric Tablespoons."""
return round(_volume * 0.33333333, _round)
def metricteaspoon2ukfluidounce(_volume: Union[float, int], _round: int = 3) -> float:
"""Metric Teaspoons -> UK Fluid Ounces."""
return round(_volume * 0.17598, _round)
def metricteaspoon2ukgallon(_volume: Union[float, int], _round: int = 3) -> float:
"""Metric Teaspoons -> UK Gallons."""
return round(_volume * 0.0010998, _round)
def metricteaspoon2ukgill(_volume: Union[float, int], _round: int = 3) -> float:
"""Metric Teaspoons -> UK Gills."""
return round(_volume * 0.035195, _round)
def metricteaspoon2ukpint(_volume: Union[float, int], _round: int = 3) -> float:
"""Metric Teaspoons -> UK Pints."""
return round(_volume * 0.0087988, _round)
def metricteaspoon2ukquart(_volume: Union[float, int], _round: int = 3) -> float:
"""Metric Teaspoons -> UK Quarts."""
return round(_volume * 0.0043994, _round)
def metricteaspoon2uktablespoon(_volume: Union[float, int], _round: int = 3) -> float:
"""Metric Teaspoons -> UK Tablespoons."""
return round(_volume * 0.35195, _round)
def metricteaspoon2ukteaspoon(_volume: Union[float, int], _round: int = 3) -> float:
"""Metric Teaspoons -> UK Teaspoons."""
return round(_volume * 1.4078, _round)
def metricteaspoon2uscup(_volume: Union[float, int], _round: int = 3) -> float:
"""Metric Teaspoons -> US Cups."""
return round(_volume * 0.021134, _round)
def metricteaspoon2usdrygallon(_volume: Union[float, int], _round: int = 3) -> float:
"""Metric Teaspoons -> US Dry Gallons."""
return round(_volume * 0.0011351, _round)
def metricteaspoon2usdrypint(_volume: Union[float, int], _round: int = 3) -> float:
"""Metric Teaspoons -> US Dry Pints."""
return round(_volume * 0.0090808, _round)
def metricteaspoon2usdryquart(_volume: Union[float, int], _round: int = 3) -> float:
"""Metric Teaspoons -> US Dry Quarts."""
return round(_volume * 0.0045404, _round)
def metricteaspoon2usfluidounce(_volume: Union[float, int], _round: int = 3) -> float:
"""Metric Teaspoons -> US Fluid Ounces."""
return round(_volume * 0.16907, _round)
def metricteaspoon2usgill(_volume: Union[float, int], _round: int = 3) -> float:
"""Metric Teaspoons -> US Gills."""
return round(_volume * 0.042268, _round)
def metricteaspoon2usliquidgallon(_volume: Union[float, int], _round: int = 3) -> float:
"""Metric Teaspoons -> US Liquid Gallons."""
return round(_volume * 0.0013209, _round)
def metricteaspoon2usliquidpint(_volume: Union[float, int], _round: int = 3) -> float:
"""Metric Teaspoons -> US Liquid Pints."""
return round(_volume * 0.010567, _round)
def metricteaspoon2usliquidquart(_volume: Union[float, int], _round: int = 3) -> float:
"""Metric Teaspoons -> US Liquid Quarts."""
return round(_volume * 0.0052834, _round)
def metricteaspoon2ustablespoon(_volume: Union[float, int], _round: int = 3) -> float:
"""Metric Teaspoons -> US Tablespoons."""
return round(_volume * 0.33814, _round)
def metricteaspoon2usteaspoon(_volume: Union[float, int], _round: int = 3) -> float:
"""Metric Teaspoons -> US Teaspoons."""
return round(_volume * 1.0144, _round)
# UK FLUID OUNCES #
def ukfluidounce2cancup(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Fluid Ounces -> Canadian Cups."""
return round(_volume * 0.12500, _round)
def ukfluidounce2cubicinch(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Fluid Ounces -> Cubic Inches."""
return round(_volume * 1.7339, _round)
def ukfluidounce2cubicmeter(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Fluid Ounces -> Cubic Meters."""
return round(_volume * 0.00002841312686461145, _round)
def ukfluidounce2liter(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Fluid Ounces -> Liters."""
return round(_volume * 0.02841312686461145049, _round)
def ukfluidounce2metriccup(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Fluid Ounces -> Metric Cups."""
return round(_volume * 0.11365, _round)
def ukfluidounce2metrictablespoon(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Fluid Ounces -> Metric Tablespoons."""
return round(_volume * 1.8942, _round)
def ukfluidounce2metricteaspoon(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Fluid Ounces -> Metric Teaspoons."""
return round(_volume * 5.6826, _round)
def ukfluidounce2ukgallon(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Fluid Ounces -> UK Gallons."""
return round(_volume * 0.0062500, _round)
def ukfluidounce2ukgill(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Fluid Ounces -> UK Gills."""
return round(_volume * 0.20000, _round)
def ukfluidounce2ukpint(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Fluid Ounces -> UK Pints."""
return round(_volume * 0.050000, _round)
def ukfluidounce2ukquart(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Fluid Ounces -> UK Quarts."""
return round(_volume * 0.025000, _round)
def ukfluidounce2uktablespoon(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Fluid Ounces -> UK Tablespoons."""
return round(_volume * 2.0000, _round)
def ukfluidounce2ukteaspoon(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Fluid Ounces -> UK Teaspoons."""
return round(_volume * 8.0000, _round)
def ukfluidounce2uscup(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Fluid Ounces -> US Cups."""
return round(_volume * 0.12009, _round)
def ukfluidounce2usdrygallon(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Fluid Ounces -> US Dry Gallons."""
return round(_volume * 0.0064504, _round)
def ukfluidounce2usdrypint(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Fluid Ounces -> US Dry Pints."""
return round(_volume * 0.051603, _round)
def ukfluidounce2usdryquart(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Fluid Ounces -> US Dry Quarts."""
return round(_volume * 0.025801, _round)
def ukfluidounce2usfluidounce(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Fluid Ounces -> US Fluid Ounces."""
return round(_volume * 0.96076, _round)
def ukfluidounce2usgill(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Fluid Ounces -> US Gills."""
return round(_volume * 0.24019, _round)
def ukfluidounce2usliquidgallon(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Fluid Ounces -> US Liquid Gallons."""
return round(_volume * 0.0075059, _round)
def ukfluidounce2usliquidpint(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Fluid Ounces -> US Liquid Pints."""
return round(_volume * 0.060047, _round)
def ukfluidounce2usliquidquart(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Fluid Ounces -> US Liquid Quarts."""
return round(_volume * 0.030024, _round)
def ukfluidounce2ustablespoon(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Fluid Ounces -> US Tablespoons."""
return round(_volume * 1.9215, _round)
def ukfluidounce2usteaspoon(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Fluid Ounces -> US Teaspoons."""
return round(_volume * 5.7646, _round)
# UK GALLONS #
def ukgallon2cancup(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Gallons -> Canadian Cups."""
return round(_volume * 20.000, _round)
def ukgallon2cubicinch(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Gallons -> Cubic Inches."""
return round(_volume * 277.42, _round)
def ukgallon2cubicmeter(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Gallons -> Cubic Meters."""
return round(_volume * 0.00454607446469973178, _round)
def ukgallon2liter(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Gallons -> Liters."""
return round(_volume * 4.54607446469973178161, _round)
def ukgallon2metriccup(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Gallons -> Metric Cups."""
return round(_volume * 18.184, _round)
def ukgallon2metrictablespoon(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Gallons -> Metric Tablespoons."""
return round(_volume * 303.07, _round)
def ukgallon2metricteaspoon(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Gallons -> Metric Teaspoons."""
return round(_volume * 909.22, _round)
def ukgallon2ukfluidounce(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Gallons -> UK Fluid Ounces."""
return round(_volume * 160.00, _round)
def ukgallon2ukgill(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Gallons -> UK Gills."""
return round(_volume * 32.000, _round)
def ukgallon2ukpint(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Gallons -> UK Pints."""
return round(_volume * 8.0000, _round)
def ukgallon2ukquart(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Gallons -> UK Quarts."""
return round(_volume * 4.0000, _round)
def ukgallon2uktablespoon(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Gallons -> UK Tablespoons."""
return round(_volume * 320.00, _round)
def ukgallon2ukteaspoon(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Gallons -> UK Teaspoons."""
return round(_volume * 1280.0, _round)
def ukgallon2uscup(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Gallons -> US Cups."""
return round(_volume * 19.215, _round)
def ukgallon2usdrygallon(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Gallons -> US Dry Gallons."""
return round(_volume * 1.0321, _round)
def ukgallon2usdrypint(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Gallons -> US Dry Pints."""
return round(_volume * 8.2565, _round)
def ukgallon2usdryquart(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Gallons -> US Dry Quarts."""
return round(_volume * 4.1282, _round)
def ukgallon2usfluidounce(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Gallons -> US Fluid Ounces."""
return round(_volume * 153.72, _round)
def ukgallon2usgill(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Gallons -> US Gills."""
return round(_volume * 38.430, _round)
def ukgallon2usliquidgallon(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Gallons -> US Liquid Gallons."""
return round(_volume * 1.2009, _round)
def ukgallon2usliquidpint(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Gallons -> US Liquid Pints."""
return round(_volume * 9.6076, _round)
def ukgallon2usliquidquart(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Gallons -> US Liquid Quarts."""
return round(_volume * 4.8038, _round)
def ukgallon2ustablespoon(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Gallons -> US Tablespoons."""
return round(_volume * 307.44, _round)
def ukgallon2usteaspoon(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Gallons -> US Teaspoons."""
return round(_volume * 922.33, _round)
# UK GILLS #
def ukgill2cancup(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Gills -> Canadian Cups."""
return round(_volume * 0.62500, _round)
def ukgill2cubicinch(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Gills -> Cubic Inches."""
return round(_volume * 8.6694, _round)
def ukgill2cubicmeter(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Gills -> Cubic Meters."""
return round(_volume * 0.00014206563432305725, _round)
def ukgill2liter(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Gills -> Liters."""
return round(_volume * 0.14206563432305725245, _round)
def ukgill2metriccup(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Gills -> Metric Cups."""
return round(_volume * 0.56826, _round)
def ukgill2metrictablespoon(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Gills -> Metric Tablespoons."""
return round(_volume * 9.4710, _round)
def ukgill2metricteaspoon(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Gills -> Metric Teaspoons."""
return round(_volume * 28.413, _round)
def ukgill2ukfluidounce(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Gills -> UK Fluid Ounces."""
return round(_volume * 5.0000, _round)
def ukgill2ukgallon(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Gills -> UK Gallons."""
return round(_volume * 0.031250, _round)
def ukgill2ukpint(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Gills -> UK Pints."""
return round(_volume * 0.25000, _round)
def ukgill2ukquart(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Gills -> UK Quarts."""
return round(_volume * 0.12500, _round)
def ukgill2uktablespoon(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Gills -> UK Tablespoons."""
return round(_volume * 10.000, _round)
def ukgill2ukteaspoon(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Gills -> UK Teaspoons."""
return round(_volume * 40.000, _round)
def ukgill2uscup(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Gills -> US Cups."""
return round(_volume * 0.60047, _round)
def ukgill2usdrygallon(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Gills -> US Dry Gallons."""
return round(_volume * 0.032252, _round)
def ukgill2usdrypint(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Gills -> US Dry Pints."""
return round(_volume * 0.25801, _round)
def ukgill2usdryquart(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Gills -> US Dry Quarts."""
return round(_volume * 0.12901, _round)
def ukgill2usfluidounce(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Gills -> US Fluid Ounces."""
return round(_volume * 4.8038, _round)
def ukgill2usgill(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Gills -> US Gills."""
return round(_volume * 1.2009, _round)
def ukgill2usliquidgallon(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Gills -> US Liquid Gallons."""
return round(_volume * 0.037530, _round)
def ukgill2usliquidpint(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Gills -> US Liquid Pints."""
return round(_volume * 0.30024, _round)
def ukgill2usliquidquart(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Gills -> US Liquid Quarts."""
return round(_volume * 0.15012, _round)
def ukgill2ustablespoon(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Gills -> US Tablespoons."""
return round(_volume * 9.6076, _round)
def ukgill2usteaspoon(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Gills -> US Teaspoons."""
return round(_volume * 28.823, _round)
# UK PINTS #
def ukpint2cancup(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Pints -> Canadian Cups."""
return round(_volume * 2.5000, _round)
def ukpint2cubicinch(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Pints -> Cubic Inches."""
return round(_volume * 34.677, _round)
def ukpint2cubicmeter(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Pints -> Cubic Meters."""
return round(_volume * 0.00056824639163541312, _round)
def ukpint2liter(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Pints -> Liters."""
return round(_volume * 0.56824639163541311513, _round)
def ukpint2metriccup(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Pints -> Metric Cups."""
return round(_volume * 2.2730, _round)
def ukpint2metrictablespoon(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Pints -> Metric Tablespoons."""
return round(_volume * 37.884, _round)
def ukpint2metricteaspoon(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Pints -> Metric Teaspoons."""
return round(_volume * 113.65, _round)
def ukpint2ukfluidounce(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Pints -> UK Fluid Ounces."""
return round(_volume * 20.000, _round)
def ukpint2ukgallon(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Pints -> UK Gallons."""
return round(_volume * 0.12500, _round)
def ukpint2ukgill(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Pints -> UK Gills."""
return round(_volume * 4.0000, _round)
def ukpint2ukquart(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Pints -> UK Quarts."""
return round(_volume * 0.50000, _round)
def ukpint2uktablespoon(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Pints -> UK Tablespoons."""
return round(_volume * 40.000, _round)
def ukpint2ukteaspoon(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Pints -> UK Teaspoons."""
return round(_volume * 160.00, _round)
def ukpint2uscup(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Pints -> US Cups."""
return round(_volume * 2.4019, _round)
def ukpint2usdrygallon(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Pints -> US Dry Gallons."""
    return round(_volume * 0.12901, _round)
def ukpint2usdrypint(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Pints -> US Dry Pints."""
return round(_volume * 1.0321, _round)
def ukpint2usdryquart(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Pints -> US Dry Quarts."""
return round(_volume * 0.51603, _round)
def ukpint2usfluidounce(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Pints -> US Fluid Ounces."""
return round(_volume * 19.215, _round)
def ukpint2usgill(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Pints -> US Gills."""
return round(_volume * 4.8038, _round)
def ukpint2usliquidgallon(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Pints -> US Liquid Gallons."""
return round(_volume * 0.15012, _round)
def ukpint2usliquidpint(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Pints -> US Liquid Pints."""
return round(_volume * 1.2009, _round)
def ukpint2usliquidquart(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Pints -> US Liquid Quarts."""
return round(_volume * 0.60047, _round)
def ukpint2ustablespoon(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Pints -> US Tablespoons."""
return round(_volume * 38.430, _round)
def ukpint2usteaspoon(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Pints -> US Teaspoons."""
return round(_volume * 115.29, _round)
# UK QUARTS #
def ukquart2cancup(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Quarts -> Canadian Cups."""
return round(_volume * 5.0000, _round)
def ukquart2cubicinch(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Quarts -> Cubic Inches."""
return round(_volume * 69.355, _round)
def ukquart2cubicmeter(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Quarts -> Cubic Meters."""
return round(_volume * 0.00113651861617493295, _round)
def ukquart2liter(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Quarts -> Liters."""
return round(_volume * 1.1365186161749329454, _round)
def ukquart2metriccup(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Quarts -> Metric Cups."""
return round(_volume * 4.5461, _round)
def ukquart2metrictablespoon(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Quarts -> Metric Tablespoons."""
return round(_volume * 75.768, _round)
def ukquart2metricteaspoon(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Quarts -> Metric Teaspoons."""
return round(_volume * 227.30, _round)
def ukquart2ukfluidounce(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Quarts -> UK Fluid Ounces."""
return round(_volume * 40.000, _round)
def ukquart2ukgallon(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Quarts -> UK Gallons."""
return round(_volume * 0.25000, _round)
def ukquart2ukgill(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Quarts -> UK Gills."""
return round(_volume * 8.0000, _round)
def ukquart2ukpint(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Quarts -> UK Pints."""
return round(_volume * 2.0000, _round)
def ukquart2uktablespoon(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Quarts -> UK Tablespoons."""
return round(_volume * 80.000, _round)
def ukquart2ukteaspoon(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Quarts -> UK Teaspoons."""
return round(_volume * 320.00, _round)
def ukquart2uscup(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Quarts -> US Cups."""
return round(_volume * 4.8038, _round)
def ukquart2usdrygallon(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Quarts -> US Dry Gallons."""
return round(_volume * 0.25801, _round)
def ukquart2usdrypint(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Quarts -> US Dry Pints."""
return round(_volume * 2.0641, _round)
def ukquart2usdryquart(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Quarts -> US Dry Quarts."""
return round(_volume * 1.0321, _round)
def ukquart2usfluidounce(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Quarts -> US Fluid Ounces."""
return round(_volume * 38.430, _round)
def ukquart2usgill(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Quarts -> US Gills."""
return round(_volume * 9.6076, _round)
def ukquart2usliquidgallon(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Quarts -> US Liquid Gallons."""
return round(_volume * 0.30024, _round)
def ukquart2usliquidpint(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Quarts -> US Liquid Pints."""
return round(_volume * 2.4019, _round)
def ukquart2usliquidquart(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Quarts -> US Liquid Quarts."""
return round(_volume * 1.2009, _round)
def ukquart2ustablespoon(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Quarts -> US Tablespoons."""
return round(_volume * 76.861, _round)
def ukquart2usteaspoon(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Quarts -> US Teaspoons."""
return round(_volume * 230.58, _round)
# UK TABLESPOONS #
def uktablespoon2cancup(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Tablespoons -> Canadian Cups."""
return round(_volume * 0.062500, _round)
def uktablespoon2cubicinch(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Tablespoons -> Cubic Inches."""
return round(_volume * 0.86694, _round)
def uktablespoon2cubicmeter(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Tablespoons -> Cubic Meters."""
return round(_volume * 0.00001420656343230573, _round)
def uktablespoon2liter(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Tablespoons -> Liters."""
return round(_volume * 0.01420656343230572525, _round)
def uktablespoon2metriccup(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Tablespoons -> Metric Cups."""
return round(_volume * 0.056826, _round)
def uktablespoon2metrictablespoon(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Tablespoons -> Metric Tablespoons."""
return round(_volume * 0.94710, _round)
def uktablespoon2metricteaspoon(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Tablespoons -> Metric Teaspoons."""
return round(_volume * 2.8413, _round)
def uktablespoon2ukfluidounce(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Tablespoons -> UK Fluid Ounces."""
return round(_volume * 0.50000, _round)
def uktablespoon2ukgallon(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Tablespoons -> UK Gallons."""
return round(_volume * 0.0031250, _round)
def uktablespoon2ukgill(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Tablespoons -> UK Gills."""
return round(_volume * 0.10000, _round)
def uktablespoon2ukpint(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Tablespoons -> UK Pints."""
return round(_volume * 0.025000, _round)
def uktablespoon2ukquart(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Tablespoons -> UK Quarts."""
return round(_volume * 0.012500, _round)
def uktablespoon2ukteaspoon(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Tablespoons -> UK Teaspoons."""
return round(_volume * 4.0000, _round)
def uktablespoon2uscup(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Tablespoons -> US Cups."""
return round(_volume * 0.060048, _round)
def uktablespoon2usdrygallon(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Tablespoons -> US Dry Gallons."""
return round(_volume * 0.0032252, _round)
def uktablespoon2usdrypint(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Tablespoons -> US Dry Pints."""
return round(_volume * 0.025801, _round)
def uktablespoon2usdryquart(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Tablespoons -> US Dry Quarts."""
return round(_volume * 0.012901, _round)
def uktablespoon2usfluidounce(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Tablespoons -> US Fluid Ounces."""
return round(_volume * 0.48038, _round)
def uktablespoon2usgill(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Tablespoons -> US Gills."""
return round(_volume * 0.12010, _round)
def uktablespoon2usliquidgallon(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Tablespoons -> US Liquid Gallons."""
return round(_volume * 0.0037530, _round)
def uktablespoon2usliquidpint(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Tablespoons -> US Liquid Pints."""
return round(_volume * 0.030024, _round)
def uktablespoon2usliquidquart(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Tablespoons -> US Liquid Quarts."""
return round(_volume * 0.015012, _round)
def uktablespoon2ustablespoon(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Tablespoons -> US Tablespoons."""
return round(_volume * 0.96076, _round)
def uktablespoon2usteaspoon(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Tablespoons -> US Teaspoons."""
return round(_volume * 2.8823, _round)
# UK TEASPOONS #
def ukteaspoon2cancup(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Teaspoons -> Canadian Cups."""
return round(_volume * 0.015625, _round)
def ukteaspoon2cubicinch(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Teaspoons -> Cubic Inches."""
return round(_volume * 0.21673, _round)
def ukteaspoon2cubicmeter(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Teaspoons -> Cubic Meters."""
return round(_volume * 0.00000355164085807643, _round)
def ukteaspoon2liter(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Teaspoons -> Liters."""
return round(_volume * 0.00355164085807643131, _round)
def ukteaspoon2metriccup(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Teaspoons -> Metric Cups."""
return round(_volume * 0.014207, _round)
def ukteaspoon2metrictablespoon(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Teaspoons -> Metric Tablespoons."""
return round(_volume * 0.23678, _round)
def ukteaspoon2metricteaspoon(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Teaspoons -> Metric Teaspoons."""
return round(_volume * 0.71033, _round)
def ukteaspoon2ukfluidounce(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Teaspoons -> UK Fluid Ounces."""
return round(_volume * 0.12500, _round)
def ukteaspoon2ukgallon(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Teaspoons -> UK Gallons."""
return round(_volume * 0.00078125, _round)
def ukteaspoon2ukgill(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Teaspoons -> UK Gills."""
return round(_volume * 0.025000, _round)
def ukteaspoon2ukpint(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Teaspoons -> UK Pints."""
return round(_volume * 0.0062500, _round)
def ukteaspoon2ukquart(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Teaspoons -> UK Quarts."""
return round(_volume * 0.0031250, _round)
def ukteaspoon2uktablespoon(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Teaspoons -> UK Tablespoons."""
return round(_volume * 0.25000, _round)
def ukteaspoon2uscup(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Teaspoons -> US Cups."""
return round(_volume * 0.015012, _round)
def ukteaspoon2usdrygallon(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Teaspoons -> US Dry Gallons."""
return round(_volume * 0.00080630, _round)
def ukteaspoon2usdrypint(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Teaspoons -> US Dry Pints."""
return round(_volume * 0.0064504, _round)
def ukteaspoon2usdryquart(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Teaspoons -> US Dry Quarts."""
return round(_volume * 0.0032252, _round)
def ukteaspoon2usfluidounce(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Teaspoons -> US Fluid Ounces."""
return round(_volume * 0.12010, _round)
def ukteaspoon2usgill(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Teaspoons -> US Gills."""
return round(_volume * 0.030024, _round)
def ukteaspoon2usliquidgallon(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Teaspoons -> US Liquid Gallons."""
return round(_volume * 0.00093824, _round)
def ukteaspoon2usliquidpint(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Teaspoons -> US Liquid Pints."""
return round(_volume * 0.0075060, _round)
def ukteaspoon2usliquidquart(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Teaspoons -> US Liquid Quarts."""
return round(_volume * 0.0037530, _round)
def ukteaspoon2ustablespoon(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Teaspoons -> US Tablespoons."""
return round(_volume * 0.24019, _round)
def ukteaspoon2usteaspoon(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Teaspoons -> US Teaspoons."""
return round(_volume * 0.72057, _round)
# US CUPS #
def uscup2cancup(_volume: Union[float, int], _round: int = 3) -> float:
"""US Cups -> Canadian Cups."""
return round(_volume * 1.0408, _round)
def uscup2cubicinch(_volume: Union[float, int], _round: int = 3) -> float:
"""US Cups -> Cubic Inches."""
return round(_volume * 14.438, _round)
def uscup2cubicmeter(_volume: Union[float, int], _round: int = 3) -> float:
"""US Cups -> Cubic Meters."""
return round(_volume * 0.0002365855966688748, _round)
def uscup2liter(_volume: Union[float, int], _round: int = 3) -> float:
"""US Cups -> Liters."""
return round(_volume * 0.2365855966688747989, _round)
def uscup2metriccup(_volume: Union[float, int], _round: int = 3) -> float:
"""US Cups -> Metric Cups."""
return round(_volume * 0.94635, _round)
def uscup2metrictablespoon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Cups -> Metric Tablespoons."""
return round(_volume * 15.773, _round)
def uscup2metricteaspoon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Cups -> Metric Teaspoons."""
return round(_volume * 47.318, _round)
def uscup2ukfluidounce(_volume: Union[float, int], _round: int = 3) -> float:
"""US Cups -> UK Fluid Ounces."""
return round(_volume * 8.3267, _round)
def uscup2ukgallon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Cups -> UK Gallons."""
return round(_volume * 0.052042, _round)
def uscup2ukgill(_volume: Union[float, int], _round: int = 3) -> float:
"""US Cups -> UK Gills."""
return round(_volume * 1.6653, _round)
def uscup2ukpint(_volume: Union[float, int], _round: int = 3) -> float:
"""US Cups -> UK Pints."""
return round(_volume * 0.41634, _round)
def uscup2ukquart(_volume: Union[float, int], _round: int = 3) -> float:
"""US Cups -> UK Quarts."""
return round(_volume * 0.20817, _round)
def uscup2uktablespoon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Cups -> UK Tablespoons."""
return round(_volume * 16.653, _round)
def uscup2ukteaspoon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Cups -> UK Teaspoons."""
return round(_volume * 66.614, _round)
def uscup2usdrygallon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Cups -> US Dry Gallons."""
return round(_volume * 0.053710, _round)
def uscup2usdrypint(_volume: Union[float, int], _round: int = 3) -> float:
"""US Cups -> US Dry Pints."""
return round(_volume * 0.42968, _round)
def uscup2usdryquart(_volume: Union[float, int], _round: int = 3) -> float:
"""US Cups -> US Dry Quarts."""
return round(_volume * 0.21484, _round)
def uscup2usfluidounce(_volume: Union[float, int], _round: int = 3) -> float:
"""US Cups -> US Fluid Ounces."""
return round(_volume * 8.0000, _round)
def uscup2usgill(_volume: Union[float, int], _round: int = 3) -> float:
"""US Cups -> US Gills."""
return round(_volume * 2.0000, _round)
def uscup2usliquidgallon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Cups -> US Liquid Gallons."""
return round(_volume * 0.062500, _round)
def uscup2usliquidpint(_volume: Union[float, int], _round: int = 3) -> float:
"""US Cups -> US Liquid Pints."""
return round(_volume * 0.50000, _round)
def uscup2usliquidquart(_volume: Union[float, int], _round: int = 3) -> float:
"""US Cups -> US Liquid Quarts."""
return round(_volume * 0.25000, _round)
def uscup2ustablespoon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Cups -> US Tablespoons."""
return round(_volume * 16.000, _round)
def uscup2usteaspoon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Cups -> US Teaspoons."""
return round(_volume * 48.000, _round)
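# Illustrative usage: a US cup is exactly 8 US fluid ounces and 16 US
# tablespoons, so the factors above give round numbers for those cases, e.g.
#   uscup2usfluidounce(1.5) -> 12.0
#   uscup2ustablespoon(0.5) -> 8.0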
# US DRY GALLONS #
def usdrygallon2cancup(_volume: Union[float, int], _round: int = 3) -> float:
"""US Dry Gallons -> Canadian Cups."""
return round(_volume * 19.379, _round)
def usdrygallon2cubicinch(_volume: Union[float, int], _round: int = 3) -> float:
"""US Dry Gallons -> Cubic Inches."""
return round(_volume * 268.80, _round)
def usdrygallon2cubicmeter(_volume: Union[float, int], _round: int = 3) -> float:
"""US Dry Gallons -> Cubic Meters."""
return round(_volume * 0.00440489824685049775, _round)
def usdrygallon2liter(_volume: Union[float, int], _round: int = 3) -> float:
"""US Dry Gallons -> Liters."""
return round(_volume * 4.4048982468504977535, _round)
def usdrygallon2metriccup(_volume: Union[float, int], _round: int = 3) -> float:
"""US Dry Gallons -> Metric Cups."""
return round(_volume * 17.620, _round)
def usdrygallon2metrictablespoon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Dry Gallons -> Metric Tablespoons."""
return round(_volume * 293.66, _round)
def usdrygallon2metricteaspoon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Dry Gallons -> Metric Teaspoons."""
return round(_volume * 880.98, _round)
def usdrygallon2ukfluidounce(_volume: Union[float, int], _round: int = 3) -> float:
"""US Dry Gallons -> UK Fluid Ounces."""
return round(_volume * 155.03, _round)
def usdrygallon2ukgallon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Dry Gallons -> UK Gallons."""
return round(_volume * 0.96894, _round)
def usdrygallon2ukgill(_volume: Union[float, int], _round: int = 3) -> float:
"""US Dry Gallons -> UK Gills."""
return round(_volume * 31.006, _round)
def usdrygallon2ukpint(_volume: Union[float, int], _round: int = 3) -> float:
"""US Dry Gallons -> UK Pints."""
return round(_volume * 7.7515, _round)
def usdrygallon2ukquart(_volume: Union[float, int], _round: int = 3) -> float:
"""US Dry Gallons -> UK Quarts."""
return round(_volume * 3.8758, _round)
def usdrygallon2uktablespoon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Dry Gallons -> UK Tablespoons."""
return round(_volume * 310.06, _round)
def usdrygallon2ukteaspoon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Dry Gallons -> UK Teaspoons."""
return round(_volume * 1240.2, _round)
def usdrygallon2uscup(_volume: Union[float, int], _round: int = 3) -> float:
"""US Dry Gallons -> US Cups."""
return round(_volume * 18.618, _round)
def usdrygallon2usdrypint(_volume: Union[float, int], _round: int = 3) -> float:
"""US Dry Gallons -> US Dry Pints."""
return round(_volume * 8.0000, _round)
def usdrygallon2usdryquart(_volume: Union[float, int], _round: int = 3) -> float:
"""US Dry Gallons -> US Dry Quarts."""
return round(_volume * 4.0000, _round)
def usdrygallon2usfluidounce(_volume: Union[float, int], _round: int = 3) -> float:
"""US Dry Gallons -> US Fluid Ounces."""
return round(_volume * 148.95, _round)
def usdrygallon2usgill(_volume: Union[float, int], _round: int = 3) -> float:
"""US Dry Gallons -> US Gills."""
return round(_volume * 37.237, _round)
def usdrygallon2usliquidgallon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Dry Gallons -> US Liquid Gallons."""
return round(_volume * 1.1636, _round)
def usdrygallon2usliquidpint(_volume: Union[float, int], _round: int = 3) -> float:
"""US Dry Gallons -> US Liquid Pints."""
return round(_volume * 9.3092, _round)
def usdrygallon2usliquidquart(_volume: Union[float, int], _round: int = 3) -> float:
"""US Dry Gallons -> US Liquid Quarts."""
return round(_volume * 4.6546, _round)
def usdrygallon2ustablespoon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Dry Gallons -> US Tablespoons."""
return round(_volume * 297.89, _round)
def usdrygallon2usteaspoon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Dry Gallons -> US Teaspoons."""
return round(_volume * 893.68, _round)
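# Note: US dry measure is larger than US liquid measure (a dry gallon is
# 268.8 cubic inches versus 231.0 for the liquid gallon), which is why the
# dry<->liquid factors above are not 1.0, e.g.
#   usdrygallon2usliquidgallon(1) -> 1.164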
# US DRY PINTS #
def usdrypint2cancup(_volume: Union[float, int], _round: int = 3) -> float:
"""US Dry Pints -> Canadian Cups."""
return round(_volume * 2.4223, _round)
def usdrypint2cubicinch(_volume: Union[float, int], _round: int = 3) -> float:
"""US Dry Pints -> Cubic Inches."""
return round(_volume * 33.600, _round)
def usdrypint2cubicmeter(_volume: Union[float, int], _round: int = 3) -> float:
"""US Dry Pints -> Cubic Meters."""
return round(_volume * 0.00055060015416804317, _round)
def usdrypint2liter(_volume: Union[float, int], _round: int = 3) -> float:
"""US Dry Pints -> Liters."""
return round(_volume * 0.55060015416804316705, _round)
def usdrypint2metriccup(_volume: Union[float, int], _round: int = 3) -> float:
"""US Dry Pints -> Metric Cups."""
return round(_volume * 2.2024, _round)
def usdrypint2metrictablespoon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Dry Pints -> Metric Tablespoons."""
return round(_volume * 36.707, _round)
def usdrypint2metricteaspoon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Dry Pints -> Metric Teaspoons."""
return round(_volume * 110.12, _round)
def usdrypint2ukfluidounce(_volume: Union[float, int], _round: int = 3) -> float:
"""US Dry Pints -> UK Fluid Ounces."""
return round(_volume * 19.379, _round)
def usdrypint2ukgallon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Dry Pints -> UK Gallons."""
return round(_volume * 0.12112, _round)
def usdrypint2ukgill(_volume: Union[float, int], _round: int = 3) -> float:
"""US Dry Pints -> UK Gills."""
return round(_volume * 3.8758, _round)
def usdrypint2ukpint(_volume: Union[float, int], _round: int = 3) -> float:
"""US Dry Pints -> UK Pints."""
return round(_volume * 0.96894, _round)
def usdrypint2ukquart(_volume: Union[float, int], _round: int = 3) -> float:
"""US Dry Pints -> UK Quarts."""
return round(_volume * 0.48447, _round)
def usdrypint2uktablespoon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Dry Pints -> UK Tablespoons."""
return round(_volume * 38.758, _round)
def usdrypint2ukteaspoon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Dry Pints -> UK Teaspoons."""
return round(_volume * 155.03, _round)
def usdrypint2uscup(_volume: Union[float, int], _round: int = 3) -> float:
"""US Dry Pints -> US Cups."""
return round(_volume * 2.3273, _round)
def usdrypint2usdrygallon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Dry Pints -> US Dry Gallons."""
return round(_volume * 0.12500, _round)
def usdrypint2usdryquart(_volume: Union[float, int], _round: int = 3) -> float:
"""US Dry Pints -> US Dry Quarts."""
return round(_volume * 0.50000, _round)
def usdrypint2usfluidounce(_volume: Union[float, int], _round: int = 3) -> float:
"""US Dry Pints -> US Fluid Ounces."""
return round(_volume * 18.618, _round)
def usdrypint2usgill(_volume: Union[float, int], _round: int = 3) -> float:
"""US Dry Pints -> US Gills."""
return round(_volume * 4.6546, _round)
def usdrypint2usliquidgallon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Dry Pints -> US Liquid Gallons."""
return round(_volume * 0.14546, _round)
def usdrypint2usliquidpint(_volume: Union[float, int], _round: int = 3) -> float:
"""US Dry Pints -> US Liquid Pints."""
return round(_volume * 1.1636, _round)
def usdrypint2usliquidquart(_volume: Union[float, int], _round: int = 3) -> float:
"""US Dry Pints -> US Liquid Quarts."""
return round(_volume * 0.58182, _round)
def usdrypint2ustablespoon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Dry Pints -> US Tablespoons."""
return round(_volume * 37.237, _round)
def usdrypint2usteaspoon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Dry Pints -> US Teaspoons."""
return round(_volume * 111.71, _round)
# US DRY QUARTS #
def usdryquart2cancup(_volume: Union[float, int], _round: int = 3) -> float:
"""US Dry Quarts -> Canadian Cups."""
return round(_volume * 4.8447, _round)
def usdryquart2cubicinch(_volume: Union[float, int], _round: int = 3) -> float:
"""US Dry Quarts -> Cubic Inches."""
return round(_volume * 67.201, _round)
def usdryquart2cubicmeter(_volume: Union[float, int], _round: int = 3) -> float:
"""US Dry Quarts -> Cubic Meters."""
return round(_volume * 0.00110122456171262444, _round)
def usdryquart2liter(_volume: Union[float, int], _round: int = 3) -> float:
"""US Dry Quarts -> Liters."""
return round(_volume * 1.10122456171262443838, _round)
def usdryquart2metriccup(_volume: Union[float, int], _round: int = 3) -> float:
"""US Dry Quarts -> Metric Cups."""
return round(_volume * 4.4049, _round)
def usdryquart2metrictablespoon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Dry Quarts -> Metric Tablespoons."""
return round(_volume * 73.415, _round)
def usdryquart2metricteaspoon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Dry Quarts -> Metric Teaspoons."""
return round(_volume * 220.24, _round)
def usdryquart2ukfluidounce(_volume: Union[float, int], _round: int = 3) -> float:
"""US Dry Quarts -> UK Fluid Ounces."""
return round(_volume * 38.758, _round)
def usdryquart2ukgallon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Dry Quarts -> UK Gallons."""
return round(_volume * 0.24223, _round)
def usdryquart2ukgill(_volume: Union[float, int], _round: int = 3) -> float:
"""US Dry Quarts -> UK Gills."""
return round(_volume * 7.7515, _round)
def usdryquart2ukpint(_volume: Union[float, int], _round: int = 3) -> float:
"""US Dry Quarts -> UK Pints."""
return round(_volume * 1.9379, _round)
def usdryquart2ukquart(_volume: Union[float, int], _round: int = 3) -> float:
"""US Dry Quarts -> UK Quarts."""
return round(_volume * 0.96894, _round)
def usdryquart2uktablespoon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Dry Quarts -> UK Tablespoons."""
return round(_volume * 77.515, _round)
def usdryquart2ukteaspoon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Dry Quarts -> UK Teaspoons."""
return round(_volume * 310.06, _round)
def usdryquart2uscup(_volume: Union[float, int], _round: int = 3) -> float:
"""US Dry Quarts -> US Cups."""
return round(_volume * 4.6546, _round)
def usdryquart2usdrygallon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Dry Quarts -> US Dry Gallons."""
return round(_volume * 0.25000, _round)
def usdryquart2usdrypint(_volume: Union[float, int], _round: int = 3) -> float:
"""US Dry Quarts -> US Dry Pints."""
return round(_volume * 2.0000, _round)
def usdryquart2usfluidounce(_volume: Union[float, int], _round: int = 3) -> float:
"""US Dry Quarts -> US Fluid Ounces."""
return round(_volume * 37.237, _round)
def usdryquart2usgill(_volume: Union[float, int], _round: int = 3) -> float:
"""US Dry Quarts -> US Gills."""
return round(_volume * 9.3092, _round)
def usdryquart2usliquidgallon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Dry Quarts -> US Liquid Gallons."""
return round(_volume * 0.29091, _round)
def usdryquart2usliquidpint(_volume: Union[float, int], _round: int = 3) -> float:
"""US Dry Quarts -> US Liquid Pints."""
return round(_volume * 2.3273, _round)
def usdryquart2usliquidquart(_volume: Union[float, int], _round: int = 3) -> float:
"""US Dry Quarts -> US Liquid Quarts."""
return round(_volume * 1.1636, _round)
def usdryquart2ustablespoon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Dry Quarts -> US Tablespoons."""
return round(_volume * 74.473, _round)
def usdryquart2usteaspoon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Dry Quarts -> US Teaspoons."""
return round(_volume * 223.42, _round)
# US FLUID OUNCES #
def usfluidounce2cancup(_volume: Union[float, int], _round: int = 3) -> float:
"""US Fluid Ounces -> Canadian Cups."""
return round(_volume * 0.13011, _round)
def usfluidounce2cubicinch(_volume: Union[float, int], _round: int = 3) -> float:
"""US Fluid Ounces -> Cubic Inches."""
return round(_volume * 1.8047, _round)
def usfluidounce2cubicmeter(_volume: Union[float, int], _round: int = 3) -> float:
"""US Fluid Ounces -> Cubic Meters."""
return round(_volume * 0.00002957354941740108, _round)
def usfluidounce2liter(_volume: Union[float, int], _round: int = 3) -> float:
"""US Fluid Ounces -> Liters."""
return round(_volume * 0.02957354941740107648, _round)
def usfluidounce2metriccup(_volume: Union[float, int], _round: int = 3) -> float:
"""US Fluid Ounces -> Metric Cups."""
return round(_volume * 0.11829, _round)
def usfluidounce2metrictablespoon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Fluid Ounces -> Metric Tablespoons."""
return round(_volume * 1.9716, _round)
def usfluidounce2metricteaspoon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Fluid Ounces -> Metric Teaspoons."""
return round(_volume * 5.9147, _round)
def usfluidounce2ukfluidounce(_volume: Union[float, int], _round: int = 3) -> float:
"""US Fluid Ounces -> UK Fluid Ounces."""
return round(_volume * 1.0408, _round)
def usfluidounce2ukgallon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Fluid Ounces -> UK Gallons."""
return round(_volume * 0.0065053, _round)
def usfluidounce2ukgill(_volume: Union[float, int], _round: int = 3) -> float:
"""US Fluid Ounces -> UK Gills."""
return round(_volume * 0.20817, _round)
def usfluidounce2ukpint(_volume: Union[float, int], _round: int = 3) -> float:
"""US Fluid Ounces -> UK Pints."""
return round(_volume * 0.052042, _round)
def usfluidounce2ukquart(_volume: Union[float, int], _round: int = 3) -> float:
"""US Fluid Ounces -> UK Quarts."""
return round(_volume * 0.026021, _round)
def usfluidounce2uktablespoon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Fluid Ounces -> UK Tablespoons."""
return round(_volume * 2.0817, _round)
def usfluidounce2ukteaspoon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Fluid Ounces -> UK Teaspoons."""
return round(_volume * 8.3267, _round)
def usfluidounce2uscup(_volume: Union[float, int], _round: int = 3) -> float:
"""US Fluid Ounces -> US Cups."""
return round(_volume * 0.12500, _round)
def usfluidounce2usdrygallon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Fluid Ounces -> US Dry Gallons."""
return round(_volume * 0.0067138, _round)
def usfluidounce2usdrypint(_volume: Union[float, int], _round: int = 3) -> float:
"""US Fluid Ounces -> US Dry Pints."""
return round(_volume * 0.053710, _round)
def usfluidounce2usdryquart(_volume: Union[float, int], _round: int = 3) -> float:
"""US Fluid Ounces -> US Dry Quarts."""
return round(_volume * 0.026855, _round)
def usfluidounce2usgill(_volume: Union[float, int], _round: int = 3) -> float:
"""US Fluid Ounces -> US Gills."""
return round(_volume * 0.25000, _round)
def usfluidounce2usliquidgallon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Fluid Ounces -> US Liquid Gallons."""
return round(_volume * 0.0078125, _round)
def usfluidounce2usliquidpint(_volume: Union[float, int], _round: int = 3) -> float:
"""US Fluid Ounces -> US Liquid Pints."""
return round(_volume * 0.062500, _round)
def usfluidounce2usliquidquart(_volume: Union[float, int], _round: int = 3) -> float:
"""US Fluid Ounces -> US Liquid Quarts."""
return round(_volume * 0.031250, _round)
def usfluidounce2ustablespoon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Fluid Ounces -> US Tablespoons."""
return round(_volume * 2.0000, _round)
def usfluidounce2usteaspoon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Fluid Ounces -> US Teaspoons."""
return round(_volume * 6.0000, _round)
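# Illustrative usage: a US fluid ounce is exactly 2 US tablespoons and
# 6 US teaspoons, so
#   usfluidounce2ustablespoon(1)  -> 2.0
#   usfluidounce2usteaspoon(0.5)  -> 3.0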
# US GILLS #
def usgill2cancup(_volume: Union[float, int], _round: int = 3) -> float:
"""US Gills -> Canadian Cups."""
return round(_volume * 0.52042, _round)
def usgill2cubicinch(_volume: Union[float, int], _round: int = 3) -> float:
"""US Gills -> Cubic Inches."""
return round(_volume * 7.2187, _round)
def usgill2cubicmeter(_volume: Union[float, int], _round: int = 3) -> float:
"""US Gills -> Cubic Meters."""
return round(_volume * 0.00011829419766960431, _round)
def usgill2liter(_volume: Union[float, int], _round: int = 3) -> float:
"""US Gills -> Liters."""
return round(_volume * 0.11829419766960430591, _round)
def usgill2metriccup(_volume: Union[float, int], _round: int = 3) -> float:
"""US Gills -> Metric Cups."""
return round(_volume * 0.47318, _round)
def usgill2metrictablespoon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Gills -> Metric Tablespoons."""
return round(_volume * 7.8863, _round)
def usgill2metricteaspoon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Gills -> Metric Teaspoons."""
return round(_volume * 23.659, _round)
def usgill2ukfluidounce(_volume: Union[float, int], _round: int = 3) -> float:
"""US Gills -> UK Fluid Ounces."""
return round(_volume * 4.1634, _round)
def usgill2ukgallon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Gills -> UK Gallons."""
return round(_volume * 0.026021, _round)
def usgill2ukgill(_volume: Union[float, int], _round: int = 3) -> float:
"""US Gills -> UK Gills."""
return round(_volume * 0.83267, _round)
def usgill2ukpint(_volume: Union[float, int], _round: int = 3) -> float:
"""US Gills -> UK Pints."""
return round(_volume * 0.20817, _round)
def usgill2ukquart(_volume: Union[float, int], _round: int = 3) -> float:
"""US Gills -> UK Quarts."""
return round(_volume * 0.10408, _round)
def usgill2uktablespoon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Gills -> UK Tablespoons."""
return round(_volume * 8.3267, _round)
def usgill2ukteaspoon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Gills -> UK Teaspoons."""
return round(_volume * 33.307, _round)
def usgill2uscup(_volume: Union[float, int], _round: int = 3) -> float:
"""US Gills -> US Cups."""
return round(_volume * 0.50000, _round)
def usgill2usdrygallon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Gills -> US Dry Gallons."""
return round(_volume * 0.026855, _round)
def usgill2usdrypint(_volume: Union[float, int], _round: int = 3) -> float:
"""US Gills -> US Dry Pints."""
return round(_volume * 0.21484, _round)
def usgill2usdryquart(_volume: Union[float, int], _round: int = 3) -> float:
"""US Gills -> US Dry Quarts."""
return round(_volume * 0.10742, _round)
def usgill2usfluidounce(_volume: Union[float, int], _round: int = 3) -> float:
"""US Gills -> US Fluid Ounces."""
return round(_volume * 4.0000, _round)
def usgill2usliquidgallon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Gills -> US Liquid Gallons."""
return round(_volume * 0.031250, _round)
def usgill2usliquidpint(_volume: Union[float, int], _round: int = 3) -> float:
"""US Gills -> US Liquid Pints."""
return round(_volume * 0.25000, _round)
def usgill2usliquidquart(_volume: Union[float, int], _round: int = 3) -> float:
"""US Gills -> US Liquid Quarts."""
return round(_volume * 0.12500, _round)
def usgill2ustablespoon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Gills -> US Tablespoons."""
return round(_volume * 8.0000, _round)
def usgill2usteaspoon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Gills -> US Teaspoons."""
return round(_volume * 24.000, _round)
# US LIQUID GALLONS #
def usliquidgallon2cancup(_volume: Union[float, int], _round: int = 3) -> float:
"""US Liquid Gallons -> Canadian Cups."""
return round(_volume * 16.653, _round)
def usliquidgallon2cubicinch(_volume: Union[float, int], _round: int = 3) -> float:
"""US Liquid Gallons -> Cubic Inches."""
return round(_volume * 231.00, _round)
def usliquidgallon2cubicmeter(_volume: Union[float, int], _round: int = 3) -> float:
"""US Liquid Gallons -> Cubic Meters."""
return round(_volume * 0.00378544119317106409, _round)
def usliquidgallon2liter(_volume: Union[float, int], _round: int = 3) -> float:
"""US Liquid Gallons -> Liters."""
return round(_volume * 3.78544119317106408752, _round)
def usliquidgallon2metriccup(_volume: Union[float, int], _round: int = 3) -> float:
"""US Liquid Gallons -> Metric Cups."""
return round(_volume * 15.142, _round)
def usliquidgallon2metrictablespoon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Liquid Gallons -> Metric Tablespoons."""
return round(_volume * 252.36, _round)
def usliquidgallon2metricteaspoon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Liquid Gallons -> Metric Teaspoons."""
return round(_volume * 757.08, _round)
def usliquidgallon2ukfluidounce(_volume: Union[float, int], _round: int = 3) -> float:
"""US Liquid Gallons -> UK Fluid Ounces."""
return round(_volume * 133.23, _round)
def usliquidgallon2ukgallon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Liquid Gallons -> UK Gallons."""
return round(_volume * 0.83267, _round)
def usliquidgallon2ukgill(_volume: Union[float, int], _round: int = 3) -> float:
"""US Liquid Gallons -> UK Gills."""
return round(_volume * 26.646, _round)
def usliquidgallon2ukpint(_volume: Union[float, int], _round: int = 3) -> float:
"""US Liquid Gallons -> UK Pints."""
return round(_volume * 6.6614, _round)
def usliquidgallon2ukquart(_volume: Union[float, int], _round: int = 3) -> float:
"""US Liquid Gallons -> UK Quarts."""
return round(_volume * 3.3307, _round)
def usliquidgallon2uktablespoon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Liquid Gallons -> UK Tablespoons."""
return round(_volume * 266.46, _round)
def usliquidgallon2ukteaspoon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Liquid Gallons -> UK Teaspoons."""
return round(_volume * 1065.8, _round)
def usliquidgallon2uscup(_volume: Union[float, int], _round: int = 3) -> float:
"""US Liquid Gallons -> US Cups."""
return round(_volume * 16.000, _round)
def usliquidgallon2usdrygallon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Liquid Gallons -> US Dry Gallons."""
return round(_volume * 0.85937, _round)
def usliquidgallon2usdrypint(_volume: Union[float, int], _round: int = 3) -> float:
"""US Liquid Gallons -> US Dry Pints."""
return round(_volume * 6.8749, _round)
def usliquidgallon2usdryquart(_volume: Union[float, int], _round: int = 3) -> float:
"""US Liquid Gallons -> US Dry Quarts."""
return round(_volume * 3.4375, _round)
def usliquidgallon2usfluidounce(_volume: Union[float, int], _round: int = 3) -> float:
"""US Liquid Gallons -> US Fluid Ounces."""
return round(_volume * 128.00, _round)
def usliquidgallon2usgill(_volume: Union[float, int], _round: int = 3) -> float:
"""US Liquid Gallons -> US Gills."""
return round(_volume * 32.000, _round)
def usliquidgallon2usliquidpint(_volume: Union[float, int], _round: int = 3) -> float:
"""US Liquid Gallons -> US Liquid Pints."""
return round(_volume * 8.0000, _round)
def usliquidgallon2usliquidquart(_volume: Union[float, int], _round: int = 3) -> float:
"""US Liquid Gallons -> US Liquid Quarts."""
return round(_volume * 4.0000, _round)
def usliquidgallon2ustablespoon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Liquid Gallons -> US Tablespoons."""
return round(_volume * 256.00, _round)
def usliquidgallon2usteaspoon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Liquid Gallons -> US Teaspoons."""
return round(_volume * 768.00, _round)
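# Illustrative usage: 1 US liquid gallon is 231 cubic inches (~3.785 liters)
# and exactly 128 US fluid ounces, so
#   usliquidgallon2liter(1)        -> 3.785
#   usliquidgallon2usfluidounce(2) -> 256.0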
# US LIQUID PINTS #
def usliquidpint2cancup(_volume: Union[float, int], _round: int = 3) -> float:
"""US Liquid Pints -> Canadian Cups."""
return round(_volume * 2.0817, _round)
def usliquidpint2cubicinch(_volume: Union[float, int], _round: int = 3) -> float:
"""US Liquid Pints -> Cubic Inches."""
return round(_volume * 28.875, _round)
def usliquidpint2cubicmeter(_volume: Union[float, int], _round: int = 3) -> float:
"""US Liquid Pints -> Cubic Meters."""
return round(_volume * 0.0004731711933377496, _round)
def usliquidpint2liter(_volume: Union[float, int], _round: int = 3) -> float:
"""US Liquid Pints -> Liters."""
return round(_volume * 0.4731711933377495978, _round)
def usliquidpint2metriccup(_volume: Union[float, int], _round: int = 3) -> float:
"""US Liquid Pints -> Metric Cups."""
return round(_volume * 1.8927, _round)
def usliquidpint2metrictablespoon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Liquid Pints -> Metric Tablespoons."""
return round(_volume * 31.545, _round)
def usliquidpint2metricteaspoon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Liquid Pints -> Metric Teaspoons."""
return round(_volume * 94.635, _round)
def usliquidpint2ukfluidounce(_volume: Union[float, int], _round: int = 3) -> float:
"""US Liquid Pints -> UK Fluid Ounces."""
return round(_volume * 16.653, _round)
def usliquidpint2ukgallon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Liquid Pints -> UK Gallons."""
return round(_volume * 0.10408, _round)
def usliquidpint2ukgill(_volume: Union[float, int], _round: int = 3) -> float:
"""US Liquid Pints -> UK Gills."""
return round(_volume * 3.3307, _round)
def usliquidpint2ukpint(_volume: Union[float, int], _round: int = 3) -> float:
"""US Liquid Pints -> UK Pints."""
return round(_volume * 0.83267, _round)
def usliquidpint2ukquart(_volume: Union[float, int], _round: int = 3) -> float:
"""US Liquid Pints -> UK Quarts."""
return round(_volume * 0.41634, _round)
def usliquidpint2uktablespoon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Liquid Pints -> UK Tablespoons."""
return round(_volume * 33.307, _round)
def usliquidpint2ukteaspoon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Liquid Pints -> UK Teaspoons."""
return round(_volume * 133.23, _round)
def usliquidpint2uscup(_volume: Union[float, int], _round: int = 3) -> float:
"""US Liquid Pints -> US Cups."""
return round(_volume * 2.0000, _round)
def usliquidpint2usdrygallon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Liquid Pints -> US Dry Gallons."""
return round(_volume * 0.10742, _round)
def usliquidpint2usdrypint(_volume: Union[float, int], _round: int = 3) -> float:
"""US Liquid Pints -> US Dry Pints."""
return round(_volume * 0.85937, _round)
def usliquidpint2usdryquart(_volume: Union[float, int], _round: int = 3) -> float:
"""US Liquid Pints -> US Dry Quarts."""
return round(_volume * 0.42968, _round)
def usliquidpint2usfluidounce(_volume: Union[float, int], _round: int = 3) -> float:
"""US Liquid Pints -> US Fluid Ounces."""
return round(_volume * 16.000, _round)
def usliquidpint2usgill(_volume: Union[float, int], _round: int = 3) -> float:
"""US Liquid Pints -> US Gills."""
return round(_volume * 4.0000, _round)
def usliquidpint2usliquidgallon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Liquid Pints -> US Liquid Gallons."""
return round(_volume * 0.12500, _round)
def usliquidpint2usliquidquart(_volume: Union[float, int], _round: int = 3) -> float:
"""US Liquid Pints -> US Liquid Quarts."""
return round(_volume * 0.50000, _round)
def usliquidpint2ustablespoon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Liquid Pints -> US Tablespoons."""
return round(_volume * 32.000, _round)
def usliquidpint2usteaspoon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Liquid Pints -> US Teaspoons."""
return round(_volume * 96.000, _round)
# US LIQUID QUARTS #
def usliquidquart2cancup(_volume: Union[float, int], _round: int = 3) -> float:
"""US Liquid Quarts -> Canadian Cups."""
return round(_volume * 4.1634, _round)
def usliquidquart2cubicinch(_volume: Union[float, int], _round: int = 3) -> float:
"""US Liquid Quarts -> Cubic Inches."""
return round(_volume * 57.750, _round)
def usliquidquart2cubicmeter(_volume: Union[float, int], _round: int = 3) -> float:
"""US Liquid Quarts -> Cubic Meters."""
return round(_volume * 0.0009463423866754992, _round)
def usliquidquart2liter(_volume: Union[float, int], _round: int = 3) -> float:
"""US Liquid Quarts -> Liters."""
return round(_volume * 0.94634238667549919561, _round)
def usliquidquart2metriccup(_volume: Union[float, int], _round: int = 3) -> float:
"""US Liquid Quarts -> Metric Cups."""
return round(_volume * 3.7854, _round)
def usliquidquart2metrictablespoon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Liquid Quarts -> Metric Tablespoons."""
return round(_volume * 63.090, _round)
def usliquidquart2metricteaspoon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Liquid Quarts -> Metric Teaspoons."""
return round(_volume * 189.27, _round)
def usliquidquart2ukfluidounce(_volume: Union[float, int], _round: int = 3) -> float:
"""US Liquid Quarts -> UK Fluid Ounces."""
return round(_volume * 33.307, _round)
def usliquidquart2ukgallon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Liquid Quarts -> UK Gallons."""
return round(_volume * 0.20817, _round)
def usliquidquart2ukgill(_volume: Union[float, int], _round: int = 3) -> float:
"""US Liquid Quarts -> UK Gills."""
return round(_volume * 6.6614, _round)
def usliquidquart2ukpint(_volume: Union[float, int], _round: int = 3) -> float:
"""US Liquid Quarts -> UK Pints."""
return round(_volume * 1.6653, _round)
def usliquidquart2ukquart(_volume: Union[float, int], _round: int = 3) -> float:
"""US Liquid Quarts -> UK Quarts."""
return round(_volume * 0.83267, _round)
def usliquidquart2uktablespoon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Liquid Quarts -> UK Tablespoons."""
return round(_volume * 66.614, _round)
def usliquidquart2ukteaspoon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Liquid Quarts -> UK Teaspoons."""
return round(_volume * 266.46, _round)
def usliquidquart2uscup(_volume: Union[float, int], _round: int = 3) -> float:
"""US Liquid Quarts -> US Cups."""
return round(_volume * 4.0000, _round)
def usliquidquart2usdrygallon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Liquid Quarts -> US Dry Gallons."""
return round(_volume * 0.21484, _round)
def usliquidquart2usdrypint(_volume: Union[float, int], _round: int = 3) -> float:
"""US Liquid Quarts -> US Dry Pints."""
return round(_volume * 1.7187, _round)
def usliquidquart2usdryquart(_volume: Union[float, int], _round: int = 3) -> float:
"""US Liquid Quarts -> US Dry Quarts."""
return round(_volume * 0.85937, _round)
def usliquidquart2usfluidounce(_volume: Union[float, int], _round: int = 3) -> float:
"""US Liquid Quarts -> US Fluid Ounces."""
return round(_volume * 32.000, _round)
def usliquidquart2usgill(_volume: Union[float, int], _round: int = 3) -> float:
"""US Liquid Quarts -> US Gills."""
return round(_volume * 8.0000, _round)
def usliquidquart2usliquidgallon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Liquid Quarts -> US Liquid Gallons."""
return round(_volume * 0.25000, _round)
def usliquidquart2usliquidpint(_volume: Union[float, int], _round: int = 3) -> float:
"""US Liquid Quarts -> US Liquid Pints."""
return round(_volume * 2.0000, _round)
def usliquidquart2ustablespoon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Liquid Quarts -> US Tablespoons."""
return round(_volume * 64.000, _round)
def usliquidquart2usteaspoon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Liquid Quarts -> US Teaspoons."""
return round(_volume * 192.00, _round)
# US TABLESPOONS #
def ustablespoon2cancup(_volume: Union[float, int], _round: int = 3) -> float:
"""US Tablespoons -> Canadian Cups."""
return round(_volume * 0.065053, _round)
def ustablespoon2cubicinch(_volume: Union[float, int], _round: int = 3) -> float:
"""US Tablespoons -> Cubic Inches."""
return round(_volume * 0.90234, _round)
def ustablespoon2cubicmeter(_volume: Union[float, int], _round: int = 3) -> float:
"""US Tablespoons -> Cubic Meters."""
return round(_volume * 0.00001478677470870054, _round)
def ustablespoon2liter(_volume: Union[float, int], _round: int = 3) -> float:
"""US Tablespoons -> Liters."""
return round(_volume * 0.01478677470870053824, _round)
def ustablespoon2metriccup(_volume: Union[float, int], _round: int = 3) -> float:
"""US Tablespoons -> Metric Cups."""
return round(_volume * 0.059147, _round)
def ustablespoon2metrictablespoon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Tablespoons -> Metric Tablespoons."""
return round(_volume * 0.98578, _round)
def ustablespoon2metricteaspoon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Tablespoons -> Metric Teaspoons."""
return round(_volume * 2.9574, _round)
def ustablespoon2ukfluidounce(_volume: Union[float, int], _round: int = 3) -> float:
"""US Tablespoons -> UK Fluid Ounces."""
return round(_volume * 0.52042, _round)
def ustablespoon2ukgallon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Tablespoons -> UK Gallons."""
return round(_volume * 0.0032526, _round)
def ustablespoon2ukgill(_volume: Union[float, int], _round: int = 3) -> float:
"""US Tablespoons -> UK Gills."""
return round(_volume * 0.10408, _round)
def ustablespoon2ukpint(_volume: Union[float, int], _round: int = 3) -> float:
"""US Tablespoons -> UK Pints."""
return round(_volume * 0.026021, _round)
def ustablespoon2ukquart(_volume: Union[float, int], _round: int = 3) -> float:
"""US Tablespoons -> UK Quarts."""
return round(_volume * 0.013011, _round)
def ustablespoon2uktablespoon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Tablespoons -> UK Tablespoons."""
return round(_volume * 1.0408, _round)
def ustablespoon2ukteaspoon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Tablespoons -> UK Teaspoons."""
return round(_volume * 4.1634, _round)
def ustablespoon2uscup(_volume: Union[float, int], _round: int = 3) -> float:
"""US Tablespoons -> US Cups."""
return round(_volume * 0.062500, _round)
def ustablespoon2usdrygallon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Tablespoons -> US Dry Gallons."""
return round(_volume * 0.0033569, _round)
def ustablespoon2usdrypint(_volume: Union[float, int], _round: int = 3) -> float:
"""US Tablespoons -> US Dry Pints."""
return round(_volume * 0.026855, _round)
def ustablespoon2usdryquart(_volume: Union[float, int], _round: int = 3) -> float:
"""US Tablespoons -> US Dry Quarts."""
return round(_volume * 0.013428, _round)
def ustablespoon2usfluidounce(_volume: Union[float, int], _round: int = 3) -> float:
"""US Tablespoons -> US Fluid Ounces."""
return round(_volume * 0.50000, _round)
def ustablespoon2usgill(_volume: Union[float, int], _round: int = 3) -> float:
"""US Tablespoons -> US Gills."""
return round(_volume * 0.12500, _round)
def ustablespoon2usliquidgallon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Tablespoons -> US Liquid Gallons."""
return round(_volume * 0.0039062, _round)
def ustablespoon2usliquidpint(_volume: Union[float, int], _round: int = 3) -> float:
"""US Tablespoons -> US Liquid Pints."""
return round(_volume * 0.031250, _round)
def ustablespoon2usliquidquart(_volume: Union[float, int], _round: int = 3) -> float:
"""US Tablespoons -> US Liquid Quarts."""
return round(_volume * 0.015625, _round)
def ustablespoon2usteaspoon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Tablespoons -> US Teaspoons."""
return round(_volume * 3.0000, _round)
# US TEASPOONS #
def usteaspoon2cancup(_volume: Union[float, int], _round: int = 3) -> float:
"""US Teaspoons -> Canadian Cups."""
return round(_volume * 0.021684, _round)
def usteaspoon2cubicinch(_volume: Union[float, int], _round: int = 3) -> float:
"""US Teaspoons -> Cubic Inches."""
return round(_volume * 0.30078, _round)
def usteaspoon2cubicmeter(_volume: Union[float, int], _round: int = 3) -> float:
"""US Teaspoons -> Cubic Meters."""
return round(_volume * 0.00000492902208201893, _round)
def usteaspoon2liter(_volume: Union[float, int], _round: int = 3) -> float:
"""US Teaspoons -> Liters."""
return round(_volume * 0.00492902208201892744, _round)
def usteaspoon2metriccup(_volume: Union[float, int], _round: int = 3) -> float:
"""US Teaspoons -> Metric Cups."""
return round(_volume * 0.019716, _round)
def usteaspoon2metrictablespoon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Teaspoons -> Metric Tablespoons."""
return round(_volume * 0.32859, _round)
def usteaspoon2metricteaspoon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Teaspoons -> Metric Teaspoons."""
return round(_volume * 0.98578, _round)
def usteaspoon2ukfluidounce(_volume: Union[float, int], _round: int = 3) -> float:
"""US Teaspoons -> UK Fluid Ounces."""
return round(_volume * 0.17347, _round)
def usteaspoon2ukgallon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Teaspoons -> UK Gallons."""
return round(_volume * 0.0010842, _round)
def usteaspoon2ukgill(_volume: Union[float, int], _round: int = 3) -> float:
"""US Teaspoons -> UK Gills."""
return round(_volume * 0.034695, _round)
def usteaspoon2ukpint(_volume: Union[float, int], _round: int = 3) -> float:
"""US Teaspoons -> UK Pints."""
return round(_volume * 0.0086737, _round)
def usteaspoon2ukquart(_volume: Union[float, int], _round: int = 3) -> float:
"""US Teaspoons -> UK Quarts."""
return round(_volume * 0.0043368, _round)
def usteaspoon2uktablespoon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Teaspoons -> UK Tablespoons."""
return round(_volume * 0.34695, _round)
def usteaspoon2ukteaspoon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Teaspoons -> UK Teaspoons."""
return round(_volume * 1.3878, _round)
def usteaspoon2uscup(_volume: Union[float, int], _round: int = 3) -> float:
"""US Teaspoons -> US Cups."""
return round(_volume * 0.020833, _round)
def usteaspoon2usdrygallon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Teaspoons -> US Dry Gallons."""
return round(_volume * 0.0011190, _round)
def usteaspoon2usdrypint(_volume: Union[float, int], _round: int = 3) -> float:
"""US Teaspoons -> US Dry Pints."""
return round(_volume * 0.0089517, _round)
def usteaspoon2usdryquart(_volume: Union[float, int], _round: int = 3) -> float:
"""US Teaspoons -> US Dry Quarts."""
return round(_volume * 0.0044759, _round)
def usteaspoon2usfluidounce(_volume: Union[float, int], _round: int = 3) -> float:
"""US Teaspoons -> US Fluid Ounces."""
return round(_volume * 0.16667, _round)
def usteaspoon2usgill(_volume: Union[float, int], _round: int = 3) -> float:
"""US Teaspoons -> US Gills."""
return round(_volume * 0.041667, _round)
def usteaspoon2usliquidgallon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Teaspoons -> US Liquid Gallons."""
return round(_volume * 0.0013021, _round)
def usteaspoon2usliquidpint(_volume: Union[float, int], _round: int = 3) -> float:
"""US Teaspoons -> US Liquid Pints."""
return round(_volume * 0.010417, _round)
def usteaspoon2usliquidquart(_volume: Union[float, int], _round: int = 3) -> float:
"""US Teaspoons -> US Liquid Quarts."""
return round(_volume * 0.0052083, _round)
def usteaspoon2ustablespoon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Teaspoons -> US Tablespoons."""
return round(_volume * 0.33333, _round)
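# Minimal sanity check (illustrative; assumes the module is run directly, and
# the expected values follow from the exact definitions used above).
if __name__ == "__main__":  # pragma: no cover
    assert ukquart2ukpint(1) == 2.0                # 1 UK quart == 2 UK pints
    assert uscup2usfluidounce(2) == 16.0           # 1 US cup == 8 US fl oz
    assert usliquidgallon2usteaspoon(1) == 768.0   # 128 fl oz * 6 tsp
    print("volume conversion sanity checks passed")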
| lgpl-3.0 | 8,123,197,804,647,962,000 | 31.317719 | 150 | 0.65307 | false |
googleapis/python-pubsublite | tests/unit/pubsublite/cloudpubsub/internal/single_partition_subscriber_test.py | 1 | 10169 | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import asyncio
from typing import Callable
from asynctest.mock import MagicMock, call
import pytest
from google.api_core.exceptions import FailedPrecondition
from google.cloud.pubsub_v1.subscriber.message import Message
from google.pubsub_v1 import PubsubMessage
from google.cloud.pubsublite.types import FlowControlSettings
from google.cloud.pubsublite.cloudpubsub.internal.ack_set_tracker import AckSetTracker
from google.cloud.pubsublite.cloudpubsub.internal.single_partition_subscriber import (
SinglePartitionSingleSubscriber,
)
from google.cloud.pubsublite.cloudpubsub.message_transformer import MessageTransformer
from google.cloud.pubsublite.cloudpubsub.nack_handler import NackHandler
from google.cloud.pubsublite.cloudpubsub.internal.single_subscriber import (
AsyncSingleSubscriber,
)
from google.cloud.pubsublite.internal.wire.subscriber import Subscriber
from google.cloud.pubsublite.internal.wire.subscriber_reset_handler import (
SubscriberResetHandler,
)
from google.cloud.pubsublite.testing.test_utils import make_queue_waiter
from google.cloud.pubsublite_v1 import Cursor, FlowControlRequest, SequencedMessage
# All test coroutines will be treated as marked.
pytestmark = pytest.mark.asyncio
def mock_async_context_manager(cm):
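    # Make the mock usable with `async with`: entering the async context
    # returns the mock itself, mirroring the real Subscriber/AckSetTracker.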
cm.__aenter__.return_value = cm
return cm
@pytest.fixture()
def underlying():
return mock_async_context_manager(MagicMock(spec=Subscriber))
@pytest.fixture()
def flow_control_settings():
return FlowControlSettings(1000, 1000)
@pytest.fixture()
def initial_flow_request(flow_control_settings):
return FlowControlRequest(
allowed_messages=flow_control_settings.messages_outstanding,
allowed_bytes=flow_control_settings.bytes_outstanding,
)
@pytest.fixture()
def ack_set_tracker():
return mock_async_context_manager(MagicMock(spec=AckSetTracker))
@pytest.fixture()
def nack_handler():
return MagicMock(spec=NackHandler)
@pytest.fixture()
def transformer():
result = MagicMock(spec=MessageTransformer)
result.transform.side_effect = lambda source: PubsubMessage(
message_id=str(source.cursor.offset)
)
return result
@pytest.fixture()
def subscriber(
underlying, flow_control_settings, ack_set_tracker, nack_handler, transformer
):
def subscriber_factory(reset_handler: SubscriberResetHandler):
return underlying
return SinglePartitionSingleSubscriber(
subscriber_factory,
flow_control_settings,
ack_set_tracker,
nack_handler,
transformer,
)
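# The `subscriber` fixture wires the mocked wire-protocol Subscriber, ack
# tracker, nack handler and transformer into a real
# SinglePartitionSingleSubscriber, so the tests below exercise the actual
# ack/nack/flow-control logic against mocks.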
async def test_init(subscriber, underlying, ack_set_tracker, initial_flow_request):
async with subscriber:
underlying.__aenter__.assert_called_once()
ack_set_tracker.__aenter__.assert_called_once()
underlying.allow_flow.assert_called_once_with(initial_flow_request)
underlying.__aexit__.assert_called_once()
ack_set_tracker.__aexit__.assert_called_once()
async def test_failed_transform(subscriber, underlying, transformer):
async with subscriber:
transformer.transform.side_effect = FailedPrecondition("Bad message")
underlying.read.return_value = SequencedMessage()
with pytest.raises(FailedPrecondition):
await subscriber.read()
async def test_ack(
subscriber: AsyncSingleSubscriber, underlying, transformer, ack_set_tracker
):
ack_called_queue = asyncio.Queue()
ack_result_queue = asyncio.Queue()
ack_set_tracker.ack.side_effect = make_queue_waiter(
ack_called_queue, ack_result_queue
)
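    # make_queue_waiter makes tracker.ack block until the test releases it:
    # the call is observed via ack_called_queue, and the test completes (or
    # fails) it by putting a value or an exception on ack_result_queue.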
async with subscriber:
message_1 = SequencedMessage(cursor=Cursor(offset=1), size_bytes=5)
message_2 = SequencedMessage(cursor=Cursor(offset=2), size_bytes=10)
underlying.read.return_value = message_1
read_1: Message = await subscriber.read()
ack_set_tracker.track.assert_has_calls([call(1)])
assert read_1.message_id == "1"
underlying.read.return_value = message_2
read_2: Message = await subscriber.read()
ack_set_tracker.track.assert_has_calls([call(1), call(2)])
assert read_2.message_id == "2"
read_2.ack()
await ack_called_queue.get()
await ack_result_queue.put(None)
ack_set_tracker.ack.assert_has_calls([call(2)])
read_1.ack()
await ack_called_queue.get()
await ack_result_queue.put(None)
ack_set_tracker.ack.assert_has_calls([call(2), call(1)])
async def test_track_failure(
subscriber: SinglePartitionSingleSubscriber,
underlying,
transformer,
ack_set_tracker,
):
async with subscriber:
ack_set_tracker.track.side_effect = FailedPrecondition("Bad track")
message = SequencedMessage(cursor=Cursor(offset=1), size_bytes=5)
underlying.read.return_value = message
with pytest.raises(FailedPrecondition):
await subscriber.read()
ack_set_tracker.track.assert_has_calls([call(1)])
async def test_ack_failure(
subscriber: SinglePartitionSingleSubscriber,
underlying,
transformer,
ack_set_tracker,
):
ack_called_queue = asyncio.Queue()
ack_result_queue = asyncio.Queue()
ack_set_tracker.ack.side_effect = make_queue_waiter(
ack_called_queue, ack_result_queue
)
async with subscriber:
message = SequencedMessage(cursor=Cursor(offset=1), size_bytes=5)
underlying.read.return_value = message
read: Message = await subscriber.read()
ack_set_tracker.track.assert_has_calls([call(1)])
read.ack()
await ack_called_queue.get()
ack_set_tracker.ack.assert_has_calls([call(1)])
await ack_result_queue.put(FailedPrecondition("Bad ack"))
async def sleep_forever():
await asyncio.sleep(float("inf"))
underlying.read.side_effect = sleep_forever
with pytest.raises(FailedPrecondition):
await subscriber.read()
async def test_nack_failure(
subscriber: SinglePartitionSingleSubscriber,
underlying,
transformer,
ack_set_tracker,
nack_handler,
):
async with subscriber:
message = SequencedMessage(cursor=Cursor(offset=1), size_bytes=5)
underlying.read.return_value = message
read: Message = await subscriber.read()
ack_set_tracker.track.assert_has_calls([call(1)])
nack_handler.on_nack.side_effect = FailedPrecondition("Bad nack")
read.nack()
async def sleep_forever():
await asyncio.sleep(float("inf"))
underlying.read.side_effect = sleep_forever
with pytest.raises(FailedPrecondition):
await subscriber.read()
async def test_nack_calls_ack(
subscriber: SinglePartitionSingleSubscriber,
underlying,
transformer,
ack_set_tracker,
nack_handler,
):
ack_called_queue = asyncio.Queue()
ack_result_queue = asyncio.Queue()
ack_set_tracker.ack.side_effect = make_queue_waiter(
ack_called_queue, ack_result_queue
)
async with subscriber:
message = SequencedMessage(cursor=Cursor(offset=1), size_bytes=5)
underlying.read.return_value = message
read: Message = await subscriber.read()
ack_set_tracker.track.assert_has_calls([call(1)])
def on_nack(nacked: PubsubMessage, ack: Callable[[], None]):
assert nacked.message_id == "1"
ack()
nack_handler.on_nack.side_effect = on_nack
read.nack()
await ack_called_queue.get()
await ack_result_queue.put(None)
ack_set_tracker.ack.assert_has_calls([call(1)])
async def test_handle_reset(
subscriber: SinglePartitionSingleSubscriber,
underlying,
transformer,
ack_set_tracker,
):
ack_called_queue = asyncio.Queue()
ack_result_queue = asyncio.Queue()
ack_set_tracker.ack.side_effect = make_queue_waiter(
ack_called_queue, ack_result_queue
)
async with subscriber:
message_1 = SequencedMessage(cursor=Cursor(offset=1), size_bytes=5)
underlying.read.return_value = message_1
read_1: Message = await subscriber.read()
ack_set_tracker.track.assert_has_calls([call(1)])
assert read_1.message_id == "1"
await subscriber.handle_reset()
ack_set_tracker.clear_and_commit.assert_called_once()
# After reset, flow control tokens of unacked messages are refilled,
# but offset not committed.
read_1.ack()
await ack_called_queue.get()
await ack_result_queue.put(None)
underlying.allow_flow.assert_has_calls(
[
call(FlowControlRequest(allowed_messages=1000, allowed_bytes=1000,)),
call(FlowControlRequest(allowed_messages=1, allowed_bytes=5,)),
]
)
ack_set_tracker.ack.assert_has_calls([])
message_2 = SequencedMessage(cursor=Cursor(offset=2), size_bytes=10)
underlying.read.return_value = message_2
read_2: Message = await subscriber.read()
ack_set_tracker.track.assert_has_calls([call(1), call(2)])
assert read_2.message_id == "2"
read_2.ack()
await ack_called_queue.get()
await ack_result_queue.put(None)
underlying.allow_flow.assert_has_calls(
[
call(FlowControlRequest(allowed_messages=1000, allowed_bytes=1000,)),
call(FlowControlRequest(allowed_messages=1, allowed_bytes=5,)),
call(FlowControlRequest(allowed_messages=1, allowed_bytes=10,)),
]
)
ack_set_tracker.ack.assert_has_calls([call(2)])
| apache-2.0 | -7,433,503,452,001,105,000 | 33.588435 | 86 | 0.68699 | false |
slickqa/slickqaweb | slickqaweb/api/files.py | 1 | 7168 | __author__ = 'jcorbett'
from slickqaweb.app import app
from flask import request, Response
from bson import ObjectId
from slickqaweb.model.storedFile import StoredFile
from slickqaweb.model.fileChunk import FileChunk
from slickqaweb.model.serialize import deserialize_that
from .standardResponses import JsonResponse, read_request
from hashlib import md5
import re
import logging
from .apidocs import add_resource, accepts, returns, argument_doc, note
from mongoengine import ListField, EmbeddedDocumentField, ReferenceField, BinaryField
add_resource('/files', 'Upload, or Download files on slick.')
@app.route("/api/files/<file_id>")
@argument_doc('file_id', 'The id (string representation of the ObjectID) of the stored file.')
@returns(StoredFile)
def get_stored_file(file_id):
"""Get the "stored file" or the summary about the file."""
return JsonResponse(StoredFile.objects(id=ObjectId(file_id)).first())
@app.route("/api/files/<file_id>", methods=["PUT"])
@argument_doc('file_id', 'The id (string representation of the ObjectID) of the stored file.')
@accepts(StoredFile)
@returns(StoredFile)
def update_stored_file(file_id):
"""Update the properties of a stored file, you only have to include changed properties"""
stored_file = StoredFile.objects(id=ObjectId(file_id)).first()
stored_file = deserialize_that(read_request(), stored_file)
stored_file.save()
return JsonResponse(stored_file)
@app.route("/api/files", methods=["POST"])
@accepts(StoredFile)
@returns(StoredFile)
@note("The chunkSize will be set by the server for you, even if you provide it. Make sure you supply a valid mimetype.")
def create_stored_file():
"""Create a new StoredFile object to store file content for."""
new_stored_file = deserialize_that(read_request(), StoredFile())
new_stored_file.chunkSize = 262144
new_stored_file.save()
return JsonResponse(new_stored_file)
@app.route("/api/files/<file_id>/content", methods=["POST"])
@argument_doc('file_id', 'The id (string representation of the ObjectID) of the stored file.')
@accepts(BinaryField(help_text="binary data of file"))
@returns(StoredFile)
@note("Use is not recommended unless your file is really small. Instead add individual chunks to the file.")
def set_file_content(file_id):
"""Upload all the content at once (for small files)."""
stored_file = StoredFile.objects(id=ObjectId(file_id)).first()
data = request.data
stored_file.md5 = md5(data).hexdigest()
stored_file.length = len(data)
    num_of_chunks = len(data) // 262144
if (len(data) % 262144) > 0:
num_of_chunks += 1
for i in range(num_of_chunks):
chunk = FileChunk()
chunk.files_id = stored_file.id
chunk.n = i
chunk.data = data[i * 262144:(i + 1) * 262144]
chunk.save()
stored_file.save()
return JsonResponse(stored_file)
@app.route("/api/files/<file_id>/addchunk", methods=["POST"])
@app.route("/api/results/<result_id>/files/<file_id>/addchunk", methods=["POST"])
@argument_doc('file_id', 'The id (string representation of the ObjectID) of the stored file.')
@accepts(BinaryField(help_text="binary data of the chunk."))
@returns(StoredFile)
def add_chunk_to_file(file_id, result_id=None):
"""Add content to a file (chunk by chunk)."""
stored_file = StoredFile.objects(id=ObjectId(file_id)).first()
num_of_chunks = len(FileChunk.objects(files_id=stored_file.id))
chunk = FileChunk()
chunk.files_id = stored_file.id
chunk.n = num_of_chunks
chunk.data = request.data
chunk.save()
stored_file.length += len(request.data)
stored_file.save()
return JsonResponse(stored_file)
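# Client-side sketch (illustrative only, not part of this API module): how a
# caller might drive the endpoints above to upload a file chunk by chunk. The
# base URL, the use of the third-party "requests" library and the 'id' key of
# the returned JSON are assumptions made for demonstration purposes.
def _example_chunked_upload(path, base_url='http://localhost:8080/api'):
    import json
    import requests as http  # hypothetical client dependency
    created = http.post(base_url + '/files',
                        data=json.dumps({'mimetype': 'application/octet-stream'})).json()
    with open(path, 'rb') as handle:
        chunk = handle.read(262144)  # matches the server-side chunkSize
        while chunk:
            http.post('{0}/files/{1}/addchunk'.format(base_url, created['id']),
                      data=chunk)
            chunk = handle.read(262144)
    return created['id']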
#@app.route("/api/files/<file_id>/content/<filename>", methods=["HEAD"])
#def get_header_for_file(file_id, filename):
# logger = logging.getLogger('slickqaweb.api.files.get_header_for_file')
# stored_file = StoredFile.objects(id=ObjectId(file_id)).first()
# if stored_file is None:
# return Response("File with id '{}' and name '{}' not found.".format(file_id, filename), mimetype="text/plain", status=404)
# logger.debug("Returning header information for file with id {} and name {}".format(file_id, filename))
@app.route("/api/files/<file_id>/content/<filename>")
@argument_doc('file_id', 'The id (string representation of the ObjectID) of the stored file.')
@argument_doc('filename', 'The filename of the stored file. This is actually ignored, but makes for nice looking URLs.')
@returns(BinaryField(help_text="The file content."))
@note("This sets the http header to the mimetype from the stored file, and streams the file to the requester.")
def get_file_content(file_id, filename):
"""Get the content of a file."""
logger = logging.getLogger('slickqaweb.api.files.get_file_content')
stored_file = StoredFile.objects(id=ObjectId(file_id)).first()
if stored_file is None:
return Response("File with id '{}' and name '{}' not found.".format(file_id, filename), mimetype="text/plain", status=404)
range_header = request.headers.get('Range', None)
response = None
if not range_header:
logger.info("Returning file in classic mode")
def write_chunks():
for chunk in FileChunk.objects(files_id=stored_file.id).order_by('+n'):
yield chunk.data
response = Response(write_chunks(), mimetype=stored_file.mimetype, direct_passthrough=True)
else:
logger.debug("Returning file with id {} and filename {} and md5sum {} in ranged mode.".format(file_id, filename, stored_file.md5))
byte1, byte2 = 0, (stored_file.length - 1)
        m = re.search(r'(\d+)-(\d*)', range_header)
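        # e.g. a "Range: bytes=1000-" request header yields byte1=1000 while
        # byte2 keeps its default of the last byte in the stored file.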
g = m.groups()
if g[0]:
byte1 = int(g[0])
if g[1]:
possible_byte2 = int(g[1])
if possible_byte2 < byte2:
byte2 = possible_byte2
data = []
        start_chunk_number = byte1 // stored_file.chunkSize
        end_chunk_number = byte2 // stored_file.chunkSize
if byte2 % stored_file.chunkSize > 0:
end_chunk_number += 1
start_index = byte1 % stored_file.chunkSize
end_index = byte2 % stored_file.chunkSize
logger.debug("Using range information {}-{}/{}, chunks {}:{}-{}:{}".format(byte1, byte2, stored_file.length - 1, start_chunk_number, start_index, end_chunk_number, end_index))
def write_chunks():
for chunk in FileChunk.objects(files_id=stored_file.id).order_by('+n'):
if chunk.n >= start_chunk_number and chunk.n <= end_chunk_number:
start = 0
end = stored_file.chunkSize
if chunk.n == start_chunk_number:
start = start_index
if chunk.n == end_chunk_number:
end = end_index
yield chunk.data[start:end]
response = Response(write_chunks(), 206, mimetype=stored_file.mimetype, direct_passthrough=True)
response.headers.add('Content-Range', 'bytes {0}-{1}/{2}'.format(byte1, byte2, stored_file.length))
response.headers.add('Accept-Ranges', 'bytes')
return response
| apache-2.0 | -2,193,309,757,777,340,400 | 43.8 | 183 | 0.667132 | false |
r3kall/AnimeRecommenderSystem | animerecommendersystem/recommender_systems/CollaborativeFilteringRS.py | 1 | 8293 | """
This file offers a recommendation system based on the collaborative filtering technique.
1) Let U be the user we want to give recommendations to. For each user U2 != U we compute
distance(U, U2) (*) and keep the top K neighbors. These neighbors should have watched many
of the animes also watched by U, giving them similar rates.
2) Once we have these K neighbors, we compute an aggregate rate for the animes they watched,
using the rates they gave (excluding the animes already watched by U, obviously).
In other words, we return as recommendations the animes watched by most of the neighbors
and rated highly by (almost) all of them.
(*) HOW DO WE COMPUTE THE DISTANCE BETWEEN U AND U2?
Idea: cosine similarity.
In particular, for each anime watched by both users, we compute the product of their rates.
"""
import math
from collections import defaultdict
from animerecommendersystem.utils import definitions
STD_NUM_NEIGHBORS = 5
STD_NUM_RECOMM = 10
AVG_NEAREST_DISTANCE = 0.50
RELAX_RATIO = 1.1
# Constants for vote prediction
MAX_PREDICT_RATE = 10.
MIN_PREDICT_RATE = 3.
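# Illustrative sketch (not part of the original module): the cosine-based
# distance described in the docstring above, applied to two toy rating
# dictionaries. The helper name and the sample ratings are made up for
# demonstration only.
def _example_cosine_distance():
    user1_rates = {'anime_a': 8, 'anime_b': 6}
    user2_rates = {'anime_a': 9, 'anime_c': 7}
    shared = set(user1_rates) & set(user2_rates)
    numerator = sum(user1_rates[a] * user2_rates[a] for a in shared)
    denominator = (math.sqrt(sum(r * r for r in user1_rates.values())) *
                   math.sqrt(sum(r * r for r in user2_rates.values())))
    # distance = 1 - cosine similarity; a smaller distance means more similar users
    return 1 - float(numerator) / denominator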
class CollaborativeFilteringRS:
def __init__(self, users_anime_lists, num_neighbors=STD_NUM_NEIGHBORS,
num_recommendations=STD_NUM_RECOMM, approx=True):
self.users_anime_lists = users_anime_lists
self.num_neighbors = num_neighbors
self.num_recommendations = num_recommendations
self.approx = approx
def compute_distance(self, username1, username2):
# Take the list of animes for each user
user1_animes = self.users_anime_lists[username1]
user2_animes = self.users_anime_lists[username2]
distance_numerator = 0
square_sum_1 = 0
square_sum_2 = 0
# Create a set that contains animes watched by at least one of the user.
total_set_animes = set(user1_animes['list'].keys())
total_set_animes |= set(user2_animes['list'].keys())
for anime in total_set_animes:
watched1 = False
watched2 = False
user1_rate = 0
user2_rate = 0
if anime in user1_animes['list'].keys():
watched1 = True
user1_rate = user1_animes['list'][anime]['rate']
if user1_rate == 0:
user1_rate = self.estimate_rate(user1_animes, anime)
square_sum_1 += user1_rate * user1_rate
if anime in user2_animes['list'].keys():
watched2 = True
user2_rate = user2_animes['list'][anime]['rate']
if user2_rate == 0:
user2_rate = self.estimate_rate(user2_animes, anime)
square_sum_2 += user2_rate * user2_rate
# If both users' lists contain this anime, then we need to increase the similarity
if watched1 and watched2:
distance_numerator += user1_rate * user2_rate
# At the end, use the values collected so far to compute the distance between users.
distance_denominator = math.sqrt(square_sum_1) * math.sqrt(square_sum_2)
similarity = distance_numerator / distance_denominator
distance = 1 - similarity
return distance
@staticmethod
def estimate_rate(user_animes, anime):
neighbor_rate = user_animes['mean_rate']
if neighbor_rate == 0:
anime_state = user_animes['list'][anime]['curr_state']
if anime_state == definitions.COMPLETED:
neighbor_rate = definitions.COMPLETED_RATE
elif anime_state == definitions.WATCHING:
neighbor_rate = definitions.WATCHING_RATE
elif anime_state == definitions.DROPPED:
neighbor_rate = definitions.DROPPED_RATE
elif anime_state == definitions.PLANNED:
neighbor_rate = definitions.PLANNED_RATE
elif anime_state == definitions.ON_HOLD:
neighbor_rate = definitions.ON_HOLD_RATE
return neighbor_rate
def get_neighbors(self, user):
if self.approx is True:
return self.get_approx_neighbors(user)
else:
return self.get_exact_neighbors(user)
def get_approx_neighbors(self, user):
"""
        Basic idea: compute the distance between ``user``'s list and every
        other user's list, and pick the nearest ones.
            => PROBLEM: TOO SLOW.
            => SOLUTION: no need to pick the exact nearest users; sufficiently
               close users are still good neighbors.
"""
neighbors = defaultdict(float)
how_many_good = 0
for user2 in self.users_anime_lists.keys():
if user2 == user or self.users_anime_lists[user2].get('list') is None:
continue
distance = self.compute_distance(user, user2)
neighbors[user2] = distance
# If this user is close enough to our target, then we take him as a neighbor
if distance <= AVG_NEAREST_DISTANCE * RELAX_RATIO:
how_many_good += 1
if how_many_good == self.num_neighbors:
break
# Sort neighbors according to distance, and return them
sorted_neighbors = sorted(neighbors, key=neighbors.get, reverse=False)
# return a dict, so we have also the similarity as info
res = dict()
for neighbor in sorted_neighbors[0:self.num_neighbors]:
# similarity
res[neighbor] = 1 - neighbors[neighbor]
return res
def get_exact_neighbors(self, user):
distances_dict = defaultdict(float)
for user2 in self.users_anime_lists.keys():
if user2 == user or self.users_anime_lists[user2].get('list') is None:
continue
distance = self.compute_distance(user, user2)
distances_dict[user2] = distance
# Once we have all distances, sort the dict by value and return a list containing
# the usernames of the nearest ones.
sorted_neighbors = sorted(distances_dict, key=distances_dict.get, reverse=False)
return sorted_neighbors[0:self.num_neighbors]
def get_recommendations(self, user):
neighbors_dict = self.get_neighbors(user)
predictions_rates_dict = defaultdict(float)
predictions_rates_num_dict = dict()
predictions_rates_den_dict = dict()
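        # The loops below build the classic mean-centred weighted average:
        #   predicted(u, a) = mean(u) + sum_v sim(u, v) * (rate(v, a) - mean(v))
        #                              / sum_v sim(u, v)
        # where v ranges over the neighbors that actually rated anime a.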
user_animes = self.users_anime_lists[user]
for neighbor in neighbors_dict.keys():
neighbor_animes = self.users_anime_lists[neighbor]
for anime in neighbor_animes['list'].keys():
if anime not in user_animes['list'].keys():
neighbor_rate = neighbor_animes['list'][anime]['rate']
if neighbor_rate > 0:
predictions_rates_num_dict[anime] = predictions_rates_num_dict.get(anime, 0) + \
neighbors_dict[neighbor] * \
(neighbor_rate - self.users_anime_lists[neighbor]['mean_rate'])
predictions_rates_den_dict[anime] = predictions_rates_den_dict.get(anime, 0) + neighbors_dict[
neighbor]
for anime in predictions_rates_num_dict.keys():
if predictions_rates_den_dict[anime] == 0:
predictions_rates_dict[anime] = self.users_anime_lists[user]['mean_rate']
else:
predictions_rates_dict[anime] = self.users_anime_lists[user]['mean_rate'] + \
(float(predictions_rates_num_dict[anime]) / float(
predictions_rates_den_dict[anime]))
if predictions_rates_dict[anime] < MIN_PREDICT_RATE:
predictions_rates_dict[anime] = MIN_PREDICT_RATE
elif predictions_rates_dict[anime] > MAX_PREDICT_RATE:
predictions_rates_dict[anime] = MAX_PREDICT_RATE
sorted_animes = sorted(predictions_rates_dict, key=predictions_rates_dict.get, reverse=True)
results = dict()
for anime in sorted_animes[0:self.num_recommendations]:
results[anime] = predictions_rates_dict[anime]
return results
| gpl-3.0 | -702,501,294,063,169,000 | 42.647368 | 123 | 0.606415 | false |
icereval/scrapi | scrapi/harvesters/stepic.py | 1 | 2795 | """
Stepic.org harvester of MOOC-online courses for SHARE Notification Service
Example API query: https://stepic.org:443/api/lessons/100
"""
from __future__ import unicode_literals
import json
import pycountry
from dateutil.parser import parse
from scrapi import requests
from scrapi.base import JSONHarvester
from scrapi.linter.document import RawDocument
def process_owner(owners_id):
resp = requests.get("https://stepic.org/api/users/" + str(owners_id)).json()
try:
person = resp[u'users'][0]
except KeyError:
person = {u'first_name': '', u'last_name': ''}
owner = {
'name': " ".join([person[u'first_name'], person[u'last_name']]),
'givenName': person[u'first_name'],
'additionalName': '',
'familyName': person[u'last_name'],
'email': '',
'sameAs': [],
}
return [owner]
class StepicHarvester(JSONHarvester):
short_name = 'stepic'
long_name = 'Stepic.org Online Education Platform'
url = 'http://www.stepic.org'
count = 0
URL = 'https://stepic.org/api/lessons'
@property
def schema(self):
return {
'contributors': ('/owner', process_owner),
'uris': {
'canonicalUri': ('/id', lambda x: self.url + '/' + str(x))
},
'title': '/title',
'providerUpdatedDateTime': ('/update_date', lambda x: parse(x).isoformat()),
'description': '/title',
'languages': ('/language', lambda x: [pycountry.languages.get(alpha2=x).terminology])
}
def harvest(self, start_date=None, end_date=None):
# TODO - stepic has no means of querying by date, we should add handling for the
# start and end date once it does.
search_url = self.URL
records = self.get_records(search_url)
record_list = []
for record in records:
doc_id = record['id']
record_list.append(
RawDocument(
{
'doc': json.dumps(record),
'source': self.short_name,
'docID': ('stepic_doc' + str(doc_id)).decode('utf-8'),
'filetype': 'json'
}
)
)
return record_list
def get_records(self, search_url):
all_lessons = []
resp = requests.get(self.URL + '?page=last').json()
last_lesson_id = resp['lessons'][-1]['id']
for pk in range(last_lesson_id + 1):
lesson = requests.get(search_url + "/" + str(pk), expected=[200, 403, 404])
if lesson.status_code == 200:
lesson_list = lesson.json()['lessons'][0]
all_lessons.append(lesson_list)
return all_lessons
| apache-2.0 | -3,222,189,658,091,499,500 | 31.126437 | 97 | 0.546333 | false |
Onager/plaso | plaso/analysis/interface.py | 1 | 2822 | # -*- coding: utf-8 -*-
"""This file contains the interface for analysis plugins."""
import abc
import calendar
import collections
import time
from plaso.analysis import definitions as analysis_definitions
from plaso.analysis import logger
from plaso.containers import events
from plaso.containers import reports
from plaso.lib import definitions
class AnalysisPlugin(object):
"""Class that defines the analysis plugin interface."""
# The name of the plugin. This is the name that is matched against when
# loading plugins, so it is important that this name is short, concise and
# explains the nature of the plugin easily. It also needs to be unique.
NAME = 'analysis_plugin'
def __init__(self):
"""Initializes an analysis plugin."""
super(AnalysisPlugin, self).__init__()
self._analysis_counter = collections.Counter()
self.plugin_type = analysis_definitions.PLUGIN_TYPE_REPORT
@property
def plugin_name(self):
"""str: name of the plugin."""
return self.NAME
def _CreateEventTag(self, event, labels):
"""Creates an event tag.
Args:
event (EventObject): event to tag.
labels (list[str]): event tag labels.
Returns:
EventTag: the event tag.
"""
event_identifier = event.GetIdentifier()
event_tag = events.EventTag()
event_tag.SetEventIdentifier(event_identifier)
event_tag.AddLabels(labels)
event_identifier_string = event_identifier.CopyToString()
logger.debug('Tagged event: {0:s} with labels: {1:s}'.format(
event_identifier_string, ', '.join(labels)))
return event_tag
# pylint: disable=unused-argument
def CompileReport(self, mediator):
"""Compiles a report of the analysis.
After the plugin has received every copy of an event to analyze this
function will be called so that the report can be assembled.
Args:
mediator (AnalysisMediator): mediates interactions between
analysis plugins and other components, such as storage and dfvfs.
Returns:
AnalysisReport: report.
"""
analysis_report = reports.AnalysisReport(plugin_name=self.NAME)
time_elements = time.gmtime()
time_compiled = calendar.timegm(time_elements)
analysis_report.time_compiled = (
time_compiled * definitions.MICROSECONDS_PER_SECOND)
analysis_report.analysis_counter = self._analysis_counter
return analysis_report
@abc.abstractmethod
def ExamineEvent(self, mediator, event, event_data, event_data_stream):
"""Analyzes an event.
Args:
mediator (AnalysisMediator): mediates interactions between
analysis plugins and other components, such as storage and dfvfs.
event (EventObject): event.
event_data (EventData): event data.
event_data_stream (EventDataStream): event data stream.
"""
| apache-2.0 | -2,152,688,887,689,937,000 | 29.673913 | 76 | 0.706591 | false |
karras/gitlab-docsbot | setup.py | 1 | 2639 | # -*- coding: UTF-8 -*-
"""Setuptools package definition"""
from setuptools import setup
from setuptools import find_packages
import codecs
import os
import sys
version = sys.version_info[0]
if version > 2:
pass
else:
pass
__version__ = None
version_file = "autodocs/version.py"
with codecs.open(version_file, encoding="UTF-8") as f:
code = compile(f.read(), version_file, 'exec')
exec(code)
def find_data(packages, extensions):
"""Finds data files along with source.
:param packages: Look in these packages
:param extensions: Look for these extensions
"""
data = {}
for package in packages:
package_path = package.replace('.', '/')
for dirpath, _, filenames in os.walk(package_path):
for filename in filenames:
for extension in extensions:
if filename.endswith(".%s" % extension):
file_path = os.path.join(
dirpath,
filename
)
file_path = file_path[len(package) + 1:]
if package not in data:
data[package] = []
data[package].append(file_path)
return data
with codecs.open('README.md', 'r', encoding="UTF-8") as f:
README_TEXT = f.read()
setup(
name = "gitlab-autodocs",
version = __version__,
packages = find_packages(),
package_data=find_data(
find_packages(), ["py"]
),
data_files = [
('/etc', ['autodocs/config/gitlab-autodocs.yaml']),
('/lib/systemd/system', ['autodocs/config/gitlab-autodocs.service'])
],
entry_points = {
'console_scripts': [
'gitlab-autodocs = autodocs:main',
]
},
install_requires = [
"requests",
"pyyaml",
"python-gitlab"
],
author = "Adfinis SyGroup AG",
author_email = "https://adfinis-sygroup.ch/",
description = "GitLab CI Docs Bot",
long_description = README_TEXT,
keywords = "GitLab CI autodocs bot",
url = "https://github.com/karras/gitlab-docsbot",
classifiers = [
"Development Status :: 5 - Production/Stable",
"Environment :: Console",
"Intended Audience :: Developers",
"Intended Audience :: Information Technology",
"License :: OSI Approved :: GNU General Public License v3 (GPLv3)",
"Natural Language :: English",
"Operating System :: OS Independent",
"Programming Language :: Python :: 2.6",
"Topic :: Software Development :: Build Tools"
]
)
| gpl-3.0 | 5,100,969,825,408,567,000 | 28 | 76 | 0.560818 | false |
KarrLab/kinetic_datanator | datanator/data_source/metabolites_meta_collection.py | 1 | 14782 | from datanator_query_python.query import query_sabiork, query_xmdb
from datanator.util import chem_util
from datanator.util import file_util
from datanator.util import index_collection
import datanator.config.core
import pymongo
import re
from pymongo.collation import Collation, CollationStrength
class MetabolitesMeta(query_sabiork.QuerySabio):
''' meta_loc: database location to save the meta collection
'''
def __init__(self, cache_dirname=None, MongoDB=None, replicaSet=None, db=None,
verbose=False, max_entries=float('inf'), username = None,
password = None, authSource = 'admin', meta_loc = None):
self.cache_dirname = cache_dirname
self.verbose = verbose
self.MongoDB = MongoDB
self.replicaSet = replicaSet
self.max_entries = max_entries
self.username = username
self.password = password
self.authSource = authSource
self.meta_loc = meta_loc
super(MetabolitesMeta, self).__init__(cache_dirname=cache_dirname, MongoDB=MongoDB, replicaSet=replicaSet,
db=db, verbose=verbose, max_entries=max_entries, username = username,
password = password, authSource = authSource)
self.frequency = 50
self.chem_manager = chem_util.ChemUtil()
self.file_manager = file_util.FileUtil()
self.ymdb_query = query_xmdb.QueryXmdb(username=username, password=password, server=MongoDB, authSource=authSource,
database=db, collection_str='ymdb', readPreference='nearest')
self.ecmdb_query = query_xmdb.QueryXmdb(username=username, password=password, server=MongoDB, authSource=authSource,
database=db, collection_str='ecmdb', readPreference='nearest')
self.collation = Collation('en', strength=CollationStrength.SECONDARY)
self.client, self.db, self.collection = self.con_db('metabolites_meta')
def load_content(self):
collection_name = 'metabolites_meta'
ecmdb_fields = ['m2m_id', 'inchi', 'synonyms.synonym']
self.fill_metabolite_fields(
fields=ecmdb_fields, collection_src='ecmdb', collection_des = collection_name)
ymdb_fields = ['ymdb_id', 'inchi', 'synonyms.synonym']
self.fill_metabolite_fields(
fields=ymdb_fields, collection_src='ymdb', collection_des = collection_name)
_, _, collection = self.con_db(collection_name)
k = 0
for doc in self.collection.find(filter={}, projection={'inchi':1}):
if k > self.max_entries:
break
kinlaw_id = self.get_kinlawid_by_inchi([doc['inchi']])
rxn_participants = self.find_reaction_participants(kinlaw_id)
collection.update_one({'inchi': doc['inchi']},
{'$set': {'kinlaw_id': kinlaw_id,
'reaction_participants': rxn_participants}},
upsert=False)
k += 1
# i = 0
# cursor = collection.find(filter = {}, projection = {'similar_compounds_corrected':1, 'similar_compounds': 1})
# for doc in cursor:
# if i % self.frequency == 0:
# print(i)
# replacement = []
# for corrected in doc['similar_compounds_corrected']:
# for k, v in corrected.items():
# dic = {}
# dic[k] = v
# replacement.append(dic)
# collection.update_one({'_id': doc['_id']},
# {'$set': {'similar_compounds': replacement}},
# upsert=False)
# i += 1
def replace_key_in_similar_compounds(self):
query = {}
projection = {'similar_compounds': 1}
_, _, col = self.con_db('metabolites_meta')
docs = col.find(filter=query, projection=projection)
for doc in docs:
result = []
_list = doc['similar_compounds']
for dic in _list:
old_key = list(dic.keys())[0]
try:
new_key = col.find_one(filter={'inchi': old_key},
projection={'InChI_Key':1})['InChI_Key']
result.append( {new_key: dic[old_key]})
except TypeError:
result.append( {'NoStructure': -1} )
col.update_one({'_id': doc['_id']},
{'$set': {'similar_compounds': result} })
def fill_metabolite_fields(self, fields=None, collection_src=None, collection_des = None):
'''Fill in values of fields of interest from
metabolite collection: ecmdb or ymdb
Args:
            fields: list of fields of interest
collection_src: collection in which query will be done
collection_des: collection in which result will be updated
'''
projection = {}
for field in fields:
projection[field] = 1
projection['_id'] = 0
_, _, col_src = self.con_db(collection_src)
_, _, col_des = self.con_db(collection_des)
cursor = col_src.find(filter={}, projection=projection)
i = 0
for doc in cursor:
if i == self.max_entries:
break
if i % self.frequency == 0:
print('Getting fields of interest from {} document in {}'.format(i, collection_src))
doc['InChI_Key'] = self.chem_manager.inchi_to_inchikey(doc['inchi'])
if isinstance(doc.get('synonyms'), list):
continue
try:
synonyms = doc.get('synonyms', None).get('synonym')
except AttributeError:
synonyms = doc.get('synonyms', None)
col_des.update_one({'inchi': doc['inchi']},
{ '$set': { fields[0]: doc[fields[0]],
fields[1]: doc[fields[1]],
'synonyms': synonyms,
'InChI_Key': doc['InChI_Key']}},
upsert=True)
i += 1
def fill_names(self):
"""Fill names of metabolites in 'name' field
"""
docs = self.collection.find({})
count = self.collection.count_documents({})
for i, doc in enumerate(docs):
name = ''
inchi_key = doc['InChI_Key']
if i == self.max_entries:
break
if i % 100 == 0 and self.verbose:
print('Adding name to document {} out of {}.'.format(i, count))
if doc.get('ymdb_id') is None:
name = self.ecmdb_query.get_name_by_inchikey(inchi_key)
else:
name = self.ymdb_query.get_name_by_inchikey(inchi_key)
self.collection.update_one({'_id': doc['_id']},
{'$set': {'name': name}}, upsert=False)
def fill_standard_id(self, skip=0):
"""Fill meta collection with chebi_id, pubmed_id,
and kegg_id.
Args:
skip (:obj:`int`): skip first n number of records.
"""
con_0 = {'chebi_id': {'$exists': False}}
con_1 = {'chebi_id': None}
query = {'$or': [con_0, con_1]}
docs = self.collection.find(query, skip=skip)
count = self.collection.count_documents(query)
for i, doc in enumerate(docs):
if i == self.max_entries:
break
if i % 100 == 0 and self.verbose:
print('Processing doc {} out of {}'.format(i+skip, count))
m2m_id = doc.get('m2m_id')
ymdb_id = doc.get('ymdb_id')
if ymdb_id == 'YMDB00890' or ymdb_id == 'YMDB00862':
continue
if ymdb_id is not None: # ymdb has richer data than ecmdb
doc_e = self.ymdb_query.get_standard_ids_by_id(ymdb_id)
if doc_e['synonyms']:
synonyms = doc_e['synonyms']['synonym']
else:
synonyms = None
self.collection.update_many({'ymdb_id': ymdb_id},
{'$set': {'chebi_id': doc_e['chebi_id'],
'hmdb_id': doc_e['hmdb_id'],
'kegg_id': doc_e['kegg_id'],
'description': doc_e['description'],
'chemical_formula': doc_e['chemical_formula'],
'average_molecular_weight': doc_e['average_molecular_weight'],
'cas_registry_number': doc_e['cas_registry_number'],
'smiles': doc_e['smiles'],
'cellular_locations': doc_e['cellular_locations'],
'pubchem_compound_id': doc_e['pubchem_compound_id'],
'chemspider_id': doc_e['chemspider_id'],
'biocyc_id': doc_e['biocyc_id'],
'pathways': doc_e['pathways'],
'property': doc_e['property'],
'name': doc_e['name'],
'synonyms': synonyms}}, upsert=False)
elif m2m_id is not None:
doc_y = self.ecmdb_query.get_standard_ids_by_id(m2m_id)
if doc_y['synonyms']:
synonyms = doc_y['synonyms']['synonym']
else:
synonyms = None
self.collection.update_many({'m2m_id': m2m_id},
{'$set': {'chebi_id': doc_y['chebi_id'],
'hmdb_id': doc_y['hmdb_id'],
'kegg_id': doc_y['kegg_id'],
'description': doc_y['description'],
'chemical_formula': doc_y['chemical_formula'],
'average_molecular_weight': doc_y['average_molecular_weight'],
'cas_registry_number': doc_y['cas_registry_number'],
'smiles': doc_y['smiles'],
'cellular_locations': doc_y['cellular_locations'],
'pubchem_compound_id': doc_y['pubchem_compound_id'],
'chemspider_id': doc_y['chemspider_id'],
'biocyc_id': doc_y['biocyc_id'],
'pathways': doc_y['pathways'],
'property': doc_y['property'],
'name': doc_y['name'],
'synonyms': synonyms}}, upsert=False)
else:
continue
def remove_dups(self, _key):
"""Remove entries with the same _key.
Args:
_key(:obj:`str`): Name of fields in which dups will be identified.
"""
num, docs = self.get_duplicates('metabolites_meta', _key)
return num, docs
def reset_cellular_locations(self, start=0):
"""Github (https://github.com/KarrLab/datanator_rest_api/issues/69)
"""
query = {'cellular_locations': {'$ne': None}}
count = self.collection.count_documents(query) - start
for i, doc in enumerate(self.collection.find(filter=query, skip=start,
projection={'m2m_id': 1, 'ymdb_id': 1,
'cellular_locations': 1})):
if i == self.max_entries:
break
if self.verbose and i % 100 == 0:
print('Processing doc {} out of {} ...'.format(i, count))
cell_locations = doc['cellular_locations']
obj = []
if doc.get('ymdb_id'):
for loc in cell_locations:
location = loc['cellular_location']['cellular_location']
obj.append({
'reference': ['YMDB'],
'cellular_location': location
})
else:
for loc in cell_locations:
location = loc['cellular_location']['cellular_location']
obj.append({
'reference': ['ECMDB'],
'cellular_location': location
})
self.collection.update_one({'_id': doc['_id']},
{'$set': {'cellular_locations': obj}},
upsert=False)
def main():
db = 'datanator'
meta_loc = 'datanator'
username = datanator.config.core.get_config()['datanator']['mongodb']['user']
password = datanator.config.core.get_config()['datanator']['mongodb']['password']
MongoDB = datanator.config.core.get_config()['datanator']['mongodb']['server']
manager = MetabolitesMeta(cache_dirname=None, MongoDB=MongoDB, db=db,
verbose=True, max_entries=float('inf'),
username = username, password = password, meta_loc = meta_loc)
# # manager.load_content()
# collection_name = 'metabolites_meta'
# manager.fill_metabolite_fields(fields=['m2m_id', 'inchi', 'synonyms.synonym'],
# collection_src='ecmdb', collection_des = collection_name)
# manager.fill_metabolite_fields(fields=['ymdb_id', 'inchi', 'synonyms.synonym'],
# collection_src='ymdb',
# collection_des = collection_name)
# manager.fill_names()
# manager.fill_standard_id(skip=0)
# num, _ = manager.remove_dups('InChI_Key')
# print(num)
manager.reset_cellular_locations()
if __name__ == '__main__':
main()
| mit | -7,996,290,644,230,414,000 | 48.273333 | 124 | 0.468069 | false |
DigitalPandacoin/pandacoin | test/functional/feature_config_args.py | 1 | 2329 | #!/usr/bin/env python3
# Copyright (c) 2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test various command line arguments and configuration file parameters."""
import os
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import get_datadir_path
class ConfArgsTest(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 1
def run_test(self):
self.stop_node(0)
# Remove the -datadir argument so it doesn't override the config file
self.nodes[0].args = [arg for arg in self.nodes[0].args if not arg.startswith("-datadir")]
default_data_dir = get_datadir_path(self.options.tmpdir, 0)
new_data_dir = os.path.join(default_data_dir, 'newdatadir')
new_data_dir_2 = os.path.join(default_data_dir, 'newdatadir2')
# Check that using -datadir argument on non-existent directory fails
self.nodes[0].datadir = new_data_dir
self.assert_start_raises_init_error(0, ['-datadir='+new_data_dir], 'Error: Specified data directory "' + new_data_dir + '" does not exist.')
# Check that using non-existent datadir in conf file fails
conf_file = os.path.join(default_data_dir, "pandacoin.conf")
with open(conf_file, 'a', encoding='utf8') as f:
f.write("datadir=" + new_data_dir + "\n")
self.assert_start_raises_init_error(0, ['-conf='+conf_file], 'Error reading configuration file: specified data directory "' + new_data_dir + '" does not exist.')
# Create the directory and ensure the config file now works
os.mkdir(new_data_dir)
self.start_node(0, ['-conf='+conf_file, '-wallet=w1'])
self.stop_node(0)
assert os.path.isfile(os.path.join(new_data_dir, 'regtest', 'wallets', 'w1'))
# Ensure command line argument overrides datadir in conf
os.mkdir(new_data_dir_2)
self.nodes[0].datadir = new_data_dir_2
self.start_node(0, ['-datadir='+new_data_dir_2, '-conf='+conf_file, '-wallet=w2'])
assert os.path.isfile(os.path.join(new_data_dir_2, 'regtest', 'wallets', 'w2'))
if __name__ == '__main__':
ConfArgsTest().main()
| mit | -4,612,651,100,947,003,000 | 46.530612 | 169 | 0.661228 | false |
metomi/rose | metomi/rosie/graph.py | 1 | 9686 | # Copyright (C) British Crown (Met Office) & Contributors.
# This file is part of Rose, a framework for meteorological suites.
#
# Rose is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Rose is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Rose. If not, see <http://www.gnu.org/licenses/>.
# -----------------------------------------------------------------------------
"""Plot suite ancestry."""
import textwrap
import time
import pygraphviz
import metomi.rose.metadata_graph
import metomi.rose.opt_parse
import metomi.rose.reporter
import metomi.rosie.suite_id
import metomi.rosie.ws_client
import metomi.rosie.ws_client_cli
class NoConnectionsEvent(metomi.rose.reporter.Event):
"""An event raised if the graph has no edges or nodes.
event.args[0] is the filter id string.
"""
KIND = metomi.rose.reporter.Reporter.KIND_ERR
def __str__(self):
return "%s: no copy relationships to other suites" % self.args[0]
class PrintSuiteDetails(metomi.rose.reporter.Event):
"""An event to print out suite details when writing to CLI"""
KIND = metomi.rose.reporter.Reporter.KIND_OUT
def __str__(self):
template = " %s"
argslist = [self.args[0]]
if len(self.args) > 1:
for arg in self.args[1]:
template += ", %s"
argslist.append(arg)
return template % tuple(argslist)
def get_suite_data(prefix, properties=None):
"""Retrieve a dictionary containing the contents of RosieWS
Adds in any extra requested properties
"""
if properties is None:
properties = []
ws_client = metomi.rosie.ws_client.RosieWSClient(
prefixes=[prefix],
event_handler=metomi.rose.reporter.Reporter()
)
suite_data = ws_client.search(prefix, all_revs=1)[0][0]
for dict_row in sorted(suite_data, key=lambda _: _["revision"]):
suite_id = metomi.rosie.suite_id.SuiteId.from_idx_branch_revision(
dict_row["idx"],
dict_row["branch"],
dict_row["revision"]
)
dict_row["suite"] = suite_id.to_string_with_version()
if "local" in properties:
dict_row["local"] = suite_id.get_status()
if "date" in properties:
dict_row["date"] = time.strftime(
metomi.rosie.ws_client_cli.DATE_TIME_FORMAT,
time.gmtime(dict_row.get("date"))
)
return suite_data
def calculate_edges(graph, suite_data, filter_id=None, properties=None,
max_distance=None):
"""Get all connected suites for a prefix, optionally filtered."""
if properties is None:
properties = []
node_rosie_properties = {}
edges = []
forward_edges = {}
back_edges = {}
for dict_row in sorted(suite_data, key=lambda _: _["revision"]):
idx = dict_row["idx"]
node_rosie_properties[idx] = []
for prop in properties:
node_rosie_properties[idx].append(dict_row.get(prop))
from_idx = dict_row.get("from_idx")
if from_idx is None:
continue
edges.append((from_idx, idx))
forward_edges.setdefault(from_idx, [])
forward_edges[from_idx].append(idx)
back_edges.setdefault(idx, [])
back_edges[idx].append(from_idx)
if filter_id is None:
# Plot all the edges we've found.
for edge in sorted(edges):
node0, node1 = edge
add_node(graph, node0, node_rosie_properties.get(node0))
add_node(graph, node1, node_rosie_properties.get(node1))
graph.add_edge(edge[0], edge[1])
else:
reporter = metomi.rose.reporter.Reporter()
# Only plot the connections involving filter_id.
        node_stack = [(filter_id, 0)]
add_node(graph, filter_id, node_rosie_properties.get(filter_id),
fillcolor="lightgrey", style="filled")
ok_nodes = set([])
while node_stack:
node, distance = node_stack.pop()
if max_distance is not None and distance > max_distance:
continue
ok_nodes.add(node)
for neighbour_node in (forward_edges.get(node, []) +
back_edges.get(node, [])):
if neighbour_node not in ok_nodes:
node_stack.append((neighbour_node, distance + 1))
if len(ok_nodes) == 1:
# There are no related suites.
reporter(NoConnectionsEvent(filter_id))
for edge in sorted(edges):
node0, node1 = edge
if node0 in ok_nodes and node1 in ok_nodes:
add_node(graph, node0, node_rosie_properties.get(node0))
add_node(graph, node1, node_rosie_properties.get(node1))
graph.add_edge(node0, node1)
def add_node(graph, node, node_label_properties, **kwargs):
"""Add a node with a particular label."""
label_lines = [node]
if node_label_properties is not None:
for property_value in node_label_properties:
label_lines.extend(textwrap.wrap(str(property_value)))
label_text = "\\n".join(label_lines) # \n must be escaped for graphviz.
kwargs.update({"label": label_text})
graph.add_node(node, **kwargs)
def make_graph(suite_data, filter_id, properties, prefix, max_distance=None):
"""Construct the pygraphviz graph."""
graph = pygraphviz.AGraph(directed=True)
graph.graph_attr["rankdir"] = "LR"
if filter_id:
graph.graph_attr["name"] = filter_id + " copy tree"
else:
graph.graph_attr["name"] = prefix + " copy tree"
calculate_edges(graph, suite_data, filter_id, properties,
max_distance=max_distance)
return graph
def output_graph(graph, filename=None, debug_mode=False):
"""Draw the graph to filename (or temporary file if None)."""
metomi.rose.metadata_graph.output_graph(graph, debug_mode=debug_mode,
filename=filename)
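# Illustrative sketch (not part of the original module): how the helpers above
# compose into a one-shot render. The prefix "u", the suite id "u-aa000", the
# "owner" property and the output filename are assumptions for demonstration.
def _example_render_ancestry():
    suite_data = get_suite_data("u", properties=["owner"])
    graph = make_graph(suite_data, "u-aa000", ["owner"], "u", max_distance=2)
    output_graph(graph, filename="ancestry.png")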
def print_graph(suite_data, filter_id, properties=None, max_distance=None):
"""Dump out list of graph entries relating to a suite"""
if properties is None:
properties = []
reporter = metomi.rose.reporter.Reporter()
ancestry = {}
# Process suite_data to get ancestry tree
for dict_row in sorted(suite_data, key=lambda _: _["revision"]):
idx = dict_row["idx"]
from_idx = dict_row.get("from_idx")
if idx not in ancestry:
ancestry[idx] = {'parent': None, 'children': []}
if from_idx:
ancestry[idx]['parent'] = from_idx
for prop in properties:
ancestry[idx][prop] = dict_row.get(prop)
if from_idx in ancestry:
ancestry[from_idx]['children'].append(idx)
else:
ancestry[from_idx] = {'parent': None, 'children': [idx]}
# Print out info
parent_id = ancestry[filter_id]['parent']
if parent_id:
reporter(PrintSuiteDetails(
parent_id, [ancestry[parent_id][p] for p in properties]),
prefix="[parent]")
else:
reporter(PrintSuiteDetails(None), prefix="[parent]")
children = ancestry[filter_id]['children']
generation = 1
# Print out each generation of child suites
while children:
next_children = []
for child in children:
reporter(PrintSuiteDetails(child,
[ancestry[child][p] for p in properties]),
prefix="[child%s]" % generation)
# If a child has children add to list of next generation children
if ancestry[child]['children']:
next_children += ancestry[child]['children']
if max_distance and generation >= max_distance:
break
generation += 1
children = next_children
def main():
"""Provide the CLI interface."""
opt_parser = metomi.rose.opt_parse.RoseOptionParser()
opt_parser.add_my_options("distance",
"output_file",
"prefix",
"property",
"text")
opts, args = opt_parser.parse_args()
filter_id = None
if args:
filter_id = args[0]
prefix = metomi.rosie.suite_id.SuiteId(id_text=filter_id).prefix
if opts.prefix:
opt_parser.error("No need to specify --prefix when specifying ID")
elif opts.prefix:
prefix = opts.prefix
else:
prefix = metomi.rosie.suite_id.SuiteId.get_prefix_default()
if opts.distance and not args:
opt_parser.error("distance option requires an ID")
if opts.text and not args:
opt_parser.error("print option requires an ID")
suite_data = get_suite_data(prefix, opts.property)
if opts.text:
print_graph(suite_data, filter_id, opts.property,
max_distance=opts.distance)
else:
graph = make_graph(suite_data, filter_id, opts.property, prefix,
max_distance=opts.distance)
output_graph(graph, filename=opts.output_file,
debug_mode=opts.debug_mode)
if __name__ == "__main__":
main()
| gpl-3.0 | -901,207,645,010,932,700 | 33.469751 | 79 | 0.596118 | false |
bitmazk/django-frequently | frequently/tests/forms_tests.py | 1 | 1290 | """Tests for the forms of the ``django-frequently`` app."""
from django.test import TestCase
from mixer.backend.django import mixer
from .. import forms
class EntryFormTestCase(TestCase):
"""Tests for the ``EntryForm`` form class."""
def setUp(self):
self.owner = mixer.blend('auth.User')
def test_form(self):
data = {
'question': ('This is a very long question to test the slug'
' generator and the truncation results. Sometimes'
' questions can become very very long, so we will'
' have to be careful to not create exceptions.'),
'submitted_by': '[email protected]',
}
form = forms.EntryForm(data=data)
self.assertTrue(form.is_valid())
with self.settings(FREQUENTLY_REQUIRE_EMAIL=False):
form = forms.EntryForm(data=data, owner=self.owner)
self.assertTrue(form.is_valid())
obj = form.save()
self.assertEqual(obj.submitted_by, self.owner.email)
self.assertEqual(obj.slug, ('this-is-a-very-long-question-to-test-'
'the-slug-generator-and-the-truncation'
'-results-sometimes-questio'))
| mit | 9,197,072,594,367,914,000 | 38.090909 | 79 | 0.570543 | false |
herrnst/script.xbmc.lcdproc | resources/lib/lcdproc.py | 1 | 21691 | '''
XBMC LCDproc addon
Copyright (C) 2012-2018 Team Kodi
Copyright (C) 2012-2018 Daniel 'herrnst' Scheller
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import re
import telnetlib
import time
import xbmc
from socket import *
from .settings import *
from .lcdbase import *
from .lcdproc_extra_imon import *
from .lcdproc_extra_mdm166a import *
from .infolabels import *
MAX_ROWS = 20
MAX_BIGDIGITS = 20
INIT_RETRY_INTERVAL = 2
INIT_RETRY_INTERVAL_MAX = 60
class LCDProc(LcdBase):
def __init__(self, settings):
self.m_bStop = True
self.m_lastInitAttempt = 0
self.m_initRetryInterval = INIT_RETRY_INTERVAL
self.m_used = True
self.tn = telnetlib.Telnet()
self.tnsocket = None
self.m_timeLastSockAction = time.time()
self.m_timeSocketIdleTimeout = 2
self.m_strLineText = [None]*MAX_ROWS
self.m_strLineType = [None]*MAX_ROWS
self.m_bstrLineIcon = [None]*MAX_ROWS
self.m_strDigits = [None]*MAX_BIGDIGITS
self.m_iProgressBarWidth = 0
self.m_iProgressBarLine = -1
self.m_bstrIconName = b"BLOCK_FILLED"
    self.m_iBigDigits = int(8) # e.g. "12:45:59" -> 8 characters, colons count as digits
self.m_iOffset = 1
self.m_bstrSetLineCmds = b""
self.m_cExtraIcons = None
LcdBase.__init__(self, settings)
def SendCommand(self, strCmd, bCheckRet):
countcmds = strCmd.count(b'\n')
sendcmd = strCmd
ret = True
# Single command without lf
if countcmds < 1:
countcmds = 1
sendcmd += b"\n"
try:
# Send to server via raw socket to prevent telnetlib tampering with
# certain chars (especially 0xFF -> telnet IAC)
self.tnsocket.sendall(sendcmd)
except:
# Something bad happened, abort
log(LOGERROR, "SendCommand: Telnet exception - send")
return False
# Update last socketaction timestamp
self.m_timeLastSockAction = time.time()
# Repeat for number of found commands
for i in range(1, (countcmds + 1)):
# Read in (multiple) responses
while True:
try:
# Read server reply
reply = self.tn.read_until(b"\n",3)
except:
# (Re)read failed, abort
log(LOGERROR, "SendCommand: Telnet exception - reread")
return False
# Skip these messages
if reply[:6] == b'listen':
continue
elif reply[:6] == b'ignore':
continue
elif reply[:3] == b'key':
continue
elif reply[:9] == b'menuevent':
continue
# Response seems interesting, so stop here
break
if not bCheckRet:
continue # no return checking desired, so be fine
if strCmd == b'noop' and reply == b'noop complete\n':
continue # noop has special reply
if reply == b'success\n':
continue
ret = False
# Leave information something undesired happened
if ret is False:
log(LOGWARNING, "Reply to '%s' was '%s'" % (strCmd.decode(self.m_strLCDEncoding), reply.decode(self.m_strLCDEncoding)))
return ret
def SetupScreen(self):
# Add screen first
if not self.SendCommand(b"screen_add xbmc", True):
return False
# Set screen priority
if not self.SendCommand(b"screen_set xbmc -priority info", True):
return False
# Turn off heartbeat if desired
if not self.m_Settings.getHeartBeat():
if not self.SendCommand(b"screen_set xbmc -heartbeat off", True):
return False
# Initialize command list var
strInitCommandList = b""
# Setup widgets (scrollers and hbars first)
for i in range(1,int(self.m_iRows)+1):
# Text widgets
strInitCommandList += b"widget_add xbmc lineScroller%i scroller\n" % (i)
# Progress bars
strInitCommandList += b"widget_add xbmc lineProgress%i hbar\n" % (i)
# Reset bars to zero
strInitCommandList += b"widget_set xbmc lineProgress%i 0 0 0\n" % (i)
self.m_strLineText[i-1] = ""
self.m_strLineType[i-1] = ""
# Setup icons last
for i in range(1,int(self.m_iRows)+1):
# Icons
strInitCommandList += b"widget_add xbmc lineIcon%i icon\n" % (i)
# Default icon
strInitCommandList += b"widget_set xbmc lineIcon%i 0 0 BLOCK_FILLED\n" % (i)
self.m_bstrLineIcon[i-1] = b""
for i in range(1,int(self.m_iBigDigits + 1)):
# Big Digit
strInitCommandList += b"widget_add xbmc lineBigDigit%i num\n" % (i)
# Set Digit
strInitCommandList += b"widget_set xbmc lineBigDigit%i 0 0\n" % (i)
self.m_strDigits[i] = b""
if not self.SendCommand(strInitCommandList, True):
return False
return True
def Initialize(self):
connected = False
if not self.m_used:
return False#nothing to do
#don't try to initialize too often
now = time.time()
if (now - self.m_lastInitAttempt) < self.m_initRetryInterval:
return False
self.m_lastInitAttempt = now
if self.Connect():
if LcdBase.Initialize(self):
# reset the retry interval after a successful connect
self.m_initRetryInterval = INIT_RETRY_INTERVAL
self.m_bStop = False
connected = True
else:
log(LOGERROR, "Connection successful but LCD.xml has errors, aborting connect")
if not connected:
# preventively close socket
self.CloseSocket()
# give up after INIT_RETRY_INTERVAL_MAX (60) seconds
if self.m_initRetryInterval > INIT_RETRY_INTERVAL_MAX:
self.m_used = False
log(LOGERROR,"Connect failed. Giving up. Please fix any connection problems and restart the addon.")
else:
self.m_initRetryInterval = self.m_initRetryInterval * 2
log(LOGERROR,"Connect failed. Retry in %d seconds." % self.m_initRetryInterval)
return connected
def DetermineExtraSupport(self):
rematch_imon = "SoundGraph iMON(.*)LCD"
rematch_mdm166a = "Targa(.*)mdm166a"
rematch_imonvfd = "Soundgraph(.*)VFD"
bUseExtraIcons = self.m_Settings.getUseExtraElements()
# Never cause script failure/interruption by this! This is totally optional!
try:
# Retrieve driver name for additional functionality
self.tn.write(b"info\n")
reply = self.tn.read_until(b"\n",3).strip().decode("ascii")
# When the LCDd driver doesn't supply a valid string, inform and return
if reply == "":
log(LOGINFO, "Empty driver information reply")
return
log(LOGINFO, "Driver information reply: " + reply)
if re.match(rematch_imon, reply):
log(LOGINFO, "SoundGraph iMON LCD detected")
if bUseExtraIcons:
self.m_cExtraIcons = LCDproc_extra_imon()
# override bigdigits counter, the imonlcd driver handles bigdigits
# different: digits count for two columns instead of three
self.m_iBigDigits = 7
elif re.match(rematch_mdm166a, reply):
log(LOGINFO, "Futaba/Targa USB mdm166a VFD detected")
if bUseExtraIcons:
self.m_cExtraIcons = LCDproc_extra_mdm166a()
elif re.match(rematch_imonvfd, reply):
log(LOGINFO, "SoundGraph iMON IR/VFD detected")
if self.m_cExtraIcons is not None:
self.m_cExtraIcons.Initialize()
except:
pass
def Connect(self):
self.CloseSocket()
try:
ip = self.m_Settings.getHostIp()
port = self.m_Settings.getHostPort()
log(LOGDEBUG,"Open " + str(ip) + ":" + str(port))
self.tn.open(ip, port)
# Start a new session
self.tn.write(b"hello\n")
# Receive LCDproc data to determine row and column information
reply = self.tn.read_until(b"\n",3).decode("ascii")
log(LOGDEBUG,"Reply: " + reply)
# parse reply by regex
      lcdinfo = re.match(r"^connect .+ protocol ([0-9\.]+) lcd wid (\d+) hgt (\d+) cellwid (\d+) cellhgt (\d+)$", reply)
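      # A typical LCDd handshake reply looks like (server version string may vary):
      #   connect LCDproc 0.5.9 protocol 0.3 lcd wid 20 hgt 4 cellwid 5 cellhgt 8
      # Only the protocol version and the geometry groups captured above are used.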
# if regex didn't match, LCDproc is incompatible or something's odd
if lcdinfo is None:
return False
# protocol version must currently either be 0.3 or 0.4
if float(lcdinfo.group(1)) not in [0.3, 0.4]:
log(LOGERROR, "Only LCDproc protocols 0.3 and 0.4 supported (got " + lcdinfo.group(1) +")")
return False
# set up class vars
self.m_iColumns = int(lcdinfo.group(2))
self.m_iRows = int(lcdinfo.group(3))
self.m_iCellWidth = int(lcdinfo.group(4))
self.m_iCellHeight = int(lcdinfo.group(5))
# tell users what's going on
log(LOGINFO, "Connected to LCDd at %s:%s, Protocol version %s - Geometry %sx%s characters (%sx%s pixels, %sx%s pixels per character)" % (str(ip), str(port), float(lcdinfo.group(1)), str(self.m_iColumns), str(self.m_iRows), str(self.m_iColumns * self.m_iCellWidth), str(self.m_iRows * self.m_iCellHeight), str(self.m_iCellWidth), str(self.m_iCellHeight)))
# Set up BigNum values based on display geometry
if self.m_iColumns < 13:
self.m_iBigDigits = 0 # No clock
elif self.m_iColumns < 17:
self.m_iBigDigits = 5 # HH:MM
elif self.m_iColumns < 20:
self.m_iBigDigits = 7 # H:MM:SS on play, HH:MM on clock
else:
self.m_iBigDigits = 8 # HH:MM:SS
# Check LCDproc if we can enable any extras or override values
# (might override e.g. m_iBigDigits!)
self.DetermineExtraSupport()
except:
log(LOGERROR,"Connect: Caught exception, aborting.")
return False
# retrieve raw socket object
self.tnsocket = self.tn.get_socket()
if self.tnsocket is None:
log(LOGERROR, "Retrieval of socket object failed!")
return False
if not self.SetupScreen():
log(LOGERROR, "Screen setup failed!")
return False
return True
def CloseSocket(self):
if self.tnsocket:
# no pyexceptions, please, we're disconnecting anyway
try:
# if we served extra elements, (try to) reset them
if self.m_cExtraIcons is not None:
if not self.SendCommand(self.m_cExtraIcons.GetClearAllCmd(), True):
log(LOGERROR, "CloseSocket(): Cannot clear extra icons")
# do gracefully disconnect (send directly as we won't get any response on this)
self.tn.write(b"bye\n")
# and close socket afterwards
self.tn.close()
except:
# exception caught on this, so what? :)
pass
# delete/cleanup extra support instance
del self.m_cExtraIcons
self.m_cExtraIcons = None
self.tnsocket = None
del self.tn
self.tn = telnetlib.Telnet()
def IsConnected(self):
if not self.tnsocket:
return False
# Ping only every SocketIdleTimeout seconds
if (self.m_timeLastSockAction + self.m_timeSocketIdleTimeout) > time.time():
return True
if not self.SendCommand(b"noop", True):
log(LOGERROR, "noop failed in IsConnected(), aborting!")
return False
return True
def SetBackLight(self, iLight):
if not self.tnsocket:
return
log(LOGDEBUG, "Switch Backlight to: " + str(iLight))
# Build command
if iLight == 0:
cmd = b"screen_set xbmc -backlight off\n"
elif iLight > 0:
cmd = b"screen_set xbmc -backlight on\n"
# Send to server
if not self.SendCommand(cmd, True):
log(LOGERROR, "SetBackLight(): Cannot change backlight state")
self.CloseSocket()
def SetContrast(self, iContrast):
#TODO: Not sure if you can control contrast from client
return
def Stop(self):
self.CloseSocket()
self.m_bStop = True
def Suspend(self):
if self.m_bStop or not self.tnsocket:
return
# Build command to suspend screen
cmd = b"screen_set xbmc -priority hidden\n"
# Send to server
if not self.SendCommand(cmd, True):
log(LOGERROR, "Suspend(): Cannot suspend")
self.CloseSocket()
def Resume(self):
if self.m_bStop or not self.tnsocket:
return
# Build command to resume screen
cmd = b"screen_set xbmc -priority info\n"
# Send to server
if not self.SendCommand(cmd, True):
log(LOGERROR, "Resume(): Cannot resume")
self.CloseSocket()
def GetColumns(self):
return int(self.m_iColumns)
def GetBigDigitTime(self, mode):
ret = ""
if self.m_InfoLabels.IsPlayerPlaying():
if not (mode == LCD_MODE.LCD_MODE_SCREENSAVER and self.m_InfoLabels.IsPlayerPaused()):
ret = self.m_InfoLabels.GetPlayerTime()[-self.m_iBigDigits:]
if ret == "": # no usable timestring, e.g. not playing anything
strSysTime = self.m_InfoLabels.GetSystemTime()
if self.m_iBigDigits >= 8: # return h:m:s
ret = strSysTime
elif self.m_iBigDigits >= 5: # return h:m when display too small
ret = strSysTime[:5]
return ret
def SetBigDigits(self, strTimeString, bForceUpdate):
iOffset = 1
iDigitCount = 1
iStringOffset = 0
strRealTimeString = ""
if strTimeString == "" or strTimeString == None:
return
iStringLength = int(len(strTimeString))
if self.m_bCenterBigDigits:
iColons = strTimeString.count(":")
iWidth = 3 * (iStringLength - iColons) + iColons
      iOffset = 1 + max(self.m_iColumns - iWidth, 0) // 2
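      # e.g. on a 20-column display showing "12:45:59" (6 digits + 2 colons):
      # iWidth = 3 * 6 + 2 = 20, so no free columns remain and iOffset stays 1.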
if iStringLength > self.m_iBigDigits:
iStringOffset = len(strTimeString) - self.m_iBigDigits
      iOffset = 1
if self.m_iOffset != iOffset:
# on offset change force redraw
bForceUpdate = True
self.m_iOffset = iOffset
for i in range(int(iStringOffset), int(iStringLength)):
if self.m_strDigits[iDigitCount] != strTimeString[i] or bForceUpdate:
self.m_strDigits[iDigitCount] = strTimeString[i]
if strTimeString[i] == ":":
self.m_bstrSetLineCmds += b"widget_set xbmc lineBigDigit%i %i 10\n" % (iDigitCount, iOffset)
elif strTimeString[i].isdigit():
self.m_bstrSetLineCmds += b"widget_set xbmc lineBigDigit%i %i %s\n" % (iDigitCount, iOffset, strTimeString[i].encode(self.m_strLCDEncoding))
else:
self.m_bstrSetLineCmds += b"widget_set xbmc lineBigDigit%i 0 0\n" % (iDigitCount)
if strTimeString[i] == ":":
iOffset += 1
else:
iOffset += 3
iDigitCount += 1
while iDigitCount <= self.m_iBigDigits:
if self.m_strDigits[iDigitCount] != "" or bForceUpdate:
self.m_strDigits[iDigitCount] = ""
self.m_bstrSetLineCmds += b"widget_set xbmc lineBigDigit%i 0 0\n" % (iDigitCount)
iDigitCount += 1
def SetProgressBar(self, percent, pxWidth):
self.m_iProgressBarWidth = int(float(percent) * pxWidth)
return self.m_iProgressBarWidth
def SetPlayingStateIcon(self):
bPlaying = self.m_InfoLabels.IsPlayerPlaying()
bPaused = self.m_InfoLabels.IsPlayerPaused()
bForwarding = self.m_InfoLabels.IsPlayerForwarding()
bRewinding = self.m_InfoLabels.IsPlayerRewinding()
self.m_bstrIconName = b"STOP"
if bForwarding:
self.m_bstrIconName = b"FF"
elif bRewinding:
self.m_bstrIconName = b"FR"
elif bPaused:
self.m_bstrIconName = b"PAUSE"
elif bPlaying:
self.m_bstrIconName = b"PLAY"
def GetRows(self):
return int(self.m_iRows)
def ClearBigDigits(self, fullredraw = True):
for i in range(1,int(self.m_iBigDigits + 1)):
# Clear Digit
if fullredraw:
self.m_bstrSetLineCmds += b"widget_set xbmc lineBigDigit%i 0 0\n" % (i)
self.m_strDigits[i] = ""
    # on full redraw, make sure all widgets get redrawn by resetting their type
if fullredraw:
for i in range(0, int(self.GetRows())):
self.m_strLineType[i] = ""
self.m_strLineText[i] = ""
self.m_bstrLineIcon[i] = b""
def ClearLine(self, iLine):
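    # Blank the icon, progress bar and scroller widgets of the given line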
self.m_bstrSetLineCmds += b"widget_set xbmc lineIcon%i 0 0 BLOCK_FILLED\n" % (iLine)
self.m_bstrSetLineCmds += b"widget_set xbmc lineProgress%i 0 0 0\n" % (iLine)
self.m_bstrSetLineCmds += b"widget_set xbmc lineScroller%i 1 %i %i %i m 1 \"\"\n" % (iLine, iLine, self.m_iColumns, iLine)
def SetLine(self, mode, iLine, strLine, dictDescriptor, bForce):
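    # Build the widget_set commands for one display line according to its
    # descriptor (type, alignment, scrolling) and queue them for sending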
if self.m_bStop or not self.tnsocket:
return
if iLine < 0 or iLine >= int(self.m_iRows):
return
plTime = self.m_InfoLabels.GetPlayerTime()
plDuration = self.m_InfoLabels.GetPlayerDuration()
ln = iLine + 1
bExtraForce = False
drawLineText = False
if self.m_strLineType[iLine] != dictDescriptor['type']:
if dictDescriptor['type'] == LCD_LINETYPE.LCD_LINETYPE_BIGSCREEN:
self.ClearDisplay()
else:
if self.m_strLineType[iLine] == LCD_LINETYPE.LCD_LINETYPE_BIGSCREEN:
self.ClearBigDigits()
else:
self.ClearLine(int(iLine + 1))
self.m_strLineType[iLine] = dictDescriptor['type']
bExtraForce = True
if dictDescriptor['type'] == LCD_LINETYPE.LCD_LINETYPE_PROGRESS and dictDescriptor['text'] != "":
self.m_bstrSetLineCmds += b"widget_set xbmc lineScroller%i 1 %i %i %i m 1 \"%s\"\n" % (ln, ln, self.m_iColumns, ln, dictDescriptor['text'].encode(self.m_strLCDEncoding))
if dictDescriptor['type'] == LCD_LINETYPE.LCD_LINETYPE_PROGRESSTIME and dictDescriptor['text'] != "":
self.m_bstrSetLineCmds += b"widget_set xbmc lineScroller%i 1 %i %i %i m 1 \"%s\"\n" % (ln, ln, self.m_iColumns, ln, dictDescriptor['text'].encode(self.m_strLCDEncoding))
if dictDescriptor['type'] == LCD_LINETYPE.LCD_LINETYPE_BIGSCREEN:
strLineLong = self.GetBigDigitTime(mode)
elif dictDescriptor['type'] == LCD_LINETYPE.LCD_LINETYPE_PROGRESSTIME:
strLineLong = plTime + self.m_bProgressbarBlank * (self.m_iColumns - len(plTime) - len(plDuration)) + plDuration
else:
strLineLong = strLine
    strLineLong = strLineLong.strip()
iMaxLineLen = dictDescriptor['endx'] - (int(dictDescriptor['startx']) - 1)
iScrollSpeed = self.m_Settings.getScrollDelay()
bstrScrollMode = self.m_Settings.getLCDprocScrollMode().encode(self.m_strLCDEncoding)
if len(strLineLong) > iMaxLineLen: # if the string doesn't fit the display...
if iScrollSpeed != 0: # add separator when scrolling enabled
if bstrScrollMode == b"m": # and scrollmode is marquee
strLineLong += self.m_strScrollSeparator
else: # or cut off
strLineLong = strLineLong[:iMaxLineLen]
iScrollSpeed = 1
iStartX = dictDescriptor['startx']
# check if update is required
if strLineLong != self.m_strLineText[iLine] or bForce:
# bigscreen
if dictDescriptor['type'] == LCD_LINETYPE.LCD_LINETYPE_BIGSCREEN:
self.SetBigDigits(strLineLong, bExtraForce)
# progressbar line
elif dictDescriptor['type'] == LCD_LINETYPE.LCD_LINETYPE_PROGRESS:
self.m_bstrSetLineCmds += b"widget_set xbmc lineProgress%i %i %i %i\n" % (ln, iStartX, ln, self.m_iProgressBarWidth)
# progressbar line with time
elif dictDescriptor['type'] == LCD_LINETYPE.LCD_LINETYPE_PROGRESSTIME:
drawLineText = True
pLenFract = float(self.m_iColumns - int(len(plDuration) + len(plTime))) / self.m_iColumns
pTimeLen = int(self.m_iProgressBarWidth * pLenFract)
self.m_bstrSetLineCmds += b"widget_set xbmc lineProgress%i %i %i %i\n" % (ln, iStartX + len(plTime), ln, pTimeLen)
# everything else (text, icontext)
else:
drawLineText = True
if len(strLineLong) < iMaxLineLen and dictDescriptor['align'] != LCD_LINEALIGN.LCD_LINEALIGN_LEFT:
iSpaces = iMaxLineLen - len(strLineLong)
if dictDescriptor['align'] == LCD_LINEALIGN.LCD_LINEALIGN_RIGHT:
iStartX += iSpaces
elif dictDescriptor['align'] == LCD_LINEALIGN.LCD_LINEALIGN_CENTER:
iStartX += int(iSpaces / 2)
if drawLineText:
self.m_bstrSetLineCmds += b"widget_set xbmc lineScroller%i %i %i %i %i %s %i \"%s\"\n" % (ln, iStartX, ln, self.m_iColumns, ln, bstrScrollMode, iScrollSpeed, re.escape(strLineLong.encode(self.m_strLCDEncoding, errors="replace")))
# cache contents
self.m_strLineText[iLine] = strLineLong
if dictDescriptor['type'] == LCD_LINETYPE.LCD_LINETYPE_ICONTEXT:
if self.m_bstrLineIcon[iLine] != self.m_bstrIconName or bExtraForce:
self.m_bstrLineIcon[iLine] = self.m_bstrIconName
self.m_bstrSetLineCmds += b"widget_set xbmc lineIcon%i 1 %i %s\n" % (ln, ln, self.m_bstrIconName)
def ClearDisplay(self):
log(LOGDEBUG, "Clearing display contents")
# clear line buffer first
self.FlushLines()
# set all widgets to empty stuff and/or offscreen
for i in range(1,int(self.m_iRows)+1):
self.ClearLine(i)
# add commands to clear big digits
self.ClearBigDigits()
# send to display
self.FlushLines()
def FlushLines(self):
if len(self.m_bstrSetLineCmds) > 0:
# Send complete command package
self.SendCommand(self.m_bstrSetLineCmds, False)
self.m_bstrSetLineCmds = b""
| gpl-2.0 | 4,010,940,431,878,455,000 | 32.681677 | 360 | 0.647411 | false |
deepmind/deep-verify | deep_verify/src/auto_verifier.py | 1 | 9808 | # coding=utf-8
# Copyright 2019 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Automatic construction of verifiable layers from a Sonnet module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from deep_verify.src.layers import layers
import interval_bound_propagation as ibp
import sonnet as snt
class NotVerifiableError(Exception):
"""Module's graph contains features that do not map to verification layers."""
class VerifiableLayerBuilder(object):
"""Constructs verifiable layers from a Sonnet module."""
def __init__(self, network):
"""Constructor.
Args:
network: `NetworkBuilder` containing network with propagated bounds.
"""
super(VerifiableLayerBuilder, self).__init__()
self._network = network
def build_layers(self):
"""Builds the verifiable layers.
Returns:
List of `SingleVerifiableLayer` for the module.
Raises:
NotVerifiableError: on invalid layer arrangement.
"""
backstop_node, known_fanout, verifiable_layers, reshape = (
self._build_layers_rec(self._network.output_module))
if (not isinstance(backstop_node, ibp.ModelInputWrapper) or
self._network.fanout_of(backstop_node) != known_fanout):
raise NotVerifiableError('Invalid connectivity')
if reshape:
raise NotVerifiableError('Cannot end with a reshape operation')
return self._fuse(verifiable_layers)
def _build_layers_rec(self, node, known_fanout=1, batchnorm_node=None):
"""Builds verifiable layers leading up to the given layer output.
The list is constructed by navigating the layers in reverse order,
stopping either when the module's original inputs are reached,
    or (within a ResNet block) when a layer is encountered that has
outputs not processed by this navigation.
Args:
node: Layer output, up to which to build verifiable layers.
      known_fanout: Number of immediate outputs of `node` that have
already been processed by the caller.
This is typically 1, but sub-classes may invoke with 2 (or possibly
greater) where the network contains branches.
batchnorm_node: The BatchNorm's ConnectedSubgraph object if
        `node` is the input to a BatchNorm layer, otherwise None.
Returns:
backstop_node: Node, typically the `ibp.ModelInputWrapper`, at which we
stopped backtracking.
      known_fanout: Number of immediate outputs of `backstop_node` that were
processed in this call.
This is typically 1, but overrides may return 2 (or possibly greater)
in the presence of branched architectures.
verifiable_layers: List of `SingleVerifiableLayer` whose final element's
output is `outputs`.
reshape: Whether the final element of `verifiable_layers` is followed by
a reshape operation.
Raises:
NotVerifiableError: on invalid layer arrangement.
"""
if (isinstance(node, ibp.ModelInputWrapper) or
self._network.fanout_of(node) != known_fanout):
# Reached the inputs (or start of the enclosing ResNet block).
# No more layers to construct.
if batchnorm_node:
raise NotVerifiableError('Cannot begin with batchnorm')
return node, known_fanout, [], False
elif (isinstance(node, ibp.IncreasingMonotonicWrapper) and
node.module.__name__ == 'identity'):
# Recursively build all preceding layers.
input_node, = self._network.dependencies(node)
return self._build_layers_rec(input_node, batchnorm_node=batchnorm_node)
elif (isinstance(node, ibp.IncreasingMonotonicWrapper) and
node.module.__name__ == 'avg_pool'):
# Recursively build all preceding layers.
input_node, = self._network.dependencies(node)
input_tensor, known_fanout, verifiable_layers, reshape = (
self._build_layers_rec(input_node))
# Construct the AvgPool layer.
if batchnorm_node:
raise NotVerifiableError('AvgPool cannot have batchnorm')
if node.parameters['padding'] == 'SAME':
raise ValueError('"SAME" padding is not supported.')
verifiable_layers.append(layers.AvgPool(
input_node,
node,
kernel_shape=node.parameters['ksize'][1:-1],
strides=node.parameters['strides'][1:-1],
reshape=reshape))
return input_tensor, known_fanout, verifiable_layers, False
elif (isinstance(node, ibp.IncreasingMonotonicWrapper) and
node.module.__name__ == 'reduce_mean'):
# Recursively build all preceding layers.
input_node, = self._network.dependencies(node)
input_tensor, known_fanout, verifiable_layers, reshape = (
self._build_layers_rec(input_node))
# Construct the AvgPool layer.
if batchnorm_node:
raise NotVerifiableError('AvgPool cannot have batchnorm')
verifiable_layers.append(layers.AvgPool(
input_node,
node,
kernel_shape=None,
strides=None,
reshape=reshape))
return input_tensor, known_fanout, verifiable_layers, False
elif (isinstance(node, ibp.IncreasingMonotonicWrapper) and
node.module.__name__ == 'max_pool'):
# Recursively build all preceding layers.
input_node, = self._network.dependencies(node)
input_tensor, known_fanout, verifiable_layers, reshape = (
self._build_layers_rec(input_node))
# Construct the MaxPool layer.
if batchnorm_node:
raise NotVerifiableError('MaxPool cannot have batchnorm')
if node.parameters['padding'] == 'SAME':
raise ValueError('"SAME" padding is not supported.')
verifiable_layers.append(layers.MaxPool(
input_node,
node,
kernel_shape=node.parameters['ksize'][1:-1],
strides=node.parameters['strides'][1:-1],
reshape=reshape))
return input_tensor, known_fanout, verifiable_layers, False
elif (isinstance(node, ibp.IncreasingMonotonicWrapper) and
node.module.__name__ == 'reduce_max'):
# Recursively build all preceding layers.
input_node, = self._network.dependencies(node)
input_tensor, known_fanout, verifiable_layers, reshape = (
self._build_layers_rec(input_node))
# Construct the MaxPool layer.
if batchnorm_node:
raise NotVerifiableError('MaxPool cannot have batchnorm')
verifiable_layers.append(layers.MaxPool(
input_node,
node,
kernel_shape=None,
strides=None,
reshape=reshape))
return input_tensor, known_fanout, verifiable_layers, False
elif isinstance(node.module, snt.BatchNorm):
# Construct the previous layer with batchnorm.
if batchnorm_node:
raise NotVerifiableError('Cannot have consecutive batchnorms')
input_node, = self._network.dependencies(node)
return self._build_layers_rec(input_node, batchnorm_node=node)
elif isinstance(node.module, snt.BatchReshape):
# Recursively build all preceding layers.
input_node, = self._network.dependencies(node)
backstop_node, known_fanout, verifiable_layers, reshape = (
self._build_layers_rec(input_node))
if batchnorm_node:
raise NotVerifiableError('Reshape cannot have batchnorm')
return backstop_node, known_fanout, verifiable_layers, True
else:
# Recursively build all preceding layers.
input_nodes = self._network.dependencies(node)
if len(input_nodes) != 1:
raise NotVerifiableError('Unary operation expected')
input_node, = input_nodes
backstop_node, known_fanout, verifiable_layers, reshape = (
self._build_layers_rec(input_node))
# Construct the layer.
verifiable_layers.append(layers.create_verifiable_layer(
input_node,
batchnorm_node or node,
node.module,
batch_norm=(batchnorm_node.module if batchnorm_node else None),
reshape=reshape,
parameters=(node.parameters
if isinstance(node, ibp.IncreasingMonotonicWrapper)
else None),
))
return backstop_node, known_fanout, verifiable_layers, False
def _fuse(self, verifiable_layers):
"""Performs fusion of certain layer pairs."""
fused_layers = []
idx = 0
while idx < len(verifiable_layers):
if (idx+2 <= len(verifiable_layers) and
isinstance(verifiable_layers[idx], layers.MaxPool) and
isinstance(verifiable_layers[idx+1], layers.Activation) and
verifiable_layers[idx+1].activation == 'relu'):
# Fuse maxpool with relu.
original = verifiable_layers[idx]
fused_layers.append(layers.MaxPool(original.input_node,
original.output_node,
kernel_shape=original.kernel_shape,
strides=original.strides,
with_relu=True,
reshape=original.reshape))
idx += 2
else:
fused_layers.append(verifiable_layers[idx])
idx += 1
return fused_layers
| apache-2.0 | 5,044,877,332,549,764,000 | 38.869919 | 80 | 0.662622 | false |
SafeW3rd/Ciphers | simpleSubHacker.py | 1 | 7029 | # Simple Substitution Cipher Hacker
# http://inventwithpython.com/hacking (BSD Licensed)
import os, re, copy, pprint, pyperclip, simpleSubCipher, makeWordPatterns
if not os.path.exists('wordPatterns.py'):
makeWordPatterns.main() # create the wordPatterns.py file
import wordPatterns
LETTERS = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
nonLettersOrSpacePattern = re.compile('[^A-Z\s]')
def main():
message = 'Sy l nlx sr pyyacao l ylwj eiswi upar lulsxrj isr sxrjsxwjr, ia esmm rwctjsxsza sj wmpramh, lxo txmarr jia aqsoaxwa sr pqaceiamnsxu, ia esmm caytra jp famsaqa sj. Sy, px jia pjiac ilxo, ia sr pyyacao rpnajisxu eiswi lyypcor l calrpx ypc lwjsxu sx lwwpcolxwa jp isr sxrjsxwjr, ia esmm lwwabj sj aqax px jia rmsuijarj aqsoaxwa. Jia pcsusx py nhjir sr agbmlsxao sx jisr elh. -Facjclxo Ctrramm'
# Determine the possible valid ciphertext translations.
print('Hacking...')
letterMapping = hackSimpleSub(message)
# Display the results to the user.
print('Mapping:')
pprint.pprint(letterMapping)
print()
print('Original ciphertext:')
print(message)
print()
print('Copying hacked message to clipboard:')
hackedMessage = decryptWithCipherletterMapping(message, letterMapping)
pyperclip.copy(hackedMessage)
print(hackedMessage)
def getBlankCipherletterMapping():
# Returns a dictionary value that is a blank cipherletter mapping.
return {'A': [], 'B': [], 'C': [], 'D': [], 'E': [], 'F': [], 'G': [], 'H': [], 'I': [], 'J': [], 'K': [], 'L': [], 'M': [], 'N': [], 'O': [], 'P': [], 'Q': [], 'R': [], 'S': [], 'T': [], 'U': [], 'V': [], 'W': [], 'X': [], 'Y': [], 'Z': []}
def addLettersToMapping(letterMapping, cipherword, candidate):
# The letterMapping parameter is a "cipherletter mapping" dictionary
# value that the return value of this function starts as a copy of.
# The cipherword parameter is a string value of the ciphertext word.
# The candidate parameter is a possible English word that the
# cipherword could decrypt to.
# This function adds the letters of the candidate as potential
# decryption letters for the cipherletters in the cipherletter
# mapping.
letterMapping = copy.deepcopy(letterMapping)
for i in range(len(cipherword)):
if candidate[i] not in letterMapping[cipherword[i]]:
letterMapping[cipherword[i]].append(candidate[i])
return letterMapping
def intersectMappings(mapA, mapB):
# To intersect two maps, create a blank map, and then add only the
# potential decryption letters if they exist in BOTH maps.
intersectedMapping = getBlankCipherletterMapping()
for letter in LETTERS:
# An empty list means "any letter is possible". In this case just
# copy the other map entirely.
if mapA[letter] == []:
intersectedMapping[letter] = copy.deepcopy(mapB[letter])
elif mapB[letter] == []:
intersectedMapping[letter] = copy.deepcopy(mapA[letter])
else:
# If a letter in mapA[letter] exists in mapB[letter], add
# that letter to intersectedMapping[letter].
for mappedLetter in mapA[letter]:
if mappedLetter in mapB[letter]:
intersectedMapping[letter].append(mappedLetter)
return intersectedMapping
def removeSolvedLettersFromMapping(letterMapping):
# Cipher letters in the mapping that map to only one letter are
# "solved" and can be removed from the other letters.
# For example, if 'A' maps to potential letters ['M', 'N'], and 'B'
# maps to ['N'], then we know that 'B' must map to 'N', so we can
# remove 'N' from the list of what 'A' could map to. So 'A' then maps
# to ['M']. Note that now that 'A' maps to only one letter, we can
# remove 'M' from the list of letters for every other
# letter. (This is why there is a loop that keeps reducing the map.)
letterMapping = copy.deepcopy(letterMapping)
loopAgain = True
while loopAgain:
# First assume that we will not loop again:
loopAgain = False
# solvedLetters will be a list of uppercase letters that have one
# and only one possible mapping in letterMapping
solvedLetters = []
for cipherletter in LETTERS:
if len(letterMapping[cipherletter]) == 1:
solvedLetters.append(letterMapping[cipherletter][0])
        # If a letter is solved, then it cannot possibly be a potential
# decryption letter for a different ciphertext letter, so we
# should remove it from those other lists.
for cipherletter in LETTERS:
for s in solvedLetters:
if len(letterMapping[cipherletter]) != 1 and s in letterMapping[cipherletter]:
letterMapping[cipherletter].remove(s)
if len(letterMapping[cipherletter]) == 1:
# A new letter is now solved, so loop again.
loopAgain = True
return letterMapping
def hackSimpleSub(message):
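    # Intersect the candidate mappings of every cipherword to narrow down the
    # possible decryption letters for each cipherletter.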
intersectedMap = getBlankCipherletterMapping()
cipherwordList = nonLettersOrSpacePattern.sub('', message.upper()).split()
for cipherword in cipherwordList:
# Get a new cipherletter mapping for each ciphertext word.
newMap = getBlankCipherletterMapping()
wordPattern = makeWordPatterns.getWordPattern(cipherword)
if wordPattern not in wordPatterns.allPatterns:
continue # This word was not in our dictionary, so continue.
# Add the letters of each candidate to the mapping.
for candidate in wordPatterns.allPatterns[wordPattern]:
newMap = addLettersToMapping(newMap, cipherword, candidate)
# Intersect the new mapping with the existing intersected mapping.
intersectedMap = intersectMappings(intersectedMap, newMap)
# Remove any solved letters from the other lists.
return removeSolvedLettersFromMapping(intersectedMap)
def decryptWithCipherletterMapping(ciphertext, letterMapping):
# Return a string of the ciphertext decrypted with the letter mapping,
# with any ambiguous decrypted letters replaced with an _ underscore.
# First create a simple sub key from the letterMapping mapping.
key = ['x'] * len(LETTERS)
for cipherletter in LETTERS:
if len(letterMapping[cipherletter]) == 1:
# If there's only one letter, add it to the key.
keyIndex = LETTERS.find(letterMapping[cipherletter][0])
key[keyIndex] = cipherletter
else:
ciphertext = ciphertext.replace(cipherletter.lower(), '_')
ciphertext = ciphertext.replace(cipherletter.upper(), '_')
key = ''.join(key)
# With the key we've created, decrypt the ciphertext.
return simpleSubCipher.decryptMessage(key, ciphertext)
if __name__ == '__main__':
main() | mit | -4,164,585,501,999,395,000 | 43.070513 | 405 | 0.654289 | false |
fle-internal/content-curation | contentcuration/contentcuration/management/commands/exportchannel.py | 1 | 27865 | import collections
import itertools
import json
import logging as logmodule
import os
import re
import sys
import tempfile
import uuid
import zipfile
from itertools import chain
from django.conf import settings
from django.core.files import File
from django.core.files.storage import default_storage as storage
from django.core.management import call_command
from django.core.management.base import BaseCommand
from django.db import transaction
from django.db.models import Count
from django.db.models import Q
from django.db.models import Sum
from django.template.loader import render_to_string
from django.utils import timezone
from django.utils.translation import ugettext_lazy as _
from kolibri.content.utils.search import fuzz
from kolibri_content import models as kolibrimodels
from kolibri_content.router import get_active_content_database
from kolibri_content.router import using_content_database
from le_utils.constants import content_kinds
from le_utils.constants import exercises
from le_utils.constants import file_formats
from le_utils.constants import format_presets
from le_utils.constants import roles
from contentcuration import models as ccmodels
from contentcuration.statistics import record_publish_stats
from contentcuration.utils.files import create_thumbnail_from_base64
from contentcuration.utils.files import get_thumbnail_encoding
from contentcuration.utils.parser import extract_value
from contentcuration.utils.parser import load_json_string
logmodule.basicConfig()
logging = logmodule.getLogger(__name__)
reload(sys)
sys.setdefaultencoding('utf8')
PERSEUS_IMG_DIR = exercises.IMG_PLACEHOLDER + "/images"
THUMBNAIL_DIMENSION = 128
MIN_SCHEMA_VERSION = "1"
class EarlyExit(BaseException):
def __init__(self, message, db_path):
self.message = message
self.db_path = db_path
class Command(BaseCommand):
def add_arguments(self, parser):
parser.add_argument('channel_id', type=str)
parser.add_argument('--force', action='store_true', dest='force', default=False)
parser.add_argument('--user_id', dest='user_id', default=None)
parser.add_argument('--force-exercises', action='store_true', dest='force-exercises', default=False)
# optional argument to send an email to the user when done with exporting channel
parser.add_argument('--email', action='store_true', default=False)
def handle(self, *args, **options):
# license_id = options['license_id']
channel_id = options['channel_id']
force = options['force']
send_email = options['email']
user_id = options['user_id']
force_exercises = options['force-exercises']
channel = ccmodels.Channel.objects.get(pk=channel_id)
# license = ccmodels.License.objects.get(pk=license_id)
try:
create_content_database(channel_id, force, user_id, force_exercises)
increment_channel_version(channel)
mark_all_nodes_as_changed(channel)
add_tokens_to_channel(channel)
fill_published_fields(channel)
# Attributes not getting set for some reason, so just save it here
channel.main_tree.publishing = False
channel.main_tree.changed = False
channel.main_tree.published = True
channel.main_tree.save()
if send_email:
send_emails(channel, user_id)
# use SQLite backup API to put DB into archives folder.
# Then we can use the empty db name to have SQLite use a temporary DB (https://www.sqlite.org/inmemorydb.html)
record_publish_stats(channel)
except EarlyExit as e:
logging.warning("Exited early due to {message}.".format(message=e.message))
self.stdout.write("You can find your database in {path}".format(path=e.db_path))
# No matter what, make sure publishing is set to False once the run is done
finally:
channel.main_tree.publishing = False
channel.main_tree.save()
def send_emails(channel, user_id):
subject = render_to_string('registration/custom_email_subject.txt', {'subject': _('Kolibri Studio Channel Published')})
if user_id:
user = ccmodels.User.objects.get(pk=user_id)
message = render_to_string('registration/channel_published_email.txt', {'channel': channel, 'user': user})
user.email_user(subject, message, settings.DEFAULT_FROM_EMAIL, )
else:
# Email all users about updates to channel
for user in itertools.chain(channel.editors.all(), channel.viewers.all()):
message = render_to_string('registration/channel_published_email.txt', {'channel': channel, 'user': user})
user.email_user(subject, message, settings.DEFAULT_FROM_EMAIL, )
def create_content_database(channel_id, force, user_id, force_exercises):
channel = ccmodels.Channel.objects.get(pk=channel_id)
    # unless this is a forced publish, only proceed if something in the tree changed
if not force:
raise_if_nodes_are_all_unchanged(channel)
fh, tempdb = tempfile.mkstemp(suffix=".sqlite3")
with using_content_database(tempdb):
channel.main_tree.publishing = True
channel.main_tree.save()
prepare_export_database(tempdb)
map_channel_to_kolibri_channel(channel)
map_content_nodes(channel.main_tree, channel.language, channel.id, channel.name, user_id=user_id, force_exercises=force_exercises)
map_prerequisites(channel.main_tree)
save_export_database(channel_id)
def create_kolibri_license_object(ccnode):
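    # Mirror the node's license into the exported database; custom licenses
    # carry their description on the node rather than on the license record.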
use_license_description = not ccnode.license.is_custom
return kolibrimodels.License.objects.get_or_create(
license_name=ccnode.license.license_name,
license_description=ccnode.license.license_description if use_license_description else ccnode.license_description
)
def increment_channel_version(channel):
channel.version += 1
channel.last_published = timezone.now()
channel.save()
def assign_license_to_contentcuration_nodes(channel, license):
channel.main_tree.get_family().update(license_id=license.pk)
def map_content_nodes(root_node, default_language, channel_id, channel_name, user_id=None, force_exercises=False):
# make sure we process nodes higher up in the tree first, or else when we
# make mappings the parent nodes might not be there
node_queue = collections.deque()
node_queue.append(root_node)
def queue_get_return_none_when_empty():
try:
return node_queue.popleft()
except IndexError:
return None
# kolibri_license = kolibrimodels.License.objects.get(license_name=license.license_name)
with transaction.atomic():
with ccmodels.ContentNode.objects.delay_mptt_updates():
for node in iter(queue_get_return_none_when_empty, None):
logging.debug("Mapping node with id {id}".format(
id=node.pk))
if node.get_descendants(include_self=True).exclude(kind_id=content_kinds.TOPIC).exists():
children = (node.children.all())
node_queue.extend(children)
kolibrinode = create_bare_contentnode(node, default_language, channel_id, channel_name)
if node.kind.kind == content_kinds.EXERCISE:
exercise_data = process_assessment_metadata(node, kolibrinode)
if force_exercises or node.changed or not node.files.filter(preset_id=format_presets.EXERCISE).exists():
create_perseus_exercise(node, kolibrinode, exercise_data, user_id=user_id)
create_associated_file_objects(kolibrinode, node)
map_tags_to_node(kolibrinode, node)
def create_bare_contentnode(ccnode, default_language, channel_id, channel_name):
logging.debug("Creating a Kolibri contentnode for instance id {}".format(
ccnode.node_id))
kolibri_license = None
if ccnode.license is not None:
kolibri_license = create_kolibri_license_object(ccnode)[0]
language = None
if ccnode.language or default_language:
language, _new = get_or_create_language(ccnode.language or default_language)
kolibrinode, is_new = kolibrimodels.ContentNode.objects.update_or_create(
pk=ccnode.node_id,
defaults={
'kind': ccnode.kind.kind,
'title': ccnode.title if ccnode.parent else channel_name,
'content_id': ccnode.content_id,
'channel_id': channel_id,
'author': ccnode.author or "",
'description': ccnode.description,
'sort_order': ccnode.sort_order,
'license_owner': ccnode.copyright_holder or "",
'license': kolibri_license,
'available': ccnode.get_descendants(include_self=True).exclude(kind_id=content_kinds.TOPIC).exists(), # Hide empty topics
'stemmed_metaphone': ' '.join(fuzz(ccnode.title + ' ' + ccnode.description)),
'lang': language,
'license_name': kolibri_license.license_name if kolibri_license is not None else None,
'license_description': kolibri_license.license_description if kolibri_license is not None else None,
'coach_content': ccnode.role_visibility == roles.COACH,
}
)
if ccnode.parent:
logging.debug("Associating {child} with parent {parent}".format(
child=kolibrinode.pk,
parent=ccnode.parent.node_id
))
kolibrinode.parent = kolibrimodels.ContentNode.objects.get(pk=ccnode.parent.node_id)
kolibrinode.save()
logging.debug("Created Kolibri ContentNode with node id {}".format(ccnode.node_id))
logging.debug("Kolibri node count: {}".format(kolibrimodels.ContentNode.objects.all().count()))
return kolibrinode
def get_or_create_language(language):
return kolibrimodels.Language.objects.get_or_create(
id=language.pk,
lang_code=language.lang_code,
lang_subcode=language.lang_subcode,
lang_name=language.lang_name if hasattr(language, 'lang_name') else language.native_name,
lang_direction=language.lang_direction
)
def create_associated_thumbnail(ccnode, ccfilemodel):
"""
Gets the appropriate thumbnail for export (uses or generates a base64 encoding)
Args:
ccnode (<ContentNode>): node to derive thumbnail from (if encoding is provided)
ccfilemodel (<File>): file to get thumbnail from if no encoding is available
Returns <File> model of encoded, resized thumbnail
"""
encoding = None
try:
encoding = ccnode.thumbnail_encoding and load_json_string(ccnode.thumbnail_encoding).get('base64')
except ValueError:
logging.error("ERROR: node thumbnail is not in correct format ({}: {})".format(ccnode.id, ccnode.thumbnail_encoding))
return
# Save the encoding if it doesn't already have an encoding
if not encoding:
try:
encoding = get_thumbnail_encoding(str(ccfilemodel))
except IOError:
# ImageMagick may raise an IOError if the file is not a thumbnail. Catch that then just return early.
logging.error("ERROR: cannot identify the thumbnail ({}: {})".format(ccnode.id, ccnode.thumbnail_encoding))
return
ccnode.thumbnail_encoding = json.dumps({
"base64": encoding,
"points": [],
"zoom": 0,
})
ccnode.save()
return create_thumbnail_from_base64(
encoding,
uploaded_by=ccfilemodel.uploaded_by,
file_format_id=ccfilemodel.file_format_id,
preset_id=ccfilemodel.preset_id
)
def create_associated_file_objects(kolibrinode, ccnode):
logging.debug("Creating LocalFile and File objects for Node {}".format(kolibrinode.id))
for ccfilemodel in ccnode.files.exclude(Q(preset_id=format_presets.EXERCISE_IMAGE) | Q(preset_id=format_presets.EXERCISE_GRAPHIE)):
preset = ccfilemodel.preset
fformat = ccfilemodel.file_format
if ccfilemodel.language:
get_or_create_language(ccfilemodel.language)
if preset.thumbnail:
ccfilemodel = create_associated_thumbnail(ccnode, ccfilemodel) or ccfilemodel
kolibrilocalfilemodel, new = kolibrimodels.LocalFile.objects.get_or_create(
pk=ccfilemodel.checksum,
defaults={
'extension': fformat.extension,
'file_size': ccfilemodel.file_size,
}
)
kolibrimodels.File.objects.create(
pk=ccfilemodel.pk,
checksum=ccfilemodel.checksum,
extension=fformat.extension,
available=True, # TODO: Set this to False, once we have availability stamping implemented in Kolibri
file_size=ccfilemodel.file_size,
contentnode=kolibrinode,
preset=preset.pk,
supplementary=preset.supplementary,
lang_id=ccfilemodel.language and ccfilemodel.language.pk,
thumbnail=preset.thumbnail,
priority=preset.order,
local_file=kolibrilocalfilemodel,
)
def create_perseus_exercise(ccnode, kolibrinode, exercise_data, user_id=None):
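    # Package the node's assessment items into a .perseus zip and attach it to
    # the node as an exercise file.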
logging.debug("Creating Perseus Exercise for Node {}".format(ccnode.title))
filename = "{0}.{ext}".format(ccnode.title, ext=file_formats.PERSEUS)
temppath = None
try:
with tempfile.NamedTemporaryFile(suffix="zip", delete=False) as tempf:
temppath = tempf.name
create_perseus_zip(ccnode, exercise_data, tempf)
file_size = tempf.tell()
tempf.flush()
ccnode.files.filter(preset_id=format_presets.EXERCISE).delete()
assessment_file_obj = ccmodels.File.objects.create(
file_on_disk=File(open(temppath, 'r'), name=filename),
contentnode=ccnode,
file_format_id=file_formats.PERSEUS,
preset_id=format_presets.EXERCISE,
original_filename=filename,
file_size=file_size,
uploaded_by_id=user_id,
)
logging.debug("Created exercise for {0} with checksum {1}".format(ccnode.title, assessment_file_obj.checksum))
finally:
temppath and os.unlink(temppath)
def process_assessment_metadata(ccnode, kolibrinode):
# Get mastery model information, set to default if none provided
assessment_items = ccnode.assessment_items.all().order_by('order')
exercise_data = json.loads(ccnode.extra_fields) if ccnode.extra_fields else {}
randomize = exercise_data.get('randomize') if exercise_data.get('randomize') is not None else True
assessment_item_ids = [a.assessment_id for a in assessment_items]
mastery_model = {'type': exercise_data.get('mastery_model') or exercises.M_OF_N}
if mastery_model['type'] == exercises.M_OF_N:
mastery_model.update({'n': exercise_data.get('n') or min(5, assessment_items.count()) or 1})
mastery_model.update({'m': exercise_data.get('m') or min(5, assessment_items.count()) or 1})
elif mastery_model['type'] == exercises.DO_ALL:
mastery_model.update({'n': assessment_items.count() or 1, 'm': assessment_items.count() or 1})
elif mastery_model['type'] == exercises.NUM_CORRECT_IN_A_ROW_2:
mastery_model.update({'n': 2, 'm': 2})
elif mastery_model['type'] == exercises.NUM_CORRECT_IN_A_ROW_3:
mastery_model.update({'n': 3, 'm': 3})
elif mastery_model['type'] == exercises.NUM_CORRECT_IN_A_ROW_5:
mastery_model.update({'n': 5, 'm': 5})
elif mastery_model['type'] == exercises.NUM_CORRECT_IN_A_ROW_10:
mastery_model.update({'n': 10, 'm': 10})
exercise_data.update({
'mastery_model': exercises.M_OF_N,
'legacy_mastery_model': mastery_model['type'],
'randomize': randomize,
'n': mastery_model.get('n'),
'm': mastery_model.get('m'),
'all_assessment_items': assessment_item_ids,
'assessment_mapping': {a.assessment_id: a.type if a.type != 'true_false' else exercises.SINGLE_SELECTION.decode('utf-8') for a in assessment_items},
})
kolibrimodels.AssessmentMetaData.objects.create(
id=uuid.uuid4(),
contentnode=kolibrinode,
assessment_item_ids=json.dumps(assessment_item_ids),
number_of_assessments=assessment_items.count(),
mastery_model=json.dumps(mastery_model),
randomize=randomize,
is_manipulable=ccnode.kind_id == content_kinds.EXERCISE,
)
return exercise_data
def create_perseus_zip(ccnode, exercise_data, write_to_path):
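    # Write exercise.json, each question's JSON and any referenced image or
    # graphie files into the zip.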
with zipfile.ZipFile(write_to_path, "w") as zf:
try:
exercise_context = {
'exercise': json.dumps(exercise_data, sort_keys=True, indent=4)
}
exercise_result = render_to_string('perseus/exercise.json', exercise_context)
write_to_zipfile("exercise.json", exercise_result, zf)
for question in ccnode.assessment_items.prefetch_related('files').all().order_by('order'):
try:
for image in question.files.filter(preset_id=format_presets.EXERCISE_IMAGE).order_by('checksum'):
image_name = "images/{}.{}".format(image.checksum, image.file_format_id)
if image_name not in zf.namelist():
with storage.open(ccmodels.generate_object_storage_name(image.checksum, str(image)), 'rb') as content:
write_to_zipfile(image_name, content.read(), zf)
for image in question.files.filter(preset_id=format_presets.EXERCISE_GRAPHIE).order_by('checksum'):
svg_name = "images/{0}.svg".format(image.original_filename)
json_name = "images/{0}-data.json".format(image.original_filename)
if svg_name not in zf.namelist() or json_name not in zf.namelist():
with storage.open(ccmodels.generate_object_storage_name(image.checksum, str(image)), 'rb') as content:
content = content.read()
content = content.split(exercises.GRAPHIE_DELIMITER)
write_to_zipfile(svg_name, content[0], zf)
write_to_zipfile(json_name, content[1], zf)
write_assessment_item(question, zf)
except Exception as e:
logging.error("Publishing error: {}".format(str(e)))
finally:
zf.close()
def write_to_zipfile(filename, content, zf):
info = zipfile.ZipInfo(filename, date_time=(2013, 3, 14, 1, 59, 26))
info.comment = "Perseus file generated during export process".encode()
info.compress_type = zipfile.ZIP_STORED
info.create_system = 0
zf.writestr(info, content)
def write_assessment_item(assessment_item, zf):
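    # Render a single assessment item to Perseus JSON using the template that
    # matches its question type.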
if assessment_item.type == exercises.MULTIPLE_SELECTION:
template = 'perseus/multiple_selection.json'
elif assessment_item.type == exercises.SINGLE_SELECTION or assessment_item.type == 'true_false':
template = 'perseus/multiple_selection.json'
elif assessment_item.type == exercises.INPUT_QUESTION:
template = 'perseus/input_question.json'
elif assessment_item.type == exercises.PERSEUS_QUESTION:
template = 'perseus/perseus_question.json'
else:
raise TypeError("Unrecognized question type on item {}".format(assessment_item.assessment_id))
question = process_formulas(assessment_item.question)
question, question_images = process_image_strings(question, zf)
answer_data = json.loads(assessment_item.answers)
for answer in answer_data:
if assessment_item.type == exercises.INPUT_QUESTION:
answer['answer'] = extract_value(answer['answer'])
else:
answer['answer'] = answer['answer'].replace(exercises.CONTENT_STORAGE_PLACEHOLDER, PERSEUS_IMG_DIR)
answer['answer'] = process_formulas(answer['answer'])
# In case perseus doesn't support =wxh syntax, use below code
answer['answer'], answer_images = process_image_strings(answer['answer'], zf)
answer.update({'images': answer_images})
answer_data = list(filter(lambda a: a['answer'] or a['answer'] == 0, answer_data)) # Filter out empty answers, but not 0
hint_data = json.loads(assessment_item.hints)
for hint in hint_data:
hint['hint'] = process_formulas(hint['hint'])
hint['hint'], hint_images = process_image_strings(hint['hint'], zf)
hint.update({'images': hint_images})
context = {
'question': question,
'question_images': question_images,
'answers': sorted(answer_data, lambda x, y: cmp(x.get('order'), y.get('order'))),
'multiple_select': assessment_item.type == exercises.MULTIPLE_SELECTION,
'raw_data': assessment_item.raw_data.replace(exercises.CONTENT_STORAGE_PLACEHOLDER, PERSEUS_IMG_DIR),
'hints': sorted(hint_data, lambda x, y: cmp(x.get('order'), y.get('order'))),
'randomize': assessment_item.randomize,
}
result = render_to_string(template, context).encode('utf-8', "ignore")
write_to_zipfile("{0}.json".format(assessment_item.assessment_id), result, zf)
def process_formulas(content):
for match in re.finditer(ur'\$(\$.+\$)\$', content):
content = content.replace(match.group(0), match.group(1))
return content
def process_image_strings(content, zf):
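    # Point image references at the exercise's images/ directory, copy the
    # image files into the zip and collect any width/height resize hints.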
image_list = []
content = content.replace(exercises.CONTENT_STORAGE_PLACEHOLDER, PERSEUS_IMG_DIR)
for match in re.finditer(ur'!\[(?:[^\]]*)]\(([^\)]+)\)', content):
img_match = re.search(ur'(.+/images/[^\s]+)(?:\s=([0-9\.]+)x([0-9\.]+))*', match.group(1))
if img_match:
# Add any image files that haven't been written to the zipfile
filename = img_match.group(1).split('/')[-1]
checksum, ext = os.path.splitext(filename)
image_name = "images/{}.{}".format(checksum, ext[1:])
if image_name not in zf.namelist():
with storage.open(ccmodels.generate_object_storage_name(checksum, filename), 'rb') as imgfile:
write_to_zipfile(image_name, imgfile.read(), zf)
# Add resizing data
if img_match.group(2) and img_match.group(3):
image_data = {'name': img_match.group(1)}
image_data.update({'width': float(img_match.group(2))})
image_data.update({'height': float(img_match.group(3))})
image_list.append(image_data)
content = content.replace(match.group(1), img_match.group(1))
return content, image_list
def map_prerequisites(root_node):
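    # Recreate the tree's prerequisite relationships on the exported Kolibri nodes.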
for n in ccmodels.PrerequisiteContentRelationship.objects.filter(prerequisite__tree_id=root_node.tree_id)\
.values('prerequisite__node_id', 'target_node__node_id'):
target_node = kolibrimodels.ContentNode.objects.get(pk=n['target_node__node_id'])
target_node.has_prerequisite.add(n['prerequisite__node_id'])
def map_channel_to_kolibri_channel(channel):
logging.debug("Generating the channel metadata.")
channel.icon_encoding = convert_channel_thumbnail(channel)
channel.save()
kolibri_channel = kolibrimodels.ChannelMetadata.objects.create(
id=channel.id,
name=channel.name,
description=channel.description,
version=channel.version + 1, # Need to save as version being published, not current version
thumbnail=channel.icon_encoding,
root_pk=channel.main_tree.node_id,
root_id=channel.main_tree.node_id,
min_schema_version=MIN_SCHEMA_VERSION, # Need to modify Kolibri so we can import this without importing models
)
logging.info("Generated the channel metadata.")
return kolibri_channel
def convert_channel_thumbnail(channel):
    """ convert_channel_thumbnail: gets base64 encoding of channel thumbnail
    Args:
        channel (<Channel>): channel whose thumbnail will be encoded
Returns: base64 encoding of thumbnail
"""
if not channel.thumbnail or channel.thumbnail == '' or 'static' in channel.thumbnail:
return ""
if channel.thumbnail_encoding:
try:
thumbnail_data = channel.thumbnail_encoding
if thumbnail_data.get("base64"):
return thumbnail_data["base64"]
except ValueError:
logging.error("ERROR: channel thumbnail is not in correct format ({}: {})".format(channel.id, channel.thumbnail_encoding))
return get_thumbnail_encoding(channel.thumbnail)
def map_tags_to_node(kolibrinode, ccnode):
""" map_tags_to_node: assigns tags to nodes (creates fk relationship)
Args:
kolibrinode (kolibri.models.ContentNode): node to map tag to
ccnode (contentcuration.models.ContentNode): node with tags to map
Returns: None
"""
tags_to_add = []
for tag in ccnode.tags.all():
t, _new = kolibrimodels.ContentTag.objects.get_or_create(pk=tag.pk, tag_name=tag.tag_name)
tags_to_add.append(t)
kolibrinode.tags = tags_to_add
kolibrinode.save()
def prepare_export_database(tempdb):
call_command("flush", "--noinput", database=get_active_content_database()) # clears the db!
call_command("migrate",
"content",
run_syncdb=True,
database=get_active_content_database(),
noinput=True)
logging.info("Prepared the export database.")
def raise_if_nodes_are_all_unchanged(channel):
logging.debug("Checking if we have any changed nodes.")
changed_models = channel.main_tree.get_family().filter(changed=True)
if changed_models.count() == 0:
logging.debug("No nodes have been changed!")
raise EarlyExit(message="No models changed!", db_path=None)
logging.info("Some nodes are changed.")
def mark_all_nodes_as_changed(channel):
logging.debug("Marking all nodes as changed.")
channel.main_tree.get_family().update(changed=False, published=True)
logging.info("Marked all nodes as changed.")
def save_export_database(channel_id):
logging.debug("Saving export database")
current_export_db_location = get_active_content_database()
target_export_db_location = os.path.join(settings.DB_ROOT, "{id}.sqlite3".format(id=channel_id))
with open(current_export_db_location) as currentf:
storage.save(target_export_db_location, currentf)
logging.info("Successfully copied to {}".format(target_export_db_location))
def add_tokens_to_channel(channel):
if not channel.secret_tokens.filter(is_primary=True).exists():
logging.info("Generating tokens for the channel.")
channel.make_token()
def fill_published_fields(channel):
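    # Cache aggregate statistics of the published tree (resource count, kind
    # counts, total file size, included languages) on the channel record.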
published_nodes = channel.main_tree.get_descendants().filter(published=True).prefetch_related('files')
channel.total_resource_count = published_nodes.exclude(kind_id=content_kinds.TOPIC).count()
channel.published_kind_count = json.dumps(list(published_nodes.values('kind_id').annotate(count=Count('kind_id')).order_by('kind_id')))
channel.published_size = published_nodes.values('files__checksum', 'files__file_size').distinct(
).aggregate(resource_size=Sum('files__file_size'))['resource_size'] or 0
node_languages = published_nodes.exclude(language=None).values_list('language', flat=True)
file_languages = published_nodes.values_list('files__language', flat=True)
language_list = list(set(chain(node_languages, file_languages)))
for lang in language_list:
if lang:
channel.included_languages.add(lang)
channel.save()
| mit | -1,944,273,147,826,961,200 | 42.335925 | 156 | 0.656271 | false |
ntt-sic/nova | nova/compute/api.py | 1 | 161884 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2011 Piston Cloud Computing, Inc.
# Copyright 2012-2013 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Handles all requests relating to compute resources (e.g. guest VMs,
networking and storage of VMs, and compute hosts on which they run)."""
import base64
import functools
import re
import string
import uuid
from oslo.config import cfg
from nova import availability_zones
from nova import block_device
from nova.cells import opts as cells_opts
from nova.compute import flavors
from nova.compute import instance_actions
from nova.compute import power_state
from nova.compute import rpcapi as compute_rpcapi
from nova.compute import task_states
from nova.compute import utils as compute_utils
from nova.compute import vm_states
from nova.consoleauth import rpcapi as consoleauth_rpcapi
from nova import crypto
from nova.db import base
from nova import exception
from nova import hooks
from nova.image import glance
from nova import network
from nova.network import model as network_model
from nova.network.security_group import openstack_driver
from nova.network.security_group import security_group_base
from nova import notifications
from nova import notifier
from nova.objects import aggregate as aggregate_obj
from nova.objects import base as obj_base
from nova.objects import instance as instance_obj
from nova.objects import instance_action
from nova.objects import instance_info_cache
from nova.objects import keypair as keypair_obj
from nova.objects import migration as migration_obj
from nova.objects import security_group as security_group_obj
from nova.objects import service as service_obj
from nova.openstack.common import excutils
from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
from nova.openstack.common import strutils
from nova.openstack.common import timeutils
from nova.openstack.common import uuidutils
import nova.policy
from nova import quota
from nova import servicegroup
from nova import utils
from nova import volume
LOG = logging.getLogger(__name__)
get_notifier = functools.partial(notifier.get_notifier, service='compute')
wrap_exception = functools.partial(exception.wrap_exception,
get_notifier=get_notifier)
compute_opts = [
cfg.BoolOpt('allow_resize_to_same_host',
default=False,
help='Allow destination machine to match source for resize. '
'Useful when testing in single-host environments.'),
cfg.BoolOpt('allow_migrate_to_same_host',
default=False,
help='Allow migrate machine to the same host. '
'Useful when testing in single-host environments.'),
cfg.StrOpt('default_schedule_zone',
help='availability zone to use when user doesn\'t specify one'),
cfg.ListOpt('non_inheritable_image_properties',
default=['cache_in_nova',
'bittorrent'],
help='These are image properties which a snapshot should not'
' inherit from an instance'),
cfg.StrOpt('null_kernel',
default='nokernel',
help='kernel image that indicates not to use a kernel, but to '
'use a raw disk image instead'),
cfg.StrOpt('multi_instance_display_name_template',
default='%(name)s-%(uuid)s',
help='When creating multiple instances with a single request '
'using the os-multiple-create API extension, this '
'template will be used to build the display name for '
'each instance. The benefit is that the instances '
'end up with different hostnames. To restore legacy '
'behavior of every instance having the same name, set '
'this option to "%(name)s". Valid keys for the '
'template are: name, uuid, count.'),
cfg.IntOpt('max_local_block_devices',
default=3,
help='Maximum number of devices that will result '
'in a local image being created on the hypervisor node. '
'Setting this to 0 means nova will allow only '
'boot from volume. A negative number means unlimited.'),
]
CONF = cfg.CONF
CONF.register_opts(compute_opts)
CONF.import_opt('compute_topic', 'nova.compute.rpcapi')
CONF.import_opt('enable', 'nova.cells.opts', group='cells')
CONF.import_opt('default_ephemeral_format', 'nova.virt.driver')
MAX_USERDATA_SIZE = 65535
QUOTAS = quota.QUOTAS
RO_SECURITY_GROUPS = ['default']
def check_instance_state(vm_state=None, task_state=(None,),
must_have_launched=True):
"""Decorator to check VM and/or task state before entry to API functions.
If the instance is in the wrong state, or has not been successfully
started at least once the wrapper will raise an exception.
"""
if vm_state is not None and not isinstance(vm_state, set):
vm_state = set(vm_state)
if task_state is not None and not isinstance(task_state, set):
task_state = set(task_state)
def outer(f):
@functools.wraps(f)
def inner(self, context, instance, *args, **kw):
if vm_state is not None and instance['vm_state'] not in vm_state:
raise exception.InstanceInvalidState(
attr='vm_state',
instance_uuid=instance['uuid'],
state=instance['vm_state'],
method=f.__name__)
if (task_state is not None and
instance['task_state'] not in task_state):
raise exception.InstanceInvalidState(
attr='task_state',
instance_uuid=instance['uuid'],
state=instance['task_state'],
method=f.__name__)
if must_have_launched and not instance['launched_at']:
raise exception.InstanceInvalidState(
attr=None,
not_launched=True,
instance_uuid=instance['uuid'],
state=instance['vm_state'],
method=f.__name__)
return f(self, context, instance, *args, **kw)
return inner
return outer
def check_instance_host(function):
@functools.wraps(function)
def wrapped(self, context, instance, *args, **kwargs):
if not instance['host']:
raise exception.InstanceNotReady(instance_id=instance['uuid'])
return function(self, context, instance, *args, **kwargs)
return wrapped
def check_instance_lock(function):
@functools.wraps(function)
def inner(self, context, instance, *args, **kwargs):
if instance['locked'] and not context.is_admin:
raise exception.InstanceIsLocked(instance_uuid=instance['uuid'])
return function(self, context, instance, *args, **kwargs)
return inner
def policy_decorator(scope):
    """Check the corresponding policy prior to execution of the wrapped method."""
def outer(func):
@functools.wraps(func)
def wrapped(self, context, target, *args, **kwargs):
check_policy(context, func.__name__, target, scope)
return func(self, context, target, *args, **kwargs)
return wrapped
return outer
wrap_check_policy = policy_decorator(scope='compute')
wrap_check_security_groups_policy = policy_decorator(
scope='compute:security_groups')
def check_policy(context, action, target, scope='compute'):
_action = '%s:%s' % (scope, action)
nova.policy.enforce(context, _action, target)
def check_instance_cell(fn):
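    # Decorator that checks the instance's cell is known and writable before
    # calling the wrapped compute API method.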
def _wrapped(self, context, instance, *args, **kwargs):
self._validate_cell(instance, fn.__name__)
return fn(self, context, instance, *args, **kwargs)
_wrapped.__name__ = fn.__name__
return _wrapped
class API(base.Base):
"""API for interacting with the compute manager."""
def __init__(self, image_service=None, network_api=None, volume_api=None,
security_group_api=None, **kwargs):
self.image_service = (image_service or
glance.get_default_image_service())
self.network_api = network_api or network.API()
self.volume_api = volume_api or volume.API()
self.security_group_api = (security_group_api or
openstack_driver.get_openstack_security_group_driver())
self.consoleauth_rpcapi = consoleauth_rpcapi.ConsoleAuthAPI()
self.compute_rpcapi = compute_rpcapi.ComputeAPI()
self._compute_task_api = None
self.servicegroup_api = servicegroup.API()
self.notifier = notifier.get_notifier('compute', CONF.host)
super(API, self).__init__(**kwargs)
@property
def compute_task_api(self):
if self._compute_task_api is None:
# TODO(alaski): Remove calls into here from conductor manager so
# that this isn't necessary. #1180540
from nova import conductor
self._compute_task_api = conductor.ComputeTaskAPI()
return self._compute_task_api
@property
def cell_type(self):
try:
return getattr(self, '_cell_type')
except AttributeError:
self._cell_type = cells_opts.get_cell_type()
return self._cell_type
def _cell_read_only(self, cell_name):
"""Is the target cell in a read-only mode?"""
# FIXME(comstud): Add support for this.
return False
def _validate_cell(self, instance, method):
if self.cell_type != 'api':
return
cell_name = instance['cell_name']
if not cell_name:
raise exception.InstanceUnknownCell(
instance_uuid=instance['uuid'])
if self._cell_read_only(cell_name):
raise exception.InstanceInvalidState(
attr="vm_state",
instance_uuid=instance['uuid'],
state="temporary_readonly",
method=method)
def _record_action_start(self, context, instance, action):
instance_action.InstanceAction.action_start(context,
instance['uuid'],
action,
want_result=False)
def _check_injected_file_quota(self, context, injected_files):
"""Enforce quota limits on injected files.
Raises a QuotaError if any limit is exceeded.
"""
if injected_files is None:
return
# Check number of files first
try:
QUOTAS.limit_check(context, injected_files=len(injected_files))
except exception.OverQuota:
raise exception.OnsetFileLimitExceeded()
# OK, now count path and content lengths; we're looking for
# the max...
max_path = 0
max_content = 0
for path, content in injected_files:
max_path = max(max_path, len(path))
max_content = max(max_content, len(content))
try:
QUOTAS.limit_check(context, injected_file_path_bytes=max_path,
injected_file_content_bytes=max_content)
except exception.OverQuota as exc:
# Favor path limit over content limit for reporting
# purposes
if 'injected_file_path_bytes' in exc.kwargs['overs']:
raise exception.OnsetFilePathLimitExceeded()
else:
raise exception.OnsetFileContentLimitExceeded()
def _check_num_instances_quota(self, context, instance_type, min_count,
max_count):
"""Enforce quota limits on number of instances created."""
# Determine requested cores and ram
req_cores = max_count * instance_type['vcpus']
req_ram = max_count * instance_type['memory_mb']
# Check the quota
try:
reservations = QUOTAS.reserve(context, instances=max_count,
cores=req_cores, ram=req_ram)
except exception.OverQuota as exc:
# OK, we exceeded quota; let's figure out why...
quotas = exc.kwargs['quotas']
usages = exc.kwargs['usages']
overs = exc.kwargs['overs']
headroom = dict((res, quotas[res] -
(usages[res]['in_use'] + usages[res]['reserved']))
for res in quotas.keys())
allowed = headroom['instances']
# Reduce 'allowed' instances in line with the cores & ram headroom
if instance_type['vcpus']:
allowed = min(allowed,
headroom['cores'] // instance_type['vcpus'])
if instance_type['memory_mb']:
allowed = min(allowed,
headroom['ram'] // instance_type['memory_mb'])
# Convert to the appropriate exception message
if allowed <= 0:
msg = _("Cannot run any more instances of this type.")
allowed = 0
elif min_count <= allowed <= max_count:
# We're actually OK, but still need reservations
return self._check_num_instances_quota(context, instance_type,
min_count, allowed)
else:
msg = (_("Can only run %s more instances of this type.") %
allowed)
resource = overs[0]
used = quotas[resource] - headroom[resource]
total_allowed = used + headroom[resource]
overs = ','.join(overs)
params = {'overs': overs, 'pid': context.project_id,
'min_count': min_count, 'max_count': max_count,
'msg': msg}
if min_count == max_count:
LOG.warn(_("%(overs)s quota exceeded for %(pid)s,"
" tried to run %(min_count)d instances. %(msg)s"),
params)
else:
LOG.warn(_("%(overs)s quota exceeded for %(pid)s,"
" tried to run between %(min_count)d and"
" %(max_count)d instances. %(msg)s"),
params)
num_instances = (str(min_count) if min_count == max_count else
"%s-%s" % (min_count, max_count))
requested = dict(instances=num_instances, cores=req_cores,
ram=req_ram)
raise exception.TooManyInstances(overs=overs,
req=requested[resource],
used=used, allowed=total_allowed,
resource=resource)
return max_count, reservations
def _check_metadata_properties_quota(self, context, metadata=None):
"""Enforce quota limits on metadata properties."""
if not metadata:
metadata = {}
num_metadata = len(metadata)
try:
QUOTAS.limit_check(context, metadata_items=num_metadata)
except exception.OverQuota as exc:
LOG.warn(_("Quota exceeded for %(pid)s, tried to set "
"%(num_metadata)s metadata properties"),
{'pid': context.project_id,
'num_metadata': num_metadata})
quota_metadata = exc.kwargs['quotas']['metadata_items']
raise exception.MetadataLimitExceeded(allowed=quota_metadata)
        # Because metadata is stored in the DB, we hard-code the size limits.
        # In the future, we may support more variable length strings, so we
        # act as if this is quota-controlled for forwards compatibility.
for k, v in metadata.iteritems():
if len(k) == 0:
msg = _("Metadata property key blank")
LOG.warn(msg)
raise exception.InvalidMetadata(reason=msg)
if len(k) > 255:
msg = _("Metadata property key greater than 255 characters")
LOG.warn(msg)
raise exception.InvalidMetadataSize(reason=msg)
if len(v) > 255:
msg = _("Metadata property value greater than 255 characters")
LOG.warn(msg)
raise exception.InvalidMetadataSize(reason=msg)
def _check_requested_secgroups(self, context, secgroups):
"""
        Check if the requested security groups exist and belong to
        the project.
"""
for secgroup in secgroups:
# NOTE(sdague): default is handled special
if secgroup == "default":
continue
if not self.security_group_api.get(context, secgroup):
raise exception.SecurityGroupNotFoundForProject(
project_id=context.project_id, security_group_id=secgroup)
def _check_requested_networks(self, context, requested_networks):
"""
        Check if the requested networks belong to the project and
        that the fixed IP address provided for each network is within
        the same network block.
"""
self.network_api.validate_networks(context, requested_networks)
@staticmethod
def _handle_kernel_and_ramdisk(context, kernel_id, ramdisk_id, image):
"""Choose kernel and ramdisk appropriate for the instance.
The kernel and ramdisk can be chosen in one of three ways:
1. Passed in with create-instance request.
2. Inherited from image.
3. Forced to None by using `null_kernel` FLAG.
"""
# Inherit from image if not specified
image_properties = image.get('properties', {})
if kernel_id is None:
kernel_id = image_properties.get('kernel_id')
if ramdisk_id is None:
ramdisk_id = image_properties.get('ramdisk_id')
# Force to None if using null_kernel
if kernel_id == str(CONF.null_kernel):
kernel_id = None
ramdisk_id = None
# Verify kernel and ramdisk exist (fail-fast)
if kernel_id is not None:
image_service, kernel_id = glance.get_remote_image_service(
context, kernel_id)
image_service.show(context, kernel_id)
if ramdisk_id is not None:
image_service, ramdisk_id = glance.get_remote_image_service(
context, ramdisk_id)
image_service.show(context, ramdisk_id)
return kernel_id, ramdisk_id
@staticmethod
def _handle_availability_zone(context, availability_zone):
# NOTE(vish): We have a legacy hack to allow admins to specify hosts
# via az using az:host:node. It might be nice to expose an
# api to specify specific hosts to force onto, but for
# now it just supports this legacy hack.
# NOTE(deva): It is also possible to specify az::node, in which case
# the host manager will determine the correct host.
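        # Examples (hypothetical values):
        #   "nova:compute1"       -> az="nova", forced_host="compute1"
        #   "nova::node1"         -> az="nova", forced_node="node1"
        #   "nova:compute1:node1" -> az="nova", forced_host="compute1",
        #                            forced_node="node1"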
forced_host = None
forced_node = None
if availability_zone and ':' in availability_zone:
c = availability_zone.count(':')
if c == 1:
availability_zone, forced_host = availability_zone.split(':')
elif c == 2:
if '::' in availability_zone:
availability_zone, forced_node = \
availability_zone.split('::')
else:
availability_zone, forced_host, forced_node = \
availability_zone.split(':')
else:
raise exception.InvalidInput(
reason="Unable to parse availability_zone")
if not availability_zone:
availability_zone = CONF.default_schedule_zone
if forced_host:
check_policy(context, 'create:forced_host', {})
if forced_node:
check_policy(context, 'create:forced_host', {})
return availability_zone, forced_host, forced_node
def _ensure_auto_disk_config_is_valid(self, auto_disk_config_img,
auto_disk_config, image):
auto_disk_config_disabled = \
utils.is_auto_disk_config_disabled(auto_disk_config_img)
if auto_disk_config_disabled and auto_disk_config:
raise exception.AutoDiskConfigDisabledByImage(image=image)
def _inherit_properties_from_image(self, image, auto_disk_config):
image_properties = image.get('properties', {})
auto_disk_config_img = \
utils.get_auto_disk_config_from_image_props(image_properties)
self._ensure_auto_disk_config_is_valid(auto_disk_config_img,
auto_disk_config,
image.get("id"))
if auto_disk_config is None:
auto_disk_config = strutils.bool_from_string(auto_disk_config_img)
return {
'os_type': image_properties.get('os_type'),
'architecture': image_properties.get('architecture'),
'vm_mode': image_properties.get('vm_mode'),
'auto_disk_config': auto_disk_config
}
def _apply_instance_name_template(self, context, instance, index):
params = {
'uuid': instance['uuid'],
'name': instance['display_name'],
'count': index + 1,
}
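        # e.g. with a hypothetical template of '%(name)s-%(count)d', the
        # second instance of 'web' would be renamed to 'web-2'.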
try:
new_name = (CONF.multi_instance_display_name_template %
params)
except (KeyError, TypeError):
LOG.exception(_('Failed to set instance name using '
'multi_instance_display_name_template.'))
new_name = instance['display_name']
instance.display_name = new_name
if not instance.get('hostname', None):
instance.hostname = utils.sanitize_hostname(new_name)
instance.save()
return instance
def _check_config_drive(self, config_drive):
if config_drive:
try:
bool_val = strutils.bool_from_string(config_drive,
strict=True)
except ValueError:
raise exception.ConfigDriveInvalidValue(option=config_drive)
else:
bool_val = False
# FIXME(comstud): Bug ID 1193438 filed for this. This looks silly,
# but this is because the config drive column is a String. False
# is represented by using an empty string. And for whatever
# reason, we rely on the DB to cast True to a String.
return True if bool_val else ''
def _check_requested_image(self, context, image_id, image, instance_type):
if not image:
# Image checks don't apply when building from volume
return
if image['status'] != 'active':
raise exception.ImageNotActive(image_id=image_id)
if instance_type['memory_mb'] < int(image.get('min_ram') or 0):
raise exception.InstanceTypeMemoryTooSmall()
# NOTE(johannes): root_gb is allowed to be 0 for legacy reasons
# since libvirt interpreted the value differently than other
# drivers. A value of 0 means don't check size.
root_gb = instance_type['root_gb']
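        # The image 'size' is in bytes while root_gb is in GiB (hence the
        # 1024 ** 3 conversion below); 'min_disk' is already in GiB.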
if root_gb:
if int(image.get('size') or 0) > root_gb * (1024 ** 3):
raise exception.InstanceTypeDiskTooSmall()
if int(image.get('min_disk') or 0) > root_gb:
raise exception.InstanceTypeDiskTooSmall()
def _check_and_transform_bdm(self, base_options, min_count, max_count,
block_device_mapping, legacy_bdm):
if legacy_bdm:
# NOTE (ndipanov): Assume root dev name is 'vda' if not supplied.
# It's needed for legacy conversion to work.
root_device_name = (base_options.get('root_device_name') or 'vda')
block_device_mapping = block_device.from_legacy_mapping(
block_device_mapping, base_options.get('image_ref', ''),
root_device_name)
if min_count > 1 or max_count > 1:
if any(map(lambda bdm: bdm['source_type'] == 'volume',
block_device_mapping)):
msg = _('Cannot attach one or more volumes to multiple'
' instances')
raise exception.InvalidRequest(msg)
return block_device_mapping
def _get_image(self, context, image_href):
if not image_href:
return None, {}
(image_service, image_id) = glance.get_remote_image_service(
context, image_href)
image = image_service.show(context, image_id)
return image_id, image
def _checks_for_create_and_rebuild(self, context, image_id, image,
instance_type, metadata,
files_to_inject):
self._check_metadata_properties_quota(context, metadata)
self._check_injected_file_quota(context, files_to_inject)
if image_id is not None:
self._check_requested_image(context, image_id,
image, instance_type)
def _validate_and_build_base_options(self, context, instance_type,
boot_meta, image_href, image_id,
kernel_id, ramdisk_id, display_name,
display_description, key_name,
key_data, security_groups,
availability_zone, forced_host,
user_data, metadata, injected_files,
access_ip_v4, access_ip_v6,
requested_networks, config_drive,
block_device_mapping,
auto_disk_config, reservation_id):
"""Verify all the input parameters regardless of the provisioning
strategy being performed.
"""
if availability_zone:
available_zones = availability_zones.\
get_availability_zones(context.elevated(), True)
if forced_host is None and availability_zone not in \
available_zones:
msg = _('The requested availability zone is not available')
raise exception.InvalidRequest(msg)
if instance_type['disabled']:
raise exception.InstanceTypeNotFound(
instance_type_id=instance_type['id'])
if user_data:
l = len(user_data)
if l > MAX_USERDATA_SIZE:
# NOTE(mikal): user_data is stored in a text column, and
                # the database might silently truncate if it's over length.
raise exception.InstanceUserDataTooLarge(
length=l, maxsize=MAX_USERDATA_SIZE)
try:
base64.decodestring(user_data)
except base64.binascii.Error:
raise exception.InstanceUserDataMalformed()
self._checks_for_create_and_rebuild(context, image_id, boot_meta,
instance_type, metadata, injected_files)
self._check_requested_secgroups(context, security_groups)
self._check_requested_networks(context, requested_networks)
kernel_id, ramdisk_id = self._handle_kernel_and_ramdisk(
context, kernel_id, ramdisk_id, boot_meta)
config_drive = self._check_config_drive(config_drive)
if key_data is None and key_name:
key_pair = keypair_obj.KeyPair.get_by_name(context,
context.user_id,
key_name)
key_data = key_pair.public_key
root_device_name = block_device.properties_root_device_name(
boot_meta.get('properties', {}))
system_metadata = flavors.save_flavor_info(
dict(), instance_type)
base_options = {
'reservation_id': reservation_id,
'image_ref': image_href,
'kernel_id': kernel_id or '',
'ramdisk_id': ramdisk_id or '',
'power_state': power_state.NOSTATE,
'vm_state': vm_states.BUILDING,
'config_drive': config_drive,
'user_id': context.user_id,
'project_id': context.project_id,
'instance_type_id': instance_type['id'],
'memory_mb': instance_type['memory_mb'],
'vcpus': instance_type['vcpus'],
'root_gb': instance_type['root_gb'],
'ephemeral_gb': instance_type['ephemeral_gb'],
'display_name': display_name,
'display_description': display_description or '',
'user_data': user_data,
'key_name': key_name,
'key_data': key_data,
'locked': False,
'metadata': metadata or {},
'access_ip_v4': access_ip_v4,
'access_ip_v6': access_ip_v6,
'availability_zone': availability_zone,
'root_device_name': root_device_name,
'progress': 0,
'system_metadata': system_metadata}
options_from_image = self._inherit_properties_from_image(
boot_meta, auto_disk_config)
base_options.update(options_from_image)
return base_options
def _build_filter_properties(self, context, scheduler_hints, forced_host,
forced_node, instance_type):
filter_properties = dict(scheduler_hints=scheduler_hints)
filter_properties['instance_type'] = instance_type
if forced_host:
filter_properties['force_hosts'] = [forced_host]
if forced_node:
filter_properties['force_nodes'] = [forced_node]
return filter_properties
def _provision_instances(self, context, instance_type, min_count,
max_count, base_options, boot_meta, security_groups,
block_device_mapping):
# Reserve quotas
num_instances, quota_reservations = self._check_num_instances_quota(
context, instance_type, min_count, max_count)
LOG.debug(_("Going to run %s instances...") % num_instances)
instances = []
try:
for i in xrange(num_instances):
instance = instance_obj.Instance()
instance.update(base_options)
instance = self.create_db_entry_for_new_instance(
context, instance_type, boot_meta, instance,
security_groups, block_device_mapping,
num_instances, i)
instances.append(instance)
# send a state update notification for the initial create to
# show it going from non-existent to BUILDING
notifications.send_update_with_states(context, instance, None,
vm_states.BUILDING, None, None, service="api")
# In the case of any exceptions, attempt DB cleanup and rollback the
# quota reservations.
except Exception:
with excutils.save_and_reraise_exception():
try:
for instance in instances:
try:
instance.destroy()
except exception.ObjectActionError:
pass
finally:
QUOTAS.rollback(context, quota_reservations)
# Commit the reservations
QUOTAS.commit(context, quota_reservations)
return instances
def _get_bdm_image_metadata(self, context, block_device_mapping,
legacy_bdm=True):
"""If we are booting from a volume, we need to get the
volume details from Cinder and make sure we pass the
metadata back accordingly.
"""
if not block_device_mapping:
return {}
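        # Legacy BDMs identify the boot device by device_name 'vda';
        # new-style BDMs use boot_index 0.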
for bdm in block_device_mapping:
if legacy_bdm and bdm.get('device_name') != 'vda':
continue
elif not legacy_bdm and bdm.get('boot_index') != 0:
continue
if bdm.get('image_id'):
try:
image_id = bdm['image_id']
image_meta = self.image_service.show(context, image_id)
return image_meta.get('properties', {})
except Exception:
raise exception.InvalidBDMImage(id=image_id)
elif bdm.get('volume_id'):
try:
volume_id = bdm['volume_id']
volume = self.volume_api.get(context, volume_id)
return volume.get('volume_image_metadata', {})
except Exception:
raise exception.InvalidBDMVolume(id=volume_id)
return {}
def _create_instance(self, context, instance_type,
image_href, kernel_id, ramdisk_id,
min_count, max_count,
display_name, display_description,
key_name, key_data, security_groups,
availability_zone, user_data, metadata,
injected_files, admin_password,
access_ip_v4, access_ip_v6,
requested_networks, config_drive,
block_device_mapping, auto_disk_config,
reservation_id=None, scheduler_hints=None,
legacy_bdm=True):
"""Verify all the input parameters regardless of the provisioning
strategy being performed and schedule the instance(s) for
creation.
"""
# Normalize and setup some parameters
if reservation_id is None:
reservation_id = utils.generate_uid('r')
security_groups = security_groups or ['default']
min_count = min_count or 1
max_count = max_count or min_count
block_device_mapping = block_device_mapping or []
if not instance_type:
instance_type = flavors.get_default_flavor()
if image_href:
image_id, boot_meta = self._get_image(context, image_href)
else:
image_id = None
boot_meta = {}
boot_meta['properties'] = \
self._get_bdm_image_metadata(context,
block_device_mapping, legacy_bdm)
self._check_auto_disk_config(image=boot_meta,
auto_disk_config=auto_disk_config)
handle_az = self._handle_availability_zone
availability_zone, forced_host, forced_node = handle_az(context,
availability_zone)
base_options = self._validate_and_build_base_options(context,
instance_type, boot_meta, image_href, image_id, kernel_id,
ramdisk_id, display_name, display_description,
key_name, key_data, security_groups, availability_zone,
forced_host, user_data, metadata, injected_files, access_ip_v4,
access_ip_v6, requested_networks, config_drive,
block_device_mapping, auto_disk_config, reservation_id)
block_device_mapping = self._check_and_transform_bdm(
base_options, min_count, max_count,
block_device_mapping, legacy_bdm)
instances = self._provision_instances(context, instance_type,
min_count, max_count, base_options, boot_meta, security_groups,
block_device_mapping)
filter_properties = self._build_filter_properties(context,
scheduler_hints, forced_host, forced_node, instance_type)
for instance in instances:
self._record_action_start(context, instance,
instance_actions.CREATE)
self.compute_task_api.build_instances(context,
instances=instances, image=boot_meta,
filter_properties=filter_properties,
admin_password=admin_password,
injected_files=injected_files,
requested_networks=requested_networks,
security_groups=security_groups,
block_device_mapping=block_device_mapping,
legacy_bdm=False)
return (instances, reservation_id)
@staticmethod
def _volume_size(instance_type, bdm):
size = bdm.get('volume_size')
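        # Blank devices take their size from the flavor: the swap size for
        # swap devices, otherwise the flavor's ephemeral disk size.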
if size is None and bdm.get('source_type') == 'blank':
if bdm.get('guest_format') == 'swap':
size = instance_type.get('swap', 0)
else:
size = instance_type.get('ephemeral_gb', 0)
return size
def _prepare_image_mapping(self, instance_type, instance_uuid, mappings):
"""Extract and format blank devices from image mappings."""
prepared_mappings = []
for bdm in block_device.mappings_prepend_dev(mappings):
LOG.debug(_("Image bdm %s"), bdm, instance_uuid=instance_uuid)
virtual_name = bdm['virtual']
if virtual_name == 'ami' or virtual_name == 'root':
continue
if not block_device.is_swap_or_ephemeral(virtual_name):
continue
guest_format = bdm.get('guest_format')
if virtual_name == 'swap':
guest_format = 'swap'
if not guest_format:
guest_format = CONF.default_ephemeral_format
values = block_device.BlockDeviceDict({
'device_name': bdm['device'],
'source_type': 'blank',
'destination_type': 'local',
'device_type': 'disk',
'guest_format': guest_format,
'delete_on_termination': True,
'boot_index': -1})
values['volume_size'] = self._volume_size(
instance_type, values)
if values['volume_size'] == 0:
continue
prepared_mappings.append(values)
return prepared_mappings
def _update_block_device_mapping(self, elevated_context,
instance_type, instance_uuid,
block_device_mapping):
"""tell vm driver to attach volume at boot time by updating
BlockDeviceMapping
"""
LOG.debug(_("block_device_mapping %s"), block_device_mapping,
instance_uuid=instance_uuid)
for bdm in block_device_mapping:
bdm['volume_size'] = self._volume_size(instance_type, bdm)
if bdm.get('volume_size') == 0:
continue
bdm['instance_uuid'] = instance_uuid
self.db.block_device_mapping_update_or_create(elevated_context,
bdm,
legacy=False)
def _validate_bdm(self, context, instance, instance_type, all_mappings):
def _subsequent_list(l):
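            # True when the list is a run of consecutive integers,
            # e.g. [0, 1, 2].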
return all(el + 1 == l[i + 1] for i, el in enumerate(l[:-1]))
# Make sure that the boot indexes make sense
boot_indexes = sorted([bdm['boot_index']
for bdm in all_mappings
if bdm.get('boot_index') is not None
and bdm.get('boot_index') >= 0])
if 0 not in boot_indexes or not _subsequent_list(boot_indexes):
raise exception.InvalidBDMBootSequence()
for bdm in all_mappings:
# NOTE(vish): For now, just make sure the volumes are accessible.
# Additionally, check that the volume can be attached to this
# instance.
snapshot_id = bdm.get('snapshot_id')
volume_id = bdm.get('volume_id')
image_id = bdm.get('image_id')
if (image_id is not None and
image_id != instance.get('image_ref')):
try:
self._get_image(context, image_id)
except Exception:
raise exception.InvalidBDMImage(id=image_id)
elif volume_id is not None:
try:
volume = self.volume_api.get(context, volume_id)
self.volume_api.check_attach(context,
volume,
instance=instance)
except Exception:
raise exception.InvalidBDMVolume(id=volume_id)
elif snapshot_id is not None:
try:
self.volume_api.get_snapshot(context, snapshot_id)
except Exception:
raise exception.InvalidBDMSnapshot(id=snapshot_id)
ephemeral_size = sum(bdm.get('volume_size') or 0
for bdm in all_mappings
if block_device.new_format_is_ephemeral(bdm))
if ephemeral_size > instance_type['ephemeral_gb']:
raise exception.InvalidBDMEphemeralSize()
# There should be only one swap
swap_list = [bdm for bdm in all_mappings
if block_device.new_format_is_swap(bdm)]
if len(swap_list) > 1:
msg = _("More than one swap drive requested.")
raise exception.InvalidBDMFormat(details=msg)
if swap_list:
swap_size = swap_list[0].get('volume_size') or 0
if swap_size > instance_type['swap']:
raise exception.InvalidBDMSwapSize()
max_local = CONF.max_local_block_devices
if max_local >= 0:
num_local = len([bdm for bdm in all_mappings
if bdm.get('destination_type') == 'local'])
if num_local > max_local:
raise exception.InvalidBDMLocalsLimit()
def _populate_instance_for_bdm(self, context, instance, instance_type,
image, block_device_mapping):
"""Populate instance block device mapping information."""
instance_uuid = instance['uuid']
image_properties = image.get('properties', {})
image_mapping = image_properties.get('mappings', [])
if image_mapping:
image_mapping = self._prepare_image_mapping(instance_type,
instance_uuid, image_mapping)
# NOTE (ndipanov): For now assume that image mapping is legacy
image_bdm = block_device.from_legacy_mapping(
image_properties.get('block_device_mapping', []),
None, instance['root_device_name'])
self._validate_bdm(context, instance, instance_type,
block_device_mapping + image_mapping + image_bdm)
for mapping in (image_mapping, image_bdm, block_device_mapping):
if not mapping:
continue
self._update_block_device_mapping(context,
instance_type, instance_uuid, mapping)
def _populate_instance_shutdown_terminate(self, instance, image,
block_device_mapping):
"""Populate instance shutdown_terminate information."""
image_properties = image.get('properties', {})
if (block_device_mapping or
image_properties.get('mappings') or
image_properties.get('block_device_mapping')):
instance.shutdown_terminate = False
def _populate_instance_names(self, instance, num_instances):
"""Populate instance display_name and hostname."""
display_name = instance.get('display_name')
if instance.obj_attr_is_set('hostname'):
hostname = instance.get('hostname')
else:
hostname = None
if display_name is None:
display_name = self._default_display_name(instance['uuid'])
instance.display_name = display_name
if hostname is None and num_instances == 1:
# NOTE(russellb) In the multi-instance case, we're going to
# overwrite the display_name using the
# multi_instance_display_name_template. We need the default
# display_name set so that it can be used in the template, though.
# Only set the hostname here if we're only creating one instance.
            # Otherwise, it will be built after the template-based
            # display_name.
hostname = display_name
instance.hostname = utils.sanitize_hostname(hostname)
def _default_display_name(self, instance_uuid):
return "Server %s" % instance_uuid
def _populate_instance_for_create(self, instance, image,
index, security_groups, instance_type):
"""Build the beginning of a new instance."""
if not instance.obj_attr_is_set('uuid'):
# Generate the instance_uuid here so we can use it
# for additional setup before creating the DB entry.
instance['uuid'] = str(uuid.uuid4())
instance.launch_index = index
instance.vm_state = vm_states.BUILDING
instance.task_state = task_states.SCHEDULING
info_cache = instance_info_cache.InstanceInfoCache()
info_cache.instance_uuid = instance.uuid
info_cache.network_info = network_model.NetworkInfo()
instance.info_cache = info_cache
# Store image properties so we can use them later
# (for notifications, etc). Only store what we can.
if not instance.obj_attr_is_set('system_metadata'):
instance.system_metadata = {}
# Make sure we have the dict form that we need for instance_update.
instance['system_metadata'] = utils.instance_sys_meta(instance)
system_meta = utils.get_system_metadata_from_image(
image, instance_type)
# In case we couldn't find any suitable base_image
system_meta.setdefault('image_base_image_ref', instance['image_ref'])
instance['system_metadata'].update(system_meta)
self.security_group_api.populate_security_groups(instance,
security_groups)
return instance
    # NOTE(bcwaldon): No policy check since this is only used by scheduler
    #                 and the compute api. That should probably be cleaned
    #                 up, though.
def create_db_entry_for_new_instance(self, context, instance_type, image,
instance, security_group, block_device_mapping, num_instances,
index):
"""Create an entry in the DB for this new instance,
including any related table updates (such as security group,
etc).
This is called by the scheduler after a location for the
instance has been determined.
"""
self._populate_instance_for_create(instance, image, index,
security_group, instance_type)
self._populate_instance_names(instance, num_instances)
self._populate_instance_shutdown_terminate(instance, image,
block_device_mapping)
self.security_group_api.ensure_default(context)
instance.create(context)
if num_instances > 1:
# NOTE(russellb) We wait until this spot to handle
# multi_instance_display_name_template, because we need
# the UUID from the instance.
instance = self._apply_instance_name_template(context, instance,
index)
# NOTE (ndipanov): This can now raise exceptions but the instance
# has been created, so delete it and re-raise so
# that other cleanup can happen.
try:
self._populate_instance_for_bdm(context, instance,
instance_type, image, block_device_mapping)
except exception.InvalidBDM:
with excutils.save_and_reraise_exception():
self.db.instance_destroy(context, instance['uuid'])
return instance
def _check_create_policies(self, context, availability_zone,
requested_networks, block_device_mapping):
"""Check policies for create()."""
target = {'project_id': context.project_id,
'user_id': context.user_id,
'availability_zone': availability_zone}
check_policy(context, 'create', target)
if requested_networks:
check_policy(context, 'create:attach_network', target)
if block_device_mapping:
check_policy(context, 'create:attach_volume', target)
def _check_multiple_instances_neutron_ports(self, requested_networks):
"""Check whether multiple instances are created from port id(s)."""
for net, ip, port in requested_networks:
if port:
msg = _("Unable to launch multiple instances with"
" a single configured port ID. Please launch your"
" instance one by one with different ports.")
raise exception.MultiplePortsNotApplicable(reason=msg)
@hooks.add_hook("create_instance")
def create(self, context, instance_type,
image_href, kernel_id=None, ramdisk_id=None,
min_count=None, max_count=None,
display_name=None, display_description=None,
key_name=None, key_data=None, security_group=None,
availability_zone=None, user_data=None, metadata=None,
injected_files=None, admin_password=None,
block_device_mapping=None, access_ip_v4=None,
access_ip_v6=None, requested_networks=None, config_drive=None,
auto_disk_config=None, scheduler_hints=None, legacy_bdm=True):
"""
Provision instances, sending instance information to the
scheduler. The scheduler will determine where the instance(s)
go and will handle creating the DB entries.
Returns a tuple of (instances, reservation_id)
"""
self._check_create_policies(context, availability_zone,
requested_networks, block_device_mapping)
if requested_networks and max_count > 1 and utils.is_neutron():
self._check_multiple_instances_neutron_ports(requested_networks)
return self._create_instance(
context, instance_type,
image_href, kernel_id, ramdisk_id,
min_count, max_count,
display_name, display_description,
key_name, key_data, security_group,
availability_zone, user_data, metadata,
injected_files, admin_password,
access_ip_v4, access_ip_v6,
requested_networks, config_drive,
block_device_mapping, auto_disk_config,
scheduler_hints=scheduler_hints,
legacy_bdm=legacy_bdm)
def trigger_provider_fw_rules_refresh(self, context):
"""Called when a rule is added/removed from a provider firewall."""
services = service_obj.ServiceList.get_all_by_topic(context,
CONF.compute_topic)
for service in services:
host_name = service.host
self.compute_rpcapi.refresh_provider_fw_rules(context, host_name)
@wrap_check_policy
def update(self, context, instance, **kwargs):
"""Updates the instance in the datastore.
:param context: The security context
:param instance: The instance to update
:param kwargs: All additional keyword args are treated
as data fields of the instance to be
updated
:returns: A reference to the updated instance
"""
refs = self._update(context, instance, **kwargs)
return refs[1]
def _update(self, context, instance, **kwargs):
# Update the instance record and send a state update notification
# if task or vm state changed
old_ref, instance_ref = self.db.instance_update_and_get_original(
context, instance['uuid'], kwargs)
notifications.send_update(context, old_ref,
instance_ref, service="api")
return dict(old_ref.iteritems()), dict(instance_ref.iteritems())
def _check_auto_disk_config(self, instance=None, image=None,
**extra_instance_updates):
auto_disk_config = extra_instance_updates.get("auto_disk_config")
if auto_disk_config is None:
return
if not image and not instance:
return
if image:
image_props = image.get("properties", {})
auto_disk_config_img = \
utils.get_auto_disk_config_from_image_props(image_props)
image_ref = image.get("id")
else:
sys_meta = utils.instance_sys_meta(instance)
image_ref = sys_meta.get('image_base_image_ref')
auto_disk_config_img = \
utils.get_auto_disk_config_from_instance(sys_meta=sys_meta)
self._ensure_auto_disk_config_is_valid(auto_disk_config_img,
auto_disk_config,
image_ref)
def _delete(self, context, instance, delete_type, cb, **instance_attrs):
if instance['disable_terminate']:
LOG.info(_('instance termination disabled'),
instance=instance)
return
host = instance['host']
bdms = block_device.legacy_mapping(
self.db.block_device_mapping_get_all_by_instance(
context, instance['uuid']))
reservations = None
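        # Account quota against the instance's owner, which may differ from
        # the caller when an admin deletes another project's instance.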
if context.is_admin and context.project_id != instance['project_id']:
project_id = instance['project_id']
else:
project_id = context.project_id
if context.user_id != instance['user_id']:
user_id = instance['user_id']
else:
user_id = context.user_id
try:
# NOTE(maoy): no expected_task_state needs to be set
instance.update(instance_attrs)
instance.progress = 0
instance.save()
new_type_id = instance.instance_type_id
# NOTE(comstud): If we delete the instance locally, we'll
# commit the reservations here. Otherwise, the manager side
# will commit or rollback the reservations based on success.
reservations = self._create_reservations(context,
instance,
new_type_id,
project_id, user_id)
if self.cell_type == 'api':
# NOTE(comstud): If we're in the API cell, we need to
# skip all remaining logic and just call the callback,
# which will cause a cast to the child cell. Also,
# commit reservations here early until we have a better
# way to deal with quotas with cells.
cb(context, instance, bdms, reservations=None)
if reservations:
QUOTAS.commit(context,
reservations,
project_id=project_id,
user_id=user_id)
return
if not host:
try:
compute_utils.notify_about_instance_usage(
self.notifier, context, instance,
"%s.start" % delete_type)
instance.destroy()
compute_utils.notify_about_instance_usage(
self.notifier, context, instance,
"%s.end" % delete_type,
system_metadata=instance.system_metadata)
if reservations:
QUOTAS.commit(context,
reservations,
project_id=project_id,
user_id=user_id)
return
except exception.ObjectActionError:
instance.refresh()
if instance['vm_state'] == vm_states.RESIZED:
self._confirm_resize_on_deleting(context, instance)
is_up = False
try:
service = service_obj.Service.get_by_compute_host(
context.elevated(), instance.host)
if self.servicegroup_api.service_is_up(service):
is_up = True
self._record_action_start(context, instance,
instance_actions.DELETE)
cb(context, instance, bdms, reservations=reservations)
except exception.ComputeHostNotFound:
pass
if not is_up:
# If compute node isn't up, just delete from DB
self._local_delete(context, instance, bdms, delete_type, cb)
if reservations:
QUOTAS.commit(context,
reservations,
project_id=project_id,
user_id=user_id)
reservations = None
except exception.InstanceNotFound:
# NOTE(comstud): Race condition. Instance already gone.
if reservations:
QUOTAS.rollback(context,
reservations,
project_id=project_id,
user_id=user_id)
except Exception:
with excutils.save_and_reraise_exception():
if reservations:
QUOTAS.rollback(context,
reservations,
project_id=project_id,
user_id=user_id)
def _confirm_resize_on_deleting(self, context, instance):
# If in the middle of a resize, use confirm_resize to
# ensure the original instance is cleaned up too
mig_cls = migration_obj.Migration
migration = None
for status in ('finished', 'confirming'):
try:
migration = mig_cls.get_by_instance_and_status(
context.elevated(), instance.uuid, status)
LOG.info(_('Found an unconfirmed migration during delete, '
'id: %(id)s, status: %(status)s') %
{'id': migration.id,
'status': migration.status},
context=context, instance=instance)
break
except exception.MigrationNotFoundByStatus:
pass
if not migration:
LOG.info(_('Instance may have been confirmed during delete'),
context=context, instance=instance)
return
src_host = migration.source_compute
        # Call confirm_resize synchronously (cast=False below), since this
        # can race with terminate_instance.
# The resize is done but awaiting confirmation/reversion,
# so there are two cases:
# 1. up-resize: here -instance['vcpus'/'memory_mb'] match
# the quota usages accounted for this instance,
# so no further quota adjustment is needed
# 2. down-resize: here -instance['vcpus'/'memory_mb'] are
# shy by delta(old, new) from the quota usages accounted
# for this instance, so we must adjust
try:
deltas = self._downsize_quota_delta(context, instance)
except KeyError:
LOG.info(_('Migration %s may have been confirmed during delete') %
migration.id, context=context, instance=instance)
return
downsize_reservations = self._reserve_quota_delta(context,
deltas)
self._record_action_start(context, instance,
instance_actions.CONFIRM_RESIZE)
self.compute_rpcapi.confirm_resize(context,
instance, migration,
src_host, downsize_reservations,
cast=False)
def _create_reservations(self, context, old_instance, new_instance_type_id,
project_id, user_id):
instance_vcpus = old_instance['vcpus']
instance_memory_mb = old_instance['memory_mb']
# NOTE(wangpan): if the instance is resizing, and the resources
# are updated to new instance type, we should use
# the old instance type to create reservation.
# see https://bugs.launchpad.net/nova/+bug/1099729 for more details
if old_instance['task_state'] in (task_states.RESIZE_MIGRATED,
task_states.RESIZE_FINISH):
Migration = migration_obj.Migration
try:
migration = Migration.get_by_instance_and_status(
context.elevated(), old_instance.uuid, 'post-migrating')
except exception.MigrationNotFoundByStatus:
migration = None
if (migration and
new_instance_type_id ==
migration.new_instance_type_id):
old_inst_type_id = migration.old_instance_type_id
try:
old_inst_type = flavors.get_flavor(old_inst_type_id)
except exception.InstanceTypeNotFound:
LOG.warning(_("instance type %d not found"),
old_inst_type_id)
pass
else:
instance_vcpus = old_inst_type['vcpus']
instance_memory_mb = old_inst_type['memory_mb']
LOG.debug(_("going to delete a resizing instance"))
reservations = QUOTAS.reserve(context,
project_id=project_id,
user_id=user_id,
instances=-1,
cores=-instance_vcpus,
ram=-instance_memory_mb)
return reservations
def _local_delete(self, context, instance, bdms, delete_type, cb):
LOG.warning(_("instance's host %s is down, deleting from "
"database") % instance['host'], instance=instance)
instance_uuid = instance['uuid']
instance.info_cache.delete()
compute_utils.notify_about_instance_usage(
self.notifier, context, instance, "%s.start" % delete_type)
elevated = context.elevated()
if self.cell_type != 'api':
self.network_api.deallocate_for_instance(elevated,
instance)
system_meta = self.db.instance_system_metadata_get(context,
instance_uuid)
# cleanup volumes
for bdm in bdms:
if bdm['volume_id']:
# NOTE(vish): We don't have access to correct volume
# connector info, so just pass a fake
# connector. This can be improved when we
# expose get_volume_connector to rpc.
connector = {'ip': '127.0.0.1', 'initiator': 'iqn.fake'}
try:
self.volume_api.terminate_connection(context,
bdm['volume_id'],
connector)
self.volume_api.detach(elevated, bdm['volume_id'])
if bdm['delete_on_termination']:
self.volume_api.delete(context, bdm['volume_id'])
except Exception as exc:
err_str = _("Ignoring volume cleanup failure due to %s")
LOG.warn(err_str % exc, instance=instance)
self.db.block_device_mapping_destroy(context, bdm['id'])
cb(context, instance, bdms, local=True)
instance.destroy()
compute_utils.notify_about_instance_usage(
self.notifier, context, instance, "%s.end" % delete_type,
system_metadata=system_meta)
def _do_delete(self, context, instance, bdms, reservations=None,
local=False):
if local:
instance.vm_state = vm_states.DELETED
instance.task_state = None
instance.terminated_at = timeutils.utcnow()
instance.save()
else:
self.compute_rpcapi.terminate_instance(context, instance, bdms,
reservations=reservations)
def _do_soft_delete(self, context, instance, bdms, reservations=None,
local=False):
if local:
instance.vm_state = vm_states.SOFT_DELETED
instance.task_state = None
instance.terminated_at = timeutils.utcnow()
instance.save()
else:
self.compute_rpcapi.soft_delete_instance(context, instance,
reservations=reservations)
# NOTE(maoy): we allow delete to be called no matter what vm_state says.
@wrap_check_policy
@check_instance_lock
@check_instance_cell
@check_instance_state(vm_state=None, task_state=None,
must_have_launched=True)
def soft_delete(self, context, instance):
"""Terminate an instance."""
LOG.debug(_('Going to try to soft delete instance'),
instance=instance)
self._delete(context, instance, 'soft_delete', self._do_soft_delete,
task_state=task_states.SOFT_DELETING,
deleted_at=timeutils.utcnow())
def _delete_instance(self, context, instance):
self._delete(context, instance, 'delete', self._do_delete,
task_state=task_states.DELETING)
@wrap_check_policy
@check_instance_lock
@check_instance_cell
@check_instance_state(vm_state=None, task_state=None,
must_have_launched=False)
def delete(self, context, instance):
"""Terminate an instance."""
LOG.debug(_("Going to try to terminate instance"), instance=instance)
self._delete_instance(context, instance)
@wrap_check_policy
@check_instance_lock
@check_instance_state(vm_state=[vm_states.SOFT_DELETED])
def restore(self, context, instance):
"""Restore a previously deleted (but not reclaimed) instance."""
# Reserve quotas
instance_type = flavors.extract_flavor(instance)
num_instances, quota_reservations = self._check_num_instances_quota(
context, instance_type, 1, 1)
self._record_action_start(context, instance, instance_actions.RESTORE)
try:
if instance['host']:
instance = self.update(context, instance,
task_state=task_states.RESTORING,
expected_task_state=None,
deleted_at=None)
self.compute_rpcapi.restore_instance(context, instance)
else:
self.update(context,
instance,
vm_state=vm_states.ACTIVE,
task_state=None,
expected_task_state=None,
deleted_at=None)
QUOTAS.commit(context, quota_reservations)
except Exception:
with excutils.save_and_reraise_exception():
QUOTAS.rollback(context, quota_reservations)
@wrap_check_policy
@check_instance_lock
@check_instance_state(vm_state=[vm_states.SOFT_DELETED],
must_have_launched=False)
def force_delete(self, context, instance):
"""Force delete a previously deleted (but not reclaimed) instance."""
self._delete_instance(context, instance)
def force_stop(self, context, instance, do_cast=True):
LOG.debug(_("Going to try to stop instance"), instance=instance)
instance.task_state = task_states.POWERING_OFF
instance.progress = 0
instance.save(expected_task_state=None)
self._record_action_start(context, instance, instance_actions.STOP)
self.compute_rpcapi.stop_instance(context, instance, do_cast=do_cast)
@wrap_check_policy
@check_instance_lock
@check_instance_host
@check_instance_cell
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.RESCUED,
vm_states.ERROR],
task_state=[None])
def stop(self, context, instance, do_cast=True):
"""Stop an instance."""
self.force_stop(context, instance, do_cast)
@wrap_check_policy
@check_instance_lock
@check_instance_host
@check_instance_cell
@check_instance_state(vm_state=[vm_states.STOPPED])
def start(self, context, instance):
"""Start an instance."""
LOG.debug(_("Going to try to start instance"), instance=instance)
instance.task_state = task_states.POWERING_ON
instance.save(expected_task_state=None)
self._record_action_start(context, instance, instance_actions.START)
# TODO(yamahata): injected_files isn't supported right now.
        #                 It is used only for osapi, not for the ec2 api.
# availability_zone isn't used by run_instance.
self.compute_rpcapi.start_instance(context, instance)
    # NOTE(bcwaldon): no policy check here since it should be rolled in to
    #                 search_opts in get_all
def get_active_by_window(self, context, begin, end=None, project_id=None):
"""Get instances that were continuously active over a window."""
return self.db.instance_get_active_by_window_joined(context, begin,
end, project_id)
    # NOTE(bcwaldon): this doesn't really belong in this class
def get_instance_type(self, context, instance_type_id):
"""Get an instance type by instance type id."""
return flavors.get_flavor(instance_type_id, ctxt=context)
def get(self, context, instance_id, want_objects=False):
"""Get a single instance with the given instance_id."""
# NOTE(ameade): we still need to support integer ids for ec2
expected_attrs = ['metadata', 'system_metadata',
'security_groups', 'info_cache']
try:
if uuidutils.is_uuid_like(instance_id):
instance = instance_obj.Instance.get_by_uuid(
context, instance_id, expected_attrs=expected_attrs)
elif utils.is_int_like(instance_id):
instance = instance_obj.Instance.get_by_id(
context, instance_id, expected_attrs=expected_attrs)
else:
raise exception.InstanceNotFound(instance_id=instance_id)
except exception.InvalidID:
raise exception.InstanceNotFound(instance_id=instance_id)
check_policy(context, 'get', instance)
if not want_objects:
instance = obj_base.obj_to_primitive(instance)
return instance
def get_all(self, context, search_opts=None, sort_key='created_at',
sort_dir='desc', limit=None, marker=None, want_objects=False):
"""Get all instances filtered by one of the given parameters.
If there is no filter and the context is an admin, it will retrieve
all instances in the system.
Deleted instances will be returned by default, unless there is a
search option that says otherwise.
The results will be returned sorted in the order specified by the
'sort_dir' parameter using the key specified in the 'sort_key'
parameter.
"""
        # TODO(bcwaldon): determine the best argument for target here
target = {
'project_id': context.project_id,
'user_id': context.user_id,
}
check_policy(context, "get_all", target)
if search_opts is None:
search_opts = {}
if 'all_tenants' in search_opts:
check_policy(context, "get_all_tenants", target)
LOG.debug(_("Searching by: %s") % str(search_opts))
# Fixups for the DB call
filters = {}
def _remap_flavor_filter(flavor_id):
instance_type = flavors.get_flavor_by_flavor_id(
flavor_id)
filters['instance_type_id'] = instance_type['id']
def _remap_fixed_ip_filter(fixed_ip):
# Turn fixed_ip into a regexp match. Since '.' matches
# any character, we need to use regexp escaping for it.
filters['ip'] = '^%s$' % fixed_ip.replace('.', '\\.')
# search_option to filter_name mapping.
filter_mapping = {
'image': 'image_ref',
'name': 'display_name',
'tenant_id': 'project_id',
'flavor': _remap_flavor_filter,
'fixed_ip': _remap_fixed_ip_filter}
# copy from search_opts, doing various remappings as necessary
for opt, value in search_opts.iteritems():
# Do remappings.
# Values not in the filter_mapping table are copied as-is.
# If remapping is None, option is not copied
# If the remapping is a string, it is the filter_name to use
try:
remap_object = filter_mapping[opt]
except KeyError:
filters[opt] = value
else:
# Remaps are strings to translate to, or functions to call
# to do the translating as defined by the table above.
if isinstance(remap_object, basestring):
filters[remap_object] = value
else:
try:
remap_object(value)
# We already know we can't match the filter, so
# return an empty list
except ValueError:
return []
inst_models = self._get_instances_by_filters(context, filters,
sort_key, sort_dir,
limit=limit,
marker=marker)
if want_objects:
return inst_models
# Convert the models to dictionaries
instances = []
for inst_model in inst_models:
instances.append(obj_base.obj_to_primitive(inst_model))
return instances
def _get_instances_by_filters(self, context, filters,
sort_key, sort_dir,
limit=None,
marker=None):
if 'ip6' in filters or 'ip' in filters:
res = self.network_api.get_instance_uuids_by_ip_filter(context,
filters)
# NOTE(jkoelker) It is possible that we will get the same
# instance uuid twice (one for ipv4 and ipv6)
uuids = set([r['instance_uuid'] for r in res])
filters['uuid'] = uuids
fields = ['metadata', 'system_metadata', 'info_cache',
'security_groups']
return instance_obj.InstanceList.get_by_filters(
context, filters=filters, sort_key=sort_key, sort_dir=sort_dir,
limit=limit, marker=marker, expected_attrs=fields)
@wrap_check_policy
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.PAUSED])
def live_snapshot(self, context, instance, name, extra_properties=None,
image_id=None):
"""Live Snapshot the given instance.
:param instance: nova.db.sqlalchemy.models.Instance
:param name: name of the backup or snapshot
:param extra_properties: dict of extra image properties to include
:returns: A dict containing image metadata
"""
if image_id:
# The image entry has already been created, so just pull the
# metadata.
image_meta = self.image_service.show(context, image_id)
else:
image_meta = self._create_image(context, instance, name,
'snapshot', extra_properties=extra_properties)
instance = self.update(context, instance,
task_state=task_states.IMAGE_LIVE_SNAPSHOT,
expected_task_state=None)
self.compute_rpcapi.live_snapshot_instance(context, instance=instance,
image_id=image_meta['id'])
return image_meta
@wrap_check_policy
@check_instance_cell
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.STOPPED])
def backup(self, context, instance, name, backup_type, rotation,
extra_properties=None):
"""Backup the given instance
:param instance: nova.db.sqlalchemy.models.Instance
:param name: name of the backup
:param backup_type: 'daily' or 'weekly'
:param rotation: int representing how many backups to keep around;
None if rotation shouldn't be used (as in the case of snapshots)
:param extra_properties: dict of extra image properties to include
when creating the image.
"""
        props_copy = dict(extra_properties or {}, backup_type=backup_type)
image_meta = self._create_image(context, instance, name,
'backup', extra_properties=props_copy)
# NOTE(comstud): Any changes to this method should also be made
# to the backup_instance() method in nova/cells/messaging.py
instance.task_state = task_states.IMAGE_BACKUP
instance.save(expected_task_state=None)
self.compute_rpcapi.backup_instance(context, instance,
image_meta['id'],
backup_type,
rotation)
return image_meta
@wrap_check_policy
@check_instance_cell
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.STOPPED,
vm_states.PAUSED, vm_states.SUSPENDED])
def snapshot(self, context, instance, name, extra_properties=None):
"""Snapshot the given instance.
:param instance: nova.db.sqlalchemy.models.Instance
:param name: name of the snapshot
:param extra_properties: dict of extra image properties to include
when creating the image.
:returns: A dict containing image metadata
"""
image_meta = self._create_image(context, instance, name,
'snapshot',
extra_properties=extra_properties)
# NOTE(comstud): Any changes to this method should also be made
# to the snapshot_instance() method in nova/cells/messaging.py
instance.task_state = task_states.IMAGE_SNAPSHOT
instance.save(expected_task_state=None)
self.compute_rpcapi.snapshot_instance(context, instance,
image_meta['id'])
return image_meta
def _create_image(self, context, instance, name, image_type,
extra_properties=None):
"""Create new image entry in the image service. This new image
will be reserved for the compute manager to upload a snapshot
or backup.
:param context: security context
:param instance: nova.db.sqlalchemy.models.Instance
:param name: string for name of the snapshot
:param image_type: snapshot | backup
        :param extra_properties: dict of extra image properties to include
                                 (for backups this carries backup_type)
"""
if extra_properties is None:
extra_properties = {}
instance_uuid = instance['uuid']
properties = {
'instance_uuid': instance_uuid,
'user_id': str(context.user_id),
'image_type': image_type,
}
image_ref = instance.image_ref
sent_meta = compute_utils.get_image_metadata(
context, self.image_service, image_ref, instance)
sent_meta['name'] = name
sent_meta['is_public'] = False
# The properties set up above and in extra_properties have precedence
properties.update(extra_properties or {})
sent_meta['properties'].update(properties)
return self.image_service.create(context, sent_meta)
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.STOPPED])
def snapshot_volume_backed(self, context, instance, image_meta, name,
extra_properties=None):
"""Snapshot the given volume-backed instance.
:param instance: nova.db.sqlalchemy.models.Instance
:param image_meta: metadata for the new image
:param name: name of the backup or snapshot
:param extra_properties: dict of extra image properties to include
:returns: the new image metadata
"""
image_meta['name'] = name
properties = image_meta['properties']
if instance['root_device_name']:
properties['root_device_name'] = instance['root_device_name']
properties.update(extra_properties or {})
bdms = self.get_instance_bdms(context, instance)
mapping = []
for bdm in bdms:
if bdm['no_device']:
continue
volume_id = bdm.get('volume_id')
if volume_id:
# create snapshot based on volume_id
volume = self.volume_api.get(context, volume_id)
# NOTE(yamahata): Should we wait for snapshot creation?
                #                 Linux LVM snapshot creation completes in a
                #                 short time, so it doesn't matter for now.
name = _('snapshot for %s') % image_meta['name']
snapshot = self.volume_api.create_snapshot_force(
context, volume['id'], name, volume['display_description'])
bdm['snapshot_id'] = snapshot['id']
bdm['volume_id'] = None
mapping.append(bdm)
for m in block_device.mappings_prepend_dev(properties.get('mappings',
[])):
virtual_name = m['virtual']
if virtual_name in ('ami', 'root'):
continue
assert block_device.is_swap_or_ephemeral(virtual_name)
device_name = m['device']
if device_name in [b['device_name'] for b in mapping
if not b.get('no_device', False)]:
continue
# NOTE(yamahata): swap and ephemeral devices are specified in
# AMI, but disabled for this instance by user.
# So disable those device by no_device.
mapping.append({'device_name': device_name, 'no_device': True})
if mapping:
properties['block_device_mapping'] = mapping
for attr in ('status', 'location', 'id'):
image_meta.pop(attr, None)
# the new image is simply a bucket of properties (particularly the
# block device mapping, kernel and ramdisk IDs) with no image data,
# hence the zero size
image_meta['size'] = 0
return self.image_service.create(context, image_meta, data='')
@wrap_check_policy
@check_instance_lock
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.STOPPED,
vm_states.PAUSED, vm_states.SUSPENDED,
vm_states.ERROR],
task_state=[None, task_states.REBOOTING,
task_states.REBOOTING_HARD,
task_states.RESUMING,
task_states.UNPAUSING,
task_states.PAUSING,
task_states.SUSPENDING])
def reboot(self, context, instance, reboot_type):
"""Reboot the given instance."""
if (reboot_type == 'SOFT' and
(instance['vm_state'] in [vm_states.STOPPED,
vm_states.PAUSED,
vm_states.SUSPENDED,
vm_states.ERROR])):
raise exception.InstanceInvalidState(
attr='vm_state',
instance_uuid=instance['uuid'],
state=instance['vm_state'],
method='reboot')
if ((reboot_type == 'SOFT' and
instance['task_state'] == task_states.REBOOTING) or
(reboot_type == 'HARD' and
instance['task_state'] == task_states.REBOOTING_HARD)):
raise exception.InstanceInvalidState(
attr='task_state',
instance_uuid=instance['uuid'],
state=instance['task_state'],
method='reboot')
state = {'SOFT': task_states.REBOOTING,
'HARD': task_states.REBOOTING_HARD}[reboot_type]
instance.task_state = state
instance.save(expected_task_state=[None, task_states.REBOOTING])
elevated = context.elevated()
self._record_action_start(context, instance, instance_actions.REBOOT)
self.compute_rpcapi.reboot_instance(context, instance=instance,
block_device_info=None,
reboot_type=reboot_type)
@wrap_check_policy
@check_instance_lock
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.STOPPED,
vm_states.ERROR],
task_state=[None])
def rebuild(self, context, instance, image_href, admin_password, **kwargs):
"""Rebuild the given instance with the provided attributes."""
orig_image_ref = instance['image_ref'] or ''
files_to_inject = kwargs.pop('files_to_inject', [])
metadata = kwargs.get('metadata', {})
image_id, image = self._get_image(context, image_href)
self._check_auto_disk_config(image=image, **kwargs)
instance_type = flavors.extract_flavor(instance)
self._checks_for_create_and_rebuild(context, image_id, image,
instance_type, metadata, files_to_inject)
kernel_id, ramdisk_id = self._handle_kernel_and_ramdisk(
context, None, None, image)
def _reset_image_metadata():
"""
Remove old image properties that we're storing as instance
system metadata. These properties start with 'image_'.
Then add the properties for the new image.
"""
# FIXME(comstud): There's a race condition here in that
# if the system_metadata for this instance is updated
# after we do the get and before we update.. those other
# updates will be lost. Since this problem exists in a lot
# of other places, I think it should be addressed in a DB
# layer overhaul.
sys_metadata = self.db.instance_system_metadata_get(context,
instance['uuid'])
orig_sys_metadata = dict(sys_metadata)
# Remove the old keys
for key in sys_metadata.keys():
if key.startswith(utils.SM_IMAGE_PROP_PREFIX):
del sys_metadata[key]
# Add the new ones
new_sys_metadata = utils.get_system_metadata_from_image(
image, instance_type)
sys_metadata.update(new_sys_metadata)
self.db.instance_system_metadata_update(context,
instance['uuid'], sys_metadata, True)
return orig_sys_metadata
instance = self.update(context, instance,
task_state=task_states.REBUILDING,
expected_task_state=None,
# Unfortunately we need to set image_ref early,
# so API users can see it.
image_ref=image_href, kernel_id=kernel_id or "",
ramdisk_id=ramdisk_id or "",
progress=0, **kwargs)
# On a rebuild, since we're potentially changing images, we need to
# wipe out the old image properties that we're storing as instance
# system metadata... and copy in the properties for the new image.
orig_sys_metadata = _reset_image_metadata()
bdms = block_device.legacy_mapping(
self.db.block_device_mapping_get_all_by_instance(
context,
instance['uuid']))
self._record_action_start(context, instance, instance_actions.REBUILD)
self.compute_rpcapi.rebuild_instance(context, instance=instance,
new_pass=admin_password, injected_files=files_to_inject,
image_ref=image_href, orig_image_ref=orig_image_ref,
orig_sys_metadata=orig_sys_metadata, bdms=bdms)
@wrap_check_policy
@check_instance_lock
@check_instance_cell
@check_instance_state(vm_state=[vm_states.RESIZED])
def revert_resize(self, context, instance):
"""Reverts a resize, deleting the 'new' instance in the process."""
elevated = context.elevated()
migration = migration_obj.Migration.get_by_instance_and_status(
elevated, instance.uuid, 'finished')
# reverse quota reservation for increased resource usage
deltas = self._reverse_upsize_quota_delta(context, migration)
reservations = self._reserve_quota_delta(context, deltas)
instance.task_state = task_states.RESIZE_REVERTING
instance.save(expected_task_state=None)
migration.status = 'reverting'
migration.save()
# With cells, the best we can do right now is commit the reservations
# immediately...
if CONF.cells.enable and reservations:
QUOTAS.commit(context, reservations)
reservations = []
self._record_action_start(context, instance,
instance_actions.REVERT_RESIZE)
self.compute_rpcapi.revert_resize(context, instance,
migration,
migration.dest_compute,
reservations)
@wrap_check_policy
@check_instance_lock
@check_instance_cell
@check_instance_state(vm_state=[vm_states.RESIZED])
def confirm_resize(self, context, instance, migration=None):
"""Confirms a migration/resize and deletes the 'old' instance."""
elevated = context.elevated()
if migration is None:
migration = migration_obj.Migration.get_by_instance_and_status(
elevated, instance.uuid, 'finished')
# reserve quota only for any decrease in resource usage
deltas = self._downsize_quota_delta(context, instance)
reservations = self._reserve_quota_delta(context, deltas)
migration.status = 'confirming'
migration.save()
# With cells, the best we can do right now is commit the reservations
# immediately...
if CONF.cells.enable and reservations:
QUOTAS.commit(context, reservations)
reservations = []
self._record_action_start(context, instance,
instance_actions.CONFIRM_RESIZE)
self.compute_rpcapi.confirm_resize(context,
instance,
migration,
migration.source_compute,
reservations)
@staticmethod
def _resize_quota_delta(context, new_instance_type,
old_instance_type, sense, compare):
"""
Calculate any quota adjustment required at a particular point
in the resize cycle.
:param context: the request context
:param new_instance_type: the target instance type
:param old_instance_type: the original instance type
:param sense: the sense of the adjustment, 1 indicates a
forward adjustment, whereas -1 indicates a
reversal of a prior adjustment
:param compare: the direction of the comparison, 1 indicates
we're checking for positive deltas, whereas
-1 indicates negative deltas
"""
def _quota_delta(resource):
return sense * (new_instance_type[resource] -
old_instance_type[resource])
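        # Worked example (hypothetical flavors): upsizing 2 vCPU / 4096 MB to
        # 4 vCPU / 8192 MB with sense=1, compare=1 yields
        # {'cores': 2, 'ram': 4096}; the reverse adjustment uses sense=-1.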
deltas = {}
if compare * _quota_delta('vcpus') > 0:
deltas['cores'] = _quota_delta('vcpus')
if compare * _quota_delta('memory_mb') > 0:
deltas['ram'] = _quota_delta('memory_mb')
return deltas
@staticmethod
def _upsize_quota_delta(context, new_instance_type, old_instance_type):
"""
Calculate deltas required to adjust quota for an instance upsize.
"""
return API._resize_quota_delta(context, new_instance_type,
old_instance_type, 1, 1)
@staticmethod
def _reverse_upsize_quota_delta(context, migration_ref):
"""
Calculate deltas required to reverse a prior upsizing
quota adjustment.
"""
old_instance_type = flavors.get_flavor(
migration_ref['old_instance_type_id'])
new_instance_type = flavors.get_flavor(
migration_ref['new_instance_type_id'])
return API._resize_quota_delta(context, new_instance_type,
old_instance_type, -1, -1)
@staticmethod
def _downsize_quota_delta(context, instance):
"""
Calculate deltas required to adjust quota for an instance downsize.
"""
old_instance_type = flavors.extract_flavor(instance,
'old_')
new_instance_type = flavors.extract_flavor(instance,
'new_')
return API._resize_quota_delta(context, new_instance_type,
old_instance_type, 1, -1)
@staticmethod
def _reserve_quota_delta(context, deltas, project_id=None):
if not deltas:
return
return QUOTAS.reserve(context, project_id=project_id, **deltas)
@staticmethod
def _resize_cells_support(context, reservations, instance,
current_instance_type, new_instance_type):
"""Special API cell logic for resize."""
if reservations:
# With cells, the best we can do right now is commit the
# reservations immediately...
QUOTAS.commit(context, reservations,
project_id=instance.project_id)
# NOTE(johannes/comstud): The API cell needs a local migration
# record for later resize_confirm and resize_reverts to deal
# with quotas. We don't need source and/or destination
# information, just the old and new flavors. Status is set to
# 'finished' since nothing else will update the status along
# the way.
mig = migration_obj.Migration()
mig.instance_uuid = instance.uuid
mig.old_instance_type_id = current_instance_type['id']
mig.new_instance_type_id = new_instance_type['id']
mig.status = 'finished'
mig.create(context.elevated())
@wrap_check_policy
@check_instance_lock
@check_instance_cell
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.STOPPED],
task_state=[None])
def resize(self, context, instance, flavor_id=None,
**extra_instance_updates):
"""Resize (ie, migrate) a running instance.
If flavor_id is None, the process is considered a migration, keeping
the original flavor_id. If flavor_id is not None, the instance should
be migrated to a new host and resized to the new flavor_id.
"""
self._check_auto_disk_config(instance, **extra_instance_updates)
current_instance_type = flavors.extract_flavor(instance)
# If flavor_id is not provided, only migrate the instance.
if not flavor_id:
LOG.debug(_("flavor_id is None. Assuming migration."),
instance=instance)
new_instance_type = current_instance_type
else:
new_instance_type = flavors.get_flavor_by_flavor_id(
flavor_id, read_deleted="no")
current_instance_type_name = current_instance_type['name']
new_instance_type_name = new_instance_type['name']
LOG.debug(_("Old instance type %(current_instance_type_name)s, "
" new instance type %(new_instance_type_name)s"),
{'current_instance_type_name': current_instance_type_name,
'new_instance_type_name': new_instance_type_name},
instance=instance)
# FIXME(sirp): both of these should raise InstanceTypeNotFound instead
if not new_instance_type:
raise exception.FlavorNotFound(flavor_id=flavor_id)
same_instance_type = (current_instance_type['id'] ==
new_instance_type['id'])
# NOTE(sirp): We don't want to force a customer to change their flavor
# when Ops is migrating off of a failed host.
if not same_instance_type and new_instance_type.get('disabled'):
raise exception.FlavorNotFound(flavor_id=flavor_id)
if same_instance_type and flavor_id:
raise exception.CannotResizeToSameFlavor()
# ensure there is sufficient headroom for upsizes
deltas = self._upsize_quota_delta(context, new_instance_type,
current_instance_type)
try:
reservations = self._reserve_quota_delta(context, deltas,
project_id=instance[
'project_id'])
except exception.OverQuota as exc:
quotas = exc.kwargs['quotas']
usages = exc.kwargs['usages']
overs = exc.kwargs['overs']
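            # Per-resource headroom is the quota limit minus what is
            # already in use or reserved; it is only used to build the
            # TooManyInstances error details below.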
headroom = dict((res, quotas[res] -
(usages[res]['in_use'] + usages[res]['reserved']))
for res in quotas.keys())
resource = overs[0]
used = quotas[resource] - headroom[resource]
total_allowed = used + headroom[resource]
overs = ','.join(overs)
LOG.warn(_("%(overs)s quota exceeded for %(pid)s,"
" tried to resize instance."),
{'overs': overs, 'pid': context.project_id})
raise exception.TooManyInstances(overs=overs,
req=deltas[resource],
used=used, allowed=total_allowed,
resource=resource)
instance.task_state = task_states.RESIZE_PREP
instance.progress = 0
instance.update(extra_instance_updates)
instance.save(expected_task_state=None)
filter_properties = {'ignore_hosts': []}
if not CONF.allow_resize_to_same_host:
filter_properties['ignore_hosts'].append(instance['host'])
# Here when flavor_id is None, the process is considered as migrate.
if (not flavor_id and not CONF.allow_migrate_to_same_host):
filter_properties['ignore_hosts'].append(instance['host'])
if self.cell_type == 'api':
# Commit reservations early and create migration record.
self._resize_cells_support(context, reservations, instance,
current_instance_type,
new_instance_type)
reservations = []
self._record_action_start(context, instance, instance_actions.RESIZE)
scheduler_hint = {'filter_properties': filter_properties}
self.compute_task_api.resize_instance(context, instance,
extra_instance_updates, scheduler_hint=scheduler_hint,
flavor=new_instance_type, reservations=reservations)
@wrap_check_policy
@check_instance_lock
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.STOPPED,
vm_states.PAUSED, vm_states.SUSPENDED],
task_state=[None])
def shelve(self, context, instance):
"""Shelve an instance.
Shuts down an instance and frees it up to be removed from the
hypervisor.
"""
instance.task_state = task_states.SHELVING
instance.save(expected_task_state=None)
self._record_action_start(context, instance, instance_actions.SHELVE)
image_id = None
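        # Instances that are not volume-backed get a snapshot image so
        # they can be spawned again on unshelve; volume-backed instances
        # are offloaded directly.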
if not self.is_volume_backed_instance(context, instance):
name = '%s-shelved' % instance['display_name']
image_meta = self._create_image(context, instance, name,
'snapshot')
image_id = image_meta['id']
self.compute_rpcapi.shelve_instance(context, instance=instance,
image_id=image_id)
else:
self.compute_rpcapi.shelve_offload_instance(context,
instance=instance)
@wrap_check_policy
@check_instance_lock
@check_instance_state(vm_state=[vm_states.SHELVED], task_state=[None])
def shelve_offload(self, context, instance):
"""Remove a shelved instance from the hypervisor."""
instance.task_state = task_states.SHELVING_OFFLOADING
instance.save(expected_task_state=None)
self.compute_rpcapi.shelve_offload_instance(context, instance=instance)
@wrap_check_policy
@check_instance_lock
@check_instance_state(vm_state=[vm_states.SHELVED,
vm_states.SHELVED_OFFLOADED], task_state=[None])
def unshelve(self, context, instance):
"""Restore a shelved instance."""
instance.task_state = task_states.UNSHELVING
instance.save(expected_task_state=None)
self._record_action_start(context, instance, instance_actions.UNSHELVE)
self.compute_task_api.unshelve_instance(context, instance)
@wrap_check_policy
@check_instance_lock
def add_fixed_ip(self, context, instance, network_id):
"""Add fixed_ip from specified network to given instance."""
self.compute_rpcapi.add_fixed_ip_to_instance(context,
instance=instance, network_id=network_id)
@wrap_check_policy
@check_instance_lock
def remove_fixed_ip(self, context, instance, address):
"""Remove fixed_ip from specified network to given instance."""
self.compute_rpcapi.remove_fixed_ip_from_instance(context,
instance=instance, address=address)
@wrap_check_policy
@check_instance_lock
@check_instance_cell
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.RESCUED])
def pause(self, context, instance):
"""Pause the given instance."""
instance.task_state = task_states.PAUSING
instance.save(expected_task_state=None)
self._record_action_start(context, instance, instance_actions.PAUSE)
self.compute_rpcapi.pause_instance(context, instance)
@wrap_check_policy
@check_instance_lock
@check_instance_cell
@check_instance_state(vm_state=[vm_states.PAUSED])
def unpause(self, context, instance):
"""Unpause the given instance."""
instance.task_state = task_states.UNPAUSING
instance.save(expected_task_state=None)
self._record_action_start(context, instance, instance_actions.UNPAUSE)
self.compute_rpcapi.unpause_instance(context, instance)
@wrap_check_policy
def get_diagnostics(self, context, instance):
"""Retrieve diagnostics for the given instance."""
return self.compute_rpcapi.get_diagnostics(context, instance=instance)
@wrap_check_policy
@check_instance_lock
@check_instance_cell
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.RESCUED])
def suspend(self, context, instance):
"""Suspend the given instance."""
instance.task_state = task_states.SUSPENDING
instance.save(expected_task_state=None)
self._record_action_start(context, instance, instance_actions.SUSPEND)
self.compute_rpcapi.suspend_instance(context, instance)
@wrap_check_policy
@check_instance_lock
@check_instance_cell
@check_instance_state(vm_state=[vm_states.SUSPENDED])
def resume(self, context, instance):
"""Resume the given instance."""
instance.task_state = task_states.RESUMING
instance.save(expected_task_state=None)
self._record_action_start(context, instance, instance_actions.RESUME)
self.compute_rpcapi.resume_instance(context, instance)
@wrap_check_policy
@check_instance_lock
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.STOPPED,
vm_states.ERROR])
def rescue(self, context, instance, rescue_password=None):
"""Rescue the given instance."""
bdms = self.get_instance_bdms(context, instance, legacy=False)
for bdm in bdms:
if bdm['volume_id']:
volume = self.volume_api.get(context, bdm['volume_id'])
self.volume_api.check_attached(context, volume)
# TODO(ndipanov): This check can be generalized as a decorator to
# check for valid combinations of src and dests - for now check
# if it's booted from volume only
if self.is_volume_backed_instance(context, instance, bdms):
reason = _("Cannot rescue a volume-backed instance")
raise exception.InstanceNotRescuable(instance_id=instance['uuid'],
reason=reason)
self.update(context,
instance,
task_state=task_states.RESCUING,
expected_task_state=None)
self._record_action_start(context, instance, instance_actions.RESCUE)
self.compute_rpcapi.rescue_instance(context, instance=instance,
rescue_password=rescue_password)
@wrap_check_policy
@check_instance_lock
@check_instance_state(vm_state=[vm_states.RESCUED])
def unrescue(self, context, instance):
"""Unrescue the given instance."""
self.update(context,
instance,
task_state=task_states.UNRESCUING,
expected_task_state=None)
self._record_action_start(context, instance, instance_actions.UNRESCUE)
self.compute_rpcapi.unrescue_instance(context, instance=instance)
@wrap_check_policy
@check_instance_lock
@check_instance_state(vm_state=[vm_states.ACTIVE])
def set_admin_password(self, context, instance, password=None):
"""Set the root/admin password for the given instance."""
self.update(context,
instance,
task_state=task_states.UPDATING_PASSWORD,
expected_task_state=None)
self._record_action_start(context, instance,
instance_actions.CHANGE_PASSWORD)
self.compute_rpcapi.set_admin_password(context,
instance=instance,
new_pass=password)
@wrap_check_policy
@check_instance_lock
def inject_file(self, context, instance, path, file_contents):
"""Write a file to the given instance."""
self.compute_rpcapi.inject_file(context, instance=instance, path=path,
file_contents=file_contents)
@wrap_check_policy
@check_instance_host
def get_vnc_console(self, context, instance, console_type):
"""Get a url to an instance Console."""
connect_info = self.compute_rpcapi.get_vnc_console(context,
instance=instance, console_type=console_type)
self.consoleauth_rpcapi.authorize_console(context,
connect_info['token'], console_type,
connect_info['host'], connect_info['port'],
connect_info['internal_access_path'], instance['uuid'])
return {'url': connect_info['access_url']}
@check_instance_host
def get_vnc_connect_info(self, context, instance, console_type):
"""Used in a child cell to get console info."""
connect_info = self.compute_rpcapi.get_vnc_console(context,
instance=instance, console_type=console_type)
return connect_info
@wrap_check_policy
@check_instance_host
def get_spice_console(self, context, instance, console_type):
"""Get a url to an instance Console."""
connect_info = self.compute_rpcapi.get_spice_console(context,
instance=instance, console_type=console_type)
self.consoleauth_rpcapi.authorize_console(context,
connect_info['token'], console_type,
connect_info['host'], connect_info['port'],
connect_info['internal_access_path'], instance['uuid'])
return {'url': connect_info['access_url']}
@check_instance_host
def get_spice_connect_info(self, context, instance, console_type):
"""Used in a child cell to get console info."""
connect_info = self.compute_rpcapi.get_spice_console(context,
instance=instance, console_type=console_type)
return connect_info
@wrap_check_policy
@check_instance_host
def get_console_output(self, context, instance, tail_length=None):
"""Get console output for an instance."""
return self.compute_rpcapi.get_console_output(context,
instance=instance, tail_length=tail_length)
@wrap_check_policy
def lock(self, context, instance):
"""Lock the given instance."""
# Only update the lock if we are an admin (non-owner)
is_owner = instance.project_id == context.project_id
if instance.locked and is_owner:
return
context = context.elevated()
LOG.debug(_('Locking'), context=context, instance=instance)
instance.locked = True
instance.locked_by = 'owner' if is_owner else 'admin'
instance.save()
@wrap_check_policy
def unlock(self, context, instance):
"""Unlock the given instance."""
# If the instance was locked by someone else, check
# that we're allowed to override the lock
is_owner = instance.project_id == context.project_id
expect_locked_by = 'owner' if is_owner else 'admin'
locked_by = instance.locked_by
if locked_by and locked_by != expect_locked_by:
check_policy(context, 'unlock_override', instance)
context = context.elevated()
LOG.debug(_('Unlocking'), context=context, instance=instance)
instance.locked = False
instance.locked_by = None
instance.save()
@wrap_check_policy
def get_lock(self, context, instance):
"""Return the boolean state of given instance's lock."""
return self.get(context, instance['uuid'])['locked']
@wrap_check_policy
@check_instance_lock
@check_instance_cell
def reset_network(self, context, instance):
"""Reset networking on the instance."""
self.compute_rpcapi.reset_network(context, instance=instance)
@wrap_check_policy
@check_instance_lock
@check_instance_cell
def inject_network_info(self, context, instance):
"""Inject network info for the instance."""
self.compute_rpcapi.inject_network_info(context, instance=instance)
@wrap_check_policy
@check_instance_lock
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.PAUSED,
vm_states.SUSPENDED, vm_states.STOPPED,
vm_states.RESIZED, vm_states.SOFT_DELETED],
task_state=None)
def attach_volume(self, context, instance, volume_id, device=None):
"""Attach an existing volume to an existing instance."""
# NOTE(vish): Fail fast if the device is not going to pass. This
# will need to be removed along with the test if we
# change the logic in the manager for what constitutes
# a valid device.
if device and not block_device.match_device(device):
raise exception.InvalidDevicePath(path=device)
# NOTE(vish): This is done on the compute host because we want
# to avoid a race where two devices are requested at
# the same time. When db access is removed from
# compute, the bdm will be created here and we will
# have to make sure that they are assigned atomically.
device = self.compute_rpcapi.reserve_block_device_name(
context, device=device, instance=instance, volume_id=volume_id)
try:
volume = self.volume_api.get(context, volume_id)
self.volume_api.check_attach(context, volume, instance=instance)
self.volume_api.reserve_volume(context, volume_id)
self.compute_rpcapi.attach_volume(context, instance=instance,
volume_id=volume_id, mountpoint=device)
except Exception:
with excutils.save_and_reraise_exception():
self.db.block_device_mapping_destroy_by_instance_and_device(
context, instance['uuid'], device)
return device
def _detach_volume(self, context, instance, volume):
"""Detach volume from instance. This method is separated to make
        it easier for the cells version to override.
"""
self.volume_api.check_detach(context, volume)
self.volume_api.begin_detaching(context, volume['id'])
self.compute_rpcapi.detach_volume(context, instance=instance,
volume_id=volume['id'])
@wrap_check_policy
@check_instance_lock
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.PAUSED,
vm_states.SUSPENDED, vm_states.STOPPED,
vm_states.RESIZED, vm_states.SOFT_DELETED],
task_state=None)
def detach_volume(self, context, instance, volume):
"""Detach a volume from an instance."""
if volume['attach_status'] == 'detached':
msg = _("Volume must be attached in order to detach.")
raise exception.InvalidVolume(reason=msg)
# The caller likely got the instance from volume['instance_uuid']
# in the first place, but let's sanity check.
if volume['instance_uuid'] != instance['uuid']:
raise exception.VolumeUnattached(volume_id=volume['id'])
self._detach_volume(context, instance, volume)
@wrap_check_policy
@check_instance_lock
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.PAUSED,
vm_states.SUSPENDED, vm_states.STOPPED,
vm_states.RESIZED, vm_states.SOFT_DELETED],
task_state=None)
def swap_volume(self, context, instance, old_volume, new_volume):
"""Swap volume attached to an instance."""
if old_volume['attach_status'] == 'detached':
raise exception.VolumeUnattached(volume_id=old_volume['id'])
# The caller likely got the instance from volume['instance_uuid']
# in the first place, but let's sanity check.
if old_volume['instance_uuid'] != instance['uuid']:
msg = _("Old volume is attached to a different instance.")
raise exception.InvalidVolume(reason=msg)
if new_volume['attach_status'] == 'attached':
msg = _("New volume must be detached in order to swap.")
raise exception.InvalidVolume(reason=msg)
if int(new_volume['size']) < int(old_volume['size']):
msg = _("New volume must be the same size or larger.")
raise exception.InvalidVolume(reason=msg)
self.volume_api.check_detach(context, old_volume)
self.volume_api.check_attach(context, new_volume, instance=instance)
self.volume_api.begin_detaching(context, old_volume['id'])
self.volume_api.reserve_volume(context, new_volume['id'])
try:
self.compute_rpcapi.swap_volume(
context, instance=instance,
old_volume_id=old_volume['id'],
new_volume_id=new_volume['id'])
except Exception: # pylint: disable=W0702
with excutils.save_and_reraise_exception():
self.volume_api.roll_detaching(context, old_volume['id'])
self.volume_api.unreserve_volume(context, new_volume['id'])
@wrap_check_policy
def attach_interface(self, context, instance, network_id, port_id,
requested_ip):
"""Use hotplug to add an network adapter to an instance."""
return self.compute_rpcapi.attach_interface(context,
instance=instance, network_id=network_id, port_id=port_id,
requested_ip=requested_ip)
@wrap_check_policy
def detach_interface(self, context, instance, port_id):
"""Detach an network adapter from an instance."""
self.compute_rpcapi.detach_interface(context, instance=instance,
port_id=port_id)
@wrap_check_policy
def get_instance_metadata(self, context, instance):
"""Get all metadata associated with an instance."""
rv = self.db.instance_metadata_get(context, instance['uuid'])
return dict(rv.iteritems())
def get_all_instance_metadata(self, context, search_filts):
return self._get_all_instance_metadata(
context, search_filts, metadata_type='metadata')
def get_all_system_metadata(self, context, search_filts):
return self._get_all_instance_metadata(
context, search_filts, metadata_type='system_metadata')
def _get_all_instance_metadata(self, context, search_filts, metadata_type):
"""Get all metadata."""
def _match_any(pattern_list, string):
return any([re.match(pattern, string)
for pattern in pattern_list])
def _filter_metadata(instance, search_filt, input_metadata):
uuids = search_filt.get('resource_id', [])
keys_filter = search_filt.get('key', [])
values_filter = search_filt.get('value', [])
output_metadata = {}
if uuids and instance.get('uuid') not in uuids:
return {}
for (k, v) in input_metadata.iteritems():
# Both keys and value defined -- AND
if ((keys_filter and values_filter) and
not _match_any(keys_filter, k) and
not _match_any(values_filter, v)):
continue
# Only keys or value is defined
elif ((keys_filter and not _match_any(keys_filter, k)) or
(values_filter and not _match_any(values_filter, v))):
continue
output_metadata[k] = v
return output_metadata
formatted_metadata_list = []
instances = self._get_instances_by_filters(context, filters={},
sort_key='created_at',
sort_dir='desc')
for instance in instances:
try:
check_policy(context, 'get_all_instance_%s' % metadata_type,
instance)
metadata = instance.get(metadata_type, {})
for filt in search_filts:
# By chaining the input to the output, the filters are
# ANDed together
metadata = _filter_metadata(instance, filt, metadata)
for (k, v) in metadata.iteritems():
formatted_metadata_list.append({'key': k, 'value': v,
'instance_id': instance.get('uuid')})
except exception.PolicyNotAuthorized:
# failed policy check - not allowed to
# read this metadata
continue
return formatted_metadata_list
@wrap_check_policy
@check_instance_lock
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.PAUSED,
vm_states.SUSPENDED, vm_states.STOPPED],
task_state=None)
def delete_instance_metadata(self, context, instance, key):
"""Delete the given metadata item from an instance."""
self.db.instance_metadata_delete(context, instance['uuid'], key)
instance['metadata'] = {}
notifications.send_update(context, instance, instance)
self.compute_rpcapi.change_instance_metadata(context,
instance=instance,
diff={key: ['-']})
@wrap_check_policy
@check_instance_lock
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.PAUSED,
vm_states.SUSPENDED, vm_states.STOPPED],
task_state=None)
def update_instance_metadata(self, context, instance,
metadata, delete=False):
"""Updates or creates instance metadata.
If delete is True, metadata items that are not specified in the
`metadata` argument will be deleted.
"""
orig = self.get_instance_metadata(context, instance)
if delete:
_metadata = metadata
else:
_metadata = orig.copy()
_metadata.update(metadata)
self._check_metadata_properties_quota(context, _metadata)
metadata = self.db.instance_metadata_update(context, instance['uuid'],
_metadata, True)
instance['metadata'] = metadata
notifications.send_update(context, instance, instance)
diff = utils.diff_dict(orig, _metadata)
self.compute_rpcapi.change_instance_metadata(context,
instance=instance,
diff=diff)
return _metadata
def get_instance_faults(self, context, instances):
"""Get all faults for a list of instance uuids."""
if not instances:
return {}
for instance in instances:
check_policy(context, 'get_instance_faults', instance)
uuids = [instance['uuid'] for instance in instances]
return self.db.instance_fault_get_by_instance_uuids(context, uuids)
def get_instance_bdms(self, context, instance, legacy=True):
"""Get all bdm tables for specified instance."""
bdms = self.db.block_device_mapping_get_all_by_instance(context,
instance['uuid'])
if legacy:
return block_device.legacy_mapping(bdms)
return bdms
def is_volume_backed_instance(self, context, instance, bdms=None):
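        """Return True if the instance boots from a volume.
        An instance with no image_ref, or whose root block device
        mapping targets a volume, is considered volume-backed.
        """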
if not instance['image_ref']:
return True
if bdms is None:
bdms = self.get_instance_bdms(context, instance, legacy=False)
root_bdm = block_device.get_root_bdm(bdms)
if root_bdm and root_bdm.get('destination_type') == 'volume':
return True
return False
@check_instance_cell
@check_instance_state(vm_state=[vm_states.ACTIVE])
def live_migrate(self, context, instance, block_migration,
disk_over_commit, host_name):
"""Migrate a server lively to a new host."""
LOG.debug(_("Going to try to live migrate instance to %s"),
host_name or "another host", instance=instance)
instance.task_state = task_states.MIGRATING
instance.save(expected_task_state=None)
self.compute_task_api.live_migrate_instance(context, instance,
host_name, block_migration=block_migration,
disk_over_commit=disk_over_commit)
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.STOPPED],
task_state=[None])
def evacuate(self, context, instance, host, on_shared_storage,
admin_password=None):
"""Running evacuate to target host.
Checking vm compute host state, if the host not in expected_state,
raising an exception.
"""
LOG.debug(_('vm evacuation scheduled'))
inst_host = instance['host']
service = service_obj.Service.get_by_compute_host(context, inst_host)
if self.servicegroup_api.service_is_up(service):
msg = (_('Instance compute service state on %s '
'expected to be down, but it was up.') % inst_host)
LOG.error(msg)
raise exception.ComputeServiceInUse(host=inst_host)
instance = self.update(context, instance, expected_task_state=None,
task_state=task_states.REBUILDING)
self._record_action_start(context, instance, instance_actions.EVACUATE)
return self.compute_rpcapi.rebuild_instance(context,
instance=instance,
new_pass=admin_password,
injected_files=None,
image_ref=None,
orig_image_ref=None,
orig_sys_metadata=None,
bdms=None,
recreate=True,
on_shared_storage=on_shared_storage,
host=host)
def get_migrations(self, context, filters):
"""Get all migrations for the given filters."""
return migration_obj.MigrationList.get_by_filters(context, filters)
@wrap_check_policy
def volume_snapshot_create(self, context, volume_id, create_info):
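        """Create a snapshot of the given volume via the compute host
        serving the instance the volume is attached to.
        """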
bdm = self.db.block_device_mapping_get_by_volume_id(context,
volume_id, ['instance'])
self.compute_rpcapi.volume_snapshot_create(context, bdm['instance'],
volume_id, create_info)
snapshot = {
'snapshot': {
'id': create_info.get('id'),
'volumeId': volume_id
}
}
return snapshot
@wrap_check_policy
def volume_snapshot_delete(self, context, volume_id, snapshot_id,
delete_info):
bdm = self.db.block_device_mapping_get_by_volume_id(context,
volume_id, ['instance'])
self.compute_rpcapi.volume_snapshot_delete(context, bdm['instance'],
volume_id, snapshot_id, delete_info)
class HostAPI(base.Base):
"""Sub-set of the Compute Manager API for managing host operations."""
def __init__(self, rpcapi=None):
self.rpcapi = rpcapi or compute_rpcapi.ComputeAPI()
self.servicegroup_api = servicegroup.API()
super(HostAPI, self).__init__()
def _assert_host_exists(self, context, host_name, must_be_up=False):
"""Raise HostNotFound if compute host doesn't exist."""
service = service_obj.Service.get_by_compute_host(context, host_name)
if not service:
raise exception.HostNotFound(host=host_name)
if must_be_up and not self.servicegroup_api.service_is_up(service):
raise exception.ComputeServiceUnavailable(host=host_name)
return service['host']
def set_host_enabled(self, context, host_name, enabled):
"""Sets the specified host's ability to accept new instances."""
host_name = self._assert_host_exists(context, host_name)
return self.rpcapi.set_host_enabled(context, enabled=enabled,
host=host_name)
def get_host_uptime(self, context, host_name):
"""Returns the result of calling "uptime" on the target host."""
host_name = self._assert_host_exists(context, host_name,
must_be_up=True)
return self.rpcapi.get_host_uptime(context, host=host_name)
def host_power_action(self, context, host_name, action):
"""Reboots, shuts down or powers up the host."""
host_name = self._assert_host_exists(context, host_name)
return self.rpcapi.host_power_action(context, action=action,
host=host_name)
def set_host_maintenance(self, context, host_name, mode):
"""Start/Stop host maintenance window. On start, it triggers
guest VMs evacuation.
"""
host_name = self._assert_host_exists(context, host_name)
return self.rpcapi.host_maintenance_mode(context,
host_param=host_name, mode=mode, host=host_name)
def service_get_all(self, context, filters=None, set_zones=False):
"""Returns a list of services, optionally filtering the results.
If specified, 'filters' should be a dictionary containing services
        attributes and matching values. For example, to get a list of
        services for the 'compute' topic, use filters={'topic': 'compute'}.
"""
if filters is None:
filters = {}
disabled = filters.pop('disabled', None)
if 'availability_zone' in filters:
set_zones = True
services = service_obj.ServiceList.get_all(context, disabled,
set_zones=set_zones)
ret_services = []
for service in services:
for key, val in filters.iteritems():
if service[key] != val:
break
else:
# All filters matched.
ret_services.append(service)
return ret_services
def service_get_by_compute_host(self, context, host_name):
"""Get service entry for the given compute hostname."""
return service_obj.Service.get_by_compute_host(context, host_name)
def service_update(self, context, host_name, binary, params_to_update):
"""Enable / Disable a service.
For compute services, this stops new builds and migrations going to
the host.
"""
service = service_obj.Service.get_by_args(context, host_name,
binary)
service.update(params_to_update)
service.save()
return service
def instance_get_all_by_host(self, context, host_name):
"""Return all instances on the given host."""
return self.db.instance_get_all_by_host(context, host_name)
def task_log_get_all(self, context, task_name, period_beginning,
period_ending, host=None, state=None):
"""Return the task logs within a given range, optionally
filtering by host and/or state.
"""
return self.db.task_log_get_all(context, task_name,
period_beginning,
period_ending,
host=host,
state=state)
def compute_node_get(self, context, compute_id):
"""Return compute node entry for particular integer ID."""
return self.db.compute_node_get(context, int(compute_id))
def compute_node_get_all(self, context):
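        """Return all compute node records."""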
return self.db.compute_node_get_all(context)
def compute_node_search_by_hypervisor(self, context, hypervisor_match):
return self.db.compute_node_search_by_hypervisor(context,
hypervisor_match)
def compute_node_statistics(self, context):
return self.db.compute_node_statistics(context)
class InstanceActionAPI(base.Base):
"""Sub-set of the Compute Manager API for managing instance actions."""
def actions_get(self, context, instance):
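        """Return all actions recorded for the given instance."""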
return instance_action.InstanceActionList.get_by_instance_uuid(
context, instance['uuid'])
def action_get_by_request_id(self, context, instance, request_id):
return instance_action.InstanceAction.get_by_request_id(
context, instance['uuid'], request_id)
def action_events_get(self, context, instance, action_id):
return instance_action.InstanceActionEventList.get_by_action(
context, action_id)
class AggregateAPI(base.Base):
"""Sub-set of the Compute Manager API for managing host aggregates."""
def __init__(self, **kwargs):
self.compute_rpcapi = compute_rpcapi.ComputeAPI()
super(AggregateAPI, self).__init__(**kwargs)
@wrap_exception()
def create_aggregate(self, context, aggregate_name, availability_zone):
"""Creates the model for the aggregate."""
aggregate = aggregate_obj.Aggregate()
aggregate.name = aggregate_name
if availability_zone:
aggregate.metadata = {'availability_zone': availability_zone}
aggregate.create(context)
aggregate = self._reformat_aggregate_info(aggregate)
# To maintain the same API result as before.
del aggregate['hosts']
del aggregate['metadata']
return aggregate
def get_aggregate(self, context, aggregate_id):
"""Get an aggregate by id."""
aggregate = aggregate_obj.Aggregate.get_by_id(context, aggregate_id)
return self._reformat_aggregate_info(aggregate)
def get_aggregate_list(self, context):
"""Get all the aggregates."""
aggregates = aggregate_obj.AggregateList.get_all(context)
return [self._reformat_aggregate_info(agg) for agg in aggregates]
@wrap_exception()
def update_aggregate(self, context, aggregate_id, values):
"""Update the properties of an aggregate."""
aggregate = aggregate_obj.Aggregate.get_by_id(context, aggregate_id)
if 'name' in values:
aggregate.name = values.pop('name')
if values:
aggregate.metadata = values
aggregate.save()
# If updated values include availability_zones, then the cache
# which stored availability_zones and host need to be reset
if values.get('availability_zone'):
availability_zones.reset_cache()
return self._reformat_aggregate_info(aggregate)
@wrap_exception()
def update_aggregate_metadata(self, context, aggregate_id, metadata):
"""Updates the aggregate metadata."""
aggregate = aggregate_obj.Aggregate.get_by_id(context, aggregate_id)
aggregate.update_metadata(metadata)
return aggregate
@wrap_exception()
def delete_aggregate(self, context, aggregate_id):
"""Deletes the aggregate."""
aggregate_payload = {'aggregate_id': aggregate_id}
compute_utils.notify_about_aggregate_update(context,
"delete.start",
aggregate_payload)
aggregate = aggregate_obj.Aggregate.get_by_id(context,
aggregate_id)
if len(aggregate.hosts) > 0:
raise exception.InvalidAggregateAction(action='delete',
aggregate_id=aggregate_id,
reason='not empty')
aggregate.destroy()
compute_utils.notify_about_aggregate_update(context,
"delete.end",
aggregate_payload)
def _check_az_for_host(self, aggregate_meta, host_az, aggregate_id):
# NOTE(mtreinish) The availability_zone key returns a set of
# zones so loop over each zone. However there should only
# ever be one zone in the set because an aggregate can only
# have a single availability zone set at one time.
for aggregate_az in aggregate_meta["availability_zone"]:
# NOTE(mtreinish) Ensure that the aggregate_az is not none
# if it is none then that is just a regular aggregate and
# it is valid to have a host in multiple aggregates.
if aggregate_az and aggregate_az != host_az:
msg = _("Host already in availability zone "
"%s") % host_az
action_name = "add_host_to_aggregate"
raise exception.InvalidAggregateAction(
action=action_name, aggregate_id=aggregate_id,
reason=msg)
@wrap_exception()
def add_host_to_aggregate(self, context, aggregate_id, host_name):
"""Adds the host to an aggregate."""
aggregate_payload = {'aggregate_id': aggregate_id,
'host_name': host_name}
compute_utils.notify_about_aggregate_update(context,
"addhost.start",
aggregate_payload)
# validates the host; ComputeHostNotFound is raised if invalid
service_obj.Service.get_by_compute_host(context, host_name)
host_az = availability_zones.get_host_availability_zone(context,
host_name)
if host_az and host_az != CONF.default_availability_zone:
aggregate_meta = self.db.aggregate_metadata_get_by_metadata_key(
context, aggregate_id, 'availability_zone')
if aggregate_meta.get("availability_zone"):
self._check_az_for_host(aggregate_meta, host_az, aggregate_id)
aggregate = aggregate_obj.Aggregate.get_by_id(context, aggregate_id)
aggregate.add_host(context, host_name)
#NOTE(jogo): Send message to host to support resource pools
self.compute_rpcapi.add_aggregate_host(context,
aggregate=aggregate, host_param=host_name, host=host_name)
aggregate_payload.update({'name': aggregate['name']})
compute_utils.notify_about_aggregate_update(context,
"addhost.end",
aggregate_payload)
return self._reformat_aggregate_info(aggregate)
@wrap_exception()
def remove_host_from_aggregate(self, context, aggregate_id, host_name):
"""Removes host from the aggregate."""
aggregate_payload = {'aggregate_id': aggregate_id,
'host_name': host_name}
compute_utils.notify_about_aggregate_update(context,
"removehost.start",
aggregate_payload)
# validates the host; ComputeHostNotFound is raised if invalid
service_obj.Service.get_by_compute_host(context, host_name)
aggregate = aggregate_obj.Aggregate.get_by_id(context, aggregate_id)
aggregate.delete_host(host_name)
self.compute_rpcapi.remove_aggregate_host(context,
aggregate=aggregate, host_param=host_name, host=host_name)
compute_utils.notify_about_aggregate_update(context,
"removehost.end",
aggregate_payload)
return self._reformat_aggregate_info(aggregate)
def _reformat_aggregate_info(self, aggregate):
"""Builds a dictionary with aggregate props, metadata and hosts."""
return dict(aggregate.iteritems())
class KeypairAPI(base.Base):
"""Subset of the Compute Manager API for managing key pairs."""
def _notify(self, context, event_suffix, keypair_name):
payload = {
'tenant_id': context.project_id,
'user_id': context.user_id,
'key_name': keypair_name,
}
notify = notifier.get_notifier(service='api')
notify.info(context, 'keypair.%s' % event_suffix, payload)
def _validate_new_key_pair(self, context, user_id, key_name):
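        """Check that the keypair name is safe and of valid length and
        that creating it would not exceed the key_pairs quota.
        """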
safe_chars = "_- " + string.digits + string.ascii_letters
clean_value = "".join(x for x in key_name if x in safe_chars)
if clean_value != key_name:
raise exception.InvalidKeypair(
reason=_("Keypair name contains unsafe characters"))
if not 0 < len(key_name) < 256:
raise exception.InvalidKeypair(
reason=_('Keypair name must be between '
'1 and 255 characters long'))
count = QUOTAS.count(context, 'key_pairs', user_id)
try:
QUOTAS.limit_check(context, key_pairs=count + 1)
except exception.OverQuota:
raise exception.KeypairLimitExceeded()
@exception.wrap_exception(notifier=notifier.get_notifier(service='api'))
def import_key_pair(self, context, user_id, key_name, public_key):
"""Import a key pair using an existing public key."""
self._validate_new_key_pair(context, user_id, key_name)
self._notify(context, 'import.start', key_name)
fingerprint = crypto.generate_fingerprint(public_key)
keypair = keypair_obj.KeyPair()
keypair.user_id = user_id
keypair.name = key_name
keypair.fingerprint = fingerprint
keypair.public_key = public_key
keypair.create(context)
self._notify(context, 'import.end', key_name)
return keypair
@exception.wrap_exception(notifier=notifier.get_notifier(service='api'))
def create_key_pair(self, context, user_id, key_name):
"""Create a new key pair."""
self._validate_new_key_pair(context, user_id, key_name)
self._notify(context, 'create.start', key_name)
private_key, public_key, fingerprint = crypto.generate_key_pair()
keypair = keypair_obj.KeyPair()
keypair.user_id = user_id
keypair.name = key_name
keypair.fingerprint = fingerprint
keypair.public_key = public_key
keypair.create(context)
self._notify(context, 'create.end', key_name)
return keypair, private_key
@exception.wrap_exception(notifier=notifier.get_notifier(service='api'))
def delete_key_pair(self, context, user_id, key_name):
"""Delete a keypair by name."""
self._notify(context, 'delete.start', key_name)
keypair_obj.KeyPair.destroy_by_name(context, user_id, key_name)
self._notify(context, 'delete.end', key_name)
def get_key_pairs(self, context, user_id):
"""List key pairs."""
return keypair_obj.KeyPairList.get_by_user(context, user_id)
def get_key_pair(self, context, user_id, key_name):
"""Get a keypair by name."""
return keypair_obj.KeyPair.get_by_name(context, user_id, key_name)
class SecurityGroupAPI(base.Base, security_group_base.SecurityGroupBase):
"""
Sub-set of the Compute API related to managing security groups
and security group rules
"""
# The nova security group api does not use a uuid for the id.
id_is_uuid = False
def __init__(self, **kwargs):
super(SecurityGroupAPI, self).__init__(**kwargs)
self.security_group_rpcapi = compute_rpcapi.SecurityGroupAPI()
def validate_property(self, value, property, allowed):
"""
Validate given security group property.
:param value: the value to validate, as a string or unicode
:param property: the property, either 'name' or 'description'
:param allowed: the range of characters allowed
"""
try:
val = value.strip()
except AttributeError:
msg = _("Security group %s is not a string or unicode") % property
self.raise_invalid_property(msg)
if not val:
msg = _("Security group %s cannot be empty.") % property
self.raise_invalid_property(msg)
if allowed and not re.match(allowed, val):
# Some validation to ensure that values match API spec.
# - Alphanumeric characters, spaces, dashes, and underscores.
# TODO(Daviey): LP: #813685 extend beyond group_name checking, and
# probably create a param validator that can be used elsewhere.
msg = (_("Value (%(value)s) for parameter Group%(property)s is "
"invalid. Content limited to '%(allowed)s'.") %
{'value': value, 'allowed': allowed,
'property': property.capitalize()})
self.raise_invalid_property(msg)
if len(val) > 255:
msg = _("Security group %s should not be greater "
"than 255 characters.") % property
self.raise_invalid_property(msg)
def ensure_default(self, context):
"""Ensure that a context has a security group.
Creates a security group for the security context if it does not
already exist.
:param context: the security context
"""
self.db.security_group_ensure_default(context)
def create_security_group(self, context, name, description):
try:
reservations = QUOTAS.reserve(context, security_groups=1)
except exception.OverQuota:
msg = _("Quota exceeded, too many security groups.")
self.raise_over_quota(msg)
LOG.audit(_("Create Security Group %s"), name, context=context)
try:
self.ensure_default(context)
group = {'user_id': context.user_id,
'project_id': context.project_id,
'name': name,
'description': description}
try:
group_ref = self.db.security_group_create(context, group)
except exception.SecurityGroupExists:
msg = _('Security group %s already exists') % name
self.raise_group_already_exists(msg)
# Commit the reservation
QUOTAS.commit(context, reservations)
except Exception:
with excutils.save_and_reraise_exception():
QUOTAS.rollback(context, reservations)
return group_ref
def update_security_group(self, context, security_group,
name, description):
if security_group['name'] in RO_SECURITY_GROUPS:
msg = (_("Unable to update system group '%s'") %
security_group['name'])
self.raise_invalid_group(msg)
group = {'name': name,
'description': description}
group_ref = self.db.security_group_update(context,
security_group['id'],
group)
return group_ref
def get(self, context, name=None, id=None, map_exception=False):
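        """Return a single security group, looked up by name or id."""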
self.ensure_default(context)
try:
if name:
return self.db.security_group_get_by_name(context,
context.project_id,
name)
elif id:
return self.db.security_group_get(context, id)
except exception.NotFound as exp:
if map_exception:
msg = exp.format_message()
self.raise_not_found(msg)
else:
raise
def list(self, context, names=None, ids=None, project=None,
search_opts=None):
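        """Return security groups filtered by names, ids or project."""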
self.ensure_default(context)
groups = []
if names or ids:
if names:
for name in names:
groups.append(self.db.security_group_get_by_name(context,
project,
name))
if ids:
for id in ids:
groups.append(self.db.security_group_get(context, id))
elif context.is_admin:
# TODO(eglynn): support a wider set of search options than just
# all_tenants, at least include the standard filters defined for
# the EC2 DescribeSecurityGroups API for the non-admin case also
if (search_opts and 'all_tenants' in search_opts):
groups = self.db.security_group_get_all(context)
else:
groups = self.db.security_group_get_by_project(context,
project)
elif project:
groups = self.db.security_group_get_by_project(context, project)
return groups
def destroy(self, context, security_group):
if security_group['name'] in RO_SECURITY_GROUPS:
msg = _("Unable to delete system group '%s'") % \
security_group['name']
self.raise_invalid_group(msg)
if self.db.security_group_in_use(context, security_group['id']):
msg = _("Security group is still in use")
self.raise_invalid_group(msg)
# Get reservations
try:
reservations = QUOTAS.reserve(context, security_groups=-1)
except Exception:
reservations = None
LOG.exception(_("Failed to update usages deallocating "
"security group"))
LOG.audit(_("Delete security group %s"), security_group['name'],
context=context)
self.db.security_group_destroy(context, security_group['id'])
# Commit the reservations
if reservations:
QUOTAS.commit(context, reservations)
def is_associated_with_server(self, security_group, instance_uuid):
"""Check if the security group is already associated
with the instance. If Yes, return True.
"""
if not security_group:
return False
instances = security_group.get('instances')
if not instances:
return False
for inst in instances:
if (instance_uuid == inst['uuid']):
return True
return False
@wrap_check_security_groups_policy
def add_to_instance(self, context, instance, security_group_name):
"""Add security group to the instance."""
security_group = self.db.security_group_get_by_name(context,
context.project_id,
security_group_name)
instance_uuid = instance['uuid']
#check if the security group is associated with the server
if self.is_associated_with_server(security_group, instance_uuid):
raise exception.SecurityGroupExistsForInstance(
security_group_id=security_group['id'],
instance_id=instance_uuid)
self.db.instance_add_security_group(context.elevated(),
instance_uuid,
security_group['id'])
# NOTE(comstud): No instance_uuid argument to this compute manager
# call
self.security_group_rpcapi.refresh_security_group_rules(context,
security_group['id'], host=instance['host'])
@wrap_check_security_groups_policy
def remove_from_instance(self, context, instance, security_group_name):
"""Remove the security group associated with the instance."""
security_group = self.db.security_group_get_by_name(context,
context.project_id,
security_group_name)
instance_uuid = instance['uuid']
#check if the security group is associated with the server
if not self.is_associated_with_server(security_group, instance_uuid):
raise exception.SecurityGroupNotExistsForInstance(
security_group_id=security_group['id'],
instance_id=instance_uuid)
self.db.instance_remove_security_group(context.elevated(),
instance_uuid,
security_group['id'])
# NOTE(comstud): No instance_uuid argument to this compute manager
# call
self.security_group_rpcapi.refresh_security_group_rules(context,
security_group['id'], host=instance['host'])
def get_rule(self, context, id):
self.ensure_default(context)
try:
return self.db.security_group_rule_get(context, id)
except exception.NotFound:
msg = _("Rule (%s) not found") % id
self.raise_not_found(msg)
def add_rules(self, context, id, name, vals):
"""Add security group rule(s) to security group.
Note: the Nova security group API doesn't support adding multiple
security group rules at once but the EC2 one does. Therefore,
this function is written to support both.
"""
count = QUOTAS.count(context, 'security_group_rules', id)
try:
projected = count + len(vals)
QUOTAS.limit_check(context, security_group_rules=projected)
except exception.OverQuota:
msg = _("Quota exceeded, too many security group rules.")
self.raise_over_quota(msg)
msg = _("Authorize security group ingress %s")
LOG.audit(msg, name, context=context)
rules = [self.db.security_group_rule_create(context, v) for v in vals]
self.trigger_rules_refresh(context, id=id)
return rules
def remove_rules(self, context, security_group, rule_ids):
msg = _("Revoke security group ingress %s")
LOG.audit(msg, security_group['name'], context=context)
for rule_id in rule_ids:
self.db.security_group_rule_destroy(context, rule_id)
# NOTE(vish): we removed some rules, so refresh
self.trigger_rules_refresh(context, id=security_group['id'])
def remove_default_rules(self, context, rule_ids):
for rule_id in rule_ids:
self.db.security_group_default_rule_destroy(context, rule_id)
def add_default_rules(self, context, vals):
rules = [self.db.security_group_default_rule_create(context, v)
for v in vals]
return rules
def default_rule_exists(self, context, values):
"""Indicates whether the specified rule values are already
defined in the default security group rules.
"""
for rule in self.db.security_group_default_rule_list(context):
is_duplicate = True
keys = ('cidr', 'from_port', 'to_port', 'protocol')
for key in keys:
if rule.get(key) != values.get(key):
is_duplicate = False
break
if is_duplicate:
return rule.get('id') or True
return False
def get_all_default_rules(self, context):
try:
rules = self.db.security_group_default_rule_list(context)
except Exception:
msg = 'cannot get default security group rules'
raise exception.SecurityGroupDefaultRuleNotFound(msg)
return rules
def get_default_rule(self, context, id):
try:
return self.db.security_group_default_rule_get(context, id)
except exception.NotFound:
msg = _("Rule (%s) not found") % id
self.raise_not_found(msg)
def validate_id(self, id):
try:
return int(id)
except ValueError:
msg = _("Security group id should be integer")
self.raise_invalid_property(msg)
def trigger_rules_refresh(self, context, id):
"""Called when a rule is added to or removed from a security_group."""
security_group = self.db.security_group_get(
context, id, columns_to_join=['instances'])
for instance in security_group['instances']:
if instance['host'] is not None:
self.security_group_rpcapi.refresh_instance_security_rules(
context, instance['host'], instance)
def trigger_members_refresh(self, context, group_ids):
"""Called when a security group gains a new or loses a member.
Sends an update request to each compute node for each instance for
which this is relevant.
"""
# First, we get the security group rules that reference these groups as
# the grantee..
security_group_rules = set()
for group_id in group_ids:
security_group_rules.update(
self.db.security_group_rule_get_by_security_group_grantee(
context,
group_id))
# ..then we distill the rules into the groups to which they belong..
security_groups = set()
for rule in security_group_rules:
security_group = self.db.security_group_get(
context, rule['parent_group_id'],
columns_to_join=['instances'])
security_groups.add(security_group)
# ..then we find the instances that are members of these groups..
instances = {}
for security_group in security_groups:
for instance in security_group['instances']:
if instance['uuid'] not in instances:
instances[instance['uuid']] = instance
# ..then we send a request to refresh the rules for each instance.
for instance in instances.values():
if instance['host']:
self.security_group_rpcapi.refresh_instance_security_rules(
context, instance['host'], instance)
def get_instance_security_groups(self, context, instance_uuid,
detailed=False):
if detailed:
return self.db.security_group_get_by_instance(context,
instance_uuid)
instance = self.db.instance_get_by_uuid(context, instance_uuid)
groups = instance.get('security_groups')
if groups:
return [{'name': group['name']} for group in groups]
def populate_security_groups(self, instance, security_groups):
if not security_groups:
# Make sure it's an empty list and not None
security_groups = []
instance.security_groups = security_group_obj.make_secgroup_list(
security_groups)
| apache-2.0 | -2,824,583,000,643,304,000 | 42.505509 | 79 | 0.569476 | false |
ammarkhann/FinalSeniorCode | lib/python2.7/site-packages/google/cloud/gapic/errorreporting/v1beta1/error_group_service_client.py | 1 | 10469 | # Copyright 2017, Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# EDITING INSTRUCTIONS
# This file was generated from the file
# https://github.com/google/googleapis/blob/master/google/devtools/clouderrorreporting/v1beta1/error_group_service.proto,
# and updates to that file get reflected here through a refresh process.
# For the short term, the refresh process will only be runnable by Google engineers.
#
# The only allowed edits are to method and file documentation. A 3-way
# merge preserves those additions if the generated source changes.
"""Accesses the google.devtools.clouderrorreporting.v1beta1 ErrorGroupService API."""
import collections
import json
import os
import pkg_resources
import platform
from google.gax import api_callable
from google.gax import config
from google.gax import path_template
import google.gax
from google.cloud.gapic.errorreporting.v1beta1 import enums
from google.cloud.proto.devtools.clouderrorreporting.v1beta1 import common_pb2
from google.cloud.proto.devtools.clouderrorreporting.v1beta1 import error_group_service_pb2
class ErrorGroupServiceClient(object):
"""Service for retrieving and updating individual error groups."""
SERVICE_ADDRESS = 'clouderrorreporting.googleapis.com'
"""The default address of the service."""
DEFAULT_SERVICE_PORT = 443
"""The default port of the service."""
# The scopes needed to make gRPC calls to all of the methods defined in
# this service
_ALL_SCOPES = ('https://www.googleapis.com/auth/cloud-platform', )
_GROUP_PATH_TEMPLATE = path_template.PathTemplate(
'projects/{project}/groups/{group}')
@classmethod
def group_path(cls, project, group):
"""Returns a fully-qualified group resource name string."""
return cls._GROUP_PATH_TEMPLATE.render({
'project': project,
'group': group,
})
@classmethod
def match_project_from_group_name(cls, group_name):
"""Parses the project from a group resource.
Args:
group_name (string): A fully-qualified path representing a group
resource.
Returns:
A string representing the project.
"""
return cls._GROUP_PATH_TEMPLATE.match(group_name).get('project')
@classmethod
def match_group_from_group_name(cls, group_name):
"""Parses the group from a group resource.
Args:
group_name (string): A fully-qualified path representing a group
resource.
Returns:
A string representing the group.
"""
return cls._GROUP_PATH_TEMPLATE.match(group_name).get('group')
def __init__(self,
service_path=SERVICE_ADDRESS,
port=DEFAULT_SERVICE_PORT,
channel=None,
credentials=None,
ssl_credentials=None,
scopes=None,
client_config=None,
app_name=None,
app_version='',
lib_name=None,
lib_version='',
metrics_headers=()):
"""Constructor.
Args:
service_path (string): The domain name of the API remote host.
port (int): The port on which to connect to the remote host.
channel (:class:`grpc.Channel`): A ``Channel`` instance through
which to make calls.
credentials (object): The authorization credentials to attach to
requests. These credentials identify this application to the
service.
ssl_credentials (:class:`grpc.ChannelCredentials`): A
``ChannelCredentials`` instance for use with an SSL-enabled
channel.
scopes (list[string]): A list of OAuth2 scopes to attach to requests.
client_config (dict):
A dictionary for call options for each method. See
:func:`google.gax.construct_settings` for the structure of
this data. Falls back to the default config if not specified
or the specified config is missing data points.
app_name (string): The name of the application calling
the service. Recommended for analytics purposes.
app_version (string): The version of the application calling
the service. Recommended for analytics purposes.
lib_name (string): The API library software used for calling
the service. (Unless you are writing an API client itself,
leave this as default.)
lib_version (string): The API library software version used
for calling the service. (Unless you are writing an API client
itself, leave this as default.)
metrics_headers (dict): A dictionary of values for tracking
client library metrics. Ultimately serializes to a string
(e.g. 'foo/1.2.3 bar/3.14.1'). This argument should be
considered private.
Returns:
A ErrorGroupServiceClient object.
"""
# Unless the calling application specifically requested
# OAuth scopes, request everything.
if scopes is None:
scopes = self._ALL_SCOPES
# Initialize an empty client config, if none is set.
if client_config is None:
client_config = {}
# Initialize metrics_headers as an ordered dictionary
# (cuts down on cardinality of the resulting string slightly).
metrics_headers = collections.OrderedDict(metrics_headers)
metrics_headers['gl-python'] = platform.python_version()
# The library may or may not be set, depending on what is
# calling this client. Newer client libraries set the library name
# and version.
if lib_name:
metrics_headers[lib_name] = lib_version
# Finally, track the GAPIC package version.
metrics_headers['gapic'] = pkg_resources.get_distribution(
'gapic-google-cloud-error-reporting-v1beta1', ).version
# Load the configuration defaults.
default_client_config = json.loads(
pkg_resources.resource_string(
__name__, 'error_group_service_client_config.json').decode())
defaults = api_callable.construct_settings(
'google.devtools.clouderrorreporting.v1beta1.ErrorGroupService',
default_client_config,
client_config,
config.STATUS_CODE_NAMES,
metrics_headers=metrics_headers, )
self.error_group_service_stub = config.create_stub(
error_group_service_pb2.ErrorGroupServiceStub,
channel=channel,
service_path=service_path,
service_port=port,
credentials=credentials,
scopes=scopes,
ssl_credentials=ssl_credentials)
self._get_group = api_callable.create_api_call(
self.error_group_service_stub.GetGroup,
settings=defaults['get_group'])
self._update_group = api_callable.create_api_call(
self.error_group_service_stub.UpdateGroup,
settings=defaults['update_group'])
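    # Illustrative usage sketch (not part of the generated client): constructing the
    # client with defaults, or with explicit credentials and scopes. The credentials
    # object and scope value below are assumptions made for the example only.
    #
    #   >>> api = ErrorGroupServiceClient()
    #   >>> api = ErrorGroupServiceClient(
    #   ...     credentials=my_oauth2_credentials,  # hypothetical credentials object
    #   ...     scopes=['https://www.googleapis.com/auth/cloud-platform'])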
# Service calls
def get_group(self, group_name, options=None):
"""
Get the specified group.
Example:
>>> from google.cloud.gapic.errorreporting.v1beta1 import error_group_service_client
>>> api = error_group_service_client.ErrorGroupServiceClient()
>>> group_name = api.group_path('[PROJECT]', '[GROUP]')
>>> response = api.get_group(group_name)
Args:
group_name (string): [Required] The group resource name. Written as
<code>projects/<var>projectID</var>/groups/<var>group_name</var></code>.
Call
<a href=\"/error-reporting/reference/rest/v1beta1/projects.groupStats/list\">
<code>groupStats.list</code></a> to return a list of groups belonging to
this project.
Example: <code>projects/my-project-123/groups/my-group</code>
options (:class:`google.gax.CallOptions`): Overrides the default
            settings for this call, e.g. timeout, retries etc.
Returns:
A :class:`google.cloud.proto.devtools.clouderrorreporting.v1beta1.common_pb2.ErrorGroup` instance.
Raises:
:exc:`google.gax.errors.GaxError` if the RPC is aborted.
:exc:`ValueError` if the parameters are invalid.
"""
# Create the request object.
request = error_group_service_pb2.GetGroupRequest(
group_name=group_name)
return self._get_group(request, options)
def update_group(self, group, options=None):
"""
Replace the data for the specified group.
Fails if the group does not exist.
Example:
>>> from google.cloud.gapic.errorreporting.v1beta1 import error_group_service_client
>>> from google.cloud.proto.devtools.clouderrorreporting.v1beta1 import common_pb2
>>> api = error_group_service_client.ErrorGroupServiceClient()
>>> group = common_pb2.ErrorGroup()
>>> response = api.update_group(group)
Args:
group (:class:`google.cloud.proto.devtools.clouderrorreporting.v1beta1.common_pb2.ErrorGroup`): [Required] The group which replaces the resource on the server.
options (:class:`google.gax.CallOptions`): Overrides the default
            settings for this call, e.g. timeout, retries etc.
Returns:
A :class:`google.cloud.proto.devtools.clouderrorreporting.v1beta1.common_pb2.ErrorGroup` instance.
Raises:
:exc:`google.gax.errors.GaxError` if the RPC is aborted.
:exc:`ValueError` if the parameters are invalid.
"""
# Create the request object.
request = error_group_service_pb2.UpdateGroupRequest(group=group)
return self._update_group(request, options)
| mit | 8,283,711,628,382,273,000 | 40.543651 | 169 | 0.647244 | false |
kmoocdev2/edx-platform | openedx/features/course_experience/views/course_outline.py | 1 | 5344 | """
Views to show a course outline.
"""
import re
import datetime
from completion import waffle as completion_waffle
from django.contrib.auth.models import User
from django.template.context_processors import csrf
from django.template.loader import render_to_string
from opaque_keys.edx.keys import CourseKey
from pytz import UTC
from waffle.models import Switch
from web_fragments.fragment import Fragment
from courseware.courses import get_course_overview_with_access
from openedx.core.djangoapps.plugin_api.views import EdxFragmentView
from student.models import CourseEnrollment
from util.milestones_helpers import get_course_content_milestones
from xmodule.modulestore.django import modulestore
from ..utils import get_course_outline_block_tree, get_resume_block
DEFAULT_COMPLETION_TRACKING_START = datetime.datetime(2018, 1, 24, tzinfo=UTC)
class CourseOutlineFragmentView(EdxFragmentView):
"""
Course outline fragment to be shown in the unified course view.
"""
def render_to_fragment(self, request, course_id=None, page_context=None, **kwargs):
"""
Renders the course outline as a fragment.
"""
course_key = CourseKey.from_string(course_id)
course_overview = get_course_overview_with_access(request.user, 'load', course_key, check_if_enrolled=True)
course = modulestore().get_course(course_key)
course_block_tree = get_course_outline_block_tree(request, course_id)
if not course_block_tree:
return None
context = {
'csrf': csrf(request)['csrf_token'],
'course': course_overview,
'due_date_display_format': course.due_date_display_format,
'blocks': course_block_tree
}
resume_block = get_resume_block(course_block_tree)
if not resume_block:
self.mark_first_unit_to_resume(course_block_tree)
xblock_display_names = self.create_xblock_id_and_name_dict(course_block_tree)
gated_content = self.get_content_milestones(request, course_key)
context['gated_content'] = gated_content
context['xblock_display_names'] = xblock_display_names
html = render_to_string('course_experience/course-outline-fragment.html', context)
return Fragment(html)
def create_xblock_id_and_name_dict(self, course_block_tree, xblock_display_names=None):
"""
Creates a dictionary mapping xblock IDs to their names, using a course block tree.
"""
if xblock_display_names is None:
xblock_display_names = {}
if course_block_tree.get('id'):
xblock_display_names[course_block_tree['id']] = course_block_tree['display_name']
if course_block_tree.get('children'):
for child in course_block_tree['children']:
self.create_xblock_id_and_name_dict(child, xblock_display_names)
return xblock_display_names
def get_content_milestones(self, request, course_key):
"""
Returns dict of subsections with prerequisites and whether the prerequisite has been completed or not
"""
def _get_key_of_prerequisite(namespace):
return re.sub('.gating', '', namespace)
all_course_milestones = get_course_content_milestones(course_key)
uncompleted_prereqs = {
milestone['content_id']
for milestone in get_course_content_milestones(course_key, user_id=request.user.id)
}
gated_content = {
milestone['content_id']: {
'completed_prereqs': milestone['content_id'] not in uncompleted_prereqs,
'prerequisite': _get_key_of_prerequisite(milestone['namespace'])
}
for milestone in all_course_milestones
}
return gated_content
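    # Shape of the mapping returned above (illustrative sketch; the usage-key strings
    # are made up for the example):
    #
    #   {
    #       'block-v1:Org+Course+Run+type@sequential+block@exam': {
    #           'completed_prereqs': False,
    #           'prerequisite': 'block-v1:Org+Course+Run+type@sequential+block@prereq',
    #       },
    #   }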
def user_enrolled_after_completion_collection(self, user, course_key):
"""
Checks that the user has enrolled in the course after 01/24/2018, the date that
the completion API began data collection. If the user has enrolled in the course
before this date, they may see incomplete collection data. This is a temporary
check until all active enrollments are created after the date.
"""
user = User.objects.get(username=user)
try:
user_enrollment = CourseEnrollment.objects.get(
user=user,
course_id=course_key,
is_active=True
)
return user_enrollment.created > self._completion_data_collection_start()
except CourseEnrollment.DoesNotExist:
return False
def _completion_data_collection_start(self):
"""
Returns the date that the ENABLE_COMPLETION_TRACKING waffle switch was enabled.
"""
# pylint: disable=protected-access
switch_name = completion_waffle.waffle()._namespaced_name(completion_waffle.ENABLE_COMPLETION_TRACKING)
try:
return Switch.objects.get(name=switch_name).created
except Switch.DoesNotExist:
return DEFAULT_COMPLETION_TRACKING_START
def mark_first_unit_to_resume(self, block_node):
children = block_node.get('children')
if children:
children[0]['resume_block'] = True
self.mark_first_unit_to_resume(children[0])
| agpl-3.0 | -8,452,376,268,452,737,000 | 38.007299 | 115 | 0.661302 | false |
openpermissions/repository-srv | tests/unit/controllers/test_assets_handler.py | 1 | 5620 | # -*- coding: utf-8 -*-
# Copyright 2016 Open Permissions Platform Coalition
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software distributed under the License is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and limitations under the License.
import pytest
from mock import MagicMock, patch
from koi import exceptions
from koi.test_helpers import gen_test, make_future
from repository.controllers.assets_handler import _validate_body, AssetsHandler
TEST_NAMESPACE = 'c8ab01'
TOKEN = 'eyJhbGciOiJSUzI1NiIsInR5cCI6IkpXVCJ9.eyJjbGllbnQiOnsic2VydmljZV90eXBlIjoiaW5kZXgiLCJvcmdhbmlzYXRpb25faWQiOiJ0Z' \
'XN0Y28iLCJpZCI6IjQyMjVmNDc3NGQ2ODc0YTY4NTY1YTA0MTMwMDAxMTQ0In0sImRlbGVnYXRlIjpmYWxzZSwiYXVkIjoibG9jYWxob3N0Ojg' \
'wMDcvYXV0aG9yaXplIiwiZXhwIjoxNDU2OTM5NDk0LCJpc3MiOiJsb2NhbGhvc3Q6ODAwNy90b2tlbiIsInNjb3BlIjoicmVhZCIsImdyYW50X' \
'3R5cGUiOiJjbGllbnRfY3JlZGVudGlhbHMiLCJzdWIiOiI0MjI1ZjQ3NzRkNjg3NGE2ODU2NWEwNDEzMDAwMTE0NCJ9.J4gFHMU-v_1f5xgWjd' \
'42JaZhHpYfaccPtvq5uZMox3jvcs2A7q1exI3YIB75x589wp6QRpChr5C-If4bR71vpZ09cSMoX4UKR5WOaMDAMeMh2QPEHYUCE1VWEyrr_o1i' \
'ljSk-bNfo8Mpufl67NL0J7rU7ZJ-o3ZwgoPIDTA1x1utcrvlLKTlWkmYGqEEBXxuL0V_vOGHW6UohXAA87jdMlgQRNTaZo75ETqbKp4sPIuiXz' \
'OoidEPjbvZpo7LkAfAea9Js-B6muWWaI_i2FO2K3c6XJvxZAiyufL-nE-fx1vSJQeOixEr6zbnOF_s7byETxHKlCwOrxpx0wqPrE0ttw'
class PartialMockedHandler(AssetsHandler):
def __init__(self, content_type=None):
super(PartialMockedHandler, self).__init__(application=MagicMock(),
request=MagicMock())
self.finish = MagicMock()
self.token = {'sub': 'client1', 'client': {'id': 'testco'}}
self.request.headers = {}
if content_type:
self.request.headers['Content-Type'] = content_type
def test__validate_body():
request = MagicMock()
request.body = 'test'
_validate_body(request)
def test__validate_body_no_body():
request = MagicMock()
request.body = None
with pytest.raises(exceptions.HTTPError) as exc:
_validate_body(request)
assert exc.value.status_code == 400
assert exc.value.errors == 'No Data in Body'
def test__validate_body_empty_body():
request = MagicMock()
request.body = ''
with pytest.raises(exceptions.HTTPError) as exc:
_validate_body(request)
assert exc.value.status_code == 400
assert exc.value.errors == 'Body is empty string'
@patch('repository.controllers.assets_handler._validate_body', return_value=None)
@patch('repository.controllers.assets_handler.helper')
@patch('repository.controllers.assets_handler.audit')
@patch('repository.controllers.assets_handler.asset')
@gen_test
def test_repository_assets_handler_post(assets, audit, helper, _validate_body):
helper.validate.return_value = None
assets.store.return_value = make_future('asset data')
audit.log_added_assets.return_value = make_future(None)
handler = PartialMockedHandler()
yield handler.post(TEST_NAMESPACE)
assert assets.store.call_count == 1
audit.log_added_assets.assert_called_once_with(
'asset data',
{'sub': 'client1', 'client': {'id': 'testco'}},
repository_id='c8ab01')
handler.finish.assert_called_once_with({"status": 200})
@patch('repository.controllers.assets_handler._validate_body', return_value=None)
@patch('repository.controllers.assets_handler.helper')
@patch('repository.controllers.assets_handler.asset')
def test_repository_assets_handler_post_error(assets, helper, _validate_body):
helper.validate.return_value = None
def mock_store(body, namespace, content_type):
raise exceptions.HTTPError(400, 'errormsg')
assets.store.side_effect = mock_store
handler = PartialMockedHandler()
with pytest.raises(exceptions.HTTPError) as exc:
yield handler.post(TEST_NAMESPACE)
assert exc.value.status_code == 400
@gen_test
def test_repository_assets_handler_post_invalid_content_type():
def mock_validate(_):
raise exceptions.HTTPError(415, 'errormsg')
handler = PartialMockedHandler(content_type='application/json')
with pytest.raises(exceptions.HTTPError) as exc:
yield handler.post(TEST_NAMESPACE)
assert exc.value.status_code == 415
@patch('repository.controllers.assets_handler._validate_body')
@gen_test
def test_repository_assets_handler_post_invalid_body(_validate_body):
def mock_validate(_):
raise exceptions.HTTPError(400, 'errormsg')
_validate_body.side_effect = mock_validate
handler = PartialMockedHandler()
with pytest.raises(exceptions.HTTPError) as exc:
yield handler.post(TEST_NAMESPACE)
assert exc.value.status_code == 400
@patch('repository.controllers.assets_handler._validate_body', return_value=None)
@patch('repository.controllers.assets_handler.helper')
@gen_test
def test_repository_assets_handler_post_invalid_body_xml(helper, _validate_body):
def mock_validate(data, format=None):
raise exceptions.HTTPError(400, 'errormsg')
helper.validate.side_effect = mock_validate
handler = PartialMockedHandler()
with pytest.raises(exceptions.HTTPError) as exc:
yield handler.post('repository1')
assert exc.value.status_code == 400
| apache-2.0 | -9,183,863,592,397,668,000 | 37.758621 | 122 | 0.738256 | false |
davidsminor/gaffer | python/GafferSceneUI/ExecutableRenderUI.py | 1 | 2102 | ##########################################################################
#
# Copyright (c) 2014, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import fnmatch
import GafferUI
import GafferScene
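# Note on the registrations below (a best-effort reading, not from the original
# author): the first call matches every plug name and returns None, which suppresses
# nodule creation by default; the second restores the standard nodule for the "in"
# plug so scene connections stay visible on ExecutableRender nodes.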
GafferUI.Nodule.registerNodule( GafferScene.ExecutableRender.staticTypeId(), fnmatch.translate( "*" ), lambda plug : None )
GafferUI.Nodule.registerNodule( GafferScene.ExecutableRender.staticTypeId(), "in", GafferUI.StandardNodule )
| bsd-3-clause | -8,632,754,748,786,710,000 | 47.883721 | 123 | 0.694577 | false |
FilipeMaia/arrayfire-python | arrayfire/array.py | 1 | 30063 | #######################################################
# Copyright (c) 2015, ArrayFire
# All rights reserved.
#
# This file is distributed under 3-clause BSD license.
# The complete license agreement can be obtained at:
# http://arrayfire.com/licenses/BSD-3-Clause
########################################################
"""
arrayfire.Array class and helper functions.
"""
import inspect
from .library import *
from .util import *
from .util import _is_number
from .bcast import _bcast_var
from .base import *
from .index import *
from .index import _Index4
def _create_array(buf, numdims, idims, dtype):
out_arr = ct.c_void_p(0)
c_dims = dim4(idims[0], idims[1], idims[2], idims[3])
safe_call(backend.get().af_create_array(ct.pointer(out_arr), ct.c_void_p(buf),
numdims, ct.pointer(c_dims), dtype.value))
return out_arr
def _create_empty_array(numdims, idims, dtype):
out_arr = ct.c_void_p(0)
c_dims = dim4(idims[0], idims[1], idims[2], idims[3])
safe_call(backend.get().af_create_handle(ct.pointer(out_arr),
numdims, ct.pointer(c_dims), dtype.value))
return out_arr
def constant_array(val, d0, d1=None, d2=None, d3=None, dtype=Dtype.f32):
"""
    Internal function to create a C array. Should not be used externally.
"""
if not isinstance(dtype, ct.c_int):
if isinstance(dtype, int):
dtype = ct.c_int(dtype)
elif isinstance(dtype, Dtype):
dtype = ct.c_int(dtype.value)
else:
raise TypeError("Invalid dtype")
out = ct.c_void_p(0)
dims = dim4(d0, d1, d2, d3)
if isinstance(val, complex):
c_real = ct.c_double(val.real)
c_imag = ct.c_double(val.imag)
if (dtype.value != Dtype.c32.value and dtype.value != Dtype.c64.value):
dtype = Dtype.c32.value
safe_call(backend.get().af_constant_complex(ct.pointer(out), c_real, c_imag,
4, ct.pointer(dims), dtype))
elif dtype.value == Dtype.s64.value:
c_val = ct.c_longlong(val.real)
safe_call(backend.get().af_constant_long(ct.pointer(out), c_val, 4, ct.pointer(dims)))
elif dtype.value == Dtype.u64.value:
c_val = ct.c_ulonglong(val.real)
safe_call(backend.get().af_constant_ulong(ct.pointer(out), c_val, 4, ct.pointer(dims)))
else:
c_val = ct.c_double(val)
safe_call(backend.get().af_constant(ct.pointer(out), c_val, 4, ct.pointer(dims), dtype))
return out
def _binary_func(lhs, rhs, c_func):
out = Array()
other = rhs
if (_is_number(rhs)):
ldims = dim4_to_tuple(lhs.dims())
rty = implicit_dtype(rhs, lhs.type())
other = Array()
other.arr = constant_array(rhs, ldims[0], ldims[1], ldims[2], ldims[3], rty.value)
elif not isinstance(rhs, Array):
raise TypeError("Invalid parameter to binary function")
safe_call(c_func(ct.pointer(out.arr), lhs.arr, other.arr, _bcast_var.get()))
return out
def _binary_funcr(lhs, rhs, c_func):
out = Array()
other = lhs
if (_is_number(lhs)):
rdims = dim4_to_tuple(rhs.dims())
lty = implicit_dtype(lhs, rhs.type())
other = Array()
other.arr = constant_array(lhs, rdims[0], rdims[1], rdims[2], rdims[3], lty.value)
elif not isinstance(lhs, Array):
raise TypeError("Invalid parameter to binary function")
c_func(ct.pointer(out.arr), other.arr, rhs.arr, _bcast_var.get())
return out
def _ctype_to_lists(ctype_arr, dim, shape, offset=0):
if (dim == 0):
return list(ctype_arr[offset : offset + shape[0]])
else:
dim_len = shape[dim]
res = [[]] * dim_len
for n in range(dim_len):
res[n] = _ctype_to_lists(ctype_arr, dim - 1, shape, offset)
offset += shape[0]
return res
def _slice_to_length(key, dim):
tkey = [key.start, key.stop, key.step]
if tkey[0] is None:
tkey[0] = 0
elif tkey[0] < 0:
tkey[0] = dim - tkey[0]
if tkey[1] is None:
tkey[1] = dim
elif tkey[1] < 0:
tkey[1] = dim - tkey[1]
if tkey[2] is None:
tkey[2] = 1
return int(((tkey[1] - tkey[0] - 1) / tkey[2]) + 1)
def _get_info(dims, buf_len):
elements = 1
numdims = len(dims)
idims = [1]*4
for i in range(numdims):
elements *= dims[i]
idims[i] = dims[i]
if (elements == 0):
if (buf_len != 0):
idims = [buf_len, 1, 1, 1]
numdims = 1
else:
raise RuntimeError("Invalid size")
return numdims, idims
def _get_indices(key):
S = Index(slice(None))
inds = _Index4(S, S, S, S)
if isinstance(key, tuple):
n_idx = len(key)
for n in range(n_idx):
inds[n] = Index(key[n])
else:
inds[0] = Index(key)
return inds
def _get_assign_dims(key, idims):
dims = [1]*4
for n in range(len(idims)):
dims[n] = idims[n]
if _is_number(key):
dims[0] = 1
return dims
elif isinstance(key, slice):
dims[0] = _slice_to_length(key, idims[0])
return dims
elif isinstance(key, ParallelRange):
dims[0] = _slice_to_length(key.S, idims[0])
return dims
elif isinstance(key, BaseArray):
# If the array is boolean take only the number of nonzeros
if(key.dtype() is Dtype.b8):
dims[0] = int(sum(key))
else:
dims[0] = key.elements()
return dims
elif isinstance(key, tuple):
n_inds = len(key)
for n in range(n_inds):
if (_is_number(key[n])):
dims[n] = 1
elif (isinstance(key[n], BaseArray)):
# If the array is boolean take only the number of nonzeros
if(key[n].dtype() is Dtype.b8):
dims[n] = int(sum(key[n]))
else:
dims[n] = key[n].elements()
elif (isinstance(key[n], slice)):
dims[n] = _slice_to_length(key[n], idims[n])
elif (isinstance(key[n], ParallelRange)):
dims[n] = _slice_to_length(key[n].S, idims[n])
else:
raise IndexError("Invalid type while assigning to arrayfire.array")
return dims
else:
raise IndexError("Invalid type while assigning to arrayfire.array")
def transpose(a, conj=False):
"""
Perform the transpose on an input.
Parameters
-----------
a : af.Array
Multi dimensional arrayfire array.
conj : optional: bool. default: False.
Flag to specify if a complex conjugate needs to applied for complex inputs.
Returns
--------
out : af.Array
         Containing the transpose of `a` for all batches.
"""
out = Array()
safe_call(backend.get().af_transpose(ct.pointer(out.arr), a.arr, conj))
return out
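# Usage sketch for transpose() (illustrative only; the values and shape are made up):
#
#   >>> import arrayfire as af
#   >>> a = af.Array([1, 2, 3, 4, 5, 6], (2, 3))
#   >>> b = af.transpose(a)        # b has dims (3, 2)
#   >>> c = af.transpose(a, True)  # conjugate transpose, relevant for complex arrays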
def transpose_inplace(a, conj=False):
"""
Perform inplace transpose on an input.
Parameters
-----------
a : af.Array
- Multi dimensional arrayfire array.
- Contains transposed values on exit.
conj : optional: bool. default: False.
Flag to specify if a complex conjugate needs to applied for complex inputs.
Note
-------
Input `a` needs to be a square matrix or a batch of square matrices.
"""
safe_call(backend.get().af_transpose_inplace(a.arr, conj))
class Array(BaseArray):
"""
A multi dimensional array container.
Parameters
----------
src : optional: array.array, list or C buffer. default: None.
- When `src` is `array.array` or `list`, the data is copied to create the Array()
- When `src` is None, an empty buffer is created.
dims : optional: tuple of ints. default: (0,)
         - When using the default values of `dims`, the dims are calculated as `len(src)`
dtype: optional: str or arrayfire.Dtype. default: None.
- if str, must be one of the following:
- 'f' for float
- 'd' for double
- 'b' for bool
- 'B' for unsigned char
- 'i' for signed 32 bit integer
- 'I' for unsigned 32 bit integer
- 'l' for signed 64 bit integer
- 'L' for unsigned 64 bit integer
- 'F' for 32 bit complex number
- 'D' for 64 bit complex number
- if arrayfire.Dtype, must be one of the following:
- Dtype.f32 for float
- Dtype.f64 for double
- Dtype.b8 for bool
- Dtype.u8 for unsigned char
- Dtype.s32 for signed 32 bit integer
- Dtype.u32 for unsigned 32 bit integer
- Dtype.s64 for signed 64 bit integer
- Dtype.u64 for unsigned 64 bit integer
- Dtype.c32 for 32 bit complex number
- Dtype.c64 for 64 bit complex number
- if None, Dtype.f32 is assumed
Attributes
-----------
arr: ctypes.c_void_p
ctypes variable containing af_array from arrayfire library.
Examples
--------
Creating an af.Array() from array.array()
>>> import arrayfire as af
>>> import array
>>> a = array.array('f', (1, 2, 3, 4))
>>> b = af.Array(a, (2,2))
>>> af.display(b)
[2 2 1 1]
1.0000 3.0000
2.0000 4.0000
Creating an af.Array() from a list
>>> import arrayfire as af
>>> import array
>>> a = [1, 2, 3, 4]
>>> b = af.Array(a)
>>> af.display(b)
[4 1 1 1]
1.0000
2.0000
3.0000
4.0000
Creating an af.Array() from numpy.array()
>>> import numpy as np
>>> import arrayfire as af
>>> a = np.random.random((2,2))
>>> a
array([[ 0.33042524, 0.36135449],
[ 0.86748649, 0.42199135]])
>>> b = af.Array(a.ctypes.data, a.shape, a.dtype.char)
>>> af.display(b)
[2 2 1 1]
0.3304 0.8675
0.3614 0.4220
Note
-----
- The class is currently limited to 4 dimensions.
- arrayfire.Array() uses column major format.
- numpy uses row major format by default which can cause issues during conversion
"""
def __init__(self, src=None, dims=(0,), dtype=None):
super(Array, self).__init__()
buf=None
buf_len=0
if dtype is not None:
if isinstance(dtype, str):
type_char = dtype
else:
type_char = to_typecode[dtype.value]
else:
type_char = None
_type_char='f'
backend.lock()
if src is not None:
if (isinstance(src, Array)):
safe_call(backend.get().af_retain_array(ct.pointer(self.arr), src.arr))
return
host = __import__("array")
if isinstance(src, host.array):
buf,buf_len = src.buffer_info()
_type_char = src.typecode
numdims, idims = _get_info(dims, buf_len)
elif isinstance(src, list):
tmp = host.array('f', src)
buf,buf_len = tmp.buffer_info()
_type_char = tmp.typecode
numdims, idims = _get_info(dims, buf_len)
elif isinstance(src, int) or isinstance(src, ct.c_void_p):
buf = src
numdims, idims = _get_info(dims, buf_len)
elements = 1
for dim in idims:
elements *= dim
if (elements == 0):
raise RuntimeError("Expected dims when src is data pointer")
if (type_char is None):
raise TypeError("Expected type_char when src is data pointer")
_type_char = type_char
else:
raise TypeError("src is an object of unsupported class")
if (type_char is not None and
type_char != _type_char):
raise TypeError("Can not create array of requested type from input data type")
self.arr = _create_array(buf, numdims, idims, to_dtype[_type_char])
else:
if type_char is None:
type_char = 'f'
numdims = len(dims)
idims = [1] * 4
for n in range(numdims):
idims[n] = dims[n]
self.arr = _create_empty_array(numdims, idims, to_dtype[type_char])
def copy(self):
"""
Performs a deep copy of the array.
Returns
-------
out: af.Array()
An identical copy of self.
"""
out = Array()
safe_call(backend.get().af_copy_array(ct.pointer(out.arr), self.arr))
return out
def __del__(self):
"""
Release the C array when going out of scope
"""
if self.arr.value:
backend.get().af_release_array(self.arr)
def device_ptr(self):
"""
Return the device pointer held by the array.
Returns
------
ptr : int
Contains location of the device pointer
Note
----
- This can be used to integrate with custom C code and / or PyCUDA or PyOpenCL.
        - No memory copy is performed, this function returns the raw device pointer.
"""
ptr = ct.c_void_p(0)
backend.get().af_get_device_ptr(ct.pointer(ptr), self.arr)
return ptr.value
def elements(self):
"""
Return the number of elements in the array.
"""
num = ct.c_ulonglong(0)
safe_call(backend.get().af_get_elements(ct.pointer(num), self.arr))
return num.value
def dtype(self):
"""
Return the data type as a arrayfire.Dtype enum value.
"""
dty = ct.c_int(Dtype.f32.value)
safe_call(backend.get().af_get_type(ct.pointer(dty), self.arr))
return to_dtype[typecodes[dty.value]]
def type(self):
"""
Return the data type as an int.
"""
return self.dtype().value
def dims(self):
"""
Return the shape of the array as a tuple.
"""
d0 = ct.c_longlong(0)
d1 = ct.c_longlong(0)
d2 = ct.c_longlong(0)
d3 = ct.c_longlong(0)
safe_call(backend.get().af_get_dims(ct.pointer(d0), ct.pointer(d1),
ct.pointer(d2), ct.pointer(d3), self.arr))
dims = (d0.value,d1.value,d2.value,d3.value)
return dims[:self.numdims()]
def numdims(self):
"""
Return the number of dimensions of the array.
"""
nd = ct.c_uint(0)
safe_call(backend.get().af_get_numdims(ct.pointer(nd), self.arr))
return nd.value
def is_empty(self):
"""
Check if the array is empty i.e. it has no elements.
"""
res = ct.c_bool(False)
safe_call(backend.get().af_is_empty(ct.pointer(res), self.arr))
return res.value
def is_scalar(self):
"""
Check if the array is scalar i.e. it has only one element.
"""
res = ct.c_bool(False)
safe_call(backend.get().af_is_scalar(ct.pointer(res), self.arr))
return res.value
def is_row(self):
"""
Check if the array is a row i.e. it has a shape of (1, cols).
"""
res = ct.c_bool(False)
safe_call(backend.get().af_is_row(ct.pointer(res), self.arr))
return res.value
def is_column(self):
"""
Check if the array is a column i.e. it has a shape of (rows, 1).
"""
res = ct.c_bool(False)
safe_call(backend.get().af_is_column(ct.pointer(res), self.arr))
return res.value
def is_vector(self):
"""
Check if the array is a vector i.e. it has a shape of one of the following:
- (rows, 1)
- (1, cols)
- (1, 1, vols)
- (1, 1, 1, batch)
"""
res = ct.c_bool(False)
safe_call(backend.get().af_is_vector(ct.pointer(res), self.arr))
return res.value
def is_complex(self):
"""
Check if the array is of complex type.
"""
res = ct.c_bool(False)
safe_call(backend.get().af_is_complex(ct.pointer(res), self.arr))
return res.value
def is_real(self):
"""
Check if the array is not of complex type.
"""
res = ct.c_bool(False)
safe_call(backend.get().af_is_real(ct.pointer(res), self.arr))
return res.value
def is_double(self):
"""
Check if the array is of double precision floating point type.
"""
res = ct.c_bool(False)
safe_call(backend.get().af_is_double(ct.pointer(res), self.arr))
return res.value
def is_single(self):
"""
Check if the array is of single precision floating point type.
"""
res = ct.c_bool(False)
safe_call(backend.get().af_is_single(ct.pointer(res), self.arr))
return res.value
def is_real_floating(self):
"""
Check if the array is real and of floating point type.
"""
res = ct.c_bool(False)
safe_call(backend.get().af_is_realfloating(ct.pointer(res), self.arr))
return res.value
def is_floating(self):
"""
Check if the array is of floating point type.
"""
res = ct.c_bool(False)
safe_call(backend.get().af_is_floating(ct.pointer(res), self.arr))
return res.value
def is_integer(self):
"""
Check if the array is of integer type.
"""
res = ct.c_bool(False)
safe_call(backend.get().af_is_integer(ct.pointer(res), self.arr))
return res.value
def is_bool(self):
"""
Check if the array is of type b8.
"""
res = ct.c_bool(False)
safe_call(backend.get().af_is_bool(ct.pointer(res), self.arr))
return res.value
def __add__(self, other):
"""
Return self + other.
"""
return _binary_func(self, other, backend.get().af_add)
def __iadd__(self, other):
"""
Perform self += other.
"""
self = _binary_func(self, other, backend.get().af_add)
return self
def __radd__(self, other):
"""
Return other + self.
"""
return _binary_funcr(other, self, backend.get().af_add)
def __sub__(self, other):
"""
Return self - other.
"""
return _binary_func(self, other, backend.get().af_sub)
def __isub__(self, other):
"""
Perform self -= other.
"""
self = _binary_func(self, other, backend.get().af_sub)
return self
def __rsub__(self, other):
"""
Return other - self.
"""
return _binary_funcr(other, self, backend.get().af_sub)
def __mul__(self, other):
"""
Return self * other.
"""
return _binary_func(self, other, backend.get().af_mul)
def __imul__(self, other):
"""
Perform self *= other.
"""
self = _binary_func(self, other, backend.get().af_mul)
return self
def __rmul__(self, other):
"""
Return other * self.
"""
return _binary_funcr(other, self, backend.get().af_mul)
def __truediv__(self, other):
"""
Return self / other.
"""
return _binary_func(self, other, backend.get().af_div)
def __itruediv__(self, other):
"""
Perform self /= other.
"""
self = _binary_func(self, other, backend.get().af_div)
return self
def __rtruediv__(self, other):
"""
Return other / self.
"""
return _binary_funcr(other, self, backend.get().af_div)
def __div__(self, other):
"""
Return self / other.
"""
return _binary_func(self, other, backend.get().af_div)
def __idiv__(self, other):
"""
        Perform self /= other.
"""
self = _binary_func(self, other, backend.get().af_div)
return self
def __rdiv__(self, other):
"""
Return other / self.
"""
return _binary_funcr(other, self, backend.get().af_div)
def __mod__(self, other):
"""
Return self % other.
"""
return _binary_func(self, other, backend.get().af_mod)
def __imod__(self, other):
"""
Perform self %= other.
"""
self = _binary_func(self, other, backend.get().af_mod)
return self
def __rmod__(self, other):
"""
Return other % self.
"""
return _binary_funcr(other, self, backend.get().af_mod)
def __pow__(self, other):
"""
Return self ** other.
"""
return _binary_func(self, other, backend.get().af_pow)
def __ipow__(self, other):
"""
Perform self **= other.
"""
self = _binary_func(self, other, backend.get().af_pow)
return self
def __rpow__(self, other):
"""
Return other ** self.
"""
return _binary_funcr(other, self, backend.get().af_pow)
def __lt__(self, other):
"""
Return self < other.
"""
return _binary_func(self, other, backend.get().af_lt)
def __gt__(self, other):
"""
Return self > other.
"""
return _binary_func(self, other, backend.get().af_gt)
def __le__(self, other):
"""
Return self <= other.
"""
return _binary_func(self, other, backend.get().af_le)
def __ge__(self, other):
"""
Return self >= other.
"""
return _binary_func(self, other, backend.get().af_ge)
def __eq__(self, other):
"""
Return self == other.
"""
return _binary_func(self, other, backend.get().af_eq)
def __ne__(self, other):
"""
Return self != other.
"""
return _binary_func(self, other, backend.get().af_neq)
def __and__(self, other):
"""
Return self & other.
"""
return _binary_func(self, other, backend.get().af_bitand)
def __iand__(self, other):
"""
Perform self &= other.
"""
self = _binary_func(self, other, backend.get().af_bitand)
return self
def __or__(self, other):
"""
Return self | other.
"""
return _binary_func(self, other, backend.get().af_bitor)
def __ior__(self, other):
"""
Perform self |= other.
"""
self = _binary_func(self, other, backend.get().af_bitor)
return self
def __xor__(self, other):
"""
Return self ^ other.
"""
return _binary_func(self, other, backend.get().af_bitxor)
def __ixor__(self, other):
"""
Perform self ^= other.
"""
self = _binary_func(self, other, backend.get().af_bitxor)
return self
def __lshift__(self, other):
"""
Return self << other.
"""
return _binary_func(self, other, backend.get().af_bitshiftl)
def __ilshift__(self, other):
"""
Perform self <<= other.
"""
self = _binary_func(self, other, backend.get().af_bitshiftl)
return self
def __rshift__(self, other):
"""
Return self >> other.
"""
return _binary_func(self, other, backend.get().af_bitshiftr)
def __irshift__(self, other):
"""
Perform self >>= other.
"""
self = _binary_func(self, other, backend.get().af_bitshiftr)
return self
def __neg__(self):
"""
Return -self
"""
return 0 - self
def __pos__(self):
"""
Return +self
"""
return self
def __invert__(self):
"""
Return ~self
"""
return self == 0
def __nonzero__(self):
return self != 0
# TODO:
# def __abs__(self):
# return self
def __getitem__(self, key):
"""
Return self[key]
Note
----
Ellipsis not supported as key
"""
try:
out = Array()
n_dims = self.numdims()
inds = _get_indices(key)
safe_call(backend.get().af_index_gen(ct.pointer(out.arr),
self.arr, ct.c_longlong(n_dims), inds.pointer))
return out
except RuntimeError as e:
raise IndexError(str(e))
def __setitem__(self, key, val):
"""
Perform self[key] = val
Note
----
Ellipsis not supported as key
"""
try:
n_dims = self.numdims()
if (_is_number(val)):
tdims = _get_assign_dims(key, self.dims())
other_arr = constant_array(val, tdims[0], tdims[1], tdims[2], tdims[3], self.type())
del_other = True
else:
other_arr = val.arr
del_other = False
out_arr = ct.c_void_p(0)
inds = _get_indices(key)
safe_call(backend.get().af_assign_gen(ct.pointer(out_arr),
self.arr, ct.c_longlong(n_dims), inds.pointer,
other_arr))
safe_call(backend.get().af_release_array(self.arr))
if del_other:
safe_call(backend.get().af_release_array(other_arr))
self.arr = out_arr
except RuntimeError as e:
raise IndexError(str(e))
def to_ctype(self, row_major=False, return_shape=False):
"""
Return the data as a ctype C array after copying to host memory
Parameters
---------
row_major: optional: bool. default: False.
Specifies if a transpose needs to occur before copying to host memory.
return_shape: optional: bool. default: False.
Specifies if the shape of the array needs to be returned.
Returns
-------
If return_shape is False:
res: The ctypes array of the appropriate type and length.
else :
(res, dims): tuple of the ctypes array and the shape of the array
"""
if (self.arr.value == 0):
raise RuntimeError("Can not call to_ctype on empty array")
tmp = transpose(self) if row_major else self
ctype_type = to_c_type[self.type()] * self.elements()
res = ctype_type()
safe_call(backend.get().af_get_data_ptr(ct.pointer(res), self.arr))
if (return_shape):
return res, self.dims()
else:
return res
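    # Usage sketch for to_ctype() (illustrative only; the values are made up):
    #
    #   >>> a = af.Array([1.0, 2.0, 3.0, 4.0], (2, 2))
    #   >>> c_arr = a.to_ctype()                         # ctypes array of 4 floats
    #   >>> c_arr, dims = a.to_ctype(return_shape=True)  # also returns (2, 2)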
def to_array(self, row_major=False, return_shape=False):
"""
Return the data as array.array
Parameters
---------
row_major: optional: bool. default: False.
Specifies if a transpose needs to occur before copying to host memory.
return_shape: optional: bool. default: False.
Specifies if the shape of the array needs to be returned.
Returns
-------
If return_shape is False:
res: array.array of the appropriate type and length.
else :
(res, dims): array.array and the shape of the array
"""
if (self.arr.value == 0):
raise RuntimeError("Can not call to_array on empty array")
res = self.to_ctype(row_major, return_shape)
host = __import__("array")
h_type = to_typecode[self.type()]
if (return_shape):
return host.array(h_type, res[0]), res[1]
else:
return host.array(h_type, res)
def to_list(self, row_major=False):
"""
Return the data as list
Parameters
---------
row_major: optional: bool. default: False.
Specifies if a transpose needs to occur before copying to host memory.
        Returns
        -------
        res: list of the appropriate type, nested to match the shape of the array.
"""
ct_array, shape = self.to_ctype(row_major, True)
return _ctype_to_lists(ct_array, len(shape) - 1, shape)
def __repr__(self):
"""
Displays the meta data of the arrayfire array.
Note
----
Use arrayfire.display(a) to display the contents of the array.
"""
# Having __repr__ directly print things is a bad idea
# Placeholder for when af_array_to_string is available
# safe_call(backend.get().af_array_to_string...
return 'Type: arrayfire.Array()\nShape: %s\nType char: %s' % \
(self.dims(), to_typecode[self.type()])
def __array__(self):
"""
Constructs a numpy.array from arrayfire.Array
"""
import numpy as np
res = np.empty(self.dims(), dtype=np.dtype(to_typecode[self.type()]), order='F')
safe_call(backend.get().af_get_data_ptr(ct.c_void_p(res.ctypes.data), self.arr))
return res
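    # Usage sketch for __array__ (illustrative only): numpy invokes it implicitly.
    #
    #   >>> import numpy as np
    #   >>> a = af.Array([1.0, 2.0, 3.0, 4.0], (2, 2))
    #   >>> n = np.array(a)   # copies device data into a column-major numpy array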
def display(a):
"""
Displays the contents of an array.
Parameters
----------
a : af.Array
Multi dimensional arrayfire array
"""
expr = inspect.stack()[1][-2]
try:
if (expr is not None):
st = expr[0].find('(') + 1
en = expr[0].rfind(')')
print('%s' % expr[0][st:en])
safe_call(backend.get().af_print_array(a.arr))
except:
safe_call(backend.get().af_print_array(a.arr))
from .algorithm import sum
| bsd-3-clause | -5,315,669,486,786,941,000 | 27.30791 | 100 | 0.520939 | false |
ethansshaw/stellavitrum | ScienceFairProcess.py | 1 | 10349 | #!/usr/bin/env python
"""
Written by Ethan Shaw
"""
from astropy.io import fits
import sys, png, math, os
colors = ['red', 'green', 'blue']
# Build x_axis_len rows, each containing y_axis_len columns
# access with PNG_data[row][column]
def buildMatrix(x_axis_len, y_axis_len, greyscale=True):
# set up empty list (matrix) to hold pixels
PNG_data = []
for row in range(0, x_axis_len):
PNG_data.append([])
#start out with an empty list, then put another list in it so it looks like [[]]
#gives the value of x_axis_len empty lists inside the list PNG_data
for column in range (0, y_axis_len):
if ( greyscale ):
PNG_data[row].append(0)
#this is the grayscale value
else:
#Red,Green,Blue values
PNG_data[row].append(0)
PNG_data[row].append(0)
PNG_data[row].append(0)
return PNG_data
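# Illustrative sketch (not part of the original pipeline): with greyscale=False each
# row holds three entries per pixel, so a 2x2 colour image uses rows of length 6 and
# is addressed through setPixel() or PNG_data[row][column*3 + channel].
#
#   rgb = buildMatrix(2, 2, greyscale=False)   # 2 rows, each [0, 0, 0, 0, 0, 0]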
#Function defines ONLY color
def setPixel(PNG_data, red, green, blue, row, column):
PNG_data[row][column*3] = red
PNG_data[row][column*3 + 1] = green
PNG_data[row][column*3 + 2] = blue
def getPixelRange(PNG_data, x_axis_len, y_axis_len):
# determine the PNG_data range for scaling purposes
pixel_max = 0
pixel_min = pow(2,16)
for row in range(0, x_axis_len):
for column in range (0, y_axis_len):
pixel_max = max(pixel_max, PNG_data[row][column])
pixel_min = min(pixel_min, PNG_data[row][column])
print "Pixel max: {0:.20f}, Pixel min: {0:.20f}".format(pixel_max, pixel_min)
return (pixel_max, pixel_min)
def getRawDataFromFile(file, color):
#this reads the file and structures into useable format
hdulist = fits.open(file)
entry = hdulist[0]
bits_per_pixel = entry.header['BITPIX']
number_axis = entry.header['NAXIS']
x_axis_len = entry.header['NAXIS2']
y_axis_len = entry.header['NAXIS1']
print "Data dimensions: (%d x %d) - %d axes, %d bpp" % (x_axis_len, y_axis_len, number_axis, bits_per_pixel)
# data is a bunch of columns, each containing one row
data = entry.data
pixelData = buildMatrix(x_axis_len, y_axis_len, greyscale=False)
for row in range(0, x_axis_len):
for column in range (0, y_axis_len):
try:
image_value = data[row][column]
red, green, blue = ( 0,0,0 )
if ( color == 'red' ):
red = image_value
elif ( color == 'green' ):
green = image_value
elif ( color == 'blue' ):
blue = image_value
setPixel(pixelData, red, green, blue, row, column)
except Exception as e:
print "Error accessing (%d, %d) : %s" % (row, column, e)
raise SystemExit
return pixelData
def combineTwoDataSets(dataSet1, dataSet2):
print "Combining two data sets"
# step 1, make a new data set the size of the two
x_axis_len = len(dataSet1)
y_axis_len = len(dataSet1[0])
combinedData = buildMatrix(x_axis_len, y_axis_len)
# step 2, step over each pixel in the sets and ADD to the combined pixel value
for row in range(0, x_axis_len):
for column in range (0, y_axis_len):
combinedData[row][column] = dataSet1[row][column] + dataSet2[row][column]
# step 3, return the combined data set
return combinedData
def writePNGFile(PNGData, output_directory, dataset_name):
filename = '%s/out_data_%s.png' % ( output_directory, dataset_name)
f = open(filename, 'wb') # binary mode is important
w = png.Writer(len(PNGData[0])/3, len(PNGData), greyscale=False,alpha=False, bitdepth=16)
w.write(f, PNGData)
print "Image written to file %s" % filename
def linearScale(value, min_value, max_value):
pixel_range = abs(max_value - min_value)
#2 to the 16th means a 16 bit image (using 16 bits of data to describe each pixel)
ratio = (pow(2, 16)*1.0 - 1) / pixel_range
    #This gives us a linearly scaled value between 0 (black) and 2^16 - 1 (white)
val = int(round(value * ratio))
return val
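# Worked example of the linear scaling above (numbers are made up for illustration):
# with min_value=0 and max_value=200, ratio = 65535 / 200 = 327.675, so an input of
# 100 maps to int(round(100 * 327.675)) = 32768, i.e. roughly mid-grey in 16-bit.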
def logarithmicScalePixel(value, min_value, max_value):
try:
val = abs(math.log(value))
# for min and max we use 0, 100 for now
return linearScalePixel(val, 0, 100)
except Exception as e:
return 0
def linearScalePixel(value, min_value, max_value):
pixel_range = abs(max_value - min_value)
#2 to the 16th means a 16 bit image (using 16 bits of data to describe each pixel)
ratio = (pow(2, 16)*1.0 -1 ) / pixel_range
    #This gives us a linearly scaled value between 0 (black) and 2^16 - 1 (white)
val = int(round(value * ratio))
if ( val < 0 or val > 65535 ):
print "value %d (orig: %f was outside range %.e, %.e" % ( val, value, min_value, max_value )
raise SystemExit
return val
def scaleDataSet(scalingFunction, dataSet):
x_axis_len = len(dataSet)
y_axis_len = len(dataSet[0])
pixel_max, pixel_min = getPixelRange(dataSet, x_axis_len, y_axis_len)
print "Max: %f, Min: %f" % (pixel_max, pixel_min)
for row in range(0, x_axis_len):
for column in range (0, y_axis_len):
dataSet[row][column] = scalingFunction(dataSet[row][column], pixel_min, pixel_max)
return dataSet
def linearScaleDataSet(dataSet):
return scaleDataSet(linearScalePixel, dataSet)
def logScaleDataSet(dataSet):
return scaleDataSet(logarithmicScalePixel, dataSet)
def zeroOutliersInDataSet(dataSet, interQuartileScaleFactor=1.5):
(firstQuartile, median, thirdQuartile, interQuartile) = getQuartileValues(dataSet)
minAllowedValue = max(0, firstQuartile - (interQuartileScaleFactor * interQuartile))
maxAllowedValue = thirdQuartile + (interQuartileScaleFactor * interQuartile)
x_axis_len = len(dataSet)
y_axis_len = len(dataSet[0])
for row in range(0, x_axis_len):
for column in range (0, y_axis_len):
dataValue = dataSet[row][column]
if (dataValue < minAllowedValue or dataValue > maxAllowedValue):
dataSet[row][column] = 0
return dataSet
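# Illustrative sketch of the outlier rule above (numbers are made up): with a first
# quartile of 10, a third quartile of 30 and the default scale factor of 1.5, the
# inter-quartile range is 20, so values below max(0, 10 - 30) = 0 or above
# 30 + 30 = 60 are zeroed out.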
def histogramData(dataSet, output_directory, dataset_folder="data"):
pixel_max, pixel_min = getPixelRange(dataSet, len(dataSet), len(dataSet[0]))
histogram = {}
number_of_groups = 10
group_size = (pixel_max - pixel_min) / (number_of_groups *1.0)
for i in range(0, number_of_groups):
histogram[int(i*group_size)] = 0
histogramKeys = histogram.keys()
histogramKeys.sort()
histogramKeys.reverse()
for x in range(0, len(dataSet)):
for y in range(0, len(dataSet[0])):
pixel = dataSet[x][y]
for key in histogramKeys:
if pixel < key:
histogram[key] = int(histogram[key] + 1)
continue
histogramKeys.reverse()
output_path = "%s/%s_histogram.csv" % (output_directory, dataset_folder)
outf = open(output_path, "w")
for key in histogramKeys:
kname = "Bucket %d" % key
outf.write("%s,%d\n" % (kname, histogram[key]))
outf.close()
print "Histogram written to file %s" % output_path
def getMean(dataSet):
sum = 0.0
count = 0
x_axis_len = len(dataSet)
y_axis_len = len(dataSet[0])
for row in range(0, x_axis_len):
for column in range (0, y_axis_len):
if dataSet[row][column] > 0:
sum = sum + dataSet[row][column]
count = count + 1
return sum/count
def getMedian(dataSet):
dataList = []
x_axis_len = len(dataSet)
y_axis_len = len(dataSet[0])
for row in range(0, x_axis_len):
for column in range (0, y_axis_len):
if (dataSet[row][column] > 0):
dataList.append(dataSet[row][column])
dataList.sort()
middleNumber = len(dataList)/2
return dataList[middleNumber]
def getQuartileValues(dataSet):
median = getMedian(dataSet)
x_axis_len = len(dataSet)
y_axis_len = len(dataSet[0])
valuesLessThanMedian = []
valuesGreaterThanMedian = []
for row in range(0, x_axis_len):
for column in range (0, y_axis_len):
if dataSet[row][column] > median:
valuesGreaterThanMedian.append(dataSet[row][column])
else:
valuesLessThanMedian.append(dataSet[row][column])
valuesGreaterThanMedian.sort()
valuesLessThanMedian.sort()
firstQuartile = valuesLessThanMedian[len(valuesLessThanMedian)/2]
thirdQuartile = valuesGreaterThanMedian[len(valuesGreaterThanMedian)/2]
interQuartile = thirdQuartile - firstQuartile
print "Quartiles: ", firstQuartile, median, thirdQuartile, interQuartile
return (firstQuartile, median, thirdQuartile, interQuartile)
def getMode(dataSet):
dataPoints = {}
x_axis_len = len(dataSet)
y_axis_len = len(dataSet[0])
for row in range(0, x_axis_len):
for column in range (0, y_axis_len):
point = dataSet[row][column]
if (point > 0):
if dataPoints.has_key(point):
dataPoints[point] = dataPoints[point] + 1
else:
dataPoints[point] = 1
maxCount = 0
maxValue = None
for (value, count) in dataPoints.items():
if count > maxCount:
maxCount = count
maxValue = value
print "%f was the max value and occurred %d times" % (maxValue, maxCount)
return maxValue
def outputToCSVFile(filename, dataSet):
outf = open(filename, 'w')
x_axis_len = len(dataSet)
y_axis_len = len(dataSet[0])
for row in range(0, x_axis_len):
line = ""
for column in range (0, y_axis_len):
line = "%s%.7e," % (line, dataSet[row][column])
line = line + "\n"
outf.write(line)
outf.close()
print "Wrote to %s" % filename
if __name__ == "__main__":
if len(sys.argv) < 2:
print "Usage: %s <file1> <file2> ..." % sys.argv[0]
raise SystemExit
files = sys.argv[1:]
i = 0
PNGDataSets = []
#rData = getRawDataFromFile(files[0], "red")
#writePNGFile(rData, "red")
#raise SystemExit
full_path1 = os.path.abspath(files[0])
folder_path = os.path.split(full_path1)[0]
dataset_folder = os.path.basename(folder_path)
for file in files:
dataSet = getRawDataFromFile(file, colors[i])
i = i + 1
dataSetNormalized = zeroOutliersInDataSet(dataSet)
PNGDataSets.append(dataSetNormalized)
combinedSet = None
for dataSet in PNGDataSets:
if (combinedSet == None):
combinedSet = dataSet
else:
combinedSet = combineTwoDataSets(combinedSet, dataSet)
parent_directory = os.path.split(os.path.abspath(sys.argv[0]))[0]
output_directory = os.path.join(parent_directory, "Results")
if not os.path.exists(output_directory):
os.makedirs(output_directory)
print "Created directory %s" % output_directory
else:
print "Output directory %s exists" % output_directory
# now linear scale the outlier set
scaledSet = linearScaleDataSet(combinedSet)
histogramData(scaledSet, output_directory, dataset_folder)
#raise SystemExit
filename = "%s/dataset_%s.csv" % (output_directory, dataset_folder)
outputToCSVFile(filename, scaledSet)
writePNGFile(scaledSet, output_directory, dataset_folder) #old was writePNGFile(combinedSet, "combined")
print "Process complete" | mit | 1,193,926,610,286,185,000 | 30.081081 | 109 | 0.689439 | false |
sbelskie/symplicity | Symplicity/local_settings.py | 1 | 2695 | """
Django settings for Symplicity project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
import os
import dj_database_url
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'i+acxn5(akgsn!sr4^qgf(^m&*@+g1@u^t@=8s@axc41ml*f=s'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'symptom_tracker',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'Symplicity.urls'
WSGI_APPLICATION = 'Symplicity.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'symplicity',
'USER':'postgres',
'PASSWORD': 'password',
'HOST': 'localhost',
'PORT': '5432'
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Honor the 'X-Forwarded-Proto' header for request.is_secure()
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
# Allow all host headers
ALLOWED_HOSTS = ['*']
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
STATIC_ROOT = 'staticfiles'
STATIC_URL = '/static/'
STATICFILES_DIRS = (
os.path.join(BASE_DIR, 'static'),
)
STATICFILES_STORAGE = 'django.contrib.staticfiles.storage.StaticFilesStorage'
| apache-2.0 | 5,726,592,050,074,065,000 | 24.683168 | 77 | 0.687199 | false |
oldm/OldMan | oldman/schema/hydra.py | 1 | 1520 | from uuid import uuid4
from rdflib import URIRef, RDF, RDFS
from oldman.vocabulary import OLDM_CORRESPONDING_CLASS
class HydraSchemaAdapter(object):
"""Updates some Hydra patterns in the schema graph:
- hydra:Link: create a hydra:Class, subclass of the link range that support the same operations
"""
def update_schema_graph(self, graph):
graph = graph.skolemize()
graph = self._update_links(graph)
return graph
@staticmethod
def _update_links(graph):
links = list(graph.subjects(RDF.type, URIRef(u"http://www.w3.org/ns/hydra/core#Link")))
for link_property in links:
new_class_iri = URIRef(u"http://localhost/.well-known/genid/link_class/%s" % uuid4())
graph.add((new_class_iri, RDF.type, URIRef(u"http://www.w3.org/ns/hydra/core#Class")))
graph.add((link_property, URIRef(OLDM_CORRESPONDING_CLASS), new_class_iri))
# Ranges --> upper classes
ranges = list(graph.objects(link_property, RDFS.range))
for range in ranges:
graph.add((new_class_iri, RDFS.subClassOf, range))
# supported Operations
supported_operation_property = URIRef(u"http://www.w3.org/ns/hydra/core#supportedOperation")
operations = list(graph.objects(link_property, supported_operation_property))
for operation in operations:
graph.add((new_class_iri, supported_operation_property, operation))
return graph
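    # Illustrative sketch of the rewrite performed above (IRIs are made up):
    # a property declared as
    #     ex:knows a hydra:Link ; rdfs:range ex:Person ;
    #         hydra:supportedOperation ex:GetOp .
    # gains a companion skolemized hydra:Class that is rdfs:subClassOf ex:Person,
    # supports ex:GetOp, and is linked from ex:knows via OLDM_CORRESPONDING_CLASS.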
| bsd-3-clause | -5,559,144,035,788,686,000 | 35.190476 | 105 | 0.646053 | false |
rkokkelk/Gulliver | deluge/core/core.py | 1 | 36618 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2007-2009 Andrew Resch <[email protected]>
# Copyright (C) 2011 Pedro Algarvio <[email protected]>
#
# This file is part of Deluge and is licensed under GNU General Public License 3.0, or later, with
# the additional special exception to link portions of this program with the OpenSSL library.
# See LICENSE for more details.
#
import base64
import glob
import logging
import os
import shutil
import tempfile
import threading
from urlparse import urljoin
import twisted.web.client
import twisted.web.error
import deluge.common
import deluge.component as component
from deluge import path_chooser_common
from deluge._libtorrent import lt
from deluge.configmanager import ConfigManager, get_config_dir
from deluge.core.alertmanager import AlertManager
from deluge.core.authmanager import (AUTH_LEVEL_ADMIN, AUTH_LEVEL_NONE, AUTH_LEVELS_MAPPING,
AUTH_LEVELS_MAPPING_REVERSE, AuthManager)
from deluge.core.eventmanager import EventManager
from deluge.core.filtermanager import FilterManager
from deluge.core.pluginmanager import PluginManager
from deluge.core.preferencesmanager import PreferencesManager
from deluge.core.rpcserver import export
from deluge.core.torrentmanager import TorrentManager
from deluge.core.scanner import Scanner
from deluge.error import DelugeError, InvalidPathError, InvalidTorrentError
from deluge.event import NewVersionAvailableEvent, SessionPausedEvent, SessionResumedEvent, TorrentQueueChangedEvent
from deluge.httpdownloader import download_file
log = logging.getLogger(__name__)
class Core(component.Component):
def __init__(self, listen_interface=None):
log.debug("Core init...")
component.Component.__init__(self, "Core")
# Create the client fingerprint
client_id = "DE"
client_version = deluge.common.VersionSplit(deluge.common.get_version()).version
while len(client_version) < 4:
client_version.append(0)
# Start the libtorrent session
log.info("Starting libtorrent %s (%s, %s) session...", lt.version, client_id, client_version)
self.session = lt.session(lt.fingerprint(client_id, *client_version), flags=0)
# Load the session state if available
self.__load_session_state()
# --- Set session settings ---
settings = self.session.get_settings()
settings["user_agent"] = "Deluge/%(deluge_version)s libtorrent/%(lt_version)s" % {
'deluge_version': deluge.common.get_version(),
'lt_version': self.get_libtorrent_version().rpartition(".")[0]
}
# No SSL torrent support in code so disable the listen port.
settings["ssl_listen"] = 0
# On Windows OS set the disk I/O read/write to bypass OS cache
if deluge.common.windows_check():
settings["disk_io_write_mode"] = lt.io_buffer_mode_t.disable_os_cache
settings["disk_io_read_mode"] = lt.io_buffer_mode_t.disable_os_cache
self.session.set_settings(settings)
# --- libtorrent plugins ---
# Allows peers to download the metadata from the swarm directly
self.session.add_extension("metadata_transfer")
self.session.add_extension("ut_metadata")
# Ban peers that sends bad data
self.session.add_extension("smart_ban")
# Create the components
self.eventmanager = EventManager()
self.preferencesmanager = PreferencesManager()
self.alertmanager = AlertManager()
self.pluginmanager = PluginManager(self)
self.torrentmanager = TorrentManager()
self.filtermanager = FilterManager(self)
self.authmanager = AuthManager()
# New release check information
self.new_release = None
# Get the core config
self.torrent_config = self.session.get_settings()
self.config = ConfigManager("core.conf")
self.config.save()
# Config needs to be ready for scanner
self.scanner = Scanner()
# If there was an interface value from the command line, use it, but
# store the one in the config so we can restore it on shutdown
self.__old_interface = None
if listen_interface:
if deluge.common.is_ip(listen_interface):
self.__old_interface = self.config["listen_interface"]
self.config["listen_interface"] = listen_interface
else:
log.error("Invalid listen interface (must be IP Address): %s", listen_interface)
def start(self):
"""Starts the core"""
# New release check information
self.__new_release = None
def stop(self):
log.debug("Core stopping...")
# Save the libtorrent session state
self.__save_session_state()
# We stored a copy of the old interface value
if self.__old_interface:
self.config["listen_interface"] = self.__old_interface
# Make sure the config file has been saved
self.config.save()
def shutdown(self):
pass
def __save_session_state(self):
"""Saves the libtorrent session state"""
filename = "session.state"
filepath = get_config_dir(filename)
filepath_bak = filepath + ".bak"
filepath_tmp = filepath + ".tmp"
try:
if os.path.isfile(filepath):
log.debug("Creating backup of %s at: %s", filename, filepath_bak)
shutil.copy2(filepath, filepath_bak)
except IOError as ex:
log.error("Unable to backup %s to %s: %s", filepath, filepath_bak, ex)
else:
log.info("Saving the %s at: %s", filename, filepath)
try:
with open(filepath_tmp, "wb") as _file:
_file.write(lt.bencode(self.session.save_state()))
_file.flush()
os.fsync(_file.fileno())
shutil.move(filepath_tmp, filepath)
except (IOError, EOFError) as ex:
log.error("Unable to save %s: %s", filename, ex)
if os.path.isfile(filepath_bak):
log.info("Restoring backup of %s from: %s", filename, filepath_bak)
shutil.move(filepath_bak, filepath)
def __load_session_state(self):
"""Loads the libtorrent session state"""
filename = "session.state"
filepath = get_config_dir(filename)
filepath_bak = filepath + ".bak"
for _filepath in (filepath, filepath_bak):
log.info("Opening %s for load: %s", filename, _filepath)
try:
with open(_filepath, "rb") as _file:
state = lt.bdecode(_file.read())
except (IOError, EOFError, RuntimeError) as ex:
log.warning("Unable to load %s: %s", _filepath, ex)
else:
log.info("Successfully loaded %s: %s", filename, _filepath)
self.session.load_state(state)
return
def get_new_release(self):
log.debug("get_new_release")
from urllib2 import urlopen, URLError
try:
self.new_release = urlopen("http://download.deluge-torrent.org/version-1.0").read().strip()
except URLError as ex:
log.debug("Unable to get release info from website: %s", ex)
return
self.check_new_release()
def check_new_release(self):
if self.new_release:
log.debug("new_release: %s", self.new_release)
if deluge.common.VersionSplit(self.new_release) > deluge.common.VersionSplit(deluge.common.get_version()):
component.get("EventManager").emit(NewVersionAvailableEvent(self.new_release))
return self.new_release
return False
# Exported Methods
@export
def start_scan(self, scan_dir):
return self.scanner.scan(scan_dir, False)
@export
def add_torrent_file(self, filename, filedump, options):
"""Adds a torrent file to the session.
Args:
filename (str): the filename of the torrent
filedump (str): A base64 encoded string of the torrent file contents
options (dict): The options to apply to the torrent on add
Returns:
str: The torrent_id or None
"""
try:
filedump = base64.decodestring(filedump)
except Exception as ex:
log.error("There was an error decoding the filedump string!")
log.exception(ex)
try:
torrent_id = self.torrentmanager.add(
filedump=filedump, options=options, filename=filename
)
except Exception as ex:
log.error("There was an error adding the torrent file %s", filename)
log.exception(ex)
torrent_id = None
return torrent_id
@export
def add_torrent_url(self, url, options, headers=None):
"""
Adds a torrent from a url. Deluge will attempt to fetch the torrent
from url prior to adding it to the session.
:param url: the url pointing to the torrent file
:type url: string
:param options: the options to apply to the torrent on add
:type options: dict
:param headers: any optional headers to send
:type headers: dict
:returns: a Deferred which returns the torrent_id as a str or None
"""
log.info("Attempting to add url %s", url)
def on_download_success(filename):
# We got the file, so add it to the session
f = open(filename, "rb")
data = f.read()
f.close()
try:
os.remove(filename)
except OSError as ex:
log.warning("Couldn't remove temp file: %s", ex)
return self.add_torrent_file(filename, base64.encodestring(data), options)
def on_download_fail(failure):
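            # Retry the download on HTTP redirects and partial-download errors;
            # any other failure is logged and passed back to the caller.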
if failure.check(twisted.web.error.PageRedirect):
new_url = urljoin(url, failure.getErrorMessage().split(" to ")[1])
result = download_file(
new_url, tempfile.mkstemp()[1], headers=headers,
force_filename=True
)
result.addCallbacks(on_download_success, on_download_fail)
elif failure.check(twisted.web.client.PartialDownloadError):
result = download_file(
url, tempfile.mkstemp()[1], headers=headers,
force_filename=True, allow_compression=False
)
result.addCallbacks(on_download_success, on_download_fail)
else:
# Log the error and pass the failure onto the client
log.error("Error occurred downloading torrent from %s", url)
log.error("Reason: %s", failure.getErrorMessage())
result = failure
return result
d = download_file(
url, tempfile.mkstemp()[1], headers=headers, force_filename=True
)
d.addCallbacks(on_download_success, on_download_fail)
return d
@export
def add_torrent_magnet(self, uri, options):
"""
Adds a torrent from a magnet link.
:param uri: the magnet link
:type uri: string
:param options: the options to apply to the torrent on add
:type options: dict
:returns: the torrent_id
:rtype: string
"""
log.debug("Attempting to add by magnet uri: %s", uri)
return self.torrentmanager.add(magnet=uri, options=options)
@export
def add_torrent_seed(self, filename, filedump, seedname, options):
"""Adds a torrent file to the session.
Args:
filename (str): the filename of the torrent
filedump (str): A base64 encoded string of the torrent file contents
seedname (str): the filename of the seed
options (dict): The options to apply to the torrent on add
Returns:
str: The torrent_id or None
"""
options["download_location"] = seedname
try:
filedump = base64.decodestring(filedump)
except Exception as ex:
log.error("There was an error decoding the filedump string!")
log.exception(ex)
try:
torrent_id = self.torrentmanager.add(
filedump=filedump, options=options, filename=filename
)
except Exception as ex:
log.error("There was an error adding the torrent file %s", filename)
log.exception(ex)
torrent_id = None
return torrent_id
@export
def remove_torrent(self, torrent_id, remove_data):
"""
Removes a torrent from the session.
:param torrent_id: the torrent_id of the torrent to remove
:type torrent_id: string
:param remove_data: if True, remove the data associated with this torrent
:type remove_data: boolean
:returns: True if removed successfully
:rtype: bool
:raises InvalidTorrentError: if the torrent_id does not exist in the session
"""
log.debug("Removing torrent %s from the core.", torrent_id)
return self.torrentmanager.remove(torrent_id, remove_data)
@export
def get_session_status(self, keys):
"""
        Gets the session status values for 'keys'. These keys are taken
        from libtorrent's session status.
See: http://www.rasterbar.com/products/libtorrent/manual.html#status
:param keys: the keys for which we want values
:type keys: list
:returns: a dictionary of {key: value, ...}
:rtype: dict
"""
status = {}
session_status = self.session.status()
for key in keys:
status[key] = getattr(session_status, key)
return status
@export
def get_cache_status(self):
"""
Returns a dictionary of the session's cache status.
:returns: the cache status
:rtype: dict
"""
status = self.session.get_cache_status()
cache = {}
for attr in dir(status):
if attr.startswith("_"):
continue
cache[attr] = getattr(status, attr)
# Add in a couple ratios
try:
cache["write_hit_ratio"] = float((cache["blocks_written"] -
cache["writes"])) / float(cache["blocks_written"])
except ZeroDivisionError:
cache["write_hit_ratio"] = 0.0
try:
cache["read_hit_ratio"] = float(cache["blocks_read_hit"]) / float(cache["blocks_read"])
except ZeroDivisionError:
cache["read_hit_ratio"] = 0.0
return cache
@export
def force_reannounce(self, torrent_ids):
log.debug("Forcing reannouncment to: %s", torrent_ids)
for torrent_id in torrent_ids:
self.torrentmanager[torrent_id].force_reannounce()
@export
def pause_torrent(self, torrent_ids):
log.debug("Pausing: %s", torrent_ids)
for torrent_id in torrent_ids:
if not self.torrentmanager[torrent_id].pause():
log.warning("Error pausing torrent %s", torrent_id)
@export
def connect_peer(self, torrent_id, ip, port):
log.debug("adding peer %s to %s", ip, torrent_id)
if not self.torrentmanager[torrent_id].connect_peer(ip, port):
log.warning("Error adding peer %s:%s to %s", ip, port, torrent_id)
@export
def move_storage(self, torrent_ids, dest):
log.debug("Moving storage %s to %s", torrent_ids, dest)
for torrent_id in torrent_ids:
if not self.torrentmanager[torrent_id].move_storage(dest):
log.warning("Error moving torrent %s to %s", torrent_id, dest)
@export
def pause_session(self):
"""Pause all torrents in the session"""
if not self.session.is_paused():
self.session.pause()
component.get("EventManager").emit(SessionPausedEvent())
@export
def resume_session(self):
"""Resume all torrents in the session"""
if self.session.is_paused():
self.session.resume()
component.get("EventManager").emit(SessionResumedEvent())
@export
def resume_torrent(self, torrent_ids):
log.debug("Resuming: %s", torrent_ids)
for torrent_id in torrent_ids:
self.torrentmanager[torrent_id].resume()
def create_torrent_status(self, torrent_id, torrent_keys, plugin_keys, diff=False, update=False, all_keys=False):
try:
status = self.torrentmanager[torrent_id].get_status(torrent_keys, diff, update=update, all_keys=all_keys)
except KeyError:
import traceback
traceback.print_exc()
            # Torrent was probably removed in the meantime
return {}
# Ask the plugin manager to fill in the plugin keys
if len(plugin_keys) > 0:
status.update(self.pluginmanager.get_status(torrent_id, plugin_keys))
return status
@export
def get_torrent_status(self, torrent_id, keys, diff=False):
torrent_keys, plugin_keys = self.torrentmanager.separate_keys(keys, [torrent_id])
return self.create_torrent_status(torrent_id, torrent_keys, plugin_keys, diff=diff, update=True,
all_keys=not keys)
@export
def get_torrents_status(self, filter_dict, keys, diff=False):
"""
        Returns all torrents, optionally filtered by filter_dict.
"""
torrent_ids = self.filtermanager.filter_torrent_ids(filter_dict)
d = self.torrentmanager.torrents_status_update(torrent_ids, keys, diff=diff)
def add_plugin_fields(args):
status_dict, plugin_keys = args
# Ask the plugin manager to fill in the plugin keys
if len(plugin_keys) > 0:
for key in status_dict.keys():
status_dict[key].update(self.pluginmanager.get_status(key, plugin_keys))
return status_dict
d.addCallback(add_plugin_fields)
return d
@export
def get_filter_tree(self, show_zero_hits=True, hide_cat=None):
"""
        Returns {field: [(value, count)]} for use in the sidebar(s).
"""
return self.filtermanager.get_filter_tree(show_zero_hits, hide_cat)
@export
def get_session_state(self):
"""Returns a list of torrent_ids in the session."""
# Get the torrent list from the TorrentManager
return self.torrentmanager.get_torrent_list()
@export
def get_config(self):
"""Get all the preferences as a dictionary"""
return self.config.config
@export
def get_config_value(self, key):
"""Get the config value for key"""
return self.config.get(key)
@export
def get_config_values(self, keys):
"""Get the config values for the entered keys"""
return dict((key, self.config.get(key)) for key in keys)
@export
def set_config(self, config):
"""Set the config with values from dictionary"""
# Load all the values into the configuration
for key in config.keys():
if isinstance(config[key], basestring):
config[key] = config[key].encode("utf8")
self.config[key] = config[key]
@export
def get_torrent_config_value(self, key):
"""Get the config value for key"""
        return self.torrent_config[key]
@export
def get_torrent_config(self):
"""Get all the preferences as a dictionary"""
return self.session.get_settings()
@export
def set_torrent_config(self, config):
"""Set the torrent settings with values from dictionary"""
# Load all the values into the configuration
for key in config.keys():
if isinstance(config[key], basestring):
config[key] = config[key].encode("utf8")
self.torrent_config[key] = config[key]
self.session.set_settings(self.torrent_config)
@export
def set_torrent_high_speed_seed(self):
"""Set the torrent setting to high performance seed"""
high_speed_settings = lt.high_performance_seed()
self.session.set_settings(high_speed_settings)
@export
def get_listen_port(self):
"""Returns the active listen port"""
return self.session.listen_port()
@export
def get_i2p_proxy(self):
"""Returns the active listen port"""
i2p_settings = self.session.i2p_proxy()
i2p_dict = {"hostname": i2p_settings.hostname, "port": i2p_settings.port}
return i2p_dict
@export
def get_proxy(self):
"""Returns the active listen port"""
proxy_settings = self.session.proxy()
proxy_dict = {
"type": int(proxy_settings.type),
"hostname": proxy_settings.hostname,
"username": proxy_settings.username,
"password": proxy_settings.password,
"port": proxy_settings.port,
"proxy_hostnames": proxy_settings.proxy_hostnames,
"proxy_peer_connections": proxy_settings.proxy_peer_connections
}
return proxy_dict
@export
def get_available_plugins(self):
"""Returns a list of plugins available in the core"""
return self.pluginmanager.get_available_plugins()
@export
def get_enabled_plugins(self):
"""Returns a list of enabled plugins in the core"""
return self.pluginmanager.get_enabled_plugins()
@export
def enable_plugin(self, plugin):
self.pluginmanager.enable_plugin(plugin)
return None
@export
def disable_plugin(self, plugin):
self.pluginmanager.disable_plugin(plugin)
return None
@export
def force_recheck(self, torrent_ids):
"""Forces a data recheck on torrent_ids"""
for torrent_id in torrent_ids:
self.torrentmanager[torrent_id].force_recheck()
@export
def set_torrent_options(self, torrent_ids, options):
"""Sets the torrent options for torrent_ids"""
for torrent_id in torrent_ids:
self.torrentmanager[torrent_id].set_options(options)
@export
def set_torrent_trackers(self, torrent_id, trackers):
"""Sets a torrents tracker list. trackers will be [{"url", "tier"}]"""
return self.torrentmanager[torrent_id].set_trackers(trackers)
@export
def set_torrent_max_connections(self, torrent_id, value):
# Deprecated method, use set_torrent_options instead
"""Sets a torrents max number of connections"""
return self.torrentmanager[torrent_id].set_max_connections(value)
@export
def set_torrent_max_upload_slots(self, torrent_id, value):
# Deprecated method, use set_torrent_options instead
"""Sets a torrents max number of upload slots"""
return self.torrentmanager[torrent_id].set_max_upload_slots(value)
@export
def set_torrent_max_upload_speed(self, torrent_id, value):
# Deprecated method, use set_torrent_options instead
"""Sets a torrents max upload speed"""
return self.torrentmanager[torrent_id].set_max_upload_speed(value)
@export
def set_torrent_max_download_speed(self, torrent_id, value):
# Deprecated method, use set_torrent_options instead
"""Sets a torrents max download speed"""
return self.torrentmanager[torrent_id].set_max_download_speed(value)
@export
def set_torrent_file_priorities(self, torrent_id, priorities):
# Deprecated method, use set_torrent_options instead
# Used by at least one 3rd party plugin:
"""Sets a torrents file priorities"""
return self.torrentmanager[torrent_id].set_file_priorities(priorities)
@export
def set_torrent_prioritize_first_last(self, torrent_id, value):
# Deprecated method, use set_torrent_options instead
"""Sets a higher priority to the first and last pieces"""
return self.torrentmanager[torrent_id].set_prioritize_first_last_pieces(value)
@export
def set_torrent_auto_managed(self, torrent_id, value):
# Deprecated method, use set_torrent_options instead
"""Sets the auto managed flag for queueing purposes"""
return self.torrentmanager[torrent_id].set_auto_managed(value)
@export
def set_torrent_stop_at_ratio(self, torrent_id, value):
# Deprecated method, use set_torrent_options instead
"""Sets the torrent to stop at 'stop_ratio'"""
return self.torrentmanager[torrent_id].set_stop_at_ratio(value)
@export
def set_torrent_stop_ratio(self, torrent_id, value):
# Deprecated method, use set_torrent_options instead
"""Sets the ratio when to stop a torrent if 'stop_at_ratio' is set"""
return self.torrentmanager[torrent_id].set_stop_ratio(value)
@export
def set_torrent_remove_at_ratio(self, torrent_id, value):
# Deprecated method, use set_torrent_options instead
"""Sets the torrent to be removed at 'stop_ratio'"""
return self.torrentmanager[torrent_id].set_remove_at_ratio(value)
@export
def set_torrent_move_completed(self, torrent_id, value):
# Deprecated method, use set_torrent_options instead
"""Sets the torrent to be moved when completed"""
return self.torrentmanager[torrent_id].set_move_completed(value)
@export
def set_torrent_move_completed_path(self, torrent_id, value):
# Deprecated method, use set_torrent_options instead
"""Sets the path for the torrent to be moved when completed"""
return self.torrentmanager[torrent_id].set_move_completed_path(value)
@export(AUTH_LEVEL_ADMIN)
def set_owner(self, torrent_ids, username):
"""Set's the torrent owner.
:param torrent_id: the torrent_id of the torrent to remove
:type torrent_id: string
:param username: the new owner username
:type username: string
:raises DelugeError: if the username is not known
"""
if not self.authmanager.has_account(username):
raise DelugeError("Username \"%s\" is not known." % username)
if isinstance(torrent_ids, basestring):
torrent_ids = [torrent_ids]
for torrent_id in torrent_ids:
self.torrentmanager[torrent_id].set_owner(username)
return None
@export
def get_path_size(self, path):
"""Returns the size of the file or folder 'path' and -1 if the path is
unaccessible (non-existent or insufficient privs)"""
return deluge.common.get_path_size(path)
@export
def create_torrent(self, path, tracker, piece_length, comment, target,
webseeds, private, created_by, trackers, add_to_session):
log.debug("creating torrent..")
threading.Thread(target=self._create_torrent_thread,
args=(
path,
tracker,
piece_length,
comment,
target,
webseeds,
private,
created_by,
trackers,
add_to_session)).start()
def _create_torrent_thread(self, path, tracker, piece_length, comment, target,
webseeds, private, created_by, trackers, add_to_session):
import deluge.metafile
deluge.metafile.make_meta_file(
path,
tracker,
piece_length,
comment=comment,
target=target,
webseeds=webseeds,
private=private,
created_by=created_by,
trackers=trackers)
log.debug("torrent created!")
if add_to_session:
options = {}
options["download_location"] = os.path.split(path)[0]
self.add_torrent_file(os.path.split(target)[1], open(target, "rb").read(), options)
@export
def upload_plugin(self, filename, filedump):
"""This method is used to upload new plugins to the daemon. It is used
when connecting to the daemon remotely and installing a new plugin on
        the client side. 'filedump' is a base64 encoded string of the plugin
        file contents, i.e. plugin_file.read()"""
try:
filedump = base64.decodestring(filedump)
except Exception as ex:
log.error("There was an error decoding the filedump string!")
log.exception(ex)
return
f = open(os.path.join(get_config_dir(), "plugins", filename), "wb")
f.write(filedump)
f.close()
component.get("CorePluginManager").scan_for_plugins()
@export
def rescan_plugins(self):
"""
Rescans the plugin folders for new plugins
"""
component.get("CorePluginManager").scan_for_plugins()
@export
def rename_files(self, torrent_id, filenames):
"""
Rename files in torrent_id. Since this is an asynchronous operation by
libtorrent, watch for the TorrentFileRenamedEvent to know when the
files have been renamed.
:param torrent_id: the torrent_id to rename files
:type torrent_id: string
:param filenames: a list of index, filename pairs
:type filenames: ((index, filename), ...)
:raises InvalidTorrentError: if torrent_id is invalid
"""
if torrent_id not in self.torrentmanager.torrents:
raise InvalidTorrentError("torrent_id is not in session")
self.torrentmanager[torrent_id].rename_files(filenames)
@export
def rename_folder(self, torrent_id, folder, new_folder):
"""
Renames the 'folder' to 'new_folder' in 'torrent_id'. Watch for the
TorrentFolderRenamedEvent which is emitted when the folder has been
renamed successfully.
:param torrent_id: the torrent to rename folder in
:type torrent_id: string
:param folder: the folder to rename
:type folder: string
:param new_folder: the new folder name
:type new_folder: string
:raises InvalidTorrentError: if the torrent_id is invalid
"""
if torrent_id not in self.torrentmanager.torrents:
raise InvalidTorrentError("torrent_id is not in session")
self.torrentmanager[torrent_id].rename_folder(folder, new_folder)
@export
def queue_top(self, torrent_ids):
log.debug("Attempting to queue %s to top", torrent_ids)
# torrent_ids must be sorted in reverse before moving to preserve order
for torrent_id in sorted(torrent_ids, key=self.torrentmanager.get_queue_position, reverse=True):
try:
# If the queue method returns True, then we should emit a signal
if self.torrentmanager.queue_top(torrent_id):
component.get("EventManager").emit(TorrentQueueChangedEvent())
except KeyError:
log.warning("torrent_id: %s does not exist in the queue", torrent_id)
@export
def queue_up(self, torrent_ids):
log.debug("Attempting to queue %s to up", torrent_ids)
torrents = ((self.torrentmanager.get_queue_position(torrent_id), torrent_id) for torrent_id in torrent_ids)
torrent_moved = True
prev_queue_position = None
# torrent_ids must be sorted before moving.
for queue_position, torrent_id in sorted(torrents):
# Move the torrent if and only if there is space (by not moving it we preserve the order)
if torrent_moved or queue_position - prev_queue_position > 1:
try:
torrent_moved = self.torrentmanager.queue_up(torrent_id)
except KeyError:
log.warning("torrent_id: %s does not exist in the queue", torrent_id)
# If the torrent moved, then we should emit a signal
if torrent_moved:
component.get("EventManager").emit(TorrentQueueChangedEvent())
else:
prev_queue_position = queue_position
@export
def queue_down(self, torrent_ids):
log.debug("Attempting to queue %s to down", torrent_ids)
torrents = ((self.torrentmanager.get_queue_position(torrent_id), torrent_id) for torrent_id in torrent_ids)
torrent_moved = True
prev_queue_position = None
# torrent_ids must be sorted before moving.
for queue_position, torrent_id in sorted(torrents, reverse=True):
# Move the torrent if and only if there is space (by not moving it we preserve the order)
if torrent_moved or prev_queue_position - queue_position > 1:
try:
torrent_moved = self.torrentmanager.queue_down(torrent_id)
except KeyError:
log.warning("torrent_id: %s does not exist in the queue", torrent_id)
# If the torrent moved, then we should emit a signal
if torrent_moved:
component.get("EventManager").emit(TorrentQueueChangedEvent())
else:
prev_queue_position = queue_position
@export
def queue_bottom(self, torrent_ids):
log.debug("Attempting to queue %s to bottom", torrent_ids)
# torrent_ids must be sorted before moving to preserve order
for torrent_id in sorted(torrent_ids, key=self.torrentmanager.get_queue_position):
try:
# If the queue method returns True, then we should emit a signal
if self.torrentmanager.queue_bottom(torrent_id):
component.get("EventManager").emit(TorrentQueueChangedEvent())
except KeyError:
log.warning("torrent_id: %s does not exist in the queue", torrent_id)
@export
def glob(self, path):
return glob.glob(path)
@export
def test_listen_port(self):
"""
Checks if the active port is open
:returns: True if the port is open, False if not
:rtype: bool
"""
from twisted.web.client import getPage
d = getPage("http://deluge-torrent.org/test_port.php?port=%s" %
self.get_listen_port(), timeout=30)
def on_get_page(result):
return bool(int(result))
def on_error(failure):
log.warning("Error testing listen port: %s", failure)
d.addCallback(on_get_page)
d.addErrback(on_error)
return d
@export
def get_free_space(self, path=None):
"""
Returns the number of free bytes at path
:param path: the path to check free space at, if None, use the default download location
:type path: string
:returns: the number of free bytes at path
:rtype: int
:raises InvalidPathError: if the path is invalid
"""
if not path:
path = self.config["download_location"]
try:
return deluge.common.free_space(path)
except InvalidPathError:
return -1
@export
def get_libtorrent_version(self):
"""
Returns the libtorrent version.
:returns: the version
:rtype: string
"""
return lt.version
@export
def get_completion_paths(self, args):
"""
Returns the available path completions for the input value.
"""
return path_chooser_common.get_completion_paths(args)
@export(AUTH_LEVEL_ADMIN)
def get_known_accounts(self):
return self.authmanager.get_known_accounts()
@export(AUTH_LEVEL_NONE)
def get_auth_levels_mappings(self):
return (AUTH_LEVELS_MAPPING, AUTH_LEVELS_MAPPING_REVERSE)
@export(AUTH_LEVEL_ADMIN)
def create_account(self, username, password, authlevel):
return self.authmanager.create_account(username, password, authlevel)
@export(AUTH_LEVEL_ADMIN)
def update_account(self, username, password, authlevel):
return self.authmanager.update_account(username, password, authlevel)
@export(AUTH_LEVEL_ADMIN)
def remove_account(self, username):
return self.authmanager.remove_account(username)
| gpl-3.0 | -1,125,341,462,122,659,600 | 36.441718 | 118 | 0.612458 | false |
blackshirt/dompetku | dompetku/handler/services.py | 1 | 3750 | #!/usr/bin/env python
#
# Copyright @2014 [email protected]
# Licensed: see Python license
"""Module to handle json services."""
import datetime
import json
import peewee
import tornado.web
import tornado.escape
from dompetku.handler import base
from dompetku.utils import jsonify
from dompetku.model import Transaksi, User
from dompetku.form import TransaksiForm
class TransaksiContainer(object):
def __init__(self, user):
self.user = user
def find_one(self, tid):
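        """Return the Transaksi instance with id `tid` belonging to self.user,
        or None if the user or the transaction does not exist."""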
cur_user = User.select().where(User.name == self.user)
if cur_user.exists():
user = cur_user.get()
trn = Transaksi.select().where(Transaksi.user == user.uid, Transaksi.tid == tid)
if trn.exists():
data = trn.get() # Transaksi instance
return data
return None
def find_data(self, *expr):
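        """Return a Transaksi query for self.user filtered by the given peewee
        expressions, or None if the user does not exist."""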
cur_user = User.select().where(User.name == self.user)
if cur_user.exists():
user = cur_user.get()
trn = Transaksi.select().where(Transaksi.user == user.uid, *expr)
return trn # Transaksi QueryResultWrapper
return None
class DataSources(TransaksiContainer):
def __init__(self, user):
self.user = user
super().__init__(self.user)
def get_one(self, tid):
data = self.find_one(tid)
if data is not None:
results = {
'tid': data.tid,
'user': data.user.name,
'info': data.info,
'amount': data.amount,
'transdate': data.transdate,
'memo': data.memo
}
return results # dict of transaksi item
def get_data(self, *expr):
temporary = {}
results = []
data = self.find_data(*expr)
for item in data:
temporary = {
'tid': item.tid,
'user': item.user.name,
'info': item.info,
'transdate': item.transdate,
'amount': item.amount,
'memo': item.memo
}
results.append(temporary)
return results # list of dict of transaksi item
class ApiTransactions(base.BaseHandler):
def initialize(self):
self.dsc = DataSources(self.current_user)
@tornado.web.authenticated
def get(self, *kondisi):
if kondisi:
data = self.dsc.get_data(*kondisi)
else:
# get data bulan sekarang
today = datetime.date.today()
cur_month = today.month
expr = (Transaksi.transdate.month == cur_month,)
data = self.dsc.get_data(expr)
self.write(jsonify(data))
def post(self):
data = tornado.escape.json_decode(self.request.body)
info = data.get('info')
amount = data.get('amount')
memo = data.get('memo')
try:
active_user = User.get(User.name == self.current_user)
except peewee.DoesNotExist:
active_user = None
return
item = Transaksi.insert(info = info,
amount=amount,
tipe=10,
user=active_user.uid,
memo=memo )
last_id = item.execute()
transaksi = Transaksi.get(Transaksi.tid == last_id)
response = {'info': transaksi.info,
'user': transaksi.user.name,
'amount': transaksi.amount,
'memo': transaksi.memo,
'transdate': transaksi.transdate}
self.write(jsonify(response))
| bsd-2-clause | -4,487,325,386,750,957,000 | 29.241935 | 92 | 0.524267 | false |
spel-uchile/SUCHAI-Flight-Software | sandbox/log_parser.py | 1 | 1956 | import re
import argparse
import pandas as pd
# General expressions
re_error = re.compile(r'\[ERROR\]\[(\d+)\]\[(\w+)\](.+)')
re_warning = re.compile(r'\[WARN \]\[(\d+)\]\[(\w+)\](.+)')
re_info = re.compile(r'\[INFO \]\[(\d+)\]\[(\w+)\](.+)')
re_debug = re.compile(r'\[DEBUG\]\[(\d+)\]\[(\w+)\](.+)')
re_verbose = re.compile(r'\[VERB \]\[(\d+)\]\[(\w+)\](.+)')
# Specific expressions
re_cmd_run = re.compile(r'\[INFO \]\[(\d+)]\[Executer\] Running the command: (.+)')
re_cmd_result = re.compile(r'\[INFO \]\[(\d+)]\[Executer\] Command result: (\d+)')
def get_parameters():
"""
Parse script arguments
"""
parser = argparse.ArgumentParser()
# General expressions
parser.add_argument('file', type=str, help="Log file")
parser.add_argument('--error', action="store_const", const=re_error)
parser.add_argument('--warning', action="store_const", const=re_warning)
parser.add_argument('--info', action="store_const", const=re_info)
parser.add_argument('--debug', action="store_const", const=re_debug)
parser.add_argument('--verbose', action="store_const", const=re_verbose)
# Specific expressions
parser.add_argument('--cmd-run', action="store_const", const=re_cmd_run)
parser.add_argument('--cmd-result', action="store_const", const=re_cmd_result)
return parser.parse_args()
def parse_text(text, regexp):
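    """Return all matches of `regexp` found in `text`."""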
return regexp.findall(text)
def save_parsed(logs, file, format=None):
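    """Save the parsed log entries to `file` as CSV (via pandas)."""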
df = pd.DataFrame(logs)
# print(df)
df.to_csv(file)
if __name__ == "__main__":
args = get_parameters()
print("Reading file {}...".format(args.file))
with open(args.file) as logfile:
text = logfile.read()
args = vars(args)
print(args)
for type, regexp in args.items():
if type is not "file" and regexp is not None:
print("Parsing {}...", type)
logs = parse_text(text, regexp)
save_parsed(logs, args["file"]+type+".csv")
| gpl-3.0 | 7,759,135,881,385,060,000 | 30.548387 | 83 | 0.599182 | false |
pombreda/ruffus | ruffus/test/test_verbosity.py | 1 | 8627 | #!/usr/bin/env python
from __future__ import print_function
"""
test_verbosity.py
"""
temp_dir = "test_verbosity/"
import unittest
import os
import sys
# add grandparent to search path for testing
grandparent_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), "..", ".."))
sys.path.insert(0, grandparent_dir)
# module name = script name without extension
module_name = os.path.splitext(os.path.basename(__file__))[0]
# funky code to import by file name
parent_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
ruffus_name = os.path.basename(parent_dir)
ruffus = list(map(__import__, [ruffus_name]))[0]
import shutil
try:
from StringIO import StringIO
except:
from io import StringIO
import re
ruffus = __import__ (ruffus_name)
for attr in "pipeline_run", "pipeline_printout", "suffix", "transform", "split", "merge", "dbdict", "follows", "mkdir", "originate", "Pipeline":
globals()[attr] = getattr (ruffus, attr)
RethrownJobError = ruffus.ruffus_exceptions.RethrownJobError
RUFFUS_HISTORY_FILE = ruffus.ruffus_utility.RUFFUS_HISTORY_FILE
CHECKSUM_FILE_TIMESTAMPS = ruffus.ruffus_utility.CHECKSUM_FILE_TIMESTAMPS
#---------------------------------------------------------------
# create initial files
#
@mkdir(temp_dir + 'data/scratch/lg/what/one/two/three/')
@originate([ [temp_dir + 'data/scratch/lg/what/one/two/three/job1.a.start', temp_dir + 'job1.b.start'],
[temp_dir + 'data/scratch/lg/what/one/two/three/job2.a.start', temp_dir + 'job2.b.start'],
[temp_dir + 'data/scratch/lg/what/one/two/three/job3.a.start', temp_dir + 'job3.b.start'] ])
def create_initial_file_pairs(output_files):
# create both files as necessary
for output_file in output_files:
with open(output_file, "w") as oo: pass
#---------------------------------------------------------------
# first task
@transform(create_initial_file_pairs, suffix(".start"), ".output.1")
def first_task(input_files, output_file):
with open(output_file, "w"): pass
#---------------------------------------------------------------
# second task
@transform(first_task, suffix(".output.1"), ".output.2")
def second_task(input_files, output_file):
with open(output_file, "w"): pass
test_pipeline = Pipeline("test")
test_pipeline.originate(output = [ [temp_dir + 'data/scratch/lg/what/one/two/three/job1.a.start', temp_dir + 'job1.b.start'],
[temp_dir + 'data/scratch/lg/what/one/two/three/job2.a.start', temp_dir + 'job2.b.start'],
[temp_dir + 'data/scratch/lg/what/one/two/three/job3.a.start', temp_dir + 'job3.b.start'] ],
task_func = create_initial_file_pairs)
test_pipeline.transform(task_func = first_task, input = create_initial_file_pairs, filter = suffix(".start"), output = ".output.1")
test_pipeline.transform(input = first_task, filter = suffix(".output.1"), output = ".output.2", task_func= second_task)
decorator_syntax = 0
oop_syntax = 1
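# verbose_abbreviated_path controls how ruffus shortens file paths in printout:
# N > 0 keeps only the last N path components (prefixing ".../" when the path
# was shortened), 0 expands paths to absolute form, and a negative value
# truncates each path to roughly that many characters, marking the cut with
# "<???>". The tests below exercise each of these modes.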
class Test_verbosity(unittest.TestCase):
#___________________________________________________________________________
#
# test_printout_abbreviated_path1
#___________________________________________________________________________
def test_printout_abbreviated_path1(self):
"""Input file exists, output doesn't exist"""
for syntax in decorator_syntax, oop_syntax:
s = StringIO()
if syntax == oop_syntax:
test_pipeline.printout(s, [second_task], verbose = 5, verbose_abbreviated_path = 1)
else:
pipeline_printout(s, [second_task], verbose = 5, verbose_abbreviated_path = 1, wrap_width = 500, pipeline= "main")
ret = s.getvalue()
self.assertTrue(re.search('Job needs update:.*Missing files.*'
'\[\.\.\./job2\.a\.start, test_verbosity/job2\.b\.start, \.\.\./job2.a.output.1\]', ret, re.DOTALL) is not None)
#___________________________________________________________________________
#
# test_printout_abbreviated_path2
#___________________________________________________________________________
def test_printout_abbreviated_path2(self):
"""Input file exists, output doesn't exist"""
for syntax in decorator_syntax, oop_syntax:
s = StringIO()
if syntax == oop_syntax:
test_pipeline.printout(s, [second_task], verbose = 5, verbose_abbreviated_path = 2, wrap_width = 500)
else:
pipeline_printout(s, [second_task], verbose = 5, verbose_abbreviated_path = 2, wrap_width = 500, pipeline= "main")
ret = s.getvalue()
self.assertTrue('[.../three/job1.a.start, test_verbosity/job1.b.start, .../three/job1.a.output.1]' in ret)
#___________________________________________________________________________
#
# test_printout_abbreviated_path3
#___________________________________________________________________________
def test_printout_abbreviated_path3(self):
"""Input file exists, output doesn't exist"""
for syntax in decorator_syntax, oop_syntax:
s = StringIO()
if syntax == oop_syntax:
test_pipeline.printout(s, [second_task], verbose = 5, verbose_abbreviated_path = 3, wrap_width = 500)
else:
pipeline_printout(s, [second_task], verbose = 5, verbose_abbreviated_path = 3, wrap_width = 500, pipeline= "main")
ret = s.getvalue()
self.assertTrue('[.../two/three/job1.a.start, test_verbosity/job1.b.start, .../two/three/job1.a.output.1]' in s.getvalue())
#___________________________________________________________________________
#
# test_printout_abbreviated_path9
#___________________________________________________________________________
def test_printout_abbreviated_path9(self):
"""Input file exists, output doesn't exist"""
for syntax in decorator_syntax, oop_syntax:
s = StringIO()
if syntax == oop_syntax:
test_pipeline.printout(s, [second_task], verbose = 5, verbose_abbreviated_path = 9, wrap_width = 500)
else:
pipeline_printout(s, [second_task], verbose = 5, verbose_abbreviated_path = 9, wrap_width = 500, pipeline= "main")
ret = s.getvalue()
self.assertTrue('[%sdata/scratch/lg/what/one/two/three/job2.a.start, test_verbosity/job2.b.start,' % temp_dir in ret)
#___________________________________________________________________________
#
# test_printout_abbreviated_path0
#___________________________________________________________________________
def test_printout_abbreviated_path0(self):
"""Input file exists, output doesn't exist"""
for syntax in decorator_syntax, oop_syntax:
s = StringIO()
if syntax == oop_syntax:
test_pipeline.printout(s, [second_task], verbose = 5, verbose_abbreviated_path = 0, wrap_width = 500)
else:
pipeline_printout(s, [second_task], verbose = 5, verbose_abbreviated_path = 0, wrap_width = 500, pipeline= "main")
ret = s.getvalue()
path_str = os.path.abspath('%sdata/scratch/lg/what/one/two/three/job2.a.start' % temp_dir)
path_str = '[[%s' % path_str
self.assertTrue(path_str in ret)
self.assertTrue(temp_dir + 'job2.b.start]' in ret)
#___________________________________________________________________________
#
# test_printout_abbreviated_path_minus_60
#___________________________________________________________________________
def test_printout_abbreviated_path_minus_60(self):
"""Input file exists, output doesn't exist"""
for syntax in decorator_syntax, oop_syntax:
s = StringIO()
if syntax == oop_syntax:
test_pipeline.printout(s, [second_task], verbose = 5, verbose_abbreviated_path = -60, wrap_width = 500)
else:
pipeline_printout(s, [second_task], verbose = 5, verbose_abbreviated_path = -60, wrap_width = 500, pipeline= "main")
ret = s.getvalue()
self.assertTrue('[<???> ne/two/three/job2.a.start, test_verbosity/job2.b.start]' in ret)
#
# Necessary to protect the "entry point" of the program under windows.
# see: http://docs.python.org/library/multiprocessing.html#multiprocessing-programming
#
if __name__ == '__main__':
unittest.main()
| mit | 8,074,170,189,742,017,000 | 44.405263 | 144 | 0.534833 | false |
ewilazarus/snnm | snnm.py | 1 | 2886 | #!/usr/bin/python
"""
snnm
~~~~
This module contains the source code for `snnm`
Snnm is a utility tool created to fetch synonyms for a given expression from
the web and print them to the console.
"""
import bs4
import click
import requests
BASE_URL = 'http://www.thesaurus.com/browse/'
def _fetch_html(expression):
"""
Returns the HTML containing the synonyms for the given expression
"""
response = requests.get(BASE_URL + expression)
response.raise_for_status()
return response.text
def _parse_html(html):
"""
Returns a parsed list of synonyms out of a given HTML
"""
parser = bs4.BeautifulSoup(html, 'html.parser')
synonyms = []
divs = parser.find_all('div', class_='relevancy-list')
for div in divs:
spans = div.find_all('span', class_='text')
synonyms += [str(span.string) for span in spans]
return synonyms
def fetch_synonyms(expression):
"""
Returns a list of synonyms for a given expression
"""
try:
return _parse_html(_fetch_html(expression))
except requests.exceptions.HTTPError:
return []
def clean(synonyms):
"""
Returns the deduped, sorted list of synonyms
"""
deduped_synonyms = list(set([s.strip() for s in synonyms]))
deduped_synonyms.sort()
return deduped_synonyms
def print_synonyms_ugly(synonyms):
"""
Prints the list of synonyms to the screen
"""
for synonym in synonyms:
print(synonym)
def print_synonyms(synonyms):
"""
Prints the list of synonyms to the screen, using colors and breakpoints
"""
if not synonyms:
click.secho('-- NO RESULTS --', fg='red')
click.echo()
else:
height = click.get_terminal_size()[1] - 3
batch = [synonyms[i:i+height] for i in range(0, len(synonyms), height)]
for synonyms in batch:
for synonym in synonyms:
click.secho(synonym, fg='yellow')
click.echo()
if batch.index(synonyms) != len(batch) - 1:
click.echo('Press any key to continue ...', nl=False)
key = click.getchar()
if key == '\x03':
raise KeyboardInterrupt()
click.echo()
@click.command(name='snnm')
@click.argument('expression')
@click.option('-u', '--ugly-output', is_flag=True)
def main(expression, ugly_output):
"""
List synonyms for an expression
"""
try:
if not ugly_output:
click.echo('Synonyms for {}:'.format(click.style(expression,
fg='blue')))
synonyms = clean(fetch_synonyms(expression))
if ugly_output:
print_synonyms_ugly(synonyms)
else:
print_synonyms(synonyms)
except KeyboardInterrupt:
pass
if __name__ == '__main__':
main()
| mit | -3,771,467,267,364,749,000 | 24.767857 | 79 | 0.592862 | false |
guillaume-philippon/aquilon | tests/broker/test_del_virtual_switch.py | 1 | 2507 | #!/usr/bin/env python
# -*- cpy-indent-level: 4; indent-tabs-mode: nil -*-
# ex: set expandtab softtabstop=4 shiftwidth=4:
#
# Copyright (C) 2009,2010,2012,2013,2014,2015,2016 Contributor
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module for testing the del network device command."""
import unittest
if __name__ == "__main__":
import utils
utils.import_depends()
from brokertest import TestBrokerCommand
class TestDelVirtualSwitch(TestBrokerCommand):
def test_100_unregister_pg_tag(self):
self.noouttest(["unbind_port_group", "--virtual_switch", "utvswitch",
"--tag", "710"])
def test_105_verify_pg_gone(self):
command = ["show_virtual_switch", "--virtual_switch", "utvswitch"]
out = self.commandtest(command)
self.matchclean(out, "Port Group", command)
def test_110_del_utvswitch(self):
command = ["del_virtual_switch", "--virtual_switch", "utvswitch"]
self.noouttest(command)
def test_115_verify_utvswitch(self):
command = ["show_virtual_switch", "--virtual_switch", "utvswitch"]
out = self.notfoundtest(command)
self.matchoutput(out, "Virtual Switch utvswitch not found.", command)
def test_120_del_utvswitch2(self):
command = ["del_virtual_switch", "--virtual_switch", "utvswitch2"]
self.noouttest(command)
def test_130_del_camelcase(self):
self.check_plenary_exists("virtualswitchdata", "camelcase")
self.noouttest(["del_virtual_switch", "--virtual_switch", "CaMeLcAsE"])
self.check_plenary_gone("virtualswitchdata", "camelcase")
def test_200_del_again(self):
command = ["del_virtual_switch", "--virtual_switch", "utvswitch"]
out = self.notfoundtest(command)
self.matchoutput(out, "Virtual Switch utvswitch not found.", command)
if __name__ == '__main__':
suite = unittest.TestLoader().loadTestsFromTestCase(TestDelVirtualSwitch)
unittest.TextTestRunner(verbosity=2).run(suite)
| apache-2.0 | -7,692,059,405,420,377,000 | 38.171875 | 79 | 0.682489 | false |
vmalloc/gossip | gossip/utils.py | 2 | 2484 | import itertools
from .exceptions import CannotResolveDependencies
from .helpers import DONT_CARE, FIRST
def topological_sort_registrations(registrations, unconstrained_priority=DONT_CARE):
graph = _build_dependency_graph(registrations, unconstrained_priority=unconstrained_priority)
returned_indices = _topological_sort(range(len(registrations)), graph)
assert len(returned_indices) == len(registrations)
return [registrations[idx] for idx in returned_indices]
def _topological_sort(indices, graph):
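    """Kahn's algorithm: repeatedly emit a node with no remaining incoming
    edges and drop its outgoing edges, raising CannotResolveDependencies if
    any edges remain (i.e. the graph contains a cycle)."""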
independent = sorted(set(indices) - set(m for n, m in graph), reverse=True)
returned = []
while independent:
n = independent.pop()
returned.append(n)
for m in indices:
edge = (n, m)
if m == n:
assert edge not in graph
continue
if edge in graph:
graph.remove(edge)
# check if m is now independent
for edge in graph:
if edge[1] == m:
                    # not independent
break
else:
# no other incoming edges to m
independent.append(m)
if graph:
raise CannotResolveDependencies('Cyclic dependency detected')
return returned
def _build_dependency_graph(registrations, unconstrained_priority):
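    """Build a set of (provider_index, needer_index) edges linking every
    registration that provides a name to each registration that needs it.
    When unconstrained_priority is not DONT_CARE, registrations with no
    needs/provides are additionally ordered before (FIRST) or after all
    constrained registrations."""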
providers_by_name = {}
for index, registration in enumerate(registrations):
for name in registration.provides:
providers = providers_by_name.get(name)
if providers is None:
providers = providers_by_name[name] = []
providers.append(index)
graph = set()
for needer_index, registration in enumerate(registrations):
for need in registration.needs:
for provider_index in providers_by_name.get(need, []):
graph.add((provider_index, needer_index))
if unconstrained_priority != DONT_CARE:
caring_indices = set([idx for idx, r in enumerate(registrations) if r.needs or r.provides])
non_caring_indices = set(range(len(registrations))) - caring_indices
for caring_index, uncaring_index in itertools.product(caring_indices, non_caring_indices):
if unconstrained_priority == FIRST:
pair = (uncaring_index, caring_index)
else:
pair = (caring_index, uncaring_index)
graph.add(pair)
return graph
| bsd-3-clause | 8,597,037,287,050,342,000 | 36.636364 | 99 | 0.612721 | false |
BrewPi/brewpi-service | test/test_couchdb_client.py | 1 | 3284 | """
Tests brewblox_service.couchdb_client
"""
import pytest
from aiohttp import web
from aiohttp.client_exceptions import ClientResponseError
from brewblox_service import couchdb_client, http_client
TESTED = couchdb_client.__name__
SRV_URL = couchdb_client.COUCH_URL[len('http://'):]
DB_URL = '/sparkbase'
DOC_URL = '/sparkbase/sparkdoc'
@pytest.fixture
def app(app, mocker):
mocker.patch(TESTED + '.DB_RETRY_INTERVAL_S', 0.01)
http_client.setup(app)
couchdb_client.setup(app)
return app
@pytest.fixture
def cclient(app):
return couchdb_client.get_client(app)
async def test_client_read(app, client, cclient, aresponses):
# Blank database
aresponses.add(SRV_URL, '/', 'HEAD')
aresponses.add(SRV_URL, DB_URL, 'PUT')
aresponses.add(SRV_URL, DOC_URL, 'GET', web.json_response({}, status=404))
aresponses.add(SRV_URL, DOC_URL, 'PUT', web.json_response({'rev': 'rev_read'}))
assert await cclient.read('sparkbase', 'sparkdoc', [1, 2]) == ('rev_read', [1, 2])
# Retry contact server, content in database
for i in range(20):
aresponses.add(SRV_URL, '/', 'HEAD', web.json_response({}, status=404))
aresponses.add(SRV_URL, '/', 'HEAD')
aresponses.add(SRV_URL, DB_URL, 'PUT', web.json_response({}, status=412))
aresponses.add(SRV_URL, DOC_URL, 'GET', web.json_response({'_rev': 'rev_read', 'data': [2, 1]}))
aresponses.add(SRV_URL, DOC_URL, 'PUT', web.json_response({}, status=409))
assert await cclient.read('sparkbase', 'sparkdoc', []) == ('rev_read', [2, 1])
async def test_client_read_errors(app, client, cclient, aresponses):
with pytest.raises(ClientResponseError):
aresponses.add(SRV_URL, '/', 'HEAD')
aresponses.add(SRV_URL, DB_URL, 'PUT', web.json_response({}, status=404))
await cclient.read('sparkbase', 'sparkdoc', [])
with pytest.raises(ClientResponseError):
aresponses.add(SRV_URL, '/', 'HEAD')
aresponses.add(SRV_URL, DB_URL, 'PUT')
aresponses.add(SRV_URL, DOC_URL, 'PUT', web.json_response({}, status=404)) # unexpected
aresponses.add(SRV_URL, DOC_URL, 'GET', web.json_response({}, status=404))
await cclient.read('sparkbase', 'sparkdoc', [])
with pytest.raises(ClientResponseError):
aresponses.add(SRV_URL, '/', 'HEAD')
aresponses.add(SRV_URL, DB_URL, 'PUT')
aresponses.add(SRV_URL, DOC_URL, 'PUT', web.json_response({}, status=412))
aresponses.add(SRV_URL, DOC_URL, 'GET', web.json_response({}, status=500)) # unexpected
await cclient.read('sparkbase', 'sparkdoc', [])
with pytest.raises(ValueError):
aresponses.add(SRV_URL, '/', 'HEAD')
aresponses.add(SRV_URL, DB_URL, 'PUT')
# Either get or put must return an ok value
aresponses.add(SRV_URL, DOC_URL, 'PUT', web.json_response({}, status=409))
aresponses.add(SRV_URL, DOC_URL, 'GET', web.json_response({}, status=404))
await cclient.read('sparkbase', 'sparkdoc', [])
async def test_client_write(app, client, cclient, aresponses):
aresponses.add(
SRV_URL, f'{DOC_URL}?rev=revy', 'PUT',
web.json_response({'rev': 'rev_write'}), match_querystring=True)
assert await cclient.write('sparkbase', 'sparkdoc', 'revy', [1, 2]) == 'rev_write'
| gpl-3.0 | -3,470,771,546,410,322,400 | 39.04878 | 100 | 0.643423 | false |
philanthropy-u/edx-platform | openedx/core/djangoapps/user_authn/views/tests/test_login.py | 1 | 29762 | # coding:utf-8
"""
Tests for student activation and login
"""
import json
import unicodedata
import unittest
import ddt
import six
from django.conf import settings
from django.contrib.auth.models import User
from django.core import mail
from django.core.cache import cache
from django.http import HttpResponse, HttpResponseBadRequest
from django.test.client import Client
from django.test.utils import override_settings
from django.urls import NoReverseMatch, reverse
from mock import patch
from six import text_type
from six.moves import range
from openedx.core.djangoapps.external_auth.models import ExternalAuthMap
from openedx.core.djangoapps.password_policy.compliance import (
NonCompliantPasswordException,
NonCompliantPasswordWarning
)
from openedx.core.djangoapps.user_api.config.waffle import PREVENT_AUTH_USER_WRITES, waffle
from openedx.core.djangoapps.user_authn.cookies import jwt_cookies
from openedx.core.djangoapps.user_authn.tests.utils import setup_login_oauth_client
from openedx.core.djangolib.testing.utils import CacheIsolationTestCase
from student.tests.factories import RegistrationFactory, UserFactory, UserProfileFactory
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory
@ddt.ddt
class LoginTest(CacheIsolationTestCase):
"""
Test login_user() view
"""
ENABLED_CACHES = ['default']
LOGIN_FAILED_WARNING = 'Email or password is incorrect'
ACTIVATE_ACCOUNT_WARNING = 'In order to sign in, you need to activate your account'
username = 'test'
user_email = '[email protected]'
password = 'test_password'
def setUp(self):
"""Setup a test user along with its registration and profile"""
super(LoginTest, self).setUp()
self.user = UserFactory.build(username=self.username, email=self.user_email)
self.user.set_password(self.password)
self.user.save()
RegistrationFactory(user=self.user)
UserProfileFactory(user=self.user)
self.client = Client()
cache.clear()
try:
self.url = reverse('login_post')
except NoReverseMatch:
self.url = reverse('login')
def test_login_success(self):
response, mock_audit_log = self._login_response(
self.user_email, self.password, patched_audit_log='student.models.AUDIT_LOG'
)
self._assert_response(response, success=True)
self._assert_audit_log(mock_audit_log, 'info', [u'Login success', self.user_email])
@patch.dict("django.conf.settings.FEATURES", {'SQUELCH_PII_IN_LOGS': True})
def test_login_success_no_pii(self):
response, mock_audit_log = self._login_response(
self.user_email, self.password, patched_audit_log='student.models.AUDIT_LOG'
)
self._assert_response(response, success=True)
self._assert_audit_log(mock_audit_log, 'info', [u'Login success'])
self._assert_not_in_audit_log(mock_audit_log, 'info', [self.user_email])
def test_login_success_unicode_email(self):
unicode_email = u'test' + six.unichr(40960) + u'@edx.org'
self.user.email = unicode_email
self.user.save()
response, mock_audit_log = self._login_response(
unicode_email, self.password, patched_audit_log='student.models.AUDIT_LOG'
)
self._assert_response(response, success=True)
self._assert_audit_log(mock_audit_log, 'info', [u'Login success', unicode_email])
def test_last_login_updated(self):
old_last_login = self.user.last_login
self.test_login_success()
self.user.refresh_from_db()
assert self.user.last_login > old_last_login
def test_login_success_prevent_auth_user_writes(self):
with waffle().override(PREVENT_AUTH_USER_WRITES, True):
old_last_login = self.user.last_login
self.test_login_success()
self.user.refresh_from_db()
assert old_last_login == self.user.last_login
def test_login_fail_no_user_exists(self):
nonexistent_email = u'[email protected]'
response, mock_audit_log = self._login_response(
nonexistent_email,
self.password,
)
self._assert_response(response, success=False, value=self.LOGIN_FAILED_WARNING)
self._assert_audit_log(mock_audit_log, 'warning', [u'Login failed', u'Unknown user email', nonexistent_email])
@patch.dict("django.conf.settings.FEATURES", {'SQUELCH_PII_IN_LOGS': True})
def test_login_fail_no_user_exists_no_pii(self):
nonexistent_email = u'[email protected]'
response, mock_audit_log = self._login_response(
nonexistent_email,
self.password,
)
self._assert_response(response, success=False, value=self.LOGIN_FAILED_WARNING)
self._assert_audit_log(mock_audit_log, 'warning', [u'Login failed', u'Unknown user email'])
self._assert_not_in_audit_log(mock_audit_log, 'warning', [nonexistent_email])
def test_login_fail_wrong_password(self):
response, mock_audit_log = self._login_response(
self.user_email,
'wrong_password',
)
self._assert_response(response, success=False, value=self.LOGIN_FAILED_WARNING)
self._assert_audit_log(mock_audit_log, 'warning',
[u'Login failed', u'password for', self.user_email, u'invalid'])
@patch.dict("django.conf.settings.FEATURES", {'SQUELCH_PII_IN_LOGS': True})
def test_login_fail_wrong_password_no_pii(self):
response, mock_audit_log = self._login_response(self.user_email, 'wrong_password')
self._assert_response(response, success=False, value=self.LOGIN_FAILED_WARNING)
self._assert_audit_log(mock_audit_log, 'warning', [u'Login failed', u'password for', u'invalid'])
self._assert_not_in_audit_log(mock_audit_log, 'warning', [self.user_email])
@patch.dict("django.conf.settings.FEATURES", {'SQUELCH_PII_IN_LOGS': True})
def test_login_not_activated_no_pii(self):
# De-activate the user
self.user.is_active = False
self.user.save()
# Should now be unable to login
response, mock_audit_log = self._login_response(
self.user_email,
self.password
)
self._assert_response(response, success=False,
value="In order to sign in, you need to activate your account.")
self._assert_audit_log(mock_audit_log, 'warning', [u'Login failed', u'Account not active for user'])
self._assert_not_in_audit_log(mock_audit_log, 'warning', [u'test'])
def test_login_not_activated_with_correct_credentials(self):
"""
        Tests that when a user logs in with the correct credentials but an
        inactive account, the system sends an account activation email
        notification to the user.
"""
self.user.is_active = False
self.user.save()
response, mock_audit_log = self._login_response(
self.user_email,
self.password,
)
self._assert_response(response, success=False, value=self.ACTIVATE_ACCOUNT_WARNING)
self._assert_audit_log(mock_audit_log, 'warning', [u'Login failed', u'Account not active for user'])
@patch('openedx.core.djangoapps.user_authn.views.login._log_and_raise_inactive_user_auth_error')
def test_login_inactivated_user_with_incorrect_credentials(self, mock_inactive_user_email_and_error):
"""
        Tests that when a user logs in with incorrect credentials and an inactive
        account, the system does *not* send an account activation email
        notification to the user.
"""
nonexistent_email = '[email protected]'
self.user.is_active = False
self.user.save()
response, mock_audit_log = self._login_response(nonexistent_email, 'incorrect_password')
self.assertFalse(mock_inactive_user_email_and_error.called)
self._assert_response(response, success=False, value=self.LOGIN_FAILED_WARNING)
self._assert_audit_log(mock_audit_log, 'warning', [u'Login failed', u'Unknown user email', nonexistent_email])
def test_login_unicode_email(self):
unicode_email = self.user_email + six.unichr(40960)
response, mock_audit_log = self._login_response(
unicode_email,
self.password,
)
self._assert_response(response, success=False)
self._assert_audit_log(mock_audit_log, 'warning', [u'Login failed', unicode_email])
def test_login_unicode_password(self):
unicode_password = self.password + six.unichr(1972)
response, mock_audit_log = self._login_response(
self.user_email,
unicode_password,
)
self._assert_response(response, success=False)
self._assert_audit_log(mock_audit_log, 'warning',
[u'Login failed', u'password for', self.user_email, u'invalid'])
def test_logout_logging(self):
response, _ = self._login_response(self.user_email, self.password)
self._assert_response(response, success=True)
logout_url = reverse('logout')
with patch('student.models.AUDIT_LOG') as mock_audit_log:
response = self.client.post(logout_url)
self.assertEqual(response.status_code, 200)
self._assert_audit_log(mock_audit_log, 'info', [u'Logout', u'test'])
def test_login_user_info_cookie(self):
response, _ = self._login_response(self.user_email, self.password)
self._assert_response(response, success=True)
# Verify the format of the "user info" cookie set on login
cookie = self.client.cookies[settings.EDXMKTG_USER_INFO_COOKIE_NAME]
user_info = json.loads(cookie.value)
self.assertEqual(user_info["version"], settings.EDXMKTG_USER_INFO_COOKIE_VERSION)
self.assertEqual(user_info["username"], self.user.username)
# Check that the URLs are absolute
for url in user_info["header_urls"].values():
self.assertIn("http://testserver/", url)
def test_logout_deletes_mktg_cookies(self):
response, _ = self._login_response(self.user_email, self.password)
self._assert_response(response, success=True)
# Check that the marketing site cookies have been set
self.assertIn(settings.EDXMKTG_LOGGED_IN_COOKIE_NAME, self.client.cookies)
self.assertIn(settings.EDXMKTG_USER_INFO_COOKIE_NAME, self.client.cookies)
# Log out
logout_url = reverse('logout')
response = self.client.post(logout_url)
# Check that the marketing site cookies have been deleted
# (cookies are deleted by setting an expiration date in 1970)
for cookie_name in [settings.EDXMKTG_LOGGED_IN_COOKIE_NAME, settings.EDXMKTG_USER_INFO_COOKIE_NAME]:
cookie = self.client.cookies[cookie_name]
self.assertIn("01-Jan-1970", cookie.get('expires'))
@override_settings(
EDXMKTG_LOGGED_IN_COOKIE_NAME=u"unicode-logged-in",
EDXMKTG_USER_INFO_COOKIE_NAME=u"unicode-user-info",
)
def test_unicode_mktg_cookie_names(self):
# When logged in cookie names are loaded from JSON files, they may
# have type `unicode` instead of `str`, which can cause errors
# when calling Django cookie manipulation functions.
response, _ = self._login_response(self.user_email, self.password)
self._assert_response(response, success=True)
response = self.client.post(reverse('logout'))
expected = {
'target': '/',
}
self.assertDictContainsSubset(expected, response.context_data)
@patch.dict("django.conf.settings.FEATURES", {'SQUELCH_PII_IN_LOGS': True})
def test_logout_logging_no_pii(self):
response, _ = self._login_response(self.user_email, self.password)
self._assert_response(response, success=True)
logout_url = reverse('logout')
with patch('student.models.AUDIT_LOG') as mock_audit_log:
response = self.client.post(logout_url)
self.assertEqual(response.status_code, 200)
self._assert_audit_log(mock_audit_log, 'info', [u'Logout'])
self._assert_not_in_audit_log(mock_audit_log, 'info', [u'test'])
def test_login_ratelimited_success(self):
# Try (and fail) logging in with fewer attempts than the limit of 30
# and verify that you can still successfully log in afterwards.
for i in range(20):
password = u'test_password{0}'.format(i)
response, _audit_log = self._login_response(self.user_email, password)
self._assert_response(response, success=False)
# now try logging in with a valid password
response, _audit_log = self._login_response(self.user_email, self.password)
self._assert_response(response, success=True)
def test_login_ratelimited(self):
        # try logging in 30 times, the default limit on failed login attempts
        # within one 5-minute period before rate limiting kicks in
for i in range(30):
password = u'test_password{0}'.format(i)
self._login_response(self.user_email, password)
# check to see if this response indicates that this was ratelimited
response, _audit_log = self._login_response(self.user_email, 'wrong_password')
self._assert_response(response, success=False, value='Too many failed login attempts')
@patch.dict("django.conf.settings.FEATURES", {"DISABLE_SET_JWT_COOKIES_FOR_TESTS": False})
def test_login_refresh(self):
def _assert_jwt_cookie_present(response):
self.assertEqual(response.status_code, 200)
self.assertIn(jwt_cookies.jwt_refresh_cookie_name(), self.client.cookies)
setup_login_oauth_client()
response, _ = self._login_response(self.user_email, self.password)
_assert_jwt_cookie_present(response)
response = self.client.post(reverse('login_refresh'))
_assert_jwt_cookie_present(response)
@patch.dict("django.conf.settings.FEATURES", {'PREVENT_CONCURRENT_LOGINS': True})
def test_single_session(self):
creds = {'email': self.user_email, 'password': self.password}
client1 = Client()
client2 = Client()
response = client1.post(self.url, creds)
self._assert_response(response, success=True)
# Reload the user from the database
self.user = User.objects.get(pk=self.user.pk)
self.assertEqual(self.user.profile.get_meta()['session_id'], client1.session.session_key)
# second login should log out the first
response = client2.post(self.url, creds)
self._assert_response(response, success=True)
try:
# this test can be run with either lms or studio settings
# since studio does not have a dashboard url, we should
# look for another url that is login_required, in that case
url = reverse('dashboard')
except NoReverseMatch:
url = reverse('upload_transcripts')
response = client1.get(url)
# client1 will be logged out
self.assertEqual(response.status_code, 302)
@patch.dict("django.conf.settings.FEATURES", {'PREVENT_CONCURRENT_LOGINS': True})
def test_single_session_with_no_user_profile(self):
"""
        Assert that a user logging in via CAS (Central Authentication Service) is
        redirected to the dashboard in the case of LMS, or to upload_transcripts
        in the case of CMS
"""
user = UserFactory.build(username='tester', email='[email protected]')
user.set_password(self.password)
user.save()
# Assert that no profile is created.
self.assertFalse(hasattr(user, 'profile'))
creds = {'email': '[email protected]', 'password': self.password}
client1 = Client()
client2 = Client()
response = client1.post(self.url, creds)
self._assert_response(response, success=True)
# Reload the user from the database
user = User.objects.get(pk=user.pk)
# Assert that profile is created.
self.assertTrue(hasattr(user, 'profile'))
# second login should log out the first
response = client2.post(self.url, creds)
self._assert_response(response, success=True)
try:
# this test can be run with either lms or studio settings
# since studio does not have a dashboard url, we should
# look for another url that is login_required, in that case
url = reverse('dashboard')
except NoReverseMatch:
url = reverse('upload_transcripts')
response = client1.get(url)
# client1 will be logged out
self.assertEqual(response.status_code, 302)
@patch.dict("django.conf.settings.FEATURES", {'PREVENT_CONCURRENT_LOGINS': True})
def test_single_session_with_url_not_having_login_required_decorator(self):
        # access the logout url; since it does not have the login_required decorator,
        # it will avoid the redirect and go inside enforce_single_login
creds = {'email': self.user_email, 'password': self.password}
client1 = Client()
client2 = Client()
response = client1.post(self.url, creds)
self._assert_response(response, success=True)
# Reload the user from the database
self.user = User.objects.get(pk=self.user.pk)
self.assertEqual(self.user.profile.get_meta()['session_id'], client1.session.session_key)
# second login should log out the first
response = client2.post(self.url, creds)
self._assert_response(response, success=True)
url = reverse('logout')
response = client1.get(url)
self.assertEqual(response.status_code, 200)
def test_change_enrollment_400(self):
"""
Tests that a 400 in change_enrollment doesn't lead to a 404
and in fact just logs in the user without incident
"""
# add this post param to trigger a call to change_enrollment
extra_post_params = {"enrollment_action": "enroll"}
with patch('student.views.change_enrollment') as mock_change_enrollment:
mock_change_enrollment.return_value = HttpResponseBadRequest("I am a 400")
response, _ = self._login_response(
self.user_email, self.password, extra_post_params=extra_post_params,
)
response_content = json.loads(response.content)
self.assertIsNone(response_content["redirect_url"])
self._assert_response(response, success=True)
def test_change_enrollment_200_no_redirect(self):
"""
Tests "redirect_url" is None if change_enrollment returns a HttpResponse
with no content
"""
# add this post param to trigger a call to change_enrollment
extra_post_params = {"enrollment_action": "enroll"}
with patch('student.views.change_enrollment') as mock_change_enrollment:
mock_change_enrollment.return_value = HttpResponse()
response, _ = self._login_response(
self.user_email, self.password, extra_post_params=extra_post_params,
)
response_content = json.loads(response.content)
self.assertIsNone(response_content["redirect_url"])
self._assert_response(response, success=True)
@override_settings(PASSWORD_POLICY_COMPLIANCE_ROLLOUT_CONFIG={'ENFORCE_COMPLIANCE_ON_LOGIN': True})
def test_check_password_policy_compliance(self):
"""
Tests _enforce_password_policy_compliance succeeds when no exception is thrown
"""
enforce_compliance_path = 'openedx.core.djangoapps.password_policy.compliance.enforce_compliance_on_login'
with patch(enforce_compliance_path) as mock_check_password_policy_compliance:
mock_check_password_policy_compliance.return_value = HttpResponse()
response, _ = self._login_response(self.user_email, self.password)
response_content = json.loads(response.content)
self.assertTrue(response_content.get('success'))
@override_settings(PASSWORD_POLICY_COMPLIANCE_ROLLOUT_CONFIG={'ENFORCE_COMPLIANCE_ON_LOGIN': True})
def test_check_password_policy_compliance_exception(self):
"""
        Tests that _enforce_password_policy_compliance fails when an exception is thrown
"""
enforce_compliance_on_login = 'openedx.core.djangoapps.password_policy.compliance.enforce_compliance_on_login'
with patch(enforce_compliance_on_login) as mock_enforce_compliance_on_login:
mock_enforce_compliance_on_login.side_effect = NonCompliantPasswordException()
response, _ = self._login_response(
self.user_email,
self.password
)
response_content = json.loads(response.content)
self.assertFalse(response_content.get('success'))
self.assertEqual(len(mail.outbox), 1)
self.assertIn('Password reset', mail.outbox[0].subject)
@override_settings(PASSWORD_POLICY_COMPLIANCE_ROLLOUT_CONFIG={'ENFORCE_COMPLIANCE_ON_LOGIN': True})
def test_check_password_policy_compliance_warning(self):
"""
        Tests that _enforce_password_policy_compliance succeeds when a warning is thrown
"""
enforce_compliance_on_login = 'openedx.core.djangoapps.password_policy.compliance.enforce_compliance_on_login'
with patch(enforce_compliance_on_login) as mock_enforce_compliance_on_login:
mock_enforce_compliance_on_login.side_effect = NonCompliantPasswordWarning('Test warning')
response, _ = self._login_response(self.user_email, self.password)
response_content = json.loads(response.content)
self.assertIn('Test warning', self.client.session['_messages'])
self.assertTrue(response_content.get('success'))
@ddt.data(
('test_password', 'test_password', True),
(unicodedata.normalize('NFKD', u'Ṗŕệṿïệẅ Ṯệẍt'),
unicodedata.normalize('NFKC', u'Ṗŕệṿïệẅ Ṯệẍt'), False),
(unicodedata.normalize('NFKC', u'Ṗŕệṿïệẅ Ṯệẍt'),
unicodedata.normalize('NFKD', u'Ṗŕệṿïệẅ Ṯệẍt'), True),
(unicodedata.normalize('NFKD', u'Ṗŕệṿïệẅ Ṯệẍt'),
unicodedata.normalize('NFKD', u'Ṗŕệṿïệẅ Ṯệẍt'), False),
)
@ddt.unpack
def test_password_unicode_normalization_login(self, password, password_entered, login_success):
"""
Tests unicode normalization on user's passwords on login.
"""
self.user.set_password(password)
self.user.save()
response, _ = self._login_response(self.user.email, password_entered)
self._assert_response(response, success=login_success)
def _login_response(self, email, password, patched_audit_log=None, extra_post_params=None):
"""
Post the login info
"""
if patched_audit_log is None:
patched_audit_log = 'openedx.core.djangoapps.user_authn.views.login.AUDIT_LOG'
post_params = {'email': email, 'password': password}
if extra_post_params is not None:
post_params.update(extra_post_params)
with patch(patched_audit_log) as mock_audit_log:
result = self.client.post(self.url, post_params)
return result, mock_audit_log
def _assert_response(self, response, success=None, value=None):
"""
Assert that the response had status 200 and returned a valid
JSON-parseable dict.
If success is provided, assert that the response had that
value for 'success' in the JSON dict.
If value is provided, assert that the response contained that
value for 'value' in the JSON dict.
"""
self.assertEqual(response.status_code, 200)
try:
response_dict = json.loads(response.content)
except ValueError:
self.fail("Could not parse response content as JSON: %s"
% str(response.content))
if success is not None:
self.assertEqual(response_dict['success'], success)
if value is not None:
msg = ("'%s' did not contain '%s'" %
(unicode(response_dict['value']), unicode(value)))
self.assertIn(value, response_dict['value'], msg)
def _assert_audit_log(self, mock_audit_log, level, log_strings):
"""
Check that the audit log has received the expected call as its last call.
"""
method_calls = mock_audit_log.method_calls
name, args, _kwargs = method_calls[-1]
self.assertEquals(name, level)
self.assertEquals(len(args), 1)
format_string = args[0]
for log_string in log_strings:
self.assertIn(log_string, format_string)
def _assert_not_in_audit_log(self, mock_audit_log, level, log_strings):
"""
        Check that the audit log's last call does not contain the given strings.
"""
method_calls = mock_audit_log.method_calls
name, args, _kwargs = method_calls[-1]
self.assertEquals(name, level)
self.assertEquals(len(args), 1)
format_string = args[0]
for log_string in log_strings:
self.assertNotIn(log_string, format_string)
class ExternalAuthShibTest(ModuleStoreTestCase):
"""
Tests how login_user() interacts with ExternalAuth, in particular Shib
"""
def setUp(self):
super(ExternalAuthShibTest, self).setUp()
self.course = CourseFactory.create(
org='Stanford',
number='456',
display_name='NO SHIB',
user_id=self.user.id,
)
self.shib_course = CourseFactory.create(
org='Stanford',
number='123',
display_name='Shib Only',
enrollment_domain='shib:https://idp.stanford.edu/',
user_id=self.user.id,
)
self.user_w_map = UserFactory.create(email='[email protected]')
self.extauth = ExternalAuthMap(external_id='[email protected]',
external_email='[email protected]',
external_domain='shib:https://idp.stanford.edu/',
external_credentials="",
user=self.user_w_map)
self.user_w_map.save()
self.extauth.save()
self.user_wo_map = UserFactory.create(email='[email protected]')
self.user_wo_map.save()
@unittest.skipUnless(settings.FEATURES.get('AUTH_USE_SHIB'), "AUTH_USE_SHIB not set")
def test_login_page_redirect(self):
"""
Tests that when a shib user types their email address into the login page, they get redirected
to the shib login.
"""
response = self.client.post(reverse('login'), {'email': self.user_w_map.email, 'password': ''})
self.assertEqual(response.status_code, 200)
obj = json.loads(response.content)
self.assertEqual(obj, {
'success': False,
'redirect': reverse('shib-login'),
})
@unittest.skipUnless(settings.FEATURES.get('AUTH_USE_SHIB'), "AUTH_USE_SHIB not set")
def test_login_required_dashboard(self):
"""
        Tests the redirect when a @login_required view (the dashboard) is requested;
        this should always go to the normal login page, since there is no course context
"""
response = self.client.get(reverse('dashboard'))
self.assertEqual(response.status_code, 302)
self.assertEqual(response['Location'], '/login?next=/dashboard')
@unittest.skipUnless(settings.FEATURES.get('AUTH_USE_SHIB'), "AUTH_USE_SHIB not set")
def test_externalauth_login_required_course_context(self):
"""
        Tests the redirects when visiting a course-specific URL with @login_required.
        The redirect should vary by course, depending on its enrollment_domain
"""
target_url = reverse('courseware', args=[text_type(self.course.id)])
noshib_response = self.client.get(target_url, follow=True, HTTP_ACCEPT="text/html")
self.assertEqual(noshib_response.redirect_chain[-1],
('/login?next={url}'.format(url=target_url), 302))
self.assertContains(noshib_response, (u"Sign in or Register | {platform_name}"
.format(platform_name=settings.PLATFORM_NAME)))
self.assertEqual(noshib_response.status_code, 200)
target_url_shib = reverse('courseware', args=[text_type(self.shib_course.id)])
shib_response = self.client.get(**{'path': target_url_shib,
'follow': True,
'REMOTE_USER': self.extauth.external_id,
'Shib-Identity-Provider': 'https://idp.stanford.edu/',
'HTTP_ACCEPT': "text/html"})
# Test that the shib-login redirect page with ?next= and the desired page are part of the redirect chain
# The 'courseware' page actually causes a redirect itself, so it's not the end of the chain and we
# won't test its contents
self.assertEqual(shib_response.redirect_chain[-3],
('/shib-login/?next={url}'.format(url=target_url_shib), 302))
self.assertEqual(shib_response.redirect_chain[-2],
(target_url_shib, 302))
self.assertEqual(shib_response.status_code, 200)
| agpl-3.0 | -3,259,418,563,379,477,500 | 44.621538 | 118 | 0.643859 | false |
bruteforce1/cryptopals | set2/ch10/implement_aes_cbc.py | 1 | 2510 | #!/usr/bin/python3
"""
CBC mode is a block cipher mode that allows us to encrypt irregularly-
sized messages, despite the fact that a block cipher natively only
transforms individual blocks.
In CBC mode, each ciphertext block is added to the next plaintext block
before the next call to the cipher core.
The first plaintext block, which has no associated previous ciphertext
block, is added to a "fake 0th ciphertext block" called the
initialization vector, or IV.
Implement CBC mode by hand by taking the ECB function you wrote
earlier, making it encrypt instead of decrypt (verify this by
decrypting whatever you encrypt to test), and using your XOR function
from the previous exercise to combine them.
The file here is intelligible (somewhat) when CBC decrypted against
"YELLOW SUBMARINE" with an IV of all ASCII 0 (\x00\x00\x00 &c)
"""
import argparse
import os
import sys
from utils.cpset2 import aes_cbc, make_b64_printable
def main(filename, key, iv):
print('Input File: ' + str(filename))
print('Key: ' + str(key))
print('IV: ' + str(iv))
crypt = ''
if not os.path.isfile(filename):
print(filename + ' is not a valid file.')
return -1
with open(filename, 'r') as infile:
for line in infile:
crypt += line
ret = aes_cbc(crypt, key, iv, 0)
if ret:
print('Decrypted Contents in: ' + filename + '.dec')
with open(filename + '.dec', 'w') as tf:
tf.write(ret.decode('utf-8'))
un_ret = make_b64_printable(aes_cbc(ret, key, iv))
if un_ret:
print('Encrypted Contents in: ' + filename + '.enc')
with open(filename + '.enc', 'w') as tf:
tf.write(un_ret.decode('utf-8'))
return 0
print('Error.')
return -1
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description='Implements AES CBC encryption and decryption manually.')
parser.add_argument('-f', '--inputfile', help='opt. file encrypted \
with AES in CBC mode',
default='10.txt')
parser.add_argument('-i', '--iv', help='opt. 16 byte initialization \
vector',
default=chr(0) * 16)
parser.add_argument('-k', '--key', help='opt. 16 byte encryption or \
decryption key',
default='YELLOW SUBMARINE')
args = parser.parse_args()
sys.exit(main(args.inputfile, args.key, args.iv))
| mit | 5,468,502,557,935,550,000 | 34.352113 | 77 | 0.61753 | false |
charanpald/features | features/test/PrimalCCATest.py | 1 | 3226 |
import unittest
import numpy
import scipy.linalg
from features.PrimalCCA import PrimalCCA
from features.KernelCCA import KernelCCA
from kernel.LinearKernel import LinearKernel
import logging
class PrimalCCATest(unittest.TestCase):
def setUp(self):
numpy.seterr(all='ignore')
def testLearnModel(self):
numExamples = 50
numFeatures = 10
X = numpy.random.rand(numExamples, numFeatures)
Y = X
tau = 0.0
tol = 10**--6
cca = PrimalCCA(tau)
u, v, lmbdas = cca.learnModel(X, Y)
self.assertTrue(numpy.linalg.norm(u-v) < tol)
self.assertTrue(numpy.linalg.norm(lmbdas-numpy.ones(numFeatures)) < tol)
Y = X*2
cca = PrimalCCA(tau)
u, v, lmbdas = cca.learnModel(X, Y)
self.assertTrue(numpy.linalg.norm(u-v) < tol)
self.assertTrue(numpy.linalg.norm(lmbdas-numpy.ones(numFeatures)) < tol)
#Rotate X to form Y
Z = numpy.random.rand(numFeatures, numFeatures)
ZZ = numpy.dot(Z.T, Z)
(D, W) = scipy.linalg.eig(ZZ)
Y = numpy.dot(X, W)
u, v, lmbdas = cca.learnModel(X, Y)
self.assertTrue(numpy.linalg.norm(lmbdas-numpy.ones(numFeatures)) < tol)
def testProject(self):
#Test if it is the same as KCCA
numExamples = 50
numFeatures = 10
X = numpy.random.rand(numExamples, numFeatures)
Y = numpy.random.rand(numExamples, numFeatures)
tau = 0.0
tol = 10**--6
k = 5
cca = PrimalCCA(tau)
u, v, lmbdas = cca.learnModel(X, Y)
XU, YU = cca.project(X, Y, k)
kernel = LinearKernel()
kcca = KernelCCA(kernel, kernel, tau)
alpha, beta, lmbdas2 = kcca.learnModel(X, Y)
XU2, YU2 = kcca.project(X, Y, k)
#Seem to get an error in this for some reason
#self.assertTrue(numpy.linalg.norm(XU-XU2) < tol)
#self.assertTrue(numpy.linalg.norm(YU-YU2) < tol)
#Now try with different tau
tau = 0.5
cca = PrimalCCA(tau)
u, v, lmbdas = cca.learnModel(X, Y)
XU, YU = cca.project(X, Y, k)
kernel = LinearKernel()
kcca = KernelCCA(kernel, kernel, tau)
alpha, beta, lmbdas = kcca.learnModel(X, Y)
XU2, YU2 = kcca.project(X, Y, k)
self.assertTrue(numpy.linalg.norm(XU-XU2) < tol)
self.assertTrue(numpy.linalg.norm(YU-YU2) < tol)
self.assertTrue(numpy.linalg.norm(numpy.dot(XU.T, XU) - numpy.ones(k)) < tol)
self.assertTrue(numpy.linalg.norm(numpy.dot(YU.T, YU) - numpy.ones(k)) < tol)
def testGetY(self):
#Test if we can recover Y from X
numExamples = 10
numFeatures = 5
X = numpy.random.rand(numExamples, numFeatures)
Z = numpy.random.rand(numFeatures, numFeatures)
ZZ = numpy.dot(Z.T, Z)
(D, W) = scipy.linalg.eig(ZZ)
Y = numpy.dot(X, W)
tau = 0.0
cca = PrimalCCA(tau)
U, V, lmbdas = cca.learnModel(X, Y)
Yhat = X.dot(U).dot(V.T).dot(numpy.linalg.inv(numpy.dot(V, V.T)))
logging.debug((numpy.abs(Yhat- Y)))
if __name__ == '__main__':
unittest.main()
| gpl-3.0 | -873,620,248,228,570,200 | 27.298246 | 85 | 0.579045 | false |
myt00seven/svrg | cifar/alexnet_lasagne/lasagne-googlenet-master/googlenet/layers/bn.py | 1 | 4243 | import numpy as np
import theano.tensor as T
import theano
from lasagne import init # from .. import init
from lasagne import nonlinearities # from .. import nonlinearities
from lasagne.layers.base import Layer # from .base import Layer
__all__ = [
"BNLayer",
]
class BNLayer(Layer):
"""
    lasagne.layers.BNLayer(incoming, gamma=1.0, beta=0., nonlinearity=None, epsilon=1e-6, **kwargs)
A batch normalization layer.
Parameters
----------
incoming : a :class:`Layer` instance or a tuple
The layer feeding into this layer, or the expected input shape
    gamma : float
        Initial constant value for the per-channel scale parameters, which are
        broadcast over the batch (and any spatial) axes of the input.
    beta : float
        Initial constant value for the per-channel shift parameters, broadcast
        in the same way as gamma.
    nonlinearity : callable or None
        The nonlinearity that is applied to the layer activations. If None
        is provided, the layer will be linear.
    epsilon : float
        Small constant added to the batch variance for numerical stability.
Examples
--------
    >>> from lasagne.layers import InputLayer
>>> l_in = InputLayer((100, 20))
>>> l1 = BNLayer(l_in)
Notes
-----
    The input to this layer must be either a 4d tensor (batch, channels, rows,
    columns) or a 2d matrix (batch, features); the normalization statistics are
    computed per channel/feature over the remaining axes.
"""
def __init__(self, incoming, gamma=1.0, beta=0., nonlinearity=None, epsilon=1e-6,
**kwargs):
super(BNLayer, self).__init__(incoming, **kwargs)
self.nonlinearity = (nonlinearities.identity if nonlinearity is None
else nonlinearity)
# get output shape of incoming
#self.n_channels = self.input_shape[1]
#print self.input_shape
#raise NameError("Hi")
self.epsilon = epsilon
        if len(self.input_shape) == 4:
self.gamma = self.add_param(init.Constant(gamma), (self.input_shape[1],), name='gamma', regularizable=False).dimshuffle(('x',0,'x','x'))
self.beta = self.add_param(init.Constant(beta), (self.input_shape[1],), name='beta', regularizable=False).dimshuffle(('x',0,'x','x'))
        elif len(self.input_shape) == 2:
self.gamma = self.add_param(init.Constant(gamma), (self.input_shape[1],), name='gamma', regularizable=False).dimshuffle(('x',0))
self.beta = self.add_param(init.Constant(beta), (self.input_shape[1],), name='beta', regularizable=False).dimshuffle(('x',0))
else: # input should be 4d tensor or 2d matrix
raise ValueError('input of BNLayer should be 4d tensor or 2d matrix')
# done init
def get_output_shape_for(self, input_shape):
#return (input_shape[0], self.num_units)
return input_shape
def get_output_for(self, input, **kwargs):
        if input.ndim == 4: # 4d tensor
self.mean = T.mean(input, axis=[0, 2, 3], keepdims=True) #self.mean = T.mean(input, axis=[0, 2, 3]).dimshuffle(('x', 0, 'x', 'x'))
#self.var = T.std(input, axis=[0, 2, 3], keepdims=True)
self.var = T.sum(T.sqr(input - self.mean), axis=[0, 2, 3], keepdims=True) / np.array([self.input_shape[0] * self.input_shape[2] * self.input_shape[3]], dtype=theano.config.floatX)
else: # elif input.ndim is 2: # 2d matrix
self.mean = T.mean(input, axis=0, keepdims=True) #self.mean = T.mean(input, axis=0).dimshuffle(('x',0))
#self.var = T.std(input, axis=0, keepdims=True)
self.var = T.sum(T.sqr(input - self.mean), axis=0, keepdims=True) / np.array([self.input_shape[0]], dtype=theano.config.floatX)
activation = (input - self.mean) / T.sqrt(self.var + self.epsilon)
activation = self.gamma * activation + self.beta
return self.nonlinearity(activation)
| mit | 4,750,715,369,948,741,000 | 40.194175 | 191 | 0.634928 | false |
qrsforever/workspace | python/learn/thinkstats/rankit.py | 1 | 1807 | #!/usr/bin/python3
# -*- coding: utf-8 -*-
"""This file contains code for use with "Think Stats",
by Allen B. Downey, available from greenteapress.com
Copyright 2010 Allen B. Downey
License: GNU GPLv3 http://www.gnu.org/licenses/gpl.html
"""
import random
import thinkstats
import myplot
import matplotlib.pyplot as pyplot
def Sample(n=6):
"""Generates a sample from a standard normal variate.
n: sample size
Returns: list of n floats
"""
t = [random.normalvariate(0.0, 1.0) for i in range(n)]
t.sort()
return t
def Samples(n=6, m=1000):
"""Generates m samples with size n each.
n: sample size
m: number of samples
Returns: list of m samples
"""
t = [Sample(n) for i in range(m)]
return t
def EstimateRankits(n=6, m=1000):
"""Estimates the expected values of sorted random samples.
n: sample size
m: number of iterations
Returns: list of n rankits
"""
t = Samples(n, m)
t = zip(*t)
means = [thinkstats.Mean(x) for x in t]
return means
def MakeNormalPlot(ys, root=None, line_options={}, **options):
"""Makes a normal probability plot.
Args:
ys: sequence of values
line_options: dictionary of options for pyplot.plot
options: dictionary of options for myplot.Save
"""
# TODO: when n is small, generate a larger sample and desample
n = len(ys)
xs = [random.normalvariate(0.0, 1.0) for i in range(n)]
pyplot.clf()
pyplot.plot(sorted(xs), sorted(ys), 'b.', markersize=3, **line_options)
myplot.Save(root,
xlabel = 'Standard normal values',
legend=False,
**options)
def main():
means = EstimateRankits(84)
print(means)
if __name__ == "__main__":
main()
| mit | 151,881,123,661,949,600 | 21.308642 | 75 | 0.614278 | false |
baliga-lab/weeder_patched | python/seqtools.py | 1 | 3069 | HAMMING_MAX = 9999
def read_sequences_from_fasta_string(fasta_string):
"""reads the sequences contained in a FASTA string"""
lines = fasta_string.split('\n')
sequences = []
seqbuffer = ""
seqname = None
for line in lines:
line = line.strip()
if line.startswith('>'):
if len(seqbuffer) > 0:
sequences.append((seqname, seqbuffer))
seqbuffer = ""
seqname = line[1:]
elif line and len(line) > 0:
seqbuffer += line
# add the last line
if len(seqbuffer) > 0:
sequences.append((seqname, seqbuffer))
return sequences
def read_sequences_from_fasta_file(filepath):
"""Read the sequences from the specified FASTA file"""
with open(filepath) as inputfile:
fasta_string = inputfile.read()
return read_sequences_from_fasta_string(fasta_string)
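def revchar(c):
    # Hypothetical helper assumed by revcomp() below; it is not defined anywhere in
    # this snippet, so a minimal DNA complement table is sketched here to keep the
    # module self-contained.
    complements = {'a': 't', 't': 'a', 'c': 'g', 'g': 'c',
                   'A': 'T', 'T': 'A', 'C': 'G', 'G': 'C'}
    return complements.get(c, c)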
def revcomp(sequence):
"""compute the reverse complement of the input string"""
return "".join([revchar(c) for c in sequence[::-1]])
def overlap(str1, str2, checkreverse):
    """Return True if str1 and str2 overlap when shifted by one or two positions
    (in either direction); if checkreverse is set, the reversed str1 is also tested."""
result = False
overlapping = True
for l in range(1, 3):
for i in range(len(str1) - l):
if i >= len(str2) or str1[i + l] != str2[i]:
overlapping = False
break
if overlapping:
result = True
overlapping = True
for i in range(len(str1) - l):
if (i + l) >= len(str2) or str1[i] != str2[i + l]:
overlapping = False
break
if overlapping:
result = True
if checkreverse:
rev_result = overlap(str1[::-1], str2, False)
if rev_result:
result = True
return result
def hamming_distance(str1, str2, checkreverse):
    """Return the Hamming distance between str1 and str2 (HAMMING_MAX if the lengths
    differ or the strings are equal); if checkreverse is set, the distance of the
    reversed str1 is also computed and the smaller of the two is returned."""
dist_forward = 0
dist_reverse = HAMMING_MAX
if len(str1) != len(str2) or str1 == str2:
return HAMMING_MAX
for i in range(len(str1)):
if str1[i] != str2[i]:
dist_forward += 1
if not checkreverse:
return dist_forward
else:
        rev = str1[::-1]
        dist_reverse = 0  # reset the sentinel before counting reverse mismatches
for i in range(len(str1)):
if rev[i] != str2[i]:
dist_reverse += 1
if dist_reverse < dist_forward:
return dist_reverse
else:
return dist_forward
def inside(str1, str2, checkreverse):
    """Return True if str1 occurs as a substring of str2 when str2 is exactly two
    characters longer; if checkreverse is set, the reversed str1 is also tested."""
len1 = len(str1)
len2 = len(str2)
result = False
if (len2 - len1) != 2:
return False
for i in range(len2 - len1 + 1):
match = True
for j in range(i, i + len1):
if str1[j - i] != str2[j]:
match = False
break
if match:
result = True
if checkreverse:
rev_result = inside(str1[::-1], str2, False)
if rev_result:
result = True
return result
def char_to_int(c):
c = c.lower()
if c == 'a':
return 0;
elif c == 'c':
return 1;
elif c == 'g':
return 2;
elif c == 't':
return 3;
elif c == '$':
return 4;
else:
return -1;
| gpl-3.0 | -7,517,009,544,331,105,000 | 24.789916 | 62 | 0.525904 | false |
mohanprasath/Course-Work | data_analysis/uh_data_analysis_with_python/hy-data-analysis-with-python-spring-2020/part03-e05_correlation/test/test_correlation.py | 1 | 2795 | #!/usr/bin/env python3
import unittest
from unittest.mock import patch
from tmc import points
from tmc.utils import load, get_out
module_name="src.correlation"
correlations = load(module_name, "correlations")
lengths = load(module_name, "lengths")
def patch_name(m, d):
    """Return the shortest patch target in module `m` for the dotted name `d`,
    trying progressively longer suffixes of `d` until one resolves."""
import importlib
parts=d.split(".")
try:
getattr(importlib.import_module(m), parts[-1])
p=".".join([m, parts[-1]])
except ModuleNotFoundError:
raise
except AttributeError:
if len(parts) == 1:
raise
try:
getattr(importlib.import_module(m), parts[-2])
p=".".join([m] + parts[-2:])
except AttributeError:
if len(parts) == 2:
raise
getattr(importlib.import_module(m), parts[-3])
p=".".join([m] + parts[-3:])
return p
class Correlation(unittest.TestCase):
@points('p03-05.1')
def test_lengths(self):
result = lengths()
self.assertAlmostEqual(result, 0.8717537758865832, places=4, msg="Wrong correlation!")
@points('p03-05.1')
def test_lengths_calls(self):
with patch(patch_name(module_name, "scipy.stats.pearsonr")) as pcorr:
result = lengths()
pcorr.assert_called()
@points('p03-05.2')
def test_correlations(self):
result = correlations()
n, m = result.shape
for r in range(n):
for c in range(r):
self.assertAlmostEqual(result[r,c], result[c,r], places=4,
msg="The correlation matrix is not symmetric!")
self.assertAlmostEqual(result[r,r], 1, places=4, msg="Values on the diagonal should be one!")
self.assertAlmostEqual(result[0,1], -0.11756978, places=4,
msg="Incorrect value in position [0,1]!")
self.assertAlmostEqual(result[0,2], 0.87175378, places=4,
msg="Incorrect value in position [0,2]!")
self.assertAlmostEqual(result[0,3], 0.81794113, places=4,
msg="Incorrect value in position [0,3]!")
self.assertAlmostEqual(result[1,2], -0.4284401, places=4,
msg="Incorrect value in position [1,2]!")
self.assertAlmostEqual(result[1,3], -0.36612593, places=4,
msg="Incorrect value in position [1,3]!")
self.assertAlmostEqual(result[2,3], 0.96286543, places=4,
msg="Incorrect value in position [2,3]!")
@points('p03-05.2')
def test_lengths_calls(self):
with patch(patch_name(module_name, "np.corrcoef")) as pcorr:
result = correlations()
pcorr.assert_called()
if __name__ == '__main__':
unittest.main()
| gpl-3.0 | -656,753,270,546,592,900 | 33.085366 | 105 | 0.563148 | false |
hobarrera/todoman | tests/test_filtering.py | 1 | 8808 | from datetime import datetime
from datetime import timedelta
from todoman.cli import cli
from todoman.model import Database
from todoman.model import Todo
def test_priority(tmpdir, runner, create):
result = runner.invoke(cli, ["list"], catch_exceptions=False)
assert not result.exception
assert not result.output.strip()
create("one.ics", "SUMMARY:haha\nPRIORITY:4\n")
create("two.ics", "SUMMARY:hoho\nPRIORITY:9\n")
create("three.ics", "SUMMARY:hehe\nPRIORITY:5\n")
create("four.ics", "SUMMARY:huhu\n")
result_high = runner.invoke(cli, ["list", "--priority=high"])
assert not result_high.exception
assert "haha" in result_high.output
assert "hoho" not in result_high.output
assert "huhu" not in result_high.output
assert "hehe" not in result_high.output
result_medium = runner.invoke(cli, ["list", "--priority=medium"])
assert not result_medium.exception
assert "haha" in result_medium.output
assert "hehe" in result_medium.output
assert "hoho" not in result_medium.output
assert "huhu" not in result_medium.output
result_low = runner.invoke(cli, ["list", "--priority=low"])
assert not result_low.exception
assert "haha" in result_low.output
assert "hehe" in result_low.output
assert "hoho" in result_low.output
assert "huhu" not in result_low.output
result_none = runner.invoke(cli, ["list", "--priority=none"])
assert not result_none.exception
assert "haha" in result_none.output
assert "hehe" in result_none.output
assert "hoho" in result_none.output
assert "huhu" in result_none.output
result_error = runner.invoke(cli, ["list", "--priority=blah"])
assert result_error.exception
def test_location(tmpdir, runner, create):
result = runner.invoke(cli, ["list"], catch_exceptions=False)
assert not result.exception
assert not result.output.strip()
create("one.ics", "SUMMARY:haha\nLOCATION: The Pool\n")
create("two.ics", "SUMMARY:hoho\nLOCATION: The Dungeon\n")
create("two.ics", "SUMMARY:harhar\n")
result = runner.invoke(cli, ["list", "--location", "Pool"])
assert not result.exception
assert "haha" in result.output
assert "hoho" not in result.output
assert "harhar" not in result.output
def test_category(tmpdir, runner, create):
result = runner.invoke(cli, ["list"], catch_exceptions=False)
assert not result.exception
assert not result.output.strip()
create("one.ics", "SUMMARY:haha\nCATEGORIES:work,trip\n")
create("two.ics", "CATEGORIES:trip\nSUMMARY:hoho\n")
create("three.ics", "SUMMARY:harhar\n")
result = runner.invoke(cli, ["list", "--category", "work"])
assert not result.exception
assert "haha" in result.output
assert "hoho" not in result.output
assert "harhar" not in result.output
def test_grep(tmpdir, runner, create):
result = runner.invoke(cli, ["list"], catch_exceptions=False)
assert not result.exception
assert not result.output.strip()
create(
"one.ics",
"SUMMARY:fun\nDESCRIPTION: Have fun!\n",
)
create(
"two.ics",
"SUMMARY:work\nDESCRIPTION: The stuff for work\n",
)
create(
"three.ics",
"SUMMARY:buy sandwiches\nDESCRIPTION: This is for the Duke\n",
)
create(
"four.ics",
"SUMMARY:puppies\nDESCRIPTION: Feed the puppies\n",
)
create(
"five.ics",
"SUMMARY:research\nDESCRIPTION: Cure cancer\n",
)
create("six.ics", "SUMMARY:hoho\n")
result = runner.invoke(cli, ["list", "--grep", "fun"])
assert not result.exception
assert "fun" in result.output
assert "work" not in result.output
assert "sandwiches" not in result.output
assert "puppies" not in result.output
assert "research" not in result.output
assert "hoho" not in result.output
def test_filtering_lists(tmpdir, runner, create):
tmpdir.mkdir("list_one")
tmpdir.mkdir("list_two")
tmpdir.mkdir("list_three")
runner.invoke(cli, ["new", "-l", "list_one", "todo one"])
runner.invoke(cli, ["new", "-l", "list_two", "todo two"])
runner.invoke(cli, ["new", "-l", "list_three", "todo three"])
# No filter
result = runner.invoke(cli, ["list"])
assert not result.exception
assert len(result.output.splitlines()) == 3
assert "todo one" in result.output
assert "@list_one" in result.output
assert "todo two" in result.output
assert "@list_two" in result.output
assert "todo three" in result.output
assert "@list_three" in result.output
# One filter
result = runner.invoke(cli, ["list", "list_two"])
assert not result.exception
assert len(result.output.splitlines()) == 1
assert "todo two" in result.output
assert "@list_two" not in result.output
# Several filters
result = runner.invoke(cli, ["list", "list_one", "list_two"])
assert not result.exception
assert len(result.output.splitlines()) == 2
assert "todo one" in result.output
assert "todo two" in result.output
assert "@list_one" in result.output
assert "@list_two" in result.output
def test_due_aware(tmpdir, runner, create, now_for_tz):
db = Database([tmpdir.join("default")], tmpdir.join("cache.sqlite"))
list_ = next(db.lists())
for tz in ["CET", "HST"]:
for i in [1, 23, 25, 48]:
todo = Todo(new=True)
todo.due = now_for_tz(tz) + timedelta(hours=i)
todo.summary = "{}".format(i)
todo.list = list_
db.save(todo)
todos = list(db.todos(due=24))
assert len(todos) == 4
assert todos[0].summary == "23"
assert todos[1].summary == "23"
assert todos[2].summary == "1"
assert todos[3].summary == "1"
def test_due_naive(tmpdir, runner, create):
now = datetime.now()
for i in [1, 23, 25, 48]:
due = now + timedelta(hours=i)
create(
"test_{}.ics".format(i),
"SUMMARY:{}\nDUE;VALUE=DATE-TIME:{}\n".format(
i,
due.strftime("%Y%m%dT%H%M%S"),
),
)
db = Database([tmpdir.join("default")], tmpdir.join("cache.sqlite"))
todos = list(db.todos(due=24))
assert len(todos) == 2
assert todos[0].summary == "23"
assert todos[1].summary == "1"
def test_filtering_start(tmpdir, runner, todo_factory):
today = datetime.now()
now = today.strftime("%Y-%m-%d")
tomorrow = (today + timedelta(days=1)).strftime("%Y-%m-%d")
yesterday = (today + timedelta(days=-1)).strftime("%Y-%m-%d")
result = runner.invoke(cli, ["list", "--start", "before", now])
assert not result.exception
assert not result.output.strip()
result = runner.invoke(cli, ["list", "--start", "after", now])
assert not result.exception
assert not result.output.strip()
todo_factory(summary="haha", start=today)
todo_factory(summary="hoho", start=today)
todo_factory(summary="hihi", start=today - timedelta(days=2))
todo_factory(summary="huhu")
result = runner.invoke(cli, ["list", "--start", "after", yesterday])
assert not result.exception
assert "haha" in result.output
assert "hoho" in result.output
assert "hihi" not in result.output
assert "huhu" not in result.output
result = runner.invoke(cli, ["list", "--start", "before", yesterday])
assert not result.exception
assert "haha" not in result.output
assert "hoho" not in result.output
assert "hihi" in result.output
assert "huhu" not in result.output
result = runner.invoke(cli, ["list", "--start", "after", tomorrow])
assert not result.exception
assert "haha" not in result.output
assert "hoho" not in result.output
assert "hihi" not in result.output
assert "huhu" not in result.output
def test_statuses(todo_factory, todos):
cancelled = todo_factory(status="CANCELLED").uid
completed = todo_factory(status="COMPLETED").uid
in_process = todo_factory(status="IN-PROCESS").uid
needs_action = todo_factory(status="NEEDS-ACTION").uid
no_status = todo_factory(status="NEEDS-ACTION").uid
all_todos = set(todos(status=["ANY"]))
cancelled_todos = set(todos(status=["CANCELLED"]))
completed_todos = set(todos(status=["COMPLETED"]))
in_process_todos = set(todos(status=["IN-PROCESS"]))
needs_action_todos = set(todos(status=["NEEDS-ACTION"]))
assert {t.uid for t in all_todos} == {
cancelled,
completed,
in_process,
needs_action,
no_status,
}
assert {t.uid for t in cancelled_todos} == {cancelled}
assert {t.uid for t in completed_todos} == {completed}
assert {t.uid for t in in_process_todos} == {in_process}
assert {t.uid for t in needs_action_todos} == {needs_action, no_status}
| isc | 6,266,408,189,354,740,000 | 32.618321 | 75 | 0.642484 | false |
NeoRazorX/ubuntufaq | public.py | 1 | 15820 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# This file is part of ubuntufaq
# Copyright (C) 2011 Carlos Garcia Gomez [email protected]
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os, logging
# load Django 1.2
os.environ['DJANGO_SETTINGS_MODULE'] = 'settings'
from google.appengine.dist import use_library
use_library('django', '1.2')
from google.appengine.ext.webapp import template
from google.appengine.ext import db, webapp
from google.appengine.ext.webapp.util import run_wsgi_app
from google.appengine.api import users, memcache
from recaptcha.client import captcha
from base import *
from preguntas import *
from enlaces import *
class Portada(Pagina):
def get(self):
Pagina.get(self)
mixto = self.sc.get_portada( users.get_current_user() )
tags = self.get_tags_from_mixto( mixto )
template_values = {
'titulo': 'Ubuntu FAQ',
'descripcion': APP_DESCRIPTION,
'tags': tags,
'mixto': mixto,
'urespuestas': self.sc.get_ultimas_respuestas(),
'searches': self.sc.get_allsearches(),
'url': self.url,
'url_linktext': self.url_linktext,
'mi_perfil': self.mi_perfil,
'formulario' : self.formulario,
'usuario': users.get_current_user(),
'notis': self.get_notificaciones(),
'error_dominio': self.error_dominio,
'stats': self.sc.get_stats()
}
path = os.path.join(os.path.dirname(__file__), 'templates/portada.html')
self.response.out.write( template.render(path, template_values) )
class Populares(Pagina):
def get(self):
Pagina.get(self)
mixto = self.sc.get_populares()
tags = self.get_tags_from_mixto( mixto )
template_values = {
'titulo': 'Populares - Ubuntu FAQ',
'descripcion': 'Listado de preguntas y noticias populares de Ubuntu FAQ. ' + APP_DESCRIPTION,
'tags': tags,
'mixto': mixto,
'stats': self.sc.get_stats(),
'url': self.url,
'url_linktext': self.url_linktext,
'mi_perfil': self.mi_perfil,
'formulario' : self.formulario,
'usuario': users.get_current_user(),
'notis': self.get_notificaciones(),
'error_dominio': self.error_dominio
}
path = os.path.join(os.path.dirname(__file__), 'templates/populares.html')
self.response.out.write( template.render(path, template_values) )
class Ayuda(Pagina):
def get(self):
Pagina.get(self)
template_values = {
'titulo': 'Ayuda de Ubuntu FAQ',
'descripcion': u'Sección de ayuda de Ubuntu FAQ. ' + APP_DESCRIPTION,
'tags': 'ubuntu, kubuntu, xubuntu, lubuntu, problema, ayuda, linux, karmic, lucid, maverick, natty, ocelot',
'url': self.url,
'url_linktext': self.url_linktext,
'mi_perfil': self.mi_perfil,
'usuario': users.get_current_user(),
'notis': self.get_notificaciones(),
'formulario': self.formulario,
'error_dominio': self.error_dominio,
'karmalist': memcache.get('pending-users'),
'foco': 'ayuda'
}
path = os.path.join(os.path.dirname(__file__), 'templates/ayuda.html')
self.response.out.write(template.render(path, template_values))
class Nueva_publicacion(Pagina):
def get(self):
Pagina.get(self)
        # the captcha
if users.get_current_user():
chtml = ''
else:
chtml = captcha.displayhtml(
public_key = RECAPTCHA_PUBLIC_KEY,
use_ssl = False,
error = None)
if self.request.get('tipo') == 'pregunta':
foco = 'pregunta'
elif self.request.get('tipo') == 'enlace':
foco = 'enlace'
else:
foco = 'pensamiento'
template_values = {
'titulo': 'Publicar...',
'descripcion': u'Formulario de publicación de Ubuntu FAQ. ' + APP_DESCRIPTION,
'tags': 'ubuntu, kubuntu, xubuntu, lubuntu, problema, ayuda, linux, karmic, lucid, maverick, natty, ocelot',
'url': self.url,
'url_linktext': self.url_linktext,
'mi_perfil': self.mi_perfil,
'usuario': users.get_current_user(),
'notis': self.get_notificaciones(),
'formulario': self.formulario,
'error_dominio': self.error_dominio,
'captcha': chtml,
'tipo': self.request.get('tipo'),
'contenido': self.request.get('contenido'),
'url2': self.request.get('url'),
'foco': foco
}
path = os.path.join(os.path.dirname(__file__), 'templates/nueva.html')
self.response.out.write(template.render(path, template_values))
class Pagina_buscar(Pagina):
def get(self, tag=None):
Pagina.get(self)
        # to fix encoding issues in the tag
if isinstance(tag, str):
tag = unicode( urllib.unquote(tag), 'utf-8')
else:
tag = unicode( urllib.unquote(tag) )
template_values = {
'titulo': 'Ubuntu FAQ: ' + tag,
'descripcion': u'Páginas relacionadas con ' + tag,
'tag': tag,
'tags': 'problema, duda, ayuda, ' + tag,
'relacionadas': self.sc.paginas_relacionadas(tag, True),
'alltags': self.sc.get_alltags(),
'searches': self.sc.get_allsearches(),
'url': self.url,
'url_linktext': self.url_linktext,
'mi_perfil': self.mi_perfil,
'formulario' : self.formulario,
'usuario': users.get_current_user(),
'notis': self.get_notificaciones(),
'error_dominio': self.error_dominio,
'foco': 'buscar'
}
path = os.path.join(os.path.dirname(__file__), 'templates/search.html')
self.response.out.write(template.render(path, template_values))
def post(self, ntag=None):
Pagina.get(self)
query = urllib.unquote( self.request.get('query') )
template_values = {
'titulo': 'Ubuntu FAQ: ' + query,
'descripcion': u'Resultados de: ' + query,
'tag': query,
'buscando': True,
'tags': 'problema, duda, ayuda, ' + query,
'relacionadas': self.sc.buscar( query ),
'searches': self.sc.get_allsearches(),
'url': self.url,
'url_linktext': self.url_linktext,
'mi_perfil': self.mi_perfil,
'formulario' : self.formulario,
'usuario': users.get_current_user(),
'notis': self.get_notificaciones(),
'error_dominio': self.error_dominio,
'foco': 'buscar'
}
path = os.path.join(os.path.dirname(__file__), 'templates/search.html')
self.response.out.write(template.render(path, template_values))
class Guardar_voto(Pagina):
def get(self, tipo='x', keye=None, voto='-1'):
try:
if self.request.environ['HTTP_USER_AGENT'].lower().find('googlebot') != -1:
logging.info('Googlebot!')
self.redirect('/')
else:
if tipo == 'r':
elemento = Respuesta.get( keye )
elif tipo == 'c':
elemento = Comentario.get( keye )
else:
elemento = False
                if not elemento: # there is no element to vote on
logging.warning('Elemento no encontrado!')
self.redirect('/error/404')
                elif self.request.remote_addr in elemento.ips and self.request.remote_addr != '127.0.0.1': # this IP has already voted
logging.info('Voto ya realizado')
self.redirect( elemento.get_link() )
                else: # valid vote
ips = elemento.ips
ips.append( self.request.remote_addr )
elemento.ips = ips
if voto == '0':
elemento.valoracion -= 1
logging.info('Voto negativo')
elif voto == '1':
elemento.valoracion += 1
logging.info('Voto positivo')
else:
logging.info('Voto no válido: ' + str(voto))
elemento.put()
elemento.borrar_cache()
                    # update the stats
stats = self.sc.get_stats()
if voto in ['0', '1']:
try:
stats['votos'] += 1
except:
stats['votos'] = 1
memcache.replace('stats', stats)
self.redirect( elemento.get_link() )
except:
self.redirect('/error/503')
class Rss(Pagina):
def get(self):
template_values = {
'portada': self.sc.get_portada(),
'domain': APP_DOMAIN,
'title': APP_NAME,
'descripcion': APP_DESCRIPTION
}
path = os.path.join(os.path.dirname(__file__), 'templates/rss.html')
self.response.out.write(template.render(path, template_values))
class Rssr(Pagina):
def get(self):
template_values = {
'respuestas': self.sc.get_ultimas_respuestas(),
'comentarios': self.sc.get_ultimos_comentarios(),
'domain': APP_DOMAIN,
'title': APP_NAME,
'descripcion': APP_DESCRIPTION
}
path = os.path.join(os.path.dirname(__file__), 'templates/rss-respuestas.html')
self.response.out.write(template.render(path, template_values))
class Sitemap(Pagina):
def get(self):
portada = self.sc.get_portada()
print 'Content-Type: text/xml'
print ''
print '<?xml version="1.0" encoding="UTF-8"?>'
print '<urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">'
for p in portada:
print '<url><loc>' + p['link'] + '</loc><lastmod>' + str(p['fecha']).split(' ')[0] + '</lastmod><changefreq>always</changefreq><priority>0.9</priority></url>'
print '</urlset>'
class Perror(Pagina):
def get(self, cerror='404'):
Pagina.get(self)
derror = {
'403': 'Permiso denegado',
'403c': 'Permiso denegado - error en el captcha',
'404': u'Página no encontrada en Ubuntu FAQ',
'503': 'Error en Ubuntu FAQ',
'606': 'Idiota detectado'
}
merror = {
'403': '403 - Permiso denegado',
'403c': u'<img src="/img/fuuu_face.png" alt="fuuu"/><br/><br/>403 - Permiso denegado: debes repetir el captcha.<br/>Evita los captchas iniciando sesión.',
'404': u'404 - Página no encontrada en Ubuntu FAQ',
'503': '<img src="/img/fuuu_face.png" alt="explosión"/><br/><br/>503 - Error en Ubuntu FAQ,<br/>consulta el estado en: http://code.google.com/status/appengine',
'606': u'<img src="/img/troll_face.png" alt="troll"/><br/><br/>606 - ¿Por qué no pruebas a escribir algo diferente?'
}
if cerror == '503':
logging.error( '503' )
else:
logging.warning( cerror )
template_values = {
'titulo': str(cerror) + ' - Ubuntu FAQ',
'descripcion': derror.get(cerror, 'Error desconocido'),
'tags': 'ubuntu, kubuntu, xubuntu, lubuntu, problema, ayuda, linux, karmic, lucid, maverick, natty, ocelot',
'url': self.url,
'url_linktext': self.url_linktext,
'mi_perfil': self.mi_perfil,
'formulario': self.formulario,
'error': merror.get(cerror, 'Error desconocido'),
'cerror': cerror,
'usuario': users.get_current_user(),
'notis': self.get_notificaciones(),
'error_dominio': self.error_dominio,
'foco': 'buscar'
}
path = os.path.join(os.path.dirname(__file__), 'templates/portada.html')
self.response.out.write(template.render(path, template_values))
def main():
application = webapp.WSGIApplication([('/', Portada),
('/inicio', Todas_preguntas),
('/preguntas', Todas_preguntas),
(r'/preguntas/(.*)', Todas_preguntas),
('/populares', Populares),
('/sin-solucionar', Sin_solucionar),
('/actualidad', Actualidad),
(r'/actualidad/(.*)', Actualidad),
(r'/p/(.*)', Redir_pregunta),
(r'/question/(.*)', Detalle_pregunta),
('/nueva', Nueva_publicacion),
('/add_p', Nueva_pregunta),
('/mod_p', Detalle_pregunta),
('/del_p', Borrar_pregunta),
('/add_r', Responder),
('/mod_r', Modificar_respuesta),
('/del_r', Borrar_respuesta),
(r'/e/(.*)', Acceder_enlace),
(r'/de/(.*)', Redir_enlace),
(r'/story/(.*)', Detalle_enlace),
('/add_e', Actualidad),
('/mod_e', Detalle_enlace),
('/hun_e', Hundir_enlace),
('/del_e', Borrar_enlace),
('/add_c', Comentar),
('/mod_c', Modificar_comentario),
('/del_c', Borrar_comentario),
('/ayuda', Ayuda),
(r'/search/(.*)', Pagina_buscar),
(r'/votar/(.*)/(.*)/(.*)', Guardar_voto),
('/rss', Rss),
('/rss-respuestas', Rssr),
('/sitemap', Sitemap),
('/sitemap.xml', Sitemap),
(r'/error/(.*)', Perror),
('/.*', Perror),
],
debug=DEBUG_FLAG)
webapp.template.register_template_library('filters.filtros_django')
run_wsgi_app(application)
if __name__ == "__main__":
main()
| agpl-3.0 | -7,409,657,376,595,017,000 | 43.784703 | 179 | 0.49864 | false |
DataDog/integrations-core | openstack_controller/tests/common.py | 1 | 14615 | # (C) Datadog, Inc. 2018-present
# All rights reserved
# Licensed under Simplified BSD License (see LICENSE)
import datetime
import os
CHECK_NAME = 'openstack'
FIXTURES_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'fixtures')
ALL_IDS = ['server-1', 'server-2', 'other-1', 'other-2']
EXCLUDED_NETWORK_IDS = ['server-1', 'other-.*']
EXCLUDED_SERVER_IDS = ['server-2', 'other-.*']
FILTERED_NETWORK_ID = 'server-2'
FILTERED_SERVER_ID = 'server-1'
FILTERED_BY_PROJ_SERVER_ID = ['server-1', 'server-2']
CONFIG_FILE_INSTANCE = {
'name': 'test_name',
'user': {'name': 'test_name', 'password': 'test_pass', 'domain': {'id': 'test_id'}},
'ssl_verify': False,
'exclude_network_ids': EXCLUDED_NETWORK_IDS,
'openstack_config_file_path': os.path.abspath('./tests/fixtures/openstack_config.yaml'),
'openstack_cloud_name': 'test_cloud',
}
KEYSTONE_INSTANCE = {
'name': 'test_name',
'keystone_server_url': 'http://10.0.2.15:5000',
'user': {'name': 'test_name', 'password': 'test_pass', 'domain': {'id': 'test_id'}},
'ssl_verify': False,
'exclude_network_ids': EXCLUDED_NETWORK_IDS,
}
MOCK_CONFIG = {'init_config': {}, 'instances': [KEYSTONE_INSTANCE]}
EXAMPLE_AUTH_RESPONSE = {
u'token': {
u'methods': [u'password'],
u'roles': [
{u'id': u'f20c215f5a4d47b7a6e510bc65485ced', u'name': u'datadog_monitoring'},
{u'id': u'9fe2ff9ee4384b1894a90878d3e92bab', u'name': u'_member_'},
],
u'expires_at': u'2015-11-02T15: 57: 43.911674Z',
u'project': {
u'domain': {u'id': u'default', u'name': u'Default'},
u'id': u'0850707581fe4d738221a72db0182876',
u'name': u'admin',
},
u'catalog': [
{
u'endpoints': [
{
u'url': u'http://10.0.2.15:8774/v2.1/0850707581fe4d738221a72db0182876',
u'interface': u'internal',
u'region': u'RegionOne',
u'region_id': u'RegionOne',
u'id': u'354e35ed19774e398f80dc2a90d07f4b',
},
{
u'url': u'http://10.0.2.15:8774/v2.1/0850707581fe4d738221a72db0182876',
u'interface': u'public',
u'region': u'RegionOne',
u'region_id': u'RegionOne',
u'id': u'36e8e2bf24384105b9d56a65b0900172',
},
{
u'url': u'http://10.0.2.15:8774/v2.1/0850707581fe4d738221a72db0182876',
u'interface': u'admin',
u'region': u'RegionOne',
u'region_id': u'RegionOne',
u'id': u'de93edcbf7f9446286687ec68423c36f',
},
],
u'type': u'compute',
u'id': u'2023bd4f451849ba8abeaaf283cdde4f',
u'name': u'nova',
},
{
u'endpoints': [
{
u'url': u'http://10.0.3.111:8776/v1/***************************4bfc1',
u'interface': u'public',
u'region': u'RegionOne',
u'region_id': u'RegionOne',
u'id': u'***************************2452f',
},
{
u'url': u'http://10.0.2.15:8776/v1/***************************4bfc1',
u'interface': u'admin',
u'region': u'RegionOne',
u'region_id': u'RegionOne',
u'id': u'***************************8239f',
},
{
u'url': u'http://10.0.2.15:8776/v1/***************************4bfc1',
u'interface': u'internal',
u'region': u'RegionOne',
u'region_id': u'RegionOne',
u'id': u'***************************7caa1',
},
],
u'type': u'volume',
u'id': u'***************************e7e16',
u'name': u'cinder',
},
{
u'endpoints': [
{
u'url': u'http://10.0.2.15:9292',
u'interface': u'internal',
u'region': u'RegionOne',
u'region_id': u'RegionOne',
u'id': u'7c1e318d8f7f42029fcb591598df2ef5',
},
{
u'url': u'http://10.0.2.15:9292',
u'interface': u'public',
u'region': u'RegionOne',
u'region_id': u'RegionOne',
u'id': u'afcc88b1572f48a38bb393305dc2b584',
},
{
u'url': u'http://10.0.2.15:9292',
u'interface': u'admin',
u'region': u'RegionOne',
u'region_id': u'RegionOne',
u'id': u'd9730dbdc07844d785913219da64a197',
},
],
u'type': u'network',
u'id': u'21ad241f26194bccb7d2e49ee033d5a2',
u'name': u'neutron',
},
],
u'extras': {},
u'user': {
u'domain': {u'id': u'default', u'name': u'Default'},
u'id': u'5f10e63fbd6b411186e561dc62a9a675',
u'name': u'datadog',
},
u'audit_ids': [u'OMQQg9g3QmmxRHwKrfWxyQ'],
u'issued_at': u'2015-11-02T14: 57: 43.911697Z',
}
}
EXAMPLE_PROJECTS_RESPONSE = {
"projects": [
{
"domain_id": "1789d1",
"enabled": True,
"id": "263fd9",
"links": {"self": "https://example.com/identity/v3/projects/263fd9"},
"name": "Test Group",
}
],
"links": {"self": "https://example.com/identity/v3/auth/projects", "previous": None, "next": None},
}
# Server/network cache fixtures
SERVERS_CACHE_MOCK = {
'servers': {
"server-1": {"id": "server-1", "name": "server-name-1", "status": "ACTIVE", "project_name": "testproj"},
"server-2": {"id": "server-2", "name": "server-name-2", "status": "ACTIVE", "project_name": "testproj"},
"other-1": {"id": "other-1", "name": "server-name-other-1", "status": "ACTIVE", "project_name": "blacklist_1"},
"other-2": {"id": "other-2", "name": "server-name-other-2", "status": "ACTIVE", "project_name": "blacklist_2"},
},
'change_since': datetime.datetime.utcnow().isoformat(),
}
EMPTY_NOVA_SERVERS = []
# One example from MOCK_NOVA_SERVERS to emulate pagination
MOCK_NOVA_SERVERS_PAGINATED = [
{
"OS-DCF:diskConfig": "AUTO",
"OS-EXT-AZ:availability_zone": "nova",
"OS-EXT-SRV-ATTR:host": "compute",
"OS-EXT-SRV-ATTR:hostname": "server-1",
"OS-EXT-SRV-ATTR:hypervisor_hostname": "fake-mini",
"OS-EXT-SRV-ATTR:instance_name": "instance-00000001",
"OS-EXT-SRV-ATTR:kernel_id": "",
"OS-EXT-SRV-ATTR:launch_index": 0,
"OS-EXT-SRV-ATTR:ramdisk_id": "",
"OS-EXT-SRV-ATTR:reservation_id": "r-iffothgx",
"OS-EXT-SRV-ATTR:root_device_name": "/dev/sda",
"OS-EXT-SRV-ATTR:user_data": "IyEvYmluL2Jhc2gKL2Jpbi9zdQplY2hvICJJIGFtIGluIHlvdSEiCg==",
"OS-EXT-STS:power_state": 1,
"OS-EXT-STS:task_state": 'null',
"OS-EXT-STS:vm_state": "active",
"OS-SRV-USG:launched_at": "2017-02-14T19:24:43.891568",
"OS-SRV-USG:terminated_at": 'null',
"accessIPv4": "1.2.3.4",
"accessIPv6": "80fe::",
"hostId": "2091634baaccdc4c5a1d57069c833e402921df696b7f970791b12ec6",
"host_status": "UP",
"id": "server-1",
"metadata": {"My Server Name": "Apache1"},
"name": "new-server-test",
"status": "ACTIVE",
"tags": [],
"tenant_id": "6f70656e737461636b20342065766572",
"updated": "2017-02-14T19:24:43Z",
"user_id": "fake",
}
]
# Example response from - https://developer.openstack.org/api-ref/compute/#list-servers-detailed
# ID and server-name values have been changed for test readability
MOCK_NOVA_SERVERS = [
{
"OS-DCF:diskConfig": "AUTO",
"OS-EXT-AZ:availability_zone": "nova",
"OS-EXT-SRV-ATTR:host": "compute",
"OS-EXT-SRV-ATTR:hostname": "server-1",
"OS-EXT-SRV-ATTR:hypervisor_hostname": "fake-mini",
"OS-EXT-SRV-ATTR:instance_name": "instance-00000001",
"OS-EXT-SRV-ATTR:kernel_id": "",
"OS-EXT-SRV-ATTR:launch_index": 0,
"OS-EXT-SRV-ATTR:ramdisk_id": "",
"OS-EXT-SRV-ATTR:reservation_id": "r-iffothgx",
"OS-EXT-SRV-ATTR:root_device_name": "/dev/sda",
"OS-EXT-SRV-ATTR:user_data": "IyEvYmluL2Jhc2gKL2Jpbi9zdQplY2hvICJJIGFtIGluIHlvdSEiCg==",
"OS-EXT-STS:power_state": 1,
"OS-EXT-STS:task_state": 'null',
"OS-EXT-STS:vm_state": "active",
"OS-SRV-USG:launched_at": "2017-02-14T19:24:43.891568",
"OS-SRV-USG:terminated_at": 'null',
"accessIPv4": "1.2.3.4",
"accessIPv6": "80fe::",
"hostId": "2091634baaccdc4c5a1d57069c833e402921df696b7f970791b12ec6",
"host_status": "UP",
"id": "server-1",
"metadata": {"My Server Name": "Apache1"},
"name": "new-server-test",
"status": "DELETED",
"tags": [],
"tenant_id": "6f70656e737461636b20342065766572",
"updated": "2017-02-14T19:24:43Z",
"user_id": "fake",
},
{
"OS-DCF:diskConfig": "AUTO",
"OS-EXT-AZ:availability_zone": "nova",
"OS-EXT-SRV-ATTR:host": "compute",
"OS-EXT-SRV-ATTR:hostname": "server-2",
"OS-EXT-SRV-ATTR:hypervisor_hostname": "fake-mini",
"OS-EXT-SRV-ATTR:instance_name": "instance-00000001",
"OS-EXT-SRV-ATTR:kernel_id": "",
"OS-EXT-SRV-ATTR:launch_index": 0,
"OS-EXT-SRV-ATTR:ramdisk_id": "",
"OS-EXT-SRV-ATTR:reservation_id": "r-iffothgx",
"OS-EXT-SRV-ATTR:root_device_name": "/dev/sda",
"OS-EXT-SRV-ATTR:user_data": "IyEvYmluL2Jhc2gKL2Jpbi9zdQplY2hvICJJIGFtIGluIHlvdSEiCg==",
"OS-EXT-STS:power_state": 1,
"OS-EXT-STS:task_state": 'null',
"OS-EXT-STS:vm_state": "active",
"OS-SRV-USG:launched_at": "2017-02-14T19:24:43.891568",
"OS-SRV-USG:terminated_at": 'null',
"accessIPv4": "1.2.3.4",
"accessIPv6": "80fe::",
"hostId": "2091634baaccdc4c5a1d57069c833e402921df696b7f970791b12ec6",
"host_status": "UP",
"id": "server_newly_added",
"metadata": {"My Server Name": "Apache1"},
"name": "newly_added_server",
"status": "ACTIVE",
"tags": [],
"tenant_id": "6f70656e737461636b20342065766572",
"updated": "2017-02-14T19:24:43Z",
"user_id": "fake",
},
]
EXAMPLE_GET_FLAVORS_DETAIL_RETURN_VALUE = [
{'id': u'10', 'disk': 10, 'vcpus': 2, 'ram': 1024, 'OS-FLV-EXT-DATA:ephemeral': 0, 'swap': 0},
{
'id': u'625c2e4b-0a1f-4236-bb67-5ceee1a766e5',
'disk': 48,
'vcpus': 8,
'ram': 5934,
'OS-FLV-EXT-DATA:ephemeral': 0,
'swap': 0,
},
]
EXAMPLE_GET_OS_AGGREGATES_RETURN_VALUE = [{'hosts': ["compute"], 'name': "name", 'availability_zone': "london"}]
EXAMPLE_GET_OS_HYPERVISORS_RETURN_VALUE = [
{
"cpu_info": {
"arch": "x86_64",
"model": "Nehalem",
"vendor": "Intel",
"features": ["pge", "clflush"],
"topology": {"cores": 1, "threads": 1, "sockets": 4},
},
"current_workload": 0,
"status": "enabled",
"state": "up",
"disk_available_least": 0,
"host_ip": "1.1.1.1",
"free_disk_gb": 1028,
"free_ram_mb": 7680,
"hypervisor_hostname": "host1",
"hypervisor_type": "fake",
"hypervisor_version": 1000,
"id": 2,
"local_gb": 1028,
"local_gb_used": 0,
"memory_mb": 8192,
"memory_mb_used": 512,
"running_vms": 0,
"service": {"host": "host1", "id": 7, "disabled_reason": None},
"vcpus": 2,
"vcpus_used": 0,
}
]
EXAMPLE_GET_PROJECT_LIMITS_RETURN_VALUE = {
"maxImageMeta": 128,
"maxPersonality": 5,
"maxPersonalitySize": 10240,
"maxSecurityGroupRules": 20,
"maxSecurityGroups": 10,
"maxServerMeta": 128,
"maxTotalCores": 20,
"maxTotalFloatingIps": 10,
"maxTotalInstances": 10,
"maxTotalKeypairs": 100,
"maxTotalRAMSize": 51200,
"maxServerGroups": 10,
"maxServerGroupMembers": 10,
"totalCoresUsed": 0,
"totalInstancesUsed": 0,
"totalRAMUsed": 0,
"totalSecurityGroupsUsed": 0,
"totalFloatingIpsUsed": 1,
"totalServerGroupsUsed": 0,
}
EXAMPLE_GET_NETWORKS_RETURN_VALUE = [
{
'id': u'2755452c-4fe8-4ba1-9b26-8898665b0958',
'name': u'net2',
'tenant_id': u'680031a39ce040e1b81289ea8c73fb11',
'admin_state_up': True,
}
]
DEFAULT_METRICS = [
'openstack.controller',
'openstack.nova.current_workload',
'openstack.nova.disk_available_least',
'openstack.nova.free_disk_gb',
'openstack.nova.free_ram_mb',
'openstack.nova.hypervisor_load.1',
'openstack.nova.hypervisor_load.15',
'openstack.nova.hypervisor_load.5',
'openstack.nova.limits.max_image_meta',
'openstack.nova.limits.max_personality',
'openstack.nova.limits.max_personality_size',
'openstack.nova.limits.max_security_group_rules',
'openstack.nova.limits.max_security_groups',
'openstack.nova.limits.max_server_meta',
'openstack.nova.limits.max_total_cores',
'openstack.nova.limits.max_total_floating_ips',
'openstack.nova.limits.max_total_instances',
'openstack.nova.limits.max_total_keypairs',
'openstack.nova.limits.max_total_ram_size',
'openstack.nova.limits.total_cores_used',
'openstack.nova.limits.total_floating_ips_used',
'openstack.nova.limits.total_instances_used',
'openstack.nova.limits.total_ram_used',
'openstack.nova.limits.total_security_groups_used',
'openstack.nova.local_gb',
'openstack.nova.local_gb_used',
'openstack.nova.memory_mb',
'openstack.nova.memory_mb_used',
'openstack.nova.running_vms',
'openstack.nova.vcpus',
'openstack.nova.vcpus_used',
]
| bsd-3-clause | 3,778,650,173,125,150,700 | 37.159269 | 119 | 0.515156 | false |
asimshankar/tensorflow | tensorflow/python/saved_model/save.py | 1 | 34278 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Exports a SavedModel from a Checkpointable Python object."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import functools
import os
from tensorflow.core.protobuf import meta_graph_pb2
from tensorflow.core.protobuf import saved_model_pb2
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.eager import function as defun
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import meta_graph
from tensorflow.python.framework import ops
from tensorflow.python.lib.io import file_io
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.saved_model import builder_impl
from tensorflow.python.saved_model import constants
from tensorflow.python.saved_model import function_serialization
from tensorflow.python.saved_model import saved_object_graph_pb2
from tensorflow.python.saved_model import signature_constants
from tensorflow.python.saved_model import signature_def_utils
from tensorflow.python.saved_model import tag_constants
from tensorflow.python.saved_model import utils_impl
from tensorflow.python.training.checkpointable import base
from tensorflow.python.training.checkpointable import tracking
from tensorflow.python.training.checkpointable import util
from tensorflow.python.util import compat
from tensorflow.python.util import nest
from tensorflow.python.util.tf_export import tf_export
DEFAULT_SIGNATURE_ATTR = "_default_save_signature"
def _find_function_to_export(root):
"""Iterate over `root`'s attributes, finding traced functions."""
exported_function = None
previous_attribute_name = None
for attribute_name in dir(root):
attribute_value = getattr(root, attribute_name, None)
if isinstance(attribute_value, def_function.PolymorphicFunction):
if exported_function is not None:
raise ValueError(
("Exporting an object with no "
"tf.saved_model.save(..., signatures=...) "
"argument specified, and with more than one "
"@tf.function-decorated method attached to it: {}. The signature "
"keys for these functions are ambiguous. Specify signature "
"functions explicitly.").format(
[previous_attribute_name, attribute_name]))
exported_function = attribute_value
previous_attribute_name = attribute_name
if exported_function is None:
exported_function = getattr(root, DEFAULT_SIGNATURE_ATTR, None)
if exported_function is None:
raise ValueError(
("Exporting an object with no tf.saved_model.save(..., signatures=...) "
"argument specified, and with no @tf.function-decorated methods "
"attached to it. In the future this will be a supported use-case for "
"Python re-import, but at the moment saving a SavedModel without "
"signatures does not make sense, as the only consumers will expect "
"signatures. Either decorate a method or specify a signature function "
"explicitly."))
return exported_function
def _canonicalize_signatures(signatures):
"""Converts `signatures` into a dictionary of concrete functions."""
if not isinstance(signatures, collections.Mapping):
signatures = {
signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY: signatures}
concrete_signatures = {}
for serving_key, signature_function in signatures.items():
if isinstance(signature_function, (defun.PolymorphicFunction,
def_function.PolymorphicFunction)):
input_signature = signature_function._input_signature # pylint: disable=protected-access
if input_signature is None:
raise ValueError(
("Unable to use the function {} as a signature directly. Functions "
"used to generate serving signatures must either have an "
"`input_signature=` specified when constructed, or must be "
"converted to concrete functions using "
"`f.get_concrete_function(...)`.").format(signature_function))
signature_function = signature_function.get_concrete_function()
elif not isinstance(signature_function, defun.Function):
raise ValueError(
("Expected a TensorFlow function to generate a signature for, but "
"got {}. Python functions may be decorated with "
"`@tf.function(input_signature=...)` and passed as signatures "
"directly, or created without a signature using `@tf.function` "
"and then converted to a concrete TensorFlow function using "
"`f.get_concrete_function(...)`.").format(signature_function))
concrete_signatures[serving_key] = signature_function
return concrete_signatures
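# Sketch of the normalization above (illustrative; `f` stands for any
# tf.function with an `input_signature` specified): both call forms below end
# up as a dictionary mapping serving keys to concrete functions, with the bare
# function filed under the default serving key "serving_default".
#
#   _canonicalize_signatures(f)
#   _canonicalize_signatures({"serving_default": f})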
def _is_flat(sequence):
sequence_flat = nest.flatten(sequence)
try:
nest.assert_same_structure(sequence_flat, sequence)
return True
except ValueError:
return False
except TypeError:
return False
def _normalize_outputs(outputs, function_name, signature_key):
"""Construct an output dictionary from unnormalized function outputs."""
if isinstance(outputs, collections.Mapping):
for key, value in outputs.items():
if not isinstance(value, ops.Tensor):
raise ValueError(
("Got a dictionary containing non-Tensor value {} for key {} "
"in the output of the function {} used to generate a SavedModel "
"signature. Dictionaries outputs for functions used as signatures "
"should have one Tensor output per string key.")
.format(value, key, compat.as_str_any(function_name)))
return outputs
else:
original_outputs = outputs
if not isinstance(outputs, collections.Sequence):
outputs = [outputs]
if not _is_flat(outputs):
raise ValueError(
("Got non-flat outputs '{}' from '{}' for SavedModel "
"signature '{}'. Signatures have one Tensor per output, so "
"to have predictable names Python functions used to generate "
"these signatures should avoid outputting Tensors in nested "
"structures.")
.format(original_outputs, function_name, signature_key))
return {("output_{}".format(output_index)): output
for output_index, output
in enumerate(outputs)}
def _tensor_dict_to_tensorinfo(tensor_dict):
return {key: utils_impl.build_tensor_info(value)
for key, value in tensor_dict.items()}
def _map_captures_to_created_tensors(
original_captures, resource_map):
"""Maps eager tensors captured by a function to Graph resources for export.
Args:
original_captures: A dictionary mapping from tensors captured by the
function to interior placeholders for those tensors (inside the function
body).
resource_map: A dictionary mapping from resource tensors owned by the eager
context to resource tensors in the exported graph.
Returns:
A list of stand-in tensors which belong to the exported graph, corresponding
to the function's captures.
Raises:
AssertionError: If the function references a resource which is not part of
`resource_map`.
"""
export_captures = []
for exterior, interior in original_captures.items():
mapped_resource = resource_map.get(exterior, None)
if mapped_resource is None:
if exterior.dtype == dtypes.resource:
raise AssertionError(
("Tried to export a function which references untracked stateful "
"object {}. Stateful TensorFlow objects (e.g. tf.Variable) must "
"be tracked by the main object. Objects may be tracked by "
"assigning them to an attribute of another tracked object, or to "
"an attribute of the main object directly.")
.format(interior))
else:
# This is a captured Tensor, but it's not a resource. We'll just add it
# to the graph as a constant.
mapped_resource = constant_op.constant(exterior.numpy())
export_captures.append(mapped_resource)
return export_captures
def _map_function_arguments_to_created_inputs(
function_arguments, signature_key, function_name):
"""Creates exterior placeholders in the exported graph for function arguments.
Functions have two types of inputs: tensors captured from the outside (eager)
context, and arguments to the function which we expect to receive from the
user at each call. `_map_captures_to_created_tensors` replaces
captured tensors with stand-ins (typically these are resource dtype tensors
  associated with variables). `_map_function_arguments_to_created_inputs` runs over
every argument, creating a new placeholder for each which will belong to the
exported graph rather than the function body.
Args:
function_arguments: A list of argument placeholders in the function body.
signature_key: The name of the signature being exported, for error messages.
function_name: The name of the function, for error messages.
Returns:
    A tuple of (mapped_inputs, exterior_argument_placeholders)
mapped_inputs: A list with entries corresponding to `function_arguments`
containing all of the inputs of the function gathered from the exported
graph (both captured resources and arguments).
exterior_argument_placeholders: A dictionary mapping from argument names
to placeholders in the exported graph, containing the explicit arguments
to the function which a user is expected to provide.
Raises:
ValueError: If argument names are not unique.
"""
# `exterior_argument_placeholders` holds placeholders which are outside the
# function body, directly contained in a MetaGraph of the SavedModel. The
# function body itself contains nearly identical placeholders used when
# running the function, but these exterior placeholders allow Session-based
# APIs to call the function using feeds and fetches which name Tensors in the
# MetaGraph.
exterior_argument_placeholders = {}
mapped_inputs = []
for placeholder in function_arguments:
# `export_captures` contains an exhaustive set of captures, so if we don't
# find the input there then we now know we have an argument.
user_input_name = compat.as_str_any(
placeholder.op.get_attr("_user_specified_name"))
# If the internal placeholders for a function have names which were
# uniquified by TensorFlow, then a single user-specified argument name
# must refer to multiple Tensors. The resulting signatures would be
# confusing to call. Instead, we throw an exception telling the user to
# specify explicit names.
if user_input_name != placeholder.op.name:
# This should be unreachable, since concrete functions may not be
# generated with non-unique argument names.
raise ValueError(
("Got non-flat/non-unique argument names for SavedModel "
"signature '{}': more than one argument to '{}' was named '{}'. "
"Signatures have one Tensor per named input, so to have "
"predictable names Python functions used to generate these "
"signatures should avoid *args and Tensors in nested "
"structures unless unique names are specified for each. Use "
"tf.TensorSpec(..., name=...) to provide a name for a Tensor "
"input.")
.format(signature_key, compat.as_str_any(function_name),
user_input_name))
arg_placeholder = array_ops.placeholder(
shape=placeholder.shape,
dtype=placeholder.dtype,
name="{}_{}".format(signature_key, user_input_name))
exterior_argument_placeholders[user_input_name] = arg_placeholder
mapped_inputs.append(arg_placeholder)
return mapped_inputs, exterior_argument_placeholders
def _call_function_with_mapped_captures(function, args, resource_map):
"""Calls `function` in the exported graph, using mapped resource captures."""
export_captures = _map_captures_to_created_tensors(
function.graph.captures, resource_map)
mapped_inputs = args + export_captures
# Calls the function quite directly, since we have new captured resource
# tensors we need to feed in which weren't part of the original function
# definition.
# pylint: disable=protected-access
outputs = function._build_call_outputs(
function._inference_function.call(context.context(), mapped_inputs))
return outputs
def _generate_signatures(signature_functions, resource_map):
"""Validates and calls `signature_functions` in the default graph.
Args:
signature_functions: A dictionary mapping string keys to concrete TensorFlow
functions (e.g. from `_canonicalize_signatures`) which will be used to
generate SignatureDefs.
resource_map: A dictionary mapping from resource tensors in the eager
context to resource tensors in the Graph being exported. This dictionary
is used to re-bind resources captured by functions to tensors which will
exist in the SavedModel.
Returns:
Each function in the `signature_functions` dictionary is called with
placeholder Tensors, generating a function call operation and output
Tensors. The placeholder Tensors, the function call operation, and the
output Tensors from the function call are part of the default Graph.
This function then returns a dictionary with the same structure as
`signature_functions`, with the concrete functions replaced by SignatureDefs
implicitly containing information about how to call each function from a
TensorFlow 1.x Session / the C++ Loader API. These SignatureDefs reference
the generated placeholders and Tensor outputs by name.
The caller is expected to include the default Graph set while calling this
function as a MetaGraph in a SavedModel, including the returned
SignatureDefs as part of that MetaGraph.
"""
signatures = {}
for signature_key, function in sorted(signature_functions.items()):
if function.graph.captures:
argument_inputs = function.graph.inputs[:-len(function.graph.captures)]
else:
argument_inputs = function.graph.inputs
mapped_inputs, exterior_argument_placeholders = (
_map_function_arguments_to_created_inputs(
argument_inputs, signature_key, function.name))
outputs = _normalize_outputs(
_call_function_with_mapped_captures(
function, mapped_inputs, resource_map),
function.name, signature_key)
signatures[signature_key] = signature_def_utils.build_signature_def(
_tensor_dict_to_tensorinfo(exterior_argument_placeholders),
_tensor_dict_to_tensorinfo(outputs))
return signatures
def _trace_resource_initializers(accessible_objects):
"""Create concrete functions from `TrackableResource` objects."""
resource_initializers = []
def _wrap_initializer(obj):
obj.initialize()
return constant_op.constant(1.) # Dummy control output
for obj in accessible_objects:
if isinstance(obj, tracking.TrackableResource):
resource_initializers.append(def_function.function(
functools.partial(_wrap_initializer, obj),
# All inputs are captures.
input_signature=[]).get_concrete_function())
return resource_initializers
_AssetInfo = collections.namedtuple(
"_AssetInfo", [
# List of AssetFileDef protocol buffers
"asset_defs",
# Map from asset variable resource Tensors to their init ops
"asset_initializers_by_resource",
# Map from base asset filenames to full paths
"asset_filename_map",
# Map from TrackableAsset to index of corresponding AssetFileDef
"asset_index"])
def _process_asset(trackable_asset, asset_info, resource_map):
"""Add `trackable_asset` to `asset_info` and `resource_map`."""
original_variable = trackable_asset.asset_path
with context.eager_mode():
original_path = original_variable.numpy()
path = builder_impl.get_asset_filename_to_add(
asset_filepath=original_path,
asset_filename_map=asset_info.asset_filename_map)
# TODO(andresp): Instead of mapping 1-1 between trackable asset
# and asset in the graph def consider deduping the assets that
# point to the same file.
asset_path_initializer = array_ops.placeholder(
shape=original_variable.shape,
dtype=dtypes.string,
name="asset_path_initializer")
asset_variable = resource_variable_ops.ResourceVariable(
asset_path_initializer)
asset_info.asset_filename_map[path] = original_path
asset_def = meta_graph_pb2.AssetFileDef()
asset_def.filename = path
asset_def.tensor_info.name = asset_path_initializer.name
asset_info.asset_defs.append(asset_def)
asset_info.asset_initializers_by_resource[original_variable.handle] = (
asset_variable.initializer)
asset_info.asset_index[trackable_asset] = len(asset_info.asset_defs) - 1
resource_map[original_variable.handle] = asset_variable.handle
def _map_resources(accessible_objects):
"""Makes new resource handle ops corresponding to existing resource tensors.
Creates resource handle ops in the current default graph, whereas
`accessible_objects` will be from an eager context. Resource mapping adds
resource handle ops to the main GraphDef of a SavedModel, which allows the C++
loader API to interact with variables.
Args:
accessible_objects: A list of objects, some of which may contain resources,
to create replacements for.
Returns:
A tuple of (object_map, resource_map, asset_info):
object_map: A dictionary mapping from object in `accessible_objects` to
replacement objects created to hold the new resource tensors.
resource_map: A dictionary mapping from resource tensors extracted from
`accessible_objects` to newly created resource tensors.
asset_info: An _AssetInfo tuple describing external assets referenced from
accessible_objects.
"""
# TODO(allenl): Handle MirroredVariables and other types of variables which
# may need special casing.
object_map = {}
resource_map = {}
asset_info = _AssetInfo(
asset_defs=[],
asset_initializers_by_resource={},
asset_filename_map={},
asset_index={})
for obj in accessible_objects:
if isinstance(obj, tracking.TrackableResource):
new_resource = obj.create_resource()
resource_map[obj.resource_handle] = new_resource
elif resource_variable_ops.is_resource_variable(obj):
new_variable = resource_variable_ops.copy_to_graph_uninitialized(obj)
object_map[obj] = new_variable
resource_map[obj.handle] = new_variable.handle
elif isinstance(obj, tracking.TrackableAsset):
_process_asset(obj, asset_info, resource_map)
return object_map, resource_map, asset_info
def _fill_meta_graph_def(meta_graph_def, obj, signature_functions,
object_saver):
"""Generates a MetaGraph which calls `signature_functions`.
Args:
meta_graph_def: The MetaGraphDef proto to fill.
obj: The checkpointable object being exported.
signature_functions: A dictionary mapping signature keys to concrete
functions containing signatures to add to the MetaGraph.
object_saver: A CheckpointableSaver to add to the MetaGraph.
Returns:
An _AssetInfo, which contains information to help creating the SavedModel.
"""
signatures = {}
# List objects from the eager context to make sure Optimizers give us the
# right Graph-dependent variables.
accessible_objects = util.list_objects(obj)
resource_initializer_functions = _trace_resource_initializers(
accessible_objects)
exported_graph = ops.Graph()
resource_initializer_ops = []
with exported_graph.as_default():
object_map, resource_map, asset_info = _map_resources(accessible_objects)
for resource_initializer_function in resource_initializer_functions:
asset_dependencies = []
for capture in resource_initializer_function.graph.external_captures:
asset_initializer = asset_info.asset_initializers_by_resource.get(
capture, None)
if asset_initializer is not None:
asset_dependencies.append(asset_initializer)
with ops.control_dependencies(asset_dependencies):
resource_initializer_ops.append(
_call_function_with_mapped_captures(
resource_initializer_function, [], resource_map))
with ops.control_dependencies(resource_initializer_ops):
init_op = control_flow_ops.no_op()
# Add the same op to the main_op collection and to the init_op
# signature. The collection is for compatibility with older loader APIs;
# only one will be executed.
meta_graph_def.collection_def[constants.MAIN_OP_KEY].node_list.value.append(
init_op.name)
meta_graph_def.signature_def[constants.INIT_OP_SIGNATURE_KEY].CopyFrom(
signature_def_utils.op_signature_def(
init_op, constants.INIT_OP_SIGNATURE_KEY))
# Saving an object-based checkpoint again gathers variables. We need to do the
# gathering from the eager context so Optimizers save the right set of
# variables, but want any operations associated with the save/restore to be in
# the exported graph (thus the `to_graph` argument).
saver = object_saver.freeze(object_map=object_map, to_graph=exported_graph)
# We must resolve the concrete function to add to MetaGraph while in eager
# mode.
concrete_functions = []
for accessible_object in accessible_objects:
for function in function_serialization.list_all_polymorphic_functions(
accessible_object).values():
concrete_functions.extend(
function_serialization.list_all_concrete_functions(function))
with exported_graph.as_default():
signatures = _generate_signatures(signature_functions, resource_map)
for concrete_function in concrete_functions:
concrete_function.add_to_graph()
saver_def = saver.to_proto()
meta_graph_def.saver_def.CopyFrom(saver_def)
graph_def = exported_graph.as_graph_def(add_shapes=True)
# Clean reference cycles so repeated export()s don't make work for the garbage
# collector.
ops.dismantle_graph(exported_graph)
meta_graph_def.graph_def.CopyFrom(graph_def)
meta_graph_def.meta_info_def.tags.append(tag_constants.SERVING)
meta_graph_def.asset_file_def.extend(asset_info.asset_defs)
for signature_key, signature in signatures.items():
meta_graph_def.signature_def[signature_key].CopyFrom(signature)
meta_graph.strip_graph_default_valued_attrs(meta_graph_def)
return asset_info
def _write_object_graph(root, export_dir, asset_file_def_index):
"""Save a SavedObjectGraph proto for `root`."""
# SavedObjectGraph is similar to the CheckpointableObjectGraph proto in the
# checkpoint. It will eventually go into the SavedModel.
proto = saved_object_graph_pb2.SavedObjectGraph()
checkpointable_objects, node_ids, slot_variables = util.find_objects(root)
util.fill_object_graph_proto(checkpointable_objects, node_ids, slot_variables,
proto)
for obj, obj_proto in zip(checkpointable_objects, proto.nodes):
_write_object_proto(obj, obj_proto, asset_file_def_index)
function_serialization.add_polymorphic_functions_to_object_graph_proto(
checkpointable_objects, proto)
extra_asset_dir = os.path.join(
compat.as_bytes(export_dir),
compat.as_bytes(constants.EXTRA_ASSETS_DIRECTORY))
file_io.recursive_create_dir(extra_asset_dir)
object_graph_filename = os.path.join(
extra_asset_dir, compat.as_bytes("object_graph.pb"))
file_io.write_string_to_file(object_graph_filename, proto.SerializeToString())
def _write_object_proto(obj, proto, asset_file_def_index):
"""Saves an object into SavedObject proto."""
if isinstance(obj, tracking.TrackableAsset):
proto.asset.SetInParent()
proto.asset.asset_file_def_index = asset_file_def_index[obj]
else:
proto.user_object.SetInParent()
@tf_export("saved_model.save", v1=["saved_model.experimental.save"])
def save(obj, export_dir, signatures=None):
# pylint: disable=line-too-long
"""Exports the Checkpointable object `obj` to [SavedModel format](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/saved_model/README.md).
Example usage:
```python
class Adder(tf.train.Checkpoint):
@tf.function(input_signature=[tf.TensorSpec(shape=None, dtype=tf.float32)])
def add(self, x):
return x + x + 1.
to_export = Adder()
tf.saved_model.save(to_export, '/tmp/adder')
```
The resulting SavedModel is then servable with an input named "x", its value
having any shape and dtype float32.
The optional `signatures` argument controls which methods in `obj` will be
available to programs which consume `SavedModel`s, for example serving
APIs. Python functions may be decorated with
`@tf.function(input_signature=...)` and passed as signatures directly, or
lazily with a call to `get_concrete_function` on the method decorated with
`@tf.function`.
If the `signatures` argument is omitted, `obj` will be searched for
`@tf.function`-decorated methods. If exactly one `@tf.function` is found, that
method will be used as the default signature for the SavedModel. This behavior
is expected to change in the future, when a corresponding
`tf.saved_model.load` symbol is added. At that point signatures will be
completely optional, and any `@tf.function` attached to `obj` or its
dependencies will be exported for use with `load`.
When invoking a signature in an exported SavedModel, `Tensor` arguments are
identified by name. These names will come from the Python function's argument
names by default. They may be overridden by specifying a `name=...` argument
in the corresponding `tf.TensorSpec` object. Explicit naming is required if
multiple `Tensor`s are passed through a single argument to the Python
function.
The outputs of functions used as `signatures` must either be flat lists, in
which case outputs will be numbered, or a dictionary mapping string keys to
`Tensor`, in which case the keys will be used to name outputs.
Since `tf.keras.Model` objects are also Checkpointable, this function can be
used to export Keras models. For example, exporting with a signature
specified:
```python
class Model(tf.keras.Model):
@tf.function(input_signature=[tf.TensorSpec(shape=[None], dtype=tf.string)])
def serve(self, serialized):
...
m = Model()
tf.saved_model.save(m, '/tmp/saved_model/')
```
Exporting from a function without a fixed signature:
```python
class Model(tf.keras.Model):
@tf.function
def call(self, x):
...
m = Model()
tf.saved_model.save(
m, '/tmp/saved_model/',
signatures=m.call.get_concrete_function(
tf.TensorSpec(shape=[None, 3], dtype=tf.float32, name="inp")))
```
`tf.keras.Model` instances constructed from inputs and outputs already have a
signature and so do not require a `@tf.function` decorator or a `signatures`
argument. If neither are specified, the model's forward pass is exported.
```python
x = input_layer.Input((4,), name="x")
y = core.Dense(5, name="out")(x)
model = training.Model(x, y)
tf.saved_model.save(model, '/tmp/saved_model/')
# The exported SavedModel takes "x" with shape [None, 4] and returns "out"
# with shape [None, 5]
```
Variables must be tracked by assigning them to an attribute of a tracked
object or to an attribute of `obj` directly. TensorFlow objects (e.g. layers
from `tf.keras.layers`, optimizers from `tf.train`) track their variables
automatically. This is the same tracking scheme that `tf.train.Checkpoint`
uses, and an exported `Checkpoint` object may be restored as a training
checkpoint by pointing `tf.train.Checkpoint.restore` to the SavedModel's
"variables/" subdirectory. Currently variables are the only stateful objects
supported by `tf.saved_model.save`, but others (e.g. tables) will be supported
in the future.
`tf.function` does not hard-code device annotations from outside the function
body, instead using the calling context's device. This means for example that
exporting a model which runs on a GPU and serving it on a CPU will generally
work, with some exceptions. `tf.device` annotations inside the body of the
function will be hard-coded in the exported model; this type of annotation is
discouraged. Device-specific operations, e.g. with "cuDNN" in the name or with
device-specific layouts, may cause issues. Currently a `DistributionStrategy`
is another exception: active distribution strategies will cause device
placements to be hard-coded in a function. Exporting a single-device
computation and importing under a `DistributionStrategy` is not currently
supported, but may be in the future.
SavedModels exported with `tf.saved_model.save` [strip default-valued
attributes](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/saved_model/README.md#stripping-default-valued-attributes)
automatically, which removes one source of incompatibilities when the consumer
of a SavedModel is running an older TensorFlow version than the
producer. There are however other sources of incompatibilities which are not
handled automatically, such as when the exported model contains operations
which the consumer does not have definitions for.
The current implementation of `tf.saved_model.save` targets serving use-cases,
but omits information which will be necessary for the planned future
implementation of `tf.saved_model.load`. Exported models using the current
`save` implementation, and other existing SavedModels, will not be compatible
with `tf.saved_model.load` when it is implemented. Further, `save` will in the
future attempt to export `@tf.function`-decorated methods which it does not
currently inspect, so some objects which are exportable today will raise
exceptions on export in the future (e.g. due to complex/non-serializable
default arguments). Such backwards-incompatible API changes are expected only
prior to the TensorFlow 2.0 release.
Args:
obj: A checkpointable object to export.
export_dir: A directory in which to write the SavedModel.
signatures: Optional, either a `tf.function` with an input signature
specified or the result of `f.get_concrete_function` on a
`@tf.function`-decorated function `f`, in which case `f` will be used to
generate a signature for the SavedModel under the default serving
signature key. `signatures` may also be a dictionary, in which case it
maps from signature keys to either `tf.function` instances with input
signatures or concrete functions. The keys of such a dictionary may be
arbitrary strings, but will typically be from the
`tf.saved_model.signature_constants` module.
Raises:
ValueError: If `obj` is not checkpointable.
@compatibility(eager)
Not supported when graph building. From TensorFlow 1.x,
`tf.enable_eager_execution()` must run first. May not be called from within a
function body.
@end_compatibility
"""
if not context.executing_eagerly():
with ops.init_scope():
if context.executing_eagerly():
raise AssertionError(
"tf.saved_model.save is not supported inside a traced "
"@tf.function. Move the call to the outer eagerly-executed "
"context.")
else:
raise AssertionError(
"tf.saved_model.save is not supported when graph building. "
"tf.enable_eager_execution() must run first when calling it from "
"TensorFlow 1.x.")
# pylint: enable=line-too-long
if not isinstance(obj, base.CheckpointableBase):
raise ValueError(
"Expected a Checkpointable object for export, got {}.".format(obj))
if signatures is None:
# Note that we run this before saving the checkpoint, since looping over
# attributes may have the side effect of creating variables in some cases.
signatures = _find_function_to_export(obj)
signatures = _canonicalize_signatures(signatures)
# TODO(allenl): Factor out some subset of SavedModelBuilder which is 2.x
# compatible (no sessions) and share it with this export API rather than
# making a SavedModel proto and writing it directly.
saved_model = saved_model_pb2.SavedModel()
meta_graph_def = saved_model.meta_graphs.add()
object_saver = util.CheckpointableSaver(obj)
asset_info = _fill_meta_graph_def(
meta_graph_def, obj, signatures, object_saver)
saved_model.saved_model_schema_version = (
constants.SAVED_MODEL_SCHEMA_VERSION)
# So far we've just been generating protocol buffers with no I/O. Now we write
# the checkpoint, copy assets into the assets directory, and write out the
# SavedModel proto itself.
utils_impl.get_or_create_variables_dir(export_dir)
object_saver.save(utils_impl.get_variables_path(export_dir))
builder_impl.copy_assets_to_destination_dir(asset_info.asset_filename_map,
export_dir)
path = os.path.join(
compat.as_bytes(export_dir),
compat.as_bytes(constants.SAVED_MODEL_FILENAME_PB))
file_io.write_string_to_file(path, saved_model.SerializeToString())
_write_object_graph(obj, export_dir, asset_info.asset_index)
| apache-2.0 | -1,015,136,350,667,706,800 | 45.010738 | 162 | 0.722212 | false |
alejandroesquiva/AutomaticApiRest-PythonConnector | example/test.py | 1 | 1552 | __author__ = 'Alejandro Esquiva Rodriguez'
from aarpy.AARConnector import AARConnector
#AAR Instance
##Create instance via URL
AAR = AARConnector(url="http://automaticapirest.info/demo/getData.php?t=Country&c=Code,Name&l=0,5")
##Create instance via parameters
AAR = AARConnector(domain="http://automaticapirest.info/demo/",table="Country",columns="Name",orderby="Name",limit="10",where="Name:'Albania'")
#Get all the json
jsondata = AAR.getJson()
'''
print(jsondata)
##########################
{'dbInfo': ['Code', 'Name'], 'data': [{'1': 'Aruba', 'Code': 'ABW', '0': 'ABW', 'Name': 'Aruba'}, {'1': 'Afghanistan', 'Code': 'AFG', '0': 'AFG', 'Name': 'Afghanistan'}, {'1': 'Angola', 'Code': 'AGO', '0': 'AGO', 'Name': 'Angola'}, {'1': 'Anguilla', 'Code': 'AIA', '0': 'AIA', 'Name': 'Anguilla'}, {'1': 'Albania', 'Code': 'ALB', '0': 'ALB', 'Name': 'Albania'}]}
'''
#Get all the data
data = AAR.getData()
#Get query info
dbinfo = AAR.getDBInfo()
#Output a specific data
#Name in the first row
name = data[0]["Name"]
#OR
name = data[0]["0"]
print(name)
#Print query info
AAR.printDBInfo()
'''
[
"Code",
"Name"
]
'''
#Print Data
AAR.printData()
'''
[
{
"0": "ABW",
"1": "Aruba",
"Code": "ABW",
"Name": "Aruba"
},
{
"0": "AFG",
"1": "Afghanistan",
"Code": "AFG",
"Name": "Afghanistan"
},
{
"0": "AGO",
"1": "Angola",
"Code": "AGO",
"Name": "Angola"
},
{
"0": "AIA",
"1": "Anguilla",
"Code": "AIA",
"Name": "Anguilla"
},
{
"0": "ALB",
"1": "Albania",
"Code": "ALB",
"Name": "Albania"
}
]
'''
| mit | 2,217,455,657,594,030,000 | 17.258824 | 362 | 0.556701 | false |
stephane-caron/ijhr-2016 | pymanoid/cone.py | 1 | 2305 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2015 Stephane Caron <[email protected]>
#
# This file is part of pymanoid.
#
# pymanoid is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation, either version 3 of the License, or (at your option) any later
# version.
#
# pymanoid is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# pymanoid. If not, see <http://www.gnu.org/licenses/>.
from cdd import Matrix, Polyhedron, RepType
from numpy import array, hstack, zeros
NUMBER_TYPE = 'float' # 'float' or 'fraction'
class ConeException(Exception):
def __init__(self, M):
self.M = M
class NotConeFace(ConeException):
def __str__(self):
return "Matrix is not a cone face"
class NotConeSpan(ConeException):
def __str__(self):
return "Matrix is not a cone span"
def face_of_span(S):
"""
Returns the face matrix S^F of the span matrix S,
that is, a matrix such that
{x = S z, z >= 0} if and only if {S^F x <= 0}.
"""
V = hstack([zeros((S.shape[1], 1)), S.T])
# V-representation: first column is 0 for rays
V_cdd = Matrix(V, number_type=NUMBER_TYPE)
V_cdd.rep_type = RepType.GENERATOR
P = Polyhedron(V_cdd)
H = array(P.get_inequalities())
b, A = H[:, 0], H[:, 1:]
for i in xrange(H.shape[0]):
if b[i] != 0:
raise NotConeSpan(S)
return -A
def span_of_face(F):
"""
Compute the span matrix F^S of the face matrix F,
that is, a matrix such that
{F x <= 0} if and only if {x = F^S z, z >= 0}.
"""
b, A = zeros((F.shape[0], 1)), -F
# H-representation: A x + b >= 0
F_cdd = Matrix(hstack([b, A]), number_type=NUMBER_TYPE)
F_cdd.rep_type = RepType.INEQUALITY
P = Polyhedron(F_cdd)
V = array(P.get_generators())
for i in xrange(V.shape[0]):
if V[i, 0] != 0: # 1 = vertex, 0 = ray
raise NotConeFace(F)
return V[:, 1:]
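# Minimal usage sketch (assumes numpy and pycddlib are importable; the matrix
# below is hypothetical): face_of_span and span_of_face are dual, so a span
# converted to its face representation and back describes the same cone.
#
#   >>> from numpy import array
#   >>> S = array([[1., 0.], [0., 1.]])  # rays spanning the positive quadrant
#   >>> F = face_of_span(S)              # {x = S z, z >= 0}  <=>  {F x <= 0}
#   >>> R = span_of_face(F)              # rays of the same cone, up to row order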
| gpl-3.0 | 8,395,834,680,207,245,000 | 25.802326 | 79 | 0.625163 | false |
cs98jrb/Trinity | mysite/events/forms/booking.py | 1 | 2961 | __author__ = 'james'
from django.utils.translation import ugettext as _
from django import forms
from django.core.exceptions import ValidationError
from django.contrib.auth.models import User
from django.contrib.auth import login, authenticate
from events.models import Booking
from orders.models import Order, OrderItem
class BookingForm(forms.ModelForm):
# set the css of required fields
required_css_class = 'required'
email = forms.EmailField(
max_length=254,
label="Contact email",
required=True,
help_text="This is required so we can contact you."
)
tandc = forms.BooleanField(
label="Accept terms and conditions",
required=True,
)
def __init__(self, request, *args, **kwargs):
        super(BookingForm, self).__init__(*args, **kwargs)
# add label
self.fields['quantity'].label = "Number of people"
try:
if not request.user.is_anonymous():
self.fields['email'].initial = request.user.email
except User.DoesNotExist:
pass
class Meta:
model = Booking
fields = ['email', 'quantity', ]
def save(self, event, price, user, commit=True):
from django.contrib.contenttypes.models import ContentType
#
booking = super(BookingForm, self).save(commit=False)
booking.booked_by = user
booking.event = event
booking.price = price
total_booked = 0
open_order_list = Order.objects.open_order(user=user)
if open_order_list:
order = open_order_list[0]
for item in order.orderitem_set.all():
total_booked += item.content_object.quantity
if not(event.pricing_set.all().filter(online_book=True)
and not event.fully_booked):
raise ValidationError(
_('This event is fully booked'),
code='Fully Booked'
)
commit = False
elif event.num_spaces < (booking.quantity + total_booked):
places = booking.quantity + total_booked
raise ValidationError(
_('Not enough spaces for %(places)s people.'),
code='No Space',
params={'places': places},
)
commit = False
if commit:
booking.save()
# Add to open order
if not open_order_list:
order = Order(ordered_by=user)
order.save()
order_item = OrderItem(
order=order,
description=event.__unicode__(),
value=(price.value*booking.quantity),
vat=price.vat,
content_type=ContentType.objects.get_for_model(booking),
object_id=booking.id
)
order_item.save()
return booking
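    # Usage sketch (names outside this class are assumptions): a view binds the
    # form to POST data and saves it with the event, price and user it resolved,
    # e.g.
    #
    #   form = BookingForm(request, data=request.POST)
    #   if form.is_valid():
    #       booking = form.save(event, price, request.user)
    #
    # save() raises ValidationError when the event is fully booked or lacks
    # enough spaces, so the caller may want to catch it and re-display the form.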
def clean(self):
return self.cleaned_data | gpl-2.0 | -287,810,376,454,806,340 | 28.039216 | 72 | 0.570078 | false |
Midnighter/pyorganism | setup.py | 1 | 2511 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
==================
PyOrganism Package
==================
:Authors:
Moritz Emanuel Beber
:Date:
2012-05-22
:Copyright:
Copyright(c) 2012 Jacobs University of Bremen. All rights reserved.
:File:
setup.py
"""
import sys
from os.path import join
from setuptools import (setup, Extension)
try:
from Cython.Distutils import build_ext
except ImportError as err:
sys.exit("Apologies, you need 'Cython' to install 'pyorganism'.")
if __name__ == "__main__":
# continuous
sources = ["continuous_wrapper.pyx", "continuous.c"]
c_path = join("pyorganism", "regulation", "src")
continuous = Extension("pyorganism.regulation.continuous_wrapper",
sources=[join(c_path, src) for src in sources],
include_dirs=[c_path]
)
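    # A typical local build (assuming Cython and a C compiler are installed) is
    # `python setup.py build_ext --inplace`, which cythonizes
    # continuous_wrapper.pyx and compiles it together with continuous.c.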
setup(
name="pyorganism",
version="0.2.5",
license="BSD",
description="analyze organisational principles in living organisms",
author="Moritz Emanuel Beber",
author_email="moritz (dot) beber (at) gmail (dot) com",
url="http://github.com/Midnighter/pyorganism",
zip_safe=False,
install_requires=[
"future",
"networkx",
"numpy",
"pandas"
],
packages=["pyorganism",
"pyorganism.io",
"pyorganism.metabolism",
"pyorganism.regulation",
],
# package_data = {"pyorganism": ["data/*.xml", "data/*.txt", "data/*.tsv"]},
ext_modules=[continuous],
cmdclass={"build_ext": build_ext},
classifiers=[
# complete classifier list: http://pypi.python.org/pypi?%3Aaction=list_classifiers
"Development Status :: 4 - Beta",
"Intended Audience :: Developers",
"License :: OSI Approved :: BSD License",
"Natural Language :: English",
"Operating System :: Unix",
"Operating System :: POSIX",
"Operating System :: Microsoft :: Windows",
"Programming Language :: Python",
"Programming Language :: Python :: 2.6",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.3",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: Implementation :: CPython",
"Topic :: Scientific/Engineering :: Bio-Informatics",
],
)
| bsd-3-clause | 7,473,944,302,607,792,000 | 29.621951 | 94 | 0.562724 | false |
abice-sbr/adaptsearch | blastalign.py | 1 | 4394 | import string, re
# Written by Robert Belshaw (School of Biomedical & Healthcare Sciences, University of Plymouth) & Aris Katzourakis (Department of Zoology, University of Oxford)
# For more information and to cite see Belshaw, R & Katzourakis, A (2005) BlastAlign: a program that uses blast to align problematic nucleotide sequences. Bioinformatics 21:122-123.
# Please send any comments to [email protected] or [email protected]
file = open('blast_out', 'r')
buffer = file.readlines()
def Calculate_hits():
    Number_of_landmarks = len(Permanent_dictionary[KeyList[0]]) # use length of first entry
counter = 1
while counter < Number_of_landmarks: # Less than because list starts from zero
number_of_hits = 0
for item in KeyList:
list = Permanent_dictionary[item]
landmark = list[counter]
if landmark != '*':
number_of_hits = number_of_hits + 1
List_of_hits.append(number_of_hits)
counter = counter +1
return List_of_hits
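# doInsertRoutine inserts a new landmark coordinate `value` into a (roughly
# ascending) list of landmarks that may contain '*' placeholders: the value is
# placed before the first landmark >= value, stepping back over the run of '*'
# entries in front of it when the new value is closer to the previous landmark,
# and appended before any trailing '*' when every landmark is smaller. It
# returns the updated list and the insertion index, e.g.
#   doInsertRoutine([10, '*', 50], 30)  ->  ([10, '*', 30, 50], 2)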
def doInsertRoutine(list, value):
no_ast = 0
old_diff = 0
switch = 0
for item in list:
if item == '*':
no_ast = no_ast+1
else:
new_diff = (item - value)*(item - value)
if item < value:
no_ast = 0
else:
i = list.index(item)
if new_diff > old_diff:
i = i-no_ast
list.insert(i, value)
else:
list.insert(i, value)
switch = 1
break
old_diff = new_diff
if switch == 0:
no_ast = 0
for item in list:
if item == '*':
no_ast = no_ast+1
else:
no_ast = 0
i = len(list) - no_ast # Finds position before any trailing asterisks
list.insert(i, value)
return list, i
def go_through_Library(Library_dictionary, tempKey, LandmarkInsertPos):
tempKeyList = []
for item in KeyList:
tempKeyList.append(item)
tempKeyList.remove(tempKey)
for item in tempKeyList:
tempList = []
for subitem in Permanent_dictionary[item]:
tempList.append(subitem)
if Library_dictionary.has_key(item):
tempList.insert(LandmarkInsertPos, Library_dictionary[item])
Permanent_dictionary[item] = tempList
else:
tempList.insert(LandmarkInsertPos, '*')
Permanent_dictionary[item] = tempList
def process_previous_block(tempKey, tempValue, Library_dictionary):
landmark = 0
tempList = []
for item in (Permanent_dictionary[tempKey]):
tempList.append(item)
for item in (Permanent_dictionary[tempKey]):
if item != '*':
if (tempValue >= item-30) and (tempValue <= item+30):
landmark = 1
else:
pass
if landmark == 0:
theAnswer = doInsertRoutine(tempList, tempValue)
tempList = theAnswer[0]
LandmarkInsertPos = theAnswer[1]
Permanent_dictionary[tempKey] = tempList
go_through_Library(Library_dictionary, tempKey, LandmarkInsertPos)
def makeOutFile():
theOutFile = open('blast_out_python', 'w')
theOutFile.write('\t\t') # Just to line up entries for ease of viewing
for item in List_of_hits:
theOutFile.write('%s\t' %item)
theOutFile.write('\n')
for item in KeyList:
theOutFile.write('%s\t' %item)
for listItem in Permanent_dictionary[item]:
theOutFile.write('%s\t' %listItem)
theOutFile.write('\n')
Query_dictionary = {}
Library_dictionary = {}
Permanent_dictionary = {}
KeyList = []
list = [0]
List_of_hits = [] # To note whether entries are unique or not
for line in buffer:
if line[0] == '*':
entry = ""
entry = line[1:-1]
Permanent_dictionary[entry] = list
KeyList.append(entry)
n=0
previousKey = "null" # Needed in case of identical sequences, to avoid an unassigned variable below
for line in buffer:
tempList = []
if line[0:5] == 'Query':
if n >= 1:
process_previous_block(QueryKey, QueryValue, Library_dictionary)
Library_dictionary = {}
line = string.split(line)
QueryKey = (line[0])[5:]
QueryValue = string.atoi(line[1])
Query_dictionary[QueryKey] = QueryValue
n=n+1
elif line[0:7] == 'Library':
line = string.split(line)
LibraryKey = (line[0])[7:]
LibraryValue = string.atoi(line[1])
if LibraryKey != QueryKey:
if previousKey == LibraryKey:
previousDist = (previousValue-QueryValue)*(previousValue-QueryValue)
currentDist = (LibraryValue-QueryValue)*(LibraryValue-QueryValue)
if currentDist < previousDist:
Library_dictionary[LibraryKey] = LibraryValue
else:
Library_dictionary[LibraryKey] = LibraryValue
previousKey = (line[0])[7:]
previousValue = string.atoi(line[1])
Calculate_hits()
makeOutFile()
| gpl-3.0 | 5,974,816,698,575,030,000 | 28.891156 | 181 | 0.69117 | false |
chrismamil/chowda | test/test_chowda.py | 1 | 2201 | import unittest
import os
import chowda.parsing as parse
import datetime
import pandas as pd
from chowda.load import load_file
DATA_DIR = os.path.join(os.path.dirname(__file__), "data")
TEST_FILE = "CTL1 wk3 exp1 RAW data.txt"
TEST_1 = os.path.join(DATA_DIR, TEST_FILE)
class TestChowda(unittest.TestCase):
    def setUp(self):
test_file = os.path.join(DATA_DIR, TEST_FILE)
with open(test_file) as in_handle:
self.in_data = in_handle.readlines()
def test_parse_experiment_time(self):
result = parse.parse_experiment_time(self.in_data[0])
self.assertEquals(result.keys()[0], "Experiment Started")
def test_parse_subject(self):
result = parse.parse_subject(self.in_data[1])
self.assertEquals(result["Subject"], "CNS1")
def test_parse_mass(self):
result = parse.parse_subject_mass(self.in_data[2])
self.assertEquals(result["Subject Mass"], 34.26)
def test_load_file(self):
from chowda.load import load_file
result = load_file(TEST_1)
self.assertEquals(result[0].strip(),
'"Oxymax Windows V 2.30 Data File"')
def test_get_header(self):
from chowda.load import get_header
result = get_header(TEST_1)
self.assertEquals(result[0].strip(),
'"Oxymax Windows V 2.30 Data File"')
self.assertEquals(result[-1].split(",")[0].strip(), '"========"')
def test_get_data(self):
from chowda.load import get_data
result = get_data(TEST_1)
self.assertEquals(result[0].split(",", 1)[0], "Interval")
def test_partition_file(self):
from chowda.load import partition_file
header, data = partition_file(TEST_1)
self.assertEquals(header[0].strip(),
'"Oxymax Windows V 2.30 Data File"')
self.assertEquals(header[-1].split(",")[0].strip(), '"========"')
self.assertEquals(data[0].split(",", 1)[0], "Interval")
def test_load_dataframe(self):
from chowda.load import load_dataframe
result = load_dataframe(parse.get_data(self.in_data))
self.assertEquals(result["Interval"].ix[0], "001")
| mit | -1,276,954,630,114,679,000 | 35.081967 | 73 | 0.613358 | false |
CDKGlobal/cd-performance-promotion | cd_perf_promotion/engines/comparisonengine.py | 1 | 19434 | import json
import operator
class ComparisonEngine:
"""
Queries the performance tools' APIs and determines if the build passes
the target requirements.
"""
def check_health_severity(self, violation):
"""
Fails the build if the defined severity is found in the health rule
violations
Keyword arguments:
violation - dictionary that contains all of the information for a single
violation (as determined by AppDynamics)
"""
# Add the violation to the output file after removing unecessary data
self.output_json["appdynamics"]["healthrule_violations"].append(violation)
# Fail the build
self.output_json["promotion_gates"]["appdynamics_health"] = False
self.build_status_passed = False
def compare_appdynamics(self, healthrule_violations, warning, critical):
"""
Performs the comparison between the defined violation severity settings
and the violations that occurred
Keyword arguments:
healthrule_violations - Dictionary that contains all of the AppDynamics
health violations
warning - Boolean that indicates whether the user thinks
that health rule violations with a status of
"WARNING" are important enough to evaluate
critical - Boolean that indicates whether the user thinks
that health rule violations with a status of
"CRITICAL" are important enough to evaluate
"""
# Set the health to True by default and flip it if necessary
self.output_json["promotion_gates"]["appdynamics_health"] = True
for violation in healthrule_violations:
# Check if the severity settings that we care about exist in the health rule violations
if ((warning == True) and (violation["severity"] == "WARNING")):
self.check_health_severity(violation)
if ((critical == True) and (violation["severity"] == "CRITICAL")):
self.check_health_severity(violation)
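    # Sketch of the expected input (the "name" field and the values are
    # hypothetical; only "severity" is read here):
    #
    #   healthrule_violations = [{"severity": "CRITICAL", "name": "CPU high"}]
    #   self.compare_appdynamics(healthrule_violations, warning=False, critical=True)
    #   # -> appdynamics_health gate set to False and build_status_passed = False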
def compare_blazemeter(self, metric_title, target_data, metric_data, transaction_index, operator):
"""
Performs the comparison between configuration promotion gates and the
actual blazemeter test data
Keyword arguments:
metric_title - String title that indicates the data item that is being
evaluated
target_data - Number that indicates the cutoff point for the specific
metric as determined by the user in the config
metric_data - The actual performance data number that is compared
against
transaction_index - The index of the transaction in the list of
transactions
        operator          - <, >, <=, >=, == which is used to compare the real
data against the config
"""
if (target_data > 0):
# Metric is set in config, begin comparison
# Add the data to the output file
self.output_json["blazemeter"]["transactions"][transaction_index][metric_title] = metric_data
# Get the "passed" JSON key name ready
metric_title_passed = metric_title + "_passed"
# Determine if promotion gate was met
            # Uses the operator module so that the process_data function can determine
# what operator (<, >, <=, >=, etc.) should be used
if operator(metric_data, target_data):
# Success
if metric_title_passed not in self.output_json["promotion_gates"]:
# Not mentioned before, add it in
# Not necessary to make the overall status True again if it's True
# and if it was False for one transaction the overall status should still be False
self.output_json["promotion_gates"][metric_title_passed] = True
# Regardless, add it into the transaction data
self.output_json["blazemeter"]["transactions"][transaction_index][metric_title_passed] = True
else:
# Failure
self.output_json["promotion_gates"][metric_title_passed] = False
self.output_json["blazemeter"]["transactions"][transaction_index][metric_title_passed] = False
self.build_status_passed = False
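    # Illustrative call pattern (numbers are hypothetical): process_data calls
    # this method once per metric, passing an operator that encodes the gate
    # direction, e.g.
    #
    #   self.compare_blazemeter("response_time_avg", 500, 432.1, 0, operator.lt)
    #   # 432.1 < 500 -> gate passes
    #   self.compare_blazemeter("transaction_rate", 100, 80.0, 0, operator.gt)
    #   # 80.0 > 100 is False -> gate fails and build_status_passed becomes False
    #
    # A non-positive target means the gate is not set in the config, so the
    # metric is skipped entirely.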
def compare_webpagetest(self, metric_title, target_data, metric_data, run_index, view, operator):
"""
Performs the comparison between configuration promotion gates and the
actual WebPageTest test data
Keyword arguments:
metric_title - String title that indicates the data item that is being
evaluated
target_data - Number that indicates the cutoff point for the specific
metric as determined by the user in the config
metric_data - The actual performance data number that is compared
against
        run_index         - The index of the run in the list of runs, or None
                            when the data comes from the averages section
        view              - Either first_view or repeat_view
        operator          - <, >, <=, >=, == which is used to compare the real
data against the config
"""
if (target_data > 0):
# Metric is set in config, begin comparison
# Convert the metric data to an int (WebPageTest's XML output makes everything a string)
metric_data = int(metric_data)
# Add the data to the output file
if (run_index == None):
# Data from the averages section
self.output_json["webpagetest"]["average"][view][metric_title] = metric_data
else:
# Data from the runs section
self.output_json["webpagetest"]["runs"][run_index][view][metric_title] = metric_data
# Get the "passed" JSON key name ready
metric_title_passed = metric_title + "_passed"
# Determine if promotion gate was met
            # Uses the operator module so that the process_data function can determine
# what operator (<, >, <=, >=, etc.) should be used
if operator(metric_data, target_data):
# Success
if metric_title_passed not in self.output_json["promotion_gates"]:
# Not mentioned before, add it in
# Not necessary to make the overall status True again if it's True
# and if it was False for one transaction the overall status should still be False
if ((metric_title_passed in self.output_json["promotion_gates"] and self.output_json["promotion_gates"][metric_title_passed] != False) or (metric_title_passed not in self.output_json["promotion_gates"])):
self.output_json["promotion_gates"][metric_title_passed] = True
# Regardless, add it into the transaction data
if (run_index == None):
self.output_json["webpagetest"]["average"][view][metric_title_passed] = True
else:
self.output_json["webpagetest"]["runs"][run_index][view][metric_title_passed] = True
else:
# Failure
self.output_json["promotion_gates"][metric_title_passed] = False
if (run_index == None):
self.output_json["webpagetest"]["average"][view][metric_title_passed] = False
else:
self.output_json["webpagetest"]["runs"][run_index][view][metric_title_passed] = False
self.build_status_passed = False
def process_data(self, config_data, perf_data):
"""
Determines if the build meets promotion gate criteria based off of the
information in the config file (retrieved by configengine) and the data
from the modules (retrieved by dataengine)
Keyword Arguments:
config_data - dictionary that contains all of the information retrieved
by the config engine
perf_data - dictionary that contains all of the information retrieved
by the data engine
"""
# Prepare the output file promotion gates section
self.output_json["promotion_gates"] = {}
# AppDynamics Module
if (config_data["appdynamics"]["exists"] == True):
# Check for AppDynamics Health Violations (only if the user cares)
if ((config_data["promotion_gates"]["warning"] == True) or (config_data["promotion_gates"]["critical"] == True)):
# Output something so that the user isn't confused, regardless of whether health violations were found
self.output_json["appdynamics"] = {"healthrule_violations": []}
if (perf_data["appdynamics"]["healthrule_violations"] != []):
# Uh-oh, there's something wrong with the build
self.compare_appdynamics(perf_data["appdynamics"]["healthrule_violations"], config_data["promotion_gates"]["warning"], config_data["promotion_gates"]["critical"])
else:
# No health violations, good to go!
self.output_json["promotion_gates"]["appdynamics_health"] = True
# BlazeMeter Module
if (config_data["blazemeter"]["exists"] == True):
# Compare BlazeMeter metrics
# Add BlazeMeter into the output file
self.output_json["blazemeter"] = {"transactions": []}
for index, transaction in enumerate(perf_data["blazemeter"]["transactions"]):
# Add transaction information into the output
self.output_json["blazemeter"]["transactions"].append({"transaction_id": transaction["transaction_id"], "transaction_name": transaction["transaction_name"]})
# Average Response Time
self.compare_blazemeter("response_time_avg", config_data["promotion_gates"]["response_time_avg"], transaction["response_time_avg"], index, operator.lt)
# Max Response Time
self.compare_blazemeter("response_time_max", config_data["promotion_gates"]["response_time_max"], transaction["response_time_max"], index, operator.lt)
# Response Time Geometric Mean
self.compare_blazemeter("response_time_geomean", config_data["promotion_gates"]["response_time_geomean"], transaction["response_time_geomean"], index, operator.lt)
# Response Time Standard Deviation
self.compare_blazemeter("response_time_stdev", config_data["promotion_gates"]["response_time_stdev"], transaction["response_time_stdev"], index, operator.lt)
# Response Time 90% Line
self.compare_blazemeter("response_time_tp90", config_data["promotion_gates"]["response_time_tp90"], transaction["response_time_tp90"], index, operator.lt)
# Response Time 95% Line
self.compare_blazemeter("response_time_tp95", config_data["promotion_gates"]["response_time_tp95"], transaction["response_time_tp95"], index, operator.lt)
# Response Time 99% Line
self.compare_blazemeter("response_time_tp99", config_data["promotion_gates"]["response_time_tp99"], transaction["response_time_tp99"], index, operator.lt)
# Maximum Latency
self.compare_blazemeter("latency_max", config_data["promotion_gates"]["latency_max"], transaction["latency_max"], index, operator.lt)
# Average Latency
self.compare_blazemeter("latency_avg", config_data["promotion_gates"]["latency_avg"], transaction["latency_avg"], index, operator.lt)
# Latency Standard Deviation
self.compare_blazemeter("latency_stdev", config_data["promotion_gates"]["latency_stdev"], transaction["latency_stdev"], index, operator.lt)
# Average Bandwidth
self.compare_blazemeter("bandwidth_avg", config_data["promotion_gates"]["bandwidth_avg"], transaction["bandwidth_avg"], index, operator.lt)
# Transaction Rate
self.compare_blazemeter("transaction_rate", config_data["promotion_gates"]["transaction_rate"], transaction["transaction_rate"], index, operator.gt)
# WebPageTest Module
if (config_data["webpagetest"]["exists"] == True):
# Compare WebPageTest metrics
# Add WebPageTest into the output file
self.output_json["webpagetest"] = {"average": {}, "runs": []}
# Keep track of the views for looping purposes
views = ["first_view", "repeat_view"]
# Make sure that we care about the data before processing it
if (("first_view" in config_data["promotion_gates"]) or ("repeat_view" in config_data["promotion_gates"])):
# Check out the averages for the runs
# This is less for failing the build and more for adding the data into the output file
for view in views:
if (view in config_data["promotion_gates"]):
                        # Set up the average section for this view
self.output_json["webpagetest"]["average"][view] = {}
                        # Speed Index (Average) - lower is better, so the gate
                        # passes only when the measured value is under the target
                        self.compare_webpagetest("speed_index", config_data["promotion_gates"][view]["speed_index"], perf_data["webpagetest"]["average"][view]["SpeedIndex"], None, view, operator.lt)
# Time to First Paint (Average)
self.compare_webpagetest("first_paint", config_data["promotion_gates"][view]["first_paint"], perf_data["webpagetest"]["average"][view]["firstPaint"], None, view, operator.lt)
# Time to First Byte (Average)
self.compare_webpagetest("first_byte", config_data["promotion_gates"][view]["first_byte"], perf_data["webpagetest"]["average"][view]["TTFB"], None, view, operator.lt)
# Time to Fully Loaded (Average)
self.compare_webpagetest("fully_loaded", config_data["promotion_gates"][view]["fully_loaded"], perf_data["webpagetest"]["average"][view]["fullyLoaded"], None, view, operator.lt)
# Time to Visual Complete (Average)
self.compare_webpagetest("visual_complete", config_data["promotion_gates"][view]["visual_complete"], perf_data["webpagetest"]["average"][view]["visualComplete"], None, view, operator.lt)
# Time to Start Render (Average)
self.compare_webpagetest("start_render", config_data["promotion_gates"][view]["start_render"], perf_data["webpagetest"]["average"][view]["render"], None, view, operator.lt)
# Time to Last Visual Change (Average)
self.compare_webpagetest("last_visual_change", config_data["promotion_gates"][view]["last_visual_change"], perf_data["webpagetest"]["average"][view]["lastVisualChange"], None, view, operator.lt)
# Time to <title></title> Tags Loaded
self.compare_webpagetest("title_time", config_data["promotion_gates"][view]["title_time"], perf_data["webpagetest"]["average"][view]["titleTime"], None, view, operator.lt)
# Page Size (Bytes In)
self.compare_webpagetest("page_size", config_data["promotion_gates"][view]["page_size"], perf_data["webpagetest"]["average"][view]["bytesIn"], None, view, operator.lt)
# Loop over all of the runs
# Most of the time there will likely be only one
for run_id, run in enumerate(perf_data["webpagetest"]["runs"]):
                    # Add run information into the output
self.output_json["webpagetest"]["runs"].append({"run_id": run["run_id"]})
# Loop over all of the views for each run
for view in views:
if (view in config_data["promotion_gates"]):
                            # Set up this view for the run
self.output_json["webpagetest"]["runs"][run_id][view] = {}
                            # Speed Index - lower is better, so the gate passes
                            # only when the measured value is under the target
                            self.compare_webpagetest("speed_index", config_data["promotion_gates"][view]["speed_index"], perf_data["webpagetest"]["runs"][run_id][view]["SpeedIndex"], run_id, view, operator.lt)
# Time to First Paint
self.compare_webpagetest("first_paint", config_data["promotion_gates"][view]["first_paint"], perf_data["webpagetest"]["runs"][run_id][view]["firstPaint"], run_id, view, operator.lt)
# Time to First Byte
self.compare_webpagetest("first_byte", config_data["promotion_gates"][view]["first_byte"], perf_data["webpagetest"]["runs"][run_id][view]["TTFB"], run_id, view, operator.lt)
# Time to Fully Loaded
self.compare_webpagetest("fully_loaded", config_data["promotion_gates"][view]["fully_loaded"], perf_data["webpagetest"]["runs"][run_id][view]["fullyLoaded"], run_id, view, operator.lt)
# Time to Visual Complete
self.compare_webpagetest("visual_complete", config_data["promotion_gates"][view]["visual_complete"], perf_data["webpagetest"]["runs"][run_id][view]["visualComplete"], run_id, view, operator.lt)
# Time to Start Render
self.compare_webpagetest("start_render", config_data["promotion_gates"][view]["start_render"], perf_data["webpagetest"]["runs"][run_id][view]["render"], run_id, view, operator.lt)
# Time to Last Visual Change
self.compare_webpagetest("last_visual_change", config_data["promotion_gates"][view]["last_visual_change"], perf_data["webpagetest"]["runs"][run_id][view]["lastVisualChange"], run_id, view, operator.lt)
# Time to <title></title> Tags Loaded
self.compare_webpagetest("title_time", config_data["promotion_gates"][view]["title_time"], perf_data["webpagetest"]["runs"][run_id][view]["titleTime"], run_id, view, operator.lt)
# Page Size (Bytes In)
self.compare_webpagetest("page_size", config_data["promotion_gates"][view]["page_size"], perf_data["webpagetest"]["runs"][run_id][view]["bytesIn"], run_id, view, operator.lt)
# Set the overall status in the output JSON file
self.output_json["promotion_gates"]["passed"] = self.build_status_passed
# We're done!
print("Processed performance data")
return self.output_json
def __init__(self):
"""
Class starting point
"""
# Build Status
self.build_status_passed = True
# Output JSON report data
# Later appended by the AppDynamics and BlazeMeter processing functions
self.output_json = {}
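# A minimal usage sketch (illustrative; the class defined above is instantiated
# elsewhere, so `engine` stands in for such an instance):
#   output = engine.process_data(config_data, perf_data)
#   if not output["promotion_gates"]["passed"]:
#       raise SystemExit("Performance promotion gates failed")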
| mit | -1,119,515,072,854,985,200 | 64.877966 | 229 | 0.591129 | false |
Frky/scat | src/shell/memory/addrtable.py | 1 | 1331 | #-*- coding: utf-8 -*-
class AddrTable(object):
    """
    Hash table of addresses, bucketed by ``addr % TABLE_SIZE``.

    In plain mode each bucket is a list of addresses; with ``dic=True`` each
    bucket maps an address to a list of associated function ids.
    """

    TABLE_SIZE = 10000
def __init__(self, dic=False):
self.__addr = list()
self.__dic = dic
for i in xrange(AddrTable.TABLE_SIZE):
if self.__dic:
self.__addr.append(dict())
else:
self.__addr.append(list())
self.__curr_key = None
self.__curr_addr = None
    def contains(self, addr):
        key = addr % AddrTable.TABLE_SIZE
        # ``in`` tests dict keys directly, so one expression covers both modes
        return addr in self.__addr[key]
def add(self, addr):
key = addr % AddrTable.TABLE_SIZE
if self.__dic:
self.__addr[key][addr] = list()
else:
self.__addr[key].append(addr)
def remove(self, addr):
key = addr % AddrTable.TABLE_SIZE
self.__addr[key].remove(addr)
def add_dic(self, addr, fid):
if not self.__dic:
raise Exception
key = addr % AddrTable.TABLE_SIZE
self.__addr[key][addr].append(fid)
    def items(self):
        # Iterate over every bucket; in dict mode yield (addr, fid_list) pairs,
        # otherwise yield bare addresses.
        for bucket in self.__addr:
            if self.__dic:
                for addr, fids in bucket.items():
                    yield addr, fids
            else:
                for addr in bucket:
yield addr
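# A minimal usage sketch (illustrative, not part of the original module):
if __name__ == "__main__":
    table = AddrTable(dic=True)
    table.add(0x400726)           # create the bucket entry for this address
    table.add_dic(0x400726, 3)    # record function id 3 for that address
    if table.contains(0x400726):
        for addr, fids in table.items():
            print hex(addr), fids  # Python 2 print, matching the module's xrange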
| mit | -7,471,110,895,551,581,000 | 26.163265 | 50 | 0.486852 | false |
BenjaminSchubert/web-polls | backend/errors/http.py | 1 | 1737 | """
This module contains a collection of commonly encountered HTTP exceptions.
This allows all these http exceptions to be treated in the same way and simplifies the return of errors to the user.
"""
from errors import ErrorMessage
__author__ = "Benjamin Schubert <[email protected]>"
class BaseHTTPException(Exception):
"""
This is the base HTTP Exception.
It should not be used as is, as it signifies that the server had an unexpected error.
"""
status_code = 500 # type: int
def __init__(self, payload: ErrorMessage = None, status_code: int = None):
"""
Create a new `BaseHTTPException`.
:param payload: payload to send to explain the error to the user.
:param status_code: HTTP status code to send. If not given, will fallback to `self.status_code`.
"""
super().__init__(self)
if payload is None:
payload = dict()
self.payload = payload
if status_code is not None:
self.status_code = status_code
class ForbiddenException(BaseHTTPException):
    """This is an exception to throw to return a 401 UNAUTHORIZED to the user."""

    def __init__(self):
        """Create a new `ForbiddenException`."""
        super().__init__({}, 401)
class BadRequestException(BaseHTTPException):
"""This is an exception to throw to return a 400 BAD REQUEST to the user."""
def __init__(self, payload: ErrorMessage):
"""
Create a new `BadRequestException`.
:param payload: payload to send to explain the error to the user.
"""
super().__init__(payload, 400)
class NotFoundException(BaseHTTPException):
"""This is an exception to throw to return a 404 NOT FOUND to the user."""
def __init__(self):
"""Create a new `NotFoundException`."""
super().__init__(None, 404)
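# A minimal usage sketch (illustrative; `find_poll` and the error handling layer
# are assumptions, not part of this module):
#   def get_poll(poll_id):
#       poll = find_poll(poll_id)
#       if poll is None:
#           raise NotFoundException()
#       return poll
#
# A framework-level error handler can then serialize `exc.payload` with HTTP
# status `exc.status_code` to report the failure to the client.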
| mit | -7,086,277,074,543,223,000 | 27.95 | 116 | 0.639033 | false |