max_stars_repo_path (string, 4-286 chars) | max_stars_repo_name (string, 5-119 chars) | max_stars_count (int64, 0-191k) | id (string, 1-7 chars) | content (string, 6-1.03M chars) | content_cleaned (string, 6-1.03M chars) | language (111 classes) | language_score (float64, 0.03-1) | comments (string, 0-556k chars) | edu_score (float64, 0.32-5.03) | edu_int_score (int64, 0-5) |
---|---|---|---|---|---|---|---|---|---|---|
stockprophet/__init__.py | chihyi-liao/stockprophet | 1 | 8200 | <gh_stars>1-10
from stockprophet.cli import entry_point
from stockprophet.crawler import (
init_stock_type, init_stock_category
)
from stockprophet.db import init_db
from .utils import read_db_settings
def preprocessing() -> bool:
result = False
# noinspection PyBroadException
try:
db_config = read_db_settings()
if not db_config:
print("config.ini 找不到 'database' 區段")
return result
except Exception:
print("無法讀取或解析config.ini")
return result
# noinspection PyBroadException
try:
init_db(db_config)
init_stock_category()
init_stock_type()
result = True
except Exception as e:
print("無法連線資料庫: %s" % (str(e), ))
return result
def main():
if preprocessing():
entry_point()
| from stockprophet.cli import entry_point
from stockprophet.crawler import (
init_stock_type, init_stock_category
)
from stockprophet.db import init_db
from .utils import read_db_settings
def preprocessing() -> bool:
result = False
# noinspection PyBroadException
try:
db_config = read_db_settings()
if not db_config:
print("config.ini 找不到 'database' 區段")
return result
except Exception:
print("無法讀取或解析config.ini")
return result
# noinspection PyBroadException
try:
init_db(db_config)
init_stock_category()
init_stock_type()
result = True
except Exception as e:
print("無法連線資料庫: %s" % (str(e), ))
return result
def main():
if preprocessing():
entry_point() | fr | 0.141312 | # noinspection PyBroadException # noinspection PyBroadException | 2.223773 | 2 |
2021/day_25.py | mpcjanssen/Advent-of-Code | 1 | 8201 | <gh_stars>1-10
import aoc_helper
RAW = aoc_helper.day(25)
print(RAW)
def parse_raw():
...
DATA = parse_raw()
def part_one():
...
def part_two():
...
aoc_helper.submit(25, part_one)
aoc_helper.submit(25, part_two)
| import aoc_helper
RAW = aoc_helper.day(25)
print(RAW)
def parse_raw():
...
DATA = parse_raw()
def part_one():
...
def part_two():
...
aoc_helper.submit(25, part_one)
aoc_helper.submit(25, part_two) | none | 1 | 2.036998 | 2 |
|
6/6.2.py | Hunter1753/adventofcode | 1 | 8202 | <gh_stars>1-10
def setIntersectionCount(group):
return len(set.intersection(*group))
groupList = []
tempGroup = []
with open("./6/input.txt") as inputFile:
for line in inputFile:
line = line.replace("\n","")
if len(line) > 0:
tempGroup.append(set(line))
else:
groupList.append(tempGroup)
tempGroup = []
if len(tempGroup) > 0:
groupList.append(tempGroup)
groupList = list(map(setIntersectionCount,groupList))
print("{} common options in groups".format(sum(groupList))) | def setIntersectionCount(group):
return len(set.intersection(*group))
groupList = []
tempGroup = []
with open("./6/input.txt") as inputFile:
for line in inputFile:
line = line.replace("\n","")
if len(line) > 0:
tempGroup.append(set(line))
else:
groupList.append(tempGroup)
tempGroup = []
if len(tempGroup) > 0:
groupList.append(tempGroup)
groupList = list(map(setIntersectionCount,groupList))
print("{} common options in groups".format(sum(groupList))) | none | 1 | 3.446082 | 3 |
|
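A quick aside on the core of the solution above: `set.intersection(*group)` keeps only the answers that every member of a group gave. A tiny self-contained illustration (the letters are made up, not taken from the real puzzle input):

```python
group = [set("abc"), set("acd"), set("ca")]   # three people's answer sets
common = set.intersection(*group)             # answers everyone in the group gave
print(sorted(common), len(common))            # ['a', 'c'] 2
```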
demo/gpnas/CVPR2021_NAS_competition_gpnas_demo.py | ZichaoGuo/PaddleSlim | 926 | 8203 | <filename>demo/gpnas/CVPR2021_NAS_competition_gpnas_demo.py
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import copy
import numpy as np
from paddleslim.nas import GPNAS
# Demo of entering Track2 of the [CVPR 2021 NAS international competition](https://www.cvpr21-nas.com/competition) with GP-NAS
# [CVPR 2021 NAS competition Track2 studio page](https://aistudio.baidu.com/aistudio/competition/detail/71?lang=en)
# [AI studio GP-NAS demo](https://aistudio.baidu.com/aistudio/projectdetail/1824958)
# The demo is based on PaddleSlim's in-house NAS algorithm GP-NAS: Gaussian Process based Neural Architecture Search
# An improved version built on this demo is eligible for double prize money
def preprare_trainning_data(file_name, t_flag):
## t_flag ==1 using all trainning data
## t_flag ==2 using half trainning data
with open(file_name, 'r') as f:
arch_dict = json.load(f)
Y_all = []
X_all = []
for sub_dict in arch_dict.items():
Y_all.append(sub_dict[1]['acc'] * 100)
X_all.append(np.array(sub_dict[1]['arch']).T.reshape(4, 16)[2])
X_all, Y_all = np.array(X_all), np.array(Y_all)
X_train, Y_train, X_test, Y_test = X_all[0::t_flag], Y_all[
0::t_flag], X_all[1::t_flag], Y_all[1::t_flag]
return X_train, Y_train, X_test, Y_test
if __name__ == '__main__':
stage1_file = './datasets/Track2_stage1_trainning.json'
stage2_file = './datasets/Track2_stage2_few_show_trainning.json'
X_train_stage1, Y_train_stage1, X_test_stage1, Y_test_stage1 = preprare_trainning_data(
stage1_file, 1)
X_train_stage2, Y_train_stage2, X_test_stage2, Y_test_stage2 = preprare_trainning_data(
stage2_file, 2)
gpnas = GPNAS()
w = gpnas.get_initial_mean(X_test_stage1, Y_test_stage1)
init_cov = gpnas.get_initial_cov(X_train_stage1)
error_list = np.array(
Y_test_stage2.reshape(len(Y_test_stage2), 1) - gpnas.get_predict(
X_test_stage2))
print('RMSE trainning on stage1 testing on stage2:',
np.sqrt(np.dot(error_list.T, error_list) / len(error_list)))
gpnas.get_posterior_mean(X_train_stage2[0::3], Y_train_stage2[0::3])
gpnas.get_posterior_mean(X_train_stage2[1::3], Y_train_stage2[1::3])
gpnas.get_posterior_cov(X_train_stage2[1::3], Y_train_stage2[1::3])
error_list = np.array(
Y_test_stage2.reshape(len(Y_test_stage2), 1) - gpnas.get_predict_jiont(
X_test_stage2, X_train_stage2[::1], Y_train_stage2[::1]))
print('RMSE using stage1 as prior:',
np.sqrt(np.dot(error_list.T, error_list) / len(error_list)))
| <filename>demo/gpnas/CVPR2021_NAS_competition_gpnas_demo.py
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import copy
import numpy as np
from paddleslim.nas import GPNAS
# Demo of entering Track2 of the [CVPR 2021 NAS international competition](https://www.cvpr21-nas.com/competition) with GP-NAS
# [CVPR 2021 NAS competition Track2 studio page](https://aistudio.baidu.com/aistudio/competition/detail/71?lang=en)
# [AI studio GP-NAS demo](https://aistudio.baidu.com/aistudio/projectdetail/1824958)
# The demo is based on PaddleSlim's in-house NAS algorithm GP-NAS: Gaussian Process based Neural Architecture Search
# An improved version built on this demo is eligible for double prize money
def preprare_trainning_data(file_name, t_flag):
## t_flag ==1 using all trainning data
## t_flag ==2 using half trainning data
with open(file_name, 'r') as f:
arch_dict = json.load(f)
Y_all = []
X_all = []
for sub_dict in arch_dict.items():
Y_all.append(sub_dict[1]['acc'] * 100)
X_all.append(np.array(sub_dict[1]['arch']).T.reshape(4, 16)[2])
X_all, Y_all = np.array(X_all), np.array(Y_all)
X_train, Y_train, X_test, Y_test = X_all[0::t_flag], Y_all[
0::t_flag], X_all[1::t_flag], Y_all[1::t_flag]
return X_train, Y_train, X_test, Y_test
if __name__ == '__main__':
stage1_file = './datasets/Track2_stage1_trainning.json'
stage2_file = './datasets/Track2_stage2_few_show_trainning.json'
X_train_stage1, Y_train_stage1, X_test_stage1, Y_test_stage1 = preprare_trainning_data(
stage1_file, 1)
X_train_stage2, Y_train_stage2, X_test_stage2, Y_test_stage2 = preprare_trainning_data(
stage2_file, 2)
gpnas = GPNAS()
w = gpnas.get_initial_mean(X_test_stage1, Y_test_stage1)
init_cov = gpnas.get_initial_cov(X_train_stage1)
error_list = np.array(
Y_test_stage2.reshape(len(Y_test_stage2), 1) - gpnas.get_predict(
X_test_stage2))
print('RMSE trainning on stage1 testing on stage2:',
np.sqrt(np.dot(error_list.T, error_list) / len(error_list)))
gpnas.get_posterior_mean(X_train_stage2[0::3], Y_train_stage2[0::3])
gpnas.get_posterior_mean(X_train_stage2[1::3], Y_train_stage2[1::3])
gpnas.get_posterior_cov(X_train_stage2[1::3], Y_train_stage2[1::3])
error_list = np.array(
Y_test_stage2.reshape(len(Y_test_stage2), 1) - gpnas.get_predict_jiont(
X_test_stage2, X_train_stage2[::1], Y_train_stage2[::1]))
print('RMSE using stage1 as prior:',
np.sqrt(np.dot(error_list.T, error_list) / len(error_list)))
| en | 0.666365 | # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License" # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # 使用GP-NAS参加[CVPR 2021 NAS国际比赛](https://www.cvpr21-nas.com/competition) Track2 demo # [CVPR 2021 NAS国际比赛Track2 studio地址](https://aistudio.baidu.com/aistudio/competition/detail/71?lang=en) # [AI studio GP-NAS demo](https://aistudio.baidu.com/aistudio/projectdetail/1824958) # demo 基于paddleslim自研NAS算法GP-NAS:Gaussian Process based Neural Architecture Search # 基于本demo的改进版可以获得双倍奖金 ## t_flag ==1 using all trainning data ## t_flag ==2 using half trainning data | 1.910093 | 2 |
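A note on the metric printed by the demo above: `np.sqrt(np.dot(error_list.T, error_list) / len(error_list))` is simply the root-mean-square error of the predictions. The minimal equivalent below is shown only to make the formula explicit; the sample numbers are invented.

```python
import numpy as np

def rmse(errors):
    """sqrt(mean(e_i^2)); same quantity as np.sqrt(np.dot(e.T, e) / len(e)) for a column vector e."""
    e = np.asarray(errors, dtype=float).ravel()
    return float(np.sqrt(np.mean(e ** 2)))

print(rmse([1.0, -2.0, 2.0]))  # sqrt((1 + 4 + 4) / 3) = sqrt(3) ~ 1.732
```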
pages/migrations/0004_auto_20181102_0944.py | yogeshprasad/spa-development | 0 | 8204 | <reponame>yogeshprasad/spa-development<gh_stars>0
# Generated by Django 2.0.6 on 2018-11-02 09:44
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('pages', '0003_coachingcourse'),
]
operations = [
migrations.AlterField(
model_name='coachingcourse',
name='username',
field=models.CharField(default='', max_length=100),
),
]
| # Generated by Django 2.0.6 on 2018-11-02 09:44
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('pages', '0003_coachingcourse'),
]
operations = [
migrations.AlterField(
model_name='coachingcourse',
name='username',
field=models.CharField(default='', max_length=100),
),
] | en | 0.844349 | # Generated by Django 2.0.6 on 2018-11-02 09:44 | 1.599932 | 2 |
imageclassification/src/sample/splitters/_StratifiedSplitter.py | waikato-datamining/keras-imaging | 0 | 8205 | from collections import OrderedDict
from random import Random
from typing import Set
from .._types import Dataset, Split, LabelIndices
from .._util import per_label
from ._RandomSplitter import RandomSplitter
from ._Splitter import Splitter
class StratifiedSplitter(Splitter):
"""
TODO
"""
def __init__(self, percentage: float, labels: LabelIndices, random: Random = Random()):
self._percentage = percentage
self._labels = labels
self._random = random
def __str__(self) -> str:
return f"strat-{self._percentage}"
def __call__(self, dataset: Dataset) -> Split:
subsets_per_label = per_label(dataset)
sub_splits = {
label: RandomSplitter(int(len(subsets_per_label[label]) * self._percentage), self._random)(subsets_per_label[label])
for label in self._labels.keys()
}
result = OrderedDict(), OrderedDict()
for filename, label in dataset.items():
result_index = 0 if filename in sub_splits[label][0] else 1
result[result_index][filename] = label
return result
| from collections import OrderedDict
from random import Random
from typing import Set
from .._types import Dataset, Split, LabelIndices
from .._util import per_label
from ._RandomSplitter import RandomSplitter
from ._Splitter import Splitter
class StratifiedSplitter(Splitter):
"""
TODO
"""
def __init__(self, percentage: float, labels: LabelIndices, random: Random = Random()):
self._percentage = percentage
self._labels = labels
self._random = random
def __str__(self) -> str:
return f"strat-{self._percentage}"
def __call__(self, dataset: Dataset) -> Split:
subsets_per_label = per_label(dataset)
sub_splits = {
label: RandomSplitter(int(len(subsets_per_label[label]) * self._percentage), self._random)(subsets_per_label[label])
for label in self._labels.keys()
}
result = OrderedDict(), OrderedDict()
for filename, label in dataset.items():
result_index = 0 if filename in sub_splits[label][0] else 1
result[result_index][filename] = label
return result
| none | 1 | 2.742425 | 3 |
|
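The splitter above is written against the package's `Dataset` and `LabelIndices` types; judging from how they are iterated, they behave like plain mappings of filename to label and label to index. The sketch below is a hypothetical usage, not taken from the repository: the import path, file names and labels are all assumptions.

```python
from random import Random
# Assumed import path; point it at wherever StratifiedSplitter actually lives in the package.
from sample.splitters import StratifiedSplitter

dataset = {"a.png": "cat", "b.png": "cat", "c.png": "dog", "d.png": "dog"}  # filename -> label
labels = {"cat": 0, "dog": 1}                                               # label -> index

splitter = StratifiedSplitter(percentage=0.5, labels=labels, random=Random(42))
selected, rest = splitter(dataset)  # roughly half of *each* label lands in `selected`
print(dict(selected), dict(rest))
```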
revenuecat_python/enums.py | YuraHavrylko/revenuecat_python | 1 | 8206 | <reponame>YuraHavrylko/revenuecat_python
from enum import Enum
class SubscriptionPlatform(Enum):
ios = 'ios'
android = 'android'
macos = 'macos'
uikitformac = 'uikitformac'
stripe = 'stripe'
class AttributionNetworkCode(Enum):
apple_search_ads = 0
adjust = 1
apps_flyer = 2
branch = 3
tenjin = 4
facebook = 5 | from enum import Enum
class SubscriptionPlatform(Enum):
ios = 'ios'
android = 'android'
macos = 'macos'
uikitformac = 'uikitformac'
stripe = 'stripe'
class AttributionNetworkCode(Enum):
apple_search_ads = 0
adjust = 1
apps_flyer = 2
branch = 3
tenjin = 4
facebook = 5 | none | 1 | 2.431421 | 2 |
|
windows_packages_gpu/torch/nn/intrinsic/qat/modules/linear_relu.py | codeproject/DeepStack | 353 | 8207 | from __future__ import absolute_import, division, print_function, unicode_literals
import torch.nn.qat as nnqat
import torch.nn.intrinsic
import torch.nn.functional as F
class LinearReLU(nnqat.Linear):
r"""
A LinearReLU module fused from Linear and ReLU modules, attached with
FakeQuantize modules for output activation and weight, used in
quantization aware training.
We adopt the same interface as :class:`torch.nn.Linear`.
Similar to `torch.nn.intrinsic.LinearReLU`, with FakeQuantize modules initialized to
default.
Attributes:
activation_post_process: fake quant module for output activation
weight: fake quant module for weight
Examples::
>>> m = nn.qat.LinearReLU(20, 30)
>>> input = torch.randn(128, 20)
>>> output = m(input)
>>> print(output.size())
torch.Size([128, 30])
"""
_FLOAT_MODULE = torch.nn.intrinsic.LinearReLU
def __init__(self, in_features, out_features, bias=True,
qconfig=None):
super(LinearReLU, self).__init__(in_features, out_features, bias, qconfig)
def forward(self, input):
return self.activation_post_process(F.relu(
F.linear(input, self.weight_fake_quant(self.weight), self.bias)))
@classmethod
def from_float(cls, mod, qconfig=None):
return super(LinearReLU, cls).from_float(mod, qconfig)
| from __future__ import absolute_import, division, print_function, unicode_literals
import torch.nn.qat as nnqat
import torch.nn.intrinsic
import torch.nn.functional as F
class LinearReLU(nnqat.Linear):
r"""
A LinearReLU module fused from Linear and ReLU modules, attached with
FakeQuantize modules for output activation and weight, used in
quantization aware training.
We adopt the same interface as :class:`torch.nn.Linear`.
Similar to `torch.nn.intrinsic.LinearReLU`, with FakeQuantize modules initialized to
default.
Attributes:
activation_post_process: fake quant module for output activation
weight: fake quant module for weight
Examples::
>>> m = nn.qat.LinearReLU(20, 30)
>>> input = torch.randn(128, 20)
>>> output = m(input)
>>> print(output.size())
torch.Size([128, 30])
"""
_FLOAT_MODULE = torch.nn.intrinsic.LinearReLU
def __init__(self, in_features, out_features, bias=True,
qconfig=None):
super(LinearReLU, self).__init__(in_features, out_features, bias, qconfig)
def forward(self, input):
return self.activation_post_process(F.relu(
F.linear(input, self.weight_fake_quant(self.weight), self.bias)))
@classmethod
def from_float(cls, mod, qconfig=None):
return super(LinearReLU, cls).from_float(mod, qconfig)
| en | 0.662167 | A LinearReLU module fused from Linear and ReLU modules, attached with
FakeQuantize modules for output activation and weight, used in
quantization aware training.
We adopt the same interface as :class:`torch.nn.Linear`.
Similar to `torch.nn.intrinsic.LinearReLU`, with FakeQuantize modules initialized to
default.
Attributes:
activation_post_process: fake quant module for output activation
weight: fake quant module for weight
Examples::
>>> m = nn.qat.LinearReLU(20, 30)
>>> input = torch.randn(128, 20)
>>> output = m(input)
>>> print(output.size())
torch.Size([128, 30]) | 2.746999 | 3 |
venv/Lib/site-packages/PyOpenGL-3.0.1/OpenGL/GL/EXT/draw_buffers2.py | temelkirci/Motion_Editor | 1 | 8208 | <reponame>temelkirci/Motion_Editor
'''OpenGL extension EXT.draw_buffers2
This module customises the behaviour of the
OpenGL.raw.GL.EXT.draw_buffers2 to provide a more
Python-friendly API
Overview (from the spec)
This extension builds upon the ARB_draw_buffers extension and provides
separate blend enables and color write masks for each color output. In
ARB_draw_buffers (part of OpenGL 2.0), separate values can be written to
each color buffer, but the blend enable and color write mask are global
and apply to all color outputs.
While this extension does provide separate blend enables, it does not
provide separate blend functions or blend equations per color output.
The official definition of this extension is available here:
http://www.opengl.org/registry/specs/EXT/draw_buffers2.txt
'''
from OpenGL import platform, constants, constant, arrays
from OpenGL import extensions, wrapper
from OpenGL.GL import glget
import ctypes
from OpenGL.raw.GL.EXT.draw_buffers2 import *
### END AUTOGENERATED SECTION | '''OpenGL extension EXT.draw_buffers2
This module customises the behaviour of the
OpenGL.raw.GL.EXT.draw_buffers2 to provide a more
Python-friendly API
Overview (from the spec)
This extension builds upon the ARB_draw_buffers extension and provides
separate blend enables and color write masks for each color output. In
ARB_draw_buffers (part of OpenGL 2.0), separate values can be written to
each color buffer, but the blend enable and color write mask are global
and apply to all color outputs.
While this extension does provide separate blend enables, it does not
provide separate blend functions or blend equations per color output.
The official definition of this extension is available here:
http://www.opengl.org/registry/specs/EXT/draw_buffers2.txt
'''
from OpenGL import platform, constants, constant, arrays
from OpenGL import extensions, wrapper
from OpenGL.GL import glget
import ctypes
from OpenGL.raw.GL.EXT.draw_buffers2 import *
### END AUTOGENERATED SECTION | en | 0.691713 | OpenGL extension EXT.draw_buffers2 This module customises the behaviour of the OpenGL.raw.GL.EXT.draw_buffers2 to provide a more Python-friendly API Overview (from the spec) This extension builds upon the ARB_draw_buffers extension and provides separate blend enables and color write masks for each color output. In ARB_draw_buffers (part of OpenGL 2.0), separate values can be written to each color buffer, but the blend enable and color write mask are global and apply to all color outputs. While this extension does provide separate blend enables, it does not provide separate blend functions or blend equations per color output. The official definition of this extension is available here: http://www.opengl.org/registry/specs/EXT/draw_buffers2.txt ### END AUTOGENERATED SECTION | 1.83881 | 2 |
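The overview above is the essence of the extension: the blend enable and the color write mask become per-draw-buffer state instead of global state. Below is a sketch of toggling that state through PyOpenGL's indexed entry points; it assumes a current GL context, driver support for EXT_draw_buffers2, and the usual PyOpenGL `glInit...EXT()` availability helper, so verify the names against your PyOpenGL build.

```python
from OpenGL.GL import GL_BLEND
from OpenGL.GL.EXT.draw_buffers2 import (
    glInitDrawBuffers2EXT,
    glEnableIndexedEXT,
    glDisableIndexedEXT,
    glColorMaskIndexedEXT,
)

if glInitDrawBuffers2EXT():                 # extension available on this context?
    glEnableIndexedEXT(GL_BLEND, 0)         # blending on for color attachment 0 only
    glDisableIndexedEXT(GL_BLEND, 1)        # ...and explicitly off for attachment 1
    glColorMaskIndexedEXT(1, True, True, False, True)  # suppress blue writes on attachment 1
```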
pymemcache/client/retrying.py | liquidpele/pymemcache | 0 | 8209 | """ Module containing the RetryingClient wrapper class. """
from time import sleep
def _ensure_tuple_argument(argument_name, argument_value):
"""
Helper function to ensure the given arguments are tuples of Exceptions (or
subclasses), or can at least be converted to such.
Args:
argument_name: str, name of the argument we're checking, only used for
raising meaningful exceptions.
argument: any, the argument itself.
Returns:
tuple[Exception]: A tuple with the elements from the argument if they are
valid.
Exceptions:
ValueError: If the argument was not None, tuple or Iterable.
ValueError: If any of the elements of the argument is not a subclass of
Exception.
"""
# Ensure the argument is a tuple, set or list.
if argument_value is None:
return tuple()
elif not isinstance(argument_value, (tuple, set, list)):
raise ValueError("%s must be either a tuple, a set or a list." % argument_name)
# Convert the argument before checking contents.
argument_tuple = tuple(argument_value)
# Check that all the elements are actually inherited from Exception.
# (Catchable)
if not all([issubclass(arg, Exception) for arg in argument_tuple]):
raise ValueError(
"%s is only allowed to contain elements that are subclasses of "
"Exception." % argument_name
)
return argument_tuple
class RetryingClient(object):
"""
Client that allows retrying calls for the other clients.
"""
def __init__(
self, client, attempts=2, retry_delay=0, retry_for=None, do_not_retry_for=None
):
"""
Constructor for RetryingClient.
Args:
client: Client|PooledClient|HashClient, inner client to use for
performing actual work.
attempts: optional int, how many times to attempt an action before
failing. Must be 1 or above. Defaults to 2.
retry_delay: optional int|float, how many seconds to sleep between
each attempt.
Defaults to 0.
retry_for: optional None|tuple|set|list, what exceptions to
allow retries for. Will allow retries for all exceptions if None.
Example:
`(MemcacheClientError, MemcacheUnexpectedCloseError)`
Accepts any class that is a subclass of Exception.
Defaults to None.
do_not_retry_for: optional None|tuple|set|list, what
                exceptions should never trigger a retry. Will not block retries for any
Exception if None.
Example:
`(IOError, MemcacheIllegalInputError)`
Accepts any class that is a subclass of Exception.
Defaults to None.
Exceptions:
ValueError: If `attempts` is not 1 or above.
ValueError: If `retry_for` or `do_not_retry_for` is not None, tuple or
Iterable.
ValueError: If any of the elements of `retry_for` or
`do_not_retry_for` is not a subclass of Exception.
ValueError: If there is any overlap between `retry_for` and
`do_not_retry_for`.
"""
if attempts < 1:
raise ValueError(
"`attempts` argument must be at least 1. "
"Otherwise no attempts are made."
)
self._client = client
self._attempts = attempts
self._retry_delay = retry_delay
self._retry_for = _ensure_tuple_argument("retry_for", retry_for)
self._do_not_retry_for = _ensure_tuple_argument(
"do_not_retry_for", do_not_retry_for
)
# Verify no overlap in the go/no-go exception collections.
for exc_class in self._retry_for:
if exc_class in self._do_not_retry_for:
raise ValueError(
'Exception class "%s" was present in both `retry_for` '
"and `do_not_retry_for`. Any exception class is only "
"allowed in a single argument." % repr(exc_class)
)
# Take dir from the client to speed up future checks.
self._client_dir = dir(self._client)
def _retry(self, name, func, *args, **kwargs):
"""
Workhorse function, handles retry logic.
Args:
name: str, Name of the function called.
func: callable, the function to retry.
*args: args, array arguments to pass to the function.
**kwargs: kwargs, keyword arguments to pass to the function.
"""
for attempt in range(self._attempts):
try:
result = func(*args, **kwargs)
return result
except Exception as exc:
# Raise the exception to caller if either is met:
# - We've used the last attempt.
# - self._retry_for is set, and we do not match.
# - self._do_not_retry_for is set, and we do match.
# - name is not actually a member of the client class.
if (
attempt >= self._attempts - 1
or (self._retry_for and not isinstance(exc, self._retry_for))
or (
self._do_not_retry_for
and isinstance(exc, self._do_not_retry_for)
)
or name not in self._client_dir
):
raise exc
# Sleep and try again.
sleep(self._retry_delay)
# This is the real magic soup of the class, we catch anything that isn't
# strictly defined for ourselves and pass it on to whatever client we've
# been given.
def __getattr__(self, name):
return lambda *args, **kwargs: self._retry(
name, self._client.__getattribute__(name), *args, **kwargs
)
# We implement these explicitly because they're "magic" functions and won't
# get passed on by __getattr__.
def __dir__(self):
return self._client_dir
# These magics are copied from the base client.
def __setitem__(self, key, value):
self.set(key, value, noreply=True)
def __getitem__(self, key):
value = self.get(key)
if value is None:
raise KeyError
return value
def __delitem__(self, key):
self.delete(key, noreply=True)
| """ Module containing the RetryingClient wrapper class. """
from time import sleep
def _ensure_tuple_argument(argument_name, argument_value):
"""
Helper function to ensure the given arguments are tuples of Exceptions (or
subclasses), or can at least be converted to such.
Args:
argument_name: str, name of the argument we're checking, only used for
raising meaningful exceptions.
argument: any, the argument itself.
Returns:
tuple[Exception]: A tuple with the elements from the argument if they are
valid.
Exceptions:
ValueError: If the argument was not None, tuple or Iterable.
ValueError: If any of the elements of the argument is not a subclass of
Exception.
"""
# Ensure the argument is a tuple, set or list.
if argument_value is None:
return tuple()
elif not isinstance(argument_value, (tuple, set, list)):
raise ValueError("%s must be either a tuple, a set or a list." % argument_name)
# Convert the argument before checking contents.
argument_tuple = tuple(argument_value)
# Check that all the elements are actually inherited from Exception.
# (Catchable)
if not all([issubclass(arg, Exception) for arg in argument_tuple]):
raise ValueError(
"%s is only allowed to contain elements that are subclasses of "
"Exception." % argument_name
)
return argument_tuple
class RetryingClient(object):
"""
Client that allows retrying calls for the other clients.
"""
def __init__(
self, client, attempts=2, retry_delay=0, retry_for=None, do_not_retry_for=None
):
"""
Constructor for RetryingClient.
Args:
client: Client|PooledClient|HashClient, inner client to use for
performing actual work.
attempts: optional int, how many times to attempt an action before
failing. Must be 1 or above. Defaults to 2.
retry_delay: optional int|float, how many seconds to sleep between
each attempt.
Defaults to 0.
retry_for: optional None|tuple|set|list, what exceptions to
allow retries for. Will allow retries for all exceptions if None.
Example:
`(MemcacheClientError, MemcacheUnexpectedCloseError)`
Accepts any class that is a subclass of Exception.
Defaults to None.
do_not_retry_for: optional None|tuple|set|list, what
                exceptions should never trigger a retry. Will not block retries for any
Exception if None.
Example:
`(IOError, MemcacheIllegalInputError)`
Accepts any class that is a subclass of Exception.
Defaults to None.
Exceptions:
ValueError: If `attempts` is not 1 or above.
ValueError: If `retry_for` or `do_not_retry_for` is not None, tuple or
Iterable.
ValueError: If any of the elements of `retry_for` or
`do_not_retry_for` is not a subclass of Exception.
ValueError: If there is any overlap between `retry_for` and
`do_not_retry_for`.
"""
if attempts < 1:
raise ValueError(
"`attempts` argument must be at least 1. "
"Otherwise no attempts are made."
)
self._client = client
self._attempts = attempts
self._retry_delay = retry_delay
self._retry_for = _ensure_tuple_argument("retry_for", retry_for)
self._do_not_retry_for = _ensure_tuple_argument(
"do_not_retry_for", do_not_retry_for
)
# Verify no overlap in the go/no-go exception collections.
for exc_class in self._retry_for:
if exc_class in self._do_not_retry_for:
raise ValueError(
'Exception class "%s" was present in both `retry_for` '
"and `do_not_retry_for`. Any exception class is only "
"allowed in a single argument." % repr(exc_class)
)
# Take dir from the client to speed up future checks.
self._client_dir = dir(self._client)
def _retry(self, name, func, *args, **kwargs):
"""
Workhorse function, handles retry logic.
Args:
name: str, Name of the function called.
func: callable, the function to retry.
*args: args, array arguments to pass to the function.
**kwargs: kwargs, keyword arguments to pass to the function.
"""
for attempt in range(self._attempts):
try:
result = func(*args, **kwargs)
return result
except Exception as exc:
# Raise the exception to caller if either is met:
# - We've used the last attempt.
# - self._retry_for is set, and we do not match.
# - self._do_not_retry_for is set, and we do match.
# - name is not actually a member of the client class.
if (
attempt >= self._attempts - 1
or (self._retry_for and not isinstance(exc, self._retry_for))
or (
self._do_not_retry_for
and isinstance(exc, self._do_not_retry_for)
)
or name not in self._client_dir
):
raise exc
# Sleep and try again.
sleep(self._retry_delay)
# This is the real magic soup of the class, we catch anything that isn't
# strictly defined for ourselves and pass it on to whatever client we've
# been given.
def __getattr__(self, name):
return lambda *args, **kwargs: self._retry(
name, self._client.__getattribute__(name), *args, **kwargs
)
# We implement these explicitly because they're "magic" functions and won't
# get passed on by __getattr__.
def __dir__(self):
return self._client_dir
# These magics are copied from the base client.
def __setitem__(self, key, value):
self.set(key, value, noreply=True)
def __getitem__(self, key):
value = self.get(key)
if value is None:
raise KeyError
return value
def __delitem__(self, key):
self.delete(key, noreply=True)
| en | 0.78035 | Module containing the RetryingClient wrapper class. Helper function to ensure the given arguments are tuples of Exceptions (or subclasses), or can at least be converted to such. Args: argument_name: str, name of the argument we're checking, only used for raising meaningful exceptions. argument: any, the argument itself. Returns: tuple[Exception]: A tuple with the elements from the argument if they are valid. Exceptions: ValueError: If the argument was not None, tuple or Iterable. ValueError: If any of the elements of the argument is not a subclass of Exception. # Ensure the argument is a tuple, set or list. # Convert the argument before checking contents. # Check that all the elements are actually inherited from Exception. # (Catchable) Client that allows retrying calls for the other clients. Constructor for RetryingClient. Args: client: Client|PooledClient|HashClient, inner client to use for performing actual work. attempts: optional int, how many times to attempt an action before failing. Must be 1 or above. Defaults to 2. retry_delay: optional int|float, how many seconds to sleep between each attempt. Defaults to 0. retry_for: optional None|tuple|set|list, what exceptions to allow retries for. Will allow retries for all exceptions if None. Example: `(MemcacheClientError, MemcacheUnexpectedCloseError)` Accepts any class that is a subclass of Exception. Defaults to None. do_not_retry_for: optional None|tuple|set|list, what exceptions should be retried. Will not block retries for any Exception if None. Example: `(IOError, MemcacheIllegalInputError)` Accepts any class that is a subclass of Exception. Defaults to None. Exceptions: ValueError: If `attempts` is not 1 or above. ValueError: If `retry_for` or `do_not_retry_for` is not None, tuple or Iterable. ValueError: If any of the elements of `retry_for` or `do_not_retry_for` is not a subclass of Exception. ValueError: If there is any overlap between `retry_for` and `do_not_retry_for`. # Verify no overlap in the go/no-go exception collections. # Take dir from the client to speed up future checks. Workhorse function, handles retry logic. Args: name: str, Name of the function called. func: callable, the function to retry. *args: args, array arguments to pass to the function. **kwargs: kwargs, keyword arguments to pass to the function. # Raise the exception to caller if either is met: # - We've used the last attempt. # - self._retry_for is set, and we do not match. # - self._do_not_retry_for is set, and we do match. # - name is not actually a member of the client class. # Sleep and try again. # This is the real magic soup of the class, we catch anything that isn't # strictly defined for ourselves and pass it on to whatever client we've # been given. # We implement these explicitly because they're "magic" functions and won't # get passed on by __getattr__. # These magics are copied from the base client. | 3.207531 | 3 |
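The constructor documented above is easiest to read from a call site. The sketch below wraps a plain `Client` so that only dropped-connection errors are retried; it assumes a memcached server listening on localhost:11211.

```python
from pymemcache.client.base import Client
from pymemcache.client.retrying import RetryingClient
from pymemcache.exceptions import MemcacheUnexpectedCloseError

base = Client(("localhost", 11211))
client = RetryingClient(
    base,
    attempts=3,                                 # each call gets up to 3 tries
    retry_delay=0.01,                           # 10 ms pause between tries
    retry_for=(MemcacheUnexpectedCloseError,),  # anything else is raised immediately
)

client.set("greeting", "hello", expire=60)
print(client.get("greeting"))
```

Because `__getattr__` forwards unknown attribute lookups to the wrapped client, the same retry logic covers `get`, `set`, `delete` and every other client method without the wrapper having to re-declare them.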
8.1.py | HuaichenOvO/EIE3280HW | 0 | 8210 | <reponame>HuaichenOvO/EIE3280HW
import numpy as np
import numpy.linalg as lg
A_mat = np.matrix([
[0, 1, 1, 1, 0],
[1, 0, 0, 0, 1],
[1, 0, 0, 1, 1],
[1, 0, 1, 0, 1],
[0, 1, 1, 1, 0]
])
eigen = lg.eig(A_mat) # returns a (eigenvalues, eigenvectors) pair; the 5 eigenvectors are linearly independent
vec = eigen[1][:, 0] # the first eigenvector column (note: numpy.linalg.eig does not sort by eigenvalue)
value = eigen[0][0] # the eigenvalue corresponding to that first column
print(vec)
print(A_mat * vec)
print(value * vec)
| import numpy as np
import numpy.linalg as lg
A_mat = np.matrix([
[0, 1, 1, 1, 0],
[1, 0, 0, 0, 1],
[1, 0, 0, 1, 1],
[1, 0, 1, 0, 1],
[0, 1, 1, 1, 0]
])
eigen = lg.eig(A_mat) # returns a (eigenvalues, eigenvectors) pair; the 5 eigenvectors are linearly independent
vec = eigen[1][:, 0] # the first eigenvector column (note: numpy.linalg.eig does not sort by eigenvalue)
value = eigen[0][0] # the eigenvalue corresponding to that first column
print(vec)
print(A_mat * vec)
print(value * vec) | en | 0.466959 | # return Arr[5] with 5 different linear independent eigen values # the column (eigen vector) with the largest eigen value # the largest eigen value | 3.094293 | 3 |
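One caveat about the snippet above: `numpy.linalg.eig` returns eigenvalues in no particular order, so indexing column 0 only happens to pick the dominant eigenpair. A safer variant selects it explicitly and checks A v = lambda v numerically:

```python
import numpy as np
import numpy.linalg as lg

A = np.array([[0, 1, 1, 1, 0],
              [1, 0, 0, 0, 1],
              [1, 0, 0, 1, 1],
              [1, 0, 1, 0, 1],
              [0, 1, 1, 1, 0]], dtype=float)

values, vectors = lg.eig(A)
k = int(np.argmax(values.real))          # index of the largest eigenvalue
vec, val = vectors[:, k], values[k]
print(val)
print(np.allclose(A @ vec, val * vec))   # True: the pair satisfies A v = lambda v
```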
classroom/migrations/0025_myfile_file.py | Abulhusain/E-learing | 5 | 8211 | # Generated by Django 2.2.2 on 2019-08-25 09:29
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('classroom', '0024_auto_20190825_1723'),
]
operations = [
migrations.AddField(
model_name='myfile',
name='file',
field=models.CharField(blank=True, max_length=100),
),
]
| # Generated by Django 2.2.2 on 2019-08-25 09:29
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('classroom', '0024_auto_20190825_1723'),
]
operations = [
migrations.AddField(
model_name='myfile',
name='file',
field=models.CharField(blank=True, max_length=100),
),
]
| en | 0.699913 | # Generated by Django 2.2.2 on 2019-08-25 09:29 | 1.553272 | 2 |
jumbo_api/objects/profile.py | rolfberkenbosch/python-jumbo-api | 3 | 8212 | <filename>jumbo_api/objects/profile.py
from jumbo_api.objects.store import Store
class Profile(object):
def __init__(self, data):
self.id = data.get("identifier")
self.store = Store(data.get("store"))
def __str__(self):
return f"{self.id} {self.store}"
| <filename>jumbo_api/objects/profile.py
from jumbo_api.objects.store import Store
class Profile(object):
def __init__(self, data):
self.id = data.get("identifier")
self.store = Store(data.get("store"))
def __str__(self):
return f"{self.id} {self.store}"
| none | 1 | 2.558684 | 3 |
|
tmp/real_time_log_analy/logWatcher.py | hankai17/test | 7 | 8213 | <gh_stars>1-10
#!/usr/bin/env python
import os
import sys
import time
import errno
import stat
import datetime
import socket
import struct
import atexit
import logging
#from lru import LRUCacheDict
from logging import handlers
from task_manager import Job, taskManage
from ctypes import *
from urlparse import *
from multiprocessing import Process,Lock
from log_obj import CLog
from parse_conf import cConfParser
log_file = "timelog.log"
log_fmt = '%(asctime)s: %(message)s'
config_file = 'test.config'
domain_white_dict = {}
pps_ip_list = []
pps_port = 0
domain_sfx_err_count = 0
domain_sfx_err_rate = 0
ats_ip = ''
def daemonize(pid_file=None):
pid = os.fork()
if pid:
sys.exit(0)
os.chdir('/')
os.umask(0)
os.setsid()
_pid = os.fork()
if _pid:
sys.exit(0)
sys.stdout.flush()
sys.stderr.flush()
with open('/dev/null') as read_null, open('/dev/null', 'w') as write_null:
os.dup2(read_null.fileno(), sys.stdin.fileno())
os.dup2(write_null.fileno(), sys.stdout.fileno())
os.dup2(write_null.fileno(), sys.stderr.fileno())
if pid_file:
with open(pid_file, 'w+') as f:
f.write(str(os.getpid()))
atexit.register(os.remove, pid_file)
def get_suffix(p):
if len(p) == 1:
#return "pure domain"
return "nil"
fields = p.split("/")
if len(fields) == 0 or len(fields) == 1:
return "null"
fields1 = fields[len(fields) - 1].split(".")
if len(fields1) == 0 or len(fields1) == 1:
return "null"
else:
return fields1[len(fields1) - 1]
class LogWatcher(object):
def __init__(self, folder, callback, extensions=["log"], logfile_keyword="squid", tail_lines=0):
self.files_map = {}
self.callback = callback
self.folder = os.path.realpath(folder)
self.extensions = extensions
self.logfile_kw = logfile_keyword
assert os.path.exists(self.folder), "%s does not exists" % self.folder
assert callable(callback)
self.update_files()
for id, file in self.files_map.iteritems():
file.seek(os.path.getsize(file.name)) # EOF
if tail_lines:
lines = self.tail(file.name, tail_lines)
if lines:
self.callback(file.name, lines)
def __del__(self):
self.close()
def loop(self, interval=0.1, async=False):
while 1:
try:
self.update_files()
for fid, file in list(self.files_map.iteritems()):
self.readfile(file)
if async:
return
time.sleep(interval)
except KeyboardInterrupt:
break
def log(self, line):
print line
def listdir(self):
ls = os.listdir(self.folder)
if self.extensions:
return [x for x in ls if os.path.splitext(x)[1][1:] in self.extensions and self.logfile_kw in os.path.split(x)[1] ]
else:
return ls
@staticmethod
def tail(fname, window):
try:
f = open(fname, 'r')
except IOError, err:
if err.errno == errno.ENOENT:
return []
else:
raise
else:
BUFSIZ = 1024
f.seek(0, os.SEEK_END)
fsize = f.tell()
block = -1
data = ""
exit = False
while not exit:
step = (block * BUFSIZ)
if abs(step) >= fsize:
f.seek(0)
exit = True
else:
f.seek(step, os.SEEK_END)
data = f.read().strip()
if data.count('\n') >= window:
break
else:
block -= 1
return data.splitlines()[-window:]
def update_files(self):
ls = []
if os.path.isdir(self.folder):
for name in self.listdir():
absname = os.path.realpath(os.path.join(self.folder, name))
try:
st = os.stat(absname)
except EnvironmentError, err:
if err.errno != errno.ENOENT:
raise
else:
if not stat.S_ISREG(st.st_mode):
continue
fid = self.get_file_id(st)
ls.append((fid, absname))
elif os.path.isfile(self.folder):
absname = os.path.realpath(self.folder)
try:
st = os.stat(absname)
except EnvironmentError, err:
if err.errno != errno.ENOENT:
raise
else:
fid = self.get_file_id(st)
ls.append((fid, absname))
else:
print 'You submitted an object that was neither a file or folder...exiting now.'
sys.exit()
for fid, file in list(self.files_map.iteritems()):
try:
st = os.stat(file.name)
except EnvironmentError, err:
if err.errno == errno.ENOENT:
self.unwatch(file, fid)
else:
raise
else:
if fid != self.get_file_id(st):
self.unwatch(file, fid)
self.watch(file.name)
for fid, fname in ls:
if fid not in self.files_map:
self.watch(fname)
def readfile(self, file):
lines = file.readlines()
if lines:
self.callback(file.name, lines)
def watch(self, fname):
try:
file = open(fname, "r")
fid = self.get_file_id(os.stat(fname))
except EnvironmentError, err:
if err.errno != errno.ENOENT:
raise
else:
self.log("watching logfile %s" % fname)
self.files_map[fid] = file
def unwatch(self, file, fid):
lines = self.readfile(file)
self.log("un-watching logfile %s" % file.name)
del self.files_map[fid]
if lines:
self.callback(file.name, lines)
@staticmethod
def get_file_id(st):
return "%xg%x" % (st.st_dev, st.st_ino)
def close(self):
for id, file in self.files_map.iteritems():
file.close()
self.files_map.clear()
def udp_send_message(ip_list, port, arr):
for ip in ip_list:
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.sendto(arr, (ip, port))
s.close()
def pull_data(job):
if not (job.sfx == "nil" or job.sfx == "null"):
fmt = "=HHHH%dsH%dsH" %(len(job.url),len(job.sfx))
data = struct.pack(
fmt,
80, #id
1, #type
8 + len(job.url) + 2 + len(job.sfx) + 1, #length
len(job.url), #domain_len
job.url, #domain
len(job.sfx), #sfx_len
job.sfx, #sfx
0
)
else:
fmt = "=HHHH%dsH" %(len(job.url))
data = struct.pack(
fmt,
80, #id
1, #type
8 + len(job.url) + 1, #length
len(job.url), #domain_len
job.url,
0
)
global pps_ip_list
global pps_port
udp_send_message(pps_ip_list, pps_port, data)
tmg.done_task_add(job)
log_message = job.url + ' ' + job.sfx
loger.write(20, log_message)
def callback_routine(idx):
print 'callback_routinue'
def get_domain_white(f):
if len(f) == 0:
print 'No domain_white_list'
return
filename = f
fd = open(filename, 'r')
for line in fd.readlines():
line = line.strip()
if not domain_white_dict.has_key(line):
domain_white_dict[line] = 1
print 'parse domain_white_list done'
def period_check_task(job):
global txn_idx
global once_flag
if txn_idx == 0 and once_flag == 0:
once_flag = 1
tmg.done_task_add(job)
job.addtime = time.time()
tmg.task_add(job)
return
loger.write(10, '------>')
mutex.acquire()
for k in d1.keys():
if domain_white_dict.has_key(k):
continue
for k1 in d1[k].keys():
err_rate = d1[k][k1]['not_ok'] * 100 / (d1[k][k1]['not_ok'] + d1[k][k1]['20x'])
log_message = k + ' ' + str(err_rate)
loger.write(10, log_message)
global domain_sfx_err_count
global domain_sfx_err_rate
if err_rate >= domain_sfx_err_rate and (d1[k][k1]['not_ok'] + d1[k][k1]['20x']) >= domain_sfx_err_count :
#print "will add to task", k, k1, "ok:", d1[k][k1]['20x'], "not_ok:", d1[k][k1]['not_ok'], "err rate:", err_rate
txn_idx += 1
job = Job(txn_idx, pull_data, time.time(), 0, k, '', callback_routine, k1, '')
tmg.task_add(job)
loger.write(10, '<------')
d1.clear()
mutex.release()
tmg.done_task_add(job)
if job.period > 0:
job.addtime = time.time()
tmg.task_add(job)
def config_parse():
global domain_sfx_err_count
global domain_sfx_err_rate
global pps_ip_list
global pps_port
global ats_ip
cp = cConfParser(config_file)
pps_ip = cp.get('common', 'pps_ip')
fields = pps_ip.strip().split('|')
if len(fields) > 0:
for i in fields:
pps_ip_list.append(i)
else:
pps_ip_list.append(pps_ip)
pps_port = int(cp.get('common', 'pps_port'))
domain_sfx_err_count = int(cp.get('common', 'domain_sfx_err_count' ))
domain_sfx_err_rate = int(cp.get('common', 'domain_sfx_err_rate' ))
ats_ip = cp.get('common', 'ats_ip')
print 'ats_ip: ', ats_ip
print 'pps_ip: ', pps_ip
print 'pps_port: ', pps_port
print 'domain_sfx_err_count: ', domain_sfx_err_count
print 'domain_sfx_err_rate: ', domain_sfx_err_rate
return cp
once_flag = 0
txn_idx = 0
d1 = {}
mutex = Lock()
version_message = '1.0.1'
#1.0.1: Add conf obj; Add log obj
#1.0.2: More pps. add tool config
if __name__ == '__main__':
help_message = 'Usage: python %s' % sys.argv[0]
if len(sys.argv) == 2 and (sys.argv[1] in '--version'):
print version_message
exit(1)
if len(sys.argv) == 2 and (sys.argv[1] in '--help'):
print help_message
exit(1)
if len(sys.argv) != 1:
print help_message
exit(1)
cp = config_parse()
get_domain_white(cp.get('common', 'domain_white_list'))
loger = CLog(log_file, log_fmt, 12, 5, cp.get('common', 'debug'))
print 'Start ok'
daemonize()
tmg = taskManage()
tmg.run()
pull_pps_job = Job(txn_idx, period_check_task, time.time(), int(cp.get('common', 'interval')), '', '', callback_routine, '', '')
tmg.task_add(pull_pps_job)
def callback(filename, lines):
for line in lines:
fields = line.strip().split("'")
http_code = fields[23]
domain = fields[13]
log_message = 'new line ' + domain
#loger.write(10, log_message)
if len(domain.split(":")) > 0:
domain = domain.split(":")[0]
user_ip = fields[5]
result = urlparse(fields[15])
sfx = get_suffix(result.path)
if sfx == 'nil' or sfx == 'null':
continue
if len(domain) <= 3:
continue
#is watch req
global ats_ip
if user_ip == ats_ip:
continue
mutex.acquire()
sfx_dict = None
if not d1.has_key(domain):
d1[domain] = {}
sfx_dict = d1[domain]
else:
sfx_dict = d1[domain]
if not sfx_dict.has_key(sfx):
sfx_dict[sfx] = {'20x':0, 'not_ok':0}
if not(http_code in "200" or http_code in "206" or http_code in "304" or http_code in "204"):
sfx_dict[sfx]['not_ok'] += 1
else:
sfx_dict[sfx]['20x'] += 1
mutex.release()
l = LogWatcher("/opt/ats/var/log/trafficserver", callback)
l.loop()
#https://docs.python.org/2/library/ctypes.html
#https://blog.csdn.net/u012611644/article/details/80529746
| #!/usr/bin/env python
import os
import sys
import time
import errno
import stat
import datetime
import socket
import struct
import atexit
import logging
#from lru import LRUCacheDict
from logging import handlers
from task_manager import Job, taskManage
from ctypes import *
from urlparse import *
from multiprocessing import Process,Lock
from log_obj import CLog
from parse_conf import cConfParser
log_file = "timelog.log"
log_fmt = '%(asctime)s: %(message)s'
config_file = 'test.config'
domain_white_dict = {}
pps_ip_list = []
pps_port = 0
domain_sfx_err_count = 0
domain_sfx_err_rate = 0
ats_ip = ''
def daemonize(pid_file=None):
pid = os.fork()
if pid:
sys.exit(0)
os.chdir('/')
os.umask(0)
os.setsid()
_pid = os.fork()
if _pid:
sys.exit(0)
sys.stdout.flush()
sys.stderr.flush()
with open('/dev/null') as read_null, open('/dev/null', 'w') as write_null:
os.dup2(read_null.fileno(), sys.stdin.fileno())
os.dup2(write_null.fileno(), sys.stdout.fileno())
os.dup2(write_null.fileno(), sys.stderr.fileno())
if pid_file:
with open(pid_file, 'w+') as f:
f.write(str(os.getpid()))
atexit.register(os.remove, pid_file)
def get_suffix(p):
if len(p) == 1:
#return "pure domain"
return "nil"
fields = p.split("/")
if len(fields) == 0 or len(fields) == 1:
return "null"
fields1 = fields[len(fields) - 1].split(".")
if len(fields1) == 0 or len(fields1) == 1:
return "null"
else:
return fields1[len(fields1) - 1]
class LogWatcher(object):
def __init__(self, folder, callback, extensions=["log"], logfile_keyword="squid", tail_lines=0):
self.files_map = {}
self.callback = callback
self.folder = os.path.realpath(folder)
self.extensions = extensions
self.logfile_kw = logfile_keyword
assert os.path.exists(self.folder), "%s does not exists" % self.folder
assert callable(callback)
self.update_files()
for id, file in self.files_map.iteritems():
file.seek(os.path.getsize(file.name)) # EOF
if tail_lines:
lines = self.tail(file.name, tail_lines)
if lines:
self.callback(file.name, lines)
def __del__(self):
self.close()
def loop(self, interval=0.1, async=False):
while 1:
try:
self.update_files()
for fid, file in list(self.files_map.iteritems()):
self.readfile(file)
if async:
return
time.sleep(interval)
except KeyboardInterrupt:
break
def log(self, line):
print line
def listdir(self):
ls = os.listdir(self.folder)
if self.extensions:
return [x for x in ls if os.path.splitext(x)[1][1:] in self.extensions and self.logfile_kw in os.path.split(x)[1] ]
else:
return ls
@staticmethod
def tail(fname, window):
try:
f = open(fname, 'r')
except IOError, err:
if err.errno == errno.ENOENT:
return []
else:
raise
else:
BUFSIZ = 1024
f.seek(0, os.SEEK_END)
fsize = f.tell()
block = -1
data = ""
exit = False
while not exit:
step = (block * BUFSIZ)
if abs(step) >= fsize:
f.seek(0)
exit = True
else:
f.seek(step, os.SEEK_END)
data = f.read().strip()
if data.count('\n') >= window:
break
else:
block -= 1
return data.splitlines()[-window:]
def update_files(self):
ls = []
if os.path.isdir(self.folder):
for name in self.listdir():
absname = os.path.realpath(os.path.join(self.folder, name))
try:
st = os.stat(absname)
except EnvironmentError, err:
if err.errno != errno.ENOENT:
raise
else:
if not stat.S_ISREG(st.st_mode):
continue
fid = self.get_file_id(st)
ls.append((fid, absname))
elif os.path.isfile(self.folder):
absname = os.path.realpath(self.folder)
try:
st = os.stat(absname)
except EnvironmentError, err:
if err.errno != errno.ENOENT:
raise
else:
fid = self.get_file_id(st)
ls.append((fid, absname))
else:
print 'You submitted an object that was neither a file or folder...exiting now.'
sys.exit()
for fid, file in list(self.files_map.iteritems()):
try:
st = os.stat(file.name)
except EnvironmentError, err:
if err.errno == errno.ENOENT:
self.unwatch(file, fid)
else:
raise
else:
if fid != self.get_file_id(st):
self.unwatch(file, fid)
self.watch(file.name)
for fid, fname in ls:
if fid not in self.files_map:
self.watch(fname)
def readfile(self, file):
lines = file.readlines()
if lines:
self.callback(file.name, lines)
def watch(self, fname):
try:
file = open(fname, "r")
fid = self.get_file_id(os.stat(fname))
except EnvironmentError, err:
if err.errno != errno.ENOENT:
raise
else:
self.log("watching logfile %s" % fname)
self.files_map[fid] = file
def unwatch(self, file, fid):
lines = self.readfile(file)
self.log("un-watching logfile %s" % file.name)
del self.files_map[fid]
if lines:
self.callback(file.name, lines)
@staticmethod
def get_file_id(st):
return "%xg%x" % (st.st_dev, st.st_ino)
def close(self):
for id, file in self.files_map.iteritems():
file.close()
self.files_map.clear()
def udp_send_message(ip_list, port, arr):
for ip in ip_list:
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.sendto(arr, (ip, port))
s.close()
def pull_data(job):
if not (job.sfx == "nil" or job.sfx == "null"):
fmt = "=HHHH%dsH%dsH" %(len(job.url),len(job.sfx))
data = struct.pack(
fmt,
80, #id
1, #type
8 + len(job.url) + 2 + len(job.sfx) + 1, #length
len(job.url), #domain_len
job.url, #domain
len(job.sfx), #sfx_len
job.sfx, #sfx
0
)
else:
fmt = "=HHHH%dsH" %(len(job.url))
data = struct.pack(
fmt,
80, #id
1, #type
8 + len(job.url) + 1, #length
len(job.url), #domain_len
job.url,
0
)
global pps_ip_list
global pps_port
udp_send_message(pps_ip_list, pps_port, data)
tmg.done_task_add(job)
log_message = job.url + ' ' + job.sfx
loger.write(20, log_message)
def callback_routine(idx):
print 'callback_routinue'
def get_domain_white(f):
if len(f) == 0:
print 'No domain_white_list'
return
filename = f
fd = open(filename, 'r')
for line in fd.readlines():
line = line.strip()
if not domain_white_dict.has_key(line):
domain_white_dict[line] = 1
print 'parse domain_white_list done'
def period_check_task(job):
global txn_idx
global once_flag
if txn_idx == 0 and once_flag == 0:
once_flag = 1
tmg.done_task_add(job)
job.addtime = time.time()
tmg.task_add(job)
return
loger.write(10, '------>')
mutex.acquire()
for k in d1.keys():
if domain_white_dict.has_key(k):
continue
for k1 in d1[k].keys():
err_rate = d1[k][k1]['not_ok'] * 100 / (d1[k][k1]['not_ok'] + d1[k][k1]['20x'])
log_message = k + ' ' + str(err_rate)
loger.write(10, log_message)
global domain_sfx_err_count
global domain_sfx_err_rate
if err_rate >= domain_sfx_err_rate and (d1[k][k1]['not_ok'] + d1[k][k1]['20x']) >= domain_sfx_err_count :
#print "will add to task", k, k1, "ok:", d1[k][k1]['20x'], "not_ok:", d1[k][k1]['not_ok'], "err rate:", err_rate
txn_idx += 1
job = Job(txn_idx, pull_data, time.time(), 0, k, '', callback_routine, k1, '')
tmg.task_add(job)
loger.write(10, '<------')
d1.clear()
mutex.release()
tmg.done_task_add(job)
if job.period > 0:
job.addtime = time.time()
tmg.task_add(job)
def config_parse():
global domain_sfx_err_count
global domain_sfx_err_rate
global pps_ip_list
global pps_port
global ats_ip
cp = cConfParser(config_file)
pps_ip = cp.get('common', 'pps_ip')
fields = pps_ip.strip().split('|')
if len(fields) > 0:
for i in fields:
pps_ip_list.append(i)
else:
pps_ip_list.append(pps_ip)
pps_port = int(cp.get('common', 'pps_port'))
domain_sfx_err_count = int(cp.get('common', 'domain_sfx_err_count' ))
domain_sfx_err_rate = int(cp.get('common', 'domain_sfx_err_rate' ))
ats_ip = cp.get('common', 'ats_ip')
print 'ats_ip: ', ats_ip
print 'pps_ip: ', pps_ip
print 'pps_port: ', pps_port
print 'domain_sfx_err_count: ', domain_sfx_err_count
print 'domain_sfx_err_rate: ', domain_sfx_err_rate
return cp
once_flag = 0
txn_idx = 0
d1 = {}
mutex = Lock()
version_message = '1.0.1'
#1.0.1: Add conf obj; Add log obj
#1.0.2: More pps. add tool config
if __name__ == '__main__':
help_message = 'Usage: python %s' % sys.argv[0]
if len(sys.argv) == 2 and (sys.argv[1] in '--version'):
print version_message
exit(1)
if len(sys.argv) == 2 and (sys.argv[1] in '--help'):
print help_message
exit(1)
if len(sys.argv) != 1:
print help_message
exit(1)
cp = config_parse()
get_domain_white(cp.get('common', 'domain_white_list'))
loger = CLog(log_file, log_fmt, 12, 5, cp.get('common', 'debug'))
print 'Start ok'
daemonize()
tmg = taskManage()
tmg.run()
pull_pps_job = Job(txn_idx, period_check_task, time.time(), int(cp.get('common', 'interval')), '', '', callback_routine, '', '')
tmg.task_add(pull_pps_job)
def callback(filename, lines):
for line in lines:
fields = line.strip().split("'")
http_code = fields[23]
domain = fields[13]
log_message = 'new line ' + domain
#loger.write(10, log_message)
if len(domain.split(":")) > 0:
domain = domain.split(":")[0]
user_ip = fields[5]
result = urlparse(fields[15])
sfx = get_suffix(result.path)
if sfx == 'nil' or sfx == 'null':
continue
if len(domain) <= 3:
continue
#is watch req
global ats_ip
if user_ip == ats_ip:
continue
mutex.acquire()
sfx_dict = None
if not d1.has_key(domain):
d1[domain] = {}
sfx_dict = d1[domain]
else:
sfx_dict = d1[domain]
if not sfx_dict.has_key(sfx):
sfx_dict[sfx] = {'20x':0, 'not_ok':0}
if not(http_code in "200" or http_code in "206" or http_code in "304" or http_code in "204"):
sfx_dict[sfx]['not_ok'] += 1
else:
sfx_dict[sfx]['20x'] += 1
mutex.release()
l = LogWatcher("/opt/ats/var/log/trafficserver", callback)
l.loop()
#https://docs.python.org/2/library/ctypes.html
#https://blog.csdn.net/u012611644/article/details/80529746 | en | 0.439934 | #!/usr/bin/env python #from lru import LRUCacheDict #return "pure domain" # EOF #id #type #length #domain_len #domain #sfx_len #sfx #id #type #length #domain_len #print "will add to task", k, k1, "ok:", d1[k][k1]['20x'], "not_ok:", d1[k][k1]['not_ok'], "err rate:", err_rate #1.0.1: Add conf obj; Add log obj #1.0.2: More pps. add tool config #loger.write(10, log_message) #is watch req #https://docs.python.org/2/library/ctypes.html #https://blog.csdn.net/u012611644/article/details/80529746 | 2.103302 | 2 |
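The `struct.pack` calls in `pull_data()` above define a small UDP wire format: three 2-byte header fields (id, type, length), a length-prefixed domain, an optional length-prefixed suffix, and a trailing zero. The receiver-side sketch below is inferred purely from those format strings, not from any published protocol description.

```python
import struct

def parse_pull_message(data):
    """Decode a datagram built the way pull_data() builds it: =HHHH domain [H sfx] H."""
    msg_id, msg_type, msg_len, domain_len = struct.unpack_from("=HHHH", data, 0)
    offset = 8
    domain = struct.unpack_from("=%ds" % domain_len, data, offset)[0]
    offset += domain_len
    sfx = b""
    if len(data) - offset > 2:            # more than the trailing H left, so a suffix is present
        (sfx_len,) = struct.unpack_from("=H", data, offset)
        offset += 2
        sfx = struct.unpack_from("=%ds" % sfx_len, data, offset)[0]
    return msg_id, msg_type, msg_len, domain, sfx

packed = struct.pack("=HHHH7sH3sH", 80, 1, 8 + 7 + 2 + 3 + 1, 7, b"abc.com", 3, b"mp4", 0)
print(parse_pull_message(packed))         # (80, 1, 21, b'abc.com', b'mp4')
```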
lazyblacksmith/views/ajax/__init__.py | jonathonfletcher/LazyBlacksmith | 49 | 8214 | # -*- encoding: utf-8 -*-
from flask import request
from lazyblacksmith.utils.request import is_xhr
import logging
logger = logging.getLogger('lb.ajax')
def is_not_ajax():
"""
Return True if request is not ajax
This function is used in @cache annotation
to not cache direct call (http 403)
"""
return not is_xhr(request)
| # -*- encoding: utf-8 -*-
from flask import request
from lazyblacksmith.utils.request import is_xhr
import logging
logger = logging.getLogger('lb.ajax')
def is_not_ajax():
"""
Return True if request is not ajax
This function is used in @cache annotation
to not cache direct call (http 403)
"""
return not is_xhr(request)
| en | 0.679123 | # -*- encoding: utf-8 -*- Return True if request is not ajax
This function is used in @cache annotation
to not cache direct call (http 403) | 2.349487 | 2 |
src/automata_learning_with_policybank/Traces.py | logic-and-learning/AdvisoRL | 4 | 8215 | import os
class Traces:
def __init__(self, positive = set(), negative = set()):
self.positive = positive
self.negative = negative
"""
IG: at the moment we are adding a trace only if it ends up in an event.
should we be more restrictive, e.g. consider xxx, the same as xxxxxxxxxx (where x is an empty event '')
recent suggestion (from the meeting): ignore empty events altogether and don't consider them as events at all (neither for
execution, nor for learning)
"""
def _should_add(self, trace, i):
prefixTrace = trace[:i]
if not prefixTrace[-1] == '':
return True
else:
return False
def _get_prefixes(self, trace, up_to_limit = None):
if up_to_limit is None:
up_to_limit = len(trace)
all_prefixes = set()
for i in range(1, up_to_limit+1):
if self._should_add(trace, i):
all_prefixes.add(trace[:i])
return all_prefixes
def symbol_to_trace(self,symbols):
letters = ['a','b','c','d','e','f','g', 'h', 'n']
numbers = [int(i) for i in range(0,9)]
dictionary = dict(zip(letters, numbers))
traces = list()
for symbol in symbols:
traces.append(dictionary.get(symbol))
return tuple(traces)
def trace_to_symbol(self,traces):
letters = ['a','b','c','d','e','f','g', 'h', 'n']
numbers = [int(i) for i in range(0,9)]
dictionary = dict(zip(numbers, letters))
symbols = list()
for trace in traces:
symbols.append(dictionary.get(trace))
return tuple(traces)
def rm_trace_to_symbol(self,rm_file):
file = rm_file
letters = ['a','b','c','d','e','f','g', 'h', 'n']
numbers = [int(i) for i in range(0,9)]
dictionary = dict(zip(numbers, letters))
with open(file) as f:
content = f.readlines()
lines = []
for line in content:
end = 0
begin = 1 #initialize values based on what won't enter the loops; initial values irrelevant
number = 0 #random, had to initialize
if line != content[0]:
number = str()
check = 0
count=0
for character in line:
if ((check==1) & (character=="'")): #looks for second quotation
check = 10 #end search
end = count-1
elif (character == "'"): #looks for first quotation
check = 1
begin = count+1
elif (check==1):
number += character
count = count+1
symbol = dictionary.get(int(number))
#symbol = symbol + '&!n'
line = list(line) #necessary for use of pop,insert
if end==begin+1:
line.pop(end)
line.pop(begin)
line.insert(begin,symbol)
elif end==begin:
line.pop(begin)
line.insert(begin,symbol)
lines.append(line)
with open(rm_file, 'w') as f:
for line in lines:
for item in line:
f.write(str(item))
def fix_rmfiles(self,rmfile):
file = rmfile
with open(file) as f:
content = f.readlines()
final_state = str()
for line in content:
if line != content[0]:
brackets = 0
commas = 0
state = str()
next_state = str()
for character in line:
if (character == "(") & (brackets == 0):
brackets = 1
elif brackets == 1:
if character == "(":
brackets = 2
elif brackets == 2:
if character == "1":
final_state = next_state
print(final_state)
if ((commas == 0) & (brackets == 1)):
if character == ",":
commas = 1
else:
state += character
elif ((commas == 1) & (brackets == 1)):
if character == ",":
commas = 2
else:
next_state += character
# with open(rmfile, 'w') as f:
# for line in content:
# for item in line:
# f.write(str(item))
# f.write("\n")
# writethis = "(" + str(final_state) + "," + str(final_state) + ",'True',ConstantRewardFunction(0))"
# f.write(writethis)
"""
when adding a trace, it additionally adds all prefixes as negative traces
"""
def add_trace(self, trace, reward, learned):
trace = tuple(trace)
if reward > 0:
self.positive.add(trace)
# | is a set union operator
#if learned==0:
self.negative |= self._get_prefixes(trace, len(trace)-1)
else:
#if learned == 0:
self.negative |= self._get_prefixes(trace)
# else:
# self.negative.add(trace)
def export_traces(self, filename):
parent_path = os.path.dirname(filename)
os.makedirs(parent_path,exist_ok=True)
with open(filename, "w") as output_file:
output_file.write("POSITIVE:")
for trace in self.positive:
output_file.write("\n")
string_repr = [str(el) for el in trace]
output_file.write(','.join(string_repr))
output_file.write("\nNEGATIVE:")
for trace in self.negative:
output_file.write("\n")
string_repr = [str(el) for el in trace]
output_file.write(','.join(string_repr))
def __repr__(self):
return repr(self.positive) + "\n\n" + repr(self.negative)
| import os
class Traces:
def __init__(self, positive = set(), negative = set()):
self.positive = positive
self.negative = negative
"""
IG: at the moment we are adding a trace only if it ends up in an event.
should we be more restrictive, e.g. consider xxx, the same as xxxxxxxxxx (where x is an empty event '')
recent suggestion (from the meeting): ignore empty events altogether and don't consider them as events at all (neither for
execution, nor for learning)
"""
def _should_add(self, trace, i):
prefixTrace = trace[:i]
if not prefixTrace[-1] == '':
return True
else:
return False
def _get_prefixes(self, trace, up_to_limit = None):
if up_to_limit is None:
up_to_limit = len(trace)
all_prefixes = set()
for i in range(1, up_to_limit+1):
if self._should_add(trace, i):
all_prefixes.add(trace[:i])
return all_prefixes
def symbol_to_trace(self,symbols):
letters = ['a','b','c','d','e','f','g', 'h', 'n']
numbers = [int(i) for i in range(0,9)]
dictionary = dict(zip(letters, numbers))
traces = list()
for symbol in symbols:
traces.append(dictionary.get(symbol))
return tuple(traces)
def trace_to_symbol(self,traces):
letters = ['a','b','c','d','e','f','g', 'h', 'n']
numbers = [int(i) for i in range(0,9)]
dictionary = dict(zip(numbers, letters))
symbols = list()
for trace in traces:
symbols.append(dictionary.get(trace))
return tuple(traces)
def rm_trace_to_symbol(self,rm_file):
file = rm_file
letters = ['a','b','c','d','e','f','g', 'h', 'n']
numbers = [int(i) for i in range(0,9)]
dictionary = dict(zip(numbers, letters))
with open(file) as f:
content = f.readlines()
lines = []
for line in content:
end = 0
begin = 1 #initialize values based on what won't enter the loops; initial values irrelevant
number = 0 #random, had to initialize
if line != content[0]:
number = str()
check = 0
count=0
for character in line:
if ((check==1) & (character=="'")): #looks for second quotation
check = 10 #end search
end = count-1
elif (character == "'"): #looks for first quotation
check = 1
begin = count+1
elif (check==1):
number += character
count = count+1
symbol = dictionary.get(int(number))
#symbol = symbol + '&!n'
line = list(line) #necessary for use of pop,insert
if end==begin+1:
line.pop(end)
line.pop(begin)
line.insert(begin,symbol)
elif end==begin:
line.pop(begin)
line.insert(begin,symbol)
lines.append(line)
with open(rm_file, 'w') as f:
for line in lines:
for item in line:
f.write(str(item))
def fix_rmfiles(self,rmfile):
file = rmfile
with open(file) as f:
content = f.readlines()
final_state = str()
for line in content:
if line != content[0]:
brackets = 0
commas = 0
state = str()
next_state = str()
for character in line:
if (character == "(") & (brackets == 0):
brackets = 1
elif brackets == 1:
if character == "(":
brackets = 2
elif brackets == 2:
if character == "1":
final_state = next_state
print(final_state)
if ((commas == 0) & (brackets == 1)):
if character == ",":
commas = 1
else:
state += character
elif ((commas == 1) & (brackets == 1)):
if character == ",":
commas = 2
else:
next_state += character
# with open(rmfile, 'w') as f:
# for line in content:
# for item in line:
# f.write(str(item))
# f.write("\n")
# writethis = "(" + str(final_state) + "," + str(final_state) + ",'True',ConstantRewardFunction(0))"
# f.write(writethis)
"""
when adding a trace, it additionally adds all prefixes as negative traces
"""
def add_trace(self, trace, reward, learned):
trace = tuple(trace)
if reward > 0:
self.positive.add(trace)
# | is a set union operator
#if learned==0:
self.negative |= self._get_prefixes(trace, len(trace)-1)
else:
#if learned == 0:
self.negative |= self._get_prefixes(trace)
# else:
# self.negative.add(trace)
def export_traces(self, filename):
parent_path = os.path.dirname(filename)
os.makedirs(parent_path,exist_ok=True)
with open(filename, "w") as output_file:
output_file.write("POSITIVE:")
for trace in self.positive:
output_file.write("\n")
string_repr = [str(el) for el in trace]
output_file.write(','.join(string_repr))
output_file.write("\nNEGATIVE:")
for trace in self.negative:
output_file.write("\n")
string_repr = [str(el) for el in trace]
output_file.write(','.join(string_repr))
def __repr__(self):
return repr(self.positive) + "\n\n" + repr(self.negative)
| en | 0.865185 | IG: at the moment we are adding a trace only if it ends up in an event. should we be more restrictive, e.g. consider xxx, the same as xxxxxxxxxx (where x is an empty event '') recent suggestion (from the meeting): ignore empty events altogether and don't consider them as events at all (neither for execution, nor for learning) #initialize values based on what won't enter the loops; initial values irrelevant #random, had to initialize #looks for second quotation #end search #looks for first quotation #symbol = symbol + '&!n' #necessary for use of pop,insert # with open(rmfile, 'w') as f: # for line in content: # for item in line: # f.write(str(item)) # f.write("\n") # writethis = "(" + str(final_state) + "," + str(final_state) + ",'True',ConstantRewardFunction(0))" # f.write(writethis) when adding a trace, it additionally adds all prefixes as negative traces # | is a set union operator #if learned==0: #if learned == 0: # else: # self.negative.add(trace) | 3.563266 | 4 |
example/comp/urls.py | edwilding/django-comments-xtd | 0 | 8216 | import django
from django.conf import settings
from django.conf.urls import include, url
from django.contrib import admin
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
if django.VERSION[:2] > (1, 9):
from django.views.i18n import JavaScriptCatalog
else:
from django.views.i18n import javascript_catalog
from django_comments_xtd import LatestCommentFeed
from django_comments_xtd.views import XtdCommentListView
from comp import views
admin.autodiscover()
urlpatterns = [
url(r'^$', views.HomepageView.as_view(), name='homepage'),
url(r'^i18n/', include('django.conf.urls.i18n')),
url(r'^admin/', include(admin.site.urls)),
url(r'^articles/', include('comp.articles.urls')),
url(r'^quotes/', include('comp.quotes.urls')),
url(r'^comments/', include('django_comments_xtd.urls')),
url(r'^comments/$', XtdCommentListView.as_view(
content_types=["articles.article", "quotes.quote"],
paginate_by=10, page_range=5),
name='comments-xtd-list'),
url(r'^feeds/comments/$', LatestCommentFeed(), name='comments-feed'),
url(r'^api-auth/', include('rest_framework.urls',
namespace='rest_framework')),
]
if django.VERSION[:2] > (1, 9):
urlpatterns.append(
url(r'^jsi18n/$', JavaScriptCatalog.as_view(),
name='javascript-catalog')
)
else:
js_info_dict = {
'packages': ('django_comments_xtd',)
}
urlpatterns.append(
url(r'^jsi18n/$', javascript_catalog, js_info_dict,
name='javascript-catalog')
)
if settings.DEBUG:
urlpatterns += staticfiles_urlpatterns()
if 'rosetta' in settings.INSTALLED_APPS:
urlpatterns += [url(r'^rosetta/', include('rosetta.urls'))]
| import django
from django.conf import settings
from django.conf.urls import include, url
from django.contrib import admin
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
if django.VERSION[:2] > (1, 9):
from django.views.i18n import JavaScriptCatalog
else:
from django.views.i18n import javascript_catalog
from django_comments_xtd import LatestCommentFeed
from django_comments_xtd.views import XtdCommentListView
from comp import views
admin.autodiscover()
urlpatterns = [
url(r'^$', views.HomepageView.as_view(), name='homepage'),
url(r'^i18n/', include('django.conf.urls.i18n')),
url(r'^admin/', include(admin.site.urls)),
url(r'^articles/', include('comp.articles.urls')),
url(r'^quotes/', include('comp.quotes.urls')),
url(r'^comments/', include('django_comments_xtd.urls')),
url(r'^comments/$', XtdCommentListView.as_view(
content_types=["articles.article", "quotes.quote"],
paginate_by=10, page_range=5),
name='comments-xtd-list'),
url(r'^feeds/comments/$', LatestCommentFeed(), name='comments-feed'),
url(r'^api-auth/', include('rest_framework.urls',
namespace='rest_framework')),
]
if django.VERSION[:2] > (1, 9):
urlpatterns.append(
url(r'^jsi18n/$', JavaScriptCatalog.as_view(),
name='javascript-catalog')
)
else:
js_info_dict = {
'packages': ('django_comments_xtd',)
}
urlpatterns.append(
url(r'^jsi18n/$', javascript_catalog, js_info_dict,
name='javascript-catalog')
)
if settings.DEBUG:
urlpatterns += staticfiles_urlpatterns()
if 'rosetta' in settings.INSTALLED_APPS:
urlpatterns += [url(r'^rosetta/', include('rosetta.urls'))]
| none | 1 | 1.879492 | 2 |
|
09_multiprocessing/prime_validation/primes_factor_test.py | jumploop/high_performance_python | 0 | 8217 | <filename>09_multiprocessing/prime_validation/primes_factor_test.py<gh_stars>0
import math
import time
def check_prime(n):
if n % 2 == 0:
return False, 2
for i in range(3, int(math.sqrt(n)) + 1):
if n % i == 0:
return False, i
return True, None
if __name__ == "__main__":
primes = []
t1 = time.time()
# 100109100129100151 big prime
# http://primes.utm.edu/curios/page.php/100109100129100151.html
# number_range = xrange(100109100129100153, 100109100129101238, 2)
number_range = range(100109100129101237, 100109100129201238, 2)
# new expensive near-primes
# [(95362951, (100109100129100369, 7.254560947418213))
# (171656941, (100109100129101027, 13.052711009979248))
# (121344023, (100109100129101291, 8.994053840637207)
# note these two lines of timings look really wrong, they're about 4sec
# each really
# [(265687139, (100109100129102047, 19.642582178115845)), (219609683, (100109100129102277, 16.178056001663208)), (121344023, (100109100129101291, 8.994053840637207))]
# [(316096873, (100109100129126653, 23.480671882629395)), (313994287, (100109100129111617, 23.262380123138428)), (307151363, (100109100129140177, 22.80288815498352))]
# primes
# 100109100129162907
# 100109100129162947
highest_factors = {}
for possible_prime in number_range:
t2 = time.time()
is_prime, factor = check_prime(possible_prime)
if is_prime:
primes.append(possible_prime)
print("GOT NEW PRIME", possible_prime)
else:
highest_factors[factor] = (possible_prime, time.time() - t2)
hf = highest_factors.items()
hf = sorted(hf, reverse=True)
print(hf[:3])
print("Took:", time.time() - t1)
print(len(primes), primes[:10], primes[-10:])
| <filename>09_multiprocessing/prime_validation/primes_factor_test.py<gh_stars>0
import math
import time
def check_prime(n):
if n % 2 == 0:
return False, 2
for i in range(3, int(math.sqrt(n)) + 1):
if n % i == 0:
return False, i
return True, None
if __name__ == "__main__":
primes = []
t1 = time.time()
# 100109100129100151 big prime
# http://primes.utm.edu/curios/page.php/100109100129100151.html
# number_range = xrange(100109100129100153, 100109100129101238, 2)
number_range = range(100109100129101237, 100109100129201238, 2)
# new expensive near-primes
# [(95362951, (100109100129100369, 7.254560947418213))
# (171656941, (100109100129101027, 13.052711009979248))
# (121344023, (100109100129101291, 8.994053840637207)
# note these two lines of timings look really wrong, they're about 4sec
# each really
# [(265687139, (100109100129102047, 19.642582178115845)), (219609683, (100109100129102277, 16.178056001663208)), (121344023, (100109100129101291, 8.994053840637207))]
# [(316096873, (100109100129126653, 23.480671882629395)), (313994287, (100109100129111617, 23.262380123138428)), (307151363, (100109100129140177, 22.80288815498352))]
# primes
# 100109100129162907
# 100109100129162947
highest_factors = {}
for possible_prime in number_range:
t2 = time.time()
is_prime, factor = check_prime(possible_prime)
if is_prime:
primes.append(possible_prime)
print("GOT NEW PRIME", possible_prime)
else:
highest_factors[factor] = (possible_prime, time.time() - t2)
hf = highest_factors.items()
hf = sorted(hf, reverse=True)
print(hf[:3])
print("Took:", time.time() - t1)
print(len(primes), primes[:10], primes[-10:])
| en | 0.535439 | # 100109100129100151 big prime # http://primes.utm.edu/curios/page.php/100109100129100151.html # number_range = xrange(100109100129100153, 100109100129101238, 2) # new expensive near-primes # [(95362951, (100109100129100369, 7.254560947418213)) # (171656941, (100109100129101027, 13.052711009979248)) # (121344023, (100109100129101291, 8.994053840637207) # note these two lines of timings look really wrong, they're about 4sec # each really # [(265687139, (100109100129102047, 19.642582178115845)), (219609683, (100109100129102277, 16.178056001663208)), (121344023, (100109100129101291, 8.994053840637207))] # [(316096873, (100109100129126653, 23.480671882629395)), (313994287, (100109100129111617, 23.262380123138428)), (307151363, (100109100129140177, 22.80288815498352))] # primes # 100109100129162907 # 100109100129162947 | 2.728615 | 3 |
python/test/test_dynamic_bitset.py | hagabb/katana | 0 | 8218 | import pytest
from katana.dynamic_bitset import DynamicBitset
__all__ = []
SIZE = 50
@pytest.fixture
def dbs():
return DynamicBitset(SIZE)
def test_set(dbs):
dbs[10] = 1
assert dbs[10]
def test_set_invalid_type(dbs):
try:
dbs[2.3] = 0
assert False
except TypeError:
pass
def test_set_invalid_index_low(dbs):
try:
dbs[-1] = 1
assert False
except IndexError:
pass
def test_set_invalid_index_high(dbs):
try:
dbs[SIZE] = 1
assert False
except IndexError:
pass
def test_reset(dbs):
dbs[10] = 1
dbs.reset()
assert not dbs[10]
assert len(dbs) == SIZE
def test_reset_index(dbs):
dbs[10] = 1
dbs[10] = 0
assert not dbs[10]
def test_reset_begin_end(dbs):
dbs[10] = 1
dbs[15] = 1
dbs[12:17] = 0
assert dbs[10]
assert not dbs[15]
def test_reset_begin_end_invalid_step(dbs):
try:
dbs[12:17:22] = 0
assert False
except ValueError:
pass
def test_reset_none_end(dbs):
dbs[10] = 1
dbs[15] = 1
dbs[:12] = 0
assert not dbs[10]
assert dbs[15]
def test_resize(dbs):
dbs.resize(20)
assert len(dbs) == 20
dbs[8] = 1
dbs.resize(20)
assert len(dbs) == 20
assert dbs[8]
dbs.resize(70)
assert len(dbs) == 70
assert dbs[8]
assert dbs.count() == 1
def test_clear(dbs):
dbs[10] = 1
dbs.clear()
assert len(dbs) == 0
dbs.resize(20)
assert len(dbs) == 20
assert not dbs[10]
def test_count(dbs):
dbs[10] = 1
assert dbs.count() == 1
| import pytest
from katana.dynamic_bitset import DynamicBitset
__all__ = []
SIZE = 50
@pytest.fixture
def dbs():
return DynamicBitset(SIZE)
def test_set(dbs):
dbs[10] = 1
assert dbs[10]
def test_set_invalid_type(dbs):
try:
dbs[2.3] = 0
assert False
except TypeError:
pass
def test_set_invalid_index_low(dbs):
try:
dbs[-1] = 1
assert False
except IndexError:
pass
def test_set_invalid_index_high(dbs):
try:
dbs[SIZE] = 1
assert False
except IndexError:
pass
def test_reset(dbs):
dbs[10] = 1
dbs.reset()
assert not dbs[10]
assert len(dbs) == SIZE
def test_reset_index(dbs):
dbs[10] = 1
dbs[10] = 0
assert not dbs[10]
def test_reset_begin_end(dbs):
dbs[10] = 1
dbs[15] = 1
dbs[12:17] = 0
assert dbs[10]
assert not dbs[15]
def test_reset_begin_end_invalid_step(dbs):
try:
dbs[12:17:22] = 0
assert False
except ValueError:
pass
def test_reset_none_end(dbs):
dbs[10] = 1
dbs[15] = 1
dbs[:12] = 0
assert not dbs[10]
assert dbs[15]
def test_resize(dbs):
dbs.resize(20)
assert len(dbs) == 20
dbs[8] = 1
dbs.resize(20)
assert len(dbs) == 20
assert dbs[8]
dbs.resize(70)
assert len(dbs) == 70
assert dbs[8]
assert dbs.count() == 1
def test_clear(dbs):
dbs[10] = 1
dbs.clear()
assert len(dbs) == 0
dbs.resize(20)
assert len(dbs) == 20
assert not dbs[10]
def test_count(dbs):
dbs[10] = 1
assert dbs.count() == 1
| none | 1 | 2.164011 | 2 |
|
tests/basic/test_basic.py | kopp/python-astar | 133 | 8219 | import unittest
import astar
class BasicTests(unittest.TestCase):
def test_bestpath(self):
"""ensure that we take the shortest path, and not the path with less elements.
the path with less elements is A -> B with a distance of 100
the shortest path is A -> C -> D -> B with a distance of 60
"""
nodes = {'A': [('B', 100), ('C', 20)],
'C': [('D', 20)], 'D': [('B', 20)]}
def neighbors(n):
for n1, d in nodes[n]:
yield n1
def distance(n1, n2):
for n, d in nodes[n1]:
if n == n2:
return d
def cost(n, goal):
return 1
path = list(astar.find_path('A', 'B', neighbors_fnct=neighbors,
heuristic_cost_estimate_fnct=cost, distance_between_fnct=distance))
self.assertEqual(4, len(path))
for i, n in enumerate('ACDB'):
self.assertEqual(n, path[i])
if __name__ == '__main__':
unittest.main()
| import unittest
import astar
class BasicTests(unittest.TestCase):
def test_bestpath(self):
"""ensure that we take the shortest path, and not the path with less elements.
the path with less elements is A -> B with a distance of 100
the shortest path is A -> C -> D -> B with a distance of 60
"""
nodes = {'A': [('B', 100), ('C', 20)],
'C': [('D', 20)], 'D': [('B', 20)]}
def neighbors(n):
for n1, d in nodes[n]:
yield n1
def distance(n1, n2):
for n, d in nodes[n1]:
if n == n2:
return d
def cost(n, goal):
return 1
path = list(astar.find_path('A', 'B', neighbors_fnct=neighbors,
heuristic_cost_estimate_fnct=cost, distance_between_fnct=distance))
self.assertEqual(4, len(path))
for i, n in enumerate('ACDB'):
self.assertEqual(n, path[i])
if __name__ == '__main__':
unittest.main()
| en | 0.940567 | ensure that we take the shortest path, and not the path with less elements. the path with less elements is A -> B with a distance of 100 the shortest path is A -> C -> D -> B with a distance of 60 | 3.804317 | 4 |
potions.py | abdza/skyrim_formulas | 0 | 8220 | #!/bin/env python3
import csv
def intersect(list1,list2):
list3 = [ value for value in list1 if value in list2]
return list3
def category(list1,effects):
cat = 'Good'
good = 0
bad = 0
for ing in list1:
if effects[ing]=='Good':
good += 1
else:
bad += 1
if bad==0:
return 'Potion'
elif good==0:
return 'Poison'
else:
return 'Downside'
effects = {}
ingredients = {}
print("Formulating formulas")
with open('ingredients.csv') as csvfile:
aff = csv.reader(csvfile, delimiter=',')
for row in aff:
if row[0] not in effects.keys():
effects[row[0]] = row[1]
with open('skyrim-ingredients.csv', newline='') as csvfile:
ingre = csv.reader(csvfile, delimiter=',')
for row in ingre:
if row[0] not in ingredients.keys():
ingredients[row[0]] = [row[1],row[2],row[3],row[4]]
multieffects = {}
for ce in effects:
curing = []
for ing in ingredients:
if ce in ingredients[ing]:
curing.append(ing)
for k,curi in enumerate(curing):
for i in range(k+1,len(curing)):
cureff = intersect(ingredients[curi],ingredients[curing[i]])
cureff.sort()
if len(cureff)>1:
if curi>curing[i]:
curname = curing[i] + ':' + curi
else:
curname = curi + ':' + curing[i]
multieffects[curname] = cureff
finallist = {}
for me in multieffects:
curing = me.split(":")
for ing in ingredients:
if ing!=curing[0] and ing!=curing[1]:
eff1 = intersect(ingredients[curing[0]],ingredients[ing])
eff2 = intersect(ingredients[curing[1]],ingredients[ing])
if len(eff1)>0 or len(eff2)>0:
tmpname = [ val for val in curing ]
tmpname.append(ing)
tmpname.sort()
finalname = ":".join(tmpname)
finallist[finalname] = list(set(multieffects[me] + eff1 + eff2))
finallist[finalname].sort()
with open('formulas.csv',mode='w') as formula_file:
formula_writer = csv.writer(formula_file, delimiter=',')
formula_writer.writerow(['Category','Ingredient 1','Ingredient 2','Ingredient 3','Effect 1','Effect 2','Effect 3','Effect 4','Effect 5'])
for fl in finallist:
formula_writer.writerow([category(finallist[fl],effects)] + fl.split(":") + finallist[fl])
for fl in multieffects:
formula_writer.writerow([category(multieffects[fl],effects)] + fl.split(":") + [''] + multieffects[fl])
| #!/bin/env python3
import csv
def intersect(list1,list2):
list3 = [ value for value in list1 if value in list2]
return list3
def category(list1,effects):
cat = 'Good'
good = 0
bad = 0
for ing in list1:
if effects[ing]=='Good':
good += 1
else:
bad += 1
if bad==0:
return 'Potion'
elif good==0:
return 'Poison'
else:
return 'Downside'
effects = {}
ingredients = {}
print("Formulating formulas")
with open('ingredients.csv') as csvfile:
aff = csv.reader(csvfile, delimiter=',')
for row in aff:
if row[0] not in effects.keys():
effects[row[0]] = row[1]
with open('skyrim-ingredients.csv', newline='') as csvfile:
ingre = csv.reader(csvfile, delimiter=',')
for row in ingre:
if row[0] not in ingredients.keys():
ingredients[row[0]] = [row[1],row[2],row[3],row[4]]
multieffects = {}
for ce in effects:
curing = []
for ing in ingredients:
if ce in ingredients[ing]:
curing.append(ing)
for k,curi in enumerate(curing):
for i in range(k+1,len(curing)):
cureff = intersect(ingredients[curi],ingredients[curing[i]])
cureff.sort()
if len(cureff)>1:
if curi>curing[i]:
curname = curing[i] + ':' + curi
else:
curname = curi + ':' + curing[i]
multieffects[curname] = cureff
finallist = {}
for me in multieffects:
curing = me.split(":")
for ing in ingredients:
if ing!=curing[0] and ing!=curing[1]:
eff1 = intersect(ingredients[curing[0]],ingredients[ing])
eff2 = intersect(ingredients[curing[1]],ingredients[ing])
if len(eff1)>0 or len(eff2)>0:
tmpname = [ val for val in curing ]
tmpname.append(ing)
tmpname.sort()
finalname = ":".join(tmpname)
finallist[finalname] = list(set(multieffects[me] + eff1 + eff2))
finallist[finalname].sort()
with open('formulas.csv',mode='w') as formula_file:
formula_writer = csv.writer(formula_file, delimiter=',')
formula_writer.writerow(['Category','Ingredient 1','Ingredient 2','Ingredient 3','Effect 1','Effect 2','Effect 3','Effect 4','Effect 5'])
for fl in finallist:
formula_writer.writerow([category(finallist[fl],effects)] + fl.split(":") + finallist[fl])
for fl in multieffects:
formula_writer.writerow([category(multieffects[fl],effects)] + fl.split(":") + [''] + multieffects[fl])
| ru | 0.167759 | #!/bin/env python3 | 3.36262 | 3 |
src/clients/ctm_api_client/models/user_additional_properties.py | IceT-M/ctm-python-client | 5 | 8221 | # coding: utf-8
"""
Control-M Services
Provides access to BMC Control-M Services # noqa: E501
OpenAPI spec version: 9.20.215
Contact: <EMAIL>
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from clients.ctm_api_client.configuration import Configuration
class UserAdditionalProperties(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
"member_of_groups": "list[str]",
"authentication": "AuthenticationData",
"is_external_user": "bool",
}
attribute_map = {
"member_of_groups": "memberOfGroups",
"authentication": "authentication",
"is_external_user": "isExternalUser",
}
def __init__(
self,
member_of_groups=None,
authentication=None,
is_external_user=None,
_configuration=None,
): # noqa: E501
"""UserAdditionalProperties - a model defined in Swagger""" # noqa: E501
if _configuration is None:
_configuration = Configuration()
self._configuration = _configuration
self._member_of_groups = None
self._authentication = None
self._is_external_user = None
self.discriminator = None
if member_of_groups is not None:
self.member_of_groups = member_of_groups
if authentication is not None:
self.authentication = authentication
if is_external_user is not None:
self.is_external_user = is_external_user
@property
def member_of_groups(self):
"""Gets the member_of_groups of this UserAdditionalProperties. # noqa: E501
List of role names # noqa: E501
:return: The member_of_groups of this UserAdditionalProperties. # noqa: E501
:rtype: list[str]
"""
return self._member_of_groups
@member_of_groups.setter
def member_of_groups(self, member_of_groups):
"""Sets the member_of_groups of this UserAdditionalProperties.
List of role names # noqa: E501
:param member_of_groups: The member_of_groups of this UserAdditionalProperties. # noqa: E501
:type: list[str]
"""
self._member_of_groups = member_of_groups
@property
def authentication(self):
"""Gets the authentication of this UserAdditionalProperties. # noqa: E501
user authentication # noqa: E501
:return: The authentication of this UserAdditionalProperties. # noqa: E501
:rtype: AuthenticationData
"""
return self._authentication
@authentication.setter
def authentication(self, authentication):
"""Sets the authentication of this UserAdditionalProperties.
user authentication # noqa: E501
:param authentication: The authentication of this UserAdditionalProperties. # noqa: E501
:type: AuthenticationData
"""
self._authentication = authentication
@property
def is_external_user(self):
"""Gets the is_external_user of this UserAdditionalProperties. # noqa: E501
:return: The is_external_user of this UserAdditionalProperties. # noqa: E501
:rtype: bool
"""
return self._is_external_user
@is_external_user.setter
def is_external_user(self, is_external_user):
"""Sets the is_external_user of this UserAdditionalProperties.
:param is_external_user: The is_external_user of this UserAdditionalProperties. # noqa: E501
:type: bool
"""
self._is_external_user = is_external_user
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(
map(lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value)
)
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(
map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict")
else item,
value.items(),
)
)
else:
result[attr] = value
if issubclass(UserAdditionalProperties, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, UserAdditionalProperties):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, UserAdditionalProperties):
return True
return self.to_dict() != other.to_dict()
| # coding: utf-8
"""
Control-M Services
Provides access to BMC Control-M Services # noqa: E501
OpenAPI spec version: 9.20.215
Contact: <EMAIL>
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from clients.ctm_api_client.configuration import Configuration
class UserAdditionalProperties(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
"member_of_groups": "list[str]",
"authentication": "AuthenticationData",
"is_external_user": "bool",
}
attribute_map = {
"member_of_groups": "memberOfGroups",
"authentication": "authentication",
"is_external_user": "isExternalUser",
}
def __init__(
self,
member_of_groups=None,
authentication=None,
is_external_user=None,
_configuration=None,
): # noqa: E501
"""UserAdditionalProperties - a model defined in Swagger""" # noqa: E501
if _configuration is None:
_configuration = Configuration()
self._configuration = _configuration
self._member_of_groups = None
self._authentication = None
self._is_external_user = None
self.discriminator = None
if member_of_groups is not None:
self.member_of_groups = member_of_groups
if authentication is not None:
self.authentication = authentication
if is_external_user is not None:
self.is_external_user = is_external_user
@property
def member_of_groups(self):
"""Gets the member_of_groups of this UserAdditionalProperties. # noqa: E501
List of role names # noqa: E501
:return: The member_of_groups of this UserAdditionalProperties. # noqa: E501
:rtype: list[str]
"""
return self._member_of_groups
@member_of_groups.setter
def member_of_groups(self, member_of_groups):
"""Sets the member_of_groups of this UserAdditionalProperties.
List of role names # noqa: E501
:param member_of_groups: The member_of_groups of this UserAdditionalProperties. # noqa: E501
:type: list[str]
"""
self._member_of_groups = member_of_groups
@property
def authentication(self):
"""Gets the authentication of this UserAdditionalProperties. # noqa: E501
user authentication # noqa: E501
:return: The authentication of this UserAdditionalProperties. # noqa: E501
:rtype: AuthenticationData
"""
return self._authentication
@authentication.setter
def authentication(self, authentication):
"""Sets the authentication of this UserAdditionalProperties.
user authentication # noqa: E501
:param authentication: The authentication of this UserAdditionalProperties. # noqa: E501
:type: AuthenticationData
"""
self._authentication = authentication
@property
def is_external_user(self):
"""Gets the is_external_user of this UserAdditionalProperties. # noqa: E501
:return: The is_external_user of this UserAdditionalProperties. # noqa: E501
:rtype: bool
"""
return self._is_external_user
@is_external_user.setter
def is_external_user(self, is_external_user):
"""Sets the is_external_user of this UserAdditionalProperties.
:param is_external_user: The is_external_user of this UserAdditionalProperties. # noqa: E501
:type: bool
"""
self._is_external_user = is_external_user
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(
map(lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value)
)
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(
map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict")
else item,
value.items(),
)
)
else:
result[attr] = value
if issubclass(UserAdditionalProperties, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, UserAdditionalProperties):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, UserAdditionalProperties):
return True
return self.to_dict() != other.to_dict()
| en | 0.687058 | # coding: utf-8 Control-M Services Provides access to BMC Control-M Services # noqa: E501 OpenAPI spec version: 9.20.215 Contact: <EMAIL> Generated by: https://github.com/swagger-api/swagger-codegen.git # noqa: F401 NOTE: This class is auto generated by the swagger code generator program. Do not edit the class manually. Attributes: swagger_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. # noqa: E501 UserAdditionalProperties - a model defined in Swagger # noqa: E501 Gets the member_of_groups of this UserAdditionalProperties. # noqa: E501 List of role names # noqa: E501 :return: The member_of_groups of this UserAdditionalProperties. # noqa: E501 :rtype: list[str] Sets the member_of_groups of this UserAdditionalProperties. List of role names # noqa: E501 :param member_of_groups: The member_of_groups of this UserAdditionalProperties. # noqa: E501 :type: list[str] Gets the authentication of this UserAdditionalProperties. # noqa: E501 user authentication # noqa: E501 :return: The authentication of this UserAdditionalProperties. # noqa: E501 :rtype: AuthenticationData Sets the authentication of this UserAdditionalProperties. user authentication # noqa: E501 :param authentication: The authentication of this UserAdditionalProperties. # noqa: E501 :type: AuthenticationData Gets the is_external_user of this UserAdditionalProperties. # noqa: E501 :return: The is_external_user of this UserAdditionalProperties. # noqa: E501 :rtype: bool Sets the is_external_user of this UserAdditionalProperties. :param is_external_user: The is_external_user of this UserAdditionalProperties. # noqa: E501 :type: bool Returns the model properties as a dict Returns the string representation of the model For `print` and `pprint` Returns true if both objects are equal Returns true if both objects are not equal | 1.695197 | 2 |
Tests/Methods/Mesh/Interpolation/test_interpolation.py | harshasunder-1/pyleecan | 2 | 8222 | <filename>Tests/Methods/Mesh/Interpolation/test_interpolation.py<gh_stars>1-10
# -*- coding: utf-8 -*-
import pytest
import numpy as np
from unittest import TestCase
from pyleecan.Classes.CellMat import CellMat
from pyleecan.Classes.MeshSolution import MeshSolution
from pyleecan.Classes.PointMat import PointMat
from pyleecan.Classes.MeshMat import MeshMat
from pyleecan.Classes.ScalarProductL2 import ScalarProductL2
from pyleecan.Classes.Interpolation import Interpolation
from pyleecan.Classes.RefSegmentP1 import RefSegmentP1
from pyleecan.Classes.FPGNSeg import FPGNSeg
@pytest.mark.MeshSol
class unittest_real_points(TestCase):
""" Tests for interpolation method"""
def test_line(self):
DELTA = 1e-10
mesh = MeshMat()
mesh.cell["line"] = CellMat(nb_pt_per_cell=2)
mesh.point = PointMat()
mesh.point.add_point(np.array([0, 0]))
mesh.point.add_point(np.array([1, 0]))
mesh.point.add_point(np.array([0, 1]))
mesh.point.add_point(np.array([2, 3]))
mesh.point.add_point(np.array([3, 3]))
mesh.add_cell(np.array([0, 1]), "line")
mesh.add_cell(np.array([0, 2]), "line")
mesh.add_cell(np.array([1, 2]), "line")
c_line = mesh.cell["line"]
c_line.interpolation = Interpolation()
c_line.interpolation.ref_cell = RefSegmentP1()
c_line.interpolation.scalar_product = ScalarProductL2()
c_line.interpolation.gauss_point = FPGNSeg()
meshsol = MeshSolution()
meshsol.mesh = [mesh]
vert = mesh.get_vertice(0)["line"]
test_pt = np.array([0.7, 0])
test_field = np.array([1, 1])
sol = [1]
func = c_line.interpolation.ref_cell.interpolation(test_pt, vert, test_field)
testA = np.sum(abs(func - sol))
msg = "Wrong result: returned " + str(func) + ", expected: " + str(test_field)
self.assertAlmostEqual(testA, 0, msg=msg, delta=DELTA)
vert = mesh.get_vertice(0)["line"]
test_pt = np.array([0.7, 0])
test_field = np.ones(
(2, 120, 3)
) # Simulate a 3D vector field for 120 time step
func = c_line.interpolation.ref_cell.interpolation(test_pt, vert, test_field)
sol = np.ones((120, 3))
testA = np.sum(abs(func - sol))
msg = "Wrong result: returned " + str(func) + ", expected: " + str(sol)
self.assertAlmostEqual(testA, 0, msg=msg, delta=DELTA)
vert = mesh.get_vertice(2)["line"]
test_pt = np.array([0.6, 0.4])
test_field = np.zeros((2, 120, 3))
test_field[0, :] = np.ones(
(1, 120, 3)
) # Simulate a 3D vector field for 120 time step
func = c_line.interpolation.ref_cell.interpolation(test_pt, vert, test_field)
sol = 0.6 * np.ones((120, 3))
testA = np.sum(abs(sol - func))
msg = "Wrong result: returned " + str(func) + ", expected: " + str(sol)
self.assertAlmostEqual(testA, 0, msg=msg, delta=DELTA)
vert = mesh.get_vertice(1)["line"]
test_pt = np.array([0, 0.4])
test_field = np.zeros((2, 120, 3))
test_field[1, :] = np.ones(
(1, 120, 3)
) # Simulate a 3D vector field for 120 time step
func = c_line.interpolation.ref_cell.interpolation(test_pt, vert, test_field)
sol = 0.4 * np.ones((120, 3))
testA = np.sum(abs(sol - func))
msg = "Wrong result: returned " + str(func) + ", expected: " + str(sol)
self.assertAlmostEqual(testA, 0, msg=msg, delta=DELTA)
| <filename>Tests/Methods/Mesh/Interpolation/test_interpolation.py<gh_stars>1-10
# -*- coding: utf-8 -*-
import pytest
import numpy as np
from unittest import TestCase
from pyleecan.Classes.CellMat import CellMat
from pyleecan.Classes.MeshSolution import MeshSolution
from pyleecan.Classes.PointMat import PointMat
from pyleecan.Classes.MeshMat import MeshMat
from pyleecan.Classes.ScalarProductL2 import ScalarProductL2
from pyleecan.Classes.Interpolation import Interpolation
from pyleecan.Classes.RefSegmentP1 import RefSegmentP1
from pyleecan.Classes.FPGNSeg import FPGNSeg
@pytest.mark.MeshSol
class unittest_real_points(TestCase):
""" Tests for interpolation method"""
def test_line(self):
DELTA = 1e-10
mesh = MeshMat()
mesh.cell["line"] = CellMat(nb_pt_per_cell=2)
mesh.point = PointMat()
mesh.point.add_point(np.array([0, 0]))
mesh.point.add_point(np.array([1, 0]))
mesh.point.add_point(np.array([0, 1]))
mesh.point.add_point(np.array([2, 3]))
mesh.point.add_point(np.array([3, 3]))
mesh.add_cell(np.array([0, 1]), "line")
mesh.add_cell(np.array([0, 2]), "line")
mesh.add_cell(np.array([1, 2]), "line")
c_line = mesh.cell["line"]
c_line.interpolation = Interpolation()
c_line.interpolation.ref_cell = RefSegmentP1()
c_line.interpolation.scalar_product = ScalarProductL2()
c_line.interpolation.gauss_point = FPGNSeg()
meshsol = MeshSolution()
meshsol.mesh = [mesh]
vert = mesh.get_vertice(0)["line"]
test_pt = np.array([0.7, 0])
test_field = np.array([1, 1])
sol = [1]
func = c_line.interpolation.ref_cell.interpolation(test_pt, vert, test_field)
testA = np.sum(abs(func - sol))
msg = "Wrong result: returned " + str(func) + ", expected: " + str(test_field)
self.assertAlmostEqual(testA, 0, msg=msg, delta=DELTA)
vert = mesh.get_vertice(0)["line"]
test_pt = np.array([0.7, 0])
test_field = np.ones(
(2, 120, 3)
) # Simulate a 3D vector field for 120 time step
func = c_line.interpolation.ref_cell.interpolation(test_pt, vert, test_field)
sol = np.ones((120, 3))
testA = np.sum(abs(func - sol))
msg = "Wrong result: returned " + str(func) + ", expected: " + str(sol)
self.assertAlmostEqual(testA, 0, msg=msg, delta=DELTA)
vert = mesh.get_vertice(2)["line"]
test_pt = np.array([0.6, 0.4])
test_field = np.zeros((2, 120, 3))
test_field[0, :] = np.ones(
(1, 120, 3)
) # Simulate a 3D vector field for 120 time step
func = c_line.interpolation.ref_cell.interpolation(test_pt, vert, test_field)
sol = 0.6 * np.ones((120, 3))
testA = np.sum(abs(sol - func))
msg = "Wrong result: returned " + str(func) + ", expected: " + str(sol)
self.assertAlmostEqual(testA, 0, msg=msg, delta=DELTA)
vert = mesh.get_vertice(1)["line"]
test_pt = np.array([0, 0.4])
test_field = np.zeros((2, 120, 3))
test_field[1, :] = np.ones(
(1, 120, 3)
) # Simulate a 3D vector field for 120 time step
func = c_line.interpolation.ref_cell.interpolation(test_pt, vert, test_field)
sol = 0.4 * np.ones((120, 3))
testA = np.sum(abs(sol - func))
msg = "Wrong result: returned " + str(func) + ", expected: " + str(sol)
self.assertAlmostEqual(testA, 0, msg=msg, delta=DELTA)
| en | 0.708302 | # -*- coding: utf-8 -*- Tests for interpolation method # Simulate a 3D vector field for 120 time step # Simulate a 3D vector field for 120 time step # Simulate a 3D vector field for 120 time step | 2.362566 | 2 |
lib/models.py | ecarg/grace | 7 | 8223 | # -*- coding: utf-8 -*-
"""
Pytorch models
__author__ = 'Jamie (<EMAIL>)'
__copyright__ = 'No copyright. Just copyleft!'
"""
# pylint: disable=no-member
# pylint: disable=invalid-name
###########
# imports #
###########
import torch
import torch.nn as nn
from embedder import Embedder
from pos_models import PosTagger, FnnTagger, CnnTagger # pylint: disable=unused-import
#############
# Ner Class #
#############
class Ner(nn.Module):
"""
named entity recognizer pytorch model
"""
def __init__(self, embedder, encoder, decoder):
"""
* embedder (Embedder)
[sentence_len, context_len] => [sentence_len, context_len, embed_dim]
* encoder (nn.Module)
[sentence_len, context_len, embed_dim] => [sentence_len, hidden_dim]
* decoder (nn.Module)
[sentence_len, hidden_dim] => [sentence_len, n_tags],
"""
super().__init__()
self.embedder = embedder
self.encoder = encoder
self.decoder = decoder
assert isinstance(embedder, Embedder)
assert isinstance(encoder, nn.Module)
assert isinstance(decoder, nn.Module)
def forward(self, sentence, gazet, pos, words): #pylint: disable=arguments-differ
# [sentence_len, context_len] => [sentence_len, context_len, embed_dim]
sentence_embed = self.embedder(sentence, gazet, pos, words)
# [sentence_len, context_len, embed_dim] => [sentence_len, hidden_dim]
hidden = self.encoder(sentence_embed)
# [sentence_len, hidden_dim] => [sentence_len, n_tags]
predicted_tags = self.decoder(hidden)
return predicted_tags
def save(self, path):
"""
모델을 저장하는 메소드
:param path: 경로
"""
if torch.cuda.is_available():
self.cpu()
torch.save(self, str(path))
if torch.cuda.is_available():
self.cuda()
@classmethod
def load(cls, path):
"""
저장된 모델을 로드하는 메소드
:param path: 경로
:return: 모델 클래스 객체
"""
model = torch.load(str(path))
if torch.cuda.is_available():
model.cuda()
return model
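# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original repo): one way to wire an
# already-built Embedder to the encoder/decoder classes defined further down
# in this module. `embed_dim` and the other defaults are assumptions and must
# match the embedder's per-position output dimension; the real wiring lives
# in the training scripts.
# ---------------------------------------------------------------------------
def build_ner_model(embedder, context_len=21, embed_dim=50, hidden_dim=500, n_tags=11):
    """
    Assemble a Ner model from an Embedder plus the Fnn5 encoder and FCDecoder
    defined below (names are resolved at call time, so the forward reference
    is harmless).
    """
    encoder = Fnn5(context_len=context_len, in_dim=embed_dim, hidden_dim=hidden_dim)
    decoder = FCDecoder(in_dim=encoder.out_dim, hidden_dim=hidden_dim, n_tags=n_tags)
    return Ner(embedder, encoder, decoder)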
#################
# Encoder Class #
#################
class Fnn5(nn.Module):
"""
2-Layer Full-Connected Neural Networks
"""
def __init__(self, context_len=21, in_dim=50, hidden_dim=500):
super(Fnn5, self).__init__()
self.context_len = context_len
self.hidden_dim = hidden_dim
self.out_dim = hidden_dim
self.net = nn.Sequential(
nn.Linear(context_len*in_dim, hidden_dim),
)
def forward(self, x):#pylint: disable=arguments-differ
"""
Args:
x: [sentence_len, context_len, in_dim]
Return:
x: [sentence_len, out_dim]
"""
sentence_len = x.size(0)
x = x.view(sentence_len, -1) # [sentence_len, context_len x in_dim]
        x = self.net(x) # [sentence_len, out_dim]
return x
class Cnn7(nn.Module):
"""
ConvNet kernels=[2,3,4,5] + Fully-Connected
"""
def __init__(self, in_dim=50, hidden_dim=500):
"""
"""
super(Cnn7, self).__init__()
self.in_dim = in_dim
self.hidden_dim = hidden_dim
self.out_dim = in_dim * 4
self.conv2 = nn.Sequential(
nn.Conv1d(in_dim, in_dim, kernel_size=2), # 20
nn.ReLU(),
nn.MaxPool1d(kernel_size=2, ceil_mode=True), # 10
nn.Conv1d(in_dim, in_dim, kernel_size=2), # 9
nn.ReLU(),
nn.MaxPool1d(kernel_size=2, ceil_mode=True), # 5
nn.Conv1d(in_dim, in_dim, kernel_size=2), # 4
nn.ReLU(),
nn.MaxPool1d(kernel_size=2, ceil_mode=True), # 2
nn.Conv1d(in_dim, in_dim, kernel_size=2), # 1
)
self.conv3 = nn.Sequential(
nn.Conv1d(in_dim, in_dim, kernel_size=3, padding=1), # 21
nn.ReLU(),
nn.MaxPool1d(kernel_size=2, ceil_mode=True), # 11
nn.Conv1d(in_dim, in_dim, kernel_size=3, padding=1), # 11
nn.ReLU(),
nn.MaxPool1d(kernel_size=2, ceil_mode=True), # 6
nn.Conv1d(in_dim, in_dim, kernel_size=3, padding=1), # 6
nn.ReLU(),
nn.MaxPool1d(kernel_size=2, ceil_mode=True), # 3
nn.Conv1d(in_dim, in_dim, kernel_size=3), # 1
)
self.conv4 = nn.Sequential(
nn.Conv1d(in_dim, in_dim, kernel_size=4, padding=1), # 20
nn.ReLU(),
nn.MaxPool1d(kernel_size=2, ceil_mode=True), # 10
nn.Conv1d(in_dim, in_dim, kernel_size=4, padding=1), # 9
nn.ReLU(),
nn.MaxPool1d(kernel_size=2, ceil_mode=True), # 5
nn.Conv1d(in_dim, in_dim, kernel_size=4, padding=1), # 4
nn.ReLU(),
nn.Conv1d(in_dim, in_dim, kernel_size=4), # 1
)
self.conv5 = nn.Sequential(
nn.Conv1d(in_dim, in_dim, kernel_size=5, padding=2), # 21
nn.ReLU(),
nn.MaxPool1d(kernel_size=2, ceil_mode=True), # 11
nn.Conv1d(in_dim, in_dim, kernel_size=5, padding=2), # 11
nn.ReLU(),
nn.MaxPool1d(kernel_size=2, ceil_mode=True), # 6
nn.Conv1d(in_dim, in_dim, kernel_size=5, padding=2), # 6
nn.ReLU(),
nn.MaxPool1d(kernel_size=2, ceil_mode=True), # 3
nn.Conv1d(in_dim, in_dim, kernel_size=5, padding=1), # 1
)
def forward(self, x): #pylint: disable=arguments-differ
"""
Args:
x: [sentence_length, context_len, in_dim]
Return:
x: [sentence_length, in_dim * 4]
"""
# [sentence_length, in_dim, context_len]
x = x.transpose(1, 2)
conv2 = self.conv2(x).squeeze(-1) # [sentence_len, in_dim]
conv3 = self.conv3(x).squeeze(-1) # [sentence_len, in_dim]
conv4 = self.conv4(x).squeeze(-1) # [sentence_len, in_dim]
conv5 = self.conv5(x).squeeze(-1) # [sentence_len, in_dim]
# [sentence_len, in_dim * 4]
out = torch.cat([conv2, conv3, conv4, conv5], dim=1)
return out
class Cnn8(nn.Module):
"""
9-layer Conv NN + Batch Norm + Residual
"""
def __init__(self, context_len=21, in_dim=64, hidden_dim=None):
super(Cnn8, self).__init__()
self.context_len = context_len
# conv block 64
self.conv_block1_1 = self.conv_block(in_dim, 2, False)
self.conv_block1_2_1 = self.conv_block(in_dim, 1, False)
self.conv_block1_2_2 = self.conv_block(in_dim, 1, True)
self.pool1 = nn.MaxPool1d(kernel_size=2, padding=1, ceil_mode=True)
# conv block 128
self.conv_block2_1 = self.conv_block(in_dim*2, 2, False)
self.conv_block2_2_1 = self.conv_block(in_dim*2, 1, False)
self.conv_block2_2_2 = self.conv_block(in_dim*2, 1, True)
self.pool2 = nn.MaxPool1d(kernel_size=2, padding=1, ceil_mode=True)
# conv block 256
self.conv_block3_1 = self.conv_block(in_dim*4, 2, False)
self.conv_block3_2_1 = self.conv_block(in_dim*4, 1, False)
self.conv_block3_2_2 = self.conv_block(in_dim*4, 1, True)
self.pool3 = nn.MaxPool1d(kernel_size=2)
# conv block 512
self.conv_block4_1 = self.conv_block(in_dim*8, 2, False)
self.conv_block4_2_1 = self.conv_block(in_dim*8, 1, False)
self.conv_block4_2_2 = self.conv_block(in_dim*8, 1, True)
self.pool4 = nn.MaxPool1d(kernel_size=3)
self.out_dim = in_dim*16
@classmethod
def conv_block(cls, in_dim=64, depth=2, double=True):
"""
Args:
[batch_size, dim, length]
Return:
[batch_size, dim*2, length] if double=True
[batch_size, dim, length] if double=False
"""
out_dim = in_dim
layers = []
for i in range(depth):
if double:
if i == depth - 1:
out_dim = in_dim * 2
layers.append(nn.Conv1d(in_dim, out_dim, kernel_size=3, padding=1))
layers.append(nn.BatchNorm1d(out_dim))
layers.append(nn.ReLU())
return nn.Sequential(*layers)
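    # For example, ``Cnn8.conv_block(64, depth=2, double=True)`` builds
    #   Conv1d(64, 64, 3, padding=1) -> BatchNorm1d(64) -> ReLU ->
    #   Conv1d(64, 128, 3, padding=1) -> BatchNorm1d(128) -> ReLU
    # so the sequence length is preserved and only the last conv doubles the
    # channel count; this is why forward() adds the residual only around the
    # non-doubling blocks.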
def forward(self, sentence):#pylint: disable=arguments-differ
"""
Args:
sentence: [sentence_len, context_len, embed_dim]
Return:
logit: [batch_size, out_dim]
"""
# [sentence_len, embed_dim, context_len]
x = sentence.transpose(1, 2)
# conv block 64
x = self.conv_block1_1(x) + x # [batch, in_dim, 21]
x = self.conv_block1_2_1(x) + x # [batch, in_dim, 21]
x = self.conv_block1_2_2(x) # [batch, in_dim*2, 21]
x = self.pool1(x) # [batch, in_dim*2, 11]
# conv block 128
x = self.conv_block2_1(x) + x # [batch, in_dim*2, 11]
x = self.conv_block2_2_1(x) + x # [batch, in_dim*2, 11]
x = self.conv_block2_2_2(x) # [batch, in_dim*4, 11]
x = self.pool2(x) # [batch, in_dim*4, 6]
# conv block 256
x = self.conv_block3_1(x) + x # [batch, in_dim*4, 6]
x = self.conv_block3_2_1(x) + x # [batch, in_dim*4, 6]
x = self.conv_block3_2_2(x) # [batch, in_dim*8, 6]
x = self.pool3(x) # [batch, in_dim*8, 3]
# conv block 512
x = self.conv_block4_1(x) + x # [batch, in_dim*8, 3]
x = self.conv_block4_2_1(x) + x # [batch, in_dim*8, 3]
x = self.conv_block4_2_2(x) # [batch, in_dim*16, 3]
x = self.pool4(x) # [batch_size, in_dim*16, 1]
x = x.squeeze(-1) # [batch, in_dim*16]
return x
class RnnEncoder(nn.Module):
"""
RNN Encoder Module
"""
def __init__(self, context_len=21, in_dim=1024, out_dim=1024,
num_layers=2, cell='gru'):
super(RnnEncoder, self).__init__()
self.hidden_dim = out_dim // 2
if cell == 'gru':
self.rnn = nn.GRU(
input_size=in_dim,
hidden_size=self.hidden_dim,
num_layers=num_layers,
dropout=0.5,
bidirectional=True)
if cell == 'lstm':
self.rnn = nn.LSTM(
input_size=in_dim,
hidden_size=self.hidden_dim,
num_layers=num_layers,
dropout=0.5,
bidirectional=True)
elif cell == 'sru':
from sru import SRU
self.rnn = SRU(
input_size=in_dim,
hidden_size=self.hidden_dim,
num_layers=num_layers,
dropout=0.5,
bidirectional=True)
def forward(self, x):#pylint: disable=arguments-differ
"""
Args:
x: [sentence_len, context_len, input_size]
Return:
            x: [sentence_len, out_dim]  (out_dim = hidden_dim * 2)
"""
# input (seq_len, batch, input_size)
# h_0 (num_layers * num_directions, batch, hidden_size)
# output (seq_len, batch, hidden_size * num_directions)
# h_n (num_layers * num_directions, batch, hidden_size)
# [sequence_len, context_len, input_size]
# =>[sentence_len, context_len, hidden_size x 2]
x, _ = self.rnn(x)
        # keep only the centre position of the (default 21-wide) context
        # window => [sentence_len, hidden_size x 2]
        x = x[:, 10, :]
return x
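# ---------------------------------------------------------------------------
# Minimal shape check for the encoders above (illustrative only, not used by
# the original code). Every encoder in this module maps
# [sentence_len, context_len, in_dim] -> [sentence_len, out_dim].
# ---------------------------------------------------------------------------
def _encoder_output_shape(encoder, sentence_len=4, context_len=21, in_dim=50):
    """
    Push random data through an encoder and return its output shape.
    `in_dim` must match the embedding size the encoder was built for, e.g.
        _encoder_output_shape(Fnn5())                     # (4, 500)
        _encoder_output_shape(Cnn7())                     # (4, 200)
        _encoder_output_shape(Cnn8(), in_dim=64)          # (4, 1024)
        _encoder_output_shape(RnnEncoder(), in_dim=1024)  # (4, 1024)
    """
    dummy = torch.randn(sentence_len, context_len, in_dim)
    return tuple(encoder(dummy).size())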
#################
# Decoder Class #
#################
class FCDecoder(nn.Module):
"""
Fully-Connected Decoder
"""
def __init__(self, in_dim, hidden_dim, n_tags):
super(FCDecoder, self).__init__()
self.net = nn.Sequential(
nn.ReLU(),
nn.Dropout(),
nn.Linear(in_dim, n_tags)
)
def forward(self, x):#pylint: disable=arguments-differ
"""
[sentence_len, in_dim] => [sentence_len, n_tags]
"""
return self.net(x)
class RnnDecoder(nn.Module):
"""
RNN-based Decoder
"""
def __init__(self, in_dim=1024, hidden_dim=512, n_tags=11,
num_layers=2, cell='gru'):
super(RnnDecoder, self).__init__()
if cell == 'gru':
self.rnn = nn.GRU(
input_size=in_dim,
hidden_size=hidden_dim,
num_layers=num_layers,
dropout=0.5,
bidirectional=True)
if cell == 'lstm':
self.rnn = nn.LSTM(
input_size=in_dim,
hidden_size=hidden_dim,
num_layers=num_layers,
dropout=0.5,
bidirectional=True)
elif cell == 'sru':
from sru import SRU
self.rnn = SRU(
input_size=in_dim,
hidden_size=hidden_dim,
num_layers=num_layers,
dropout=0.5,
bidirectional=True)
self.out = nn.Sequential(
nn.ReLU(),
nn.Dropout(),
nn.Linear(hidden_dim * 2, n_tags)
)
def forward(self, x):#pylint: disable=arguments-differ
"""
[sentence_len, in_dim] => [sentence_len, n_tags]
"""
# input (seq_len, batch, input_size)
# h_0 (num_layers * num_directions, batch, hidden_size)
# output (seq_len, batch, hidden_size * num_directions)
# h_n (num_layers * num_directions, batch, hidden_size)
# [sentence_len, batch=1, input_size]
x = x.unsqueeze(1)
# x: [sentence_len, batch=1, hidden_size x 2]
# h_n: [num_layers * 2, batch=1, hidden_size]
# c_n: [num_layers * 2, batch=1, hidden_size]
x, _ = self.rnn(x)
# [sequence_len, hidden_size x 2]
x = x.squeeze(1)
# [sequence_len, n_tags]
x = self.out(x)
return x
| # -*- coding: utf-8 -*-
"""
Pytorch models
__author__ = 'Jamie (<EMAIL>)'
__copyright__ = 'No copyright. Just copyleft!'
"""
# pylint: disable=no-member
# pylint: disable=invalid-name
###########
# imports #
###########
import torch
import torch.nn as nn
from embedder import Embedder
from pos_models import PosTagger, FnnTagger, CnnTagger # pylint: disable=unused-import
#############
# Ner Class #
#############
class Ner(nn.Module):
"""
named entity recognizer pytorch model
"""
def __init__(self, embedder, encoder, decoder):
"""
* embedder (Embedder)
[sentence_len, context_len] => [sentence_len, context_len, embed_dim]
* encoder (nn.Module)
[sentence_len, context_len, embed_dim] => [sentence_len, hidden_dim]
* decoder (nn.Module)
[sentence_len, hidden_dim] => [sentence_len, n_tags],
"""
super().__init__()
self.embedder = embedder
self.encoder = encoder
self.decoder = decoder
assert isinstance(embedder, Embedder)
assert isinstance(encoder, nn.Module)
assert isinstance(decoder, nn.Module)
def forward(self, sentence, gazet, pos, words): #pylint: disable=arguments-differ
# [sentence_len, context_len] => [sentence_len, context_len, embed_dim]
sentence_embed = self.embedder(sentence, gazet, pos, words)
# [sentence_len, context_len, embed_dim] => [sentence_len, hidden_dim]
hidden = self.encoder(sentence_embed)
# [sentence_len, hidden_dim] => [sentence_len, n_tags]
predicted_tags = self.decoder(hidden)
return predicted_tags
def save(self, path):
"""
모델을 저장하는 메소드
:param path: 경로
"""
if torch.cuda.is_available():
self.cpu()
torch.save(self, str(path))
if torch.cuda.is_available():
self.cuda()
@classmethod
def load(cls, path):
"""
저장된 모델을 로드하는 메소드
:param path: 경로
:return: 모델 클래스 객체
"""
model = torch.load(str(path))
if torch.cuda.is_available():
model.cuda()
return model
#################
# Encoder Class #
#################
class Fnn5(nn.Module):
"""
2-Layer Full-Connected Neural Networks
"""
def __init__(self, context_len=21, in_dim=50, hidden_dim=500):
super(Fnn5, self).__init__()
self.context_len = context_len
self.hidden_dim = hidden_dim
self.out_dim = hidden_dim
self.net = nn.Sequential(
nn.Linear(context_len*in_dim, hidden_dim),
)
def forward(self, x):#pylint: disable=arguments-differ
"""
Args:
x: [sentence_len, context_len, in_dim]
Return:
x: [sentence_len, out_dim]
"""
sentence_len = x.size(0)
x = x.view(sentence_len, -1) # [sentence_len, context_len x in_dim]
x = self.net(x) # [setence_len, out_dim]
return x
class Cnn7(nn.Module):
"""
ConvNet kernels=[2,3,4,5] + Fully-Connected
"""
def __init__(self, in_dim=50, hidden_dim=500):
"""
"""
super(Cnn7, self).__init__()
self.in_dim = in_dim
self.hidden_dim = hidden_dim
self.out_dim = in_dim * 4
self.conv2 = nn.Sequential(
nn.Conv1d(in_dim, in_dim, kernel_size=2), # 20
nn.ReLU(),
nn.MaxPool1d(kernel_size=2, ceil_mode=True), # 10
nn.Conv1d(in_dim, in_dim, kernel_size=2), # 9
nn.ReLU(),
nn.MaxPool1d(kernel_size=2, ceil_mode=True), # 5
nn.Conv1d(in_dim, in_dim, kernel_size=2), # 4
nn.ReLU(),
nn.MaxPool1d(kernel_size=2, ceil_mode=True), # 2
nn.Conv1d(in_dim, in_dim, kernel_size=2), # 1
)
self.conv3 = nn.Sequential(
nn.Conv1d(in_dim, in_dim, kernel_size=3, padding=1), # 21
nn.ReLU(),
nn.MaxPool1d(kernel_size=2, ceil_mode=True), # 11
nn.Conv1d(in_dim, in_dim, kernel_size=3, padding=1), # 11
nn.ReLU(),
nn.MaxPool1d(kernel_size=2, ceil_mode=True), # 6
nn.Conv1d(in_dim, in_dim, kernel_size=3, padding=1), # 6
nn.ReLU(),
nn.MaxPool1d(kernel_size=2, ceil_mode=True), # 3
nn.Conv1d(in_dim, in_dim, kernel_size=3), # 1
)
self.conv4 = nn.Sequential(
nn.Conv1d(in_dim, in_dim, kernel_size=4, padding=1), # 20
nn.ReLU(),
nn.MaxPool1d(kernel_size=2, ceil_mode=True), # 10
nn.Conv1d(in_dim, in_dim, kernel_size=4, padding=1), # 9
nn.ReLU(),
nn.MaxPool1d(kernel_size=2, ceil_mode=True), # 5
nn.Conv1d(in_dim, in_dim, kernel_size=4, padding=1), # 4
nn.ReLU(),
nn.Conv1d(in_dim, in_dim, kernel_size=4), # 1
)
self.conv5 = nn.Sequential(
nn.Conv1d(in_dim, in_dim, kernel_size=5, padding=2), # 21
nn.ReLU(),
nn.MaxPool1d(kernel_size=2, ceil_mode=True), # 11
nn.Conv1d(in_dim, in_dim, kernel_size=5, padding=2), # 11
nn.ReLU(),
nn.MaxPool1d(kernel_size=2, ceil_mode=True), # 6
nn.Conv1d(in_dim, in_dim, kernel_size=5, padding=2), # 6
nn.ReLU(),
nn.MaxPool1d(kernel_size=2, ceil_mode=True), # 3
nn.Conv1d(in_dim, in_dim, kernel_size=5, padding=1), # 1
)
def forward(self, x): #pylint: disable=arguments-differ
"""
Args:
x: [sentence_length, context_len, in_dim]
Return:
x: [sentence_length, in_dim * 4]
"""
# [sentence_length, in_dim, context_len]
x = x.transpose(1, 2)
conv2 = self.conv2(x).squeeze(-1) # [sentence_len, in_dim]
conv3 = self.conv3(x).squeeze(-1) # [sentence_len, in_dim]
conv4 = self.conv4(x).squeeze(-1) # [sentence_len, in_dim]
conv5 = self.conv5(x).squeeze(-1) # [sentence_len, in_dim]
# [sentence_len, in_dim * 4]
out = torch.cat([conv2, conv3, conv4, conv5], dim=1)
return out
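# Cnn7 concatenates the four kernel branches above, which is why its out_dim is
# declared as in_dim * 4 in __init__.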
class Cnn8(nn.Module):
"""
9-layer Conv NN + Batch Norm + Residual
"""
def __init__(self, context_len=21, in_dim=64, hidden_dim=None):
super(Cnn8, self).__init__()
self.context_len = context_len
# conv block 64
self.conv_block1_1 = self.conv_block(in_dim, 2, False)
self.conv_block1_2_1 = self.conv_block(in_dim, 1, False)
self.conv_block1_2_2 = self.conv_block(in_dim, 1, True)
self.pool1 = nn.MaxPool1d(kernel_size=2, padding=1, ceil_mode=True)
# conv block 128
self.conv_block2_1 = self.conv_block(in_dim*2, 2, False)
self.conv_block2_2_1 = self.conv_block(in_dim*2, 1, False)
self.conv_block2_2_2 = self.conv_block(in_dim*2, 1, True)
self.pool2 = nn.MaxPool1d(kernel_size=2, padding=1, ceil_mode=True)
# conv block 256
self.conv_block3_1 = self.conv_block(in_dim*4, 2, False)
self.conv_block3_2_1 = self.conv_block(in_dim*4, 1, False)
self.conv_block3_2_2 = self.conv_block(in_dim*4, 1, True)
self.pool3 = nn.MaxPool1d(kernel_size=2)
# conv block 512
self.conv_block4_1 = self.conv_block(in_dim*8, 2, False)
self.conv_block4_2_1 = self.conv_block(in_dim*8, 1, False)
self.conv_block4_2_2 = self.conv_block(in_dim*8, 1, True)
self.pool4 = nn.MaxPool1d(kernel_size=3)
self.out_dim = in_dim*16
@classmethod
def conv_block(cls, in_dim=64, depth=2, double=True):
"""
Args:
[batch_size, dim, length]
Return:
[batch_size, dim*2, length] if double=True
[batch_size, dim, length] if double=False
"""
out_dim = in_dim
layers = []
for i in range(depth):
if double:
if i == depth - 1:
out_dim = in_dim * 2
layers.append(nn.Conv1d(in_dim, out_dim, kernel_size=3, padding=1))
layers.append(nn.BatchNorm1d(out_dim))
layers.append(nn.ReLU())
return nn.Sequential(*layers)
def forward(self, sentence):#pylint: disable=arguments-differ
"""
Args:
sentence: [sentence_len, context_len, embed_dim]
Return:
logit: [batch_size, out_dim]
"""
# [sentence_len, embed_dim, context_len]
x = sentence.transpose(1, 2)
# conv block 64
x = self.conv_block1_1(x) + x # [batch, in_dim, 21]
x = self.conv_block1_2_1(x) + x # [batch, in_dim, 21]
x = self.conv_block1_2_2(x) # [batch, in_dim*2, 21]
x = self.pool1(x) # [batch, in_dim*2, 11]
# conv block 128
x = self.conv_block2_1(x) + x # [batch, in_dim*2, 11]
x = self.conv_block2_2_1(x) + x # [batch, in_dim*2, 11]
x = self.conv_block2_2_2(x) # [batch, in_dim*4, 11]
x = self.pool2(x) # [batch, in_dim*4, 6]
# conv block 256
x = self.conv_block3_1(x) + x # [batch, in_dim*4, 6]
x = self.conv_block3_2_1(x) + x # [batch, in_dim*4, 6]
x = self.conv_block3_2_2(x) # [batch, in_dim*8, 6]
x = self.pool3(x) # [batch, in_dim*8, 3]
# conv block 512
x = self.conv_block4_1(x) + x # [batch, in_dim*8, 3]
x = self.conv_block4_2_1(x) + x # [batch, in_dim*8, 3]
x = self.conv_block4_2_2(x) # [batch, in_dim*16, 3]
x = self.pool4(x) # [batch_size, in_dim*16, 1]
x = x.squeeze(-1) # [batch, in_dim*16]
return x
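# In Cnn8 the residual additions are only applied around blocks built with
# double=False (matching channel counts); the double=True block that widens the
# channels is applied without a skip connection before each pooling step.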
class RnnEncoder(nn.Module):
"""
RNN Encoder Module
"""
def __init__(self, context_len=21, in_dim=1024, out_dim=1024,
num_layers=2, cell='gru'):
super(RnnEncoder, self).__init__()
self.hidden_dim = out_dim // 2
if cell == 'gru':
self.rnn = nn.GRU(
input_size=in_dim,
hidden_size=self.hidden_dim,
num_layers=num_layers,
dropout=0.5,
bidirectional=True)
if cell == 'lstm':
self.rnn = nn.LSTM(
input_size=in_dim,
hidden_size=self.hidden_dim,
num_layers=num_layers,
dropout=0.5,
bidirectional=True)
elif cell == 'sru':
from sru import SRU
self.rnn = SRU(
input_size=in_dim,
hidden_size=self.hidden_dim,
num_layers=num_layers,
dropout=0.5,
bidirectional=True)
def forward(self, x):#pylint: disable=arguments-differ
"""
Args:
x: [sentence_len, context_len, input_size]
Return:
x: [sentence_len, hidden_size]
"""
# input (seq_len, batch, input_size)
# h_0 (num_layers * num_directions, batch, hidden_size)
# output (seq_len, batch, hidden_size * num_directions)
# h_n (num_layers * num_directions, batch, hidden_size)
# [sequence_len, context_len, input_size]
# =>[sentence_len, context_len, hidden_size x 2]
x, _ = self.rnn(x)
# [sequence_len, hidden_size x 2]
x = x[:, 10, :]
return x
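# The hard-coded index 10 above selects the centre position of the context window
# (context_len defaults to 21), i.e. the hidden state of the word being tagged.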
#################
# Decoder Class #
#################
class FCDecoder(nn.Module):
"""
Fully-Connected Decoder
"""
def __init__(self, in_dim, hidden_dim, n_tags):
super(FCDecoder, self).__init__()
self.net = nn.Sequential(
nn.ReLU(),
nn.Dropout(),
nn.Linear(in_dim, n_tags)
)
def forward(self, x):#pylint: disable=arguments-differ
"""
[sentence_len, in_dim] => [sentence_len, n_tags]
"""
return self.net(x)
class RnnDecoder(nn.Module):
"""
RNN-based Decoder
"""
def __init__(self, in_dim=1024, hidden_dim=512, n_tags=11,
num_layers=2, cell='gru'):
super(RnnDecoder, self).__init__()
if cell == 'gru':
self.rnn = nn.GRU(
input_size=in_dim,
hidden_size=hidden_dim,
num_layers=num_layers,
dropout=0.5,
bidirectional=True)
if cell == 'lstm':
self.rnn = nn.LSTM(
input_size=in_dim,
hidden_size=hidden_dim,
num_layers=num_layers,
dropout=0.5,
bidirectional=True)
elif cell == 'sru':
from sru import SRU
self.rnn = SRU(
input_size=in_dim,
hidden_size=hidden_dim,
num_layers=num_layers,
dropout=0.5,
bidirectional=True)
self.out = nn.Sequential(
nn.ReLU(),
nn.Dropout(),
nn.Linear(hidden_dim * 2, n_tags)
)
def forward(self, x):#pylint: disable=arguments-differ
"""
[sentence_len, in_dim] => [sentence_len, n_tags]
"""
# input (seq_len, batch, input_size)
# h_0 (num_layers * num_directions, batch, hidden_size)
# output (seq_len, batch, hidden_size * num_directions)
# h_n (num_layers * num_directions, batch, hidden_size)
# [sentence_len, batch=1, input_size]
x = x.unsqueeze(1)
# x: [sentence_len, batch=1, hidden_size x 2]
# h_n: [num_layers * 2, batch=1, hidden_size]
# c_n: [num_layers * 2, batch=1, hidden_size]
x, _ = self.rnn(x)
# [sequence_len, hidden_size x 2]
x = x.squeeze(1)
# [sequence_len, n_tags]
x = self.out(x)
return x
| en | 0.513864 | # -*- coding: utf-8 -*- Pytorch models __author__ = 'Jamie (<EMAIL>)' __copyright__ = 'No copyright. Just copyleft!' # pylint: disable=no-member # pylint: disable=invalid-name ########### # imports # ########### # pylint: disable=unused-import ############# # Ner Class # ############# named entity recognizer pytorch model * embedder (Embedder) [sentence_len, context_len] => [sentence_len, context_len, embed_dim] * encoder (nn.Module) [sentence_len, context_len, embed_dim] => [sentence_len, hidden_dim] * decoder (nn.Module) [sentence_len, hidden_dim] => [sentence_len, n_tags], #pylint: disable=arguments-differ # [sentence_len, context_len] => [sentence_len, context_len, embed_dim] # [sentence_len, context_len, embed_dim] => [sentence_len, hidden_dim] # [sentence_len, hidden_dim] => [sentence_len, n_tags] 모델을 저장하는 메소드 :param path: 경로 저장된 모델을 로드하는 메소드 :param path: 경로 :return: 모델 클래스 객체 ################# # Encoder Class # ################# 2-Layer Full-Connected Neural Networks #pylint: disable=arguments-differ Args: x: [sentence_len, context_len, in_dim] Return: x: [sentence_len, out_dim] # [sentence_len, context_len x in_dim] # [setence_len, out_dim] ConvNet kernels=[2,3,4,5] + Fully-Connected # 20 # 10 # 9 # 5 # 4 # 2 # 1 # 21 # 11 # 11 # 6 # 6 # 3 # 1 # 20 # 10 # 9 # 5 # 4 # 1 # 21 # 11 # 11 # 6 # 6 # 3 # 1 #pylint: disable=arguments-differ Args: x: [sentence_length, context_len, in_dim] Return: x: [sentence_length, in_dim * 4] # [sentence_length, in_dim, context_len] # [sentence_len, in_dim] # [sentence_len, in_dim] # [sentence_len, in_dim] # [sentence_len, in_dim] # [sentence_len, in_dim * 4] 9-layer Conv NN + Batch Norm + Residual # conv block 64 # conv block 128 # conv block 256 # conv block 512 Args: [batch_size, dim, length] Return: [batch_size, dim*2, length] if double=True [batch_size, dim, length] if double=False #pylint: disable=arguments-differ Args: sentence: [sentence_len, context_len, embed_dim] Return: logit: [batch_size, out_dim] # [sentence_len, embed_dim, context_len] # conv block 64 # [batch, in_dim, 21] # [batch, in_dim, 21] # [batch, in_dim*2, 21] # [batch, in_dim*2, 11] # conv block 128 # [batch, in_dim*2, 11] # [batch, in_dim*2, 11] # [batch, in_dim*4, 11] # [batch, in_dim*4, 6] # conv block 256 # [batch, in_dim*4, 6] # [batch, in_dim*4, 6] # [batch, in_dim*8, 6] # [batch, in_dim*8, 3] # conv block 512 # [batch, in_dim*8, 3] # [batch, in_dim*8, 3] # [batch, in_dim*16, 3] # [batch_size, in_dim*16, 1] # [batch, in_dim*16] RNN Encoder Module #pylint: disable=arguments-differ Args: x: [sentence_len, context_len, input_size] Return: x: [sentence_len, hidden_size] # input (seq_len, batch, input_size) # h_0 (num_layers * num_directions, batch, hidden_size) # output (seq_len, batch, hidden_size * num_directions) # h_n (num_layers * num_directions, batch, hidden_size) # [sequence_len, context_len, input_size] # =>[sentence_len, context_len, hidden_size x 2] # [sequence_len, hidden_size x 2] ################# # Decoder Class # ################# Fully-Connected Decoder #pylint: disable=arguments-differ [sentence_len, in_dim] => [sentence_len, n_tags] RNN-based Decoder #pylint: disable=arguments-differ [sentence_len, in_dim] => [sentence_len, n_tags] # input (seq_len, batch, input_size) # h_0 (num_layers * num_directions, batch, hidden_size) # output (seq_len, batch, hidden_size * num_directions) # h_n (num_layers * num_directions, batch, hidden_size) # [sentence_len, batch=1, input_size] # x: [sentence_len, batch=1, hidden_size x 2] # h_n: [num_layers * 2, 
batch=1, hidden_size] # c_n: [num_layers * 2, batch=1, hidden_size] # [sequence_len, hidden_size x 2] # [sequence_len, n_tags] | 2.722874 | 3 |
pyseqlogo/__init__.py | BioGeek/pyseqlogo | 24 | 8224 | <filename>pyseqlogo/__init__.py
# -*- coding: utf-8 -*-
"""Top-level package for pyseqlogo."""
__author__ = """<NAME>"""
__email__ = '<EMAIL>'
__version__ = '0.1.0'
from .pyseqlogo import draw_logo
from .pyseqlogo import setup_axis
| <filename>pyseqlogo/__init__.py
# -*- coding: utf-8 -*-
"""Top-level package for pyseqlogo."""
__author__ = """<NAME>"""
__email__ = '<EMAIL>'
__version__ = '0.1.0'
from .pyseqlogo import draw_logo
from .pyseqlogo import setup_axis
| en | 0.70263 | # -*- coding: utf-8 -*- Top-level package for pyseqlogo. <NAME> | 1.153367 | 1 |
setup.py | edulix/apscheduler | 0 | 8225 | # coding: utf-8
import os.path
try:
from setuptools import setup
extras = dict(zip_safe=False, test_suite='nose.collector', tests_require=['nose'])
except ImportError:
from distutils.core import setup
extras = {}
import apscheduler
here = os.path.dirname(__file__)
readme_path = os.path.join(here, 'README.rst')
readme = open(readme_path).read()
setup(
name='APScheduler',
version=apscheduler.release,
description='In-process task scheduler with Cron-like capabilities',
long_description=readme,
author='<NAME>',
author_email='<EMAIL>',
url='http://pypi.python.org/pypi/APScheduler/',
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3'
],
keywords='scheduling cron',
license='MIT',
packages=('apscheduler', 'apscheduler.jobstores', 'apscheduler.triggers', 'apscheduler.triggers.cron'),
    **extras
)
| # coding: utf-8
import os.path
try:
from setuptools import setup
extras = dict(zip_safe=False, test_suite='nose.collector', tests_require=['nose'])
except ImportError:
from distutils.core import setup
extras = {}
import apscheduler
here = os.path.dirname(__file__)
readme_path = os.path.join(here, 'README.rst')
readme = open(readme_path).read()
setup(
name='APScheduler',
version=apscheduler.release,
description='In-process task scheduler with Cron-like capabilities',
long_description=readme,
author='<NAME>',
author_email='<EMAIL>',
url='http://pypi.python.org/pypi/APScheduler/',
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3'
],
keywords='scheduling cron',
license='MIT',
packages=('apscheduler', 'apscheduler.jobstores', 'apscheduler.triggers', 'apscheduler.triggers.cron'),
    **extras
)
| en | 0.833554 | # coding: utf-8 | 1.352301 | 1 |
object_detection/exporter_test.py | travisyates81/object-detection | 1 | 8226 | <reponame>travisyates81/object-detection
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# <NAME>
"""Tests for object_detection.export_inference_graph."""
import os
import mock
import numpy as np
import tensorflow as tf
from object_detection import exporter
from object_detection.builders import model_builder
from object_detection.core import model
from object_detection.protos import pipeline_pb2
class FakeModel(model.DetectionModel):
def preprocess(self, inputs):
return (tf.identity(inputs) *
tf.get_variable('dummy', shape=(),
initializer=tf.constant_initializer(2),
dtype=tf.float32))
def predict(self, preprocessed_inputs):
return {'image': tf.identity(preprocessed_inputs)}
def postprocess(self, prediction_dict):
with tf.control_dependencies(prediction_dict.values()):
return {
'detection_boxes': tf.constant([[0.0, 0.0, 0.5, 0.5],
[0.5, 0.5, 0.8, 0.8]], tf.float32),
'detection_scores': tf.constant([[0.7, 0.6]], tf.float32),
'detection_classes': tf.constant([[0, 1]], tf.float32),
'num_detections': tf.constant([2], tf.float32)
}
def restore_fn(self, checkpoint_path, from_detection_checkpoint):
pass
def loss(self, prediction_dict):
pass
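# FakeModel stands in for a real DetectionModel so the tests below can exercise the
# export pipeline (graph construction, checkpointing and frozen-graph inference)
# without building an actual detector.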
class ExportInferenceGraphTest(tf.test.TestCase):
def _save_checkpoint_from_mock_model(self, checkpoint_path,
use_moving_averages):
g = tf.Graph()
with g.as_default():
mock_model = FakeModel(num_classes=1)
mock_model.preprocess(tf.constant([1, 3, 4, 3], tf.float32))
if use_moving_averages:
tf.train.ExponentialMovingAverage(0.0).apply()
saver = tf.train.Saver()
init = tf.global_variables_initializer()
with self.test_session() as sess:
sess.run(init)
saver.save(sess, checkpoint_path)
def _load_inference_graph(self, inference_graph_path):
od_graph = tf.Graph()
with od_graph.as_default():
od_graph_def = tf.GraphDef()
with tf.gfile.GFile(inference_graph_path) as fid:
serialized_graph = fid.read()
od_graph_def.ParseFromString(serialized_graph)
tf.import_graph_def(od_graph_def, name='')
return od_graph
def _create_tf_example(self, image_array):
with self.test_session():
encoded_image = tf.image.encode_jpeg(tf.constant(image_array)).eval()
def _bytes_feature(value):
return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
example = tf.train.Example(features=tf.train.Features(feature={
'image/encoded': _bytes_feature(encoded_image),
'image/format': _bytes_feature('jpg'),
'image/source_id': _bytes_feature('image_id')
})).SerializeToString()
return example
def test_export_graph_with_image_tensor_input(self):
with mock.patch.object(
model_builder, 'build', autospec=True) as mock_builder:
mock_builder.return_value = FakeModel(num_classes=1)
inference_graph_path = os.path.join(self.get_temp_dir(),
'exported_graph.pbtxt')
pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
pipeline_config.eval_config.use_moving_averages = False
exporter.export_inference_graph(
input_type='image_tensor',
pipeline_config=pipeline_config,
checkpoint_path=None,
inference_graph_path=inference_graph_path)
def test_export_graph_with_tf_example_input(self):
with mock.patch.object(
model_builder, 'build', autospec=True) as mock_builder:
mock_builder.return_value = FakeModel(num_classes=1)
inference_graph_path = os.path.join(self.get_temp_dir(),
'exported_graph.pbtxt')
pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
pipeline_config.eval_config.use_moving_averages = False
exporter.export_inference_graph(
input_type='tf_example',
pipeline_config=pipeline_config,
checkpoint_path=None,
inference_graph_path=inference_graph_path)
def test_export_frozen_graph(self):
checkpoint_path = os.path.join(self.get_temp_dir(), 'model-ckpt')
self._save_checkpoint_from_mock_model(checkpoint_path,
use_moving_averages=False)
inference_graph_path = os.path.join(self.get_temp_dir(),
'exported_graph.pb')
with mock.patch.object(
model_builder, 'build', autospec=True) as mock_builder:
mock_builder.return_value = FakeModel(num_classes=1)
pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
pipeline_config.eval_config.use_moving_averages = False
exporter.export_inference_graph(
input_type='image_tensor',
pipeline_config=pipeline_config,
checkpoint_path=checkpoint_path,
inference_graph_path=inference_graph_path)
def test_export_frozen_graph_with_moving_averages(self):
checkpoint_path = os.path.join(self.get_temp_dir(), 'model-ckpt')
self._save_checkpoint_from_mock_model(checkpoint_path,
use_moving_averages=True)
inference_graph_path = os.path.join(self.get_temp_dir(),
'exported_graph.pb')
with mock.patch.object(
model_builder, 'build', autospec=True) as mock_builder:
mock_builder.return_value = FakeModel(num_classes=1)
pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
pipeline_config.eval_config.use_moving_averages = True
exporter.export_inference_graph(
input_type='image_tensor',
pipeline_config=pipeline_config,
checkpoint_path=checkpoint_path,
inference_graph_path=inference_graph_path)
def test_export_and_run_inference_with_image_tensor(self):
checkpoint_path = os.path.join(self.get_temp_dir(), 'model-ckpt')
self._save_checkpoint_from_mock_model(checkpoint_path,
use_moving_averages=False)
inference_graph_path = os.path.join(self.get_temp_dir(),
'exported_graph.pb')
with mock.patch.object(
model_builder, 'build', autospec=True) as mock_builder:
mock_builder.return_value = FakeModel(num_classes=1)
pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
pipeline_config.eval_config.use_moving_averages = False
exporter.export_inference_graph(
input_type='image_tensor',
pipeline_config=pipeline_config,
checkpoint_path=checkpoint_path,
inference_graph_path=inference_graph_path)
inference_graph = self._load_inference_graph(inference_graph_path)
with self.test_session(graph=inference_graph) as sess:
image_tensor = inference_graph.get_tensor_by_name('image_tensor:0')
boxes = inference_graph.get_tensor_by_name('detection_boxes:0')
scores = inference_graph.get_tensor_by_name('detection_scores:0')
classes = inference_graph.get_tensor_by_name('detection_classes:0')
num_detections = inference_graph.get_tensor_by_name('num_detections:0')
(boxes, scores, classes, num_detections) = sess.run(
[boxes, scores, classes, num_detections],
feed_dict={image_tensor: np.ones((1, 4, 4, 3)).astype(np.uint8)})
self.assertAllClose(boxes, [[0.0, 0.0, 0.5, 0.5],
[0.5, 0.5, 0.8, 0.8]])
self.assertAllClose(scores, [[0.7, 0.6]])
self.assertAllClose(classes, [[1, 2]])
self.assertAllClose(num_detections, [2])
def test_export_and_run_inference_with_tf_example(self):
checkpoint_path = os.path.join(self.get_temp_dir(), 'model-ckpt')
self._save_checkpoint_from_mock_model(checkpoint_path,
use_moving_averages=False)
inference_graph_path = os.path.join(self.get_temp_dir(),
'exported_graph.pb')
with mock.patch.object(
model_builder, 'build', autospec=True) as mock_builder:
mock_builder.return_value = FakeModel(num_classes=1)
pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
pipeline_config.eval_config.use_moving_averages = False
exporter.export_inference_graph(
input_type='tf_example',
pipeline_config=pipeline_config,
checkpoint_path=checkpoint_path,
inference_graph_path=inference_graph_path)
inference_graph = self._load_inference_graph(inference_graph_path)
with self.test_session(graph=inference_graph) as sess:
tf_example = inference_graph.get_tensor_by_name('tf_example:0')
boxes = inference_graph.get_tensor_by_name('detection_boxes:0')
scores = inference_graph.get_tensor_by_name('detection_scores:0')
classes = inference_graph.get_tensor_by_name('detection_classes:0')
num_detections = inference_graph.get_tensor_by_name('num_detections:0')
(boxes, scores, classes, num_detections) = sess.run(
[boxes, scores, classes, num_detections],
feed_dict={tf_example: self._create_tf_example(
np.ones((4, 4, 3)).astype(np.uint8))})
self.assertAllClose(boxes, [[0.0, 0.0, 0.5, 0.5],
[0.5, 0.5, 0.8, 0.8]])
self.assertAllClose(scores, [[0.7, 0.6]])
self.assertAllClose(classes, [[1, 2]])
self.assertAllClose(num_detections, [2])
if __name__ == '__main__':
tf.test.main()
| # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# <NAME>
"""Tests for object_detection.export_inference_graph."""
import os
import mock
import numpy as np
import tensorflow as tf
from object_detection import exporter
from object_detection.builders import model_builder
from object_detection.core import model
from object_detection.protos import pipeline_pb2
class FakeModel(model.DetectionModel):
def preprocess(self, inputs):
return (tf.identity(inputs) *
tf.get_variable('dummy', shape=(),
initializer=tf.constant_initializer(2),
dtype=tf.float32))
def predict(self, preprocessed_inputs):
return {'image': tf.identity(preprocessed_inputs)}
def postprocess(self, prediction_dict):
with tf.control_dependencies(prediction_dict.values()):
return {
'detection_boxes': tf.constant([[0.0, 0.0, 0.5, 0.5],
[0.5, 0.5, 0.8, 0.8]], tf.float32),
'detection_scores': tf.constant([[0.7, 0.6]], tf.float32),
'detection_classes': tf.constant([[0, 1]], tf.float32),
'num_detections': tf.constant([2], tf.float32)
}
def restore_fn(self, checkpoint_path, from_detection_checkpoint):
pass
def loss(self, prediction_dict):
pass
class ExportInferenceGraphTest(tf.test.TestCase):
def _save_checkpoint_from_mock_model(self, checkpoint_path,
use_moving_averages):
g = tf.Graph()
with g.as_default():
mock_model = FakeModel(num_classes=1)
mock_model.preprocess(tf.constant([1, 3, 4, 3], tf.float32))
if use_moving_averages:
tf.train.ExponentialMovingAverage(0.0).apply()
saver = tf.train.Saver()
init = tf.global_variables_initializer()
with self.test_session() as sess:
sess.run(init)
saver.save(sess, checkpoint_path)
def _load_inference_graph(self, inference_graph_path):
od_graph = tf.Graph()
with od_graph.as_default():
od_graph_def = tf.GraphDef()
with tf.gfile.GFile(inference_graph_path) as fid:
serialized_graph = fid.read()
od_graph_def.ParseFromString(serialized_graph)
tf.import_graph_def(od_graph_def, name='')
return od_graph
def _create_tf_example(self, image_array):
with self.test_session():
encoded_image = tf.image.encode_jpeg(tf.constant(image_array)).eval()
def _bytes_feature(value):
return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
example = tf.train.Example(features=tf.train.Features(feature={
'image/encoded': _bytes_feature(encoded_image),
'image/format': _bytes_feature('jpg'),
'image/source_id': _bytes_feature('image_id')
})).SerializeToString()
return example
def test_export_graph_with_image_tensor_input(self):
with mock.patch.object(
model_builder, 'build', autospec=True) as mock_builder:
mock_builder.return_value = FakeModel(num_classes=1)
inference_graph_path = os.path.join(self.get_temp_dir(),
'exported_graph.pbtxt')
pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
pipeline_config.eval_config.use_moving_averages = False
exporter.export_inference_graph(
input_type='image_tensor',
pipeline_config=pipeline_config,
checkpoint_path=None,
inference_graph_path=inference_graph_path)
def test_export_graph_with_tf_example_input(self):
with mock.patch.object(
model_builder, 'build', autospec=True) as mock_builder:
mock_builder.return_value = FakeModel(num_classes=1)
inference_graph_path = os.path.join(self.get_temp_dir(),
'exported_graph.pbtxt')
pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
pipeline_config.eval_config.use_moving_averages = False
exporter.export_inference_graph(
input_type='tf_example',
pipeline_config=pipeline_config,
checkpoint_path=None,
inference_graph_path=inference_graph_path)
def test_export_frozen_graph(self):
checkpoint_path = os.path.join(self.get_temp_dir(), 'model-ckpt')
self._save_checkpoint_from_mock_model(checkpoint_path,
use_moving_averages=False)
inference_graph_path = os.path.join(self.get_temp_dir(),
'exported_graph.pb')
with mock.patch.object(
model_builder, 'build', autospec=True) as mock_builder:
mock_builder.return_value = FakeModel(num_classes=1)
pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
pipeline_config.eval_config.use_moving_averages = False
exporter.export_inference_graph(
input_type='image_tensor',
pipeline_config=pipeline_config,
checkpoint_path=checkpoint_path,
inference_graph_path=inference_graph_path)
def test_export_frozen_graph_with_moving_averages(self):
checkpoint_path = os.path.join(self.get_temp_dir(), 'model-ckpt')
self._save_checkpoint_from_mock_model(checkpoint_path,
use_moving_averages=True)
inference_graph_path = os.path.join(self.get_temp_dir(),
'exported_graph.pb')
with mock.patch.object(
model_builder, 'build', autospec=True) as mock_builder:
mock_builder.return_value = FakeModel(num_classes=1)
pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
pipeline_config.eval_config.use_moving_averages = True
exporter.export_inference_graph(
input_type='image_tensor',
pipeline_config=pipeline_config,
checkpoint_path=checkpoint_path,
inference_graph_path=inference_graph_path)
def test_export_and_run_inference_with_image_tensor(self):
checkpoint_path = os.path.join(self.get_temp_dir(), 'model-ckpt')
self._save_checkpoint_from_mock_model(checkpoint_path,
use_moving_averages=False)
inference_graph_path = os.path.join(self.get_temp_dir(),
'exported_graph.pb')
with mock.patch.object(
model_builder, 'build', autospec=True) as mock_builder:
mock_builder.return_value = FakeModel(num_classes=1)
pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
pipeline_config.eval_config.use_moving_averages = False
exporter.export_inference_graph(
input_type='image_tensor',
pipeline_config=pipeline_config,
checkpoint_path=checkpoint_path,
inference_graph_path=inference_graph_path)
inference_graph = self._load_inference_graph(inference_graph_path)
with self.test_session(graph=inference_graph) as sess:
image_tensor = inference_graph.get_tensor_by_name('image_tensor:0')
boxes = inference_graph.get_tensor_by_name('detection_boxes:0')
scores = inference_graph.get_tensor_by_name('detection_scores:0')
classes = inference_graph.get_tensor_by_name('detection_classes:0')
num_detections = inference_graph.get_tensor_by_name('num_detections:0')
(boxes, scores, classes, num_detections) = sess.run(
[boxes, scores, classes, num_detections],
feed_dict={image_tensor: np.ones((1, 4, 4, 3)).astype(np.uint8)})
self.assertAllClose(boxes, [[0.0, 0.0, 0.5, 0.5],
[0.5, 0.5, 0.8, 0.8]])
self.assertAllClose(scores, [[0.7, 0.6]])
self.assertAllClose(classes, [[1, 2]])
self.assertAllClose(num_detections, [2])
def test_export_and_run_inference_with_tf_example(self):
checkpoint_path = os.path.join(self.get_temp_dir(), 'model-ckpt')
self._save_checkpoint_from_mock_model(checkpoint_path,
use_moving_averages=False)
inference_graph_path = os.path.join(self.get_temp_dir(),
'exported_graph.pb')
with mock.patch.object(
model_builder, 'build', autospec=True) as mock_builder:
mock_builder.return_value = FakeModel(num_classes=1)
pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
pipeline_config.eval_config.use_moving_averages = False
exporter.export_inference_graph(
input_type='tf_example',
pipeline_config=pipeline_config,
checkpoint_path=checkpoint_path,
inference_graph_path=inference_graph_path)
inference_graph = self._load_inference_graph(inference_graph_path)
with self.test_session(graph=inference_graph) as sess:
tf_example = inference_graph.get_tensor_by_name('tf_example:0')
boxes = inference_graph.get_tensor_by_name('detection_boxes:0')
scores = inference_graph.get_tensor_by_name('detection_scores:0')
classes = inference_graph.get_tensor_by_name('detection_classes:0')
num_detections = inference_graph.get_tensor_by_name('num_detections:0')
(boxes, scores, classes, num_detections) = sess.run(
[boxes, scores, classes, num_detections],
feed_dict={tf_example: self._create_tf_example(
np.ones((4, 4, 3)).astype(np.uint8))})
self.assertAllClose(boxes, [[0.0, 0.0, 0.5, 0.5],
[0.5, 0.5, 0.8, 0.8]])
self.assertAllClose(scores, [[0.7, 0.6]])
self.assertAllClose(classes, [[1, 2]])
self.assertAllClose(num_detections, [2])
if __name__ == '__main__':
tf.test.main() | en | 0.747133 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # <NAME> Tests for object_detection.export_inference_graph. | 2.177202 | 2 |
run.py | matthewyoung28/macmentum | 0 | 8227 | import os
import sys
import random
def get_next_wallpaper(curr_path):
    lst_dir = os.listdir(curr_path)  # list the wallpaper directory, not the current working directory
rand_index = random.randint(0, len(lst_dir) - 1)
return lst_dir[rand_index]
def get_wall_dir():
return "/Users/MYOUNG/Pictures/mmt"
def main():
script = "osascript -e 'tell application \"Finder\" to set desktop picture to POSIX file '"
path = get_wall_dir()
file = get_next_wallpaper(path)
# print("FILE = ", file)
script = script + path + "/" + file
# print("SCRIPT = ", script)
os.system(script)
main()
| import os
import sys
import random
def get_next_wallpaper(curr_path):
    lst_dir = os.listdir(curr_path)  # list the wallpaper directory, not the current working directory
rand_index = random.randint(0, len(lst_dir) - 1)
return lst_dir[rand_index]
def get_wall_dir():
return "/Users/MYOUNG/Pictures/mmt"
def main():
script = "osascript -e 'tell application \"Finder\" to set desktop picture to POSIX file '"
path = get_wall_dir()
file = get_next_wallpaper(path)
# print("FILE = ", file)
script = script + path + "/" + file
# print("SCRIPT = ", script)
os.system(script)
main()
| en | 0.169379 | # print("FILE = ", file) # print("SCRIPT = ", script) | 2.771834 | 3 |
noxfile.py | dolfno/mlops_demo | 0 | 8228 | """Automated CI tools to run with Nox"""
import nox
from nox import Session
locations = "src", "noxfile.py", "docs/conf.py"
nox.options.sessions = "lint", "tests"
@nox.session(python="3.9")
def tests(session: Session) -> None:
"""Run tests with nox"""
session.run("poetry", "install", external=True)
session.run("pytest", "--cov")
@nox.session(python="3.9")
def lint(session: Session) -> None:
"""Run linting with nox"""
session.install(
"flake8",
"flake8-annotations",
"flake8-bandit",
"flake8-black",
"flake8-bugbear",
"flake8-docstrings",
"flake8-import-order",
)
args = session.posargs or locations
session.run("flake8", *args)
@nox.session(python="3.9")
def black(session: Session) -> None:
"""Run black with nox"""
session.install("black")
args = session.posargs or locations
session.run("black", *args, "--line-length=120")
@nox.session(python="3.9")
def pytype(session: Session) -> None:
"""Run the static type checker."""
args = session.posargs or ["--disable=import-error", *locations]
session.install("pytype")
session.run("pytype", *args)
package = "hypermodern_python"
@nox.session(python=["3.9"])
def typeguard(session: Session) -> None:
"""Run typeguard for type checking with nox"""
args = session.posargs or ["-m", "not e2e"]
session.run("poetry", "install", "--no-dev", external=True)
session.install("pytest", "pytest-mock", "typeguard")
session.run("pytest", f"--typeguard-packages={package}", *args)
@nox.session(python="3.9")
def docs(session: Session) -> None:
"""Build the documentation."""
session.run("poetry", "install", "--no-dev", external=True)
session.install("sphinx", "sphinx-autodoc-typehints")
session.run("sphinx-build", "docs", "docs/_build")
@nox.session(python="3.9")
def coverage(session: Session) -> None:
"""Upload coverage data."""
session.install("coverage[toml]", "codecov")
session.run("coverage", "xml", "--fail-under=0")
session.run("codecov", *session.posargs)
| """Automated CI tools to run with Nox"""
import nox
from nox import Session
locations = "src", "noxfile.py", "docs/conf.py"
nox.options.sessions = "lint", "tests"
@nox.session(python="3.9")
def tests(session: Session) -> None:
"""Run tests with nox"""
session.run("poetry", "install", external=True)
session.run("pytest", "--cov")
@nox.session(python="3.9")
def lint(session: Session) -> None:
"""Run linting with nox"""
session.install(
"flake8",
"flake8-annotations",
"flake8-bandit",
"flake8-black",
"flake8-bugbear",
"flake8-docstrings",
"flake8-import-order",
)
args = session.posargs or locations
session.run("flake8", *args)
@nox.session(python="3.9")
def black(session: Session) -> None:
"""Run black with nox"""
session.install("black")
args = session.posargs or locations
session.run("black", *args, "--line-length=120")
@nox.session(python="3.9")
def pytype(session: Session) -> None:
"""Run the static type checker."""
args = session.posargs or ["--disable=import-error", *locations]
session.install("pytype")
session.run("pytype", *args)
package = "hypermodern_python"
@nox.session(python=["3.9"])
def typeguard(session: Session) -> None:
"""Run typeguard for type checking with nox"""
args = session.posargs or ["-m", "not e2e"]
session.run("poetry", "install", "--no-dev", external=True)
session.install("pytest", "pytest-mock", "typeguard")
session.run("pytest", f"--typeguard-packages={package}", *args)
@nox.session(python="3.9")
def docs(session: Session) -> None:
"""Build the documentation."""
session.run("poetry", "install", "--no-dev", external=True)
session.install("sphinx", "sphinx-autodoc-typehints")
session.run("sphinx-build", "docs", "docs/_build")
@nox.session(python="3.9")
def coverage(session: Session) -> None:
"""Upload coverage data."""
session.install("coverage[toml]", "codecov")
session.run("coverage", "xml", "--fail-under=0")
session.run("codecov", *session.posargs)
| en | 0.79862 | Automated CI tools to run with Nox Run tests with nox Run linting with nox Run black with nox Run the static type checker. Run typeguard for type checking with nox Build the documentation. Upload coverage data. | 2.380755 | 2 |
cocotb_test/run.py | canerbulduk/cocotb-test | 0 | 8229 |
import cocotb_test.simulator
# For partial back compatibility
def run(simulator=None, **kwargs):
if simulator:
sim = simulator(**kwargs)
sim.run()
else:
cocotb_test.simulator.run(**kwargs)
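# Example (assumed call site): run(verilog_sources=["dut.v"], toplevel="dut", module="test_dut")
# either instantiates the given simulator class or forwards the keyword arguments
# to cocotb_test.simulator.run().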
|
import cocotb_test.simulator
# For partial back compatibility
def run(simulator=None, **kwargs):
if simulator:
sim = simulator(**kwargs)
sim.run()
else:
cocotb_test.simulator.run(**kwargs)
| en | 0.606338 | # For partial back compatibility | 1.679216 | 2 |
kanban_backend/project_management/apps.py | hamzabouissi/kanban_backend | 0 | 8230 | <filename>kanban_backend/project_management/apps.py
from django.apps import AppConfig
class ProjectManagementConfig(AppConfig):
name = 'kanban_backend.project_management'
def ready(self):
try:
import kanban_backend.users.signals # noqa F401
except ImportError:
pass
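# ready() is Django's per-app start-up hook; importing the signals module there
# registers its signal handlers as a side effect, and the ImportError guard keeps
# the app usable when that module is absent.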
| <filename>kanban_backend/project_management/apps.py
from django.apps import AppConfig
class ProjectManagementConfig(AppConfig):
name = 'kanban_backend.project_management'
def ready(self):
try:
import kanban_backend.users.signals # noqa F401
except ImportError:
pass
| uz | 0.378174 | # noqa F401 | 1.217828 | 1 |
src/framework/tracing.py | davidhozic/Discord-Shiller | 12 | 8231 | <reponame>davidhozic/Discord-Shiller
"""
~ Tracing ~
    This module contains functions and classes
    related to the console debug log / trace.
"""
from enum import Enum, auto
import time
__all__ = (
"TraceLEVELS",
"trace"
)
m_use_debug = None
class TraceLEVELS(Enum):
"""
Info: Level of trace for debug
"""
NORMAL = 0
WARNING = auto()
ERROR = auto()
def trace(message: str,
level: TraceLEVELS = TraceLEVELS.NORMAL):
""""
Name : trace
Param:
- message : str = Trace message
- level : TraceLEVELS = Level of the trace
"""
if m_use_debug:
timestruct = time.localtime()
timestamp = "Date: {:02d}.{:02d}.{:04d} Time:{:02d}:{:02d}"
timestamp = timestamp.format(timestruct.tm_mday,
timestruct.tm_mon,
timestruct.tm_year,
timestruct.tm_hour,
timestruct.tm_min)
l_trace = f"{timestamp}\nTrace level: {level.name}\nMessage: {message}\n"
print(l_trace)
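# Example: trace("Failed to send message", TraceLEVELS.ERROR) prints a timestamped
# entry once m_use_debug has been enabled elsewhere in the framework.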
| """
~ Tracing ~
    This module contains functions and classes
    related to the console debug log / trace.
"""
from enum import Enum, auto
import time
__all__ = (
"TraceLEVELS",
"trace"
)
m_use_debug = None
class TraceLEVELS(Enum):
"""
Info: Level of trace for debug
"""
NORMAL = 0
WARNING = auto()
ERROR = auto()
def trace(message: str,
level: TraceLEVELS = TraceLEVELS.NORMAL):
""""
Name : trace
Param:
- message : str = Trace message
- level : TraceLEVELS = Level of the trace
"""
if m_use_debug:
timestruct = time.localtime()
timestamp = "Date: {:02d}.{:02d}.{:04d} Time:{:02d}:{:02d}"
timestamp = timestamp.format(timestruct.tm_mday,
timestruct.tm_mon,
timestruct.tm_year,
timestruct.tm_hour,
timestruct.tm_min)
l_trace = f"{timestamp}\nTrace level: {level.name}\nMessage: {message}\n"
print(l_trace) | en | 0.516125 | ~ Tracing ~ This modules containes functions and classes related to the console debug long or trace. Info: Level of trace for debug " Name : trace Param: - message : str = Trace message - level : TraceLEVELS = Level of the trace | 2.901532 | 3 |
sunkit_image/__init__.py | jeffreypaul15/sunkit-image | 0 | 8232 | <gh_stars>0
"""
sunkit-image
============
An image processing toolbox for Solar Physics.
* Homepage: https://sunpy.org
* Documentation: https://sunkit-image.readthedocs.io/en/latest/
"""
import sys
from .version import version as __version__ # NOQA
# Enforce Python version check during package import.
__minimum_python_version__ = "3.7"
class UnsupportedPythonError(Exception):
"""
Running on an unsupported version of Python.
"""
if sys.version_info < tuple(int(val) for val in __minimum_python_version__.split(".")):
    # This has to be .format to keep backwards compatibility.
raise UnsupportedPythonError(
"sunkit_image does not support Python < {}".format(__minimum_python_version__)
)
__all__ = []
| """
sunkit-image
============
An image processing toolbox for Solar Physics.
* Homepage: https://sunpy.org
* Documentation: https://sunkit-image.readthedocs.io/en/latest/
"""
import sys
from .version import version as __version__ # NOQA
# Enforce Python version check during package import.
__minimum_python_version__ = "3.7"
class UnsupportedPythonError(Exception):
"""
Running on an unsupported version of Python.
"""
if sys.version_info < tuple(int(val) for val in __minimum_python_version__.split(".")):
    # This has to be .format to keep backwards compatibility.
raise UnsupportedPythonError(
"sunkit_image does not support Python < {}".format(__minimum_python_version__)
)
__all__ = [] | en | 0.748881 | sunkit-image ============ A image processing toolbox for Solar Physics. * Homepage: https://sunpy.org * Documentation: https://sunkit-image.readthedocs.io/en/latest/ # NOQA # Enforce Python version check during package import. Running on an unsupported version of Python. # This has to be .format to keep backwards compatibly. | 2.548037 | 3 |
app/view.py | lucasblazzi/stocker | 0 | 8233 | <gh_stars>0
import plotly.graph_objects as go
import plotly.express as px
import pandas as pd
class View:
def __init__(self, st):
self.st = st
self.st.set_page_config(layout='wide')
self.side_bar = st.sidebar
def show_message(self, location, _type, message):
if location == "sb":
component = self.side_bar
else:
component = self.st
if _type == "success":
component.success(message)
elif _type == "error":
component.error(message)
elif _type == "warning":
component.warning(message)
elif _type == "info":
component.info(message)
def login(self):
_user = self.side_bar.text_input("Username:")
_pass = self.side_bar.text_input("Password", type="password")
return _user, _pass
def advisor_setup(self):
option = self.side_bar.selectbox("Options:", ("Research", ))
if option == "Research":
self.st.header("Advisor Research Area")
self.st.markdown("___")
return option
def research_area(self):
execute = False
args = {"price": {"enabled": False}, "sector": {"enabled": False}, "news": {"enabled": False},
"company_info": {"enabled": False}, "volatility": {"enabled": False}, "return": {"enabled": False},
"raw_price": {"enabled": False}, "volume": {"enabled": False}}
self.st.markdown("___")
check_cols = self.st.beta_columns(4)
args["price"]["enabled"] = check_cols[0].checkbox("Price")
args["company_info"]["enabled"] = check_cols[1].checkbox("Company Information")
args["sector"]["enabled"] = check_cols[2].checkbox("Sector Distribution")
args["news"]["enabled"] = check_cols[3].checkbox("News")
if args["price"]["enabled"]:
self.st.markdown("___")
self.st.subheader("Price Insights")
price_cols = self.st.beta_columns(7)
args["price"]["_type"] = price_cols[0].selectbox("Price type:", ("close", "open", "high", "low"))
args["price"]["period"] = price_cols[1].selectbox("Period:", ("ytd", "1m", "6m", "1y", "2y", "5y", "max"))
args["raw_price"]["enabled"] = price_cols[3].checkbox("Raw Price")
args["volume"]["enabled"] = price_cols[4].checkbox("Volume")
args["return"]["enabled"] = price_cols[5].checkbox("Return")
args["volatility"]["enabled"] = price_cols[6].checkbox("Volatility")
return execute, args
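    # research_area() only collects the analyst's checkbox/selectbox choices into
    # `args` (note that `execute` stays False here); the caller is expected to use
    # these flags to decide which panels (price, company info, sector, news) to render.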
def show_cryptos(self, cryptos):
for crypto in cryptos:
cols = self.st.beta_columns(3)
cols[0].markdown(f"**Symbol: ** {crypto.get('symbol', '-')}")
cols[1].markdown(f"**Name: ** {crypto.get('name', '-')}")
cols[2].markdown(f"**Price: ** {crypto.get('price', '-')}")
def crypto_form(self):
self.st.markdown("<br><br>", unsafe_allow_html=True)
self.st.markdown("___")
_input = self.st.text_input("Cryptocurrency")
return _input
def sector_distribution(self, sectors):
self.st.subheader("Sector Distribution")
r = sectors['sector'].value_counts()
fig = go.Figure(data=[go.Pie(labels=r.index, values=r)])
fig.update_layout(
width=400, height=400,
)
self.st.plotly_chart(fig)
def plot_price(self, prices, _type):
self.st.subheader(_type.capitalize())
fig = go.Figure()
for price in prices:
name = price["symbol"][0]
fig.add_trace(go.Scatter(x=price.index, y=price[_type],
mode='lines',
name=name))
fig.update_layout(
template="plotly_white",
width=1400, height=500,
hovermode="x unified",
plot_bgcolor='rgba(0,0,0,0)'
)
self.st.plotly_chart(fig)
def show_companies(self, companies):
self.st.markdown("___")
self.st.subheader("Company Information")
self.st.markdown("<br>", unsafe_allow_html=True)
for company in companies:
basic = self.st.beta_columns(4)
basic[0].markdown(f"## **{company.get('name', ' ')} ({company.get('symbol', ' ')})**")
if company.get("logo"):
basic[3].image(company.get("logo"), width=50)
basic[3].markdown("<br>", unsafe_allow_html=True)
desc = self.st.beta_columns(2)
if company.get('sector'):
desc[0].markdown(f"**Sector: ** {company.get('sector', '-')}")
if company.get('industry'):
desc[1].markdown(f"**Industry: ** {company.get('industry', '-')}")
if company.get('description'):
desc[0].markdown(f"**Description: ** {company.get('description', '-')}")
info = self.st.beta_columns(2)
if company.get('CEO'):
info[0].markdown(f"**CEO: ** {company.get('CEO', '-')}")
if company.get('employees'):
info[1].markdown(f"**Employees: ** {company.get('employees', '-')}")
if company.get('website'):
info[0].markdown(f"**Website: ** {company.get('website', '-')}")
if company.get('city') or company.get('state') or company.get('country'):
info[1].markdown(f"**Location: ** {company.get('city', ' ')} - {company.get('state', ' ')} - {company.get('country', ' ')}")
self.st.markdown("___")
def show_news(self, news, title="Company News"):
self.st.markdown("___")
self.st.subheader(title)
self.st.markdown("<br>", unsafe_allow_html=True)
for n in news:
if n.get('symbol') or n.get('title') or n.get('date'):
self.st.markdown(f"**{n.get('symbol', ' ')} - {n.get('title', ' ')} [{n.get('date', ' ')}]**")
if n.get('source'):
self.st.markdown(f"**Source: ** {n.get('source', '-')}")
if n.get("image"):
self.st.image(n.get("image"), width=300)
if n.get("description"):
self.st.markdown(f"**Description: ** {n.get('description', '-')}")
if n.get("url"):
self.st.markdown(f"**Access on: ** {n.get('url', '-')}")
self.st.markdown("<br>", unsafe_allow_html=True)
def list_advisors(self, advisors):
for advisor in advisors:
cols = self.st.beta_columns(3)
cols[0].markdown(f"**Name: ** {advisor[0]}")
cols[1].markdown(f"**CPF: ** {advisor[1]}")
cols[2].markdown(f"**CVM: ** {advisor[2]}")
def symbol_input(self, symbols):
selected_symbols = self.st.multiselect("Stocks list:", symbols)
return selected_symbols
def admin_setup(self):
option = self.side_bar.selectbox("Option:", ("Data Loader", "Advisors", "Ad-Hoc"))
execute = False
arg = None
self.st.title("Stocker Administration Area")
self.st.markdown("___")
if option == "Data Loader":
arg = dict()
self.st.header("Stocker Data Loader")
arg["symbols"] = self.st.selectbox("Stocks Option:", ("Sample", "S&P 100"))
self.st.markdown("<br><br>", unsafe_allow_html=True)
self.st.markdown("___")
self.st.subheader("Stocker Company Loader")
self.show_message("st", "info", "Stock Loading: Load on our database information about the companies listed"
"on the Stocks Option selected")
if self.st.button("Load Stocks"):
execute = True
arg["loader"] = "company"
self.st.markdown("<br><br><br>", unsafe_allow_html=True)
self.st.markdown("___")
self.st.subheader("Stocker Price Loader")
self.show_message("st", "info", "Price Loading: Load on our database information about companies daily"
" prices, you can select a specific period")
arg["period"] = self.st.selectbox("Prices Period:", ("5y", "2y", "1y", "ytd", "6m", "3m", "1m", "5d"))
if self.st.button("Load Prices"):
execute = True
arg["loader"] = "price"
self.st.markdown("<br><br><br>", unsafe_allow_html=True)
self.st.markdown("___")
self.st.subheader("Stocker News Loader")
self.show_message("st", "info", "News Loading: Load on our database information about the latest news of"
" companies which can impact the market")
if self.st.button("Load News"):
execute = True
arg["loader"] = "news"
self.st.markdown("<br><br><br>", unsafe_allow_html=True)
self.st.markdown("___")
self.st.subheader("Stocker Crypto Loader")
self.show_message("st", "info", "Crypto Loading: Load on our database information about all "
"cryptocurrencies available on the market")
if self.st.button("Load Crypto"):
execute = True
arg["loader"] = "crypto"
self.st.markdown("<br><br><br>", unsafe_allow_html=True)
self.st.markdown("___")
self.st.subheader("Stocker Full Loader")
self.show_message("st", "info", "Full Loading: Load on our database all information listed above: companies"
" prices, news and cryptocurrencies")
if self.st.button("Full Load"):
execute = True
arg["loader"] = "full"
elif option == "Ad-Hoc":
self.st.header("Ad-Hoc")
elif option == "Advisors":
sub_option = self.st.selectbox("Opções:", ("List Advisors", "Register Advisor", "Edit Advisor"))
self.st.markdown("___")
if sub_option == "List Advisors":
option = sub_option
execute = True
elif sub_option == "Register Advisor":
arg = self.advisor_form(None)
option = sub_option
if arg:
execute = True
elif sub_option == "Edit Advisor":
arg = self.st.text_input("CPF", max_chars=15, type='default', help="CPF: 123.123.123-12")
execute = True
option = sub_option
self.st.markdown("___")
return option, execute, arg
def advisor_form(self, advisor):
cols = self.st.beta_columns([0.5, 0.25, 0.25])
button = "Update Advisor" if advisor else "Register Advisor"
advisor = {
"name": cols[0].text_input("Nome", max_chars=30, type='default', help="Nome Completo",
value=advisor["name"]) if advisor
else cols[0].text_input("Nome", max_chars=30, type='default', help="Nome Completo"),
"username": cols[1].text_input("Usuário", max_chars=15, type='default', help="Usuário para login",
value=advisor["username"]) if advisor
else cols[1].text_input("Usuário", max_chars=15, type='default', help="Usuário para login"),
"password": cols[2].text_input("Senha", max_chars=15, type='password', help="Senha para login"),
"cpf": advisor["cpf"] if advisor
else cols[2].text_input("CPF", max_chars=15, type='default', help="CPF: 123.123.123-12"),
"cvm_license": cols[1].text_input("Lincença CVM", max_chars=10, type='default',
value=advisor["cvm_license"]) if advisor
else cols[1].text_input("Lincença CVM", max_chars=10, type='default'),
"email": cols[0].text_input("Email", max_chars=30, type='default', value=advisor["email"]) if advisor
else cols[0].text_input("Email", max_chars=30, type='default'),
"profile": "advisor"
}
register = self.st.button(button)
self.st.markdown("___")
filled = True
for b in advisor.values():
if not b:
filled = False
if register:
if not filled:
self.show_message("st", "warning", "Preencha todos os campos")
else:
return advisor
@staticmethod
def plot_bar(companies, x, y, title, color):
df = pd.DataFrame(companies)
fig = px.bar(df, x=x, y=y,
color=color, title=title,
color_discrete_sequence=px.colors.qualitative.Pastel,
height=400)
return fig
@staticmethod
def plot_bar2(companies, y, title):
df = pd.DataFrame(companies)[["symbol", y]]
r = df[y].value_counts()
        fig = go.Figure(data=[go.Bar(x=r.index, y=r)])  # one bar per distinct value, matching the value_counts index
fig.update_layout(
height=400,
title=title
)
return fig
@staticmethod
def plot_pie(companies, y, title):
df = pd.DataFrame(companies)[["symbol", y]]
r = df[y].value_counts()
        fig = go.Figure(data=[go.Pie(labels=r.index, values=r)])  # label each slice by the counted value, as in sector_distribution
fig.update_layout(
height=400,
title=title
)
return fig
@staticmethod
def plot_highest_emp(highest_emp):
fig = go.Figure(data=[go.Indicator(
mode="number+delta",
value=highest_emp[0][1],
title={
"text": f"{highest_emp[0][0]}<br><span style='font-size:0.8em;color:gray'>Highest number</span><br>"
f"<span style='font-size:0.8em;color:gray'>of employees</span>"},
)])
return fig
@staticmethod
def plot_information_companies(cols, companies):
logos = [company[1] for company in companies]
names = [company[0] for company in companies]
for idx, logo in enumerate(logos):
col = 2 if idx % 2 == 0 else 3
cols[col].image(logo, width=50)
for idx, name in enumerate(names):
col = 0 if idx % 2 == 0 else 1
cols[col].markdown(f"**Name: ** {name}")
@staticmethod
def plot_notusa_companies(cols, companies):
for company in companies:
cols[0].markdown(f"**Name: ** {company[0]}")
cols[1].markdown(f"**Country: ** {company[2]}")
cols[2].image(company[1], width=50)
@staticmethod
def plot_insight_prices(k, v):
fig = go.Figure(data=[go.Indicator(
mode="number+delta",
value=v[0][1],
title={
"text": f"{v[0][0]}<br><span style='font-size:0.8em;color:gray'>{k.split('_')[0].capitalize()} {k.split('_')[1].capitalize()}</span><br>"
f"<span style='font-size:0.8em;color:gray'>{v[0][2]}</span>"},
)])
return fig
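    # plot_insight_prices indexes v[0] as a (name, value, extra label) triple and
    # builds the indicator caption from the insight key, e.g. "highest_close".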
def plot_company_ad_hoc(self, results):
companies = results["company"]["specific"]
highest_emp = results["company"]["insights"]["highest_emp"]
information = results["company"]["insights"]["tech"]
not_usa = results["company"]["insights"]["not_us"]
fields = results["company"]["fields"]
if companies:
if not "symbol" in fields:
self.st.warning("Be sure to select the symbol option")
else:
self.show_companies(companies)
col = self.st.beta_columns(2)
if "employees" in fields:
fig1 = self.plot_bar(companies, "symbol", "employees", "Number of employees by company", "employees")
col[0].plotly_chart(fig1, use_container_width=True)
if "state" in fields:
fig2 = self.plot_bar2(companies, "state", "State distribution")
col[1].plotly_chart(fig2, use_container_width=True)
col2 = self.st.beta_columns(2)
if "sector" in fields:
fig3 = self.plot_pie(companies, "sector", "Companies by sector")
col2[0].plotly_chart(fig3, use_container_width=True)
if "industry" in fields:
fig4 = self.plot_pie(companies, "industry", "Companies by industry")
col2[1].plotly_chart(fig4, use_container_width=True)
if highest_emp:
fig5 = self.plot_highest_emp(highest_emp)
self.st.plotly_chart(fig5, use_container_width=True)
if information:
self.st.markdown("___")
title_col = self.st.beta_columns(1)
cols4 = self.st.beta_columns([1, 1, 0.2, 0.2])
title_col[0].subheader("Information sector companies")
self.plot_information_companies(cols4, information)
if not_usa:
self.st.markdown("___")
title_col2 = self.st.beta_columns(1)
title_col2[0].subheader("Nasdaq listed companies outside USA")
cols5 = self.st.beta_columns(4)
self.plot_notusa_companies(cols5, not_usa)
def plot_price_ad_hoc(self, results):
if not results["price"]["specific"].empty:
self.st.markdown("___")
dfs = list()
for company in results["price"]["company_list"]:
mask = (results["price"]["specific"]["symbol"] == company)
dfs.append(results["price"]["specific"][mask])
self.plot_price(dfs, results["price"]["type"][0])
self.st.markdown("___")
c = 0
cols = self.st.beta_columns(len(results["price"]["insights"].keys()))
for k, val in results["price"]["insights"].items():
if val:
cols[c].plotly_chart(self.plot_insight_prices(k, val), use_container_width=True)
c += 1
def plot_news_ad_hoc(self, results):
if results["news"]["filter"]:
self.show_news(results["news"]["filter"], "Filtered News")
if results["news"]["insights"]:
news_fields = ("id", "symbol", "date", "title", "source", "url", "description", "image")
latest = results["news"]["insights"][0]
latest_news = dict()
for idx, v in enumerate(latest):
latest_news[news_fields[idx]] = v
            self.show_news([latest_news], f"Latest news - {latest_news['symbol']} - {latest_news['date']}")
def plot_crypto_ad_hoc(self, results):
if results["crypto"]:
self.st.markdown("___")
self.show_cryptos(results["crypto"])
def ad_hoc_plot(self, results):
self.plot_company_ad_hoc(results)
self.plot_price_ad_hoc(results)
self.plot_news_ad_hoc(results)
self.plot_crypto_ad_hoc(results)
def ad_hoc_form(self, symbols):
company_fields = ("symbol", "name", "exchange", "industry", "website", "description", "CEO", "sector",
"employees", "state", "city", "country", "logo")
news_fields = ("symbol", "date", "title", "source", "url", "description", "image")
ad_hoc = self.default_ad_hoc()
self.st.markdown("___")
self.st.markdown(f"**Company Options:**")
cols = self.st.beta_columns([2, 1, 1])
cols[0].markdown(f"**Specific company views:**")
ad_hoc["company"]["specific"]["company_list"] = cols[0].multiselect("Stocks list:", sum(symbols, []))
ad_hoc["company"]["specific"]["fields"] = cols[0].multiselect("Information:", company_fields)
filter_cols = self.st.beta_columns(6)
ad_hoc["company"]["specific"]["order_by"] = filter_cols[0].selectbox("Order By:", ad_hoc["company"]["specific"]["fields"]),
ad_hoc["company"]["specific"]["order_method"] = filter_cols[1].selectbox("Order Method:", ("Ascending", "Descending")),
ad_hoc["company"]["specific"]["limit"] = filter_cols[2].number_input("Number of results:", value=1, min_value=1, max_value=100),
ad_hoc["company"]["specific"]["rule_filter"] = {}
cols[1].markdown(f"**Insights views:**")
cols[2].markdown(f"**-**")
cols[1].markdown("<br>", unsafe_allow_html=True)
ad_hoc["company"]["insights"]["highest_emp"] = cols[1].checkbox("Highest employees number")
cols[1].markdown("<br>", unsafe_allow_html=True)
ad_hoc["company"]["insights"]["tech"] = cols[1].checkbox("Information Companies")
cols[2].markdown("<br>", unsafe_allow_html=True)
ad_hoc["company"]["insights"]["not_us"] = cols[2].checkbox("Outside USA")
cols[2].markdown("<br>", unsafe_allow_html=True)
ad_hoc["company"]["specific"]["rule_filter"]["apply"] = cols[2].checkbox("Rule filter")
if ad_hoc["company"]["specific"]["rule_filter"]["apply"]:
ad_hoc["company"]["specific"]["rule_filter"]["field"] = filter_cols[0].selectbox(
"Filter Field:", ("symbol", "name", "employees"))
ad_hoc["company"]["specific"]["rule_filter"]["operation"] = filter_cols[1].selectbox(
"Operation", ("Greater than", "Less than", "Equals to") if
ad_hoc["company"]["specific"]["rule_filter"]["field"] == "employees" else ("Equals to", ))
ad_hoc["company"]["specific"]["rule_filter"]["value"] = filter_cols[2].number_input("Value: ") \
if ad_hoc["company"]["specific"]["rule_filter"]["field"] == "employees"\
else filter_cols[2].text_input("Value: ")
self.st.markdown("___")
self.st.markdown(f"**Prices Options:**")
price_cols = self.st.beta_columns([2, 1, 1])
price_cols[0].markdown(f"**Specific price views:**")
ad_hoc["price"]["specific"]["company_list"] = price_cols[0].multiselect("Price Stocks:", sum(symbols, []))
filter_price_cols = self.st.beta_columns(6)
ad_hoc["price"]["specific"]["start_date"] = filter_price_cols[0].date_input("Start Date:")
ad_hoc["price"]["specific"]["end_date"] = filter_price_cols[1].date_input("End Date:")
ad_hoc["price"]["specific"]["type"] = filter_price_cols[2].selectbox("Price Type:", ("close", "open", "high", "low")),
price_cols[1].markdown(f"**Insights views:**")
price_cols[2].markdown(f"**-**")
price_cols[1].markdown("<br>", unsafe_allow_html=True)
price_cols[2].markdown("<br>", unsafe_allow_html=True)
ad_hoc["price"]["insights"]["highest_close"] = price_cols[1].checkbox("Highest close price")
price_cols[1].markdown("<br>", unsafe_allow_html=True)
ad_hoc["price"]["insights"]["lowest_close"] = price_cols[2].checkbox("Lowest close price")
ad_hoc["price"]["insights"]["highest_volume"] = price_cols[1].checkbox("Highest volume")
price_cols[2].markdown("<br>", unsafe_allow_html=True)
ad_hoc["price"]["insights"]["lowest_volume"] = price_cols[2].checkbox("Lowest volume")
self.st.markdown("___")
self.st.markdown(f"**News Options:**")
news_cols = self.st.beta_columns([2, 1, 1, 1])
news_cols[0].markdown(f"**Specific news views:**")
news_cols[1].markdown("-<br>", unsafe_allow_html=True)
news_cols[2].markdown("-<br>", unsafe_allow_html=True)
news_cols[3].markdown("-<br>", unsafe_allow_html=True)
ad_hoc["news"]["company_list"] = news_cols[0].multiselect("News Stocks:", sum(symbols, []))
ad_hoc["news"]["fields"] = news_cols[0].multiselect("News Info:", news_fields)
ad_hoc["news"]["date"] = news_cols[1].date_input("Date:")
ad_hoc["news"]["filter_date"] = news_cols[2].selectbox("Filter Date as:", ("On", "Starting from", "Until"))
ad_hoc["news"]["order_by"] = news_cols[1].selectbox("Order by field:", ad_hoc["news"]["fields"])
ad_hoc["news"]["order_method"] = news_cols[2].selectbox("Order results:", ("Ascending", "Descending"))
ad_hoc["news"]["limit"] = news_cols[3].number_input("Limit of results:", value=1, min_value=1, max_value=100)
ad_hoc["news"]["latest"] = news_cols[3].checkbox("Latest News")
self.st.markdown("___")
self.st.markdown(f"**Crypto Options:**")
crypto_col = self.st.beta_columns([2, 0.5, 1])
ad_hoc["crypto"]["name"] = crypto_col[0].text_input("Cryptocurrency")
ad_hoc["crypto"]["limit"] = crypto_col[1].number_input("Limit of crypto:", value=1, min_value=1, max_value=100)
generate = self.st.button("Generate Report")
if generate:
return ad_hoc
@staticmethod
def default_ad_hoc():
return {
"company": {
"specific": {
"company_list": [],
"fields": [],
"order_by": None,
"order_method": None,
"limit": None,
"rule_filter": {
"apply": False,
"field": None,
"operation": None,
"value": None
}
},
"insights": {
"highest_emp": False,
"tech": False,
"not_us": False
}
},
"news": {
"company_list": [],
"date": None,
"filter_date": None,
},
"price": {
"specific": {
"company_list": [],
"type": None,
"start_date": None,
"end_date": None
},
"insights": {
"highest_close": False,
"lowest_close": False,
"highest_volume": False,
"lowest_volume": False,
}
},
"crypto": {
"name": None,
"limit": None
}
} | import plotly.graph_objects as go
import plotly.express as px
import pandas as pd
class View:
def __init__(self, st):
self.st = st
self.st.set_page_config(layout='wide')
self.side_bar = st.sidebar
def show_message(self, location, _type, message):
if location == "sb":
component = self.side_bar
else:
component = self.st
if _type == "success":
component.success(message)
elif _type == "error":
component.error(message)
elif _type == "warning":
component.warning(message)
elif _type == "info":
component.info(message)
def login(self):
_user = self.side_bar.text_input("Username:")
_pass = self.side_bar.text_input("Password", type="password")
return _user, _pass
def advisor_setup(self):
option = self.side_bar.selectbox("Options:", ("Research", ))
if option == "Research":
self.st.header("Advisor Research Area")
self.st.markdown("___")
return option
def research_area(self):
execute = False
args = {"price": {"enabled": False}, "sector": {"enabled": False}, "news": {"enabled": False},
"company_info": {"enabled": False}, "volatility": {"enabled": False}, "return": {"enabled": False},
"raw_price": {"enabled": False}, "volume": {"enabled": False}}
self.st.markdown("___")
check_cols = self.st.beta_columns(4)
args["price"]["enabled"] = check_cols[0].checkbox("Price")
args["company_info"]["enabled"] = check_cols[1].checkbox("Company Information")
args["sector"]["enabled"] = check_cols[2].checkbox("Sector Distribution")
args["news"]["enabled"] = check_cols[3].checkbox("News")
if args["price"]["enabled"]:
self.st.markdown("___")
self.st.subheader("Price Insights")
price_cols = self.st.beta_columns(7)
args["price"]["_type"] = price_cols[0].selectbox("Price type:", ("close", "open", "high", "low"))
args["price"]["period"] = price_cols[1].selectbox("Period:", ("ytd", "1m", "6m", "1y", "2y", "5y", "max"))
args["raw_price"]["enabled"] = price_cols[3].checkbox("Raw Price")
args["volume"]["enabled"] = price_cols[4].checkbox("Volume")
args["return"]["enabled"] = price_cols[5].checkbox("Return")
args["volatility"]["enabled"] = price_cols[6].checkbox("Volatility")
return execute, args
def show_cryptos(self, cryptos):
for crypto in cryptos:
cols = self.st.beta_columns(3)
cols[0].markdown(f"**Symbol: ** {crypto.get('symbol', '-')}")
cols[1].markdown(f"**Name: ** {crypto.get('name', '-')}")
cols[2].markdown(f"**Price: ** {crypto.get('price', '-')}")
def crypto_form(self):
self.st.markdown("<br><br>", unsafe_allow_html=True)
self.st.markdown("___")
_input = self.st.text_input("Cryptocurrency")
return _input
def sector_distribution(self, sectors):
self.st.subheader("Sector Distribution")
r = sectors['sector'].value_counts()
fig = go.Figure(data=[go.Pie(labels=r.index, values=r)])
fig.update_layout(
width=400, height=400,
)
self.st.plotly_chart(fig)
def plot_price(self, prices, _type):
self.st.subheader(_type.capitalize())
fig = go.Figure()
for price in prices:
name = price["symbol"][0]
fig.add_trace(go.Scatter(x=price.index, y=price[_type],
mode='lines',
name=name))
fig.update_layout(
template="plotly_white",
width=1400, height=500,
hovermode="x unified",
plot_bgcolor='rgba(0,0,0,0)'
)
self.st.plotly_chart(fig)
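# [Editor's note, not part of the original file] plot_price expects each element of
# `prices` to be a DataFrame indexed by date, carrying a "symbol" column plus a column
# named after the requested price type. A small illustration with made-up data:
#
#     df = pd.DataFrame({"symbol": ["AAPL"] * 3, "close": [150.0, 151.2, 149.8]},
#                       index=pd.date_range("2021-01-04", periods=3))
#     view.plot_price([df], "close")   # `view` being an instance of this View class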
def show_companies(self, companies):
self.st.markdown("___")
self.st.subheader("Company Information")
self.st.markdown("<br>", unsafe_allow_html=True)
for company in companies:
basic = self.st.beta_columns(4)
basic[0].markdown(f"## **{company.get('name', ' ')} ({company.get('symbol', ' ')})**")
if company.get("logo"):
basic[3].image(company.get("logo"), width=50)
basic[3].markdown("<br>", unsafe_allow_html=True)
desc = self.st.beta_columns(2)
if company.get('sector'):
desc[0].markdown(f"**Sector: ** {company.get('sector', '-')}")
if company.get('industry'):
desc[1].markdown(f"**Industry: ** {company.get('industry', '-')}")
if company.get('description'):
desc[0].markdown(f"**Description: ** {company.get('description', '-')}")
info = self.st.beta_columns(2)
if company.get('CEO'):
info[0].markdown(f"**CEO: ** {company.get('CEO', '-')}")
if company.get('employees'):
info[1].markdown(f"**Employees: ** {company.get('employees', '-')}")
if company.get('website'):
info[0].markdown(f"**Website: ** {company.get('website', '-')}")
if company.get('city') or company.get('state') or company.get('country'):
info[1].markdown(f"**Location: ** {company.get('city', ' ')} - {company.get('state', ' ')} - {company.get('country', ' ')}")
self.st.markdown("___")
def show_news(self, news, title="Company News"):
self.st.markdown("___")
self.st.subheader(title)
self.st.markdown("<br>", unsafe_allow_html=True)
for n in news:
if n.get('symbol') or n.get('title') or n.get('date'):
self.st.markdown(f"**{n.get('symbol', ' ')} - {n.get('title', ' ')} [{n.get('date', ' ')}]**")
if n.get('source'):
self.st.markdown(f"**Source: ** {n.get('source', '-')}")
if n.get("image"):
self.st.image(n.get("image"), width=300)
if n.get("description"):
self.st.markdown(f"**Description: ** {n.get('description', '-')}")
if n.get("url"):
self.st.markdown(f"**Access on: ** {n.get('url', '-')}")
self.st.markdown("<br>", unsafe_allow_html=True)
def list_advisors(self, advisors):
for advisor in advisors:
cols = self.st.beta_columns(3)
cols[0].markdown(f"**Name: ** {advisor[0]}")
cols[1].markdown(f"**CPF: ** {advisor[1]}")
cols[2].markdown(f"**CVM: ** {advisor[2]}")
def symbol_input(self, symbols):
selected_symbols = self.st.multiselect("Stocks list:", symbols)
return selected_symbols
def admin_setup(self):
option = self.side_bar.selectbox("Option:", ("Data Loader", "Advisors", "Ad-Hoc"))
execute = False
arg = None
self.st.title("Stocker Administration Area")
self.st.markdown("___")
if option == "Data Loader":
arg = dict()
self.st.header("Stocker Data Loader")
arg["symbols"] = self.st.selectbox("Stocks Option:", ("Sample", "S&P 100"))
self.st.markdown("<br><br>", unsafe_allow_html=True)
self.st.markdown("___")
self.st.subheader("Stocker Company Loader")
self.show_message("st", "info", "Stock Loading: Load on our database information about the companies listed"
"on the Stocks Option selected")
if self.st.button("Load Stocks"):
execute = True
arg["loader"] = "company"
self.st.markdown("<br><br><br>", unsafe_allow_html=True)
self.st.markdown("___")
self.st.subheader("Stocker Price Loader")
self.show_message("st", "info", "Price Loading: Load on our database information about companies daily"
" prices, you can select a specific period")
arg["period"] = self.st.selectbox("Prices Period:", ("5y", "2y", "1y", "ytd", "6m", "3m", "1m", "5d"))
if self.st.button("Load Prices"):
execute = True
arg["loader"] = "price"
self.st.markdown("<br><br><br>", unsafe_allow_html=True)
self.st.markdown("___")
self.st.subheader("Stocker News Loader")
self.show_message("st", "info", "News Loading: Load on our database information about the latest news of"
" companies which can impact the market")
if self.st.button("Load News"):
execute = True
arg["loader"] = "news"
self.st.markdown("<br><br><br>", unsafe_allow_html=True)
self.st.markdown("___")
self.st.subheader("Stocker Crypto Loader")
self.show_message("st", "info", "Crypto Loading: Load on our database information about all "
"cryptocurrencies available on the market")
if self.st.button("Load Crypto"):
execute = True
arg["loader"] = "crypto"
self.st.markdown("<br><br><br>", unsafe_allow_html=True)
self.st.markdown("___")
self.st.subheader("Stocker Full Loader")
self.show_message("st", "info", "Full Loading: Load on our database all information listed above: companies"
" prices, news and cryptocurrencies")
if self.st.button("Full Load"):
execute = True
arg["loader"] = "full"
elif option == "Ad-Hoc":
self.st.header("Ad-Hoc")
elif option == "Advisors":
sub_option = self.st.selectbox("Opções:", ("List Advisors", "Register Advisor", "Edit Advisor"))
self.st.markdown("___")
if sub_option == "List Advisors":
option = sub_option
execute = True
elif sub_option == "Register Advisor":
arg = self.advisor_form(None)
option = sub_option
if arg:
execute = True
elif sub_option == "Edit Advisor":
arg = self.st.text_input("CPF", max_chars=15, type='default', help="CPF: 123.123.123-12")
execute = True
option = sub_option
self.st.markdown("___")
return option, execute, arg
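# [Editor's note, not part of the original file] admin_setup only collects the admin's
# choices; dispatching on the returned triple is up to the caller. A hedged sketch (the
# `controller` object and its method names are hypothetical):
#
#     option, execute, arg = view.admin_setup()
#     if execute:
#         if option == "Data Loader":
#             controller.run_loader(arg["loader"], arg)
#         elif option == "Register Advisor":
#             controller.register_advisor(arg)
#         elif option == "List Advisors":
#             view.list_advisors(controller.fetch_advisors())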
def advisor_form(self, advisor):
cols = self.st.beta_columns([0.5, 0.25, 0.25])
button = "Update Advisor" if advisor else "Register Advisor"
advisor = {
"name": cols[0].text_input("Nome", max_chars=30, type='default', help="Nome Completo",
value=advisor["name"]) if advisor
else cols[0].text_input("Nome", max_chars=30, type='default', help="Nome Completo"),
"username": cols[1].text_input("Usuário", max_chars=15, type='default', help="Usuário para login",
value=advisor["username"]) if advisor
else cols[1].text_input("Usuário", max_chars=15, type='default', help="Usuário para login"),
"password": cols[2].text_input("Senha", max_chars=15, type='password', help="Senha para login"),
"cpf": advisor["cpf"] if advisor
else cols[2].text_input("CPF", max_chars=15, type='default', help="CPF: 123.123.123-12"),
"cvm_license": cols[1].text_input("Lincença CVM", max_chars=10, type='default',
value=advisor["cvm_license"]) if advisor
else cols[1].text_input("Lincença CVM", max_chars=10, type='default'),
"email": cols[0].text_input("Email", max_chars=30, type='default', value=advisor["email"]) if advisor
else cols[0].text_input("Email", max_chars=30, type='default'),
"profile": "advisor"
}
register = self.st.button(button)
self.st.markdown("___")
filled = True
for b in advisor.values():
if not b:
filled = False
if register:
if not filled:
self.show_message("st", "warning", "Preencha todos os campos")
else:
return advisor
@staticmethod
def plot_bar(companies, x, y, title, color):
df = pd.DataFrame(companies)
fig = px.bar(df, x=x, y=y,
color=color, title=title,
color_discrete_sequence=px.colors.qualitative.Pastel,
height=400)
return fig
@staticmethod
def plot_bar2(companies, y, title):
df = pd.DataFrame(companies)[["symbol", y]]
r = df[y].value_counts()
fig = go.Figure(data=[go.Bar(x=r.index, y=r)])
fig.update_layout(
height=400,
title=title
)
return fig
@staticmethod
def plot_pie(companies, y, title):
df = pd.DataFrame(companies)[["symbol", y]]
r = df[y].value_counts()
fig = go.Figure(data=[go.Pie(labels=r.index, values=r)])
fig.update_layout(
height=400,
title=title
)
return fig
@staticmethod
def plot_highest_emp(highest_emp):
fig = go.Figure(data=[go.Indicator(
mode="number+delta",
value=highest_emp[0][1],
title={
"text": f"{highest_emp[0][0]}<br><span style='font-size:0.8em;color:gray'>Highest number</span><br>"
f"<span style='font-size:0.8em;color:gray'>of employees</span>"},
)])
return fig
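# [Editor's note, not part of the original file] plot_highest_emp indexes
# highest_emp[0][0] and highest_emp[0][1], i.e. it expects a non-empty sequence of
# (company_name, employee_count) pairs such as [("Apple Inc.", 147000)], typically the
# top row of an employees-descending query.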
@staticmethod
def plot_information_companies(cols, companies):
logos = [company[1] for company in companies]
names = [company[0] for company in companies]
for idx, logo in enumerate(logos):
col = 2 if idx % 2 == 0 else 3
cols[col].image(logo, width=50)
for idx, name in enumerate(names):
col = 0 if idx % 2 == 0 else 1
cols[col].markdown(f"**Name: ** {name}")
@staticmethod
def plot_notusa_companies(cols, companies):
for company in companies:
cols[0].markdown(f"**Name: ** {company[0]}")
cols[1].markdown(f"**Country: ** {company[2]}")
cols[2].image(company[1], width=50)
@staticmethod
def plot_insight_prices(k, v):
fig = go.Figure(data=[go.Indicator(
mode="number+delta",
value=v[0][1],
title={
"text": f"{v[0][0]}<br><span style='font-size:0.8em;color:gray'>{k.split('_')[0].capitalize()} {k.split('_')[1].capitalize()}</span><br>"
f"<span style='font-size:0.8em;color:gray'>{v[0][2]}</span>"},
)])
return fig
def plot_company_ad_hoc(self, results):
companies = results["company"]["specific"]
highest_emp = results["company"]["insights"]["highest_emp"]
information = results["company"]["insights"]["tech"]
not_usa = results["company"]["insights"]["not_us"]
fields = results["company"]["fields"]
if companies:
if "symbol" not in fields:
self.st.warning("Be sure to select the symbol option")
else:
self.show_companies(companies)
col = self.st.beta_columns(2)
if "employees" in fields:
fig1 = self.plot_bar(companies, "symbol", "employees", "Number of employees by company", "employees")
col[0].plotly_chart(fig1, use_container_width=True)
if "state" in fields:
fig2 = self.plot_bar2(companies, "state", "State distribution")
col[1].plotly_chart(fig2, use_container_width=True)
col2 = self.st.beta_columns(2)
if "sector" in fields:
fig3 = self.plot_pie(companies, "sector", "Companies by sector")
col2[0].plotly_chart(fig3, use_container_width=True)
if "industry" in fields:
fig4 = self.plot_pie(companies, "industry", "Companies by industry")
col2[1].plotly_chart(fig4, use_container_width=True)
if highest_emp:
fig5 = self.plot_highest_emp(highest_emp)
self.st.plotly_chart(fig5, use_container_width=True)
if information:
self.st.markdown("___")
title_col = self.st.beta_columns(1)
cols4 = self.st.beta_columns([1, 1, 0.2, 0.2])
title_col[0].subheader("Information sector companies")
self.plot_information_companies(cols4, information)
if not_usa:
self.st.markdown("___")
title_col2 = self.st.beta_columns(1)
title_col2[0].subheader("Nasdaq listed companies outside USA")
cols5 = self.st.beta_columns(4)
self.plot_notusa_companies(cols5, not_usa)
def plot_price_ad_hoc(self, results):
if not results["price"]["specific"].empty:
self.st.markdown("___")
dfs = list()
for company in results["price"]["company_list"]:
mask = (results["price"]["specific"]["symbol"] == company)
dfs.append(results["price"]["specific"][mask])
self.plot_price(dfs, results["price"]["type"][0])
self.st.markdown("___")
c = 0
cols = self.st.beta_columns(len(results["price"]["insights"].keys()))
for k, val in results["price"]["insights"].items():
if val:
cols[c].plotly_chart(self.plot_insight_prices(k, val), use_container_width=True)
c += 1
def plot_news_ad_hoc(self, results):
if results["news"]["filter"]:
self.show_news(results["news"]["filter"], "Filtered News")
if results["news"]["insights"]:
news_fields = ("id", "symbol", "date", "title", "source", "url", "description", "image")
latest = results["news"]["insights"][0]
latest_news = dict()
for idx, v in enumerate(latest):
latest_news[news_fields[idx]] = v
self.show_news([latest_news], f"Latest news - {latest_news['symbol']} - {latest_news['date']}")
def plot_crypto_ad_hoc(self, results):
if results["crypto"]:
self.st.markdown("___")
self.show_cryptos(results["crypto"])
def ad_hoc_plot(self, results):
self.plot_company_ad_hoc(results)
self.plot_price_ad_hoc(results)
self.plot_news_ad_hoc(results)
self.plot_crypto_ad_hoc(results)
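# [Editor's note, not part of the original file] The `results` dict consumed by
# ad_hoc_plot is built outside this file; judging only from the accesses in the four
# plot_*_ad_hoc methods above, its minimal shape is roughly (keys inferred, not verified):
#
#     results = {
#         "company": {"specific": [...company dicts...], "fields": [...],
#                     "insights": {"highest_emp": [...], "tech": [...], "not_us": [...]}},
#         "price": {"specific": price_dataframe, "company_list": [...], "type": ("close",),
#                   "insights": {"highest_close": [...], "lowest_close": [...],
#                                "highest_volume": [...], "lowest_volume": [...]}},
#         "news": {"filter": [...news dicts...], "insights": [...rows...]},
#         "crypto": [...crypto dicts...],
#     }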
def ad_hoc_form(self, symbols):
company_fields = ("symbol", "name", "exchange", "industry", "website", "description", "CEO", "sector",
"employees", "state", "city", "country", "logo")
news_fields = ("symbol", "date", "title", "source", "url", "description", "image")
ad_hoc = self.default_ad_hoc()
self.st.markdown("___")
self.st.markdown(f"**Company Options:**")
cols = self.st.beta_columns([2, 1, 1])
cols[0].markdown(f"**Specific company views:**")
ad_hoc["company"]["specific"]["company_list"] = cols[0].multiselect("Stocks list:", sum(symbols, []))
ad_hoc["company"]["specific"]["fields"] = cols[0].multiselect("Information:", company_fields)
filter_cols = self.st.beta_columns(6)
ad_hoc["company"]["specific"]["order_by"] = filter_cols[0].selectbox("Order By:", ad_hoc["company"]["specific"]["fields"]),
ad_hoc["company"]["specific"]["order_method"] = filter_cols[1].selectbox("Order Method:", ("Ascending", "Descending")),
ad_hoc["company"]["specific"]["limit"] = filter_cols[2].number_input("Number of results:", value=1, min_value=1, max_value=100),
ad_hoc["company"]["specific"]["rule_filter"] = {}
cols[1].markdown(f"**Insights views:**")
cols[2].markdown(f"**-**")
cols[1].markdown("<br>", unsafe_allow_html=True)
ad_hoc["company"]["insights"]["highest_emp"] = cols[1].checkbox("Highest employees number")
cols[1].markdown("<br>", unsafe_allow_html=True)
ad_hoc["company"]["insights"]["tech"] = cols[1].checkbox("Information Companies")
cols[2].markdown("<br>", unsafe_allow_html=True)
ad_hoc["company"]["insights"]["not_us"] = cols[2].checkbox("Outside USA")
cols[2].markdown("<br>", unsafe_allow_html=True)
ad_hoc["company"]["specific"]["rule_filter"]["apply"] = cols[2].checkbox("Rule filter")
if ad_hoc["company"]["specific"]["rule_filter"]["apply"]:
ad_hoc["company"]["specific"]["rule_filter"]["field"] = filter_cols[0].selectbox(
"Filter Field:", ("symbol", "name", "employees"))
ad_hoc["company"]["specific"]["rule_filter"]["operation"] = filter_cols[1].selectbox(
"Operation", ("Greater than", "Less than", "Equals to") if
ad_hoc["company"]["specific"]["rule_filter"]["field"] == "employees" else ("Equals to", ))
ad_hoc["company"]["specific"]["rule_filter"]["value"] = filter_cols[2].number_input("Value: ") \
if ad_hoc["company"]["specific"]["rule_filter"]["field"] == "employees"\
else filter_cols[2].text_input("Value: ")
self.st.markdown("___")
self.st.markdown(f"**Prices Options:**")
price_cols = self.st.beta_columns([2, 1, 1])
price_cols[0].markdown(f"**Specific price views:**")
ad_hoc["price"]["specific"]["company_list"] = price_cols[0].multiselect("Price Stocks:", sum(symbols, []))
filter_price_cols = self.st.beta_columns(6)
ad_hoc["price"]["specific"]["start_date"] = filter_price_cols[0].date_input("Start Date:")
ad_hoc["price"]["specific"]["end_date"] = filter_price_cols[1].date_input("End Date:")
ad_hoc["price"]["specific"]["type"] = filter_price_cols[2].selectbox("Price Type:", ("close", "open", "high", "low")),
price_cols[1].markdown(f"**Insights views:**")
price_cols[2].markdown(f"**-**")
price_cols[1].markdown("<br>", unsafe_allow_html=True)
price_cols[2].markdown("<br>", unsafe_allow_html=True)
ad_hoc["price"]["insights"]["highest_close"] = price_cols[1].checkbox("Highest close price")
price_cols[1].markdown("<br>", unsafe_allow_html=True)
ad_hoc["price"]["insights"]["lowest_close"] = price_cols[2].checkbox("Lowest close price")
ad_hoc["price"]["insights"]["highest_volume"] = price_cols[1].checkbox("Highest volume")
price_cols[2].markdown("<br>", unsafe_allow_html=True)
ad_hoc["price"]["insights"]["lowest_volume"] = price_cols[2].checkbox("Lowest volume")
self.st.markdown("___")
self.st.markdown(f"**News Options:**")
news_cols = self.st.beta_columns([2, 1, 1, 1])
news_cols[0].markdown(f"**Specific news views:**")
news_cols[1].markdown("-<br>", unsafe_allow_html=True)
news_cols[2].markdown("-<br>", unsafe_allow_html=True)
news_cols[3].markdown("-<br>", unsafe_allow_html=True)
ad_hoc["news"]["company_list"] = news_cols[0].multiselect("News Stocks:", sum(symbols, []))
ad_hoc["news"]["fields"] = news_cols[0].multiselect("News Info:", news_fields)
ad_hoc["news"]["date"] = news_cols[1].date_input("Date:")
ad_hoc["news"]["filter_date"] = news_cols[2].selectbox("Filter Date as:", ("On", "Starting from", "Until"))
ad_hoc["news"]["order_by"] = news_cols[1].selectbox("Order by field:", ad_hoc["news"]["fields"])
ad_hoc["news"]["order_method"] = news_cols[2].selectbox("Order results:", ("Ascending", "Descending"))
ad_hoc["news"]["limit"] = news_cols[3].number_input("Limit of results:", value=1, min_value=1, max_value=100)
ad_hoc["news"]["latest"] = news_cols[3].checkbox("Latest News")
self.st.markdown("___")
self.st.markdown(f"**Crypto Options:**")
crypto_col = self.st.beta_columns([2, 0.5, 1])
ad_hoc["crypto"]["name"] = crypto_col[0].text_input("Cryptocurrency")
ad_hoc["crypto"]["limit"] = crypto_col[1].number_input("Limit of crypto:", value=1, min_value=1, max_value=100)
generate = self.st.button("Generate Report")
if generate:
return ad_hoc
@staticmethod
def default_ad_hoc():
return {
"company": {
"specific": {
"company_list": [],
"fields": [],
"order_by": None,
"order_method": None,
"limit": None,
"rule_filter": {
"apply": False,
"field": None,
"operation": None,
"value": None
}
},
"insights": {
"highest_emp": False,
"tech": False,
"not_us": False
}
},
"news": {
"company_list": [],
"date": None,
"filter_date": None,
},
"price": {
"specific": {
"company_list": [],
"type": None,
"start_date": None,
"end_date": None
},
"insights": {
"highest_close": False,
"lowest_close": False,
"highest_volume": False,
"lowest_volume": False,
}
},
"crypto": {
"name": None,
"limit": None
}
} | ru | 0.094472 | # **{company.get('name', ' ')} ({company.get('symbol', ' ')})**") | 2.632679 | 3 |
ch_4/stopping_length.py | ProhardONE/python_primer | 51 | 8234 | # Exercise 4.11
# Author: <NAME>
import sys
g = 9.81 # acceleration due to gravity
try:
# initial velocity (convert to m/s)
v0 = (1000. / 3600) * float(sys.argv[1])
mu = float(sys.argv[2]) # coefficient of friction
except IndexError:
print 'Both v0 (in km/h) and mu must be supplied on the command line'
v0 = (1000. / 3600) * float(raw_input('v0 = ?\n'))
mu = float(raw_input('mu = ?\n'))
except ValueError:
print 'v0 and mu must be pure numbers'
sys.exit(1)
d = 0.5 * v0 ** 2 / mu / g
print d
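# [Editor's note, not part of the original exercise] The one-liner above follows from a
# constant friction deceleration a = mu*g and v0**2 = 2*a*d, i.e. d = v0**2 / (2*mu*g);
# for v0 = 120 km/h (33.3 m/s) and mu = 0.3 this gives the ~188.77 m shown in the sample run.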
"""
Sample run:
python stopping_length.py 120 0.3
188.771850342
python stopping_length.py 50 0.3
32.7728906843
"""
| # Exercise 4.11
# Author: <NAME>
import sys
g = 9.81 # acceleration due to gravity
try:
# initial velocity (convert to m/s)
v0 = (1000. / 3600) * float(sys.argv[1])
mu = float(sys.argv[2]) # coefficient of friction
except IndexError:
print 'Both v0 (in km/s) and mu must be supplied on the command line'
v0 = (1000. / 3600) * float(raw_input('v0 = ?\n'))
mu = float(raw_input('mu = ?\n'))
except ValueError:
print 'v0 and mu must be pure numbers'
sys.exit(1)
d = 0.5 * v0 ** 2 / mu / g
print d
"""
Sample run:
python stopping_length.py 120 0.3
188.771850342
python stopping_length.py 50 0.3
32.7728906843
"""
| en | 0.641024 | # Exercise 4.11 # Author: <NAME> # acceleration due to gravity # initial velocity (convert to m/s) # coefficient of friction Sample run: python stopping_length.py 120 0.3 188.771850342 python stopping_length.py 50 0.3 32.7728906843 | 3.613799 | 4 |
TestFiles/volumioTest.py | GeorgeIoak/Oden | 0 | 8235 | <filename>TestFiles/volumioTest.py
# Testing code to check update status on demand
from socketIO_client import SocketIO, LoggingNamespace
from threading import Thread
socketIO = SocketIO('localhost', 3000)
status = 'pause'
def on_push_state(*args):
print('state', args)
global status, position, duration, seek
status = args[0]['status'].encode('ascii', 'ignore')
seek = args[0]['seek']
duration = args[0]['duration']
if duration:
position = int(seek / 1000)
else:
position = 0
print("status", status, "position", position)
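# Note (editor's addition): Volumio reports `seek` in milliseconds, hence the division
# by 1000 above to get the playback position in whole seconds.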
def _receive_thread():
socketIO.wait()
receive_thread = Thread(target=_receive_thread, daemon=True)
receive_thread.start()
socketIO.on('pushState', on_push_state)
# issue this and the socketIO.wait in the background will push the reply
socketIO.emit('getState', '', on_push_state) | <filename>TestFiles/volumioTest.py
# Testing code to check update status on demand
from socketIO_client import SocketIO, LoggingNamespace
from threading import Thread
socketIO = SocketIO('localhost', 3000)
status = 'pause'
def on_push_state(*args):
print('state', args)
global status, position, duration, seek
status = args[0]['status'].encode('ascii', 'ignore')
seek = args[0]['seek']
duration = args[0]['duration']
if duration:
position = int(seek / 1000)
else:
position = 0
print("status", status, "position", position)
def _receive_thread():
socketIO.wait()
receive_thread = Thread(target=_receive_thread, daemon=True)
receive_thread.start()
socketIO.on('pushState', on_push_state)
# issue this and the socketIO.wait in the background will push the reply
socketIO.emit('getState', '', on_push_state) | en | 0.758049 | # Testing code to check update status on demand # issue this and the socketIO.wait in the background will push the reply | 2.511271 | 3 |
examples/DeepWisdom/Auto_NLP/deepWisdom/transformers_/__init__.py | zichuan-scott-xu/automl-workflow | 3 | 8236 | __version__ = "2.1.1"
# Work around to update TensorFlow's absl.logging threshold which alters the
# default Python logging output behavior when present.
# see: https://github.com/abseil/abseil-py/issues/99
# and: https://github.com/tensorflow/tensorflow/issues/26691#issuecomment-500369493
try:
import absl.logging
absl.logging.set_verbosity('info')
absl.logging.set_stderrthreshold('info')
absl.logging._warn_preinit_stderr = False
except:
pass
import logging
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
# Files and general utilities
from .file_utils import (TRANSFORMERS_CACHE, PYTORCH_TRANSFORMERS_CACHE, PYTORCH_PRETRAINED_BERT_CACHE,
cached_path, add_start_docstrings, add_end_docstrings,
WEIGHTS_NAME, TF2_WEIGHTS_NAME, TF_WEIGHTS_NAME, CONFIG_NAME,
is_tf_available, is_torch_available)
# Tokenizers
from .tokenization_utils import (PreTrainedTokenizer)
from .tokenization_auto import AutoTokenizer
from .tokenization_bert import BertTokenizer, BasicTokenizer, WordpieceTokenizer
from .tokenization_openai import OpenAIGPTTokenizer
from .tokenization_transfo_xl import (TransfoXLTokenizer, TransfoXLCorpus)
from .tokenization_gpt2 import GPT2Tokenizer
from .tokenization_ctrl import CTRLTokenizer
from .tokenization_xlnet import XLNetTokenizer, SPIECE_UNDERLINE
from .tokenization_xlm import XLMTokenizer
from .tokenization_roberta import RobertaTokenizer
from .tokenization_distilbert import DistilBertTokenizer
# Configurations
from .configuration_utils import PretrainedConfig
from .configuration_auto import AutoConfig
from .configuration_bert import BertConfig, BERT_PRETRAINED_CONFIG_ARCHIVE_MAP
from .configuration_openai import OpenAIGPTConfig, OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP
from .configuration_transfo_xl import TransfoXLConfig, TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP
from .configuration_gpt2 import GPT2Config, GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP
from .configuration_ctrl import CTRLConfig, CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP
from .configuration_xlnet import XLNetConfig, XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP
from .configuration_ctrl import CTRLConfig, CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP
from .configuration_xlm import XLMConfig, XLM_PRETRAINED_CONFIG_ARCHIVE_MAP
from .configuration_roberta import RobertaConfig, ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP
from .configuration_distilbert import DistilBertConfig, DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP
# Modeling
if is_torch_available():
from .modeling_utils import (PreTrainedModel, prune_layer, Conv1D)
from .modeling_auto import (AutoModel, AutoModelForSequenceClassification, AutoModelForQuestionAnswering,
AutoModelWithLMHead)
from .modeling_bert import (BertPreTrainedModel, BertModel, BertForPreTraining,
BertForMaskedLM, BertForNextSentencePrediction,
BertForSequenceClassification, BertForMultipleChoice,
BertForTokenClassification, BertForQuestionAnswering,
load_tf_weights_in_bert, BERT_PRETRAINED_MODEL_ARCHIVE_MAP)
from .modeling_openai import (OpenAIGPTPreTrainedModel, OpenAIGPTModel,
OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel,
load_tf_weights_in_openai_gpt, OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_MAP)
from .modeling_transfo_xl import (TransfoXLPreTrainedModel, TransfoXLModel, TransfoXLLMHeadModel,
load_tf_weights_in_transfo_xl, TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_MAP)
from .modeling_gpt2 import (GPT2PreTrainedModel, GPT2Model,
GPT2LMHeadModel, GPT2DoubleHeadsModel,
load_tf_weights_in_gpt2, GPT2_PRETRAINED_MODEL_ARCHIVE_MAP)
from .modeling_ctrl import (CTRLPreTrainedModel, CTRLModel,
CTRLLMHeadModel,
CTRL_PRETRAINED_MODEL_ARCHIVE_MAP)
from .modeling_xlnet import (XLNetPreTrainedModel, XLNetModel, XLNetLMHeadModel,
XLNetForSequenceClassification, XLNetForMultipleChoice,
XLNetForQuestionAnsweringSimple, XLNetForQuestionAnswering,
load_tf_weights_in_xlnet, XLNET_PRETRAINED_MODEL_ARCHIVE_MAP)
from .modeling_xlm import (XLMPreTrainedModel , XLMModel,
XLMWithLMHeadModel, XLMForSequenceClassification,
XLMForQuestionAnswering, XLMForQuestionAnsweringSimple,
XLM_PRETRAINED_MODEL_ARCHIVE_MAP)
from .modeling_roberta import (RobertaForMaskedLM, RobertaModel,
RobertaForSequenceClassification, RobertaForMultipleChoice,
ROBERTA_PRETRAINED_MODEL_ARCHIVE_MAP)
from .modeling_distilbert import (DistilBertForMaskedLM, DistilBertModel,
DistilBertForSequenceClassification, DistilBertForQuestionAnswering,
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_MAP)
from .modeling_albert import AlbertForSequenceClassification
# Optimization
from .optimization import (AdamW, ConstantLRSchedule, WarmupConstantSchedule, WarmupCosineSchedule,
WarmupCosineWithHardRestartsSchedule, WarmupLinearSchedule)
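# [Editor's note, not part of the original file] This __init__ simply re-exports the
# transformers 2.1.1 API from the vendored package. A minimal, hedged usage sketch of the
# classes imported above (assumes the package is importable as `transformers_` and that
# PyTorch is available; the model name is only an example):
#
#     from transformers_ import BertTokenizer, BertForSequenceClassification, AdamW, WarmupLinearSchedule
#     tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
#     model = BertForSequenceClassification.from_pretrained("bert-base-uncased")
#     optimizer = AdamW(model.parameters(), lr=2e-5)
#     scheduler = WarmupLinearSchedule(optimizer, warmup_steps=100, t_total=1000)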
if not is_tf_available() and not is_torch_available():
logger.warning("Neither PyTorch nor TensorFlow >= 2.0 have been found."
"Models won't be available and only tokenizers, configuration"
"and file/data utilities can be used.")
| __version__ = "2.1.1"
# Work around to update TensorFlow's absl.logging threshold which alters the
# default Python logging output behavior when present.
# see: https://github.com/abseil/abseil-py/issues/99
# and: https://github.com/tensorflow/tensorflow/issues/26691#issuecomment-500369493
try:
import absl.logging
absl.logging.set_verbosity('info')
absl.logging.set_stderrthreshold('info')
absl.logging._warn_preinit_stderr = False
except:
pass
import logging
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
# Files and general utilities
from .file_utils import (TRANSFORMERS_CACHE, PYTORCH_TRANSFORMERS_CACHE, PYTORCH_PRETRAINED_BERT_CACHE,
cached_path, add_start_docstrings, add_end_docstrings,
WEIGHTS_NAME, TF2_WEIGHTS_NAME, TF_WEIGHTS_NAME, CONFIG_NAME,
is_tf_available, is_torch_available)
# Tokenizers
from .tokenization_utils import (PreTrainedTokenizer)
from .tokenization_auto import AutoTokenizer
from .tokenization_bert import BertTokenizer, BasicTokenizer, WordpieceTokenizer
from .tokenization_openai import OpenAIGPTTokenizer
from .tokenization_transfo_xl import (TransfoXLTokenizer, TransfoXLCorpus)
from .tokenization_gpt2 import GPT2Tokenizer
from .tokenization_ctrl import CTRLTokenizer
from .tokenization_xlnet import XLNetTokenizer, SPIECE_UNDERLINE
from .tokenization_xlm import XLMTokenizer
from .tokenization_roberta import RobertaTokenizer
from .tokenization_distilbert import DistilBertTokenizer
# Configurations
from .configuration_utils import PretrainedConfig
from .configuration_auto import AutoConfig
from .configuration_bert import BertConfig, BERT_PRETRAINED_CONFIG_ARCHIVE_MAP
from .configuration_openai import OpenAIGPTConfig, OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP
from .configuration_transfo_xl import TransfoXLConfig, TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP
from .configuration_gpt2 import GPT2Config, GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP
from .configuration_ctrl import CTRLConfig, CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP
from .configuration_xlnet import XLNetConfig, XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP
from .configuration_ctrl import CTRLConfig, CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP
from .configuration_xlm import XLMConfig, XLM_PRETRAINED_CONFIG_ARCHIVE_MAP
from .configuration_roberta import RobertaConfig, ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP
from .configuration_distilbert import DistilBertConfig, DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP
# Modeling
if is_torch_available():
from .modeling_utils import (PreTrainedModel, prune_layer, Conv1D)
from .modeling_auto import (AutoModel, AutoModelForSequenceClassification, AutoModelForQuestionAnswering,
AutoModelWithLMHead)
from .modeling_bert import (BertPreTrainedModel, BertModel, BertForPreTraining,
BertForMaskedLM, BertForNextSentencePrediction,
BertForSequenceClassification, BertForMultipleChoice,
BertForTokenClassification, BertForQuestionAnswering,
load_tf_weights_in_bert, BERT_PRETRAINED_MODEL_ARCHIVE_MAP)
from .modeling_openai import (OpenAIGPTPreTrainedModel, OpenAIGPTModel,
OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel,
load_tf_weights_in_openai_gpt, OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_MAP)
from .modeling_transfo_xl import (TransfoXLPreTrainedModel, TransfoXLModel, TransfoXLLMHeadModel,
load_tf_weights_in_transfo_xl, TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_MAP)
from .modeling_gpt2 import (GPT2PreTrainedModel, GPT2Model,
GPT2LMHeadModel, GPT2DoubleHeadsModel,
load_tf_weights_in_gpt2, GPT2_PRETRAINED_MODEL_ARCHIVE_MAP)
from .modeling_ctrl import (CTRLPreTrainedModel, CTRLModel,
CTRLLMHeadModel,
CTRL_PRETRAINED_MODEL_ARCHIVE_MAP)
from .modeling_xlnet import (XLNetPreTrainedModel, XLNetModel, XLNetLMHeadModel,
XLNetForSequenceClassification, XLNetForMultipleChoice,
XLNetForQuestionAnsweringSimple, XLNetForQuestionAnswering,
load_tf_weights_in_xlnet, XLNET_PRETRAINED_MODEL_ARCHIVE_MAP)
from .modeling_xlm import (XLMPreTrainedModel , XLMModel,
XLMWithLMHeadModel, XLMForSequenceClassification,
XLMForQuestionAnswering, XLMForQuestionAnsweringSimple,
XLM_PRETRAINED_MODEL_ARCHIVE_MAP)
from .modeling_roberta import (RobertaForMaskedLM, RobertaModel,
RobertaForSequenceClassification, RobertaForMultipleChoice,
ROBERTA_PRETRAINED_MODEL_ARCHIVE_MAP)
from .modeling_distilbert import (DistilBertForMaskedLM, DistilBertModel,
DistilBertForSequenceClassification, DistilBertForQuestionAnswering,
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_MAP)
from .modeling_albert import AlbertForSequenceClassification
# Optimization
from .optimization import (AdamW, ConstantLRSchedule, WarmupConstantSchedule, WarmupCosineSchedule,
WarmupCosineWithHardRestartsSchedule, WarmupLinearSchedule)
if not is_tf_available() and not is_torch_available():
logger.warning("Neither PyTorch nor TensorFlow >= 2.0 have been found."
"Models won't be available and only tokenizers, configuration"
"and file/data utilities can be used.")
| en | 0.698146 | # Work around to update TensorFlow's absl.logging threshold which alters the # default Python logging output behavior when present. # see: https://github.com/abseil/abseil-py/issues/99 # and: https://github.com/tensorflow/tensorflow/issues/26691#issuecomment-500369493 # pylint: disable=invalid-name # Files and general utilities # Tokenizers # Configurations # Modeling # Optimization | 1.9828 | 2 |
src/use-model.py | sofieditmer/self-assigned | 0 | 8237 | #!/usr/bin/env python
"""
Info: This script loads the model trained in the cnn-asl.py script and enables the user to use it for classifying unseen ASL letters. It also visualizes the feature map of the last convolutional layer of the network to enable the user to get an insight into exactly which parts of the original image that the model is paying attention to when classifying the image.
Parameters:
(optional) model_name: str <name-of-the-model-to-load>, default = "saved_model.json"
(optional) train_data: str <name-of-training-data>, default = "asl_alphabet_train_subset"
(optional) unseen_image: str <name-of-unseen-image>, default = "unseen_img_test1.png"
Usage:
$ python use-model.py
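(Editor's addition, not in the original) The optional flags from Parameters can also be passed explicitly:
$ python use-model.py --model_name saved_model.json --train_data asl_alphabet_train_subset --unseen_image unseen_img_test1.png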
Output:
- unseen_image_superimposed_heatmap.png: superimposed heatmap on unseen image.
- unseen_image_prediction.txt: model prediction of unseen image.
"""
### DEPENDENCIES ###
# Core libraries
import os
import sys
sys.path.append(os.path.join(".."))
# Matplotlib, numpy, OpenCV
import matplotlib.pyplot as plt
import numpy as np
import cv2
# TensorFlow
import tensorflow as tf
from tensorflow.keras.preprocessing.image import (load_img, img_to_array)
from tensorflow.keras.applications.resnet import preprocess_input
from tensorflow.keras.models import model_from_json
from tensorflow.keras import backend as K
# argparse
import argparse
### MAIN FUNCTION ###
def main():
### ARGPARSE ###
# Initialize ArgumentParser class
ap = argparse.ArgumentParser()
# Argument 1: Model name
ap.add_argument("-m", "--model_name",
type = str,
required = False, # the argument is not required
help = "Name of the model",
default = "saved_model.json") # default name
# Argument 2: Training data
ap.add_argument("-t", "--train_data",
type = str,
required = False, # the argument is not required
help = "Name of training data folder",
default = "asl_alphabet_train_subset") # default is a subset of the training dataset
# Argument 3: Input image
ap.add_argument("-u", "--unseen_image",
type = str,
required = False, # the argument is not required
help = "Name of the image the model should classify",
default = "unseen_img_test1.png") # default unseen image provided in the unseen_images folder
# Parse arguments
args = vars(ap.parse_args())
# Save input parameters
model_name = args["model_name"]
train_data = os.path.join("..", "data", "subset_asl_sign_language", args["train_data"])
unseen_image = args["unseen_image"]
# Create output directory if it does not already exist
if not os.path.exists(os.path.join("..", "output")):
os.mkdir(os.path.join("..", "output"))
# Start message
print("\n[INFO] Initializing...")
# Instantiate the class
classifier = Loaded_model_classifier(train_data, unseen_image)
# Create list of label names from the directory names in the training data folder
labels = classifier.list_labels()
# Load the model
print(f"\n[INFO] Loading the CNN model, {model_name}, from 'output' directory...")
model = classifier.load_model(model_name)
# Classify input image
print(f"\n[INFO] Using the model to predict the class of {unseen_image}...")
label = classifier.classify_unseen_image(labels, model)
# Visualize feature map of network for input image
print(f"\n[INFO] Visualizing the feature map of the last convolutional layer of the network...")
classifier.visualize_feature_map(model)
# User message
print(f"\n[INFO] Done! The {unseen_image} has been classified as {label} and the feature map of the last convolutional layer of the network has been visualized and saved as {unseen_image}_superimposed_heatmap.png in 'output' directory\n")
# Creating classifier class
class Loaded_model_classifier:
def __init__(self, train_data, unseen_image):
# Receive inputs: train data and input image
self.train_data = train_data
self.unseen_image = unseen_image
def list_labels(self):
"""
This method defines the label names by listing the names of the folders within training directory without listing hidden files. It sorts the names alphabetically.
"""
# Create empty list
labels = []
# For every name in training directory
for name in os.listdir(self.train_data):
# If it does not start with . (which hidden files do)
if not name.startswith('.'):
labels.append(name)
# Sort labels alphabetically
labels = sorted(labels)
return labels
def load_model(self, model_name):
"""
This method loads the model and the model weights that are saved in the output directory.
"""
# Load JSON-file and create model
model_path = os.path.join("..", "output", model_name)
json_model = open(model_path, "r")
# Read file
loaded_file = json_model.read()
# Create model
loaded_model = model_from_json(loaded_file)
# Load weights into new model
loaded_model.load_weights(os.path.join("..", "output", "model_weights.h5"))
# Compile model
loaded_model.compile(loss='binary_crossentropy',
optimizer='adam',
metrics=['accuracy'])
return loaded_model
def classify_unseen_image(self, labels, model):
"""
This method takes an unseen image, performs some preprocessing to prepare it for the model, and predicts the class of the image using the model.
"""
# Define path
img_path = os.path.join("..", "data", "unseen_images", self.unseen_image)
# Load unseen image
image = load_img(img_path, target_size=(224, 224)) # using the same size as the images the model has been trained on
# Convert the image to a numpy array
image = img_to_array(image)
# Reshape the image, because the model expects a tensor of rank 4. The image goes from being 3-dimensional to 4-dimensional: (1, 224, 224, 3)
image = image.reshape((1, image.shape[0], image.shape[1], image.shape[2]))
# Prepare the image for the ResNet50 model
image = preprocess_input(image)
# Predict the class of the image
prediction = np.argmax(model.predict(image))
# Convert labels to be a dictionary which is needed to extract the label that corresponds to the prediction
labels = dict(zip(labels, range(len(labels))))
# Define function that finds the key (letter) that corresponds to the predicted value
def find_key(dictionary, value):
return {k for k, v in dictionary.items() if v == value}
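# Note (editor's addition): find_key returns a *set* of matching keys, e.g. {'A'},
# and that set is what gets formatted into the prediction message and text file below.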
# Extract letter that corresponds to the predicted value from the label dictionary
label = find_key(labels, prediction)
# Print the predicted class to the terminal
print(f"\nThe model predicts {self.unseen_image} to be the letter {label}")
# Save prediction as txt-file to output directory
with open(os.path.join("..", "output", f"{self.unseen_image}_prediction.txt"), "w") as f:
f.write(f"The predicted class of the {self.unseen_image} made by the model is {label}")
return label
def visualize_feature_map(self, model):
"""
This method visualizes the feature map of the last convolutional layer of the network.
"""
# Define path
img_path = os.path.join("..", "data", "unseen_images", self.unseen_image)
# Load image with dimensions corresponding to training images
img = load_img(img_path, target_size=(224, 224))
# Convert image to array
x = img_to_array(img)
# Convert to rank 4 tensor
x = np.expand_dims(x, axis=0)
# Preprocess to be in line with ResNet50 data
x = preprocess_input(x)
# Create activation heatmap for final layer. This is done by taking advantage of how the model learns through gradient descent. We use the gradients that have been learned through training, and we go the opposite way (rather than minimizing we are maximizing). Essentially, we make use of the gradients in the final layer to highlight which regions are particularly informative when predicting a given class.
with tf.GradientTape() as tape:
# Take the last convolutional layer in the network
last_conv_layer = model.get_layer('conv5_block3_out')
# Create a model that maps the input image to the activations of the last convolutional layer as well as the output predictions
iterate = tf.keras.models.Model([model.inputs],
[model.output, last_conv_layer.output])
# Compute the gradient of the top predicted class for the input image with respect to the activations of the last conv layer
# Take the gradients from the last layer
model_out, last_conv_layer = iterate(x)
# Find the class that has been predicted by the model
class_out = model_out[:, np.argmax(model_out[0])]
# Extract gradient of the output neuron of the last convolutional layer
grads = tape.gradient(class_out,
last_conv_layer)
# Vector of mean intensity of the gradient over a specific feature map channel
pooled_grads = K.mean(grads, axis=(0, 1, 2))
# Multiply each channel in the feature map array by "how important this channel is" with regard to the top predicted class. Then sum all the channels to obtain the heatmap class activation
heatmap = tf.reduce_mean(tf.multiply(pooled_grads, last_conv_layer), axis=-1)
heatmap = np.maximum(heatmap, 0)
heatmap /= np.max(heatmap)
heatmap = heatmap.reshape((7,7))
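# Note (editor's addition): (7, 7) is the spatial size of ResNet50's last convolutional
# block for 224x224 inputs (224 / 32), which is why the pooled heatmap reshapes cleanly here.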
plt.matshow(heatmap)
# Load unseen image with OpenCV
img = cv2.imread(img_path)
# Make heatmap semi-transparent
intensity = 0.5
# Resize the heatmap to be the original dimensions of the input
heatmap = cv2.resize(heatmap, (img.shape[1], img.shape[0]))
# Apply colormap
heatmap = cv2.applyColorMap(np.uint8(255*heatmap), cv2.COLORMAP_JET)
# Multiply heatmap by intensity and 'add' this on top of the original image
superimposed = (heatmap * intensity) + img
# Save the superimposed image to output directory
cv2.imwrite(os.path.join("..", "output", f"{self.unseen_image}_superimposed_heatmap.png"), superimposed)
# User message
print(f"\n[INFO] The feature map has now been visualized and superimposed on {self.unseen_image}. Find image as {self.unseen_image}_superimposed_heatmap.png in 'output' directory...")
# Define behaviour when called from command line
if __name__=="__main__":
main() | #!/usr/bin/env python
"""
Info: This script loads the model trained in the cnn-asl.py script and enables the user to use it for classifying unseen ASL letters. It also visualizes the feature map of the last convolutional layer of the network to enable the user to get an insight into exactly which parts of the original image that the model is paying attention to when classifying the image.
Parameters:
(optional) model_name: str <name-of-the-model-to-load>, default = "saved_model.json"
(optional) train_data: str <name-of-training-data>, default = "asl_alphabet_train_subset"
(optional) unseen_image: str <name-of-unseen-image>, default = "unseen_img_test1.png"
Usage:
$ python use-model.py
Output:
- unseen_image_superimposed_heatmap.png: superimposed heatmap on unseen image.
- unseen_image_prediction.txt: model prediction of unseen image.
"""
### DEPENDENCIES ###
# Core libraries
import os
import sys
sys.path.append(os.path.join(".."))
# Matplotlib, numpy, OpenCV
import matplotlib.pyplot as plt
import numpy as np
import cv2
# TensorFlow
import tensorflow as tf
from tensorflow.keras.preprocessing.image import (load_img, img_to_array)
from tensorflow.keras.applications.resnet import preprocess_input
from tensorflow.keras.models import model_from_json
from tensorflow.keras import backend as K
# argparse
import argparse
### MAIN FUNCTION ###
def main():
### ARGPARSE ###
# Initialize ArgumentParser class
ap = argparse.ArgumentParser()
# Argument 1: Model name
ap.add_argument("-m", "--model_name",
type = str,
required = False, # the argument is not required
help = "Name of the model",
default = "saved_model.json") # default name
# Argument 2: Training data
ap.add_argument("-t", "--train_data",
type = str,
required = False, # the argument is not required
help = "Name of training data folder",
default = "asl_alphabet_train_subset") # default is a subset of the training dataset
# Argument 3: Input image
ap.add_argument("-u", "--unseen_image",
type = str,
required = False, # the argument is not required
help = "Name of the image the model should classify",
default = "unseen_img_test1.png") # default unseen image provided in the unseen_images folder
# Parse arguments
args = vars(ap.parse_args())
# Save input parameters
model_name = args["model_name"]
train_data = os.path.join("..", "data", "subset_asl_sign_language", args["train_data"])
unseen_image = args["unseen_image"]
# Create output directory if it does not already exist
if not os.path.exists(os.path.join("..", "output")):
os.mkdir(os.path.join("..", "output"))
# Start message
print("\n[INFO] Initializing...")
# Instantiate the class
classifier = Loaded_model_classifier(train_data, unseen_image)
# Create list of label names from the directory names in the training data folder
labels = classifier.list_labels()
# Load the model
print(f"\n[INFO] Loading the CNN model, {model_name}, from 'output' directory...")
model = classifier.load_model(model_name)
# Classify input image
print(f"\n[INFO] Using the model to predict the class of {unseen_image}...")
label = classifier.classify_unseen_image(labels, model)
# Visualize feature map of network for input image
print(f"\n[INFO] Visualizing the feature map of the last convolutional layer of the network...")
classifier.visualize_feature_map(model)
# User message
print(f"\n[INFO] Done! The {unseen_image} has been classified as {label} and the feature map of the last convolutional layer of the network has been visualized and saved as {unseen_image}_superimposed_heatmap.png in 'output' directory\n")
# Creating classifier class
class Loaded_model_classifier:
def __init__(self, train_data, unseen_image):
# Receive inputs: train data and input image
self.train_data = train_data
self.unseen_image = unseen_image
def list_labels(self):
"""
This method defines the label names by listing the names of the folders within training directory without listing hidden files. It sorts the names alphabetically.
"""
# Create empty list
labels = []
# For every name in training directory
for name in os.listdir(self.train_data):
# If it does not start with . (which hidden files do)
if not name.startswith('.'):
labels.append(name)
# Sort labels alphabetically
labels = sorted(labels)
return labels
def load_model(self, model_name):
"""
This method loads the model and the model weights that are saved in the output directory.
"""
# Load JSON-file and create model
model_path = os.path.join("..", "output", model_name)
json_model = open(model_path, "r")
# Read file
loaded_file = json_model.read()
# Create model
loaded_model = model_from_json(loaded_file)
# Load weights into new model
loaded_model.load_weights(os.path.join("..", "output", "model_weights.h5"))
# Compile model
loaded_model.compile(loss='binary_crossentropy',
optimizer='adam',
metrics=['accuracy'])
return loaded_model
def classify_unseen_image(self, labels, model):
"""
This method takes an unseen image, performs some preprocessing to prepare it for the model, and predicts the class of the image using the model.
"""
# Define path
img_path = os.path.join("..", "data", "unseen_images", self.unseen_image)
# Load unseen image
image = load_img(img_path, target_size=(224, 224)) # using the same size as the images the model has been trained on
# Convert the image to a numpy array
image = img_to_array(image)
# Reshape the image, because the model expects a tensor of rank 4. The image goes from being 3-dimensional to 4-dimensional: (1, 224, 224, 3)
image = image.reshape((1, image.shape[0], image.shape[1], image.shape[2]))
# Prepare the image for the ResNet50 model
image = preprocess_input(image)
# Predict the class of the image
prediction = np.argmax(model.predict(image))
# Convert labels to be a dictionary which is needed to extract the label that corresponds to the prediction
labels = dict(zip(labels, range(len(labels))))
# Define function that finds the key (letter) that corresponds to the predicted value
def find_key(dictionary, value):
return {k for k, v in dictionary.items() if v == value}
# Extract letter that corresponds to the predicted value from the label dictionary
label = find_key(labels, prediction)
# Print the predicted class to the terminal
print(f"\nThe model predicts {self.unseen_image} to be the letter {label}")
# Save prediction as txt-file to output directory
with open(os.path.join("..", "output", f"{self.unseen_image}_prediction.txt"), "w") as f:
f.write(f"The predicted class of the {self.unseen_image} made by the model is {label}")
return label
def visualize_feature_map(self, model):
"""
This method visualizes the feature map of the last convolutional layer of the network.
"""
# Define path
img_path = os.path.join("..", "data", "unseen_images", self.unseen_image)
# Load image with dimensions corresponding to training images
img = load_img(img_path, target_size=(224, 224))
# Convert image to array
x = img_to_array(img)
# Convert to rank 4 tensor
x = np.expand_dims(x, axis=0)
# Preprocess to be in line with ResNet50 data
x = preprocess_input(x)
# Create activation heatmap for final layer. This is done by taking advantage of how the model learns through gradient descent. We use the gradients that have been learned through training, and we go the opposite way (rather than minimizing we are maximizing). Essentially, we make use of the gradients in the final layer to highlight which regions are particularly informative when predicting a given class.
with tf.GradientTape() as tape:
# Take the last convolutional layer in the network
last_conv_layer = model.get_layer('conv5_block3_out')
# Create a model that maps the input image to the activations of the last convolutional layer as well as the output predictions
iterate = tf.keras.models.Model([model.inputs],
[model.output, last_conv_layer.output])
# Compute the gradient of the top predicted class for the input image with respect to the activations of the last conv layer
# Take the gradients from the last layer
model_out, last_conv_layer = iterate(x)
# Find the class that has been predicted by the model
class_out = model_out[:, np.argmax(model_out[0])]
# Extract gradient of the output neuron of the last convolutional layer
grads = tape.gradient(class_out,
last_conv_layer)
# Vector of mean intensity of the gradient over a specific feature map channel
pooled_grads = K.mean(grads, axis=(0, 1, 2))
# Multiply each channel in the feature map array by "how important this channel is" with regard to the top predicted class. Then sum all the channels to obtain the heatmap class activation
heatmap = tf.reduce_mean(tf.multiply(pooled_grads, last_conv_layer), axis=-1)
heatmap = np.maximum(heatmap, 0)
heatmap /= np.max(heatmap)
heatmap = heatmap.reshape((7,7))
plt.matshow(heatmap)
# Load unseen image with OpenCV
img = cv2.imread(img_path)
# Make heatmap semi-transparent
intensity = 0.5
# Resize the heatmap to be the original dimensions of the input
heatmap = cv2.resize(heatmap, (img.shape[1], img.shape[0]))
# Apply colormap
heatmap = cv2.applyColorMap(np.uint8(255*heatmap), cv2.COLORMAP_JET)
# Multiply heatmap by intensity and 'add' this on top of the original image
superimposed = (heatmap * intensity) + img
# Save the superimposed image to output directory
cv2.imwrite(os.path.join("..", "output", f"{self.unseen_image}_superimposed_heatmap.png"), superimposed)
# User message
print(f"\n[INFO] The feature map has now been visualized and superimposed on {self.unseen_image}. Find image as {self.unseen_image}_superimposed_heatmap.png in 'output' directory...")
# Define behaviour when called from command line
if __name__=="__main__":
    main()
examples/hello_world/src/Algorithm.py | algorithmiaio/algorithmia-adk-python | 4 | 8238 | from Algorithmia import ADK
# API calls will begin at the apply() method, with the request body passed as 'input'
# For more details, see algorithmia.com/developers/algorithm-development/languages
def apply(input):
# If your apply function uses state that's loaded into memory via load, you can pass that loaded state to your apply
# function by defining an additional "globals" parameter in your apply function; but it's optional!
return "hello {}".format(str(input))
# This turns your library code into an algorithm that can run on the platform.
# If you intend to use loading operations, remember to pass a `load` function as a second variable.
algorithm = ADK(apply)
# The 'init()' function actually starts the algorithm, you can follow along in the source code
# to see how everything works.
algorithm.init("Algorithmia")
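# A hedged sketch (not part of the original file) of the load/apply pattern mentioned in
# the comments above: pass a `load` function as the second argument to ADK and accept the
# loaded state through an extra parameter in apply. Check the algorithmia-adk-python docs
# for the exact signatures; the names below are illustrative only.
#
#     def load_model():
#         # Runs once at startup; whatever it returns is handed to apply() as `globals`.
#         return {"greeting": "hello"}
#
#     def apply_with_state(input, globals):
#         return "{} {}".format(globals["greeting"], str(input))
#
#     algorithm = ADK(apply_with_state, load_model)
#     algorithm.init("Algorithmia")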
src/gluonts/nursery/autogluon_tabular/estimator.py | Xiaoxiong-Liu/gluon-ts | 2,648 | 8239 | # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
import logging
from typing import Callable, Optional, List, Tuple
import pandas as pd
from autogluon.tabular import TabularPredictor as AutogluonTabularPredictor
from gluonts.core.component import validated
from gluonts.dataset.common import Dataset
from gluonts.dataset.util import to_pandas
from gluonts.model.estimator import Estimator
from gluonts.time_feature import (
TimeFeature,
get_lags_for_frequency,
time_features_from_frequency_str,
)
from .predictor import (
TabularPredictor,
mean_abs_scaling,
get_features_dataframe,
)
logger = logging.getLogger(__name__)
class TabularEstimator(Estimator):
"""An estimator that trains an Autogluon Tabular model for time series
forecasting.
Additional keyword arguments to the constructor, other than the ones documented
below, will be passed on to Autogluon Tabular's ``fit`` method used for training
the model.
Parameters
----------
freq
Frequency of the data to handle
prediction_length
Prediction length
lag_indices
List of indices of the lagged observations to use as features. If
None, this will be set automatically based on the frequency.
time_features
List of time features to be used. If None, this will be set automatically
based on the frequency.
scaling
Function to be used to scale time series. This should take a pd.Series object
as input, and return a scaled pd.Series and the scale (float). By default,
this divides a series by the mean of its absolute value.
batch_size
Batch size of the resulting predictor; this is just used at prediction
time, and does not affect training in any way.
disable_auto_regression
        Whether to forcefully disable auto-regression in the model. If ``True``,
this will remove any lag index which is smaller than ``prediction_length``.
This will make predictions more efficient, but may impact their accuracy.
quantiles_to_predict
        List of quantiles to forecast. If quantile values are provided, the model is
        trained as an AutoGluon quantile-regression model and predicts those quantiles;
        if None, the model is trained as a regular point-forecast (regression) model.
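    Examples
    --------
    A minimal usage sketch; the custom scaling helper and the dataset variable below
    are illustrative only and are not provided by this module::

        def max_abs_scaling(series: pd.Series) -> Tuple[pd.Series, float]:
            # scale by the maximum absolute value instead of the default mean
            scale = max(float(series.abs().max()), 1e-10)
            return series / scale, scale

        estimator = TabularEstimator(
            freq="H",
            prediction_length=24,
            scaling=max_abs_scaling,
            time_limit=120,  # extra kwargs are forwarded to AutoGluon Tabular's fit()
        )
        predictor = estimator.train(training_data=train_ds)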
"""
@validated()
def __init__(
self,
freq: str,
prediction_length: int,
lag_indices: Optional[List[int]] = None,
time_features: Optional[List[TimeFeature]] = None,
scaling: Callable[
[pd.Series], Tuple[pd.Series, float]
] = mean_abs_scaling,
batch_size: Optional[int] = 32,
disable_auto_regression: bool = False,
last_k_for_val: Optional[int] = None,
quantiles_to_predict: Optional[List[float]] = None,
eval_metric: str = "mean_absolute_error",
**kwargs,
) -> None:
super().__init__()
self.freq = freq
self.prediction_length = prediction_length
self.lag_indices = (
lag_indices
if lag_indices is not None
else get_lags_for_frequency(self.freq)
)
self.time_features = (
time_features
if time_features is not None
else time_features_from_frequency_str(self.freq)
)
self.batch_size = batch_size
self.disable_auto_regression = disable_auto_regression
self.scaling = scaling
self.last_k_for_val = last_k_for_val
self.eval_metric = eval_metric
self.quantiles_to_predict = quantiles_to_predict
if self.disable_auto_regression:
self.lag_indices = [
lag_idx
for lag_idx in self.lag_indices
if lag_idx >= self.prediction_length
]
default_kwargs = {
"time_limit": 60,
# "excluded_model_types": ["KNN", "XT", "RF"],
"presets": [
"high_quality_fast_inference_only_refit",
"optimize_for_deployment",
],
"auto_stack": True,
}
self.kwargs = {**default_kwargs, **kwargs}
def train(
self,
training_data: Dataset,
validation_data: Optional[Dataset] = None,
) -> TabularPredictor:
kwargs_override = {}
dfs = [
get_features_dataframe(
series=self.scaling(to_pandas(entry))[0],
time_features=self.time_features,
lag_indices=self.lag_indices,
)
for entry in training_data
]
if validation_data is not None or self.last_k_for_val is not None:
kwargs_override["auto_stack"] = False
logger.warning(
"Auto Stacking is turned off "
"as validation dataset is provided before input into Tabular Predictor."
)
if validation_data is not None:
logger.log(20, "Validation dataset is directly provided.")
validation_dfs = [
get_features_dataframe(
series=self.scaling(to_pandas(entry))[0],
time_features=self.time_features,
lag_indices=self.lag_indices,
)
for entry in validation_data
]
train_df = pd.concat(dfs)
val_df = pd.concat(validation_dfs)
elif self.last_k_for_val is not None:
logger.log(
20,
f"last_k_for_val is provided, choosing last {self.last_k_for_val} of each time series as validation set.",
)
train_dfs = [
tmp_df.iloc[: -self.last_k_for_val, :] for tmp_df in dfs
]
validation_dfs = [
tmp_df.iloc[-self.last_k_for_val :, :] for tmp_df in dfs
]
train_df = pd.concat(train_dfs)
val_df = pd.concat(validation_dfs)
else:
logger.log(
20,
"No validation dataset is provided, will let TabularPredictor do the splitting automatically,"
"Note that this might break the time order of time series data.",
)
train_df = pd.concat(dfs)
val_df = None
if self.quantiles_to_predict is not None:
ag_model = AutogluonTabularPredictor(
label="target",
problem_type="quantile",
quantile_levels=self.quantiles_to_predict,
).fit(
train_df,
tuning_data=val_df,
**{**self.kwargs, **kwargs_override},
)
else:
ag_model = AutogluonTabularPredictor(
label="target",
problem_type="regression",
eval_metric=self.eval_metric,
).fit(
train_df,
tuning_data=val_df,
**{**self.kwargs, **kwargs_override},
)
return TabularPredictor(
ag_model=ag_model,
freq=self.freq,
prediction_length=self.prediction_length,
time_features=self.time_features,
lag_indices=self.lag_indices,
scaling=self.scaling,
batch_size=self.batch_size,
quantiles_to_predict=self.quantiles_to_predict,
        )
src/dcar/errors.py | andreas19/dcar | 1 | 8240 | <filename>src/dcar/errors.py
"""Errors module."""
__all__ = [
'Error',
'AddressError',
'AuthenticationError',
'TransportError',
'ValidationError',
'RegisterError',
'MessageError',
'DBusError',
'SignatureError',
'TooLongError',
]
class Error(Exception):
"""Base class."""
class AddressError(Error):
"""Raised for errors in server addresses."""
class AuthenticationError(Error):
"""Raised when authentication failed."""
class TransportError(Error):
"""Raised for transport related errors."""
class ValidationError(Error):
"""Raised when validation failed."""
class RegisterError(Error):
"""Raised when a signal or method could not be registered."""
class MessageError(Error):
"""Raised for errors in messages."""
class DBusError(MessageError):
"""Raised for errors from ERROR messages."""
class SignatureError(MessageError):
"""Raised for errors in signatures."""
class TooLongError(MessageError):
"""Raised when a message, an array, a name etc. is too long."""
| <filename>src/dcar/errors.py
"""Errors module."""
__all__ = [
'Error',
'AddressError',
'AuthenticationError',
'TransportError',
'ValidationError',
'RegisterError',
'MessageError',
'DBusError',
'SignatureError',
'TooLongError',
]
class Error(Exception):
"""Base class."""
class AddressError(Error):
"""Raised for errors in server addresses."""
class AuthenticationError(Error):
"""Raised when authentication failed."""
class TransportError(Error):
"""Raised for transport related errors."""
class ValidationError(Error):
"""Raised when validation failed."""
class RegisterError(Error):
"""Raised when a signal or method could not be registered."""
class MessageError(Error):
"""Raised for errors in messages."""
class DBusError(MessageError):
"""Raised for errors from ERROR messages."""
class SignatureError(MessageError):
"""Raised for errors in signatures."""
class TooLongError(MessageError):
"""Raised when a message, an array, a name etc. is too long."""
| en | 0.832178 | Errors module. Base class. Raised for errors in server addresses. Raised when authentication failed. Raised for transport related errors. Raised when validation failed. Raised when a signal or method could not be registered. Raised for errors in messages. Raised for errors from ERROR messages. Raised for errors in signatures. Raised when a message, an array, a name etc. is too long. | 2.538429 | 3 |
Packs/CortexXDR/Integrations/XDR_iocs/XDR_iocs_test.py | SergeBakharev/content | 1 | 8241 | from XDR_iocs import *
import pytest
from freezegun import freeze_time
Client.severity = 'INFO'
client = Client({'url': 'test'})
def d_sort(in_dict):
return sorted(in_dict.items())
class TestGetHeaders:
@freeze_time('2020-06-01T00:00:00Z')
def test_sanity(self, mocker):
"""
Given:
- API key
- API key ID
Then:
            - Verify the headers are created correctly.
"""
params = {
"apikey_id": "7",
"apikey": "<KEY>" # noqa: E501
}
headers = {
'Authorization': 'da94963b561e3c95899d843b1284cecf410606e9e809be528ec1cf03880c6e9e',
'x-iocs-source': 'xsoar',
'x-xdr-auth-id': '7',
'x-xdr-nonce': '1111111111111111111111111111111111111111111111111111111111111111',
'x-xdr-timestamp': '1590969600000'
}
mocker.patch('secrets.choice', return_value='1')
output = get_headers(params)
assert output == headers, f'get_headers({params})\n\treturns: {d_sort(output)}\n\tinstead: {d_sort(headers)}'
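        # Note on the expected values: the nonce is a run of '1' characters only because
        # secrets.choice is patched above, and the timestamp is the frozen time
        # (2020-06-01T00:00:00Z) expressed in epoch milliseconds. The Authorization digest
        # presumably hashes the API key together with the nonce and timestamp (as in the
        # documented Cortex XDR "advanced" API-key scheme); that is an assumption about
        # get_headers' internals, not something this test asserts directly.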
def test_empty_case(self):
"""
Given:
Empty params
Then:
get_headers will not raise error
"""
get_headers({})
class TestHttpRequest:
class Res:
content = 'error'.encode()
def __init__(self, code):
self.status_code = code
@staticmethod
def json():
return {}
XDR_SERVER_ERROR = 500
INVALID_CREDS = 401
LICENSE_ERROR = 402
PERMISSION_ERROR = 403
OK = 200
data_test_http_request_error_codes = [
(OK, {}),
(XDR_SERVER_ERROR, 'XDR internal server error.\t(error)'),
(INVALID_CREDS, 'Unauthorized access. An issue occurred during authentication. This can indicate an incorrect key, id, or other invalid authentication parameters.\t(error)'), # noqa: E501
(LICENSE_ERROR, 'Unauthorized access. User does not have the required license type to run this API.\t(error)'),
(PERMISSION_ERROR, 'Unauthorized access. The provided API key does not have the required RBAC permissions to run this API.\t(error)') # noqa: E501
]
@pytest.mark.parametrize('res, expected_output', data_test_http_request_error_codes)
def test_http_request_error_codes(self, res, expected_output, mocker):
"""
Given:
- Status code
When:
- http_request returns this status code.
Then:
- Verify error/success format.
"""
mocker.patch('requests.post', return_value=self.Res(res))
try:
output = client.http_request('', {})
except DemistoException as error:
output = str(error)
assert output == expected_output, f'status code {res}\n\treturns: {output}\n\tinstead: {expected_output}'
class TestGetRequestsKwargs:
def test_with_file(self, mocker):
"""
Given:
- file to upload
Then:
- Verify output format.
"""
def override_open(open_path, *_other):
return open_path
mocker.patch('builtins.open', side_effect=override_open)
path = '/Users/some_user/some_dir/some_file.file'
output = get_requests_kwargs(file_path=path)
expected_output = {'files': [('file', ('iocs.json', path, 'application/json'))]}
assert output == expected_output, f'get_requests_kwargs(file_path={path})\n\treturns: {output}\n\t instead: {expected_output}' # noqa: E501
def test_with_json(self):
"""
Given:
- simple json
Then:
            - the JSON payload is ready to be sent
"""
_json = {'test': 'test'}
output = get_requests_kwargs(_json=_json)
expected_output = {'data': '{"request_data": {"test": "test"}}'}
assert output == expected_output, f'get_requests_kwargs(_json={_json})\n\treturns: {output}\n\t instead: {expected_output}' # noqa: E501
class TestPrepareCommands:
def test_prepare_get_changes(self):
"""
Given:
- get changes command
Then:
- Verify url and json format.
"""
ts = int(datetime.now(timezone.utc).timestamp() * 1000)
url_suffix, _json = prepare_get_changes(ts)
assert url_suffix == 'get_changes', f'prepare_get_changes\n\treturns url_suffix: {url_suffix}\n\tinstead url_suffix: get_changes' # noqa: E501
assert _json == {'last_update_ts': ts}
def test_prepare_enable_iocs(self):
"""
Given:
- enable iocs command
Then:
- Verify url and json format.
"""
url_suffix, iocs = prepare_enable_iocs('8.8.8.8,domain.com')
assert url_suffix == 'enable_iocs', f'prepare_enable_iocs\n\treturns url_suffix: {url_suffix}\n\tinstead url_suffix: enable_iocs' # noqa: E501
assert iocs == ['8.8.8.8', 'domain.com']
def test_prepare_disable_iocs(self):
"""
Given:
- disable iocs command
Then:
- Verify url and json format.
"""
url_suffix, iocs = prepare_disable_iocs('8.8.8.8,domain.com')
assert url_suffix == 'disable_iocs', f'prepare_disable_iocs\n\treturns url_suffix: {url_suffix}\n\tinstead url_suffix: disable_iocs' # noqa: E501
assert iocs == ['8.8.8.8', 'domain.com']
class TestCreateFile:
path = 'test_data/sync_file_test.json'
data_test_create_file_sync = [
('Domain_iocs', 'Domain_sync_file'),
('IP_iocs', 'IP_sync_file'),
('File_iocs', 'File_sync_file')
]
data_test_create_file_iocs_to_keep = [
('Domain_iocs', 'Domain_iocs_to_keep_file'),
('IP_iocs', 'IP_iocs_to_keep_file'),
('File_iocs', 'File_iocs_to_keep_file')
]
def setup(self):
# creates the file
with open(TestCreateFile.path, 'w') as _file:
_file.write('')
def teardown(self):
# removes the file when done
os.remove(TestCreateFile.path)
@staticmethod
def get_file(path):
with open(path, 'r') as _file:
return _file.read()
@staticmethod
def get_all_iocs(go_over, extension):
iocs = []
total = 0
data = []
for in_iocs, out_iocs in go_over:
ioc = json.loads(TestCreateFile.get_file(f'test_data/{in_iocs}.json'))
iocs.extend(ioc['iocs'])
total += ioc['total']
data.append(TestCreateFile.get_file(f'test_data/{out_iocs}.{extension}'))
all_iocs = {'iocs': iocs, 'total': total}
all_data = ''.join(data)
return all_iocs, all_data
def test_create_file_sync_without_iocs(self, mocker):
"""
Given:
- Sync command
When:
            - there are no IOCs
Then:
- Verify sync file data.
"""
mocker.patch.object(demisto, 'searchIndicators', return_value={})
create_file_sync(TestCreateFile.path)
data = self.get_file(TestCreateFile.path)
expected_data = ''
assert data == expected_data, f'create_file_sync with no iocs\n\tcreates: {data}\n\tinstead: {expected_data}'
@pytest.mark.parametrize('in_iocs, out_iocs', data_test_create_file_sync)
def test_create_file_sync(self, in_iocs, out_iocs, mocker):
"""
Given:
- Sync command
When:
            - the IOCs are all of one specific type.
Then:
- Verify sync file data.
"""
mocker.patch.object(demisto, 'searchIndicators', return_value=json.loads(self.get_file(f'test_data/{in_iocs}.json'))) # noqa: E501
create_file_sync(TestCreateFile.path)
data = self.get_file(TestCreateFile.path)
expected_data = self.get_file(f'test_data/{out_iocs}.txt')
assert data == expected_data, f'create_file_sync with {in_iocs} iocs\n\tcreates: {data}\n\tinstead: {expected_data}'
def test_create_file_sync_all_types(self, mocker):
"""
Given:
- Sync command
When:
            - IOCs of all types
Then:
- Verify sync file data.
"""
all_iocs, expected_data = self.get_all_iocs(self.data_test_create_file_sync, 'txt')
mocker.patch.object(demisto, 'searchIndicators', return_value=all_iocs)
create_file_sync(TestCreateFile.path)
data = self.get_file(TestCreateFile.path)
assert data == expected_data, f'create_file_sync with all iocs\n\tcreates: {data}\n\tinstead: {expected_data}'
data_test_create_file_with_empty_indicators = [
{},
{'value': '11.11.11.11'},
{'indicator_type': 'IP'}
]
@pytest.mark.parametrize('defective_indicator', data_test_create_file_with_empty_indicators)
def test_create_file_sync_with_empty_indicators(self, defective_indicator, mocker):
"""
Given:
- Sync command
When:
            - some of the IOCs do not have all of the required data
Then:
- Verify sync file data.
"""
all_iocs, expected_data = self.get_all_iocs(self.data_test_create_file_sync, 'txt')
all_iocs['iocs'].append(defective_indicator)
all_iocs['total'] += 1
mocker.patch.object(demisto, 'searchIndicators', return_value=all_iocs)
warnings = mocker.patch.object(demisto, 'debug')
create_file_sync(TestCreateFile.path)
data = self.get_file(TestCreateFile.path)
assert data == expected_data, f'create_file_sync with all iocs\n\tcreates: {data}\n\tinstead: {expected_data}'
error_msg = warnings.call_args.args[0]
assert error_msg.startswith("unexpected IOC format in key: '"), f"create_file_sync empty message\n\tstarts: {error_msg}\n\tinstead: unexpected IOC format in key: '" # noqa: E501
assert error_msg.endswith(f"', {str(defective_indicator)}"), f"create_file_sync empty message\n\tends: {error_msg}\n\tinstead: ', {str(defective_indicator)}" # noqa: E501
def test_create_file_iocs_to_keep_without_iocs(self, mocker):
"""
Given:
- iocs to keep command
When:
            - there are no IOCs
Then:
- Verify iocs to keep file data.
"""
mocker.patch.object(demisto, 'searchIndicators', return_value={})
create_file_iocs_to_keep(TestCreateFile.path)
data = self.get_file(TestCreateFile.path)
expected_data = ''
assert data == expected_data, f'create_file_iocs_to_keep with no iocs\n\tcreates: {data}\n\tinstead: {expected_data}'
@pytest.mark.parametrize('in_iocs, out_iocs', data_test_create_file_iocs_to_keep)
def test_create_file_iocs_to_keep(self, in_iocs, out_iocs, mocker):
"""
Given:
- iocs to keep command
When:
            - the IOCs are all of one specific type.
Then:
- Verify iocs to keep file data.
"""
mocker.patch.object(demisto, 'searchIndicators', return_value=json.loads(
self.get_file(f'test_data/{in_iocs}.json')))
create_file_iocs_to_keep(TestCreateFile.path)
data = self.get_file(TestCreateFile.path)
expected_data = self.get_file(f'test_data/{out_iocs}.txt')
assert data == expected_data, f'create_file_iocs_to_keep with {in_iocs} iocs\n\tcreates: {data}\n\tinstead: {expected_data}' # noqa: E501
def test_create_file_iocs_to_keep_all_types(self, mocker):
"""
Given:
- iocs to keep command
When:
            - IOCs of all types
Then:
- Verify iocs to keep file data.
"""
all_iocs, expected_data = self.get_all_iocs(self.data_test_create_file_iocs_to_keep, 'txt')
mocker.patch.object(demisto, 'searchIndicators', return_value=all_iocs)
create_file_iocs_to_keep(TestCreateFile.path)
data = self.get_file(TestCreateFile.path)
assert data == expected_data, f'create_file_iocs_to_keep with all iocs\n\tcreates: {data}\n\tinstead: {expected_data}'
class TestDemistoIOCToXDR:
data_test_demisto_expiration_to_xdr = [
(None, -1),
('', -1),
('0001-01-01T00:00:00Z', -1),
('2020-06-03T00:00:00Z', 1591142400000)
]
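    # The millisecond values above are simply the UTC timestamp multiplied by 1000, e.g.
    # int(datetime(2020, 6, 3, tzinfo=timezone.utc).timestamp() * 1000) == 1591142400000.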
@pytest.mark.parametrize('demisto_expiration, xdr_expiration', data_test_demisto_expiration_to_xdr)
def test_demisto_expiration_to_xdr(self, demisto_expiration, xdr_expiration):
"""
Given:
- demisto indicator expiration
Then:
- Verify XDR expiration.
"""
output = demisto_expiration_to_xdr(demisto_expiration)
assert xdr_expiration == output, f'demisto_expiration_to_xdr({demisto_expiration})\n\treturns: {output}\n\tinstead: {xdr_expiration}' # noqa: E501
data_test_demisto_reliability_to_xdr = [
(None, 'F'),
('A - Completely reliable', 'A'),
('B - Usually reliable', 'B'),
('C - Fairly reliable', 'C'),
('D - Not usually reliable', 'D'),
('E - Unreliable', 'E'),
('F - Reliability cannot be judged', 'F')
]
@pytest.mark.parametrize('demisto_reliability, xdr_reliability', data_test_demisto_reliability_to_xdr)
def test_demisto_reliability_to_xdr(self, demisto_reliability, xdr_reliability):
"""
Given:
- demisto indicator reliability
Then:
- Verify XDR reliability.
"""
output = demisto_reliability_to_xdr(demisto_reliability)
assert output == xdr_reliability, f'demisto_reliability_to_xdr({demisto_reliability})\n\treturns: {output}\n\tinstead: {xdr_reliability}' # noqa: E501
data_test_demisto_types_to_xdr = [
('File', 'HASH'),
('IP', 'IP'),
('Domain', 'DOMAIN_NAME')
]
@pytest.mark.parametrize('demisto_type, xdr_type', data_test_demisto_types_to_xdr)
def test_demisto_types_to_xdr(self, demisto_type, xdr_type):
"""
Given:
- demisto indicator type
Then:
- Verify XDR type.
"""
output = demisto_types_to_xdr(demisto_type)
assert output == xdr_type, f'demisto_reliability_to_xdr({demisto_type})\n\treturns: {output}\n\tinstead: {xdr_type}'
data_test_demisto_vendors_to_xdr = [
(
{'moduleID': {'sourceBrand': 'test', 'reliability': 'A - Completely reliable', 'score': 2}},
{'vendor_name': 'test', 'reputation': 'SUSPICIOUS', 'reliability': 'A'}
),
(
{'moduleID': {'reliability': 'A - Completely reliable', 'score': 2}},
{'vendor_name': 'moduleID', 'reputation': 'SUSPICIOUS', 'reliability': 'A'}
),
(
{'moduleID': {'sourceBrand': 'test', 'score': 2}},
{'vendor_name': 'test', 'reputation': 'SUSPICIOUS', 'reliability': 'F'}
),
(
{'moduleID': {'reliability': 'A - Completely reliable', 'score': 0}},
{'vendor_name': 'moduleID', 'reputation': 'UNKNOWN', 'reliability': 'A'}
)
]
@pytest.mark.parametrize('demisto_vendor, xdr_vendor', data_test_demisto_vendors_to_xdr)
def test_demisto_vendors_to_xdr(self, demisto_vendor, xdr_vendor):
"""
Given:
            - demisto indicator vendor reports.
Then:
- Verify XDR vendors format.
"""
output = demisto_vendors_to_xdr(demisto_vendor)[0]
assert output == xdr_vendor, f'demisto_vendors_to_xdr({demisto_vendor})\n\treturns: {d_sort(output)}\n\tinstead: {d_sort(xdr_vendor)}' # noqa: E501
data_test_demisto_ioc_to_xdr = [
(
{'value': '11.11.11.11', 'indicator_type': 'IP', 'score': 2},
{'expiration_date': -1, 'indicator': '11.11.11.11', 'reputation': 'SUSPICIOUS', 'severity': 'INFO',
'type': 'IP'}
),
(
{'value': '11.11.11.11', 'indicator_type': 100, 'score': 2},
{'expiration_date': -1, 'indicator': '11.11.11.11', 'reputation': 'SUSPICIOUS', 'severity': 'INFO', 'type': '100'}
),
(
{'value': '11.11.11.11', 'indicator_type': 'IP'},
{'expiration_date': -1, 'indicator': '11.11.11.11', 'reputation': 'UNKNOWN', 'severity': 'INFO', 'type': 'IP'}
),
(
{'value': '11.11.11.11', 'indicator_type': 'IP', 'expiration': '2020-06-03T00:00:00Z'},
{'expiration_date': 1591142400000, 'indicator': '11.11.11.11', 'reputation': 'UNKNOWN', 'severity': 'INFO', 'type': 'IP'} # noqa: E501
),
(
{'value': '11.11.11.11', 'indicator_type': 'IP', 'comments': [{'type': 'IndicatorCommentTimeLine', 'content': 'test'}]}, # noqa: E501
{'expiration_date': -1, 'indicator': '11.11.11.11', 'reputation': 'UNKNOWN', 'severity': 'INFO', 'type': 'IP'}
),
(
{'value': '11.11.11.11', 'indicator_type': 'IP', 'comments': [{'type': 'IndicatorCommentRegular', 'content': 'test'}]}, # noqa: E501
{'expiration_date': -1, 'indicator': '11.11.11.11', 'reputation': 'UNKNOWN', 'severity': 'INFO', 'type': 'IP', 'comment': 'test'} # noqa: E501
),
(
{'value': '11.11.11.11', 'indicator_type': 'IP', 'comments': [{'type': 'IndicatorCommentRegular', 'content': 'test'}, {'type': 'IndicatorCommentRegular', 'content': 'this is the comment'}]}, # noqa: E501
{'expiration_date': -1, 'indicator': '11.11.11.11', 'reputation': 'UNKNOWN', 'severity': 'INFO', 'type': 'IP', 'comment': 'this is the comment'} # noqa: E501
),
(
{'value': '11.11.11.11', 'indicator_type': 'IP', 'aggregatedReliability': 'A - Completely reliable'},
{'expiration_date': -1, 'indicator': '11.11.11.11', 'reputation': 'UNKNOWN', 'severity': 'INFO', 'type': 'IP', 'reliability': 'A'} # noqa: E501
),
(
{'value': '11.11.11.11', 'indicator_type': 'IP', 'CustomFields': {'threattypes': {'threatcategory': 'Malware'}}}, # noqa: E501
{'expiration_date': -1, 'indicator': '11.11.11.11', 'reputation': 'UNKNOWN', 'severity': 'INFO', 'type': 'IP', 'class': 'Malware'} # noqa: E501
),
(
{'value': '11.11.11.11', 'indicator_type': 'IP', 'moduleToFeedMap': {'module': {'sourceBrand': 'test', 'score': 2}}}, # noqa: E501
{'expiration_date': -1, 'indicator': '11.11.11.11', 'reputation': 'UNKNOWN', 'severity': 'INFO', 'type': 'IP', 'vendors': [{'vendor_name': 'test', 'reputation': 'SUSPICIOUS', 'reliability': 'F'}]} # noqa: E501
)
]
@pytest.mark.parametrize('demisto_ioc, xdr_ioc', data_test_demisto_ioc_to_xdr)
def test_demisto_ioc_to_xdr(self, demisto_ioc, xdr_ioc):
"""
Given:
- demisto indicator.
Then:
- Verify XDR indicator format.
"""
output = demisto_ioc_to_xdr(demisto_ioc)
assert output == xdr_ioc, f'demisto_ioc_to_xdr({demisto_ioc})\n\treturns: {d_sort(output)}\n\tinstead: {d_sort(xdr_ioc)}' # noqa: E501
def test_empty_demisto_ioc_to_xdr(self, mocker):
warnings = mocker.patch.object(demisto, 'debug')
output = demisto_ioc_to_xdr({})
assert output == {}, 'demisto_ioc_to_xdr({})\n\treturns: ' + str(d_sort(output)) + '\n\tinstead: {}'
assert warnings.call_args.args[0] == "unexpected IOC format in key: 'value', {}"
class TestXDRIOCToDemisto:
data_test_xdr_expiration_to_demisto = [
(-1, 'Never'),
(1591142400000, '2020-06-03T00:00:00Z'),
(1592142400000, '2020-06-14T13:46:40Z')
]
@pytest.mark.parametrize('xdr_expiration, demisto_expiration', data_test_xdr_expiration_to_demisto)
def test_xdr_expiration_to_demisto(self, xdr_expiration, demisto_expiration):
"""
Given:
- expiration in XDR format.
Then:
- expiration in demisto format.
"""
output = xdr_expiration_to_demisto(xdr_expiration)
assert output == demisto_expiration, f'xdr_expiration_to_demisto({xdr_expiration})\n\treturns: {output}\n\tinstead: {demisto_expiration}' # noqa: E501
data_test_xdr_ioc_to_demisto = [
(
{
'RULE_ID': 863, 'RULE_INSERT_TIME': 1591165763753, 'RULE_MODIFY_TIME': 1591166095668,
'RULE_SEVERITY': 'SEV_010_INFO', 'NUMBER_OF_HITS': 0, 'RULE_SOURCE': 'XSOAR TIM', 'RULE_COMMENT': '',
'RULE_STATUS': 'DISABLED', 'BS_STATUS': 'DONE', 'BS_TS': 1591165801230, 'BS_RETRIES': 1,
'RULE_EXPIRATION_TIME': -1, 'IOC_TYPE': 'HASH',
'RULE_INDICATOR': 'fa66f1e0e318b6d7b595b6cee580dc0d8e4ac38fbc8dbfcac6ad66dbe282832e', 'REPUTATION': 'GOOD', # noqa: E501
'RELIABILITY': None, 'VENDORS': None, 'KLASS': None, 'IS_DEFAULT_TTL': False, 'RULE_TTL': -1,
'MARKED_DELETED': 0
},
{
'value': 'fa66f1e0e318b6d7b595b6cee580dc0d8e4ac38fbc8dbfcac6ad66dbe282832e',
'type': 'File',
'score': 1,
'fields': {
'expirationdate': 'Never',
'tags': 'Cortex XDR',
'xdrstatus': 'disabled'
}
}
),
(
{
'RULE_ID': 861, 'RULE_INSERT_TIME': 1591165763753, 'RULE_MODIFY_TIME': 1591166095668,
'RULE_SEVERITY': 'SEV_010_INFO', 'NUMBER_OF_HITS': 0, 'RULE_SOURCE': 'XSOAR TIM', 'RULE_COMMENT': '',
'RULE_STATUS': 'DISABLED', 'BS_STATUS': 'DONE', 'BS_TS': 1591165801784, 'BS_RETRIES': 1,
'RULE_EXPIRATION_TIME': -1, 'IOC_TYPE': 'DOMAIN_NAME', 'RULE_INDICATOR': 'test.com', 'REPUTATION': 'GOOD', # noqa: E501
'RELIABILITY': None, 'VENDORS': None, 'KLASS': None, 'IS_DEFAULT_TTL': False, 'RULE_TTL': -1,
'MARKED_DELETED': 0
},
{
'value': 'test.com',
'type': 'Domain',
'score': 1,
'fields': {
'expirationdate': 'Never',
'tags': 'Cortex XDR',
'xdrstatus': 'disabled'
}
}
),
(
{
'RULE_ID': 862, 'RULE_INSERT_TIME': 1591165763753, 'RULE_MODIFY_TIME': 1591166095668,
'RULE_SEVERITY': 'SEV_010_INFO', 'NUMBER_OF_HITS': 0, 'RULE_SOURCE': 'XSOAR TIM', 'RULE_COMMENT': '',
'RULE_STATUS': 'ENABLED', 'BS_STATUS': 'DONE', 'BS_TS': 1591165801784, 'BS_RETRIES': 1,
'RULE_EXPIRATION_TIME': -1, 'IOC_TYPE': 'DOMAIN_NAME', 'RULE_INDICATOR': 'test.co.il',
'REPUTATION': 'SUSPICIOUS', 'RELIABILITY': 'A',
'VENDORS': [{'vendor_name': 'Cortex XDR - IOC', 'reputation': 'SUSPICIOUS', 'reliability': 'A'}],
'KLASS': None,
'IS_DEFAULT_TTL': False, 'RULE_TTL': -1, 'MARKED_DELETED': 0
},
{
'value': 'test.co.il',
'type': 'Domain',
'score': 2,
'fields': {
'expirationdate': 'Never',
'tags': 'Cortex XDR',
'xdrstatus': 'enabled'
}
}
)
]
@pytest.mark.parametrize('xdr_ioc, demisto_ioc', data_test_xdr_ioc_to_demisto)
def test_xdr_ioc_to_demisto(self, xdr_ioc, demisto_ioc, mocker):
"""
Given:
- IOC in XDR format.
Then:
- IOC in demisto format.
"""
mocker.patch.object(demisto, 'searchIndicators', return_value={})
output = xdr_ioc_to_demisto(xdr_ioc)
del output['rawJSON']
assert output == demisto_ioc, f'xdr_ioc_to_demisto({xdr_ioc})\n\treturns: {d_sort(output)}\n\tinstead: {d_sort(demisto_ioc)}' # noqa: E501
class TestCommands:
# test commands full flow
class TestIOCSCommand:
def test_iocs_command_with_enable(self, mocker):
"""
Given:
- enable command
Then:
- Verify enable command is called.
"""
mocker.patch.object(demisto, 'command', return_value='xdr-iocs-enable')
mocker.patch.object(demisto, 'args', return_value={'indicator': '11.11.11.11'})
mocker.patch('XDR_iocs.Client.http_request', return_value={})
outputs = mocker.patch('XDR_iocs.return_outputs')
enable_ioc = mocker.patch('XDR_iocs.prepare_enable_iocs', side_effect=prepare_enable_iocs)
iocs_command(client)
output = outputs.call_args.args[0]
assert output == 'indicators 11.11.11.11 enabled.', f'enable command\n\tprints: {output}\n\tinstead: indicators 11.11.11.11 enabled.' # noqa: E501
assert enable_ioc.call_count == 1, 'enable command not called'
def test_iocs_command_with_disable(self, mocker):
"""
Given:
- disable command
Then:
- Verify disable command is called.
"""
mocker.patch.object(demisto, 'command', return_value='xdr-iocs-disable')
mocker.patch.object(demisto, 'args', return_value={'indicator': '11.11.11.11'})
mocker.patch('XDR_iocs.Client.http_request', return_value={})
outputs = mocker.patch('XDR_iocs.return_outputs')
disable_ioc = mocker.patch('XDR_iocs.prepare_disable_iocs', side_effect=prepare_disable_iocs)
iocs_command(client)
output = outputs.call_args.args[0]
assert output == 'indicators 11.11.11.11 disabled.', f'disable command\n\tprints: {output}\n\tinstead: indicators 11.11.11.11 disabled.' # noqa: E501
assert disable_ioc.call_count == 1, 'disable command not called'
def test_sync(self, mocker):
http_request = mocker.patch.object(Client, 'http_request')
iocs, data = TestCreateFile.get_all_iocs(TestCreateFile.data_test_create_file_sync, 'txt')
        mocker.patch.object(demisto, 'searchIndicators', return_value=iocs)
mocker.patch('XDR_iocs.return_outputs')
sync(client)
assert http_request.call_args.args[0] == 'sync_tim_iocs', 'sync command url changed'
@freeze_time('2020-06-03T02:00:00Z')
def test_iocs_to_keep(self, mocker):
http_request = mocker.patch.object(Client, 'http_request')
iocs, data = TestCreateFile.get_all_iocs(TestCreateFile.data_test_create_file_iocs_to_keep, 'txt')
        mocker.patch.object(demisto, 'searchIndicators', return_value=iocs)
mocker.patch('XDR_iocs.return_outputs')
iocs_to_keep(client)
assert http_request.call_args.args[0] == 'iocs_to_keep', 'iocs_to_keep command url changed'
def test_tim_insert_jsons(self, mocker):
http_request = mocker.patch.object(Client, 'http_request')
mocker.patch.object(demisto, 'getIntegrationContext', return_value={'time': '2020-06-03T00:00:00Z'})
iocs, _ = TestCreateFile.get_all_iocs(TestCreateFile.data_test_create_file_sync, 'txt')
mocker.patch.object(demisto, 'searchIndicators', return_value=iocs)
mocker.patch('XDR_iocs.return_outputs')
tim_insert_jsons(client)
assert http_request.call_args.kwargs['url_suffix'] == 'tim_insert_jsons/', 'tim_insert_jsons command url changed'
def test_get_changes(self, mocker):
mocker.patch.object(demisto, 'getIntegrationContext', return_value={'ts': 1591142400000})
mocker.patch.object(demisto, 'createIndicators')
mocker.patch.object(demisto, 'searchIndicators', return_value={})
xdr_res = {'reply': list(map(lambda xdr_ioc: xdr_ioc[0], TestXDRIOCToDemisto.data_test_xdr_ioc_to_demisto))}
mocker.patch.object(Client, 'http_request', return_value=xdr_res)
get_changes(client)
xdr_ioc_to_timeline(list(map(lambda x: str(x[0].get('RULE_INDICATOR')), TestXDRIOCToDemisto.data_test_xdr_ioc_to_demisto))) # noqa: E501
class TestParams:
tags_test = [
(
{'value': '11.11.11.11', 'indicator_type': 'IP', 'score': 2},
{'expiration_date': -1, 'indicator': '11.11.11.11', 'reputation': 'SUSPICIOUS', 'severity': 'INFO',
'type': 'IP'},
{'tlp_color': ''},
'Cortex XDR',
None
),
(
{'value': '11.11.11.11', 'indicator_type': 'IP', 'score': 2},
{'expiration_date': -1, 'indicator': '11.11.11.11', 'reputation': 'SUSPICIOUS', 'severity': 'INFO',
'type': 'IP'},
{'tag': 'tag1'},
'tag1',
None
),
(
{'value': '11.11.11.11', 'indicator_type': 'IP', 'score': 2},
{'expiration_date': -1, 'indicator': '11.11.11.11', 'reputation': 'SUSPICIOUS', 'severity': 'INFO',
'type': 'IP'},
{'feedTags': 'tag2', 'tlp_color': 'AMBER'},
'tag2',
'AMBER'
)
]
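    # The three cases above mirror the parameter precedence applied in the test body below:
    #     Client.tag = params.get('feedTags', params.get('tag', Client.tag))
    # i.e. 'feedTags' overrides 'tag', the default feed tag ('Cortex XDR') is used when
    # neither is set, and 'tlp_color' is copied through to the indicator fields as-is.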
@pytest.mark.parametrize('demisto_ioc, xdr_ioc, param_value, expected_tags, expected_tlp_color', tags_test)
def test_feed_tags_and_tlp_color(self, demisto_ioc, xdr_ioc, param_value, expected_tags, expected_tlp_color, mocker):
"""
Given:
            - feedTags / tag and tlp_color integration parameters.
        Then:
            - Verify the created indicators get the expected feed tags and TLP color.
"""
mocker.patch.object(demisto, 'searchIndicators', return_value={})
mocker.patch.object(demisto, 'params', return_value=param_value)
mocker.patch.object(demisto, 'getIntegrationContext', return_value={'ts': 1591142400000})
mocker.patch.object(demisto, 'searchIndicators', return_value={})
outputs = mocker.patch.object(demisto, 'createIndicators')
Client.tag = demisto.params().get('feedTags', demisto.params().get('tag', Client.tag))
Client.tlp_color = demisto.params().get('tlp_color')
client = Client({'url': 'yana'})
xdr_res = {'reply': list(map(lambda xdr_ioc: xdr_ioc[0], TestXDRIOCToDemisto.data_test_xdr_ioc_to_demisto))}
mocker.patch.object(Client, 'http_request', return_value=xdr_res)
get_changes(client)
output = outputs.call_args.args[0]
assert output[0]['fields']['tags'] == expected_tags
assert output[0]['fields'].get('trafficlightprotocol') == expected_tlp_color
| from XDR_iocs import *
import pytest
from freezegun import freeze_time
Client.severity = 'INFO'
client = Client({'url': 'test'})
def d_sort(in_dict):
return sorted(in_dict.items())
class TestGetHeaders:
@freeze_time('2020-06-01T00:00:00Z')
def test_sanity(self, mocker):
"""
Given:
- API key
- API key ID
Then:
- Verify headers created correct.
"""
params = {
"apikey_id": "7",
"apikey": "<KEY>" # noqa: E501
}
headers = {
'Authorization': 'da94963b561e3c95899d843b1284cecf410606e9e809be528ec1cf03880c6e9e',
'x-iocs-source': 'xsoar',
'x-xdr-auth-id': '7',
'x-xdr-nonce': '1111111111111111111111111111111111111111111111111111111111111111',
'x-xdr-timestamp': '1590969600000'
}
mocker.patch('secrets.choice', return_value='1')
output = get_headers(params)
assert output == headers, f'get_headers({params})\n\treturns: {d_sort(output)}\n\tinstead: {d_sort(headers)}'
def test_empty_case(self):
"""
Given:
Empty params
Then:
get_headers will not raise error
"""
get_headers({})
class TestHttpRequest:
class Res:
content = 'error'.encode()
def __init__(self, code):
self.status_code = code
@staticmethod
def json():
return {}
XDR_SERVER_ERROR = 500
INVALID_CREDS = 401
LICENSE_ERROR = 402
PERMISSION_ERROR = 403
OK = 200
data_test_http_request_error_codes = [
(OK, {}),
(XDR_SERVER_ERROR, 'XDR internal server error.\t(error)'),
(INVALID_CREDS, 'Unauthorized access. An issue occurred during authentication. This can indicate an incorrect key, id, or other invalid authentication parameters.\t(error)'), # noqa: E501
(LICENSE_ERROR, 'Unauthorized access. User does not have the required license type to run this API.\t(error)'),
(PERMISSION_ERROR, 'Unauthorized access. The provided API key does not have the required RBAC permissions to run this API.\t(error)') # noqa: E501
]
@pytest.mark.parametrize('res, expected_output', data_test_http_request_error_codes)
def test_http_request_error_codes(self, res, expected_output, mocker):
"""
Given:
- Status code
When:
- http_request returns this status code.
Then:
- Verify error/success format.
"""
mocker.patch('requests.post', return_value=self.Res(res))
try:
output = client.http_request('', {})
except DemistoException as error:
output = str(error)
assert output == expected_output, f'status code {res}\n\treturns: {output}\n\tinstead: {expected_output}'
class TestGetRequestsKwargs:
def test_with_file(self, mocker):
"""
Given:
- file to upload
Then:
- Verify output format.
"""
def override_open(open_path, *_other):
return open_path
mocker.patch('builtins.open', side_effect=override_open)
path = '/Users/some_user/some_dir/some_file.file'
output = get_requests_kwargs(file_path=path)
expected_output = {'files': [('file', ('iocs.json', path, 'application/json'))]}
assert output == expected_output, f'get_requests_kwargs(file_path={path})\n\treturns: {output}\n\t instead: {expected_output}' # noqa: E501
def test_with_json(self):
"""
Given:
- simple json
Then:
- the json ready to send
"""
_json = {'test': 'test'}
output = get_requests_kwargs(_json=_json)
expected_output = {'data': '{"request_data": {"test": "test"}}'}
assert output == expected_output, f'get_requests_kwargs(_json={_json})\n\treturns: {output}\n\t instead: {expected_output}' # noqa: E501
class TestPrepareCommands:
def test_prepare_get_changes(self):
"""
Given:
- get changes command
Then:
- Verify url and json format.
"""
ts = int(datetime.now(timezone.utc).timestamp() * 1000)
url_suffix, _json = prepare_get_changes(ts)
assert url_suffix == 'get_changes', f'prepare_get_changes\n\treturns url_suffix: {url_suffix}\n\tinstead url_suffix: get_changes' # noqa: E501
assert _json == {'last_update_ts': ts}
def test_prepare_enable_iocs(self):
"""
Given:
- enable iocs command
Then:
- Verify url and json format.
"""
url_suffix, iocs = prepare_enable_iocs('8.8.8.8,domain.com')
assert url_suffix == 'enable_iocs', f'prepare_enable_iocs\n\treturns url_suffix: {url_suffix}\n\tinstead url_suffix: enable_iocs' # noqa: E501
assert iocs == ['8.8.8.8', 'domain.com']
def test_prepare_disable_iocs(self):
"""
Given:
- disable iocs command
Then:
- Verify url and json format.
"""
url_suffix, iocs = prepare_disable_iocs('8.8.8.8,domain.com')
assert url_suffix == 'disable_iocs', f'prepare_disable_iocs\n\treturns url_suffix: {url_suffix}\n\tinstead url_suffix: disable_iocs' # noqa: E501
assert iocs == ['8.8.8.8', 'domain.com']
class TestCreateFile:
path = 'test_data/sync_file_test.json'
data_test_create_file_sync = [
('Domain_iocs', 'Domain_sync_file'),
('IP_iocs', 'IP_sync_file'),
('File_iocs', 'File_sync_file')
]
data_test_create_file_iocs_to_keep = [
('Domain_iocs', 'Domain_iocs_to_keep_file'),
('IP_iocs', 'IP_iocs_to_keep_file'),
('File_iocs', 'File_iocs_to_keep_file')
]
def setup(self):
# creates the file
with open(TestCreateFile.path, 'w') as _file:
_file.write('')
def teardown(self):
# removes the file when done
os.remove(TestCreateFile.path)
@staticmethod
def get_file(path):
with open(path, 'r') as _file:
return _file.read()
@staticmethod
def get_all_iocs(go_over, extension):
iocs = []
total = 0
data = []
for in_iocs, out_iocs in go_over:
ioc = json.loads(TestCreateFile.get_file(f'test_data/{in_iocs}.json'))
iocs.extend(ioc['iocs'])
total += ioc['total']
data.append(TestCreateFile.get_file(f'test_data/{out_iocs}.{extension}'))
all_iocs = {'iocs': iocs, 'total': total}
all_data = ''.join(data)
return all_iocs, all_data
def test_create_file_sync_without_iocs(self, mocker):
"""
Given:
- Sync command
When:
- there is no iocs
Then:
- Verify sync file data.
"""
mocker.patch.object(demisto, 'searchIndicators', return_value={})
create_file_sync(TestCreateFile.path)
data = self.get_file(TestCreateFile.path)
expected_data = ''
assert data == expected_data, f'create_file_sync with no iocs\n\tcreates: {data}\n\tinstead: {expected_data}'
@pytest.mark.parametrize('in_iocs, out_iocs', data_test_create_file_sync)
def test_create_file_sync(self, in_iocs, out_iocs, mocker):
"""
Given:
- Sync command
When:
- iocs type is a specific type.
Then:
- Verify sync file data.
"""
mocker.patch.object(demisto, 'searchIndicators', return_value=json.loads(self.get_file(f'test_data/{in_iocs}.json'))) # noqa: E501
create_file_sync(TestCreateFile.path)
data = self.get_file(TestCreateFile.path)
expected_data = self.get_file(f'test_data/{out_iocs}.txt')
assert data == expected_data, f'create_file_sync with {in_iocs} iocs\n\tcreates: {data}\n\tinstead: {expected_data}'
def test_create_file_sync_all_types(self, mocker):
"""
Given:
- Sync command
When:
- iocs as all types
Then:
- Verify sync file data.
"""
all_iocs, expected_data = self.get_all_iocs(self.data_test_create_file_sync, 'txt')
mocker.patch.object(demisto, 'searchIndicators', return_value=all_iocs)
create_file_sync(TestCreateFile.path)
data = self.get_file(TestCreateFile.path)
assert data == expected_data, f'create_file_sync with all iocs\n\tcreates: {data}\n\tinstead: {expected_data}'
data_test_create_file_with_empty_indicators = [
{},
{'value': '11.11.11.11'},
{'indicator_type': 'IP'}
]
@pytest.mark.parametrize('defective_indicator', data_test_create_file_with_empty_indicators)
def test_create_file_sync_with_empty_indicators(self, defective_indicator, mocker):
"""
Given:
- Sync command
When:
- a part iocs dont have all required data
Then:
- Verify sync file data.
"""
all_iocs, expected_data = self.get_all_iocs(self.data_test_create_file_sync, 'txt')
all_iocs['iocs'].append(defective_indicator)
all_iocs['total'] += 1
mocker.patch.object(demisto, 'searchIndicators', return_value=all_iocs)
warnings = mocker.patch.object(demisto, 'debug')
create_file_sync(TestCreateFile.path)
data = self.get_file(TestCreateFile.path)
assert data == expected_data, f'create_file_sync with all iocs\n\tcreates: {data}\n\tinstead: {expected_data}'
error_msg = warnings.call_args.args[0]
assert error_msg.startswith("unexpected IOC format in key: '"), f"create_file_sync empty message\n\tstarts: {error_msg}\n\tinstead: unexpected IOC format in key: '" # noqa: E501
assert error_msg.endswith(f"', {str(defective_indicator)}"), f"create_file_sync empty message\n\tends: {error_msg}\n\tinstead: ', {str(defective_indicator)}" # noqa: E501
def test_create_file_iocs_to_keep_without_iocs(self, mocker):
"""
Given:
- iocs to keep command
When:
- there is no iocs
Then:
- Verify iocs to keep file data.
"""
mocker.patch.object(demisto, 'searchIndicators', return_value={})
create_file_iocs_to_keep(TestCreateFile.path)
data = self.get_file(TestCreateFile.path)
expected_data = ''
assert data == expected_data, f'create_file_iocs_to_keep with no iocs\n\tcreates: {data}\n\tinstead: {expected_data}'
@pytest.mark.parametrize('in_iocs, out_iocs', data_test_create_file_iocs_to_keep)
def test_create_file_iocs_to_keep(self, in_iocs, out_iocs, mocker):
"""
Given:
- iocs to keep command
When:
- iocs type is a specific type.
Then:
- Verify iocs to keep file data.
"""
mocker.patch.object(demisto, 'searchIndicators', return_value=json.loads(
self.get_file(f'test_data/{in_iocs}.json')))
create_file_iocs_to_keep(TestCreateFile.path)
data = self.get_file(TestCreateFile.path)
expected_data = self.get_file(f'test_data/{out_iocs}.txt')
assert data == expected_data, f'create_file_iocs_to_keep with {in_iocs} iocs\n\tcreates: {data}\n\tinstead: {expected_data}' # noqa: E501
def test_create_file_iocs_to_keep_all_types(self, mocker):
"""
Given:
- iocs to keep command
When:
- iocs as all types
Then:
- Verify iocs to keep file data.
"""
all_iocs, expected_data = self.get_all_iocs(self.data_test_create_file_iocs_to_keep, 'txt')
mocker.patch.object(demisto, 'searchIndicators', return_value=all_iocs)
create_file_iocs_to_keep(TestCreateFile.path)
data = self.get_file(TestCreateFile.path)
assert data == expected_data, f'create_file_iocs_to_keep with all iocs\n\tcreates: {data}\n\tinstead: {expected_data}'
class TestDemistoIOCToXDR:
data_test_demisto_expiration_to_xdr = [
(None, -1),
('', -1),
('0001-01-01T00:00:00Z', -1),
('2020-06-03T00:00:00Z', 1591142400000)
]
@pytest.mark.parametrize('demisto_expiration, xdr_expiration', data_test_demisto_expiration_to_xdr)
def test_demisto_expiration_to_xdr(self, demisto_expiration, xdr_expiration):
"""
Given:
- demisto indicator expiration
Then:
- Verify XDR expiration.
"""
output = demisto_expiration_to_xdr(demisto_expiration)
assert xdr_expiration == output, f'demisto_expiration_to_xdr({demisto_expiration})\n\treturns: {output}\n\tinstead: {xdr_expiration}' # noqa: E501
data_test_demisto_reliability_to_xdr = [
(None, 'F'),
('A - Completely reliable', 'A'),
('B - Usually reliable', 'B'),
('C - Fairly reliable', 'C'),
('D - Not usually reliable', 'D'),
('E - Unreliable', 'E'),
('F - Reliability cannot be judged', 'F')
]
@pytest.mark.parametrize('demisto_reliability, xdr_reliability', data_test_demisto_reliability_to_xdr)
def test_demisto_reliability_to_xdr(self, demisto_reliability, xdr_reliability):
"""
Given:
- demisto indicator reliability
Then:
- Verify XDR reliability.
"""
output = demisto_reliability_to_xdr(demisto_reliability)
assert output == xdr_reliability, f'demisto_reliability_to_xdr({demisto_reliability})\n\treturns: {output}\n\tinstead: {xdr_reliability}' # noqa: E501
data_test_demisto_types_to_xdr = [
('File', 'HASH'),
('IP', 'IP'),
('Domain', 'DOMAIN_NAME')
]
@pytest.mark.parametrize('demisto_type, xdr_type', data_test_demisto_types_to_xdr)
def test_demisto_types_to_xdr(self, demisto_type, xdr_type):
"""
Given:
- demisto indicator type
Then:
- Verify XDR type.
"""
output = demisto_types_to_xdr(demisto_type)
assert output == xdr_type, f'demisto_reliability_to_xdr({demisto_type})\n\treturns: {output}\n\tinstead: {xdr_type}'
data_test_demisto_vendors_to_xdr = [
(
{'moduleID': {'sourceBrand': 'test', 'reliability': 'A - Completely reliable', 'score': 2}},
{'vendor_name': 'test', 'reputation': 'SUSPICIOUS', 'reliability': 'A'}
),
(
{'moduleID': {'reliability': 'A - Completely reliable', 'score': 2}},
{'vendor_name': 'moduleID', 'reputation': 'SUSPICIOUS', 'reliability': 'A'}
),
(
{'moduleID': {'sourceBrand': 'test', 'score': 2}},
{'vendor_name': 'test', 'reputation': 'SUSPICIOUS', 'reliability': 'F'}
),
(
{'moduleID': {'reliability': 'A - Completely reliable', 'score': 0}},
{'vendor_name': 'moduleID', 'reputation': 'UNKNOWN', 'reliability': 'A'}
)
]
@pytest.mark.parametrize('demisto_vendor, xdr_vendor', data_test_demisto_vendors_to_xdr)
def test_demisto_vendors_to_xdr(self, demisto_vendor, xdr_vendor):
"""
Given:
- demisto indicator vendors reports.
Then:
- Verify XDR vendors format.
"""
output = demisto_vendors_to_xdr(demisto_vendor)[0]
assert output == xdr_vendor, f'demisto_vendors_to_xdr({demisto_vendor})\n\treturns: {d_sort(output)}\n\tinstead: {d_sort(xdr_vendor)}' # noqa: E501
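    # Note (inferred from the fixtures above and below, not from the XDR API documentation):
    # demisto scores appear to map to XDR reputations roughly as 0 -> UNKNOWN, 1 -> GOOD,
    # 2 -> SUSPICIOUS (and presumably 3 -> BAD), with a missing reliability falling back to 'F'.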
data_test_demisto_ioc_to_xdr = [
(
{'value': '11.11.11.11', 'indicator_type': 'IP', 'score': 2},
{'expiration_date': -1, 'indicator': '11.11.11.11', 'reputation': 'SUSPICIOUS', 'severity': 'INFO',
'type': 'IP'}
),
(
{'value': '11.11.11.11', 'indicator_type': 100, 'score': 2},
{'expiration_date': -1, 'indicator': '11.11.11.11', 'reputation': 'SUSPICIOUS', 'severity': 'INFO', 'type': '100'}
),
(
{'value': '11.11.11.11', 'indicator_type': 'IP'},
{'expiration_date': -1, 'indicator': '11.11.11.11', 'reputation': 'UNKNOWN', 'severity': 'INFO', 'type': 'IP'}
),
(
{'value': '11.11.11.11', 'indicator_type': 'IP', 'expiration': '2020-06-03T00:00:00Z'},
{'expiration_date': 1591142400000, 'indicator': '11.11.11.11', 'reputation': 'UNKNOWN', 'severity': 'INFO', 'type': 'IP'} # noqa: E501
),
(
{'value': '11.11.11.11', 'indicator_type': 'IP', 'comments': [{'type': 'IndicatorCommentTimeLine', 'content': 'test'}]}, # noqa: E501
{'expiration_date': -1, 'indicator': '11.11.11.11', 'reputation': 'UNKNOWN', 'severity': 'INFO', 'type': 'IP'}
),
(
{'value': '11.11.11.11', 'indicator_type': 'IP', 'comments': [{'type': 'IndicatorCommentRegular', 'content': 'test'}]}, # noqa: E501
{'expiration_date': -1, 'indicator': '11.11.11.11', 'reputation': 'UNKNOWN', 'severity': 'INFO', 'type': 'IP', 'comment': 'test'} # noqa: E501
),
(
{'value': '11.11.11.11', 'indicator_type': 'IP', 'comments': [{'type': 'IndicatorCommentRegular', 'content': 'test'}, {'type': 'IndicatorCommentRegular', 'content': 'this is the comment'}]}, # noqa: E501
{'expiration_date': -1, 'indicator': '11.11.11.11', 'reputation': 'UNKNOWN', 'severity': 'INFO', 'type': 'IP', 'comment': 'this is the comment'} # noqa: E501
),
(
{'value': '11.11.11.11', 'indicator_type': 'IP', 'aggregatedReliability': 'A - Completely reliable'},
{'expiration_date': -1, 'indicator': '11.11.11.11', 'reputation': 'UNKNOWN', 'severity': 'INFO', 'type': 'IP', 'reliability': 'A'} # noqa: E501
),
(
{'value': '11.11.11.11', 'indicator_type': 'IP', 'CustomFields': {'threattypes': {'threatcategory': 'Malware'}}}, # noqa: E501
{'expiration_date': -1, 'indicator': '11.11.11.11', 'reputation': 'UNKNOWN', 'severity': 'INFO', 'type': 'IP', 'class': 'Malware'} # noqa: E501
),
(
{'value': '11.11.11.11', 'indicator_type': 'IP', 'moduleToFeedMap': {'module': {'sourceBrand': 'test', 'score': 2}}}, # noqa: E501
{'expiration_date': -1, 'indicator': '11.11.11.11', 'reputation': 'UNKNOWN', 'severity': 'INFO', 'type': 'IP', 'vendors': [{'vendor_name': 'test', 'reputation': 'SUSPICIOUS', 'reliability': 'F'}]} # noqa: E501
)
]
@pytest.mark.parametrize('demisto_ioc, xdr_ioc', data_test_demisto_ioc_to_xdr)
def test_demisto_ioc_to_xdr(self, demisto_ioc, xdr_ioc):
"""
Given:
- demisto indicator.
Then:
- Verify XDR indicator format.
"""
output = demisto_ioc_to_xdr(demisto_ioc)
assert output == xdr_ioc, f'demisto_ioc_to_xdr({demisto_ioc})\n\treturns: {d_sort(output)}\n\tinstead: {d_sort(xdr_ioc)}' # noqa: E501
def test_empty_demisto_ioc_to_xdr(self, mocker):
warnings = mocker.patch.object(demisto, 'debug')
output = demisto_ioc_to_xdr({})
assert output == {}, 'demisto_ioc_to_xdr({})\n\treturns: ' + str(d_sort(output)) + '\n\tinstead: {}'
assert warnings.call_args.args[0] == "unexpected IOC format in key: 'value', {}"
class TestXDRIOCToDemisto:
data_test_xdr_expiration_to_demisto = [
(-1, 'Never'),
(1591142400000, '2020-06-03T00:00:00Z'),
(1592142400000, '2020-06-14T13:46:40Z')
]
@pytest.mark.parametrize('xdr_expiration, demisto_expiration', data_test_xdr_expiration_to_demisto)
def test_xdr_expiration_to_demisto(self, xdr_expiration, demisto_expiration):
"""
Given:
- expiration in XDR format.
Then:
- expiration in demisto format.
"""
output = xdr_expiration_to_demisto(xdr_expiration)
assert output == demisto_expiration, f'xdr_expiration_to_demisto({xdr_expiration})\n\treturns: {output}\n\tinstead: {demisto_expiration}' # noqa: E501
data_test_xdr_ioc_to_demisto = [
(
{
'RULE_ID': 863, 'RULE_INSERT_TIME': 1591165763753, 'RULE_MODIFY_TIME': 1591166095668,
'RULE_SEVERITY': 'SEV_010_INFO', 'NUMBER_OF_HITS': 0, 'RULE_SOURCE': 'XSOAR TIM', 'RULE_COMMENT': '',
'RULE_STATUS': 'DISABLED', 'BS_STATUS': 'DONE', 'BS_TS': 1591165801230, 'BS_RETRIES': 1,
'RULE_EXPIRATION_TIME': -1, 'IOC_TYPE': 'HASH',
'RULE_INDICATOR': 'fa66f1e0e318b6d7b595b6cee580dc0d8e4ac38fbc8dbfcac6ad66dbe282832e', 'REPUTATION': 'GOOD', # noqa: E501
'RELIABILITY': None, 'VENDORS': None, 'KLASS': None, 'IS_DEFAULT_TTL': False, 'RULE_TTL': -1,
'MARKED_DELETED': 0
},
{
'value': 'fa66f1e0e318b6d7b595b6cee580dc0d8e4ac38fbc8dbfcac6ad66dbe282832e',
'type': 'File',
'score': 1,
'fields': {
'expirationdate': 'Never',
'tags': 'Cortex XDR',
'xdrstatus': 'disabled'
}
}
),
(
{
'RULE_ID': 861, 'RULE_INSERT_TIME': 1591165763753, 'RULE_MODIFY_TIME': 1591166095668,
'RULE_SEVERITY': 'SEV_010_INFO', 'NUMBER_OF_HITS': 0, 'RULE_SOURCE': 'XSOAR TIM', 'RULE_COMMENT': '',
'RULE_STATUS': 'DISABLED', 'BS_STATUS': 'DONE', 'BS_TS': 1591165801784, 'BS_RETRIES': 1,
'RULE_EXPIRATION_TIME': -1, 'IOC_TYPE': 'DOMAIN_NAME', 'RULE_INDICATOR': 'test.com', 'REPUTATION': 'GOOD', # noqa: E501
'RELIABILITY': None, 'VENDORS': None, 'KLASS': None, 'IS_DEFAULT_TTL': False, 'RULE_TTL': -1,
'MARKED_DELETED': 0
},
{
'value': 'test.com',
'type': 'Domain',
'score': 1,
'fields': {
'expirationdate': 'Never',
'tags': 'Cortex XDR',
'xdrstatus': 'disabled'
}
}
),
(
{
'RULE_ID': 862, 'RULE_INSERT_TIME': 1591165763753, 'RULE_MODIFY_TIME': 1591166095668,
'RULE_SEVERITY': 'SEV_010_INFO', 'NUMBER_OF_HITS': 0, 'RULE_SOURCE': 'XSOAR TIM', 'RULE_COMMENT': '',
'RULE_STATUS': 'ENABLED', 'BS_STATUS': 'DONE', 'BS_TS': 1591165801784, 'BS_RETRIES': 1,
'RULE_EXPIRATION_TIME': -1, 'IOC_TYPE': 'DOMAIN_NAME', 'RULE_INDICATOR': 'test.co.il',
'REPUTATION': 'SUSPICIOUS', 'RELIABILITY': 'A',
'VENDORS': [{'vendor_name': 'Cortex XDR - IOC', 'reputation': 'SUSPICIOUS', 'reliability': 'A'}],
'KLASS': None,
'IS_DEFAULT_TTL': False, 'RULE_TTL': -1, 'MARKED_DELETED': 0
},
{
'value': 'test.co.il',
'type': 'Domain',
'score': 2,
'fields': {
'expirationdate': 'Never',
'tags': 'Cortex XDR',
'xdrstatus': 'enabled'
}
}
)
]
@pytest.mark.parametrize('xdr_ioc, demisto_ioc', data_test_xdr_ioc_to_demisto)
def test_xdr_ioc_to_demisto(self, xdr_ioc, demisto_ioc, mocker):
"""
Given:
- IOC in XDR format.
Then:
- IOC in demisto format.
"""
mocker.patch.object(demisto, 'searchIndicators', return_value={})
output = xdr_ioc_to_demisto(xdr_ioc)
del output['rawJSON']
assert output == demisto_ioc, f'xdr_ioc_to_demisto({xdr_ioc})\n\treturns: {d_sort(output)}\n\tinstead: {d_sort(demisto_ioc)}' # noqa: E501
class TestCommands:
# test commands full flow
class TestIOCSCommand:
def test_iocs_command_with_enable(self, mocker):
"""
Given:
- enable command
Then:
- Verify enable command is called.
"""
mocker.patch.object(demisto, 'command', return_value='xdr-iocs-enable')
mocker.patch.object(demisto, 'args', return_value={'indicator': '11.11.11.11'})
mocker.patch('XDR_iocs.Client.http_request', return_value={})
outputs = mocker.patch('XDR_iocs.return_outputs')
enable_ioc = mocker.patch('XDR_iocs.prepare_enable_iocs', side_effect=prepare_enable_iocs)
iocs_command(client)
output = outputs.call_args.args[0]
assert output == 'indicators 11.11.11.11 enabled.', f'enable command\n\tprints: {output}\n\tinstead: indicators 11.11.11.11 enabled.' # noqa: E501
assert enable_ioc.call_count == 1, 'enable command not called'
def test_iocs_command_with_disable(self, mocker):
"""
Given:
- disable command
Then:
- Verify disable command is called.
"""
mocker.patch.object(demisto, 'command', return_value='xdr-iocs-disable')
mocker.patch.object(demisto, 'args', return_value={'indicator': '11.11.11.11'})
mocker.patch('XDR_iocs.Client.http_request', return_value={})
outputs = mocker.patch('XDR_iocs.return_outputs')
disable_ioc = mocker.patch('XDR_iocs.prepare_disable_iocs', side_effect=prepare_disable_iocs)
iocs_command(client)
output = outputs.call_args.args[0]
assert output == 'indicators 11.11.11.11 disabled.', f'disable command\n\tprints: {output}\n\tinstead: indicators 11.11.11.11 disabled.' # noqa: E501
assert disable_ioc.call_count == 1, 'disable command not called'
def test_sync(self, mocker):
http_request = mocker.patch.object(Client, 'http_request')
iocs, data = TestCreateFile.get_all_iocs(TestCreateFile.data_test_create_file_sync, 'txt')
        mocker.patch.object(demisto, 'searchIndicators', return_value=iocs)
mocker.patch('XDR_iocs.return_outputs')
sync(client)
assert http_request.call_args.args[0] == 'sync_tim_iocs', 'sync command url changed'
@freeze_time('2020-06-03T02:00:00Z')
def test_iocs_to_keep(self, mocker):
http_request = mocker.patch.object(Client, 'http_request')
iocs, data = TestCreateFile.get_all_iocs(TestCreateFile.data_test_create_file_iocs_to_keep, 'txt')
        mocker.patch.object(demisto, 'searchIndicators', return_value=iocs)
mocker.patch('XDR_iocs.return_outputs')
iocs_to_keep(client)
assert http_request.call_args.args[0] == 'iocs_to_keep', 'iocs_to_keep command url changed'
def test_tim_insert_jsons(self, mocker):
http_request = mocker.patch.object(Client, 'http_request')
mocker.patch.object(demisto, 'getIntegrationContext', return_value={'time': '2020-06-03T00:00:00Z'})
iocs, _ = TestCreateFile.get_all_iocs(TestCreateFile.data_test_create_file_sync, 'txt')
mocker.patch.object(demisto, 'searchIndicators', return_value=iocs)
mocker.patch('XDR_iocs.return_outputs')
tim_insert_jsons(client)
assert http_request.call_args.kwargs['url_suffix'] == 'tim_insert_jsons/', 'tim_insert_jsons command url changed'
def test_get_changes(self, mocker):
mocker.patch.object(demisto, 'getIntegrationContext', return_value={'ts': 1591142400000})
mocker.patch.object(demisto, 'createIndicators')
mocker.patch.object(demisto, 'searchIndicators', return_value={})
xdr_res = {'reply': list(map(lambda xdr_ioc: xdr_ioc[0], TestXDRIOCToDemisto.data_test_xdr_ioc_to_demisto))}
mocker.patch.object(Client, 'http_request', return_value=xdr_res)
get_changes(client)
xdr_ioc_to_timeline(list(map(lambda x: str(x[0].get('RULE_INDICATOR')), TestXDRIOCToDemisto.data_test_xdr_ioc_to_demisto))) # noqa: E501
class TestParams:
tags_test = [
(
{'value': '11.11.11.11', 'indicator_type': 'IP', 'score': 2},
{'expiration_date': -1, 'indicator': '11.11.11.11', 'reputation': 'SUSPICIOUS', 'severity': 'INFO',
'type': 'IP'},
{'tlp_color': ''},
'Cortex XDR',
None
),
(
{'value': '11.11.11.11', 'indicator_type': 'IP', 'score': 2},
{'expiration_date': -1, 'indicator': '11.11.11.11', 'reputation': 'SUSPICIOUS', 'severity': 'INFO',
'type': 'IP'},
{'tag': 'tag1'},
'tag1',
None
),
(
{'value': '11.11.11.11', 'indicator_type': 'IP', 'score': 2},
{'expiration_date': -1, 'indicator': '11.11.11.11', 'reputation': 'SUSPICIOUS', 'severity': 'INFO',
'type': 'IP'},
{'feedTags': 'tag2', 'tlp_color': 'AMBER'},
'tag2',
'AMBER'
)
]
@pytest.mark.parametrize('demisto_ioc, xdr_ioc, param_value, expected_tags, expected_tlp_color', tags_test)
def test_feed_tags_and_tlp_color(self, demisto_ioc, xdr_ioc, param_value, expected_tags, expected_tlp_color, mocker):
"""
Given:
- IOC in XDR format.
Then:
- IOC in demisto format.
"""
mocker.patch.object(demisto, 'searchIndicators', return_value={})
mocker.patch.object(demisto, 'params', return_value=param_value)
mocker.patch.object(demisto, 'getIntegrationContext', return_value={'ts': 1591142400000})
mocker.patch.object(demisto, 'searchIndicators', return_value={})
outputs = mocker.patch.object(demisto, 'createIndicators')
Client.tag = demisto.params().get('feedTags', demisto.params().get('tag', Client.tag))
Client.tlp_color = demisto.params().get('tlp_color')
client = Client({'url': 'yana'})
xdr_res = {'reply': list(map(lambda xdr_ioc: xdr_ioc[0], TestXDRIOCToDemisto.data_test_xdr_ioc_to_demisto))}
mocker.patch.object(Client, 'http_request', return_value=xdr_res)
get_changes(client)
output = outputs.call_args.args[0]
assert output[0]['fields']['tags'] == expected_tags
assert output[0]['fields'].get('trafficlightprotocol') == expected_tlp_color
| en | 0.418577 | Given: - API key - API key ID Then: - Verify headers created correct. # noqa: E501 Given: Empty params Then: get_headers will not raise error # noqa: E501 # noqa: E501 Given: - Status code When: - http_request returns this status code. Then: - Verify error/success format. Given: - file to upload Then: - Verify output format. # noqa: E501 Given: - simple json Then: - the json ready to send # noqa: E501 Given: - get changes command Then: - Verify url and json format. # noqa: E501 Given: - enable iocs command Then: - Verify url and json format. # noqa: E501 Given: - disable iocs command Then: - Verify url and json format. # noqa: E501 # creates the file # removes the file when done Given: - Sync command When: - there is no iocs Then: - Verify sync file data. Given: - Sync command When: - iocs type is a specific type. Then: - Verify sync file data. # noqa: E501 Given: - Sync command When: - iocs as all types Then: - Verify sync file data. Given: - Sync command When: - a part iocs dont have all required data Then: - Verify sync file data. # noqa: E501 # noqa: E501 Given: - iocs to keep command When: - there is no iocs Then: - Verify iocs to keep file data. Given: - iocs to keep command When: - iocs type is a specific type. Then: - Verify iocs to keep file data. # noqa: E501 Given: - iocs to keep command When: - iocs as all types Then: - Verify iocs to keep file data. Given: - demisto indicator expiration Then: - Verify XDR expiration. # noqa: E501 Given: - demisto indicator reliability Then: - Verify XDR reliability. # noqa: E501 Given: - demisto indicator type Then: - Verify XDR type. Given: - demisto indicator vendors reports. Then: - Verify XDR vendors format. # noqa: E501 # noqa: E501 # noqa: E501 # noqa: E501 # noqa: E501 # noqa: E501 # noqa: E501 # noqa: E501 # noqa: E501 # noqa: E501 # noqa: E501 # noqa: E501 Given: - demisto indicator. Then: - Verify XDR indicator format. # noqa: E501 Given: - expiration in XDR format. Then: - expiration in demisto format. # noqa: E501 # noqa: E501 # noqa: E501 Given: - IOC in XDR format. Then: - IOC in demisto format. # noqa: E501 # test commands full flow Given: - enable command Then: - Verify enable command is called. # noqa: E501 Given: - disable command Then: - Verify disable command is called. # noqa: E501 # noqa: E501 Given: - IOC in XDR format. Then: - IOC in demisto format. | 2.234555 | 2 |
project/users/models.py | rchdlps/django-docker | 0 | 8242 | <gh_stars>0
from django.contrib.auth.models import AbstractUser
from django.db.models import CharField
from django.urls import reverse
from django.utils.translation import ugettext_lazy as _
from django.db import models
from PIL import Image
class User(AbstractUser):
# First Name and Last Name do not cover name patterns
# around the globe.
name = CharField(_('Nome de usuário:'), blank=True, max_length=255)
# Profile Models
image = models.ImageField(verbose_name='Foto de Perfil:',
default='default.jpg', upload_to='profile_pics')
birth_date = models.DateField(_('Data de Nascimento:'), null=True, blank=True)
cpf = models.CharField(_('CPF:'), max_length=50, blank=True)
cnpj = models.CharField(_('CNPJ:'), max_length=50, blank=True)
bio = models.TextField(_('Descrição:'), blank=True, default='')
cep = models.CharField(_('CEP:'), max_length=50, blank=True)
street = models.CharField(_('Rua:'), max_length=100, blank=True)
number_home = models.CharField(_('Número:'), max_length=10, blank=True)
neighborhood = models.CharField(_('Bairro:'), max_length=100, blank=True)
city = models.CharField(_('Cidade:'), max_length=50, blank=True)
state = models.CharField(_('Estado:'), max_length=50, blank=True)
phone = models.CharField(_('Telefone:'), max_length=50, blank=True)
cel_phone = models.CharField(_('Celular:'), max_length=50, blank=True)
def get_absolute_url(self):
return reverse("users:detail", kwargs={"username": self.username})
"""def save(self):
super().save()
img = Image.open(self.image.path)
if img.height > 300 or img.width > 300:
output_size = (300, 300)
img.thumbnail(output_size)
            img.save(self.image.path)"""
cloudify_terminal_sdk/netconf_connection.py | cloudify-incubator/cloudify-plugins-sdk | 1 | 8243 | <reponame>cloudify-incubator/cloudify-plugins-sdk
# Copyright (c) 2015-2020 Cloudify Platform Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from cloudify_common_sdk import exceptions
from cloudify_terminal_sdk import base_connection
# final of any package
NETCONF_1_0_END = "]]>]]>"
# base level of communication
NETCONF_1_0_CAPABILITY = 'urn:ietf:params:netconf:base:1.0'
# package based communication
NETCONF_1_1_CAPABILITY = 'urn:ietf:params:netconf:base:1.1'
class NetConfConnection(base_connection.SSHConnection):
# ssh connection
ssh = None
chan = None
# buffer for same packages, will save partial packages between calls
buff = ""
current_level = NETCONF_1_0_CAPABILITY
def connect(
            self, ip, user, hello_string, password=None, key_content=None,
port=830
):
"""open connection and send xml string by link"""
self._ssh_connect(ip, user, password, key_content, port)
self.conn = self.ssh.get_transport().open_session()
self.conn.invoke_subsystem('netconf')
self.buff = ""
capabilities = self.send(hello_string)
return capabilities
def send(self, xml):
"""send xml string by connection"""
if self.current_level == NETCONF_1_1_CAPABILITY:
self._send_1_1(xml)
return self._recv_1_1()
else:
self._send_1_0(xml)
return self._recv_1_0()
def _send_1_0(self, xml):
"""send xml string with NETCONF_1_0_END by connection"""
if xml:
message = xml + NETCONF_1_0_END
self._conn_send(message)
def _recv_1_0(self):
"""recv xml string with NETCONF_1_0_END by connection"""
while self.buff.find(NETCONF_1_0_END) == -1:
self.buff += self._conn_recv(8192)
if self.conn.closed:
break
package_end = self.buff.find(NETCONF_1_0_END)
# we have already closed connection
if package_end == -1:
package_end = len(self.buff)
response = self.buff[:package_end]
self.buff = self.buff[package_end + len(NETCONF_1_0_END):]
return response
def _send_1_1(self, xml):
"""send xml string as package by connection"""
if xml:
message = "\n#{0}\n".format(len(xml))
message += xml
message += "\n##\n"
self._conn_send(message)
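    # Example of the NETCONF 1.1 chunked framing produced above (illustration only):
    # for xml = "<rpc/>" (6 characters) the bytes on the wire are
    #   "\n#6\n" + "<rpc/>" + "\n##\n"
    # i.e. a "\n#<length>\n" header per chunk followed by the "\n##\n" end-of-message marker.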
def _recv_1_1(self):
"""send xml string as package by connection"""
get_everything = False
response = ""
while not get_everything:
if len(self.buff) < 2:
self.buff += self._conn_recv(2)
# skip new line
if self.buff[:2] != "\n#":
# We have already closed connection
# caller shoud stop to ask new messages
if not self.buff and self.conn.closed:
return ""
raise exceptions.NonRecoverableError("no start")
self.buff = self.buff[2:]
# get package length
while self.buff.find("\n") == -1:
self.buff += self._conn_recv(20)
if self.buff[:2] == "#\n":
get_everything = True
self.buff = self.buff[2:]
break
length = int(self.buff[:self.buff.find("\n")])
self.buff = self.buff[self.buff.find("\n") + 1:]
# load current package
while length > len(self.buff):
self.buff += self._conn_recv(length - len(self.buff))
response += self.buff[:length]
self.buff = self.buff[length:]
return response
def close(self, goodbye_string=None):
"""send xml string by link and close connection"""
response = None
if goodbye_string:
# we have something to say
response = self.send(goodbye_string)
self._ssh_close()
        return response
Seismic_Conv1D_dec.py | dyt1990/Seis_DCEC | 1 | 8244 | <gh_stars>1-10
# -*- coding: utf-8 -*-
"""
Created on Sun Aug 19 17:48:13 2018
@author: Sediment
"""
# -*- coding: utf-8 -*-
'''
Keras implementation of deep embedder to improve clustering, inspired by:
"Unsupervised Deep Embedding for Clustering Analysis" (Xie et al, ICML 2016)
Definition can accept somewhat custom neural networks. Defaults are from paper.
'''
import sys
import numpy as np
import pandas as pd
import keras.backend as K
from keras.initializers import RandomNormal
from keras.engine.topology import Layer, InputSpec
from keras.models import Model, Sequential
from keras.layers import Dense, Dropout, Input, Conv1D, MaxPooling1D, BatchNormalization, Activation, Flatten, UpSampling1D, Reshape
from keras.optimizers import SGD, RMSprop, Adagrad, Adadelta, Adam, Nadam
from keras.regularizers import l2
from sklearn.preprocessing import normalize
from keras.callbacks import LearningRateScheduler
from sklearn.utils.linear_assignment_ import linear_assignment
from sklearn.metrics import normalized_mutual_info_score, adjusted_rand_score
from sklearn import manifold
from sklearn.cluster import KMeans
from sklearn.decomposition import PCA
from matplotlib import pyplot as plt
if sys.version_info[0] == 2:
import cPickle as pickle
else:
import pickle
class ClusteringLayer(Layer):
'''
Clustering layer which converts latent space Z of input layer
into a probability vector for each cluster defined by its centre in
Z-space. Use Kullback-Leibler divergence as loss, with a probability
target distribution.
# Arguments
output_dim: int > 0. Should be same as number of clusters.
input_dim: dimensionality of the input (integer).
This argument (or alternatively, the keyword argument `input_shape`)
is required when using this layer as the first layer in a model.
weights: list of Numpy arrays to set as initial weights.
The list should have 2 elements, of shape `(input_dim, output_dim)`
and (output_dim,) for weights and biases respectively.
alpha: parameter in Student's t-distribution. Default is 1.0.
# Input shape
2D tensor with shape: `(nb_samples, input_dim)`.
# Output shape
2D tensor with shape: `(nb_samples, output_dim)`.
'''
def __init__(self, output_dim, input_dim=None, weights=None, alpha=1.0, **kwargs):
self.output_dim = output_dim
self.input_dim = input_dim
self.alpha = alpha
# kmeans cluster centre locations
self.initial_weights = weights
self.input_spec = [InputSpec(ndim=2)]
if self.input_dim:
kwargs['input_shape'] = (self.input_dim,)
super(ClusteringLayer, self).__init__(**kwargs)
def build(self, input_shape):
assert len(input_shape) == 2
input_dim = input_shape[1]
self.input_spec = [InputSpec(dtype=K.floatx(),
shape=(None, input_dim))]
self.W = K.variable(self.initial_weights)
self.trainable_weights = [self.W]
def call(self, x, mask=None):
q = 1.0/(1.0 + K.sqrt(K.sum(K.square(K.expand_dims(x, 1) - self.W), axis=2))**2 /self.alpha)
q = q**((self.alpha+1.0)/2.0)
q = K.transpose(K.transpose(q)/K.sum(q, axis=1))
return q
def get_output_shape_for(self, input_shape):
assert input_shape and len(input_shape) == 2
return (input_shape[0], self.output_dim)
def compute_output_shape(self, input_shape):
assert input_shape and len(input_shape) == 2
return (input_shape[0], self.output_dim)
def get_config(self):
config = {'output_dim': self.output_dim,
'input_dim': self.input_dim}
base_config = super(ClusteringLayer, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
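# Rough sketch of what ClusteringLayer.call computes (following the DEC paper; the symbols
# below are illustrative names, not identifiers from this code base):
#   q_ij = (1 + ||z_i - mu_j||^2 / alpha) ** (-(alpha + 1) / 2)
#   q_ij = q_ij / sum_j' q_ij'
# i.e. a Student's t-kernel soft assignment of each embedded point z_i to cluster centre mu_j.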
class DeepEmbeddingClustering(object):
def __init__(self,
n_clusters,
input_dim,
learning_rate=0.1,
encoded=None,
decoded=None,
alpha=1.0,
pretrained_weights=None,
cluster_centres=None,
batch_size=256,
conv_filters=[8, 16, 32],
kernel_size=12,
Maxpooling_size=2,
LatentSpace_Z=25,
finetune_epochs=5,
**kwargs):
super(DeepEmbeddingClustering, self).__init__()
self.n_clusters = n_clusters
self.input_dim = input_dim
self.encoded = encoded
self.decoded = decoded
self.alpha = alpha
self.pretrained_weights = pretrained_weights
self.cluster_centres = cluster_centres
self.batch_size = batch_size
self.learning_rate = learning_rate
self.iters_lr_update = 6000
self.lr_change_rate = 0.1
self.finetune_epochs = finetune_epochs
self.conv_filters = conv_filters
self.kernel_size = kernel_size
self.Maxpooling_size = Maxpooling_size
self.LatentSpace_Z = LatentSpace_Z
self.encoders = []
self.decoders = []
input_data = Input(shape=(self.input_dim, 1))
x = Conv1D(self.conv_filters[0], (self.kernel_size), activation='relu', padding='same')(input_data)
# x = BatchNormalization()(x)
# x = Activation('relu')(x)
x = MaxPooling1D((self.Maxpooling_size), padding='same')(x)
x = Conv1D(self.conv_filters[1], (self.kernel_size), activation='relu', padding='same')(x)
# x = BatchNormalization()(x)
# x = Activation('relu')(x)
x = MaxPooling1D((self.Maxpooling_size), padding='same')(x)
x = Conv1D(self.conv_filters[2], (self.kernel_size), activation='relu', padding='same')(x)
# x = BatchNormalization()(x)
# x = Activation('relu')(x)
x = MaxPooling1D((self.Maxpooling_size), padding='same')(x)
# at this point the representation is (16 x conv_filters) i.e. 128-dimensional
x = Flatten()(x)
# at this point the representation is (6) i.e. 128-dimensional
encoded = Dense(LatentSpace_Z, activation='relu')(x)
        # Dense width = (input_dim // 2**num_maxpool_layers) * conv_filters[2],
        # i.e. it restores the flattened encoder feature-map size before reshaping.
x = Dense(self.input_dim // (2**3) * self.conv_filters[2], kernel_initializer=RandomNormal(mean=0.0, stddev=0.01, seed=None),
bias_initializer='zeros', activation='relu')(encoded)
        x = Reshape((self.input_dim // (2**3), self.conv_filters[2]))(x)  # input_dim // 2**3: one factor of 2 per max-pooling layer
x = Conv1D(self.conv_filters[2], (self.kernel_size), activation='relu', padding='same')(x)
# x = BatchNormalization()(x)
# x = Activation('relu')(x)
x = UpSampling1D((self.Maxpooling_size))(x)
x = Conv1D(self.conv_filters[1], (self.kernel_size), activation='relu', padding='same')(x)
# x = BatchNormalization()(x)
# x = Activation('relu')(x)
x = UpSampling1D((self.Maxpooling_size))(x)
x = Conv1D(self.conv_filters[0], (1), activation='relu')(x)
# x = BatchNormalization()(x)
# x = Activation('relu')(x)
x = UpSampling1D((self.Maxpooling_size))(x)
decoded = Conv1D(1, (self.kernel_size), activation='relu', padding='same')(x)
self.autoencoder = Model(input_data, decoded)
self.autoencoder.summary()
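        # Shape sketch (assuming input_dim is divisible by 2**3, e.g. input_dim = 128):
        #   (input_dim, 1) -> (input_dim/2, 8) -> (input_dim/4, 16) -> (input_dim/8, 32)
        #   -> flatten -> Dense -> latent Z of size LatentSpace_Z,
        # and the decoder mirrors this path back to (input_dim, 1).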
self.encoder = Model(input_data, encoded)
# build the end-to-end autoencoder for finetuning
# Note that at this point dropout is discarded
self.encoder.compile(loss='mse', optimizer=SGD(lr=self.learning_rate, decay=0, momentum=0.9))
self.autoencoder.compile(loss='mse', optimizer=SGD(lr=self.learning_rate, decay=0, momentum=0.9))
if cluster_centres is not None:
assert cluster_centres.shape[0] == self.n_clusters
assert cluster_centres.shape[1] == self.encoder.layers[-1].output_dim
if self.pretrained_weights is not None:
self.autoencoder.load_weights(self.pretrained_weights)
def p_mat(self, q):
weight = q**2 / q.sum(0)
return (weight.T / weight.sum(1)).T
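    # p_mat implements the DEC target distribution:
    #   p_ij = (q_ij**2 / f_j) / sum_j' (q_ij'**2 / f_j'),  with  f_j = sum_i q_ij
    # (the soft cluster frequency); sharpening q this way is what the KL-divergence
    # loss of self.DCEC pulls the soft assignments towards.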
def initialize(self, X, save_autoencoder=False, finetune_iters=5000):
if self.pretrained_weights is None:
iters_per_epoch = int(len(X) / self.batch_size)
print('layerwise pretrain')
lr_epoch_update = max(1, self.iters_lr_update / float(iters_per_epoch))
def step_decay(epoch):
initial_rate = self.learning_rate
factor = int(epoch / lr_epoch_update)
lr = initial_rate / (10 ** factor)
return lr
lr_schedule = LearningRateScheduler(step_decay)
#update encoder and decoder weights:
self.autoencoder.fit(X, X, batch_size=self.batch_size, epochs=self.finetune_epochs, callbacks=[lr_schedule])
if save_autoencoder:
self.autoencoder.save_weights('autoencoder.h5')
else:
print('Loading pretrained weights for autoencoder.')
self.autoencoder.load_weights(self.pretrained_weights)
# update encoder, decoder
# TODO: is this needed? Might be redundant...
for i in range(len(self.encoder.layers)):
self.encoder.layers[i].set_weights(self.autoencoder.layers[i].get_weights())
# initialize cluster centres using k-means
print('Initializing cluster centres with k-means.')
if self.cluster_centres is None:
            np.random.seed(42)  # fixed random seed for reproducible cluster-centre initialization
kmeans = KMeans(n_clusters=self.n_clusters, max_iter=100, n_init=6, precompute_distances='auto', random_state=None, tol=1e-4)
self.y_pred = kmeans.fit_predict(self.encoder.predict(X))
self.cluster_centres = kmeans.cluster_centers_
print ('cluster_centres:\n ', self.cluster_centres)
# prepare DCEC model
self.DCEC = Sequential([self.encoder,
ClusteringLayer(self.n_clusters,
weights=self.cluster_centres,
name='clustering')])
self.DCEC.compile(loss='kullback_leibler_divergence', optimizer=SGD(lr=self.learning_rate, decay=0, momentum=0.9))
# loss: 'mean_squared_error', 'categorical_crossentropy', 'hinge', 'squared_hinge'
return
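    # Typical usage (a sketch, assuming X has shape (n_samples, input_dim, 1)):
    #   dcec = DeepEmbeddingClustering(n_clusters=10, input_dim=128)
    #   dcec.initialize(X, save_autoencoder=True)   # pretrain the conv autoencoder + k-means centres
    #   y_pred = dcec.cluster(X, iter_max=1000)     # joint DCEC refinement with the KL target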
def visualizeData(self, Z, labels, num_clusters, csv_filename, title):
'''
TSNE visualization of the points in latent space Z
:param Z: Numpy array containing points in latent space in which clustering was performed
:param labels: True labels - used for coloring points
:param num_clusters: Total number of clusters
:param title: filename where the plot should be saved
:return: None - (side effect) saves clustering visualization plot in specified location
'''
print ('Start visualizing Data')
labels = labels.astype(int)
tsne = manifold.TSNE(n_components=2, init='pca', random_state=0)
Z_tsne = tsne.fit_transform(Z)
fig = plt.figure()
plt.scatter(Z_tsne[:, 0], Z_tsne[:, 1], s=2, c=labels, cmap=plt.cm.get_cmap("jet", num_clusters))
plt.colorbar(ticks=range(num_clusters))
# fig.savefig(title, dpi=fig.dpi)
fig.savefig(title, dpi=600)
# save t_sne results
print('Save t_sne results')
dataframe = pd.DataFrame({'Z_tsne_x':Z_tsne[:, 0], 'Z_tsne_y':Z_tsne[:, 1], 'labels':labels})
dataframe.to_csv(csv_filename, index=False, sep=',')
def cluster(self, X, y=None,
tol=0.001, update_interval=None,
iter_max=799,
save_interval=None,
**kwargs):
if update_interval is None:
# 1 epochs
update_interval = X.shape[0]/self.batch_size
print('Update interval', update_interval)
if save_interval is None:
# 50 epochs
save_interval = X.shape[0]/self.batch_size*50
print('Save interval', save_interval)
assert save_interval >= update_interval
train = True
iteration, index = 0, 0
self.accuracy = []
while train:
sys.stdout.write('\r')
# cutoff iteration
if iter_max < iteration:
print('Reached maximum iteration limit. Stopping training.')
return self.y_pred
# update (or initialize) probability distributions and propagate weight changes
# from DCEC model to encoder.
if iteration % update_interval == 0:
self.q = self.DCEC.predict(X, verbose=0)
self.p = self.p_mat(self.q)
y_pred = self.q.argmax(1)
                delta_label = ((y_pred != self.y_pred).sum().astype(np.float32) / y_pred.shape[0])
if y is None:
print(str(np.round(delta_label*100, 5))+'% change in label assignment')
if iteration > 0 and delta_label < tol:
print('delta_label ', delta_label, '< tol ', tol)
print('Reached tolerance threshold. Stopping training.')
train = False
continue
else:
self.y_pred = y_pred
for i in range(len(self.encoder.layers)):
self.encoder.layers[i].set_weights(self.DCEC.layers[0].layers[i].get_weights())
self.cluster_centres = self.DCEC.layers[-1].get_weights()[0]
# train on batch
sys.stdout.write('Iteration %d, ' % iteration)
if (index+1)*self.batch_size >= X.shape[0]:
loss = self.DCEC.train_on_batch(X[index*self.batch_size::], self.p[index*self.batch_size::])
index = 0
sys.stdout.write('Loss %f\n' % loss)
else:
loss = self.DCEC.train_on_batch(X[index*self.batch_size:(index+1) * self.batch_size],
self.p[index*self.batch_size:(index+1) * self.batch_size])
sys.stdout.write('Loss %f\n' % loss)
index += 1
# save intermediate
if iteration % save_interval == 0:
z = self.encoder.predict(X)
pca = PCA(n_components=2).fit(z)
z_2d = pca.transform(z)
clust_2d = pca.transform(self.cluster_centres)
# save states for visualization
pickle.dump({'z_2d': z_2d, 'clust_2d': clust_2d, 'q': self.q, 'p': self.p},
open('c'+str(iteration)+'.pkl', 'wb'))
# save DCEC model checkpoints
self.DCEC.save('DCEC_model_'+str(iteration)+'.h5')
iteration += 1
sys.stdout.flush()
        return y_pred
ppq/utils/round.py | xiguadong/ppq | 0 | 8245 | from decimal import ROUND_HALF_DOWN, ROUND_HALF_EVEN, ROUND_HALF_UP, Decimal
from math import ceil, floor, log2
from typing import Union
import torch
from ppq.core import RoundingPolicy
def ppq_numerical_round(value: float,
policy: RoundingPolicy=RoundingPolicy.ROUND_HALF_EVEN) -> int:
"""
reference: https://en.wikipedia.org/wiki/Rounding
    decimal definitions:
- decimal.ROUND_CEILING (towards Infinity)
- decimal.ROUND_DOWN (towards zero)
- decimal.ROUND_FLOOR (towards -Infinity)
- decimal.ROUND_HALF_DOWN (to nearest with ties going towards zero)
- decimal.ROUND_HALF_EVEN (to nearest with ties going to nearest even integer)
- decimal.ROUND_HALF_UP (to nearest with ties going away from zero)
- decimal.ROUND_UP (away from zero)
- decimal.ROUND_05UP (away from zero if last digit after rounding towards zero would have been 0 or 5; otherwise towards zero)
Args:
        value (float): the floating point value to round.
        policy (RoundingPolicy, optional): rounding policy to apply. Defaults to RoundingPolicy.ROUND_HALF_EVEN.
    Raises:
        ValueError: if an unsupported rounding policy is given.
    Returns:
        int: the rounded integer value.
"""
assert isinstance(value, float), 'numerical round only takes effect on float number.'
if policy == RoundingPolicy.ROUND_HALF_EVEN:
return int(Decimal(value).quantize(exp=Decimal(1), rounding=ROUND_HALF_EVEN))
elif policy == RoundingPolicy.ROUND_HALF_UP:
if value > 0: return int(Decimal(value).quantize(exp=Decimal(1), rounding=ROUND_HALF_UP))
else: return int(Decimal(value).quantize(exp=Decimal(1), rounding=ROUND_HALF_DOWN))
elif policy == RoundingPolicy.ROUND_HALF_DOWN:
if value > 0: return int(Decimal(value).quantize(exp=Decimal(1), rounding=ROUND_HALF_DOWN))
else: return int(Decimal(value).quantize(exp=Decimal(1), rounding=ROUND_HALF_UP))
elif policy == RoundingPolicy.ROUND_HALF_TOWARDS_ZERO:
return ppq_numerical_round(value, RoundingPolicy.ROUND_HALF_DOWN)
elif policy == RoundingPolicy.ROUND_HALF_FAR_FORM_ZERO:
return ppq_numerical_round(value, RoundingPolicy.ROUND_HALF_UP)
elif policy == RoundingPolicy.ROUND_TO_NEAR_INT:
if value > 0: return floor(value + 0.5)
else: return ceil(value - 0.5)
elif policy == RoundingPolicy.ROUND_UP:
return ceil(value)
else:
raise ValueError('Unexpected rounding policy found.')
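# Quick examples (a sanity check, not an exhaustive specification of every policy):
#   ppq_numerical_round(2.5)                                 -> 2   (ROUND_HALF_EVEN, ties go to even)
#   ppq_numerical_round(3.5)                                 -> 4
#   ppq_numerical_round(2.5, RoundingPolicy.ROUND_HALF_UP)   -> 3
#   ppq_numerical_round(2.3, RoundingPolicy.ROUND_UP)        -> 3   (ceil)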
def ppq_tensor_round(value: torch.Tensor,
policy:RoundingPolicy=RoundingPolicy.ROUND_HALF_EVEN) -> torch.Tensor:
"""
reference: https://en.wikipedia.org/wiki/Rounding
Args:
value (torch.Tensor): [description]
policy (RoundingPolicy, optional): [description]. Defaults to RoundingPolicy.ROUND_HALF_EVEN.
Raises:
ValueError: [description]
Returns:
torch.Tensor: [description]
"""
assert isinstance(value, torch.Tensor), 'tensor round only takes effect on torch tensor.'
if policy == RoundingPolicy.ROUND_HALF_EVEN:
# default rounding policy of torch is ROUND_TO_NEAR_EVEN
# try this: print(torch.Tensor([1.5, 2.5, 3.5, 4.5]).round())
# However it may generate unexpected results due to version difference.
return value.round()
elif policy == RoundingPolicy.ROUND_UP:
return value.ceil()
elif policy == RoundingPolicy.ROUND_HALF_TOWARDS_ZERO:
return torch.sign(value) * torch.ceil(value.abs() - 0.5)
elif policy == RoundingPolicy.ROUND_HALF_FAR_FORM_ZERO:
return torch.sign(value) * torch.floor(value.abs() + 0.5)
elif policy == RoundingPolicy.ROUND_HALF_DOWN:
return torch.ceil(value - 0.5)
elif policy == RoundingPolicy.ROUND_HALF_UP:
return torch.floor(value + 0.5)
elif policy == RoundingPolicy.ROUND_TO_NEAR_INT:
raise NotImplementedError(f'Torch Tensor can not use this rounding policy({policy}) try ROUND_HALF_EVEN instead.')
else:
raise ValueError('Unexpected rounding policy found.')
def ppq_round_to_power_of_2(value: Union[float, int],
policy: RoundingPolicy=RoundingPolicy.ROUND_UP) -> float:
if value == 0: return 0
sign = 1 if value >= 0 else -1
assert isinstance(value, float) or isinstance(value, int), \
'power-of-2 round only takes effect on float or int.'
    return sign * float(pow(2, ppq_numerical_round(log2(sign * value), policy=policy)))
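# --- Illustrative usage sketch (not part of the original module); it assumes
# torch and ppq.core.RoundingPolicy are importable exactly as imported above.
# Expected results are noted in the trailing comments.
if __name__ == "__main__":
    print(ppq_numerical_round(2.5))                                   # 2  (ROUND_HALF_EVEN default)
    print(ppq_numerical_round(2.5, RoundingPolicy.ROUND_HALF_UP))     # 3  (ties move away from zero)
    print(ppq_tensor_round(torch.tensor([1.5, 2.5]),
                           RoundingPolicy.ROUND_HALF_UP))             # tensor([2., 3.])
    print(ppq_round_to_power_of_2(100.0))                             # 128.0 (next power of two)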
python/repair/train.py | maropu/scavenger | 0 | 8246 | <filename>python/repair/train.py
#!/usr/bin/env python3
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import copy
import time
import numpy as np # type: ignore[import]
import pandas as pd # type: ignore[import]
from collections import namedtuple
from typing import Any, Dict, List, Optional, Tuple
from repair.utils import elapsed_time, get_option_value, setup_logger
_logger = setup_logger()
# List of internal configurations
_option = namedtuple('_option', 'key default_value type_class validator err_msg')
_opt_boosting_type = \
_option('model.lgb.boosting_type', 'gbdt', str,
lambda v: v in ['gbdt', 'dart', 'goss', 'rf'], "`{}` should be in ['gbdt', 'dart', 'goss', 'rf']")
_opt_class_weight = \
_option('model.lgb.class_weight', 'balanced', str, None, None)
_opt_learning_rate = \
_option('model.lgb.learning_rate', 0.01, float,
lambda v: v > 0.0, '`{}` should be positive')
_opt_max_depth = \
_option('model.lgb.max_depth', 7, int, None, None)
_opt_max_bin = \
_option('model.lgb.max_bin', 255, int, None, None)
_opt_reg_alpha = \
_option('model.lgb.reg_alpha', 0.0, float,
lambda v: v >= 0.0, '`{}` should be greater than or equal to 0.0')
_opt_min_split_gain = \
_option('model.lgb.min_split_gain', 0.0, float,
lambda v: v >= 0.0, '`{}` should be greater than or equal to 0.0')
_opt_n_estimators = \
_option('model.lgb.n_estimators', 300, int,
lambda v: v > 0, '`{}` should be positive')
_opt_importance_type = \
_option('model.lgb.importance_type', 'gain', str,
lambda v: v in ['split', 'gain'], "`{}` should be in ['split', 'gain']")
_opt_n_splits = \
_option('model.cv.n_splits', 3, int,
lambda v: v >= 3, '`{}` should be greater than 2')
_opt_timeout = \
_option('model.hp.timeout', 0, int, None, None)
_opt_max_evals = \
_option('model.hp.max_evals', 100000000, int,
lambda v: v > 0, '`{}` should be positive')
_opt_no_progress_loss = \
_option('model.hp.no_progress_loss', 50, int,
lambda v: v > 0, '`{}` should be positive')
train_option_keys = [
_opt_boosting_type.key,
_opt_class_weight.key,
_opt_learning_rate.key,
_opt_max_depth.key,
_opt_max_bin.key,
_opt_reg_alpha.key,
_opt_min_split_gain.key,
_opt_n_estimators.key,
_opt_importance_type.key,
_opt_n_splits.key,
_opt_timeout.key,
_opt_max_evals.key,
_opt_no_progress_loss.key
]
@elapsed_time # type: ignore
def _build_lgb_model(X: pd.DataFrame, y: pd.Series, is_discrete: bool, num_class: int, n_jobs: int,
opts: Dict[str, str]) -> Tuple[Any, float]:
import lightgbm as lgb # type: ignore[import]
def _get_option_value(*args) -> Any: # type: ignore
return get_option_value(opts, *args)
if is_discrete:
objective = "binary" if num_class <= 2 else "multiclass"
else:
objective = "regression"
fixed_params = {
"boosting_type": _get_option_value(*_opt_boosting_type),
"objective": objective,
"class_weight": _get_option_value(*_opt_class_weight),
"learning_rate": _get_option_value(*_opt_learning_rate),
"max_depth": _get_option_value(*_opt_max_depth),
"max_bin": _get_option_value(*_opt_max_bin),
"reg_alpha": _get_option_value(*_opt_reg_alpha),
"min_split_gain": _get_option_value(*_opt_min_split_gain),
"n_estimators": _get_option_value(*_opt_n_estimators),
"importance_type": _get_option_value(*_opt_importance_type),
"random_state": 42,
"n_jobs": n_jobs
}
# Set `num_class` only in the `multiclass` mode
if objective == "multiclass":
fixed_params["num_class"] = num_class
model_class = lgb.LGBMClassifier if is_discrete \
else lgb.LGBMRegressor
def _create_model(params: Dict[str, Any]) -> Any:
# Some params must be int
for k in ["num_leaves", "subsample_freq", "min_child_samples"]:
if k in params:
params[k] = int(params[k])
p = copy.deepcopy(fixed_params)
p.update(params)
return model_class(**p)
from hyperopt import hp, tpe, Trials # type: ignore[import]
from hyperopt.early_stop import no_progress_loss # type: ignore[import]
from hyperopt.fmin import fmin # type: ignore[import]
from sklearn.model_selection import ( # type: ignore[import]
cross_val_score, KFold, StratifiedKFold
)
    # TODO: Temporarily suppress `sklearn.model_selection` user warnings
import warnings
warnings.simplefilter("ignore", UserWarning)
# Forcibly disable INFO-level logging in the `hyperopt` module
from logging import getLogger, WARN
getLogger("hyperopt").setLevel(WARN)
param_space = {
"num_leaves": hp.quniform("num_leaves", 2, 100, 1),
"subsample": hp.uniform("subsample", 0.5, 1.0),
"subsample_freq": hp.quniform("subsample_freq", 1, 20, 1),
"colsample_bytree": hp.uniform("colsample_bytree", 0.01, 1.0),
"min_child_samples": hp.quniform("min_child_samples", 1, 50, 1),
"min_child_weight": hp.loguniform("min_child_weight", -3, 1),
"reg_lambda": hp.loguniform("reg_lambda", -2, 3)
}
scorer = "f1_macro" if is_discrete else "neg_mean_squared_error"
n_splits = int(_get_option_value(*_opt_n_splits))
cv = StratifiedKFold(n_splits=n_splits, shuffle=True) if is_discrete \
else KFold(n_splits=n_splits, shuffle=True)
def _objective(params: Dict[str, Any]) -> float:
model = _create_model(params)
fit_params: Dict[str, str] = {
# TODO: Raises an error if a single regressor is used
# "categorical_feature": "auto",
}
try:
# TODO: Replace with `lgb.cv` to remove the `sklearn` dependency
scores = cross_val_score(
model, X, y, scoring=scorer, cv=cv, fit_params=fit_params, n_jobs=n_jobs)
return -scores.mean()
# it might throw an exception because `y` contains
# previously unseen labels.
except Exception as e:
_logger.warning(f"{e.__class__}: {e}")
return 0.0
def _early_stop_fn() -> Any:
no_progress_loss_fn = no_progress_loss(int(_get_option_value(*_opt_no_progress_loss)))
timeout = int(_get_option_value(*_opt_timeout))
if timeout <= 0:
return no_progress_loss_fn
# Set base time for budget mechanism
start_time = time.time()
def timeout_fn(trials, best_loss=None, iteration_no_progress=0): # type: ignore
no_progress_loss, meta = no_progress_loss_fn(trials, best_loss, iteration_no_progress)
to = time.time() - start_time > timeout
return no_progress_loss or to, meta
return timeout_fn
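    # Note: the combined criterion above ends the hyperopt search as soon as either
    # the no-progress-loss rule or the wall-clock budget fires; `meta` is simply
    # passed through unchanged from the wrapped no_progress_loss callback.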
try:
trials = Trials()
max_evals = int(_get_option_value(*_opt_max_evals))
best_params = fmin(
fn=_objective,
space=param_space,
algo=tpe.suggest,
trials=trials,
max_evals=max_evals,
early_stop_fn=_early_stop_fn(),
rstate=np.random.RandomState(42),
show_progressbar=False,
verbose=False)
_logger.info("hyperopt: #eval={}/{}".format(len(trials.trials), max_evals))
# Builds a model with `best_params`
# TODO: Could we extract constraint rules (e.g., FD and CFD) from built statistical models?
model = _create_model(best_params)
model.fit(X, y)
def _feature_importances() -> List[Any]:
f = filter(lambda x: x[1] > 0.0, zip(model.feature_name_, model.feature_importances_))
return list(sorted(f, key=lambda x: x[1], reverse=True))
_logger.debug(f"lightgbm: feature_importances={_feature_importances()}")
sorted_lst = sorted(trials.trials, key=lambda x: x['result']['loss'])
min_loss = sorted_lst[0]['result']['loss']
return model, -min_loss
except Exception as e:
_logger.warning(f"Failed to build a stat model because: {e}")
return None, 0.0
def build_model(X: pd.DataFrame, y: pd.Series, is_discrete: bool, num_class: int, n_jobs: int,
opts: Dict[str, str]) -> Tuple[Any, float]:
return _build_lgb_model(X, y, is_discrete, num_class, n_jobs, opts)
def compute_class_nrow_stdv(y: pd.Series, is_discrete: bool) -> Optional[float]:
from collections import Counter
return float(np.std(list(map(lambda x: x[1], Counter(y).items())))) if is_discrete else None
def rebalance_training_data(X: pd.DataFrame, y: pd.Series, target: str) -> Tuple[pd.DataFrame, pd.Series]:
# Uses median as the number of training rows for each class
from collections import Counter
prev_nrows = len(X)
prev_stdv = compute_class_nrow_stdv(y, is_discrete=True)
hist = dict(Counter(y).items()) # type: ignore
median = int(np.median([count for key, count in hist.items()]))
def _split_data(df: pd.DataFrame) -> Tuple[pd.DataFrame, pd.Series]:
X = df[df.columns[df.columns != target]] # type: ignore
y = df[target]
return X, y
# Filters out rows having NaN values for over-sampling
X[target] = y
X_notna, y_notna = _split_data(X.dropna())
X_na, y_na = _split_data(X[X.isnull().any(axis=1)])
# Over-sampling for training data whose row number is smaller than the median value
hist_na = dict(Counter(y_na).items()) # type: ignore
smote_targets = []
kn = 5 # `k_neighbors` default value in `SMOTEN`
for key, count in hist.items():
if count < median:
nna = hist_na[key] if key in hist_na else 0
if count - nna > kn:
smote_targets.append((key, median - nna))
else:
_logger.warning(f"Over-sampling of '{key}' in y='{target}' failed because the number of the clean rows "
f"is too small: {count - nna}")
if len(smote_targets) > 0:
from imblearn.over_sampling import SMOTEN
sampler = SMOTEN(random_state=42, sampling_strategy=dict(smote_targets), k_neighbors=kn)
X_notna, y_notna = sampler.fit_resample(X_notna, y_notna)
X = pd.concat([X_notna, X_na])
y = pd.concat([y_notna, y_na])
# Under-sampling for training data whose row number is greater than the median value
rus_targets = list(map(lambda x: (x[0], median), filter(lambda x: x[1] > median, hist.items())))
if len(rus_targets) > 0:
# NOTE: The other smarter implementations can skew samples if there are many rows having NaN values,
# so we just use `RandomUnderSampler` here.
from imblearn.under_sampling import RandomUnderSampler
sampler = RandomUnderSampler(random_state=42, sampling_strategy=dict(rus_targets))
X, y = sampler.fit_resample(X, y)
_logger.info("Rebalanced training data (y={}, median={}): #rows={}(stdv={}) -> #rows={}(stdv={})".format(
target, median, prev_nrows, prev_stdv, len(X), compute_class_nrow_stdv(y, is_discrete=True)))
_logger.debug("class hist: {} => {}".format(hist.items(), Counter(y).items()))
    return X, y
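# --- Illustrative usage sketch (not part of the original module); it assumes
# pandas and imblearn are installed and simply exercises the helpers above on a
# tiny synthetic frame with an imbalanced 9-vs-3 label split.
if __name__ == "__main__":
    import pandas as pd

    demo = pd.DataFrame({"f": range(12), "label": ["a"] * 9 + ["b"] * 3})
    print(compute_class_nrow_stdv(demo["label"], is_discrete=True))   # stdv of the class sizes
    X_bal, y_bal = rebalance_training_data(demo[["f"]].copy(), demo["label"], "label")
    print(len(X_bal), y_bal.value_counts().to_dict())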
howl/roomsensor/urls.py | volzotan/django-howl | 0 | 8247 | from django.conf.urls import patterns, url
from roomsensor import views
urlpatterns = patterns('',
url(r'^$', views.index, name='roomsensor'),
# ex: /roomsensor/name/
url(r'^(?P<roomsensor_name>\w+)/$', views.display, name='roomsensor_display'),
url(r'^(?P<roomsensor_name>\w+)/read/$', views.read, name='roomsensor_read'),
# JSON data for graph creation
url(r'^(?P<roomsensor_name>\w+)/rawdata/(?P<datapoints>\d+)/(?P<compression_factor>\d+)/$', views.rawdata, name='roomsensor_rawdata'),
)
main.py | vu-telab/DAKOTA-moga-post-processing-tool | 0 | 8248 | # main.py
#
# currently just an example script I use to test my optimization_results module
#
# WARNING: design point numbers 0-indexed in pandas database, but
# eval_id column is the original 1-indexed value given by DAKOTA
import optimization_results as optr
def main():
a4 = optr.MogaOptimizationResults()
    print(a4.gen_size_list)
    print(a4.pareto_front)
assert a4.gen_size_list == [100, 94, 48, 45, 45, 46, 62, 85, 102, 108, 131, 130, 134, 119,
127, 128, 155, 124, 124, 130, 128, 123, 137, 135, 149, 165, 154,
164, 169, 177, 205, 196, 215, 185, 205, 190, 162, 158, 154, 159,
163, 183, 175, 183, 186, 188, 188, 186, 201, 213, 222]
### OLD MATLAB CODE I NEED TO REWORK ###
# # read force and atan accuracy objectives from
# # all_accuracy_objectives.dat
# A3 = load('all_accuracy_objectives.dat');
# completed_points = A3(:,1);
# force_objs = A3(:,2);
# atan_objs = A3(:,3);
# n3 = length(A3(:,1));
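    # Possible numpy rework of the MATLAB block above (illustrative sketch only;
    # it assumes numpy is available and that the .dat file keeps the same
    # three-column layout the MATLAB code expects):
    #   A3 = np.loadtxt('all_accuracy_objectives.dat')
    #   completed_points, force_objs, atan_objs = A3[:, 0], A3[:, 1], A3[:, 2]
    #   n3 = A3.shape[0]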
if __name__=='__main__':
    main()
Topaz/Core.py | Rhodolite/Gem.py.UnitTest | 0 | 8249 | #
# Copyright (c) 2017 <NAME>. All rights reserved.
#
@gem('Topaz.Core')
def gem():
require_gem('Gem.Global')
from Gem import gem_global
gem_global.testing = true
require_gem('Gem.Cache2')
require_gem('Gem.DumpCache')
require_gem('Gem.GeneratedConjureQuadruple')
require_gem('Gem.Map')
require_gem('Gem.Method')
require_gem('Gem.Path')
require_gem('Gem.System')
from Gem import create_cache, create_herd_2, create_horde_2, dump_cache_to_string, empty_herd
from Gem import print_cache, produce_conjure_by_name__V2
from Gem import produce_conjure_unique_dual, produce_conjure_unique_dual__21
from Gem import produce_conjure_quadruple__4123
from Gem import produce_conjure_unique_triple, produce_conjure_unique_triple__312
from Gem import reference_count, values_tuple_sorted_by_key, write_binary_to_path
share(
#
# Imported functions
#
'create_cache', create_cache,
'create_herd_2', create_herd_2,
'create_horde_2', create_horde_2,
'dump_cache_to_string', dump_cache_to_string,
'print_cache', print_cache,
'produce_conjure_by_name__V2', produce_conjure_by_name__V2,
'produce_conjure_unique_dual__21', produce_conjure_unique_dual__21,
'produce_conjure_unique_dual', produce_conjure_unique_dual,
'produce_conjure_quadruple__4123', produce_conjure_quadruple__4123,
'produce_conjure_unique_triple__312', produce_conjure_unique_triple__312,
'produce_conjure_unique_triple', produce_conjure_unique_triple,
'reference_count', reference_count,
'values_tuple_sorted_by_key', values_tuple_sorted_by_key,
'write_binary_to_path', write_binary_to_path,
#
# Imported Values
#
'empty_herd', empty_herd,
    )
app.py | kosovojs/wikibooster | 0 | 8250 | <gh_stars>0
import flask
from flask import Flask
from flask import jsonify
from flask import request
from flask_cors import CORS, cross_origin
from flask import render_template
import mwoauth
import requests_oauthlib
import os
import yaml
import mwapi
from tasks.main import Tasks
from save import Save
from db import DB
from typo.fix import TypoFix
app = Flask(__name__, static_folder="./frontend/build/static", template_folder="./frontend/build")
#app = Flask(__name__)
CORS(app)
user_agent = 'WikiBooster'
__dir__ = os.path.dirname(__file__)
configFile = open(os.path.join(__dir__, 'config.yaml'))
app.config.update(yaml.safe_load(configFile))
def authenticated_session(domain = 'meta.wikimedia.org'):
if 'oauth_access_token' in flask.session:
access_token = mwoauth.AccessToken(**flask.session['oauth_access_token'])
auth = requests_oauthlib.OAuth1(client_key=app.config['CONSUMER_KEY'], client_secret=app.config['CONSUMER_SECRET'],
resource_owner_key=access_token.key, resource_owner_secret=access_token.secret)
return mwapi.Session(host='https://'+domain, auth=auth, user_agent=user_agent)
else:
return None
def getUserInfo(domain = 'meta.wikimedia.org'):
session = authenticated_session(domain)
if not session:
return None, None, {'status':'error','message':'not logged in'}
try:
userinfo = session.get(action='query',
meta='userinfo',
uiprop=['groups', 'centralids'])['query']['userinfo']
return True, session, {'status':'ok','username':userinfo['name']}
except mwapi.errors.APIError as e:
if e.code == 'mwoauth-invalid-authorization-invalid-user':
# user is viewing a batch for a wiki where they do not have a local user account
# treat as anonymous on the local wiki, but query Meta to find out if they’re a steward
return None, None, {'status':'error','message':'server error'}
else:
raise e
return None, None, {'status':'error','message':'server error'}
@app.route('/', methods=['GET'])
def index_page():
return render_template('index.html')
#http://1172.16.17.32:5000/task/lvwiki/1/Helēna Mārnija
@app.route('/task/<wiki>/<name>/<page>', methods=['GET'])
def getTaskResult(wiki,name,page):
tasks = Tasks(wiki)
articleInfo = tasks.getDataForTask(name,page)
return jsonify(articleInfo)
@app.route('/testing', methods=['GET'])
def runTests():
tasks = Tasks('lvwiki')
articleInfo = tasks.runTests()
return articleInfo
@app.route('/wikis', methods=['GET'])
def listWikis():
db = DB()
wikis = db.getAvailableWikis()
return jsonify(wikis)
@app.route('/tasks/<wiki>', methods=['GET'])
def listJobs(wiki):
db = DB()
articles = db.getTasksForWiki(wiki)
return jsonify(articles)
@app.route('/task/<wiki>/<task_id>/articles', methods=['GET'])
def listArticles(wiki,task_id):
db = DB()
articles = db.get_articles_for_task(wiki,task_id)
return jsonify(articles)
#
@app.route('/typo/<wiki>', methods=['GET'])
def listTypos(wiki):
db = DB()
typos = db.getTyposForWiki(wiki)
return jsonify(typos)
@app.route('/typo/articles', methods=['GET'])
def typo_list_for_wiki():
db = DB()
wiki = 'lvwiki'
typos = db.get_typo_articles(wiki)
return jsonify(typos)
@app.route('/typo/fix/<article>', methods=['GET'])
def fix_typos(article):
db = DB()
typoFixer = TypoFix()
res = typoFixer.getData('lvwiki', article, db)
return jsonify(res)
@app.route('/rules/<wiki>', methods=['GET'])
def listRules(wiki):
db = DB()
rules = db.getRulesForWiki(wiki)
return jsonify(rules)
@app.route('/save', methods=['POST'])
def doSave():
req = request.get_json()
wiki = req['wiki']
domain = "{}.wikipedia.org".format(wiki)
userStatus, session, respFromGettingUserInfo = getUserInfo(domain)
if not userStatus:
return jsonify(respFromGettingUserInfo)
#
userName = respFromGettingUserInfo['username'] if 'username' in respFromGettingUserInfo else respFromGettingUserInfo['message']
job = req['job']
article = req['article']
result = req['result']
wikitext = req['wikitext']
status = req['status']
handlingSave = Save(session)
respFromSave = handlingSave.saveArticle(job,article,result,wikitext,status,userName)
return jsonify(respFromSave)
@app.route('/save_typo', methods=['POST'])
def doSaveTypo():
req = request.get_json()
wiki = req['wiki']
domain = "{}.wikipedia.org".format(wiki.replace('wiki',''))
userStatus, session, respFromGettingUserInfo = getUserInfo(domain)
if not userStatus:
return jsonify(respFromGettingUserInfo)
userName = respFromGettingUserInfo['username'] if 'username' in respFromGettingUserInfo else respFromGettingUserInfo['message']
active = req['active']
case = req['case']
comment = req['comment']
dumpsearch = req['dumpsearch']
minor = req['minor']
name = req['name']
regex = req['regex']
replace_with = req['replace_with']
search_for = req['search_for']
test_cases = req['test_cases']
whole = req['whole']
id = req['id']
db = DB()
typoData = db.saveTypo(active,case,comment,dumpsearch,minor,name,regex,replace_with,search_for,test_cases,whole,wiki,userName,id)
return jsonify({'status':'ok', 'info':typoData})
@app.route('/save_rule', methods=['POST'])
def saveRule():
req = request.get_json()
wiki = req['wiki']
domain = "{}.wikipedia.org".format(wiki.replace('wiki',''))
userStatus, session, respFromGettingUserInfo = getUserInfo(domain)
if not userStatus:
return jsonify(respFromGettingUserInfo)
userName = respFromGettingUserInfo['username'] if 'username' in respFromGettingUserInfo else respFromGettingUserInfo['message']
wiki = req['wiki']
rule_name = req['rule_name']
rule_object = req['rule_object']
rule = req['rule']
result = req['result']
id = req['id']
db = DB()
db.saveRule(id, wiki, rule_name, rule_object, rule, result)
return jsonify({'status':'ok'})
@app.route('/info', methods=['GET'])
def user_info():
userStatus, _,respFromGettingUserInfo = getUserInfo()
return jsonify(respFromGettingUserInfo)
@app.route('/login')
def login():
consumer_token = mwoauth.ConsumerToken(app.config['CONSUMER_KEY'], app.config['CONSUMER_SECRET'])
redirect, request_token = mwoauth.initiate('https://meta.wikimedia.org/w/index.php', consumer_token, user_agent=user_agent)
flask.session['oauth_request_token'] = dict(zip(request_token._fields, request_token))
return flask.redirect(redirect)
@app.route('/oauth-callback')
def oauth_callback():
consumer_token = mwoauth.ConsumerToken(app.config['CONSUMER_KEY'], app.config['CONSUMER_SECRET'])
request_token = mwoauth.RequestToken(**flask.session.pop('oauth_request_token'))
access_token = mwoauth.complete('https://meta.wikimedia.org/w/index.php', consumer_token, request_token, flask.request.query_string, user_agent=user_agent)
flask.session['oauth_access_token'] = dict(zip(access_token._fields, access_token))
return flask.redirect(flask.url_for('index_page'))
@app.route('/logout')
def logout():
"""Log the user out by clearing their session."""
flask.session.clear()
return flask.redirect(flask.url_for('index_page'))
if __name__ == '__main__':
    app.run(debug=True)
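# Illustrative request flow (sketch only, not part of the original app); assumes
# the server is running locally on port 5000 and the caller has completed /login:
#   curl http://localhost:5000/wikis
#   curl http://localhost:5000/tasks/lvwiki
#   curl -X POST http://localhost:5000/save -H 'Content-Type: application/json' \
#        -d '{"wiki": "lv", "job": 1, "article": "Example", "result": "{}", "wikitext": "...", "status": "done"}'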
pre_embed.py | shelleyyyyu/few_shot | 253 | 8251 | import numpy as np
from collections import defaultdict, Counter
import random
import json
from tqdm import tqdm
def transX(dataset):
rel2id = json.load(open(dataset + '/relation2ids'))
ent2id = json.load(open(dataset + '/ent2ids'))
with open('../Fast-TransX/' + dataset + '_base/entity2id.txt', 'w') as g1:
num_ents = len(ent2id.keys())
g1.write(str(num_ents) + '\n')
for k, v in ent2id.items():
g1.write(k + '\t' + str(v) + '\n')
with open('../Fast-TransX/' + dataset + '_base/relation2id.txt', 'w') as g1:
num_rels = len(rel2id.keys())
g1.write(str(num_rels) + '\n')
for k, v in rel2id.items():
g1.write(k + '\t' + str(v) + '\n')
file_name = dataset + '/path_graph'
train_triples = []
with open(file_name) as f:
lines = f.readlines()
for line in tqdm(lines):
e1 = line.split('\t')[0]
e2 = line.rstrip().split('\t')[2]
rel = line.split('\t')[1]
train_triples.append([e1,rel,e2])
train_triples.append([e2,rel+'_inv',e1])
with open('../Fast-TransX/' + dataset + '_base/train2id.txt', 'w') as g3:
num_triples = len(train_triples)
g3.write(str(num_triples) + '\n')
for triple in train_triples:
e1, rel, e2 = triple
g3.write(str(ent2id[e1]) + '\t' + str(ent2id[e2]) + '\t' + str(rel2id[rel]) + '\n')
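    # Clarifying note: the files written above follow the usual Fast-TransX/OpenKE
    # layout -- the first line holds the element count and every following line is
    # one tab-separated record; train2id.txt stores (head_id, tail_id, relation_id)
    # triples using the ids assigned in entity2id.txt and relation2id.txt.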
if __name__ == '__main__':
    transX('Wiki')
botc/gamemodes/troublebrewing/FortuneTeller.py | Xinverse/BOTC-Bot | 1 | 8252 | <filename>botc/gamemodes/troublebrewing/FortuneTeller.py<gh_stars>1-10
"""Contains the Fortune Teller Character class"""
import json
import random
import discord
import datetime
from botc import Action, ActionTypes, Townsfolk, Character, Storyteller, RedHerring, \
RecurringAction, Category, StatusList
from botc.BOTCUtils import GameLogic
from ._utils import TroubleBrewing, TBRole
import globvars
with open('botc/gamemodes/troublebrewing/character_text.json') as json_file:
character_text = json.load(json_file)[TBRole.fortuneteller.value.lower()]
with open('botutils/bot_text.json') as json_file:
bot_text = json.load(json_file)
butterfly = bot_text["esthetics"]["butterfly"]
with open('botc/game_text.json') as json_file:
strings = json.load(json_file)
fortune_teller_nightly = strings["gameplay"]["fortune_teller_nightly"]
copyrights_str = strings["misc"]["copyrights"]
yes = strings["gameplay"]["yes"]
no = strings["gameplay"]["no"]
good_link = strings["images"]["good"]
evil_link = strings["images"]["evil"]
class FortuneTeller(Townsfolk, TroubleBrewing, Character, RecurringAction):
"""Fortune Teller: Each night, choose 2 players: you learn if either is a Demon.
There is 1 good player that registers falsely to you.
===== FORTUNE TELLER =====
true_self = fortune teller
ego_self = fortune teller
social_self = fortune teller
commands:
- read <player> and <player>
initialize setup? -> NO
initialize role? -> YES
----- First night
START:
override first night instruction? -> YES # default is to send instruction string only
=> Send query for "read" command
----- Regular night
START:
override regular night instruction? -> YES # default is to send nothing
=> Send query for "read" command
"""
def __init__(self):
Character.__init__(self)
TroubleBrewing.__init__(self)
Townsfolk.__init__(self)
self._desc_string = character_text["description"]
self._examp_string = character_text["examples"]
self._instr_string = character_text["instruction"]
self._lore_string = character_text["lore"]
self._brief_string = character_text["brief"]
self._action = character_text["action"]
self._art_link = "https://bloodontheclocktower.com/wiki/images/3/3a/Fortune_Teller_Token.png"
self._art_link_cropped = "https://imgur.com/23ZXb1y.png"
self._wiki_link = "https://bloodontheclocktower.com/wiki/Fortune_Teller"
self._role_enum = TBRole.fortuneteller
self._emoji = "<:tbfortuneteller:739317350733578280>"
def create_n1_instr_str(self):
"""Create the instruction field on the opening dm card"""
# First line is the character instruction string
msg = f"{self.emoji} {self.instruction}"
addendum = character_text["n1_addendum"]
# Some characters have a line of addendum
if addendum:
with open("botutils/bot_text.json") as json_file:
bot_text = json.load(json_file)
scroll_emoji = bot_text["esthetics"]["scroll"]
msg += f"\n{scroll_emoji} {addendum}"
return msg
def add_action_field_n1(self, embed_obj):
"""Send the stats list n1"""
msg = self.action
msg += globvars.master_state.game.create_sitting_order_stats_string()
embed_obj.add_field(name = butterfly + " **「 Your Action 」**", value = msg, inline = False)
return embed_obj
def exec_init_role(self, setup):
"""Assign one of the townsfolks or outsiders as a red herring"""
possibilities = setup.townsfolks + setup.outsiders
chosen = random.choice(possibilities)
chosen.add_status_effect(RedHerring(Storyteller(), chosen))
globvars.logging.info(f">>> Fortune Teller [exec_init_role] Set red herring to {str(chosen)}")
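    # Editor's note: the RedHerring status applied above is what later makes
    # exactly one good player register as a demon inside exec_read(), matching
    # the "1 good player registers falsely to you" clause of the ability text.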
def has_finished_night_action(self, player):
"""Return True if fortune teller has submitted the read action"""
if player.is_alive():
current_phase_id = globvars.master_state.game._chrono.phase_id
received_action = player.action_grid.retrieve_an_action(current_phase_id)
return received_action is not None and received_action.action_type == ActionTypes.read
return True
@GameLogic.requires_two_targets
@GameLogic.requires_different_targets
@GameLogic.changes_not_allowed
async def register_read(self, player, targets):
"""Read command"""
# Must be 2 targets
assert len(targets) == 2, "Received a number of targets different than 2 for fortune teller 'read'"
action = Action(player, targets, ActionTypes.read, globvars.master_state.game._chrono.phase_id)
player.action_grid.register_an_action(action, globvars.master_state.game._chrono.phase_id)
msg = butterfly + " " + character_text["feedback"].format(targets[0].game_nametag, targets[1].game_nametag)
await player.user.send(msg)
async def exec_read(self, fortune_teller_player, read_player_1, read_player_2):
"""Execute the read action (night ability interaction)"""
if fortune_teller_player.is_alive():
# Correct info
if not fortune_teller_player.is_droisoned():
response = read_player_1.role.social_self.category == Category.demon or \
read_player_2.role.social_self.category == Category.demon or \
read_player_1.has_status_effect(StatusList.red_herring) or \
read_player_2.has_status_effect(StatusList.red_herring)
# Droisoned info
else:
response = random.choice((True, False))
reply = yes if response else no
link = evil_link if response else good_link
recipient = fortune_teller_player.user
msg = f"***{recipient.name}#{recipient.discriminator}***, the **{self.name}**:"
msg += "\n"
msg += self.emoji + " " + self.instruction
msg += "\n"
msg += fortune_teller_nightly.format(reply)
embed = discord.Embed(description = msg)
embed.set_thumbnail(url = link)
embed.set_footer(text = copyrights_str)
embed.timestamp = datetime.datetime.utcnow()
try:
await recipient.send(embed = embed)
except discord.Forbidden:
pass
# If the fortune teller player is dead, then nothing is sent to them
else:
pass
async def process_night_ability(self, player):
"""Process night actions for the fortune teller character.
@player : the Fortune Teller player (Player object)
"""
phase = globvars.master_state.game._chrono.phase_id
action = player.action_grid.retrieve_an_action(phase)
# The Fortune teller has submitted an action. We call the execution function immediately
if action:
assert action.action_type == ActionTypes.read, f"Wrong action type {action} in fortune teller"
targets = action.target_player
read_player_1 = targets[0]
read_player_2 = targets[1]
await self.exec_read(player, read_player_1, read_player_2)
# The fortune teller has not submitted an action. We will not randomize the action since
        # the reading ability is a "privileged" ability
else:
pass
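# --- Editor's note (illustrative, not part of the upstream module) ---
# Typical night flow, with hypothetical player objects used only for illustration:
#   1. the Fortune Teller issues "read <player> and <player>", which reaches
#      register_read(player, targets) and stores an Action on the action grid;
#   2. when the night resolves, process_night_ability(player) retrieves that
#      Action and exec_read() DMs a yes/no answer (randomised while droisoned).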
| <filename>botc/gamemodes/troublebrewing/FortuneTeller.py<gh_stars>1-10
"""Contains the Fortune Teller Character class"""
import json
import random
import discord
import datetime
from botc import Action, ActionTypes, Townsfolk, Character, Storyteller, RedHerring, \
RecurringAction, Category, StatusList
from botc.BOTCUtils import GameLogic
from ._utils import TroubleBrewing, TBRole
import globvars
with open('botc/gamemodes/troublebrewing/character_text.json') as json_file:
character_text = json.load(json_file)[TBRole.fortuneteller.value.lower()]
with open('botutils/bot_text.json') as json_file:
bot_text = json.load(json_file)
butterfly = bot_text["esthetics"]["butterfly"]
with open('botc/game_text.json') as json_file:
strings = json.load(json_file)
fortune_teller_nightly = strings["gameplay"]["fortune_teller_nightly"]
copyrights_str = strings["misc"]["copyrights"]
yes = strings["gameplay"]["yes"]
no = strings["gameplay"]["no"]
good_link = strings["images"]["good"]
evil_link = strings["images"]["evil"]
class FortuneTeller(Townsfolk, TroubleBrewing, Character, RecurringAction):
"""Fortune Teller: Each night, choose 2 players: you learn if either is a Demon.
There is 1 good player that registers falsely to you.
===== FORTUNE TELLER =====
true_self = fortune teller
ego_self = fortune teller
social_self = fortune teller
commands:
- read <player> and <player>
initialize setup? -> NO
initialize role? -> YES
----- First night
START:
override first night instruction? -> YES # default is to send instruction string only
=> Send query for "read" command
----- Regular night
START:
override regular night instruction? -> YES # default is to send nothing
=> Send query for "read" command
"""
def __init__(self):
Character.__init__(self)
TroubleBrewing.__init__(self)
Townsfolk.__init__(self)
self._desc_string = character_text["description"]
self._examp_string = character_text["examples"]
self._instr_string = character_text["instruction"]
self._lore_string = character_text["lore"]
self._brief_string = character_text["brief"]
self._action = character_text["action"]
self._art_link = "https://bloodontheclocktower.com/wiki/images/3/3a/Fortune_Teller_Token.png"
self._art_link_cropped = "https://imgur.com/23ZXb1y.png"
self._wiki_link = "https://bloodontheclocktower.com/wiki/Fortune_Teller"
self._role_enum = TBRole.fortuneteller
self._emoji = "<:tbfortuneteller:739317350733578280>"
def create_n1_instr_str(self):
"""Create the instruction field on the opening dm card"""
# First line is the character instruction string
msg = f"{self.emoji} {self.instruction}"
addendum = character_text["n1_addendum"]
# Some characters have a line of addendum
if addendum:
with open("botutils/bot_text.json") as json_file:
bot_text = json.load(json_file)
scroll_emoji = bot_text["esthetics"]["scroll"]
msg += f"\n{scroll_emoji} {addendum}"
return msg
def add_action_field_n1(self, embed_obj):
"""Send the stats list n1"""
msg = self.action
msg += globvars.master_state.game.create_sitting_order_stats_string()
embed_obj.add_field(name = butterfly + " **「 Your Action 」**", value = msg, inline = False)
return embed_obj
def exec_init_role(self, setup):
"""Assign one of the townsfolks or outsiders as a red herring"""
possibilities = setup.townsfolks + setup.outsiders
chosen = random.choice(possibilities)
chosen.add_status_effect(RedHerring(Storyteller(), chosen))
globvars.logging.info(f">>> Fortune Teller [exec_init_role] Set red herring to {str(chosen)}")
def has_finished_night_action(self, player):
"""Return True if fortune teller has submitted the read action"""
if player.is_alive():
current_phase_id = globvars.master_state.game._chrono.phase_id
received_action = player.action_grid.retrieve_an_action(current_phase_id)
return received_action is not None and received_action.action_type == ActionTypes.read
return True
@GameLogic.requires_two_targets
@GameLogic.requires_different_targets
@GameLogic.changes_not_allowed
async def register_read(self, player, targets):
"""Read command"""
# Must be 2 targets
assert len(targets) == 2, "Received a number of targets different than 2 for fortune teller 'read'"
action = Action(player, targets, ActionTypes.read, globvars.master_state.game._chrono.phase_id)
player.action_grid.register_an_action(action, globvars.master_state.game._chrono.phase_id)
msg = butterfly + " " + character_text["feedback"].format(targets[0].game_nametag, targets[1].game_nametag)
await player.user.send(msg)
async def exec_read(self, fortune_teller_player, read_player_1, read_player_2):
"""Execute the read action (night ability interaction)"""
if fortune_teller_player.is_alive():
# Correct info
if not fortune_teller_player.is_droisoned():
response = read_player_1.role.social_self.category == Category.demon or \
read_player_2.role.social_self.category == Category.demon or \
read_player_1.has_status_effect(StatusList.red_herring) or \
read_player_2.has_status_effect(StatusList.red_herring)
# Droisoned info
else:
response = random.choice((True, False))
reply = yes if response else no
link = evil_link if response else good_link
recipient = fortune_teller_player.user
msg = f"***{recipient.name}#{recipient.discriminator}***, the **{self.name}**:"
msg += "\n"
msg += self.emoji + " " + self.instruction
msg += "\n"
msg += fortune_teller_nightly.format(reply)
embed = discord.Embed(description = msg)
embed.set_thumbnail(url = link)
embed.set_footer(text = copyrights_str)
embed.timestamp = datetime.datetime.utcnow()
try:
await recipient.send(embed = embed)
except discord.Forbidden:
pass
# If the fortune teller player is dead, then nothing is sent to them
else:
pass
async def process_night_ability(self, player):
"""Process night actions for the fortune teller character.
@player : the Fortune Teller player (Player object)
"""
phase = globvars.master_state.game._chrono.phase_id
action = player.action_grid.retrieve_an_action(phase)
# The Fortune teller has submitted an action. We call the execution function immediately
if action:
assert action.action_type == ActionTypes.read, f"Wrong action type {action} in fortune teller"
targets = action.target_player
read_player_1 = targets[0]
read_player_2 = targets[1]
await self.exec_read(player, read_player_1, read_player_2)
# The fortune teller has not submitted an action. We will not randomize the action since
        # the reading ability is a "privileged" ability
else:
pass
| en | 0.843591 | Contains the Fortune Teller Character class Fortune Teller: Each night, choose 2 players: you learn if either is a Demon. There is 1 good player that registers falsely to you. ===== FORTUNE TELLER ===== true_self = fortune teller ego_self = fortune teller social_self = fortune teller commands: - read <player> and <player> initialize setup? -> NO initialize role? -> YES ----- First night START: override first night instruction? -> YES # default is to send instruction string only => Send query for "read" command ----- Regular night START: override regular night instruction? -> YES # default is to send nothing => Send query for "read" command Create the instruction field on the opening dm card # First line is the character instruction string # Some characters have a line of addendum Send the stats list n1 Assign one of the townsfolks or outsiders as a red herring Return True if fortune teller has submitted the read action Read command # Must be 2 targets Execute the read action (night ability interaction) # Correct info # Droisoned info #{recipient.discriminator}***, the **{self.name}**:" # If the fortune teller player is dead, then nothing is sent to them Process night actions for the fortune teller character. @player : the Fortune Teller player (Player object) # The Fortune teller has submitted an action. We call the execution function immediately # The fortune teller has not submitted an action. We will not randomize the action since # the reading ability is a "priviledged" ability | 2.697974 | 3 |
src/schmetterling/build/tests/test_maven.py | bjuvensjo/schmetterling | 0 | 8253 | <filename>src/schmetterling/build/tests/test_maven.py
from unittest.mock import call, MagicMock, patch
from schmetterling.build.maven import build_multi_modules
from schmetterling.build.maven import create_build_result
from schmetterling.build.maven import create_command
from schmetterling.build.maven import create_multi_modules
from schmetterling.build.maven import create_state
from schmetterling.build.maven import get_maven_infos
from schmetterling.build.maven import get_maven_repos
from schmetterling.build.maven import get_multi_modules
from schmetterling.build.state import BuildState, Build
from schmetterling.setup.state import Repo
def test_build_multi_modules():
mm = [
{
'updated': 'updated1',
'pom_dir': 'pom_dir1',
'coordinates': 'coordinates1'
},
{
'updated': 'updated2',
'pom_dir': 'pom_dir2',
'coordinates': 'coordinates2'
},
]
with patch(
'schmetterling.build.maven.create_command',
return_value='create_command') as m_create_command, patch(
'schmetterling.build.maven.run_command') as m_run_command, patch(
'schmetterling.build.maven.create_build_result',
return_value=[['success_coordinates'], [
'failure_coordinates'
]]) as m_create_build_result:
assert (
['success_coordinates', 'success_coordinates'],
['failure_coordinates', 'failure_coordinates'],
) == build_multi_modules(mm, 'repository_dir', 'settings_file', 'logback_file')
assert [
call('updated1', 'pom_dir1/mvn.log', 'repository_dir', 'settings_file', 'logback_file'),
call('updated2', 'pom_dir2/mvn.log', 'repository_dir', 'settings_file', 'logback_file')
] == m_create_command.mock_calls
assert [
call('create_command', cwd='pom_dir1'),
call('create_command', cwd='pom_dir2')
] == m_run_command.mock_calls
assert [
call('coordinates1', 'updated1', 'pom_dir1/mvn.log'),
call('coordinates2', 'updated2', 'pom_dir2/mvn.log')
] == m_create_build_result.mock_calls
def test_create_command():
assert str('mvn -Dmaven.repo.local=repository '
'-s settings.xml '
'-DcreateChecksum=true '
'-Dfile.encoding=UTF-8 '
'-Dsun.jnu.encoding=UTF-8 '
'-Dlogback.configurationFile=logback.xml '
'-B -amd -pl mygroup:app.admin,mygroup:app.sign '
'clean install javadoc:jar source:jar '
'--fail-at-end | tee mvn.log') == create_command(
[{
'artifact_id': 'app.admin',
'group_id': 'mygroup',
}, {
'artifact_id': 'app.sign',
'group_id': 'mygroup',
}], 'mvn.log', 'repository', 'settings.xml', 'logback.xml')
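# Editor's note: the expected command above relies on standard Maven flags --
# -pl restricts the reactor to the listed modules, -amd ("also make dependents")
# additionally builds whatever depends on them, and --fail-at-end defers
# failure reporting until every module has been attempted.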
@patch(
'schmetterling.build.maven.get_summary',
return_value=(['mygroup:app.admin'], ['app.sign']))
def test_create_build_result(mock_get_summary):
assert (
[
{
'artifact_id': 'app.admin',
'group_id': 'mygroup',
},
],
[
{
'artifact_id': 'app.sign',
'group_id': 'mygroup',
},
{
'artifact_id': 'pipeline.env',
'group_id': 'mygroup',
},
],
) == create_build_result(
[
{
'artifact_id': 'app.admin',
'group_id': 'mygroup',
},
{
'artifact_id': 'app.sign',
'group_id': 'mygroup',
},
{
'artifact_id': 'pipeline.env',
'group_id': 'mygroup',
},
{
'artifact_id': 'xml.ws',
'group_id': 'mygroup',
},
],
[
{
'artifact_id': 'app.admin',
'group_id': 'mygroup',
},
{
'artifact_id': 'app.sign',
'group_id': 'mygroup',
},
{
'artifact_id': 'pipeline.env',
'group_id': 'mygroup',
},
],
'mvn.log',
)
def test_create_multi_modules():
with patch('schmetterling.build.maven.makedirs') as m, patch(
'schmetterling.build.maven.open') as o:
f = MagicMock()
o.return_value = MagicMock(__enter__=MagicMock(return_value=f))
create_multi_modules([
{
'pom_dir': 'pd1',
'pom_content': 'pc1'
},
{
'pom_dir': 'pd2',
'pom_content': 'pc2'
},
])
assert [call('pd1', exist_ok=True),
call('pd2', exist_ok=True)] == m.mock_calls
assert [call.write('pc1'), call.write('pc2')] == f.mock_calls
def test_create_state():
state = BuildState('schmetterling.build.maven',
[
Build('mygroup', 'app.admin', '0.0.1-SNAPSHOT', 'app.admin',
Build.SUCCESS, 1),
Build('mygroup', 'pipeline-apache-proxy', '1.0.0-SNAPSHOT',
'pipeline-apache-proxy', Build.FAILURE, 1),
])
assert state == create_state(
[],
[{
'pom_path': 'app.admin/pom.xml',
'artifact_id': 'app.admin',
'group_id': 'mygroup',
'version': '0.0.1-SNAPSHOT',
'packaging': 'jar'
}],
[{
'pom_path': 'pipeline-apache-proxy/pom.xml',
'artifact_id': 'pipeline-apache-proxy',
'group_id': 'mygroup',
'version': '1.0.0-SNAPSHOT',
'packaging': 'jar'
}],
1,
)
def test_get_maven_info():
with patch('schmetterling.build.maven.get_pom_info', side_effect=lambda x: x):
repos = [
MagicMock(status=Repo.STATUS_UPDATED, path='path1'),
MagicMock(status=Repo.STATUS_UNCHANGED, path='path2'),
]
assert [(True, 'path1/pom.xml'),
(False, 'path2/pom.xml')] == get_maven_infos(repos)
def test_get_maven_repos():
with patch('schmetterling.build.maven.isinstance', return_value=True):
with patch('schmetterling.build.maven.exists', side_effect=[False, True]):
m = MagicMock(path='pom_repo', return_value='pom_repo')
state = [MagicMock(repos=[
MagicMock(path='non_pom_repo'),
m,
])]
assert [m] == get_maven_repos(state)
def test_get_multi_modules():
with patch('schmetterling.build.maven.get_pom', return_value='pom_content'):
assert [] == get_multi_modules([(False, {})], 'build_dir')
assert [{
'coordinates': [{}],
'pom_content': 'pom_content',
'pom_dir': 'build_dir/jar-modules',
'updated': [{}]
}] == get_multi_modules([(True, {})], 'build_dir')
assert [{
'coordinates': [{
'packaging': 'jar'
}],
'pom_content': 'pom_content',
'pom_dir': 'build_dir/jar-modules',
'updated': [{
'packaging': 'jar'
}]
}] == get_multi_modules([(True, {
'packaging': 'jar'
})], 'build_dir')
assert [{
'coordinates': [{
'artifact_id': 'super-pom',
'packaging': 'pom'
}],
'pom_content':
'pom_content',
'pom_dir':
'build_dir/super-pom-modules',
'updated': [{
'artifact_id': 'super-pom',
'packaging': 'pom'
}]
}] == get_multi_modules([(True, {
'artifact_id': 'super-pom',
'packaging': 'pom'
})], 'build_dir')
assert [{
'coordinates': [{
'artifact_id': 'pom',
'packaging': 'pom'
}],
'pom_content': 'pom_content',
'pom_dir': 'build_dir/pom-pom-modules',
'updated': [{
'artifact_id': 'pom',
'packaging': 'pom'
}]
}] == get_multi_modules([(True, {
'artifact_id': 'pom',
'packaging': 'pom'
})], 'build_dir')
assert [{
'coordinates': [{
'artifact_id': 'x',
'packaging': 'x'
}],
'pom_content': 'pom_content',
'pom_dir': 'build_dir/other-modules',
'updated': [{
'artifact_id': 'x',
'packaging': 'x'
}]
}] == get_multi_modules([(True, {
'artifact_id': 'x',
'packaging': 'x'
})], 'build_dir')
assert [{
'coordinates': [{
'artifact_id': 'war',
'packaging': 'war'
}],
'pom_content': 'pom_content',
'pom_dir': 'build_dir/war-modules',
'updated': [{
'artifact_id': 'war',
'packaging': 'war'
}]
}] == get_multi_modules([(True, {
'artifact_id': 'war',
'packaging': 'war'
})], 'build_dir')
assert [{
'coordinates': [{
'artifact_id': 'jar1',
'packaging': 'jar'
}, {
'artifact_id': 'jar2'
}, {
'artifact_id': 'jar3'
}],
'pom_content':
'pom_content',
'pom_dir':
'build_dir/jar-modules',
'updated': [{
'artifact_id': 'jar1',
'packaging': 'jar'
}, {
'artifact_id': 'jar2'
}]
}, {
'coordinates': [{
'artifact_id': 'war',
'packaging': 'war'
}],
'pom_content': 'pom_content',
'pom_dir': 'build_dir/war-modules',
'updated': [{
'artifact_id': 'war',
'packaging': 'war'
}]
}] == get_multi_modules([(True, {
'artifact_id': 'jar1',
'packaging': 'jar'
}), (True, {
'artifact_id': 'jar2'
}), (False, {
'artifact_id': 'jar3'
}), (True, {
'artifact_id': 'war',
'packaging': 'war'
})], 'build_dir')
| <filename>src/schmetterling/build/tests/test_maven.py
from unittest.mock import call, MagicMock, patch
from schmetterling.build.maven import build_multi_modules
from schmetterling.build.maven import create_build_result
from schmetterling.build.maven import create_command
from schmetterling.build.maven import create_multi_modules
from schmetterling.build.maven import create_state
from schmetterling.build.maven import get_maven_infos
from schmetterling.build.maven import get_maven_repos
from schmetterling.build.maven import get_multi_modules
from schmetterling.build.state import BuildState, Build
from schmetterling.setup.state import Repo
def test_build_multi_modules():
mm = [
{
'updated': 'updated1',
'pom_dir': 'pom_dir1',
'coordinates': 'coordinates1'
},
{
'updated': 'updated2',
'pom_dir': 'pom_dir2',
'coordinates': 'coordinates2'
},
]
with patch(
'schmetterling.build.maven.create_command',
return_value='create_command') as m_create_command, patch(
'schmetterling.build.maven.run_command') as m_run_command, patch(
'schmetterling.build.maven.create_build_result',
return_value=[['success_coordinates'], [
'failure_coordinates'
]]) as m_create_build_result:
assert (
['success_coordinates', 'success_coordinates'],
['failure_coordinates', 'failure_coordinates'],
) == build_multi_modules(mm, 'repository_dir', 'settings_file', 'logback_file')
assert [
call('updated1', 'pom_dir1/mvn.log', 'repository_dir', 'settings_file', 'logback_file'),
call('updated2', 'pom_dir2/mvn.log', 'repository_dir', 'settings_file', 'logback_file')
] == m_create_command.mock_calls
assert [
call('create_command', cwd='pom_dir1'),
call('create_command', cwd='pom_dir2')
] == m_run_command.mock_calls
assert [
call('coordinates1', 'updated1', 'pom_dir1/mvn.log'),
call('coordinates2', 'updated2', 'pom_dir2/mvn.log')
] == m_create_build_result.mock_calls
def test_create_command():
assert str('mvn -Dmaven.repo.local=repository '
'-s settings.xml '
'-DcreateChecksum=true '
'-Dfile.encoding=UTF-8 '
'-Dsun.jnu.encoding=UTF-8 '
'-Dlogback.configurationFile=logback.xml '
'-B -amd -pl mygroup:app.admin,mygroup:app.sign '
'clean install javadoc:jar source:jar '
'--fail-at-end | tee mvn.log') == create_command(
[{
'artifact_id': 'app.admin',
'group_id': 'mygroup',
}, {
'artifact_id': 'app.sign',
'group_id': 'mygroup',
}], 'mvn.log', 'repository', 'settings.xml', 'logback.xml')
@patch(
'schmetterling.build.maven.get_summary',
return_value=(['mygroup:app.admin'], ['app.sign']))
def test_create_build_result(mock_get_summary):
assert (
[
{
'artifact_id': 'app.admin',
'group_id': 'mygroup',
},
],
[
{
'artifact_id': 'app.sign',
'group_id': 'mygroup',
},
{
'artifact_id': 'pipeline.env',
'group_id': 'mygroup',
},
],
) == create_build_result(
[
{
'artifact_id': 'app.admin',
'group_id': 'mygroup',
},
{
'artifact_id': 'app.sign',
'group_id': 'mygroup',
},
{
'artifact_id': 'pipeline.env',
'group_id': 'mygroup',
},
{
'artifact_id': 'xml.ws',
'group_id': 'mygroup',
},
],
[
{
'artifact_id': 'app.admin',
'group_id': 'mygroup',
},
{
'artifact_id': 'app.sign',
'group_id': 'mygroup',
},
{
'artifact_id': 'pipeline.env',
'group_id': 'mygroup',
},
],
'mvn.log',
)
def test_create_multi_modules():
with patch('schmetterling.build.maven.makedirs') as m, patch(
'schmetterling.build.maven.open') as o:
f = MagicMock()
o.return_value = MagicMock(__enter__=MagicMock(return_value=f))
create_multi_modules([
{
'pom_dir': 'pd1',
'pom_content': 'pc1'
},
{
'pom_dir': 'pd2',
'pom_content': 'pc2'
},
])
assert [call('pd1', exist_ok=True),
call('pd2', exist_ok=True)] == m.mock_calls
assert [call.write('pc1'), call.write('pc2')] == f.mock_calls
def test_create_state():
state = BuildState('schmetterling.build.maven',
[
Build('mygroup', 'app.admin', '0.0.1-SNAPSHOT', 'app.admin',
Build.SUCCESS, 1),
Build('mygroup', 'pipeline-apache-proxy', '1.0.0-SNAPSHOT',
'pipeline-apache-proxy', Build.FAILURE, 1),
])
assert state == create_state(
[],
[{
'pom_path': 'app.admin/pom.xml',
'artifact_id': 'app.admin',
'group_id': 'mygroup',
'version': '0.0.1-SNAPSHOT',
'packaging': 'jar'
}],
[{
'pom_path': 'pipeline-apache-proxy/pom.xml',
'artifact_id': 'pipeline-apache-proxy',
'group_id': 'mygroup',
'version': '1.0.0-SNAPSHOT',
'packaging': 'jar'
}],
1,
)
def test_get_maven_info():
with patch('schmetterling.build.maven.get_pom_info', side_effect=lambda x: x):
repos = [
MagicMock(status=Repo.STATUS_UPDATED, path='path1'),
MagicMock(status=Repo.STATUS_UNCHANGED, path='path2'),
]
assert [(True, 'path1/pom.xml'),
(False, 'path2/pom.xml')] == get_maven_infos(repos)
def test_get_maven_repos():
with patch('schmetterling.build.maven.isinstance', return_value=True):
with patch('schmetterling.build.maven.exists', side_effect=[False, True]):
m = MagicMock(path='pom_repo', return_value='pom_repo')
state = [MagicMock(repos=[
MagicMock(path='non_pom_repo'),
m,
])]
assert [m] == get_maven_repos(state)
def test_get_multi_modules():
with patch('schmetterling.build.maven.get_pom', return_value='pom_content'):
assert [] == get_multi_modules([(False, {})], 'build_dir')
assert [{
'coordinates': [{}],
'pom_content': 'pom_content',
'pom_dir': 'build_dir/jar-modules',
'updated': [{}]
}] == get_multi_modules([(True, {})], 'build_dir')
assert [{
'coordinates': [{
'packaging': 'jar'
}],
'pom_content': 'pom_content',
'pom_dir': 'build_dir/jar-modules',
'updated': [{
'packaging': 'jar'
}]
}] == get_multi_modules([(True, {
'packaging': 'jar'
})], 'build_dir')
assert [{
'coordinates': [{
'artifact_id': 'super-pom',
'packaging': 'pom'
}],
'pom_content':
'pom_content',
'pom_dir':
'build_dir/super-pom-modules',
'updated': [{
'artifact_id': 'super-pom',
'packaging': 'pom'
}]
}] == get_multi_modules([(True, {
'artifact_id': 'super-pom',
'packaging': 'pom'
})], 'build_dir')
assert [{
'coordinates': [{
'artifact_id': 'pom',
'packaging': 'pom'
}],
'pom_content': 'pom_content',
'pom_dir': 'build_dir/pom-pom-modules',
'updated': [{
'artifact_id': 'pom',
'packaging': 'pom'
}]
}] == get_multi_modules([(True, {
'artifact_id': 'pom',
'packaging': 'pom'
})], 'build_dir')
assert [{
'coordinates': [{
'artifact_id': 'x',
'packaging': 'x'
}],
'pom_content': 'pom_content',
'pom_dir': 'build_dir/other-modules',
'updated': [{
'artifact_id': 'x',
'packaging': 'x'
}]
}] == get_multi_modules([(True, {
'artifact_id': 'x',
'packaging': 'x'
})], 'build_dir')
assert [{
'coordinates': [{
'artifact_id': 'war',
'packaging': 'war'
}],
'pom_content': 'pom_content',
'pom_dir': 'build_dir/war-modules',
'updated': [{
'artifact_id': 'war',
'packaging': 'war'
}]
}] == get_multi_modules([(True, {
'artifact_id': 'war',
'packaging': 'war'
})], 'build_dir')
assert [{
'coordinates': [{
'artifact_id': 'jar1',
'packaging': 'jar'
}, {
'artifact_id': 'jar2'
}, {
'artifact_id': 'jar3'
}],
'pom_content':
'pom_content',
'pom_dir':
'build_dir/jar-modules',
'updated': [{
'artifact_id': 'jar1',
'packaging': 'jar'
}, {
'artifact_id': 'jar2'
}]
}, {
'coordinates': [{
'artifact_id': 'war',
'packaging': 'war'
}],
'pom_content': 'pom_content',
'pom_dir': 'build_dir/war-modules',
'updated': [{
'artifact_id': 'war',
'packaging': 'war'
}]
}] == get_multi_modules([(True, {
'artifact_id': 'jar1',
'packaging': 'jar'
}), (True, {
'artifact_id': 'jar2'
}), (False, {
'artifact_id': 'jar3'
}), (True, {
'artifact_id': 'war',
'packaging': 'war'
})], 'build_dir')
| none | 1 | 2.134487 | 2 |
|
Copados y Clases/Mastermind_DEBUG.py | FdelMazo/7540rw-Algo1 | 1 | 8254 | <filename>Copados y Clases/Mastermind_DEBUG.py
#Sacar las lineas con DEBUG para que el juego funcione
import random
DIGITOS = 4
def mastermind():
"""Funcion principal del juego Mastermind"""
print("Bienvenido al Mastermind!")
print("Instrucciones: Tenes que adivinar un codigo de {} digitos distintos. Tu cantidad de aciertos son los numeros que estan correctamente posicionados, tu cantidad de coincidencias son los numeros bien elegidos pero mal posicionados. Suerte!".format(DIGITOS))
codigo = elegir_codigo()
intentos = 1
propuesta = input("Que codigo propones? (o pone 'Me retiro') ")
retirarse = "Me retiro"
while propuesta != codigo and propuesta != retirarse:
intentos+=1
aciertos, coincidencias = analizar_propuesta(propuesta, codigo)
print ("Tu propuesta ({}) tiene {} aciertos y {} coincidencias.".format(propuesta,aciertos,coincidencias))
propuesta = input("Propone otro codigo: ")
if propuesta == retirarse:
print ("El codigo era: {}".format(codigo))
else:
print ("Ganaste! Ganaste en {} intentos".format(intentos))
def elegir_codigo():
"""Elige un codigo de DIGITOS digitos al azar"""
digitos= ("0","1","2","3","4","5","6","7","8","9")
codigo = ""
for i in range(DIGITOS):
candidato = random.choice(digitos)
print("[DEBUG] candidato:", candidato)
while candidato in codigo:
candidato = random.choice(digitos)
codigo = codigo + candidato
print("[DEBUG] el codigo va siendo", codigo)
return codigo
def analizar_propuesta(propuesta, codigo):
"""Determina aciertos y coincidencias"""
aciertos = 0
coincidencias = 0
for i in range(DIGITOS):
if propuesta[i] == codigo[i]:
aciertos += 1
elif propuesta[i] in codigo:
coincidencias += 1
return aciertos,coincidencias
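# Editor's note -- worked example (hypothetical values): with codigo = "1234" and
# propuesta = "1243", positions 0 and 1 match exactly (2 aciertos) while "4" and
# "3" appear elsewhere in the code (2 coincidencias), so the result is (2, 2).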
mastermind() | <filename>Copados y Clases/Mastermind_DEBUG.py
#Sacar las lineas con DEBUG para que el juego funcione
import random
DIGITOS = 4
def mastermind():
"""Funcion principal del juego Mastermind"""
print("Bienvenido al Mastermind!")
print("Instrucciones: Tenes que adivinar un codigo de {} digitos distintos. Tu cantidad de aciertos son los numeros que estan correctamente posicionados, tu cantidad de coincidencias son los numeros bien elegidos pero mal posicionados. Suerte!".format(DIGITOS))
codigo = elegir_codigo()
intentos = 1
propuesta = input("Que codigo propones? (o pone 'Me retiro') ")
retirarse = "Me retiro"
while propuesta != codigo and propuesta != retirarse:
intentos+=1
aciertos, coincidencias = analizar_propuesta(propuesta, codigo)
print ("Tu propuesta ({}) tiene {} aciertos y {} coincidencias.".format(propuesta,aciertos,coincidencias))
propuesta = input("Propone otro codigo: ")
if propuesta == retirarse:
print ("El codigo era: {}".format(codigo))
else:
print ("Ganaste! Ganaste en {} intentos".format(intentos))
def elegir_codigo():
"""Elige un codigo de DIGITOS digitos al azar"""
digitos= ("0","1","2","3","4","5","6","7","8","9")
codigo = ""
for i in range(DIGITOS):
candidato = random.choice(digitos)
print("[DEBUG] candidato:", candidato)
while candidato in codigo:
candidato = random.choice(digitos)
codigo = codigo + candidato
print("[DEBUG] el codigo va siendo", codigo)
return codigo
def analizar_propuesta(propuesta, codigo):
"""Determina aciertos y coincidencias"""
aciertos = 0
coincidencias = 0
for i in range(DIGITOS):
if propuesta[i] == codigo[i]:
aciertos += 1
elif propuesta[i] in codigo:
coincidencias += 1
return aciertos,coincidencias
mastermind() | es | 0.930579 | #Sacar las lineas con DEBUG para que el juego funcione Funcion principal del juego Mastermind Elige un codigo de DIGITOS digitos al azar Determina aciertos y coincidencias | 3.876134 | 4 |
setup.py | ovnicraft/runa | 5 | 8255 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""The setup script."""
from setuptools import setup, find_packages
with open("README.rst") as readme_file:
readme = readme_file.read()
with open("HISTORY.rst") as history_file:
history = history_file.read()
requirements = ["Click>=6.0", "suds2==0.7.1"]
setup_requirements = [
# TODO(ovnicraft): put setup requirements (distutils extensions, etc.) here
]
test_requirements = [
# TODO: put package test requirements here
]
setup(
name="runa",
version="0.2.10",
description="Librería para uso de WS del Bus Gubernamental de Ecuador",
long_description=readme + "\n\n" + history,
author="<NAME>",
author_email="<EMAIL>",
url="https://github.com/ovnicraft/runa",
packages=find_packages(include=["runa"]),
entry_points={"console_scripts": ["runa=runa.cli:main"]},
include_package_data=True,
install_requires=requirements,
license="MIT license",
zip_safe=False,
keywords="runa webservices ecuador bgs",
classifiers=[
"Development Status :: 3 - Beta",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Natural Language :: English",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
],
test_suite="tests",
tests_require=test_requirements,
setup_requires=setup_requirements,
)
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""The setup script."""
from setuptools import setup, find_packages
with open("README.rst") as readme_file:
readme = readme_file.read()
with open("HISTORY.rst") as history_file:
history = history_file.read()
requirements = ["Click>=6.0", "suds2==0.7.1"]
setup_requirements = [
# TODO(ovnicraft): put setup requirements (distutils extensions, etc.) here
]
test_requirements = [
# TODO: put package test requirements here
]
setup(
name="runa",
version="0.2.10",
description="Librería para uso de WS del Bus Gubernamental de Ecuador",
long_description=readme + "\n\n" + history,
author="<NAME>",
author_email="<EMAIL>",
url="https://github.com/ovnicraft/runa",
packages=find_packages(include=["runa"]),
entry_points={"console_scripts": ["runa=runa.cli:main"]},
include_package_data=True,
install_requires=requirements,
license="MIT license",
zip_safe=False,
keywords="runa webservices ecuador bgs",
classifiers=[
"Development Status :: 3 - Beta",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Natural Language :: English",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
],
test_suite="tests",
tests_require=test_requirements,
setup_requires=setup_requirements,
)
| en | 0.540701 | #!/usr/bin/env python # -*- coding: utf-8 -*- The setup script. # TODO(ovnicraft): put setup requirements (distutils extensions, etc.) here # TODO: put package test requirements here | 1.501855 | 2 |
PyPortal_User_Interface/code.py | RichardA1/Adafruit_Learning_System_Guides | 1 | 8256 | import time
import board
import displayio
import busio
from analogio import AnalogIn
import neopixel
import adafruit_adt7410
from adafruit_bitmap_font import bitmap_font
from adafruit_display_text.label import Label
from adafruit_button import Button
import adafruit_touchscreen
from adafruit_pyportal import PyPortal
# ------------- Inputs and Outputs Setup ------------- #
# init. the temperature sensor
i2c_bus = busio.I2C(board.SCL, board.SDA)
adt = adafruit_adt7410.ADT7410(i2c_bus, address=0x48)
adt.high_resolution = True
# init. the light sensor
light_sensor = AnalogIn(board.LIGHT)
pixel = neopixel.NeoPixel(board.NEOPIXEL, 1, brightness=1)
WHITE = 0xffffff
RED = 0xff0000
YELLOW = 0xffff00
GREEN = 0x00ff00
BLUE = 0x0000ff
PURPLE = 0xff00ff
BLACK = 0x000000
# ---------- Sound Effects ------------- #
soundDemo = '/sounds/sound.wav'
soundBeep = '/sounds/beep.wav'
soundTab = '/sounds/tab.wav'
# ------------- Other Helper Functions------------- #
# Helper for cycling through a number set of 1 to x.
def numberUP(num, max_val):
num += 1
if num <= max_val:
return num
else:
return 1
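# Editor's note -- example: with max_val=3 the value cycles 1 -> 2 -> 3 -> 1,
# e.g. numberUP(3, 3) returns 1 and numberUP(1, 3) returns 2.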
# ------------- Screen Setup ------------- #
pyportal = PyPortal()
display = board.DISPLAY
display.rotation = 270
# Backlight function
# Value between 0 and 1 where 0 is OFF, 0.5 is 50% and 1 is 100% brightness.
def set_backlight(val):
val = max(0, min(1.0, val))
board.DISPLAY.auto_brightness = False
board.DISPLAY.brightness = val
# Set the Backlight
set_backlight(0.3)
# Touchscreen setup
# ------Rotate 270:
screen_width = 240
screen_height = 320
ts = adafruit_touchscreen.Touchscreen(board.TOUCH_YD, board.TOUCH_YU,
board.TOUCH_XR, board.TOUCH_XL,
calibration=((5200, 59000),
(5800, 57000)),
size=(screen_width, screen_height))
# ------------- Display Groups ------------- #
splash = displayio.Group(max_size=15) # The Main Display Group
view1 = displayio.Group(max_size=15) # Group for View 1 objects
view2 = displayio.Group(max_size=15) # Group for View 2 objects
view3 = displayio.Group(max_size=15) # Group for View 3 objects
def hideLayer(hide_target):
try:
splash.remove(hide_target)
except ValueError:
pass
def showLayer(show_target):
try:
time.sleep(0.1)
splash.append(show_target)
except ValueError:
pass
# ------------- Setup for Images ------------- #
# Display an image until the loop starts
pyportal.set_background('/images/loading.bmp')
bg_group = displayio.Group(max_size=1)
splash.append(bg_group)
icon_group = displayio.Group(max_size=1)
icon_group.x = 180
icon_group.y = 120
icon_group.scale = 1
view2.append(icon_group)
# This will handle switching Images and Icons
def set_image(group, filename):
"""Set the image file for a given goup for display.
This is most useful for Icons or image slideshows.
:param group: The chosen group
:param filename: The filename of the chosen image
"""
print("Set image to ", filename)
if group:
group.pop()
if not filename:
return # we're done, no icon desired
image_file = open(filename, "rb")
image = displayio.OnDiskBitmap(image_file)
try:
image_sprite = displayio.TileGrid(image, pixel_shader=displayio.ColorConverter())
except TypeError:
image_sprite = displayio.TileGrid(image, pixel_shader=displayio.ColorConverter(),
position=(0, 0))
group.append(image_sprite)
set_image(bg_group, "/images/BGimage.bmp")
# ---------- Text Boxes ------------- #
# Set the font and preload letters
font = bitmap_font.load_font("/fonts/Helvetica-Bold-16.bdf")
font.load_glyphs(b'abcdefghjiklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890- ()')
# Default Label styling:
TABS_X = 5
TABS_Y = 50
# Text Label Objects
feed1_label = Label(font, text="Text Wondow 1", color=0xE39300, max_glyphs=200)
feed1_label.x = TABS_X
feed1_label.y = TABS_Y
view1.append(feed1_label)
feed2_label = Label(font, text="Text Wondow 2", color=0xFFFFFF, max_glyphs=200)
feed2_label.x = TABS_X
feed2_label.y = TABS_Y
view2.append(feed2_label)
sensors_label = Label(font, text="Data View", color=0x03AD31, max_glyphs=200)
sensors_label.x = TABS_X
sensors_label.y = TABS_Y
view3.append(sensors_label)
sensor_data = Label(font, text="Data View", color=0x03AD31, max_glyphs=100)
sensor_data.x = TABS_X+15
sensor_data.y = 170
view3.append(sensor_data)
text_hight = Label(font, text="M", color=0x03AD31, max_glyphs=10)
# return a reformatted string with word wrapping using PyPortal.wrap_nicely
def text_box(target, top, string, max_chars):
text = pyportal.wrap_nicely(string, max_chars)
new_text = ""
test = ""
for w in text:
new_text += '\n'+w
test += 'M\n'
text_hight.text = test # Odd things happen without this
glyph_box = text_hight.bounding_box
target.text = "" # Odd things happen without this
target.y = int(glyph_box[3]/2)+top
target.text = new_text
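# Editor's note -- example call (values illustrative):
#   text_box(feed1_label, TABS_Y, "A long sentence that needs wrapping", 18)
# wraps the string to ~18 characters per line with pyportal.wrap_nicely, measures
# the block height via the hidden text_hight label, then repositions target.y.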
# ---------- Display Buttons ------------- #
# Default button styling:
BUTTON_HEIGHT = 40
BUTTON_WIDTH = 80
# We want three buttons across the top of the screen
TAPS_HEIGHT = 40
TAPS_WIDTH = int(screen_width/3)
TAPS_Y = 0
# We want two big buttons at the bottom of the screen
BIG_BUTTON_HEIGHT = int(screen_height/3.2)
BIG_BUTTON_WIDTH = int(screen_width/2)
BIG_BUTTON_Y = int(screen_height-BIG_BUTTON_HEIGHT)
# This group will make it easy for us to read a button press later.
buttons = []
# Main User Interface Buttons
button_view1 = Button(x=0, y=0,
width=TAPS_WIDTH, height=TAPS_HEIGHT,
label="View1", label_font=font, label_color=0xff7e00,
fill_color=0x5c5b5c, outline_color=0x767676,
selected_fill=0x1a1a1a, selected_outline=0x2e2e2e,
selected_label=0x525252)
buttons.append(button_view1) # adding this button to the buttons group
button_view2 = Button(x=TAPS_WIDTH, y=0,
width=TAPS_WIDTH, height=TAPS_HEIGHT,
label="View2", label_font=font, label_color=0xff7e00,
fill_color=0x5c5b5c, outline_color=0x767676,
selected_fill=0x1a1a1a, selected_outline=0x2e2e2e,
selected_label=0x525252)
buttons.append(button_view2) # adding this button to the buttons group
button_view3 = Button(x=TAPS_WIDTH*2, y=0,
width=TAPS_WIDTH, height=TAPS_HEIGHT,
label="View3", label_font=font, label_color=0xff7e00,
fill_color=0x5c5b5c, outline_color=0x767676,
selected_fill=0x1a1a1a, selected_outline=0x2e2e2e,
selected_label=0x525252)
buttons.append(button_view3) # adding this button to the buttons group
button_switch = Button(x=0, y=BIG_BUTTON_Y,
width=BIG_BUTTON_WIDTH, height=BIG_BUTTON_HEIGHT,
label="Switch", label_font=font, label_color=0xff7e00,
fill_color=0x5c5b5c, outline_color=0x767676,
selected_fill=0x1a1a1a, selected_outline=0x2e2e2e,
selected_label=0x525252)
buttons.append(button_switch) # adding this button to the buttons group
button_2 = Button(x=BIG_BUTTON_WIDTH, y=BIG_BUTTON_Y,
width=BIG_BUTTON_WIDTH, height=BIG_BUTTON_HEIGHT,
label="Button", label_font=font, label_color=0xff7e00,
fill_color=0x5c5b5c, outline_color=0x767676,
selected_fill=0x1a1a1a, selected_outline=0x2e2e2e,
selected_label=0x525252)
buttons.append(button_2) # adding this button to the buttons group
# Add all of the main buttons to the splash Group
for b in buttons:
splash.append(b.group)
# Make a button to change the icon image on view2
button_icon = Button(x=150, y=60,
width=BUTTON_WIDTH, height=BUTTON_HEIGHT,
label="Icon", label_font=font, label_color=0xffffff,
fill_color=0x8900ff, outline_color=0xbc55fd,
selected_fill=0x5a5a5a, selected_outline=0xff6600,
selected_label=0x525252, style=Button.ROUNDRECT)
buttons.append(button_icon) # adding this button to the buttons group
# Add this button to view2 Group
view2.append(button_icon.group)
# Make a button to play a sound on view2
button_sound = Button(x=150, y=170,
width=BUTTON_WIDTH, height=BUTTON_HEIGHT,
label="Sound", label_font=font, label_color=0xffffff,
fill_color=0x8900ff, outline_color=0xbc55fd,
selected_fill=0x5a5a5a, selected_outline=0xff6600,
selected_label=0x525252, style=Button.ROUNDRECT)
buttons.append(button_sound) # adding this button to the buttons group
# Add this button to view2 Group
view3.append(button_sound.group)
#pylint: disable=global-statement
def switch_view(what_view):
global view_live
if what_view == 1:
hideLayer(view2)
hideLayer(view3)
button_view1.selected = False
button_view2.selected = True
button_view3.selected = True
showLayer(view1)
view_live = 1
print("View1 On")
elif what_view == 2:
# global icon
hideLayer(view1)
hideLayer(view3)
button_view1.selected = True
button_view2.selected = False
button_view3.selected = True
showLayer(view2)
view_live = 2
print("View2 On")
else:
hideLayer(view1)
hideLayer(view2)
button_view1.selected = True
button_view2.selected = True
button_view3.selected = False
showLayer(view3)
view_live = 3
print("View3 On")
#pylint: enable=global-statement
# Set variables and startup states
button_view1.selected = False
button_view2.selected = True
button_view3.selected = True
showLayer(view1)
hideLayer(view2)
hideLayer(view3)
view_live = 1
icon = 1
icon_name = "Ruby"
button_mode = 1
switch_state = 0
button_switch.label = "OFF"
button_switch.selected = True
# Update out Labels with display text.
text_box(feed1_label, TABS_Y,
"The text on this screen is wrapped so that all of it fits nicely into a \
text box that is ### x ###.", 30)
text_box(feed1_label, TABS_Y,
'The text on this screen is wrapped so that all of it fits nicely into a \
text box that is {} x {}.'
.format(feed1_label.bounding_box[2], feed1_label.bounding_box[3]*2), 30)
text_box(feed2_label, TABS_Y, 'Tap on the Icon button to meet a new friend.', 18)
text_box(sensors_label, TABS_Y,
"This screen can display sensor readings and tap Sound to play a WAV file.", 28)
board.DISPLAY.show(splash)
# ------------- Code Loop ------------- #
while True:
touch = ts.touch_point
light = light_sensor.value
tempC = round(adt.temperature)
tempF = tempC * 1.8 + 32
sensor_data.text = 'Touch: {}\nLight: {}\n Temp: {}°F'.format(touch, light, tempF)
# ------------- Handle Button Press Detection ------------- #
if touch: # Only do this if the screen is touched
# loop with buttons using enumerate() to number each button group as i
for i, b in enumerate(buttons):
if b.contains(touch): # Test each button to see if it was pressed
print('button%d pressed' % i)
                if i == 0 and view_live != 1:  # only if view1 is visible
pyportal.play_file(soundTab)
switch_view(1)
while ts.touch_point:
pass
                if i == 1 and view_live != 2:  # only if view2 is visible
pyportal.play_file(soundTab)
switch_view(2)
while ts.touch_point:
pass
                if i == 2 and view_live != 3:  # only if view3 is visible
pyportal.play_file(soundTab)
switch_view(3)
while ts.touch_point:
pass
if i == 3:
pyportal.play_file(soundBeep)
# Toggle switch button type
if switch_state == 0:
switch_state = 1
b.label = "ON"
b.selected = False
pixel.fill(WHITE)
print("Swich ON")
else:
switch_state = 0
b.label = "OFF"
b.selected = True
pixel.fill(BLACK)
print("Swich OFF")
# for debounce
while ts.touch_point:
pass
print("Swich Pressed")
if i == 4:
pyportal.play_file(soundBeep)
# Momentary button type
b.selected = True
print('Button Pressed')
button_mode = numberUP(button_mode, 5)
if button_mode == 1:
pixel.fill(RED)
elif button_mode == 2:
pixel.fill(YELLOW)
elif button_mode == 3:
pixel.fill(GREEN)
elif button_mode == 4:
pixel.fill(BLUE)
elif button_mode == 5:
pixel.fill(PURPLE)
switch_state = 1
button_switch.label = "ON"
button_switch.selected = False
# for debounce
while ts.touch_point:
pass
print("Button released")
b.selected = False
                if i == 5 and view_live == 2:  # only if view2 is visible
pyportal.play_file(soundBeep)
b.selected = True
while ts.touch_point:
pass
print("Icon Button Pressed")
icon = numberUP(icon, 3)
if icon == 1:
icon_name = "Ruby"
elif icon == 2:
icon_name = "Gus"
elif icon == 3:
icon_name = "Billie"
b.selected = False
text_box(feed2_label, TABS_Y,
"Every time you tap the Icon button the icon image will \
change. Say hi to {}!".format(icon_name), 18)
set_image(icon_group, "/images/"+icon_name+".bmp")
                if i == 6 and view_live == 3:  # only if view3 is visible
b.selected = True
while ts.touch_point:
pass
print("Sound Button Pressed")
pyportal.play_file(soundDemo)
b.selected = False
| import time
import board
import displayio
import busio
from analogio import AnalogIn
import neopixel
import adafruit_adt7410
from adafruit_bitmap_font import bitmap_font
from adafruit_display_text.label import Label
from adafruit_button import Button
import adafruit_touchscreen
from adafruit_pyportal import PyPortal
# ------------- Inputs and Outputs Setup ------------- #
# init. the temperature sensor
i2c_bus = busio.I2C(board.SCL, board.SDA)
adt = adafruit_adt7410.ADT7410(i2c_bus, address=0x48)
adt.high_resolution = True
# init. the light sensor
light_sensor = AnalogIn(board.LIGHT)
pixel = neopixel.NeoPixel(board.NEOPIXEL, 1, brightness=1)
WHITE = 0xffffff
RED = 0xff0000
YELLOW = 0xffff00
GREEN = 0x00ff00
BLUE = 0x0000ff
PURPLE = 0xff00ff
BLACK = 0x000000
# ---------- Sound Effects ------------- #
soundDemo = '/sounds/sound.wav'
soundBeep = '/sounds/beep.wav'
soundTab = '/sounds/tab.wav'
# ------------- Other Helper Functions------------- #
# Helper for cycling through a number set of 1 to x.
def numberUP(num, max_val):
num += 1
if num <= max_val:
return num
else:
return 1
# ------------- Screen Setup ------------- #
pyportal = PyPortal()
display = board.DISPLAY
display.rotation = 270
# Backlight function
# Value between 0 and 1 where 0 is OFF, 0.5 is 50% and 1 is 100% brightness.
def set_backlight(val):
val = max(0, min(1.0, val))
board.DISPLAY.auto_brightness = False
board.DISPLAY.brightness = val
# Set the Backlight
set_backlight(0.3)
# Touchscreen setup
# ------Rotate 270:
screen_width = 240
screen_height = 320
ts = adafruit_touchscreen.Touchscreen(board.TOUCH_YD, board.TOUCH_YU,
board.TOUCH_XR, board.TOUCH_XL,
calibration=((5200, 59000),
(5800, 57000)),
size=(screen_width, screen_height))
# ------------- Display Groups ------------- #
splash = displayio.Group(max_size=15) # The Main Display Group
view1 = displayio.Group(max_size=15) # Group for View 1 objects
view2 = displayio.Group(max_size=15) # Group for View 2 objects
view3 = displayio.Group(max_size=15) # Group for View 3 objects
def hideLayer(hide_target):
try:
splash.remove(hide_target)
except ValueError:
pass
def showLayer(show_target):
try:
time.sleep(0.1)
splash.append(show_target)
except ValueError:
pass
# ------------- Setup for Images ------------- #
# Display an image until the loop starts
pyportal.set_background('/images/loading.bmp')
bg_group = displayio.Group(max_size=1)
splash.append(bg_group)
icon_group = displayio.Group(max_size=1)
icon_group.x = 180
icon_group.y = 120
icon_group.scale = 1
view2.append(icon_group)
# This will handle switching Images and Icons
def set_image(group, filename):
"""Set the image file for a given goup for display.
This is most useful for Icons or image slideshows.
:param group: The chosen group
:param filename: The filename of the chosen image
"""
print("Set image to ", filename)
if group:
group.pop()
if not filename:
return # we're done, no icon desired
image_file = open(filename, "rb")
image = displayio.OnDiskBitmap(image_file)
try:
image_sprite = displayio.TileGrid(image, pixel_shader=displayio.ColorConverter())
except TypeError:
image_sprite = displayio.TileGrid(image, pixel_shader=displayio.ColorConverter(),
position=(0, 0))
group.append(image_sprite)
set_image(bg_group, "/images/BGimage.bmp")
# ---------- Text Boxes ------------- #
# Set the font and preload letters
font = bitmap_font.load_font("/fonts/Helvetica-Bold-16.bdf")
font.load_glyphs(b'abcdefghjiklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890- ()')
# Default Label styling:
TABS_X = 5
TABS_Y = 50
# Text Label Objects
feed1_label = Label(font, text="Text Wondow 1", color=0xE39300, max_glyphs=200)
feed1_label.x = TABS_X
feed1_label.y = TABS_Y
view1.append(feed1_label)
feed2_label = Label(font, text="Text Wondow 2", color=0xFFFFFF, max_glyphs=200)
feed2_label.x = TABS_X
feed2_label.y = TABS_Y
view2.append(feed2_label)
sensors_label = Label(font, text="Data View", color=0x03AD31, max_glyphs=200)
sensors_label.x = TABS_X
sensors_label.y = TABS_Y
view3.append(sensors_label)
sensor_data = Label(font, text="Data View", color=0x03AD31, max_glyphs=100)
sensor_data.x = TABS_X+15
sensor_data.y = 170
view3.append(sensor_data)
text_hight = Label(font, text="M", color=0x03AD31, max_glyphs=10)
# return a reformatted string with word wrapping using PyPortal.wrap_nicely
def text_box(target, top, string, max_chars):
text = pyportal.wrap_nicely(string, max_chars)
new_text = ""
test = ""
for w in text:
new_text += '\n'+w
test += 'M\n'
text_hight.text = test # Odd things happen without this
glyph_box = text_hight.bounding_box
target.text = "" # Odd things happen without this
target.y = int(glyph_box[3]/2)+top
target.text = new_text
# ---------- Display Buttons ------------- #
# Default button styling:
BUTTON_HEIGHT = 40
BUTTON_WIDTH = 80
# We want three buttons across the top of the screen
TAPS_HEIGHT = 40
TAPS_WIDTH = int(screen_width/3)
TAPS_Y = 0
# We want two big buttons at the bottom of the screen
BIG_BUTTON_HEIGHT = int(screen_height/3.2)
BIG_BUTTON_WIDTH = int(screen_width/2)
BIG_BUTTON_Y = int(screen_height-BIG_BUTTON_HEIGHT)
# This group will make it easy for us to read a button press later.
buttons = []
# Main User Interface Buttons
button_view1 = Button(x=0, y=0,
width=TAPS_WIDTH, height=TAPS_HEIGHT,
label="View1", label_font=font, label_color=0xff7e00,
fill_color=0x5c5b5c, outline_color=0x767676,
selected_fill=0x1a1a1a, selected_outline=0x2e2e2e,
selected_label=0x525252)
buttons.append(button_view1) # adding this button to the buttons group
button_view2 = Button(x=TAPS_WIDTH, y=0,
width=TAPS_WIDTH, height=TAPS_HEIGHT,
label="View2", label_font=font, label_color=0xff7e00,
fill_color=0x5c5b5c, outline_color=0x767676,
selected_fill=0x1a1a1a, selected_outline=0x2e2e2e,
selected_label=0x525252)
buttons.append(button_view2) # adding this button to the buttons group
button_view3 = Button(x=TAPS_WIDTH*2, y=0,
width=TAPS_WIDTH, height=TAPS_HEIGHT,
label="View3", label_font=font, label_color=0xff7e00,
fill_color=0x5c5b5c, outline_color=0x767676,
selected_fill=0x1a1a1a, selected_outline=0x2e2e2e,
selected_label=0x525252)
buttons.append(button_view3) # adding this button to the buttons group
button_switch = Button(x=0, y=BIG_BUTTON_Y,
width=BIG_BUTTON_WIDTH, height=BIG_BUTTON_HEIGHT,
label="Switch", label_font=font, label_color=0xff7e00,
fill_color=0x5c5b5c, outline_color=0x767676,
selected_fill=0x1a1a1a, selected_outline=0x2e2e2e,
selected_label=0x525252)
buttons.append(button_switch) # adding this button to the buttons group
button_2 = Button(x=BIG_BUTTON_WIDTH, y=BIG_BUTTON_Y,
width=BIG_BUTTON_WIDTH, height=BIG_BUTTON_HEIGHT,
label="Button", label_font=font, label_color=0xff7e00,
fill_color=0x5c5b5c, outline_color=0x767676,
selected_fill=0x1a1a1a, selected_outline=0x2e2e2e,
selected_label=0x525252)
buttons.append(button_2) # adding this button to the buttons group
# Add all of the main buttons to the splash Group
for b in buttons:
splash.append(b.group)
# Make a button to change the icon image on view2
button_icon = Button(x=150, y=60,
width=BUTTON_WIDTH, height=BUTTON_HEIGHT,
label="Icon", label_font=font, label_color=0xffffff,
fill_color=0x8900ff, outline_color=0xbc55fd,
selected_fill=0x5a5a5a, selected_outline=0xff6600,
selected_label=0x525252, style=Button.ROUNDRECT)
buttons.append(button_icon) # adding this button to the buttons group
# Add this button to view2 Group
view2.append(button_icon.group)
# Make a button to play a sound on view2
button_sound = Button(x=150, y=170,
width=BUTTON_WIDTH, height=BUTTON_HEIGHT,
label="Sound", label_font=font, label_color=0xffffff,
fill_color=0x8900ff, outline_color=0xbc55fd,
selected_fill=0x5a5a5a, selected_outline=0xff6600,
selected_label=0x525252, style=Button.ROUNDRECT)
buttons.append(button_sound) # adding this button to the buttons group
# Add this button to view2 Group
view3.append(button_sound.group)
#pylint: disable=global-statement
def switch_view(what_view):
global view_live
if what_view == 1:
hideLayer(view2)
hideLayer(view3)
button_view1.selected = False
button_view2.selected = True
button_view3.selected = True
showLayer(view1)
view_live = 1
print("View1 On")
elif what_view == 2:
# global icon
hideLayer(view1)
hideLayer(view3)
button_view1.selected = True
button_view2.selected = False
button_view3.selected = True
showLayer(view2)
view_live = 2
print("View2 On")
else:
hideLayer(view1)
hideLayer(view2)
button_view1.selected = True
button_view2.selected = True
button_view3.selected = False
showLayer(view3)
view_live = 3
print("View3 On")
#pylint: enable=global-statement
# Set variables and startup states
button_view1.selected = False
button_view2.selected = True
button_view3.selected = True
showLayer(view1)
hideLayer(view2)
hideLayer(view3)
view_live = 1
icon = 1
icon_name = "Ruby"
button_mode = 1
switch_state = 0
button_switch.label = "OFF"
button_switch.selected = True
# Update out Labels with display text.
text_box(feed1_label, TABS_Y,
"The text on this screen is wrapped so that all of it fits nicely into a \
text box that is ### x ###.", 30)
text_box(feed1_label, TABS_Y,
'The text on this screen is wrapped so that all of it fits nicely into a \
text box that is {} x {}.'
.format(feed1_label.bounding_box[2], feed1_label.bounding_box[3]*2), 30)
text_box(feed2_label, TABS_Y, 'Tap on the Icon button to meet a new friend.', 18)
text_box(sensors_label, TABS_Y,
"This screen can display sensor readings and tap Sound to play a WAV file.", 28)
board.DISPLAY.show(splash)
# ------------- Code Loop ------------- #
while True:
touch = ts.touch_point
light = light_sensor.value
tempC = round(adt.temperature)
tempF = tempC * 1.8 + 32
sensor_data.text = 'Touch: {}\nLight: {}\n Temp: {}°F'.format(touch, light, tempF)
# ------------- Handle Button Press Detection ------------- #
if touch: # Only do this if the screen is touched
# loop with buttons using enumerate() to number each button group as i
for i, b in enumerate(buttons):
if b.contains(touch): # Test each button to see if it was pressed
print('button%d pressed' % i)
                if i == 0 and view_live != 1:  # only if view1 is visible
pyportal.play_file(soundTab)
switch_view(1)
while ts.touch_point:
pass
                if i == 1 and view_live != 2:  # only if view2 is visible
pyportal.play_file(soundTab)
switch_view(2)
while ts.touch_point:
pass
                if i == 2 and view_live != 3:  # only if view3 is visible
pyportal.play_file(soundTab)
switch_view(3)
while ts.touch_point:
pass
if i == 3:
pyportal.play_file(soundBeep)
# Toggle switch button type
if switch_state == 0:
switch_state = 1
b.label = "ON"
b.selected = False
pixel.fill(WHITE)
print("Swich ON")
else:
switch_state = 0
b.label = "OFF"
b.selected = True
pixel.fill(BLACK)
print("Swich OFF")
# for debounce
while ts.touch_point:
pass
print("Swich Pressed")
if i == 4:
pyportal.play_file(soundBeep)
# Momentary button type
b.selected = True
print('Button Pressed')
button_mode = numberUP(button_mode, 5)
if button_mode == 1:
pixel.fill(RED)
elif button_mode == 2:
pixel.fill(YELLOW)
elif button_mode == 3:
pixel.fill(GREEN)
elif button_mode == 4:
pixel.fill(BLUE)
elif button_mode == 5:
pixel.fill(PURPLE)
switch_state = 1
button_switch.label = "ON"
button_switch.selected = False
# for debounce
while ts.touch_point:
pass
print("Button released")
b.selected = False
                if i == 5 and view_live == 2:  # only if view2 is visible
pyportal.play_file(soundBeep)
b.selected = True
while ts.touch_point:
pass
print("Icon Button Pressed")
icon = numberUP(icon, 3)
if icon == 1:
icon_name = "Ruby"
elif icon == 2:
icon_name = "Gus"
elif icon == 3:
icon_name = "Billie"
b.selected = False
text_box(feed2_label, TABS_Y,
"Every time you tap the Icon button the icon image will \
change. Say hi to {}!".format(icon_name), 18)
set_image(icon_group, "/images/"+icon_name+".bmp")
                if i == 6 and view_live == 3:  # only if view3 is visible
b.selected = True
while ts.touch_point:
pass
print("Sound Button Pressed")
pyportal.play_file(soundDemo)
b.selected = False
| en | 0.649481 | # ------------- Inputs and Outputs Setup ------------- # # init. the temperature sensor # init. the light sensor # ---------- Sound Effects ------------- # # ------------- Other Helper Functions------------- # # Helper for cycling through a number set of 1 to x. # ------------- Screen Setup ------------- # # Backlight function # Value between 0 and 1 where 0 is OFF, 0.5 is 50% and 1 is 100% brightness. # Set the Backlight # Touchscreen setup # ------Rotate 270: # ------------- Display Groups ------------- # # The Main Display Group # Group for View 1 objects # Group for View 2 objects # Group for View 3 objects # ------------- Setup for Images ------------- # # Display an image until the loop starts # This will handel switching Images and Icons Set the image file for a given goup for display. This is most useful for Icons or image slideshows. :param group: The chosen group :param filename: The filename of the chosen image # we're done, no icon desired # ---------- Text Boxes ------------- # # Set the font and preload letters # Default Label styling: # Text Label Objects # return a reformatted string with word wrapping using PyPortal.wrap_nicely # Odd things happen without this # Odd things happen without this # ---------- Display Buttons ------------- # # Default button styling: # We want three buttons across the top of the screen # We want two big buttons at the bottom of the screen # This group will make it easy for us to read a button press later. # Main User Interface Buttons # adding this button to the buttons group # adding this button to the buttons group # adding this button to the buttons group # adding this button to the buttons group # adding this button to the buttons group # Add all of the main buttons to the spalsh Group # Make a button to change the icon image on view2 # adding this button to the buttons group # Add this button to view2 Group # Make a button to play a sound on view2 # adding this button to the buttons group # Add this button to view2 Group #pylint: disable=global-statement # global icon #pylint: enable=global-statement # Set veriables and startup states # Update out Labels with display text. ### x ###.", 30) # ------------- Code Loop ------------- # # ------------- Handle Button Press Detection ------------- # # Only do this if the screen is touched # loop with buttons using enumerate() to number each button group as i # Test each button to see if it was pressed # only if view1 is visable # only if view2 is visable # only if view3 is visable # Toggle switch button type # for debounce # Momentary button type # for debounce # only if view2 is visable # only if view3 is visable | 2.637596 | 3 |
btse_futures/order.py | yottatix/btse-python | 0 | 8257 | import json
from btse_futures.constants import OrderType, Side, TimeInForce
class Order:
"""
Class to represent a BTSE Order
...
Attributes
----------
size : int
order quantity or size. e.g. 1
price : float
price. e.g. 7000.0
side: str
order side. BUY or SELL
time_in_force: str
time the order is in force. Possible options defined in TimeInForce. e.g. GTC
symbol: str
instrument symbol. e.g. BTCPFC
type: str
order type. "LIMIT", "MARKET", or "OCO"
txType: str
transaction type
postOnly: bool
Is order post only?
reduceOnly: bool
Is order reduce only?
triggerPrice: float
Trigger price. Relevant only for LIMIT and OCO order types
stopPrice: float
Stop price.
trailValue: float
Trail value.
clOrderId: str
User defined order id
trigger: str
If an order is a stop loss or take profit order, then this parameter determines the trigger price.
Available values are: 1. markPrice = Mark Price (Default) and 2. lastPrice = Last transacted Price
Documentation: https://www.btse.com/apiexplorer/futures/?shell#tocs_orderformv2
"""
def __init__(self, size: int, price: float, side: str, time_in_force: str, symbol: str, type: str, txType: str, postOnly: bool, reduceOnly: bool, triggerPrice: float, stopPrice: float = None, trailValue: float = None, clOrderId: str = None, trigger: str = None) -> None:
assert(isinstance(size, int))
assert(isinstance(price, float))
assert(isinstance(side, str))
assert(isinstance(time_in_force, str))
assert(isinstance(symbol, str))
assert(isinstance(type, str))
assert(isinstance(postOnly, bool))
assert(isinstance(reduceOnly, bool))
assert(isinstance(triggerPrice, float))
self.size = size
self.price = price
self.side = side
self.time_in_force = time_in_force
self.symbol = symbol
self.type = type
self.txType = txType
self.postOnly = postOnly
self.reduceOnly = reduceOnly
self.triggerPrice = triggerPrice
self.stopPrice = stopPrice
self.trailValue = trailValue
self.clOrderId = clOrderId
self.trigger = trigger
@property
def quantity(self):
return self.size
def to_json(self):
json_string = json.dumps(self.order_without_none_values())
print(f'json string: {json_string}')
return json_string
def order_without_none_values(self):
order_dict = self.__dict__
for key, value in list(order_dict.items()):
if value is None:
del order_dict[key]
return order_dict
class OpenOrder:
"""
open order endpoint response format
https://www.btse.com/apiexplorer/futures/#tocs_positionrespv2_1
Example:
--------
`{
"orderType": 0,
"price": 6875,
"size": 4,
"side": "BUY",
"filledSize": 3,
"orderValue": 20.625,
"pegPriceMin": 0,
"pegPriceMax": 0,
"pegPriceDeviation": 0,
"cancelDuration": 0,
"timestamp": 1576661434072,
"orderID": "string",
"stealth": 0.2,
"triggerOrder": true,
"triggered": true,
"triggerPrice": 0,
"triggerOriginalPrice": 0,
"triggerOrderType": 1001,
"triggerTrailingStopDeviation": 0,
"triggerStopPrice": 0,
"symbol": "string",
"trailValue": 0,
"clOrderID": "market001",
"reduceOnly": true,
"orderState": "string"
}`
"""
def __init__(self) -> None:
self.orderType = 0
self.price = 0
self.size = 0
self.side = ''
self.filledSize = 0
self.orderValue = 0.0
self.pegPriceMin = 0
self.pegPriceMax = 0
self.pegPriceDeviation = 0
self.cancelDuration = 0
self.timestamp = 0
self.orderID = ''
self.stealth = 0.0
self.triggerOrder = ''
self.triggered = ''
self.triggerPrice = 0
self.triggerOriginalPrice = 0
self.triggerOrderType = 0
self.triggerTrailingStopDeviation = 0
self.triggerStopPrice = 0
self.symbol = ''
self.trailValue = 0
self.clOrderID = ''
self.reduceOnly = ''
self.orderState = ''
@staticmethod
def from_dict(data):
open_order = OpenOrder()
open_order.orderType = data.get('orderType')
open_order.price = data.get('price')
open_order.size = data.get('size')
open_order.side = data.get('side')
open_order.filledSize = data.get('filledSize')
open_order.orderValue = data.get('orderValue')
open_order.pegPriceMin = data.get('pegPriceMin')
open_order.pegPriceMax = data.get('pegPriceMax')
open_order.pegPriceDeviation = data.get('pegPriceDeviation')
open_order.cancelDuration = data.get('cancelDuration')
open_order.timestamp = data.get('timestamp')
open_order.orderID = data.get('orderID')
open_order.stealth = data.get('stealth')
open_order.triggerOrder = data.get('triggerOrder')
open_order.triggered = data.get('triggered')
open_order.triggerPrice = data.get('triggerPrice')
open_order.triggerOriginalPrice = data.get('triggerOriginalPrice')
open_order.triggerOrderType = data.get('triggerOrderType')
open_order.triggerTrailingStopDeviation = data.get(
'triggerTrailingStopDeviation')
open_order.triggerStopPrice = data.get('triggerStopPrice')
open_order.symbol = data.get('symbol')
open_order.trailValue = data.get('trailValue')
open_order.clOrderID = data.get('clOrderID')
open_order.reduceOnly = data.get('reduceOnly')
open_order.orderState = data.get('orderState')
return open_order
class OrderResponseV21:
"""
Order Response V2.1
Documentation -- https://www.btse.com/apiexplorer/futures/?shell#tocs_orderrespv2_1
"""
def __init__(self) -> None:
self.status = 0
self.symbol = ''
self.orderType = 0
self.price = 0.0
self.side = ''
self.size = 0
self.orderID = ''
self.timestamp = 0
self.triggerPrice = 0.0
self.trigger = ''
self.deviation = 0.0
self.stealth = 0.0
self.message = ''
self.avgFillPrice = 0.0
self.fillSize = 0.0
self.clOrderID = ''
@staticmethod
def from_dict(data):
order_response_v21 = OrderResponseV21()
order_response_v21.status = data.get('status')
order_response_v21.symbol = data.get('symbol')
order_response_v21.orderType = data.get('orderType')
order_response_v21.price = data.get('price')
order_response_v21.side = data.get('side')
order_response_v21.size = data.get('size')
order_response_v21.orderID = data.get('orderID')
order_response_v21.timestamp = data.get('timestamp')
order_response_v21.triggerPrice = data.get('triggerPrice')
order_response_v21.trigger = data.get('trigger')
order_response_v21.deviation = data.get('deviation')
order_response_v21.stealth = data.get('stealth')
order_response_v21.message = data.get('message')
order_response_v21.avgFillPrice = data.get('avgFillPrice')
order_response_v21.fillSize = data.get('fillSize')
order_response_v21.clOrderID = data.get('clOrderID')
return order_response_v21
| import json
from btse_futures.constants import OrderType, Side, TimeInForce
class Order:
"""
Class to represent a BTSE Order
...
Attributes
----------
size : int
order quantity or size. e.g. 1
price : float
price. e.g. 7000.0
side: str
order side. BUY or SELL
time_in_force: str
time the order is in force. Possible options defined in TimeInForce. e.g. GTC
symbol: str
instrument symbol. e.g. BTCPFC
type: str
order type. "LIMIT", "MARKET", or "OCO"
txType: str
transaction type
postOnly: bool
Is order post only?
reduceOnly: bool
Is order reduce only?
triggerPrice: float
Trigger price. Relevant only for LIMIT and OCO order types
stopPrice: float
Stop price.
trailValue: float
Trail value.
clOrderId: str
User defined order id
trigger: str
If an order is a stop loss or take profit order, then this parameter determines the trigger price.
Available values are: 1. markPrice = Mark Price (Default) and 2. lastPrice = Last transacted Price
Documentation: https://www.btse.com/apiexplorer/futures/?shell#tocs_orderformv2
"""
def __init__(self, size: int, price: float, side: str, time_in_force: str, symbol: str, type: str, txType: str, postOnly: bool, reduceOnly: bool, triggerPrice: float, stopPrice: float = None, trailValue: float = None, clOrderId: str = None, trigger: str = None) -> None:
assert(isinstance(size, int))
assert(isinstance(price, float))
assert(isinstance(side, str))
assert(isinstance(time_in_force, str))
assert(isinstance(symbol, str))
assert(isinstance(type, str))
assert(isinstance(postOnly, bool))
assert(isinstance(reduceOnly, bool))
assert(isinstance(triggerPrice, float))
self.size = size
self.price = price
self.side = side
self.time_in_force = time_in_force
self.symbol = symbol
self.type = type
self.txType = txType
self.postOnly = postOnly
self.reduceOnly = reduceOnly
self.triggerPrice = triggerPrice
self.stopPrice = stopPrice
self.trailValue = trailValue
self.clOrderId = clOrderId
self.trigger = trigger
@property
def quantity(self):
return self.size
def to_json(self):
json_string = json.dumps(self.order_without_none_values())
print(f'json string: {json_string}')
return json_string
def order_without_none_values(self):
order_dict = self.__dict__
for key, value in list(order_dict.items()):
if value is None:
del order_dict[key]
return order_dict
class OpenOrder:
"""
open order endpoint response format
https://www.btse.com/apiexplorer/futures/#tocs_positionrespv2_1
Example:
--------
`{
"orderType": 0,
"price": 6875,
"size": 4,
"side": "BUY",
"filledSize": 3,
"orderValue": 20.625,
"pegPriceMin": 0,
"pegPriceMax": 0,
"pegPriceDeviation": 0,
"cancelDuration": 0,
"timestamp": 1576661434072,
"orderID": "string",
"stealth": 0.2,
"triggerOrder": true,
"triggered": true,
"triggerPrice": 0,
"triggerOriginalPrice": 0,
"triggerOrderType": 1001,
"triggerTrailingStopDeviation": 0,
"triggerStopPrice": 0,
"symbol": "string",
"trailValue": 0,
"clOrderID": "market001",
"reduceOnly": true,
"orderState": "string"
}`
"""
def __init__(self) -> None:
self.orderType = 0
self.price = 0
self.size = 0
self.side = ''
self.filledSize = 0
self.orderValue = 0.0
self.pegPriceMin = 0
self.pegPriceMax = 0
self.pegPriceDeviation = 0
self.cancelDuration = 0
self.timestamp = 0
self.orderID = ''
self.stealth = 0.0
self.triggerOrder = ''
self.triggered = ''
self.triggerPrice = 0
self.triggerOriginalPrice = 0
self.triggerOrderType = 0
self.triggerTrailingStopDeviation = 0
self.triggerStopPrice = 0
self.symbol = ''
self.trailValue = 0
self.clOrderID = ''
self.reduceOnly = ''
self.orderState = ''
@staticmethod
def from_dict(data):
open_order = OpenOrder()
open_order.orderType = data.get('orderType')
open_order.price = data.get('price')
open_order.size = data.get('size')
open_order.side = data.get('side')
open_order.filledSize = data.get('filledSize')
open_order.orderValue = data.get('orderValue')
open_order.pegPriceMin = data.get('pegPriceMin')
open_order.pegPriceMax = data.get('pegPriceMax')
open_order.pegPriceDeviation = data.get('pegPriceDeviation')
open_order.cancelDuration = data.get('cancelDuration')
open_order.timestamp = data.get('timestamp')
open_order.orderID = data.get('orderID')
open_order.stealth = data.get('stealth')
open_order.triggerOrder = data.get('triggerOrder')
open_order.triggered = data.get('triggered')
open_order.triggerPrice = data.get('triggerPrice')
open_order.triggerOriginalPrice = data.get('triggerOriginalPrice')
open_order.triggerOrderType = data.get('triggerOrderType')
open_order.triggerTrailingStopDeviation = data.get(
'triggerTrailingStopDeviation')
open_order.triggerStopPrice = data.get('triggerStopPrice')
open_order.symbol = data.get('symbol')
open_order.trailValue = data.get('trailValue')
open_order.clOrderID = data.get('clOrderID')
open_order.reduceOnly = data.get('reduceOnly')
open_order.orderState = data.get('orderState')
return open_order
class OrderResponseV21:
"""
Order Response V2.1
Documentation -- https://www.btse.com/apiexplorer/futures/?shell#tocs_orderrespv2_1
"""
def __init__(self) -> None:
self.status = 0
self.symbol = ''
self.orderType = 0
self.price = 0.0
self.side = ''
self.size = 0
self.orderID = ''
self.timestamp = 0
self.triggerPrice = 0.0
self.trigger = ''
self.deviation = 0.0
self.stealth = 0.0
self.message = ''
self.avgFillPrice = 0.0
self.fillSize = 0.0
self.clOrderID = ''
@staticmethod
def from_dict(data):
order_response_v21 = OrderResponseV21()
order_response_v21.status = data.get('status')
order_response_v21.symbol = data.get('symbol')
order_response_v21.orderType = data.get('orderType')
order_response_v21.price = data.get('price')
order_response_v21.side = data.get('side')
order_response_v21.size = data.get('size')
order_response_v21.orderID = data.get('orderID')
order_response_v21.timestamp = data.get('timestamp')
order_response_v21.triggerPrice = data.get('triggerPrice')
order_response_v21.trigger = data.get('trigger')
order_response_v21.deviation = data.get('deviation')
order_response_v21.stealth = data.get('stealth')
order_response_v21.message = data.get('message')
order_response_v21.avgFillPrice = data.get('avgFillPrice')
order_response_v21.fillSize = data.get('fillSize')
order_response_v21.clOrderID = data.get('clOrderID')
return order_response_v21
| en | 0.533308 | Class to represent a BTSE Order ... Attributes ---------- size : int order quantity or size. e.g. 1 price : float price. e.g. 7000.0 side: str order side. BUY or SELL time_in_force: str time the order is in force. Possible options defined in TimeInForce. e.g. GTC symbol: str instrument symbol. e.g. BTCPFC type: str order type. "LIMIT", "MARKET", or "OCO" txType: str transaction type postOnly: bool Is order post only? reduceOnly: bool Is order reduce only? triggerPrice: float Trigger price. Relevant only for LIMIT and OCO order types stopPrice: float Stop price. trailValue: float Trail value. clOrderId: str User defined order id trigger: str If an order is a stop loss or take profit order, then this parameter determines the trigger price. Available values are: 1. markPrice = Mark Price (Default) and 2. lastPrice = Last transacted Price Documentation: https://www.btse.com/apiexplorer/futures/?shell#tocs_orderformv2 open order endpoint response format https://www.btse.com/apiexplorer/futures/#tocs_positionrespv2_1 Example: -------- `{ "orderType": 0, "price": 6875, "size": 4, "side": "BUY", "filledSize": 3, "orderValue": 20.625, "pegPriceMin": 0, "pegPriceMax": 0, "pegPriceDeviation": 0, "cancelDuration": 0, "timestamp": 1576661434072, "orderID": "string", "stealth": 0.2, "triggerOrder": true, "triggered": true, "triggerPrice": 0, "triggerOriginalPrice": 0, "triggerOrderType": 1001, "triggerTrailingStopDeviation": 0, "triggerStopPrice": 0, "symbol": "string", "trailValue": 0, "clOrderID": "market001", "reduceOnly": true, "orderState": "string" }` Order Response V2.1 Documentation -- https://www.btse.com/apiexplorer/futures/?shell#tocs_orderrespv2_1 | 2.866748 | 3 |
tests/mock_responses.py | md-reddevil/blinkpy | 0 | 8258 | <filename>tests/mock_responses.py
"""Simple mock responses definitions."""
from blinkpy.helpers.util import BlinkURLHandler
import blinkpy.helpers.constants as const
LOGIN_RESPONSE = {
'region': {'mock': 'Test'},
'networks': {
'1234': {'name': 'test', 'onboarded': True}
},
'authtoken': {'authtoken': '<PASSWORD>', 'message': 'auth'}
}
class MockResponse:
"""Class for mock request response."""
def __init__(self, json_data, status_code, raw_data=None):
"""Initialize mock get response."""
self.json_data = json_data
self.status_code = status_code
self.raw_data = raw_data
def json(self):
"""Return json data from get_request."""
return self.json_data
@property
def raw(self):
"""Return raw data from get request."""
return self.raw_data
def mocked_session_send(*args, **kwargs):
"""Mock session."""
prepped = args[0]
url = prepped.url
header = prepped.headers
method = prepped.method
if method == 'GET':
expected_token = LOGIN_RESPONSE['authtoken']['authtoken']
if header['TOKEN_AUTH'] != expected_token:
response = {'message': 'Not Authorized', 'code': 400}
status = 400
elif url == 'use_bad_response':
response = {'foo': 'bar'}
status = 200
elif url == 'reauth':
response = {'message': 'REAUTH', 'code': 777}
status = 777
else:
response = {'test': 'foo'}
status = 200
elif method == 'POST':
if url in (const.LOGIN_URL, const.LOGIN_BACKUP_URL):
response = LOGIN_RESPONSE
status = 200
elif url == 'http://wrong.url/' or url is None:
response = {'message': 'Error', 'code': 404}
status = 404
else:
response = {'message': 'foo', 'code': 200}
status = 200
return MockResponse(response, status)
class MockURLHandler(BlinkURLHandler):
"""Mocks URL Handler in blinkpy module."""
pass
| <filename>tests/mock_responses.py
"""Simple mock responses definitions."""
from blinkpy.helpers.util import BlinkURLHandler
import blinkpy.helpers.constants as const
LOGIN_RESPONSE = {
'region': {'mock': 'Test'},
'networks': {
'1234': {'name': 'test', 'onboarded': True}
},
'authtoken': {'authtoken': '<PASSWORD>', 'message': 'auth'}
}
class MockResponse:
"""Class for mock request response."""
def __init__(self, json_data, status_code, raw_data=None):
"""Initialize mock get response."""
self.json_data = json_data
self.status_code = status_code
self.raw_data = raw_data
def json(self):
"""Return json data from get_request."""
return self.json_data
@property
def raw(self):
"""Return raw data from get request."""
return self.raw_data
def mocked_session_send(*args, **kwargs):
"""Mock session."""
prepped = args[0]
url = prepped.url
header = prepped.headers
method = prepped.method
if method == 'GET':
expected_token = LOGIN_RESPONSE['authtoken']['authtoken']
if header['TOKEN_AUTH'] != expected_token:
response = {'message': 'Not Authorized', 'code': 400}
status = 400
elif url == 'use_bad_response':
response = {'foo': 'bar'}
status = 200
elif url == 'reauth':
response = {'message': 'REAUTH', 'code': 777}
status = 777
else:
response = {'test': 'foo'}
status = 200
elif method == 'POST':
if url in (const.LOGIN_URL, const.LOGIN_BACKUP_URL):
response = LOGIN_RESPONSE
status = 200
elif url == 'http://wrong.url/' or url is None:
response = {'message': 'Error', 'code': 404}
status = 404
else:
response = {'message': 'foo', 'code': 200}
status = 200
return MockResponse(response, status)
class MockURLHandler(BlinkURLHandler):
"""Mocks URL Handler in blinkpy module."""
pass
| en | 0.581751 | Simple mock responses definitions. Class for mock request response. Initialize mock get response. Return json data from get_request. Return raw data from get request. Mock session. Mocks URL Handler in blinkpy module. | 2.54078 | 3 |
fits_tools.py | steveschulze/Photometry | 6 | 8259 | from astropy import coordinates as coord
from astropy import wcs
from astropy.io import fits
from astropy import units as u
from misc import bcolors
import numpy as np
import os
import sys  # needed for the sys.exit() call in convert_hms_dd
def convert_hms_dd(RA, DEC):
'''
Convert HMS to DD system
'''
if (':' in RA) and (':' in DEC):
Coord_dd = coord.SkyCoord(RA, DEC, unit=(u.hour,u.degree), frame='icrs')
RA_dd = Coord_dd.ra.deg
Dec_dd = Coord_dd.dec.deg
elif (not (':' in RA) and not (':' in DEC)) and (('.' in RA) and ('.' in DEC)):
RA_dd, Dec_dd = float(RA), float(DEC)
else:
print(bcolors.FAIL + 'Coordinates have wrong format.' + bcolors.ENDC)
sys.exit()
return RA_dd, Dec_dd
def get_header(FILE, KEYWORD):
'''
Get keyword from fits file
'''
header = fits.getheader(FILE)
return header[KEYWORD]
def pix2arcsec(FITS):
'''
Get pixel scale
'''
hdu = fits.open(FITS)
if len(hdu) > 1:
header = fits.getheader(FITS, 0)
header += fits.getheader(FITS, 1)
else:
header = fits.getheader(FITS)
hdu_wcs = wcs.WCS(header)
return np.median(wcs.utils.proj_plane_pixel_scales(hdu_wcs)) * 3600
def sky2xy (FITS, RA=False, DEC=False, CAT=None):
'''
Coordinate transformation: sky -> xy
'''
if CAT == None:
if RA != False and DEC != False:
cmd=('sky2xy %s %s %s | grep -v off' %(FITS, RA, DEC))
program_call = os.popen(cmd)
xy = []
for line in program_call:
xy=np.array(line.strip().split()[-2:]).astype(float)
if len(xy) > 0:
return xy
else:
cmd =("more %s | awk '{print $1,$2}' > %s" %(CAT, CAT.replace(CAT.split('.')[-1], 'reg')))
os.system(cmd)
cmd = ("sky2xy %s @%s | grep -v off | awk '{print $5, $6}'" %(FITS, CAT.replace(CAT.split('.')[-1], 'reg')))
cat = os.popen(cmd)
xy = []
for line in cat:
xy.append(list(map(float, line.replace('\n', '').split())))
return np.array(xy)
def xy2sky (FITSFILE,X,Y):
'''
Coordinate transformation: xy -> sky
'''
program_call = os.popen('xy2sky %s %s %s' %(FITSFILE, X, Y))
sky = []
for line in program_call:
sky.append(line.strip().split()[:2])
return sky
| from astropy import coordinates as coord
from astropy import wcs
from astropy.io import fits
from astropy import units as u
from misc import bcolors
import numpy as np
import os
def convert_hms_dd(RA, DEC):
'''
Convert HMS to DD system
'''
if (':' in RA) and (':' in DEC):
Coord_dd = coord.SkyCoord(RA, DEC, unit=(u.hour,u.degree), frame='icrs')
RA_dd = Coord_dd.ra.deg
Dec_dd = Coord_dd.dec.deg
elif (not (':' in RA) and not (':' in DEC)) and (('.' in RA) and ('.' in DEC)):
RA_dd, Dec_dd = float(RA), float(DEC)
else:
print(bcolors.FAIL + 'Coordinates have wrong format.' + bcolors.ENDC)
sys.exit()
return RA_dd, Dec_dd
def get_header(FILE, KEYWORD):
'''
Get keyword from fits file
'''
header = fits.getheader(FILE)
return header[KEYWORD]
def pix2arcsec(FITS):
'''
Get pixel scale
'''
hdu = fits.open(FITS)
if len(hdu) > 1:
header = fits.getheader(FITS, 0)
header += fits.getheader(FITS, 1)
else:
header = fits.getheader(FITS)
hdu_wcs = wcs.WCS(header)
return np.median(wcs.utils.proj_plane_pixel_scales(hdu_wcs)) * 3600
def sky2xy (FITS, RA=False, DEC=False, CAT=None):
'''
Coordinate transformation: sky -> xy
'''
if CAT == None:
if RA != False and DEC != False:
cmd=('sky2xy %s %s %s | grep -v off' %(FITS, RA, DEC))
program_call = os.popen(cmd)
xy = []
for line in program_call:
xy=np.array(line.strip().split()[-2:]).astype(float)
if len(xy) > 0:
return xy
else:
cmd =("more %s | awk '{print $1,$2}' > %s" %(CAT, CAT.replace(CAT.split('.')[-1], 'reg')))
os.system(cmd)
cmd = ("sky2xy %s @%s | grep -v off | awk '{print $5, $6}'" %(FITS, CAT.replace(CAT.split('.')[-1], 'reg')))
cat = os.popen(cmd)
xy = []
for line in cat:
xy.append(list(map(float, line.replace('\n', '').split())))
return np.array(xy)
def xy2sky (FITSFILE,X,Y):
'''
Coordinate transformation: xy -> sky
'''
program_call = os.popen('xy2sky %s %s %s' %(FITSFILE, X, Y))
sky = []
for line in program_call:
sky.append(line.strip().split()[:2])
return sky
| en | 0.672526 | Convert HMS to DD system Get keyword from fits file Get pixel scale Coordinate transformation: sky -> xy Coordinate transformation: xy -> sky | 2.555313 | 3 |
test_stbp_snn_eval.py | neurom-iot/n3ml | 11 | 8260 | <reponame>neurom-iot/n3ml
import argparse
import torch
import torch.nn as nn
import torchvision
import torchvision.transforms as transforms
from n3ml.model import DynamicModel_STBP_SNN
def validate(val_loader, model, encoder, criterion, opt):
model.eval()
total_images = 0
num_corrects = 0
total_loss = 0
with torch.no_grad():
for step, (images, labels) in enumerate(val_loader):
images = images.cuda()
labels = labels.cuda()
preds = model(encoder, images, opt.num_steps)
labels_ = torch.zeros(torch.numel(labels), 10, device=labels.device)
labels_ = labels_.scatter_(1, labels.view(-1, 1), 1)
loss = criterion(preds, labels_)
num_corrects += torch.argmax(preds, dim=1).eq(labels).sum(dim=0)
total_loss += loss.cpu().detach().numpy() * images.size(0)
total_images += images.size(0)
val_acc = num_corrects.float() / total_images
val_loss = total_loss / total_images
return val_acc, val_loss
def app(opt):
print(opt)
val_loader = torch.utils.data.DataLoader(
torchvision.datasets.MNIST(
opt.data,
train=False,
download=True,
transform=torchvision.transforms.Compose([transforms.ToTensor()])),
batch_size=opt.batch_size)
state_dict = torch.load(opt.pretrained)
model = DynamicModel_STBP_SNN(batch_size=opt.batch_size)
for m in state_dict['arch']:
model.add_module(m[0], m[1])
if torch.cuda.is_available():
model.cuda()
encoder = lambda x: (x > torch.rand(x.size(), device=x.device)).float()
criterion = nn.MSELoss()
acc, loss = validate(val_loader, model, encoder, criterion, opt)
print("In test, loss: {} - acc: {}".format(loss, acc))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--data', default='data')
parser.add_argument('--batch_size', default=100, type=int)
parser.add_argument('--num_steps', default=15, type=int)
parser.add_argument('--pretrained', default='pretrained/stbp_dynamic_acc_9897.pt')
app(parser.parse_args())
| import argparse
import torch
import torch.nn as nn
import torchvision
import torchvision.transforms as transforms
from n3ml.model import DynamicModel_STBP_SNN
def validate(val_loader, model, encoder, criterion, opt):
model.eval()
total_images = 0
num_corrects = 0
total_loss = 0
with torch.no_grad():
for step, (images, labels) in enumerate(val_loader):
images = images.cuda()
labels = labels.cuda()
preds = model(encoder, images, opt.num_steps)
labels_ = torch.zeros(torch.numel(labels), 10, device=labels.device)
labels_ = labels_.scatter_(1, labels.view(-1, 1), 1)
loss = criterion(preds, labels_)
num_corrects += torch.argmax(preds, dim=1).eq(labels).sum(dim=0)
total_loss += loss.cpu().detach().numpy() * images.size(0)
total_images += images.size(0)
val_acc = num_corrects.float() / total_images
val_loss = total_loss / total_images
return val_acc, val_loss
def app(opt):
print(opt)
val_loader = torch.utils.data.DataLoader(
torchvision.datasets.MNIST(
opt.data,
train=False,
download=True,
transform=torchvision.transforms.Compose([transforms.ToTensor()])),
batch_size=opt.batch_size)
state_dict = torch.load(opt.pretrained)
model = DynamicModel_STBP_SNN(batch_size=opt.batch_size)
for m in state_dict['arch']:
model.add_module(m[0], m[1])
if torch.cuda.is_available():
model.cuda()
encoder = lambda x: (x > torch.rand(x.size(), device=x.device)).float()
criterion = nn.MSELoss()
acc, loss = validate(val_loader, model, encoder, criterion, opt)
print("In test, loss: {} - acc: {}".format(loss, acc))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--data', default='data')
parser.add_argument('--batch_size', default=100, type=int)
parser.add_argument('--num_steps', default=15, type=int)
parser.add_argument('--pretrained', default='pretrained/stbp_dynamic_acc_9897.pt')
app(parser.parse_args()) | none | 1 | 2.217118 | 2 |
|
section_07_(files)/read_csv.py | govex/python-lessons | 5 | 8261 | # If you're new to file handling, be sure to check out with_open.py first!
# You'll also want to check out read_text.py before this example. This one is a bit more advanced.
with open('read_csv.csv', 'r') as states_file:
# Instead of leaving the file contents as a string, we're splitting the file into a list at every new line, and we save that list into the variable states
states = states_file.read().split("\n")
# Since this is a spreadsheet in comma separated values (CSV) format, we can think of states as a list of rows.
# But we'll need to split the columns into a list as well!
for index, state in enumerate(states):
states[index] = state.split(",")
# Now we have a nested list with all of the information!
# Our file looks like this:
# State, Population Estimate, Percent of Total population
# California, 38332521, 11.91%
# Texas, 26448193, 8.04%
# ...
# Our header row is at state[0], so we can use that to display the information in a prettier way.
for state in states[1:]: # We use [1:] so we skip the header row.
# state[0] is the first column in the row, which contains the name of the state.
print("\n---{0}---".format(state[0]))
for index, info in enumerate(state[1:]): # We use [1:] so we don't repeat the state name.
print("{0}:\t{1}".format(states[0][index+1], info))
# states is the full list of all of the states. It's a nested list. The outer list contains the rows, each inner list contains the columns in that row.
# states[0] refers to the header row of the list
# So states[0][0] would refer to "State", states[0][1] would refer to "Population Estimate", and states[0][2] would refer to "Percent of total population"
# state is one state within states. state is also a list, containing the name, population, and percentage of that particular state.
# So the first time through the loop, state[0] would refer to "California", state[1] would refer to 38332521, and state[2] would refer to 11.91%
# Since state is being create by the for loop in line 24, it gets a new value each time through.
# We're using enumerate to get the index (slicing number) of the column we're on, along with the information.
# That way we can pair the column name with the information, as shown in line 30.
# NOTE: Since we're slicing from [1:] in line 29, we need to increase the index by + 1, otherwise our headers will be off by one.
# Sample output:
# ---"California"---
# "Population Estimate": 38332521
# "Percent of Total population": "11.91%"
# ---"Texas"---
# "Population Estimate": 26448193
# "Percent of Total population": "8.04%"
# ---"New York"---
# "Population Estimate": 19651127
# "Percent of Total population": "6.19%"
| # If you're new to file handling, be sure to check out with_open.py first!
# You'll also want to check out read_text.py before this example. This one is a bit more advanced.
with open('read_csv.csv', 'r') as states_file:
# Instead of leaving the file contents as a string, we're splitting the file into a list at every new line, and we save that list into the variable states
states = states_file.read().split("\n")
# Since this is a spreadsheet in comma separated values (CSV) format, we can think of states as a list of rows.
# But we'll need to split the columns into a list as well!
for index, state in enumerate(states):
states[index] = state.split(",")
# Now we have a nested list with all of the information!
# Our file looks like this:
# State, Population Estimate, Percent of Total population
# California, 38332521, 11.91%
# Texas, 26448193, 8.04%
# ...
# Our header row is at state[0], so we can use that to display the information in a prettier way.
for state in states[1:]: # We use [1:] so we skip the header row.
# state[0] is the first column in the row, which contains the name of the state.
print("\n---{0}---".format(state[0]))
for index, info in enumerate(state[1:]): # We use [1:] so we don't repeat the state name.
print("{0}:\t{1}".format(states[0][index+1], info))
# states is the full list of all of the states. It's a nested list. The outer list contains the rows, each inner list contains the columns in that row.
# states[0] refers to the header row of the list
# So states[0][0] would refer to "State", states[0][1] would refer to "Population Estimate", and states[0][2] would refer to "Percent of total population"
# state is one state within states. state is also a list, containing the name, population, and percentage of that particular state.
# So the first time through the loop, state[0] would refer to "California", state[1] would refer to 38332521, and state[2] would refer to 11.91%
# Since state is being create by the for loop in line 24, it gets a new value each time through.
# We're using enumerate to get the index (slicing number) of the column we're on, along with the information.
# That way we can pair the column name with the information, as shown in line 30.
# NOTE: Since we're slicing from [1:] in line 29, we need to increase the index by + 1, otherwise our headers will be off by one.
# Sample output:
# ---"California"---
# "Population Estimate": 38332521
# "Percent of Total population": "11.91%"
# ---"Texas"---
# "Population Estimate": 26448193
# "Percent of Total population": "8.04%"
# ---"New York"---
# "Population Estimate": 19651127
# "Percent of Total population": "6.19%"
| en | 0.929894 | # If you're new to file handling, be sure to check out with_open.py first! # You'll also want to check out read_text.py before this example. This one is a bit more advanced. # Instead of leaving the file contents as a string, we're splitting the file into a list at every new line, and we save that list into the variable states # Since this is a spreadsheet in comma separated values (CSV) format, we can think of states as a list of rows. # But we'll need to split the columns into a list as well! # Now we have a nested list with all of the information! # Our file looks like this: # State, Population Estimate, Percent of Total population # California, 38332521, 11.91% # Texas, 26448193, 8.04% # ... # Our header row is at state[0], so we can use that to display the information in a prettier way. # We use [1:] so we skip the header row. # state[0] is the first column in the row, which contains the name of the state. # We use [1:] so we don't repeat the state name. # states is the full list of all of the states. It's a nested list. The outer list contains the rows, each inner list contains the columns in that row. # states[0] refers to the header row of the list # So states[0][0] would refer to "State", states[0][1] would refer to "Population Estimate", and states[0][2] would refer to "Percent of total population" # state is one state within states. state is also a list, containing the name, population, and percentage of that particular state. # So the first time through the loop, state[0] would refer to "California", state[1] would refer to 38332521, and state[2] would refer to 11.91% # Since state is being create by the for loop in line 24, it gets a new value each time through. # We're using enumerate to get the index (slicing number) of the column we're on, along with the information. # That way we can pair the column name with the information, as shown in line 30. # NOTE: Since we're slicing from [1:] in line 29, we need to increase the index by + 1, otherwise our headers will be off by one. # Sample output: # ---"California"--- # "Population Estimate": 38332521 # "Percent of Total population": "11.91%" # ---"Texas"--- # "Population Estimate": 26448193 # "Percent of Total population": "8.04%" # ---"New York"--- # "Population Estimate": 19651127 # "Percent of Total population": "6.19%" | 4.250616 | 4 |
kaggle_melanoma/schedulers.py | tinve/kaggle_melanoma | 8 | 8262 | import math
from torch.optim.lr_scheduler import _LRScheduler
from torch.optim.optimizer import Optimizer
class PolyLR(_LRScheduler):
"""
Sets the learning rate of each parameter group according to poly learning rate policy
"""
def __init__(self, optimizer, max_iter=90000, power=0.9, last_epoch=-1):
self.max_iter = max_iter
self.power = power
super().__init__(optimizer, last_epoch)
def get_lr(self):
return [base_lr * (1 - float(self.last_epoch) / self.max_iter) ** self.power for base_lr in self.base_lrs]
func_zoo = {
"cosine_decay": lambda epoch, step, len_epoch, total_epoch: 0.5
* (math.cos(step * math.pi / (total_epoch * len_epoch)) + 1)
}
class CosineWarmRestart:
def __init__(
self,
optimizer: Optimizer,
func: str = "cosine_decay",
warmup: bool = True,
warmup_epoch: int = 1,
period: int = 10,
min_lr: float = 1e-5,
low_epoch: int = 1,
):
# self.base_lrs = list(map(lambda group: group["lr"], optimizer.param_groups))[0]
self.base_lrs = [x["lr"] for x in optimizer.param_groups][0]
self.optimizer = optimizer
self.warmup = warmup
self.warmup_epoch = warmup_epoch
self.period = period
self.cos_period = period - low_epoch
self.low_epoch = low_epoch
self.lr_func = func_zoo[func]
self.min_lr = min_lr
def cosine_step(self, current_epoch: int, global_step: int, len_epoch: int) -> float:
if self.warmup and current_epoch < self.warmup_epoch:
lr = self.base_lrs * float(1 + global_step) / (self.warmup_epoch * len_epoch)
else:
lr = self.base_lrs * self.lr_func(current_epoch, global_step, len_epoch, self.cos_period)
lr = max(self.min_lr, lr)
for param_group in self.optimizer.param_groups:
param_group["lr"] = lr
return lr
def step(self, current_epoch: int, global_step: int, len_epoch: int) -> float:
current_epoch = current_epoch % self.period
if current_epoch >= self.period - self.low_epoch:
global_step = len_epoch * self.cos_period
else:
global_step = global_step % (self.period * len_epoch)
return self.cosine_step(current_epoch, global_step, len_epoch)
| import math
from torch.optim.lr_scheduler import _LRScheduler
from torch.optim.optimizer import Optimizer
class PolyLR(_LRScheduler):
"""
Sets the learning rate of each parameter group according to poly learning rate policy
"""
def __init__(self, optimizer, max_iter=90000, power=0.9, last_epoch=-1):
self.max_iter = max_iter
self.power = power
super().__init__(optimizer, last_epoch)
def get_lr(self):
return [base_lr * (1 - float(self.last_epoch) / self.max_iter) ** self.power for base_lr in self.base_lrs]
func_zoo = {
"cosine_decay": lambda epoch, step, len_epoch, total_epoch: 0.5
* (math.cos(step * math.pi / (total_epoch * len_epoch)) + 1)
}
class CosineWarmRestart:
def __init__(
self,
optimizer: Optimizer,
func: str = "cosine_decay",
warmup: bool = True,
warmup_epoch: int = 1,
period: int = 10,
min_lr: float = 1e-5,
low_epoch: int = 1,
):
# self.base_lrs = list(map(lambda group: group["lr"], optimizer.param_groups))[0]
self.base_lrs = [x["lr"] for x in optimizer.param_groups][0]
self.optimizer = optimizer
self.warmup = warmup
self.warmup_epoch = warmup_epoch
self.period = period
self.cos_period = period - low_epoch
self.low_epoch = low_epoch
self.lr_func = func_zoo[func]
self.min_lr = min_lr
def cosine_step(self, current_epoch: int, global_step: int, len_epoch: int) -> float:
if self.warmup and current_epoch < self.warmup_epoch:
lr = self.base_lrs * float(1 + global_step) / (self.warmup_epoch * len_epoch)
else:
lr = self.base_lrs * self.lr_func(current_epoch, global_step, len_epoch, self.cos_period)
lr = max(self.min_lr, lr)
for param_group in self.optimizer.param_groups:
param_group["lr"] = lr
return lr
def step(self, current_epoch: int, global_step: int, len_epoch: int) -> float:
current_epoch = current_epoch % self.period
if current_epoch >= self.period - self.low_epoch:
global_step = len_epoch * self.cos_period
else:
global_step = global_step % (self.period * len_epoch)
return self.cosine_step(current_epoch, global_step, len_epoch)
| en | 0.496475 | Sets the learning rate of each parameter group according to poly learning rate policy # self.base_lrs = list(map(lambda group: group["lr"], optimizer.param_groups))[0] | 2.42872 | 2 |
data/data/__init__.py | PumpkinYing/GAT | 0 | 8263 | from .dataset import load_data | from .dataset import load_data | none | 1 | 1.077111 | 1 |
|
utils.py | federicosapienza/InboxNotionTelegramBot | 0 | 8264 | import json
import logging
logger = logging.getLogger(__name__)
with open('configuration.json') as f:
config = json.load(f)
TELEGRAM_TOKEN = config["telegram-bot-token"]
NOTION_TOKEN = config["notion-token"]
NOTION_TABLE_URL = config["inbox_table"]["table_url"]
def check_allowed_user(user_id):
"""
check if allowed user
:param user_id: telegram user id
:return True if user is valid , False otherwise
"""
valid_user = config["allowed_user_id"]
user_id = str(user_id)
return user_id == valid_user
def restrict_action(handled_action):
"""
Wrapper for creating a private bot
:param handled_action: the action to perform
"""
def check_private(update, context):
if not (check_allowed_user(update.message.from_user.id)):
logging.warning("An unauthorized user attempted to use the bot. username: {}, id: {} .".format(
update.message.from_user.username, update.message.from_user.id
))
return
else:
return handled_action(update, context)
return check_private
| import json
import logging
logger = logging.getLogger(__name__)
with open('configuration.json') as f:
config = json.load(f)
TELEGRAM_TOKEN = config["telegram-bot-token"]
NOTION_TOKEN = config["notion-token"]
NOTION_TABLE_URL = config["inbox_table"]["table_url"]
def check_allowed_user(user_id):
"""
check if allowed user
:param user_id: telegram user id
:return True if user is valid , False otherwise
"""
valid_user = config["allowed_user_id"]
user_id = str(user_id)
return user_id == valid_user
def restrict_action(handled_action):
"""
Wrapper for creating a private bot
:param handled_action: the action to perform
"""
def check_private(update, context):
if not (check_allowed_user(update.message.from_user.id)):
logging.warning("An unauthorized user attempted to use the bot. username: {}, id: {} .".format(
update.message.from_user.username, update.message.from_user.id
))
return
else:
return handled_action(update, context)
return check_private
| en | 0.610377 | check if allowed user :param user_id: telegram user id :return True if user is valid , False otherwise Wrapper for creating a private bot :param handled_action: the action to perform | 2.517031 | 3 |
enaml/core/byteplay/__init__.py | timgates42/enaml | 0 | 8265 | <reponame>timgates42/enaml
#------------------------------------------------------------------------------
# Copyright (c) 2013-2018, Nucleic Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#------------------------------------------------------------------------------
from ...compat import USE_WORDCODE
if USE_WORDCODE:
from .wbyteplay import *
else:
from .byteplay3 import *
| #------------------------------------------------------------------------------
# Copyright (c) 2013-2018, Nucleic Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#------------------------------------------------------------------------------
from ...compat import USE_WORDCODE
if USE_WORDCODE:
from .wbyteplay import *
else:
from .byteplay3 import * | en | 0.449666 | #------------------------------------------------------------------------------ # Copyright (c) 2013-2018, Nucleic Development Team. # # Distributed under the terms of the Modified BSD License. # # The full license is in the file COPYING.txt, distributed with this software. #------------------------------------------------------------------------------ | 1.353228 | 1 |
cassiopeia/datastores/riotapi/match.py | artemigkh/cassiopeia | 1 | 8266 | from time import time
from typing import Type, TypeVar, MutableMapping, Any, Iterable, Generator, Union
import arrow
import datetime
import math
from datapipelines import DataSource, PipelineContext, Query, NotFoundError, validate_query
from .common import RiotAPIService, APINotFoundError
from ...data import Platform, Season, Queue, SEASON_IDS, QUEUE_IDS
from ...dto.match import MatchDto, MatchListDto, TimelineDto
from ..uniquekeys import convert_region_to_platform
T = TypeVar("T")
def _get_current_time(query: MutableMapping[str, Any], context: PipelineContext = None) -> int:
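    """Return the current epoch time in milliseconds."""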
return int(time()) * 1000
class MatchAPI(RiotAPIService):
@DataSource.dispatch
def get(self, type: Type[T], query: MutableMapping[str, Any], context: PipelineContext = None) -> T:
pass
@DataSource.dispatch
def get_many(self, type: Type[T], query: MutableMapping[str, Any], context: PipelineContext = None) -> Iterable[T]:
pass
_validate_get_match_query = Query. \
has("id").as_(int).also. \
has("platform").as_(Platform)
@get.register(MatchDto)
@validate_query(_validate_get_match_query, convert_region_to_platform)
def get_match(self, query: MutableMapping[str, Any], context: PipelineContext = None) -> MatchDto:
url = "https://{platform}.api.riotgames.com/lol/match/v4/matches/{id}".format(platform=query["platform"].value.lower(), id=query["id"])
try:
app_limiter, method_limiter = self._get_rate_limiter(query["platform"], "matches/id")
data = self._get(url, {}, app_limiter=app_limiter, method_limiter=method_limiter)
except APINotFoundError as error:
raise NotFoundError(str(error)) from error
data["gameId"] = query["id"]
data["region"] = query["platform"].region.value
for p in data["participantIdentities"]:
aid = p.get("player", {}).get("currentAccountId", None)
if aid == 0:
p["player"]["bot"] = True
return MatchDto(data)
_validate_get_many_match_query = Query. \
has("ids").as_(Iterable).also. \
has("platform").as_(Platform)
@get_many.register(MatchDto)
@validate_query(_validate_get_many_match_query, convert_region_to_platform)
def get_many_match(self, query: MutableMapping[str, Any], context: PipelineContext = None) -> Generator[MatchDto, None, None]:
def generator():
for id in query["ids"]:
url = "https://{platform}.api.riotgames.com/lol/match/v4/matches/{id}".format(platform=query["platform"].value.lower(), id=id)
try:
app_limiter, method_limiter = self._get_rate_limiter(query["platform"], "matches/id")
data = self._get(url, {}, app_limiter=app_limiter, method_limiter=method_limiter)
except APINotFoundError as error:
raise NotFoundError(str(error)) from error
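                # Some match payloads omit per-participant rune data, so backfill an empty list.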
for participant in data["participants"]:
participant.setdefault("runes", [])
for p in data["participantIdentities"]:
aid = p.get("player", {}).get("currentAccountId", None)
if aid == 0:
p["player"]["bot"] = True
data["gameId"] = id
data["region"] = query["platform"].region.value
yield MatchDto(data)
return generator()
_validate_get_match_list_query = Query. \
has("accountId").as_(str).also. \
has("platform").as_(Platform).also. \
has("beginTime").as_(int).also. \
can_have("endTime").as_(int).also. \
has("beginIndex").as_(int).also. \
has("maxNumberOfMatches").as_(float).also. \
can_have("seasons").as_(Iterable).also. \
can_have("champion.ids").as_(Iterable).also. \
can_have("queues").as_(Iterable)
@get.register(MatchListDto)
@validate_query(_validate_get_match_list_query, convert_region_to_platform)
def get_match_list(self, query: MutableMapping[str, Any], context: PipelineContext = None) -> MatchListDto:
params = {}
riot_index_interval = 100
riot_date_interval = datetime.timedelta(days=7)
begin_time = query["beginTime"] # type: arrow.Arrow
end_time = query.get("endTime", arrow.now()) # type: arrow.Arrow
if isinstance(begin_time, int):
begin_time = arrow.get(begin_time / 1000)
if isinstance(end_time, int):
end_time = arrow.get(end_time / 1000)
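        # Heuristic: page through the match history either by 7-day time windows or by
        # 100-match index windows, whichever is expected to need fewer API calls.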
def determine_calling_method(begin_time, end_time) -> str:
"""Returns either "by_date" or "by_index"."""
matches_per_date_interval = 10 # This is an assumption
seconds_per_day = (60 * 60 * 24)
riot_date_interval_in_days = riot_date_interval.total_seconds() / seconds_per_day # in units of days
npulls_by_date = (end_time - begin_time).total_seconds() / seconds_per_day / riot_date_interval_in_days
npulls_by_index = (arrow.now() - begin_time).total_seconds() / seconds_per_day / riot_date_interval_in_days * matches_per_date_interval / riot_index_interval
if math.ceil(npulls_by_date) < math.ceil(npulls_by_index):
by = "by_date"
else:
by = "by_index"
return by
calling_method = determine_calling_method(begin_time, end_time)
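        # Build request params for a single window: one date window ("by_date") or one index window ("by_index").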
if calling_method == "by_date":
params["beginTime"] = begin_time.timestamp * 1000
if "endTime" in query:
params["endTime"] = min((begin_time + riot_date_interval).timestamp * 1000, query["endTime"])
else:
params["endTime"] = (begin_time + riot_date_interval).timestamp * 1000
else:
params["beginIndex"] = query["beginIndex"]
params["endIndex"] = query["beginIndex"] + min(riot_index_interval, query["maxNumberOfMatches"])
params["endIndex"] = int(params["endIndex"])
if "seasons" in query:
seasons = {Season(season) for season in query["seasons"]}
params["season"] = {SEASON_IDS[season] for season in seasons}
else:
seasons = set()
if "champion.ids" in query:
champions = query["champion.ids"]
params["champion"] = champions
else:
champions = set()
if "queues" in query:
queues = {Queue(queue) for queue in query["queues"]}
params["queue"] = {QUEUE_IDS[queue] for queue in queues}
else:
queues = set()
url = "https://{platform}.api.riotgames.com/lol/match/v4/matchlists/by-account/{accountId}".format(platform=query["platform"].value.lower(), accountId=query["accountId"])
try:
app_limiter, method_limiter = self._get_rate_limiter(query["platform"], "matchlists/by-account/accountId")
data = self._get(url, params, app_limiter=app_limiter, method_limiter=method_limiter)
except APINotFoundError:
data = {"matches": []}
data["accountId"] = query["accountId"]
data["region"] = query["platform"].region.value
data["season"] = seasons
data["champion"] = champions
data["queue"] = queues
if calling_method == "by_index":
data["beginIndex"] = params["beginIndex"]
data["endIndex"] = params["endIndex"]
data["maxNumberOfMatches"] = query["maxNumberOfMatches"]
else:
data["beginTime"] = params["beginTime"]
data["endTime"] = params["endTime"]
for match in data["matches"]:
match["accountId"] = query["accountId"]
match["region"] = Platform(match["platformId"]).region.value
return MatchListDto(data)
_validate_get_many_match_list_query = Query. \
has("accountIds").as_(Iterable).also. \
has("platform").as_(Platform).also. \
can_have("beginTime").as_(int).also. \
can_have("endTime").as_(int).also. \
can_have("beginIndex").as_(int).also. \
can_have("endIndex").as_(int).also. \
can_have("seasons").as_(Iterable).also. \
can_have("champion.ids").as_(Iterable).also. \
can_have("queues").as_(Iterable)
@get_many.register(MatchListDto)
@validate_query(_validate_get_many_match_list_query, convert_region_to_platform)
def get_many_match_list(self, query: MutableMapping[str, Any], context: PipelineContext = None) -> Generator[MatchListDto, None, None]:
params = {}
if "beginIndex" in query:
params["beginIndex"] = query["beginIndex"]
if "endIndex" in query:
params["endIndex"] = query["endIndex"]
if "seasons" in query:
seasons = {Season(season) for season in query["seasons"]}
params["season"] = {SEASON_IDS[season] for season in seasons}
else:
seasons = set()
if "champion.ids" in query:
params["champion"] = {query["champion.ids"]}
if "queues" in query:
queues = {Queue(queue) for queue in query["queues"]}
params["queue"] = {QUEUE_IDS[queue] for queue in queues}
else:
queues = set()
def generator():
for id in query["accountIds"]:
url = "https://{platform}.api.riotgames.com/lol/match/v4/matchlists/by-account/{accountId}".format(platform=query["platform"].value.lower(), accountId=id)
try:
app_limiter, method_limiter = self._get_rate_limiter(query["platform"], "matchlists/by-account/accountId")
data = self._get(url, params, app_limiter=app_limiter, method_limiter=method_limiter)
except APINotFoundError as error:
raise NotFoundError(str(error)) from error
data["accountId"] = id
data["region"] = query["platform"].region.value
if "beginIndex" in query:
data["beginIndex"] = query["beginIndex"]
if "endIndex" in query:
data["endIndex"] = query["endIndex"]
if "seasons" in query:
data["seasons"] = seasons
if "champion.ids" in query:
data["champion"] = params["champion"]
if "queues" in query:
params["queue"] = queues
yield MatchListDto(data)
return generator()
_validate_get_timeline_query = Query. \
has("id").as_(int).also. \
has("platform").as_(Platform)
@get.register(TimelineDto)
@validate_query(_validate_get_timeline_query, convert_region_to_platform)
def get_match_timeline(self, query: MutableMapping[str, Any], context: PipelineContext = None) -> TimelineDto:
url = "https://{platform}.api.riotgames.com/lol/match/v4/timelines/by-match/{id}".format(platform=query["platform"].value.lower(), id=query["id"])
try:
app_limiter, method_limiter = self._get_rate_limiter(query["platform"], "timelines/by-match/id")
data = self._get(url, {}, app_limiter=app_limiter, method_limiter=method_limiter)
except APINotFoundError as error:
raise NotFoundError(str(error)) from error
data["matchId"] = query["id"]
data["region"] = query["platform"].region.value
return TimelineDto(data)
_validate_get_many_timeline_query = Query. \
has("ids").as_(Iterable).also. \
has("platform").as_(Platform)
@get_many.register(TimelineDto)
@validate_query(_validate_get_many_timeline_query, convert_region_to_platform)
def get_many_match_timeline(self, query: MutableMapping[str, Any], context: PipelineContext = None) -> Generator[TimelineDto, None, None]:
def generator():
for id in query["ids"]:
url = "https://{platform}.api.riotgames.com/lol/match/v4/timelines/by-match/{id}".format(platform=query["platform"].value.lower(), id=id)
try:
app_limiter, method_limiter = self._get_rate_limiter(query["platform"], "timelines/by-match/id")
data = self._get(url, {}, app_limiter=app_limiter, method_limiter=method_limiter)
except APINotFoundError as error:
raise NotFoundError(str(error)) from error
data["matchId"] = id
data["region"] = query["platform"].region.value
yield TimelineDto(data)
return generator()
| from time import time
from typing import Type, TypeVar, MutableMapping, Any, Iterable, Generator, Union
import arrow
import datetime
import math
from datapipelines import DataSource, PipelineContext, Query, NotFoundError, validate_query
from .common import RiotAPIService, APINotFoundError
from ...data import Platform, Season, Queue, SEASON_IDS, QUEUE_IDS
from ...dto.match import MatchDto, MatchListDto, TimelineDto
from ..uniquekeys import convert_region_to_platform
T = TypeVar("T")
def _get_current_time(query: MutableMapping[str, Any], context: PipelineContext = None) -> int:
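    """Return the current epoch time in milliseconds."""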
return int(time()) * 1000
class MatchAPI(RiotAPIService):
@DataSource.dispatch
def get(self, type: Type[T], query: MutableMapping[str, Any], context: PipelineContext = None) -> T:
pass
@DataSource.dispatch
def get_many(self, type: Type[T], query: MutableMapping[str, Any], context: PipelineContext = None) -> Iterable[T]:
pass
_validate_get_match_query = Query. \
has("id").as_(int).also. \
has("platform").as_(Platform)
@get.register(MatchDto)
@validate_query(_validate_get_match_query, convert_region_to_platform)
def get_match(self, query: MutableMapping[str, Any], context: PipelineContext = None) -> MatchDto:
url = "https://{platform}.api.riotgames.com/lol/match/v4/matches/{id}".format(platform=query["platform"].value.lower(), id=query["id"])
try:
app_limiter, method_limiter = self._get_rate_limiter(query["platform"], "matches/id")
data = self._get(url, {}, app_limiter=app_limiter, method_limiter=method_limiter)
except APINotFoundError as error:
raise NotFoundError(str(error)) from error
data["gameId"] = query["id"]
data["region"] = query["platform"].region.value
for p in data["participantIdentities"]:
aid = p.get("player", {}).get("currentAccountId", None)
if aid == 0:
p["player"]["bot"] = True
return MatchDto(data)
_validate_get_many_match_query = Query. \
has("ids").as_(Iterable).also. \
has("platform").as_(Platform)
@get_many.register(MatchDto)
@validate_query(_validate_get_many_match_query, convert_region_to_platform)
def get_many_match(self, query: MutableMapping[str, Any], context: PipelineContext = None) -> Generator[MatchDto, None, None]:
def generator():
for id in query["ids"]:
url = "https://{platform}.api.riotgames.com/lol/match/v4/matches/{id}".format(platform=query["platform"].value.lower(), id=id)
try:
app_limiter, method_limiter = self._get_rate_limiter(query["platform"], "matches/id")
data = self._get(url, {}, app_limiter=app_limiter, method_limiter=method_limiter)
except APINotFoundError as error:
raise NotFoundError(str(error)) from error
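                # Some match payloads omit per-participant rune data, so backfill an empty list.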
for participant in data["participants"]:
participant.setdefault("runes", [])
for p in data["participantIdentities"]:
aid = p.get("player", {}).get("currentAccountId", None)
if aid == 0:
p["player"]["bot"] = True
data["gameId"] = id
data["region"] = query["platform"].region.value
yield MatchDto(data)
return generator()
_validate_get_match_list_query = Query. \
has("accountId").as_(str).also. \
has("platform").as_(Platform).also. \
has("beginTime").as_(int).also. \
can_have("endTime").as_(int).also. \
has("beginIndex").as_(int).also. \
has("maxNumberOfMatches").as_(float).also. \
can_have("seasons").as_(Iterable).also. \
can_have("champion.ids").as_(Iterable).also. \
can_have("queues").as_(Iterable)
@get.register(MatchListDto)
@validate_query(_validate_get_match_list_query, convert_region_to_platform)
def get_match_list(self, query: MutableMapping[str, Any], context: PipelineContext = None) -> MatchListDto:
params = {}
riot_index_interval = 100
riot_date_interval = datetime.timedelta(days=7)
begin_time = query["beginTime"] # type: arrow.Arrow
end_time = query.get("endTime", arrow.now()) # type: arrow.Arrow
if isinstance(begin_time, int):
begin_time = arrow.get(begin_time / 1000)
if isinstance(end_time, int):
end_time = arrow.get(end_time / 1000)
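        # Heuristic: page through the match history either by 7-day time windows or by
        # 100-match index windows, whichever is expected to need fewer API calls.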
def determine_calling_method(begin_time, end_time) -> str:
"""Returns either "by_date" or "by_index"."""
matches_per_date_interval = 10 # This is an assumption
seconds_per_day = (60 * 60 * 24)
riot_date_interval_in_days = riot_date_interval.total_seconds() / seconds_per_day # in units of days
npulls_by_date = (end_time - begin_time).total_seconds() / seconds_per_day / riot_date_interval_in_days
npulls_by_index = (arrow.now() - begin_time).total_seconds() / seconds_per_day / riot_date_interval_in_days * matches_per_date_interval / riot_index_interval
if math.ceil(npulls_by_date) < math.ceil(npulls_by_index):
by = "by_date"
else:
by = "by_index"
return by
calling_method = determine_calling_method(begin_time, end_time)
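        # Build request params for a single window: one date window ("by_date") or one index window ("by_index").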
if calling_method == "by_date":
params["beginTime"] = begin_time.timestamp * 1000
if "endTime" in query:
params["endTime"] = min((begin_time + riot_date_interval).timestamp * 1000, query["endTime"])
else:
params["endTime"] = (begin_time + riot_date_interval).timestamp * 1000
else:
params["beginIndex"] = query["beginIndex"]
params["endIndex"] = query["beginIndex"] + min(riot_index_interval, query["maxNumberOfMatches"])
params["endIndex"] = int(params["endIndex"])
if "seasons" in query:
seasons = {Season(season) for season in query["seasons"]}
params["season"] = {SEASON_IDS[season] for season in seasons}
else:
seasons = set()
if "champion.ids" in query:
champions = query["champion.ids"]
params["champion"] = champions
else:
champions = set()
if "queues" in query:
queues = {Queue(queue) for queue in query["queues"]}
params["queue"] = {QUEUE_IDS[queue] for queue in queues}
else:
queues = set()
url = "https://{platform}.api.riotgames.com/lol/match/v4/matchlists/by-account/{accountId}".format(platform=query["platform"].value.lower(), accountId=query["accountId"])
try:
app_limiter, method_limiter = self._get_rate_limiter(query["platform"], "matchlists/by-account/accountId")
data = self._get(url, params, app_limiter=app_limiter, method_limiter=method_limiter)
except APINotFoundError:
data = {"matches": []}
data["accountId"] = query["accountId"]
data["region"] = query["platform"].region.value
data["season"] = seasons
data["champion"] = champions
data["queue"] = queues
if calling_method == "by_index":
data["beginIndex"] = params["beginIndex"]
data["endIndex"] = params["endIndex"]
data["maxNumberOfMatches"] = query["maxNumberOfMatches"]
else:
data["beginTime"] = params["beginTime"]
data["endTime"] = params["endTime"]
for match in data["matches"]:
match["accountId"] = query["accountId"]
match["region"] = Platform(match["platformId"]).region.value
return MatchListDto(data)
_validate_get_many_match_list_query = Query. \
has("accountIds").as_(Iterable).also. \
has("platform").as_(Platform).also. \
can_have("beginTime").as_(int).also. \
can_have("endTime").as_(int).also. \
can_have("beginIndex").as_(int).also. \
can_have("endIndex").as_(int).also. \
can_have("seasons").as_(Iterable).also. \
can_have("champion.ids").as_(Iterable).also. \
can_have("queues").as_(Iterable)
@get_many.register(MatchListDto)
@validate_query(_validate_get_many_match_list_query, convert_region_to_platform)
def get_many_match_list(self, query: MutableMapping[str, Any], context: PipelineContext = None) -> Generator[MatchListDto, None, None]:
params = {}
if "beginIndex" in query:
params["beginIndex"] = query["beginIndex"]
if "endIndex" in query:
params["endIndex"] = query["endIndex"]
if "seasons" in query:
seasons = {Season(season) for season in query["seasons"]}
params["season"] = {SEASON_IDS[season] for season in seasons}
else:
seasons = set()
if "champion.ids" in query:
params["champion"] = {query["champion.ids"]}
if "queues" in query:
queues = {Queue(queue) for queue in query["queues"]}
params["queue"] = {QUEUE_IDS[queue] for queue in queues}
else:
queues = set()
def generator():
for id in query["accountIds"]:
url = "https://{platform}.api.riotgames.com/lol/match/v4/matchlists/by-account/{accountId}".format(platform=query["platform"].value.lower(), accountId=id)
try:
app_limiter, method_limiter = self._get_rate_limiter(query["platform"], "matchlists/by-account/accountId")
data = self._get(url, params, app_limiter=app_limiter, method_limiter=method_limiter)
except APINotFoundError as error:
raise NotFoundError(str(error)) from error
data["accountId"] = id
data["region"] = query["platform"].region.value
if "beginIndex" in query:
data["beginIndex"] = query["beginIndex"]
if "endIndex" in query:
data["endIndex"] = query["endIndex"]
if "seasons" in query:
data["seasons"] = seasons
if "champion.ids" in query:
data["champion"] = params["champion"]
if "queues" in query:
params["queue"] = queues
yield MatchListDto(data)
return generator()
_validate_get_timeline_query = Query. \
has("id").as_(int).also. \
has("platform").as_(Platform)
@get.register(TimelineDto)
@validate_query(_validate_get_timeline_query, convert_region_to_platform)
def get_match_timeline(self, query: MutableMapping[str, Any], context: PipelineContext = None) -> TimelineDto:
url = "https://{platform}.api.riotgames.com/lol/match/v4/timelines/by-match/{id}".format(platform=query["platform"].value.lower(), id=query["id"])
try:
app_limiter, method_limiter = self._get_rate_limiter(query["platform"], "timelines/by-match/id")
data = self._get(url, {}, app_limiter=app_limiter, method_limiter=method_limiter)
except APINotFoundError as error:
raise NotFoundError(str(error)) from error
data["matchId"] = query["id"]
data["region"] = query["platform"].region.value
return TimelineDto(data)
_validate_get_many_timeline_query = Query. \
has("ids").as_(Iterable).also. \
has("platform").as_(Platform)
@get_many.register(TimelineDto)
@validate_query(_validate_get_many_timeline_query, convert_region_to_platform)
def get_many_match_timeline(self, query: MutableMapping[str, Any], context: PipelineContext = None) -> Generator[TimelineDto, None, None]:
def generator():
for id in query["ids"]:
url = "https://{platform}.api.riotgames.com/lol/match/v4/timelines/by-match/{id}".format(platform=query["platform"].value.lower(), id=id)
try:
app_limiter, method_limiter = self._get_rate_limiter(query["platform"], "timelines/by-match/id")
data = self._get(url, {}, app_limiter=app_limiter, method_limiter=method_limiter)
except APINotFoundError as error:
raise NotFoundError(str(error)) from error
data["matchId"] = id
data["region"] = query["platform"].region.value
yield TimelineDto(data)
return generator()
Lib/site-packages/hackedit/vendor/jedi/cache.py | fochoao/cpython | 1 | 8267 | """
This caching is very important for speed and memory optimizations. There's
nothing really spectacular, just some decorators. The following cache types are
available:
- module caching (`load_parser` and `save_parser`), which uses pickle and is
really important to assure low load times of modules like ``numpy``.
- ``time_cache`` can be used to cache something for just a limited time span,
which can be useful if there's user interaction and the user cannot react
faster than a certain time.
This module is one of the reasons why |jedi| is not thread-safe. As you can see
there are global variables, which are holding the cache information. Some of
these variables are being cleaned after every API usage.
"""
import time
import os
import sys
import json
import hashlib
import gc
import inspect
import shutil
import re
try:
import cPickle as pickle
except ImportError:
import pickle
from jedi import settings
from jedi import common
from jedi import debug
_time_caches = {}
# for fast_parser, should not be deleted
parser_cache = {}
class ParserCacheItem(object):
def __init__(self, parser, change_time=None):
self.parser = parser
if change_time is None:
change_time = time.time()
self.change_time = change_time
def clear_time_caches(delete_all=False):
""" Jedi caches many things, that should be completed after each completion
finishes.
:param delete_all: Deletes also the cache that is normally not deleted,
like parser cache, which is important for faster parsing.
"""
global _time_caches
if delete_all:
for cache in _time_caches.values():
cache.clear()
parser_cache.clear()
else:
# normally just kill the expired entries, not all
for tc in _time_caches.values():
# check time_cache for expired entries
for key, (t, value) in list(tc.items()):
if t < time.time():
# delete expired entries
del tc[key]
def time_cache(time_add_setting):
"""
This decorator works as follows: Call it with a setting and after that
use the function with a callable that returns the key.
But: This function is only called if the key is not available. After a
certain amount of time (`time_add_setting`) the cache is invalid.
"""
def _temp(key_func):
dct = {}
_time_caches[time_add_setting] = dct
def wrapper(*args, **kwargs):
generator = key_func(*args, **kwargs)
key = next(generator)
try:
expiry, value = dct[key]
if expiry > time.time():
return value
except KeyError:
pass
value = next(generator)
time_add = getattr(settings, time_add_setting)
if key is not None:
dct[key] = time.time() + time_add, value
return value
return wrapper
return _temp
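# Illustrative usage (not part of the original module): the decorated function is a
# generator that first yields the cache key and then yields the value to cache, e.g.
#
#   @time_cache("call_signatures_validity")
#   def lookup(node):
#       yield node.start_pos           # cache key
#       yield expensive_compute(node)  # value, kept until the timeout expires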
@time_cache("call_signatures_validity")
def cache_call_signatures(evaluator, call, source, user_pos):
"""This function calculates the cache key."""
index = user_pos[0] - 1
lines = common.splitlines(source)
before_cursor = lines[index][:user_pos[1]]
other_lines = lines[call.start_pos[0]:index]
whole = '\n'.join(other_lines + [before_cursor])
before_bracket = re.match(r'.*\(', whole, re.DOTALL)
module_path = call.get_parent_until().path
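# First yield: the cache key consumed by @time_cache (None disables caching); second yield: the value to cache.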
yield None if module_path is None else (module_path, before_bracket, call.start_pos)
yield evaluator.eval_element(call)
def underscore_memoization(func):
"""
Decorator for methods::
class A(object):
def x(self):
if self._x:
self._x = 10
return self._x
Becomes::
class A(object):
@underscore_memoization
def x(self):
return 10
A now has an attribute ``_x`` written by this decorator.
"""
name = '_' + func.__name__
def wrapper(self):
try:
return getattr(self, name)
except AttributeError:
result = func(self)
if inspect.isgenerator(result):
result = list(result)
setattr(self, name, result)
return result
return wrapper
def memoize_method(method):
"""A normal memoize function."""
def wrapper(self, *args, **kwargs):
dct = self.__dict__.setdefault('_memoize_method_dct', {})
key = (args, frozenset(kwargs.items()))
try:
return dct[key]
except KeyError:
result = method(self, *args, **kwargs)
dct[key] = result
return result
return wrapper
def memoize_function(obj):
""" A normal memoize function for memoizing free functions. """
cache = obj.cache = {}
def memoizer(*args, **kwargs):
key = str(args) + str(kwargs)
if key not in cache:
cache[key] = obj(*args, **kwargs)
return cache[key]
return memoizer
def cache_star_import(func):
@time_cache("star_import_cache_validity")
def wrapper(self):
yield self.base # The cache key
yield func(self)
return wrapper
def _invalidate_star_import_cache_module(module, only_main=False):
""" Important if some new modules are being reparsed """
try:
t, modules = _time_caches['star_import_cache_validity'][module]
except KeyError:
pass
else:
del _time_caches['star_import_cache_validity'][module]
def invalidate_star_import_cache(path):
"""On success returns True."""
try:
parser_cache_item = parser_cache[path]
except KeyError:
pass
else:
_invalidate_star_import_cache_module(parser_cache_item.parser.module)
def load_parser(path):
"""
Returns the module or None, if it fails.
"""
p_time = os.path.getmtime(path) if path else None
try:
parser_cache_item = parser_cache[path]
if not path or p_time <= parser_cache_item.change_time:
return parser_cache_item.parser
else:
# In case there is already a module cached and this module
# has to be reparsed, we also need to invalidate the import
# caches.
_invalidate_star_import_cache_module(parser_cache_item.parser.module)
except KeyError:
if settings.use_filesystem_cache:
return ParserPickling.load_parser(path, p_time)
def save_parser(path, parser, pickling=True):
try:
p_time = None if path is None else os.path.getmtime(path)
except OSError:
p_time = None
pickling = False
item = ParserCacheItem(parser, p_time)
parser_cache[path] = item
if settings.use_filesystem_cache and pickling:
ParserPickling.save_parser(path, item)
class ParserPickling(object):
version = 24
"""
Version number (integer) for file system cache.
Increment this number when there are any incompatible changes in
parser representation classes. For example, the following changes
are regarded as incompatible.
- Class name is changed.
- Class is moved to another module.
- Defined slot of the class is changed.
"""
def __init__(self):
self.__index = None
self.py_tag = 'cpython-%s%s' % sys.version_info[:2]
"""
Short name for distinguish Python implementations and versions.
It's like `sys.implementation.cache_tag` but for Python < 3.3
we generate something similar. See:
http://docs.python.org/3/library/sys.html#sys.implementation
.. todo:: Detect interpreter (e.g., PyPy).
"""
def load_parser(self, path, original_changed_time):
try:
pickle_changed_time = self._index[path]
except KeyError:
return None
if original_changed_time is not None \
and pickle_changed_time < original_changed_time:
# the pickle file is outdated
return None
with open(self._get_hashed_path(path), 'rb') as f:
try:
gc.disable()
parser_cache_item = pickle.load(f)
finally:
gc.enable()
debug.dbg('pickle loaded: %s', path)
parser_cache[path] = parser_cache_item
return parser_cache_item.parser
def save_parser(self, path, parser_cache_item):
self.__index = None
try:
files = self._index
except KeyError:
files = {}
self._index = files
with open(self._get_hashed_path(path), 'wb') as f:
pickle.dump(parser_cache_item, f, pickle.HIGHEST_PROTOCOL)
files[path] = parser_cache_item.change_time
self._flush_index()
@property
def _index(self):
if self.__index is None:
try:
with open(self._get_path('index.json')) as f:
data = json.load(f)
except (IOError, ValueError):
self.__index = {}
else:
# 0 means version is not defined (= always delete cache):
if data.get('version', 0) != self.version:
self.clear_cache()
self.__index = {}
else:
self.__index = data['index']
return self.__index
def _remove_old_modules(self):
# TODO use
change = False
if change:
self._flush_index(self)
self._index # reload index
def _flush_index(self):
data = {'version': self.version, 'index': self._index}
with open(self._get_path('index.json'), 'w') as f:
json.dump(data, f)
self.__index = None
def clear_cache(self):
shutil.rmtree(self._cache_directory())
def _get_hashed_path(self, path):
return self._get_path('%s.pkl' % hashlib.md5(path.encode("utf-8")).hexdigest())
def _get_path(self, file):
dir = self._cache_directory()
if not os.path.exists(dir):
os.makedirs(dir)
return os.path.join(dir, file)
def _cache_directory(self):
return os.path.join(settings.cache_directory, self.py_tag)
# is a singleton
ParserPickling = ParserPickling()
| """
This caching is very important for speed and memory optimizations. There's
nothing really spectacular, just some decorators. The following cache types are
available:
- module caching (`load_parser` and `save_parser`), which uses pickle and is
really important to assure low load times of modules like ``numpy``.
- ``time_cache`` can be used to cache something for just a limited time span,
which can be useful if there's user interaction and the user cannot react
faster than a certain time.
This module is one of the reasons why |jedi| is not thread-safe. As you can see
there are global variables, which are holding the cache information. Some of
these variables are being cleaned after every API usage.
"""
import time
import os
import sys
import json
import hashlib
import gc
import inspect
import shutil
import re
try:
import cPickle as pickle
except ImportError:
import pickle
from jedi import settings
from jedi import common
from jedi import debug
_time_caches = {}
# for fast_parser, should not be deleted
parser_cache = {}
class ParserCacheItem(object):
def __init__(self, parser, change_time=None):
self.parser = parser
if change_time is None:
change_time = time.time()
self.change_time = change_time
def clear_time_caches(delete_all=False):
""" Jedi caches many things, that should be completed after each completion
finishes.
:param delete_all: Deletes also the cache that is normally not deleted,
like parser cache, which is important for faster parsing.
"""
global _time_caches
if delete_all:
for cache in _time_caches.values():
cache.clear()
parser_cache.clear()
else:
# normally just kill the expired entries, not all
for tc in _time_caches.values():
# check time_cache for expired entries
for key, (t, value) in list(tc.items()):
if t < time.time():
# delete expired entries
del tc[key]
def time_cache(time_add_setting):
"""
s
This decorator works as follows: Call it with a setting and after that
use the function with a callable that returns the key.
But: This function is only called if the key is not available. After a
certain amount of time (`time_add_setting`) the cache is invalid.
"""
def _temp(key_func):
dct = {}
_time_caches[time_add_setting] = dct
def wrapper(*args, **kwargs):
generator = key_func(*args, **kwargs)
key = next(generator)
try:
expiry, value = dct[key]
if expiry > time.time():
return value
except KeyError:
pass
value = next(generator)
time_add = getattr(settings, time_add_setting)
if key is not None:
dct[key] = time.time() + time_add, value
return value
return wrapper
return _temp
@time_cache("call_signatures_validity")
def cache_call_signatures(evaluator, call, source, user_pos):
"""This function calculates the cache key."""
index = user_pos[0] - 1
lines = common.splitlines(source)
before_cursor = lines[index][:user_pos[1]]
other_lines = lines[call.start_pos[0]:index]
whole = '\n'.join(other_lines + [before_cursor])
before_bracket = re.match(r'.*\(', whole, re.DOTALL)
module_path = call.get_parent_until().path
yield None if module_path is None else (module_path, before_bracket, call.start_pos)
yield evaluator.eval_element(call)
def underscore_memoization(func):
"""
Decorator for methods::
class A(object):
def x(self):
if self._x:
self._x = 10
return self._x
Becomes::
class A(object):
@underscore_memoization
def x(self):
return 10
A now has an attribute ``_x`` written by this decorator.
"""
name = '_' + func.__name__
def wrapper(self):
try:
return getattr(self, name)
except AttributeError:
result = func(self)
if inspect.isgenerator(result):
result = list(result)
setattr(self, name, result)
return result
return wrapper
def memoize_method(method):
"""A normal memoize function."""
def wrapper(self, *args, **kwargs):
dct = self.__dict__.setdefault('_memoize_method_dct', {})
key = (args, frozenset(kwargs.items()))
try:
return dct[key]
except KeyError:
result = method(self, *args, **kwargs)
dct[key] = result
return result
return wrapper
def memoize_function(obj):
""" A normal memoize function for memoizing free functions. """
cache = obj.cache = {}
def memoizer(*args, **kwargs):
key = str(args) + str(kwargs)
if key not in cache:
cache[key] = obj(*args, **kwargs)
return cache[key]
return memoizer
def cache_star_import(func):
@time_cache("star_import_cache_validity")
def wrapper(self):
yield self.base # The cache key
yield func(self)
return wrapper
def _invalidate_star_import_cache_module(module, only_main=False):
""" Important if some new modules are being reparsed """
try:
t, modules = _time_caches['star_import_cache_validity'][module]
except KeyError:
pass
else:
del _time_caches['star_import_cache_validity'][module]
def invalidate_star_import_cache(path):
"""On success returns True."""
try:
parser_cache_item = parser_cache[path]
except KeyError:
pass
else:
_invalidate_star_import_cache_module(parser_cache_item.parser.module)
def load_parser(path):
"""
Returns the module or None, if it fails.
"""
p_time = os.path.getmtime(path) if path else None
try:
parser_cache_item = parser_cache[path]
if not path or p_time <= parser_cache_item.change_time:
return parser_cache_item.parser
else:
# In case there is already a module cached and this module
# has to be reparsed, we also need to invalidate the import
# caches.
_invalidate_star_import_cache_module(parser_cache_item.parser.module)
except KeyError:
if settings.use_filesystem_cache:
return ParserPickling.load_parser(path, p_time)
def save_parser(path, parser, pickling=True):
try:
p_time = None if path is None else os.path.getmtime(path)
except OSError:
p_time = None
pickling = False
item = ParserCacheItem(parser, p_time)
parser_cache[path] = item
if settings.use_filesystem_cache and pickling:
ParserPickling.save_parser(path, item)
class ParserPickling(object):
version = 24
"""
Version number (integer) for file system cache.
Increment this number when there are any incompatible changes in
parser representation classes. For example, the following changes
are regarded as incompatible.
- Class name is changed.
- Class is moved to another module.
- Defined slot of the class is changed.
"""
def __init__(self):
self.__index = None
self.py_tag = 'cpython-%s%s' % sys.version_info[:2]
"""
Short name for distinguish Python implementations and versions.
It's like `sys.implementation.cache_tag` but for Python < 3.3
we generate something similar. See:
http://docs.python.org/3/library/sys.html#sys.implementation
.. todo:: Detect interpreter (e.g., PyPy).
"""
def load_parser(self, path, original_changed_time):
try:
pickle_changed_time = self._index[path]
except KeyError:
return None
if original_changed_time is not None \
and pickle_changed_time < original_changed_time:
# the pickle file is outdated
return None
with open(self._get_hashed_path(path), 'rb') as f:
try:
gc.disable()
parser_cache_item = pickle.load(f)
finally:
gc.enable()
debug.dbg('pickle loaded: %s', path)
parser_cache[path] = parser_cache_item
return parser_cache_item.parser
def save_parser(self, path, parser_cache_item):
self.__index = None
try:
files = self._index
except KeyError:
files = {}
self._index = files
with open(self._get_hashed_path(path), 'wb') as f:
pickle.dump(parser_cache_item, f, pickle.HIGHEST_PROTOCOL)
files[path] = parser_cache_item.change_time
self._flush_index()
@property
def _index(self):
if self.__index is None:
try:
with open(self._get_path('index.json')) as f:
data = json.load(f)
except (IOError, ValueError):
self.__index = {}
else:
# 0 means version is not defined (= always delete cache):
if data.get('version', 0) != self.version:
self.clear_cache()
self.__index = {}
else:
self.__index = data['index']
return self.__index
def _remove_old_modules(self):
# TODO use
change = False
if change:
self._flush_index(self)
self._index # reload index
def _flush_index(self):
data = {'version': self.version, 'index': self._index}
with open(self._get_path('index.json'), 'w') as f:
json.dump(data, f)
self.__index = None
def clear_cache(self):
shutil.rmtree(self._cache_directory())
def _get_hashed_path(self, path):
return self._get_path('%s.pkl' % hashlib.md5(path.encode("utf-8")).hexdigest())
def _get_path(self, file):
dir = self._cache_directory()
if not os.path.exists(dir):
os.makedirs(dir)
return os.path.join(dir, file)
def _cache_directory(self):
return os.path.join(settings.cache_directory, self.py_tag)
# is a singleton
ParserPickling = ParserPickling()
| en | 0.868387 | This caching is very important for speed and memory optimizations. There's nothing really spectacular, just some decorators. The following cache types are available: - module caching (`load_parser` and `save_parser`), which uses pickle and is really important to assure low load times of modules like ``numpy``. - ``time_cache`` can be used to cache something for just a limited time span, which can be useful if there's user interaction and the user cannot react faster than a certain time. This module is one of the reasons why |jedi| is not thread-safe. As you can see there are global variables, which are holding the cache information. Some of these variables are being cleaned after every API usage. # for fast_parser, should not be deleted Jedi caches many things, that should be completed after each completion finishes. :param delete_all: Deletes also the cache that is normally not deleted, like parser cache, which is important for faster parsing. # normally just kill the expired entries, not all # check time_cache for expired entries # delete expired entries s This decorator works as follows: Call it with a setting and after that use the function with a callable that returns the key. But: This function is only called if the key is not available. After a certain amount of time (`time_add_setting`) the cache is invalid. This function calculates the cache key. Decorator for methods:: class A(object): def x(self): if self._x: self._x = 10 return self._x Becomes:: class A(object): @underscore_memoization def x(self): return 10 A now has an attribute ``_x`` written by this decorator. A normal memoize function. A normal memoize function for memoizing free functions. # The cache key Important if some new modules are being reparsed On success returns True. Returns the module or None, if it fails. # In case there is already a module cached and this module # has to be reparsed, we also need to invalidate the import # caches. Version number (integer) for file system cache. Increment this number when there are any incompatible changes in parser representation classes. For example, the following changes are regarded as incompatible. - Class name is changed. - Class is moved to another module. - Defined slot of the class is changed. Short name for distinguish Python implementations and versions. It's like `sys.implementation.cache_tag` but for Python < 3.3 we generate something similar. See: http://docs.python.org/3/library/sys.html#sys.implementation .. todo:: Detect interpreter (e.g., PyPy). # the pickle file is outdated # 0 means version is not defined (= always delete cache): # TODO use # reload index # is a singleton | 3.196703 | 3 |
sandia_hand/ros/sandia_hand_teleop/simple_grasp/simple_grasp.py | adarshrs/Drone-Simulator-for-ROS-Kinetic | 0 | 8268 | <reponame>adarshrs/Drone-Simulator-for-ROS-Kinetic
#!/usr/bin/env python
#
# Software License Agreement (Apache License)
#
# Copyright 2013 Open Source Robotics Foundation
# Author: <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import roslib; roslib.load_manifest('sandia_hand_teleop')
import rospy
import sys
from sandia_hand_msgs.srv import SimpleGraspSrv, SimpleGraspSrvResponse, SimpleGraspWithSlew, SimpleGraspWithSlewResponse
from sandia_hand_msgs.msg import SimpleGrasp
from osrf_msgs.msg import JointCommands
g_jc_pub = None
g_jc = JointCommands()
g_prev_jc_target = JointCommands()
def grasp_srv(req):
grasp_cb(req.grasp)
return SimpleGraspSrvResponse()
def grasp_slew_srv(req):
#print "going to %s in %.3f" % (req.grasp.name, req.slew_duration)
rate = rospy.Rate(100.0)
t_start = rospy.Time.now()
t_end = t_start + rospy.Duration(req.slew_duration)
while rospy.Time.now() < t_end:
dt = (rospy.Time.now() - t_start).to_sec()
dt_norm = dt / req.slew_duration
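# dt_norm ramps from 0 to 1 over slew_duration and drives the interpolation toward the target grasp.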
#print "%.3f" % dt_norm
grasp_spline(req.grasp.name, req.grasp.closed_amount, dt_norm)
rate.sleep()
grasp_spline(req.grasp.name, req.grasp.closed_amount, 1.0)
return SimpleGraspWithSlewResponse()
def grasp_spline(grasp_name, closed_amount, spline_amount):
global g_jc_pub, g_jc, g_prev_jc_target
#print "request: grasp [%s] amount [%f]" % (grasp_name, closed_amount)
# save some typing
gn = grasp_name
x = closed_amount
if x < 0:
x = 0
elif x > 1:
x = 1
origin = [0] * 12
g0 = [0] * 12
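# origin is the base joint pose for the grasp; g0 is scaled by the closed amount x and added on top (see the target computation below).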
if (gn == "cylindrical"):
g0 = [0,1.5,1.7, 0,1.5,1.7, 0,1.5,1.7, 0.2,.8,1.2]
elif (gn == "spherical"):
origin = [-0.7,0,0, 0.1,0,0, 0.7,0,0, 0,0,0]
g0 = [0,1.4,1.4, 0,1.4,1.4, 0,1.4,1.4, 0,0.7,0.7]
elif (gn == "prismatic"):
origin = [0,1.4,0, 0,1.4,0, 0,1.4,0, -0.1,0.8,-0.8]
g0 = [0,0,1.4, 0,0,1.4, 0,0,1.4, 0,0,1.4]
elif (gn == "finger_0_test"):
g0 = [0,1.5,1.7, 0,0,0, 0,0,0, 0,0,0]
elif (gn == "number_one"):
origin = [0,0,0, 0,1.5,1.5, 0,1.5,1.5, 0.4,0.8,1 ]
elif (gn == "peace"):
origin = [-0.2,0,0, 0.05,0,0, 0,1.5,1.5, 0.4,0.8,1 ]
elif (gn == "asl_a"):
origin = [0,1.5,1.5, 0,1.5,1.5, 0,1.5,1.5, 1.5,0.9,0.2 ]
elif (gn == "asl_b"):
origin = [0.1,0,0, 0,0,0, -0.1,0,0, 1,0.8,0.9 ]
elif (gn == "asl_c"):
origin = [0,0.7,0.9, 0,0.7,0.9, 0,0.7,0.9, 0,0.4,0.4 ]
elif (gn == "asl_d"):
origin = [0,0,0, 0,1.5,1.5, 0,1.5,1.5, 0.4,0.8,1 ]
elif (gn == "asl_e"):
origin = [0,1,1.8, 0,1,1.8, 0,1,1.8, 1.5,0.6,1]
elif (gn == "asl_f"):
origin = [0,1.3,1.2, 0.1,0,0, 0.2,0,0, 0.3,0.7,0.7 ]
elif (gn == "asl_g"):
origin = [0,1.5,0, 0,1.5,1.5, 0,1.5,1.5, 0,1,-.4 ]
elif (gn == "asl_h"):
origin = [0.1,1.5,0, 0,1.5,0, 0,1.5,1.5, 0,1,0.6 ]
elif (gn == "asl_i"):
origin = [0,1.5,1.5, 0,1.5,1.5, 0,0,0, 1.5,1.0,0.3 ]
elif (gn == "asl_j"):
origin = [0,1.5,1.5, 0,1.5,1.5, 0,0,0, 1.5,1.0,0.3 ]
g0 = [0,0,0, 0,0,0, 0,0,0, 0.5,1,1]
g1 = [0,0,0, 0,0,0, 0,0,0, 0,1,1]
elif (gn == "asl_k"):
origin = [0,0,0, 0,1.5,0, 0,1.5,1.5, 1.5,1.0,0.3]
elif (gn == "asl_l"):
origin = [0,0,0, 0,1.5,1.5, 0,1.5,1.5, 1.5,0,0]
elif (gn == "asl_m"):
origin = [0,1,1.5, 0,1,1.5, 0,1,1.5, 0,1,1]
elif (gn == "asl_n"):
origin = [0,1,1.5, 0,1,1.5, 0,1.5,1.5, 0,1,1]
elif (gn == "asl_o"):
origin = [0.1,1.3,1.2, 0,1.3,1.2, -0.1,1.3,1.2, 0.2,0.8,0.5]
elif (gn == "asl_p"):
origin = [0,0,0, 0,1.5,0, 0,1.5,1.5, 1.5,1,0.3]
elif (gn == "asl_q"):
origin = [0,1.3,1.2, 0,1.5,1.5, 0,1.5,1.5, 0.4,0.8,0.5]
elif (gn == "asl_r"):
origin = [0.1,0,0, -0.1,0,0, 0,1.5,1.5, 0,1,1]
elif (gn == "asl_s"):
origin = [0,1.5,1.5, 0,1.5,1.5, 0,1.5,1.5, 0,1,0.2]
elif (gn == "asl_t"):
origin = [-.4,1.3,1.5, 0,1.5,1.5, 0,1.5,1.5, 0.4,1,1]
elif (gn == "asl_u"):
origin = [0,0,0, 0,0,0, 0,1.5,1.5, 0,1,1]
elif (gn == "asl_v"):
origin = [-0.3,0,0, 0.1,0,0, 0,1.5,1.5, 0,1,1]
elif (gn == "asl_w"):
origin = [-0.3,0,0, 0,0,0, 0.3,0,0, 0,1,1]
elif (gn == "asl_x"):
origin = [0,0,1.5, 0,1.5,1.5, 0,1.5,1.5, 0,1,1]
elif (gn == "asl_y"):
origin = [0,1.5,1.5, 0,1.5,1.5, 0.3,0,0, 1.5,0,0]
elif (gn == "asl_z"):
origin = [0,1.0,0, 0,1.5,1.5, 0,1.5,1.5, 0.4,0.8,0.8]
g0 = [0.3,0.3,0, 0,0,0, 0,0,0, 0,0,0]
g1 = [-0.3,0.3,0, 0,0,0, 0,0,0, 0,0,0]
else:
return None # bogus
g_jc.position = [0] * 12
if (spline_amount < 0):
spline_amount = 0
elif (spline_amount > 1):
spline_amount = 1
for i in xrange(0, 12):
target = origin[i] + g0[i] * x
prev_target = g_prev_jc_target.position[i]
#g_jc.position[i] = origin[i] + g0[i] * x
#delta = target - g_prev_jc_target.position[i]
# compute convex combination between old and new targets
g_jc.position[i] = ( spline_amount) * target + \
(1.0 - spline_amount) * prev_target
#print "joint state: %s" % (str(g_jc.position))
g_jc_pub.publish(g_jc)
if (spline_amount == 1.0):
for i in xrange(0, 12):
g_prev_jc_target.position[i] = g_jc.position[i] # todo: make this better
def grasp_cb(msg):
grasp_spline(msg.name, msg.closed_amount, 1)
if __name__ == '__main__':
rospy.init_node('simple_grasp')
g_jc.name = ["f0_j0", "f0_j1", "f0_j2",
"f1_j0", "f1_j1", "f1_j2",
"f2_j0", "f2_j1", "f2_j2",
"f3_j0", "f3_j1", "f3_j2"]
g_jc.position = [0] * 12
g_prev_jc_target.position = [0] * 12
g_jc_pub = rospy.Publisher('joint_commands', JointCommands, queue_size=1) # same namespace
g_jc_srv = rospy.Service('simple_grasp', SimpleGraspSrv, grasp_srv)
g_sgws_srv = rospy.Service('simple_grasp_with_slew', SimpleGraspWithSlew, grasp_slew_srv)
g_jc_sub = rospy.Subscriber('simple_grasp', SimpleGrasp, grasp_cb)
print "simple grasp service is now running."
rospy.spin()
ui/ui_prestamo_libros.py | edzzn/Manejo_Liberia | 0 | 8269 | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'PrestamoDeLibros.ui'
#
# Created by: PyQt4 UI code generator 4.11.4
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
def _fromUtf8(s):
return s
try:
_encoding = QtGui.QApplication.UnicodeUTF8
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig)
class Ui_Form(object):
def setupUi(self, Form):
Form.setObjectName(_fromUtf8("Form"))
Form.resize(400, 300)
self.pushButton = QtGui.QPushButton(Form)
self.pushButton.setGeometry(QtCore.QRect(140, 70, 121, 41))
self.pushButton.setObjectName(_fromUtf8("pushButton"))
self.pushButton_2 = QtGui.QPushButton(Form)
self.pushButton_2.setGeometry(QtCore.QRect(140, 160, 121, 41))
self.pushButton_2.setObjectName(_fromUtf8("pushButton_2"))
self.retranslateUi(Form)
QtCore.QMetaObject.connectSlotsByName(Form)
def retranslateUi(self, Form):
Form.setWindowTitle(_translate("Form", "Form", None))
self.pushButton.setText(_translate("Form", "Solicitar", None))
self.pushButton_2.setText(_translate("Form", "Reservar", None))
if __name__ == "__main__":
import sys
app = QtGui.QApplication(sys.argv)
Form = QtGui.QWidget()
ui = Ui_Form()
ui.setupUi(Form)
Form.show()
sys.exit(app.exec_())
src/zope/app/content/__init__.py | zopefoundation/zope.app.content | 0 | 8270 | <reponame>zopefoundation/zope.app.content<filename>src/zope/app/content/__init__.py
##############################################################################
#
# Copyright (c) 2002 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Content Type convenience lookup functions."""
from zope.interface import provider
from zope.interface import providedBy
from zope.schema.interfaces import IVocabularyFactory
from zope.app.content.interfaces import IContentType
from zope.componentvocabulary.vocabulary import UtilityVocabulary
from zope.security.proxy import removeSecurityProxy
def queryType(object, interface):
"""Returns the object's interface which implements interface.
>>> from zope.interface import Interface
>>> class IContentType(Interface):
... pass
>>> from zope.interface import Interface, implementer, directlyProvides
>>> class I(Interface):
... pass
>>> class J(Interface):
... pass
>>> directlyProvides(I, IContentType)
>>> @implementer(I)
... class C(object):
... pass
>>> @implementer(J, I)
... class D(object):
... pass
>>> obj = C()
>>> c1_ctype = queryType(obj, IContentType)
>>> c1_ctype.__name__
'I'
>>> class I1(I):
... pass
>>> class I2(I1):
... pass
>>> class I3(Interface):
... pass
>>> @implementer(I1)
... class C1(object):
... pass
>>> obj1 = C1()
>>> c1_ctype = queryType(obj1, IContentType)
>>> c1_ctype.__name__
'I'
>>> @implementer(I2)
... class C2(object):
... pass
>>> obj2 = C2()
>>> c2_ctype = queryType(obj2, IContentType)
>>> c2_ctype.__name__
'I'
>>> @implementer(I3)
... class C3(object):
... pass
>>> obj3 = C3()
If Interface doesn't provide `IContentType`, `queryType` returns ``None``.
>>> c3_ctype = queryType(obj3, IContentType)
>>> c3_ctype
>>> c3_ctype is None
True
>>> class I4(I):
... pass
>>> directlyProvides(I4, IContentType)
>>> @implementer(I4)
... class C4(object):
... pass
>>> obj4 = C4()
>>> c4_ctype = queryType(obj4, IContentType)
>>> c4_ctype.__name__
'I4'
"""
# Remove the security proxy, so that we can introspect the type of the
# object's interfaces.
naked = removeSecurityProxy(object)
object_iro = providedBy(naked).__iro__
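# __iro__ is the interface resolution order, so the first interface that provides IContentType is the most specific match.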
for iface in object_iro:
if interface.providedBy(iface):
return iface
return None
def queryContentType(object):
"""Returns the interface implemented by object which implements
:class:`zope.app.content.interfaces.IContentType`.
>>> from zope.interface import Interface, implementer, directlyProvides
>>> class I(Interface):
... pass
>>> directlyProvides(I, IContentType)
>>> @implementer(I)
... class C(object):
... pass
>>> obj = C()
>>> c1_ctype = queryContentType(obj)
>>> c1_ctype.__name__
'I'
"""
return queryType(object, IContentType)
@provider(IVocabularyFactory)
class ContentTypesVocabulary(UtilityVocabulary):
interface = IContentType
python-modules/robcoewmrobotconfigurator/robcoewmrobotconfigurator/run.py | yschiebelhut/ewm-cloud-robotics | 0 | 8271 | <filename>python-modules/robcoewmrobotconfigurator/robcoewmrobotconfigurator/run.py
#!/usr/bin/env python3
# encoding: utf-8
#
# Copyright (c) 2019 SAP SE or an SAP affiliate company. All rights reserved.
#
# This file is part of ewm-cloud-robotics
# (see https://github.com/SAP/ewm-cloud-robotics).
#
# This file is licensed under the Apache Software License, v. 2 except as noted
# otherwise in the LICENSE file (https://github.com/SAP/ewm-cloud-robotics/blob/master/LICENSE)
#
"""Run the SAP EWM robot configurator."""
import sys
import signal
import traceback
import logging
import time
from robcoewmrobotconfigurator.ewm_robot_sync import EWMRobotSync
from robcoewmrobotconfigurator.robotconfigcontroller import RobotConfigurationController
from robcoewmrobotconfigurator.robco_robot_api import RobCoRobotAPI
_LOGGER = logging.getLogger(__name__)
class MainLoopController:
"""Control the main loop."""
def __init__(self):
"""Construct."""
# Shutdown Handler
self.shutdown = False
signal.signal(signal.SIGINT, self.exit_gracefully)
signal.signal(signal.SIGTERM, self.exit_gracefully)
# Sleep handler
self.last_time = time.time()
def exit_gracefully(self, signum, frame):
"""Set shutdown flag on SIGTERM and SIGINT."""
self.shutdown = True
_LOGGER.info('Closing application because signal %s received', signum)
def sleep(self, seconds: float):
"""Sleep maximum n seconds after the last call."""
timediff = time.time() - self.last_time
if timediff < seconds:
time.sleep(seconds-timediff)
self.last_time = time.time()
def run_robotconfigurator():
"""Run one instance of the robot configurator."""
# Register handler to control main loop
loop_control = MainLoopController()
# Create CR watcher instances
k8s_rb = RobCoRobotAPI()
k8s_rc = RobotConfigurationController()
# Create EWM robot syncer instance
robotsync = EWMRobotSync(k8s_rc)
# Register callback functions
k8s_rb.register_callback('ConfigurationController', ['ADDED'], k8s_rc.robco_robot_cb)
k8s_rc.register_callback(
'EWMRobotSync', ['ADDED', 'MODIFIED', 'REPROCESS'], robotsync.robotconfiguration_cb)
# Start
k8s_rb.run()
k8s_rc.run(reprocess=True)
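# reprocess=True re-delivers existing robot configuration CRs at startup (the 'REPROCESS' events registered above).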
_LOGGER.info('SAP EWM Robot Configurator started')
try:
# Looping while K8S watchers are running
while loop_control.shutdown is False:
# Refresh bearer token when using OAuth
if robotsync.odataconfig.authorization == robotsync.odataconfig.AUTH_OAUTH:
robotsync.odatahandler.refresh_access_token()
# Check if a K8S CR handler exception occurred
for k, exc in k8s_rb.thread_exceptions.items():
_LOGGER.error(
'Uncovered exception in "%s" thread of RobCoRobotAPI. Raising it in main '
'thread', k)
raise exc
for k, exc in k8s_rc.thread_exceptions.items():
_LOGGER.error(
'Uncovered exception in "%s" thread of RobotConfigurationController. Raising '
'it in main thread', k)
raise exc
# Sleep maximum 1.0 second
loop_control.sleep(1.0)
except KeyboardInterrupt:
_LOGGER.info('Keyboard interrupt - terminating')
except SystemExit:
_LOGGER.info('System exit - terminating')
finally:
# Stop K8S CR watchers
_LOGGER.info('Stopping K8S CR watchers')
k8s_rb.stop_watcher()
k8s_rc.stop_watcher()
# Shutdown threadpool executor
robotsync.executor.shutdown()
if __name__ == '__main__':
# Create root logger if running as main program
ROOT_LOGGER = logging.getLogger()
ROOT_LOGGER.setLevel(logging.INFO)
# Create console handler and set level to info
CH = logging.StreamHandler()
CH.setLevel(logging.INFO)
# Create formatter
FORMATTER = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
# Add formatter to ch
CH.setFormatter(FORMATTER)
# Add ch to logger
ROOT_LOGGER.addHandler(CH)
# Run robot master
try:
run_robotconfigurator()
except Exception: # pylint: disable=broad-except
EXC_INFO = sys.exc_info()
_LOGGER.critical(
'Unexpected error "%s" - "%s" - TRACEBACK: %s', EXC_INFO[0], EXC_INFO[1],
traceback.format_exception(*EXC_INFO))
sys.exit('Application terminated with exception: "{}" - "{}"'.format(
EXC_INFO[0], EXC_INFO[1]))
| <filename>python-modules/robcoewmrobotconfigurator/robcoewmrobotconfigurator/run.py
#!/usr/bin/env python3
# encoding: utf-8
#
# Copyright (c) 2019 SAP SE or an SAP affiliate company. All rights reserved.
#
# This file is part of ewm-cloud-robotics
# (see https://github.com/SAP/ewm-cloud-robotics).
#
# This file is licensed under the Apache Software License, v. 2 except as noted
# otherwise in the LICENSE file (https://github.com/SAP/ewm-cloud-robotics/blob/master/LICENSE)
#
"""Run the SAP EWM robot configurator."""
import sys
import signal
import traceback
import logging
import time
from robcoewmrobotconfigurator.ewm_robot_sync import EWMRobotSync
from robcoewmrobotconfigurator.robotconfigcontroller import RobotConfigurationController
from robcoewmrobotconfigurator.robco_robot_api import RobCoRobotAPI
_LOGGER = logging.getLogger(__name__)
class MainLoopController:
"""Control the main loop."""
def __init__(self):
"""Construct."""
# Shutdown Handler
self.shutdown = False
signal.signal(signal.SIGINT, self.exit_gracefully)
signal.signal(signal.SIGTERM, self.exit_gracefully)
# Sleep handler
self.last_time = time.time()
def exit_gracefully(self, signum, frame):
"""Set shutdown flag on SIGTERM and SIGINT."""
self.shutdown = True
_LOGGER.info('Closing application because signal %s received', signum)
def sleep(self, seconds: float):
"""Sleep maximum n seconds after the last call."""
timediff = time.time() - self.last_time
if timediff < seconds:
time.sleep(seconds-timediff)
self.last_time = time.time()
def run_robotconfigurator():
"""Run one instance of the robot configurator."""
# Register handler to control main loop
loop_control = MainLoopController()
# Create CR watcher instances
k8s_rb = RobCoRobotAPI()
k8s_rc = RobotConfigurationController()
# Create EWM robot syncer instance
robotsync = EWMRobotSync(k8s_rc)
# Register callback functions
k8s_rb.register_callback('ConfigurationController', ['ADDED'], k8s_rc.robco_robot_cb)
k8s_rc.register_callback(
'EWMRobotSync', ['ADDED', 'MODIFIED', 'REPROCESS'], robotsync.robotconfiguration_cb)
# Start
k8s_rb.run()
k8s_rc.run(reprocess=True)
_LOGGER.info('SAP EWM Robot Configurator started')
try:
# Looping while K8S watchers are running
while loop_control.shutdown is False:
# Refresh bearer token when using OAuth
if robotsync.odataconfig.authorization == robotsync.odataconfig.AUTH_OAUTH:
robotsync.odatahandler.refresh_access_token()
# Check if K8S CR handler exception occured
for k, exc in k8s_rb.thread_exceptions.items():
_LOGGER.error(
'Uncovered exception in "%s" thread of RobCoRobotAPI. Raising it in main '
'thread', k)
raise exc
for k, exc in k8s_rc.thread_exceptions.items():
_LOGGER.error(
'Uncovered exception in "%s" thread of RobotConfigurationController. Raising '
'it in main thread', k)
raise exc
# Sleep maximum 1.0 second
loop_control.sleep(1.0)
except KeyboardInterrupt:
_LOGGER.info('Keyboard interrupt - terminating')
except SystemExit:
_LOGGER.info('System exit - terminating')
finally:
# Stop K8S CR watchers
_LOGGER.info('Stopping K8S CR watchers')
k8s_rb.stop_watcher()
k8s_rc.stop_watcher()
# Shutdown threadpool executor
robotsync.executor.shutdown()
if __name__ == '__main__':
# Create root logger if running as main program
ROOT_LOGGER = logging.getLogger()
ROOT_LOGGER.setLevel(logging.INFO)
# Create console handler and set level to info
CH = logging.StreamHandler()
CH.setLevel(logging.INFO)
# Create formatter
FORMATTER = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
# Add formatter to ch
CH.setFormatter(FORMATTER)
# Add ch to logger
ROOT_LOGGER.addHandler(CH)
    # Run the robot configurator
try:
run_robotconfigurator()
except Exception: # pylint: disable=broad-except
EXC_INFO = sys.exc_info()
_LOGGER.critical(
'Unexpected error "%s" - "%s" - TRACEBACK: %s', EXC_INFO[0], EXC_INFO[1],
traceback.format_exception(*EXC_INFO))
sys.exit('Application terminated with exception: "{}" - "{}"'.format(
EXC_INFO[0], EXC_INFO[1]))
| en | 0.721967 | #!/usr/bin/env python3 # encoding: utf-8 # # Copyright (c) 2019 SAP SE or an SAP affiliate company. All rights reserved. # # This file is part of ewm-cloud-robotics # (see https://github.com/SAP/ewm-cloud-robotics). # # This file is licensed under the Apache Software License, v. 2 except as noted # otherwise in the LICENSE file (https://github.com/SAP/ewm-cloud-robotics/blob/master/LICENSE) # Run the SAP EWM robot configurator. Control the main loop. Construct. # Shutdown Handler # Sleep handler Set shutdown flag on SIGTERM and SIGINT. Sleep maximum n seconds after the last call. Run one instance of the robot configurator. # Register handler to control main loop # Create CR watcher instances # Create EWM robot syncer instance # Register callback functions # Start # Looping while K8S watchers are running # Refresh bearer token when using OAuth # Check if K8S CR handler exception occured # Sleep maximum 1.0 second # Stop K8S CR watchers # Shutdown threadpool executor # Create root logger if running as main program # Create console handler and set level to info # Create formatter # Add formatter to ch # Add ch to logger # Run robot master # pylint: disable=broad-except | 2.351717 | 2 |
website/addons/forward/views/__init__.py | DanielSBrown/osf.io | 1 | 8272 | from . import config, widget # noqa
| from . import config, widget # noqa
| none | 1 | 1.071691 | 1 |
|
hwtest/automated/usb3_test.py | crvallance/wlanpi-hwtest | 0 | 8273 | from hwtest.shell_utils import run_command
def test_linux_usb3hub():
"""
Test for Linux Foundation 3.0 root hub in `lsusb` output
"""
resp = run_command(["lsusb"])
assert "1d6b:0003" in resp
| from hwtest.shell_utils import run_command
def test_linux_usb3hub():
"""
Test for Linux Foundation 3.0 root hub in `lsusb` output
"""
resp = run_command(["lsusb"])
assert "1d6b:0003" in resp
| en | 0.66985 | Test for Linux Foundation 3.0 root hub in `lsusb` output | 1.951249 | 2 |
ninjabackend.py | tp-m/meson | 0 | 8274 | # Copyright 2012-2014 The Meson development team
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import backends
import environment, mesonlib
import build
import mlog
import dependencies
from mesonlib import File
from meson_install import InstallData
from build import InvalidArguments
from coredata import MesonException
import os, sys, pickle, re
import subprocess, shutil
if mesonlib.is_windows():
quote_char = '"'
execute_wrapper = 'cmd /c'
else:
quote_char = "'"
execute_wrapper = ''
def ninja_quote(text):
return text.replace(' ', '$ ').replace(':', '$:')
class RawFilename():
def __init__(self, fname):
self.fname = fname
def split(self, c):
return self.fname.split(c)
def startswith(self, s):
return self.fname.startswith(s)
class NinjaBuildElement():
def __init__(self, outfilenames, rule, infilenames):
if isinstance(outfilenames, str):
self.outfilenames = [outfilenames]
else:
self.outfilenames = outfilenames
assert(isinstance(rule, str))
self.rule = rule
if isinstance(infilenames, str):
self.infilenames = [infilenames]
else:
self.infilenames = infilenames
self.deps = []
self.orderdeps = []
self.elems = []
def add_dep(self, dep):
if isinstance(dep, list):
self.deps += dep
else:
self.deps.append(dep)
def add_orderdep(self, dep):
if isinstance(dep, list):
self.orderdeps += dep
else:
self.orderdeps.append(dep)
def add_item(self, name, elems):
if isinstance(elems, str):
elems = [elems]
self.elems.append((name, elems))
def write(self, outfile):
line = 'build %s: %s %s' % (' '.join([ninja_quote(i) for i in self.outfilenames]),\
self.rule,
' '.join([ninja_quote(i) for i in self.infilenames]))
if len(self.deps) > 0:
line += ' | ' + ' '.join([ninja_quote(x) for x in self.deps])
if len(self.orderdeps) > 0:
line += ' || ' + ' '.join([ninja_quote(x) for x in self.orderdeps])
line += '\n'
# This is the only way I could find to make this work on all
# platforms including Windows command shell. Slash is a dir separator
# on Windows, too, so all characters are unambiguous and, more importantly,
# do not require quoting.
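        # For example, a line such as
        #   build sub\dir\foo.o: c_COMPILER ..\src\foo.c
        # is emitted as
        #   build sub/dir/foo.o: c_COMPILER ../src/foo.c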
line = line.replace('\\', '/')
outfile.write(line)
for e in self.elems:
(name, elems) = e
should_quote = True
if name == 'DEPFILE' or name == 'DESC' or name == 'pool':
should_quote = False
line = ' %s = ' % name
q_templ = quote_char + "%s" + quote_char
noq_templ = "%s"
newelems = []
for i in elems:
if not should_quote or i == '&&': # Hackety hack hack
templ = noq_templ
else:
templ = q_templ
i = i.replace('\\', '\\\\')
if quote_char == '"':
i = i.replace('"', '\\"')
newelems.append(templ % ninja_quote(i))
line += ' '.join(newelems)
line += '\n'
outfile.write(line)
outfile.write('\n')
class NinjaBackend(backends.Backend):
def __init__(self, build):
super().__init__(build)
self.source_suffix_in_objs = True
self.ninja_filename = 'build.ninja'
self.fortran_deps = {}
self.all_outputs = {}
def check_outputs(self, elem):
for n in elem.outfilenames:
if n in self.all_outputs:
raise MesonException('Multiple producers for Ninja target "%s". Please rename your targets.' % n)
self.all_outputs[n] = True
def detect_vs_dep_prefix(self, outfile, tempfilename):
'''VS writes its dependency in a locale dependent format.
Detect the search prefix to use.'''
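        # The approach: compile a throwaway file with 'cl /showIncludes' and find
        # the output line that pulls in stdio.h; everything before the header path
        # is the localized "Note: including file:" prefix, which gets written to
        # build.ninja as msvc_deps_prefix so Ninja can parse /showIncludes output.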
if shutil.which('cl') is None:
return outfile
outfile.close()
open(os.path.join(self.environment.get_scratch_dir(), 'incdetect.c'),
'w').write('''#include<stdio.h>
int dummy;
''')
pc = subprocess.Popen(['cl', '/showIncludes', '/c', 'incdetect.c'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
cwd=self.environment.get_scratch_dir())
(stdo, _) = pc.communicate()
for line in stdo.split(b'\r\n'):
if line.endswith(b'stdio.h'):
matchstr = b':'.join(line.split(b':')[0:2]) + b':'
binfile = open(tempfilename, 'ab')
binfile.write(b'msvc_deps_prefix = ' + matchstr + b'\r\n')
binfile.close()
return open(tempfilename, 'a')
raise MesonException('Could not determine vs dep dependency prefix string.')
def generate(self, interp):
self.interpreter = interp
outfilename = os.path.join(self.environment.get_build_dir(), self.ninja_filename)
tempfilename = outfilename + '~'
outfile = open(tempfilename, 'w')
outfile.write('# This is the build file for project "%s"\n' % self.build.get_project())
outfile.write('# It is autogenerated by the Meson build system.\n')
outfile.write('# Do not edit by hand.\n\n')
outfile.write('ninja_required_version = 1.5.1\n\n')
outfile = self.detect_vs_dep_prefix(outfile, tempfilename)
self.generate_rules(outfile)
self.generate_phony(outfile)
outfile.write('# Build rules for targets\n\n')
[self.generate_target(t, outfile) for t in self.build.get_targets().values()]
if len(self.build.pot) > 0:
outfile.write('# Build rules for localisation.\n\n')
self.generate_po(outfile)
outfile.write('# Test rules\n\n')
self.generate_tests(outfile)
outfile.write('# Install rules\n\n')
self.generate_install(outfile)
if self.environment.coredata.get_builtin_option('coverage'):
outfile.write('# Coverage rules\n\n')
self.generate_coverage_rules(outfile)
outfile.write('# Suffix\n\n')
self.generate_ending(outfile)
        # Only overwrite the old build file after the new one has been
# fully created.
outfile.close()
os.replace(tempfilename, outfilename)
self.generate_compdb()
# http://clang.llvm.org/docs/JSONCompilationDatabase.html
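    # Writes compile_commands.json into the build dir so clang-based tooling can
    # pick it up. Roughly equivalent to running, by hand in the build directory:
    #   ninja -t compdb c_COMPILER cpp_COMPILER > compile_commands.json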
def generate_compdb(self):
ninja_exe = environment.detect_ninja()
builddir = self.environment.get_build_dir()
jsondb = subprocess.check_output([ninja_exe, '-t', 'compdb', 'c_COMPILER', 'cpp_COMPILER'], cwd=builddir)
open(os.path.join(builddir, 'compile_commands.json'), 'wb').write(jsondb)
# Get all generated headers. Any source file might need them so
# we need to add an order dependency to them.
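    # The returned paths live in each target's private build directory; they are
    # fed into the per-source compile rules as extra dependencies so the
    # generators run before anything that might #include their output.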
def get_generated_headers(self, target):
header_deps = []
for gensource in target.get_generated_sources():
if isinstance(gensource, build.CustomTarget):
continue
for src in gensource.get_outfilelist():
if self.environment.is_header(src):
header_deps.append(os.path.join(self.get_target_private_dir(target), src))
for dep in target.link_targets:
if isinstance(dep, (build.StaticLibrary, build.SharedLibrary)):
header_deps += self.get_generated_headers(dep)
return header_deps
def generate_target(self, target, outfile):
if isinstance(target, build.CustomTarget):
self.generate_custom_target(target, outfile)
if isinstance(target, build.RunTarget):
self.generate_run_target(target, outfile)
name = target.get_id()
gen_src_deps = []
if name in self.processed_targets:
return
if isinstance(target, build.Jar):
self.generate_jar_target(target, outfile)
return
if 'rust' in self.environment.coredata.compilers.keys() and self.has_rust(target):
self.generate_rust_target(target, outfile)
return
if 'cs' in self.environment.coredata.compilers.keys() and self.has_cs(target):
self.generate_cs_target(target, outfile)
return
if 'vala' in self.environment.coredata.compilers.keys() and self.has_vala(target):
gen_src_deps += self.generate_vala_compile(target, outfile)
if 'swift' in self.environment.coredata.compilers.keys() and self.has_swift(target):
self.generate_swift_target(target, outfile)
return
self.scan_fortran_module_outputs(target)
# The following deals with C/C++ compilation.
(gen_src, gen_other_deps) = self.process_dep_gens(outfile, target)
gen_src_deps += gen_src
self.process_target_dependencies(target, outfile)
self.generate_custom_generator_rules(target, outfile)
outname = self.get_target_filename(target)
obj_list = []
use_pch = self.environment.coredata.get_builtin_option('use_pch')
is_unity = self.environment.coredata.get_builtin_option('unity')
if use_pch and target.has_pch():
pch_objects = self.generate_pch(target, outfile)
else:
pch_objects = []
header_deps = gen_other_deps
unity_src = []
unity_deps = [] # Generated sources that must be built before compiling a Unity target.
header_deps += self.get_generated_headers(target)
for gensource in target.get_generated_sources():
if isinstance(gensource, build.CustomTarget):
for src in gensource.output:
src = os.path.join(self.get_target_dir(gensource), src)
if self.environment.is_source(src) and not self.environment.is_header(src):
if is_unity:
unity_deps.append(os.path.join(self.environment.get_build_dir(), RawFilename(src)))
else:
obj_list.append(self.generate_single_compile(target, outfile, RawFilename(src), True,
header_deps))
elif self.environment.is_object(src):
obj_list.append(src)
elif self.environment.is_library(src):
pass
else:
# Assume anything not specifically a source file is a header. This is because
# people generate files with weird suffixes (.inc, .fh) that they then include
# in their source files.
header_deps.append(RawFilename(src))
else:
for src in gensource.get_outfilelist():
if self.environment.is_object(src):
obj_list.append(os.path.join(self.get_target_private_dir(target), src))
elif not self.environment.is_header(src):
if is_unity:
if self.has_dir_part(src):
rel_src = src
else:
rel_src = os.path.join(self.get_target_private_dir(target), src)
unity_deps.append(rel_src)
abs_src = os.path.join(self.environment.get_build_dir(), rel_src)
unity_src.append(abs_src)
else:
obj_list.append(self.generate_single_compile(target, outfile, src, True,
header_deps=header_deps))
src_list = []
for src in gen_src_deps:
src_list.append(src)
if is_unity:
unity_src.append(os.path.join(self.environment.get_build_dir(), src))
header_deps.append(src)
else:
                # Generated targets are ordered deps because they must exist
# before the sources compiling them are used. After the first
# compile we get precise dependency info from dep files.
# This should work in all cases. If it does not, then just
# move them from orderdeps to proper deps.
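                # For example, a generated foo.h only has to exist before the
                # first compile, whereas a generated foo.c gets its own compile
                # rule as a regular input.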
if self.environment.is_header(src):
header_deps.append(src)
else:
obj_list.append(self.generate_single_compile(target, outfile, src, True, [], header_deps))
for src in target.get_sources():
if src.endswith('.vala'):
continue
if not self.environment.is_header(src):
src_list.append(src)
if is_unity:
abs_src = os.path.join(self.environment.get_build_dir(),
src.rel_to_builddir(self.build_to_src))
unity_src.append(abs_src)
else:
obj_list.append(self.generate_single_compile(target, outfile, src, False, [], header_deps))
obj_list += self.flatten_object_list(target)
if is_unity:
for src in self.generate_unity_files(target, unity_src):
obj_list.append(self.generate_single_compile(target, outfile, src, True, unity_deps + header_deps))
linker = self.determine_linker(target, src_list)
elem = self.generate_link(target, outfile, outname, obj_list, linker, pch_objects)
self.generate_shlib_aliases(target, self.get_target_dir(target))
elem.write(outfile)
self.processed_targets[name] = True
def process_target_dependencies(self, target, outfile):
for t in target.get_dependencies():
tname = t.get_basename() + t.type_suffix()
if not tname in self.processed_targets:
self.generate_target(t, outfile)
def generate_custom_target(self, target, outfile):
(srcs, ofilenames, cmd) = self.eval_custom_target_command(target)
deps = []
for i in target.get_dependencies():
# FIXME, should not grab element at zero but rather expand all.
if isinstance(i, list):
i = i[0]
fname = i.get_filename()
if isinstance(fname, list):
fname = fname[0]
deps.append(os.path.join(self.get_target_dir(i), fname))
if target.build_always:
deps.append('PHONY')
elem = NinjaBuildElement(ofilenames, 'CUSTOM_COMMAND', srcs)
for i in target.depend_files:
if isinstance(i, mesonlib.File):
deps.append(i.rel_to_builddir(self.build_to_src))
else:
deps.append(os.path.join(self.build_to_src, i))
elem.add_dep(deps)
for d in target.extra_depends:
tmp = d.get_filename()
if not isinstance(tmp, list):
tmp = [tmp]
for fname in tmp:
elem.add_dep(os.path.join(self.get_target_dir(d), fname))
elem.add_item('COMMAND', cmd)
elem.add_item('description', 'Generating %s with a custom command.' % target.name)
elem.write(outfile)
self.check_outputs(elem)
self.processed_targets[target.name + target.type_suffix()] = True
def generate_run_target(self, target, outfile):
runnerscript = os.path.join(self.environment.get_script_dir(), 'commandrunner.py')
deps = []
arg_strings = []
for i in target.args:
if isinstance(i, str):
arg_strings.append(i)
elif isinstance(i, (build.BuildTarget, build.CustomTarget)):
relfname = self.get_target_filename(i)
deps.append(relfname)
arg_strings.append(os.path.join(self.environment.get_build_dir(), relfname))
else:
mlog.debug(str(i))
raise MesonException('Unreachable code in generate_run_target.')
elem = NinjaBuildElement(target.name, 'CUSTOM_COMMAND', deps)
cmd = [sys.executable, runnerscript, self.environment.get_source_dir(), self.environment.get_build_dir(), target.subdir]
texe = target.command
try:
texe = texe.held_object
except AttributeError:
pass
if isinstance(texe, build.Executable):
abs_exe = os.path.join(self.environment.get_build_dir(), self.get_target_filename(texe))
deps.append(self.get_target_filename(texe))
if self.environment.is_cross_build() \
and self.environment.cross_info.config['binaries'].get('exe_wrapper', None) is not None:
cmd += [self.environment.cross_info.config['binaries']['exe_wrapper']]
cmd.append(abs_exe)
else:
cmd.append(target.command)
cmd += arg_strings
elem.add_item('COMMAND', cmd)
elem.add_item('description', 'Running external command %s.' % target.name)
elem.add_item('pool', 'console')
elem.write(outfile)
self.check_outputs(elem)
self.processed_targets[target.name + target.type_suffix()] = True
def generate_po(self, outfile):
for p in self.build.pot:
(packagename, languages, subdir) = p
input_file = os.path.join(subdir, 'POTFILES')
elem = NinjaBuildElement('pot', 'GEN_POT', [])
elem.add_item('PACKAGENAME', packagename)
elem.add_item('OUTFILE', packagename + '.pot')
elem.add_item('FILELIST', os.path.join(self.environment.get_source_dir(), input_file))
elem.add_item('OUTDIR', os.path.join(self.environment.get_source_dir(), subdir))
elem.write(outfile)
self.check_outputs(elem)
for l in languages:
infile = os.path.join(self.environment.get_source_dir(), subdir, l + '.po')
outfilename = os.path.join(subdir, l + '.gmo')
lelem = NinjaBuildElement(outfilename, 'GEN_GMO', infile)
lelem.add_item('INFILE', infile)
lelem.add_item('OUTFILE', outfilename)
lelem.write(outfile)
self.check_outputs(lelem)
def generate_coverage_rules(self, outfile):
(gcovr_exe, lcov_exe, genhtml_exe) = environment.find_coverage_tools()
added_rule = False
if gcovr_exe:
added_rule = True
elem = NinjaBuildElement('coverage-xml', 'CUSTOM_COMMAND', '')
elem.add_item('COMMAND', [gcovr_exe, '-x', '-r', self.environment.get_build_dir(),\
'-o', os.path.join(self.environment.get_log_dir(), 'coverage.xml')])
elem.add_item('DESC', 'Generating XML coverage report.')
elem.write(outfile)
elem = NinjaBuildElement('coverage-text', 'CUSTOM_COMMAND', '')
elem.add_item('COMMAND', [gcovr_exe, '-r', self.environment.get_build_dir(),\
'-o', os.path.join(self.environment.get_log_dir(), 'coverage.txt')])
elem.add_item('DESC', 'Generating text coverage report.')
elem.write(outfile)
self.check_outputs(elem)
if lcov_exe and genhtml_exe:
added_rule = True
phony_elem = NinjaBuildElement('coverage-html', 'phony', 'coveragereport/index.html')
phony_elem.write(outfile)
elem = NinjaBuildElement('coveragereport/index.html', 'CUSTOM_COMMAND', '')
command = [lcov_exe, '--directory', self.environment.get_build_dir(),\
'--capture', '--output-file', 'coverage.info', '--no-checksum',\
'&&', genhtml_exe, '--prefix', self.environment.get_build_dir(),\
'--output-directory', self.environment.get_log_dir(), '--title', 'Code coverage',\
'--legend', '--show-details', 'coverage.info']
elem.add_item('COMMAND', command)
elem.add_item('DESC', 'Generating HTML coverage report.')
self.check_outputs(elem)
elem.write(outfile)
if not added_rule:
mlog.log(mlog.red('Warning:'), 'coverage requested but neither gcovr nor lcov/genhtml found.')
def generate_install(self, outfile):
script_root = self.environment.get_script_dir()
install_script = os.path.join(script_root, 'meson_install.py')
install_data_file = os.path.join(self.environment.get_scratch_dir(), 'install.dat')
depfixer = os.path.join(script_root, 'depfixer.py')
d = InstallData(self.environment.get_source_dir(),
self.environment.get_build_dir(),
self.environment.get_prefix(), depfixer)
elem = NinjaBuildElement('install', 'CUSTOM_COMMAND', 'PHONY')
elem.add_dep('all')
elem.add_item('DESC', 'Installing files.')
elem.add_item('COMMAND', [sys.executable, install_script, install_data_file])
elem.add_item('pool', 'console')
self.generate_depmf_install(d)
self.generate_target_install(d)
self.generate_header_install(d)
self.generate_man_install(d)
self.generate_data_install(d)
self.generate_po_install(d, elem)
self.generate_custom_install_script(d)
self.generate_subdir_install(d)
elem.write(outfile)
self.check_outputs(elem)
ofile = open(install_data_file, 'wb')
pickle.dump(d, ofile)
def generate_po_install(self, d, elem):
for p in self.build.pot:
(package_name, languages, subdir) = p
# FIXME: assumes only one po package per source
d.po_package_name = package_name
for lang in languages:
rel_src = os.path.join(subdir, lang + '.gmo')
src_file = os.path.join(self.environment.get_build_dir(), rel_src)
d.po.append((src_file, self.environment.coredata.get_builtin_option('localedir'), lang))
elem.add_dep(rel_src)
def generate_target_install(self, d):
libdir = self.environment.get_libdir()
bindir = self.environment.get_bindir()
should_strip = self.environment.coredata.get_builtin_option('strip')
for t in self.build.get_targets().values():
if t.should_install():
outdir = t.get_custom_install_dir()
if outdir is None:
if isinstance(t, build.Executable):
outdir = bindir
else:
outdir = libdir
i = [self.get_target_filename(t), outdir, t.get_aliaslist(),\
should_strip, t.install_rpath]
d.targets.append(i)
def generate_custom_install_script(self, d):
d.install_scripts = self.build.install_scripts
def generate_header_install(self, d):
incroot = self.environment.get_includedir()
headers = self.build.get_headers()
for h in headers:
outdir = h.get_custom_install_dir()
if outdir is None:
outdir = os.path.join(incroot, h.get_install_subdir())
for f in h.get_sources():
abspath = os.path.join(self.environment.get_source_dir(), h.get_source_subdir(), f)
i = [abspath, outdir]
d.headers.append(i)
def generate_man_install(self, d):
manroot = self.environment.get_mandir()
man = self.build.get_man()
for m in man:
for f in m.get_sources():
num = f.split('.')[-1]
subdir = m.get_custom_install_dir()
if subdir is None:
subdir = os.path.join(manroot, 'man' + num)
srcabs = os.path.join(self.environment.get_source_dir(), m.get_source_subdir(), f)
dstabs = os.path.join(subdir, f + '.gz')
i = [srcabs, dstabs]
d.man.append(i)
def generate_data_install(self, d):
data = self.build.get_data()
for de in data:
assert(isinstance(de, build.Data))
subdir = de.install_dir
for f in de.sources:
if de.in_sourcetree:
srcprefix = self.environment.get_source_dir()
else:
srcprefix = self.environment.get_build_dir()
srcabs = os.path.join(srcprefix, de.source_subdir, f)
dstabs = os.path.join(subdir, f)
i = [srcabs, dstabs]
d.data.append(i)
def generate_subdir_install(self, d):
for sd in self.build.get_install_subdirs():
src_dir = os.path.join(self.environment.get_source_dir(), sd.source_subdir, sd.installable_subdir)
dst_dir = os.path.join(self.environment.get_prefix(), sd.install_dir)
d.install_subdirs.append([src_dir, dst_dir])
def write_test_suite_targets(self, cmd, outfile):
suites = {}
for t in self.build.get_tests():
for s in t.suite:
suites[s] = True
suites = list(suites.keys())
suites.sort()
for s in suites:
if s == '':
visible_name = 'for top level tests'
else:
visible_name = s
elem = NinjaBuildElement('test-' + s, 'CUSTOM_COMMAND', ['all', 'PHONY'])
elem.add_item('COMMAND', cmd + ['--suite=' + s])
elem.add_item('DESC', 'Running test suite %s.' % visible_name)
elem.add_item('pool', 'console')
elem.write(outfile)
self.check_outputs(elem)
def generate_tests(self, outfile):
self.serialise_tests()
valgrind = environment.find_valgrind()
script_root = self.environment.get_script_dir()
test_script = os.path.join(script_root, 'meson_test.py')
test_data = os.path.join(self.environment.get_scratch_dir(), 'meson_test_setup.dat')
cmd = [sys.executable, test_script, test_data]
elem = NinjaBuildElement('test', 'CUSTOM_COMMAND', ['all', 'PHONY'])
elem.add_item('COMMAND', cmd)
elem.add_item('DESC', 'Running all tests.')
elem.add_item('pool', 'console')
elem.write(outfile)
self.check_outputs(elem)
self.write_test_suite_targets(cmd, outfile)
if valgrind:
velem = NinjaBuildElement('test-valgrind', 'CUSTOM_COMMAND', ['all', 'PHONY'])
velem.add_item('COMMAND', cmd + ['--wrapper=' + valgrind])
velem.add_item('DESC', 'Running test suite under Valgrind.')
velem.add_item('pool', 'console')
velem.write(outfile)
self.check_outputs(velem)
# And then benchmarks.
benchmark_script = os.path.join(script_root, 'meson_benchmark.py')
benchmark_data = os.path.join(self.environment.get_scratch_dir(), 'meson_benchmark_setup.dat')
cmd = [sys.executable, benchmark_script, benchmark_data]
elem = NinjaBuildElement('benchmark', 'CUSTOM_COMMAND', ['all', 'PHONY'])
elem.add_item('COMMAND', cmd)
elem.add_item('DESC', 'Running benchmark suite.')
elem.add_item('pool', 'console')
elem.write(outfile)
self.check_outputs(elem)
def generate_rules(self, outfile):
outfile.write('# Rules for compiling.\n\n')
self.generate_compile_rules(outfile)
outfile.write('# Rules for linking.\n\n')
if self.environment.is_cross_build():
self.generate_static_link_rules(True, outfile)
self.generate_static_link_rules(False, outfile)
self.generate_dynamic_link_rules(outfile)
outfile.write('# Other rules\n\n')
outfile.write('rule CUSTOM_COMMAND\n')
outfile.write(' command = $COMMAND\n')
outfile.write(' description = $DESC\n')
outfile.write(' restat = 1\n\n')
outfile.write('rule REGENERATE_BUILD\n')
c = (quote_char + ninja_quote(sys.executable) + quote_char,
quote_char + ninja_quote(self.environment.get_build_command()) + quote_char,
quote_char + ninja_quote(self.environment.get_source_dir()) + quote_char,
quote_char + ninja_quote(self.environment.get_build_dir()) + quote_char)
outfile.write(" command = %s %s %s %s --backend ninja secret-handshake\n" % c)
outfile.write(' description = Regenerating build files\n')
outfile.write(' generator = 1\n\n')
if len(self.build.pot) > 0:
self.generate_gettext_rules(outfile)
outfile.write('\n')
def generate_gettext_rules(self, outfile):
rule = 'rule GEN_POT\n'
command = " command = xgettext --package-name=$PACKAGENAME -p $OUTDIR -f $FILELIST -D '%s' -k_ -o $OUTFILE\n" % \
self.environment.get_source_dir()
desc = " description = Creating pot file for package $PACKAGENAME.\n"
outfile.write(rule)
outfile.write(command)
outfile.write(desc)
outfile.write('\n')
rule = 'rule GEN_GMO\n'
command = ' command = msgfmt $INFILE -o $OUTFILE\n'
desc = ' description = Generating gmo file $OUTFILE\n'
outfile.write(rule)
outfile.write(command)
outfile.write(desc)
outfile.write('\n')
def generate_phony(self, outfile):
outfile.write('# Phony build target, always out of date\n')
outfile.write('build PHONY: phony\n')
outfile.write('\n')
def generate_jar_target(self, target, outfile):
fname = target.get_filename()
subdir = target.get_subdir()
outname_rel = os.path.join(self.get_target_dir(target), fname)
src_list = target.get_sources()
class_list = []
compiler = self.get_compiler_for_source(src_list[0])
assert(compiler.get_language() == 'java')
c = 'c'
m = ''
e = ''
f = 'f'
main_class = target.get_main_class()
if main_class != '':
e = 'e'
for src in src_list:
plain_class_path = self.generate_single_java_compile(src, target, compiler, outfile)
class_list.append(plain_class_path)
class_dep_list = [os.path.join(self.get_target_private_dir(target), i) for i in class_list]
jar_rule = 'java_LINKER'
commands = [c+m+e+f]
if e != '':
commands.append(main_class)
commands.append(self.get_target_filename(target))
for cls in class_list:
commands += ['-C', self.get_target_private_dir(target), cls]
elem = NinjaBuildElement(outname_rel, jar_rule, [])
elem.add_dep(class_dep_list)
elem.add_item('ARGS', commands)
elem.write(outfile)
self.check_outputs(elem)
def generate_cs_resource_tasks(self, target, outfile):
args = []
deps = []
for r in target.resources:
rel_sourcefile = os.path.join(self.build_to_src, target.subdir, r)
if r.endswith('.resources'):
a = '-resource:' + rel_sourcefile
elif r.endswith('.txt') or r.endswith('.resx'):
ofilebase = os.path.splitext(os.path.basename(r))[0] + '.resources'
ofilename = os.path.join(self.get_target_private_dir(target), ofilebase)
elem = NinjaBuildElement(ofilename, "CUSTOM_COMMAND", rel_sourcefile)
elem.add_item('COMMAND', ['resgen', rel_sourcefile, ofilename])
elem.add_item('DESC', 'Compiling resource %s.' % rel_sourcefile)
elem.write(outfile)
self.check_outputs(elem)
deps.append(ofilename)
a = '-resource:' + ofilename
else:
raise InvalidArguments('Unknown resource file %s.' % r)
args.append(a)
return (args, deps)
def generate_cs_target(self, target, outfile):
buildtype = self.environment.coredata.get_builtin_option('buildtype')
fname = target.get_filename()
outname_rel = os.path.join(self.get_target_dir(target), fname)
src_list = target.get_sources()
compiler = self.get_compiler_for_source(src_list[0])
assert(compiler.get_language() == 'cs')
rel_srcs = [s.rel_to_builddir(self.build_to_src) for s in src_list]
deps = []
commands = target.extra_args.get('cs', [])
commands += compiler.get_buildtype_args(buildtype)
if isinstance(target, build.Executable):
commands.append('-target:exe')
elif isinstance(target, build.SharedLibrary):
commands.append('-target:library')
else:
raise MesonException('Unknown C# target type.')
(resource_args, resource_deps) = self.generate_cs_resource_tasks(target, outfile)
commands += resource_args
deps += resource_deps
commands += compiler.get_output_args(outname_rel)
for l in target.link_targets:
lname = os.path.join(self.get_target_dir(l), l.get_filename())
commands += compiler.get_link_args(lname)
deps.append(lname)
if '-g' in commands:
outputs = [outname_rel, outname_rel + '.mdb']
else:
outputs = [outname_rel]
elem = NinjaBuildElement(outputs, 'cs_COMPILER', rel_srcs)
elem.add_dep(deps)
elem.add_item('ARGS', commands)
self.check_outputs(elem)
elem.write(outfile)
def generate_single_java_compile(self, src, target, compiler, outfile):
args = []
args += compiler.get_buildtype_args(self.environment.coredata.get_builtin_option('buildtype'))
args += compiler.get_output_args(self.get_target_private_dir(target))
for i in target.include_dirs:
for idir in i.get_incdirs():
args += ['-sourcepath', os.path.join(self.build_to_src, i.curdir, idir)]
rel_src = src.rel_to_builddir(self.build_to_src)
plain_class_path = src.fname[:-4] + 'class'
rel_obj = os.path.join(self.get_target_private_dir(target), plain_class_path)
element = NinjaBuildElement(rel_obj, compiler.get_language() + '_COMPILER', rel_src)
element.add_item('ARGS', args)
element.write(outfile)
self.check_outputs(element)
return plain_class_path
def generate_java_link(self, outfile):
rule = 'rule java_LINKER\n'
command = ' command = jar $ARGS\n'
description = ' description = Creating jar $out.\n'
outfile.write(rule)
outfile.write(command)
outfile.write(description)
outfile.write('\n')
def split_vala_sources(self, sources):
src = []
vapi_src = []
for s in sources:
if s.endswith('.vapi'):
vapi_src.append(s)
else:
src.append(s)
return (src, vapi_src)
def determine_dep_vapis(self, target):
result = []
for dep in target.link_targets:
for i in dep.sources:
if hasattr(i, 'fname'):
i = i.fname
if i.endswith('vala'):
vapiname = os.path.splitext(os.path.split(i)[1])[0] + '.vapi'
fullname = os.path.join(self.get_target_private_dir(dep), vapiname)
result.append(fullname)
break
return result
def generate_vala_compile(self, target, outfile):
"""Vala is compiled into C. Set up all necessary build steps here."""
valac = self.environment.coredata.compilers['vala']
(src, vapi_src) = self.split_vala_sources(target.get_sources())
vapi_src = [x.rel_to_builddir(self.build_to_src) for x in vapi_src]
extra_dep_files = []
vala_input_files = []
for s in src:
if s.endswith('.vala'):
vala_input_files.append(s.rel_to_builddir(self.build_to_src))
namebase = os.path.splitext(os.path.split(vala_input_files[0])[1])[0]
hname = namebase + '.h'
vapiname = namebase + '.vapi'
outputs = [vapiname]
args = ['-d', self.get_target_private_dir(target)]
        args += ['-C'] # Compile Vala down to C sources; do not link.
if not isinstance(target, build.Executable):
outputs.append(hname)
args += ['-H', hname]
args += ['--vapi=' + vapiname]
for src in vala_input_files:
namebase = os.path.splitext(os.path.split(src)[1])[0] + '.c'
outputs.append(namebase)
if self.environment.coredata.get_builtin_option('werror'):
args += valac.get_werror_args()
for d in target.external_deps:
if isinstance(d, dependencies.PkgConfigDependency):
if d.name == 'glib-2.0' and d.version_requirement is not None \
and d.version_requirement.startswith(('>=', '==')):
args += ['--target-glib', d.version_requirement[2:]]
args += ['--pkg', d.name]
extra_args = []
for a in target.extra_args.get('vala', []):
if isinstance(a, File):
relname = a.rel_to_builddir(self.build_to_src)
extra_dep_files.append(relname)
extra_args.append(relname)
else:
extra_args.append(a)
dependency_vapis = self.determine_dep_vapis(target)
extra_dep_files += dependency_vapis
args += extra_args
args += dependency_vapis
outputs = [os.path.join(self.get_target_private_dir(target), x) for x in outputs]
element = NinjaBuildElement(outputs,
valac.get_language() + '_COMPILER',
vala_input_files + vapi_src)
element.add_item('ARGS', args)
element.add_dep(extra_dep_files)
element.write(outfile)
self.check_outputs(element)
return outputs
def generate_rust_target(self, target, outfile):
rustc = self.environment.coredata.compilers['rust']
relsrc = []
for i in target.get_sources():
if not rustc.can_compile(i):
raise InvalidArguments('Rust target %s contains a non-rust source file.' % target.get_basename())
relsrc.append(i.rel_to_builddir(self.build_to_src))
target_name = os.path.join(target.subdir, target.get_filename())
args = ['--crate-type']
if isinstance(target, build.Executable):
cratetype = 'bin'
elif isinstance(target, build.SharedLibrary):
cratetype = 'rlib'
elif isinstance(target, build.StaticLibrary):
cratetype = 'rlib'
else:
raise InvalidArguments('Unknown target type for rustc.')
args.append(cratetype)
args += rustc.get_buildtype_args(self.environment.coredata.get_builtin_option('buildtype'))
depfile = target.name + '.d'
args += ['--out-dir', target.subdir]
args += ['--emit', 'dep-info', '--emit', 'link']
orderdeps = [os.path.join(t.subdir, t.get_filename()) for t in target.link_targets]
linkdirs = {}
for d in target.link_targets:
linkdirs[d.subdir] = True
for d in linkdirs.keys():
if d == '':
d = '.'
args += ['-L', d]
element = NinjaBuildElement(target_name, 'rust_COMPILER', relsrc)
if len(orderdeps) > 0:
element.add_orderdep(orderdeps)
element.add_item('ARGS', args)
element.add_item('targetdep', depfile)
element.add_item('cratetype', cratetype)
element.write(outfile)
self.check_outputs(element)
def swift_module_file_name(self, target):
return os.path.join(self.get_target_private_dir(target),
self.target_swift_modulename(target) + '.swiftmodule')
def target_swift_modulename(self, target):
return target.name
def is_swift_target(self, target):
for s in target.sources:
if s.endswith('swift'):
return True
return False
def determine_swift_dep_modules(self, target):
result = []
for l in target.link_targets:
if self.is_swift_target(l):
result.append(self.swift_module_file_name(l))
return result
def determine_swift_dep_dirs(self, target):
result = []
for l in target.link_targets:
result.append(self.get_target_private_dir_abs(l))
return result
def get_swift_link_deps(self, target):
result = []
for l in target.link_targets:
result.append(self.get_target_filename(l))
return result
def split_swift_generated_sources(self, target):
all_srcs = []
for genlist in target.get_generated_sources():
if isinstance(genlist, build.CustomTarget):
for ifile in genlist.get_filename():
rel = os.path.join(self.get_target_dir(genlist), ifile)
all_srcs.append(rel)
else:
for ifile in genlist.get_outfilelist():
rel = os.path.join(self.get_target_private_dir(target), ifile)
all_srcs.append(rel)
srcs = []
others = []
for i in all_srcs:
if i.endswith('.swift'):
srcs.append(i)
else:
others.append(i)
return (srcs, others)
def generate_swift_target(self, target, outfile):
module_name = self.target_swift_modulename(target)
swiftc = self.environment.coredata.compilers['swift']
abssrc = []
abs_headers = []
header_imports = []
for i in target.get_sources():
if swiftc.can_compile(i):
relsrc = i.rel_to_builddir(self.build_to_src)
abss = os.path.normpath(os.path.join(self.environment.get_build_dir(), relsrc))
abssrc.append(abss)
elif self.environment.is_header(i):
relh = i.rel_to_builddir(self.build_to_src)
absh = os.path.normpath(os.path.join(self.environment.get_build_dir(), relh))
abs_headers.append(absh)
header_imports += swiftc.get_header_import_args(absh)
else:
raise InvalidArguments('Swift target %s contains a non-swift source file.' % target.get_basename())
os.makedirs(self.get_target_private_dir_abs(target), exist_ok=True)
compile_args = swiftc.get_compile_only_args()
compile_args += swiftc.get_module_args(module_name)
link_args = swiftc.get_output_args(os.path.join(self.environment.get_build_dir(), self.get_target_filename(target)))
rundir = self.get_target_private_dir(target)
out_module_name = self.swift_module_file_name(target)
in_module_files = self.determine_swift_dep_modules(target)
abs_module_dirs = self.determine_swift_dep_dirs(target)
module_includes = []
for x in abs_module_dirs:
module_includes += swiftc.get_include_args(x)
link_deps = self.get_swift_link_deps(target)
abs_link_deps = [os.path.join(self.environment.get_build_dir(), x) for x in link_deps]
(rel_generated, _) = self.split_swift_generated_sources(target)
abs_generated = [os.path.join(self.environment.get_build_dir(), x) for x in rel_generated]
# We need absolute paths because swiftc needs to be invoked in a subdir
# and this is the easiest way about it.
objects = [] # Relative to swift invocation dir
rel_objects = [] # Relative to build.ninja
for i in abssrc + abs_generated:
base = os.path.split(i)[1]
oname = os.path.splitext(base)[0] + '.o'
objects.append(oname)
rel_objects.append(os.path.join(self.get_target_private_dir(target), oname))
# Swiftc does not seem to be able to emit objects and module files in one go.
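        # Hence two build statements below: one invocation produces the object
        # files, a second one produces the .swiftmodule from the same sources.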
elem = NinjaBuildElement(rel_objects,
'swift_COMPILER',
abssrc)
elem.add_dep(in_module_files + rel_generated)
elem.add_dep(abs_headers)
elem.add_item('ARGS', compile_args + header_imports + abs_generated + module_includes)
elem.add_item('RUNDIR', rundir)
elem.write(outfile)
self.check_outputs(elem)
elem = NinjaBuildElement(out_module_name,
'swift_COMPILER',
abssrc)
elem.add_dep(in_module_files + rel_generated)
elem.add_item('ARGS', compile_args + abs_generated + module_includes + swiftc.get_mod_gen_args())
elem.add_item('RUNDIR', rundir)
elem.write(outfile)
self.check_outputs(elem)
if isinstance(target, build.StaticLibrary):
elem = self.generate_link(target, outfile, self.get_target_filename(target),
rel_objects, self.build.static_linker)
elem.write(outfile)
elif isinstance(target, build.Executable):
elem = NinjaBuildElement(self.get_target_filename(target), 'swift_COMPILER', [])
elem.add_dep(rel_objects)
elem.add_dep(link_deps)
elem.add_item('ARGS', link_args + swiftc.get_std_exe_link_args() + objects + abs_link_deps)
elem.add_item('RUNDIR', rundir)
elem.write(outfile)
self.check_outputs(elem)
else:
raise MesonException('Swift supports only executable and static library targets.')
def generate_static_link_rules(self, is_cross, outfile):
if self.build.has_language('java'):
if not is_cross:
self.generate_java_link(outfile)
if is_cross:
if self.environment.cross_info.need_cross_compiler():
static_linker = self.build.static_cross_linker
else:
static_linker = self.build.static_linker
crstr = '_CROSS'
else:
static_linker = self.build.static_linker
crstr = ''
if static_linker is None:
return
rule = 'rule STATIC%s_LINKER\n' % crstr
if mesonlib.is_windows():
command_templ = ''' command = %s @$out.rsp
rspfile = $out.rsp
rspfile_content = $LINK_ARGS %s $in
'''
else:
command_templ = ' command = %s $LINK_ARGS %s $in\n'
command = command_templ %\
(' '.join(static_linker.get_exelist()),
' '.join(static_linker.get_output_args('$out')))
description = ' description = Static linking library $out\n\n'
outfile.write(rule)
outfile.write(command)
outfile.write(description)
def generate_dynamic_link_rules(self, outfile):
ctypes = [(self.build.compilers, False)]
if self.environment.is_cross_build():
if self.environment.cross_info.need_cross_compiler():
ctypes.append((self.build.cross_compilers, True))
else:
# Native compiler masquerades as the cross compiler.
ctypes.append((self.build.compilers, True))
else:
ctypes.append((self.build.cross_compilers, True))
for (complist, is_cross) in ctypes:
for compiler in complist:
langname = compiler.get_language()
if langname == 'java' or langname == 'vala' or\
langname == 'rust' or langname == 'cs':
continue
crstr = ''
cross_args = []
if is_cross:
crstr = '_CROSS'
try:
cross_args = self.environment.cross_info.config['properties'][langname + '_link_args']
except KeyError:
pass
rule = 'rule %s%s_LINKER\n' % (langname, crstr)
if mesonlib.is_windows():
command_template = ''' command = %s @$out.rsp
rspfile = $out.rsp
rspfile_content = %s $ARGS %s $in $LINK_ARGS $aliasing
'''
else:
command_template = ' command = %s %s $ARGS %s $in $LINK_ARGS $aliasing\n'
command = command_template % \
(' '.join(compiler.get_linker_exelist()),\
' '.join(cross_args),\
' '.join(compiler.get_linker_output_args('$out')))
description = ' description = Linking target $out'
outfile.write(rule)
outfile.write(command)
outfile.write(description)
outfile.write('\n')
scriptdir = self.environment.get_script_dir()
outfile.write('\n')
symrule = 'rule SHSYM\n'
symcmd = ' command = "%s" "%s" %s %s $CROSS\n' % (ninja_quote(sys.executable),
ninja_quote(os.path.join(scriptdir, 'symbolextractor.py')),
'$in', '$out')
synstat = ' restat = 1\n'
syndesc = ' description = Generating symbol file $out.\n'
outfile.write(symrule)
outfile.write(symcmd)
outfile.write(synstat)
outfile.write(syndesc)
outfile.write('\n')
def generate_java_compile_rule(self, compiler, outfile):
rule = 'rule %s_COMPILER\n' % compiler.get_language()
invoc = ' '.join([ninja_quote(i) for i in compiler.get_exelist()])
command = ' command = %s $ARGS $in\n' % invoc
description = ' description = Compiling Java object $in.\n'
outfile.write(rule)
outfile.write(command)
outfile.write(description)
outfile.write('\n')
def generate_cs_compile_rule(self, compiler, outfile):
rule = 'rule %s_COMPILER\n' % compiler.get_language()
invoc = ' '.join([ninja_quote(i) for i in compiler.get_exelist()])
command = ' command = %s $ARGS $in\n' % invoc
description = ' description = Compiling cs target $out.\n'
outfile.write(rule)
outfile.write(command)
outfile.write(description)
outfile.write('\n')
def generate_vala_compile_rules(self, compiler, outfile):
rule = 'rule %s_COMPILER\n' % compiler.get_language()
invoc = ' '.join([ninja_quote(i) for i in compiler.get_exelist()])
command = ' command = %s $ARGS $in\n' % invoc
description = ' description = Compiling Vala source $in.\n'
restat = ' restat = 1\n' # ValaC does this always to take advantage of it.
outfile.write(rule)
outfile.write(command)
outfile.write(description)
outfile.write(restat)
outfile.write('\n')
def generate_rust_compile_rules(self, compiler, outfile):
rule = 'rule %s_COMPILER\n' % compiler.get_language()
invoc = ' '.join([ninja_quote(i) for i in compiler.get_exelist()])
command = ' command = %s $ARGS $in\n' % invoc
description = ' description = Compiling Rust source $in.\n'
depfile = ' depfile = $targetdep\n'
depstyle = ' deps = gcc\n'
outfile.write(rule)
outfile.write(command)
outfile.write(description)
outfile.write(depfile)
outfile.write(depstyle)
outfile.write('\n')
def generate_swift_compile_rules(self, compiler, outfile):
rule = 'rule %s_COMPILER\n' % compiler.get_language()
full_exe = [sys.executable,
os.path.join(self.environment.get_script_dir(), 'dirchanger.py'),
'$RUNDIR'] + compiler.get_exelist()
invoc = ' '.join([ninja_quote(i) for i in full_exe])
command = ' command = %s $ARGS $in\n' % invoc
description = ' description = Compiling Swift source $in.\n'
outfile.write(rule)
outfile.write(command)
outfile.write(description)
outfile.write('\n')
def generate_fortran_dep_hack(self, outfile):
if mesonlib.is_windows():
cmd = 'cmd /C ""'
else:
cmd = 'true'
template = '''# Workaround for these issues:
# https://groups.google.com/forum/#!topic/ninja-build/j-2RfBIOd_8
# https://gcc.gnu.org/bugzilla/show_bug.cgi?id=47485
rule FORTRAN_DEP_HACK
command = %s
description = Dep hack
restat = 1
'''
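        # The rule body is a no-op; its only purpose is to give each Fortran
        # module file an explicit producer edge (with restat = 1), because the
        # compiler writes the .mod file as a side effect of building the object.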
outfile.write(template % cmd)
def generate_compile_rule_for(self, langname, compiler, qstr, is_cross, outfile):
if langname == 'java':
if not is_cross:
self.generate_java_compile_rule(compiler, outfile)
return
if langname == 'cs':
if not is_cross:
self.generate_cs_compile_rule(compiler, outfile)
return
if langname == 'vala':
if not is_cross:
self.generate_vala_compile_rules(compiler, outfile)
return
if langname == 'rust':
if not is_cross:
self.generate_rust_compile_rules(compiler, outfile)
return
if langname == 'swift':
if not is_cross:
self.generate_swift_compile_rules(compiler, outfile)
return
if langname == 'fortran':
self.generate_fortran_dep_hack(outfile)
if is_cross:
crstr = '_CROSS'
else:
crstr = ''
rule = 'rule %s%s_COMPILER\n' % (langname, crstr)
depargs = compiler.get_dependency_gen_args('$out', '$DEPFILE')
quoted_depargs = []
for d in depargs:
if d != '$out' and d != '$in':
d = qstr % d
quoted_depargs.append(d)
cross_args = []
if is_cross:
try:
cross_args = self.environment.cross_info.config['properties'][langname + '_args']
except KeyError:
pass
if mesonlib.is_windows():
command_template = ''' command = %s @$out.rsp
rspfile = $out.rsp
rspfile_content = %s $ARGS %s %s %s $in
'''
else:
command_template = ' command = %s %s $ARGS %s %s %s $in\n'
command = command_template % \
(' '.join(compiler.get_exelist()),\
' '.join(cross_args),
' '.join(quoted_depargs),\
' '.join(compiler.get_output_args('$out')),\
' '.join(compiler.get_compile_only_args()))
description = ' description = Compiling %s object $out\n' % langname
if compiler.get_id() == 'msvc':
deps = ' deps = msvc\n'
else:
deps = ' deps = gcc\n'
deps += ' depfile = $DEPFILE\n'
outfile.write(rule)
outfile.write(command)
outfile.write(deps)
outfile.write(description)
outfile.write('\n')
def generate_pch_rule_for(self, langname, compiler, qstr, is_cross, outfile):
if langname != 'c' and langname != 'cpp':
return
if is_cross:
crstr = '_CROSS'
else:
crstr = ''
rule = 'rule %s%s_PCH\n' % (langname, crstr)
depargs = compiler.get_dependency_gen_args('$out', '$DEPFILE')
cross_args = []
if is_cross:
try:
cross_args = self.environment.cross_info.config['properties'][langname + '_args']
except KeyError:
pass
quoted_depargs = []
for d in depargs:
if d != '$out' and d != '$in':
d = qstr % d
quoted_depargs.append(d)
if compiler.get_id() == 'msvc':
output = ''
else:
output = ' '.join(compiler.get_output_args('$out'))
command = " command = %s %s $ARGS %s %s %s $in\n" % \
(' '.join(compiler.get_exelist()),\
' '.join(cross_args),\
' '.join(quoted_depargs),\
output,\
' '.join(compiler.get_compile_only_args()))
description = ' description = Precompiling header %s\n' % '$in'
if compiler.get_id() == 'msvc':
deps = ' deps = msvc\n'
else:
deps = ' deps = gcc\n'
deps += ' depfile = $DEPFILE\n'
outfile.write(rule)
outfile.write(command)
outfile.write(deps)
outfile.write(description)
outfile.write('\n')
def generate_compile_rules(self, outfile):
qstr = quote_char + "%s" + quote_char
for compiler in self.build.compilers:
langname = compiler.get_language()
self.generate_compile_rule_for(langname, compiler, qstr, False, outfile)
self.generate_pch_rule_for(langname, compiler, qstr, False, outfile)
if self.environment.is_cross_build():
            # In case we are doing a target-only build, make the native compilers
# masquerade as cross compilers.
if self.environment.cross_info.need_cross_compiler():
cclist = self.build.cross_compilers
else:
cclist = self.build.compilers
for compiler in cclist:
langname = compiler.get_language()
self.generate_compile_rule_for(langname, compiler, qstr, True, outfile)
self.generate_pch_rule_for(langname, compiler, qstr, True, outfile)
outfile.write('\n')
def replace_outputs(self, args, private_dir, output_list):
newargs = []
        regex = re.compile(r'@OUTPUT(\d+)@')
for arg in args:
m = regex.search(arg)
while m is not None:
index = int(m.group(1))
src = '@OUTPUT%d@' % index
arg = arg.replace(src, os.path.join(private_dir, output_list[index]))
m = regex.search(arg)
newargs.append(arg)
return newargs
def generate_custom_generator_rules(self, target, outfile):
for genlist in target.get_generated_sources():
if isinstance(genlist, build.CustomTarget):
continue # Customtarget has already written its output rules
generator = genlist.get_generator()
exe = generator.get_exe()
exe_arr = self.exe_object_to_cmd_array(exe)
infilelist = genlist.get_infilelist()
outfilelist = genlist.get_outfilelist()
base_args = generator.get_arglist()
extra_dependencies = [os.path.join(self.build_to_src, i) for i in genlist.extra_depends]
for i in range(len(infilelist)):
if len(generator.outputs) == 1:
sole_output = os.path.join(self.get_target_private_dir(target), outfilelist[i])
else:
sole_output = ''
curfile = infilelist[i]
infilename = os.path.join(self.build_to_src, curfile)
outfiles = genlist.get_outputs_for(curfile)
outfiles = [os.path.join(self.get_target_private_dir(target), of) for of in outfiles]
args = [x.replace("@INPUT@", infilename).replace('@OUTPUT@', sole_output)\
for x in base_args]
args = self.replace_outputs(args, self.get_target_private_dir(target), outfilelist)
# We have consumed output files, so drop them from the list of remaining outputs.
if sole_output == '':
outfilelist = outfilelist[len(generator.outputs):]
relout = self.get_target_private_dir(target)
args = [x.replace("@SOURCE_DIR@", self.build_to_src).replace("@BUILD_DIR@", relout)
for x in args]
final_args = []
for a in args:
if a == '@EXTRA_ARGS@':
final_args += genlist.get_extra_args()
else:
final_args.append(a)
cmdlist = exe_arr + final_args
elem = NinjaBuildElement(outfiles, 'CUSTOM_COMMAND', infilename)
if len(extra_dependencies) > 0:
elem.add_dep(extra_dependencies)
elem.add_item('DESC', 'Generating $out')
if isinstance(exe, build.BuildTarget):
elem.add_dep(self.get_target_filename(exe))
elem.add_item('COMMAND', cmdlist)
elem.write(outfile)
self.check_outputs(elem)
def scan_fortran_module_outputs(self, target):
compiler = None
for c in self.build.compilers:
if c.get_language() == 'fortran':
compiler = c
break
if compiler is None:
self.fortran_deps[target.get_basename()] = {}
return
modre = re.compile(r"\s*module\s+(\w+)", re.IGNORECASE)
module_files = {}
for s in target.get_sources():
# FIXME, does not work for generated Fortran sources,
# but those are really rare. I hope.
if not compiler.can_compile(s):
continue
for line in open(os.path.join(self.environment.get_source_dir(), s.subdir, s.fname)):
modmatch = modre.match(line)
if modmatch is not None:
modname = modmatch.group(1)
if modname.lower() == 'procedure': # MODULE PROCEDURE construct
continue
if modname in module_files:
raise InvalidArguments('Namespace collision: module %s defined in two files %s and %s.' %
(modname, module_files[modname], s))
module_files[modname] = s
self.fortran_deps[target.get_basename()] = module_files
def get_fortran_deps(self, compiler, src, target):
mod_files = []
usere = re.compile(r"\s*use\s+(\w+)", re.IGNORECASE)
dirname = self.get_target_private_dir(target)
tdeps= self.fortran_deps[target.get_basename()]
for line in open(src):
usematch = usere.match(line)
if usematch is not None:
usename = usematch.group(1)
if usename not in tdeps:
# The module is not provided by any source file. This is due to
# a) missing file/typo/etc
# b) using a module provided by the compiler, such as OpenMP
# There's no easy way to tell which is which (that I know of)
# so just ignore this and go on. Ideally we would print a
                    # warning message to the user but this is a common occurrence,
# which would lead to lots of distracting noise.
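                    # Typical cases are intrinsic or compiler-provided modules
                    # such as iso_c_binding or omp_lib.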
continue
mod_source_file = tdeps[usename]
# Check if a source uses a module it exports itself.
# Potential bug if multiple targets have a file with
# the same name.
if mod_source_file.fname == os.path.split(src)[1]:
continue
mod_name = compiler.module_name_to_filename(usematch.group(1))
mod_files.append(os.path.join(dirname, mod_name))
return mod_files
def generate_single_compile(self, target, outfile, src, is_generated=False, header_deps=[], order_deps=[]):
        if isinstance(src, str) and src.endswith('.h'):
            raise RuntimeError('Tried to compile header file %s directly.' % src)
        if isinstance(src, RawFilename) and src.fname.endswith('.h'):
            raise RuntimeError('Tried to compile header file %s directly.' % src.fname)
extra_orderdeps = []
compiler = self.get_compiler_for_source(src)
commands = self.generate_basic_compiler_args(target, compiler)
commands += compiler.get_include_args(self.get_target_private_dir(target), False)
curdir = target.get_subdir()
tmppath = os.path.normpath(os.path.join(self.build_to_src, curdir))
commands += compiler.get_include_args(tmppath, False)
if curdir == '':
curdir = '.'
commands += compiler.get_include_args(curdir, False)
for d in target.external_deps:
if d.need_threads():
commands += compiler.thread_flags()
break
if isinstance(src, RawFilename):
rel_src = src.fname
elif is_generated:
if self.has_dir_part(src):
rel_src = src
else:
rel_src = os.path.join(self.get_target_private_dir(target), src)
abs_src = os.path.join(self.environment.get_source_dir(), rel_src)
else:
if isinstance(src, File):
rel_src = src.rel_to_builddir(self.build_to_src)
else:
raise build.InvalidArguments('Invalid source type.')
abs_src = os.path.join(self.environment.get_build_dir(), rel_src)
if isinstance(src, RawFilename):
src_filename = src.fname
elif isinstance(src, File):
src_filename = src.fname
elif os.path.isabs(src):
src_filename = os.path.basename(src)
else:
src_filename = src
obj_basename = src_filename.replace('/', '_').replace('\\', '_')
rel_obj = os.path.join(self.get_target_private_dir(target), obj_basename)
rel_obj += '.' + self.environment.get_object_suffix()
dep_file = compiler.depfile_for_object(rel_obj)
if self.environment.coredata.get_builtin_option('use_pch'):
pchlist = target.get_pch(compiler.language)
else:
pchlist = []
if len(pchlist) == 0:
pch_dep = []
else:
arr = []
i = os.path.join(self.get_target_private_dir(target), compiler.get_pch_name(pchlist[0]))
arr.append(i)
pch_dep = arr
for i in target.get_include_dirs():
basedir = i.get_curdir()
for d in i.get_incdirs():
expdir = os.path.join(basedir, d)
srctreedir = os.path.join(self.build_to_src, expdir)
bargs = compiler.get_include_args(expdir, i.is_system)
sargs = compiler.get_include_args(srctreedir, i.is_system)
commands += bargs
commands += sargs
for d in i.get_extra_build_dirs():
commands += compiler.get_include_args(d, i.is_system)
custom_target_include_dirs = []
for i in target.generated:
if isinstance(i, build.CustomTarget):
idir = self.get_target_dir(i)
if idir not in custom_target_include_dirs:
custom_target_include_dirs.append(idir)
for i in custom_target_include_dirs:
commands+= compiler.get_include_args(i, False)
if self.environment.coredata.get_builtin_option('use_pch'):
commands += self.get_pch_include_args(compiler, target)
crstr = ''
if target.is_cross:
crstr = '_CROSS'
compiler_name = '%s%s_COMPILER' % (compiler.get_language(), crstr)
extra_deps = []
if compiler.get_language() == 'fortran':
extra_deps += self.get_fortran_deps(compiler, abs_src, target)
# Dependency hack. Remove once multiple outputs in Ninja is fixed:
# https://groups.google.com/forum/#!topic/ninja-build/j-2RfBIOd_8
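            # Write a FORTRAN_DEP_HACK edge declaring every module file this
            # source provides as an output built from its object file, so the
            # .mod files have a producer in Ninja's dependency graph.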
for modname, srcfile in self.fortran_deps[target.get_basename()].items():
modfile = os.path.join(self.get_target_private_dir(target),
compiler.module_name_to_filename(modname))
if srcfile == src:
depelem = NinjaBuildElement(modfile, 'FORTRAN_DEP_HACK', rel_obj)
depelem.write(outfile)
self.check_outputs(depelem)
commands += compiler.get_module_outdir_args(self.get_target_private_dir(target))
element = NinjaBuildElement(rel_obj, compiler_name, rel_src)
for d in header_deps:
if isinstance(d, RawFilename):
d = d.fname
elif not self.has_dir_part(d):
d = os.path.join(self.get_target_private_dir(target), d)
element.add_dep(d)
for d in extra_deps:
element.add_dep(d)
for d in order_deps:
if isinstance(d, RawFilename):
d = d.fname
elif not self.has_dir_part(d):
d = os.path.join(self.get_target_private_dir(target), d)
element.add_orderdep(d)
element.add_orderdep(pch_dep)
element.add_orderdep(extra_orderdeps)
for i in self.get_fortran_orderdeps(target, compiler):
element.add_orderdep(i)
element.add_item('DEPFILE', dep_file)
element.add_item('ARGS', commands)
element.write(outfile)
self.check_outputs(element)
return rel_obj
def has_dir_part(self, fname):
return '/' in fname or '\\' in fname
# Fortran is a bit weird (again). When you link against a library, just compiling a source file
# requires the mod files that are output when single files are built. To do this right we would need to
    # scan all inputs and write out explicit deps for each file. That is too slow and too much effort so
    # instead just have an ordered dependency on the library. This ensures all required mod files are created.
# The real deps are then detected via dep file generation from the compiler. This breaks on compilers that
# produce incorrect dep files but such is life.
def get_fortran_orderdeps(self, target, compiler):
if compiler.language != 'fortran':
return []
return [os.path.join(self.get_target_dir(lt), lt.get_filename()) for lt in target.link_targets]
def generate_msvc_pch_command(self, target, compiler, pch):
if len(pch) != 2:
raise RuntimeError('MSVC requires one header and one source to produce precompiled headers.')
header = pch[0]
source = pch[1]
pchname = compiler.get_pch_name(header)
dst = os.path.join(self.get_target_private_dir(target), pchname)
commands = []
commands += self.generate_basic_compiler_args(target, compiler)
just_name = os.path.split(header)[1]
(objname, pch_args) = compiler.gen_pch_args(just_name, source, dst)
commands += pch_args
dep = dst + '.' + compiler.get_depfile_suffix()
return (commands, dep, dst, [objname])
def generate_gcc_pch_command(self, target, compiler, pch):
commands = []
commands += self.generate_basic_compiler_args(target, compiler)
dst = os.path.join(self.get_target_private_dir(target),
os.path.split(pch)[-1] + '.' + compiler.get_pch_suffix())
dep = dst + '.' + compiler.get_depfile_suffix()
return (commands, dep, dst, []) # Gcc does not create an object file during pch generation.
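    # Emit precompiled-header build elements for every language that declares a
    # pch. MSVC needs a header/source pair and produces an object file alongside
    # the pch; GCC-style compilers compile the header directly with no object.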
def generate_pch(self, target, outfile):
cstr = ''
pch_objects = []
if target.is_cross:
cstr = '_CROSS'
for lang in ['c', 'cpp']:
pch = target.get_pch(lang)
if len(pch) == 0:
continue
if '/' not in pch[0] or '/' not in pch[-1]:
raise build.InvalidArguments('Precompiled header of "%s" must not be in the same directory as source, please put it in a subdirectory.' % target.get_basename())
compiler = self.get_compiler_for_lang(lang)
if compiler.id == 'msvc':
src = os.path.join(self.build_to_src, target.get_source_subdir(), pch[-1])
(commands, dep, dst, objs) = self.generate_msvc_pch_command(target, compiler, pch)
extradep = os.path.join(self.build_to_src, target.get_source_subdir(), pch[0])
else:
src = os.path.join(self.build_to_src, target.get_source_subdir(), pch[0])
(commands, dep, dst, objs) = self.generate_gcc_pch_command(target, compiler, pch[0])
extradep = None
pch_objects += objs
rulename = compiler.get_language() + cstr + '_PCH'
elem = NinjaBuildElement(dst, rulename, src)
if extradep is not None:
elem.add_dep(extradep)
elem.add_item('ARGS', commands)
elem.add_item('DEPFILE', dep)
elem.write(outfile)
self.check_outputs(elem)
return pch_objects
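    # Run symbolextractor.py over a shared library to produce a .symbols file.
    # The SHSYM rule uses restat, so dependents can skip relinking when the
    # exported symbol list has not changed.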
def generate_shsym(self, outfile, target):
target_name = self.get_target_filename(target)
targetdir = self.get_target_private_dir(target)
symname = os.path.join(targetdir, target_name + '.symbols')
elem = NinjaBuildElement(symname, 'SHSYM', target_name)
if self.environment.is_cross_build() and self.environment.cross_info.need_cross_compiler():
elem.add_item('CROSS', '--cross-host=' + self.environment.cross_info.config['host_machine']['system'])
elem.write(outfile)
self.check_outputs(elem)
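    # Assemble the link command for a target: pick the (possibly cross) linker
    # rule, gather buildtype/option/soname/rpath/coverage arguments, and depend
    # on the .symbols files of linked shared libraries rather than on the
    # libraries themselves.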
def generate_link(self, target, outfile, outname, obj_list, linker, extra_args=[]):
if isinstance(target, build.StaticLibrary):
linker_base = 'STATIC'
else:
linker_base = linker.get_language() # Fixme.
if isinstance(target, build.SharedLibrary):
self.generate_shsym(outfile, target)
crstr = ''
if target.is_cross:
crstr = '_CROSS'
linker_rule = linker_base + crstr + '_LINKER'
abspath = os.path.join(self.environment.get_build_dir(), target.subdir)
commands = []
commands += linker.get_linker_always_args()
commands += linker.get_buildtype_linker_args(self.environment.coredata.get_builtin_option('buildtype'))
commands += linker.get_option_link_args(self.environment.coredata.compiler_options)
if not(isinstance(target, build.StaticLibrary)):
commands += self.environment.coredata.external_link_args[linker.get_language()]
if isinstance(target, build.Executable):
commands += linker.get_std_exe_link_args()
elif isinstance(target, build.SharedLibrary):
commands += linker.get_std_shared_lib_link_args()
commands += linker.get_pic_args()
if hasattr(target, 'soversion'):
soversion = target.soversion
else:
soversion = None
commands += linker.get_soname_args(target.name, abspath, soversion)
elif isinstance(target, build.StaticLibrary):
commands += linker.get_std_link_args()
else:
raise RuntimeError('Unknown build target type.')
# Link arguments of static libraries are not put in the command line of
# the library. They are instead appended to the command line where
# the static library is used.
if linker_base == 'STATIC':
dependencies = []
else:
dependencies = target.get_dependencies()
commands += self.build_target_link_arguments(linker, dependencies)
for d in target.external_deps:
if d.need_threads():
commands += linker.thread_link_flags()
if not isinstance(target, build.StaticLibrary):
commands += target.link_args
# External deps must be last because target link libraries may depend on them.
if not(isinstance(target, build.StaticLibrary)):
for dep in target.get_external_deps():
commands += dep.get_link_args()
for d in target.get_dependencies():
if isinstance(d, build.StaticLibrary):
for dep in d.get_external_deps():
commands += dep.get_link_args()
commands += linker.build_rpath_args(self.environment.get_build_dir(),\
self.determine_rpath_dirs(target), target.install_rpath)
if self.environment.coredata.get_builtin_option('coverage'):
commands += linker.get_coverage_link_args()
custom_target_libraries = self.get_custom_target_provided_libraries(target)
commands += extra_args
commands += custom_target_libraries
commands = linker.unixtype_flags_to_native(commands)
dep_targets = [self.get_dependency_filename(t) for t in dependencies]
dep_targets += [os.path.join(self.environment.source_dir,
target.subdir, t) for t in target.link_depends]
elem = NinjaBuildElement(outname, linker_rule, obj_list)
elem.add_dep(dep_targets + custom_target_libraries)
elem.add_item('LINK_ARGS', commands)
self.check_outputs(elem)
return elem
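    # Libraries produced by custom targets among this target's generated sources
    # must be added to the link line and to the link dependencies explicitly.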
def get_custom_target_provided_libraries(self, target):
libs = []
for t in target.get_generated_sources():
if not isinstance(t, build.CustomTarget):
continue
for f in t.output:
if self.environment.is_library(f):
libs.append(os.path.join(self.get_target_dir(t), f))
return libs
def determine_rpath_dirs(self, target):
link_deps = target.get_all_link_deps()
result = []
for ld in link_deps:
prospective = self.get_target_dir(ld)
if not prospective in result:
result.append(prospective)
return result
def get_dependency_filename(self, t):
if isinstance(t, build.SharedLibrary):
return os.path.join(self.get_target_private_dir(t), self.get_target_filename(t) + '.symbols')
return self.get_target_filename(t)
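    # Create the unversioned alias symlinks for a shared library in the build
    # tree; skipped on Windows, which has no usable symlinks.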
def generate_shlib_aliases(self, target, outdir):
basename = target.get_filename()
aliases = target.get_aliaslist()
if not mesonlib.is_windows():
for alias in aliases:
aliasfile = os.path.join(self.environment.get_build_dir(), outdir, alias)
try:
os.remove(aliasfile)
except Exception:
pass
os.symlink(basename, aliasfile)
else:
mlog.debug("Library versioning disabled because host does not support symlinks.")
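    # Helper targets that delete .gcno/.gcda coverage data via delwithsuffix.py;
    # the 'clean' target depends on them when coverage is enabled.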
def generate_gcov_clean(self, outfile):
gcno_elem = NinjaBuildElement('clean-gcno', 'CUSTOM_COMMAND', 'PHONY')
script_root = self.environment.get_script_dir()
clean_script = os.path.join(script_root, 'delwithsuffix.py')
gcno_elem.add_item('COMMAND', [sys.executable, clean_script, '.', 'gcno'])
gcno_elem.add_item('description', 'Deleting gcno files')
gcno_elem.write(outfile)
self.check_outputs(gcno_elem)
gcda_elem = NinjaBuildElement('clean-gcda', 'CUSTOM_COMMAND', 'PHONY')
script_root = self.environment.get_script_dir()
clean_script = os.path.join(script_root, 'delwithsuffix.py')
gcda_elem.add_item('COMMAND', [sys.executable, clean_script, '.', 'gcda'])
gcda_elem.add_item('description', 'Deleting gcda files')
gcda_elem.write(outfile)
self.check_outputs(gcda_elem)
def is_compilable_file(self, filename):
        return filename.endswith(('.cpp', '.c', '.cxx', '.cc', '.C'))
def process_dep_gens(self, outfile, target):
src_deps = []
other_deps = []
for rule in self.dep_rules.values():
srcs = target.get_original_kwargs().get(rule.src_keyword, [])
if isinstance(srcs, str):
srcs = [srcs]
for src in srcs:
plainname = os.path.split(src)[1]
basename = plainname.split('.')[0]
outname = rule.name_templ.replace('@BASENAME@', basename).replace('@PLAINNAME@', plainname)
outfilename = os.path.join(self.get_target_private_dir(target), outname)
infilename = os.path.join(self.build_to_src, target.get_source_subdir(), src)
elem = NinjaBuildElement(outfilename, rule.name, infilename)
elem.write(outfile)
self.check_outputs(elem)
if self.is_compilable_file(outfilename):
src_deps.append(outfilename)
else:
other_deps.append(outfilename)
return (src_deps, other_deps)
def generate_ending(self, outfile):
targetlist = [self.get_target_filename(t) for t in self.build.get_targets().values()\
if not isinstance(t, build.RunTarget)]
elem = NinjaBuildElement('all', 'phony', targetlist)
elem.write(outfile)
self.check_outputs(elem)
default = 'default all\n\n'
outfile.write(default)
ninja_command = environment.detect_ninja()
if ninja_command is None:
raise MesonException('Could not detect ninja command')
elem = NinjaBuildElement('clean', 'CUSTOM_COMMAND', 'PHONY')
elem.add_item('COMMAND', [ninja_command, '-t', 'clean'])
elem.add_item('description', 'Cleaning')
if self.environment.coredata.get_builtin_option('coverage'):
self.generate_gcov_clean(outfile)
elem.add_dep('clean-gcda')
elem.add_dep('clean-gcno')
elem.write(outfile)
self.check_outputs(elem)
deps = self.get_regen_filelist()
elem = NinjaBuildElement('build.ninja', 'REGENERATE_BUILD', deps)
elem.add_item('pool', 'console')
elem.write(outfile)
elem = NinjaBuildElement(deps, 'phony', '')
elem.write(outfile)
self.check_outputs(elem)
# Copyright 2012-2014 The Meson development team
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import backends
import environment, mesonlib
import build
import mlog
import dependencies
from mesonlib import File
from meson_install import InstallData
from build import InvalidArguments
from coredata import MesonException
import os, sys, pickle, re
import subprocess, shutil
if mesonlib.is_windows():
quote_char = '"'
execute_wrapper = 'cmd /c'
else:
quote_char = "'"
execute_wrapper = ''
def ninja_quote(text):
return text.replace(' ', '$ ').replace(':', '$:')
class RawFilename():
def __init__(self, fname):
self.fname = fname
def split(self, c):
return self.fname.split(c)
def startswith(self, s):
return self.fname.startswith(s)
class NinjaBuildElement():
def __init__(self, outfilenames, rule, infilenames):
if isinstance(outfilenames, str):
self.outfilenames = [outfilenames]
else:
self.outfilenames = outfilenames
assert(isinstance(rule, str))
self.rule = rule
if isinstance(infilenames, str):
self.infilenames = [infilenames]
else:
self.infilenames = infilenames
self.deps = []
self.orderdeps = []
self.elems = []
def add_dep(self, dep):
if isinstance(dep, list):
self.deps += dep
else:
self.deps.append(dep)
def add_orderdep(self, dep):
if isinstance(dep, list):
self.orderdeps += dep
else:
self.orderdeps.append(dep)
def add_item(self, name, elems):
if isinstance(elems, str):
elems = [elems]
self.elems.append((name, elems))
def write(self, outfile):
line = 'build %s: %s %s' % (' '.join([ninja_quote(i) for i in self.outfilenames]),\
self.rule,
' '.join([ninja_quote(i) for i in self.infilenames]))
if len(self.deps) > 0:
line += ' | ' + ' '.join([ninja_quote(x) for x in self.deps])
if len(self.orderdeps) > 0:
line += ' || ' + ' '.join([ninja_quote(x) for x in self.orderdeps])
line += '\n'
# This is the only way I could find to make this work on all
# platforms including Windows command shell. Slash is a dir separator
# on Windows, too, so all characters are unambiguous and, more importantly,
# do not require quoting.
line = line.replace('\\', '/')
outfile.write(line)
for e in self.elems:
(name, elems) = e
should_quote = True
if name == 'DEPFILE' or name == 'DESC' or name == 'pool':
should_quote = False
line = ' %s = ' % name
q_templ = quote_char + "%s" + quote_char
noq_templ = "%s"
newelems = []
for i in elems:
                if not should_quote or i == '&&': # '&&' is a command separator and must not be quoted
templ = noq_templ
else:
templ = q_templ
i = i.replace('\\', '\\\\')
if quote_char == '"':
i = i.replace('"', '\\"')
newelems.append(templ % ninja_quote(i))
line += ' '.join(newelems)
line += '\n'
outfile.write(line)
outfile.write('\n')
class NinjaBackend(backends.Backend):
def __init__(self, build):
super().__init__(build)
self.source_suffix_in_objs = True
self.ninja_filename = 'build.ninja'
self.fortran_deps = {}
self.all_outputs = {}
def check_outputs(self, elem):
for n in elem.outfilenames:
if n in self.all_outputs:
raise MesonException('Multiple producers for Ninja target "%s". Please rename your targets.' % n)
self.all_outputs[n] = True
def detect_vs_dep_prefix(self, outfile, tempfilename):
        '''MSVC writes its dependency output in a locale-dependent format.
        Detect the search prefix to use.'''
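        # Compile a throwaway file with /showIncludes and look for the line that
        # pulls in stdio.h; the text up to the second colon is the localized
        # prefix, which is written to build.ninja as msvc_deps_prefix.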
if shutil.which('cl') is None:
return outfile
outfile.close()
open(os.path.join(self.environment.get_scratch_dir(), 'incdetect.c'),
'w').write('''#include<stdio.h>
int dummy;
''')
pc = subprocess.Popen(['cl', '/showIncludes', '/c', 'incdetect.c'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
cwd=self.environment.get_scratch_dir())
(stdo, _) = pc.communicate()
for line in stdo.split(b'\r\n'):
if line.endswith(b'stdio.h'):
matchstr = b':'.join(line.split(b':')[0:2]) + b':'
binfile = open(tempfilename, 'ab')
binfile.write(b'msvc_deps_prefix = ' + matchstr + b'\r\n')
binfile.close()
return open(tempfilename, 'a')
        raise MesonException('Could not determine VS dependency prefix string.')
def generate(self, interp):
self.interpreter = interp
outfilename = os.path.join(self.environment.get_build_dir(), self.ninja_filename)
tempfilename = outfilename + '~'
outfile = open(tempfilename, 'w')
outfile.write('# This is the build file for project "%s"\n' % self.build.get_project())
outfile.write('# It is autogenerated by the Meson build system.\n')
outfile.write('# Do not edit by hand.\n\n')
outfile.write('ninja_required_version = 1.5.1\n\n')
outfile = self.detect_vs_dep_prefix(outfile, tempfilename)
self.generate_rules(outfile)
self.generate_phony(outfile)
outfile.write('# Build rules for targets\n\n')
[self.generate_target(t, outfile) for t in self.build.get_targets().values()]
if len(self.build.pot) > 0:
outfile.write('# Build rules for localisation.\n\n')
self.generate_po(outfile)
outfile.write('# Test rules\n\n')
self.generate_tests(outfile)
outfile.write('# Install rules\n\n')
self.generate_install(outfile)
if self.environment.coredata.get_builtin_option('coverage'):
outfile.write('# Coverage rules\n\n')
self.generate_coverage_rules(outfile)
outfile.write('# Suffix\n\n')
self.generate_ending(outfile)
        # Only overwrite the old build file after the new one has been
# fully created.
outfile.close()
os.replace(tempfilename, outfilename)
self.generate_compdb()
# http://clang.llvm.org/docs/JSONCompilationDatabase.html
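    # Ask Ninja's compdb tool to dump compile_commands.json for the C/C++ rules.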
def generate_compdb(self):
ninja_exe = environment.detect_ninja()
builddir = self.environment.get_build_dir()
jsondb = subprocess.check_output([ninja_exe, '-t', 'compdb', 'c_COMPILER', 'cpp_COMPILER'], cwd=builddir)
open(os.path.join(builddir, 'compile_commands.json'), 'wb').write(jsondb)
# Get all generated headers. Any source file might need them so
# we need to add an order dependency to them.
def get_generated_headers(self, target):
header_deps = []
for gensource in target.get_generated_sources():
if isinstance(gensource, build.CustomTarget):
continue
for src in gensource.get_outfilelist():
if self.environment.is_header(src):
header_deps.append(os.path.join(self.get_target_private_dir(target), src))
for dep in target.link_targets:
if isinstance(dep, (build.StaticLibrary, build.SharedLibrary)):
header_deps += self.get_generated_headers(dep)
return header_deps
def generate_target(self, target, outfile):
if isinstance(target, build.CustomTarget):
self.generate_custom_target(target, outfile)
if isinstance(target, build.RunTarget):
self.generate_run_target(target, outfile)
name = target.get_id()
gen_src_deps = []
if name in self.processed_targets:
return
if isinstance(target, build.Jar):
self.generate_jar_target(target, outfile)
return
if 'rust' in self.environment.coredata.compilers.keys() and self.has_rust(target):
self.generate_rust_target(target, outfile)
return
if 'cs' in self.environment.coredata.compilers.keys() and self.has_cs(target):
self.generate_cs_target(target, outfile)
return
if 'vala' in self.environment.coredata.compilers.keys() and self.has_vala(target):
gen_src_deps += self.generate_vala_compile(target, outfile)
if 'swift' in self.environment.coredata.compilers.keys() and self.has_swift(target):
self.generate_swift_target(target, outfile)
return
self.scan_fortran_module_outputs(target)
# The following deals with C/C++ compilation.
(gen_src, gen_other_deps) = self.process_dep_gens(outfile, target)
gen_src_deps += gen_src
self.process_target_dependencies(target, outfile)
self.generate_custom_generator_rules(target, outfile)
outname = self.get_target_filename(target)
obj_list = []
use_pch = self.environment.coredata.get_builtin_option('use_pch')
is_unity = self.environment.coredata.get_builtin_option('unity')
if use_pch and target.has_pch():
pch_objects = self.generate_pch(target, outfile)
else:
pch_objects = []
header_deps = gen_other_deps
unity_src = []
unity_deps = [] # Generated sources that must be built before compiling a Unity target.
header_deps += self.get_generated_headers(target)
for gensource in target.get_generated_sources():
if isinstance(gensource, build.CustomTarget):
for src in gensource.output:
src = os.path.join(self.get_target_dir(gensource), src)
if self.environment.is_source(src) and not self.environment.is_header(src):
if is_unity:
unity_deps.append(os.path.join(self.environment.get_build_dir(), RawFilename(src)))
else:
obj_list.append(self.generate_single_compile(target, outfile, RawFilename(src), True,
header_deps))
elif self.environment.is_object(src):
obj_list.append(src)
elif self.environment.is_library(src):
pass
else:
# Assume anything not specifically a source file is a header. This is because
# people generate files with weird suffixes (.inc, .fh) that they then include
# in their source files.
header_deps.append(RawFilename(src))
else:
for src in gensource.get_outfilelist():
if self.environment.is_object(src):
obj_list.append(os.path.join(self.get_target_private_dir(target), src))
elif not self.environment.is_header(src):
if is_unity:
if self.has_dir_part(src):
rel_src = src
else:
rel_src = os.path.join(self.get_target_private_dir(target), src)
unity_deps.append(rel_src)
abs_src = os.path.join(self.environment.get_build_dir(), rel_src)
unity_src.append(abs_src)
else:
obj_list.append(self.generate_single_compile(target, outfile, src, True,
header_deps=header_deps))
src_list = []
for src in gen_src_deps:
src_list.append(src)
if is_unity:
unity_src.append(os.path.join(self.environment.get_build_dir(), src))
header_deps.append(src)
else:
                # Generated targets are ordered deps because they must exist
# before the sources compiling them are used. After the first
# compile we get precise dependency info from dep files.
# This should work in all cases. If it does not, then just
# move them from orderdeps to proper deps.
if self.environment.is_header(src):
header_deps.append(src)
else:
obj_list.append(self.generate_single_compile(target, outfile, src, True, [], header_deps))
for src in target.get_sources():
if src.endswith('.vala'):
continue
if not self.environment.is_header(src):
src_list.append(src)
if is_unity:
abs_src = os.path.join(self.environment.get_build_dir(),
src.rel_to_builddir(self.build_to_src))
unity_src.append(abs_src)
else:
obj_list.append(self.generate_single_compile(target, outfile, src, False, [], header_deps))
obj_list += self.flatten_object_list(target)
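        # In unity mode the collected sources are concatenated into unity source
        # files and only those are compiled below.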
if is_unity:
for src in self.generate_unity_files(target, unity_src):
obj_list.append(self.generate_single_compile(target, outfile, src, True, unity_deps + header_deps))
linker = self.determine_linker(target, src_list)
elem = self.generate_link(target, outfile, outname, obj_list, linker, pch_objects)
self.generate_shlib_aliases(target, self.get_target_dir(target))
elem.write(outfile)
self.processed_targets[name] = True
def process_target_dependencies(self, target, outfile):
for t in target.get_dependencies():
tname = t.get_basename() + t.type_suffix()
if not tname in self.processed_targets:
self.generate_target(t, outfile)
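    # Emit a CUSTOM_COMMAND build statement for a custom_target(), depending on
    # its sources, the outputs of referenced targets, depend_files and
    # extra_depends, plus PHONY when build_always is set.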
def generate_custom_target(self, target, outfile):
(srcs, ofilenames, cmd) = self.eval_custom_target_command(target)
deps = []
for i in target.get_dependencies():
# FIXME, should not grab element at zero but rather expand all.
if isinstance(i, list):
i = i[0]
fname = i.get_filename()
if isinstance(fname, list):
fname = fname[0]
deps.append(os.path.join(self.get_target_dir(i), fname))
if target.build_always:
deps.append('PHONY')
elem = NinjaBuildElement(ofilenames, 'CUSTOM_COMMAND', srcs)
for i in target.depend_files:
if isinstance(i, mesonlib.File):
deps.append(i.rel_to_builddir(self.build_to_src))
else:
deps.append(os.path.join(self.build_to_src, i))
elem.add_dep(deps)
for d in target.extra_depends:
tmp = d.get_filename()
if not isinstance(tmp, list):
tmp = [tmp]
for fname in tmp:
elem.add_dep(os.path.join(self.get_target_dir(d), fname))
elem.add_item('COMMAND', cmd)
elem.add_item('description', 'Generating %s with a custom command.' % target.name)
elem.write(outfile)
self.check_outputs(elem)
self.processed_targets[target.name + target.type_suffix()] = True
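    # run_target() commands are executed through commandrunner.py so they run
    # from the correct directory; executables built by this project become
    # dependencies and go through the exe wrapper when cross compiling.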
def generate_run_target(self, target, outfile):
runnerscript = os.path.join(self.environment.get_script_dir(), 'commandrunner.py')
deps = []
arg_strings = []
for i in target.args:
if isinstance(i, str):
arg_strings.append(i)
elif isinstance(i, (build.BuildTarget, build.CustomTarget)):
relfname = self.get_target_filename(i)
deps.append(relfname)
arg_strings.append(os.path.join(self.environment.get_build_dir(), relfname))
else:
mlog.debug(str(i))
raise MesonException('Unreachable code in generate_run_target.')
elem = NinjaBuildElement(target.name, 'CUSTOM_COMMAND', deps)
cmd = [sys.executable, runnerscript, self.environment.get_source_dir(), self.environment.get_build_dir(), target.subdir]
texe = target.command
try:
texe = texe.held_object
except AttributeError:
pass
if isinstance(texe, build.Executable):
abs_exe = os.path.join(self.environment.get_build_dir(), self.get_target_filename(texe))
deps.append(self.get_target_filename(texe))
if self.environment.is_cross_build() \
and self.environment.cross_info.config['binaries'].get('exe_wrapper', None) is not None:
cmd += [self.environment.cross_info.config['binaries']['exe_wrapper']]
cmd.append(abs_exe)
else:
cmd.append(target.command)
cmd += arg_strings
elem.add_item('COMMAND', cmd)
elem.add_item('description', 'Running external command %s.' % target.name)
elem.add_item('pool', 'console')
elem.write(outfile)
self.check_outputs(elem)
self.processed_targets[target.name + target.type_suffix()] = True
def generate_po(self, outfile):
for p in self.build.pot:
(packagename, languages, subdir) = p
input_file = os.path.join(subdir, 'POTFILES')
elem = NinjaBuildElement('pot', 'GEN_POT', [])
elem.add_item('PACKAGENAME', packagename)
elem.add_item('OUTFILE', packagename + '.pot')
elem.add_item('FILELIST', os.path.join(self.environment.get_source_dir(), input_file))
elem.add_item('OUTDIR', os.path.join(self.environment.get_source_dir(), subdir))
elem.write(outfile)
self.check_outputs(elem)
for l in languages:
infile = os.path.join(self.environment.get_source_dir(), subdir, l + '.po')
outfilename = os.path.join(subdir, l + '.gmo')
lelem = NinjaBuildElement(outfilename, 'GEN_GMO', infile)
lelem.add_item('INFILE', infile)
lelem.add_item('OUTFILE', outfilename)
lelem.write(outfile)
self.check_outputs(lelem)
def generate_coverage_rules(self, outfile):
(gcovr_exe, lcov_exe, genhtml_exe) = environment.find_coverage_tools()
added_rule = False
if gcovr_exe:
added_rule = True
elem = NinjaBuildElement('coverage-xml', 'CUSTOM_COMMAND', '')
elem.add_item('COMMAND', [gcovr_exe, '-x', '-r', self.environment.get_build_dir(),\
'-o', os.path.join(self.environment.get_log_dir(), 'coverage.xml')])
elem.add_item('DESC', 'Generating XML coverage report.')
elem.write(outfile)
elem = NinjaBuildElement('coverage-text', 'CUSTOM_COMMAND', '')
elem.add_item('COMMAND', [gcovr_exe, '-r', self.environment.get_build_dir(),\
'-o', os.path.join(self.environment.get_log_dir(), 'coverage.txt')])
elem.add_item('DESC', 'Generating text coverage report.')
elem.write(outfile)
self.check_outputs(elem)
if lcov_exe and genhtml_exe:
added_rule = True
phony_elem = NinjaBuildElement('coverage-html', 'phony', 'coveragereport/index.html')
phony_elem.write(outfile)
elem = NinjaBuildElement('coveragereport/index.html', 'CUSTOM_COMMAND', '')
command = [lcov_exe, '--directory', self.environment.get_build_dir(),\
'--capture', '--output-file', 'coverage.info', '--no-checksum',\
'&&', genhtml_exe, '--prefix', self.environment.get_build_dir(),\
'--output-directory', self.environment.get_log_dir(), '--title', 'Code coverage',\
'--legend', '--show-details', 'coverage.info']
elem.add_item('COMMAND', command)
elem.add_item('DESC', 'Generating HTML coverage report.')
self.check_outputs(elem)
elem.write(outfile)
if not added_rule:
mlog.log(mlog.red('Warning:'), 'coverage requested but neither gcovr nor lcov/genhtml found.')
def generate_install(self, outfile):
script_root = self.environment.get_script_dir()
install_script = os.path.join(script_root, 'meson_install.py')
install_data_file = os.path.join(self.environment.get_scratch_dir(), 'install.dat')
depfixer = os.path.join(script_root, 'depfixer.py')
d = InstallData(self.environment.get_source_dir(),
self.environment.get_build_dir(),
self.environment.get_prefix(), depfixer)
elem = NinjaBuildElement('install', 'CUSTOM_COMMAND', 'PHONY')
elem.add_dep('all')
elem.add_item('DESC', 'Installing files.')
elem.add_item('COMMAND', [sys.executable, install_script, install_data_file])
elem.add_item('pool', 'console')
self.generate_depmf_install(d)
self.generate_target_install(d)
self.generate_header_install(d)
self.generate_man_install(d)
self.generate_data_install(d)
self.generate_po_install(d, elem)
self.generate_custom_install_script(d)
self.generate_subdir_install(d)
elem.write(outfile)
self.check_outputs(elem)
ofile = open(install_data_file, 'wb')
pickle.dump(d, ofile)
def generate_po_install(self, d, elem):
for p in self.build.pot:
(package_name, languages, subdir) = p
# FIXME: assumes only one po package per source
d.po_package_name = package_name
for lang in languages:
rel_src = os.path.join(subdir, lang + '.gmo')
src_file = os.path.join(self.environment.get_build_dir(), rel_src)
d.po.append((src_file, self.environment.coredata.get_builtin_option('localedir'), lang))
elem.add_dep(rel_src)
def generate_target_install(self, d):
libdir = self.environment.get_libdir()
bindir = self.environment.get_bindir()
should_strip = self.environment.coredata.get_builtin_option('strip')
for t in self.build.get_targets().values():
if t.should_install():
outdir = t.get_custom_install_dir()
if outdir is None:
if isinstance(t, build.Executable):
outdir = bindir
else:
outdir = libdir
i = [self.get_target_filename(t), outdir, t.get_aliaslist(),\
should_strip, t.install_rpath]
d.targets.append(i)
def generate_custom_install_script(self, d):
d.install_scripts = self.build.install_scripts
def generate_header_install(self, d):
incroot = self.environment.get_includedir()
headers = self.build.get_headers()
for h in headers:
outdir = h.get_custom_install_dir()
if outdir is None:
outdir = os.path.join(incroot, h.get_install_subdir())
for f in h.get_sources():
abspath = os.path.join(self.environment.get_source_dir(), h.get_source_subdir(), f)
i = [abspath, outdir]
d.headers.append(i)
def generate_man_install(self, d):
manroot = self.environment.get_mandir()
man = self.build.get_man()
for m in man:
for f in m.get_sources():
num = f.split('.')[-1]
subdir = m.get_custom_install_dir()
if subdir is None:
subdir = os.path.join(manroot, 'man' + num)
srcabs = os.path.join(self.environment.get_source_dir(), m.get_source_subdir(), f)
dstabs = os.path.join(subdir, f + '.gz')
i = [srcabs, dstabs]
d.man.append(i)
def generate_data_install(self, d):
data = self.build.get_data()
for de in data:
assert(isinstance(de, build.Data))
subdir = de.install_dir
for f in de.sources:
if de.in_sourcetree:
srcprefix = self.environment.get_source_dir()
else:
srcprefix = self.environment.get_build_dir()
srcabs = os.path.join(srcprefix, de.source_subdir, f)
dstabs = os.path.join(subdir, f)
i = [srcabs, dstabs]
d.data.append(i)
def generate_subdir_install(self, d):
for sd in self.build.get_install_subdirs():
src_dir = os.path.join(self.environment.get_source_dir(), sd.source_subdir, sd.installable_subdir)
dst_dir = os.path.join(self.environment.get_prefix(), sd.install_dir)
d.install_subdirs.append([src_dir, dst_dir])
def write_test_suite_targets(self, cmd, outfile):
suites = {}
for t in self.build.get_tests():
for s in t.suite:
suites[s] = True
suites = list(suites.keys())
suites.sort()
for s in suites:
if s == '':
visible_name = 'for top level tests'
else:
visible_name = s
elem = NinjaBuildElement('test-' + s, 'CUSTOM_COMMAND', ['all', 'PHONY'])
elem.add_item('COMMAND', cmd + ['--suite=' + s])
elem.add_item('DESC', 'Running test suite %s.' % visible_name)
elem.add_item('pool', 'console')
elem.write(outfile)
self.check_outputs(elem)
def generate_tests(self, outfile):
self.serialise_tests()
valgrind = environment.find_valgrind()
script_root = self.environment.get_script_dir()
test_script = os.path.join(script_root, 'meson_test.py')
test_data = os.path.join(self.environment.get_scratch_dir(), 'meson_test_setup.dat')
cmd = [sys.executable, test_script, test_data]
elem = NinjaBuildElement('test', 'CUSTOM_COMMAND', ['all', 'PHONY'])
elem.add_item('COMMAND', cmd)
elem.add_item('DESC', 'Running all tests.')
elem.add_item('pool', 'console')
elem.write(outfile)
self.check_outputs(elem)
self.write_test_suite_targets(cmd, outfile)
if valgrind:
velem = NinjaBuildElement('test-valgrind', 'CUSTOM_COMMAND', ['all', 'PHONY'])
velem.add_item('COMMAND', cmd + ['--wrapper=' + valgrind])
velem.add_item('DESC', 'Running test suite under Valgrind.')
velem.add_item('pool', 'console')
velem.write(outfile)
self.check_outputs(velem)
# And then benchmarks.
benchmark_script = os.path.join(script_root, 'meson_benchmark.py')
benchmark_data = os.path.join(self.environment.get_scratch_dir(), 'meson_benchmark_setup.dat')
cmd = [sys.executable, benchmark_script, benchmark_data]
elem = NinjaBuildElement('benchmark', 'CUSTOM_COMMAND', ['all', 'PHONY'])
elem.add_item('COMMAND', cmd)
elem.add_item('DESC', 'Running benchmark suite.')
elem.add_item('pool', 'console')
elem.write(outfile)
self.check_outputs(elem)
def generate_rules(self, outfile):
outfile.write('# Rules for compiling.\n\n')
self.generate_compile_rules(outfile)
outfile.write('# Rules for linking.\n\n')
if self.environment.is_cross_build():
self.generate_static_link_rules(True, outfile)
self.generate_static_link_rules(False, outfile)
self.generate_dynamic_link_rules(outfile)
outfile.write('# Other rules\n\n')
outfile.write('rule CUSTOM_COMMAND\n')
outfile.write(' command = $COMMAND\n')
outfile.write(' description = $DESC\n')
outfile.write(' restat = 1\n\n')
outfile.write('rule REGENERATE_BUILD\n')
c = (quote_char + ninja_quote(sys.executable) + quote_char,
quote_char + ninja_quote(self.environment.get_build_command()) + quote_char,
quote_char + ninja_quote(self.environment.get_source_dir()) + quote_char,
quote_char + ninja_quote(self.environment.get_build_dir()) + quote_char)
outfile.write(" command = %s %s %s %s --backend ninja secret-handshake\n" % c)
outfile.write(' description = Regenerating build files\n')
outfile.write(' generator = 1\n\n')
if len(self.build.pot) > 0:
self.generate_gettext_rules(outfile)
outfile.write('\n')
def generate_gettext_rules(self, outfile):
rule = 'rule GEN_POT\n'
command = " command = xgettext --package-name=$PACKAGENAME -p $OUTDIR -f $FILELIST -D '%s' -k_ -o $OUTFILE\n" % \
self.environment.get_source_dir()
desc = " description = Creating pot file for package $PACKAGENAME.\n"
outfile.write(rule)
outfile.write(command)
outfile.write(desc)
outfile.write('\n')
rule = 'rule GEN_GMO\n'
command = ' command = msgfmt $INFILE -o $OUTFILE\n'
desc = ' description = Generating gmo file $OUTFILE\n'
outfile.write(rule)
outfile.write(command)
outfile.write(desc)
outfile.write('\n')
def generate_phony(self, outfile):
outfile.write('# Phony build target, always out of date\n')
outfile.write('build PHONY: phony\n')
outfile.write('\n')
def generate_jar_target(self, target, outfile):
fname = target.get_filename()
subdir = target.get_subdir()
outname_rel = os.path.join(self.get_target_dir(target), fname)
src_list = target.get_sources()
class_list = []
compiler = self.get_compiler_for_source(src_list[0])
assert(compiler.get_language() == 'java')
c = 'c'
m = ''
e = ''
f = 'f'
main_class = target.get_main_class()
if main_class != '':
e = 'e'
for src in src_list:
plain_class_path = self.generate_single_java_compile(src, target, compiler, outfile)
class_list.append(plain_class_path)
class_dep_list = [os.path.join(self.get_target_private_dir(target), i) for i in class_list]
jar_rule = 'java_LINKER'
commands = [c+m+e+f]
if e != '':
commands.append(main_class)
commands.append(self.get_target_filename(target))
for cls in class_list:
commands += ['-C', self.get_target_private_dir(target), cls]
elem = NinjaBuildElement(outname_rel, jar_rule, [])
elem.add_dep(class_dep_list)
elem.add_item('ARGS', commands)
elem.write(outfile)
self.check_outputs(elem)
def generate_cs_resource_tasks(self, target, outfile):
args = []
deps = []
for r in target.resources:
rel_sourcefile = os.path.join(self.build_to_src, target.subdir, r)
if r.endswith('.resources'):
a = '-resource:' + rel_sourcefile
elif r.endswith('.txt') or r.endswith('.resx'):
ofilebase = os.path.splitext(os.path.basename(r))[0] + '.resources'
ofilename = os.path.join(self.get_target_private_dir(target), ofilebase)
elem = NinjaBuildElement(ofilename, "CUSTOM_COMMAND", rel_sourcefile)
elem.add_item('COMMAND', ['resgen', rel_sourcefile, ofilename])
elem.add_item('DESC', 'Compiling resource %s.' % rel_sourcefile)
elem.write(outfile)
self.check_outputs(elem)
deps.append(ofilename)
a = '-resource:' + ofilename
else:
raise InvalidArguments('Unknown resource file %s.' % r)
args.append(a)
return (args, deps)
def generate_cs_target(self, target, outfile):
buildtype = self.environment.coredata.get_builtin_option('buildtype')
fname = target.get_filename()
outname_rel = os.path.join(self.get_target_dir(target), fname)
src_list = target.get_sources()
compiler = self.get_compiler_for_source(src_list[0])
assert(compiler.get_language() == 'cs')
rel_srcs = [s.rel_to_builddir(self.build_to_src) for s in src_list]
deps = []
commands = target.extra_args.get('cs', [])
commands += compiler.get_buildtype_args(buildtype)
if isinstance(target, build.Executable):
commands.append('-target:exe')
elif isinstance(target, build.SharedLibrary):
commands.append('-target:library')
else:
raise MesonException('Unknown C# target type.')
(resource_args, resource_deps) = self.generate_cs_resource_tasks(target, outfile)
commands += resource_args
deps += resource_deps
commands += compiler.get_output_args(outname_rel)
for l in target.link_targets:
lname = os.path.join(self.get_target_dir(l), l.get_filename())
commands += compiler.get_link_args(lname)
deps.append(lname)
if '-g' in commands:
outputs = [outname_rel, outname_rel + '.mdb']
else:
outputs = [outname_rel]
elem = NinjaBuildElement(outputs, 'cs_COMPILER', rel_srcs)
elem.add_dep(deps)
elem.add_item('ARGS', commands)
self.check_outputs(elem)
elem.write(outfile)
def generate_single_java_compile(self, src, target, compiler, outfile):
args = []
args += compiler.get_buildtype_args(self.environment.coredata.get_builtin_option('buildtype'))
args += compiler.get_output_args(self.get_target_private_dir(target))
for i in target.include_dirs:
for idir in i.get_incdirs():
args += ['-sourcepath', os.path.join(self.build_to_src, i.curdir, idir)]
rel_src = src.rel_to_builddir(self.build_to_src)
plain_class_path = src.fname[:-4] + 'class'
rel_obj = os.path.join(self.get_target_private_dir(target), plain_class_path)
element = NinjaBuildElement(rel_obj, compiler.get_language() + '_COMPILER', rel_src)
element.add_item('ARGS', args)
element.write(outfile)
self.check_outputs(element)
return plain_class_path
def generate_java_link(self, outfile):
rule = 'rule java_LINKER\n'
command = ' command = jar $ARGS\n'
description = ' description = Creating jar $out.\n'
outfile.write(rule)
outfile.write(command)
outfile.write(description)
outfile.write('\n')
def split_vala_sources(self, sources):
src = []
vapi_src = []
for s in sources:
if s.endswith('.vapi'):
vapi_src.append(s)
else:
src.append(s)
return (src, vapi_src)
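    # Collect the .vapi files generated by linked-against targets containing
    # Vala sources so this target's valac invocation can consume them.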
def determine_dep_vapis(self, target):
result = []
for dep in target.link_targets:
for i in dep.sources:
if hasattr(i, 'fname'):
i = i.fname
if i.endswith('vala'):
vapiname = os.path.splitext(os.path.split(i)[1])[0] + '.vapi'
fullname = os.path.join(self.get_target_private_dir(dep), vapiname)
result.append(fullname)
break
return result
def generate_vala_compile(self, target, outfile):
"""Vala is compiled into C. Set up all necessary build steps here."""
valac = self.environment.coredata.compilers['vala']
(src, vapi_src) = self.split_vala_sources(target.get_sources())
vapi_src = [x.rel_to_builddir(self.build_to_src) for x in vapi_src]
extra_dep_files = []
vala_input_files = []
for s in src:
if s.endswith('.vala'):
vala_input_files.append(s.rel_to_builddir(self.build_to_src))
namebase = os.path.splitext(os.path.split(vala_input_files[0])[1])[0]
hname = namebase + '.h'
vapiname = namebase + '.vapi'
outputs = [vapiname]
args = ['-d', self.get_target_private_dir(target)]
args += ['-C']#, '-o', cname]
if not isinstance(target, build.Executable):
outputs.append(hname)
args += ['-H', hname]
args += ['--vapi=' + vapiname]
for src in vala_input_files:
namebase = os.path.splitext(os.path.split(src)[1])[0] + '.c'
outputs.append(namebase)
if self.environment.coredata.get_builtin_option('werror'):
args += valac.get_werror_args()
for d in target.external_deps:
if isinstance(d, dependencies.PkgConfigDependency):
if d.name == 'glib-2.0' and d.version_requirement is not None \
and d.version_requirement.startswith(('>=', '==')):
args += ['--target-glib', d.version_requirement[2:]]
args += ['--pkg', d.name]
extra_args = []
for a in target.extra_args.get('vala', []):
if isinstance(a, File):
relname = a.rel_to_builddir(self.build_to_src)
extra_dep_files.append(relname)
extra_args.append(relname)
else:
extra_args.append(a)
dependency_vapis = self.determine_dep_vapis(target)
extra_dep_files += dependency_vapis
args += extra_args
args += dependency_vapis
outputs = [os.path.join(self.get_target_private_dir(target), x) for x in outputs]
element = NinjaBuildElement(outputs,
valac.get_language() + '_COMPILER',
vala_input_files + vapi_src)
element.add_item('ARGS', args)
element.add_dep(extra_dep_files)
element.write(outfile)
self.check_outputs(element)
return outputs
def generate_rust_target(self, target, outfile):
rustc = self.environment.coredata.compilers['rust']
relsrc = []
for i in target.get_sources():
if not rustc.can_compile(i):
raise InvalidArguments('Rust target %s contains a non-rust source file.' % target.get_basename())
relsrc.append(i.rel_to_builddir(self.build_to_src))
target_name = os.path.join(target.subdir, target.get_filename())
args = ['--crate-type']
if isinstance(target, build.Executable):
cratetype = 'bin'
elif isinstance(target, build.SharedLibrary):
cratetype = 'rlib'
elif isinstance(target, build.StaticLibrary):
cratetype = 'rlib'
else:
raise InvalidArguments('Unknown target type for rustc.')
args.append(cratetype)
args += rustc.get_buildtype_args(self.environment.coredata.get_builtin_option('buildtype'))
depfile = target.name + '.d'
args += ['--out-dir', target.subdir]
args += ['--emit', 'dep-info', '--emit', 'link']
orderdeps = [os.path.join(t.subdir, t.get_filename()) for t in target.link_targets]
linkdirs = {}
for d in target.link_targets:
linkdirs[d.subdir] = True
for d in linkdirs.keys():
if d == '':
d = '.'
args += ['-L', d]
element = NinjaBuildElement(target_name, 'rust_COMPILER', relsrc)
if len(orderdeps) > 0:
element.add_orderdep(orderdeps)
element.add_item('ARGS', args)
element.add_item('targetdep', depfile)
element.add_item('cratetype', cratetype)
element.write(outfile)
self.check_outputs(element)
def swift_module_file_name(self, target):
return os.path.join(self.get_target_private_dir(target),
self.target_swift_modulename(target) + '.swiftmodule')
def target_swift_modulename(self, target):
return target.name
def is_swift_target(self, target):
for s in target.sources:
if s.endswith('swift'):
return True
return False
def determine_swift_dep_modules(self, target):
result = []
for l in target.link_targets:
if self.is_swift_target(l):
result.append(self.swift_module_file_name(l))
return result
def determine_swift_dep_dirs(self, target):
result = []
for l in target.link_targets:
result.append(self.get_target_private_dir_abs(l))
return result
def get_swift_link_deps(self, target):
result = []
for l in target.link_targets:
result.append(self.get_target_filename(l))
return result
def split_swift_generated_sources(self, target):
all_srcs = []
for genlist in target.get_generated_sources():
if isinstance(genlist, build.CustomTarget):
for ifile in genlist.get_filename():
rel = os.path.join(self.get_target_dir(genlist), ifile)
all_srcs.append(rel)
else:
for ifile in genlist.get_outfilelist():
rel = os.path.join(self.get_target_private_dir(target), ifile)
all_srcs.append(rel)
srcs = []
others = []
for i in all_srcs:
if i.endswith('.swift'):
srcs.append(i)
else:
others.append(i)
return (srcs, others)
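    # Swift targets are built in two passes because swiftc cannot emit object
    # files and the .swiftmodule in a single invocation; compilation runs from
    # the target's private dir (via dirchanger.py), so all paths are absolute.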
def generate_swift_target(self, target, outfile):
module_name = self.target_swift_modulename(target)
swiftc = self.environment.coredata.compilers['swift']
abssrc = []
abs_headers = []
header_imports = []
for i in target.get_sources():
if swiftc.can_compile(i):
relsrc = i.rel_to_builddir(self.build_to_src)
abss = os.path.normpath(os.path.join(self.environment.get_build_dir(), relsrc))
abssrc.append(abss)
elif self.environment.is_header(i):
relh = i.rel_to_builddir(self.build_to_src)
absh = os.path.normpath(os.path.join(self.environment.get_build_dir(), relh))
abs_headers.append(absh)
header_imports += swiftc.get_header_import_args(absh)
else:
raise InvalidArguments('Swift target %s contains a non-swift source file.' % target.get_basename())
os.makedirs(self.get_target_private_dir_abs(target), exist_ok=True)
compile_args = swiftc.get_compile_only_args()
compile_args += swiftc.get_module_args(module_name)
link_args = swiftc.get_output_args(os.path.join(self.environment.get_build_dir(), self.get_target_filename(target)))
rundir = self.get_target_private_dir(target)
out_module_name = self.swift_module_file_name(target)
in_module_files = self.determine_swift_dep_modules(target)
abs_module_dirs = self.determine_swift_dep_dirs(target)
module_includes = []
for x in abs_module_dirs:
module_includes += swiftc.get_include_args(x)
link_deps = self.get_swift_link_deps(target)
abs_link_deps = [os.path.join(self.environment.get_build_dir(), x) for x in link_deps]
(rel_generated, _) = self.split_swift_generated_sources(target)
abs_generated = [os.path.join(self.environment.get_build_dir(), x) for x in rel_generated]
# We need absolute paths because swiftc needs to be invoked in a subdir
        # and this is the easiest way to go about it.
objects = [] # Relative to swift invocation dir
rel_objects = [] # Relative to build.ninja
for i in abssrc + abs_generated:
base = os.path.split(i)[1]
oname = os.path.splitext(base)[0] + '.o'
objects.append(oname)
rel_objects.append(os.path.join(self.get_target_private_dir(target), oname))
# Swiftc does not seem to be able to emit objects and module files in one go.
elem = NinjaBuildElement(rel_objects,
'swift_COMPILER',
abssrc)
elem.add_dep(in_module_files + rel_generated)
elem.add_dep(abs_headers)
elem.add_item('ARGS', compile_args + header_imports + abs_generated + module_includes)
elem.add_item('RUNDIR', rundir)
elem.write(outfile)
self.check_outputs(elem)
elem = NinjaBuildElement(out_module_name,
'swift_COMPILER',
abssrc)
elem.add_dep(in_module_files + rel_generated)
elem.add_item('ARGS', compile_args + abs_generated + module_includes + swiftc.get_mod_gen_args())
elem.add_item('RUNDIR', rundir)
elem.write(outfile)
self.check_outputs(elem)
if isinstance(target, build.StaticLibrary):
elem = self.generate_link(target, outfile, self.get_target_filename(target),
rel_objects, self.build.static_linker)
elem.write(outfile)
elif isinstance(target, build.Executable):
elem = NinjaBuildElement(self.get_target_filename(target), 'swift_COMPILER', [])
elem.add_dep(rel_objects)
elem.add_dep(link_deps)
elem.add_item('ARGS', link_args + swiftc.get_std_exe_link_args() + objects + abs_link_deps)
elem.add_item('RUNDIR', rundir)
elem.write(outfile)
self.check_outputs(elem)
else:
raise MesonException('Swift supports only executable and static library targets.')
def generate_static_link_rules(self, is_cross, outfile):
if self.build.has_language('java'):
if not is_cross:
self.generate_java_link(outfile)
if is_cross:
if self.environment.cross_info.need_cross_compiler():
static_linker = self.build.static_cross_linker
else:
static_linker = self.build.static_linker
crstr = '_CROSS'
else:
static_linker = self.build.static_linker
crstr = ''
if static_linker is None:
return
rule = 'rule STATIC%s_LINKER\n' % crstr
if mesonlib.is_windows():
command_templ = ''' command = %s @$out.rsp
rspfile = $out.rsp
rspfile_content = $LINK_ARGS %s $in
'''
else:
command_templ = ' command = %s $LINK_ARGS %s $in\n'
command = command_templ %\
(' '.join(static_linker.get_exelist()),
' '.join(static_linker.get_output_args('$out')))
description = ' description = Static linking library $out\n\n'
outfile.write(rule)
outfile.write(command)
outfile.write(description)
def generate_dynamic_link_rules(self, outfile):
ctypes = [(self.build.compilers, False)]
if self.environment.is_cross_build():
if self.environment.cross_info.need_cross_compiler():
ctypes.append((self.build.cross_compilers, True))
else:
# Native compiler masquerades as the cross compiler.
ctypes.append((self.build.compilers, True))
else:
ctypes.append((self.build.cross_compilers, True))
for (complist, is_cross) in ctypes:
for compiler in complist:
langname = compiler.get_language()
                if langname in ('java', 'vala', 'rust', 'cs'):
continue
crstr = ''
cross_args = []
if is_cross:
crstr = '_CROSS'
try:
cross_args = self.environment.cross_info.config['properties'][langname + '_link_args']
except KeyError:
pass
rule = 'rule %s%s_LINKER\n' % (langname, crstr)
if mesonlib.is_windows():
command_template = ''' command = %s @$out.rsp
rspfile = $out.rsp
rspfile_content = %s $ARGS %s $in $LINK_ARGS $aliasing
'''
else:
command_template = ' command = %s %s $ARGS %s $in $LINK_ARGS $aliasing\n'
command = command_template % \
(' '.join(compiler.get_linker_exelist()),\
' '.join(cross_args),\
' '.join(compiler.get_linker_output_args('$out')))
description = ' description = Linking target $out'
outfile.write(rule)
outfile.write(command)
outfile.write(description)
outfile.write('\n')
scriptdir = self.environment.get_script_dir()
outfile.write('\n')
symrule = 'rule SHSYM\n'
symcmd = ' command = "%s" "%s" %s %s $CROSS\n' % (ninja_quote(sys.executable),
ninja_quote(os.path.join(scriptdir, 'symbolextractor.py')),
'$in', '$out')
synstat = ' restat = 1\n'
syndesc = ' description = Generating symbol file $out.\n'
outfile.write(symrule)
outfile.write(symcmd)
outfile.write(synstat)
outfile.write(syndesc)
outfile.write('\n')
def generate_java_compile_rule(self, compiler, outfile):
rule = 'rule %s_COMPILER\n' % compiler.get_language()
invoc = ' '.join([ninja_quote(i) for i in compiler.get_exelist()])
command = ' command = %s $ARGS $in\n' % invoc
description = ' description = Compiling Java object $in.\n'
outfile.write(rule)
outfile.write(command)
outfile.write(description)
outfile.write('\n')
def generate_cs_compile_rule(self, compiler, outfile):
rule = 'rule %s_COMPILER\n' % compiler.get_language()
invoc = ' '.join([ninja_quote(i) for i in compiler.get_exelist()])
command = ' command = %s $ARGS $in\n' % invoc
description = ' description = Compiling cs target $out.\n'
outfile.write(rule)
outfile.write(command)
outfile.write(description)
outfile.write('\n')
def generate_vala_compile_rules(self, compiler, outfile):
rule = 'rule %s_COMPILER\n' % compiler.get_language()
invoc = ' '.join([ninja_quote(i) for i in compiler.get_exelist()])
command = ' command = %s $ARGS $in\n' % invoc
description = ' description = Compiling Vala source $in.\n'
restat = ' restat = 1\n' # ValaC does this always to take advantage of it.
outfile.write(rule)
outfile.write(command)
outfile.write(description)
outfile.write(restat)
outfile.write('\n')
def generate_rust_compile_rules(self, compiler, outfile):
rule = 'rule %s_COMPILER\n' % compiler.get_language()
invoc = ' '.join([ninja_quote(i) for i in compiler.get_exelist()])
command = ' command = %s $ARGS $in\n' % invoc
description = ' description = Compiling Rust source $in.\n'
depfile = ' depfile = $targetdep\n'
depstyle = ' deps = gcc\n'
outfile.write(rule)
outfile.write(command)
outfile.write(description)
outfile.write(depfile)
outfile.write(depstyle)
outfile.write('\n')
def generate_swift_compile_rules(self, compiler, outfile):
rule = 'rule %s_COMPILER\n' % compiler.get_language()
full_exe = [sys.executable,
os.path.join(self.environment.get_script_dir(), 'dirchanger.py'),
'$RUNDIR'] + compiler.get_exelist()
invoc = ' '.join([ninja_quote(i) for i in full_exe])
command = ' command = %s $ARGS $in\n' % invoc
description = ' description = Compiling Swift source $in.\n'
outfile.write(rule)
outfile.write(command)
outfile.write(description)
outfile.write('\n')
def generate_fortran_dep_hack(self, outfile):
if mesonlib.is_windows():
cmd = 'cmd /C ""'
else:
cmd = 'true'
template = '''# Workaround for these issues:
# https://groups.google.com/forum/#!topic/ninja-build/j-2RfBIOd_8
# https://gcc.gnu.org/bugzilla/show_bug.cgi?id=47485
rule FORTRAN_DEP_HACK
command = %s
description = Dep hack
restat = 1
'''
outfile.write(template % cmd)
def generate_compile_rule_for(self, langname, compiler, qstr, is_cross, outfile):
if langname == 'java':
if not is_cross:
self.generate_java_compile_rule(compiler, outfile)
return
if langname == 'cs':
if not is_cross:
self.generate_cs_compile_rule(compiler, outfile)
return
if langname == 'vala':
if not is_cross:
self.generate_vala_compile_rules(compiler, outfile)
return
if langname == 'rust':
if not is_cross:
self.generate_rust_compile_rules(compiler, outfile)
return
if langname == 'swift':
if not is_cross:
self.generate_swift_compile_rules(compiler, outfile)
return
if langname == 'fortran':
self.generate_fortran_dep_hack(outfile)
if is_cross:
crstr = '_CROSS'
else:
crstr = ''
rule = 'rule %s%s_COMPILER\n' % (langname, crstr)
depargs = compiler.get_dependency_gen_args('$out', '$DEPFILE')
quoted_depargs = []
for d in depargs:
if d != '$out' and d != '$in':
d = qstr % d
quoted_depargs.append(d)
cross_args = []
if is_cross:
try:
cross_args = self.environment.cross_info.config['properties'][langname + '_args']
except KeyError:
pass
if mesonlib.is_windows():
command_template = ''' command = %s @$out.rsp
rspfile = $out.rsp
rspfile_content = %s $ARGS %s %s %s $in
'''
else:
command_template = ' command = %s %s $ARGS %s %s %s $in\n'
command = command_template % \
(' '.join(compiler.get_exelist()),\
' '.join(cross_args),
' '.join(quoted_depargs),\
' '.join(compiler.get_output_args('$out')),\
' '.join(compiler.get_compile_only_args()))
description = ' description = Compiling %s object $out\n' % langname
if compiler.get_id() == 'msvc':
deps = ' deps = msvc\n'
else:
deps = ' deps = gcc\n'
deps += ' depfile = $DEPFILE\n'
outfile.write(rule)
outfile.write(command)
outfile.write(deps)
outfile.write(description)
outfile.write('\n')
def generate_pch_rule_for(self, langname, compiler, qstr, is_cross, outfile):
if langname != 'c' and langname != 'cpp':
return
if is_cross:
crstr = '_CROSS'
else:
crstr = ''
rule = 'rule %s%s_PCH\n' % (langname, crstr)
depargs = compiler.get_dependency_gen_args('$out', '$DEPFILE')
cross_args = []
if is_cross:
try:
cross_args = self.environment.cross_info.config['properties'][langname + '_args']
except KeyError:
pass
quoted_depargs = []
for d in depargs:
if d != '$out' and d != '$in':
d = qstr % d
quoted_depargs.append(d)
if compiler.get_id() == 'msvc':
output = ''
else:
output = ' '.join(compiler.get_output_args('$out'))
command = " command = %s %s $ARGS %s %s %s $in\n" % \
(' '.join(compiler.get_exelist()),\
' '.join(cross_args),\
' '.join(quoted_depargs),\
output,\
' '.join(compiler.get_compile_only_args()))
description = ' description = Precompiling header %s\n' % '$in'
if compiler.get_id() == 'msvc':
deps = ' deps = msvc\n'
else:
deps = ' deps = gcc\n'
deps += ' depfile = $DEPFILE\n'
outfile.write(rule)
outfile.write(command)
outfile.write(deps)
outfile.write(description)
outfile.write('\n')
def generate_compile_rules(self, outfile):
qstr = quote_char + "%s" + quote_char
for compiler in self.build.compilers:
langname = compiler.get_language()
self.generate_compile_rule_for(langname, compiler, qstr, False, outfile)
self.generate_pch_rule_for(langname, compiler, qstr, False, outfile)
if self.environment.is_cross_build():
            # In case we are doing a target-only build, make the native compilers
# masquerade as cross compilers.
if self.environment.cross_info.need_cross_compiler():
cclist = self.build.cross_compilers
else:
cclist = self.build.compilers
for compiler in cclist:
langname = compiler.get_language()
self.generate_compile_rule_for(langname, compiler, qstr, True, outfile)
self.generate_pch_rule_for(langname, compiler, qstr, True, outfile)
outfile.write('\n')
def replace_outputs(self, args, private_dir, output_list):
newargs = []
regex = re.compile('@OUTPUT(\d+)@')
for arg in args:
m = regex.search(arg)
while m is not None:
index = int(m.group(1))
src = '@OUTPUT%d@' % index
arg = arg.replace(src, os.path.join(private_dir, output_list[index]))
m = regex.search(arg)
newargs.append(arg)
return newargs
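    # Write build statements for generator() outputs, expanding the @INPUT@,
    # @OUTPUT@/@OUTPUT<n>@, @SOURCE_DIR@, @BUILD_DIR@ and @EXTRA_ARGS@
    # placeholders for each input file.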
def generate_custom_generator_rules(self, target, outfile):
for genlist in target.get_generated_sources():
if isinstance(genlist, build.CustomTarget):
continue # Customtarget has already written its output rules
generator = genlist.get_generator()
exe = generator.get_exe()
exe_arr = self.exe_object_to_cmd_array(exe)
infilelist = genlist.get_infilelist()
outfilelist = genlist.get_outfilelist()
base_args = generator.get_arglist()
extra_dependencies = [os.path.join(self.build_to_src, i) for i in genlist.extra_depends]
for i in range(len(infilelist)):
if len(generator.outputs) == 1:
sole_output = os.path.join(self.get_target_private_dir(target), outfilelist[i])
else:
sole_output = ''
curfile = infilelist[i]
infilename = os.path.join(self.build_to_src, curfile)
outfiles = genlist.get_outputs_for(curfile)
outfiles = [os.path.join(self.get_target_private_dir(target), of) for of in outfiles]
args = [x.replace("@INPUT@", infilename).replace('@OUTPUT@', sole_output)\
for x in base_args]
args = self.replace_outputs(args, self.get_target_private_dir(target), outfilelist)
# We have consumed output files, so drop them from the list of remaining outputs.
if sole_output == '':
outfilelist = outfilelist[len(generator.outputs):]
relout = self.get_target_private_dir(target)
args = [x.replace("@SOURCE_DIR@", self.build_to_src).replace("@BUILD_DIR@", relout)
for x in args]
final_args = []
for a in args:
if a == '@EXTRA_ARGS@':
final_args += genlist.get_extra_args()
else:
final_args.append(a)
cmdlist = exe_arr + final_args
elem = NinjaBuildElement(outfiles, 'CUSTOM_COMMAND', infilename)
if len(extra_dependencies) > 0:
elem.add_dep(extra_dependencies)
elem.add_item('DESC', 'Generating $out')
if isinstance(exe, build.BuildTarget):
elem.add_dep(self.get_target_filename(exe))
elem.add_item('COMMAND', cmdlist)
elem.write(outfile)
self.check_outputs(elem)
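    # Scan this target's Fortran sources for 'module <name>' declarations and
    # record which source file defines each module.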
def scan_fortran_module_outputs(self, target):
compiler = None
for c in self.build.compilers:
if c.get_language() == 'fortran':
compiler = c
break
if compiler is None:
self.fortran_deps[target.get_basename()] = {}
return
modre = re.compile(r"\s*module\s+(\w+)", re.IGNORECASE)
module_files = {}
for s in target.get_sources():
# FIXME, does not work for generated Fortran sources,
# but those are really rare. I hope.
if not compiler.can_compile(s):
continue
for line in open(os.path.join(self.environment.get_source_dir(), s.subdir, s.fname)):
modmatch = modre.match(line)
if modmatch is not None:
modname = modmatch.group(1)
if modname.lower() == 'procedure': # MODULE PROCEDURE construct
continue
if modname in module_files:
raise InvalidArguments('Namespace collision: module %s defined in two files %s and %s.' %
(modname, module_files[modname], s))
module_files[modname] = s
self.fortran_deps[target.get_basename()] = module_files
def get_fortran_deps(self, compiler, src, target):
mod_files = []
usere = re.compile(r"\s*use\s+(\w+)", re.IGNORECASE)
dirname = self.get_target_private_dir(target)
tdeps= self.fortran_deps[target.get_basename()]
for line in open(src):
usematch = usere.match(line)
if usematch is not None:
usename = usematch.group(1)
if usename not in tdeps:
# The module is not provided by any source file. This is due to
# a) missing file/typo/etc
# b) using a module provided by the compiler, such as OpenMP
# There's no easy way to tell which is which (that I know of)
# so just ignore this and go on. Ideally we would print a
# warning message to the user but this is a common occurrence,
# which would lead to lots of distracting noise.
continue
mod_source_file = tdeps[usename]
# Check if a source uses a module it exports itself.
# Potential bug if multiple targets have a file with
# the same name.
if mod_source_file.fname == os.path.split(src)[1]:
continue
mod_name = compiler.module_name_to_filename(usematch.group(1))
mod_files.append(os.path.join(dirname, mod_name))
return mod_files
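# Illustrative sketch (invented two-line source): how the module-provider and
# module-consumer regexes used above behave.
#   import re
#   modre = re.compile(r"\s*module\s+(\w+)", re.IGNORECASE)
#   usere = re.compile(r"\s*use\s+(\w+)", re.IGNORECASE)
#   modre.match("module physics").group(1)    # -> 'physics'   (file provides physics.mod)
#   usere.match("  use constants").group(1)   # -> 'constants' (file needs constants.mod)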
def generate_single_compile(self, target, outfile, src, is_generated=False, header_deps=[], order_deps=[]):
if(isinstance(src, str) and src.endswith('.h')):
raise RuntimeError('Fug')
if isinstance(src, RawFilename) and src.fname.endswith('.h'):
raise RuntimeError('Fug')
extra_orderdeps = []
compiler = self.get_compiler_for_source(src)
commands = self.generate_basic_compiler_args(target, compiler)
commands += compiler.get_include_args(self.get_target_private_dir(target), False)
curdir = target.get_subdir()
tmppath = os.path.normpath(os.path.join(self.build_to_src, curdir))
commands += compiler.get_include_args(tmppath, False)
if curdir == '':
curdir = '.'
commands += compiler.get_include_args(curdir, False)
for d in target.external_deps:
if d.need_threads():
commands += compiler.thread_flags()
break
if isinstance(src, RawFilename):
rel_src = src.fname
elif is_generated:
if self.has_dir_part(src):
rel_src = src
else:
rel_src = os.path.join(self.get_target_private_dir(target), src)
abs_src = os.path.join(self.environment.get_source_dir(), rel_src)
else:
if isinstance(src, File):
rel_src = src.rel_to_builddir(self.build_to_src)
else:
raise build.InvalidArguments('Invalid source type.')
abs_src = os.path.join(self.environment.get_build_dir(), rel_src)
if isinstance(src, RawFilename):
src_filename = src.fname
elif isinstance(src, File):
src_filename = src.fname
elif os.path.isabs(src):
src_filename = os.path.basename(src)
else:
src_filename = src
obj_basename = src_filename.replace('/', '_').replace('\\', '_')
rel_obj = os.path.join(self.get_target_private_dir(target), obj_basename)
rel_obj += '.' + self.environment.get_object_suffix()
dep_file = compiler.depfile_for_object(rel_obj)
if self.environment.coredata.get_builtin_option('use_pch'):
pchlist = target.get_pch(compiler.language)
else:
pchlist = []
if len(pchlist) == 0:
pch_dep = []
else:
arr = []
i = os.path.join(self.get_target_private_dir(target), compiler.get_pch_name(pchlist[0]))
arr.append(i)
pch_dep = arr
for i in target.get_include_dirs():
basedir = i.get_curdir()
for d in i.get_incdirs():
expdir = os.path.join(basedir, d)
srctreedir = os.path.join(self.build_to_src, expdir)
bargs = compiler.get_include_args(expdir, i.is_system)
sargs = compiler.get_include_args(srctreedir, i.is_system)
commands += bargs
commands += sargs
for d in i.get_extra_build_dirs():
commands += compiler.get_include_args(d, i.is_system)
custom_target_include_dirs = []
for i in target.generated:
if isinstance(i, build.CustomTarget):
idir = self.get_target_dir(i)
if idir not in custom_target_include_dirs:
custom_target_include_dirs.append(idir)
for i in custom_target_include_dirs:
commands+= compiler.get_include_args(i, False)
if self.environment.coredata.get_builtin_option('use_pch'):
commands += self.get_pch_include_args(compiler, target)
crstr = ''
if target.is_cross:
crstr = '_CROSS'
compiler_name = '%s%s_COMPILER' % (compiler.get_language(), crstr)
extra_deps = []
if compiler.get_language() == 'fortran':
extra_deps += self.get_fortran_deps(compiler, abs_src, target)
# Dependency hack. Remove once multiple outputs in Ninja is fixed:
# https://groups.google.com/forum/#!topic/ninja-build/j-2RfBIOd_8
for modname, srcfile in self.fortran_deps[target.get_basename()].items():
modfile = os.path.join(self.get_target_private_dir(target),
compiler.module_name_to_filename(modname))
if srcfile == src:
depelem = NinjaBuildElement(modfile, 'FORTRAN_DEP_HACK', rel_obj)
depelem.write(outfile)
self.check_outputs(depelem)
commands += compiler.get_module_outdir_args(self.get_target_private_dir(target))
element = NinjaBuildElement(rel_obj, compiler_name, rel_src)
for d in header_deps:
if isinstance(d, RawFilename):
d = d.fname
elif not self.has_dir_part(d):
d = os.path.join(self.get_target_private_dir(target), d)
element.add_dep(d)
for d in extra_deps:
element.add_dep(d)
for d in order_deps:
if isinstance(d, RawFilename):
d = d.fname
elif not self.has_dir_part(d):
d = os.path.join(self.get_target_private_dir(target), d)
element.add_orderdep(d)
element.add_orderdep(pch_dep)
element.add_orderdep(extra_orderdeps)
for i in self.get_fortran_orderdeps(target, compiler):
element.add_orderdep(i)
element.add_item('DEPFILE', dep_file)
element.add_item('ARGS', commands)
element.write(outfile)
self.check_outputs(element)
return rel_obj
def has_dir_part(self, fname):
return '/' in fname or '\\' in fname
# Fortran is a bit weird (again). When you link against a library, just compiling a source file
# requires the mod files that are output when single files are built. To do this right we would need to
# scan all inputs and write out explicit deps for each file. That is too slow and too much effort so
# instead just have an ordered dependency on the library. This ensures all required mod files are created.
# The real deps are then detected via dep file generation from the compiler. This breaks on compilers that
# produce incorrect dep files but such is life.
def get_fortran_orderdeps(self, target, compiler):
if compiler.language != 'fortran':
return []
return [os.path.join(self.get_target_dir(lt), lt.get_filename()) for lt in target.link_targets]
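# Illustrative sketch (invented target and library names): the ordered dependency
# returned above lands after the '||' separator on the generated Ninja edge, e.g.
#   build tgt@priv/solver.f90.o: fortran_COMPILER ../solver.f90 || libphysics.so
# so the library (and hence its .mod files) is built first, without forcing a
# recompile of solver.f90 every time the library itself relinks.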
def generate_msvc_pch_command(self, target, compiler, pch):
if len(pch) != 2:
raise RuntimeError('MSVC requires one header and one source to produce precompiled headers.')
header = pch[0]
source = pch[1]
pchname = compiler.get_pch_name(header)
dst = os.path.join(self.get_target_private_dir(target), pchname)
commands = []
commands += self.generate_basic_compiler_args(target, compiler)
just_name = os.path.split(header)[1]
(objname, pch_args) = compiler.gen_pch_args(just_name, source, dst)
commands += pch_args
dep = dst + '.' + compiler.get_depfile_suffix()
return (commands, dep, dst, [objname])
def generate_gcc_pch_command(self, target, compiler, pch):
commands = []
commands += self.generate_basic_compiler_args(target, compiler)
dst = os.path.join(self.get_target_private_dir(target),
os.path.split(pch)[-1] + '.' + compiler.get_pch_suffix())
dep = dst + '.' + compiler.get_depfile_suffix()
return (commands, dep, dst, []) # Gcc does not create an object file during pch generation.
def generate_pch(self, target, outfile):
cstr = ''
pch_objects = []
if target.is_cross:
cstr = '_CROSS'
for lang in ['c', 'cpp']:
pch = target.get_pch(lang)
if len(pch) == 0:
continue
if '/' not in pch[0] or '/' not in pch[-1]:
raise build.InvalidArguments('Precompiled header of "%s" must not be in the same directory as source, please put it in a subdirectory.' % target.get_basename())
compiler = self.get_compiler_for_lang(lang)
if compiler.id == 'msvc':
src = os.path.join(self.build_to_src, target.get_source_subdir(), pch[-1])
(commands, dep, dst, objs) = self.generate_msvc_pch_command(target, compiler, pch)
extradep = os.path.join(self.build_to_src, target.get_source_subdir(), pch[0])
else:
src = os.path.join(self.build_to_src, target.get_source_subdir(), pch[0])
(commands, dep, dst, objs) = self.generate_gcc_pch_command(target, compiler, pch[0])
extradep = None
pch_objects += objs
rulename = compiler.get_language() + cstr + '_PCH'
elem = NinjaBuildElement(dst, rulename, src)
if extradep is not None:
elem.add_dep(extradep)
elem.add_item('ARGS', commands)
elem.add_item('DEPFILE', dep)
elem.write(outfile)
self.check_outputs(elem)
return pch_objects
def generate_shsym(self, outfile, target):
target_name = self.get_target_filename(target)
targetdir = self.get_target_private_dir(target)
symname = os.path.join(targetdir, target_name + '.symbols')
elem = NinjaBuildElement(symname, 'SHSYM', target_name)
if self.environment.is_cross_build() and self.environment.cross_info.need_cross_compiler():
elem.add_item('CROSS', '--cross-host=' + self.environment.cross_info.config['host_machine']['system'])
elem.write(outfile)
self.check_outputs(elem)
def generate_link(self, target, outfile, outname, obj_list, linker, extra_args=[]):
if isinstance(target, build.StaticLibrary):
linker_base = 'STATIC'
else:
linker_base = linker.get_language() # Fixme.
if isinstance(target, build.SharedLibrary):
self.generate_shsym(outfile, target)
crstr = ''
if target.is_cross:
crstr = '_CROSS'
linker_rule = linker_base + crstr + '_LINKER'
abspath = os.path.join(self.environment.get_build_dir(), target.subdir)
commands = []
commands += linker.get_linker_always_args()
commands += linker.get_buildtype_linker_args(self.environment.coredata.get_builtin_option('buildtype'))
commands += linker.get_option_link_args(self.environment.coredata.compiler_options)
if not(isinstance(target, build.StaticLibrary)):
commands += self.environment.coredata.external_link_args[linker.get_language()]
if isinstance(target, build.Executable):
commands += linker.get_std_exe_link_args()
elif isinstance(target, build.SharedLibrary):
commands += linker.get_std_shared_lib_link_args()
commands += linker.get_pic_args()
if hasattr(target, 'soversion'):
soversion = target.soversion
else:
soversion = None
commands += linker.get_soname_args(target.name, abspath, soversion)
elif isinstance(target, build.StaticLibrary):
commands += linker.get_std_link_args()
else:
raise RuntimeError('Unknown build target type.')
# Link arguments of static libraries are not put in the command line of
# the library. They are instead appended to the command line where
# the static library is used.
if linker_base == 'STATIC':
dependencies = []
else:
dependencies = target.get_dependencies()
commands += self.build_target_link_arguments(linker, dependencies)
for d in target.external_deps:
if d.need_threads():
commands += linker.thread_link_flags()
if not isinstance(target, build.StaticLibrary):
commands += target.link_args
# External deps must be last because target link libraries may depend on them.
if not(isinstance(target, build.StaticLibrary)):
for dep in target.get_external_deps():
commands += dep.get_link_args()
for d in target.get_dependencies():
if isinstance(d, build.StaticLibrary):
for dep in d.get_external_deps():
commands += dep.get_link_args()
commands += linker.build_rpath_args(self.environment.get_build_dir(),\
self.determine_rpath_dirs(target), target.install_rpath)
if self.environment.coredata.get_builtin_option('coverage'):
commands += linker.get_coverage_link_args()
custom_target_libraries = self.get_custom_target_provided_libraries(target)
commands += extra_args
commands += custom_target_libraries
commands = linker.unixtype_flags_to_native(commands)
dep_targets = [self.get_dependency_filename(t) for t in dependencies]
dep_targets += [os.path.join(self.environment.source_dir,
target.subdir, t) for t in target.link_depends]
elem = NinjaBuildElement(outname, linker_rule, obj_list)
elem.add_dep(dep_targets + custom_target_libraries)
elem.add_item('LINK_ARGS', commands)
self.check_outputs(elem)
return elem
def get_custom_target_provided_libraries(self, target):
libs = []
for t in target.get_generated_sources():
if not isinstance(t, build.CustomTarget):
continue
for f in t.output:
if self.environment.is_library(f):
libs.append(os.path.join(self.get_target_dir(t), f))
return libs
def determine_rpath_dirs(self, target):
link_deps = target.get_all_link_deps()
result = []
for ld in link_deps:
prospective = self.get_target_dir(ld)
if not prospective in result:
result.append(prospective)
return result
def get_dependency_filename(self, t):
if isinstance(t, build.SharedLibrary):
return os.path.join(self.get_target_private_dir(t), self.get_target_filename(t) + '.symbols')
return self.get_target_filename(t)
def generate_shlib_aliases(self, target, outdir):
basename = target.get_filename()
aliases = target.get_aliaslist()
if not mesonlib.is_windows():
for alias in aliases:
aliasfile = os.path.join(self.environment.get_build_dir(), outdir, alias)
try:
os.remove(aliasfile)
except Exception:
pass
os.symlink(basename, aliasfile)
else:
mlog.debug("Library versioning disabled because host does not support symlinks.")
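# Illustrative sketch (invented file names): on POSIX hosts the alias loop above
# leaves plain symlinks next to the real versioned library, roughly:
#   libfoo.so.1.2.3              (real output, target.get_filename())
#   libfoo.so -> libfoo.so.1.2.3 (alias from target.get_aliaslist())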
def generate_gcov_clean(self, outfile):
gcno_elem = NinjaBuildElement('clean-gcno', 'CUSTOM_COMMAND', 'PHONY')
script_root = self.environment.get_script_dir()
clean_script = os.path.join(script_root, 'delwithsuffix.py')
gcno_elem.add_item('COMMAND', [sys.executable, clean_script, '.', 'gcno'])
gcno_elem.add_item('description', 'Deleting gcno files')
gcno_elem.write(outfile)
self.check_outputs(gcno_elem)
gcda_elem = NinjaBuildElement('clean-gcda', 'CUSTOM_COMMAND', 'PHONY')
script_root = self.environment.get_script_dir()
clean_script = os.path.join(script_root, 'delwithsuffix.py')
gcda_elem.add_item('COMMAND', [sys.executable, clean_script, '.', 'gcda'])
gcda_elem.add_item('description', 'Deleting gcda files')
gcda_elem.write(outfile)
self.check_outputs(gcda_elem)
def is_compilable_file(self, filename):
if filename.endswith('.cpp') or\
filename.endswith('.c') or\
filename.endswith('.cxx') or\
filename.endswith('.cc') or\
filename.endswith('.C'):
return True
return False
def process_dep_gens(self, outfile, target):
src_deps = []
other_deps = []
for rule in self.dep_rules.values():
srcs = target.get_original_kwargs().get(rule.src_keyword, [])
if isinstance(srcs, str):
srcs = [srcs]
for src in srcs:
plainname = os.path.split(src)[1]
basename = plainname.split('.')[0]
outname = rule.name_templ.replace('@BASENAME@', basename).replace('@PLAINNAME@', plainname)
outfilename = os.path.join(self.get_target_private_dir(target), outname)
infilename = os.path.join(self.build_to_src, target.get_source_subdir(), src)
elem = NinjaBuildElement(outfilename, rule.name, infilename)
elem.write(outfile)
self.check_outputs(elem)
if self.is_compilable_file(outfilename):
src_deps.append(outfilename)
else:
other_deps.append(outfilename)
return (src_deps, other_deps)
def generate_ending(self, outfile):
targetlist = [self.get_target_filename(t) for t in self.build.get_targets().values()\
if not isinstance(t, build.RunTarget)]
elem = NinjaBuildElement('all', 'phony', targetlist)
elem.write(outfile)
self.check_outputs(elem)
default = 'default all\n\n'
outfile.write(default)
ninja_command = environment.detect_ninja()
if ninja_command is None:
raise MesonException('Could not detect ninja command')
elem = NinjaBuildElement('clean', 'CUSTOM_COMMAND', 'PHONY')
elem.add_item('COMMAND', [ninja_command, '-t', 'clean'])
elem.add_item('description', 'Cleaning')
if self.environment.coredata.get_builtin_option('coverage'):
self.generate_gcov_clean(outfile)
elem.add_dep('clean-gcda')
elem.add_dep('clean-gcno')
elem.write(outfile)
self.check_outputs(elem)
deps = self.get_regen_filelist()
elem = NinjaBuildElement('build.ninja', 'REGENERATE_BUILD', deps)
elem.add_item('pool', 'console')
elem.write(outfile)
elem = NinjaBuildElement(deps, 'phony', '')
elem.write(outfile)
self.check_outputs(elem)
| en | 0.889518 | # Copyright 2012-2014 The Meson development team # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # This is the only way I could find to make this work on all # platforms including Windows command shell. Slash is a dir separator # on Windows, too, so all characters are unambiguous and, more importantly, # do not require quoting. # Hackety hack hack VS writes its dependency in a locale dependent format. Detect the search prefix to use. #include<stdio.h> int dummy; # Only ovewrite the old build file after the new one has been # fully created. # http://clang.llvm.org/docs/JSONCompilationDatabase.html # Get all generated headers. Any source file might need them so # we need to add an order dependency to them. # The following deals with C/C++ compilation. # Generated sources that must be built before compiling a Unity target. # Assume anything not specifically a source file is a header. This is because # people generate files with weird suffixes (.inc, .fh) that they then include # in their source files. # Generated targets are ordered deps because the must exist # before the sources compiling them are used. After the first # compile we get precise dependency info from dep files. # This should work in all cases. If it does not, then just # move them from orderdeps to proper deps. # FIXME, should not grab element at zero but rather expand all. # FIXME: assumes only one po package per source # And then benchmarks. # target type.') Vala is compiled into C. Set up all necessary build steps here. #, '-o', cname] # We need absolute paths because swiftc needs to be invoked in a subdir # and this is the easiest way about it. # Relative to swift invocation dir # Relative to build.ninja # Swiftc does not seem to be able to emit objects and module files in one go. command = %s @$out.rsp rspfile = $out.rsp rspfile_content = $LINK_ARGS %s $in # Native compiler masquerades as the cross compiler. command = %s @$out.rsp rspfile = $out.rsp rspfile_content = %s $ARGS %s $in $LINK_ARGS $aliasing # ValaC does this always to take advantage of it. # Workaround for these issues: # https://groups.google.com/forum/#!topic/ninja-build/j-2RfBIOd_8 # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=47485 rule FORTRAN_DEP_HACK command = %s description = Dep hack restat = 1 command = %s @$out.rsp rspfile = $out.rsp rspfile_content = %s $ARGS %s %s %s $in # In case we are going a target-only build, make the native compilers # masquerade as cross compilers. # Customtarget has already written its output rules # We have consumed output files, so drop them from the list of remaining outputs. # FIXME, does not work for generated Fortran sources, # but those are really rare. I hope. # MODULE PROCEDURE construct # The module is not provided by any source file. This is due to # a) missing file/typo/etc # b) using a module provided by the compiler, such as OpenMP # There's no easy way to tell which is which (that I know of) # so just ignore this and go on. 
Ideally we would print a # warning message to the user but this is a common occurrance, # which would lead to lots of distracting noise. # Check if a source uses a module it exports itself. # Potential bug if multiple targets have a file with # the same name. # Dependency hack. Remove once multiple outputs in Ninja is fixed: # https://groups.google.com/forum/#!topic/ninja-build/j-2RfBIOd_8 # Fortran is a bit weird (again). When you link against a library, just compiling a source file # requires the mod files that are output when single files are built. To do this right we would need to # scan all inputs and write out explicit deps for each file. That is stoo slow and too much effort so # instead just have an ordered dependendy on the library. This ensures all required mod files are created. # The real deps are then detected via dep file generation from the compiler. This breaks on compilers that # produce incorrect dep files but such is life. # Gcc does not create an object file during pch generation. # Fixme. # Link arguments of static libraries are not put in the command line of # the library. They are instead appended to the command line where # the static library is used. # External deps must be last because target link libraries may depend on them. | 1.907139 | 2 |
tests/strategies/common/test_cputime.py | y-tetsu/othello | 10 | 8275 | """Tests of cputime.py
"""
import unittest
from reversi.strategies.common import CPU_TIME
class TestCputime(unittest.TestCase):
"""cputime
"""
def test_cputime(self):
self.assertEqual(CPU_TIME, 0.5)
| """Tests of cputime.py
"""
import unittest
from reversi.strategies.common import CPU_TIME
class TestCputime(unittest.TestCase):
"""cputime
"""
def test_cputime(self):
self.assertEqual(CPU_TIME, 0.5)
| en | 0.526849 | Tests of cputime.py cputime | 2.305937 | 2 |
experiments/cifar10_recon.py | coopersigrist/RecurrentNeuralSystem- | 3 | 8276 | <reponame>coopersigrist/RecurrentNeuralSystem-
# -*- coding: utf-8 -*-
"""ReNS experiments - CIFAR10
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1byZ4xTfCK2x1Rhkxpl-Vv4sqA-bo4bis
# SETUP
"""
#@title Installing PyTorch
# !pip install torch
# !pip install torchvision
#@title Import Dependencies
import numpy as np
import torch
import torch.nn as nn
import torchvision.datasets as dsets
import torchvision.transforms as transforms
from torch.autograd import Variable
from tqdm import tqdm
from typing import Optional, Union, Tuple, List, Sequence, Iterable
import math
from scipy.spatial.distance import euclidean
from torch.nn.modules.utils import _pair
from torchvision import models
from sklearn.metrics import jaccard_score
import matplotlib.pyplot as plt
from models.models import RegularAutoEncoder, ModulatedAutoEncoder, PseudoRecAutoEncoder
"""# TRAINING"""
batch_size = 32
num_epochs = 5
transform = transforms.Compose(
[transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
# Load CIFAR10 data.
train_data = dsets.CIFAR10(root = './data', train = True,
transform = transform, download = True)
test_data = dsets.CIFAR10(root = './data', train = False,
transform = transform)
train_gen = torch.utils.data.DataLoader(dataset = train_data,
batch_size = batch_size,
shuffle = True)
test_gen = torch.utils.data.DataLoader(dataset = test_data,
batch_size = batch_size,
shuffle = False)
reflexor_size = 500
image_size = 32
channels = 3
# net = recurrentLayer(784, 784, 10, 5, 10, 0)
net1 = RegularAutoEncoder(channels * image_size ** 2, channels * image_size ** 2, reflexor_size)
net2 = ModulatedAutoEncoder(channels * image_size ** 2, channels * image_size ** 2, reflexor_size)
net3 = PseudoRecAutoEncoder(channels * image_size ** 2, channels * image_size ** 2, reflexor_size)
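# Shape-contract sketch (commented out; the autoencoder classes live in models.models
# and are assumed to map a (B, 3, 32, 32) batch to a flattened
# (B, channels * image_size ** 2) reconstruction, which is what the MSE loss below expects):
# dummy = torch.randn(2, channels, image_size, image_size)
# assert net1(dummy).shape == (2, channels * image_size ** 2)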
lr = .0001 # size of step
loss_function = nn.MSELoss()
# Unnormalize the image to display it
def img_fix(img):
return np.transpose((img / 2 + 0.5).numpy(), (1, 2, 0))
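# Usage sketch for img_fix(): undo the (0.5, 0.5, 0.5) normalization and move channels
# last so a sample can be displayed with matplotlib, e.g.
# sample, _ = train_data[0]
# plt.imshow(img_fix(sample))
# plt.axis('off')
# plt.show()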
# Commented out IPython magic to ensure Python compatibility.
train_losses = [[],[],[]]
test_losses = [[],[],[]]
real_imgs = [[],[],[]]
reconstructed_imgs = [[],[],[]]
param_counts = np.ones(3)
steps = [[],[],[]]
for num, net in enumerate([net1, net2, net3]):
optimizer = torch.optim.Adam( net.parameters(), lr=lr)
param_counts[num] = (sum(p.numel() for p in net.parameters() if p.requires_grad))
for epoch in range(num_epochs):
for i ,(images,labels) in enumerate(train_gen):
#images = Variable(images.view(-1,28*28))
labels = Variable(images.view(-1,3 * image_size ** 2))
optimizer.zero_grad()
outputs = net(images)
loss = loss_function(outputs, labels)
loss.backward()
optimizer.step()
if (i+1) % 300 == 0:
temp_loss = loss.item()
print('Epoch [%d/%d], Step [%d/%d], Loss: %.4f'
%(epoch+1, num_epochs, i+1, len(train_data)//batch_size, temp_loss))
dupe = Variable(outputs[0].data, requires_grad=False)
# plt.imshow(img_fix(images[0]))
# plt.show()
# plt.imshow(img_fix(dupe.view(3, image_size, image_size)))
# plt.show()
train_losses[num].append(temp_loss)
steps[num].append((50000 * epoch) + ((i + 1) * batch_size))
real_imgs[num].append(img_fix(images[0]))
reconstructed_imgs[num].append(img_fix(dupe.view(3, image_size, image_size)))
# Test Data
score = 0
total = 0
for images,labels in test_gen:
#images = Variable(images.view(-1,784))
output = net(images)
score += loss_function(output, images.view(-1, 3 * image_size ** 2)).item()
test_losses[num].append((score))
plt.plot(steps[0], train_losses[0], label= "Baseline")
plt.plot(steps[1], train_losses[1], label= "Modulated")
plt.plot(steps[2], train_losses[2], label= "Recurrent with Modulation")
plt.xlabel('Iteration')
plt.ylabel('Loss')
plt.title('Training loss history')
plt.legend()
plt.show()
plt.plot(steps[0], test_losses[0], label= "Baseline")
plt.plot(steps[1], test_losses[1], label= "Modulated")
plt.plot(steps[2], test_losses[2], label= "Recurrent with Modulation")
plt.xlabel('Iteration')
plt.ylabel('Loss')
plt.title('Testing loss history')
plt.legend()
plt.show()
for num,count in enumerate(param_counts):
param_counts[num] /= 1000
plt.bar(["Base", "Modulated", "ReNS"], param_counts)
plt.xlabel('Model')
plt.ylabel('# of thousands of Parameters')
plt.show()
from mpl_toolkits.axes_grid1 import ImageGrid
num_samples = len(real_imgs[0])
for num in [0,1,2]:
fig = plt.figure(figsize=(20.,20.))
grid = ImageGrid(fig, 111, # similar to subplot(111)
nrows_ncols=(2, num_samples), # 2 rows x num_samples columns of axes
axes_pad=0.1, # pad between axes in inch.
)
for ax, im in zip(grid, real_imgs[num]+reconstructed_imgs[num]):
# Iterating over the grid returns the Axes.
ax.imshow(im)
ax.axis("off")
plt.show()
| # -*- coding: utf-8 -*-
"""ReNS experiments - CIFAR10
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1byZ4xTfCK2x1Rhkxpl-Vv4sqA-bo4bis
# SETUP
"""
#@title Installing PyTorch
# !pip install torch
# !pip install torchvision
#@title Import Dependencies
import numpy as np
import torch
import torch.nn as nn
import torchvision.datasets as dsets
import torchvision.transforms as transforms
from torch.autograd import Variable
from tqdm import tqdm
from typing import Optional, Union, Tuple, List, Sequence, Iterable
import math
from scipy.spatial.distance import euclidean
from torch.nn.modules.utils import _pair
from torchvision import models
from sklearn.metrics import jaccard_score
import matplotlib.pyplot as plt
from models.models import RegularAutoEncoder, ModulatedAutoEncoder, PseudoRecAutoEncoder
"""# TRAINING"""
batch_size = 32
num_epochs = 5
transform = transforms.Compose(
[transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
# Load CIFAR10 data.
train_data = dsets.CIFAR10(root = './data', train = True,
transform = transform, download = True)
test_data = dsets.CIFAR10(root = './data', train = False,
transform = transform)
train_gen = torch.utils.data.DataLoader(dataset = train_data,
batch_size = batch_size,
shuffle = True)
test_gen = torch.utils.data.DataLoader(dataset = test_data,
batch_size = batch_size,
shuffle = False)
reflexor_size = 500
image_size = 32
channels = 3
# net = recurrentLayer(784, 784, 10, 5, 10, 0)
net1 = RegularAutoEncoder(channels * image_size ** 2, channels * image_size ** 2, reflexor_size)
net2 = ModulatedAutoEncoder(channels * image_size ** 2, channels * image_size ** 2, reflexor_size)
net3 = PseudoRecAutoEncoder(channels * image_size ** 2, channels * image_size ** 2, reflexor_size)
lr = .0001 # size of step
loss_function = nn.MSELoss()
# Unnormalize the image to display it
def img_fix(img):
return np.transpose((img / 2 + 0.5).numpy(), (1, 2, 0))
# Commented out IPython magic to ensure Python compatibility.
train_losses = [[],[],[]]
test_losses = [[],[],[]]
real_imgs = [[],[],[]]
reconstructed_imgs = [[],[],[]]
param_counts = np.ones(3)
steps = [[],[],[]]
for num, net in enumerate([net1, net2, net3]):
optimizer = torch.optim.Adam( net.parameters(), lr=lr)
param_counts[num] = (sum(p.numel() for p in net.parameters() if p.requires_grad))
for epoch in range(num_epochs):
for i ,(images,labels) in enumerate(train_gen):
#images = Variable(images.view(-1,28*28))
labels = Variable(images.view(-1,3 * image_size ** 2))
optimizer.zero_grad()
outputs = net(images)
loss = loss_function(outputs, labels)
loss.backward()
optimizer.step()
if (i+1) % 300 == 0:
temp_loss = loss.item()
print('Epoch [%d/%d], Step [%d/%d], Loss: %.4f'
%(epoch+1, num_epochs, i+1, len(train_data)//batch_size, temp_loss))
dupe = Variable(outputs[0].data, requires_grad=False)
# plt.imshow(img_fix(images[0]))
# plt.show()
# plt.imshow(img_fix(dupe.view(3, image_size, image_size)))
# plt.show()
train_losses[num].append(temp_loss)
steps[num].append((50000 * epoch) + ((i + 1) * batch_size))
real_imgs[num].append(img_fix(images[0]))
reconstructed_imgs[num].append(img_fix(dupe.view(3, image_size, image_size)))
# Test Data
score = 0
total = 0
for images,labels in test_gen:
#images = Variable(images.view(-1,784))
output = net(images)
score += loss_function(output, images.view(-1, 3 * image_size ** 2)).item()
test_losses[num].append((score))
plt.plot(steps[0], train_losses[0], label= "Baseline")
plt.plot(steps[1], train_losses[1], label= "Modulated")
plt.plot(steps[2], train_losses[2], label= "Recurrent with Modulation")
plt.xlabel('Iteration')
plt.ylabel('Loss')
plt.title('Training loss history')
plt.legend()
plt.show()
plt.plot(steps[0], test_losses[0], label= "Baseline")
plt.plot(steps[1], test_losses[1], label= "Modulated")
plt.plot(steps[2], test_losses[2], label= "Recurrent with Modulation")
plt.xlabel('Iteration')
plt.ylabel('Loss')
plt.title('Testing loss history')
plt.legend()
plt.show()
for num,count in enumerate(param_counts):
param_counts[num] /= 1000
plt.bar(["Base", "Modulated", "ReNS"], param_counts)
plt.xlabel('Model')
plt.ylabel('# of thousands of Parameters')
plt.show()
from mpl_toolkits.axes_grid1 import ImageGrid
num_samples = len(real_imgs[0])
for num in [0,1,2]:
fig = plt.figure(figsize=(20.,20.))
grid = ImageGrid(fig, 111, # similar to subplot(111)
nrows_ncols=(2, num_samples), # 2 rows x num_samples columns of axes
axes_pad=0.1, # pad between axes in inch.
)
for ax, im in zip(grid, real_imgs[num]+reconstructed_imgs[num]):
# Iterating over the grid returns the Axes.
ax.imshow(im)
ax.axis("off")
plt.show() | en | 0.592041 | # -*- coding: utf-8 -*- ReNS experiments - CIFAR10 Automatically generated by Colaboratory. Original file is located at https://colab.research.google.com/drive/1byZ4xTfCK2x1Rhkxpl-Vv4sqA-bo4bis # SETUP #@title Insatlling Pyorch # !pip install torch # !pip install torchvision #@title Import Dependencies # TRAINING # Load MNIST data. # net = recurrentLayer(784, 784, 10, 5, 10, 0) # size of step # Unnormalize the image to display it # Commented out IPython magic to ensure Python compatibility. #images = Variable(images.view(-1,28*28)) # plt.imshow(img_fix(images[0])) # plt.show() # plt.imshow(img_fix(dupe.view(3, image_size, image_size))) # plt.show() # Test Data #images = Variable(images.view(-1,784)) # similar to subplot(111) # creates 2x2 grid of axes # pad between axes in inch. # Iterating over the grid returns the Axes. | 2.436215 | 2 |
horizon/forms/__init__.py | ameoba/horizon | 2 | 8277 | <gh_stars>1-10
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# FIXME(gabriel): Legacy imports for API compatibility.
from django.forms import * # noqa
from django.forms import widgets
# Convenience imports for public API components.
from horizon.forms.base import DateForm # noqa
from horizon.forms.base import SelfHandlingForm # noqa
from horizon.forms.base import SelfHandlingMixin # noqa
from horizon.forms.fields import DynamicChoiceField # noqa
from horizon.forms.fields import DynamicTypedChoiceField # noqa
from horizon.forms.views import ModalFormMixin # noqa
from horizon.forms.views import ModalFormView # noqa
assert widgets
assert SelfHandlingMixin
assert SelfHandlingForm
assert DateForm
assert ModalFormView
assert ModalFormMixin
assert DynamicTypedChoiceField
assert DynamicChoiceField
| # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# FIXME(gabriel): Legacy imports for API compatibility.
from django.forms import * # noqa
from django.forms import widgets
# Convenience imports for public API components.
from horizon.forms.base import DateForm # noqa
from horizon.forms.base import SelfHandlingForm # noqa
from horizon.forms.base import SelfHandlingMixin # noqa
from horizon.forms.fields import DynamicChoiceField # noqa
from horizon.forms.fields import DynamicTypedChoiceField # noqa
from horizon.forms.views import ModalFormMixin # noqa
from horizon.forms.views import ModalFormView # noqa
assert widgets
assert SelfHandlingMixin
assert SelfHandlingForm
assert DateForm
assert ModalFormView
assert ModalFormMixin
assert DynamicTypedChoiceField
assert DynamicChoiceField | en | 0.771797 | # vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2012 Nebula, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # FIXME(gabriel): Legacy imports for API compatibility. # noqa # Convenience imports for public API components. # noqa # noqa # noqa # noqa # noqa # noqa # noqa | 1.196175 | 1 |
heat/tests/test_rpc_listener_client.py | noironetworks/heat | 1 | 8278 | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
import oslo_messaging as messaging
from heat.rpc import api as rpc_api
from heat.rpc import listener_client as rpc_client
from heat.tests import common
class ListenerClientTest(common.HeatTestCase):
@mock.patch('heat.common.messaging.get_rpc_client',
return_value=mock.Mock())
def test_engine_alive_ok(self, rpc_client_method):
mock_rpc_client = rpc_client_method.return_value
mock_prepare_method = mock_rpc_client.prepare
mock_prepare_client = mock_prepare_method.return_value
mock_cnxt = mock.Mock()
listener_client = rpc_client.EngineListenerClient('engine-007')
rpc_client_method.assert_called_once_with(
version=rpc_client.EngineListenerClient.BASE_RPC_API_VERSION,
topic=rpc_api.LISTENER_TOPIC, server='engine-007',
)
mock_prepare_method.assert_called_once_with(timeout=2)
self.assertEqual(mock_prepare_client,
listener_client._client,
"Failed to create RPC client")
ret = listener_client.is_alive(mock_cnxt)
self.assertTrue(ret)
mock_prepare_client.call.assert_called_once_with(mock_cnxt,
'listening')
@mock.patch('heat.common.messaging.get_rpc_client',
return_value=mock.Mock())
def test_engine_alive_timeout(self, rpc_client_method):
mock_rpc_client = rpc_client_method.return_value
mock_prepare_method = mock_rpc_client.prepare
mock_prepare_client = mock_prepare_method.return_value
mock_cnxt = mock.Mock()
listener_client = rpc_client.EngineListenerClient('engine-007')
rpc_client_method.assert_called_once_with(
version=rpc_client.EngineListenerClient.BASE_RPC_API_VERSION,
topic=rpc_api.LISTENER_TOPIC, server='engine-007',
)
mock_prepare_method.assert_called_once_with(timeout=2)
self.assertEqual(mock_prepare_client,
listener_client._client,
"Failed to create RPC client")
mock_prepare_client.call.side_effect = messaging.MessagingTimeout(
'too slow')
ret = listener_client.is_alive(mock_cnxt)
self.assertFalse(ret)
mock_prepare_client.call.assert_called_once_with(mock_cnxt,
'listening')
| # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
import oslo_messaging as messaging
from heat.rpc import api as rpc_api
from heat.rpc import listener_client as rpc_client
from heat.tests import common
class ListenerClientTest(common.HeatTestCase):
@mock.patch('heat.common.messaging.get_rpc_client',
return_value=mock.Mock())
def test_engine_alive_ok(self, rpc_client_method):
mock_rpc_client = rpc_client_method.return_value
mock_prepare_method = mock_rpc_client.prepare
mock_prepare_client = mock_prepare_method.return_value
mock_cnxt = mock.Mock()
listener_client = rpc_client.EngineListenerClient('engine-007')
rpc_client_method.assert_called_once_with(
version=rpc_client.EngineListenerClient.BASE_RPC_API_VERSION,
topic=rpc_api.LISTENER_TOPIC, server='engine-007',
)
mock_prepare_method.assert_called_once_with(timeout=2)
self.assertEqual(mock_prepare_client,
listener_client._client,
"Failed to create RPC client")
ret = listener_client.is_alive(mock_cnxt)
self.assertTrue(ret)
mock_prepare_client.call.assert_called_once_with(mock_cnxt,
'listening')
@mock.patch('heat.common.messaging.get_rpc_client',
return_value=mock.Mock())
def test_engine_alive_timeout(self, rpc_client_method):
mock_rpc_client = rpc_client_method.return_value
mock_prepare_method = mock_rpc_client.prepare
mock_prepare_client = mock_prepare_method.return_value
mock_cnxt = mock.Mock()
listener_client = rpc_client.EngineListenerClient('engine-007')
rpc_client_method.assert_called_once_with(
version=rpc_client.EngineListenerClient.BASE_RPC_API_VERSION,
topic=rpc_api.LISTENER_TOPIC, server='engine-007',
)
mock_prepare_method.assert_called_once_with(timeout=2)
self.assertEqual(mock_prepare_client,
listener_client._client,
"Failed to create RPC client")
mock_prepare_client.call.side_effect = messaging.MessagingTimeout(
'too slow')
ret = listener_client.is_alive(mock_cnxt)
self.assertFalse(ret)
mock_prepare_client.call.assert_called_once_with(mock_cnxt,
'listening')
| en | 0.859194 | # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. | 2.174883 | 2 |
amadeus/travel/trip_parser_jobs/_status.py | akshitsingla/amadeus-python | 125 | 8279 | <reponame>akshitsingla/amadeus-python<gh_stars>100-1000
from amadeus.client.decorator import Decorator
class TripParserStatus(Decorator, object):
def __init__(self, client, job_id):
Decorator.__init__(self, client)
self.job_id = job_id
def get(self, **params):
'''
Returns the parsing status and the link to the result
in case of successful parsing.
.. code-block:: python
amadeus.travel.trip_parser_jobs.status('XXX').get
:rtype: amadeus.Response
:raises amadeus.ResponseError: if the request could not be completed
'''
return self.client.get(
'/v2/travel/trip-parser-jobs/{0}'.format(self.job_id),
**params)
| from amadeus.client.decorator import Decorator
class TripParserStatus(Decorator, object):
def __init__(self, client, job_id):
Decorator.__init__(self, client)
self.job_id = job_id
def get(self, **params):
'''
Returns the parsing status and the link to the result
in case of successful parsing.
.. code-block:: python
amadeus.travel.trip_parser_jobs.status('XXX').get
:rtype: amadeus.Response
:raises amadeus.ResponseError: if the request could not be completed
'''
return self.client.get(
'/v2/travel/trip-parser-jobs/{0}'.format(self.job_id),
**params) | en | 0.57136 | Returns the parsing status and the link to the result in case of successful parsing. .. code-block:: python amadeus.travel.trip_parser_jobs.status('XXX').get :rtype: amadeus.Response :raises amadeus.ResponseError: if the request could not be completed | 2.625707 | 3 |
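# Usage sketch for the TripParserStatus wrapper shown above (client credentials and
# the job id are invented; ResponseError handling follows the docstring's note):
# from amadeus import Client, ResponseError
# amadeus = Client(client_id='...', client_secret='...')
# try:
#     response = amadeus.travel.trip_parser_jobs.status('XXX').get()
#     print(response.data)
# except ResponseError as error:
#     print(error)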
tools/third_party/iniconfig/testing/test_iniconfig.py | meyerweb/wpt | 2,479 | 8280 | <gh_stars>1000+
import py
import pytest
from iniconfig import IniConfig, ParseError, __all__ as ALL
from iniconfig import iscommentline
from textwrap import dedent
check_tokens = {
'section': (
'[section]',
[(0, 'section', None, None)]
),
'value': (
'value = 1',
[(0, None, 'value', '1')]
),
'value in section': (
'[section]\nvalue=1',
[(0, 'section', None, None), (1, 'section', 'value', '1')]
),
'value with continuation': (
'names =\n Alice\n Bob',
[(0, None, 'names', 'Alice\nBob')]
),
'value with aligned continuation': (
'names = Alice\n'
' Bob',
[(0, None, 'names', 'Alice\nBob')]
),
'blank line': (
'[section]\n\nvalue=1',
[(0, 'section', None, None), (2, 'section', 'value', '1')]
),
'comment': (
'# comment',
[]
),
'comment on value': (
'value = 1',
[(0, None, 'value', '1')]
),
'comment on section': (
'[section] #comment',
[(0, 'section', None, None)]
),
'comment2': (
'; comment',
[]
),
'comment2 on section': (
'[section] ;comment',
[(0, 'section', None, None)]
),
'pseudo section syntax in value': (
'name = value []',
[(0, None, 'name', 'value []')]
),
'assignment in value': (
'value = x = 3',
[(0, None, 'value', 'x = 3')]
),
'use of colon for name-values': (
'name: y',
[(0, None, 'name', 'y')]
),
'use of colon without space': (
'value:y=5',
[(0, None, 'value', 'y=5')]
),
'equality gets precedence': (
'value=xyz:5',
[(0, None, 'value', 'xyz:5')]
),
}
@pytest.fixture(params=sorted(check_tokens))
def input_expected(request):
return check_tokens[request.param]
@pytest.fixture
def input(input_expected):
return input_expected[0]
@pytest.fixture
def expected(input_expected):
return input_expected[1]
def parse(input):
# only for testing purposes - _parse() does not use state except path
ini = object.__new__(IniConfig)
ini.path = "sample"
return ini._parse(input.splitlines(True))
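# Worked example: parse() yields (lineno, section, name, value) tuples in the same
# shape as the check_tokens table above.
def test_parse_section_and_value_sketch():
    assert parse('[section]\nvalue = 1') == [
        (0, 'section', None, None),
        (1, 'section', 'value', '1'),
    ]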
def parse_a_error(input):
return py.test.raises(ParseError, parse, input)
def test_tokenize(input, expected):
parsed = parse(input)
assert parsed == expected
def test_parse_empty():
parsed = parse("")
assert not parsed
ini = IniConfig("sample", "")
assert not ini.sections
def test_ParseError():
e = ParseError("filename", 0, "hello")
assert str(e) == "filename:1: hello"
def test_continuation_needs_preceding_token():
excinfo = parse_a_error(' Foo')
assert excinfo.value.lineno == 0
def test_continuation_cant_be_after_section():
excinfo = parse_a_error('[section]\n Foo')
assert excinfo.value.lineno == 1
def test_section_cant_be_empty():
excinfo = parse_a_error('[]')
assert excinfo.value.lineno == 0
@py.test.mark.parametrize('line', [
'!!',
])
def test_error_on_weird_lines(line):
parse_a_error(line)
def test_iniconfig_from_file(tmpdir):
path = tmpdir/'test.txt'
path.write('[metadata]\nname=1')
config = IniConfig(path=path)
assert list(config.sections) == ['metadata']
config = IniConfig(path, "[diff]")
assert list(config.sections) == ['diff']
with pytest.raises(TypeError):
IniConfig(data=path.read())
def test_iniconfig_section_first(tmpdir):
with pytest.raises(ParseError) as excinfo:
IniConfig("x", data='name=1')
assert excinfo.value.msg == "no section header defined"
def test_iniconfig_section_duplicate_fails():
with pytest.raises(ParseError) as excinfo:
IniConfig("x", data='[section]\n[section]')
assert 'duplicate section' in str(excinfo.value)
def test_iniconfig_duplicate_key_fails():
with pytest.raises(ParseError) as excinfo:
IniConfig("x", data='[section]\nname = Alice\nname = bob')
assert 'duplicate name' in str(excinfo.value)
def test_iniconfig_lineof():
config = IniConfig("x.ini", data=(
'[section]\n'
'value = 1\n'
'[section2]\n'
'# comment\n'
'value =2'
))
assert config.lineof('missing') is None
assert config.lineof('section') == 1
assert config.lineof('section2') == 3
assert config.lineof('section', 'value') == 2
assert config.lineof('section2', 'value') == 5
assert config['section'].lineof('value') == 2
assert config['section2'].lineof('value') == 5
def test_iniconfig_get_convert():
config = IniConfig("x", data='[section]\nint = 1\nfloat = 1.1')
assert config.get('section', 'int') == '1'
assert config.get('section', 'int', convert=int) == 1
def test_iniconfig_get_missing():
config = IniConfig("x", data='[section]\nint = 1\nfloat = 1.1')
assert config.get('section', 'missing', default=1) == 1
assert config.get('section', 'missing') is None
def test_section_get():
config = IniConfig("x", data='[section]\nvalue=1')
section = config['section']
assert section.get('value', convert=int) == 1
assert section.get('value', 1) == "1"
assert section.get('missing', 2) == 2
def test_missing_section():
config = IniConfig("x", data='[section]\nvalue=1')
with pytest.raises(KeyError):
config["other"]
def test_section_getitem():
config = IniConfig("x", data='[section]\nvalue=1')
assert config['section']['value'] == '1'
assert config['section']['value'] == '1'
def test_section_iter():
config = IniConfig("x", data='[section]\nvalue=1')
names = list(config['section'])
assert names == ['value']
items = list(config['section'].items())
assert items == [('value', '1')]
def test_config_iter():
config = IniConfig("x.ini", data=dedent('''
[section1]
value=1
[section2]
value=2
'''))
l = list(config)
assert len(l) == 2
assert l[0].name == 'section1'
assert l[0]['value'] == '1'
assert l[1].name == 'section2'
assert l[1]['value'] == '2'
def test_config_contains():
config = IniConfig("x.ini", data=dedent('''
[section1]
value=1
[section2]
value=2
'''))
assert 'xyz' not in config
assert 'section1' in config
assert 'section2' in config
def test_iter_file_order():
config = IniConfig("x.ini", data="""
[section2] #cpython dict ordered before section
value = 1
value2 = 2 # dict ordered before value
[section]
a = 1
b = 2
""")
l = list(config)
secnames = [x.name for x in l]
assert secnames == ['section2', 'section']
assert list(config['section2']) == ['value', 'value2']
assert list(config['section']) == ['a', 'b']
def test_example_pypirc():
config = IniConfig("pypirc", data=dedent('''
[distutils]
index-servers =
pypi
other
[pypi]
repository: <repository-url>
username: <username>
password: <password>
[other]
repository: http://example.com/pypi
username: <username>
password: <password>
'''))
distutils, pypi, other = list(config)
assert distutils["index-servers"] == "pypi\nother"
assert pypi['repository'] == '<repository-url>'
assert pypi['username'] == '<username>'
assert pypi['password'] == '<password>'
assert ['repository', 'username', 'password'] == list(other)
def test_api_import():
assert ALL == ['IniConfig', 'ParseError']
@pytest.mark.parametrize("line", [
"#qwe",
" #qwe",
";qwe",
" ;qwe",
])
def test_iscommentline_true(line):
assert iscommentline(line)
| import py
import pytest
from iniconfig import IniConfig, ParseError, __all__ as ALL
from iniconfig import iscommentline
from textwrap import dedent
check_tokens = {
'section': (
'[section]',
[(0, 'section', None, None)]
),
'value': (
'value = 1',
[(0, None, 'value', '1')]
),
'value in section': (
'[section]\nvalue=1',
[(0, 'section', None, None), (1, 'section', 'value', '1')]
),
'value with continuation': (
'names =\n Alice\n Bob',
[(0, None, 'names', 'Alice\nBob')]
),
'value with aligned continuation': (
'names = Alice\n'
' Bob',
[(0, None, 'names', 'Alice\nBob')]
),
'blank line': (
'[section]\n\nvalue=1',
[(0, 'section', None, None), (2, 'section', 'value', '1')]
),
'comment': (
'# comment',
[]
),
'comment on value': (
'value = 1',
[(0, None, 'value', '1')]
),
'comment on section': (
'[section] #comment',
[(0, 'section', None, None)]
),
'comment2': (
'; comment',
[]
),
'comment2 on section': (
'[section] ;comment',
[(0, 'section', None, None)]
),
'pseudo section syntax in value': (
'name = value []',
[(0, None, 'name', 'value []')]
),
'assignment in value': (
'value = x = 3',
[(0, None, 'value', 'x = 3')]
),
'use of colon for name-values': (
'name: y',
[(0, None, 'name', 'y')]
),
'use of colon without space': (
'value:y=5',
[(0, None, 'value', 'y=5')]
),
'equality gets precedence': (
'value=xyz:5',
[(0, None, 'value', 'xyz:5')]
),
}
@pytest.fixture(params=sorted(check_tokens))
def input_expected(request):
return check_tokens[request.param]
@pytest.fixture
def input(input_expected):
return input_expected[0]
@pytest.fixture
def expected(input_expected):
return input_expected[1]
def parse(input):
# only for testing purposes - _parse() does not use state except path
ini = object.__new__(IniConfig)
ini.path = "sample"
return ini._parse(input.splitlines(True))
def parse_a_error(input):
return py.test.raises(ParseError, parse, input)
def test_tokenize(input, expected):
parsed = parse(input)
assert parsed == expected
def test_parse_empty():
parsed = parse("")
assert not parsed
ini = IniConfig("sample", "")
assert not ini.sections
def test_ParseError():
e = ParseError("filename", 0, "hello")
assert str(e) == "filename:1: hello"
def test_continuation_needs_preceding_token():
excinfo = parse_a_error(' Foo')
assert excinfo.value.lineno == 0
def test_continuation_cant_be_after_section():
excinfo = parse_a_error('[section]\n Foo')
assert excinfo.value.lineno == 1
def test_section_cant_be_empty():
excinfo = parse_a_error('[]')
assert excinfo.value.lineno == 0
@py.test.mark.parametrize('line', [
'!!',
])
def test_error_on_weird_lines(line):
parse_a_error(line)
def test_iniconfig_from_file(tmpdir):
path = tmpdir/'test.txt'
path.write('[metadata]\nname=1')
config = IniConfig(path=path)
assert list(config.sections) == ['metadata']
config = IniConfig(path, "[diff]")
assert list(config.sections) == ['diff']
with pytest.raises(TypeError):
IniConfig(data=path.read())
def test_iniconfig_section_first(tmpdir):
with pytest.raises(ParseError) as excinfo:
IniConfig("x", data='name=1')
assert excinfo.value.msg == "no section header defined"
def test_iniconfig_section_duplicate_fails():
with pytest.raises(ParseError) as excinfo:
IniConfig("x", data='[section]\n[section]')
assert 'duplicate section' in str(excinfo.value)
def test_iniconfig_duplicate_key_fails():
with pytest.raises(ParseError) as excinfo:
IniConfig("x", data='[section]\nname = Alice\nname = bob')
assert 'duplicate name' in str(excinfo.value)
def test_iniconfig_lineof():
config = IniConfig("x.ini", data=(
'[section]\n'
'value = 1\n'
'[section2]\n'
'# comment\n'
'value =2'
))
assert config.lineof('missing') is None
assert config.lineof('section') == 1
assert config.lineof('section2') == 3
assert config.lineof('section', 'value') == 2
assert config.lineof('section2', 'value') == 5
assert config['section'].lineof('value') == 2
assert config['section2'].lineof('value') == 5
def test_iniconfig_get_convert():
config = IniConfig("x", data='[section]\nint = 1\nfloat = 1.1')
assert config.get('section', 'int') == '1'
assert config.get('section', 'int', convert=int) == 1
def test_iniconfig_get_missing():
config = IniConfig("x", data='[section]\nint = 1\nfloat = 1.1')
assert config.get('section', 'missing', default=1) == 1
assert config.get('section', 'missing') is None
def test_section_get():
config = IniConfig("x", data='[section]\nvalue=1')
section = config['section']
assert section.get('value', convert=int) == 1
assert section.get('value', 1) == "1"
assert section.get('missing', 2) == 2
def test_missing_section():
config = IniConfig("x", data='[section]\nvalue=1')
with pytest.raises(KeyError):
config["other"]
def test_section_getitem():
config = IniConfig("x", data='[section]\nvalue=1')
assert config['section']['value'] == '1'
assert config['section']['value'] == '1'
def test_section_iter():
config = IniConfig("x", data='[section]\nvalue=1')
names = list(config['section'])
assert names == ['value']
items = list(config['section'].items())
assert items == [('value', '1')]
def test_config_iter():
config = IniConfig("x.ini", data=dedent('''
[section1]
value=1
[section2]
value=2
'''))
l = list(config)
assert len(l) == 2
assert l[0].name == 'section1'
assert l[0]['value'] == '1'
assert l[1].name == 'section2'
assert l[1]['value'] == '2'
def test_config_contains():
config = IniConfig("x.ini", data=dedent('''
[section1]
value=1
[section2]
value=2
'''))
assert 'xyz' not in config
assert 'section1' in config
assert 'section2' in config
def test_iter_file_order():
config = IniConfig("x.ini", data="""
[section2] #cpython dict ordered before section
value = 1
value2 = 2 # dict ordered before value
[section]
a = 1
b = 2
""")
l = list(config)
secnames = [x.name for x in l]
assert secnames == ['section2', 'section']
assert list(config['section2']) == ['value', 'value2']
assert list(config['section']) == ['a', 'b']
def test_example_pypirc():
config = IniConfig("pypirc", data=dedent('''
[distutils]
index-servers =
pypi
other
[pypi]
repository: <repository-url>
username: <username>
password: <password>
[other]
repository: http://example.com/pypi
username: <username>
password: <password>
'''))
distutils, pypi, other = list(config)
assert distutils["index-servers"] == "pypi\nother"
assert pypi['repository'] == '<repository-url>'
assert pypi['username'] == '<username>'
assert pypi['password'] == '<password>'
assert ['repository', 'username', 'password'] == list(other)
def test_api_import():
assert ALL == ['IniConfig', 'ParseError']
@pytest.mark.parametrize("line", [
"#qwe",
" #qwe",
";qwe",
" ;qwe",
])
def test_iscommentline_true(line):
assert iscommentline(line) | en | 0.630085 | #comment', # only for testing purposes - _parse() does not use state except path [section1] value=1 [section2] value=2 [section1] value=1 [section2] value=2 [section2] #cpython dict ordered before section value = 1 value2 = 2 # dict ordered before value [section] a = 1 b = 2 [distutils] index-servers = pypi other [pypi] repository: <repository-url> username: <username> password: <password> [other] repository: http://example.com/pypi username: <username> password: <password> #qwe", | 2.486889 | 2 |
jskparser/jskparser/util.py | natebragg/java-sketch | 15 | 8281 | import os
from subprocess import call
from . import glob2
pwd = os.path.dirname(__file__)
def get_files_from_path(path, ext):
# use set to remove duplicate files. weird...but it happens
    if os.path.isfile(path): return [os.path.abspath(path)]
    else: # i.e., folder
        files = glob2.glob(os.path.abspath(os.path.join(path, "**/*.{}".format(ext))))
        return sorted(set(files)) # dedupe first, then sort so files are read in a deterministic order
"""
handling javajskparser AST
"""
def toAST(files, ext, add_libs):
prg_files = []
for f in files:
        prg_files.extend(get_files_from_path(f, ext))
if not prg_files: exit('jskparser.util: File(s) not found!')
java_in = os.path.abspath(os.path.join(pwd, '../tests/ir_asts/API.java'))
json_out = os.path.abspath(os.path.join(pwd, '../tests/ir_asts/java.json'))
if add_libs:
obj_path = os.path.abspath(os.path.join(pwd, '../../model/lang/Object.java'))
str_path = os.path.abspath(os.path.join(pwd, '../../model/lang/String.java'))
num_path = os.path.abspath(os.path.join(pwd, '../../model/lang/Number.java'))
int_path = os.path.abspath(os.path.join(pwd, '../../model/lang/Integer.java'))
char_path = os.path.abspath(os.path.join(pwd, '../../model/lang/Character.java'))
itbl_path = os.path.abspath(os.path.join(pwd, '../../model/lang/Iterable.java'))
iter_path = os.path.abspath(os.path.join(pwd, '../../model/util/Iterator.java'))
arr_path = os.path.abspath(os.path.join(pwd, '../../model/util/Arrays.java'))
list_path = os.path.abspath(os.path.join(pwd, '../../model/util/List.java'))
alist_path = os.path.abspath(os.path.join(pwd, '../../model/util/ArrayList.java'))
llist_path = os.path.abspath(os.path.join(pwd, '../../model/util/LinkedList.java'))
hmap_path = os.path.abspath(os.path.join(pwd, '../../model/util/HashMap.java'))
hset_path = os.path.abspath(os.path.join(pwd, '../../model/util/HashSet.java'))
if obj_path not in prg_files: prg_files.append(obj_path)
if str_path not in prg_files: prg_files.append(str_path)
if num_path not in prg_files: prg_files.append(num_path)
if int_path not in prg_files: prg_files.append(int_path)
if char_path not in prg_files: prg_files.append(char_path)
if itbl_path not in prg_files: prg_files.append(itbl_path)
if iter_path not in prg_files: prg_files.append(iter_path)
if arr_path not in prg_files: prg_files.append(arr_path)
if list_path not in prg_files: prg_files.append(list_path)
if alist_path not in prg_files: prg_files.append(alist_path)
if llist_path not in prg_files: prg_files.append(llist_path)
if hmap_path not in prg_files: prg_files.append(hmap_path)
if hset_path not in prg_files: prg_files.append(hset_path)
api = ""
for fname in prg_files:
with open(fname, 'r') as fd:
api += fd.read()
with open(java_in, 'w') as fd:
fd.write(api)
# this classpath stuff seems awful. Jsonify is hardcoded, passing a
# single string to subprocess.call is platform dependant, and shell=True
# can be a security vulnerability (if allowed to take user input).
# This just got a whole lot nastier
cmd = 'cd ' + pwd + '/..; /usr/bin/java -cp .:javaparser/javaparser-core/target/classes:$HOME/.m2/repository/com/cedarsoftware/json-io/4.3.0/json-io-4.3.0.jar jskparser.Jsonify ' + java_in + ' ' + json_out
ret = call(cmd, shell=True)
if ret != 0: exit('Problem parsing.')
return json_out
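# Added illustration (not part of the original module): a minimal driver for
# toAST(). The input path is an assumption; toAST() returns the path of the
# JSON dump written by the Java-side Jsonify step, which callers typically load.
def _example_toAST_usage(java_file="path/to/MyClass.java"):
    import json
    json_path = toAST([java_file], "java", add_libs=True)
    with open(json_path) as fd:
        return json.load(fd)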
fiftyone/core/patches.py | SNeugber/fiftyone | 0 | 8282 |
"""
Patches views.
| Copyright 2017-2021, Voxel51, Inc.
| `voxel51.com <https://voxel51.com/>`_
|
"""
from copy import deepcopy
import eta.core.utils as etau
import fiftyone.core.aggregations as foa
import fiftyone.core.dataset as fod
import fiftyone.core.fields as fof
import fiftyone.core.labels as fol
import fiftyone.core.media as fom
import fiftyone.core.sample as fos
import fiftyone.core.view as fov
_SINGLE_TYPES_MAP = {
fol.Detections: fol.Detection,
fol.Polylines: fol.Polyline,
}
_PATCHES_TYPES = (fol.Detections, fol.Polylines)
_NO_MATCH_ID = ""
class _PatchView(fos.SampleView):
@property
def _sample_id(self):
return self._doc.sample_id
def save(self):
super().save()
self._view._sync_source_sample(self)
class PatchView(_PatchView):
"""A patch in a :class:`PatchesView`.
:class:`PatchView` instances should not be created manually; they are
generated by iterating over :class:`PatchesView` instances.
Args:
doc: a :class:`fiftyone.core.odm.DatasetSampleDocument`
view: the :class:`PatchesView` that the patch belongs to
selected_fields (None): a set of field names that this view is
restricted to
excluded_fields (None): a set of field names that are excluded from
this view
filtered_fields (None): a set of field names of list fields that are
filtered in this view
"""
pass
class EvaluationPatchView(_PatchView):
"""A patch in an :class:`EvaluationPatchesView`.
:class:`EvaluationPatchView` instances should not be created manually; they
are generated by iterating over :class:`EvaluationPatchesView` instances.
Args:
doc: a :class:`fiftyone.core.odm.DatasetSampleDocument`
view: the :class:`EvaluationPatchesView` that the patch belongs to
selected_fields (None): a set of field names that this view is
restricted to
excluded_fields (None): a set of field names that are excluded from
this view
filtered_fields (None): a set of field names of list fields that are
filtered in this view
"""
pass
class _PatchesView(fov.DatasetView):
def __init__(
self, source_collection, patches_stage, patches_dataset, _stages=None
):
if _stages is None:
_stages = []
self._source_collection = source_collection
self._patches_stage = patches_stage
self._patches_dataset = patches_dataset
self.__stages = _stages
def __copy__(self):
return self.__class__(
self._source_collection,
deepcopy(self._patches_stage),
self._patches_dataset,
_stages=deepcopy(self.__stages),
)
@property
def _base_view(self):
return self.__class__(
self._source_collection,
self._patches_stage,
self._patches_dataset,
)
@property
def _dataset(self):
return self._patches_dataset
@property
def _root_dataset(self):
return self._source_collection._root_dataset
@property
def _stages(self):
return self.__stages
@property
def _all_stages(self):
return (
self._source_collection.view()._all_stages
+ [self._patches_stage]
+ self.__stages
)
@property
def _label_fields(self):
raise NotImplementedError("subclass must implement _label_fields")
@property
def _element_str(self):
return "patch"
@property
def _elements_str(self):
return "patches"
@property
def name(self):
return self.dataset_name + "-patches"
@classmethod
def _get_default_sample_fields(
cls, include_private=False, use_db_fields=False
):
fields = super()._get_default_sample_fields(
include_private=include_private, use_db_fields=use_db_fields
)
if use_db_fields:
return fields + ("_sample_id",)
return fields + ("sample_id",)
def set_values(self, field_name, *args, **kwargs):
field = field_name.split(".", 1)[0]
must_sync = field in self._label_fields
# The `set_values()` operation could change the contents of this view,
# so we first record the sample IDs that need to be synced
if must_sync and self._stages:
ids = self.values("_id")
else:
ids = None
super().set_values(field_name, *args, **kwargs)
if must_sync:
self._sync_source_field(field, ids=ids)
def save(self, fields=None):
"""Overwrites the object patches in the source dataset with the
contents of the view.
If this view contains any additional fields that were not extracted
from the source dataset, these fields are not saved.
.. warning::
This will permanently delete any omitted, filtered, or otherwise
modified patches from the source dataset.
Args:
fields (None): an optional field or list of fields to save. If
specified, only these fields are overwritten
"""
if etau.is_str(fields):
fields = [fields]
super().save(fields=fields)
if fields is None:
fields = self._label_fields
else:
fields = [l for l in fields if l in self._label_fields]
#
# IMPORTANT: we sync the contents of `_patches_dataset`, not `self`
# here because the `save()` call above updated the dataset, which means
# this view may no longer have the same contents (e.g., if `skip()` is
# involved)
#
self._sync_source_root(fields)
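    # Added illustration (not original): a typical ``save()`` round trip. The
    # field name and tag are assumptions for the example.
    #
    #   patches = dataset.to_patches("ground_truth")
    #   keep = patches.match_tags("reviewed")
    #   keep.save()  # overwrites source labels; patches not in ``keep`` are deleted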
def reload(self):
self._root_dataset.reload()
#
# Regenerate the patches dataset
#
# This assumes that calling `load_view()` when the current patches
# dataset has been deleted will cause a new one to be generated
#
self._patches_dataset.delete()
_view = self._patches_stage.load_view(self._source_collection)
self._patches_dataset = _view._patches_dataset
def _sync_source_sample(self, sample):
for field in self._label_fields:
self._sync_source_sample_field(sample, field)
def _sync_source_sample_field(self, sample, field):
label_type = self._patches_dataset._get_label_field_type(field)
is_list_field = issubclass(label_type, fol._LABEL_LIST_FIELDS)
doc = sample._doc.field_to_mongo(field)
if is_list_field:
doc = doc[label_type._LABEL_LIST_FIELD]
self._source_collection._set_labels_by_id(
field, [sample.sample_id], [doc]
)
def _sync_source_field(self, field, ids=None):
_, label_path = self._patches_dataset._get_label_field_path(field)
if ids is not None:
view = self._patches_dataset.mongo(
[{"$match": {"_id": {"$in": ids}}}]
)
else:
view = self._patches_dataset
sample_ids, docs = view.aggregate(
[foa.Values("sample_id"), foa.Values(label_path, _raw=True)]
)
self._source_collection._set_labels_by_id(field, sample_ids, docs)
def _sync_source_root(self, fields):
for field in fields:
self._sync_source_root_field(field)
def _sync_source_root_field(self, field):
_, id_path = self._get_label_field_path(field, "id")
label_path = id_path.rsplit(".", 1)[0]
#
# Sync label updates
#
sample_ids, docs, label_ids = self._patches_dataset.aggregate(
[
foa.Values("sample_id"),
foa.Values(label_path, _raw=True),
foa.Values(id_path, unwind=True),
]
)
self._source_collection._set_labels_by_id(field, sample_ids, docs)
#
# Sync label deletions
#
_, src_id_path = self._source_collection._get_label_field_path(
field, "id"
)
src_ids = self._source_collection.values(src_id_path, unwind=True)
delete_ids = set(src_ids) - set(label_ids)
if delete_ids:
self._source_collection._dataset.delete_labels(
ids=delete_ids, fields=field
)
def _get_ids_map(self, field):
label_type = self._patches_dataset._get_label_field_type(field)
is_list_field = issubclass(label_type, fol._LABEL_LIST_FIELDS)
_, id_path = self._get_label_field_path(field, "id")
sample_ids, label_ids = self.values(["id", id_path])
ids_map = {}
if is_list_field:
for sample_id, _label_ids in zip(sample_ids, label_ids):
if not _label_ids:
continue
for label_id in _label_ids:
ids_map[label_id] = sample_id
else:
for sample_id, label_id in zip(sample_ids, label_ids):
if not label_id:
continue
ids_map[label_id] = sample_id
return ids_map
class PatchesView(_PatchesView):
"""A :class:`fiftyone.core.view.DatasetView` of patches from a
:class:`fiftyone.core.dataset.Dataset`.
Patches views contain an ordered collection of patch samples, each of which
contains a subset of a sample of the parent dataset corresponding to a
single object or logical grouping of objects.
Patches retrieved from patches views are returned as :class:`PatchView`
objects.
Args:
source_collection: the
:class:`fiftyone.core.collections.SampleCollection` from which this
view was created
patches_stage: the :class:`fiftyone.core.stages.ToPatches` stage that
defines how the patches were extracted
patches_dataset: the :class:`fiftyone.core.dataset.Dataset` that serves
the patches in this view
"""
_SAMPLE_CLS = PatchView
def __init__(
self, source_collection, patches_stage, patches_dataset, _stages=None
):
super().__init__(
source_collection, patches_stage, patches_dataset, _stages=_stages
)
self._patches_field = patches_stage.field
@property
def _label_fields(self):
return [self._patches_field]
@property
def patches_field(self):
"""The field from which the patches in this view were extracted."""
return self._patches_field
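def _example_to_patches():  # pragma: no cover - added illustration, not original API
    """Hedged usage sketch: patches views are normally created via the
    collection-level ``to_patches()`` method rather than by instantiating
    :class:`PatchesView` directly. The dataset and field names below are
    assumptions for the example.
    """
    import fiftyone as fo
    dataset = fo.load_dataset("my-detections")  # hypothetical dataset
    patches = dataset.to_patches("ground_truth")  # one patch sample per object
    print(patches.patches_field, len(patches))
    return patches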
class EvaluationPatchesView(_PatchesView):
"""A :class:`fiftyone.core.view.DatasetView` containing evaluation patches
from a :class:`fiftyone.core.dataset.Dataset`.
Evaluation patches views contain an ordered collection of evaluation
examples, each of which contains the ground truth and/or predicted labels
for a true positive, false positive, or false negative example from an
evaluation run on the underlying dataset.
Patches retrieved from patches views are returned as
:class:`EvaluationPatchView` objects.
Args:
source_collection: the
:class:`fiftyone.core.collections.SampleCollection` from which this
view was created
patches_stage: the :class:`fiftyone.core.stages.ToEvaluationPatches`
stage that defines how the patches were extracted
patches_dataset: the :class:`fiftyone.core.dataset.Dataset` that serves
the patches in this view
"""
_SAMPLE_CLS = EvaluationPatchView
def __init__(
self, source_collection, patches_stage, patches_dataset, _stages=None
):
super().__init__(
source_collection, patches_stage, patches_dataset, _stages=_stages
)
eval_key = patches_stage.eval_key
eval_info = source_collection.get_evaluation_info(eval_key)
self._gt_field = eval_info.config.gt_field
self._pred_field = eval_info.config.pred_field
@property
def _label_fields(self):
return [self._gt_field, self._pred_field]
@property
def gt_field(self):
"""The ground truth field for the evaluation patches in this view."""
return self._gt_field
@property
def pred_field(self):
"""The predictions field for the evaluation patches in this view."""
return self._pred_field
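def _example_to_evaluation_patches():  # pragma: no cover - added illustration, not original API
    """Hedged usage sketch: evaluation patches are normally created from an
    existing detection evaluation via ``to_evaluation_patches()``. The dataset,
    field names, and eval key below are assumptions for the example.
    """
    import fiftyone as fo
    dataset = fo.load_dataset("my-detections")  # hypothetical dataset
    dataset.evaluate_detections(
        "predictions", gt_field="ground_truth", eval_key="eval"
    )
    eval_patches = dataset.to_evaluation_patches("eval")
    print(eval_patches.gt_field, eval_patches.pred_field)
    print(eval_patches.count_values("type"))  # TP/FP/FN breakdown
    return eval_patches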
def make_patches_dataset(
sample_collection, field, keep_label_lists=False, name=None
):
"""Creates a dataset that contains one sample per object patch in the
specified field of the collection.
Fields other than ``field`` and the default sample fields will not be
included in the returned dataset. A ``sample_id`` field will be added that
records the sample ID from which each patch was taken.
Args:
sample_collection: a
:class:`fiftyone.core.collections.SampleCollection`
field: the patches field, which must be of type
:class:`fiftyone.core.labels.Detections` or
:class:`fiftyone.core.labels.Polylines`
keep_label_lists (False): whether to store the patches in label list
fields of the same type as the input collection rather than using
their single label variants
name (None): a name for the returned dataset
Returns:
a :class:`fiftyone.core.dataset.Dataset`
"""
if keep_label_lists:
field_type = sample_collection._get_label_field_type(field)
else:
field_type = _get_single_label_field_type(sample_collection, field)
dataset = fod.Dataset(name, _patches=True)
dataset.media_type = fom.IMAGE
dataset.add_sample_field(
"sample_id", fof.ObjectIdField, db_field="_sample_id"
)
dataset.add_sample_field(
field, fof.EmbeddedDocumentField, embedded_doc_type=field_type
)
patches_view = _make_patches_view(
sample_collection, field, keep_label_lists=keep_label_lists
)
_write_samples(dataset, patches_view)
return dataset
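def _example_make_patches_dataset():  # pragma: no cover - added illustration, not original API
    """Hedged usage sketch of calling the factory directly; in normal use the
    ``ToPatches`` view stage invokes it for you. Names are assumptions.
    """
    import fiftyone as fo
    dataset = fo.load_dataset("my-detections")  # hypothetical dataset
    patches_dataset = make_patches_dataset(dataset, "ground_truth")
    print(patches_dataset.count())  # one sample per object patch
    print(patches_dataset.first().sample_id)  # source sample of that patch
    return patches_dataset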
def _get_single_label_field_type(sample_collection, field):
label_type = sample_collection._get_label_field_type(field)
if label_type not in _SINGLE_TYPES_MAP:
raise ValueError("Unsupported label field type %s" % label_type)
return _SINGLE_TYPES_MAP[label_type]
def make_evaluation_dataset(sample_collection, eval_key, name=None):
"""Creates a dataset based on the results of the evaluation with the given
key that contains one sample for each true positive, false positive, and
false negative example in the input collection, respectively.
True positive examples will result in samples with both their ground truth
and predicted fields populated, while false positive/negative examples will
only have one of their corresponding predicted/ground truth fields
populated, respectively.
If multiple predictions are matched to a ground truth object (e.g., if the
evaluation protocol includes a crowd attribute), then all matched
predictions will be stored in the single sample along with the ground truth
object.
The returned dataset will also have top-level ``type`` and ``iou`` fields
populated based on the evaluation results for that example, as well as a
``sample_id`` field recording the sample ID of the example, and a ``crowd``
field if the evaluation protocol defines a crowd attribute.
.. note::
The returned dataset will contain patches for the contents of the input
collection, which may differ from the view on which the ``eval_key``
evaluation was performed. This may exclude some labels that were
evaluated and/or include labels that were not evaluated.
If you would like to see patches for the exact view on which an
evaluation was performed, first call
:meth:`load_evaluation_view() <fiftyone.core.collections.SampleCollection.load_evaluation_view>`
to load the view and then convert to patches.
Args:
sample_collection: a
:class:`fiftyone.core.collections.SampleCollection`
eval_key: an evaluation key that corresponds to the evaluation of
ground truth/predicted fields that are of type
:class:`fiftyone.core.labels.Detections` or
:class:`fiftyone.core.labels.Polylines`
name (None): a name for the returned dataset
Returns:
a :class:`fiftyone.core.dataset.Dataset`
"""
# Parse evaluation info
eval_info = sample_collection.get_evaluation_info(eval_key)
pred_field = eval_info.config.pred_field
gt_field = eval_info.config.gt_field
if hasattr(eval_info.config, "iscrowd"):
crowd_attr = eval_info.config.iscrowd
else:
crowd_attr = None
pred_type = sample_collection._get_label_field_type(pred_field)
gt_type = sample_collection._get_label_field_type(gt_field)
# Setup dataset with correct schema
dataset = fod.Dataset(name, _patches=True)
dataset.media_type = fom.IMAGE
dataset.add_sample_field(
pred_field, fof.EmbeddedDocumentField, embedded_doc_type=pred_type
)
dataset.add_sample_field(
gt_field, fof.EmbeddedDocumentField, embedded_doc_type=gt_type
)
dataset.add_sample_field(
"sample_id", fof.ObjectIdField, db_field="_sample_id"
)
dataset.add_sample_field("type", fof.StringField)
dataset.add_sample_field("iou", fof.FloatField)
if crowd_attr is not None:
dataset.add_sample_field("crowd", fof.BooleanField)
# Add ground truth patches
gt_view = _make_eval_view(
sample_collection, eval_key, gt_field, crowd_attr=crowd_attr
)
_write_samples(dataset, gt_view)
# Merge matched predictions
_merge_matched_labels(dataset, sample_collection, eval_key, pred_field)
# Add unmatched predictions
unmatched_pred_view = _make_eval_view(
sample_collection, eval_key, pred_field, skip_matched=True
)
_add_samples(dataset, unmatched_pred_view)
return dataset
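def _example_make_evaluation_dataset():  # pragma: no cover - added illustration, not original API
    """Hedged usage sketch of building the evaluation patches dataset from an
    existing evaluation run. The dataset name and eval key are assumptions.
    """
    import fiftyone as fo
    dataset = fo.load_dataset("my-detections")  # hypothetical dataset
    eval_dataset = make_evaluation_dataset(dataset, "eval")
    print(eval_dataset.count_values("type"))  # counts of tp/fp/fn examples
    print(eval_dataset.bounds("iou"))  # IoU range over matched pairs
    return eval_dataset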
def _make_patches_view(sample_collection, field, keep_label_lists=False):
if sample_collection._is_frames:
raise ValueError(
"Creating patches views into frame views is not yet supported"
)
if sample_collection._is_frame_field(field):
raise ValueError(
"Frame label patches cannot be directly extracted; you must first "
"convert your video dataset to frames via `to_frames()`"
)
label_type = sample_collection._get_label_field_type(field)
if issubclass(label_type, _PATCHES_TYPES):
list_field = field + "." + label_type._LABEL_LIST_FIELD
else:
raise ValueError(
"Invalid label field type %s. Extracting patches is only "
"supported for the following types: %s"
% (label_type, _PATCHES_TYPES)
)
pipeline = [
{
"$project": {
"_id": True,
"_sample_id": "$_id",
"_media_type": True,
"filepath": True,
"metadata": True,
"tags": True,
field + "._cls": True,
list_field: True,
}
},
{"$unwind": "$" + list_field},
{"$set": {"_rand": {"$rand": {}}}},
{"$set": {"_id": "$" + list_field + "._id"}},
]
if keep_label_lists:
pipeline.append({"$set": {list_field: ["$" + list_field]}})
else:
pipeline.append({"$set": {field: "$" + list_field}})
return sample_collection.mongo(pipeline)
def _make_eval_view(
sample_collection, eval_key, field, skip_matched=False, crowd_attr=None
):
eval_type = field + "." + eval_key
eval_id = field + "." + eval_key + "_id"
eval_iou = field + "." + eval_key + "_iou"
view = _make_patches_view(sample_collection, field)
if skip_matched:
view = view.mongo(
[
{
"$match": {
"$expr": {
"$or": [
{"$eq": ["$" + eval_id, _NO_MATCH_ID]},
{"$not": {"$gt": ["$" + eval_id, None]}},
]
}
}
}
]
)
view = view.mongo(
[{"$set": {"type": "$" + eval_type, "iou": "$" + eval_iou}}]
)
if crowd_attr is not None:
crowd_path1 = "$" + field + "." + crowd_attr
# @todo remove Attributes usage
crowd_path2 = "$" + field + ".attributes." + crowd_attr + ".value"
view = view.mongo(
[
{
"$set": {
"crowd": {
"$cond": {
"if": {"$gt": [crowd_path1, None]},
"then": {"$toBool": crowd_path1},
"else": {
"$cond": {
"if": {"$gt": [crowd_path2, None]},
"then": {"$toBool": crowd_path2},
"else": None,
}
},
}
}
}
}
]
)
return _upgrade_labels(view, field)
def _upgrade_labels(view, field):
tmp_field = "_" + field
label_type = view._get_label_field_type(field)
return view.mongo(
[
{"$set": {tmp_field: "$" + field}},
{"$unset": field},
{
"$set": {
field: {
"_cls": label_type.__name__,
label_type._LABEL_LIST_FIELD: ["$" + tmp_field],
}
}
},
{"$unset": tmp_field},
]
)
def _merge_matched_labels(dataset, src_collection, eval_key, field):
field_type = src_collection._get_label_field_type(field)
list_field = field + "." + field_type._LABEL_LIST_FIELD
eval_id = eval_key + "_id"
eval_field = list_field + "." + eval_id
pipeline = src_collection._pipeline(detach_frames=True)
pipeline.extend(
[
{"$project": {list_field: True}},
{"$unwind": "$" + list_field},
{
"$match": {
"$expr": {
"$and": [
{"$gt": ["$" + eval_field, None]},
{"$ne": ["$" + eval_field, _NO_MATCH_ID]},
]
}
}
},
{
"$group": {
"_id": {"$toObjectId": "$" + eval_field},
"_labels": {"$push": "$" + list_field},
}
},
{
"$project": {
field: {
"_cls": field_type.__name__,
field_type._LABEL_LIST_FIELD: "$_labels",
}
},
},
{
"$merge": {
"into": dataset._sample_collection_name,
"on": "_id",
"whenMatched": "merge",
"whenNotMatched": "discard",
}
},
]
)
src_collection._dataset._aggregate(pipeline=pipeline, attach_frames=False)
def _write_samples(dataset, src_collection):
pipeline = src_collection._pipeline(detach_frames=True)
pipeline.append({"$out": dataset._sample_collection_name})
src_collection._dataset._aggregate(pipeline=pipeline, attach_frames=False)
def _add_samples(dataset, src_collection):
pipeline = src_collection._pipeline(detach_frames=True)
pipeline.append(
{
"$merge": {
"into": dataset._sample_collection_name,
"on": "_id",
"whenMatched": "keepExisting",
"whenNotMatched": "insert",
}
}
)
src_collection._dataset._aggregate(pipeline=pipeline, attach_frames=False)
{{cookiecutter.repo_name}}/setup.py | ocesaulo/cookiecutter-ocn_sci | 0 | 8283 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""The setup script."""
from os import path
from setuptools import setup, find_packages
here = path.abspath(path.dirname(__file__))
__version__ = '0.1.0'  # placeholder: replace with (or import) the real package version
with open('README.rst') as readme_file:
readme = readme_file.read()
{%- set license_classifiers = {
'MIT license': 'License :: OSI Approved :: MIT License',
'BSD license': 'License :: OSI Approved :: BSD License',
'ISC license': 'License :: OSI Approved :: ISC License (ISCL)',
'Apache Software License 2.0': 'License :: OSI Approved :: Apache Software License',
'GNU General Public License v3': 'License :: OSI Approved :: GNU General Public License v3 (GPLv3)'
} %}
# get the dependencies and installs
with open(path.join(here, 'requirements.txt'), encoding='utf-8') as f:
all_reqs = f.read().split('\n')
install_requires = [x.strip() for x in all_reqs if 'git+' not in x]
dependency_links = [x.strip().replace('git+', '') for x in all_reqs if x.startswith('git+')]
test_requirements = ['pytest']
setup_requirements = ['pytest-runner']
requirements = [
# package requirements go here
]
setup(
name='{{ cookiecutter.repo_name }}',
version=__version__,
description="{{ cookiecutter.project_short_description }}",
long_description=readme,
author="{{ cookiecutter.full_name.replace('\"', '\\\"') }}",
author_email='{{ cookiecutter.email }}',
url='https://github.com/{{ cookiecutter.github_username }}/{{ cookiecutter.repo_name }}',
packages=find_packages(include=['{{ cookiecutter.repo_name }}'],
exclude=('docs', 'tests*',)),
{%- if cookiecutter.open_source_license in license_classifiers %}
license="{{ cookiecutter.open_source_license }}",
{%- endif %}
install_requires=install_requires,
dependency_links=dependency_links,
setup_requires=setup_requirements,
test_suite='tests',
tests_require=test_requirements,
keywords='{{ cookiecutter.repo_name }}',
classifiers=[
'Programming Language :: Python :: 3.6',
]
)
src/zope/app/debug/debug.py | zopefoundation/zope.app.debug | 0 | 8284 |
##############################################################################
#
# Copyright (c) 2002 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Code to initialize the application server
"""
from __future__ import print_function
__docformat__ = 'restructuredtext'
import base64
import time
import sys
from pdb import Pdb
from io import BytesIO
from zope.publisher.publish import publish as _publish, debug_call
from zope.publisher.browser import TestRequest, setDefaultSkin
from zope.app.publication.browser import BrowserPublication
from zope.app.appsetup import config, database
try:
from time import process_time as time_process_time # pragma: PY3
except ImportError:
from time import clock as time_process_time # pragma: PY2
try:
import urllib.parse as urllib # pragma: PY3
except ImportError:
import urllib # pragma: PY2
try:
text_type = unicode # pragma: PY2
except NameError:
text_type = str # pragma: PY3
class Debugger(object):
pdb = Pdb
def __init__(self, db=None, config_file=None, stdout=None):
if db is None and config_file is None:
db = 'Data.fs'
config_file = 'site.zcml'
if config_file is not None:
config(config_file)
self.db = database(db)
self.stdout = stdout
@classmethod
def fromDatabase(cls, db):
inst = cls.__new__(cls)
inst.db = db
return inst
def root(self):
"""Get the top-level application object
The object returned is connected to an open database connection.
"""
from zope.app.publication.zopepublication import ZopePublication
return self.db.open().root()[ZopePublication.root_name]
def _request(self,
path='/', stdin='', basic=None,
environment=None, form=None,
request=None, publication=BrowserPublication):
"""Create a request
"""
env = {}
if isinstance(stdin, text_type):
stdin = stdin.encode("utf-8")
if isinstance(stdin, bytes):
stdin = BytesIO(stdin)
p = path.split('?')
if len(p) == 1:
env['PATH_INFO'] = p[0]
elif len(p) == 2:
env['PATH_INFO'], env['QUERY_STRING'] = p
else:
raise ValueError("Too many ?s in path", path)
env['PATH_INFO'] = urllib.unquote(env['PATH_INFO'])
if environment is not None:
env.update(environment)
if basic:
basic_bytes = basic.encode('ascii') if not isinstance(
basic, bytes) else basic
basic64_bytes = base64.b64encode(basic_bytes)
basic64 = basic64_bytes.decode('ascii').strip()
env['HTTP_AUTHORIZATION'] = "Basic %s" % basic64
pub = publication(self.db)
if request is not None:
request = request(stdin, env)
else:
request = TestRequest(stdin, env)
setDefaultSkin(request)
request.setPublication(pub)
if form:
request.form.update(form)
return request
def publish(self, path='/', stdin='', *args, **kw):
t, pt = time.time(), time_process_time()
request = self._request(path, stdin, *args, **kw)
# agroszer: 2008.feb.1.: if a retry occurs in the publisher,
# the response will be LOST, so we must accept the returned request
request = _publish(request)
getStatus = getattr(request.response, 'getStatus', lambda: None)
headers = sorted(request.response.getHeaders())
print(
'Status %s\r\n%s\r\n\r\n%s' % (
request.response.getStatusString(),
'\r\n'.join([("%s: %s" % h) for h in headers]),
request.response.consumeBody(),
), file=self.stdout or sys.stdout)
return time.time() - t, time_process_time() - pt, getStatus()
def run(self, *args, **kw):
t, pt = time.time(), time_process_time()
request = self._request(*args, **kw)
# agroszer: 2008.feb.1.: if a retry occurs in the publisher,
# the response will be LOST, so we must accept the returned request
request = _publish(request, handle_errors=False)
getStatus = getattr(request.response, 'getStatus', lambda: None)
return time.time() - t, time_process_time() - pt, getStatus()
def debug(self, *args, **kw):
out = self.stdout or sys.stdout
class ZopePdb(self.Pdb):
done_pub = False
done_ob = False
def do_pub(self, arg):
if self.done_pub:
print('pub already done.', file=out)
return
self.do_s('')
self.do_s('')
self.do_c('')
self.done_pub = True
def do_ob(self, arg):
if self.done_ob:
print('ob already done.', file=out)
return
self.do_pub('')
self.do_c('')
self.done_ob = True
dbg = ZopePdb()
request = self._request(*args, **kw)
fbreak(dbg, _publish)
fbreak(dbg, debug_call)
print('* Type c<cr> to jump to published object call.',
file=out)
dbg.runcall(_publish, request)
return dbg
def getlineno(code):
return code.co_firstlineno
def fbreak(db, meth):
try:
meth = meth.__func__
except AttributeError:
pass
code = meth.__code__
lineno = getlineno(code)
filename = code.co_filename
db.set_break(filename, lineno)
| ##############################################################################
#
# Copyright (c) 2002 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Code to initialize the application server
"""
from __future__ import print_function
__docformat__ = 'restructuredtext'
import base64
import time
import sys
from pdb import Pdb
from io import BytesIO
from zope.publisher.publish import publish as _publish, debug_call
from zope.publisher.browser import TestRequest, setDefaultSkin
from zope.app.publication.browser import BrowserPublication
from zope.app.appsetup import config, database
try:
from time import process_time as time_process_time # pragma: PY3
except ImportError:
from time import clock as time_process_time # pragma: PY2
try:
import urllib.parse as urllib # pragma: PY3
except ImportError:
import urllib # pragma: PY2
try:
text_type = unicode # pragma: PY2
except NameError:
text_type = str # pragma: PY3
class Debugger(object):
pdb = Pdb
def __init__(self, db=None, config_file=None, stdout=None):
if db is None and config_file is None:
db = 'Data.fs'
config_file = 'site.zcml'
if config_file is not None:
config(config_file)
self.db = database(db)
self.stdout = stdout
@classmethod
def fromDatabase(cls, db):
inst = cls.__new__(cls)
inst.db = db
return inst
def root(self):
"""Get the top-level application object
The object returned is connected to an open database connection.
"""
from zope.app.publication.zopepublication import ZopePublication
return self.db.open().root()[ZopePublication.root_name]
def _request(self,
path='/', stdin='', basic=None,
environment=None, form=None,
request=None, publication=BrowserPublication):
"""Create a request
"""
env = {}
if isinstance(stdin, text_type):
stdin = stdin.encode("utf-8")
if isinstance(stdin, bytes):
stdin = BytesIO(stdin)
p = path.split('?')
if len(p) == 1:
env['PATH_INFO'] = p[0]
elif len(p) == 2:
env['PATH_INFO'], env['QUERY_STRING'] = p
else:
raise ValueError("Too many ?s in path", path)
env['PATH_INFO'] = urllib.unquote(env['PATH_INFO'])
if environment is not None:
env.update(environment)
if basic:
basic_bytes = basic.encode('ascii') if not isinstance(
basic, bytes) else basic
basic64_bytes = base64.b64encode(basic_bytes)
basic64 = basic64_bytes.decode('ascii').strip()
env['HTTP_AUTHORIZATION'] = "Basic %s" % basic64
pub = publication(self.db)
if request is not None:
request = request(stdin, env)
else:
request = TestRequest(stdin, env)
setDefaultSkin(request)
request.setPublication(pub)
if form:
request.form.update(form)
return request
def publish(self, path='/', stdin='', *args, **kw):
t, pt = time.time(), time_process_time()
request = self._request(path, stdin, *args, **kw)
# agroszer: 2008.feb.1.: if a retry occurs in the publisher,
# the response will be LOST, so we must accept the returned request
request = _publish(request)
getStatus = getattr(request.response, 'getStatus', lambda: None)
headers = sorted(request.response.getHeaders())
print(
'Status %s\r\n%s\r\n\r\n%s' % (
request.response.getStatusString(),
'\r\n'.join([("%s: %s" % h) for h in headers]),
request.response.consumeBody(),
), file=self.stdout or sys.stdout)
return time.time() - t, time_process_time() - pt, getStatus()
def run(self, *args, **kw):
t, pt = time.time(), time_process_time()
request = self._request(*args, **kw)
# agroszer: 2008.feb.1.: if a retry occurs in the publisher,
# the response will be LOST, so we must accept the returned request
request = _publish(request, handle_errors=False)
getStatus = getattr(request.response, 'getStatus', lambda: None)
return time.time() - t, time_process_time() - pt, getStatus()
def debug(self, *args, **kw):
out = self.stdout or sys.stdout
class ZopePdb(self.Pdb):
done_pub = False
done_ob = False
def do_pub(self, arg):
if self.done_pub:
print('pub already done.', file=out)
return
self.do_s('')
self.do_s('')
self.do_c('')
self.done_pub = True
def do_ob(self, arg):
if self.done_ob:
print('ob already done.', file=out)
return
self.do_pub('')
self.do_c('')
self.done_ob = True
dbg = ZopePdb()
request = self._request(*args, **kw)
fbreak(dbg, _publish)
fbreak(dbg, debug_call)
print('* Type c<cr> to jump to published object call.',
file=out)
dbg.runcall(_publish, request)
return dbg
def getlineno(code):
return code.co_firstlineno
def fbreak(db, meth):
try:
meth = meth.__func__
except AttributeError:
pass
code = meth.__code__
lineno = getlineno(code)
filename = code.co_filename
db.set_break(filename, lineno)
| en | 0.526207 | ############################################################################## # # Copyright (c) 2002 Zope Foundation and Contributors. # All Rights Reserved. # # This software is subject to the provisions of the Zope Public License, # Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution. # THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED # WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS # FOR A PARTICULAR PURPOSE. # ############################################################################## Code to initialize the application server # pragma: PY3 # pragma: PY2 # pragma: PY3 # pragma: PY2 # pragma: PY2 # pragma: PY3 Get the top-level application object The object returned is connected to an open database connection. Create a request # agroszer: 2008.feb.1.: if a retry occurs in the publisher, # the response will be LOST, so we must accept the returned request # agroszer: 2008.feb.1.: if a retry occurs in the publisher, # the response will be LOST, so we must accept the returned request | 1.980211 | 2 |
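A minimal interactive sketch of the Debugger class above (the database/config paths, URL and credentials are illustrative assumptions, not part of the original module):
# Hypothetical session: requires an existing ZODB file and ZCML site configuration.
from zope.app.debug.debug import Debugger
dbg = Debugger(db='Data.fs', config_file='site.zcml')
app_root = dbg.root()                                  # open a connection and fetch the application root
wall, cpu, status = dbg.run('/index.html')             # publish a request, return timings and HTTP status
dbg.publish('/index.html', basic='manager:password')   # publish and print status line, headers and body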
transfer_learning.py | terryli710/SIIM-ACR-Pneumothorax-Classification | 0 | 8285 | <reponame>terryli710/SIIM-ACR-Pneumothorax-Classification
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon May 18 22:42:54 2020
@author: mike
"""
import numpy as np
import tensorflow as tf
from tensorflow import keras
from sklearn.model_selection import train_test_split
from tensorflow.keras.applications import VGG16
from tensorflow.keras import layers
from sklearn.preprocessing import OneHotEncoder
from skimage.transform import resize
import matplotlib.pyplot as plt
train_data = np.load("train_data.npy")
x_data = np.zeros((210,204,204,3))
y_data = np.zeros(210)
for i in range(210):
img = train_data[i,1:].reshape(1024,1024)
    img_resized = resize(img, (204, 204), preserve_range=True)  # preserve_range keeps the original intensity scale; the default rescales to [0, 1], which astype(int) below would truncate to zeros
y_data[i] = train_data[i,0]
x_data[i,:,:,0] = img_resized.astype(int)
x_data[i,:,:,1] = img_resized.astype(int)
x_data[i,:,:,2] = img_resized.astype(int)
x_train, x_test, y_train, y_test = train_test_split(
x_data, y_data, test_size=0.2, random_state=42)
y_train = OneHotEncoder().fit_transform(y_train.reshape(-1,1)).toarray()
y_test = OneHotEncoder().fit_transform(y_test.reshape(-1,1)).toarray()
base_model = VGG16(include_top=False, weights='vgg16_weights_tf_dim_ordering_tf_kernels_notop.h5',
input_shape=(204, 204, 3))
base_model.trainable = False
inputs = tf.keras.Input(shape=(204, 204, 3))
x = base_model(inputs)
x = tf.keras.layers.Flatten()(x)
x = tf.keras.layers.Dense(256, activation='relu')(x)
x = tf.keras.layers.Dense(64, activation='relu')(x)
outputs = tf.keras.layers.Dense(2, activation='softmax')(x)
model = keras.Model(inputs, outputs)
model.summary()
model.compile(optimizer=tf.keras.optimizers.SGD(learning_rate=0.001),loss="binary_crossentropy",metrics=["accuracy"])
model.fit(x_train, y_train, batch_size=16, epochs=5)
pred = model.predict(x_train)
score = model.evaluate(x_test, y_test, verbose=0)
print(score[0],score[1]) | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon May 18 22:42:54 2020
@author: mike
"""
import numpy as np
import tensorflow as tf
from tensorflow import keras
from sklearn.model_selection import train_test_split
from tensorflow.keras.applications import VGG16
from tensorflow.keras import layers
from sklearn.preprocessing import OneHotEncoder
from skimage.transform import resize
import matplotlib.pyplot as plt
train_data = np.load("train_data.npy")
x_data = np.zeros((210,204,204,3))
y_data = np.zeros(210)
for i in range(210):
img = train_data[i,1:].reshape(1024,1024)
    img_resized = resize(img, (204, 204), preserve_range=True)  # preserve_range keeps the original intensity scale; the default rescales to [0, 1], which astype(int) below would truncate to zeros
y_data[i] = train_data[i,0]
x_data[i,:,:,0] = img_resized.astype(int)
x_data[i,:,:,1] = img_resized.astype(int)
x_data[i,:,:,2] = img_resized.astype(int)
x_train, x_test, y_train, y_test = train_test_split(
x_data, y_data, test_size=0.2, random_state=42)
y_train = OneHotEncoder().fit_transform(y_train.reshape(-1,1)).toarray()
y_test = OneHotEncoder().fit_transform(y_test.reshape(-1,1)).toarray()
base_model = VGG16(include_top=False, weights='vgg16_weights_tf_dim_ordering_tf_kernels_notop.h5',
input_shape=(204, 204, 3))
base_model.trainable = False
inputs = tf.keras.Input(shape=(204, 204, 3))
x = base_model(inputs)
x = tf.keras.layers.Flatten()(x)
x = tf.keras.layers.Dense(256, activation='relu')(x)
x = tf.keras.layers.Dense(64, activation='relu')(x)
outputs = tf.keras.layers.Dense(2, activation='softmax')(x)
model = keras.Model(inputs, outputs)
model.summary()
model.compile(optimizer=tf.keras.optimizers.SGD(learning_rate=0.001),loss="binary_crossentropy",metrics=["accuracy"])
model.fit(x_train, y_train, batch_size=16, epochs=5)
pred = model.predict(x_train)
score = model.evaluate(x_test, y_test, verbose=0)
print(score[0],score[1]) | en | 0.554216 | #!/usr/bin/env python3 # -*- coding: utf-8 -*- Created on Mon May 18 22:42:54 2020 @author: mike | 2.388856 | 2 |
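A possible follow-up to the frozen-base training above is a brief fine-tuning pass; the number of unfrozen layers, learning rate and epoch count below are illustrative choices, not taken from the original script:
# Unfreeze the top of VGG16 and continue training with a smaller learning rate.
base_model.trainable = True
for layer in base_model.layers[:-4]:      # keep everything except the last few layers frozen
    layer.trainable = False
model.compile(optimizer=tf.keras.optimizers.SGD(learning_rate=1e-4),
              loss="binary_crossentropy", metrics=["accuracy"])
model.fit(x_train, y_train, batch_size=16, epochs=3)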
core/tests/test_polyflow/test_workflows/test_hyperband.py | erexer/polyaxon | 0 | 8286 | <reponame>erexer/polyaxon
#!/usr/bin/python
#
# Copyright 2018-2020 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from marshmallow.exceptions import ValidationError
from tests.utils import BaseTestCase, assert_equal_dict
from polyaxon.polyflow.matrix import V1Hyperband
from polyaxon.polyflow.optimization import V1Optimization, V1OptimizationMetric
@pytest.mark.workflow_mark
class TestWorkflowV1Hyperbands(BaseTestCase):
def test_hyperband_config(self):
config_dict = {
"kind": "hyperband",
"maxIterations": 10,
"eta": 3,
"resource": {"name": "steps", "type": "int"},
"resume": False,
"metric": V1OptimizationMetric(
name="loss", optimization=V1Optimization.MINIMIZE
).to_dict(),
"params": {"lr": {"kind": "choice", "value": [[0.1], [0.9]]}},
}
config = V1Hyperband.from_dict(config_dict)
assert_equal_dict(config.to_dict(), config_dict)
# Raises for negative values
config_dict["maxIterations"] = 0
with self.assertRaises(ValidationError):
V1Hyperband.from_dict(config_dict)
config_dict["maxIterations"] = -0.5
with self.assertRaises(ValidationError):
V1Hyperband.from_dict(config_dict)
config_dict["maxIterations"] = 3
# Add numRuns percent
config_dict["eta"] = -0.5
with self.assertRaises(ValidationError):
V1Hyperband.from_dict(config_dict)
config_dict["eta"] = 2.9
config = V1Hyperband.from_dict(config_dict)
assert_equal_dict(config.to_dict(), config_dict)
| #!/usr/bin/python
#
# Copyright 2018-2020 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from marshmallow.exceptions import ValidationError
from tests.utils import BaseTestCase, assert_equal_dict
from polyaxon.polyflow.matrix import V1Hyperband
from polyaxon.polyflow.optimization import V1Optimization, V1OptimizationMetric
@pytest.mark.workflow_mark
class TestWorkflowV1Hyperbands(BaseTestCase):
def test_hyperband_config(self):
config_dict = {
"kind": "hyperband",
"maxIterations": 10,
"eta": 3,
"resource": {"name": "steps", "type": "int"},
"resume": False,
"metric": V1OptimizationMetric(
name="loss", optimization=V1Optimization.MINIMIZE
).to_dict(),
"params": {"lr": {"kind": "choice", "value": [[0.1], [0.9]]}},
}
config = V1Hyperband.from_dict(config_dict)
assert_equal_dict(config.to_dict(), config_dict)
# Raises for negative values
config_dict["maxIterations"] = 0
with self.assertRaises(ValidationError):
V1Hyperband.from_dict(config_dict)
config_dict["maxIterations"] = -0.5
with self.assertRaises(ValidationError):
V1Hyperband.from_dict(config_dict)
config_dict["maxIterations"] = 3
# Add numRuns percent
config_dict["eta"] = -0.5
with self.assertRaises(ValidationError):
V1Hyperband.from_dict(config_dict)
config_dict["eta"] = 2.9
config = V1Hyperband.from_dict(config_dict)
assert_equal_dict(config.to_dict(), config_dict) | en | 0.799155 | #!/usr/bin/python # # Copyright 2018-2020 Polyaxon, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Raises for negative values # Add numRuns percent | 1.842221 | 2 |
Class Work oop.py | fatimatswanya/fatimaCSC102 | 0 | 8287 |
class Student:
studentLevel = 'first year computer science 2020/2021 session'
studentCounter = 0
registeredCourse='csc102'
def __init__(self, thename, thematricno, thesex,thehostelname,theage,thecsc102examscore):
self.name = thename
self.matricno = thematricno
self.sex = thesex
self.hostelname =thehostelname
self.age=theage
self.csc102examscore=thecsc102examscore
Student.studentCounter = Student.studentCounter + 1
def getName(self):
return self.name
def setName(self, thenewName):
self.name = thenewName
def agedeterminer(self):
if self.age>16:
print('Student is above 16')
def finalscore(self):
if self.csc102examscore < 45:
print('You will carryover this course, sorry')
else:
print('You have passed')
@classmethod
    def course(cls):
print(f'Students registered course is {Student.registeredCourse}')
@staticmethod
def PAUNanthem():
print('Pau, here we come, Pau, here we come ')
@staticmethod
def ODDorEVEN(num):
if num % 2==0:
print('Number is even')
else:
print('Number is odd')
@classmethod
def studentnum(cls):
print(Student.studentCounter)
studendt1 = Student('<NAME>', '021074', 'M', 'Amethyst', 16, 49)
print(studendt1.getName())
studendt1.setName('<NAME>')
print(studendt1.getName())
Student.PAUNanthem() |
class Student:
studentLevel = 'first year computer science 2020/2021 session'
studentCounter = 0
registeredCourse='csc102'
def __init__(self, thename, thematricno, thesex,thehostelname,theage,thecsc102examscore):
self.name = thename
self.matricno = thematricno
self.sex = thesex
self.hostelname =thehostelname
self.age=theage
self.csc102examscore=thecsc102examscore
Student.studentCounter = Student.studentCounter + 1
def getName(self):
return self.name
def setName(self, thenewName):
self.name = thenewName
def agedeterminer(self):
if self.age>16:
print('Student is above 16')
def finalscore(self):
if self.csc102examscore < 45:
print('You will carryover this course, sorry')
else:
print('You have passed')
@classmethod
    def course(cls):
print(f'Students registered course is {Student.registeredCourse}')
@staticmethod
def PAUNanthem():
print('Pau, here we come, Pau, here we come ')
@staticmethod
def ODDorEVEN(num):
if num % 2==0:
print('Number is even')
else:
print('Number is odd')
@classmethod
def studentnum(cls):
print(Student.studentCounter)
studendt1 = Student('<NAME>', '021074', 'M', 'Amethyst', 16, 49)
print(studendt1.getName())
studendt1.setName('<NAME>')
print(studendt1.getName())
Student.PAUNanthem() | none | 1 | 3.660264 | 4 |
|
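The exercise class above defines several methods that the script never exercises; a short driver illustrating them (the values are arbitrary):
student2 = Student('Ada', '021099', 'F', 'Cedar', 17, 72)
student2.agedeterminer()     # prints: Student is above 16
student2.finalscore()        # prints: You have passed
Student.ODDorEVEN(7)         # prints: Number is odd
Student.studentnum()         # prints the current value of Student.studentCounter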
clickhouse_sqlalchemy/drivers/reflection.py | Fozar/clickhouse-sqlalchemy | 0 | 8288 | <reponame>Fozar/clickhouse-sqlalchemy
from sqlalchemy.engine import reflection
from clickhouse_sqlalchemy import Table, engines
class ClickHouseInspector(reflection.Inspector):
def reflect_table(self, table, *args, **kwargs):
# This check is necessary to support direct instantiation of
# `clickhouse_sqlalchemy.Table` and then reflection of it.
if not isinstance(table, Table):
table.metadata.remove(table)
ch_table = Table._make_from_standard(
table, _extend_on=kwargs.get('_extend_on')
)
else:
ch_table = table
super(ClickHouseInspector, self).reflect_table(
ch_table, *args, **kwargs
)
with self._operation_context() as conn:
schema = conn.schema_for_object(ch_table)
self._reflect_engine(ch_table.name, schema, ch_table)
def _reflect_engine(self, table_name, schema, table):
should_reflect = (
self.dialect.supports_engine_reflection and
self.dialect.engine_reflection
)
if not should_reflect:
return
engine_cls_by_name = {e.__name__: e for e in engines.__all__}
e = self.get_engine(table_name, schema=table.schema)
if not e:
raise ValueError("Cannot find engine for table '%s'" % table_name)
engine_cls = engine_cls_by_name.get(e['engine'])
if engine_cls is not None:
engine = engine_cls.reflect(table, **e)
engine._set_parent(table)
else:
table.engine = None
def get_engine(self, table_name, schema=None, **kw):
with self._operation_context() as conn:
return self.dialect.get_engine(
conn, table_name, schema=schema, info_cache=self.info_cache,
**kw
)
| from sqlalchemy.engine import reflection
from clickhouse_sqlalchemy import Table, engines
class ClickHouseInspector(reflection.Inspector):
def reflect_table(self, table, *args, **kwargs):
# This check is necessary to support direct instantiation of
# `clickhouse_sqlalchemy.Table` and then reflection of it.
if not isinstance(table, Table):
table.metadata.remove(table)
ch_table = Table._make_from_standard(
table, _extend_on=kwargs.get('_extend_on')
)
else:
ch_table = table
super(ClickHouseInspector, self).reflect_table(
ch_table, *args, **kwargs
)
with self._operation_context() as conn:
schema = conn.schema_for_object(ch_table)
self._reflect_engine(ch_table.name, schema, ch_table)
def _reflect_engine(self, table_name, schema, table):
should_reflect = (
self.dialect.supports_engine_reflection and
self.dialect.engine_reflection
)
if not should_reflect:
return
engine_cls_by_name = {e.__name__: e for e in engines.__all__}
e = self.get_engine(table_name, schema=table.schema)
if not e:
raise ValueError("Cannot find engine for table '%s'" % table_name)
engine_cls = engine_cls_by_name.get(e['engine'])
if engine_cls is not None:
engine = engine_cls.reflect(table, **e)
engine._set_parent(table)
else:
table.engine = None
def get_engine(self, table_name, schema=None, **kw):
with self._operation_context() as conn:
return self.dialect.get_engine(
conn, table_name, schema=schema, info_cache=self.info_cache,
**kw
) | en | 0.87837 | # This check is necessary to support direct instantiation of # `clickhouse_sqlalchemy.Table` and then reflection of it. | 2.291766 | 2 |
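A sketch of how the inspector above is normally exercised through ordinary SQLAlchemy reflection (the DSN and table name are placeholders; details depend on the installed clickhouse-sqlalchemy/SQLAlchemy versions):
from sqlalchemy import create_engine, MetaData
from clickhouse_sqlalchemy import Table
engine = create_engine('clickhouse://default:@localhost:8123/default')
metadata = MetaData()
events = Table('events', metadata, autoload_with=engine)  # routed through ClickHouseInspector.reflect_table
print(events.engine)  # the table engine reflected by _reflect_engine above (e.g. a MergeTree instance)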
tests/test_disque.py | abdul-khalid/pydisque | 1 | 8289 | """
Unit Tests for the pydisque module.
Currently, most of these tests require a fresh instance of
Disque to be valid and pass.
"""
import unittest
import json
import time
import random
import six
from pydisque.client import Client
from redis.exceptions import ResponseError
class TestDisque(unittest.TestCase):
"""TestCase class for pydisque."""
testID = None
def setUp(self):
"""Setup the tests."""
self.client = Client(['localhost:7711'])
self.client.connect()
self.testID = "%d.%d" % (time.time(),
random.randint(1000, 1000000))
def test_publish_and_receive(self):
"""Test the most important functions of pydisque."""
t1 = str(time.time())
self.client.add_job("test_q", t1, timeout=100)
jobs = self.client.get_job(['test_q'])
assert len(jobs) == 1
for queue_name, job_id, job in jobs:
assert job == six.b(t1)
self.client.ack_job(job_id)
assert len(self.client.get_job(['test_q'], timeout=100)) == 0
def test_nack(self):
"""Fetch the queue, return a job, check that it's back."""
t1 = str(time.time())
queuename = "test_nack." + self.testID
self.client.add_job(queuename, str(t1), timeout=100)
jobs = self.client.get_job([queuename])
# NACK the first read
assert len(jobs) == 1
for queue_name, job_id, job in jobs:
assert len(jobs) == 1
assert job == six.b(t1)
self.client.nack_job(job_id)
# this time ACK it
jobs = self.client.get_job([queuename])
assert len(jobs) == 1
for queue_name, job_id, job in jobs:
assert job == six.b(t1)
self.client.ack_job(job_id)
assert len(self.client.get_job([queuename], timeout=100)) == 0
def test_qpeek(self):
"""
Test qpeek.
Ran into some problems with an ENQUEUE/DEQUEUE test that
was using qpeek, checking core functionality of qpeek().
"""
queuename = "test_qpeek-%s" % self.testID
job_id = self.client.add_job(queuename, "Peek A Boo")
peeked = self.client.qpeek(queuename, 1)
assert peeked[0][1] == job_id
def test_qscan(self):
"""
Test the qscan function.
This test relies on add_job() being functional, and
the local disque not being a disque proxy to a mesh.
TODO: unique the queues with self.testID.
"""
t1 = str(time.time())
self.client.add_job("q1", t1, timeout=100)
self.client.add_job("q2", t1, timeout=100)
qb = self.client.qscan()
assert qb[0]
assert qb[1]
assert six.b("q1") in qb[1]
assert six.b("q2") in qb[1]
def test_jscan(self):
"""Simple test of the jscan function."""
t1 = time.time()
queuename = "test_jscan-%s" % self.testID
j1 = self.client.add_job(queuename, str(t1), timeout=100)
jerbs = self.client.jscan(queue=queuename)
assert j1 in jerbs[1]
def test_del_job(self):
"""Simple test of del_job, needs qpeek.
FIXME: This function has grown ugly.
"""
t1 = time.time()
queuename = "test_del_job-%s" % self.testID
j1 = self.client.add_job(queuename, str(t1))
jerbs = self.client.qpeek(queuename, 1)
jlist = []
for item in jerbs:
jlist.append(item[1])
assert j1 in jlist
self.client.del_job(j1)
jerbs = self.client.qpeek(queuename, 1)
jlist = []
for item in jerbs:
jlist.append(item[1])
assert j1 not in jerbs
def test_qlen(self):
"""Simple test of qlen."""
queuename = "test_qlen-%s" % self.testID
lengthOfTest = 100
test_job = "Useless Job."
for x in range(lengthOfTest):
self.client.add_job(queuename, test_job)
assert self.client.qlen(queuename) == lengthOfTest
def test_qstat(self):
"""Testing QSTAT (default behavior)."""
queuename = "test_qstat-%s" % self.testID
testqueue = ["a", "b", "c"]
for x in testqueue:
self.client.add_job(queuename, x)
stat = self.client.qstat(queuename)
# check the basics
assert 'jobs-in' in stat
assert 'jobs-out' in stat
def test_qstat_dict(self):
"""Testing QSTAT's (new dict behavior)."""
queuename = "test_qstat_dict-%s" % self.testID
testqueue = ["a", "b", "c"]
for x in testqueue:
self.client.add_job(queuename, x)
stat = self.client.qstat(queuename, True)
assert stat.get('jobs-in', None) is not None
assert stat.get('jobs-out', None) is not None
def test_shownack(self):
"""Test that NACK and SHOW work appropriately."""
queuename = "test_show-%s" % self.testID
test_job = "Show me."
self.client.add_job(queuename, test_job)
jobs = self.client.get_job([queuename])
for queue_name, job_id, job in jobs:
self.client.nack_job(job_id)
shown = self.client.show(job_id, True)
assert shown.get('body') == test_job
assert shown.get('nacks') == 1
def test_pause(self):
"""Test that a PAUSE message is acknowledged."""
queuename = "test_show-%s" % self.testID
test_job = "Jerbs, they are a thing"
self.client.pause(queuename, kw_in=True)
try:
job_id = self.client.add_job(queuename, test_job)
except ResponseError:
pass
# can we add a job again?
self.client.pause(queuename, kw_none=True)
job_id = self.client.add_job(queuename, test_job)
jobs = self.client.get_job([queuename])
# TODO(canardleteer): add a test of PAUSE SHOW
def test_get_job(self):
queue_name = "test_get_job." + self.testID
job = str(time.time())
job_id = self.client.add_job(queue_name, job)
expected = [(queue_name, job_id, job)]
got = self.client.get_job([queue_name], withcounters=False)
assert expected == got
def test_get_job_withcounters(self):
queue_name = "test_get_job." + self.testID
job = str(time.time())
job_id = self.client.add_job(queue_name, job)
nacks = 0
additional_deliveries = 0
expected = [(queue_name, job_id, job, nacks, additional_deliveries)]
got = self.client.get_job([queue_name], withcounters=True)
assert expected == got
if __name__ == '__main__':
unittest.main()
| """
Unit Tests for the pydisque module.
Currently, most of these tests require a fresh instance of
Disque to be valid and pass.
"""
import unittest
import json
import time
import random
import six
from pydisque.client import Client
from redis.exceptions import ResponseError
class TestDisque(unittest.TestCase):
"""TestCase class for pydisque."""
testID = None
def setUp(self):
"""Setup the tests."""
self.client = Client(['localhost:7711'])
self.client.connect()
self.testID = "%d.%d" % (time.time(),
random.randint(1000, 1000000))
def test_publish_and_receive(self):
"""Test the most important functions of pydisque."""
t1 = str(time.time())
self.client.add_job("test_q", t1, timeout=100)
jobs = self.client.get_job(['test_q'])
assert len(jobs) == 1
for queue_name, job_id, job in jobs:
assert job == six.b(t1)
self.client.ack_job(job_id)
assert len(self.client.get_job(['test_q'], timeout=100)) == 0
def test_nack(self):
"""Fetch the queue, return a job, check that it's back."""
t1 = str(time.time())
queuename = "test_nack." + self.testID
self.client.add_job(queuename, str(t1), timeout=100)
jobs = self.client.get_job([queuename])
# NACK the first read
assert len(jobs) == 1
for queue_name, job_id, job in jobs:
assert len(jobs) == 1
assert job == six.b(t1)
self.client.nack_job(job_id)
# this time ACK it
jobs = self.client.get_job([queuename])
assert len(jobs) == 1
for queue_name, job_id, job in jobs:
assert job == six.b(t1)
self.client.ack_job(job_id)
assert len(self.client.get_job([queuename], timeout=100)) == 0
def test_qpeek(self):
"""
Test qpeek.
Ran into some problems with an ENQUEUE/DEQUEUE test that
was using qpeek, checking core functionality of qpeek().
"""
queuename = "test_qpeek-%s" % self.testID
job_id = self.client.add_job(queuename, "Peek A Boo")
peeked = self.client.qpeek(queuename, 1)
assert peeked[0][1] == job_id
def test_qscan(self):
"""
Test the qscan function.
This test relies on add_job() being functional, and
the local disque not being a disque proxy to a mesh.
TODO: unique the queues with self.testID.
"""
t1 = str(time.time())
self.client.add_job("q1", t1, timeout=100)
self.client.add_job("q2", t1, timeout=100)
qb = self.client.qscan()
assert qb[0]
assert qb[1]
assert six.b("q1") in qb[1]
assert six.b("q2") in qb[1]
def test_jscan(self):
"""Simple test of the jscan function."""
t1 = time.time()
queuename = "test_jscan-%s" % self.testID
j1 = self.client.add_job(queuename, str(t1), timeout=100)
jerbs = self.client.jscan(queue=queuename)
assert j1 in jerbs[1]
def test_del_job(self):
"""Simple test of del_job, needs qpeek.
FIXME: This function has grown ugly.
"""
t1 = time.time()
queuename = "test_del_job-%s" % self.testID
j1 = self.client.add_job(queuename, str(t1))
jerbs = self.client.qpeek(queuename, 1)
jlist = []
for item in jerbs:
jlist.append(item[1])
assert j1 in jlist
self.client.del_job(j1)
jerbs = self.client.qpeek(queuename, 1)
jlist = []
for item in jerbs:
jlist.append(item[1])
assert j1 not in jerbs
def test_qlen(self):
"""Simple test of qlen."""
queuename = "test_qlen-%s" % self.testID
lengthOfTest = 100
test_job = "Useless Job."
for x in range(lengthOfTest):
self.client.add_job(queuename, test_job)
assert self.client.qlen(queuename) == lengthOfTest
def test_qstat(self):
"""Testing QSTAT (default behavior)."""
queuename = "test_qstat-%s" % self.testID
testqueue = ["a", "b", "c"]
for x in testqueue:
self.client.add_job(queuename, x)
stat = self.client.qstat(queuename)
# check the basics
assert 'jobs-in' in stat
assert 'jobs-out' in stat
def test_qstat_dict(self):
"""Testing QSTAT's (new dict behavior)."""
queuename = "test_qstat_dict-%s" % self.testID
testqueue = ["a", "b", "c"]
for x in testqueue:
self.client.add_job(queuename, x)
stat = self.client.qstat(queuename, True)
assert stat.get('jobs-in', None) is not None
assert stat.get('jobs-out', None) is not None
def test_shownack(self):
"""Test that NACK and SHOW work appropriately."""
queuename = "test_show-%s" % self.testID
test_job = "Show me."
self.client.add_job(queuename, test_job)
jobs = self.client.get_job([queuename])
for queue_name, job_id, job in jobs:
self.client.nack_job(job_id)
shown = self.client.show(job_id, True)
assert shown.get('body') == test_job
assert shown.get('nacks') == 1
def test_pause(self):
"""Test that a PAUSE message is acknowledged."""
queuename = "test_show-%s" % self.testID
test_job = "Jerbs, they are a thing"
self.client.pause(queuename, kw_in=True)
try:
job_id = self.client.add_job(queuename, test_job)
except ResponseError:
pass
# can we add a job again?
self.client.pause(queuename, kw_none=True)
job_id = self.client.add_job(queuename, test_job)
jobs = self.client.get_job([queuename])
# TODO(canardleteer): add a test of PAUSE SHOW
def test_get_job(self):
queue_name = "test_get_job." + self.testID
job = str(time.time())
job_id = self.client.add_job(queue_name, job)
expected = [(queue_name, job_id, job)]
got = self.client.get_job([queue_name], withcounters=False)
assert expected == got
def test_get_job_withcounters(self):
queue_name = "test_get_job." + self.testID
job = str(time.time())
job_id = self.client.add_job(queue_name, job)
nacks = 0
additional_deliveries = 0
expected = [(queue_name, job_id, job, nacks, additional_deliveries)]
got = self.client.get_job([queue_name], withcounters=True)
assert expected == got
if __name__ == '__main__':
unittest.main()
| en | 0.807898 | Unit Tests for the pydisque module. Currently, most of these tests require a fresh instance of Disque to be valid and pass. TestCase class for pydisque. Setup the tests. Test the most important functions of pydisque. Fetch the queue, return a job, check that it's back. # NACK the first read # this time ACK it Test qpeek. Ran into some problems with an ENQUEUE/DEQUEUE test that was using qpeek, checking core functionality of qpeek(). Test the qscan function. This test relies on add_job() being functional, and the local disque not being a disque proxy to a mesh. TODO: unique the queues with self.testID. Simple test of the jscan function. Simple test of del_job, needs qpeek. FIXME: This function has grown ugly. Simple test of qlen. Testing QSTAT (default behavior). # check the basics Testing QSTAT's (new dict behavior). Test that NACK and SHOW work appropriately. Test that a PAUSE message is acknowledged. # can we add a job again? # TODO(canardleteer): add a test of PAUSE SHOW | 2.811587 | 3 |
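The client API exercised by the tests above reduces to a short produce/consume loop; a minimal sketch (assumes a Disque node listening on localhost:7711):
from pydisque.client import Client
client = Client(['localhost:7711'])
client.connect()
client.add_job('example_queue', 'hello', timeout=100)
for queue_name, job_id, body in client.get_job(['example_queue']):
    print(queue_name, job_id, body)
    client.ack_job(job_id)   # acknowledge so the job is not redelivered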
src/runner.py | samirsahoo007/Naive-Bayes-and-Decision-Tree-Classifiers | 1 | 8290 | # -*- coding: utf-8 -*- #
"""*********************************************************************************************"""
# FileName [ runner.py ]
# Synopsis [ main program that runs the 'Naive Bayes' and 'Decision Tree' training / testing ]
# Author [ <NAME> (Andi611) ]
# Copyright [ Copyleft(c), NTUEE, NTU, Taiwan ]
"""*********************************************************************************************"""
###############
# IMPORTATION #
###############
import os
import csv
import argparse
import numpy as np
from data_loader import data_loader
from classifiers import naive_bayes_runner
from classifiers import decision_tree_runner
##################
# CONFIGURATIONS #
##################
def get_config():
parser = argparse.ArgumentParser(description='descrip_msg')
classifier = parser.add_argument_group('classifier')
classifier.add_argument('--classifier', type=str, default='', help='classifier to be specified by user')
classifier.add_argument('--naive_bayes', action='store_true', help='enable Naive Bayes classification mode')
classifier.add_argument('--decision_tree', action='store_true', help='enable Decision Tree classification mode')
mode_args = parser.add_argument_group('mode')
mode_args.add_argument('--search_opt', action='store_true', help='search for optimal parameters for classifiers')
mode_args.add_argument('--run_all', action='store_true', help='run all distribution assumption for the Naive Bayes classifier')
mode_args.add_argument('--visualize_tree', action='store_true', help='plot and visualize the Decision Tree classifier')
data_args = parser.add_argument_group('data')
data_args.add_argument('--data_news', action='store_true', help='Training and testing on the News dataset')
data_args.add_argument('--data_mushroom', action='store_true', help='Training and testing on the Mushroom dataset')
data_args.add_argument('--data_income', action='store_true', help='Training and testing on the Income dataset')
path_args = parser.add_argument_group('train_path')
path_args.add_argument('--train_path', type=str, default='', help='training path to be specified by user')
path_args.add_argument('--train_path_news', type=str, default='../data/news/news_train.csv', help='path to the News training dataset')
path_args.add_argument('--train_path_mushroom', type=str, default='../data/mushroom/mushroom_train.csv', help='path to the Mushroom training dataset')
path_args.add_argument('--train_path_income', type=str, default='../data/income/income_train.csv', help='path to the Income training dataset')
path_args = parser.add_argument_group('test_path')
path_args.add_argument('--test_path', type=str, default='', help='testing path to be specified by user')
path_args.add_argument('--test_path_news', type=str, default='../data/news/news_test.csv', help='path to the News testing dataset')
path_args.add_argument('--test_path_mushroom', type=str, default='../data/mushroom/mushroom_test.csv', help='path to the Mushroom testing dataset')
path_args.add_argument('--test_path_income', type=str, default='../data/income/income_test.csv', help='path to the Income testing dataset')
path_args = parser.add_argument_group('output_path')
path_args.add_argument('--output_path', type=str, default='../result/output.csv', help='path to save model prediction')
args = parser.parse_args()
args = error_handling(args)
return args
##################
# ERROR HANDLING #
##################
def error_handling(args):
if args.classifier != '':
args.naive_bayes = True if args.classifier == 'N' else False
args.decision_tree = True if args.classifier == 'D' else False
if args.naive_bayes and args.decision_tree == True:
raise AssertionError('Please choose one classifier at once, or specify the correct classifier!')
    if sum([args.search_opt, args.run_all, args.visualize_tree]) > 1:
        raise AssertionError('Please choose one mode at a time!')
    if sum([args.data_news, args.data_mushroom, args.data_income]) > 1:
        raise AssertionError('Please choose only one dataset at a time!')
if args.train_path != '' and args.test_path != '':
if not os.path.isfile(args.train_path) or not os.path.isfile(args.test_path):
raise AssertionError('The given file path is invalid!')
if args.data_news:
args.train_path_news = args.train_path
args.test_path_news = args.test_path
elif args.data_mushroom:
args.train_path_mushroom = args.train_path
args.test_path_mushroom = args.test_path
elif args.data_income:
args.train_path_income = args.train_path
args.test_path_income = args.test_path
else:
raise AssertionError('Must choose a dataset!')
return args
#################
# OUTPUT WRITER #
#################
def output_writer(path, result):
with open(path, 'w') as f:
file = csv.writer(f, delimiter=',', quotechar='\r')
for item in result:
file.writerow([int(item)])
print('Results have been successfully saved to: %s' % (path))
return True
########
# MAIN #
########
"""
main function
"""
def main():
args = get_config()
loader = data_loader(args)
#---fetch data---#
if args.data_news:
train_x, train_y, test_x, test_y = loader.fetch_news()
MODEL = 'NEWS'
elif args.data_mushroom:
train_x, train_y, test_x, test_y = loader.fetch_mushroom()
MODEL = 'MUSHROOM'
elif args.data_income:
train_x, train_y, test_x, test_y = loader.fetch_income() # -> test_y == None
MODEL = 'INCOME'
###############
# NAIVE BAYES #
###############
if args.naive_bayes:
#---construct model---#
naive_bayes = naive_bayes_runner(MODEL, train_x, train_y, test_x, test_y)
#---modes---#
if args.search_opt:
naive_bayes.search_alpha()
elif args.run_all:
naive_bayes.run_best_all()
else:
pred_y = naive_bayes.run_best()
output_writer(args.output_path, pred_y)
#################
# DECISION TREE #
#################
if args.decision_tree:
#---construct model---#
decision_tree = decision_tree_runner(MODEL, train_x, train_y, test_x, test_y)
#---modes---#
if args.search_opt:
decision_tree.search_max_depth()
elif args.visualize_tree:
decision_tree.visualize()
else:
pred_y = decision_tree.run_best()
output_writer(args.output_path, pred_y)
if __name__ == '__main__':
main()
| # -*- coding: utf-8 -*- #
"""*********************************************************************************************"""
# FileName [ runner.py ]
# Synopsis [ main program that runs the 'Naive Bayes' and 'Decision Tree' training / testing ]
# Author [ <NAME> (Andi611) ]
# Copyright [ Copyleft(c), NTUEE, NTU, Taiwan ]
"""*********************************************************************************************"""
###############
# IMPORTATION #
###############
import os
import csv
import argparse
import numpy as np
from data_loader import data_loader
from classifiers import naive_bayes_runner
from classifiers import decision_tree_runner
##################
# CONFIGURATIONS #
##################
def get_config():
parser = argparse.ArgumentParser(description='descrip_msg')
classifier = parser.add_argument_group('classifier')
classifier.add_argument('--classifier', type=str, default='', help='classifier to be specified by user')
classifier.add_argument('--naive_bayes', action='store_true', help='enable Naive Bayes classification mode')
classifier.add_argument('--decision_tree', action='store_true', help='enable Decision Tree classification mode')
mode_args = parser.add_argument_group('mode')
mode_args.add_argument('--search_opt', action='store_true', help='search for optimal parameters for classifiers')
mode_args.add_argument('--run_all', action='store_true', help='run all distribution assumption for the Naive Bayes classifier')
mode_args.add_argument('--visualize_tree', action='store_true', help='plot and visualize the Decision Tree classifier')
data_args = parser.add_argument_group('data')
data_args.add_argument('--data_news', action='store_true', help='Training and testing on the News dataset')
data_args.add_argument('--data_mushroom', action='store_true', help='Training and testing on the Mushroom dataset')
data_args.add_argument('--data_income', action='store_true', help='Training and testing on the Income dataset')
path_args = parser.add_argument_group('train_path')
path_args.add_argument('--train_path', type=str, default='', help='training path to be specified by user')
path_args.add_argument('--train_path_news', type=str, default='../data/news/news_train.csv', help='path to the News training dataset')
path_args.add_argument('--train_path_mushroom', type=str, default='../data/mushroom/mushroom_train.csv', help='path to the Mushroom training dataset')
path_args.add_argument('--train_path_income', type=str, default='../data/income/income_train.csv', help='path to the Income training dataset')
path_args = parser.add_argument_group('test_path')
path_args.add_argument('--test_path', type=str, default='', help='testing path to be specified by user')
path_args.add_argument('--test_path_news', type=str, default='../data/news/news_test.csv', help='path to the News testing dataset')
path_args.add_argument('--test_path_mushroom', type=str, default='../data/mushroom/mushroom_test.csv', help='path to the Mushroom testing dataset')
path_args.add_argument('--test_path_income', type=str, default='../data/income/income_test.csv', help='path to the Income testing dataset')
path_args = parser.add_argument_group('output_path')
path_args.add_argument('--output_path', type=str, default='../result/output.csv', help='path to save model prediction')
args = parser.parse_args()
args = error_handling(args)
return args
##################
# ERROR HANDLING #
##################
def error_handling(args):
if args.classifier != '':
args.naive_bayes = True if args.classifier == 'N' else False
args.decision_tree = True if args.classifier == 'D' else False
if args.naive_bayes and args.decision_tree == True:
raise AssertionError('Please choose one classifier at once, or specify the correct classifier!')
    if sum([args.search_opt, args.run_all, args.visualize_tree]) > 1:
        raise AssertionError('Please choose one mode at a time!')
    if sum([args.data_news, args.data_mushroom, args.data_income]) > 1:
        raise AssertionError('Please choose only one dataset at a time!')
if args.train_path != '' and args.test_path != '':
if not os.path.isfile(args.train_path) or not os.path.isfile(args.test_path):
raise AssertionError('The given file path is invalid!')
if args.data_news:
args.train_path_news = args.train_path
args.test_path_news = args.test_path
elif args.data_mushroom:
args.train_path_mushroom = args.train_path
args.test_path_mushroom = args.test_path
elif args.data_income:
args.train_path_income = args.train_path
args.test_path_income = args.test_path
else:
raise AssertionError('Must choose a dataset!')
return args
#################
# OUTPUT WRITER #
#################
def output_writer(path, result):
with open(path, 'w') as f:
file = csv.writer(f, delimiter=',', quotechar='\r')
for item in result:
file.writerow([int(item)])
print('Results have been successfully saved to: %s' % (path))
return True
########
# MAIN #
########
"""
main function
"""
def main():
args = get_config()
loader = data_loader(args)
#---fetch data---#
if args.data_news:
train_x, train_y, test_x, test_y = loader.fetch_news()
MODEL = 'NEWS'
elif args.data_mushroom:
train_x, train_y, test_x, test_y = loader.fetch_mushroom()
MODEL = 'MUSHROOM'
elif args.data_income:
train_x, train_y, test_x, test_y = loader.fetch_income() # -> test_y == None
MODEL = 'INCOME'
###############
# NAIVE BAYES #
###############
if args.naive_bayes:
#---construct model---#
naive_bayes = naive_bayes_runner(MODEL, train_x, train_y, test_x, test_y)
#---modes---#
if args.search_opt:
naive_bayes.search_alpha()
elif args.run_all:
naive_bayes.run_best_all()
else:
pred_y = naive_bayes.run_best()
output_writer(args.output_path, pred_y)
#################
# DECISION TREE #
#################
if args.decision_tree:
#---construct model---#
decision_tree = decision_tree_runner(MODEL, train_x, train_y, test_x, test_y)
#---modes---#
if args.search_opt:
decision_tree.search_max_depth()
elif args.visualize_tree:
decision_tree.visualize()
else:
pred_y = decision_tree.run_best()
output_writer(args.output_path, pred_y)
if __name__ == '__main__':
main()
| de | 0.234536 | # -*- coding: utf-8 -*- # ********************************************************************************************* # FileName [ runner.py ] # Synopsis [ main program that runs the 'Naive Bayes' and 'Decision Tree' training / testing ] # Author [ <NAME> (Andi611) ] # Copyright [ Copyleft(c), NTUEE, NTU, Taiwan ] ********************************************************************************************* ############### # IMPORTATION # ############### ################## # CONFIGURATIONS # ################## ################## # ERROR HANDLING # ################## ################# # OUTPUT WRITER # ################# ######## # MAIN # ######## main function #---fetch data---# # -> test_y == None ############### # NAIVE BAYES # ############### #---construct model---# #---modes---# ################# # DECISION TREE # ################# #---construct model---# #---modes---# | 2.371207 | 2 |
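A typical invocation of the script above is `python runner.py --naive_bayes --data_news`; a programmatic equivalent (simulating the CLI arguments before calling main) looks roughly like this:
# Illustrative only: overrides sys.argv so argparse sees the flags defined above.
import sys
sys.argv = ['runner.py', '--naive_bayes', '--data_news']
main()   # loads the News dataset, runs the best Naive Bayes model and writes ../result/output.csv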
igibson/metrics/agent.py | Nick-AhSen/iGibson | 0 | 8291 | import copy
import numpy as np
import pybullet as p
from igibson.metrics.metric_base import MetricBase
class BehaviorRobotMetric(MetricBase):
def __init__(self):
self.initialized = False
self.state_cache = {}
self.next_state_cache = {}
self.agent_pos = {part: [] for part in ["left_hand", "right_hand", "body"]}
self.agent_grasping = {part: [] for part in ["left_hand", "right_hand"]}
self.agent_local_pos = {part: [] for part in ["left_hand", "right_hand"]}
self.agent_reset = {part: [] for part in ["left_hand", "right_hand", "body"]}
self.delta_agent_work = {part: [] for part in ["left_hand", "right_hand", "body"]}
self.delta_agent_distance = {part: [] for part in ["left_hand", "right_hand", "body"]}
self.delta_agent_grasp_distance = {part: [] for part in ["left_hand", "right_hand"]}
self.clip = 0.2
def step_callback(self, igbhvr_act_inst, _):
robot = igbhvr_act_inst.simulator.robots[0]
agent_work = {part: 0 for part in ["left_hand", "right_hand", "body"]}
agent_distance = {part: 0 for part in ["left_hand", "right_hand", "body"]}
for part in ["left_hand", "right_hand", "body"]:
self.next_state_cache[part] = {
"position": np.array(p.getBasePositionAndOrientation(robot.parts[part].get_body_id())[0]),
}
if not self.initialized:
self.state_cache = copy.deepcopy(self.next_state_cache)
self.initialized = True
if robot.action[19] > 0 and robot.action[27] > 0:
self.agent_reset["left_hand"].append(True)
self.agent_reset["right_hand"].append(True)
self.agent_reset["body"].append(True)
        elif robot.action[19] > 0:
self.agent_reset["left_hand"].append(True)
self.agent_reset["right_hand"].append(False)
self.agent_reset["body"].append(True)
elif robot.action[27] > 0:
self.agent_reset["left_hand"].append(False)
self.agent_reset["right_hand"].append(True)
self.agent_reset["body"].append(True)
else:
self.agent_reset["left_hand"].append(False)
self.agent_reset["right_hand"].append(False)
self.agent_reset["body"].append(False)
for part in self.state_cache:
delta_pos = np.linalg.norm(self.next_state_cache[part]["position"] - self.state_cache[part]["position"])
self.agent_pos[part].append(list(self.state_cache[part]["position"]))
# Exclude agent teleports
delta_pos = np.clip(delta_pos, -self.clip, self.clip)
if robot.parts[part].movement_cid is None:
force = 0
work = 0
else:
force = p.getConstraintState(robot.parts[part].movement_cid)
work = np.abs((delta_pos * np.linalg.norm(force)))
distance = np.abs(delta_pos)
if part in ["left_hand", "right_hand"]:
self.agent_local_pos[part].append(list(robot.parts[part].get_local_position_orientation()[0]))
if part in ["left_hand", "right_hand"] and (
len(p.getContactPoints(robot.parts[part].get_body_id())) > 0
or robot.parts[part].object_in_hand is not None
):
self.delta_agent_grasp_distance[part].append(distance)
self.agent_grasping[part].append(True)
elif part in ["left_hand", "right_hand"]:
self.delta_agent_grasp_distance[part].append(0)
self.agent_grasping[part].append(False)
agent_work[part] = work
agent_distance[part] = distance
self.delta_agent_work[part].append(work)
self.delta_agent_distance[part].append(distance)
self.state_cache = copy.deepcopy(self.next_state_cache)
def gather_results(self):
return {
"agent_distance": {
"timestep": self.delta_agent_distance,
},
"grasp_distance": {
"timestep": self.delta_agent_grasp_distance,
},
"work": {
"timestep": self.delta_agent_work,
},
"pos": {
"timestep": self.agent_pos,
},
"local_pos": {
"timestep": self.agent_local_pos,
},
"grasping": {
"timestep": self.agent_grasping,
},
"reset": {
"timestep": self.agent_reset,
},
}
class FetchRobotMetric(MetricBase):
def __init__(self):
self.initialized = False
self.state_cache = {}
self.next_state_cache = {}
self.agent_pos = {part: [] for part in ["gripper", "body"]}
self.agent_grasping = {part: [] for part in ["gripper"]}
self.agent_local_pos = {part: [] for part in ["gripper"]}
self.delta_agent_distance = {part: [] for part in ["gripper", "body"]}
self.delta_agent_grasp_distance = {part: [] for part in ["gripper"]}
self.clip = 0.2
def step_callback(self, igbhvr_act_inst, _):
robot = igbhvr_act_inst.simulator.robots[0]
agent_distance = {part: 0 for part in self.agent_pos}
self.next_state_cache = {
"gripper": {"position": robot.get_end_effector_position()},
"body": {"position": robot.get_position()},
}
if not self.initialized:
self.state_cache = copy.deepcopy(self.next_state_cache)
self.initialized = True
self.agent_pos["body"].append(list(self.state_cache["body"]["position"]))
delta_pos = np.linalg.norm(
np.array(self.next_state_cache["body"]["position"]) - self.state_cache["body"]["position"]
)
distance = np.abs(delta_pos)
self.delta_agent_distance["body"].append(distance)
self.agent_pos["gripper"].append(list(self.state_cache["gripper"]["position"]))
delta_pos = np.linalg.norm(
self.next_state_cache["gripper"]["position"] - self.state_cache["gripper"]["position"]
)
gripper_distance = np.abs(delta_pos)
self.delta_agent_distance["gripper"].append(gripper_distance)
self.agent_local_pos["gripper"].append(list(robot.get_relative_eef_position()))
contacts = p.getContactPoints(bodyA=robot.robot_ids[0], linkIndexA=robot.eef_link_id)
if len(contacts) > 0:
self.delta_agent_grasp_distance["gripper"].append(gripper_distance)
self.agent_grasping["gripper"].append(True)
else:
self.delta_agent_grasp_distance["gripper"].append(0)
self.agent_grasping["gripper"].append(False)
self.state_cache = copy.deepcopy(self.next_state_cache)
def gather_results(self):
return {
"agent_distance": {
"timestep": self.delta_agent_distance,
},
"grasp_distance": {
"timestep": self.delta_agent_grasp_distance,
},
"pos": {
"timestep": self.agent_pos,
},
"local_pos": {
"timestep": self.agent_local_pos,
},
"grasping": {
"timestep": self.agent_grasping,
},
}
| import copy
import numpy as np
import pybullet as p
from igibson.metrics.metric_base import MetricBase
class BehaviorRobotMetric(MetricBase):
def __init__(self):
self.initialized = False
self.state_cache = {}
self.next_state_cache = {}
self.agent_pos = {part: [] for part in ["left_hand", "right_hand", "body"]}
self.agent_grasping = {part: [] for part in ["left_hand", "right_hand"]}
self.agent_local_pos = {part: [] for part in ["left_hand", "right_hand"]}
self.agent_reset = {part: [] for part in ["left_hand", "right_hand", "body"]}
self.delta_agent_work = {part: [] for part in ["left_hand", "right_hand", "body"]}
self.delta_agent_distance = {part: [] for part in ["left_hand", "right_hand", "body"]}
self.delta_agent_grasp_distance = {part: [] for part in ["left_hand", "right_hand"]}
self.clip = 0.2
def step_callback(self, igbhvr_act_inst, _):
robot = igbhvr_act_inst.simulator.robots[0]
agent_work = {part: 0 for part in ["left_hand", "right_hand", "body"]}
agent_distance = {part: 0 for part in ["left_hand", "right_hand", "body"]}
for part in ["left_hand", "right_hand", "body"]:
self.next_state_cache[part] = {
"position": np.array(p.getBasePositionAndOrientation(robot.parts[part].get_body_id())[0]),
}
if not self.initialized:
self.state_cache = copy.deepcopy(self.next_state_cache)
self.initialized = True
if robot.action[19] > 0 and robot.action[27] > 0:
self.agent_reset["left_hand"].append(True)
self.agent_reset["right_hand"].append(True)
self.agent_reset["body"].append(True)
        elif robot.action[19] > 0:
self.agent_reset["left_hand"].append(True)
self.agent_reset["right_hand"].append(False)
self.agent_reset["body"].append(True)
elif robot.action[27] > 0:
self.agent_reset["left_hand"].append(False)
self.agent_reset["right_hand"].append(True)
self.agent_reset["body"].append(True)
else:
self.agent_reset["left_hand"].append(False)
self.agent_reset["right_hand"].append(False)
self.agent_reset["body"].append(False)
for part in self.state_cache:
delta_pos = np.linalg.norm(self.next_state_cache[part]["position"] - self.state_cache[part]["position"])
self.agent_pos[part].append(list(self.state_cache[part]["position"]))
# Exclude agent teleports
delta_pos = np.clip(delta_pos, -self.clip, self.clip)
if robot.parts[part].movement_cid is None:
force = 0
work = 0
else:
force = p.getConstraintState(robot.parts[part].movement_cid)
work = np.abs((delta_pos * np.linalg.norm(force)))
distance = np.abs(delta_pos)
if part in ["left_hand", "right_hand"]:
self.agent_local_pos[part].append(list(robot.parts[part].get_local_position_orientation()[0]))
if part in ["left_hand", "right_hand"] and (
len(p.getContactPoints(robot.parts[part].get_body_id())) > 0
or robot.parts[part].object_in_hand is not None
):
self.delta_agent_grasp_distance[part].append(distance)
self.agent_grasping[part].append(True)
elif part in ["left_hand", "right_hand"]:
self.delta_agent_grasp_distance[part].append(0)
self.agent_grasping[part].append(False)
agent_work[part] = work
agent_distance[part] = distance
self.delta_agent_work[part].append(work)
self.delta_agent_distance[part].append(distance)
self.state_cache = copy.deepcopy(self.next_state_cache)
def gather_results(self):
return {
"agent_distance": {
"timestep": self.delta_agent_distance,
},
"grasp_distance": {
"timestep": self.delta_agent_grasp_distance,
},
"work": {
"timestep": self.delta_agent_work,
},
"pos": {
"timestep": self.agent_pos,
},
"local_pos": {
"timestep": self.agent_local_pos,
},
"grasping": {
"timestep": self.agent_grasping,
},
"reset": {
"timestep": self.agent_reset,
},
}
class FetchRobotMetric(MetricBase):
def __init__(self):
self.initialized = False
self.state_cache = {}
self.next_state_cache = {}
self.agent_pos = {part: [] for part in ["gripper", "body"]}
self.agent_grasping = {part: [] for part in ["gripper"]}
self.agent_local_pos = {part: [] for part in ["gripper"]}
self.delta_agent_distance = {part: [] for part in ["gripper", "body"]}
self.delta_agent_grasp_distance = {part: [] for part in ["gripper"]}
self.clip = 0.2
def step_callback(self, igbhvr_act_inst, _):
robot = igbhvr_act_inst.simulator.robots[0]
agent_distance = {part: 0 for part in self.agent_pos}
self.next_state_cache = {
"gripper": {"position": robot.get_end_effector_position()},
"body": {"position": robot.get_position()},
}
if not self.initialized:
self.state_cache = copy.deepcopy(self.next_state_cache)
self.initialized = True
self.agent_pos["body"].append(list(self.state_cache["body"]["position"]))
delta_pos = np.linalg.norm(
np.array(self.next_state_cache["body"]["position"]) - self.state_cache["body"]["position"]
)
distance = np.abs(delta_pos)
self.delta_agent_distance["body"].append(distance)
self.agent_pos["gripper"].append(list(self.state_cache["gripper"]["position"]))
delta_pos = np.linalg.norm(
self.next_state_cache["gripper"]["position"] - self.state_cache["gripper"]["position"]
)
gripper_distance = np.abs(delta_pos)
self.delta_agent_distance["gripper"].append(gripper_distance)
self.agent_local_pos["gripper"].append(list(robot.get_relative_eef_position()))
contacts = p.getContactPoints(bodyA=robot.robot_ids[0], linkIndexA=robot.eef_link_id)
if len(contacts) > 0:
self.delta_agent_grasp_distance["gripper"].append(gripper_distance)
self.agent_grasping["gripper"].append(True)
else:
self.delta_agent_grasp_distance["gripper"].append(0)
self.agent_grasping["gripper"].append(False)
self.state_cache = copy.deepcopy(self.next_state_cache)
def gather_results(self):
return {
"agent_distance": {
"timestep": self.delta_agent_distance,
},
"grasp_distance": {
"timestep": self.delta_agent_grasp_distance,
},
"pos": {
"timestep": self.agent_pos,
},
"local_pos": {
"timestep": self.agent_local_pos,
},
"grasping": {
"timestep": self.agent_grasping,
},
}
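# Illustrative usage sketch: gather_results() returns per-timestep lists keyed
# by robot part, so a caller can reduce them into episode-level totals. Only
# the dict shape built above is relied on; the simulator loop that drives
# step_callback is assumed to run elsewhere.
def summarize_robot_metrics(results):
    """Collapse per-timestep metric lists into per-part episode totals."""
    summary = {}
    for part, distances in results["agent_distance"]["timestep"].items():
        summary[part] = {
            "total_distance": float(np.sum(distances)),
            "timesteps": len(distances),
        }
    for part, grasp in results["grasp_distance"]["timestep"].items():
        summary[part]["total_grasp_distance"] = float(np.sum(grasp))
    return summary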
| en | 0.372812 | # Exclude agent teleports | 2.174202 | 2 |
fontslice/__init__.py | Arahabica/font-subset-css | 0 | 8292 | <filename>fontslice/__init__.py<gh_stars>0
import sys
from .main import (
_chunk_list,
_get_unicode_range_hash,
convert_unicode_range,
get_120_unicode_ranges,
get_unicode_ranges_from_text,
generate_css,
main,
)
__all__ = [
"_chunk_list",
"_get_unicode_range_hash",
"convert_unicode_range",
"get_120_unicode_ranges",
"get_unicode_ranges_from_text",
"generate_css",
"main",
]
if __name__ == "__main__":
sys.exit(main())
| <filename>fontslice/__init__.py<gh_stars>0
import sys
from .main import (
_chunk_list,
_get_unicode_range_hash,
convert_unicode_range,
get_120_unicode_ranges,
get_unicode_ranges_from_text,
generate_css,
main,
)
__all__ = [
"_chunk_list",
"_get_unicode_range_hash",
"convert_unicode_range",
"get_120_unicode_ranges",
"get_unicode_ranges_from_text",
"generate_css",
"main",
]
if __name__ == "__main__":
sys.exit(main())
| none | 1 | 1.797869 | 2 |
|
src/ttkbootstrap/dialogs/dialogs.py | MrJaatt/ttkbootstrap | 1 | 8293 | <filename>src/ttkbootstrap/dialogs/dialogs.py<gh_stars>1-10
"""
This module contains various base dialog base classes that can be
used to create custom dialogs for the end user.
These classes serve as the basis for the pre-defined static helper
methods in the `Messagebox`, and `Querybox` container classes.
"""
import calendar
import textwrap
from datetime import datetime
from tkinter import font
import ttkbootstrap as ttk
from ttkbootstrap import utility
from ttkbootstrap.icons import Icon
from ttkbootstrap.constants import *
from tkinter import BaseWidget
from ttkbootstrap.localization import MessageCatalog
class Dialog(BaseWidget):
"""A simple dialog base class."""
def __init__(self, parent=None, title="", alert=False):
"""
Parameters:
parent (Widget):
Makes the window the logical parent of the message box.
The messagebox is displayed on top of its parent window.
title (str):
The string displayed as the title of the message box.
This option is ignored on Mac OS X, where platform
guidelines forbid the use of a title on this kind of
dialog.
alert (bool):
Ring the display's bell when the dialog is shown.
"""
BaseWidget._setup(self, parent, {})
self._winsys = self.master.tk.call("tk", "windowingsystem")
self._toplevel = None
self._title = title or " "
self._result = None
self._alert = alert
self._initial_focus = None
def _locate(self):
toplevel = self._toplevel
master = toplevel.master
screen_height = toplevel.winfo_screenheight()
screen_width = toplevel.winfo_screenwidth()
toplevel.update_idletasks()
if master.winfo_viewable():
m_width = master.winfo_width()
m_height = master.winfo_height()
m_x = master.winfo_rootx()
m_y = master.winfo_rooty()
else:
m_width = screen_width
m_height = screen_height
m_x = m_y = 0
w_width = toplevel.winfo_reqwidth()
w_height = toplevel.winfo_reqheight()
x = int(m_x + (m_width - w_width) * 0.45)
y = int(m_y + (m_height - w_height) * 0.3)
if x + w_width > screen_width:
x = screen_width - w_width
elif x < 0:
x = 0
if y + w_height > screen_height:
y = screen_height - w_height
elif y < 0:
y = 0
toplevel.geometry(f"+{x}+{y}")
def show(self):
"""Show the popup dialog"""
self._result = None
self.build()
self._locate()
self._toplevel.deiconify()
if self._alert:
self._toplevel.bell()
if self._initial_focus:
self._initial_focus.focus_force()
self._toplevel.grab_set()
self._toplevel.wait_window()
def create_body(self, master):
"""Create the dialog body.
This method should be overridden and is called by the `build`
method. Set the `self._initial_focus` for the widget that
should receive the initial focus.
Parameters:
master (Widget):
The parent widget.
"""
raise NotImplementedError
def create_buttonbox(self, master):
"""Create the dialog button box.
This method should be overridden and is called by the `build`
method. Set the `self._initial_focus` for the button that
        should receive the initial focus.
Parameters:
master (Widget):
The parent widget.
"""
raise NotImplementedError
def build(self):
"""Build the dialog from settings"""
        # setup toplevel based on windowing system
if self._winsys == "win32":
self._toplevel = ttk.Toplevel(
transient=self.master,
title=self._title,
resizable=(0, 0),
minsize=(250, 15),
iconify=True,
)
else:
self._toplevel = ttk.Toplevel(
transient=self.master,
title=self._title,
resizable=(0, 0),
windowtype="dialog",
iconify=True,
)
self._toplevel.withdraw() # reset the iconify state
# bind <Escape> event to window close
self._toplevel.bind("<Escape>", lambda _: self._toplevel.destroy())
# set position of popup from parent window
#self._locate()
# create widgets
self.create_body(self._toplevel)
self.create_buttonbox(self._toplevel)
# update the window before showing
self._toplevel.update_idletasks()
@property
def result(self):
"""Returns the result of the dialog."""
return self._result
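# Illustrative sketch of the subclassing contract described in the docstrings
# above: override create_body() and create_buttonbox(), set
# `self._initial_focus`, and store the outcome in `self._result`. The widget
# layout below is an assumption for demonstration, not a pattern required by
# the library.
class _ExampleYesNoDialog(Dialog):
    """Minimal two-button dialog built on the Dialog base class."""
    def create_body(self, master):
        # A single prompt label; real dialogs can add any widgets here.
        ttk.Label(master, text="Proceed?", padding=10).pack(fill=X, expand=True)
    def create_buttonbox(self, master):
        frame = ttk.Frame(master, padding=(5, 10))
        for text in ("No", "Yes"):
            btn = ttk.Button(
                master=frame,
                text=text,
                command=lambda t=text: self._on_press(t),
            )
            btn.pack(side=RIGHT, padx=5)
        frame.pack(fill=X)
        self._initial_focus = btn  # the last button created ("Yes") gets focus
    def _on_press(self, value):
        self._result = value
        self._toplevel.destroy()
# Typical use: dlg = _ExampleYesNoDialog(title="Confirm"); dlg.show(); dlg.result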
class MessageDialog(Dialog):
"""A simple modal dialog class that can be used to build simple
message dialogs.
Displays a message and a set of buttons. Each of the buttons in the
message window is identified by a unique symbolic name. After the
    message window is popped up, the message box waits for the user to
select one of the buttons. Then it returns the symbolic name of the
selected button. Use a `Toplevel` widget for more advanced modal
dialog designs.
"""
def __init__(
self,
message,
title=" ",
buttons=None,
command=None,
width=50,
parent=None,
alert=False,
default=None,
padding=(20, 20),
icon=None,
**kwargs
):
"""
Parameters:
message (str):
A message to display in the message box.
title (str):
The string displayed as the title of the message box.
This option is ignored on Mac OS X, where platform
guidelines forbid the use of a title on this kind of
dialog.
buttons (List[str]):
A list of buttons to appear at the bottom of the popup
messagebox. The buttons can be a list of strings which
will define the symbolic name and the button text.
`['OK', 'Cancel']`. Alternatively, you can assign a
bootstyle to each button by using the colon to separate the
button text and the bootstyle. If no colon is found, then
the style is set to 'primary' by default.
`['OK:success','Cancel:danger']`.
command (Tuple[Callable, str]):
The function to invoke when the user closes the dialog.
The actual command is a tuple that consists of the
function to call and the symbolic name of the button that
closes the dialog.
width (int):
The maximum number of characters per line in the message.
If the text stretches beyond the limit, the line will break
at the word.
parent (Widget):
Makes the window the logical parent of the message box.
The messagebox is displayed on top of its parent window.
alert (bool):
Ring the display's bell when the dialog is shown.
default (str):
The symbolic name of the default button. The default
                button is invoked when the <Return> key is pressed.
If no default is provided, the right-most button in the
                button list will be set as the default.
padding (Union[int, Tuple[int]]):
The amount of space between the border and the widget
contents.
icon (str):
An image path, path-like object or image data to be
displayed to the left of the text.
**kwargs (Dict):
Other optional keyword arguments.
Example:
```python
root = tk.Tk()
md = MessageDialog("Displays a message with buttons.")
md.show()
```
"""
super().__init__(parent, title, alert)
self._message = message
self._command = command
self._width = width
self._alert = alert
        self._default = default
self._padding = padding
self._icon = icon
self._localize = kwargs.get('localize')
if buttons is None:
self._buttons = [
f"{MessageCatalog.translate('Cancel')}:secondary",
f"{MessageCatalog.translate('OK')}:primary"
]
else:
self._buttons = buttons
def create_body(self, master):
"""Overrides the parent method; adds the message section."""
container = ttk.Frame(master, padding=self._padding)
if self._icon:
try:
# assume this is image data
self._img = ttk.PhotoImage(data=self._icon)
icon_lbl = ttk.Label(container, image=self._img)
icon_lbl.pack(side=LEFT, padx=5)
except:
try:
# assume this is a file path
self._img = ttk.PhotoImage(file=self._icon)
icon_lbl = ttk.Label(container, image=self._img)
icon_lbl.pack(side=LEFT, padx=5)
except:
# icon is neither data nor a valid file path
print('MessageDialog icon is invalid')
if self._message:
for msg in self._message.split("\n"):
message = "\n".join(textwrap.wrap(msg, width=self._width))
message_label = ttk.Label(container, text=message)
message_label.pack(pady=(0, 3), fill=X, anchor=N)
container.pack(fill=X, expand=True)
def create_buttonbox(self, master):
"""Overrides the parent method; adds the message buttonbox"""
frame = ttk.Frame(master, padding=(5, 5))
button_list = []
for i, button in enumerate(self._buttons[::-1]):
cnf = button.split(":")
if len(cnf) == 2:
text, bootstyle = cnf
else:
text = cnf[0]
bootstyle = "secondary"
if self._localize == True:
text = MessageCatalog.translate(text)
btn = ttk.Button(frame, bootstyle=bootstyle, text=text)
btn.bind("<Return>", lambda _: btn.invoke())
btn.configure(command=lambda b=btn: self.on_button_press(b))
btn.pack(padx=2, side=RIGHT)
btn.lower() # set focus traversal left-to-right
button_list.append(btn)
if self._default is not None and text == self._default:
self._initial_focus = btn
elif self._default is None and i == 0:
self._initial_focus = btn
# bind default button to return key press and set focus
self._toplevel.bind("<Return>", lambda _, b=btn: b.invoke())
self._toplevel.bind("<KP_Enter>", lambda _, b=btn: b.invoke())
ttk.Separator(self._toplevel).pack(fill=X)
frame.pack(side=BOTTOM, fill=X, anchor=S)
if not self._initial_focus:
self._initial_focus = button_list[0]
def on_button_press(self, button):
"""Save result, destroy the toplevel, and execute command."""
self._result = button["text"]
command = self._command
if command is not None:
command()
self._toplevel.destroy()
def show(self):
"""Create and display the popup messagebox."""
super().show()
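# Illustrative usage sketch of the "Text:bootstyle" button format documented
# above. The message, title, and button styles are arbitrary examples; the
# parent window is assumed to exist in the calling application.
def _example_discard_prompt(parent=None):
    dialog = MessageDialog(
        message="Discard unsaved changes?",
        title="Unsaved changes",
        buttons=["Cancel:secondary", "Discard:danger"],
        parent=parent,
    )
    dialog.show()
    return dialog.result  # text of the pressed button, or None if closed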
class QueryDialog(Dialog):
"""A simple modal dialog class that can be used to build simple
    data input dialogs. Displays a prompt, an input box, and a set of
buttons. Additional data manipulation can be performed on the
user input post-hoc by overriding the `apply` method.
Use a `Toplevel` widget for more advanced modal dialog designs.
"""
def __init__(
self,
prompt,
title=" ",
initialvalue="",
minvalue=None,
maxvalue=None,
width=65,
datatype=str,
padding=(20, 20),
parent=None,
):
"""
Parameters:
prompt (str):
A message to display in the message box above the entry
widget.
title (str):
The string displayed as the title of the message box.
This option is ignored on Mac OS X, where platform
guidelines forbid the use of a title on this kind of
dialog.
initialvalue (Any):
The initial value in the entry widget.
minvalue (Any):
The minimum allowed value. Only valid for int and float
data types.
maxvalue (Any):
The maximum allowed value. Only valid for int and float
data types.
width (int):
The maximum number of characters per line in the
message. If the text stretches beyond the limit, the
line will break at the word.
parent (Widget):
Makes the window the logical parent of the message box.
The messagebox is displayed on top of its parent
window.
padding (Union[int, Tuple[int]]):
The amount of space between the border and the widget
contents.
datatype (Union[int, str, float]):
The data type used to validate the entry value.
"""
super().__init__(parent, title)
self._prompt = prompt
self._initialvalue = initialvalue
self._minvalue = minvalue
self._maxvalue = maxvalue
self._width = width
self._datatype = datatype
self._padding = padding
self._result = None
def create_body(self, master):
"""Overrides the parent method; adds the message and input
section."""
frame = ttk.Frame(master, padding=self._padding)
if self._prompt:
for p in self._prompt.split("\n"):
prompt = "\n".join(textwrap.wrap(p, width=self._width))
prompt_label = ttk.Label(frame, text=prompt)
prompt_label.pack(pady=(0, 5), fill=X, anchor=N)
entry = ttk.Entry(master=frame)
entry.insert(END, self._initialvalue)
entry.pack(pady=(0, 5), fill=X)
entry.bind("<Return>", self.on_submit)
entry.bind("<KP_Enter>", self.on_submit)
entry.bind("<Escape>", self.on_cancel)
frame.pack(fill=X, expand=True)
self._initial_focus = entry
def create_buttonbox(self, master):
"""Overrides the parent method; adds the message buttonbox"""
frame = ttk.Frame(master, padding=(5, 10))
submit = ttk.Button(
master=frame,
bootstyle="primary",
text=MessageCatalog.translate("Submit"),
command=self.on_submit,
)
submit.pack(padx=5, side=RIGHT)
submit.lower() # set focus traversal left-to-right
cancel = ttk.Button(
master=frame,
bootstyle="secondary",
text=MessageCatalog.translate("Cancel"),
command=self.on_cancel,
)
cancel.pack(padx=5, side=RIGHT)
cancel.lower() # set focus traversal left-to-right
ttk.Separator(self._toplevel).pack(fill=X)
frame.pack(side=BOTTOM, fill=X, anchor=S)
def on_submit(self, *_):
"""Save result, destroy the toplevel, and apply any post-hoc
data manipulations."""
self._result = self._initial_focus.get()
valid_result = self.validate()
if not valid_result:
return # keep toplevel open for valid response
self._toplevel.destroy()
self.apply()
def on_cancel(self, *_):
"""Close the toplevel and return empty."""
self._toplevel.destroy()
return
def validate(self):
"""Validate the data
This method is called automatically to validate the data before
the dialog is destroyed. Can be subclassed and overridden.
"""
# no default checks required for string data types
if self._datatype not in [float, int, complex]:
return True
# convert result to appropriate data type
try:
self._result = self._datatype(self._result)
except ValueError:
msg = MessageCatalog.translate('Should be of data type')
Messagebox.ok(
message=f"{msg} `{self._datatype}`",
title=MessageCatalog.translate("Invalid data type"),
)
return False
# max value range
if self._maxvalue is not None:
if self._result > self._maxvalue:
msg = MessageCatalog.translate('Number cannot be greater than')
Messagebox.ok(
message=f"{msg} {self._maxvalue}",
title=MessageCatalog.translate("Out of range"),
)
return False
# min value range
if self._minvalue is not None:
if self._result < self._minvalue:
msg = MessageCatalog.translate('Number cannot be less than')
Messagebox.ok(
message=f"{msg} {self._minvalue}",
title=MessageCatalog.translate("Out of range"),
)
return False
# valid result
return True
def apply(self):
"""Process the data.
This method is called automatically to process the data after
the dialog is destroyed. By default, it does nothing.
"""
pass # override
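# Illustrative sketch of the post-hoc hooks described above: validate() and
# apply() are the intended extension points for subclasses. Upper-casing the
# submitted text is only an example transformation.
class _ExampleUppercaseQuery(QueryDialog):
    """QueryDialog variant that upper-cases the submitted string."""
    def apply(self):
        # Runs after the toplevel is destroyed with a validated result.
        if self._result is not None:
            self._result = str(self._result).upper()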
class DatePickerDialog:
"""A dialog that displays a calendar popup and returns the
selected date as a datetime object.
The current date is displayed by default unless the `startdate`
parameter is provided.
The month can be changed by clicking the chevrons to the left
and right of the month-year title.
Left-click the arrow to move the calendar by one month.
Right-click the arrow to move the calendar by one year.
Right-click the title to reset the calendar to the start date.
The starting weekday can be changed with the `firstweekday`
parameter for geographies that do not start the calendar on
Sunday, which is the default.
The widget grabs focus and all screen events until released.
If you want to cancel a date selection, click the 'X' button
at the top-right corner of the widget.
The bootstyle api may be used to change the style of the widget.
The available colors include -> primary, secondary, success,
info, warning, danger, light, dark.

"""
def __init__(
self,
parent=None,
title=" ",
firstweekday=6,
startdate=None,
bootstyle=PRIMARY,
):
"""
Parameters:
parent (Widget):
The parent widget; the popup will appear to the
bottom-right of the parent widget. If no parent is
provided, the widget is centered on the screen.
title (str):
The text that appears on the titlebar.
firstweekday (int):
Specifies the first day of the week. 0=Monday,
1=Tuesday, etc...
startdate (datetime):
The date to be in focus when the widget is
displayed.
bootstyle (str):
The following colors can be used to change the color of
the title and hover / pressed color -> primary,
secondary, info, warning, success, danger, light, dark.
"""
self.parent = parent
self.root = ttk.Toplevel(
title=title,
transient=self.parent,
resizable=(False, False),
topmost=True,
minsize=(226, 1),
iconify=True
)
self.firstweekday = firstweekday
self.startdate = startdate or datetime.today().date()
self.bootstyle = bootstyle or PRIMARY
self.date_selected = self.startdate
self.date = startdate or self.date_selected
self.calendar = calendar.Calendar(firstweekday=firstweekday)
self.titlevar = ttk.StringVar()
self.datevar = ttk.IntVar()
self._setup_calendar()
self.root.grab_set()
self.root.wait_window()
def _setup_calendar(self):
"""Setup the calendar widget"""
# create the widget containers
self.frm_calendar = ttk.Frame(
master=self.root, padding=0, borderwidth=0, relief=FLAT
)
self.frm_calendar.pack(fill=BOTH, expand=YES)
self.frm_title = ttk.Frame(self.frm_calendar, padding=(3, 3))
self.frm_title.pack(fill=X)
self.frm_header = ttk.Frame(self.frm_calendar, bootstyle=SECONDARY)
self.frm_header.pack(fill=X)
# setup the toplevel widget
self.root.withdraw() # reset the iconify state
self.frm_calendar.update_idletasks() # actualize geometry
# create visual components
self._draw_titlebar()
self._draw_calendar()
# make toplevel visible
self._set_window_position()
self.root.deiconify()
def _update_widget_bootstyle(self):
self.frm_title.configure(bootstyle=self.bootstyle)
self.title.configure(bootstyle=f"{self.bootstyle}-inverse")
self.prev_period.configure(style=f"Chevron.{self.bootstyle}.TButton")
self.next_period.configure(style=f"Chevron.{self.bootstyle}.TButton")
def _draw_calendar(self):
self._update_widget_bootstyle()
self._set_title()
self._current_month_days()
self.frm_dates = ttk.Frame(self.frm_calendar)
self.frm_dates.pack(fill=BOTH, expand=YES)
for row, weekday_list in enumerate(self.monthdays):
for col, day in enumerate(weekday_list):
self.frm_dates.columnconfigure(col, weight=1)
if day == 0:
ttk.Label(
master=self.frm_dates,
text=self.monthdates[row][col].day,
anchor=CENTER,
padding=5,
bootstyle=SECONDARY,
).grid(row=row, column=col, sticky=NSEW)
else:
if all(
[
day == self.date_selected.day,
self.date.month == self.date_selected.month,
self.date.year == self.date_selected.year,
]
):
day_style = "secondary-toolbutton"
else:
day_style = f"{self.bootstyle}-calendar"
def selected(x=row, y=col):
self._on_date_selected(x, y)
btn = ttk.Radiobutton(
master=self.frm_dates,
variable=self.datevar,
value=day,
text=day,
bootstyle=day_style,
padding=5,
command=selected,
)
btn.grid(row=row, column=col, sticky=NSEW)
def _draw_titlebar(self):
"""Draw the calendar title bar which includes the month title
and the buttons that increment and decrement the selected
month.
In addition to the previous and next MONTH commands that are
assigned to the button press, a "right-click" event is assigned
to each button that causes the calendar to move to the previous
and next YEAR.
"""
# create and pack the title and action buttons
self.prev_period = ttk.Button(
master=self.frm_title, text="«", command=self.on_prev_month
)
self.prev_period.pack(side=LEFT)
self.title = ttk.Label(
master=self.frm_title,
textvariable=self.titlevar,
anchor=CENTER,
font="-weight bold",
)
self.title.pack(side=LEFT, fill=X, expand=YES)
self.next_period = ttk.Button(
master=self.frm_title,
text="»",
command=self.on_next_month,
)
self.next_period.pack(side=LEFT)
# bind "year" callbacks to action buttons
self.prev_period.bind("<Button-3>", self.on_prev_year, "+")
self.next_period.bind("<Button-3>", self.on_next_year, "+")
self.title.bind("<Button-1>", self.on_reset_date)
# create and pack days of the week header
for col in self._header_columns():
ttk.Label(
master=self.frm_header,
text=col,
anchor=CENTER,
padding=5,
bootstyle=(SECONDARY, INVERSE),
).pack(side=LEFT, fill=X, expand=YES)
def _set_title(self):
_titledate = f'{self.date.strftime("%B %Y")}'
self.titlevar.set(value=_titledate)
def _current_month_days(self):
"""Fetch the day numbers and dates for all days in the current
month. `monthdays` is a list of days as integers, and
`monthdates` is a list of `datetime` objects.
"""
self.monthdays = self.calendar.monthdayscalendar(
year=self.date.year, month=self.date.month
)
self.monthdates = self.calendar.monthdatescalendar(
year=self.date.year, month=self.date.month
)
def _header_columns(self):
"""Create and return a list of weekdays to be used as a header
in the calendar. The order of the weekdays is based on the
`firstweekday` property.
Returns:
List[str]:
A list of weekday column names for the calendar header.
"""
weekdays = [MessageCatalog.translate("Mo"),
MessageCatalog.translate("Tu"),
MessageCatalog.translate("We"),
MessageCatalog.translate("Th"),
MessageCatalog.translate("Fr"),
MessageCatalog.translate("Sa"),
MessageCatalog.translate("Su")]
header = weekdays[self.firstweekday :] + weekdays[: self.firstweekday]
return header
def _on_date_selected(self, row, col):
"""Callback for selecting a date.
An index is assigned to each date button that corresponds to
the dates in the `monthdates` matrix. When the user clicks a
button to select a date, the index from this button is used
to lookup the date value of the button based on the row and
column index reference. This value is saved in the
`date_selected` property and the `Toplevel` is destroyed.
Parameters:
index (Tuple[int, int]):
A row and column index of the date selected; to be
found in the `monthdates` matrix.
Returns:
datetime:
The date selected
"""
self.date_selected = self.monthdates[row][col]
self.root.destroy()
def _selection_callback(func):
"""Calls the decorated `func` and redraws the calendar."""
def inner(self, *args):
func(self, *args)
self.frm_dates.destroy()
self._draw_calendar()
return inner
@_selection_callback
def on_next_month(self):
"""Increment the calendar data to the next month"""
year, month = self._nextmonth(self.date.year, self.date.month)
self.date = datetime(year=year, month=month, day=1).date()
@_selection_callback
def on_next_year(self, *_):
"""Increment the calendar data to the next year"""
year = self.date.year + 1
month = self.date.month
self.date = datetime(year=year, month=month, day=1).date()
@_selection_callback
def on_prev_month(self):
"""Decrement the calendar to the previous year"""
year, month = self._prevmonth(self.date.year, self.date.month)
self.date = datetime(year=year, month=month, day=1).date()
@_selection_callback
def on_prev_year(self, *_):
year = self.date.year - 1
month = self.date.month
self.date = datetime(year=year, month=month, day=1).date()
@_selection_callback
def on_reset_date(self, *_):
"""Set the calendar to the start date"""
self.date = self.startdate
def _set_window_position(self):
"""Move the window the to bottom-right of the parent widget, or
to the middle of the screen if no parent is provided.
"""
width = self.root.winfo_reqwidth()
height = self.root.winfo_reqheight()
if self.parent:
xpos = self.parent.winfo_rootx() + self.parent.winfo_width()
ypos = self.parent.winfo_rooty() + self.parent.winfo_height()
self.root.geometry(f"+{xpos}+{ypos}")
else:
xpos = self.root.winfo_screenwidth() // 2 - width
ypos = self.root.winfo_screenheight() // 2 - height
self.root.geometry(f"+{xpos}+{ypos}")
@staticmethod
def _nextmonth(year, month):
if month == 12:
return year+1, 1
else:
return year, month+1
@staticmethod
def _prevmonth(year, month):
if month == 1:
return year-1, 12
else:
return year, month-1
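# Illustrative usage sketch: constructing DatePickerDialog blocks (grab_set +
# wait_window) until a date is chosen or the window is closed, after which
# `date_selected` holds the picked date. The title and weekday choice are
# arbitrary examples.
def _example_pick_start_date(parent=None):
    picker = DatePickerDialog(
        parent=parent,
        title="Select a start date",
        firstweekday=0,  # Monday-first calendar
    )
    return picker.date_selected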
class FontDialog(Dialog):
"""A dialog that displays a variety of options for choosing a font.
This dialog constructs and returns a `Font` object based on the
options selected by the user. The initial font is based on OS
settings and will vary.
The font object is returned when the **Ok** button is pressed and
can be passed to any widget that accepts a _font_ configuration
option.

"""
def __init__(self, title="Font Selector", parent=None):
title = MessageCatalog.translate(title)
super().__init__(parent=parent, title=title)
self._style = ttk.Style()
self._default = font.nametofont("TkDefaultFont")
self._actual = self._default.actual()
self._size = ttk.Variable(value=self._actual["size"])
self._family = ttk.Variable(value=self._actual["family"])
self._slant = ttk.Variable(value=self._actual["slant"])
self._weight = ttk.Variable(value=self._actual["weight"])
self._overstrike = ttk.Variable(value=self._actual["overstrike"])
self._underline = ttk.Variable(value=self._actual["underline"])
self._preview_font = font.Font()
self._slant.trace_add("write", self._update_font_preview)
self._weight.trace_add("write", self._update_font_preview)
self._overstrike.trace_add("write", self._update_font_preview)
self._underline.trace_add("write", self._update_font_preview)
_headingfont = font.nametofont("TkHeadingFont")
_headingfont.configure(weight="bold")
self._update_font_preview()
self._families = set([self._family.get()])
for f in font.families():
if all([f, not f.startswith("@"), "emoji" not in f.lower()]):
self._families.add(f)
def create_body(self, master):
width = utility.scale_size(master, 600)
height = utility.scale_size(master, 500)
self._toplevel.geometry(f"{width}x{height}")
family_size_frame = ttk.Frame(master, padding=10)
family_size_frame.pack(fill=X, anchor=N)
self._initial_focus = self._font_families_selector(family_size_frame)
self._font_size_selector(family_size_frame)
self._font_options_selectors(master, padding=10)
self._font_preview(master, padding=10)
def create_buttonbox(self, master):
container = ttk.Frame(master, padding=(5, 10))
container.pack(fill=X)
ok_btn = ttk.Button(
master=container,
bootstyle="primary",
text=MessageCatalog.translate("OK"),
command=self._on_submit,
)
ok_btn.pack(side=RIGHT, padx=5)
ok_btn.bind("<Return>", lambda _: ok_btn.invoke())
cancel_btn = ttk.Button(
master=container,
bootstyle="secondary",
text=MessageCatalog.translate("Cancel"),
command=self._on_cancel,
)
cancel_btn.pack(side=RIGHT, padx=5)
cancel_btn.bind("<Return>", lambda _: cancel_btn.invoke())
def _font_families_selector(self, master):
container = ttk.Frame(master)
container.pack(fill=BOTH, expand=YES, side=LEFT)
header = ttk.Label(container, text=MessageCatalog.translate("Family"), font="TkHeadingFont")
header.pack(fill=X, pady=(0, 2), anchor=N)
listbox = ttk.Treeview(
master=container,
height=5,
show="",
columns=[0],
)
listbox.column(0, width=utility.scale_size(listbox, 250))
listbox.pack(side=LEFT, fill=BOTH, expand=YES)
listbox_vbar = ttk.Scrollbar(
container,
command=listbox.yview,
orient=VERTICAL,
bootstyle="rounded",
)
listbox_vbar.pack(side=RIGHT, fill=Y)
listbox.configure(yscrollcommand=listbox_vbar.set)
for f in self._families:
listbox.insert("", iid=f, index=END, tags=[f], values=[f])
listbox.tag_configure(f, font=(f, self._size.get()))
iid = self._family.get()
listbox.selection_set(iid) # select default value
listbox.see(iid) # ensure default is visible
listbox.bind(
"<<TreeviewSelect>>", lambda e: self._on_select_font_family(e)
)
return listbox
def _font_size_selector(self, master):
container = ttk.Frame(master)
container.pack(side=LEFT, fill=Y, padx=(10, 0))
header = ttk.Label(container, text=MessageCatalog.translate("Size"), font="TkHeadingFont")
header.pack(fill=X, pady=(0, 2), anchor=N)
sizes_listbox = ttk.Treeview(container, height=7, columns=[0], show="")
sizes_listbox.column(0, width=utility.scale_size(sizes_listbox, 24))
sizes = [*range(8, 13), *range(13, 30, 2), 36, 48, 72]
for s in sizes:
sizes_listbox.insert("", iid=s, index=END, values=[s])
iid = self._size.get()
sizes_listbox.selection_set(iid)
sizes_listbox.see(iid)
sizes_listbox.bind(
"<<TreeviewSelect>>", lambda e: self._on_select_font_size(e)
)
sizes_listbox_vbar = ttk.Scrollbar(
master=container,
orient=VERTICAL,
command=sizes_listbox.yview,
bootstyle="round",
)
sizes_listbox.configure(yscrollcommand=sizes_listbox_vbar.set)
sizes_listbox.pack(side=LEFT, fill=Y, expand=YES, anchor=N)
sizes_listbox_vbar.pack(side=LEFT, fill=Y, expand=YES)
def _font_options_selectors(self, master, padding: int):
container = ttk.Frame(master, padding=padding)
container.pack(fill=X, padx=2, pady=2, anchor=N)
weight_lframe = ttk.Labelframe(container, text=MessageCatalog.translate("Weight"), padding=5)
weight_lframe.pack(side=LEFT, fill=X, expand=YES)
opt_normal = ttk.Radiobutton(
master=weight_lframe,
text=MessageCatalog.translate("normal"),
value="normal",
variable=self._weight,
)
opt_normal.invoke()
opt_normal.pack(side=LEFT, padx=5, pady=5)
opt_bold = ttk.Radiobutton(
master=weight_lframe,
text=MessageCatalog.translate("bold"),
value="bold",
variable=self._weight,
)
opt_bold.pack(side=LEFT, padx=5, pady=5)
slant_lframe = ttk.Labelframe(container, text=MessageCatalog.translate("Slant"), padding=5)
slant_lframe.pack(side=LEFT, fill=X, padx=10, expand=YES)
opt_roman = ttk.Radiobutton(
master=slant_lframe,
text=MessageCatalog.translate("roman"),
value="roman",
variable=self._slant,
)
opt_roman.invoke()
opt_roman.pack(side=LEFT, padx=5, pady=5)
opt_italic = ttk.Radiobutton(
master=slant_lframe,
text=MessageCatalog.translate("italic"),
value="italic",
variable=self._slant,
)
opt_italic.pack(side=LEFT, padx=5, pady=5)
effects_lframe = ttk.Labelframe(container, text=MessageCatalog.translate("Effects"), padding=5)
effects_lframe.pack(side=LEFT, padx=(2, 0), fill=X, expand=YES)
opt_underline = ttk.Checkbutton(
master=effects_lframe, text=MessageCatalog.translate("underline"), variable=self._underline
)
opt_underline.pack(side=LEFT, padx=5, pady=5)
opt_overstrike = ttk.Checkbutton(
master=effects_lframe, text=MessageCatalog.translate("overstrike"), variable=self._overstrike
)
opt_overstrike.pack(side=LEFT, padx=5, pady=5)
def _font_preview(self, master, padding: int):
container = ttk.Frame(master, padding=padding)
container.pack(fill=BOTH, expand=YES, anchor=N)
header = ttk.Label(container, text=MessageCatalog.translate("Preview"), font="TkHeadingFont")
header.pack(fill=X, pady=2, anchor=N)
content = MessageCatalog.translate("The quick brown fox jumps over the lazy dog.")
self._preview_text = ttk.Text(
master=container,
height=3,
font=self._preview_font,
highlightbackground=self._style.colors.primary,
)
self._preview_text.insert(END, content)
self._preview_text.pack(fill=BOTH, expand=YES)
container.pack_propagate(False)
def _on_select_font_family(self, e):
tree: ttk.Treeview = self._toplevel.nametowidget(e.widget)
fontfamily = tree.selection()[0]
self._family.set(value=fontfamily)
self._update_font_preview()
def _on_select_font_size(self, e):
tree: ttk.Treeview = self._toplevel.nametowidget(e.widget)
fontsize = tree.selection()[0]
self._size.set(value=fontsize)
self._update_font_preview()
def _on_submit(self) -> font.Font:
self._toplevel.destroy()
return self.result
def _on_cancel(self):
self._toplevel.destroy()
def _update_font_preview(self, *_):
family = self._family.get()
size = self._size.get()
slant = self._slant.get()
overstrike = self._overstrike.get()
underline = self._underline.get()
self._preview_font.config(
family=family,
size=size,
slant=slant,
overstrike=overstrike,
underline=underline,
)
try:
self._preview_text.configure(font=self._preview_font)
except:
pass
self._result = self._preview_font
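# Illustrative usage sketch: show() blocks until the dialog closes, and
# `result` then holds the chosen font.Font object, which can be passed to any
# widget's `font` option. The label argument is only an example consumer.
def _example_choose_label_font(label, parent=None):
    dialog = FontDialog(parent=parent)
    dialog.show()
    if dialog.result is not None:
        label.configure(font=dialog.result)
    return dialog.result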
class Messagebox:
"""This class contains various static methods that show popups with
a message to the end user with various arrangments of buttons
and alert options."""
@staticmethod
def show_info(message, title=" ", parent=None, **kwargs):
"""Display a modal dialog box with an OK button and an INFO
icon.

Parameters:
message (str):
A message to display in the message box.
title (str):
The string displayed as the title of the messagebox. This
option is ignored on Mac OS X, where platform guidelines
forbid the use of a title on this kind of dialog.
parent (Union[Window, Toplevel]):
Makes the window the logical parent of the message box. The
message box is displayed on top of its parent window.
**kwargs (Dict):
Other optional keyword arguments.
"""
sd = MessageDialog(
message=message,
title=title,
parent=parent,
buttons=["OK:primary"],
icon=Icon.info,
            localize=True,
            **kwargs,
)
sd.show()
@staticmethod
def show_warning(message, title=" ", parent=None, **kwargs):
"""Display a modal dialog box with an OK button and a
warning icon. Also will ring the display bell.

Parameters:
message (str):
A message to display in the message box.
title (str):
The string displayed as the title of the messagebox. This
option is ignored on Mac OS X, where platform guidelines
forbid the use of a title on this kind of dialog.
parent (Union[Window, Toplevel]):
Makes the window the logical parent of the message box. The
message box is displayed on top of its parent window.
**kwargs (Dict):
Other optional keyword arguments.
"""
sd = MessageDialog(
message=message,
title=title,
parent=parent,
buttons=["OK:primary"],
icon=Icon.warning,
alert=True,
localize=True,
**kwargs,
)
sd.show()
@staticmethod
def show_error(message, title=" ", parent=None, **kwargs):
"""Display a modal dialog box with an OK button and an
error icon. Also will ring the display bell.

Parameters:
message (str):
A message to display in the message box.
title (str):
The string displayed as the title of the messagebox. This
option is ignored on Mac OS X, where platform guidelines
forbid the use of a title on this kind of dialog.
parent (Union[Window, Toplevel]):
Makes the window the logical parent of the message box. The
message box is displayed on top of its parent window.
**kwargs (Dict):
Other optional keyword arguments.
"""
sd = MessageDialog(
message=message,
title=title,
parent=parent,
buttons=["OK:primary"],
icon=Icon.error,
alert=True,
localize=True,
**kwargs,
)
sd.show()
@staticmethod
def show_question(
message,
title=" ",
parent=None,
buttons=["No:secondary", "Yes:primary"],
**kwargs,
):
"""Display a modal dialog box with yes, no buttons and a
question icon. Also will ring the display bell. You may also
change the button scheme using the `buttons` parameter.

Parameters:
message (str):
A message to display in the message box.
title (str):
The string displayed as the title of the messagebox. This
option is ignored on Mac OS X, where platform guidelines
forbid the use of a title on this kind of dialog.
parent (Union[Window, Toplevel]):
Makes the window the logical parent of the message box. The
message box is displayed on top of its parent window.
buttons (List[str]):
A list of buttons to appear at the bottom of the popup
messagebox. The buttons can be a list of strings which
will define the symbolic name and the button text.
`['OK', 'Cancel']`. Alternatively, you can assign a
bootstyle to each button by using the colon to separate the
button text and the bootstyle. If no colon is found, then
the style is set to 'primary' by default.
`['Yes:success','No:danger']`.
**kwargs (Dict):
Other optional keyword arguments.
Returns:
Union[str, None]:
The symbolic name of the button pressed, or None if the
window is closed without pressing a button.
"""
sd = MessageDialog(
message=message,
title=title,
parent=parent,
buttons=buttons,
icon=Icon.question,
alert=True,
localize=True,
**kwargs,
)
sd.show()
return sd.result
@staticmethod
def ok(message, title=" ", alert=False, parent=None, **kwargs):
"""Display a modal dialog box with an OK button and and optional
bell alert.

Parameters:
message (str):
A message to display in the message box.
title (str):
The string displayed as the title of the messagebox. This
option is ignored on Mac OS X, where platform guidelines
forbid the use of a title on this kind of dialog.
alert (bool):
                Specifies whether to ring the display bell.
parent (Union[Window, Toplevel]):
Makes the window the logical parent of the message box. The
message box is displayed on top of its parent window.
**kwargs (Dict):
Other optional keyword arguments.
"""
sd = MessageDialog(
title=title,
message=message,
parent=parent,
alert=alert,
buttons=["OK:primary"],
localize=True,
**kwargs,
)
sd.show()
@staticmethod
def okcancel(message, title=" ", alert=False, parent=None, **kwargs):
"""Displays a modal dialog box with OK and Cancel buttons and
        returns the symbolic name of the button pressed.

Parameters:
message (str):
A message to display in the message box.
title (str):
The string displayed as the title of the messagebox. This
option is ignored on Mac OS X, where platform guidelines
forbid the use of a title on this kind of dialog.
alert (bool):
                Specifies whether to ring the display bell.
parent (Union[Window, Toplevel]):
Makes the window the logical parent of the message box. The
message box is displayed on top of its parent window.
**kwargs (Dict):
Other optional keyword arguments.
Returns:
Union[str, None]:
The symbolic name of the button pressed, or None if the
window is closed without pressing a button.
"""
sd = MessageDialog(
title=title, message=message, parent=parent, alert=alert, localize=True, **kwargs
)
sd.show()
return sd.result
@staticmethod
def yesno(message, title=" ", alert=False, parent=None, **kwargs):
"""Display a modal dialog box with YES and NO buttons and return
the symbolic name of the button pressed.

Parameters:
message (str):
A message to display in the message box.
title (str):
The string displayed as the title of the messagebox. This
option is ignored on Mac OS X, where platform guidelines
forbid the use of a title on this kind of dialog.
alert (bool):
                Specifies whether to ring the display bell.
parent (Union[Window, Toplevel]):
Makes the window the logical parent of the message box. The
message box is displayed on top of its parent window.
**kwargs (Dict):
Other optional keyword arguments.
Returns:
Union[str, None]:
The symbolic name of the button pressed, or None if the
window is closed without pressing a button.
"""
sd = MessageDialog(
title=title,
message=message,
parent=parent,
buttons=["No", "Yes:primary"],
alert=alert,
localize=True,
**kwargs,
)
sd.show()
return sd.result
@staticmethod
def yesnocancel(message, title=" ", alert=False, parent=None, **kwargs):
"""Display a modal dialog box with YES, NO, and Cancel buttons,
and return the symbolic name of the button pressed.

Parameters:
message (str):
A message to display in the message box.
title (str):
The string displayed as the title of the messagebox. This
option is ignored on Mac OS X, where platform guidelines
forbid the use of a title on this kind of dialog.
alert (bool):
                Specifies whether to ring the display bell.
parent (Union[Window, Toplevel]):
Makes the window the logical parent of the message box. The
message box is displayed on top of its parent window.
**kwargs (Dict):
Optional keyword arguments.
Returns:
Union[str, None]:
The symbolic name of the button pressed, or None if the
window is closed without pressing a button.
"""
sd = MessageDialog(
title=title,
message=message,
parent=parent,
alert=alert,
buttons=["Cancel", "No", "Yes:primary"],
localize=True,
**kwargs,
)
sd.show()
return sd.result
@staticmethod
def retrycancel(message, title=" ", alert=False, parent=None, **kwargs):
"""Display a modal dialog box with RETRY and Cancel buttons;
returns the symbolic name of the button pressed.

Parameters:
message (str):
A message to display in the message box.
title (str):
The string displayed as the title of the messagebox. This
option is ignored on Mac OS X, where platform guidelines
forbid the use of a title on this kind of dialog.
alert (bool):
                Specifies whether to ring the display bell.
parent (Union[Window, Toplevel]):
Makes the window the logical parent of the message box. The
message box is displayed on top of its parent window.
**kwargs (Dict):
Other optional keyword arguments.
Returns:
Union[str, None]:
The symbolic name of the button pressed, or None if the
window is closed without pressing a button.
"""
sd = MessageDialog(
title=title,
message=message,
parent=parent,
alert=alert,
buttons=["Cancel", "Retry:primary"],
localize=True,
**kwargs,
)
sd.show()
return sd.result
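# Illustrative usage sketch: the static helpers above return the text of the
# pressed button (or None if the window is closed), so callers branch on that
# value. Button labels may be localized, so the comparisons below assume the
# default English message catalog.
def _example_confirm_close():
    answer = Messagebox.yesnocancel(
        message="Save changes before closing?",
        title="Unsaved changes",
    )
    if answer is None or answer == "Cancel":
        return "abort"
    return "save" if answer == "Yes" else "discard"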
class Querybox:
"""This class contains various static methods that request data
from the end user."""
@staticmethod
def get_color(
parent=None,
title="Color Chooser",
initialcolor=None,
):
"""Show a color picker and return the select color when the
user pressed OK.

Parameters:
parent (Widget):
The parent widget.
title (str):
Optional text that appears on the titlebar.
initialcolor (str):
The initial color to display in the 'Current' color
frame.
Returns:
Tuple[rgb, hsl, hex]
The selected color in various colors models.
"""
from ttkbootstrap.dialogs.colorchooser import ColorChooserDialog
cd = ColorChooserDialog(parent, title, initialcolor)
cd.show()
return cd.result
@staticmethod
def get_date(
parent=None,
title=" ",
firstweekday=6,
startdate=None,
bootstyle="primary",
):
"""Shows a calendar popup and returns the selection.

Parameters:
parent (Widget):
The parent widget; the popup will appear to the
bottom-right of the parent widget. If no parent is
provided, the widget is centered on the screen.
title (str):
The text that appears on the popup titlebar.
firstweekday (int):
Specifies the first day of the week. `0` is Monday, `6` is
Sunday (the default).
startdate (datetime):
The date to be in focus when the widget is displayed;
bootstyle (str):
The following colors can be used to change the color of the
title and hover / pressed color -> primary, secondary, info,
warning, success, danger, light, dark.
Returns:
datetime:
The date selected; the current date if no date is selected.
"""
chooser = DatePickerDialog(
parent=parent,
title=title,
firstweekday=firstweekday,
startdate=startdate,
bootstyle=bootstyle,
)
return chooser.date_selected
@staticmethod
def get_string(
prompt="", title=" ", initialvalue=None, parent=None, **kwargs
):
"""Request a string type input from the user.

Parameters:
prompt (str):
A message to display in the message box above the entry
widget.
title (str):
The string displayed as the title of the message box. This
option is ignored on Mac OS X, where platform guidelines
forbid the use of a title on this kind of dialog.
initialvalue (Any):
The initial value in the entry widget.
parent (Widget):
Makes the window the logical parent of the message box. The
messagebox is displayed on top of its parent window.
**kwargs (Dict):
Other optional keyword arguments.
Returns:
str:
The string value of the entry widget.
"""
initialvalue = initialvalue or ""
dialog = QueryDialog(
prompt, title, initialvalue, parent=parent, **kwargs
)
dialog.show()
return dialog._result
@staticmethod
def get_integer(
prompt="",
title=" ",
initialvalue=None,
minvalue=None,
maxvalue=None,
parent=None,
**kwargs,
):
"""Request an integer type input from the user.

Parameters:
prompt (str):
A message to display in the message box above the entry
widget.
title (str):
The string displayed as the title of the message box. This
option is ignored on Mac OS X, where platform guidelines
forbid the use of a title on this kind of dialog.
initialvalue (int):
The initial value in the entry widget.
minvalue (int):
The minimum allowed value.
maxvalue (int):
The maximum allowed value.
parent (Widget):
Makes the window the logical parent of the message box. The
messagebox is displayed on top of its parent window.
**kwargs (Dict):
Other optional keyword arguments.
Returns:
int:
The integer value of the entry widget.
"""
initialvalue = initialvalue or ""
dialog = QueryDialog(
prompt,
title,
initialvalue,
minvalue,
maxvalue,
datatype=int,
parent=parent,
**kwargs,
)
dialog.show()
return dialog._result
@staticmethod
def get_float(
prompt="",
title=" ",
initialvalue=None,
minvalue=None,
maxvalue=None,
parent=None,
**kwargs,
):
"""Request a float type input from the user.

Parameters:
prompt (str):
A message to display in the message box above the entry
widget.
title (str):
The string displayed as the title of the message box. This
option is ignored on Mac OS X, where platform guidelines
forbid the use of a title on this kind of dialog.
initialvalue (float):
The initial value in the entry widget.
minvalue (float):
The minimum allowed value.
maxvalue (float):
The maximum allowed value.
parent (Widget):
Makes the window the logical parent of the message box. The
messagebox is displayed on top of its parent window.
**kwargs (Dict):
Other optional keyword arguments.
Returns:
float:
The float value of the entry widget.
"""
initialvalue = initialvalue or ""
dialog = QueryDialog(
prompt,
title,
initialvalue,
minvalue,
maxvalue,
datatype=float,
parent=parent,
**kwargs,
)
dialog.show()
return dialog._result
@staticmethod
def get_font(parent=None, **kwargs):
"""Request a customized font

Parameters:
parent (Widget):
Makes the window the logical parent of the dialog box. The
dialog is displayed on top of its parent window.
**kwargs (Dict):
Other keyword arguments.
Returns:
Font:
A font object.
"""
dialog = FontDialog(parent=parent, **kwargs)
dialog.show()
return dialog.result
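# Illustrative usage sketch: the Querybox helpers block until the user submits
# or cancels and return the converted value (None when cancelled). The prompt
# text and numeric bounds are arbitrary examples.
def _example_ask_retry_count(parent=None):
    return Querybox.get_integer(
        prompt="How many times should the job retry?",
        title="Retry count",
        initialvalue=3,
        minvalue=0,
        maxvalue=10,
        parent=parent,
    )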
| <filename>src/ttkbootstrap/dialogs/dialogs.py<gh_stars>1-10
"""
This module contains various base dialog base classes that can be
used to create custom dialogs for the end user.
These classes serve as the basis for the pre-defined static helper
methods in the `Messagebox`, and `Querybox` container classes.
"""
import calendar
import textwrap
from datetime import datetime
from tkinter import font
import ttkbootstrap as ttk
from ttkbootstrap import utility
from ttkbootstrap.icons import Icon
from ttkbootstrap.constants import *
from tkinter import BaseWidget
from ttkbootstrap.localization import MessageCatalog
class Dialog(BaseWidget):
"""A simple dialog base class."""
def __init__(self, parent=None, title="", alert=False):
"""
Parameters:
parent (Widget):
Makes the window the logical parent of the message box.
The messagebox is displayed on top of its parent window.
title (str):
The string displayed as the title of the message box.
This option is ignored on Mac OS X, where platform
guidelines forbid the use of a title on this kind of
dialog.
alert (bool):
Ring the display's bell when the dialog is shown.
"""
BaseWidget._setup(self, parent, {})
self._winsys = self.master.tk.call("tk", "windowingsystem")
self._toplevel = None
self._title = title or " "
self._result = None
self._alert = alert
self._initial_focus = None
def _locate(self):
toplevel = self._toplevel
master = toplevel.master
screen_height = toplevel.winfo_screenheight()
screen_width = toplevel.winfo_screenwidth()
toplevel.update_idletasks()
if master.winfo_viewable():
m_width = master.winfo_width()
m_height = master.winfo_height()
m_x = master.winfo_rootx()
m_y = master.winfo_rooty()
else:
m_width = screen_width
m_height = screen_height
m_x = m_y = 0
w_width = toplevel.winfo_reqwidth()
w_height = toplevel.winfo_reqheight()
x = int(m_x + (m_width - w_width) * 0.45)
y = int(m_y + (m_height - w_height) * 0.3)
if x + w_width > screen_width:
x = screen_width - w_width
elif x < 0:
x = 0
if y + w_height > screen_height:
y = screen_height - w_height
elif y < 0:
y = 0
toplevel.geometry(f"+{x}+{y}")
def show(self):
"""Show the popup dialog"""
self._result = None
self.build()
self._locate()
self._toplevel.deiconify()
if self._alert:
self._toplevel.bell()
if self._initial_focus:
self._initial_focus.focus_force()
self._toplevel.grab_set()
self._toplevel.wait_window()
def create_body(self, master):
"""Create the dialog body.
This method should be overridden and is called by the `build`
method. Set the `self._initial_focus` for the widget that
should receive the initial focus.
Parameters:
master (Widget):
The parent widget.
"""
raise NotImplementedError
def create_buttonbox(self, master):
"""Create the dialog button box.
This method should be overridden and is called by the `build`
method. Set the `self._initial_focus` for the button that
        should receive the initial focus.
Parameters:
master (Widget):
The parent widget.
"""
raise NotImplementedError
def build(self):
"""Build the dialog from settings"""
        # setup toplevel based on windowing system
if self._winsys == "win32":
self._toplevel = ttk.Toplevel(
transient=self.master,
title=self._title,
resizable=(0, 0),
minsize=(250, 15),
iconify=True,
)
else:
self._toplevel = ttk.Toplevel(
transient=self.master,
title=self._title,
resizable=(0, 0),
windowtype="dialog",
iconify=True,
)
self._toplevel.withdraw() # reset the iconify state
# bind <Escape> event to window close
self._toplevel.bind("<Escape>", lambda _: self._toplevel.destroy())
# set position of popup from parent window
#self._locate()
# create widgets
self.create_body(self._toplevel)
self.create_buttonbox(self._toplevel)
# update the window before showing
self._toplevel.update_idletasks()
@property
def result(self):
"""Returns the result of the dialog."""
return self._result
class MessageDialog(Dialog):
"""A simple modal dialog class that can be used to build simple
message dialogs.
Displays a message and a set of buttons. Each of the buttons in the
message window is identified by a unique symbolic name. After the
    message window is popped up, the message box waits for the user to
select one of the buttons. Then it returns the symbolic name of the
selected button. Use a `Toplevel` widget for more advanced modal
dialog designs.
"""
def __init__(
self,
message,
title=" ",
buttons=None,
command=None,
width=50,
parent=None,
alert=False,
default=None,
padding=(20, 20),
icon=None,
**kwargs
):
"""
Parameters:
message (str):
A message to display in the message box.
title (str):
The string displayed as the title of the message box.
This option is ignored on Mac OS X, where platform
guidelines forbid the use of a title on this kind of
dialog.
buttons (List[str]):
A list of buttons to appear at the bottom of the popup
messagebox. The buttons can be a list of strings which
will define the symbolic name and the button text.
`['OK', 'Cancel']`. Alternatively, you can assign a
bootstyle to each button by using the colon to separate the
button text and the bootstyle. If no colon is found, then
the style is set to 'primary' by default.
`['OK:success','Cancel:danger']`.
command (Tuple[Callable, str]):
The function to invoke when the user closes the dialog.
The actual command is a tuple that consists of the
function to call and the symbolic name of the button that
closes the dialog.
width (int):
The maximum number of characters per line in the message.
If the text stretches beyond the limit, the line will break
at the word.
parent (Widget):
Makes the window the logical parent of the message box.
The messagebox is displayed on top of its parent window.
alert (bool):
Ring the display's bell when the dialog is shown.
default (str):
The symbolic name of the default button. The default
                button is invoked when the <Return> key is pressed.
If no default is provided, the right-most button in the
                button list will be set as the default.
padding (Union[int, Tuple[int]]):
The amount of space between the border and the widget
contents.
icon (str):
An image path, path-like object or image data to be
displayed to the left of the text.
**kwargs (Dict):
Other optional keyword arguments.
Example:
```python
root = tk.Tk()
md = MessageDialog("Displays a message with buttons.")
md.show()
```
"""
super().__init__(parent, title, alert)
self._message = message
self._command = command
self._width = width
self._alert = alert
        self._default = default
self._padding = padding
self._icon = icon
self._localize = kwargs.get('localize')
if buttons is None:
self._buttons = [
f"{MessageCatalog.translate('Cancel')}:secondary",
f"{MessageCatalog.translate('OK')}:primary"
]
else:
self._buttons = buttons
def create_body(self, master):
"""Overrides the parent method; adds the message section."""
container = ttk.Frame(master, padding=self._padding)
if self._icon:
try:
# assume this is image data
self._img = ttk.PhotoImage(data=self._icon)
icon_lbl = ttk.Label(container, image=self._img)
icon_lbl.pack(side=LEFT, padx=5)
except:
try:
# assume this is a file path
self._img = ttk.PhotoImage(file=self._icon)
icon_lbl = ttk.Label(container, image=self._img)
icon_lbl.pack(side=LEFT, padx=5)
except:
# icon is neither data nor a valid file path
print('MessageDialog icon is invalid')
if self._message:
for msg in self._message.split("\n"):
message = "\n".join(textwrap.wrap(msg, width=self._width))
message_label = ttk.Label(container, text=message)
message_label.pack(pady=(0, 3), fill=X, anchor=N)
container.pack(fill=X, expand=True)
def create_buttonbox(self, master):
"""Overrides the parent method; adds the message buttonbox"""
frame = ttk.Frame(master, padding=(5, 5))
button_list = []
for i, button in enumerate(self._buttons[::-1]):
cnf = button.split(":")
if len(cnf) == 2:
text, bootstyle = cnf
else:
text = cnf[0]
bootstyle = "secondary"
if self._localize == True:
text = MessageCatalog.translate(text)
btn = ttk.Button(frame, bootstyle=bootstyle, text=text)
btn.bind("<Return>", lambda _: btn.invoke())
btn.configure(command=lambda b=btn: self.on_button_press(b))
btn.pack(padx=2, side=RIGHT)
btn.lower() # set focus traversal left-to-right
button_list.append(btn)
if self._default is not None and text == self._default:
self._initial_focus = btn
elif self._default is None and i == 0:
self._initial_focus = btn
# bind default button to return key press and set focus
self._toplevel.bind("<Return>", lambda _, b=btn: b.invoke())
self._toplevel.bind("<KP_Enter>", lambda _, b=btn: b.invoke())
ttk.Separator(self._toplevel).pack(fill=X)
frame.pack(side=BOTTOM, fill=X, anchor=S)
if not self._initial_focus:
self._initial_focus = button_list[0]
def on_button_press(self, button):
"""Save result, destroy the toplevel, and execute command."""
self._result = button["text"]
command = self._command
if command is not None:
command()
self._toplevel.destroy()
def show(self):
"""Create and display the popup messagebox."""
super().show()
class QueryDialog(Dialog):
"""A simple modal dialog class that can be used to build simple
data input dialogs. Displays a prompt, an input box, and a set of
buttons. Additional data manipulation can be performed on the
user input post-hoc by overriding the `apply` method.
Use a `Toplevel` widget for more advanced modal dialog designs.
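Example:

An illustrative sketch (assumes a running application window; the
printed handling of the result is for demonstration only):

```python
dialog = QueryDialog("Enter your name:", title="Name")
dialog.show()
print(dialog.result)  # the validated value, or None if cancelled
```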
"""
def __init__(
self,
prompt,
title=" ",
initialvalue="",
minvalue=None,
maxvalue=None,
width=65,
datatype=str,
padding=(20, 20),
parent=None,
):
"""
Parameters:
prompt (str):
A message to display in the message box above the entry
widget.
title (str):
The string displayed as the title of the message box.
This option is ignored on Mac OS X, where platform
guidelines forbid the use of a title on this kind of
dialog.
initialvalue (Any):
The initial value in the entry widget.
minvalue (Any):
The minimum allowed value. Only valid for int and float
data types.
maxvalue (Any):
The maximum allowed value. Only valid for int and float
data types.
width (int):
The maximum number of characters per line in the
message. If the text stretches beyond the limit, the
line will break at the word.
parent (Widget):
Makes the window the logical parent of the message box.
The messagebox is displayed on top of its parent
window.
padding (Union[int, Tuple[int]]):
The amount of space between the border and the widget
contents.
datatype (Union[int, str, float]):
The data type used to validate the entry value.
"""
super().__init__(parent, title)
self._prompt = prompt
self._initialvalue = initialvalue
self._minvalue = minvalue
self._maxvalue = maxvalue
self._width = width
self._datatype = datatype
self._padding = padding
self._result = None
def create_body(self, master):
"""Overrides the parent method; adds the message and input
section."""
frame = ttk.Frame(master, padding=self._padding)
if self._prompt:
for p in self._prompt.split("\n"):
prompt = "\n".join(textwrap.wrap(p, width=self._width))
prompt_label = ttk.Label(frame, text=prompt)
prompt_label.pack(pady=(0, 5), fill=X, anchor=N)
entry = ttk.Entry(master=frame)
entry.insert(END, self._initialvalue)
entry.pack(pady=(0, 5), fill=X)
entry.bind("<Return>", self.on_submit)
entry.bind("<KP_Enter>", self.on_submit)
entry.bind("<Escape>", self.on_cancel)
frame.pack(fill=X, expand=True)
self._initial_focus = entry
def create_buttonbox(self, master):
"""Overrides the parent method; adds the message buttonbox"""
frame = ttk.Frame(master, padding=(5, 10))
submit = ttk.Button(
master=frame,
bootstyle="primary",
text=MessageCatalog.translate("Submit"),
command=self.on_submit,
)
submit.pack(padx=5, side=RIGHT)
submit.lower() # set focus traversal left-to-right
cancel = ttk.Button(
master=frame,
bootstyle="secondary",
text=MessageCatalog.translate("Cancel"),
command=self.on_cancel,
)
cancel.pack(padx=5, side=RIGHT)
cancel.lower() # set focus traversal left-to-right
ttk.Separator(self._toplevel).pack(fill=X)
frame.pack(side=BOTTOM, fill=X, anchor=S)
def on_submit(self, *_):
"""Save result, destroy the toplevel, and apply any post-hoc
data manipulations."""
self._result = self._initial_focus.get()
valid_result = self.validate()
if not valid_result:
return  # keep toplevel open for an invalid response
self._toplevel.destroy()
self.apply()
def on_cancel(self, *_):
"""Close the toplevel and return empty."""
self._toplevel.destroy()
return
def validate(self):
"""Validate the data
This method is called automatically to validate the data before
the dialog is destroyed. Can be subclassed and overridden.
"""
# no default checks required for string data types
if self._datatype not in [float, int, complex]:
return True
# convert result to appropriate data type
try:
self._result = self._datatype(self._result)
except ValueError:
msg = MessageCatalog.translate('Should be of data type')
Messagebox.ok(
message=f"{msg} `{self._datatype}`",
title=MessageCatalog.translate("Invalid data type"),
)
return False
# max value range
if self._maxvalue is not None:
if self._result > self._maxvalue:
msg = MessageCatalog.translate('Number cannot be greater than')
Messagebox.ok(
message=f"{msg} {self._maxvalue}",
title=MessageCatalog.translate("Out of range"),
)
return False
# min value range
if self._minvalue is not None:
if self._result < self._minvalue:
msg = MessageCatalog.translate('Number cannot be less than')
Messagebox.ok(
message=f"{msg} {self._minvalue}",
title=MessageCatalog.translate("Out of range"),
)
return False
# valid result
return True
def apply(self):
"""Process the data.
This method is called automatically to process the data after
the dialog is destroyed. By default, it does nothing.
"""
pass # override
class DatePickerDialog:
"""A dialog that displays a calendar popup and returns the
selected date as a datetime object.
The current date is displayed by default unless the `startdate`
parameter is provided.
The month can be changed by clicking the chevrons to the left
and right of the month-year title.
Left-click the arrow to move the calendar by one month.
Right-click the arrow to move the calendar by one year.
Left-click the title to reset the calendar to the start date.
The starting weekday can be changed with the `firstweekday`
parameter for geographies that do not start the calendar on
Sunday, which is the default.
The widget grabs focus and all screen events until released.
If you want to cancel a date selection, click the 'X' button
at the top-right corner of the widget.
The bootstyle api may be used to change the style of the widget.
The available colors include -> primary, secondary, success,
info, warning, danger, light, dark.

"""
def __init__(
self,
parent=None,
title=" ",
firstweekday=6,
startdate=None,
bootstyle=PRIMARY,
):
"""
Parameters:
parent (Widget):
The parent widget; the popup will appear to the
bottom-right of the parent widget. If no parent is
provided, the widget is centered on the screen.
title (str):
The text that appears on the titlebar.
firstweekday (int):
Specifies the first day of the week. 0=Monday,
1=Tuesday, etc...
startdate (datetime):
The date to be in focus when the widget is
displayed.
bootstyle (str):
The following colors can be used to change the color of
the title and hover / pressed color -> primary,
secondary, info, warning, success, danger, light, dark.
"""
self.parent = parent
self.root = ttk.Toplevel(
title=title,
transient=self.parent,
resizable=(False, False),
topmost=True,
minsize=(226, 1),
iconify=True
)
self.firstweekday = firstweekday
self.startdate = startdate or datetime.today().date()
self.bootstyle = bootstyle or PRIMARY
self.date_selected = self.startdate
self.date = startdate or self.date_selected
self.calendar = calendar.Calendar(firstweekday=firstweekday)
self.titlevar = ttk.StringVar()
self.datevar = ttk.IntVar()
self._setup_calendar()
self.root.grab_set()
self.root.wait_window()
def _setup_calendar(self):
"""Setup the calendar widget"""
# create the widget containers
self.frm_calendar = ttk.Frame(
master=self.root, padding=0, borderwidth=0, relief=FLAT
)
self.frm_calendar.pack(fill=BOTH, expand=YES)
self.frm_title = ttk.Frame(self.frm_calendar, padding=(3, 3))
self.frm_title.pack(fill=X)
self.frm_header = ttk.Frame(self.frm_calendar, bootstyle=SECONDARY)
self.frm_header.pack(fill=X)
# setup the toplevel widget
self.root.withdraw() # reset the iconify state
self.frm_calendar.update_idletasks() # actualize geometry
# create visual components
self._draw_titlebar()
self._draw_calendar()
# make toplevel visible
self._set_window_position()
self.root.deiconify()
def _update_widget_bootstyle(self):
self.frm_title.configure(bootstyle=self.bootstyle)
self.title.configure(bootstyle=f"{self.bootstyle}-inverse")
self.prev_period.configure(style=f"Chevron.{self.bootstyle}.TButton")
self.next_period.configure(style=f"Chevron.{self.bootstyle}.TButton")
def _draw_calendar(self):
self._update_widget_bootstyle()
self._set_title()
self._current_month_days()
self.frm_dates = ttk.Frame(self.frm_calendar)
self.frm_dates.pack(fill=BOTH, expand=YES)
for row, weekday_list in enumerate(self.monthdays):
for col, day in enumerate(weekday_list):
self.frm_dates.columnconfigure(col, weight=1)
if day == 0:
ttk.Label(
master=self.frm_dates,
text=self.monthdates[row][col].day,
anchor=CENTER,
padding=5,
bootstyle=SECONDARY,
).grid(row=row, column=col, sticky=NSEW)
else:
if all(
[
day == self.date_selected.day,
self.date.month == self.date_selected.month,
self.date.year == self.date_selected.year,
]
):
day_style = "secondary-toolbutton"
else:
day_style = f"{self.bootstyle}-calendar"
def selected(x=row, y=col):
self._on_date_selected(x, y)
btn = ttk.Radiobutton(
master=self.frm_dates,
variable=self.datevar,
value=day,
text=day,
bootstyle=day_style,
padding=5,
command=selected,
)
btn.grid(row=row, column=col, sticky=NSEW)
def _draw_titlebar(self):
"""Draw the calendar title bar which includes the month title
and the buttons that increment and decrement the selected
month.
In addition to the previous and next MONTH commands that are
assigned to the button press, a "right-click" event is assigned
to each button that causes the calendar to move to the previous
and next YEAR.
"""
# create and pack the title and action buttons
self.prev_period = ttk.Button(
master=self.frm_title, text="«", command=self.on_prev_month
)
self.prev_period.pack(side=LEFT)
self.title = ttk.Label(
master=self.frm_title,
textvariable=self.titlevar,
anchor=CENTER,
font="-weight bold",
)
self.title.pack(side=LEFT, fill=X, expand=YES)
self.next_period = ttk.Button(
master=self.frm_title,
text="»",
command=self.on_next_month,
)
self.next_period.pack(side=LEFT)
# bind "year" callbacks to action buttons
self.prev_period.bind("<Button-3>", self.on_prev_year, "+")
self.next_period.bind("<Button-3>", self.on_next_year, "+")
self.title.bind("<Button-1>", self.on_reset_date)
# create and pack days of the week header
for col in self._header_columns():
ttk.Label(
master=self.frm_header,
text=col,
anchor=CENTER,
padding=5,
bootstyle=(SECONDARY, INVERSE),
).pack(side=LEFT, fill=X, expand=YES)
def _set_title(self):
_titledate = f'{self.date.strftime("%B %Y")}'
self.titlevar.set(value=_titledate)
def _current_month_days(self):
"""Fetch the day numbers and dates for all days in the current
month. `monthdays` is a list of days as integers, and
`monthdates` is a list of `datetime` objects.
"""
self.monthdays = self.calendar.monthdayscalendar(
year=self.date.year, month=self.date.month
)
self.monthdates = self.calendar.monthdatescalendar(
year=self.date.year, month=self.date.month
)
def _header_columns(self):
"""Create and return a list of weekdays to be used as a header
in the calendar. The order of the weekdays is based on the
`firstweekday` property.
Returns:
List[str]:
A list of weekday column names for the calendar header.
"""
weekdays = [MessageCatalog.translate("Mo"),
MessageCatalog.translate("Tu"),
MessageCatalog.translate("We"),
MessageCatalog.translate("Th"),
MessageCatalog.translate("Fr"),
MessageCatalog.translate("Sa"),
MessageCatalog.translate("Su")]
header = weekdays[self.firstweekday :] + weekdays[: self.firstweekday]
return header
def _on_date_selected(self, row, col):
"""Callback for selecting a date.
An index is assigned to each date button that corresponds to
the dates in the `monthdates` matrix. When the user clicks a
button to select a date, the index from this button is used
to lookup the date value of the button based on the row and
column index reference. This value is saved in the
`date_selected` property and the `Toplevel` is destroyed.
Parameters:
row (int):
The row index of the selected date in the `monthdates` matrix.
col (int):
The column index of the selected date in the `monthdates` matrix.
The selected date is stored in the `date_selected` property.
"""
self.date_selected = self.monthdates[row][col]
self.root.destroy()
def _selection_callback(func):
"""Calls the decorated `func` and redraws the calendar."""
def inner(self, *args):
func(self, *args)
self.frm_dates.destroy()
self._draw_calendar()
return inner
@_selection_callback
def on_next_month(self):
"""Increment the calendar data to the next month"""
year, month = self._nextmonth(self.date.year, self.date.month)
self.date = datetime(year=year, month=month, day=1).date()
@_selection_callback
def on_next_year(self, *_):
"""Increment the calendar data to the next year"""
year = self.date.year + 1
month = self.date.month
self.date = datetime(year=year, month=month, day=1).date()
@_selection_callback
def on_prev_month(self):
"""Decrement the calendar to the previous year"""
year, month = self._prevmonth(self.date.year, self.date.month)
self.date = datetime(year=year, month=month, day=1).date()
@_selection_callback
def on_prev_year(self, *_):
"""Decrement the calendar data to the previous year"""
year = self.date.year - 1
month = self.date.month
self.date = datetime(year=year, month=month, day=1).date()
@_selection_callback
def on_reset_date(self, *_):
"""Set the calendar to the start date"""
self.date = self.startdate
def _set_window_position(self):
"""Move the window the to bottom-right of the parent widget, or
to the middle of the screen if no parent is provided.
"""
width = self.root.winfo_reqwidth()
height = self.root.winfo_reqheight()
if self.parent:
xpos = self.parent.winfo_rootx() + self.parent.winfo_width()
ypos = self.parent.winfo_rooty() + self.parent.winfo_height()
self.root.geometry(f"+{xpos}+{ypos}")
else:
xpos = self.root.winfo_screenwidth() // 2 - width
ypos = self.root.winfo_screenheight() // 2 - height
self.root.geometry(f"+{xpos}+{ypos}")
@staticmethod
def _nextmonth(year, month):
if month == 12:
return year+1, 1
else:
return year, month+1
@staticmethod
def _prevmonth(year, month):
if month == 1:
return year-1, 12
else:
return year, month-1
class FontDialog(Dialog):
"""A dialog that displays a variety of options for choosing a font.
This dialog constructs and returns a `Font` object based on the
options selected by the user. The initial font is based on OS
settings and will vary.
The font object is returned when the **Ok** button is pressed and
can be passed to any widget that accepts a _font_ configuration
option.

"""
def __init__(self, title="Font Selector", parent=None):
title = MessageCatalog.translate(title)
super().__init__(parent=parent, title=title)
self._style = ttk.Style()
self._default = font.nametofont("TkDefaultFont")
self._actual = self._default.actual()
self._size = ttk.Variable(value=self._actual["size"])
self._family = ttk.Variable(value=self._actual["family"])
self._slant = ttk.Variable(value=self._actual["slant"])
self._weight = ttk.Variable(value=self._actual["weight"])
self._overstrike = ttk.Variable(value=self._actual["overstrike"])
self._underline = ttk.Variable(value=self._actual["underline"])
self._preview_font = font.Font()
self._slant.trace_add("write", self._update_font_preview)
self._weight.trace_add("write", self._update_font_preview)
self._overstrike.trace_add("write", self._update_font_preview)
self._underline.trace_add("write", self._update_font_preview)
_headingfont = font.nametofont("TkHeadingFont")
_headingfont.configure(weight="bold")
self._update_font_preview()
self._families = set([self._family.get()])
for f in font.families():
if all([f, not f.startswith("@"), "emoji" not in f.lower()]):
self._families.add(f)
def create_body(self, master):
width = utility.scale_size(master, 600)
height = utility.scale_size(master, 500)
self._toplevel.geometry(f"{width}x{height}")
family_size_frame = ttk.Frame(master, padding=10)
family_size_frame.pack(fill=X, anchor=N)
self._initial_focus = self._font_families_selector(family_size_frame)
self._font_size_selector(family_size_frame)
self._font_options_selectors(master, padding=10)
self._font_preview(master, padding=10)
def create_buttonbox(self, master):
container = ttk.Frame(master, padding=(5, 10))
container.pack(fill=X)
ok_btn = ttk.Button(
master=container,
bootstyle="primary",
text=MessageCatalog.translate("OK"),
command=self._on_submit,
)
ok_btn.pack(side=RIGHT, padx=5)
ok_btn.bind("<Return>", lambda _: ok_btn.invoke())
cancel_btn = ttk.Button(
master=container,
bootstyle="secondary",
text=MessageCatalog.translate("Cancel"),
command=self._on_cancel,
)
cancel_btn.pack(side=RIGHT, padx=5)
cancel_btn.bind("<Return>", lambda _: cancel_btn.invoke())
def _font_families_selector(self, master):
container = ttk.Frame(master)
container.pack(fill=BOTH, expand=YES, side=LEFT)
header = ttk.Label(container, text=MessageCatalog.translate("Family"), font="TkHeadingFont")
header.pack(fill=X, pady=(0, 2), anchor=N)
listbox = ttk.Treeview(
master=container,
height=5,
show="",
columns=[0],
)
listbox.column(0, width=utility.scale_size(listbox, 250))
listbox.pack(side=LEFT, fill=BOTH, expand=YES)
listbox_vbar = ttk.Scrollbar(
container,
command=listbox.yview,
orient=VERTICAL,
bootstyle="rounded",
)
listbox_vbar.pack(side=RIGHT, fill=Y)
listbox.configure(yscrollcommand=listbox_vbar.set)
for f in self._families:
listbox.insert("", iid=f, index=END, tags=[f], values=[f])
listbox.tag_configure(f, font=(f, self._size.get()))
iid = self._family.get()
listbox.selection_set(iid) # select default value
listbox.see(iid) # ensure default is visible
listbox.bind(
"<<TreeviewSelect>>", lambda e: self._on_select_font_family(e)
)
return listbox
def _font_size_selector(self, master):
container = ttk.Frame(master)
container.pack(side=LEFT, fill=Y, padx=(10, 0))
header = ttk.Label(container, text=MessageCatalog.translate("Size"), font="TkHeadingFont")
header.pack(fill=X, pady=(0, 2), anchor=N)
sizes_listbox = ttk.Treeview(container, height=7, columns=[0], show="")
sizes_listbox.column(0, width=utility.scale_size(sizes_listbox, 24))
sizes = [*range(8, 13), *range(13, 30, 2), 36, 48, 72]
for s in sizes:
sizes_listbox.insert("", iid=s, index=END, values=[s])
iid = self._size.get()
sizes_listbox.selection_set(iid)
sizes_listbox.see(iid)
sizes_listbox.bind(
"<<TreeviewSelect>>", lambda e: self._on_select_font_size(e)
)
sizes_listbox_vbar = ttk.Scrollbar(
master=container,
orient=VERTICAL,
command=sizes_listbox.yview,
bootstyle="round",
)
sizes_listbox.configure(yscrollcommand=sizes_listbox_vbar.set)
sizes_listbox.pack(side=LEFT, fill=Y, expand=YES, anchor=N)
sizes_listbox_vbar.pack(side=LEFT, fill=Y, expand=YES)
def _font_options_selectors(self, master, padding: int):
container = ttk.Frame(master, padding=padding)
container.pack(fill=X, padx=2, pady=2, anchor=N)
weight_lframe = ttk.Labelframe(container, text=MessageCatalog.translate("Weight"), padding=5)
weight_lframe.pack(side=LEFT, fill=X, expand=YES)
opt_normal = ttk.Radiobutton(
master=weight_lframe,
text=MessageCatalog.translate("normal"),
value="normal",
variable=self._weight,
)
opt_normal.invoke()
opt_normal.pack(side=LEFT, padx=5, pady=5)
opt_bold = ttk.Radiobutton(
master=weight_lframe,
text=MessageCatalog.translate("bold"),
value="bold",
variable=self._weight,
)
opt_bold.pack(side=LEFT, padx=5, pady=5)
slant_lframe = ttk.Labelframe(container, text=MessageCatalog.translate("Slant"), padding=5)
slant_lframe.pack(side=LEFT, fill=X, padx=10, expand=YES)
opt_roman = ttk.Radiobutton(
master=slant_lframe,
text=MessageCatalog.translate("roman"),
value="roman",
variable=self._slant,
)
opt_roman.invoke()
opt_roman.pack(side=LEFT, padx=5, pady=5)
opt_italic = ttk.Radiobutton(
master=slant_lframe,
text=MessageCatalog.translate("italic"),
value="italic",
variable=self._slant,
)
opt_italic.pack(side=LEFT, padx=5, pady=5)
effects_lframe = ttk.Labelframe(container, text=MessageCatalog.translate("Effects"), padding=5)
effects_lframe.pack(side=LEFT, padx=(2, 0), fill=X, expand=YES)
opt_underline = ttk.Checkbutton(
master=effects_lframe, text=MessageCatalog.translate("underline"), variable=self._underline
)
opt_underline.pack(side=LEFT, padx=5, pady=5)
opt_overstrike = ttk.Checkbutton(
master=effects_lframe, text=MessageCatalog.translate("overstrike"), variable=self._overstrike
)
opt_overstrike.pack(side=LEFT, padx=5, pady=5)
def _font_preview(self, master, padding: int):
container = ttk.Frame(master, padding=padding)
container.pack(fill=BOTH, expand=YES, anchor=N)
header = ttk.Label(container, text=MessageCatalog.translate("Preview"), font="TkHeadingFont")
header.pack(fill=X, pady=2, anchor=N)
content = MessageCatalog.translate("The quick brown fox jumps over the lazy dog.")
self._preview_text = ttk.Text(
master=container,
height=3,
font=self._preview_font,
highlightbackground=self._style.colors.primary,
)
self._preview_text.insert(END, content)
self._preview_text.pack(fill=BOTH, expand=YES)
container.pack_propagate(False)
def _on_select_font_family(self, e):
tree: ttk.Treeview = self._toplevel.nametowidget(e.widget)
fontfamily = tree.selection()[0]
self._family.set(value=fontfamily)
self._update_font_preview()
def _on_select_font_size(self, e):
tree: ttk.Treeview = self._toplevel.nametowidget(e.widget)
fontsize = tree.selection()[0]
self._size.set(value=fontsize)
self._update_font_preview()
def _on_submit(self) -> font.Font:
self._toplevel.destroy()
return self.result
def _on_cancel(self):
self._toplevel.destroy()
def _update_font_preview(self, *_):
family = self._family.get()
size = self._size.get()
slant = self._slant.get()
overstrike = self._overstrike.get()
underline = self._underline.get()
self._preview_font.config(
family=family,
size=size,
slant=slant,
overstrike=overstrike,
underline=underline,
)
try:
self._preview_text.configure(font=self._preview_font)
except:
pass
self._result = self._preview_font
class Messagebox:
"""This class contains various static methods that show popups with
a message to the end user with various arrangements of buttons
and alert options."""
@staticmethod
def show_info(message, title=" ", parent=None, **kwargs):
"""Display a modal dialog box with an OK button and an INFO
icon.

Parameters:
message (str):
A message to display in the message box.
title (str):
The string displayed as the title of the messagebox. This
option is ignored on Mac OS X, where platform guidelines
forbid the use of a title on this kind of dialog.
parent (Union[Window, Toplevel]):
Makes the window the logical parent of the message box. The
message box is displayed on top of its parent window.
**kwargs (Dict):
Other optional keyword arguments.
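Example:

Illustrative sketch; `app` is an assumed, pre-existing application
window and is not created by this method:

```python
Messagebox.show_info("Backup completed.", title="Info", parent=app)
```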
"""
sd = MessageDialog(
message=message,
title=title,
parent=parent,
buttons=["OK:primary"],
icon=Icon.info,
localize=True
)
sd.show()
@staticmethod
def show_warning(message, title=" ", parent=None, **kwargs):
"""Display a modal dialog box with an OK button and a
warning icon. Also will ring the display bell.

Parameters:
message (str):
A message to display in the message box.
title (str):
The string displayed as the title of the messagebox. This
option is ignored on Mac OS X, where platform guidelines
forbid the use of a title on this kind of dialog.
parent (Union[Window, Toplevel]):
Makes the window the logical parent of the message box. The
message box is displayed on top of its parent window.
**kwargs (Dict):
Other optional keyword arguments.
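Example:

Illustrative sketch; `app` is an assumed, pre-existing application
window:

```python
Messagebox.show_warning("Disk space is running low.", parent=app)
```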
"""
sd = MessageDialog(
message=message,
title=title,
parent=parent,
buttons=["OK:primary"],
icon=Icon.warning,
alert=True,
localize=True,
**kwargs,
)
sd.show()
@staticmethod
def show_error(message, title=" ", parent=None, **kwargs):
"""Display a modal dialog box with an OK button and an
error icon. Also will ring the display bell.

Parameters:
message (str):
A message to display in the message box.
title (str):
The string displayed as the title of the messagebox. This
option is ignored on Mac OS X, where platform guidelines
forbid the use of a title on this kind of dialog.
parent (Union[Window, Toplevel]):
Makes the window the logical parent of the message box. The
message box is displayed on top of its parent window.
**kwargs (Dict):
Other optional keyword arguments.
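Example:

Illustrative sketch; `app` is an assumed, pre-existing application
window:

```python
Messagebox.show_error("Could not open the file.", title="Error", parent=app)
```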
"""
sd = MessageDialog(
message=message,
title=title,
parent=parent,
buttons=["OK:primary"],
icon=Icon.error,
alert=True,
localize=True,
**kwargs,
)
sd.show()
@staticmethod
def show_question(
message,
title=" ",
parent=None,
buttons=["No:secondary", "Yes:primary"],
**kwargs,
):
"""Display a modal dialog box with yes, no buttons and a
question icon. Also will ring the display bell. You may also
change the button scheme using the `buttons` parameter.

Parameters:
message (str):
A message to display in the message box.
title (str):
The string displayed as the title of the messagebox. This
option is ignored on Mac OS X, where platform guidelines
forbid the use of a title on this kind of dialog.
parent (Union[Window, Toplevel]):
Makes the window the logical parent of the message box. The
message box is displayed on top of its parent window.
buttons (List[str]):
A list of buttons to appear at the bottom of the popup
messagebox. The buttons can be a list of strings which
will define the symbolic name and the button text.
`['OK', 'Cancel']`. Alternatively, you can assign a
bootstyle to each button by using the colon to separate the
button text and the bootstyle. If no colon is found, then
the style is set to 'primary' by default.
`['Yes:success','No:danger']`.
**kwargs (Dict):
Other optional keyword arguments.
Returns:
Union[str, None]:
The symbolic name of the button pressed, or None if the
window is closed without pressing a button.
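Example:

An illustrative sketch; the custom button list and the comparison
against the returned button text are assumptions for demonstration:

```python
answer = Messagebox.show_question(
    "Delete the selected files?",
    buttons=["Cancel:secondary", "Delete:danger"],
)
if answer == "Delete":
    ...  # perform the deletion
```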
"""
sd = MessageDialog(
message=message,
title=title,
parent=parent,
buttons=buttons,
icon=Icon.question,
alert=True,
localize=True,
**kwargs,
)
sd.show()
return sd.result
@staticmethod
def ok(message, title=" ", alert=False, parent=None, **kwargs):
"""Display a modal dialog box with an OK button and and optional
bell alert.

Parameters:
message (str):
A message to display in the message box.
title (str):
The string displayed as the title of the messagebox. This
option is ignored on Mac OS X, where platform guidelines
forbid the use of a title on this kind of dialog.
alert (bool):
Specifies whether to ring the display bell.
parent (Union[Window, Toplevel]):
Makes the window the logical parent of the message box. The
message box is displayed on top of its parent window.
**kwargs (Dict):
Other optional keyword arguments.
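Example:

A minimal sketch (assumes a running application window):

```python
Messagebox.ok("Profile saved.", alert=True)
```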
"""
sd = MessageDialog(
title=title,
message=message,
parent=parent,
alert=alert,
buttons=["OK:primary"],
localize=True,
**kwargs,
)
sd.show()
@staticmethod
def okcancel(message, title=" ", alert=False, parent=None, **kwargs):
"""Displays a modal dialog box with OK and Cancel buttons and
returns the symbolic name of the button pressed.

Parameters:
message (str):
A message to display in the message box.
title (str):
The string displayed as the title of the messagebox. This
option is ignored on Mac OS X, where platform guidelines
forbid the use of a title on this kind of dialog.
alert (bool):
Specifies whether to ring the display bell.
parent (Union[Window, Toplevel]):
Makes the window the logical parent of the message box. The
message box is displayed on top of its parent window.
**kwargs (Dict):
Other optional keyword arguments.
Returns:
Union[str, None]:
The symbolic name of the button pressed, or None if the
window is closed without pressing a button.
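Example:

A minimal sketch; comparing against "OK" assumes the default English
localization:

```python
if Messagebox.okcancel("Apply the new settings?") == "OK":
    ...  # apply the settings
```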
"""
sd = MessageDialog(
title=title, message=message, parent=parent, alert=alert, localize=True, **kwargs
)
sd.show()
return sd.result
@staticmethod
def yesno(message, title=" ", alert=False, parent=None, **kwargs):
"""Display a modal dialog box with YES and NO buttons and return
the symbolic name of the button pressed.

Parameters:
message (str):
A message to display in the message box.
title (str):
The string displayed as the title of the messagebox. This
option is ignored on Mac OS X, where platform guidelines
forbid the use of a title on this kind of dialog.
alert (bool):
Specifies whether to ring the display bell.
parent (Union[Window, Toplevel]):
Makes the window the logical parent of the message box. The
message box is displayed on top of its parent window.
**kwargs (Dict):
Other optional keyword arguments.
Returns:
Union[str, None]:
The symbolic name of the button pressed, or None if the
window is closed without pressing a button.
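Example:

A minimal sketch; comparing against "Yes" assumes the default English
localization:

```python
answer = Messagebox.yesno("Save changes before closing?")
if answer == "Yes":
    ...  # save the changes
```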
"""
sd = MessageDialog(
title=title,
message=message,
parent=parent,
buttons=["No", "Yes:primary"],
alert=alert,
localize=True,
**kwargs,
)
sd.show()
return sd.result
@staticmethod
def yesnocancel(message, title=" ", alert=False, parent=None, **kwargs):
"""Display a modal dialog box with YES, NO, and Cancel buttons,
and return the symbolic name of the button pressed.

Parameters:
message (str):
A message to display in the message box.
title (str):
The string displayed as the title of the messagebox. This
option is ignored on Mac OS X, where platform guidelines
forbid the use of a title on this kind of dialog.
alert (bool):
Specifies whether to ring the display bell.
parent (Union[Window, Toplevel]):
Makes the window the logical parent of the message box. The
message box is displayed on top of its parent window.
**kwargs (Dict):
Optional keyword arguments.
Returns:
Union[str, None]:
The symbolic name of the button pressed, or None if the
window is closed without pressing a button.
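Example:

A minimal sketch (assumes a running application window):

```python
answer = Messagebox.yesnocancel("Save changes before closing?")
# answer is "Yes", "No", "Cancel", or None if the window was closed
```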
"""
sd = MessageDialog(
title=title,
message=message,
parent=parent,
alert=alert,
buttons=["Cancel", "No", "Yes:primary"],
localize=True,
**kwargs,
)
sd.show()
return sd.result
@staticmethod
def retrycancel(message, title=" ", alert=False, parent=None, **kwargs):
"""Display a modal dialog box with RETRY and Cancel buttons;
returns the symbolic name of the button pressed.

Parameters:
message (str):
A message to display in the message box.
title (str):
The string displayed as the title of the messagebox. This
option is ignored on Mac OS X, where platform guidelines
forbid the use of a title on this kind of dialog.
alert (bool):
Specifies whether to ring the display bell.
parent (Union[Window, Toplevel]):
Makes the window the logical parent of the message box. The
message box is displayed on top of its parent window.
**kwargs (Dict):
Other optional keyword arguments.
Returns:
Union[str, None]:
The symbolic name of the button pressed, or None if the
window is closed without pressing a button.
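Example:

An illustrative sketch; `download` is a hypothetical helper, not part
of this module:

```python
choice = Messagebox.retrycancel("The download failed.", alert=True)
if choice == "Retry":
    download()
```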
"""
sd = MessageDialog(
title=title,
message=message,
parent=parent,
alert=alert,
buttons=["Cancel", "Retry:primary"],
localize=True,
**kwargs,
)
sd.show()
return sd.result
class Querybox:
"""This class contains various static methods that request data
from the end user."""
@staticmethod
def get_color(
parent=None,
title="Color Chooser",
initialcolor=None,
):
"""Show a color picker and return the select color when the
user pressed OK.

Parameters:
parent (Widget):
The parent widget.
title (str):
Optional text that appears on the titlebar.
initialcolor (str):
The initial color to display in the 'Current' color
frame.
Returns:
Tuple[rgb, hsl, hex]
The selected color in various color models.
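Example:

An illustrative sketch; the unpacking assumes the user confirmed a
color (the dialog may return None when dismissed):

```python
colors = Querybox.get_color(initialcolor="#bada55")
if colors is not None:
    rgb, hsl, hexcolor = colors
```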
"""
from ttkbootstrap.dialogs.colorchooser import ColorChooserDialog
cd = ColorChooserDialog(parent, title, initialcolor)
cd.show()
return cd.result
@staticmethod
def get_date(
parent=None,
title=" ",
firstweekday=6,
startdate=None,
bootstyle="primary",
):
"""Shows a calendar popup and returns the selection.

Parameters:
parent (Widget):
The parent widget; the popup will appear to the
bottom-right of the parent widget. If no parent is
provided, the widget is centered on the screen.
title (str):
The text that appears on the popup titlebar.
firstweekday (int):
Specifies the first day of the week. `0` is Monday, `6` is
Sunday (the default).
startdate (datetime):
The date to be in focus when the widget is displayed;
bootstyle (str):
The following colors can be used to change the color of the
title and hover / pressed color -> primary, secondary, info,
warning, success, danger, light, dark.
Returns:
datetime:
The date selected; the current date if no date is selected.
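Example:

A minimal sketch (the title and bootstyle values are arbitrary):

```python
date = Querybox.get_date(title="Choose a date", bootstyle="info")
print(date)  # a datetime.date
```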
"""
chooser = DatePickerDialog(
parent=parent,
title=title,
firstweekday=firstweekday,
startdate=startdate,
bootstyle=bootstyle,
)
return chooser.date_selected
@staticmethod
def get_string(
prompt="", title=" ", initialvalue=None, parent=None, **kwargs
):
"""Request a string type input from the user.

Parameters:
prompt (str):
A message to display in the message box above the entry
widget.
title (str):
The string displayed as the title of the message box. This
option is ignored on Mac OS X, where platform guidelines
forbid the use of a title on this kind of dialog.
initialvalue (Any):
The initial value in the entry widget.
parent (Widget):
Makes the window the logical parent of the message box. The
messagebox is displayed on top of its parent window.
**kwargs (Dict):
Other optional keyword arguments.
Returns:
str:
The string value of the entry widget.
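Example:

A minimal sketch (assumes a running application window):

```python
name = Querybox.get_string("What is your name?", title="Name")
print(name)  # the entered text, or None if cancelled
```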
"""
initialvalue = initialvalue or ""
dialog = QueryDialog(
prompt, title, initialvalue, parent=parent, **kwargs
)
dialog.show()
return dialog._result
@staticmethod
def get_integer(
prompt="",
title=" ",
initialvalue=None,
minvalue=None,
maxvalue=None,
parent=None,
**kwargs,
):
"""Request an integer type input from the user.

Parameters:
prompt (str):
A message to display in the message box above the entry
widget.
title (str):
The string displayed as the title of the message box. This
option is ignored on Mac OS X, where platform guidelines
forbid the use of a title on this kind of dialog.
initialvalue (int):
The initial value in the entry widget.
minvalue (int):
The minimum allowed value.
maxvalue (int):
The maximum allowed value.
parent (Widget):
Makes the window the logical parent of the message box. The
messagebox is displayed on top of its parent window.
**kwargs (Dict):
Other optional keyword arguments.
Returns:
int:
The integer value of the entry widget.
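Example:

A minimal sketch (the value range shown is arbitrary):

```python
age = Querybox.get_integer("Enter your age:", minvalue=0, maxvalue=120)
```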
"""
initialvalue = initialvalue or ""
dialog = QueryDialog(
prompt,
title,
initialvalue,
minvalue,
maxvalue,
datatype=int,
parent=parent,
**kwargs,
)
dialog.show()
return dialog._result
@staticmethod
def get_float(
prompt="",
title=" ",
initialvalue=None,
minvalue=None,
maxvalue=None,
parent=None,
**kwargs,
):
"""Request a float type input from the user.

Parameters:
prompt (str):
A message to display in the message box above the entry
widget.
title (str):
The string displayed as the title of the message box. This
option is ignored on Mac OS X, where platform guidelines
forbid the use of a title on this kind of dialog.
initialvalue (float):
The initial value in the entry widget.
minvalue (float):
The minimum allowed value.
maxvalue (float):
The maximum allowed value.
parent (Widget):
Makes the window the logical parent of the message box. The
messagebox is displayed on top of its parent window.
**kwargs (Dict):
Other optional keyword arguments.
Returns:
float:
The float value of the entry widget.
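Example:

A minimal sketch (the minimum value shown is arbitrary):

```python
price = Querybox.get_float("Enter a price:", minvalue=0.0)
```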
"""
initialvalue = initialvalue or ""
dialog = QueryDialog(
prompt,
title,
initialvalue,
minvalue,
maxvalue,
datatype=float,
parent=parent,
**kwargs,
)
dialog.show()
return dialog._result
@staticmethod
def get_font(parent=None, **kwargs):
"""Request a customized font

Parameters:
parent (Widget):
Makes the window the logical parent of the dialog box. The
dialog is displayed on top of its parent window.
**kwargs (Dict):
Other keyword arguments.
Returns:
Font:
A font object.
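Example:

An illustrative sketch; `app` is an assumed, pre-existing application
window and the label is for demonstration only:

```python
chosen = Querybox.get_font(parent=app)
ttk.Label(app, text="Preview", font=chosen).pack()
```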
"""
dialog = FontDialog(parent=parent, **kwargs)
dialog.show()
return dialog.result
| en | 0.655318 | This module contains various base dialog base classes that can be used to create custom dialogs for the end user. These classes serve as the basis for the pre-defined static helper methods in the `Messagebox`, and `Querybox` container classes. A simple dialog base class. Parameters: parent (Widget): Makes the window the logical parent of the message box. The messagebox is displayed on top of its parent window. title (str): The string displayed as the title of the message box. This option is ignored on Mac OS X, where platform guidelines forbid the use of a title on this kind of dialog. alert (bool): Ring the display's bell when the dialog is shown. Show the popup dialog Create the dialog body. This method should be overridden and is called by the `build` method. Set the `self._initial_focus` for the widget that should receive the initial focus. Parameters: master (Widget): The parent widget. Create the dialog button box. This method should be overridden and is called by the `build` method. Set the `self._initial_focus` for the button that should receive the intial focus. Parameters: master (Widget): The parent widget. Build the dialog from settings # setup toplevel based on widowing system # reset the iconify state # bind <Escape> event to window close # set position of popup from parent window #self._locate() # create widgets # update the window before showing Returns the result of the dialog. A simple modal dialog class that can be used to build simple message dialogs. Displays a message and a set of buttons. Each of the buttons in the message window is identified by a unique symbolic name. After the message window is popped up, the message box awaits for the user to select one of the buttons. Then it returns the symbolic name of the selected button. Use a `Toplevel` widget for more advanced modal dialog designs. Parameters: message (str): A message to display in the message box. title (str): The string displayed as the title of the message box. This option is ignored on Mac OS X, where platform guidelines forbid the use of a title on this kind of dialog. buttons (List[str]): A list of buttons to appear at the bottom of the popup messagebox. The buttons can be a list of strings which will define the symbolic name and the button text. `['OK', 'Cancel']`. Alternatively, you can assign a bootstyle to each button by using the colon to separate the button text and the bootstyle. If no colon is found, then the style is set to 'primary' by default. `['OK:success','Cancel:danger']`. command (Tuple[Callable, str]): The function to invoke when the user closes the dialog. The actual command is a tuple that consists of the function to call and the symbolic name of the button that closes the dialog. width (int): The maximum number of characters per line in the message. If the text stretches beyond the limit, the line will break at the word. parent (Widget): Makes the window the logical parent of the message box. The messagebox is displayed on top of its parent window. alert (bool): Ring the display's bell when the dialog is shown. default (str): The symbolic name of the default button. The default button is invoked when the the <Return> key is pressed. If no default is provided, the right-most button in the button list will be set as the default., padding (Union[int, Tuple[int]]): The amount of space between the border and the widget contents. icon (str): An image path, path-like object or image data to be displayed to the left of the text. 
**kwargs (Dict): Other optional keyword arguments. Example: ```python root = tk.Tk() md = MessageDialog("Displays a message with buttons.") md.show() ``` Overrides the parent method; adds the message section. # assume this is image data # assume this is a file path # icon is neither data nor a valid file path Overrides the parent method; adds the message buttonbox # set focus traversal left-to-right # bind default button to return key press and set focus Save result, destroy the toplevel, and execute command. Create and display the popup messagebox. A simple modal dialog class that can be used to build simple data input dialogs. Displays a prompt, and input box, and a set of buttons. Additional data manipulation can be performed on the user input post-hoc by overriding the `apply` method. Use a `Toplevel` widget for more advanced modal dialog designs. Parameters: prompt (str): A message to display in the message box above the entry widget. title (str): The string displayed as the title of the message box. This option is ignored on Mac OS X, where platform guidelines forbid the use of a title on this kind of dialog. initialvalue (Any): The initial value in the entry widget. minvalue (Any): The minimum allowed value. Only valid for int and float data types. maxvalue (Any): The maximum allowed value. Only valid for int and float data types. width (int): The maximum number of characters per line in the message. If the text stretches beyond the limit, the line will break at the word. parent (Widget): Makes the window the logical parent of the message box. The messagebox is displayed on top of its parent window. padding (Union[int, Tuple[int]]): The amount of space between the border and the widget contents. datatype (Union[int, str, float]): The data type used to validate the entry value. Overrides the parent method; adds the message and input section. Overrides the parent method; adds the message buttonbox # set focus traversal left-to-right # set focus traversal left-to-right Save result, destroy the toplevel, and apply any post-hoc data manipulations. # keep toplevel open for valid response Close the toplevel and return empty. Validate the data This method is called automatically to validate the data before the dialog is destroyed. Can be subclassed and overridden. # no default checks required for string data types # convert result to appropriate data type # max value range # min value range # valid result Process the data. This method is called automatically to process the data after the dialog is destroyed. By default, it does nothing. # override A dialog that displays a calendar popup and returns the selected date as a datetime object. The current date is displayed by default unless the `startdate` parameter is provided. The month can be changed by clicking the chevrons to the left and right of the month-year title. Left-click the arrow to move the calendar by one month. Right-click the arrow to move the calendar by one year. Right-click the title to reset the calendar to the start date. The starting weekday can be changed with the `firstweekday` parameter for geographies that do not start the calendar on Sunday, which is the default. The widget grabs focus and all screen events until released. If you want to cancel a date selection, click the 'X' button at the top-right corner of the widget. The bootstyle api may be used to change the style of the widget. The available colors include -> primary, secondary, success, info, warning, danger, light, dark.  
Parameters: parent (Widget): The parent widget; the popup will appear to the bottom-right of the parent widget. If no parent is provided, the widget is centered on the screen. title (str): The text that appears on the titlebar. firstweekday (int): Specifies the first day of the week. 0=Monday, 1=Tuesday, etc... startdate (datetime): The date to be in focus when the widget is displayed. bootstyle (str): The following colors can be used to change the color of the title and hover / pressed color -> primary, secondary, info, warning, success, danger, light, dark. Setup the calendar widget # create the widget containers # setup the toplevel widget # reset the iconify state # actualize geometry # create visual components # make toplevel visible Draw the calendar title bar which includes the month title and the buttons that increment and decrement the selected month. In addition to the previous and next MONTH commands that are assigned to the button press, a "right-click" event is assigned to each button that causes the calendar to move to the previous and next YEAR. # create and pack the title and action buttons # bind "year" callbacks to action buttons # create and pack days of the week header Fetch the day numbers and dates for all days in the current month. `monthdays` is a list of days as integers, and `monthdates` is a list of `datetime` objects. Create and return a list of weekdays to be used as a header in the calendar. The order of the weekdays is based on the `firstweekday` property. Returns: List[str]: A list of weekday column names for the calendar header. Callback for selecting a date. An index is assigned to each date button that corresponds to the dates in the `monthdates` matrix. When the user clicks a button to select a date, the index from this button is used to lookup the date value of the button based on the row and column index reference. This value is saved in the `date_selected` property and the `Toplevel` is destroyed. Parameters: index (Tuple[int, int]): A row and column index of the date selected; to be found in the `monthdates` matrix. Returns: datetime: The date selected Calls the decorated `func` and redraws the calendar. Increment the calendar data to the next month Increment the calendar data to the next year Decrement the calendar to the previous year Set the calendar to the start date Move the window the to bottom-right of the parent widget, or to the middle of the screen if no parent is provided. A dialog that displays a variety of options for choosing a font. This dialog constructs and returns a `Font` object based on the options selected by the user. The initial font is based on OS settings and will vary. The font object is returned when the **Ok** button is pressed and can be passed to any widget that accepts a _font_ configuration option.  # select default value # ensure default is visible This class contains various static methods that show popups with a message to the end user with various arrangments of buttons and alert options. Display a modal dialog box with an OK button and an INFO icon.  Parameters: message (str): A message to display in the message box. title (str): The string displayed as the title of the messagebox. This option is ignored on Mac OS X, where platform guidelines forbid the use of a title on this kind of dialog. parent (Union[Window, Toplevel]): Makes the window the logical parent of the message box. The message box is displayed on top of its parent window. **kwargs (Dict): Other optional keyword arguments. 
Display a modal dialog box with an OK button and a warning icon. Also will ring the display bell.  Parameters: message (str): A message to display in the message box. title (str): The string displayed as the title of the messagebox. This option is ignored on Mac OS X, where platform guidelines forbid the use of a title on this kind of dialog. parent (Union[Window, Toplevel]): Makes the window the logical parent of the message box. The message box is displayed on top of its parent window. **kwargs (Dict): Other optional keyword arguments. Display a modal dialog box with an OK button and an error icon. Also will ring the display bell.  Parameters: message (str): A message to display in the message box. title (str): The string displayed as the title of the messagebox. This option is ignored on Mac OS X, where platform guidelines forbid the use of a title on this kind of dialog. parent (Union[Window, Toplevel]): Makes the window the logical parent of the message box. The message box is displayed on top of its parent window. **kwargs (Dict): Other optional keyword arguments. Display a modal dialog box with yes, no buttons and a question icon. Also will ring the display bell. You may also change the button scheme using the `buttons` parameter.  Parameters: message (str): A message to display in the message box. title (str): The string displayed as the title of the messagebox. This option is ignored on Mac OS X, where platform guidelines forbid the use of a title on this kind of dialog. parent (Union[Window, Toplevel]): Makes the window the logical parent of the message box. The message box is displayed on top of its parent window. buttons (List[str]): A list of buttons to appear at the bottom of the popup messagebox. The buttons can be a list of strings which will define the symbolic name and the button text. `['OK', 'Cancel']`. Alternatively, you can assign a bootstyle to each button by using the colon to separate the button text and the bootstyle. If no colon is found, then the style is set to 'primary' by default. `['Yes:success','No:danger']`. **kwargs (Dict): Other optional keyword arguments. Returns: Union[str, None]: The symbolic name of the button pressed, or None if the window is closed without pressing a button. Display a modal dialog box with an OK button and and optional bell alert.  Parameters: message (str): A message to display in the message box. title (str): The string displayed as the title of the messagebox. This option is ignored on Mac OS X, where platform guidelines forbid the use of a title on this kind of dialog. alert (bool): Specified whether to ring the display bell. parent (Union[Window, Toplevel]): Makes the window the logical parent of the message box. The message box is displayed on top of its parent window. **kwargs (Dict): Other optional keyword arguments. Displays a modal dialog box with OK and Cancel buttons and return the symbolic name of the button pressed.  Parameters: message (str): A message to display in the message box. title (str): The string displayed as the title of the messagebox. This option is ignored on Mac OS X, where platform guidelines forbid the use of a title on this kind of dialog. alert (bool): Specified whether to ring the display bell. parent (Union[Window, Toplevel]): Makes the window the logical parent of the message box. The message box is displayed on top of its parent window. **kwargs (Dict): Other optional keyword arguments. 
Returns: Union[str, None]: The symbolic name of the button pressed, or None if the window is closed without pressing a button. Display a modal dialog box with YES and NO buttons and return the symbolic name of the button pressed.  Parameters: message (str): A message to display in the message box. title (str): The string displayed as the title of the messagebox. This option is ignored on Mac OS X, where platform guidelines forbid the use of a title on this kind of dialog. alert (bool): Specified whether to ring the display bell. parent (Union[Window, Toplevel]): Makes the window the logical parent of the message box. The message box is displayed on top of its parent window. **kwargs (Dict): Other optional keyword arguments. Returns: Union[str, None]: The symbolic name of the button pressed, or None if the window is closed without pressing a button. Display a modal dialog box with YES, NO, and Cancel buttons, and return the symbolic name of the button pressed.  Parameters: message (str): A message to display in the message box. title (str): The string displayed as the title of the messagebox. This option is ignored on Mac OS X, where platform guidelines forbid the use of a title on this kind of dialog. alert (bool): Specified whether to ring the display bell. parent (Union[Window, Toplevel]): Makes the window the logical parent of the message box. The message box is displayed on top of its parent window. **kwargs (Dict): Optional keyword arguments. Returns: Union[str, None]: The symbolic name of the button pressed, or None if the window is closed without pressing a button. Display a modal dialog box with RETRY and Cancel buttons; returns the symbolic name of the button pressed.  Parameters: message (str): A message to display in the message box. title (str): The string displayed as the title of the messagebox. This option is ignored on Mac OS X, where platform guidelines forbid the use of a title on this kind of dialog. alert (bool): Specified whether to ring the display bell. parent (Union[Window, Toplevel]): Makes the window the logical parent of the message box. The message box is displayed on top of its parent window. **kwargs (Dict): Other optional keyword arguments. Returns: Union[str, None]: The symbolic name of the button pressed, or None if the window is closed without pressing a button. This class contains various static methods that request data from the end user. Show a color picker and return the select color when the user pressed OK.  Parameters: parent (Widget): The parent widget. title (str): Optional text that appears on the titlebar. initialcolor (str): The initial color to display in the 'Current' color frame. Returns: Tuple[rgb, hsl, hex] The selected color in various colors models. Shows a calendar popup and returns the selection.  Parameters: parent (Widget): The parent widget; the popup will appear to the bottom-right of the parent widget. If no parent is provided, the widget is centered on the screen. title (str): The text that appears on the popup titlebar. firstweekday (int): Specifies the first day of the week. `0` is Monday, `6` is Sunday (the default). startdate (datetime): The date to be in focus when the widget is displayed; bootstyle (str): The following colors can be used to change the color of the title and hover / pressed color -> primary, secondary, info, warning, success, danger, light, dark. Returns: datetime: The date selected; the current date if no date is selected. Request a string type input from the user.  
Parameters: prompt (str): A message to display in the message box above the entry widget. title (str): The string displayed as the title of the message box. This option is ignored on Mac OS X, where platform guidelines forbid the use of a title on this kind of dialog. initialvalue (Any): The initial value in the entry widget. parent (Widget): Makes the window the logical parent of the message box. The messagebox is displayed on top of its parent window. **kwargs (Dict): Other optional keyword arguments. Returns: str: The string value of the entry widget. Request an integer type input from the user.  Parameters: prompt (str): A message to display in the message box above the entry widget. title (str): The string displayed as the title of the message box. This option is ignored on Mac OS X, where platform guidelines forbid the use of a title on this kind of dialog. initialvalue (int): The initial value in the entry widget. minvalue (int): The minimum allowed value. maxvalue (int): The maximum allowed value. parent (Widget): Makes the window the logical parent of the message box. The messagebox is displayed on top of its parent window. **kwargs (Dict): Other optional keyword arguments. Returns: int: The integer value of the entry widget. Request a float type input from the user.  Parameters: prompt (str): A message to display in the message box above the entry widget. title (str): The string displayed as the title of the message box. This option is ignored on Mac OS X, where platform guidelines forbid the use of a title on this kind of dialog. initialvalue (float): The initial value in the entry widget. minvalue (float): The minimum allowed value. maxvalue (float): The maximum allowed value. parent (Widget): Makes the window the logical parent of the message box. The messagebox is displayed on top of its parent window. **kwargs (Dict): Other optional keyword arguments. Returns: float: The float value of the entry widget. Request a customized font  Parameters: parent (Widget): Makes the window the logical parent of the dialog box. The dialog is displayed on top of its parent window. **kwargs (Dict): Other keyword arguments. Returns: Font: A font object. | 2.806757 | 3 |
Google-Play-Store-App-Rating/code.py | venky4121994/ga-learner-dsmp-repo | 0 | 8294 |
# --------------
#Importing header files
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
#Code starts here
data = pd.read_csv(path)
data.hist(['Rating'])
data = data[data['Rating']<=5]
data.hist(['Rating'])
#Code ends here
# --------------
# code starts here
total_null = data.isnull().sum()
percent_null = (total_null/data.isnull().count())
missing_data = pd.concat([total_null,percent_null],keys=['Total','Percent'],axis=1)
print(missing_data)
data.dropna(inplace=True)
total_null_1 = data.isnull().sum()
percent_null_1 = (total_null_1/data.isnull().count())
missing_data_1 = pd.concat([total_null_1,percent_null_1],keys=['Total','Percent'],axis=1)
print(missing_data_1)
# code ends here
# --------------
#Code starts here
plt.figure(figsize=(10,20))
catplot = sns.catplot(x = "Category", y = "Rating", data=data, kind="box",height=10)
catplot.set_xticklabels(rotation=90)
plt.title('Rating vs Category [BoxPlot]',size = 20)
#Code ends here
# --------------
#Importing header files
from sklearn.preprocessing import MinMaxScaler, LabelEncoder
#Code starts here
print(data['Installs'])
data['Installs'] = data['Installs'].str.replace('+','',regex=False)  # literal replacement; '+' is a regex metacharacter
data['Installs'] = data['Installs'].str.replace(',','',regex=False)
data['Installs'] = data['Installs'].astype('int32')
le = LabelEncoder()
data['Installs'] = le.fit_transform(data['Installs'])
graph = sns.regplot(data['Installs'],data['Rating'],data=data)
graph.set_title('Rating vs Installs [RegPlot]')
plt.show()
#Code ends here
# --------------
#Code starts here
print(data['Price'].value_counts())
data['Price'] = data['Price'].str.replace('$','',regex=False)  # literal replacement; '$' is a regex anchor
data['Price'] = data['Price'].astype('float32')
graph2 = sns.regplot(data['Price'],data['Rating'],data=data)
graph2.set_title('Rating vs Price [RegPlot]')
#Code ends here
# --------------
#Code starts here
print(len(data['Genres'].unique()), "genres")
data['Genres'] = data['Genres'].str.split(';').str[0]
gr_mean = data[['Genres','Rating']].groupby(['Genres'],as_index=False).mean()
print(gr_mean.describe())
gr_mean=gr_mean.sort_values('Rating')
print(gr_mean.head(1))   # genre with the lowest mean rating
print(gr_mean.tail(1))   # genre with the highest mean rating
#Code ends here
# --------------
#Code starts here
data['Last Updated'] = pd.to_datetime(data['Last Updated'])
data['Last Updated Days'] = (data['Last Updated'].max()-data['Last Updated']).dt.days
plt.figure(figsize = (10,10))
sns.regplot(x="Last Updated Days", y="Rating",color='lightpink',data=data)
plt.title('Rating vs Last Updated [Regplot]',size =20)
#Code ends here
converters/brat2iob.py | Banguiskode/nerds | 15 | 8295 | import argparse
import operator
import os
import re
import shutil
import spacy
import tempfile
from nerds.utils import spans_to_tokens, get_logger
def segment_text_to_sentences(text_file, sentence_splitter):
""" Segment text into sentences. Text is provided by BRAT in .txt
file.
Args:
text_file (str): the full path to the BRAT .txt file.
sentence_splitter (spacy LM): SpaCy EN language model.
Returns:
sentences (list((int, int, str))): list of sentence spans.
Spans are triples of (start_offset, end_offset, text),
where offset is relative to the text.
"""
sentences = []
ftext = open(text_file, "r")
for line in ftext:
splits = sentence_splitter(line.strip())
for sent in splits.sents:
sentences.append((sent.start_char, sent.end_char, sent.text))
ftext.close()
return sentences
def parse_text_annotations(ann_file):
""" Parses BRAT annotations provided in the .ann file and converts them
to annotation spans of (start_position, end_position, entity_class).
Args:
ann_file (str): full path to the BRAT .ann file.
Returns:
annotations (list((int, int, str))): list of annotation spans.
Spans are triples of (start_offset, end_offset, entity_class)
where offset is relative to the text.
"""
annots = []
fann = open(ann_file, "r")
for line in fann:
cols = re.split(r"\s+", line.strip())
if not cols[0].startswith("T"):
continue
annots.append((int(cols[2]), int(cols[3]), cols[1]))
fann.close()
return annots
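# Illustrative example (hypothetical entity, not from the data): an .ann line such as
# "T1\tPER 0 11\tAlice Smith" is parsed into the span tuple (0, 11, 'PER').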
def apply_annotations(sentences, annotations, tokenizer):
""" Apply annotation spans to the sentence spans to create a list of tokens
and tags.
Args:
sentences (list((int, int, str))): list of sentence spans.
annotations (list((int, int, str))): list of annotation spans.
tokenizer (spacy LM): SpaCy EN language model.
Returns:
tokens_tags_list (list((list(str), list(str)))): list of list of token
tag pairs. Each list of token-tag pairs corresponds to a single
sentence.
"""
tokens_tags_list = []
for sent_start, sent_end, sent_text in sentences:
sent_annots = [a for a in annotations if a[0] >= sent_start and a[1] <= sent_end]
# convert document offsets to sentence offsets
sent_annots = [(s[0] - sent_start, s[1] - sent_start, s[2]) for s in sent_annots]
tokens, tags = spans_to_tokens(sent_text, sent_annots, tokenizer)
tokens_tags_list.append(zip(tokens, tags))
return tokens_tags_list
def convert_brat_to_iob(input_dir, output_file, nlp):
""" Convenience Convertor function.
Args:
input_dir (str): the directory where the BRAT .txt and .ann files
are located.
output_file (str): the full path name of file to write output in
IOB format to.
nlp (SpaCy LM): reference to the SpaCy EN model.
Returns:
None.
"""
fout = open(output_file, "w")
for text_file in os.listdir(input_dir):
# only process .txt and .ann pairs in specified directory
if not text_file.endswith(".txt"):
continue
annot_file = text_file[:-4] + ".ann"
if not os.path.exists(os.path.join(input_dir, annot_file)):
# do not process file if no corresponding .ann file
continue
# process file pair
logger.info("Processing file: {:s}".format(text_file))
sentences = segment_text_to_sentences(os.path.join(input_dir, text_file), nlp)
annotations = parse_text_annotations(os.path.join(input_dir, annot_file))
tokens_tags_list = apply_annotations(sentences, annotations, nlp)
for tokens_tags in tokens_tags_list:
for token, tag in tokens_tags:
fout.write("{:s}\t{:s}\n".format(token, tag))
fout.write("\n")
fout.close()
def do_self_test(nlp):
""" Simple self-test with small dataset to prove that this works okay. """
text = "<NAME>, 61 years old, will join the board as a nonexecutive director, Nov. 29. Mr. Vinken is chairman of Elsevier N.V., the Dutch publishing group."
annotations = [
"T1 PER 0 13 <NAME>",
"T2 PER 86 96 Mr. Vinken",
"T3 DATE 15 27 61 years old",
"T4 DATE 77 84 Nov. 29",
"T5 ORG 112 125 Elsevier N.V.",
"T6 NORP 131 136 Dutch"
]
input_dir = tempfile.mkdtemp(dir="/tmp")
ftext = open(os.path.join(input_dir, "test.txt"), "w")
ftext.write(text)
ftext.close()
fann = open(os.path.join(input_dir, "test.ann"), "w")
for line in annotations:
fann.write(line + "\n")
fann.close()
output_file = os.path.join(input_dir, "test.iob")
convert_brat_to_iob(input_dir, output_file, nlp)
fout = open(output_file, "r")
for line in fout:
logger.warn(line.strip())
shutil.rmtree(input_dir)
################################ main ################################
#
# usage: brat2iob.py [-h] [-i INPUT_DIR] [-o OUTPUT_FILE] [-t]
# Script to convert BRAT annotations to IOB (NERDS) format.
# optional arguments:
# -h, --help show this help message and exit
# -i INPUT_DIR, --input_dir INPUT_DIR
# Directory to store BRAT .txt and .ann files.
# -o OUTPUT_FILE, --output_file OUTPUT_FILE
# Output file to write IOB output to.
# -t, --test Runs self test.
######################################################################
parser = argparse.ArgumentParser(
description="Script to convert BRAT annotations to IOB (NERDS) format.")
parser.add_argument("-i", "--input_dir", help="Directory to store BRAT .txt and .ann files.")
parser.add_argument("-o", "--output_file", help="Output file to write IOB output to.")
parser.add_argument("-t", "--test", help="Runs self test.", action="store_true")
args = parser.parse_args()
logger = get_logger()
input_dir = args.input_dir
output_file = args.output_file
self_test = args.test
nlp = spacy.load("en")
if self_test:
logger.info("Executing self test...")
do_self_test(nlp)
else:
logger.info("Reading BRAT .txt and .ann files from: {:s}".format(input_dir))
logger.info("Writing IOB tokens/tags to file: {:s}".format(output_file))
convert_brat_to_iob(input_dir, output_file, nlp)
kraken/lib/util.py | zjsteyn/kraken | 1 | 8296 | """
Ocropus's magic PIL-numpy array conversion routines. They express slightly
different behavior from PIL.Image.toarray().
"""
import unicodedata
import numpy as np
from PIL import Image
__all__ = ['pil2array', 'array2pil']
def pil2array(im: Image.Image, alpha: int = 0) -> np.array:
if im.mode == '1':
return np.array(im.convert('L'))
return np.array(im)
def array2pil(a: np.array) -> Image:
if a.dtype == np.dtype("B"):
if a.ndim == 2:
return Image.frombytes("L", (a.shape[1], a.shape[0]),
a.tostring())
elif a.ndim == 3:
return Image.frombytes("RGB", (a.shape[1], a.shape[0]),
a.tostring())
else:
raise Exception("bad image rank")
elif a.dtype == np.dtype('float32'):
return Image.frombytes("F", (a.shape[1], a.shape[0]), a.tostring())
else:
raise Exception("unknown image type")
def is_bitonal(im: Image.Image) -> bool:
"""
Tests a PIL.Image for bitonality.
Args:
im (PIL.Image.Image): Image to test
Returns:
True if the image contains only two different color values. False
otherwise.
"""
return im.getcolors(2) is not None and len(im.getcolors(2)) == 2
def get_im_str(im: Image.Image) -> str:
return im.filename if hasattr(im, 'filename') else str(im)
def is_printable(char: str) -> bool:
"""
Determines if a code point is printable/visible when printed.
Args:
char (str): Input code point.
Returns:
True if printable, False otherwise.
"""
letters = ('LC', 'Ll', 'Lm', 'Lo', 'Lt', 'Lu')
numbers = ('Nd', 'Nl', 'No')
punctuation = ('Pc', 'Pd', 'Pe', 'Pf', 'Pi', 'Po', 'Ps')
symbol = ('Sc', 'Sk', 'Sm', 'So')
printable = letters + numbers + punctuation + symbol
return unicodedata.category(char) in printable
def make_printable(char: str) -> str:
"""
Takes a Unicode code point and returns a printable representation of it.
Args:
char (str): Input code point
Returns:
Either the original code point, the name of the code point if it is a
combining mark, whitespace etc., or the hex code if it is a control
symbol.
"""
if not char or is_printable(char):
return char
elif unicodedata.category(char) in ('Cc', 'Cs', 'Co'):
return '0x{:x}'.format(ord(char))
else:
return unicodedata.name(char)
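# Minimal usage sketch (not part of the original kraken module): a quick round-trip
# check of the helpers above. Assumes Pillow and a numpy version that still supports
# ndarray.tostring(), as the module itself does.
if __name__ == "__main__":
    img = Image.new("L", (4, 4), 255)
    arr = pil2array(img)                    # PIL image -> uint8 numpy array
    assert array2pil(arr).size == img.size  # numpy array -> PIL image round-trip
    print(is_bitonal(img))                  # False: the uniform image has only one colour
    print(make_printable("\u0301"))         # 'COMBINING ACUTE ACCENT'
    print(make_printable("\x07"))           # control character -> '0x7'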
analysis/calculate_holding_amount.py | hao44le/ico_top_holder_analysis | 538 | 8297 | import sys
sys.path.insert(0,'..')
from data.whale_data import exchnage_accounts
from data.html_helper import check_if_address_name_exists
from data.whale_eth_tx_data import *
from data.whale_token_tx_data import identify_investor_type_token
holding_account = "holding_account"
deposit_account = 'deposit_account'
withdraw_account = "withdraw_account"
in_type = "IN"
out_type = "OUT"
all_acc_types = dict()
for acc in exchnage_accounts:
all_acc_types[acc] = exchange_type
def update_y_array(X,y,timestamp,amount):
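    # Adds `amount` to every y bucket whose time bin in X comes after `timestamp`,
    # building a cumulative balance curve over the X grid.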
target_index = 0
for i in range(len(X)):
x_time = X[i]
if timestamp < x_time:
target_index = i
break
for i in range(target_index,len(y)):
y[i] += amount
return y
def perform_bfs_on_accounts(out_txs,top_holder_type,acc,m_type='OUT'):
print("\t"+m_type)
unique_out = set()
for out in out_txs:
unique_out.add(out[3])
unique_out = list(unique_out)[:5]
for out in unique_out:
print("\t"+out)
if out not in all_acc_types:
investor_type = identify_investor_type(out)
if investor_type == affliate_type:
investor_type = identify_investor_type_token(out)
print("\t\t{}".format(investor_type))
else:
investor_type = all_acc_types[out]
if investor_type == exchange_type:
top_holder_type[acc] = deposit_account if m_type == "OUT" else withdraw_account
all_acc_types[out] = investor_type
if acc not in top_holder_type:
top_holder_type[acc] = holding_account
return top_holder_type
def calculate_holding_amount(X,escape_accounts,txs):
top_holder_type = dict()
for acc in txs:
tx = txs[acc]
if acc in escape_accounts:
continue
#If the current account has never sent any tokens out, ignore it
out_txs = [item for item in tx if item[2] == 'OUT']
if len(out_txs) == 0:
print("\tholding account")
top_holder_type[acc] = holding_account
continue
# build all trace Y: holding_amount, deposit_amount, withdraw_amount
amount_trace_y = [0] * len(X)
for holder in txs:
if holder in escape_accounts:
continue
if holder not in top_holder_type:
print("{} not identified! ".format(holder))
continue
holder_type = top_holder_type[holder]
holder_txs = txs[holder]
print("{} {}".format(holder,holder_type))
for tx in holder_txs:
[timestamp,from_a,tx_type,to_a,amount] = tx
if holder_type == holding_account:
if tx_type == in_type:
amount_trace_y = update_y_array(X,amount_trace_y,timestamp,amount)
else:
amount_trace_y = update_y_array(X,amount_trace_y,timestamp,-amount)
return amount_trace_y
textbox/trainer/trainer.py | JBoRu/TextBox-1 | 1 | 8298 | # @Time : 2020/11/14
# @Author : <NAME>, <NAME>
# @Email : <EMAIL>
# UPDATE:
# @Time : 2020/12/2, 2020/11/27, 2020/12/3, 2020/12/26
# @Author : <NAME>, <NAME>, <NAME>, <NAME>
# @Email : <EMAIL>, <EMAIL>, <EMAIL>, <EMAIL>
r"""
textbox.trainer.trainer
################################
"""
import os
import torch
import torch.optim as optim
import numpy as np
import matplotlib.pyplot as plt
import copy
import math
from torch.utils.data import DataLoader
from time import time
from logging import getLogger
from textbox.module.Optimizer.optim import ScheduledOptim
from textbox.evaluator import NgramEvaluator, TranslationEvaluator, SummarizationEvaluator
from textbox.utils import ensure_dir, early_stopping
class AbstractTrainer(object):
r"""Trainer Class is used to manage the training and evaluation processes of text generation system models.
AbstractTrainer is an abstract class in which the fit() and evaluate() methods should be implemented according
to different training and evaluation strategies.
"""
def __init__(self, config, model):
self.config = config
self.model = model
def fit(self, train_data):
r"""Train the model based on the train data.
"""
raise NotImplementedError('Method [fit] should be implemented.')
def evaluate(self, eval_data):
r"""Evaluate the model based on the eval data.
"""
raise NotImplementedError('Method [evaluate] should be implemented.')
class Trainer(AbstractTrainer):
r"""The basic Trainer for basic training and evaluation strategies in text generation systems.
This class defines common functions for training and evaluation processes of most text generation system models,
including fit(), evaluate(), resume_checkpoint() and some other features helpful for model training and evaluation.
Generally speaking, this class can serve most text generation system models, as long as the training process simply
optimizes a single loss without involving any complex training strategies such as adversarial learning,
pre-training and so on.
Initializing the Trainer needs two parameters: `config` and `model`. `config` records the parameters information
for controlling training and evaluation, such as `learning_rate`, `epochs`, `eval_step` and so on.
More information can be found in [placeholder]. `model` is the instantiated object of a Model Class.
"""
def __init__(self, config, model):
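        # Illustrative only -- the real keys come from TextBox's config files; the
        # hypothetical values below just show the expected shape of `config`:
        # config = {'learner': 'adam', 'learning_rate': 1e-3, 'epochs': 50, 'eval_step': 1,
        #           'stopping_step': 3, 'eval_batch_size': 64, 'checkpoint_dir': 'saved/', ...}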
super(Trainer, self).__init__(config, model)
self.logger = getLogger()
self.learner = config['learner']
self.learning_rate = config['learning_rate']
self.epochs = config['epochs']
self.eval_step = min(config['eval_step'], self.epochs)
self.stopping_step = config['stopping_step']
self.test_batch_size = config['eval_batch_size']
self.device = config['device']
self.embedding_size = config['embedding_size']
self.warmup_steps = config['warmup_steps']
self.checkpoint_dir = config['checkpoint_dir']
ensure_dir(self.checkpoint_dir)
saved_model_file = self.config['filename'] + '.pth'
self.saved_model_file = os.path.join(self.checkpoint_dir, saved_model_file)
self.generated_text_dir = config['generated_text_dir']
ensure_dir(self.generated_text_dir)
saved_text_file = self.config['filename'] + '.txt'
self.saved_text_file = os.path.join(self.generated_text_dir, saved_text_file)
self.start_epoch = 0
self.cur_step = 0
self.best_valid_score = 100000000
self.best_valid_result = None
self.train_loss_dict = dict()
self.optimizer = self._build_optimizer()
self.task_type = config['task_type'].lower()
if self.task_type == "translation":
self.evaluator = TranslationEvaluator(config)
elif self.task_type == "summarization":
self.evaluator = SummarizationEvaluator(config)
else:
self.evaluator = NgramEvaluator(config)
self.item_tensor = None
self.tot_item_num = None
self.iid_field = config['ITEM_ID_FIELD']
def _build_optimizer(self):
r"""Init the Optimizer
Returns:
torch.optim: the optimizer
"""
if self.learner.lower() == 'adam':
optimizer = optim.Adam(self.model.parameters(), lr=self.learning_rate)
elif self.learner.lower() == 'sgd':
optimizer = optim.SGD(self.model.parameters(), lr=self.learning_rate)
elif self.learner.lower() == 'adagrad':
optimizer = optim.Adagrad(self.model.parameters(), lr=self.learning_rate)
elif self.learner.lower() == 'rmsprop':
optimizer = optim.RMSprop(self.model.parameters(), lr=self.learning_rate)
elif self.learner.lower() == 'schedule':
optimizer = ScheduledOptim(optim.Adam(self.model.parameters(), betas=(0.9, 0.98), eps=1e-09),
self.learning_rate, self.embedding_size, self.warmup_steps)
else:
self.logger.warning('Received unrecognized optimizer, set default Adam optimizer')
optimizer = optim.Adam(self.model.parameters(), lr=self.learning_rate)
return optimizer
def _train_epoch(self, train_data, epoch_idx):
r"""Train the model in an epoch
Args:
train_data (DataLoader): the train data
epoch_idx (int): the current epoch id
Returns:
float/tuple: The sum of loss returned by all batches in this epoch. If the loss in each batch contains
multiple parts and the model returns these multiple parts of the loss instead of the summed loss, it will return a
tuple which includes the sum of the loss for each part.
"""
self.model.train()
total_loss = None
for batch_idx, data in enumerate(train_data):
self.optimizer.zero_grad()
losses = self.model.calculate_loss(data, epoch_idx=epoch_idx)
if isinstance(losses, tuple):
loss = sum(losses)
loss_tuple = tuple(per_loss.item() for per_loss in losses)
total_loss = loss_tuple if total_loss is None else tuple(map(sum, zip(total_loss, loss_tuple)))
else:
loss = losses
total_loss = losses.item() if total_loss is None else total_loss + losses.item()
self._check_nan(loss)
loss.backward()
self.optimizer.step()
train_loss = total_loss / len(train_data)
return train_loss
def _valid_epoch(self, valid_data):
r"""Validate the model with valid data
Args:
valid_data (DataLoader): the valid data
Returns:
float: valid score
dict: valid result
"""
self.model.eval()
total_loss = None
for batch_idx, data in enumerate(valid_data):
losses = self.model.calculate_loss(data)
if isinstance(losses, tuple):
loss = sum(losses)
loss_tuple = tuple(per_loss.item() for per_loss in losses)
total_loss = loss_tuple if total_loss is None else tuple(map(sum, zip(total_loss, loss_tuple)))
else:
loss = losses
total_loss = losses.item() if total_loss is None else total_loss + losses.item()
self._check_nan(loss)
valid_loss = total_loss / len(valid_data)
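# Perplexity is the exponential of the mean cross-entropy loss, e.g. an average
# valid loss of 4.0 corresponds to a perplexity of exp(4.0) ~= 54.6.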
ppl = np.exp(valid_loss)
return valid_loss, ppl
def _save_checkpoint(self, epoch):
r"""Store the model parameters information and training information.
Args:
epoch (int): the current epoch id
"""
state = {
'config': self.config,
'epoch': epoch,
'cur_step': self.cur_step,
'best_valid_score': self.best_valid_score,
'state_dict': self.model.state_dict(),
'optimizer': self.optimizer.state_dict(),
}
torch.save(state, self.saved_model_file)
def _save_generated_text(self, generated_corpus):
r"""Store the text generated by our model.
Args:
generated_corpus (list of string list): the generated corpus to be written to file.
"""
with open(self.saved_text_file, 'w') as fin:
for tokens in generated_corpus:
fin.write(' '.join(tokens) + '\n')
def resume_checkpoint(self, resume_file):
r"""Load the model parameters information and training information.
Args:
resume_file (file): the checkpoint file
"""
resume_file = str(resume_file)
checkpoint = torch.load(resume_file)
self.start_epoch = checkpoint['epoch'] + 1
self.cur_step = checkpoint['cur_step']
self.best_valid_score = checkpoint['best_valid_score']
# load architecture params from checkpoint
if checkpoint['config']['model'].lower() != self.config['model'].lower():
self.logger.warning('Architecture configuration given in config file is different from that of checkpoint. '
'This may yield an exception while state_dict is being loaded.')
self.model.load_state_dict(checkpoint['state_dict'])
# load optimizer state from checkpoint only when optimizer type is not changed
self.optimizer.load_state_dict(checkpoint['optimizer'])
message_output = 'Checkpoint loaded. Resume training from epoch {}'.format(self.start_epoch)
self.logger.info(message_output)
def _check_nan(self, loss):
if torch.isnan(loss):
raise ValueError('Training loss is nan')
def _generate_train_loss_output(self, epoch_idx, s_time, e_time, losses, train_info=""):
train_loss_output = "epoch %d %straining [time: %.2fs, " % (epoch_idx, train_info, e_time - s_time)
if isinstance(losses, tuple):
for idx, loss in enumerate(losses):
train_loss_output += 'train_loss%d: %.4f, ' % (idx + 1, loss)
train_loss_output = train_loss_output[:-2]
else:
train_loss_output += "train loss: %.4f" % losses
return train_loss_output + ']'
def fit(self, train_data, valid_data=None, verbose=True, saved=True):
r"""Train the model based on the train data and the valid data.
Args:
train_data (DataLoader): the train data
valid_data (DataLoader, optional): the valid data, default: None.
If it's None, the early_stopping is invalid.
verbose (bool, optional): whether to write training and evaluation information to logger, default: True
saved (bool, optional): whether to save the model parameters, default: True
Returns:
(float, dict): best valid score and best valid result. If valid_data is None, it returns (-1, None)
"""
for epoch_idx in range(self.start_epoch, self.epochs):
# train
training_start_time = time()
train_loss = self._train_epoch(train_data, epoch_idx)
self.train_loss_dict[epoch_idx] = sum(train_loss) if isinstance(train_loss, tuple) else train_loss
training_end_time = time()
self._save_checkpoint(epoch_idx)
train_loss_output = \
self._generate_train_loss_output(epoch_idx, training_start_time, training_end_time, train_loss)
if verbose:
self.logger.info(train_loss_output)
# eval
if self.eval_step <= 0 or not valid_data:
if saved:
self._save_checkpoint(epoch_idx)
update_output = 'Saving current: %s' % self.saved_model_file
if verbose:
self.logger.info(update_output)
continue
if (epoch_idx + 1) % self.eval_step == 0:
valid_start_time = time()
with torch.no_grad():
valid_score, valid_result = self._valid_epoch(valid_data)
# valid_loss, ppl
self.best_valid_score, self.cur_step, stop_flag, update_flag = early_stopping(
valid_score, self.best_valid_score, self.cur_step,
max_step=self.stopping_step, bigger=False)
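# early_stopping returns the running best score, a patience counter, and two flags:
# stop_flag (patience exhausted) and update_flag (this epoch improved the best score).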
# better models are supposed to provide smaller perplexity and loss
valid_end_time = time()
valid_score_output = "epoch %d evaluating [time: %.2fs, valid_loss: %f]" % \
(epoch_idx, valid_end_time - valid_start_time, valid_score)
valid_result_output = 'valid ppl: {}'.format(valid_result)
if verbose:
self.logger.info(valid_score_output)
self.logger.info(valid_result_output)
if update_flag:
if saved:
self._save_checkpoint(epoch_idx)
update_output = 'Saving current best: %s' % self.saved_model_file
if verbose:
self.logger.info(update_output)
self.best_valid_result = valid_result
if stop_flag:
stop_output = 'Finished training, best eval result in epoch %d' % \
(epoch_idx - self.cur_step * self.eval_step)
if verbose:
self.logger.info(stop_output)
break
return self.best_valid_score, self.best_valid_result
def _evaluate_nll_test(self, eval_data):
r"""Calculate the negative log-likelihood of the eval_data.
Args:
eval_data (DataLoader): the eval data.
Returns:
Float: NLL_test of the eval data.
"""
total_loss = 0
for epoch_idx, eval_batch in enumerate(eval_data):
nll_test = self.model.calculate_nll_test(eval_batch, epoch_idx)
total_loss += float(nll_test)
return total_loss / len(eval_data)
@torch.no_grad()
def evaluate(self, eval_data, load_best_model=True, model_file=None):
r"""Evaluate the model based on the eval data.
Args:
eval_data (DataLoader): the eval data
load_best_model (bool, optional): whether load the best model in the training process, default: True.
It should be set True, if users want to test the model after training.
model_file (str, optional): the saved model file, default: None. If users want to test the previously
trained model file, they can set this parameter.
Returns:
dict: eval result, key is the eval metric and value is the corresponding metric value
"""
if load_best_model:
if model_file:
checkpoint_file = model_file
else:
checkpoint_file = self.saved_model_file
checkpoint = torch.load(checkpoint_file)
self.model.load_state_dict(checkpoint['state_dict'])
message_output = 'Loading model structure and parameters from {}'.format(checkpoint_file)
self.logger.info(message_output)
self.model.eval()
with torch.no_grad():
generate_corpus = self.model.generate(eval_data)
self._save_generated_text(generate_corpus)
reference_corpus = eval_data.get_reference()
result = self.evaluator.evaluate(generate_corpus, reference_corpus)
result['nll_test'] = self._evaluate_nll_test(eval_data)
return result
def plot_train_loss(self, show=True, save_path=None):
r"""Plot the train loss in each epoch
Args:
show (bool, optional): whether to show this figure, default: True
save_path (str, optional): the data path to save the figure, default: None.
If it's None, it will not be saved.
"""
epochs = list(self.train_loss_dict.keys())
epochs.sort()
values = [float(self.train_loss_dict[epoch]) for epoch in epochs]
plt.plot(epochs, values)
plt.xticks(epochs)
plt.xlabel('Epoch')
plt.ylabel('Loss')
if show:
plt.show()
if save_path:
plt.savefig(save_path)
class UnconditionalTrainer(Trainer):
r"""UnconditionalTrainer is designed for RNN, which is a typical unconditional generator.
"""
def __init__(self, config, model):
super(UnconditionalTrainer, self).__init__(config, model)
class GANTrainer(Trainer):
r"""GANTrainer is designed for GAN, which is a generative adversarial net method.
"""
def __init__(self, config, model):
super(GANTrainer, self).__init__(config, model)
self.optimizer = None
self.g_optimizer = self._build_module_optimizer(self.model.generator)
self.d_optimizer = self._build_module_optimizer(self.model.discriminator)
self.grad_clip = config['grad_clip']
self.g_pretraining_epochs = config['g_pretraining_epochs']
self.d_pretraining_epochs = config['d_pretraining_epochs']
self.d_sample_num = config['d_sample_num']
self.d_sample_training_epochs = config['d_sample_training_epochs']
self.adversarail_training_epochs = config['adversarail_training_epochs']
self.adversarail_d_epochs = config['adversarail_d_epochs']
self.g_pretraining_loss_dict = dict()
self.d_pretraining_loss_dict = dict()
self.max_length = config['max_seq_length'] + 2
self.pad_idx = model.pad_idx
def _build_module_optimizer(self, module):
r"""Init the Module Optimizer
Args:
module (torch.nn.Module): Module of torch.nn that needs an optimizer
Returns:
torch.optim: the optimizer
"""
if self.learner.lower() == 'adam':
optimizer = optim.Adam(module.parameters(), lr=self.learning_rate)
elif self.learner.lower() == 'sgd':
optimizer = optim.SGD(module.parameters(), lr=self.learning_rate)
elif self.learner.lower() == 'adagrad':
optimizer = optim.Adagrad(module.parameters(), lr=self.learning_rate)
elif self.learner.lower() == 'rmsprop':
optimizer = optim.RMSprop(module.parameters(), lr=self.learning_rate)
else:
self.logger.warning('Received unrecognized optimizer, set default Adam optimizer')
optimizer = optim.Adam(module.parameters(), lr=self.learning_rate)
return optimizer
def _optimize_step(self, losses, total_loss, model, opt):
r"""The opt uses the clipped losses to conduct an optimization step on the model
and adds the losses to the running total_loss.
Args:
losses (torch.Tensor or tuple): The loss to be backward.
total_loss (Float): Total loss in an epoch.
model (torch.nn.Module): The model to be optimized.
opt (torch.optim): The optimizer of the model.
Returns:
torch.Tensor or tuple: Total loss in an epoch, shape: [].
"""
if isinstance(losses, tuple):
loss = sum(losses)
loss_tuple = tuple(per_loss.item() for per_loss in losses)
total_loss = loss_tuple if total_loss is None else tuple(map(sum, zip(total_loss, loss_tuple)))
else:
loss = losses
total_loss = losses.item() if total_loss is None else total_loss + losses.item()
self._check_nan(loss)
opt.zero_grad()
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), self.grad_clip)
opt.step()
return total_loss
def _save_checkpoint(self, epoch):
state = {
'config': self.config,
'epoch': epoch,
'cur_step': self.cur_step,
'best_valid_score': self.best_valid_score,
'state_dict': self.model.state_dict()
}
torch.save(state, self.saved_model_file)
def _add_pad(self, data):
r"""Pad the data to the max length of corpus.
Args:
data (torch.Tensor): The data to be padded, shape: [batch_size, max_batch_length].
Returns:
torch.Tensor: The padded data, shape: [batch_size, max_seq_length].
"""
batch_size = data.shape[0]
padded_data = torch.full((batch_size, self.max_length), self.pad_idx, dtype=torch.long, device=self.device)
padded_data[:, : data.shape[1]] = data
return padded_data
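# e.g. a batch of token ids of shape (batch_size, 17) comes back right-padded with
# pad_idx to (batch_size, self.max_length); max_length is max_seq_length + 2 (see
# __init__), presumably leaving room for the BOS/EOS positions.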
def _get_real_data(self, train_data):
r"""Get the target text indices from the train_data corpus.
Args:
train_data (DataLoader): the train data.
Returns:
torch.Tensor: The target text index, shape: [batch_size, max_batch_length].
"""
real_datas = []
for corpus in train_data:
real_data = corpus['target_idx']
real_data = self._add_pad(real_data)
real_datas.append(real_data)
real_datas = torch.cat(real_datas, dim=0)
return real_datas
def _g_train_epoch(self, train_data, epoch_idx):
r"""Train the generator module in an epoch
Args:
train_data (DataLoader): the train data
epoch_idx (int): the current epoch id
Returns:
float/tuple: The sum of loss returned by all batches in this epoch. If the loss in each batch contains
multiple parts and the model returns these multiple parts of the loss instead of the summed loss, it will return a
tuple which includes the sum of the loss for each part.
"""
self.model.generator.train()
total_loss = None
for batch_idx, data in enumerate(train_data):
losses = self.model.calculate_g_train_loss(data, epoch_idx=epoch_idx)
total_loss = self._optimize_step(losses, total_loss, self.model.generator, self.g_optimizer)
total_loss = [l / len(train_data) for l in total_loss] if isinstance(total_loss, tuple) else total_loss / len(
train_data)
total_loss = tuple(total_loss) if isinstance(total_loss, list) else total_loss
return total_loss
def _d_train_epoch(self, train_data, epoch_idx):
r"""Train the discriminator module in an epoch
Args:
train_data (DataLoader): the train data
epoch_idx (int): the current epoch id
Returns:
float/tuple: The sum of loss returned by all batches in this epoch. If the loss in each batch contains
multiple parts and the model returns these multiple parts of the loss instead of the summed loss, it will return a
tuple which includes the sum of the loss for each part.
"""
self.model.discriminator.train()
total_loss = None
real_data = self._get_real_data(train_data)
real_dataloader = DataLoader(real_data, batch_size=self.model.batch_size, shuffle=True, drop_last=True)
fake_data = self.model.sample(self.d_sample_num)
fake_dataloader = DataLoader(fake_data, batch_size=self.model.batch_size, shuffle=True, drop_last=True)
for _ in range(self.d_sample_training_epochs): # d_epoch
for real_data, fake_data in zip(real_dataloader, fake_dataloader):
losses = self.model.calculate_d_train_loss(real_data, fake_data, epoch_idx=epoch_idx)
total_loss = self._optimize_step(losses, total_loss, self.model.discriminator, self.d_optimizer)
return total_loss / min(len(real_dataloader), len(fake_dataloader)) / self.d_sample_training_epochs
def _adversarial_train_epoch(self, train_data, epoch_idx):
r"""Adversarial training in an epoch
Args:
train_data (DataLoader): the train data
epoch_idx (int): the current epoch id
Returns:
float/tuple: The sum of loss returned by all batches in this epoch. If the loss in each batch contains
multiple parts and the model returns these multiple parts of the loss instead of the summed loss, it will return a
tuple which includes the sum of the loss for each part.
"""
self.model.generator.train()
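# Schedule for one adversarial epoch: a single adversarial update of the generator
# (via calculate_g_adversarial_loss), followed by adversarail_d_epochs rounds of
# discriminator re-training on freshly sampled data.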
total_loss = None
losses = self.model.calculate_g_adversarial_loss(epoch_idx=epoch_idx)
total_loss = self._optimize_step(losses, total_loss, self.model.generator, self.g_optimizer)
for epoch_idx in range(self.adversarail_d_epochs):
self._d_train_epoch(train_data, epoch_idx=epoch_idx)
return total_loss
def fit(self, train_data, valid_data=None, verbose=True, saved=True):
# generator pretraining
if verbose:
self.logger.info("Start generator pretraining...")
for epoch_idx in range(self.g_pretraining_epochs):
training_start_time = time()
train_loss = self._g_train_epoch(train_data, epoch_idx)
self.g_pretraining_loss_dict[epoch_idx] = sum(train_loss) if isinstance(train_loss, tuple) else train_loss
training_end_time = time()
train_loss_output = \
self._generate_train_loss_output(epoch_idx, training_start_time, training_end_time, train_loss,
"generator pre")
if verbose:
self.logger.info(train_loss_output)
if verbose:
self.logger.info("End generator pretraining...")
# discriminator pretraining
if verbose:
self.logger.info("Start discriminator pretraining...")
for epoch_idx in range(self.d_pretraining_epochs):
training_start_time = time()
train_loss = self._d_train_epoch(train_data, epoch_idx)
self.d_pretraining_loss_dict[epoch_idx] = sum(train_loss) if isinstance(train_loss, tuple) else train_loss
training_end_time = time()
train_loss_output = \
self._generate_train_loss_output(epoch_idx, training_start_time, training_end_time, train_loss,
"discriminator pre")
if verbose:
self.logger.info(train_loss_output)
if verbose:
self.logger.info("End discriminator pretraining...")
# adversarial training
if verbose:
self.logger.info("Start adversarial training...")
for epoch_idx in range(self.adversarail_training_epochs):
training_start_time = time()
train_loss = self._adversarial_train_epoch(train_data, epoch_idx)
self.train_loss_dict[epoch_idx] = sum(train_loss) if isinstance(train_loss, tuple) else train_loss
training_end_time = time()
train_loss_output = \
self._generate_train_loss_output(epoch_idx, training_start_time, training_end_time, train_loss)
if verbose:
self.logger.info(train_loss_output)
if verbose:
self.logger.info("End adversarial pretraining...")
self._save_checkpoint(self.adversarail_training_epochs)
return -1, None
class TextGANTrainer(GANTrainer):
r"""TextGANTrainer is designed for TextGAN.
"""
def __init__(self, config, model):
super(TextGANTrainer, self).__init__(config, model)
self.adversarail_g_epochs = config['adversarail_g_epochs']
def _d_train_epoch(self, train_data, epoch_idx):
self.model.discriminator.train()
total_loss = None
real_data = self._get_real_data(train_data)
real_dataloader = DataLoader(real_data, batch_size=self.model.batch_size, shuffle=True, drop_last=True)
for _ in range(self.d_sample_training_epochs):
for idx, real_data in enumerate(real_dataloader):
fake_data, z = self.model.sample()
losses = self.model.calculate_d_train_loss(real_data, fake_data, z, epoch_idx=epoch_idx)
total_loss = self._optimize_step(losses, total_loss, self.model.discriminator, self.d_optimizer)
if (idx * self.model.batch_size >= self.d_sample_num):
break
return total_loss / min(len(real_dataloader), self.d_sample_num // self.model.batch_size) / self.d_sample_training_epochs
def _adversarial_train_epoch(self, train_data, epoch_idx):
self.model.generator.train()
total_loss = None
real_data = self._get_real_data(train_data)
real_dataloader = DataLoader(real_data, batch_size=self.model.batch_size, shuffle=True, drop_last=True)
for idx, real_data in enumerate(real_dataloader):
if (idx == self.adversarail_g_epochs):
break
losses = self.model.calculate_g_adversarial_loss(real_data, epoch_idx=epoch_idx)
total_loss = self._optimize_step(losses, total_loss, self.model.generator, self.g_optimizer)
for epoch_idx in range(self.adversarail_d_epochs):
self._d_train_epoch(train_data, epoch_idx=epoch_idx)
return total_loss / min(len(real_dataloader), self.adversarail_g_epochs)
class RankGANTrainer(GANTrainer):
r"""RankGANTrainer is designed for RankGAN.
"""
def __init__(self, config, model):
super(RankGANTrainer, self).__init__(config, model)
def _d_train_epoch(self, train_data, epoch_idx):
r"""Train the discriminator module in an epoch
Args:
train_data (DataLoader): the train data
epoch_idx (int): the current epoch id
Returns:
float/tuple: The sum of loss returned by all batches in this epoch. If the loss in each batch contains
multiple parts and the model returns these multiple parts of the loss instead of the summed loss, it will return a
tuple which includes the sum of the loss for each part.
"""
self.model.discriminator.train()
total_loss = None
real_data = self._get_real_data(train_data)
real_dataloader = DataLoader(real_data, batch_size=self.model.batch_size, shuffle=True, drop_last=True)
fake_data = self.model.sample(self.d_sample_num)
fake_dataloader = DataLoader(fake_data, batch_size=self.model.batch_size, shuffle=True, drop_last=True)
ref_index = np.random.randint(0, real_data.shape[0], size=self.model.ref_size)
ref_data = real_data[ref_index] # ref_size * l
for _ in range(self.d_sample_training_epochs):
for real_data, fake_data in zip(real_dataloader, fake_dataloader):
losses = self.model.calculate_d_train_loss(real_data, fake_data, ref_data, epoch_idx=epoch_idx)
total_loss = self._optimize_step(losses, total_loss, self.model.discriminator, self.d_optimizer)
return total_loss / min(len(real_dataloader), len(fake_dataloader)) / self.d_sample_training_epochs
def _adversarial_train_epoch(self, train_data, epoch_idx):
r"""Adversarial training in an epoch
Args:
train_data (DataLoader): the train data
epoch_idx (int): the current epoch id
Returns:
float/tuple: The sum of loss returned by all batches in this epoch. If the loss in each batch contains
multiple parts and the model returns these multiple parts of the loss instead of the summed loss, it will return a
tuple which includes the sum of the loss for each part.
"""
self.model.generator.train()
total_loss = None
real_data = self._get_real_data(train_data)
ref_index = np.random.randint(0, real_data.shape[0], size=self.model.ref_size)
ref_data = real_data[ref_index] # ref_size * l
losses = self.model.calculate_g_adversarial_loss(ref_data, epoch_idx=epoch_idx)
total_loss = self._optimize_step(losses, total_loss, self.model.generator, self.g_optimizer)
d_loss = 0
for epoch_idx in range(self.adversarail_d_epochs):
d_loss += self._d_train_epoch(train_data, epoch_idx=epoch_idx)
d_loss = d_loss / self.adversarail_d_epochs
return total_loss
class ConditionalTrainer(Trainer):
r"""ConditionalTrainer is designed for seq2seq testing, which is a typically used setting.
"""
def __init__(self, config, model):
super(ConditionalTrainer, self).__init__(config, model)
@torch.no_grad()
def evaluate(self, eval_data, load_best_model=True, model_file=None):
r"""Evaluate the model based on the eval data.
Args:
eval_data (DataLoader): the eval data
load_best_model (bool, optional): whether load the best model in the training process, default: True.
It should be set True, if users want to test the model after training.
model_file (str, optional): the saved model file, default: None. If users want to test the previously
trained model file, they can set this parameter.
Returns:
dict: eval result, key is the eval metric and value is the corresponding metric value
"""
if load_best_model:
if model_file:
checkpoint_file = model_file
else:
checkpoint_file = self.saved_model_file
checkpoint = torch.load(checkpoint_file)
self.model.load_state_dict(checkpoint['state_dict'])
message_output = 'Loading model structure and parameters from {}'.format(checkpoint_file)
self.logger.info(message_output)
self.model.eval()
generate_corpus = self.model.generate(eval_data)
self._save_generated_text(generate_corpus)
reference_corpus = eval_data.get_reference()
result = self.evaluator.evaluate(generate_corpus, reference_corpus)
return result
class MaskGANTrainer(GANTrainer):
r""" Trainer specifically designed for MaskGAN training process.
"""
def __init__(self, config, model):
super(MaskGANTrainer, self).__init__(config, model)
self.max_length = config["max_seq_length"]
self.eos_token_idx = model.eos_idx
self.adversarail_c_epochs = config['adversarail_c_epochs']
self.g_mask_pretraining_epochs = config['g_mask_pretraining_epochs']
self.g_lr = config['gen_learning_rate']
self.d_lr = config['dis_learning_rate']
self.c_lr = config['critic_learning_rate']
self.g_optimizer = self._build_module_optimizer_(self.model.generator, self.g_lr)
self.d_optimizer = self._build_module_optimizer_(self.model.discriminator, self.d_lr)
self.c_optimizer = self._build_module_optimizer_(self.model.discriminator.critic_fc_linear, self.c_lr)
self.pre_lm_weight = config["pre_lm_weight"]
self.pretrain_lm_epochs = config["pretrain_lm_epochs"]
self.checkp = config['checkp']
def _build_module_optimizer_(self, module, lr):
r""" Init the Module Optimizer with specified learning rate
Returns:
torch.optim: the optimizer
"""
if self.learner.lower() == 'adam':
optimizer = optim.Adam(module.parameters(), lr)
elif self.learner.lower() == 'sgd':
optimizer = optim.SGD(module.parameters(), lr)
elif self.learner.lower() == 'adagrad':
optimizer = optim.Adagrad(module.parameters(), lr)
elif self.learner.lower() == 'rmsprop':
optimizer = optim.RMSprop(module.parameters(), lr)
else:
self.logger.warning('Received unrecognized optimizer, set default Adam optimizer')
optimizer = optim.Adam(module.parameters(), lr)
return optimizer
def _optimize_step(self, losses, total_loss, model, opt, retain_graph=False):
r""" Add retain_graph option
"""
if isinstance(losses, tuple):
loss = sum(losses)
loss_tuple = tuple(per_loss.item() for per_loss in losses)
total_loss = loss_tuple if total_loss is None else tuple(map(sum, zip(total_loss, loss_tuple)))
else:
loss = losses
total_loss = losses.item() if total_loss is None else total_loss + losses.item()
self._check_nan(loss)
opt.zero_grad()
loss.backward(retain_graph=retain_graph)
torch.nn.utils.clip_grad_norm_(model.parameters(), self.grad_clip)
opt.step()
return total_loss
def _generate_train_loss_output(self, epoch_idx, s_time, e_time, losses, train_info=""):
r""" Specified for maskgan output
"""
train_loss_output = "%straining [time: %.2fs, " % (train_info, e_time - s_time)
if isinstance(losses, dict):
for key, loss in losses.items():
train_loss_output += '%s: %.4f, ' % (key, loss)
train_loss_output = train_loss_output[:-2]
else:
train_loss_output += "train loss: %.4f" % losses
return train_loss_output + ']'
def pretrain_lm(self, train_data, valid_data, verbose):
r""" Pretrain rnn-based Language Model with teacher forcing mechanism
"""
def lm_forward(data):
r""" One iteration of LM forward
"""
input = data[:, :-1] # bs * self.max_len - 1
target = data[:, 1:]
bs, seq_len = target.size()
lengths = torch.tensor([seq_len] * bs)
target_present = torch.ones_like(input).byte()
device = target.device
lengths = lengths.cuda(device)
# pretraining forward pass
encoder_outputs = pre_train_lm(input, lengths, target, target_present, pretrain=True)
logit = pre_train_lm.vocab_linear(encoder_outputs)
logit = logit.permute([0, 2, 1])
lossf = torch.nn.CrossEntropyLoss()
loss = lossf(logit, target)
return loss
pre_train_lm = self.model.generator
lm_opt = self._build_module_optimizer_(pre_train_lm, lr=0.001)
for epoch in range(self.pretrain_lm_epochs):
total_loss = None
real_data = self._get_real_data(train_data) # bs * self.max_len
real_dataloader = DataLoader(real_data, batch_size=self.model.batch_size, shuffle=True, drop_last=True)
for batch_idx, data in enumerate(real_dataloader):
loss = lm_forward(data)
total_loss = self._optimize_step(loss, total_loss, pre_train_lm, lm_opt)
total_loss = total_loss / len(real_dataloader)
if verbose:
self.logger.info("Epoch {}/{} of LM pretraining loss: {} ".format(epoch+1, self.pretrain_lm_epochs, total_loss))
ppl = 0.0
if (epoch+1) % 1 == 0:
pre_train_lm.eval()
validate_data = self._get_real_data(valid_data) # bs * self.max_len
validate_dataloader = DataLoader(validate_data, batch_size=self.model.batch_size, shuffle=True, drop_last=True)
ppl = 0.0
for batch_idx, data in enumerate(validate_dataloader):
cross_entropy_loss = lm_forward(data)
ppl += math.exp(cross_entropy_loss.item())
ppl = ppl / len(validate_dataloader)
pre_train_lm.train()
if verbose:
self.logger.info("Epoch {}/{} of LM pretraining PPL: {}...".format(epoch + 1, self.pretrain_lm_epochs, ppl))
if ppl < 110:
state_dict = {
'embedder': pre_train_lm.embedder,
'encoder': pre_train_lm.encoder.encoder,
'vocab_linear': pre_train_lm.vocab_linear
}
self.pre_lm_weight = "saved/pretrain_lm_weight" + str(epoch+1) + ".pkl"
torch.save(state_dict, self.pre_lm_weight)
if verbose:
self.logger.info("End LM pretraining. PPL: {}".format(ppl))
self.logger.info("Weigth saved in {}".format(self.pre_lm_weight))
return pre_train_lm, ppl
def _g_train_epoch(self, train_data, epoch_idx):
self.model.generator.train()
total_loss = None
real_data = self._get_real_data(train_data) # bs * self.max_len
real_dataloader = DataLoader(real_data, batch_size=self.model.batch_size, shuffle=True, drop_last=True)
for batch_idx, data in enumerate(real_dataloader):
loss = self.model.calculate_g_train_loss(data, epoch_idx=epoch_idx)
total_loss = self._optimize_step(loss, total_loss, self.model.generator, self.g_optimizer)
total_loss = total_loss / len(real_dataloader)
return total_loss
def _get_validate_ppl(self, validate_data, epoch_idx):
self.model.generator.eval()
ppl = 0.0
validate_data = self._get_real_data(validate_data) # bs * self.max_len
validate_dataloader = DataLoader(validate_data, batch_size=self.model.batch_size, shuffle=True, drop_last=True)
for batch_idx, data in enumerate(validate_dataloader):
loss = self.model.calculate_g_train_loss(data, epoch_idx=epoch_idx, validate=True)
ppl += math.exp(loss.item())
ppl = ppl / len(validate_dataloader)
self.model.generator.train()
return ppl
def _d_train_epoch(self, train_data, epoch_idx):
self.model.discriminator.train()
total_loss = None
real_data = self._get_real_data(train_data)
real_dataloader = DataLoader(real_data, batch_size=self.model.batch_size, shuffle=True, drop_last=True)
for batch_idx, data in enumerate(real_dataloader):
losses = self.model.calculate_d_train_loss(data, epoch_idx=epoch_idx)
total_loss = self._optimize_step(losses, total_loss, self.model.discriminator, self.d_optimizer)
return total_loss / len(real_dataloader)
def _adversarial_train_epoch(self, train_data, epoch_idx):
r""" Specified for MaskGAN adversarial training
"""
dis_total_loss = None
gen_total_loss = None
critic_total_loss = None
g_num = 0.0
d_num = 0.0
real_data = self._get_real_data(train_data)
real_dataloader = DataLoader(real_data, batch_size=self.model.batch_size, shuffle=True, drop_last=True)
dis_train_data = copy.deepcopy(real_dataloader)
gen_train_data = copy.deepcopy(real_dataloader)
c_train_data = copy.deepcopy(real_dataloader)
dis_train_data = iter(dis_train_data)
gen_train_data = iter(gen_train_data)
_ = next(dis_train_data) # consume one batch so the discriminator stream is offset from the generator stream
for g_x in gen_train_data:
g_num += 1
for _ in range(3):
d_num += 1
try:
d_x = next(dis_train_data)
except StopIteration:
del dis_train_data
dis_train_data = copy.deepcopy(real_dataloader)
dis_train_data = iter(dis_train_data)
d_x = next(dis_train_data)
losses = self.model.calculate_d_train_loss(d_x, epoch_idx=_)
dis_total_loss = self._optimize_step(losses, dis_total_loss, self.model.discriminator, self.d_optimizer)
gen_losses, critic_losses = self.model.calculate_g_adversarial_loss(g_x, epoch_idx=g_num)
gen_total_loss = self._optimize_step(gen_losses, gen_total_loss, self.model.generator, self.g_optimizer)
critic_total_loss = self._optimize_step(critic_losses, critic_total_loss, self.model.discriminator.critic_fc_linear, self.c_optimizer)
return {"dis_loss": dis_total_loss / d_num, "gen_loss": gen_total_loss / g_num, "critic_loss": critic_total_loss / g_num}
def _evaluate_nll_test(self, eval_data):
total_loss = 0
real_data = self._get_real_data(eval_data)
real_dataloader = DataLoader(real_data, batch_size=self.model.batch_size, shuffle=True, drop_last=True)
for batch_idx, data in enumerate(real_dataloader):
nll_test = self.model.calculate_nll_test(data, batch_idx)
total_loss += float(nll_test)
return total_loss / len(eval_data)
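# _add_eos drops the leading token of each sequence (typically the start-of-sequence marker) and
# right-pads with the EOS index up to self.max_length, so adversarial training sees fixed-length,
# EOS-padded targets.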
def _add_eos(self, data, length):
batch_size, pad_seq_len = data.size()
padded_data = torch.full((batch_size, self.max_length), self.eos_token_idx, dtype=torch.long, device=self.device)
for i in range(batch_size):
l = int(length[i].cpu().data)
if l == self.max_length+2:
padded_data[i, :] = data[i, 1:l-1]
else:
padded_data[i, 0:l-1] = data[i, 1:l]
return padded_data
def _get_real_data(self, train_data):
real_datas = []
for corpus in train_data:
real_data = corpus['target_idx'] # bs*batch_max_seq_len
length = corpus['target_length']
real_data = self._add_eos(real_data, length)
real_datas.append(real_data)
real_datas = torch.cat(real_datas, dim=0)
return real_datas
def _save_checkpoint(self, epoch, postfix=None):
state = {
'config': self.config,
'epoch': epoch,
'cur_step': self.cur_step,
'best_valid_score': self.best_valid_score,
'state_dict': self.model.state_dict(),
'g_opt': self.g_optimizer.state_dict(),
'd_opt': self.d_optimizer.state_dict(),
'c_opt':self.c_optimizer.state_dict()
}
if postfix is not None:
path = self.saved_model_file + "_" + str(epoch) + "_" + postfix
torch.save(state, path)
return path
else:
torch.save(state, self.saved_model_file)
def _load_generated_text(self):
r""" Load the generated text by our model to log.
"""
with open(self.saved_text_file, 'r') as fin:
samples = []
for i in range(5):
text = fin.readline()
samples.append(text)
return samples
def fit(self, train_data, valid_data=None, verbose=True, saved=True):
# generator pretraining
if self.checkp is not None:
checkpoint = torch.load(self.checkp)
self.model.load_state_dict(checkpoint['state_dict'])
self.d_optimizer.load_state_dict(checkpoint["d_opt"])
self.g_optimizer.load_state_dict(checkpoint["g_opt"])
epoch_check = checkpoint['epoch']
if verbose:
self.logger.info("Load checkpoint file from: {}".format(self.checkp))
else:
if self.pre_lm_weight is None:
if verbose:
self.logger.info("Start LM pretraining...")
pretrain_lm, ppl = self.pretrain_lm(train_data, valid_data, verbose)
pretrain_lm = torch.load(self.pre_lm_weight)
embedder = pretrain_lm['embedder'].state_dict()
lstm = pretrain_lm['encoder'].state_dict()
vocab_linear = pretrain_lm['vocab_linear'].state_dict()
self.model.generator.embedder.load_state_dict(embedder)
self.model.generator.encoder.encoder.load_state_dict(lstm)
self.model.generator.decoder.decoder.load_state_dict(lstm)
self.model.generator.vocab_linear.load_state_dict(vocab_linear)
self.model.discriminator.encoder.encoder.load_state_dict(lstm)
self.model.discriminator.decoder.decoder.load_state_dict(lstm)
if verbose:
self.logger.info("Load pretrained LM weight")
else:
pretrain_lm = torch.load(self.pre_lm_weight)
embedder = pretrain_lm['embedder'].state_dict()
lstm = pretrain_lm['encoder'].state_dict()
vocab_linear = pretrain_lm['vocab_linear'].state_dict()
self.model.generator.embedder.load_state_dict(embedder)
self.model.generator.encoder.encoder.load_state_dict(lstm)
self.model.generator.decoder.decoder.load_state_dict(lstm)
self.model.generator.vocab_linear.load_state_dict(vocab_linear)
self.model.discriminator.encoder.encoder.load_state_dict(lstm)
self.model.discriminator.decoder.decoder.load_state_dict(lstm)
if verbose:
self.logger.info("Load pretrained LM weight from: {}".format(self.pre_lm_weight))
if verbose:
self.logger.info("Start generator mask pretraining...")
for epoch_idx in range(self.g_mask_pretraining_epochs):
training_start_time = time()
train_loss = self._g_train_epoch(train_data, epoch_idx)
self.g_pretraining_loss_dict[epoch_idx] = sum(train_loss) if isinstance(train_loss, tuple) else train_loss
training_end_time = time()
train_loss_output = \
self._generate_train_loss_output(epoch_idx, training_start_time, training_end_time, train_loss,
"generator pre")
if verbose:
self.logger.info(train_loss_output)
ppl = self._get_validate_ppl(valid_data, epoch_idx)
if verbose:
self.logger.info(
"Epoch {}/{} of mask pretraining PPL: {}...".format(epoch_idx + 1, self.g_mask_pretraining_epochs, ppl))
if ppl <= 90:
if verbose:
path = self._save_checkpoint(epoch_idx + 1, postfix="pretrain_gen")
self.logger.info(">>>> [Pretrain Gen] PPL: {} save weight in {}".format(ppl, path))
self.logger.info("End generator mask pretraining...")
break
if (epoch_idx) % 10 == 0:
self.logger.info(">>>> [Pretrain Gen] Save pretrain gen check in epoch %d ..." % (epoch_idx + 1))
path = self._save_checkpoint(epoch_idx + 1, postfix="pretrain_gen")
self.model.eval()
test_result = self.evaluate(valid_data, model_file=path)
self.model.train()
sample = self._load_generated_text()
tmp = "\n"
for i, s in enumerate(sample):
tmp += str(i)
tmp += ": "
tmp += s.strip()
tmp += "\n"
self.logger.info('>>>> [Pretrain Gen] test result: {}'.format(test_result))
self.logger.info('>>>> [Pretrain Gen] test result samples: {}'.format(tmp))
# discriminator pretraining
if verbose:
self.logger.info("Start discriminator pretraining...")
for epoch_idx in range(self.d_pretraining_epochs):
training_start_time = time()
train_loss = self._d_train_epoch(train_data, epoch_idx)
self.d_pretraining_loss_dict[epoch_idx] = sum(train_loss) if isinstance(train_loss, tuple) else train_loss
training_end_time = time()
train_loss_output = \
self._generate_train_loss_output(epoch_idx, training_start_time, training_end_time, train_loss,
"discriminator pre")
if verbose:
self.logger.info(train_loss_output)
if verbose:
self.logger.info("End discriminator pretraining...")
# adversarial training
if verbose:
self.logger.info("Start adversarial training...")
for epoch_idx in range(self.adversarail_training_epochs):
training_start_time = time()
train_loss = self._adversarial_train_epoch(train_data, epoch_idx)
self.train_loss_dict[epoch_idx] = sum(train_loss) if isinstance(train_loss, tuple) else train_loss
training_end_time = time()
train_loss_output = \
self._generate_train_loss_output(epoch_idx, training_start_time, training_end_time, train_loss)
if verbose:
self.logger.info(train_loss_output)
if (epoch_idx+1) % 10 == 0:
path = self._save_checkpoint((epoch_idx + 1), postfix="adv_train")
self.model.eval()
test_result = self.evaluate(valid_data, model_file=path)
self.model.train()
sample = self._load_generated_text()
tmp = "\n"
for i, s in enumerate(sample):
tmp += str(i)
tmp += ": "
tmp += s.strip()
tmp += "\n"
self.logger.info('>>>>>> [Adv] test result: {}'.format(test_result))
self.logger.info('>>>>>> [Adv] test result samples: {}'.format(tmp))
if verbose:
self.logger.info("End adversarial pretraining...")
self._save_checkpoint(self.adversarail_training_epochs)
return -1, None
class LeakGANTrainer(GANTrainer):
r"""Specified for leakgan trainer
"""
def __init__(self, config, model):
super(LeakGANTrainer, self).__init__(config, model)
self.interleaved_pretrain_epoch = config['interleaved_pretrain_epoch']
self.adversarail_g_epochs = config['adversarail_g_epochs']
gen_lr = config['generator_lr'] # 0.001
dis_lr = config['discriminator_lr'] # 0.00005
self.g_optimizer = self._build_module_optimizer_(self.model.generator, gen_lr) # (manager_opt, worker_opt)
self.d_optimizer = self._build_module_optimizer_(self.model.discriminator, dis_lr)
self.iters_num = config['iter_num']
self.end_idx = model.end_idx
def _build_module_optimizer_(self, module, learing_rate):
r"""Specified for leakgan
"""
multi_flag = False
if module._get_name() == 'LeakGANGenerator':
manager_params, worker_params = module.split_params()
multi_flag = True
if self.learner.lower() == 'adam':
if multi_flag:
manager_opt = optim.Adam(manager_params, lr=learing_rate)
worker_opt = optim.Adam(worker_params, lr=learing_rate)
else:
optimizer = optim.Adam(module.parameters(), lr=learing_rate)
elif self.learner.lower() == 'sgd':
if multi_flag:
manager_opt = optim.SGD(manager_params, lr=learing_rate)
worker_opt = optim.SGD(worker_params, lr=learing_rate)
else:
optimizer = optim.SGD(module.parameters(), lr=learing_rate)
elif self.learner.lower() == 'adagrad':
if multi_flag:
manager_opt = optim.Adagrad(manager_params, lr=learing_rate)
worker_opt = optim.Adagrad(worker_params, lr=learing_rate)
else:
optimizer = optim.Adagrad(module.parameters(), lr=learing_rate)
elif self.learner.lower() == 'rmsprop':
if multi_flag:
manager_opt = optim.RMSprop(manager_params, lr=learing_rate)
worker_opt = optim.RMSprop(worker_params, lr=learing_rate)
else:
optimizer = optim.RMSprop(module.parameters(), lr=learing_rate)
else:
self.logger.warning('Received unrecognized optimizer, set default Adam optimizer')
if multi_flag:
manager_opt = optim.Adam(manager_params, lr=learing_rate)
worker_opt = optim.Adam(worker_params, lr=learing_rate)
else:
optimizer = optim.Adam(module.parameters(), lr=learing_rate)
if multi_flag:
return (manager_opt, worker_opt)
else:
return optimizer
def _optimize_step(self, losses, total_loss, model, opt):
r"""Specified for leakgan optimize
"""
if isinstance(losses, tuple):
loss = sum(losses)
loss_tuple = tuple(per_loss.item() for per_loss in losses)
total_loss = loss_tuple if total_loss is None else tuple(map(sum, zip(total_loss, loss_tuple)))
else:
loss = losses
total_loss = losses.item() if total_loss is None else total_loss + losses.item()
self._check_nan(loss)
if isinstance(losses, tuple):
for i, (o, loss) in enumerate(zip(opt, losses)):
o.zero_grad()
loss.backward(retain_graph=True if i < len(opt) - 1 else False)
torch.nn.utils.clip_grad_norm_(model.parameters(), self.grad_clip)
o.step()
else:
opt.zero_grad()
losses.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), self.grad_clip)
opt.step()
return total_loss
def _generate_train_loss_output(self, epoch_idx, s_time, e_time, losses, train_info=""):
r"""Specified for leakgan output format
"""
train_loss_output = "%straining [time: %.2fs, " % (train_info, e_time - s_time)
if isinstance(losses, dict):
for key, loss in losses.items():
train_loss_output += '%s: %.4f, ' % (key, loss)
train_loss_output = train_loss_output[:-2]
else:
train_loss_output += "train loss: %.4f" % losses
return train_loss_output + ']'
def _add_eos(self, data, length):
batch_size = data.shape[0]
padded_data = torch.full((batch_size, self.max_length), self.end_idx, dtype=torch.long, device=self.device)
for i in range(batch_size):
seq_len = int(length[i].cpu().data)
padded_data[i, :seq_len] = data[i, :seq_len]
return padded_data
def _get_real_data(self, train_data):
r"""Specified for leakgan which use eos_idx pad not pad_idx
"""
real_datas = []
for corpus in train_data:
real_data = corpus['target_idx']
length = corpus['target_length']
real_data = self._add_eos(real_data, length)
real_datas.append(real_data)
real_datas = torch.cat(real_datas, dim=0)
return real_datas
def _adversarial_train_epoch(self, train_data, epoch_idx):
r"""Specified for leakgan adversarial training
"""
self.model.generator.train()
total_g_loss = None
total_d_loss = 0
total_d_acc = 0
adv_mana_loss = 0
adv_work_loss = 0
adv_d_loss = 0
for e in range(self.adversarail_g_epochs):
losses = self.model.calculate_g_adversarial_loss(epoch_idx=e)
total_g_loss = self._optimize_step(losses, total_g_loss, self.model.generator, self.g_optimizer)
adv_mana_loss, adv_work_loss = total_g_loss
adv_mana_loss = adv_mana_loss / self.adversarail_g_epochs
adv_work_loss = adv_work_loss / self.adversarail_g_epochs
for e in range(self.adversarail_d_epochs):
loss_dict = self._d_train_epoch(train_data, epoch_idx=epoch_idx)
total_d_loss = total_d_loss + loss_dict['total_loss']
total_d_acc = total_d_acc + loss_dict['train_acc']
adv_d_loss = total_d_loss / self.adversarail_d_epochs
adv_c_loss = total_d_acc / self.adversarail_d_epochs
return {"mana_loss": adv_mana_loss, "work_loss": adv_work_loss, "dis_loss": adv_d_loss, "train_acc": adv_c_loss}
def _g_train_epoch(self, train_data, epoch_idx):
total_loss = None
real_data = self._get_real_data(train_data)
real_dataloader = DataLoader(real_data, batch_size=self.model.batch_size, shuffle=True, drop_last=True)
for batch_idx, data in enumerate(real_dataloader):
# interaction = interaction.to(self.device)
losses = self.model.calculate_g_train_loss(data, epoch_idx=epoch_idx)
total_loss = self._optimize_step(losses, total_loss, self.model.generator, self.g_optimizer)
total_loss = [l / len(real_dataloader) for l in total_loss] if isinstance(total_loss, tuple) else total_loss / len(real_dataloader)
mana_loss, work_loss = total_loss
return {"mana_loss": mana_loss, "work_loss": work_loss}
def _d_train_epoch(self, train_data, epoch_idx):
total_loss = None
total_acc = 0
real_data = self._get_real_data(train_data)
real_dataloader = DataLoader(real_data, batch_size=self.model.batch_size, shuffle=True, drop_last=True)
# no need to sample self.d_sample_num sequences because the discriminator is only trained on d_sample_training_epochs batches
d_sample_num = (self.d_sample_training_epochs + 1) * self.model.batch_size
fake_data = self.model.sample(d_sample_num)
fake_dataloader = DataLoader(fake_data, batch_size=self.model.batch_size, shuffle=True, drop_last=True)
idx = 0
for real_data, fake_data in zip(real_dataloader, fake_dataloader):
# self.model.discriminator.eval() # pretraining does not use dropout
if idx == self.d_sample_training_epochs:
break
losses, acc = self.model.calculate_d_train_loss(real_data, fake_data, epoch_idx=epoch_idx)
total_loss = self._optimize_step(losses, total_loss, self.model.discriminator, self.d_optimizer)
total_acc = total_acc + acc
idx += 1
total_loss = total_loss / self.d_sample_training_epochs
total_acc = total_acc / self.d_sample_training_epochs
return {"total_loss": total_loss, "train_acc": total_acc}
def fit(self, train_data, valid_data=None, verbose=True, saved=True):
# pretraining
if verbose:
self.logger.info(">> Start pretraining")
# generator pretraining
for epoch_idx in range(self.g_pretraining_epochs): # 80
if verbose:
self.logger.info(">>>> [Pretrain Gen] Start %d / %d epochs generator pretraining" % (
epoch_idx + 1, self.g_pretraining_epochs))
training_start_time = time()
train_loss = self._g_train_epoch(train_data, epoch_idx)
training_end_time = time()
train_loss_output = \
self._generate_train_loss_output(epoch_idx + 1, training_start_time, training_end_time, train_loss,
"generator pre")
train_loss_output = ">>>> " + train_loss_output
if verbose:
self.logger.info(train_loss_output)
# discriminator pretraining
for epoch_idx in range(self.d_pretraining_epochs): # 5
if verbose:
self.logger.info(">>>> [Pretrain Dis]Start %d / %d epochs discriminator pretraining..." % (
epoch_idx + 1, self.d_pretraining_epochs))
training_start_time = time()
train_loss = self._d_train_epoch(train_data, epoch_idx)
training_end_time = time()
train_loss_output = \
self._generate_train_loss_output(epoch_idx, training_start_time, training_end_time, train_loss,
"discriminator pre")
train_loss_output = ">>>> " + train_loss_output
if verbose:
self.logger.info(train_loss_output)
if verbose:
self.logger.info(">> End pretraining")
# adversarial training
if verbose:
self.logger.info(">> Start adversarial training")
for epoch in range(int(self.iters_num / self.adversarail_training_epochs)):
if verbose:
self.logger.info(">>>> [Adv] Start epoch %d / 10 interleaved adversarial training" % (epoch + 1))
for epoch_idx in range(self.adversarail_training_epochs):
if verbose:
self.logger.info(">>>>>> [Adv] Start epoch %d / %d adversarial training" % (
epoch_idx + 1, self.adversarail_training_epochs))
training_start_time = time()
train_loss = self._adversarial_train_epoch(train_data, epoch_idx)
# self.train_loss_dict[epoch_idx] = sum(train_loss) if isinstance(train_loss, tuple) else train_loss
training_end_time = time()
train_loss_output = \
self._generate_train_loss_output((epoch_idx + 1), training_start_time, training_end_time,
train_loss,
train_info="adv ")
train_loss_output = ">>>>>> " + train_loss_output
if verbose:
self.logger.info(train_loss_output)
# gen pretrain
for epoch_idx in range(5):
if verbose:
self.logger.info(">>>>>> [Adv] Start epoch %d / 5 pretrain generator" % (epoch_idx + 1))
training_start_time = time()
train_loss = self._g_train_epoch(train_data, epoch_idx)
training_end_time = time()
train_loss_output = \
self._generate_train_loss_output((epoch_idx + 1), training_start_time, training_end_time,
train_loss,
"adv generator pre")
train_loss_output = ">>>>>> " + train_loss_output
if verbose:
self.logger.info(train_loss_output)
# dis pretrain
for epoch_idx in range(5): # d_steps
if verbose:
self.logger.info(">>>>>> [Adv] Start epoch %d / 5 pretrain discriminator" % (epoch_idx + 1))
training_start_time = time()
train_loss = self._d_train_epoch(train_data, epoch_idx)
training_end_time = time()
train_loss_output = \
self._generate_train_loss_output((epoch_idx + 1), training_start_time, training_end_time,
train_loss,
"adv discriminator pre")
train_loss_output = ">>>>>> " + train_loss_output
if verbose:
self.logger.info(train_loss_output)
self._save_checkpoint(self.adversarail_training_epochs)
return -1, None
| # @Time : 2020/11/14
# @Author : <NAME>, <NAME>
# @Email : <EMAIL>
# UPDATE:
# @Time : 2020/12/2, 2020/11/27, 2020/12/3, 2020/12/26
# @Author : <NAME>, <NAME>, <NAME>, <NAME>
# @Email : <EMAIL>, <EMAIL>, <EMAIL>, <EMAIL>
r"""
textbox.trainer.trainer
################################
"""
import os
import torch
import torch.optim as optim
import numpy as np
import matplotlib.pyplot as plt
import copy
import math
from torch.utils.data import DataLoader
from time import time
from logging import getLogger
from textbox.module.Optimizer.optim import ScheduledOptim
from textbox.evaluator import NgramEvaluator, TranslationEvaluator, SummarizationEvaluator
from textbox.utils import ensure_dir, early_stopping
class AbstractTrainer(object):
r"""Trainer Class is used to manage the training and evaluation processes of text generation system models.
AbstractTrainer is an abstract class in which the fit() and evaluate() method should be implemented according
to different training and evaluation strategies.
"""
def __init__(self, config, model):
self.config = config
self.model = model
def fit(self, train_data):
r"""Train the model based on the train data.
"""
raise NotImplementedError('Method [next] should be implemented.')
def evaluate(self, eval_data):
r"""Evaluate the model based on the eval data.
"""
raise NotImplementedError('Method [next] should be implemented.')
class Trainer(AbstractTrainer):
r"""The basic Trainer for basic training and evaluation strategies in text generation systems.
This class defines common functions for training and evaluation processes of most text generation system models,
including fit(), evaluate(), resume_checkpoint() and some other features helpful for model training and evaluation.
Generally speaking, this class can serve most text generation system models, if the training process of the model
simply optimizes a single loss without involving any complex training strategies such as adversarial learning,
pre-training and so on.
Initializing the Trainer needs two parameters: `config` and `model`. `config` records the parameters information
for controlling training and evaluation, such as `learning_rate`, `epochs`, `eval_step` and so on.
More information can be found in [placeholder]. `model` is the instantiated object of a Model Class.
"""
def __init__(self, config, model):
super(Trainer, self).__init__(config, model)
self.logger = getLogger()
self.learner = config['learner']
self.learning_rate = config['learning_rate']
self.epochs = config['epochs']
self.eval_step = min(config['eval_step'], self.epochs)
self.stopping_step = config['stopping_step']
self.test_batch_size = config['eval_batch_size']
self.device = config['device']
self.embedding_size = config['embedding_size']
self.warmup_steps = config['warmup_steps']
self.checkpoint_dir = config['checkpoint_dir']
ensure_dir(self.checkpoint_dir)
saved_model_file = self.config['filename'] + '.pth'
self.saved_model_file = os.path.join(self.checkpoint_dir, saved_model_file)
self.generated_text_dir = config['generated_text_dir']
ensure_dir(self.generated_text_dir)
saved_text_file = self.config['filename'] + '.txt'
self.saved_text_file = os.path.join(self.generated_text_dir, saved_text_file)
self.start_epoch = 0
self.cur_step = 0
self.best_valid_score = 100000000
self.best_valid_result = None
self.train_loss_dict = dict()
self.optimizer = self._build_optimizer()
self.task_type = config['task_type'].lower()
if self.task_type == "translation":
self.evaluator = TranslationEvaluator(config)
elif self.task_type == "summarization":
self.evaluator = SummarizationEvaluator(config)
else:
self.evaluator = NgramEvaluator(config)
self.item_tensor = None
self.tot_item_num = None
self.iid_field = config['ITEM_ID_FIELD']
def _build_optimizer(self):
r"""Init the Optimizer
Returns:
torch.optim: the optimizer
"""
if self.learner.lower() == 'adam':
optimizer = optim.Adam(self.model.parameters(), lr=self.learning_rate)
elif self.learner.lower() == 'sgd':
optimizer = optim.SGD(self.model.parameters(), lr=self.learning_rate)
elif self.learner.lower() == 'adagrad':
optimizer = optim.Adagrad(self.model.parameters(), lr=self.learning_rate)
elif self.learner.lower() == 'rmsprop':
optimizer = optim.RMSprop(self.model.parameters(), lr=self.learning_rate)
elif self.learner.lower() == 'schedule':
optimizer = ScheduledOptim(optim.Adam(self.model.parameters(), betas=(0.9, 0.98), eps=1e-09),
self.learning_rate, self.embedding_size, self.warmup_steps)
else:
self.logger.warning('Received unrecognized optimizer, set default Adam optimizer')
optimizer = optim.Adam(self.model.parameters(), lr=self.learning_rate)
return optimizer
def _train_epoch(self, train_data, epoch_idx):
r"""Train the model in an epoch
Args:
train_data (DataLoader): the train data
epoch_idx (int): the current epoch id
Returns:
float/tuple: The sum of loss returned by all batches in this epoch. If the loss in each batch contains
multiple parts and the model return these multiple parts loss instead of the sum of loss, It will return a
tuple which includes the sum of loss in each part.
"""
self.model.train()
total_loss = None
for batch_idx, data in enumerate(train_data):
self.optimizer.zero_grad()
losses = self.model.calculate_loss(data, epoch_idx=epoch_idx)
if isinstance(losses, tuple):
loss = sum(losses)
loss_tuple = tuple(per_loss.item() for per_loss in losses)
total_loss = loss_tuple if total_loss is None else tuple(map(sum, zip(total_loss, loss_tuple)))
else:
loss = losses
total_loss = losses.item() if total_loss is None else total_loss + losses.item()
self._check_nan(loss)
loss.backward()
self.optimizer.step()
train_loss = tuple(per_loss / len(train_data) for per_loss in total_loss) if isinstance(total_loss, tuple) else total_loss / len(train_data)
return train_loss
def _valid_epoch(self, valid_data):
r"""Valid the model with valid data
Args:
valid_data (DataLoader): the valid data
Returns:
float: valid score
dict: valid result
"""
self.model.eval()
total_loss = None
for batch_idx, data in enumerate(valid_data):
losses = self.model.calculate_loss(data)
if isinstance(losses, tuple):
loss = sum(losses)
loss_tuple = tuple(per_loss.item() for per_loss in losses)
total_loss = loss_tuple if total_loss is None else tuple(map(sum, zip(total_loss, loss_tuple)))
else:
loss = losses
total_loss = losses.item() if total_loss is None else total_loss + losses.item()
self._check_nan(loss)
valid_loss = total_loss / len(valid_data)
ppl = np.exp(valid_loss)
return valid_loss, ppl
def _save_checkpoint(self, epoch):
r"""Store the model parameters information and training information.
Args:
epoch (int): the current epoch id
"""
state = {
'config': self.config,
'epoch': epoch,
'cur_step': self.cur_step,
'best_valid_score': self.best_valid_score,
'state_dict': self.model.state_dict(),
'optimizer': self.optimizer.state_dict(),
}
torch.save(state, self.saved_model_file)
def _save_generated_text(self, generated_corpus):
r"""Store the generated text by our model.
Args:
generated_corpus (list of string list): the generated text to be saved
"""
with open(self.saved_text_file, 'w') as fout:
for tokens in generated_corpus:
fout.write(' '.join(tokens) + '\n')
def resume_checkpoint(self, resume_file):
r"""Load the model parameters information and training information.
Args:
resume_file (file): the checkpoint file
"""
resume_file = str(resume_file)
checkpoint = torch.load(resume_file)
self.start_epoch = checkpoint['epoch'] + 1
self.cur_step = checkpoint['cur_step']
self.best_valid_score = checkpoint['best_valid_score']
# load architecture params from checkpoint
if checkpoint['config']['model'].lower() != self.config['model'].lower():
self.logger.warning('Architecture configuration given in config file is different from that of checkpoint. '
'This may yield an exception while state_dict is being loaded.')
self.model.load_state_dict(checkpoint['state_dict'])
# load optimizer state from checkpoint only when optimizer type is not changed
self.optimizer.load_state_dict(checkpoint['optimizer'])
message_output = 'Checkpoint loaded. Resume training from epoch {}'.format(self.start_epoch)
self.logger.info(message_output)
def _check_nan(self, loss):
if torch.isnan(loss):
raise ValueError('Training loss is nan')
def _generate_train_loss_output(self, epoch_idx, s_time, e_time, losses, train_info=""):
train_loss_output = "epoch %d %straining [time: %.2fs, " % (epoch_idx, train_info, e_time - s_time)
if isinstance(losses, tuple):
for idx, loss in enumerate(losses):
train_loss_output += 'train_loss%d: %.4f, ' % (idx + 1, loss)
train_loss_output = train_loss_output[:-2]
else:
train_loss_output += "train loss: %.4f" % losses
return train_loss_output + ']'
def fit(self, train_data, valid_data=None, verbose=True, saved=True):
r"""Train the model based on the train data and the valid data.
Args:
train_data (DataLoader): the train data
valid_data (DataLoader, optional): the valid data, default: None.
If it's None, the early_stopping is invalid.
verbose (bool, optional): whether to write training and evaluation information to logger, default: True
saved (bool, optional): whether to save the model parameters, default: True
Returns:
(float, dict): best valid score and best valid result. If valid_data is None, it returns (-1, None)
"""
for epoch_idx in range(self.start_epoch, self.epochs):
# train
training_start_time = time()
train_loss = self._train_epoch(train_data, epoch_idx)
self.train_loss_dict[epoch_idx] = sum(train_loss) if isinstance(train_loss, tuple) else train_loss
training_end_time = time()
self._save_checkpoint(epoch_idx)
train_loss_output = \
self._generate_train_loss_output(epoch_idx, training_start_time, training_end_time, train_loss)
if verbose:
self.logger.info(train_loss_output)
# eval
if self.eval_step <= 0 or not valid_data:
if saved:
self._save_checkpoint(epoch_idx)
update_output = 'Saving current: %s' % self.saved_model_file
if verbose:
self.logger.info(update_output)
continue
if (epoch_idx + 1) % self.eval_step == 0:
valid_start_time = time()
with torch.no_grad():
valid_score, valid_result = self._valid_epoch(valid_data)
# valid_loss, ppl
self.best_valid_score, self.cur_step, stop_flag, update_flag = early_stopping(
valid_score, self.best_valid_score, self.cur_step,
max_step=self.stopping_step, bigger=False)
# a better model is expected to give smaller perplexity and loss, hence bigger=False
valid_end_time = time()
valid_score_output = "epoch %d evaluating [time: %.2fs, valid_loss: %f]" % \
(epoch_idx, valid_end_time - valid_start_time, valid_score)
valid_result_output = 'valid ppl: {}'.format(valid_result)
if verbose:
self.logger.info(valid_score_output)
self.logger.info(valid_result_output)
if update_flag:
if saved:
self._save_checkpoint(epoch_idx)
update_output = 'Saving current best: %s' % self.saved_model_file
if verbose:
self.logger.info(update_output)
self.best_valid_result = valid_result
if stop_flag:
stop_output = 'Finished training, best eval result in epoch %d' % \
(epoch_idx - self.cur_step * self.eval_step)
if verbose:
self.logger.info(stop_output)
break
return self.best_valid_score, self.best_valid_result
def _evaluate_nll_test(self, eval_data):
r"""Calculate the negative log-likelihood of the eval_data.
Args:
eval_data (DataLoader): the eval data.
Returns:
Float: NLL_test of the eval data.
"""
total_loss = 0
for epoch_idx, eval_batch in enumerate(eval_data):
nll_test = self.model.calculate_nll_test(eval_batch, epoch_idx)
total_loss += float(nll_test)
return total_loss / len(eval_data)
@torch.no_grad()
def evaluate(self, eval_data, load_best_model=True, model_file=None):
r"""Evaluate the model based on the eval data.
Args:
eval_data (DataLoader): the eval data
load_best_model (bool, optional): whether load the best model in the training process, default: True.
It should be set True, if users want to test the model after training.
model_file (str, optional): the saved model file, default: None. If users want to test the previously
trained model file, they can set this parameter.
Returns:
dict: eval result, key is the eval metric and value in the corresponding metric value
"""
if load_best_model:
if model_file:
checkpoint_file = model_file
else:
checkpoint_file = self.saved_model_file
checkpoint = torch.load(checkpoint_file)
self.model.load_state_dict(checkpoint['state_dict'])
message_output = 'Loading model structure and parameters from {}'.format(checkpoint_file)
self.logger.info(message_output)
self.model.eval()
with torch.no_grad():
generate_corpus = self.model.generate(eval_data)
self._save_generated_text(generate_corpus)
reference_corpus = eval_data.get_reference()
result = self.evaluator.evaluate(generate_corpus, reference_corpus)
result['nll_test'] = self._evaluate_nll_test(eval_data)
return result
def plot_train_loss(self, show=True, save_path=None):
r"""Plot the train loss in each epoch
Args:
show (bool, optional): whether to show this figure, default: True
save_path (str, optional): the data path to save the figure, default: None.
If it's None, it will not be saved.
"""
epochs = list(self.train_loss_dict.keys())
epochs.sort()
values = [float(self.train_loss_dict[epoch]) for epoch in epochs]
plt.plot(epochs, values)
plt.xticks(epochs)
plt.xlabel('Epoch')
plt.ylabel('Loss')
if show:
plt.show()
if save_path:
plt.savefig(save_path)
class UnconditionalTrainer(Trainer):
r"""UnconditionalTrainer is designed for RNN, which is a typical unconditional generator.
"""
def __init__(self, config, model):
super(UnconditionalTrainer, self).__init__(config, model)
class GANTrainer(Trainer):
r"""GANTrainer is designed for GAN, which is a generative adversarial net method.
"""
def __init__(self, config, model):
super(GANTrainer, self).__init__(config, model)
self.optimizer = None
self.g_optimizer = self._build_module_optimizer(self.model.generator)
self.d_optimizer = self._build_module_optimizer(self.model.discriminator)
self.grad_clip = config['grad_clip']
self.g_pretraining_epochs = config['g_pretraining_epochs']
self.d_pretraining_epochs = config['d_pretraining_epochs']
self.d_sample_num = config['d_sample_num']
self.d_sample_training_epochs = config['d_sample_training_epochs']
self.adversarail_training_epochs = config['adversarail_training_epochs']
self.adversarail_d_epochs = config['adversarail_d_epochs']
self.g_pretraining_loss_dict = dict()
self.d_pretraining_loss_dict = dict()
self.max_length = config['max_seq_length'] + 2
self.pad_idx = model.pad_idx
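# Overall GAN schedule implemented by fit(): g_pretraining_epochs of supervised generator
# training, then d_pretraining_epochs of discriminator training on real versus sampled fake
# sequences, and finally adversarail_training_epochs of adversarial updates.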
def _build_module_optimizer(self, module):
r"""Init the Module Optimizer
Args:
module (torch.nn.Module): the torch.nn module whose parameters need an optimizer
Returns:
torch.optim: the optimizer
"""
if self.learner.lower() == 'adam':
optimizer = optim.Adam(module.parameters(), lr=self.learning_rate)
elif self.learner.lower() == 'sgd':
optimizer = optim.SGD(module.parameters(), lr=self.learning_rate)
elif self.learner.lower() == 'adagrad':
optimizer = optim.Adagrad(module.parameters(), lr=self.learning_rate)
elif self.learner.lower() == 'rmsprop':
optimizer = optim.RMSprop(module.parameters(), lr=self.learning_rate)
else:
self.logger.warning('Received unrecognized optimizer, set default Adam optimizer')
optimizer = optim.Adam(module.parameters(), lr=self.learning_rate)
return optimizer
def _optimize_step(self, losses, total_loss, model, opt):
r"""The opt uses the cliped losses to conduct an optimize step to optimize model
and sum up losses to the total_loss.
Args:
losses (torch.Tensor or tuple): The loss to be backward.
total_loss (Float): Total loss in an epoch.
model (torch.nn.Module): The model to be optimized.
opt (torch.optim): The optimizer of the model.
Returns:
torch.Tensor or tuple: Total loss in an epoch, shape: [].
"""
if isinstance(losses, tuple):
loss = sum(losses)
loss_tuple = tuple(per_loss.item() for per_loss in losses)
total_loss = loss_tuple if total_loss is None else tuple(map(sum, zip(total_loss, loss_tuple)))
else:
loss = losses
total_loss = losses.item() if total_loss is None else total_loss + losses.item()
self._check_nan(loss)
opt.zero_grad()
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), self.grad_clip)
opt.step()
return total_loss
def _save_checkpoint(self, epoch):
state = {
'config': self.config,
'epoch': epoch,
'cur_step': self.cur_step,
'best_valid_score': self.best_valid_score,
'state_dict': self.model.state_dict()
}
torch.save(state, self.saved_model_file)
def _add_pad(self, data):
r"""Pad the data to the max length of corpus.
Args:
data (torch.Tensor): The data to be padded, shape: [batch_size, max_batch_length].
Returns:
torch.Tensor: The padded data, shape: [batch_size, max_seq_length].
"""
batch_size = data.shape[0]
padded_data = torch.full((batch_size, self.max_length), self.pad_idx, dtype=torch.long, device=self.device)
padded_data[:, : data.shape[1]] = data
return padded_data
def _get_real_data(self, train_data):
r"""Get the target text index of the corpus train_datas.
Args:
train_data (DataLoader): the train data.
Returns:
torch.Tensor: The target text index, shape: [batch_size, max_batch_length].
"""
real_datas = []
for corpus in train_data:
real_data = corpus['target_idx']
real_data = self._add_pad(real_data)
real_datas.append(real_data)
real_datas = torch.cat(real_datas, dim=0)
return real_datas
def _g_train_epoch(self, train_data, epoch_idx):
r"""Train the generator module in an epoch
Args:
train_data (DataLoader): the train data
epoch_idx (int): the current epoch id
Returns:
float/tuple: The sum of loss returned by all batches in this epoch. If the loss in each batch contains
multiple parts and the model return these multiple parts loss instead of the sum of loss, It will return a
tuple which includes the sum of loss in each part.
"""
self.model.generator.train()
total_loss = None
for batch_idx, data in enumerate(train_data):
losses = self.model.calculate_g_train_loss(data, epoch_idx=epoch_idx)
total_loss = self._optimize_step(losses, total_loss, self.model.generator, self.g_optimizer)
total_loss = [l / len(train_data) for l in total_loss] if isinstance(total_loss, tuple) else total_loss / len(train_data)
total_loss = tuple(total_loss) if isinstance(total_loss, list) else total_loss
return total_loss
def _d_train_epoch(self, train_data, epoch_idx):
r"""Train the discriminator module in an epoch
Args:
train_data (DataLoader): the train data
epoch_idx (int): the current epoch id
Returns:
float/tuple: The sum of loss returned by all batches in this epoch. If the loss in each batch contains
multiple parts and the model return these multiple parts loss instead of the sum of loss, It will return a
tuple which includes the sum of loss in each part.
"""
self.model.discriminator.train()
total_loss = None
real_data = self._get_real_data(train_data)
real_dataloader = DataLoader(real_data, batch_size=self.model.batch_size, shuffle=True, drop_last=True)
fake_data = self.model.sample(self.d_sample_num)
fake_dataloader = DataLoader(fake_data, batch_size=self.model.batch_size, shuffle=True, drop_last=True)
for _ in range(self.d_sample_training_epochs): # d_epoch
for real_data, fake_data in zip(real_dataloader, fake_dataloader):
losses = self.model.calculate_d_train_loss(real_data, fake_data, epoch_idx=epoch_idx)
total_loss = self._optimize_step(losses, total_loss, self.model.discriminator, self.d_optimizer)
return total_loss / min(len(real_dataloader), len(fake_dataloader)) / self.d_sample_training_epochs
def _adversarial_train_epoch(self, train_data, epoch_idx):
r"""Adversarial training in an epoch
Args:
train_data (DataLoader): the train data
epoch_idx (int): the current epoch id
Returns:
float/tuple: The sum of loss returned by all batches in this epoch. If the loss in each batch contains
multiple parts and the model return these multiple parts loss instead of the sum of loss, It will return a
tuple which includes the sum of loss in each part.
"""
self.model.generator.train()
total_loss = None
losses = self.model.calculate_g_adversarial_loss(epoch_idx=epoch_idx)
total_loss = self._optimize_step(losses, total_loss, self.model.generator, self.g_optimizer)
for epoch_idx in range(self.adversarail_d_epochs):
self._d_train_epoch(train_data, epoch_idx=epoch_idx)
return total_loss
def fit(self, train_data, valid_data=None, verbose=True, saved=True):
# generator pretraining
if verbose:
self.logger.info("Start generator pretraining...")
for epoch_idx in range(self.g_pretraining_epochs):
training_start_time = time()
train_loss = self._g_train_epoch(train_data, epoch_idx)
self.g_pretraining_loss_dict[epoch_idx] = sum(train_loss) if isinstance(train_loss, tuple) else train_loss
training_end_time = time()
train_loss_output = \
self._generate_train_loss_output(epoch_idx, training_start_time, training_end_time, train_loss,
"generator pre")
if verbose:
self.logger.info(train_loss_output)
if verbose:
self.logger.info("End generator pretraining...")
# discriminator pretraining
if verbose:
self.logger.info("Start discriminator pretraining...")
for epoch_idx in range(self.d_pretraining_epochs):
training_start_time = time()
train_loss = self._d_train_epoch(train_data, epoch_idx)
self.d_pretraining_loss_dict[epoch_idx] = sum(train_loss) if isinstance(train_loss, tuple) else train_loss
training_end_time = time()
train_loss_output = \
self._generate_train_loss_output(epoch_idx, training_start_time, training_end_time, train_loss,
"discriminator pre")
if verbose:
self.logger.info(train_loss_output)
if verbose:
self.logger.info("End discriminator pretraining...")
# adversarial training
if verbose:
self.logger.info("Start adversarial training...")
for epoch_idx in range(self.adversarail_training_epochs):
training_start_time = time()
train_loss = self._adversarial_train_epoch(train_data, epoch_idx)
self.train_loss_dict[epoch_idx] = sum(train_loss) if isinstance(train_loss, tuple) else train_loss
training_end_time = time()
train_loss_output = \
self._generate_train_loss_output(epoch_idx, training_start_time, training_end_time, train_loss)
if verbose:
self.logger.info(train_loss_output)
if verbose:
self.logger.info("End adversarial pretraining...")
self._save_checkpoint(self.adversarail_training_epochs)
return -1, None
class TextGANTrainer(GANTrainer):
r"""TextGANTrainer is designed for TextGAN.
"""
def __init__(self, config, model):
super(TextGANTrainer, self).__init__(config, model)
self.adversarail_g_epochs = config['adversarail_g_epochs']
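# TextGAN differences from the base GANTrainer: the discriminator is trained against freshly
# sampled (fake_data, z) pairs for every real batch, and the adversarial step updates the
# generator on at most adversarail_g_epochs real batches before the usual discriminator epochs.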
def _d_train_epoch(self, train_data, epoch_idx):
self.model.discriminator.train()
total_loss = None
real_data = self._get_real_data(train_data)
real_dataloader = DataLoader(real_data, batch_size=self.model.batch_size, shuffle=True, drop_last=True)
for _ in range(self.d_sample_training_epochs):
for idx, real_data in enumerate(real_dataloader):
fake_data, z = self.model.sample()
losses = self.model.calculate_d_train_loss(real_data, fake_data, z, epoch_idx=epoch_idx)
total_loss = self._optimize_step(losses, total_loss, self.model.discriminator, self.d_optimizer)
if (idx * self.model.batch_size >= self.d_sample_num):
break
return total_loss / min(len(real_dataloader), self.d_sample_num // self.model.batch_size) / self.d_sample_training_epochs
def _adversarial_train_epoch(self, train_data, epoch_idx):
self.model.generator.train()
total_loss = None
real_data = self._get_real_data(train_data)
real_dataloader = DataLoader(real_data, batch_size=self.model.batch_size, shuffle=True, drop_last=True)
for idx, real_data in enumerate(real_dataloader):
if (idx == self.adversarail_g_epochs):
break
losses = self.model.calculate_g_adversarial_loss(real_data, epoch_idx=epoch_idx)
total_loss = self._optimize_step(losses, total_loss, self.model.generator, self.g_optimizer)
for epoch_idx in range(self.adversarail_d_epochs):
self._d_train_epoch(train_data, epoch_idx=epoch_idx)
return total_loss / min(len(real_dataloader), self.adversarail_g_epochs)
class RankGANTrainer(GANTrainer):
r"""RankGANTrainer is designed for RankGAN.
"""
def __init__(self, config, model):
super(RankGANTrainer, self).__init__(config, model)
def _d_train_epoch(self, train_data, epoch_idx):
r"""Train the discriminator module in an epoch
Args:
train_data (DataLoader): the train data
epoch_idx (int): the current epoch id
Returns:
float/tuple: The sum of loss returned by all batches in this epoch. If the loss in each batch contains
multiple parts and the model return these multiple parts loss instead of the sum of loss, It will return a
tuple which includes the sum of loss in each part.
"""
self.model.discriminator.train()
total_loss = None
real_data = self._get_real_data(train_data)
real_dataloader = DataLoader(real_data, batch_size=self.model.batch_size, shuffle=True, drop_last=True)
fake_data = self.model.sample(self.d_sample_num)
fake_dataloader = DataLoader(fake_data, batch_size=self.model.batch_size, shuffle=True, drop_last=True)
ref_index = np.random.randint(0, real_data.shape[0], size=self.model.ref_size)
ref_data = real_data[ref_index] # ref_size * l
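# ref_data is a fixed reference set of real sentences; following the RankGAN formulation, the
# discriminator scores sequences by ranking them against this reference set rather than with a
# plain binary real/fake classifier.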
for _ in range(self.d_sample_training_epochs):
for real_data, fake_data in zip(real_dataloader, fake_dataloader):
losses = self.model.calculate_d_train_loss(real_data, fake_data, ref_data, epoch_idx=epoch_idx)
total_loss = self._optimize_step(losses, total_loss, self.model.discriminator, self.d_optimizer)
return total_loss / min(len(real_dataloader), len(fake_dataloader)) / self.d_sample_training_epochs
def _adversarial_train_epoch(self, train_data, epoch_idx):
r"""Adversarial training in an epoch
Args:
train_data (DataLoader): the train data
epoch_idx (int): the current epoch id
Returns:
float/tuple: The sum of loss returned by all batches in this epoch. If the loss in each batch contains
multiple parts and the model return these multiple parts loss instead of the sum of loss, It will return a
tuple which includes the sum of loss in each part.
"""
self.model.generator.train()
total_loss = None
real_data = self._get_real_data(train_data)
ref_index = np.random.randint(0, real_data.shape[0], size=self.model.ref_size)
ref_data = real_data[ref_index] # ref_size * l
losses = self.model.calculate_g_adversarial_loss(ref_data, epoch_idx=epoch_idx)
total_loss = self._optimize_step(losses, total_loss, self.model.generator, self.g_optimizer)
d_loss = 0
for epoch_idx in range(self.adversarail_d_epochs):
d_loss += self._d_train_epoch(train_data, epoch_idx=epoch_idx)
d_loss = d_loss / self.adversarail_d_epochs
return total_loss
class ConditionalTrainer(Trainer):
r"""ConditionalTrainer is designed for seq2seq testing, which is a typically used setting.
"""
def __init__(self, config, model):
super(ConditionalTrainer, self).__init__(config, model)
@torch.no_grad()
def evaluate(self, eval_data, load_best_model=True, model_file=None):
r"""Evaluate the model based on the eval data.
Args:
eval_data (DataLoader): the eval data
load_best_model (bool, optional): whether load the best model in the training process, default: True.
It should be set True, if users want to test the model after training.
model_file (str, optional): the saved model file, default: None. If users want to test the previously
trained model file, they can set this parameter.
Returns:
dict: eval result, key is the eval metric and value in the corresponding metric value
"""
if load_best_model:
if model_file:
checkpoint_file = model_file
else:
checkpoint_file = self.saved_model_file
checkpoint = torch.load(checkpoint_file)
self.model.load_state_dict(checkpoint['state_dict'])
message_output = 'Loading model structure and parameters from {}'.format(checkpoint_file)
self.logger.info(message_output)
self.model.eval()
generate_corpus = self.model.generate(eval_data)
self._save_generated_text(generate_corpus)
reference_corpus = eval_data.get_reference()
result = self.evaluator.evaluate(generate_corpus, reference_corpus)
return result
class MaskGANTrainer(GANTrainer):
r""" Trainer specifically designed for MaskGAN training process.
"""
def __init__(self, config, model):
super(MaskGANTrainer, self).__init__(config, model)
self.max_length = config["max_seq_length"]
self.eos_token_idx = model.eos_idx
self.adversarail_c_epochs = config['adversarail_c_epochs']
self.g_mask_pretraining_epochs = config['g_mask_pretraining_epochs']
self.g_lr = config['gen_learning_rate']
self.d_lr = config['dis_learning_rate']
self.c_lr = config['critic_learning_rate']
self.g_optimizer = self._build_module_optimizer_(self.model.generator, self.g_lr)
self.d_optimizer = self._build_module_optimizer_(self.model.discriminator, self.d_lr)
self.c_optimizer = self._build_module_optimizer_(self.model.discriminator.critic_fc_linear, self.c_lr)
self.pre_lm_weight = config["pre_lm_weight"]
self.pretrain_lm_epochs = config["pretrain_lm_epochs"]
self.checkp = config['checkp']
def _build_module_optimizer_(self, module, lr):
r""" Init the Module Optimizer with specified learning rate
Returns:
torch.optim: the optimizer
"""
if self.learner.lower() == 'adam':
optimizer = optim.Adam(module.parameters(), lr)
elif self.learner.lower() == 'sgd':
optimizer = optim.SGD(module.parameters(), lr)
elif self.learner.lower() == 'adagrad':
optimizer = optim.Adagrad(module.parameters(), lr)
elif self.learner.lower() == 'rmsprop':
optimizer = optim.RMSprop(module.parameters(), lr)
else:
self.logger.warning('Received unrecognized optimizer, set default Adam optimizer')
optimizer = optim.Adam(module.parameters(), lr)
return optimizer
def _optimize_step(self, losses, total_loss, model, opt, retain_graph=False):
r""" Add retain_graph option
"""
if isinstance(losses, tuple):
loss = sum(losses)
loss_tuple = tuple(per_loss.item() for per_loss in losses)
total_loss = loss_tuple if total_loss is None else tuple(map(sum, zip(total_loss, loss_tuple)))
else:
loss = losses
total_loss = losses.item() if total_loss is None else total_loss + losses.item()
self._check_nan(loss)
opt.zero_grad()
loss.backward(retain_graph=retain_graph)
torch.nn.utils.clip_grad_norm_(model.parameters(), self.grad_clip)
opt.step()
return total_loss
def _generate_train_loss_output(self, epoch_idx, s_time, e_time, losses, train_info=""):
r""" Specified for maskgan output
"""
train_loss_output = "%straining [time: %.2fs, " % (train_info, e_time - s_time)
if isinstance(losses, dict):
for key, loss in losses.items():
train_loss_output += '%s: %.4f, ' % (key, loss)
train_loss_output = train_loss_output[:-2]
else:
train_loss_output += "train loss: %.4f" % losses
return train_loss_output + ']'
def pretrain_lm(self, train_data, valid_data, verbose):
r""" Pretrain rnn-based Language Model with teacher forcing mechanism
"""
def lm_forward(data):
r""" One iteration of LM forward
"""
input = data[:, :-1] # bs * self.max_len - 1
target = data[:, 1:]
bs, seq_len = target.size()
lengths = torch.tensor([seq_len] * bs)
target_present = torch.ones_like(input).byte()
device = target.device
lengths = lengths.cuda(device)
# pretraining forward pass
encoder_outputs = pre_train_lm(input, lengths, target, target_present, pretrain=True)
logit = pre_train_lm.vocab_linear(encoder_outputs)
logit = logit.permute([0, 2, 1])
lossf = torch.nn.CrossEntropyLoss()
loss = lossf(logit, target)
return loss
pre_train_lm = self.model.generator
lm_opt = self._build_module_optimizer_(pre_train_lm, lr=0.001)
for epoch in range(self.pretrain_lm_epochs):
total_loss = None
real_data = self._get_real_data(train_data) # bs * self.max_len
real_dataloader = DataLoader(real_data, batch_size=self.model.batch_size, shuffle=True, drop_last=True)
for batch_idx, data in enumerate(real_dataloader):
loss = lm_forward(data)
total_loss = self._optimize_step(loss, total_loss, pre_train_lm, lm_opt)
total_loss = total_loss / len(real_dataloader)
if verbose:
self.logger.info("Epoch {}/{} of LM pretraining loss: {} ".format(epoch+1, self.pretrain_lm_epochs, total_loss))
ppl = 0.0
if (epoch+1) % 1 == 0:
pre_train_lm.eval()
validate_data = self._get_real_data(valid_data) # bs * self.max_len
validate_dataloader = DataLoader(validate_data, batch_size=self.model.batch_size, shuffle=True, drop_last=True)
ppl = 0.0
for batch_idx, data in enumerate(validate_dataloader):
cross_entropy_loss = lm_forward(data)
ppl += math.exp(cross_entropy_loss.item())
ppl = ppl / len(validate_dataloader)
pre_train_lm.train()
if verbose:
self.logger.info("Epoch {}/{} of LM pretraining PPL: {}...".format(epoch + 1, self.pretrain_lm_epochs, ppl))
if ppl < 110:
state_dict = {
'embedder': pre_train_lm.embedder,
'encoder': pre_train_lm.encoder.encoder,
'vocab_linear': pre_train_lm.vocab_linear
}
self.pre_lm_weight = "saved/pretrain_lm_weight" + str(epoch+1) + ".pkl"
torch.save(state_dict, self.pre_lm_weight)
if verbose:
self.logger.info("End LM pretraining. PPL: {}".format(ppl))
self.logger.info("Weigth saved in {}".format(self.pre_lm_weight))
return pre_train_lm, ppl
def _g_train_epoch(self, train_data, epoch_idx):
self.model.generator.train()
total_loss = None
real_data = self._get_real_data(train_data) # bs * self.max_len
real_dataloader = DataLoader(real_data, batch_size=self.model.batch_size, shuffle=True, drop_last=True)
for batch_idx, data in enumerate(real_dataloader):
loss = self.model.calculate_g_train_loss(data, epoch_idx=epoch_idx)
total_loss = self._optimize_step(loss, total_loss, self.model.generator, self.g_optimizer)
total_loss = total_loss / len(real_dataloader)
return total_loss
def _get_validate_ppl(self, validate_data, epoch_idx):
self.model.generator.eval()
ppl = 0.0
validate_data = self._get_real_data(validate_data) # bs * self.max_len
validate_dataloader = DataLoader(validate_data, batch_size=self.model.batch_size, shuffle=True, drop_last=True)
for batch_idx, data in enumerate(validate_dataloader):
loss = self.model.calculate_g_train_loss(data, epoch_idx=epoch_idx, validate=True)
ppl += math.exp(loss.item())
ppl = ppl / len(validate_dataloader)
self.model.generator.train()
return ppl
def _d_train_epoch(self, train_data, epoch_idx):
self.model.discriminator.train()
total_loss = None
real_data = self._get_real_data(train_data)
real_dataloader = DataLoader(real_data, batch_size=self.model.batch_size, shuffle=True, drop_last=True)
for batch_idx, data in enumerate(real_dataloader):
losses = self.model.calculate_d_train_loss(data, epoch_idx=epoch_idx)
total_loss = self._optimize_step(losses, total_loss, self.model.discriminator, self.d_optimizer)
return total_loss / len(real_dataloader)
def _adversarial_train_epoch(self, train_data, epoch_idx):
r""" Specified for MaskGAN adversarial training
"""
dis_total_loss = None
gen_total_loss = None
critic_total_loss = None
g_num = 0.0
d_num = 0.0
real_data = self._get_real_data(train_data)
real_dataloader = DataLoader(real_data, batch_size=self.model.batch_size, shuffle=True, drop_last=True)
dis_train_data = copy.deepcopy(real_dataloader)
gen_train_data = copy.deepcopy(real_dataloader)
c_train_data = copy.deepcopy(real_dataloader)
dis_train_data = iter(dis_train_data)
gen_train_data = iter(gen_train_data)
_ = next(dis_train_data) # consume one batch so the discriminator stream is offset from the generator stream
for g_x in gen_train_data:
g_num += 1
            for d_step in range(3):  # three discriminator updates per generator batch
d_num += 1
try:
d_x = next(dis_train_data)
except StopIteration:
del dis_train_data
dis_train_data = copy.deepcopy(real_dataloader)
dis_train_data = iter(dis_train_data)
d_x = next(dis_train_data)
                losses = self.model.calculate_d_train_loss(d_x, epoch_idx=d_step)
dis_total_loss = self._optimize_step(losses, dis_total_loss, self.model.discriminator, self.d_optimizer)
gen_losses, critic_losses = self.model.calculate_g_adversarial_loss(g_x, epoch_idx=g_num)
gen_total_loss = self._optimize_step(gen_losses, gen_total_loss, self.model.generator, self.g_optimizer)
critic_total_loss = self._optimize_step(critic_losses, critic_total_loss, self.model.discriminator.critic_fc_linear, self.c_optimizer)
return {"dis_loss": dis_total_loss / d_num, "gen_loss": gen_total_loss / g_num, "critic_loss": critic_total_loss / g_num}
def _evaluate_nll_test(self, eval_data):
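        # Mean negative log-likelihood of the model over the evaluation data.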
total_loss = 0
real_data = self._get_real_data(eval_data)
real_dataloader = DataLoader(real_data, batch_size=self.model.batch_size, shuffle=True, drop_last=True)
for batch_idx, data in enumerate(real_dataloader):
nll_test = self.model.calculate_nll_test(data, batch_idx)
total_loss += float(nll_test)
return total_loss / len(eval_data)
def _add_eos(self, data, length):
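        # Right-pad every target sequence with the eos token up to self.max_length, dropping the leading token.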
batch_size, pad_seq_len = data.size()
padded_data = torch.full((batch_size, self.max_length), self.eos_token_idx, dtype=torch.long, device=self.device)
for i in range(batch_size):
            seq_len = int(length[i].cpu().data)
            if seq_len == self.max_length + 2:
                # the sequence fills the window: drop its first and last token
                padded_data[i, :] = data[i, 1:seq_len - 1]
            else:
                # drop the first token; the remaining positions keep the eos padding
                padded_data[i, 0:seq_len - 1] = data[i, 1:seq_len]
return padded_data
def _get_real_data(self, train_data):
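        # Concatenate the eos-padded target indices of every corpus batch into a single tensor.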
real_datas = []
for corpus in train_data:
real_data = corpus['target_idx'] # bs*batch_max_seq_len
length = corpus['target_length']
real_data = self._add_eos(real_data, length)
real_datas.append(real_data)
real_datas = torch.cat(real_datas, dim=0)
return real_datas
def _save_checkpoint(self, epoch, postfix=None):
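        # Serialize model, optimizer and trainer state; with a postfix, save to a separate file and return its path.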
state = {
'config': self.config,
'epoch': epoch,
'cur_step': self.cur_step,
'best_valid_score': self.best_valid_score,
'state_dict': self.model.state_dict(),
'g_opt': self.g_optimizer.state_dict(),
'd_opt': self.d_optimizer.state_dict(),
            'c_opt': self.c_optimizer.state_dict()
}
if postfix is not None:
path = self.saved_model_file + "_" + str(epoch) + "_" + postfix
torch.save(state, path)
return path
else:
torch.save(state, self.saved_model_file)
def _load_generated_text(self):
r""" Load the generated text by our model to log.
"""
with open(self.saved_text_file, 'r') as fin:
samples = []
for i in range(5):
text = fin.readline()
samples.append(text)
return samples
def fit(self, train_data, valid_data=None, verbose=True, saved=True):
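        # Pipeline: resume from a checkpoint or pretrain the LM, then mask-pretrain the generator,
        # pretrain the discriminator, and finally run adversarial training.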
# generator pretraining
if self.checkp is not None:
checkpoint = torch.load(self.checkp)
self.model.load_state_dict(checkpoint['state_dict'])
self.d_optimizer.load_state_dict(checkpoint["d_opt"])
self.g_optimizer.load_state_dict(checkpoint["g_opt"])
epoch_check = checkpoint['epoch']
if verbose:
self.logger.info("Load checkpoint file from: {}".format(self.checkp))
else:
if self.pre_lm_weight is None:
if verbose:
self.logger.info("Start LM pretraining...")
pretrain_lm, ppl = self.pretrain_lm(train_data, valid_data, verbose)
pretrain_lm = torch.load(self.pre_lm_weight)
embedder = pretrain_lm['embedder'].state_dict()
lstm = pretrain_lm['encoder'].state_dict()
vocab_linear = pretrain_lm['vocab_linear'].state_dict()
self.model.generator.embedder.load_state_dict(embedder)
self.model.generator.encoder.encoder.load_state_dict(lstm)
self.model.generator.decoder.decoder.load_state_dict(lstm)
self.model.generator.vocab_linear.load_state_dict(vocab_linear)
self.model.discriminator.encoder.encoder.load_state_dict(lstm)
self.model.discriminator.decoder.decoder.load_state_dict(lstm)
if verbose:
self.logger.info("Load pretrained LM weight")
else:
pretrain_lm = torch.load(self.pre_lm_weight)
embedder = pretrain_lm['embedder'].state_dict()
lstm = pretrain_lm['encoder'].state_dict()
vocab_linear = pretrain_lm['vocab_linear'].state_dict()
self.model.generator.embedder.load_state_dict(embedder)
self.model.generator.encoder.encoder.load_state_dict(lstm)
self.model.generator.decoder.decoder.load_state_dict(lstm)
self.model.generator.vocab_linear.load_state_dict(vocab_linear)
self.model.discriminator.encoder.encoder.load_state_dict(lstm)
self.model.discriminator.decoder.decoder.load_state_dict(lstm)
if verbose:
self.logger.info("Load pretrained LM weight from: {}".format(self.pre_lm_weight))
if verbose:
self.logger.info("Start generator mask pretraining...")
for epoch_idx in range(self.g_mask_pretraining_epochs):
training_start_time = time()
train_loss = self._g_train_epoch(train_data, epoch_idx)
self.g_pretraining_loss_dict[epoch_idx] = sum(train_loss) if isinstance(train_loss, tuple) else train_loss
training_end_time = time()
train_loss_output = \
self._generate_train_loss_output(epoch_idx, training_start_time, training_end_time, train_loss,
"generator pre")
if verbose:
self.logger.info(train_loss_output)
ppl = self._get_validate_ppl(valid_data, epoch_idx)
if verbose:
self.logger.info(
"Epoch {}/{} of mask pretraining PPL: {}...".format(epoch_idx + 1, self.g_mask_pretraining_epochs, ppl))
            if ppl <= 90:
                # target perplexity reached: checkpoint regardless of verbosity, then stop pretraining
                path = self._save_checkpoint(epoch_idx + 1, postfix="pretrain_gen")
                if verbose:
                    self.logger.info(">>>> [Pretrain Gen] PPL: {} save weight in {}".format(ppl, path))
                    self.logger.info("End generator mask pretraining...")
                break
            if epoch_idx % 10 == 0:
self.logger.info(">>>> [Pretrain Gen] Save pretrain gen check in epoch %d ..." % (epoch_idx + 1))
path = self._save_checkpoint(epoch_idx + 1, postfix="pretrain_gen")
self.model.eval()
test_result = self.evaluate(valid_data, model_file=path)
self.model.train()
sample = self._load_generated_text()
tmp = "\n"
for i, s in enumerate(sample):
tmp += str(i)
tmp += ": "
tmp += s.strip()
tmp += "\n"
self.logger.info('>>>> [Pretrain Gen] test result: {}'.format(test_result))
self.logger.info('>>>> [Pretrain Gen] test result samples: {}'.format(tmp))
# discriminator pretraining
if verbose:
self.logger.info("Start discriminator pretraining...")
for epoch_idx in range(self.d_pretraining_epochs):
training_start_time = time()
train_loss = self._d_train_epoch(train_data, epoch_idx)
self.d_pretraining_loss_dict[epoch_idx] = sum(train_loss) if isinstance(train_loss, tuple) else train_loss
training_end_time = time()
train_loss_output = \
self._generate_train_loss_output(epoch_idx, training_start_time, training_end_time, train_loss,
"discriminator pre")
if verbose:
self.logger.info(train_loss_output)
if verbose:
self.logger.info("End discriminator pretraining...")
# adversarial training
if verbose:
self.logger.info("Start adversarial training...")
for epoch_idx in range(self.adversarail_training_epochs):
training_start_time = time()
train_loss = self._adversarial_train_epoch(train_data, epoch_idx)
self.train_loss_dict[epoch_idx] = sum(train_loss) if isinstance(train_loss, tuple) else train_loss
training_end_time = time()
train_loss_output = \
self._generate_train_loss_output(epoch_idx, training_start_time, training_end_time, train_loss)
if verbose:
self.logger.info(train_loss_output)
if (epoch_idx+1) % 10 == 0:
path = self._save_checkpoint((epoch_idx + 1), postfix="adv_train")
self.model.eval()
test_result = self.evaluate(valid_data, model_file=path)
self.model.train()
sample = self._load_generated_text()
tmp = "\n"
for i, s in enumerate(sample):
tmp += str(i)
tmp += ": "
tmp += s.strip()
tmp += "\n"
self.logger.info('>>>>>> [Adv] test result: {}'.format(test_result))
self.logger.info('>>>>>> [Adv] test result samples: {}'.format(tmp))
if verbose:
self.logger.info("End adversarial pretraining...")
self._save_checkpoint(self.adversarail_training_epochs)
return -1, None
class LeakGANTrainer(GANTrainer):
r"""Specified for leakgan trainer
"""
def __init__(self, config, model):
super(LeakGANTrainer, self).__init__(config, model)
self.interleaved_pretrain_epoch = config['interleaved_pretrain_epoch']
self.adversarail_g_epochs = config['adversarail_g_epochs']
gen_lr = config['generator_lr'] # 0.001
dis_lr = config['discriminator_lr'] # 0.00005
self.g_optimizer = self._build_module_optimizer_(self.model.generator, gen_lr) # (manager_opt, worker_opt)
self.d_optimizer = self._build_module_optimizer_(self.model.discriminator, dis_lr)
self.iters_num = config['iter_num']
self.end_idx = model.end_idx
    def _build_module_optimizer_(self, module, learning_rate):
        r"""Specified for leakgan: the generator gets a (manager_opt, worker_opt) pair, other modules a single optimizer.
        """
        optimizer_classes = {
            'adam': optim.Adam,
            'sgd': optim.SGD,
            'adagrad': optim.Adagrad,
            'rmsprop': optim.RMSprop
        }
        optim_class = optimizer_classes.get(self.learner.lower())
        if optim_class is None:
            self.logger.warning('Received unrecognized optimizer, set default Adam optimizer')
            optim_class = optim.Adam
        if module._get_name() == 'LeakGANGenerator':
            manager_params, worker_params = module.split_params()
            manager_opt = optim_class(manager_params, lr=learning_rate)
            worker_opt = optim_class(worker_params, lr=learning_rate)
            return (manager_opt, worker_opt)
        return optim_class(module.parameters(), lr=learning_rate)
def _optimize_step(self, losses, total_loss, model, opt):
r"""Specified for leakgan optimize
"""
if isinstance(losses, tuple):
loss = sum(losses)
loss_tuple = tuple(per_loss.item() for per_loss in losses)
total_loss = loss_tuple if total_loss is None else tuple(map(sum, zip(total_loss, loss_tuple)))
else:
loss = losses
total_loss = losses.item() if total_loss is None else total_loss + losses.item()
self._check_nan(loss)
if isinstance(losses, tuple):
for i, (o, loss) in enumerate(zip(opt, losses)):
o.zero_grad()
                loss.backward(retain_graph=(i < len(opt) - 1))
torch.nn.utils.clip_grad_norm_(model.parameters(), self.grad_clip)
o.step()
else:
opt.zero_grad()
losses.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), self.grad_clip)
opt.step()
return total_loss
def _generate_train_loss_output(self, epoch_idx, s_time, e_time, losses, train_info=""):
r"""Specified for leakgan output format
"""
train_loss_output = "%straining [time: %.2fs, " % (train_info, e_time - s_time)
if isinstance(losses, dict):
for key, loss in losses.items():
train_loss_output += '%s: %.4f, ' % (key, loss)
train_loss_output = train_loss_output[:-2]
else:
train_loss_output += "train loss: %.4f" % losses
return train_loss_output + ']'
def _add_eos(self, data, length):
batch_size = data.shape[0]
padded_data = torch.full((batch_size, self.max_length), self.end_idx, dtype=torch.long, device=self.device)
for i in range(batch_size):
            seq_len = int(length[i].cpu().data)  # avoid shadowing the built-in len
            padded_data[i, :seq_len] = data[i, :seq_len]
return padded_data
def _get_real_data(self, train_data):
r"""Specified for leakgan which use eos_idx pad not pad_idx
"""
real_datas = []
for corpus in train_data:
real_data = corpus['target_idx']
length = corpus['target_length']
real_data = self._add_eos(real_data, length)
real_datas.append(real_data)
real_datas = torch.cat(real_datas, dim=0)
return real_datas
def _adversarial_train_epoch(self, train_data, epoch_idx):
r"""Specified for leakgan adversarial training
"""
self.model.generator.train()
total_g_loss = None
total_d_loss = 0
total_d_acc = 0
adv_mana_loss = 0
adv_work_loss = 0
adv_d_loss = 0
for e in range(self.adversarail_g_epochs):
losses = self.model.calculate_g_adversarial_loss(epoch_idx=e)
total_g_loss = self._optimize_step(losses, total_g_loss, self.model.generator, self.g_optimizer)
adv_mana_loss, adv_work_loss = total_g_loss
adv_mana_loss = adv_mana_loss / self.adversarail_g_epochs
adv_work_loss = adv_work_loss / self.adversarail_g_epochs
for e in range(self.adversarail_d_epochs):
loss_dict = self._d_train_epoch(train_data, epoch_idx=epoch_idx)
total_d_loss = total_d_loss + loss_dict['total_loss']
total_d_acc = total_d_acc + loss_dict['train_acc']
adv_d_loss = total_d_loss / self.adversarail_d_epochs
        adv_d_acc = total_d_acc / self.adversarail_d_epochs
        return {"mana_loss": adv_mana_loss, "work_loss": adv_work_loss, "dis_loss": adv_d_loss, "train_acc": adv_d_acc}
def _g_train_epoch(self, train_data, epoch_idx):
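        # Supervised pretraining epoch for the LeakGAN generator; returns the averaged manager and worker losses.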
total_loss = None
real_data = self._get_real_data(train_data)
real_dataloader = DataLoader(real_data, batch_size=self.model.batch_size, shuffle=True, drop_last=True)
for batch_idx, data in enumerate(real_dataloader):
# interaction = interaction.to(self.device)
losses = self.model.calculate_g_train_loss(data, epoch_idx=epoch_idx)
total_loss = self._optimize_step(losses, total_loss, self.model.generator, self.g_optimizer)
        total_loss = ([l / len(real_dataloader) for l in total_loss] if isinstance(total_loss, tuple)
                      else total_loss / len(train_data))
mana_loss, work_loss = total_loss
return {"mana_loss": mana_loss, "work_loss": work_loss}
def _d_train_epoch(self, train_data, epoch_idx):
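        # Train the discriminator on paired real/generated batches for d_sample_training_epochs steps.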
total_loss = None
total_acc = 0
real_data = self._get_real_data(train_data)
real_dataloader = DataLoader(real_data, batch_size=self.model.batch_size, shuffle=True, drop_last=True)
        # no need to sample self.d_sample_num sequences: the discriminator only trains on d_sample_training_epochs batches
d_sample_num = (self.d_sample_training_epochs + 1) * self.model.batch_size
fake_data = self.model.sample(d_sample_num)
fake_dataloader = DataLoader(fake_data, batch_size=self.model.batch_size, shuffle=True, drop_last=True)
idx = 0
for real_data, fake_data in zip(real_dataloader, fake_dataloader):
# self.model.discriminator.eval() # pretraining not use dropout
if idx == self.d_sample_training_epochs:
break
losses, acc = self.model.calculate_d_train_loss(real_data, fake_data, epoch_idx=epoch_idx)
total_loss = self._optimize_step(losses, total_loss, self.model.discriminator, self.d_optimizer)
total_acc = total_acc + acc
idx += 1
total_loss = total_loss / self.d_sample_training_epochs
total_acc = total_acc / self.d_sample_training_epochs
return {"total_loss": total_loss, "train_acc": total_acc}
def fit(self, train_data, valid_data=None, verbose=True, saved=True):
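        # Pipeline: generator pretraining, discriminator pretraining, then interleaved adversarial rounds,
        # each followed by short generator and discriminator refresher pretraining.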
# pretraining
if verbose:
self.logger.info(">> Start pretraining")
# generator pretraining
for epoch_idx in range(self.g_pretraining_epochs): # 80
if verbose:
self.logger.info(">>>> [Pretrain Gen] Start %d / %d epochs generator pretraining" % (
epoch_idx + 1, self.g_pretraining_epochs))
training_start_time = time()
train_loss = self._g_train_epoch(train_data, epoch_idx)
training_end_time = time()
train_loss_output = \
self._generate_train_loss_output(epoch_idx + 1, training_start_time, training_end_time, train_loss,
"generator pre")
train_loss_output = ">>>> " + train_loss_output
if verbose:
self.logger.info(train_loss_output)
# discriminator pretraining
for epoch_idx in range(self.d_pretraining_epochs): # 5
if verbose:
>>> [Pretrain Dis]Start %d / %d epochs discriminator pretraining..." % (">
                self.logger.info(">>>> [Pretrain Dis] Start %d / %d epochs discriminator pretraining..." % (
                    epoch_idx + 1, self.d_pretraining_epochs))
training_start_time = time()
train_loss = self._d_train_epoch(train_data, epoch_idx)
training_end_time = time()
train_loss_output = \
self._generate_train_loss_output(epoch_idx, training_start_time, training_end_time, train_loss,
"discriminator pre")
train_loss_output = ">>>> " + train_loss_output
if verbose:
self.logger.info(train_loss_output)
if verbose:
self.logger.info(">> End pretraining")
# adversarial training
if verbose:
self.logger.info(">> Start adversarial training")
        adv_rounds = int(self.iters_num / self.adversarail_training_epochs)
        for epoch in range(adv_rounds):
            if verbose:
                self.logger.info(">>>> [Adv] Start epoch %d / %d interleaved adversarial training" % (epoch + 1, adv_rounds))
for epoch_idx in range(self.adversarail_training_epochs):
if verbose:
self.logger.info(">>>>>> [Adv] Start epoch %d / %d adversarial training" % (
epoch_idx + 1, self.adversarail_training_epochs))
training_start_time = time()
train_loss = self._adversarial_train_epoch(train_data, epoch_idx)
# self.train_loss_dict[epoch_idx] = sum(train_loss) if isinstance(train_loss, tuple) else train_loss
training_end_time = time()
train_loss_output = \
self._generate_train_loss_output((epoch_idx + 1), training_start_time, training_end_time,
train_loss,
train_info="adv ")
train_loss_output = ">>>>>> " + train_loss_output
if verbose:
self.logger.info(train_loss_output)
# gen pretrain
for epoch_idx in range(5):
if verbose:
self.logger.info(">>>>>> [Adv] Start epoch %d / 5 pretrain generator" % (epoch_idx + 1))
training_start_time = time()
train_loss = self._g_train_epoch(train_data, epoch_idx)
training_end_time = time()
train_loss_output = \
self._generate_train_loss_output((epoch_idx + 1), training_start_time, training_end_time,
train_loss,
"adv generator pre")
train_loss_output = ">>>>>> " + train_loss_output
if verbose:
self.logger.info(train_loss_output)
# dis pretrain
for epoch_idx in range(5): # d_steps
if verbose:
self.logger.info(">>>>>> [Adv] Start epoch %d / 5 pretrain discriminator" % (epoch_idx + 1))
training_start_time = time()
train_loss = self._d_train_epoch(train_data, epoch_idx)
training_end_time = time()
train_loss_output = \
self._generate_train_loss_output((epoch_idx + 1), training_start_time, training_end_time,
train_loss,
"adv discriminator pre")
train_loss_output = ">>>>>> " + train_loss_output
if verbose:
self.logger.info(train_loss_output)
self._save_checkpoint(self.adversarail_training_epochs)
return -1, None
| en | 0.762766 | # @Time : 2020/11/14 # @Author : <NAME>, <NAME> # @Email : <EMAIL> # UPDATE: # @Time : 2020/12/2, 2020/11/27, 2020/12/3, 2020/12/26 # @Author : <NAME>, <NAME>, <NAME>, <NAME> # @Email : <EMAIL>, <EMAIL>, <EMAIL>, <EMAIL> textbox.trainer.trainer ################################ Trainer Class is used to manage the training and evaluation processes of text generation system models. AbstractTrainer is an abstract class in which the fit() and evaluate() method should be implemented according to different training and evaluation strategies. Train the model based on the train data. Evaluate the model based on the eval data. The basic Trainer for basic training and evaluation strategies in text generation systems. This class defines common functions for training and evaluation processes of most text generation system models, including fit(), evalute(), resume_checkpoint() and some other features helpful for model training and evaluation. Generally speaking, this class can serve most text generation system models, If the training process of the model is to simply optimize a single loss without involving any complex training strategies, such as adversarial learning, pre-training and so on. Initializing the Trainer needs two parameters: `config` and `model`. `config` records the parameters information for controlling training and evaluation, such as `learning_rate`, `epochs`, `eval_step` and so on. More information can be found in [placeholder]. `model` is the instantiated object of a Model Class. Init the Optimizer Returns: torch.optim: the optimizer Train the model in an epoch Args: train_data (DataLoader): the train data epoch_idx (int): the current epoch id Returns: float/tuple: The sum of loss returned by all batches in this epoch. If the loss in each batch contains multiple parts and the model return these multiple parts loss instead of the sum of loss, It will return a tuple which includes the sum of loss in each part. Valid the model with valid data Args: valid_data (DataLoader): the valid data Returns: float: valid score dict: valid result Store the model parameters information and training information. Args: epoch (int): the current epoch id Store the generated text by our model. Args: corpus (list of string list): Load the model parameters information and training information. Args: resume_file (file): the checkpoint file # load architecture params from checkpoint # load optimizer state from checkpoint only when optimizer type is not changed Train the model based on the train data and the valid data. Args: train_data (DataLoader): the train data valid_data (DataLoader, optional): the valid data, default: None. If it's None, the early_stopping is invalid. verbose (bool, optional): whether to write training and evaluation information to logger, default: True saved (bool, optional): whether to save the model parameters, default: True Returns: (float, dict): best valid score and best valid result. If valid_data is None, it returns (-1, None) # train # eval # valid_loss, ppl # better model are supposed to provide smaller perplexity and loss Calculate the negative log-likelihood of the eval_data. Args: eval_data (DataLoader): the eval data. Returns: Float: NLL_test of the eval data. Evaluate the model based on the eval data. Args: eval_data (DataLoader): the eval data load_best_model (bool, optional): whether load the best model in the training process, default: True. It should be set True, if users want to test the model after training. 
model_file (str, optional): the saved model file, default: None. If users want to test the previously trained model file, they can set this parameter. Returns: dict: eval result, key is the eval metric and value in the corresponding metric value Plot the train loss in each epoch Args: show (bool, optional): whether to show this figure, default: True save_path (str, optional): the data path to save the figure, default: None. If it's None, it will not be saved. UnconditionalTrainer is designed for RNN, which is a typical unconditional generator. GANTrainer is designed for GAN, which is a generative adversarial net method. Init the Module Optimizer Args: module (torch.nn.Mudule): Mudule class of torch.nn needed optimizer Returns: torch.optim: the optimizer The opt uses the cliped losses to conduct an optimize step to optimize model and sum up losses to the total_loss. Args: losses (torch.Tensor or tuple): The loss to be backward. total_loss (Float): Total loss in an epoch. model (torch.nn.Mudule): The model to be optimized. opt (torch.optim): The optimizer of the model. Returns: torch.Tensor or tuple: Total loss in an epoch, shape: []. Pad the data to the max length of corpus. Args: data (torch.Tensor): The data to be padded, shape: [batch_size, max_batch_length]. Returns: torch.Tensor: The padded data, shape: [batch_size, max_seq_length]. Get the target text index of the corpus train_datas. Args: train_data (DataLoader): the train data. Returns: torch.Tensor: The target text index, shape: [batch_size, max_batch_length]. Train the generator module in an epoch Args: train_data (DataLoader): the train data epoch_idx (int): the current epoch id Returns: float/tuple: The sum of loss returned by all batches in this epoch. If the loss in each batch contains multiple parts and the model return these multiple parts loss instead of the sum of loss, It will return a tuple which includes the sum of loss in each part. Train the discriminator module in an epoch Args: train_data (DataLoader): the train data epoch_idx (int): the current epoch id Returns: float/tuple: The sum of loss returned by all batches in this epoch. If the loss in each batch contains multiple parts and the model return these multiple parts loss instead of the sum of loss, It will return a tuple which includes the sum of loss in each part. # d_epoch Adversarial training in an epoch Args: train_data (DataLoader): the train data epoch_idx (int): the current epoch id Returns: float/tuple: The sum of loss returned by all batches in this epoch. If the loss in each batch contains multiple parts and the model return these multiple parts loss instead of the sum of loss, It will return a tuple which includes the sum of loss in each part. # generator pretraining # discriminator pretraining # adversarial training TextGANTrainer is designed for TextGAN. RankGANTrainer is designed for RankGAN. Train the discriminator module in an epoch Args: train_data (DataLoader): the train data epoch_idx (int): the current epoch id Returns: float/tuple: The sum of loss returned by all batches in this epoch. If the loss in each batch contains multiple parts and the model return these multiple parts loss instead of the sum of loss, It will return a tuple which includes the sum of loss in each part. # ref_size * l Adversarial training in an epoch Args: train_data (DataLoader): the train data epoch_idx (int): the current epoch id Returns: float/tuple: The sum of loss returned by all batches in this epoch. 
If the loss in each batch contains multiple parts and the model return these multiple parts loss instead of the sum of loss, It will return a tuple which includes the sum of loss in each part. # ref_size * l ConditionalTrainer is designed for seq2seq testing, which is a typically used setting. Evaluate the model based on the eval data. Args: eval_data (DataLoader): the eval data load_best_model (bool, optional): whether load the best model in the training process, default: True. It should be set True, if users want to test the model after training. model_file (str, optional): the saved model file, default: None. If users want to test the previously trained model file, they can set this parameter. Returns: dict: eval result, key is the eval metric and value in the corresponding metric value Trainer specifically designed for MaskGAN training process. Init the Module Optimizer with specified learning rate Returns: torch.optim: the optimizer Add retain_graph option Specified for maskgan output Pretrain rnn-based Language Model with teacher forcing mechanism One iteration of LM forward # bs * self.max_len - 1 # pretaining # bs * self.max_len # bs * self.max_len # bs * self.max_len # bs * self.max_len Specified for MaskGAN adversarial training # have one offset # bs*batch_max_seq_len Load the generated text by our model to log. # generator pretraining # discriminator pretraining # adversarial training Specified for leakgan trainer # 0.001 # 0.00005 # (manager_opt, worker_opt) Specified for leakgan Specified for leakgan optimize Specified for leakgan output format Specified for leakgan which use eos_idx pad not pad_idx Specified for leakgan adversarial training # interaction = interaction.to(self.device) # not need sample self.d_sample_num numbers becauese only train discriminator 5 batch # self.model.discriminator.eval() # pretraining not use dropout # pretraining # generator pretraining # 80 # discriminator pretraining # 5 # adversarial training # self.train_loss_dict[epoch_idx] = sum(train_loss) if isinstance(train_loss, tuple) else train_loss # gen pretrain # dis pretrain # d_steps | 2.922995 | 3 |
rsserpent/plugins/builtin/__init__.py | EurusEurus/RSSerpent | 0 | 8299 | from ...models import Persona, Plugin
from . import example, example_cache, example_ratelimit, example_with_args
plugin = Plugin(
name="rsserpent-plugin-builtin",
author=Persona(
name="queensferryme",
link="https://github.com/queensferryme",
email="<EMAIL>",
),
repository="https://github.com/RSSerpent/RSSerpent",
prefix="/_",
routers={
example.path: example.provider,
example_cache.path: example_cache.provider,
example_ratelimit.path: example_ratelimit.provider,
example_with_args.path: example_with_args.provider,
},
)
__all__ = ("plugin",)
| from ...models import Persona, Plugin
from . import example, example_cache, example_ratelimit, example_with_args
plugin = Plugin(
name="rsserpent-plugin-builtin",
author=Persona(
name="queensferryme",
link="https://github.com/queensferryme",
email="<EMAIL>",
),
repository="https://github.com/RSSerpent/RSSerpent",
prefix="/_",
routers={
example.path: example.provider,
example_cache.path: example_cache.provider,
example_ratelimit.path: example_ratelimit.provider,
example_with_args.path: example_with_args.provider,
},
)
__all__ = ("plugin",)
| none | 1 | 1.771361 | 2 |