content (stringlengths 7-928k) | avg_line_length (float64 3.5-33.8k) | max_line_length (int64 6-139k) | alphanum_fraction (float64 0.08-0.96) | licenses (sequence) | repository_name (stringlengths 7-104) | path (stringlengths 4-230) | size (int64 7-928k) | lang (stringclasses 1 value) |
---|---|---|---|---|---|---|---|---|
# (c) 2014, Michael DeHaan <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import ctypes.util
import errno
import fcntl
import getpass
import locale
import splatlog as logging
import os
import random
import subprocess
import sys
import textwrap
import time
from struct import unpack, pack
from termios import TIOCGWINSZ
from ansible import constants as C
from ansible.errors import AnsibleError, AnsibleAssertionError
from ansible.module_utils._text import to_bytes, to_text, to_native
from ansible.module_utils.six import with_metaclass, text_type
from ansible.utils.color import stringc
from ansible.utils.singleton import Singleton
from ansible.utils.unsafe_proxy import wrap_var
try:
# Python 2
input = raw_input
except NameError:
# Python 3: input() already behaves like Python 2's raw_input
pass
_LIBC = ctypes.cdll.LoadLibrary(ctypes.util.find_library('c'))
# Set argtypes, to avoid segfault if the wrong type is provided,
# restype is assumed to be c_int
_LIBC.wcwidth.argtypes = (ctypes.c_wchar,)
_LIBC.wcswidth.argtypes = (ctypes.c_wchar_p, ctypes.c_int)
# Max for c_int
_MAX_INT = 2 ** (ctypes.sizeof(ctypes.c_int) * 8 - 1) - 1
_LOCALE_INITIALIZED = False
_LOCALE_INITIALIZATION_ERR = None
def initialize_locale():
"""Set the locale to the user's default setting
and set ``_LOCALE_INITIALIZED`` to indicate whether
``get_text_width`` may run into trouble
"""
global _LOCALE_INITIALIZED, _LOCALE_INITIALIZATION_ERR
if _LOCALE_INITIALIZED is False:
try:
locale.setlocale(locale.LC_ALL, '')
except locale.Error as e:
_LOCALE_INITIALIZATION_ERR = e
else:
_LOCALE_INITIALIZED = True
def get_text_width(text):
"""Function that utilizes ``wcswidth`` or ``wcwidth`` to determine the
number of columns used to display a text string.
We try first with ``wcswidth``, and fall back to iterating over each
character and using ``wcwidth`` individually, using a value of 0
for non-printable wide characters.
On Py2, this depends on ``locale.setlocale(locale.LC_ALL, '')``,
which in the case of Ansible is done in ``bin/ansible``
"""
if not isinstance(text, text_type):
raise TypeError('get_text_width requires text, not %s' % type(text))
if _LOCALE_INITIALIZATION_ERR:
Display().warning(
'An error occurred while calling ansible.utils.display.initialize_locale '
'(%s). This may result in incorrectly calculated text widths that can '
'cause Display to print incorrect line lengths' % _LOCALE_INITIALIZATION_ERR
)
elif not _LOCALE_INITIALIZED:
Display().warning(
'ansible.utils.display.initialize_locale has not been called, '
'this may result in incorrectly calculated text widths that can '
'cause Display to print incorrect line lengths'
)
try:
width = _LIBC.wcswidth(text, _MAX_INT)
except ctypes.ArgumentError:
width = -1
if width != -1:
return width
width = 0
counter = 0
for c in text:
counter += 1
if c in (u'\x08', u'\x7f', u'\x94', u'\x1b'):
# A few characters result in a subtraction of length:
# BS, DEL, CCH, ESC
# ESC is slightly different: although it is non-printable itself, it starts
# an escape sequence, and the sequence as a whole contributes a single
# non-printable length
width -= 1
counter -= 1
continue
try:
w = _LIBC.wcwidth(c)
except ctypes.ArgumentError:
w = -1
if w == -1:
# -1 signifies a non-printable character
# use 0 here as a best effort
w = 0
width += w
if width == 0 and counter and not _LOCALE_INITIALIZED:
raise EnvironmentError(
'ansible.utils.display.initialize_locale has not been called, '
'and get_text_width could not calculate text width of %r' % text
)
# It doesn't make sense to have a negative printable width
return width if width >= 0 else 0
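# Logging filter that drops records from any logger name listed in the
# configured blacklist (see DEFAULT_LOG_FILTER below).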
class FilterBlackList(logging.Filter):
def __init__(self, blacklist):
self.blacklist = [logging.Filter(name) for name in blacklist]
def filter(self, record):
return not any(f.filter(record) for f in self.blacklist)
class FilterUserInjector(logging.Filter):
"""
This is a filter which injects the current user as the 'user' attribute on each record. We need to add this filter
to all logger handlers so that 3rd party libraries won't print an exception due to user not being defined.
"""
try:
username = getpass.getuser()
except KeyError:
# people like to make containers w/o actual valid passwd/shadow and use host uids
username = 'uid=%s' % os.getuid()
def filter(self, record):
record.user = FilterUserInjector.username
return True
logger = None
# TODO: make this a callback event instead
if getattr(C, 'DEFAULT_LOG_PATH'):
path = C.DEFAULT_LOG_PATH
if path and (os.path.exists(path) and os.access(path, os.W_OK)) or os.access(os.path.dirname(path), os.W_OK):
# NOTE: level is kept at INFO to avoid security disclosures caused by certain libraries when using DEBUG
logging.basicConfig(filename=path, level=logging.INFO, # DO NOT set to logging.DEBUG
format='%(asctime)s p=%(process)d u=%(user)s n=%(name)s | %(message)s')
logger = logging.getLogger('ansible')
for handler in logging.root.handlers:
handler.addFilter(FilterBlackList(getattr(C, 'DEFAULT_LOG_FILTER', [])))
handler.addFilter(FilterUserInjector())
else:
print("[WARNING]: log file at %s is not writeable and we cannot create it, aborting\n" % path, file=sys.stderr)
# map color to log levels
color_to_log_level = {C.COLOR_ERROR: logging.ERROR,
C.COLOR_WARN: logging.WARNING,
C.COLOR_OK: logging.INFO,
C.COLOR_SKIP: logging.WARNING,
C.COLOR_UNREACHABLE: logging.ERROR,
C.COLOR_DEBUG: logging.DEBUG,
C.COLOR_CHANGED: logging.INFO,
C.COLOR_DEPRECATE: logging.WARNING,
C.COLOR_VERBOSE: logging.INFO}
b_COW_PATHS = (
b"/usr/bin/cowsay",
b"/usr/games/cowsay",
b"/usr/local/bin/cowsay", # BSD path for cowsay
b"/opt/local/bin/cowsay", # MacPorts path for cowsay
)
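# Display is a process-wide singleton (enforced by the Singleton metaclass)
# that centralizes console output, verbosity handling and logging.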
class Display(with_metaclass(Singleton, object)):
def __init__(self, verbosity=0):
self.columns = None
self.verbosity = verbosity
# list of all deprecation messages to prevent duplicate display
self._deprecations = {}
self._warns = {}
self._errors = {}
self.b_cowsay = None
self.noncow = C.ANSIBLE_COW_SELECTION
self.set_cowsay_info()
if self.b_cowsay:
try:
cmd = subprocess.Popen([self.b_cowsay, "-l"], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(out, err) = cmd.communicate()
self.cows_available = set([to_text(c) for c in out.split()])
if C.ANSIBLE_COW_WHITELIST and any(C.ANSIBLE_COW_WHITELIST):
self.cows_available = set(C.ANSIBLE_COW_WHITELIST).intersection(self.cows_available)
except Exception:
# could not execute cowsay for some reason
self.b_cowsay = False
self._set_column_width()
def set_cowsay_info(self):
if C.ANSIBLE_NOCOWS:
return
if C.ANSIBLE_COW_PATH:
self.b_cowsay = C.ANSIBLE_COW_PATH
else:
for b_cow_path in b_COW_PATHS:
if os.path.exists(b_cow_path):
self.b_cowsay = b_cow_path
def display(self, msg, color=None, stderr=False, screen_only=False, log_only=False, newline=True):
""" Display a message to the user
Note: msg *must* be a unicode string to prevent UnicodeError tracebacks.
"""
nocolor = msg
if not log_only:
has_newline = msg.endswith(u'\n')
if has_newline:
msg2 = msg[:-1]
else:
msg2 = msg
if color:
msg2 = stringc(msg2, color)
if has_newline or newline:
msg2 = msg2 + u'\n'
msg2 = to_bytes(msg2, encoding=self._output_encoding(stderr=stderr))
if sys.version_info >= (3,):
# Convert back to text string on python3
# We first convert to a byte string so that we get rid of
# characters that are invalid in the user's locale
msg2 = to_text(msg2, self._output_encoding(stderr=stderr), errors='replace')
# Note: After Display() class is refactored need to update the log capture
# code in 'bin/ansible-connection' (and other relevant places).
if not stderr:
fileobj = sys.stdout
else:
fileobj = sys.stderr
fileobj.write(msg2)
try:
fileobj.flush()
except IOError as e:
# Ignore EPIPE in case fileobj has been prematurely closed, eg.
# when piping to "head -n1"
if e.errno != errno.EPIPE:
raise
if logger and not screen_only:
# We first convert to a byte string so that we get rid of
# color and characters that are invalid in the user's locale
msg2 = to_bytes(nocolor.lstrip(u'\n'))
if sys.version_info >= (3,):
# Convert back to text string on python3
msg2 = to_text(msg2, self._output_encoding(stderr=stderr))
lvl = logging.INFO
if color:
# set logger level based on color (not great)
try:
lvl = color_to_log_level[color]
except KeyError:
# this should not happen, but JIC
raise AnsibleAssertionError('Invalid color supplied to display: %s' % color)
# actually log
logger.log(lvl, msg2)
def v(self, msg, host=None):
return self.verbose(msg, host=host, caplevel=0)
def vv(self, msg, host=None):
return self.verbose(msg, host=host, caplevel=1)
def vvv(self, msg, host=None):
return self.verbose(msg, host=host, caplevel=2)
def vvvv(self, msg, host=None):
return self.verbose(msg, host=host, caplevel=3)
def vvvvv(self, msg, host=None):
return self.verbose(msg, host=host, caplevel=4)
def vvvvvv(self, msg, host=None):
return self.verbose(msg, host=host, caplevel=5)
def debug(self, msg, host=None):
if C.DEFAULT_DEBUG:
if host is None:
self.display("%6d %0.5f: %s" % (os.getpid(), time.time(), msg), color=C.COLOR_DEBUG)
else:
self.display("%6d %0.5f [%s]: %s" % (os.getpid(), time.time(), host, msg), color=C.COLOR_DEBUG)
def verbose(self, msg, host=None, caplevel=2):
to_stderr = C.VERBOSE_TO_STDERR
if self.verbosity > caplevel:
if host is None:
self.display(msg, color=C.COLOR_VERBOSE, stderr=to_stderr)
else:
self.display("<%s> %s" % (host, msg), color=C.COLOR_VERBOSE, stderr=to_stderr)
def get_deprecation_message(self, msg, version=None, removed=False, date=None, collection_name=None):
''' used to build the deprecation message text that ``deprecated`` displays.'''
msg = msg.strip()
if msg and msg[-1] not in ['!', '?', '.']:
msg += '.'
if collection_name == 'ansible.builtin':
collection_name = 'ansible-base'
if removed:
header = '[DEPRECATED]: {0}'.format(msg)
removal_fragment = 'This feature was removed'
help_text = 'Please update your playbooks.'
else:
header = '[DEPRECATION WARNING]: {0}'.format(msg)
removal_fragment = 'This feature will be removed'
# FUTURE: make this a standalone warning so it only shows up once?
help_text = 'Deprecation warnings can be disabled by setting deprecation_warnings=False in ansible.cfg.'
if collection_name:
from_fragment = 'from {0}'.format(collection_name)
else:
from_fragment = ''
if date:
when = 'in a release after {0}.'.format(date)
elif version:
when = 'in version {0}.'.format(version)
else:
when = 'in a future release.'
message_text = ' '.join(f for f in [header, removal_fragment, from_fragment, when, help_text] if f)
return message_text
def deprecated(self, msg, version=None, removed=False, date=None, collection_name=None):
if not removed and not C.DEPRECATION_WARNINGS:
return
message_text = self.get_deprecation_message(msg, version=version, removed=removed, date=date, collection_name=collection_name)
if removed:
raise AnsibleError(message_text)
wrapped = textwrap.wrap(message_text, self.columns, drop_whitespace=False)
message_text = "\n".join(wrapped) + "\n"
if message_text not in self._deprecations:
self.display(message_text.strip(), color=C.COLOR_DEPRECATE, stderr=True)
self._deprecations[message_text] = 1
def warning(self, msg, formatted=False):
if not formatted:
new_msg = "[WARNING]: %s" % msg
wrapped = textwrap.wrap(new_msg, self.columns)
new_msg = "\n".join(wrapped) + "\n"
else:
new_msg = "\n[WARNING]: \n%s" % msg
if new_msg not in self._warns:
self.display(new_msg, color=C.COLOR_WARN, stderr=True)
self._warns[new_msg] = 1
def system_warning(self, msg):
if C.SYSTEM_WARNINGS:
self.warning(msg)
def banner(self, msg, color=None, cows=True):
'''
Prints a header-looking line, using cowsay when available, otherwise padded with stars to the terminal width (3 stars minimum)
'''
msg = to_text(msg)
if self.b_cowsay and cows:
try:
self.banner_cowsay(msg)
return
except OSError:
self.warning("somebody cleverly deleted cowsay or something during the PB run. heh.")
msg = msg.strip()
try:
star_len = self.columns - get_text_width(msg)
except EnvironmentError:
star_len = self.columns - len(msg)
if star_len <= 3:
star_len = 3
stars = u"*" * star_len
self.display(u"\n%s %s" % (msg, stars), color=color)
def banner_cowsay(self, msg, color=None):
if u": [" in msg:
msg = msg.replace(u"[", u"")
if msg.endswith(u"]"):
msg = msg[:-1]
runcmd = [self.b_cowsay, b"-W", b"60"]
if self.noncow:
thecow = self.noncow
if thecow == 'random':
thecow = random.choice(list(self.cows_available))
runcmd.append(b'-f')
runcmd.append(to_bytes(thecow))
runcmd.append(to_bytes(msg))
cmd = subprocess.Popen(runcmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(out, err) = cmd.communicate()
self.display(u"%s\n" % to_text(out), color=color)
def error(self, msg, wrap_text=True):
if wrap_text:
new_msg = u"\n[ERROR]: %s" % msg
wrapped = textwrap.wrap(new_msg, self.columns)
new_msg = u"\n".join(wrapped) + u"\n"
else:
new_msg = u"ERROR! %s" % msg
if new_msg not in self._errors:
self.display(new_msg, color=C.COLOR_ERROR, stderr=True)
self._errors[new_msg] = 1
@staticmethod
def prompt(msg, private=False):
prompt_string = to_bytes(msg, encoding=Display._output_encoding())
if sys.version_info >= (3,):
# Convert back into text on python3. We do this double conversion
# to get rid of characters that are illegal in the user's locale
prompt_string = to_text(prompt_string)
if private:
return getpass.getpass(prompt_string)
else:
return input(prompt_string)
def do_var_prompt(self, varname, private=True, prompt=None, encrypt=None, confirm=False, salt_size=None, salt=None, default=None, unsafe=None):
result = None
if sys.__stdin__.isatty():
do_prompt = self.prompt
if prompt and default is not None:
msg = "%s [%s]: " % (prompt, default)
elif prompt:
msg = "%s: " % prompt
else:
msg = 'input for %s: ' % varname
if confirm:
while True:
result = do_prompt(msg, private)
second = do_prompt("confirm " + msg, private)
if result == second:
break
self.display("***** VALUES ENTERED DO NOT MATCH ****")
else:
result = do_prompt(msg, private)
else:
result = None
self.warning("Not prompting as we are not in interactive mode")
# if result is false and default is not None
if not result and default is not None:
result = default
if encrypt:
# Circular import because encrypt needs a display class
from ansible.utils.encrypt import do_encrypt
result = do_encrypt(result, encrypt, salt_size, salt)
# handle utf-8 chars
result = to_text(result, errors='surrogate_or_strict')
if unsafe:
result = wrap_var(result)
return result
@staticmethod
def _output_encoding(stderr=False):
encoding = locale.getpreferredencoding()
# https://bugs.python.org/issue6202
# Python2 hardcodes an obsolete value on Mac. Use MacOSX defaults
# instead.
if encoding in ('mac-roman',):
encoding = 'utf-8'
return encoding
def _set_column_width(self):
if os.isatty(1):
tty_size = unpack('HHHH', fcntl.ioctl(1, TIOCGWINSZ, pack('HHHH', 0, 0, 0, 0)))[1]
else:
tty_size = 0
self.columns = max(79, tty_size - 1)
| 35.886827 | 147 | 0.603578 | [
"BSD-3-Clause"
] | nrser/nansi.collections | dev/ref/display.py | 19,343 | Python |
import os
# Defaults to where the code is contained.
# If your bokeh input data is placed elsewhere, go ahead and edit this path
ROOT = os.path.dirname(os.path.realpath(__file__))
ROOT = os.path.abspath(os.path.join(ROOT, '..'))
# Intermediate bokeh files are cached under this path
CACHE_DIR = os.path.join(ROOT, 'cache')
# Bokeh inputs will be loaded from the following path
INPUT_DIR = os.path.join(ROOT, 'datasets', 'doc_images')
USER_STUDY_ROOT = os.path.join(ROOT, 'user_study')
# Default font embedding location, used when not specified explicitly
FONT_EMBEDDING_MODEL = os.path.join(ROOT, 'models', 'resnet_font_classifier.pth') | 40 | 81 | 0.7625 | [
"Apache-2.0"
] | QPC-database/multimodal-affinities | multimodal_affinities/common_config.py | 640 | Python |
from django.apps import AppConfig
from django.utils.translation import gettext_lazy as _
class UsersConfig(AppConfig):
name = "django_vue_shopping_list.users"
verbose_name = _("Users")
def ready(self):
try:
import django_vue_shopping_list.users.signals # noqa F401
except ImportError:
pass
| 24.714286 | 70 | 0.687861 | [
"MIT"
] | JRedeker/django-vue-shopping-list | django_vue_shopping_list/django_vue_shopping_list/users/apps.py | 346 | Python |
# snippet-comment:[These are tags for the AWS doc team's sample catalog. Do not remove.]
# snippet-sourcedescription:[rekognition-image-python-create-collection.py demonstrates how to create an Amazon Rekognition collection.]
# snippet-service:[rekognition]
# snippet-keyword:[Amazon Rekognition]
# snippet-keyword:[Python]
# snippet-keyword:[Code Sample]
# snippet-sourcetype:[snippet]
# snippet-sourcedate:[2019-01-3]
# snippet-sourceauthor:[reesch (AWS)]
# Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# This file is licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License. A copy of the
# License is located at
#
# http://aws.amazon.com/apache2.0/
#
# This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS
# OF ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
# snippet-start:[rekognition.python.rekognition-image-python-create-collection.complete]
import boto3
if __name__ == "__main__":
# Replace collectionId with the name of the collection that you want to create.
maxResults = 2
collectionId = 'MyCollection'
# Create a collection
print('Creating collection:' + collectionId)
client=boto3.client('rekognition')
response = client.create_collection(CollectionId=collectionId)
print('Collection ARN: ' + response['CollectionArn'])
print('Status code: ' + str(response['StatusCode']))
print('Done...')
# snippet-end:[rekognition.python.rekognition-image-python-create-collection.complete]
| 38.818182 | 137 | 0.734778 | [
"Apache-2.0"
] | ExamProCo/aws-doc-sdk-examples | python/example_code/rekognition/rekognition-image-python-create-collection.py | 1,708 | Python |
from IMLearn.utils import split_train_test
from IMLearn.learners.regressors import LinearRegression
from IMLearn.metrics import *
from typing import NoReturn
import numpy as np
import pandas as pd
import plotly.graph_objects as go
import plotly.express as px
import plotly.io as pio
pio.templates.default = "simple_white"
def load_data(filename: str):
"""
Load house prices dataset and preprocess data.
Parameters
----------
filename: str
Path to house prices dataset
Returns
-------
Design matrix and response vector (prices) - either as a single
DataFrame or a Tuple[DataFrame, Series]
"""
df = pd.read_csv(filename)
df = df.dropna()
df = df[df['price'] > 0]
df = df[df['yr_built'] > 0]
df = df[df['bedrooms'] < 20]
df['date'] = df['date'].apply(lambda x: int(str(x)[:4]))
df = df[df['sqft_living'] <= df['sqft_lot']]
labels_to_drop = ['zipcode', 'lat', 'long', 'sqft_living15', 'sqft_lot15']
df.drop(columns=labels_to_drop, inplace=True)
series = df.pop('price')
return (df, series)
def feature_evaluation(X: pd.DataFrame, y: pd.Series,
output_path: str = ".") -> NoReturn:
"""
Create scatter plot between each feature and the response.
- Plot title specifies feature name
- Plot title specifies Pearson Correlation between feature and response
- Plot saved under given folder with file name including feature name
Parameters
----------
X : DataFrame of shape (n_samples, n_features)
Design matrix of regression problem
y : array-like of shape (n_samples, )
Response vector to evaluate against
output_path: str (default ".")
Path to folder in which plots are saved
"""
correlations = []
features = list(X)
for feature in features:
# Pearson correlation = cov(feature, y) / (std(feature) * std(y))
cov = np.cov(y, X[feature])
std = np.std(X[feature]) * np.std(y)
pearson_correlation = cov[0][1] / std
correlations.append(pearson_correlation)
fig = go.Figure()
fig.add_trace(go.Scatter(x=X[feature], y=y, mode="markers"))
fig.update_layout(title=feature + " - Pearson Correlation = "
+ str(pearson_correlation),
xaxis_title=feature + " Feature values",
yaxis_title="House's Price")
fig.write_image(f"{output_path}\\{feature}.png", format="png")
if __name__ == '__main__':
np.random.seed(0)
# Question 1 - Load and preprocessing of housing prices dataset
df, series = load_data('../datasets/house_prices.csv')
# Question 2 - Feature evaluation with respect to response
feature_evaluation(df,series,'C:/Users/shahaf/Documents')
# Question 3 - Split samples into training- and testing sets.
train_X, train_y, test_X, test_y = split_train_test(df, series, 0.75)
# Question 4 - Fit model over increasing percentages of the overall
# training data
# For every percentage p in 10%, 11%, ..., 100%, repeat the following 10
# times:
# 1) Sample p% of the overall training data
# 2) Fit linear model (including intercept) over sampled set
# 3) Test fitted model over test set
# 4) Store average and variance of loss over test set
# Then plot average loss as function of training size with error ribbon
# of size (mean-2*std, mean+2*std)
training_size = []
average_loss = []
var = []
for p in range(10, 101):
training_size.append(p / 100)
mse_list = []
for i in range(0, 10):
train_sample = train_X.sample(frac=p / 100)
sample_X, sample_y = train_sample, train_y.loc[
train_sample.index]
# model
model = LinearRegression()
model.fit(sample_X, sample_y)
mse_list.append(model.loss(test_X, test_y))
mse_arr = np.array(mse_list)
average_loss.append(mse_arr.mean())
var.append(mse_arr.std())
var = np.array(var)
average_loss = np.array(average_loss)
fig = go.Figure()
fig.add_trace(go.Scatter(x=training_size, y=average_loss,
mode="markers+lines",
marker=dict(color="LightSeaGreen"),name="Mean "
"MSE"))
fig.add_trace(go.Scatter(
x=training_size, y=average_loss - 2 * var, mode="lines", line=dict(
color="Aquamarine"),name="-2*STD"))
fig.add_trace(
go.Scatter(x=training_size,
y=average_loss + 2 * var, mode="lines", fill='tonexty',
line=dict(
color="Aquamarine"),name="+2*STD"))
fig.update_layout(title="Mean MSE vs Percentage of Samples for "
"Fitting")
fig.show()
| 36.462687 | 79 | 0.605608 | [
"MIT"
] | shahaf-shafirstein/IML.HUJI | exercises/house_price_prediction.py | 4,886 | Python |
from fastapi import APIRouter, HTTPException
import pandas as pd
import plotly.express as px
import json
from dotenv import load_dotenv
import os
import psycopg2
from sqlalchemy import create_engine
from sqlalchemy.types import Integer, Float, Text, String, DateTime
from fastapi.encoders import jsonable_encoder
from os.path import join as join_path
router = APIRouter()
@router.post('/wage_trade_transport_viz/')
async def wage_trade_transport_viz(user_queried_citystates: list):
"""
### Path Parameter (POST from front-end)
list: A list of city-states the user queried in this format: ["Albany, NY", "San Francisco, CA", "Chicago, IL"]
### Response
JSON string of all figures to render with [react-plotly.js](https://plotly.com/javascript/react/)
"""
def create_db_uri():
# give full path to .env
env_path = r'.env'
# LOAD environment variables
load_dotenv(dotenv_path=env_path, verbose=True)
# GET .env vars
DB_FLAVOR = os.getenv("DB_FLAVOR")
DB_PYTHON_LIBRARY = os.getenv("DB_PYTHON_LIBRARY")
DB_HOST = os.getenv("DB_HOST")
DB_NAME = os.getenv("DB_NAME")
DB_USER = os.getenv("DB_USER")
DB_PASS = os.getenv("DB_PASS")
DB_PORT = os.getenv("DB_PORT")
DB_URI = DB_FLAVOR + "+" + DB_PYTHON_LIBRARY + "://" + DB_USER + ":" + DB_PASS + "@" + DB_HOST + ":" + DB_PORT + "/" + DB_NAME
return DB_URI
DB_URI = create_db_uri()
# CONNECTION Engine with SQLAlchemy
engine = create_engine(DB_URI, echo=True)
def cc_json():
'''
Opens county_city.json file, converts to .json object and returns it
'''
with open(join_path('app', 'db', 'city-county.json')) as f:
data_to_encode = json.load(f)
encoded_json = jsonable_encoder(data_to_encode)
county_city_json = json.dumps(encoded_json)
return county_city_json
cc = cc_json()
cc = json.loads(cc)
# city_states_list = ["New York City, NY", "San Francisco, CA", "Chicago, IL"]
def get_county_from_city(city_states_list):
county_list = []
i = 0
for i in range(len(city_states_list)):
county_list.append(cc[city_states_list[i]])
i += 1
return county_list
county_list = get_county_from_city(user_queried_citystates)
def sql_query(county_list):
'''
Create a SQL query to grab only the user queried cities' data from the covid table in the DB.
Output: subset grouped DF by month and city with only queried cities
'''
# get length of list of queried cities
list_length = len(county_list)
# Create Boolean Statements to Avoid Errors with output
if list_length == 1:
county1 = county_list[0]
query1 = 'SELECT * FROM jobs WHERE county_state IN (%(county1)s)'
subsetJ = pd.read_sql(sql = query1, columns = "county_state", params={"county1":county1}, con=engine, parse_dates=['created_at', 'updated_at'])
elif list_length == 2:
county1 = county_list[0]
county2 = county_list[1]
query2 = 'SELECT * FROM jobs WHERE county_state IN (%(county1)s, %(county2)s)'
subsetJ = pd.read_sql(sql = query2, columns = "county_state", params={"county1":county1, "county2":county2}, con=engine, parse_dates=['created_at', 'updated_at'])
elif list_length == 3:
county1 = county_list[0]
county2 = county_list[1]
county3 = county_list[2]
query3 = 'SELECT * FROM jobs WHERE "county_state" IN (%(county1)s, %(county2)s, %(county3)s)'
subsetJ = pd.read_sql(sql = query3, columns = "county_state", params={"county1":county1, "county2":county2, "county3":county3}, con=engine, parse_dates=['created_at', 'updated_at'])
else:
raise Exception("Please pass a list of 1-3 City-States")
return subsetJ
subsetJ = sql_query(county_list)
industry_list = ['Goods-producing', 'Natural resources and mining', 'Construction', 'Manufacturing', 'Service-providing', 'Trade, transportation, and utilities', 'Information', 'Financial activities', 'Professional and business services', 'Education and health services', 'Leisure and hospitality', 'Other services', 'Unclassified']
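# Plot average weekly wage over time for the queried counties and the selected
# industry, and return the figure serialized to JSON for react-plotly.js.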
def create_wage_plots(df, industry_list, industry_name):
subsetJ['County, State'] = subsetJ['county_state']
subsetJ['date'] = pd.PeriodIndex(year=subsetJ['Year'], quarter=subsetJ['Qtr']).to_timestamp()
industry = subsetJ[subsetJ['Industry']==industry_name]
industry = industry.sort_values('date')
fig = px.line(industry, x='date', y='Average Weekly Wage', labels={'Average Weekly Wage': 'Average Weekly Wage ($)', 'date': 'Date'}, color='County, State', title=f"{industry_name}: Average Weekly Wage").for_each_trace(lambda t: t.update(name=t.name.split("=")[-1]))
fig.update_layout(legend=dict(orientation="h",yanchor="bottom",y=1.02,xanchor="right",x=1),
xaxis = dict(
tickmode = 'array',
tick0 = 1,
dtick = 1,
tickvals = [2010, 2011, 2012, 2013, 2014, 2015, 2016, 2017, 2018, 2019, 2020],
ticktext = ['2010', '2011', '2012', '2013', '2014', '2015', '2016', '2017', '2018', '2019', '2020']
)) # legend above graph top right
fig.write_image("fig1.png")
jobs_json = fig.to_json() # save figure to JSON object to pass to WEB
return jobs_json
wage_json = create_wage_plots(subsetJ, industry_list, industry_list[5])
return wage_json
| 46.893443 | 336 | 0.634854 | [
"MIT"
] | Lambda-School-Labs/Labs26-Citrics-DS-TeamC | project/app/api/wage_trade_transport_viz.py | 5,721 | Python |
my_data=[['slashdot','USA','yes',18,'None'],
['google','France','yes',23,'Premium'],
['digg','USA','yes',24,'Basic'],
['kiwitobes','France','yes',23,'Basic'],
['google','UK','no',21,'Premium'],
['(direct)','New Zealand','no',12,'None'],
['(direct)','UK','no',21,'Basic'],
['google','USA','no',24,'Premium'],
['slashdot','France','yes',19,'None'],
['digg','USA','no',18,'None'],
['google','UK','no',18,'None'],
['kiwitobes','UK','no',19,'None'],
['digg','New Zealand','yes',12,'Basic'],
['slashdot','UK','no',21,'None'],
['google','UK','yes',18,'Basic'],
['kiwitobes','France','yes',19,'Basic']]
class decisionnode:
def __init__(self,col=-1,value=None,results=None,tb=None,fb=None):
self.col=col
self.value=value
self.results=results
self.tb=tb
self.fb=fb
# Divides a set on a specific column. Can handle numeric
# or nominal values
def divideset(rows,column,value):
# Make a function that tells us if a row is in
# the first group (true) or the second group (false)
split_function=None
if isinstance(value,int) or isinstance(value,float):
split_function=lambda row:row[column]>=value
else:
split_function=lambda row:row[column]==value
# Divide the rows into two sets and return them
set1=[row for row in rows if split_function(row)]
set2=[row for row in rows if not split_function(row)]
return (set1,set2)
# Create counts of possible results (the last column of
# each row is the result)
def uniquecounts(rows):
results={}
for row in rows:
# The result is the last column
r=row[len(row)-1]
if r not in results: results[r]=0
results[r]+=1
return results
# Probability that a randomly placed item will
# be in the wrong category
def giniimpurity(rows):
total=len(rows)
counts=uniquecounts(rows)
imp=0
for k1 in counts:
p1=float(counts[k1])/total
for k2 in counts:
if k1==k2: continue
p2=float(counts[k2])/total
imp+=p1*p2
return imp
# Entropy is the sum of p(x)log(p(x)) across all
# the different possible results
def entropy(rows):
from math import log
log2=lambda x:log(x)/log(2)
results=uniquecounts(rows)
# Now calculate the entropy
ent=0.0
for r in results.keys():
p=float(results[r])/len(rows)
ent=ent-p*log2(p)
return ent
def printtree(tree,indent=''):
# Is this a leaf node?
if tree.results!=None:
print str(tree.results)
else:
# Print the criteria
print str(tree.col)+':'+str(tree.value)+'? '
# Print the branches
print indent+'T->',
printtree(tree.tb,indent+' ')
print indent+'F->',
printtree(tree.fb,indent+' ')
def getwidth(tree):
if tree.tb==None and tree.fb==None: return 1
return getwidth(tree.tb)+getwidth(tree.fb)
def getdepth(tree):
if tree.tb==None and tree.fb==None: return 0
return max(getdepth(tree.tb),getdepth(tree.fb))+1
from PIL import Image,ImageDraw
def drawtree(tree,jpeg='tree.jpg'):
w=getwidth(tree)*100
h=getdepth(tree)*100+120
img=Image.new('RGB',(w,h),(255,255,255))
draw=ImageDraw.Draw(img)
drawnode(draw,tree,w/2,20)
img.save(jpeg,'JPEG')
def drawnode(draw,tree,x,y):
if tree.results==None:
# Get the width of each branch
w1=getwidth(tree.fb)*100
w2=getwidth(tree.tb)*100
# Determine the total space required by this node
left=x-(w1+w2)/2
right=x+(w1+w2)/2
# Draw the condition string
draw.text((x-20,y-10),str(tree.col)+':'+str(tree.value),(0,0,0))
# Draw links to the branches
draw.line((x,y,left+w1/2,y+100),fill=(255,0,0))
draw.line((x,y,right-w2/2,y+100),fill=(255,0,0))
# Draw the branch nodes
drawnode(draw,tree.fb,left+w1/2,y+100)
drawnode(draw,tree.tb,right-w2/2,y+100)
else:
txt=' \n'.join(['%s:%d'%v for v in tree.results.items()])
draw.text((x-20,y),txt,(0,0,0))
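# Classify an observation by walking the tree from the root: at each decision
# node compare the observation's value in tree.col against tree.value and
# follow the matching branch until a leaf's result counts are reached.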
def classify(observation,tree):
if tree.results!=None:
return tree.results
else:
v=observation[tree.col]
branch=None
if isinstance(v,int) or isinstance(v,float):
if v>=tree.value: branch=tree.tb
else: branch=tree.fb
else:
if v==tree.value: branch=tree.tb
else: branch=tree.fb
return classify(observation,branch)
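# Prune the tree bottom-up: if merging a pair of sibling leaves would increase
# entropy by less than mingain, collapse them back into a single leaf.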
def prune(tree,mingain):
# If the branches aren't leaves, then prune them
if tree.tb.results==None:
prune(tree.tb,mingain)
if tree.fb.results==None:
prune(tree.fb,mingain)
# If both the subbranches are now leaves, see if they
# should merged
if tree.tb.results!=None and tree.fb.results!=None:
# Build a combined dataset
tb,fb=[],[]
for v,c in tree.tb.results.items():
tb+=[[v]]*c
for v,c in tree.fb.results.items():
fb+=[[v]]*c
# Test the reduction in entropy
delta=entropy(tb+fb)-(entropy(tb)+entropy(fb))/2
if delta<mingain:
# Merge the branches
tree.tb,tree.fb=None,None
tree.results=uniquecounts(tb+fb)
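# Like classify, but tolerant of missing data: when the observation lacks a
# value for the column being tested, follow both branches and weight each
# result by the fraction of training rows that went down that branch.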
def mdclassify(observation,tree):
if tree.results!=None:
return tree.results
else:
v=observation[tree.col]
if v==None:
tr,fr=mdclassify(observation,tree.tb),mdclassify(observation,tree.fb)
tcount=sum(tr.values())
fcount=sum(fr.values())
tw=float(tcount)/(tcount+fcount)
fw=float(fcount)/(tcount+fcount)
result={}
for k,v in tr.items(): result[k]=v*tw
for k,v in fr.items(): result[k]=v*fw
return result
else:
if isinstance(v,int) or isinstance(v,float):
if v>=tree.value: branch=tree.tb
else: branch=tree.fb
else:
if v==tree.value: branch=tree.tb
else: branch=tree.fb
return mdclassify(observation,branch)
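# Variance of the numeric outcomes in the last column; pass as scoref to
# buildtree to grow regression trees.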
def variance(rows):
if len(rows)==0: return 0
data=[float(row[len(row)-1]) for row in rows]
mean=sum(data)/len(data)
variance=sum([(d-mean)**2 for d in data])/len(data)
return variance
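# Recursively build the decision tree (CART style): try every column/value
# split, keep the one with the highest information gain under scoref, and
# stop splitting when no candidate improves on the current set.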
def buildtree(rows,scoref=entropy):
if len(rows)==0: return decisionnode()
current_score=scoref(rows)
# Set up some variables to track the best criteria
best_gain=0.0
best_criteria=None
best_sets=None
column_count=len(rows[0])-1
for col in range(0,column_count):
# Generate the list of different values in
# this column
column_values={}
for row in rows:
column_values[row[col]]=1
# Now try dividing the rows up for each value
# in this column
for value in column_values.keys():
(set1,set2)=divideset(rows,col,value)
# Information gain
p=float(len(set1))/len(rows)
gain=current_score-p*scoref(set1)-(1-p)*scoref(set2)
if gain>best_gain and len(set1)>0 and len(set2)>0:
best_gain=gain
best_criteria=(col,value)
best_sets=(set1,set2)
# Create the sub branches
if best_gain>0:
trueBranch=buildtree(best_sets[0],scoref)
falseBranch=buildtree(best_sets[1],scoref)
return decisionnode(col=best_criteria[0],value=best_criteria[1],
tb=trueBranch,fb=falseBranch)
else:
return decisionnode(results=uniquecounts(rows))
| 28.426295 | 75 | 0.632376 | [
"MIT"
] | YuHongJun/python-training | CollectiveIntelligence/chapter7/treepredict.py | 7,135 | Python |
from unittest import TestCase
from estrutura_dados.ordering_algorithms import bubble_sort, quick_sort
numbers = [82, 9, 6, 16, 5, 70, 63, 64, 59, 72, 30, 10, 26, 77, 64, 11, 10, 7, 66, 59, 55, 76, 13, 38, 19, 68, 60, 42, 7, 51]
_sorted = [5, 6, 7, 7, 9, 10, 10, 11, 13, 16, 19, 26, 30, 38, 42, 51, 55, 59, 59, 60, 63, 64, 64, 66, 68, 70, 72, 76, 77, 82]
class BubbleSort(TestCase):
def test_order(self):
self.assertEqual(
_sorted,
bubble_sort(numbers=numbers)
)
def test_quick_sort(self):
self.assertEqual(
_sorted,
quick_sort(numbers)
)
| 31.7 | 125 | 0.572555 | [
"MIT"
] | gustavo-veiga/-estrutura-dado | python_src/tests/tests_ordering_algorithms.py | 634 | Python |
import re
import requests
import time
class TeleBot(object):
def __init__(self, import_name):
self.import_name = import_name
self.update_rules = list()
self.config = dict(
api_key=None,
requests_kwargs=dict(
timeout=60,
),
)
self.offset = 0
self.whoami = None
def add_update_rule(self, rule, endpoint=None, view_func=None, **options):
self.update_rules.append(dict(
rule=re.compile(rule),
endpoint=endpoint,
view_func=view_func,
options=dict(**options),
))
def route(self, rule, **options):
"""A decorator that is used to register a view function for a
given message rule (a regular expression matched against incoming
message text). This does the same thing as :meth:`add_update_rule`
but is intended for decorator usage::
@bot.route(r'^/start')
def start(message):
bot.send_message(message['chat']['id'], 'Hello World')
The view function is called with the matched message dict and any
groups captured by the rule's regular expression.
:param rule: the message rule as a regular expression string
:param endpoint: optional endpoint name stored with the rule
:param options: the options to be forwarded to
:meth:`add_update_rule`
"""
def decorator(f):
endpoint = options.pop('endpoint', None)
self.add_update_rule(rule, endpoint, f, **options)
return f
return decorator
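# Advance the stored offset past this update, then dispatch the message text
# against every registered rule, calling matching view functions with the
# regex groups as arguments.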
def process_update(self, update):
self.offset = max(self.offset, update.get('update_id', 0)) + 1
for x in self.update_rules:
# TODO: Find a good pattern to detect each type and process
# accordingly.
if 'message' in update and 'text' in update['message'] and \
x['rule'].match(update['message']['text']):
m = x['rule'].match(update['message']['text'])
x['view_func'](update['message'],
*m.groups(),
**m.groupdict())
def process_updates(self, updates):
if updates.get('ok', False) is True:
for msg in updates['result']:
self.process_update(msg)
def _start(self):
'''Requests bot information based on current api_key, and sets
self.whoami to dictionary with username, first_name, and id of the
configured bot.
'''
if self.whoami is None:
me = self.get_me()
if me.get('ok', False):
self.whoami = me['result']
else:
raise ValueError("Bot Cannot request information, check "
"api_key")
def poll(self, offset=None, poll_timeout=600, cooldown=60, debug=False):
'''Long-poll getUpdates in a loop. These values should also be in the
config section, but can be overridden here.
'''
if self.config['api_key'] is None:
raise ValueError("config api_key is undefined")
if offset or self.config.get('offset', None):
self.offset = offset or self.config.get('offset', None)
self._start()
while True:
try:
response = self.get_updates(poll_timeout, self.offset)
if response.get('ok', False) is False:
raise ValueError(response['error'])
else:
self.process_updates(response)
except Exception as e:
print("Error: Unknown Exception")
print(e)
if debug:
raise e
else:
time.sleep(cooldown)
def listen(self):
raise NotImplementedError
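# Build the Bot API URL for the given endpoint, issue the request and return
# the parsed JSON, or a {'ok': False, 'error': ...} dict on failure.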
def _bot_cmd(self, method, endpoint, *args, **kwargs):
base_api = "https://api.telegram.org/bot{api_key}/{endpoint}"
endpoint = base_api.format(api_key=self.config['api_key'],
endpoint=endpoint)
try:
response = method(endpoint,
data=kwargs.get('data', None),
params=kwargs.get('params', {}),
**self.config['requests_kwargs'])
if response.status_code != 200:
raise ValueError('Got unexpected response. ({}) - {}'.
format(response.status_code, response.text))
return response.json()
except Exception as e:
return {
'ok': False,
'error': str(e),
}
def get_me(self):
'''A simple method for testing your bot's auth token. Requires no
parameters. Returns basic information about the bot in form of a `User
object.
'''
return self._bot_cmd(requests.get, 'getMe')
def send_message(self, chat_id, text):
data = dict(
chat_id=chat_id,
text=text,
)
return self._bot_cmd(requests.post, 'sendMessage', data=data)
def forward_message(self):
raise NotImplementedError("forward_message needs work")
def send_photo(self):
raise NotImplementedError("send_photo needs work")
def send_audio(self):
raise NotImplementedError("send_audio needs work")
def send_document(self):
raise NotImplementedError("send_document needs work")
def send_sticker(self):
raise NotImplementedError("send_sticker needs work")
def send_video(self):
raise NotImplementedError("send_video needs work")
def send_location(self):
raise NotImplementedError("send_location needs work")
def send_chat_action(self):
raise NotImplementedError("send_chat_action needs work")
def get_user_profile_photos(self):
raise NotImplementedError("get_user_profile_photos needs work")
def get_updates(self, timeout=0, offset=None):
params = dict(
timeout=timeout,
offset=offset,
)
return self._bot_cmd(requests.get, 'getUpdates', params=params)
def set_webhook(self):
raise NotImplementedError("set_webhook needs work")
| 34.8125 | 78 | 0.55012 | [
"MIT"
] | KyleJamesWalker/telebot | telebot/__init__.py | 6,684 | Python |
#!/usr/bin/python3
import sys
import boto3
import requests
import getpass
import configparser
import base64
import logging
import xml.etree.ElementTree as ET
import re
import pytz
from tzlocal import get_localzone
from datetime import datetime
from bs4 import BeautifulSoup
from os.path import expanduser
from urllib.parse import urlparse, urlunparse
##########################################################################
# Variables
# region: The default AWS region that this script will connect
# to for all API calls
region = 'eu-west-1'
# output format: The AWS CLI output format that will be configured in the
# saml profile (affects subsequent CLI calls)
outputformat = 'json'
# awsconfigfile: The file where this script will store the temp
# credentials under the saml profile
awsconfigfile = '/.aws/credentials'
# SSL certificate verification: Whether or not strict certificate
# verification is done, False should only be used for dev/test
sslverification = True
# idpentryurl: The initial url that starts the authentication process.
idpentryurl = 'https://sts.novartis.com/adfs/ls/IdpInitiatedSignOn.aspx?loginToRp=urn:amazon:webservices'
# Uncomment to enable low level debugging
#logging.basicConfig(level=logging.DEBUG)
##########################################################################
# Get the federated credentials from the user
print("Username:", end=' ')
username = input()
password = getpass.getpass()
print('')
# Initiate session handler
session = requests.Session()
# Programmatically get the SAML assertion
# Opens the initial IdP url and follows all of the HTTP302 redirects, and
# gets the resulting login page
formresponse = session.get(idpentryurl, verify=sslverification)
# Capture the idpauthformsubmiturl, which is the final url after all the 302s
idpauthformsubmiturl = formresponse.url
# Parse the response and extract all the necessary values
# in order to build a dictionary of all of the form values the IdP expects
formsoup = BeautifulSoup(formresponse.text, "html.parser")
payload = {}
for inputtag in formsoup.find_all(re.compile('(INPUT|input)')):
name = inputtag.get('name','')
value = inputtag.get('value','')
if "user" in name.lower():
#Make an educated guess that this is the right field for the username
payload[name] = username
elif "email" in name.lower():
#Some IdPs also label the username field as 'email'
payload[name] = username
elif "pass" in name.lower():
#Make an educated guess that this is the right field for the password
payload[name] = password
else:
#Simply populate the parameter with the existing value (picks up hidden fields in the login form)
payload[name] = value
# Debug the parameter payload if needed
# Use with caution since this will print sensitive output to the screen
#print(payload)
# Some IdPs don't explicitly set a form action, but if one is set we should
# build the idpauthformsubmiturl by combining the scheme and hostname
# from the entry url with the form action target
# If the action tag doesn't exist, we just stick with the
# idpauthformsubmiturl above
for inputtag in formsoup.find_all(re.compile('(FORM|form)')):
action = inputtag.get('action')
loginid = inputtag.get('id')
if (action and loginid == "loginForm"):
parsedurl = urlparse(idpentryurl)
idpauthformsubmiturl = parsedurl.scheme + "://" + parsedurl.netloc + action
# Performs the submission of the IdP login form with the above post data
response = session.post(
idpauthformsubmiturl, data=payload, verify=sslverification)
# Debug the response if needed
#print(response.text)
# Overwrite and delete the credential variables, just for safety
username = '##############################################'
password = '##############################################'
del username
del password
# Decode the response and extract the SAML assertion
soup = BeautifulSoup(response.text, "html.parser")
assertion = ''
# Look for the SAMLResponse attribute of the input tag (determined by
# analyzing the debug print lines above)
for inputtag in soup.find_all('input'):
if(inputtag.get('name') == 'SAMLResponse'):
#print(inputtag.get('value'))
assertion = inputtag.get('value')
# Better error handling is required for production use.
if (assertion == ''):
#TODO: Insert valid error checking/handling
print('Response did not contain a valid SAML assertion')
sys.exit(0)
# Debug only
#print(base64.b64decode(assertion))
# Parse the returned assertion and extract the authorized roles
awsroles = []
root = ET.fromstring(base64.b64decode(assertion))
for saml2attribute in root.iter('{urn:oasis:names:tc:SAML:2.0:assertion}Attribute'):
if (saml2attribute.get('Name') == 'https://aws.amazon.com/SAML/Attributes/Role'):
for saml2attributevalue in saml2attribute.iter('{urn:oasis:names:tc:SAML:2.0:assertion}AttributeValue'):
awsroles.append(saml2attributevalue.text)
# Note the format of the attribute value should be role_arn,principal_arn
# but lots of blogs list it as principal_arn,role_arn so let's reverse
# them if needed
for awsrole in awsroles:
chunks = awsrole.split(',')
if 'saml-provider' in chunks[0]:
newawsrole = chunks[1] + ',' + chunks[0]
index = awsroles.index(awsrole)
awsroles.insert(index, newawsrole)
awsroles.remove(awsrole)
# If I have more than one role, ask the user which one they want,
# otherwise just proceed
print("")
if len(awsroles) > 1:
i = 0
print("Please choose the role you would like to assume:")
for awsrole in awsroles:
print('[', i, ']: ', awsrole.split(',')[0])
i += 1
print("Selection: ", end=' ')
selectedroleindex = input()
# Basic sanity check of input
if int(selectedroleindex) > (len(awsroles) - 1):
print('You selected an invalid role index, please try again')
sys.exit(0)
role_arn = awsroles[int(selectedroleindex)].split(',')[0]
principal_arn = awsroles[int(selectedroleindex)].split(',')[1]
else:
role_arn = awsroles[0].split(',')[0]
principal_arn = awsroles[0].split(',')[1]
# Use the assertion to get an AWS STS token using Assume Role with SAML
conn = boto3.client('sts', region_name=region)
token = conn.assume_role_with_saml(RoleArn=role_arn, PrincipalArn=principal_arn, SAMLAssertion=assertion)
# Write the AWS STS token into the AWS credential file
home = expanduser("~")
filename = home + awsconfigfile
# Read in the existing config file
config = configparser.RawConfigParser()
config.read(filename)
# Put the credentials into a saml specific section instead of clobbering
# the default credentials
if not config.has_section('saml'):
config.add_section('saml')
config['saml']['output'] = outputformat
config['saml']['region'] = region
config['saml']['aws_access_key_id'] = token['Credentials']['AccessKeyId']
config['saml']['aws_secret_access_key'] = token['Credentials']['SecretAccessKey']
config['saml']['aws_session_token'] = token['Credentials']['SessionToken']
# Write the updated config file
with open(filename, 'w+') as configfile:
config.write(configfile)
# Give the user some basic info as to what has just happened
print('\n\n----------------------------------------------------------------')
print('Your new access key pair has been stored in the AWS configuration file {0} under the saml profile.'.format(filename))
print('Note that it will expire at {0}.'.format(token['Credentials']['Expiration'].astimezone(get_localzone())))
print('After this time, you may safely rerun this script to refresh your access key pair.')
print('To use this credential, call the AWS CLI with the --profile option (e.g. aws --profile saml ec2 describe-instances).')
print('----------------------------------------------------------------\n\n')
| 37.674641 | 125 | 0.697104 | [
"MIT"
] | davidsvejda/aws-ec2-saml | samlapi_formauth_adfsv3mod_python3.py | 7,874 | Python |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Bioindustrial-Park: BioSTEAM's Premier Biorefinery Models and Results
# Copyright (C) 2020-, Yalin Li <[email protected]>
#
# This module is under the UIUC open-source license. See
# github.com/BioSTEAMDevelopmentGroup/biosteam/blob/master/LICENSE.txt
# for license details.
'''
References
----------
[1] Humbird et al., Process Design and Economics for Biochemical Conversion of
Lignocellulosic Biomass to Ethanol: Dilute-Acid Pretreatment and Enzymatic
Hydrolysis of Corn Stover; Technical Report NREL/TP-5100-47764;
National Renewable Energy Lab (NREL), 2011.
https://www.nrel.gov/docs/fy11osti/47764.pdf
[2] Davis et al., Process Design and Economics for the Conversion of Lignocellulosic
Biomass to Hydrocarbon Fuels and Coproducts: 2018 Biochemical Design Case Update;
NREL/TP-5100-71949; National Renewable Energy Lab (NREL), 2018.
https://doi.org/10.2172/1483234
[3] Roni et al., Herbaceous Feedstock 2018 State of Technology Report;
INL/EXT-18-51654-Rev000; Idaho National Lab. (INL), 2020.
https://doi.org/10.2172/1615147
'''
import biosteam as bst
from biorefineries.ethanol_adipic._chemicals import chems
from biorefineries.ethanol_adipic._utils import _kg_per_ton, _ethanol_kg_2_gal
from biorefineries.ethanol_adipic._settings import set_feedstock_price, \
price, CFs, _feedstock_factor
from biorefineries.ethanol_adipic._processes import (
create_preprocessing_process,
create_acid_pretreatment_process,
create_base_pretreatment_process,
create_ethanol_process,
create_adipic_process,
create_wastewater_process,
create_facilities,
create_biorefinery
)
bst.settings.set_thermo(chems)
bst.CE = 541.7 # year 2016
# %%
# =============================================================================
# Different depot systems
# =============================================================================
depot_dct = {}
CPP_flowsheet, CPP_cost = create_preprocessing_process(kind='CPP', with_AFEX=False)
CPP_preprocessed = CPP_flowsheet.stream.preprocessed
depot_dct['CPP'] = {
'flowsheet': CPP_flowsheet,
'cost': CPP_cost,
'preprocessed': CPP_preprocessed,
}
CPP_AFEX_flowsheet, CPP_AFEX_cost = create_preprocessing_process(kind='CPP', with_AFEX=True)
CPP_AFEX_preprocessed = CPP_AFEX_flowsheet.stream.preprocessed
depot_dct['CPP_AFEX'] = {
'flowsheet': CPP_AFEX_flowsheet,
'cost': CPP_AFEX_cost,
'preprocessed': CPP_AFEX_preprocessed,
}
HMPP_flowsheet, HMPP_cost = create_preprocessing_process(kind='HMPP', with_AFEX=False)
HMPP_preprocessed = HMPP_flowsheet.stream.preprocessed
depot_dct['HMPP'] = {
'flowsheet': HMPP_flowsheet,
'cost': HMPP_cost,
'preprocessed': HMPP_preprocessed,
}
HMPP_AFEX_flowsheet, HMPP_AFEX_cost = create_preprocessing_process(kind='HMPP', with_AFEX=True)
HMPP_AFEX_preprocessed = HMPP_AFEX_flowsheet.stream.preprocessed
depot_dct['HMPP_AFEX'] = {
'flowsheet': HMPP_AFEX_flowsheet,
'cost': HMPP_AFEX_cost,
'preprocessed': HMPP_AFEX_preprocessed,
}
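# Estimate cradle-to-depot GWP (kg CO2-eq/kg preprocessed feedstock) for each
# depot: start from the 69.27 kg CO2-eq/U.S. ton HMPP baseline of ref [3],
# adjust for each depot's electricity use, and add the NH3 and natural gas
# burdens for the AFEX depots.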
def get_preprocessing_GWP():
GWPs = {}
e_CF = CFs['GWP_CFs']['Electricity']
NH3_CF = CFs['GWP_CFs']['NH3']
CH4_CF = CFs['GWP_CFs']['CH4']
e_rates = {}
for depot, dct in depot_dct.items():
sys = dct['flowsheet'].system.prep_sys
e_rates[depot] = \
sum(i.power_utility.rate for i in sys.units)/dct['preprocessed'].F_mass
# Add electricity
for depot in depot_dct.keys():
# 69.27 kg CO2-eq/U.S. ton from ref [3] for HMPP
GWPs[depot] = 69.27/_kg_per_ton + (e_rates[depot]-e_rates['HMPP'])*e_CF
for depot in ('CPP_AFEX', 'HMPP_AFEX'):
dct = depot_dct[depot]
feedstock_mass = dct['preprocessed'].F_mass
GWPs[depot] += dct['flowsheet'].stream.ammonia.F_mass*NH3_CF/feedstock_mass
GWPs[depot] += dct['flowsheet'].stream.natural_gas.F_mass*CH4_CF/feedstock_mass
return GWPs
feedstock_GWPs = get_preprocessing_GWP()
# # If want to use the default preprocessing price ($24.35/Mg)
# set_feedstock_price(feedstock)
# # If want to use the price in ref [2], note that the price here is $/dry U.S. ton
# CPP_feedstock.price = price['Feedstock']
# %%
# =============================================================================
# Acid-pretreatment biorefinery
# =============================================================================
def create_acid_biorefinery(preprocessed):
flowsheet = bst.Flowsheet('acid')
bst.main_flowsheet.set_flowsheet(flowsheet)
s = flowsheet.stream
u = flowsheet.unit
feedstock = preprocessed.copy('feedstock')
feedstock.price = preprocessed.price
get_feedstock_dry_mass = \
lambda: feedstock.F_mass - feedstock.imass['H2O']
get_flow_tpd = \
lambda: (feedstock.F_mass-feedstock.imass['H2O'])*24/_kg_per_ton
groups = []
flowsheet, groups = create_acid_pretreatment_process(
flowsheet, groups, feedstock, get_feedstock_dry_mass)
flowsheet, groups = \
create_ethanol_process(flowsheet, groups, u.P201-0)
# The last one is reserved for blowdown
wwt_streams = (u.H201-0, u.D402_P-0, u.S401-1, '')
flowsheet, groups = \
create_wastewater_process(flowsheet, groups, get_flow_tpd, wwt_streams,
need_ammonia=False, bypass_R501=False,
recover_sodium_sulfate=True)
CHP_wastes = (u.S401-0, u.S504-1)
CHP_biogas = u.R501-0
CHP_side_streams = (s.water_M201, s.water_M202, s.steam_M203)
process_water_streams = {
'pretreatment': (s.water_M201, s.water_M202, s.steam_M203, s.water_M205),
'ethanol': (s.water_M301, s.water_U401,)
}
recycled_water = u.S505-0
flowsheet, groups = \
create_facilities(flowsheet, groups, get_flow_tpd,
CHP_wastes, CHP_biogas, CHP_side_streams,
process_water_streams, recycled_water,
if_HXN=False, if_BDM=True)
u.T603.outs[0] = s.sulfuric_acid_T201
u.T604_S.outs[0] = s.ammonia_M205
flowsheet, teas, funcs = create_biorefinery(flowsheet, groups, get_flow_tpd)
return flowsheet, groups, teas, funcs
# %%
# =============================================================================
# AFEX-pretreatment biorefinery
# =============================================================================
def create_AFEX_biorefinery(preprocessed, include_adipic_process=False,
recover_sodium_sulfate=False):
flowsheet = bst.Flowsheet('AFEX')
bst.main_flowsheet.set_flowsheet(flowsheet)
s = flowsheet.stream
u = flowsheet.unit
feedstock = preprocessed.copy('feedstock')
feedstock.price = preprocessed.price
get_flow_tpd = \
lambda: (feedstock.F_mass-feedstock.imass['H2O'])*24/_kg_per_ton
groups = []
flowsheet, groups = \
create_ethanol_process(flowsheet, groups, feedstock)
u.M301.T = u.R301.T_saccharification
u.R301.C5_saccharification = True
# An empty filler stream
black_liquor = bst.Stream('black_liquor')
if include_adipic_process:
flowsheet, groups = \
create_adipic_process(flowsheet, groups, black_liquor, u.S401-0)
wwt_streams = (u.D402_P-0, u.S401-1, u.S701-1, u.S702-0, '')
flowsheet, groups = \
create_wastewater_process(flowsheet, groups, get_flow_tpd, wwt_streams,
need_ammonia=True, bypass_R501=True,
recover_sodium_sulfate=recover_sodium_sulfate)
CHP_wastes1 = (u.R701-1, u.S504-1)
CHP_biogas = ''
process_water_streams = {'adipid': (s.water_R702,)}
else:
wwt_streams = (u.D402_P-0, u.S401-1, '')
flowsheet, groups = \
create_wastewater_process(flowsheet, groups, get_flow_tpd, wwt_streams,
need_ammonia=False, bypass_R501=False,
recover_sodium_sulfate=recover_sodium_sulfate)
CHP_wastes1 = (u.S401-0, u.S504-1)
CHP_biogas = u.R501-0
process_water_streams = {}
CHP_wastes2 = (u.S506-1, ) if recover_sodium_sulfate else ()
CHP_wastes = (*CHP_wastes1, *CHP_wastes2)
CHP_side_streams = ()
process_water_streams['ethanol'] = (s.water_M301, s.water_U401,)
recycled_water = u.S505-0
flowsheet, groups = \
create_facilities(flowsheet, groups, get_flow_tpd,
CHP_wastes, CHP_biogas, CHP_side_streams,
process_water_streams, recycled_water,
if_HXN=False, if_BDM=True)
flowsheet, teas, funcs = create_biorefinery(flowsheet, groups, get_flow_tpd)
return flowsheet, groups, teas, funcs
# %%
# =============================================================================
# Base-pretreatment biorefinery
# =============================================================================
def create_base_biorefinery(preprocessed, include_adipic_process=True,
recover_sodium_sulfate=True):
flowsheet = bst.Flowsheet('base')
bst.main_flowsheet.set_flowsheet(flowsheet)
s = flowsheet.stream
u = flowsheet.unit
feedstock = preprocessed.copy('feedstock')
feedstock.price = preprocessed.price
get_flow_tpd = \
lambda: (feedstock.F_mass-feedstock.imass['H2O'])*24/_kg_per_ton
groups = []
flowsheet, groups = create_base_pretreatment_process(
flowsheet, groups, feedstock)
flowsheet, groups = \
create_ethanol_process(flowsheet, groups, u.P202-0)
u.M301.enzyme_load = 10
u.M301.solid_loading = 0.25
u.R301.C5_saccharification = True
if include_adipic_process:
flowsheet, groups = \
create_adipic_process(flowsheet, groups, u.P201-0, u.S401-0)
wwt_streams = (u.D402_P-0, u.S401-1, u.S701-1, u.S702-0, '')
flowsheet, groups = \
create_wastewater_process(flowsheet, groups, get_flow_tpd, wwt_streams,
need_ammonia=True, bypass_R501=True,
recover_sodium_sulfate=recover_sodium_sulfate)
CHP_wastes1 = (u.R701-1, u.S504-1)
CHP_biogas = ''
process_water_streams = {'adipid': (s.water_R702,)}
else:
wwt_streams = (u.P201-0, u.D402_P-0, u.S401-1, '')
flowsheet, groups = \
create_wastewater_process(flowsheet, groups, get_flow_tpd, wwt_streams,
need_ammonia=False, bypass_R501=False,
recover_sodium_sulfate=recover_sodium_sulfate)
CHP_wastes1 = (u.S401-0, u.S504-1)
CHP_biogas = u.R501-0
process_water_streams = {}
CHP_wastes2 = (u.S506-1, ) if recover_sodium_sulfate else ()
CHP_wastes = (*CHP_wastes1, *CHP_wastes2)
CHP_side_streams = ()
process_water_streams['pretreatment'] = (s.water_R201,)
process_water_streams['ethanol'] = (s.water_M301, s.water_U401,)
recycled_water = u.S505-0
flowsheet, groups = \
create_facilities(flowsheet, groups, get_flow_tpd,
CHP_wastes, CHP_biogas, CHP_side_streams,
process_water_streams, recycled_water,
if_HXN=False, if_BDM=True)
flowsheet, teas, funcs = create_biorefinery(flowsheet, groups, get_flow_tpd)
return flowsheet, groups, teas, funcs | 38.395349 | 95 | 0.633555 | [
"MIT"
] | yoelcortes/Bioindustrial-Complex | BioSTEAM 2.x.x/biorefineries/ethanol_adipic/systems.py | 11,557 | Python |
#!/usr/bin/env python3
# Copyright 2016 The Meson development team
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This class contains the basic functionality needed to run any interpreter
# or an interpreter-based tool.
# This tool is used to manipulate an existing Meson build definition.
#
# - add a file to a target
# - remove files from a target
# - move targets
# - reindent?
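# Illustrative invocations (assumption: the standard `meson rewrite` entry point wraps this module;
# they map onto the sub-commands defined in add_arguments below):
#   meson rewrite target mytarget add foo.c bar.c
#   meson rewrite kwargs set project / version 1.0.0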
from __future__ import annotations
from .ast import IntrospectionInterpreter, BUILD_TARGET_FUNCTIONS, AstConditionLevel, AstIDGenerator, AstIndentationGenerator, AstPrinter
from mesonbuild.mesonlib import MesonException
from . import mlog, environment
from functools import wraps
from .mparser import Token, ArrayNode, ArgumentNode, AssignmentNode, BooleanNode, ElementaryNode, IdNode, FunctionNode, StringNode
import json, os, re, sys
import typing as T
if T.TYPE_CHECKING:
from .mparser import BaseNode
class RewriterException(MesonException):
pass
def add_arguments(parser, formatter=None):
parser.add_argument('-s', '--sourcedir', type=str, default='.', metavar='SRCDIR', help='Path to source directory.')
parser.add_argument('-V', '--verbose', action='store_true', default=False, help='Enable verbose output')
parser.add_argument('-S', '--skip-errors', dest='skip', action='store_true', default=False, help='Skip errors instead of aborting')
subparsers = parser.add_subparsers(dest='type', title='Rewriter commands', description='Rewrite command to execute')
# Target
tgt_parser = subparsers.add_parser('target', help='Modify a target', formatter_class=formatter)
tgt_parser.add_argument('-s', '--subdir', default='', dest='subdir', help='Subdirectory of the new target (only for the "add_target" action)')
tgt_parser.add_argument('--type', dest='tgt_type', choices=rewriter_keys['target']['target_type'][2], default='executable',
help='Type of the target to add (only for the "add_target" action)')
tgt_parser.add_argument('target', help='Name or ID of the target')
tgt_parser.add_argument('operation', choices=['add', 'rm', 'add_target', 'rm_target', 'add_extra_files', 'rm_extra_files', 'info'],
help='Action to execute')
tgt_parser.add_argument('sources', nargs='*', help='Sources to add/remove')
# KWARGS
kw_parser = subparsers.add_parser('kwargs', help='Modify keyword arguments', formatter_class=formatter)
kw_parser.add_argument('operation', choices=rewriter_keys['kwargs']['operation'][2],
help='Action to execute')
kw_parser.add_argument('function', choices=list(rewriter_func_kwargs.keys()),
help='Function type to modify')
kw_parser.add_argument('id', help='ID of the function to modify (can be anything for "project")')
kw_parser.add_argument('kwargs', nargs='*', help='Pairs of keyword and value')
# Default options
def_parser = subparsers.add_parser('default-options', help='Modify the project default options', formatter_class=formatter)
def_parser.add_argument('operation', choices=rewriter_keys['default_options']['operation'][2],
help='Action to execute')
def_parser.add_argument('options', nargs='*', help='Key, value pairs of configuration option')
# JSON file/command
cmd_parser = subparsers.add_parser('command', help='Execute a JSON array of commands', formatter_class=formatter)
cmd_parser.add_argument('json', help='JSON string or file to execute')
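    # Illustrative payload for the "command" sub-command (a JSON list of command objects), e.g.:
    #   [{"type": "target", "target": "mytarget", "operation": "src_add", "sources": ["foo.c"]}]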
class RequiredKeys:
def __init__(self, keys):
self.keys = keys
def __call__(self, f):
@wraps(f)
def wrapped(*wrapped_args, **wrapped_kwargs):
assert len(wrapped_args) >= 2
cmd = wrapped_args[1]
for key, val in self.keys.items():
typ = val[0] # The type of the value
default = val[1] # The default value -- None is required
choices = val[2] # Valid choices -- None is for everything
if key not in cmd:
if default is not None:
cmd[key] = default
else:
raise RewriterException('Key "{}" is missing in object for {}'
.format(key, f.__name__))
if not isinstance(cmd[key], typ):
raise RewriterException('Invalid type of "{}". Required is {} but provided was {}'
.format(key, typ.__name__, type(cmd[key]).__name__))
if choices is not None:
assert isinstance(choices, list)
if cmd[key] not in choices:
raise RewriterException('Invalid value of "{}": Possible values are {} but provided was "{}"'
.format(key, choices, cmd[key]))
return f(*wrapped_args, **wrapped_kwargs)
return wrapped
class MTypeBase:
def __init__(self, node: T.Optional[BaseNode] = None):
if node is None:
self.node = self._new_node() # lgtm [py/init-calls-subclass] (node creation does not depend on base class state)
else:
self.node = node
self.node_type = None
for i in self.supported_nodes(): # lgtm [py/init-calls-subclass] (listing nodes does not depend on base class state)
if isinstance(self.node, i):
self.node_type = i
def _new_node(self):
# Overwrite in derived class
raise RewriterException('Internal error: _new_node of MTypeBase was called')
def can_modify(self):
return self.node_type is not None
def get_node(self):
return self.node
def supported_nodes(self):
# Overwrite in derived class
return []
def set_value(self, value):
# Overwrite in derived class
mlog.warning('Cannot set the value of type', mlog.bold(type(self).__name__), '--> skipping')
def add_value(self, value):
# Overwrite in derived class
mlog.warning('Cannot add a value of type', mlog.bold(type(self).__name__), '--> skipping')
def remove_value(self, value):
# Overwrite in derived class
mlog.warning('Cannot remove a value of type', mlog.bold(type(self).__name__), '--> skipping')
def remove_regex(self, value):
# Overwrite in derived class
mlog.warning('Cannot remove a regex in type', mlog.bold(type(self).__name__), '--> skipping')
class MTypeStr(MTypeBase):
def __init__(self, node: T.Optional[BaseNode] = None):
super().__init__(node)
def _new_node(self):
return StringNode(Token('', '', 0, 0, 0, None, ''))
def supported_nodes(self):
return [StringNode]
def set_value(self, value):
self.node.value = str(value)
class MTypeBool(MTypeBase):
def __init__(self, node: T.Optional[BaseNode] = None):
super().__init__(node)
def _new_node(self):
return BooleanNode(Token('', '', 0, 0, 0, None, False))
def supported_nodes(self):
return [BooleanNode]
def set_value(self, value):
self.node.value = bool(value)
class MTypeID(MTypeBase):
def __init__(self, node: T.Optional[BaseNode] = None):
super().__init__(node)
def _new_node(self):
return IdNode(Token('', '', 0, 0, 0, None, ''))
def supported_nodes(self):
return [IdNode]
def set_value(self, value):
self.node.value = str(value)
class MTypeList(MTypeBase):
def __init__(self, node: T.Optional[BaseNode] = None):
super().__init__(node)
def _new_node(self):
return ArrayNode(ArgumentNode(Token('', '', 0, 0, 0, None, '')), 0, 0, 0, 0)
def _new_element_node(self, value):
# Overwrite in derived class
raise RewriterException('Internal error: _new_element_node of MTypeList was called')
def _ensure_array_node(self):
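        # Wrap a bare element node in a fresh ArrayNode so the list operations below apply uniformly.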
if not isinstance(self.node, ArrayNode):
tmp = self.node
self.node = self._new_node()
self.node.args.arguments += [tmp]
def _check_is_equal(self, node, value) -> bool:
# Overwrite in derived class
return False
def _check_regex_matches(self, node, regex: str) -> bool:
# Overwrite in derived class
return False
def get_node(self):
if isinstance(self.node, ArrayNode):
if len(self.node.args.arguments) == 1:
return self.node.args.arguments[0]
return self.node
def supported_element_nodes(self):
# Overwrite in derived class
return []
def supported_nodes(self):
return [ArrayNode] + self.supported_element_nodes()
def set_value(self, value):
if not isinstance(value, list):
value = [value]
self._ensure_array_node()
self.node.args.arguments = [] # Remove all current nodes
for i in value:
self.node.args.arguments += [self._new_element_node(i)]
def add_value(self, value):
if not isinstance(value, list):
value = [value]
self._ensure_array_node()
for i in value:
self.node.args.arguments += [self._new_element_node(i)]
def _remove_helper(self, value, equal_func):
def check_remove_node(node):
for j in value:
                if equal_func(node, j):
return True
return False
if not isinstance(value, list):
value = [value]
self._ensure_array_node()
removed_list = []
for i in self.node.args.arguments:
if not check_remove_node(i):
removed_list += [i]
self.node.args.arguments = removed_list
def remove_value(self, value):
self._remove_helper(value, self._check_is_equal)
def remove_regex(self, regex: str):
self._remove_helper(regex, self._check_regex_matches)
class MTypeStrList(MTypeList):
def __init__(self, node: T.Optional[BaseNode] = None):
super().__init__(node)
def _new_element_node(self, value):
return StringNode(Token('', '', 0, 0, 0, None, str(value)))
def _check_is_equal(self, node, value) -> bool:
if isinstance(node, StringNode):
return node.value == value
return False
def _check_regex_matches(self, node, regex: str) -> bool:
if isinstance(node, StringNode):
return re.match(regex, node.value) is not None
return False
def supported_element_nodes(self):
return [StringNode]
class MTypeIDList(MTypeList):
def __init__(self, node: T.Optional[BaseNode] = None):
super().__init__(node)
def _new_element_node(self, value):
return IdNode(Token('', '', 0, 0, 0, None, str(value)))
def _check_is_equal(self, node, value) -> bool:
if isinstance(node, IdNode):
return node.value == value
return False
def _check_regex_matches(self, node, regex: str) -> bool:
if isinstance(node, StringNode):
return re.match(regex, node.value) is not None
return False
def supported_element_nodes(self):
return [IdNode]
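# Each entry maps a key to (expected type, default value, valid choices);
# a default of None marks the key as required and choices of None accepts any value.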
rewriter_keys = {
'default_options': {
'operation': (str, None, ['set', 'delete']),
'options': (dict, {}, None)
},
'kwargs': {
'function': (str, None, None),
'id': (str, None, None),
'operation': (str, None, ['set', 'delete', 'add', 'remove', 'remove_regex', 'info']),
'kwargs': (dict, {}, None)
},
'target': {
'target': (str, None, None),
'operation': (str, None, ['src_add', 'src_rm', 'target_rm', 'target_add', 'extra_files_add', 'extra_files_rm', 'info']),
'sources': (list, [], None),
'subdir': (str, '', None),
'target_type': (str, 'executable', ['both_libraries', 'executable', 'jar', 'library', 'shared_library', 'shared_module', 'static_library']),
}
}
rewriter_func_kwargs = {
'dependency': {
'language': MTypeStr,
'method': MTypeStr,
'native': MTypeBool,
'not_found_message': MTypeStr,
'required': MTypeBool,
'static': MTypeBool,
'version': MTypeStrList,
'modules': MTypeStrList
},
'target': {
'build_by_default': MTypeBool,
'build_rpath': MTypeStr,
'dependencies': MTypeIDList,
'gui_app': MTypeBool,
'link_with': MTypeIDList,
'export_dynamic': MTypeBool,
'implib': MTypeBool,
'install': MTypeBool,
'install_dir': MTypeStr,
'install_rpath': MTypeStr,
'pie': MTypeBool
},
'project': {
'default_options': MTypeStrList,
'meson_version': MTypeStr,
'license': MTypeStrList,
'subproject_dir': MTypeStr,
'version': MTypeStr
}
}
class Rewriter:
def __init__(self, sourcedir: str, generator: str = 'ninja', skip_errors: bool = False):
self.sourcedir = sourcedir
self.interpreter = IntrospectionInterpreter(sourcedir, '', generator, visitors = [AstIDGenerator(), AstIndentationGenerator(), AstConditionLevel()])
self.skip_errors = skip_errors
self.modified_nodes = []
self.to_remove_nodes = []
self.to_add_nodes = []
self.functions = {
'default_options': self.process_default_options,
'kwargs': self.process_kwargs,
'target': self.process_target,
}
self.info_dump = None
def analyze_meson(self):
mlog.log('Analyzing meson file:', mlog.bold(os.path.join(self.sourcedir, environment.build_filename)))
self.interpreter.analyze()
mlog.log(' -- Project:', mlog.bold(self.interpreter.project_data['descriptive_name']))
mlog.log(' -- Version:', mlog.cyan(self.interpreter.project_data['version']))
def add_info(self, cmd_type: str, cmd_id: str, data: dict):
if self.info_dump is None:
self.info_dump = {}
if cmd_type not in self.info_dump:
self.info_dump[cmd_type] = {}
self.info_dump[cmd_type][cmd_id] = data
def print_info(self):
if self.info_dump is None:
return
sys.stderr.write(json.dumps(self.info_dump, indent=2))
def on_error(self):
if self.skip_errors:
return mlog.cyan('-->'), mlog.yellow('skipping')
return mlog.cyan('-->'), mlog.red('aborting')
def handle_error(self):
if self.skip_errors:
return None
raise MesonException('Rewriting the meson.build failed')
def find_target(self, target: str):
def check_list(name: str) -> T.List[BaseNode]:
result = []
for i in self.interpreter.targets:
if name == i['name'] or name == i['id']:
result += [i]
return result
targets = check_list(target)
if targets:
if len(targets) == 1:
return targets[0]
else:
mlog.error('There are multiple targets matching', mlog.bold(target))
for i in targets:
mlog.error(' -- Target name', mlog.bold(i['name']), 'with ID', mlog.bold(i['id']))
mlog.error('Please try again with the unique ID of the target', *self.on_error())
self.handle_error()
return None
# Check the assignments
tgt = None
if target in self.interpreter.assignments:
node = self.interpreter.assignments[target]
if isinstance(node, FunctionNode):
if node.func_name in ['executable', 'jar', 'library', 'shared_library', 'shared_module', 'static_library', 'both_libraries']:
tgt = self.interpreter.assign_vals[target]
return tgt
def find_dependency(self, dependency: str):
def check_list(name: str):
for i in self.interpreter.dependencies:
if name == i['name']:
return i
return None
dep = check_list(dependency)
if dep is not None:
return dep
# Check the assignments
if dependency in self.interpreter.assignments:
node = self.interpreter.assignments[dependency]
if isinstance(node, FunctionNode):
if node.func_name in ['dependency']:
name = self.interpreter.flatten_args(node.args)[0]
dep = check_list(name)
return dep
@RequiredKeys(rewriter_keys['default_options'])
def process_default_options(self, cmd):
# First, remove the old values
kwargs_cmd = {
'function': 'project',
'id': "/",
'operation': 'remove_regex',
'kwargs': {
'default_options': [f'{x}=.*' for x in cmd['options'].keys()]
}
}
self.process_kwargs(kwargs_cmd)
# Then add the new values
if cmd['operation'] != 'set':
return
kwargs_cmd['operation'] = 'add'
kwargs_cmd['kwargs']['default_options'] = []
cdata = self.interpreter.coredata
        options = {str(k): v for k, v in cdata.options.items()}
for key, val in sorted(cmd['options'].items()):
if key not in options:
mlog.error('Unknown options', mlog.bold(key), *self.on_error())
self.handle_error()
continue
try:
val = options[key].validate_value(val)
except MesonException as e:
mlog.error('Unable to set', mlog.bold(key), mlog.red(str(e)), *self.on_error())
self.handle_error()
continue
kwargs_cmd['kwargs']['default_options'] += [f'{key}={val}']
self.process_kwargs(kwargs_cmd)
@RequiredKeys(rewriter_keys['kwargs'])
def process_kwargs(self, cmd):
mlog.log('Processing function type', mlog.bold(cmd['function']), 'with id', mlog.cyan("'" + cmd['id'] + "'"))
if cmd['function'] not in rewriter_func_kwargs:
mlog.error('Unknown function type', cmd['function'], *self.on_error())
return self.handle_error()
kwargs_def = rewriter_func_kwargs[cmd['function']]
# Find the function node to modify
node = None
arg_node = None
if cmd['function'] == 'project':
# msys bash may expand '/' to a path. It will mangle '//' to '/'
# but in order to keep usage shell-agnostic, also allow `//` as
# the function ID such that it will work in both msys bash and
# other shells.
if {'/', '//'}.isdisjoint({cmd['id']}):
mlog.error('The ID for the function type project must be "/" or "//" not "' + cmd['id'] + '"', *self.on_error())
return self.handle_error()
node = self.interpreter.project_node
arg_node = node.args
elif cmd['function'] == 'target':
tmp = self.find_target(cmd['id'])
if tmp:
node = tmp['node']
arg_node = node.args
elif cmd['function'] == 'dependency':
tmp = self.find_dependency(cmd['id'])
if tmp:
node = tmp['node']
arg_node = node.args
if not node:
mlog.error('Unable to find the function node')
assert isinstance(node, FunctionNode)
assert isinstance(arg_node, ArgumentNode)
# Transform the key nodes to plain strings
arg_node.kwargs = {k.value: v for k, v in arg_node.kwargs.items()}
# Print kwargs info
if cmd['operation'] == 'info':
info_data = {}
for key, val in sorted(arg_node.kwargs.items()):
info_data[key] = None
if isinstance(val, ElementaryNode):
info_data[key] = val.value
elif isinstance(val, ArrayNode):
data_list = []
for i in val.args.arguments:
element = None
if isinstance(i, ElementaryNode):
element = i.value
data_list += [element]
info_data[key] = data_list
self.add_info('kwargs', '{}#{}'.format(cmd['function'], cmd['id']), info_data)
return # Nothing else to do
# Modify the kwargs
num_changed = 0
for key, val in sorted(cmd['kwargs'].items()):
if key not in kwargs_def:
mlog.error('Cannot modify unknown kwarg', mlog.bold(key), *self.on_error())
self.handle_error()
continue
# Remove the key from the kwargs
if cmd['operation'] == 'delete':
if key in arg_node.kwargs:
mlog.log(' -- Deleting', mlog.bold(key), 'from the kwargs')
del arg_node.kwargs[key]
num_changed += 1
else:
mlog.log(' -- Key', mlog.bold(key), 'is already deleted')
continue
if key not in arg_node.kwargs:
arg_node.kwargs[key] = None
modifyer = kwargs_def[key](arg_node.kwargs[key])
if not modifyer.can_modify():
                mlog.log(' -- Skipping', mlog.bold(key), 'because it is too complex to modify')
                continue
# Apply the operation
val_str = str(val)
if cmd['operation'] == 'set':
mlog.log(' -- Setting', mlog.bold(key), 'to', mlog.yellow(val_str))
modifyer.set_value(val)
elif cmd['operation'] == 'add':
mlog.log(' -- Adding', mlog.yellow(val_str), 'to', mlog.bold(key))
modifyer.add_value(val)
elif cmd['operation'] == 'remove':
mlog.log(' -- Removing', mlog.yellow(val_str), 'from', mlog.bold(key))
modifyer.remove_value(val)
elif cmd['operation'] == 'remove_regex':
mlog.log(' -- Removing all values matching', mlog.yellow(val_str), 'from', mlog.bold(key))
modifyer.remove_regex(val)
# Write back the result
arg_node.kwargs[key] = modifyer.get_node()
num_changed += 1
# Convert the keys back to IdNode's
arg_node.kwargs = {IdNode(Token('', '', 0, 0, 0, None, k)): v for k, v in arg_node.kwargs.items()}
if num_changed > 0 and node not in self.modified_nodes:
self.modified_nodes += [node]
def find_assignment_node(self, node: BaseNode) -> AssignmentNode:
if node.ast_id and node.ast_id in self.interpreter.reverse_assignment:
return self.interpreter.reverse_assignment[node.ast_id]
return None
@RequiredKeys(rewriter_keys['target'])
def process_target(self, cmd):
mlog.log('Processing target', mlog.bold(cmd['target']), 'operation', mlog.cyan(cmd['operation']))
target = self.find_target(cmd['target'])
if target is None and cmd['operation'] != 'target_add':
mlog.error('Unknown target', mlog.bold(cmd['target']), *self.on_error())
return self.handle_error()
# Make source paths relative to the current subdir
def rel_source(src: str) -> str:
subdir = os.path.abspath(os.path.join(self.sourcedir, target['subdir']))
if os.path.isabs(src):
return os.path.relpath(src, subdir)
elif not os.path.exists(src):
return src # Trust the user when the source doesn't exist
# Make sure that the path is relative to the subdir
return os.path.relpath(os.path.abspath(src), subdir)
if target is not None:
cmd['sources'] = [rel_source(x) for x in cmd['sources']]
# Utility function to get a list of the sources from a node
def arg_list_from_node(n):
args = []
if isinstance(n, FunctionNode):
args = list(n.args.arguments)
if n.func_name in BUILD_TARGET_FUNCTIONS:
args.pop(0)
elif isinstance(n, ArrayNode):
args = n.args.arguments
elif isinstance(n, ArgumentNode):
args = n.arguments
return args
to_sort_nodes = []
if cmd['operation'] == 'src_add':
node = None
if target['sources']:
node = target['sources'][0]
else:
node = target['node']
assert node is not None
# Generate the current source list
src_list = []
for i in target['sources']:
for j in arg_list_from_node(i):
if isinstance(j, StringNode):
src_list += [j.value]
# Generate the new String nodes
to_append = []
for i in sorted(set(cmd['sources'])):
if i in src_list:
mlog.log(' -- Source', mlog.green(i), 'is already defined for the target --> skipping')
continue
mlog.log(' -- Adding source', mlog.green(i), 'at',
mlog.yellow(f'{node.filename}:{node.lineno}'))
token = Token('string', node.filename, 0, 0, 0, None, i)
to_append += [StringNode(token)]
# Append to the AST at the right place
arg_node = None
if isinstance(node, (FunctionNode, ArrayNode)):
arg_node = node.args
elif isinstance(node, ArgumentNode):
arg_node = node
assert arg_node is not None
arg_node.arguments += to_append
# Mark the node as modified
if arg_node not in to_sort_nodes and not isinstance(node, FunctionNode):
to_sort_nodes += [arg_node]
if node not in self.modified_nodes:
self.modified_nodes += [node]
elif cmd['operation'] == 'src_rm':
# Helper to find the exact string node and its parent
def find_node(src):
for i in target['sources']:
for j in arg_list_from_node(i):
if isinstance(j, StringNode):
if j.value == src:
return i, j
return None, None
for i in cmd['sources']:
# Try to find the node with the source string
root, string_node = find_node(i)
if root is None:
mlog.warning(' -- Unable to find source', mlog.green(i), 'in the target')
continue
# Remove the found string node from the argument list
arg_node = None
if isinstance(root, (FunctionNode, ArrayNode)):
arg_node = root.args
elif isinstance(root, ArgumentNode):
arg_node = root
assert arg_node is not None
mlog.log(' -- Removing source', mlog.green(i), 'from',
mlog.yellow(f'{string_node.filename}:{string_node.lineno}'))
arg_node.arguments.remove(string_node)
# Mark the node as modified
if arg_node not in to_sort_nodes and not isinstance(root, FunctionNode):
to_sort_nodes += [arg_node]
if root not in self.modified_nodes:
self.modified_nodes += [root]
elif cmd['operation'] == 'extra_files_add':
tgt_function: FunctionNode = target['node']
mark_array = True
try:
node = target['extra_files'][0]
except IndexError:
# Specifying `extra_files` with a list that flattens to empty gives an empty
# target['extra_files'] list, account for that.
try:
extra_files_key = next(k for k in tgt_function.args.kwargs.keys() if isinstance(k, IdNode) and k.value == 'extra_files')
node = tgt_function.args.kwargs[extra_files_key]
except StopIteration:
# Target has no extra_files kwarg, create one
node = ArrayNode(ArgumentNode(Token('', tgt_function.filename, 0, 0, 0, None, '[]')), tgt_function.end_lineno, tgt_function.end_colno, tgt_function.end_lineno, tgt_function.end_colno)
tgt_function.args.kwargs[IdNode(Token('string', tgt_function.filename, 0, 0, 0, None, 'extra_files'))] = node
mark_array = False
if tgt_function not in self.modified_nodes:
self.modified_nodes += [tgt_function]
target['extra_files'] = [node]
if isinstance(node, IdNode):
node = self.interpreter.assignments[node.value]
target['extra_files'] = [node]
if not isinstance(node, ArrayNode):
mlog.error('Target', mlog.bold(cmd['target']), 'extra_files argument must be a list', *self.on_error())
return self.handle_error()
# Generate the current extra files list
extra_files_list = []
for i in target['extra_files']:
for j in arg_list_from_node(i):
if isinstance(j, StringNode):
extra_files_list += [j.value]
# Generate the new String nodes
to_append = []
for i in sorted(set(cmd['sources'])):
if i in extra_files_list:
mlog.log(' -- Extra file', mlog.green(i), 'is already defined for the target --> skipping')
continue
mlog.log(' -- Adding extra file', mlog.green(i), 'at',
mlog.yellow(f'{node.filename}:{node.lineno}'))
token = Token('string', node.filename, 0, 0, 0, None, i)
to_append += [StringNode(token)]
# Append to the AST at the right place
arg_node = node.args
arg_node.arguments += to_append
# Mark the node as modified
if arg_node not in to_sort_nodes:
to_sort_nodes += [arg_node]
# If the extra_files array is newly created, don't mark it as its parent function node already is,
# otherwise this would cause double modification.
if mark_array and node not in self.modified_nodes:
self.modified_nodes += [node]
elif cmd['operation'] == 'extra_files_rm':
# Helper to find the exact string node and its parent
def find_node(src):
for i in target['extra_files']:
for j in arg_list_from_node(i):
if isinstance(j, StringNode):
if j.value == src:
return i, j
return None, None
for i in cmd['sources']:
# Try to find the node with the source string
root, string_node = find_node(i)
if root is None:
mlog.warning(' -- Unable to find extra file', mlog.green(i), 'in the target')
continue
# Remove the found string node from the argument list
arg_node = root.args
mlog.log(' -- Removing extra file', mlog.green(i), 'from',
mlog.yellow(f'{string_node.filename}:{string_node.lineno}'))
arg_node.arguments.remove(string_node)
# Mark the node as modified
if arg_node not in to_sort_nodes and not isinstance(root, FunctionNode):
to_sort_nodes += [arg_node]
if root not in self.modified_nodes:
self.modified_nodes += [root]
elif cmd['operation'] == 'target_add':
if target is not None:
mlog.error('Can not add target', mlog.bold(cmd['target']), 'because it already exists', *self.on_error())
return self.handle_error()
id_base = re.sub(r'[- ]', '_', cmd['target'])
            target_id = id_base + ('_exe' if cmd['target_type'] == 'executable' else '_lib')
source_id = id_base + '_sources'
filename = os.path.join(cmd['subdir'], environment.build_filename)
# Build src list
src_arg_node = ArgumentNode(Token('string', filename, 0, 0, 0, None, ''))
src_arr_node = ArrayNode(src_arg_node, 0, 0, 0, 0)
src_far_node = ArgumentNode(Token('string', filename, 0, 0, 0, None, ''))
src_fun_node = FunctionNode(filename, 0, 0, 0, 0, 'files', src_far_node)
src_ass_node = AssignmentNode(filename, 0, 0, source_id, src_fun_node)
src_arg_node.arguments = [StringNode(Token('string', filename, 0, 0, 0, None, x)) for x in cmd['sources']]
src_far_node.arguments = [src_arr_node]
# Build target
tgt_arg_node = ArgumentNode(Token('string', filename, 0, 0, 0, None, ''))
tgt_fun_node = FunctionNode(filename, 0, 0, 0, 0, cmd['target_type'], tgt_arg_node)
tgt_ass_node = AssignmentNode(filename, 0, 0, target_id, tgt_fun_node)
tgt_arg_node.arguments = [
StringNode(Token('string', filename, 0, 0, 0, None, cmd['target'])),
IdNode(Token('string', filename, 0, 0, 0, None, source_id))
]
src_ass_node.accept(AstIndentationGenerator())
tgt_ass_node.accept(AstIndentationGenerator())
self.to_add_nodes += [src_ass_node, tgt_ass_node]
elif cmd['operation'] == 'target_rm':
to_remove = self.find_assignment_node(target['node'])
if to_remove is None:
to_remove = target['node']
self.to_remove_nodes += [to_remove]
mlog.log(' -- Removing target', mlog.green(cmd['target']), 'at',
mlog.yellow(f'{to_remove.filename}:{to_remove.lineno}'))
elif cmd['operation'] == 'info':
# T.List all sources in the target
src_list = []
for i in target['sources']:
for j in arg_list_from_node(i):
if isinstance(j, StringNode):
src_list += [j.value]
extra_files_list = []
for i in target['extra_files']:
for j in arg_list_from_node(i):
if isinstance(j, StringNode):
extra_files_list += [j.value]
test_data = {
'name': target['name'],
'sources': src_list,
'extra_files': extra_files_list
}
self.add_info('target', target['id'], test_data)
# Sort files
for i in to_sort_nodes:
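            # Natural sort: digit runs are compared numerically, and path_sorter additionally keys
            # on path depth so sources inside subdirectories stay grouped together (approximately).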
convert = lambda text: int(text) if text.isdigit() else text.lower()
alphanum_key = lambda key: [convert(c) for c in re.split('([0-9]+)', key)]
path_sorter = lambda key: ([(key.count('/') <= idx, alphanum_key(x)) for idx, x in enumerate(key.split('/'))])
unknown = [x for x in i.arguments if not isinstance(x, StringNode)]
sources = [x for x in i.arguments if isinstance(x, StringNode)]
sources = sorted(sources, key=lambda x: path_sorter(x.value))
i.arguments = unknown + sources
def process(self, cmd):
if 'type' not in cmd:
raise RewriterException('Command has no key "type"')
if cmd['type'] not in self.functions:
raise RewriterException('Unknown command "{}". Supported commands are: {}'
.format(cmd['type'], list(self.functions.keys())))
self.functions[cmd['type']](cmd)
def apply_changes(self):
assert all(hasattr(x, 'lineno') and hasattr(x, 'colno') and hasattr(x, 'filename') for x in self.modified_nodes)
assert all(hasattr(x, 'lineno') and hasattr(x, 'colno') and hasattr(x, 'filename') for x in self.to_remove_nodes)
assert all(isinstance(x, (ArrayNode, FunctionNode)) for x in self.modified_nodes)
assert all(isinstance(x, (ArrayNode, AssignmentNode, FunctionNode)) for x in self.to_remove_nodes)
# Sort based on line and column in reversed order
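        # (applying edits bottom-up keeps the line offsets of nodes earlier in each file valid)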
work_nodes = [{'node': x, 'action': 'modify'} for x in self.modified_nodes]
work_nodes += [{'node': x, 'action': 'rm'} for x in self.to_remove_nodes]
work_nodes = list(sorted(work_nodes, key=lambda x: (x['node'].lineno, x['node'].colno), reverse=True))
work_nodes += [{'node': x, 'action': 'add'} for x in self.to_add_nodes]
# Generating the new replacement string
str_list = []
for i in work_nodes:
new_data = ''
if i['action'] == 'modify' or i['action'] == 'add':
printer = AstPrinter()
i['node'].accept(printer)
printer.post_process()
new_data = printer.result.strip()
data = {
'file': i['node'].filename,
'str': new_data,
'node': i['node'],
'action': i['action']
}
str_list += [data]
# Load build files
files = {}
for i in str_list:
if i['file'] in files:
continue
fpath = os.path.realpath(os.path.join(self.sourcedir, i['file']))
fdata = ''
# Create an empty file if it does not exist
if not os.path.exists(fpath):
with open(fpath, 'w', encoding='utf-8'):
pass
with open(fpath, encoding='utf-8') as fp:
fdata = fp.read()
# Generate line offsets numbers
m_lines = fdata.splitlines(True)
offset = 0
line_offsets = []
for j in m_lines:
line_offsets += [offset]
offset += len(j)
files[i['file']] = {
'path': fpath,
'raw': fdata,
'offsets': line_offsets
}
# Replace in source code
def remove_node(i):
offsets = files[i['file']]['offsets']
raw = files[i['file']]['raw']
node = i['node']
line = node.lineno - 1
col = node.colno
start = offsets[line] + col
end = start
if isinstance(node, (ArrayNode, FunctionNode)):
end = offsets[node.end_lineno - 1] + node.end_colno
# Only removal is supported for assignments
elif isinstance(node, AssignmentNode) and i['action'] == 'rm':
if isinstance(node.value, (ArrayNode, FunctionNode)):
remove_node({'file': i['file'], 'str': '', 'node': node.value, 'action': 'rm'})
raw = files[i['file']]['raw']
while raw[end] != '=':
end += 1
end += 1 # Handle the '='
while raw[end] in [' ', '\n', '\t']:
end += 1
files[i['file']]['raw'] = raw[:start] + i['str'] + raw[end:]
for i in str_list:
if i['action'] in ['modify', 'rm']:
remove_node(i)
elif i['action'] in ['add']:
files[i['file']]['raw'] += i['str'] + '\n'
# Write the files back
for key, val in files.items():
mlog.log('Rewriting', mlog.yellow(key))
with open(val['path'], 'w', encoding='utf-8') as fp:
fp.write(val['raw'])
target_operation_map = {
'add': 'src_add',
'rm': 'src_rm',
'add_target': 'target_add',
'rm_target': 'target_rm',
'add_extra_files': 'extra_files_add',
'rm_extra_files': 'extra_files_rm',
'info': 'info',
}
def list_to_dict(in_list: T.List[str]) -> T.Dict[str, str]:
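    # e.g. (illustrative): ['werror', 'true', 'unity', 'on'] -> {'werror': 'true', 'unity': 'on'}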
result = {}
it = iter(in_list)
try:
for i in it:
# calling next(it) is not a mistake, we're taking the next element from
# the iterator, avoiding the need to preprocess it into a sequence of
# key value pairs.
result[i] = next(it)
except StopIteration:
raise TypeError('in_list parameter of list_to_dict must have an even length.')
return result
def generate_target(options) -> T.List[dict]:
return [{
'type': 'target',
'target': options.target,
'operation': target_operation_map[options.operation],
'sources': options.sources,
'subdir': options.subdir,
'target_type': options.tgt_type,
}]
def generate_kwargs(options) -> T.List[dict]:
return [{
'type': 'kwargs',
'function': options.function,
'id': options.id,
'operation': options.operation,
'kwargs': list_to_dict(options.kwargs),
}]
def generate_def_opts(options) -> T.List[dict]:
return [{
'type': 'default_options',
'operation': options.operation,
'options': list_to_dict(options.options),
}]
def generate_cmd(options) -> T.List[dict]:
if os.path.exists(options.json):
with open(options.json, encoding='utf-8') as fp:
return json.load(fp)
else:
return json.loads(options.json)
# Map options.type to the actual type name
cli_type_map = {
'target': generate_target,
'tgt': generate_target,
'kwargs': generate_kwargs,
'default-options': generate_def_opts,
'def': generate_def_opts,
'command': generate_cmd,
'cmd': generate_cmd,
}
def run(options):
if not options.verbose:
mlog.set_quiet()
try:
rewriter = Rewriter(options.sourcedir, skip_errors=options.skip)
rewriter.analyze_meson()
if options.type is None:
mlog.error('No command specified')
return 1
commands = cli_type_map[options.type](options)
if not isinstance(commands, list):
raise TypeError('Command is not a list')
for i in commands:
            if not isinstance(i, dict):
raise TypeError('Command is not an object')
rewriter.process(i)
rewriter.apply_changes()
rewriter.print_info()
return 0
except Exception as e:
raise e
finally:
mlog.set_verbose()
| 40.63015 | 203 | 0.565045 | [
"Apache-2.0"
] | annacrombie/meson | mesonbuild/rewriter.py | 43,393 | Python |
from switch import Switch
class Link():
def __init__(self, lid, init_capacity, delay):
self.lid = lid
self.init_capacity = init_capacity
self.capacity = init_capacity
self.end1 = "None"
self.end2 = "None"
self.delay = delay
self.level = "None"
self._edges = []
# attach a link between a server and a switch, or between two switches
    def attach(self, x, y):
        self.end1 = x.id
        if type(x) is Switch:
            x.bindPort()
        self.end2 = y.id
        if type(y) is Switch:
            y.bindPort()
    def canFitEdge(self, G, edge):
        return self.capacity - G.edges[edge]['bandwidth'] >= 0
def putEdge(self, G, edge):
self.capacity -= G.edges[edge]['bandwidth']
self._edges.append(edge)
def removeEdge(self, G, edge):
self.capacity = self.capacity + G.edges[edge]['bandwidth']
self._edges.remove(edge) | 23.341463 | 74 | 0.625914 | [
"Apache-2.0"
] | Peniac/NSE-CSC | substrate_network/link.py | 957 | Python |
# -------------------------------------------------------------------------------------------------
# scientific
import numpy as np
import pandas as pd
# -------------------------------------------------------------------------------------------------
# PyQuantum.TC
from PyQuantum.TC_sink.WaveFunction import WaveFunction
# -------------------------------------------------------------------------------------------------
# PyQuantum.Common
from PyQuantum.Common.Matrix import *
# -------------------------------------------------------------------------------------------------
class DensityMatrix(Matrix):
# -----------------------------------------------------------------------------------------------------------------
# ---------------------------------------------------- INIT -------------------------------------------------------
# -----------------------------------------------------------------------------------------------------------------
def __init__(self, wf):
Assert(isinstance(wf, WaveFunction), "wf is not WaveFunction")
super(DensityMatrix, self).__init__(
m=wf.m, n=wf.m, dtype=np.complex128)
wf_data = wf.data
ro_data = wf_data.dot(wf_data.getH())
Assert(np.shape(ro_data) == (self.m, self.n), "size mismatch")
self.data = ro_data
# self.data = np.matrix(ro_data, dtype=np.complex128)
self.states = wf.states
self.size = np.shape(self.data)[0]
# -----------------------------------------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------------------------------------
# ---------------------------------------------------- IPRINT -----------------------------------------------------
# -----------------------------------------------------------------------------------------------------------------
def iprint(self):
df = pd.DataFrame()
for i in range(self.size):
for j in range(self.size):
df.loc[i, j] = self.data[i, j]
df.index = df.columns = [str(v) for v in self.states.values()]
self.df = df
# -----------------------------------------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------------------------------------
# ---------------------------------------------------- NORMALIZE --------------------------------------------------
# -----------------------------------------------------------------------------------------------------------------
def normalize(self):
self.data = (self.data+self.data.getH())/2.0
self.data /= np.sum(np.abs(self.data.diagonal()))
def energy(self, capacity, n_atoms, states_bin, diag_abs):
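        # Presumably states_bin[i] holds indices of basis states with i excitations and diag_abs
        # the absolute diagonal populations; each level's summed population is weighted by i.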
energy = [0] * (capacity + n_atoms+1)
for i in range(1, len(states_bin)):
energy[i] += np.sum(diag_abs[states_bin[i]])
# for j in states_bin[i]:
# energy[i] += diag_abs[j]
energy[i] *= i
# for i in range(1, len(states_bin)):
return energy
# -----------------------------------------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------------------------------------
# =====================================================================================================================
| 46.804878 | 119 | 0.23606 | [
"MIT"
] | Global19-atlassian-net/PyQuantum | PyQuantum/TC_sink/DensityMatrix.py | 3,838 | Python |
from common import num_range_scale
from neurons_engine import neurons_request, neurons_blocking_read
INQUIRE_ID = 0x00
SHAKE_ID = 0x01
ACC_X_ID = 0x02
ACC_Y_ID = 0x03
ACC_Z_ID = 0x04
GYRO_X_ID = 0x05
GYRO_Y_ID = 0x06
GYRO_Z_ID = 0x07
PITCH_ID = 0x08
ROLL_ID = 0x09
ROTATE_Z_ID = 0x0a
ROTATE_X_ID = 0x0b
ROTATE_Y_ID = 0x0c
TILTED_THRESHOLD = 20
SHAKED_THRESHOLD = {"light": 10, "usual": 30, "strong": 50}
ACC_SHIFT = 9.8
FACE_STATIC_THRESHOLD = 9.4  # 9.4 = 9.8 * cos(15 degrees)
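# Illustrative usage (assuming a sensor at module index 1):
#   if is_shaked("strong"): print(get_shake_strength())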
def get_acceleration(axis, index = 1):
if not isinstance(index, (int, float)):
return 0
if axis == 'x':
value = neurons_blocking_read("m_motion_sensor", "get_acc_x", (INQUIRE_ID, ACC_X_ID), index)
elif axis == 'y':
value = neurons_blocking_read("m_motion_sensor", "get_acc_y", (INQUIRE_ID, ACC_Y_ID), index)
elif axis == 'z':
value = neurons_blocking_read("m_motion_sensor", "get_acc_z", (INQUIRE_ID, ACC_Z_ID), index)
else:
return 0
if value != None:
return value[0] * ACC_SHIFT
else:
return 0
def get_gyroscope(axis, index = 1):
if not isinstance(index, (int, float)):
return 0
if axis == 'x':
value = neurons_blocking_read("m_motion_sensor", "get_gyr_x", (INQUIRE_ID, GYRO_X_ID), index)
elif axis == 'y':
value = neurons_blocking_read("m_motion_sensor", "get_gyr_y", (INQUIRE_ID, GYRO_Y_ID), index)
elif axis == 'z':
value = neurons_blocking_read("m_motion_sensor", "get_gyr_z", (INQUIRE_ID, GYRO_Z_ID), index)
else:
return 0
if value != None:
return value[0]
else:
return 0
def get_rotation(axis, index = 1):
if not isinstance(index, (int, float)):
return 0
if axis == 'x':
value = neurons_blocking_read("m_motion_sensor", "get_rotate_x", (INQUIRE_ID, ROTATE_X_ID), index)
elif axis == 'y':
value = neurons_blocking_read("m_motion_sensor", "get_rotate_y", (INQUIRE_ID, ROTATE_Y_ID), index)
elif axis == 'z':
value = neurons_blocking_read("m_motion_sensor", "get_rotate_z", (INQUIRE_ID, ROTATE_Z_ID), index)
else:
return 0
if value != None:
return value[0]
else:
return 0
def reset_rotation(axis = "all", index = 1):
if not isinstance(index, (int, float)):
return
if axis == 'x':
neurons_request("m_motion_sensor", "reset_x", (), index)
elif axis == 'y':
neurons_request("m_motion_sensor", "reset_y", (), index)
elif axis == 'z':
neurons_request("m_motion_sensor", "reset_z", (), index)
elif axis == 'all':
neurons_request("m_motion_sensor", "reset_x", (), index)
neurons_request("m_motion_sensor", "reset_y", (), index)
neurons_request("m_motion_sensor", "reset_z", (), index)
def is_shaked(level = "usual", index = 1):
if not isinstance(index, (int, float)):
return False
value = neurons_blocking_read("m_motion_sensor", "get_shake_strength", (INQUIRE_ID, SHAKE_ID), index)
if level in SHAKED_THRESHOLD:
thres = SHAKED_THRESHOLD[level]
else:
thres = SHAKED_THRESHOLD["usual"]
if value != None:
return bool(value[0] > thres)
else:
return False
def get_shake_strength(index = 1):
if not isinstance(index, (int, float)):
return 0
value = neurons_blocking_read("m_motion_sensor", "get_shake_strength", (INQUIRE_ID, SHAKE_ID), index)
if value != None:
return round(value[0], 1)
else:
return 0
def get_pitch(index = 1):
if not isinstance(index, (int, float)):
return 0
value = neurons_blocking_read("m_motion_sensor", "get_pitch", (INQUIRE_ID, PITCH_ID), index)
if value != None:
return value[0]
else:
return 0
def get_roll(index = 1):
if not isinstance(index, (int, float)):
return 0
value = neurons_blocking_read("m_motion_sensor", "get_roll", (INQUIRE_ID, ROLL_ID), index)
if value != None:
return value[0]
else:
return 0
def is_tilted_left(index = 1):
value = get_roll(index)
return bool(value < -TILTED_THRESHOLD)
def is_tilted_right(index = 1):
value = get_roll(index)
return bool(value > TILTED_THRESHOLD)
def is_tilted_forward(index = 1):
value = get_pitch(index)
return bool(value < -TILTED_THRESHOLD)
def is_tilted_backward(index = 1):
value = get_pitch(index)
return bool(value > TILTED_THRESHOLD)
def is_face_up(index = 1):
acc_z = get_acceleration('z', index)
    if acc_z < -FACE_STATIC_THRESHOLD:
return True
else:
return False
def is_face_down(index = 1):
acc_z = get_acceleration('z', index)
    if acc_z > FACE_STATIC_THRESHOLD:
return True
else:
return False
def is_upright(index = 1):
acc_y = get_acceleration('y', index)
    if acc_y > FACE_STATIC_THRESHOLD or acc_y < -FACE_STATIC_THRESHOLD:
return True
else:
return False
| 27.398907 | 106 | 0.641604 | [
"MIT"
] | FFtust/k210_scripts | src/neurons_engine/mbuild_modules/motion_sensor.py | 5,014 | Python |
#!/usr/bin/env python
# -*- coding:utf-8 -*-
# Generate a word cloud from a text file
'''
Reference:
https://amueller.github.io/word_cloud/
https://github.com/amueller/word_cloud
'''
from wordcloud import WordCloud
import matplotlib.pyplot as plt
filename = "***.txt" # 文本
with open(filename) as f:
mytext = f.read()
# print(mytext)
wordcloud = WordCloud().generate(mytext)
plt.imshow(wordcloud, interpolation='bilinear')
plt.axis("off") # 隐藏坐标
plt.show()
| 16.481481 | 47 | 0.678652 | [
"MIT"
] | comeCU/coding-python | funnyPython/test_wordcloud.py | 465 | Python |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Evaluates a conditional TFGAN trained MNIST model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import data_provider
import networks
import tensorflow as tf
from absl import app
from absl import flags
import util
tfgan = tf.contrib.gan
flags.DEFINE_string('checkpoint_dir', '/tmp/mnist/',
'Directory where the model was written to.')
flags.DEFINE_string('eval_dir', '/tmp/mnist/',
'Directory where the results are saved to.')
flags.DEFINE_integer('num_images_per_class', 10,
'Number of images to generate per class.')
flags.DEFINE_integer('noise_dims', 64,
'Dimensions of the generator noise vector')
flags.DEFINE_string('classifier_filename', None,
'Location of the pretrained classifier. If `None`, use '
'default.')
flags.DEFINE_integer('max_number_of_evaluations', None,
'Number of times to run evaluation. If `None`, run '
'forever.')
flags.DEFINE_boolean('write_to_disk', True, 'If `True`, write images to disk.')
FLAGS = flags.FLAGS
NUM_CLASSES = 10
def main(_, run_eval_loop=True):
with tf.name_scope('inputs'):
noise, one_hot_labels = _get_generator_inputs(
FLAGS.num_images_per_class, NUM_CLASSES, FLAGS.noise_dims)
# Generate images.
with tf.variable_scope('Generator'): # Same scope as in train job.
images = networks.conditional_generator(
(noise, one_hot_labels), is_training=False)
# Visualize images.
reshaped_img = tfgan.eval.image_reshaper(
images, num_cols=FLAGS.num_images_per_class)
tf.summary.image('generated_images', reshaped_img, max_outputs=1)
# Calculate evaluation metrics.
tf.summary.scalar('MNIST_Classifier_score',
util.mnist_score(images, FLAGS.classifier_filename))
tf.summary.scalar('MNIST_Cross_entropy',
util.mnist_cross_entropy(
images, one_hot_labels, FLAGS.classifier_filename))
# Write images to disk.
image_write_ops = None
if FLAGS.write_to_disk:
image_write_ops = tf.write_file(
'%s/%s'% (FLAGS.eval_dir, 'conditional_gan.png'),
tf.image.encode_png(data_provider.float_image_to_uint8(
reshaped_img[0])))
# For unit testing, use `run_eval_loop=False`.
if not run_eval_loop: return
tf.contrib.training.evaluate_repeatedly(
FLAGS.checkpoint_dir,
hooks=[tf.contrib.training.SummaryAtEndHook(FLAGS.eval_dir),
tf.contrib.training.StopAfterNEvalsHook(1)],
eval_ops=image_write_ops,
max_number_of_evaluations=FLAGS.max_number_of_evaluations)
def _get_generator_inputs(num_images_per_class, num_classes, noise_dims):
# Since we want a grid of numbers for the conditional generator, manually
# construct the desired class labels.
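  # e.g. (illustrative) num_classes=3, num_images_per_class=2 -> labels = [0, 0, 1, 1, 2, 2]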
num_images_generated = num_images_per_class * num_classes
noise = tf.random_normal([num_images_generated, noise_dims])
labels = [lbl for lbl in range(num_classes) for _
in range(num_images_per_class)]
one_hot_labels = tf.one_hot(tf.constant(labels), num_classes)
return noise, one_hot_labels
if __name__ == '__main__':
app.run(main)
| 35.702703 | 80 | 0.700227 | [
"Apache-2.0"
] | SimiaCryptus/models | research/gan/mnist/conditional_eval.py | 3,963 | Python |
#!/usr/bin/env python3
#
# Copyright (c) 2020 The Bitcoin ABC developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from base64 import b64encode
import mock
import os
import unittest
from phabricator_wrapper import BITCOIN_ABC_PROJECT_PHID, BITCOIN_ABC_REPO
import test.mocks.phabricator
class PhabricatorTests(unittest.TestCase):
def setUp(self):
self.phab = test.mocks.phabricator.instance()
def tearDown(self):
pass
def test_get_project_members(self):
self.phab.project.search.return_value = test.mocks.phabricator.Result([
{
"id": 1,
"type": "PROJ",
"phid": BITCOIN_ABC_PROJECT_PHID,
"attachments": {
"members": {
"members": [
{
"phid": "PHID-USER-usernumber1"
},
{
"phid": "PHID-USER-usernumber2"
},
{
"phid": "PHID-USER-usernumber3"
},
]
}
}
}
])
abc_members = self.phab.get_project_members(BITCOIN_ABC_PROJECT_PHID)
self.phab.project.search.assert_called_with(
constraints={
"phids": [BITCOIN_ABC_PROJECT_PHID],
},
attachments={
"members": True,
},
)
self.assertEqual(
abc_members,
[
"PHID-USER-usernumber1",
"PHID-USER-usernumber2",
"PHID-USER-usernumber3",
]
)
def test_get_latest_diff_staging_ref(self):
revision_PHID = "PHID-DREV-987645"
def assert_diff_searched_called():
return self.phab.differential.diff.search.assert_called_with(
constraints={
"revisionPHIDs": [revision_PHID],
},
order="newest"
)
# No diff associated to the revision
ref = self.phab.get_latest_diff_staging_ref(revision_PHID)
assert_diff_searched_called()
self.assertEqual(ref, "")
# 2 diffs associated with the revision. Ordering is guaranteed by the
# "order" request parameter.
self.phab.differential.diff.search.return_value = test.mocks.phabricator.Result([
{
"id": 42,
"type": "DIFF",
"phid": "PHID-DIFF-123456",
},
{
"id": 41,
"type": "DIFF",
"phid": "PHID-DIFF-abcdef",
},
])
ref = self.phab.get_latest_diff_staging_ref(revision_PHID)
assert_diff_searched_called()
self.assertEqual(ref, "refs/tags/phabricator/diff/42")
def test_get_current_user_phid(self):
user_PHID = "PHID-USER-foobarbaz"
self.phab.user.whoami.return_value = {
"phid": user_PHID,
"userName": "foo",
"realName": "Foo Bar",
}
# The whoami result should be cached. Call the method a few times and
# check the call occurs once and the result is always as expected.
for i in range(10):
phid = self.phab.get_current_user_phid()
self.phab.user.whoami.assert_called_once()
self.assertEqual(phid, user_PHID)
def test_getRevisionAuthor(self):
self.phab.differential.revision.search.return_value = test.mocks.phabricator.Result([{
'fields': {
'authorPHID': 'PHID-USER-2345',
},
}])
expectedAuthor = {
"phid": 'PHID-USER-2345',
}
self.phab.user.search.return_value = test.mocks.phabricator.Result([
expectedAuthor])
actualAuthor = self.phab.getRevisionAuthor('D1234')
self.assertEqual(actualAuthor, expectedAuthor)
def test_getAuthorSlackUsername(self):
self.assertEqual("", self.phab.getAuthorSlackUsername({}))
self.assertEqual("", self.phab.getAuthorSlackUsername({'fields': {}}))
self.assertEqual("test-slack-name", self.phab.getAuthorSlackUsername({
'fields': {
'custom.abc:slack-username': 'test-slack-name',
'username': 'test-username',
},
}))
self.assertEqual("test-username", self.phab.getAuthorSlackUsername({
'fields': {
'username': 'test-username',
},
}))
def test_user_roles(self):
user_PHID = "PHID-USER-abcdef"
def assert_user_search_called():
return self.phab.user.search.assert_called_with(
constraints={
"phids": [user_PHID],
}
)
# User not found
user_roles = self.phab.get_user_roles(user_PHID)
assert_user_search_called()
self.assertEqual(user_roles, [])
# User found
self.phab.user.search.return_value = test.mocks.phabricator.Result([
{
"id": 1,
"type": "USER",
"phid": user_PHID,
"fields": {
"username": "foobar",
"realName": "Foo Bar",
"roles": [
"admin",
"verified",
"approved",
"activated",
],
"dateCreated": 0,
"dateModified": 0,
"custom.abc:slack-username": "Foobar",
},
},
])
user_roles = self.phab.get_user_roles(user_PHID)
assert_user_search_called()
self.assertEqual(
user_roles,
[
"admin",
"verified",
"approved",
"activated",
]
)
# If more than 1 user is returned (should never occur), check no role is
# returned to prevent privilege exploits.
self.phab.user.search.return_value = test.mocks.phabricator.Result([
{
"id": 1,
"type": "USER",
"phid": user_PHID,
"fields": {
"roles": [
"verified",
],
},
},
{
"id": 2,
"type": "USER",
"phid": user_PHID,
"fields": {
"roles": [
"admin",
],
},
},
])
user_roles = self.phab.get_user_roles(user_PHID)
assert_user_search_called()
self.assertEqual(user_roles, [])
    def test_get_latest_master_commit_hash(self):
with self.assertRaises(AssertionError):
self.phab.get_latest_master_commit_hash()
self.phab.diffusion.commit.search.return_value = test.mocks.phabricator.Result([
{
"id": 1234,
"type": "CMIT",
"phid": "PHID-CMIT-abcdef",
"fields": {
"identifier": "0000000000000000000000000000000123456789",
"repositoryPHID": "PHID-REPO-abcrepo",
},
}
])
commit_hash = self.phab.get_latest_master_commit_hash()
self.phab.diffusion.commit.search.assert_called_with(
constraints={
"repositories": [BITCOIN_ABC_REPO],
},
limit=1,
)
self.assertEqual(
commit_hash,
"0000000000000000000000000000000123456789")
def test_get_file_content_from_master(self):
commit_hash = "0000000000000000000000000000000123456789"
file_phid = "PHID-FILE-somefile"
path = "some/file"
self.phab.get_latest_master_commit_hash = mock.Mock()
self.phab.get_latest_master_commit_hash.return_value = commit_hash
self.phab.diffusion.browsequery = mock.Mock()
def configure_browsequery(file_path=path, hash="abcdef"):
self.phab.diffusion.browsequery.return_value = {
"paths": [
{
"fullPath": "some/file/1",
"hash": "1234"
},
{
"fullPath": "some/file/2",
"hash": "5678"
},
{
"fullPath": file_path,
"hash": hash
},
]
}
def assert_diffusion_browsequery_called():
self.phab.get_latest_master_commit_hash.assert_called()
self.phab.diffusion.browsequery.assert_called_with(
path=os.path.dirname(path) or None,
commit=commit_hash,
repository=BITCOIN_ABC_REPO,
branch="master",
)
def configure_file_content_query(
file_phid=file_phid, too_slow=False, too_huge=False):
output = {
"tooSlow": too_slow,
"tooHuge": too_huge,
}
if file_phid is not None:
output["filePHID"] = file_phid
self.phab.diffusion.filecontentquery.return_value = output
def assert_file_commit_and_file_searched():
self.phab.get_latest_master_commit_hash.assert_called()
self.phab.diffusion.filecontentquery.assert_called_with(
path=path,
commit=commit_hash,
timeout=5,
byteLimit=1024 * 1024,
repository=BITCOIN_ABC_REPO,
branch="master",
)
# Browse query failure
self.phab.diffusion.browsequery.return_value = {}
with self.assertRaisesRegex(AssertionError, "File .+ not found in master"):
self.phab.get_file_content_from_master(path)
assert_diffusion_browsequery_called()
# Browse query returns no file
self.phab.diffusion.browsequery.return_value = {'paths': []}
with self.assertRaisesRegex(AssertionError, "File .+ not found in master"):
self.phab.get_file_content_from_master(path)
assert_diffusion_browsequery_called()
# Browse query failed to find our file
configure_browsequery(file_path='something/else')
with self.assertRaisesRegex(AssertionError, "File .+ not found in master"):
self.phab.get_file_content_from_master(path)
assert_diffusion_browsequery_called()
configure_browsequery()
# Missing file PHID
configure_file_content_query(file_phid=None)
with self.assertRaisesRegex(AssertionError, "File .+ not found in master"):
self.phab.get_file_content_from_master(path)
assert_file_commit_and_file_searched()
# Too long
configure_file_content_query(too_slow=True)
with self.assertRaisesRegex(AssertionError, "is oversized or took too long to download"):
self.phab.get_file_content_from_master(path)
assert_file_commit_and_file_searched()
# Too huge
configure_file_content_query(too_huge=True)
with self.assertRaisesRegex(AssertionError, "is oversized or took too long to download"):
self.phab.get_file_content_from_master(path)
assert_file_commit_and_file_searched()
# Check the file content can be retrieved
expected_content = b'Some nice content'
result = test.mocks.phabricator.Result([])
result.response = b64encode(expected_content)
self.phab.file.download.return_value = result
configure_file_content_query()
file_content = self.phab.get_file_content_from_master(path)
assert_file_commit_and_file_searched()
self.phab.file.download.assert_called_with(phid=file_phid)
self.assertEqual(file_content, expected_content)
# With later calls the content is returned directly thanks to the cache
self.phab.diffusion.filecontentquery.reset_mock()
self.phab.file.download.reset_mock()
for i in range(10):
file_content = self.phab.get_file_content_from_master(path)
self.assertEqual(file_content, expected_content)
self.phab.diffusion.filecontentquery.assert_not_called()
self.phab.file.download.assert_not_called()
# If the master commit changes, the file content is still valid in cache
# as long as its file hash is unchanged
for i in range(10):
commit_hash = str(int(commit_hash) + 1)
self.phab.get_latest_master_commit_hash.return_value = commit_hash
file_content = self.phab.get_file_content_from_master(path)
self.assertEqual(file_content, expected_content)
self.phab.diffusion.filecontentquery.assert_not_called()
self.phab.file.download.assert_not_called()
# But if the file hash changes, the file content needs to be updated...
configure_browsequery(hash="defghi")
file_content = self.phab.get_file_content_from_master(path)
assert_file_commit_and_file_searched()
self.phab.file.download.assert_called_with(phid=file_phid)
self.assertEqual(file_content, expected_content)
# ... only once.
self.phab.diffusion.filecontentquery.reset_mock()
self.phab.file.download.reset_mock()
for i in range(10):
file_content = self.phab.get_file_content_from_master(path)
self.assertEqual(file_content, expected_content)
self.phab.diffusion.filecontentquery.assert_not_called()
self.phab.file.download.assert_not_called()
def test_set_text_panel_content(self):
panel_id = 42
panel_content = "My wonderful panel content"
self.phab.dashboard.panel.edit.return_value = {
"error": None,
"errorMessage": None,
"response": {
"object": {
"id": panel_id,
"phid": "PHID-DSHP-123456789",
"transactions": [
{
"phid": "PHID-XACT-DSHP-abcdefghi"
}
]
}
}
}
def call_set_text_panel_content():
self.phab.set_text_panel_content(panel_id, panel_content)
self.phab.dashboard.panel.edit.assert_called_with(
objectIdentifier=panel_id,
transactions=[
{
"type": "text",
"value": panel_content
}
]
)
# Happy path
call_set_text_panel_content()
# Error
self.phab.dashboard.panel.edit.return_value["error"] = "You shall not pass !"
with self.assertRaisesRegex(AssertionError, "Failed to edit panel"):
call_set_text_panel_content()
def test_get_object_token(self):
user_PHID = "PHID-USER-foobarbaz"
self.phab.user.whoami.return_value = {
"phid": user_PHID,
}
object_PHID = "PHID-DREV-abcdef"
def assert_token_given_called():
self.phab.token.given.assert_called_with(
authorPHIDs=[user_PHID],
objectPHIDs=[object_PHID],
tokenPHIDs=[],
)
# There is no token for this object
self.phab.token.given.return_value = []
token = self.phab.get_object_token(object_PHID)
assert_token_given_called()
self.assertEqual(token, "")
# There is exactly 1 token for this object
self.phab.token.given.return_value = [
{
"authorPHID": user_PHID,
"objectPHID": object_PHID,
"tokenPHID": "PHID-TOKN-like-1",
"dateCreated": 0,
},
]
token = self.phab.get_object_token(object_PHID)
assert_token_given_called()
self.assertEqual(token, "PHID-TOKN-like-1")
# If there is more than a single token only the first one is returned
self.phab.token.given.return_value = [
{
"authorPHID": user_PHID,
"objectPHID": object_PHID,
"tokenPHID": "PHID-TOKN-like-1",
"dateCreated": 0,
},
{
"authorPHID": user_PHID,
"objectPHID": object_PHID,
"tokenPHID": "PHID-TOKN-like-2",
"dateCreated": 1,
},
]
token = self.phab.get_object_token(object_PHID)
assert_token_given_called()
self.assertEqual(token, "PHID-TOKN-like-1")
def test_set_object_token(self):
object_PHID = "PHID-DREV-abcdef"
def assert_token_give_called(token_PHID):
self.phab.token.give.assert_called_with(
objectPHID=object_PHID,
tokenPHID=token_PHID,
)
        # Rescind any previously awarded token
self.phab.set_object_token(object_PHID)
assert_token_give_called("")
token_PHID = "PHID-TOKN-like-1"
self.phab.set_object_token(object_PHID, token_PHID)
assert_token_give_called(token_PHID)
if __name__ == '__main__':
unittest.main()
| 35.483168 | 97 | 0.544617 | [
"MIT"
] | ryan763/bitcoin-abc | contrib/buildbot/test/test_phabricator.py | 17,919 | Python |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from .. import _utilities, _tables
__all__ = ['FirewallRule']
class FirewallRule(pulumi.CustomResource):
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
end_ip_address: Optional[pulumi.Input[str]] = None,
firewall_rule_name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
server_name: Optional[pulumi.Input[str]] = None,
start_ip_address: Optional[pulumi.Input[str]] = None,
__props__=None,
__name__=None,
__opts__=None):
"""
Represents a server firewall rule.
API Version: 2014-04-01.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] end_ip_address: The end IP address of the firewall rule. Must be IPv4 format. Must be greater than or equal to startIpAddress. Use value '0.0.0.0' to represent all Azure-internal IP addresses.
:param pulumi.Input[str] firewall_rule_name: The name of the firewall rule.
:param pulumi.Input[str] resource_group_name: The name of the resource group that contains the resource. You can obtain this value from the Azure Resource Manager API or the portal.
:param pulumi.Input[str] server_name: The name of the server.
:param pulumi.Input[str] start_ip_address: The start IP address of the firewall rule. Must be IPv4 format. Use value '0.0.0.0' to represent all Azure-internal IP addresses.
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
if end_ip_address is None and not opts.urn:
raise TypeError("Missing required property 'end_ip_address'")
__props__['end_ip_address'] = end_ip_address
__props__['firewall_rule_name'] = firewall_rule_name
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__['resource_group_name'] = resource_group_name
if server_name is None and not opts.urn:
raise TypeError("Missing required property 'server_name'")
__props__['server_name'] = server_name
if start_ip_address is None and not opts.urn:
raise TypeError("Missing required property 'start_ip_address'")
__props__['start_ip_address'] = start_ip_address
__props__['kind'] = None
__props__['location'] = None
__props__['name'] = None
__props__['type'] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:sql/latest:FirewallRule"), pulumi.Alias(type_="azure-nextgen:sql/v20140401:FirewallRule"), pulumi.Alias(type_="azure-nextgen:sql/v20150501preview:FirewallRule"), pulumi.Alias(type_="azure-nextgen:sql/v20200202preview:FirewallRule"), pulumi.Alias(type_="azure-nextgen:sql/v20200801preview:FirewallRule")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(FirewallRule, __self__).__init__(
'azure-nextgen:sql:FirewallRule',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'FirewallRule':
"""
Get an existing FirewallRule resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
return FirewallRule(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="endIpAddress")
def end_ip_address(self) -> pulumi.Output[str]:
"""
The end IP address of the firewall rule. Must be IPv4 format. Must be greater than or equal to startIpAddress. Use value '0.0.0.0' to represent all Azure-internal IP addresses.
"""
return pulumi.get(self, "end_ip_address")
@property
@pulumi.getter
def kind(self) -> pulumi.Output[str]:
"""
Kind of server that contains this firewall rule.
"""
return pulumi.get(self, "kind")
@property
@pulumi.getter
def location(self) -> pulumi.Output[str]:
"""
Location of the server that contains this firewall rule.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
Resource name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="startIpAddress")
def start_ip_address(self) -> pulumi.Output[str]:
"""
The start IP address of the firewall rule. Must be IPv4 format. Use value '0.0.0.0' to represent all Azure-internal IP addresses.
"""
return pulumi.get(self, "start_ip_address")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
Resource type.
"""
return pulumi.get(self, "type")
def translate_output_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
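# --- Illustrative usage (sketch, not part of the generated SDK) ---
# A minimal example of how this resource might be declared in a Pulumi program.
# The resource group, server name and IP range below are hypothetical placeholders;
# only the keyword arguments themselves come from the constructor above.
#
# import pulumi_azure_nextgen.sql as sql
#
# office_rule = sql.FirewallRule("officeRule",
#     firewall_rule_name="office",
#     resource_group_name="example-rg",
#     server_name="example-sql-server",
#     start_ip_address="203.0.113.0",
#     end_ip_address="203.0.113.255")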
| 44.953947 | 391 | 0.651251 | [
"Apache-2.0"
] | pulumi/pulumi-azure-nextgen | sdk/python/pulumi_azure_nextgen/sql/firewall_rule.py | 6,833 | Python |
import os
import numpy as np
import pandas as pd
from gym.utils import seeding
import gym
from gym import spaces
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import pickle
from stable_baselines3.common.vec_env import DummyVecEnv, SubprocVecEnv
from stable_baselines3.common import logger
class StockTradingEnv(gym.Env):
"""A stock trading environment for OpenAI gym"""
metadata = {'render.modes': ['human']}
def __init__(self,
df,
stock_dim,
hmax,
initial_amount,
buy_cost_pct,
sell_cost_pct,
reward_scaling,
state_space,
action_space,
tech_indicator_list,
turbulence_threshold=None,
make_plots = False,
print_verbosity = 10,
day = 0,
initial=True,
previous_state=[],
model_name = '',
mode='',
iteration=''):
self.day = day
self.df = df
self.stock_dim = stock_dim
self.hmax = hmax
self.initial_amount = initial_amount
self.buy_cost_pct = buy_cost_pct
self.sell_cost_pct = sell_cost_pct
self.reward_scaling = reward_scaling
self.state_space = state_space
self.action_space = action_space
self.tech_indicator_list = tech_indicator_list
self.action_space = spaces.Box(low = -1, high = 1,shape = (self.action_space,))
self.observation_space = spaces.Box(low=-np.inf, high=np.inf, shape = (self.state_space,))
self.data = self.df.loc[self.day,:]
self.terminal = False
self.make_plots = make_plots
self.print_verbosity = print_verbosity
self.turbulence_threshold = turbulence_threshold
self.initial = initial
self.previous_state = previous_state
self.model_name=model_name
self.mode=mode
self.iteration=iteration
        # initialize state
self.state = self._initiate_state()
# initialize reward
self.reward = 0
self.turbulence = 0
self.cost = 0
self.trades = 0
self.episode = 0
# memorize all the total balance change
self.asset_memory = [self.initial_amount]
self.rewards_memory = []
self.actions_memory=[]
self.date_memory=[self._get_date()]
#self.reset()
self._seed()
def _sell_stock(self, index, action):
def _do_sell_normal():
# perform sell action based on the sign of the action
if self.state[index+self.stock_dim+1] > 0:
sell_num_shares = min(abs(action),self.state[index+self.stock_dim+1])
sell_amount = self.state[index+1]* sell_num_shares * (1- self.sell_cost_pct)
#update balance
self.state[0] += sell_amount
self.state[index+self.stock_dim+1] -= min(abs(action), self.state[index+self.stock_dim+1])
self.cost +=self.state[index+1]*min(abs(action),self.state[index+self.stock_dim+1]) * \
self.sell_cost_pct
self.trades+=1
else:
sell_num_shares = 0
pass
return sell_num_shares
# perform sell action based on the sign of the action
if self.turbulence_threshold is not None:
if self.turbulence>=self.turbulence_threshold:
# if turbulence goes over threshold, just clear out all positions
if self.state[index+self.stock_dim+1] > 0:
sell_num_shares = self.state[index+self.stock_dim+1]
sell_amount = self.state[index+1]*sell_num_shares* (1- self.sell_cost_pct)
#update balance
self.state[0] += sell_amount
self.state[index+self.stock_dim+1] =0
self.cost += self.state[index+1]*self.state[index+self.stock_dim+1]* \
self.sell_cost_pct
self.trades+=1
else:
sell_num_shares = 0
pass
else:
sell_num_shares = _do_sell_normal()
else:
sell_num_shares = _do_sell_normal()
return sell_num_shares
def _buy_stock(self, index, action):
def _do_buy():
available_amount = self.state[0] // self.state[index+1]
# print('available_amount:{}'.format(available_amount))
#update balance
buy_num_shares = min(available_amount, action)
buy_amount = self.state[index+1]* buy_num_shares * (1+ self.buy_cost_pct)
self.state[0] -= buy_amount
self.state[index+self.stock_dim+1] += min(available_amount, action)
self.cost+=self.state[index+1]*min(available_amount, action)* \
self.buy_cost_pct
self.trades+=1
return buy_num_shares
# perform buy action based on the sign of the action
if self.turbulence_threshold is None:
buy_num_shares = _do_buy()
else:
if self.turbulence< self.turbulence_threshold:
buy_num_shares = _do_buy()
else:
pass
return buy_num_shares
def _make_plot(self):
plt.plot(self.asset_memory,'r')
plt.savefig('results/account_value_trade_{}.png'.format(self.episode))
plt.close()
def step(self, actions):
self.terminal = self.day >= len(self.df.index.unique())-1
if self.terminal:
# print(f"Episode: {self.episode}")
if self.make_plots:
self._make_plot()
end_total_asset = self.state[0]+ \
sum(np.array(self.state[1:(self.stock_dim+1)])*np.array(self.state[(self.stock_dim+1):(self.stock_dim*2+1)]))
df_total_value = pd.DataFrame(self.asset_memory)
tot_reward = self.state[0]+sum(np.array(self.state[1:(self.stock_dim+1)])*np.array(self.state[(self.stock_dim+1):(self.stock_dim*2+1)]))- self.initial_amount
df_total_value.columns = ['account_value']
df_total_value['date'] = self.date_memory
df_total_value['daily_return']=df_total_value['account_value'].pct_change(1)
if df_total_value['daily_return'].std() !=0:
sharpe = (252**0.5)*df_total_value['daily_return'].mean()/ \
df_total_value['daily_return'].std()
df_rewards = pd.DataFrame(self.rewards_memory)
df_rewards.columns = ['account_rewards']
df_rewards['date'] = self.date_memory[:-1]
if self.episode % self.print_verbosity == 0:
print(f"day: {self.day}, episode: {self.episode}")
print(f"begin_total_asset: {self.asset_memory[0]:0.2f}")
print(f"end_total_asset: {end_total_asset:0.2f}")
print(f"total_reward: {tot_reward:0.2f}")
print(f"total_cost: {self.cost:0.2f}")
print(f"total_trades: {self.trades}")
if df_total_value['daily_return'].std() != 0:
print(f"Sharpe: {sharpe:0.3f}")
print("=================================")
if (self.model_name!='') and (self.mode!=''):
df_actions = self.save_action_memory()
df_actions.to_csv('results/actions_{}_{}_{}.csv'.format(self.mode,self.model_name, self.iteration))
df_total_value.to_csv('results/account_value_{}_{}_{}.csv'.format(self.mode,self.model_name, self.iteration),index=False)
df_rewards.to_csv('results/account_rewards_{}_{}_{}.csv'.format(self.mode,self.model_name, self.iteration),index=False)
plt.plot(self.asset_memory,'r')
                plt.savefig('results/account_value_{}_{}_{}.png'.format(self.mode,self.model_name, self.iteration))
plt.close()
# Add outputs to logger interface
logger.record("environment/portfolio_value", end_total_asset)
logger.record("environment/total_reward", tot_reward)
logger.record("environment/total_reward_pct", (tot_reward / (end_total_asset - tot_reward)) * 100)
logger.record("environment/total_cost", self.cost)
logger.record("environment/total_trades", self.trades)
return self.state, self.reward, self.terminal, {}
else:
            actions = actions * self.hmax  # actions are initially scaled between -1 and 1
            actions = (actions.astype(int))  # convert to integer because we can't buy fractions of shares
if self.turbulence_threshold is not None:
if self.turbulence>=self.turbulence_threshold:
actions=np.array([-self.hmax]*self.stock_dim)
begin_total_asset = self.state[0]+ \
sum(np.array(self.state[1:(self.stock_dim+1)])*np.array(self.state[(self.stock_dim+1):(self.stock_dim*2+1)]))
#print("begin_total_asset:{}".format(begin_total_asset))
argsort_actions = np.argsort(actions)
sell_index = argsort_actions[:np.where(actions < 0)[0].shape[0]]
buy_index = argsort_actions[::-1][:np.where(actions > 0)[0].shape[0]]
for index in sell_index:
# print(f"Num shares before: {self.state[index+self.stock_dim+1]}")
# print(f'take sell action before : {actions[index]}')
actions[index] = self._sell_stock(index, actions[index])*(-1)
# print(f'take sell action after : {actions[index]}')
# print(f"Num shares after: {self.state[index+self.stock_dim+1]}")
for index in buy_index:
# print('take buy action: {}'.format(actions[index]))
actions[index] = self._buy_stock(index, actions[index])
self.actions_memory.append(actions)
self.day += 1
self.data = self.df.loc[self.day,:]
if self.turbulence_threshold is not None:
self.turbulence = self.data['turbulence'].values[0]
self.state = self._update_state()
end_total_asset = self.state[0]+ \
sum(np.array(self.state[1:(self.stock_dim+1)])*np.array(self.state[(self.stock_dim+1):(self.stock_dim*2+1)]))
self.asset_memory.append(end_total_asset)
self.date_memory.append(self._get_date())
self.reward = end_total_asset - begin_total_asset
self.rewards_memory.append(self.reward)
self.reward = self.reward*self.reward_scaling
return self.state, self.reward, self.terminal, {}
def reset(self):
if self.initial:
self.asset_memory = [self.initial_amount]
else:
previous_total_asset = self.previous_state[0]+ \
sum(np.array(self.previous_state[1:(self.stock_dim+1)])*np.array(self.previous_state[(self.stock_dim+1):(self.stock_dim*2+1)]))
self.asset_memory = [previous_total_asset]
self.day = 0
self.data = self.df.loc[self.day,:]
self.turbulence = 0
self.cost = 0
self.trades = 0
self.terminal = False
# self.iteration=self.iteration
self.rewards_memory = []
self.actions_memory=[]
self.date_memory=[self._get_date()]
#initiate state
self.state = self._initiate_state()
self.episode+=1
return self.state
def render(self, mode='human',close=False):
return self.state
def _initiate_state(self):
if self.initial:
# For Initial State
if len(self.df.tic.unique())>1:
# for multiple stock
state = [self.initial_amount] + \
self.data.close.values.tolist() + \
[0]*self.stock_dim + \
sum([self.data[tech].values.tolist() for tech in self.tech_indicator_list ], [])
else:
# for single stock
state = [self.initial_amount] + \
[self.data.close] + \
[0]*self.stock_dim + \
sum([[self.data[tech]] for tech in self.tech_indicator_list ], [])
else:
#Using Previous State
if len(self.df.tic.unique())>1:
# for multiple stock
state = [self.previous_state[0]] + \
self.data.close.values.tolist() + \
self.previous_state[(self.stock_dim+1):(self.stock_dim*2+1)] + \
sum([self.data[tech].values.tolist() for tech in self.tech_indicator_list ], [])
else:
# for single stock
state = [self.previous_state[0]] + \
[self.data.close] + \
self.previous_state[(self.stock_dim+1):(self.stock_dim*2+1)] + \
sum([[self.data[tech]] for tech in self.tech_indicator_list ], [])
return state
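    # Note on the state layout produced above (and kept in sync by _update_state):
    #   state = [cash] + close prices (stock_dim) + holdings (stock_dim)
    #           + one block of stock_dim values per technical indicator,
    # so the expected state_space is 1 + 2 * stock_dim + stock_dim * len(tech_indicator_list).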
def _update_state(self):
if len(self.df.tic.unique())>1:
# for multiple stock
state = [self.state[0]] + \
self.data.close.values.tolist() + \
list(self.state[(self.stock_dim+1):(self.stock_dim*2+1)]) + \
sum([self.data[tech].values.tolist() for tech in self.tech_indicator_list ], [])
else:
# for single stock
state = [self.state[0]] + \
[self.data.close] + \
list(self.state[(self.stock_dim+1):(self.stock_dim*2+1)]) + \
sum([[self.data[tech]] for tech in self.tech_indicator_list ], [])
return state
def _get_date(self):
if len(self.df.tic.unique())>1:
date = self.data.date.unique()[0]
else:
date = self.data.date
return date
def save_asset_memory(self):
date_list = self.date_memory
asset_list = self.asset_memory
#print(len(date_list))
#print(len(asset_list))
df_account_value = pd.DataFrame({'date':date_list,'account_value':asset_list})
return df_account_value
def save_action_memory(self):
if len(self.df.tic.unique())>1:
# date and close price length must match actions length
date_list = self.date_memory[:-1]
df_date = pd.DataFrame(date_list)
df_date.columns = ['date']
action_list = self.actions_memory
df_actions = pd.DataFrame(action_list)
df_actions.columns = self.data.tic.values
df_actions.index = df_date.date
#df_actions = pd.DataFrame({'date':date_list,'actions':action_list})
else:
date_list = self.date_memory[:-1]
action_list = self.actions_memory
df_actions = pd.DataFrame({'date':date_list,'actions':action_list})
return df_actions
def _seed(self, seed=None):
self.np_random, seed = seeding.np_random(seed)
return [seed]
def get_sb_env(self):
e = DummyVecEnv([lambda: self])
obs = e.reset()
return e, obs
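    # --- Illustrative usage (sketch) ---
    # The DataFrame is assumed to be indexed by day and to contain 'date', 'tic',
    # 'close', one column per technical indicator and, when turbulence_threshold is
    # set, a 'turbulence' column; the indicator names and numbers below are
    # placeholders, not values taken from this repository.
    #
    # stock_dim = 2
    # tech_list = ['macd', 'rsi_30']
    # env = StockTradingEnv(df=processed_df,
    #                       stock_dim=stock_dim,
    #                       hmax=100,
    #                       initial_amount=1000000,
    #                       buy_cost_pct=0.001,
    #                       sell_cost_pct=0.001,
    #                       reward_scaling=1e-4,
    #                       state_space=1 + 2 * stock_dim + stock_dim * len(tech_list),
    #                       action_space=stock_dim,
    #                       tech_indicator_list=tech_list)
    # vec_env, obs = env.get_sb_env()  # DummyVecEnv wrapper for stable-baselines3 training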
| 43.445652 | 171 | 0.548974 | [
"MIT"
] | hyqus/Deep-Reinforcement-Learning-for-Automated-Stock-Trading-Ensemble-Strategy-ICAIF-2020 | env/env_stocktrading.py | 15,988 | Python |
# -*- coding: UTF-8 -*-
import os
import sys
import jieba
import json
import numpy as np
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity
class SinglePassCluster():
def __init__(self, stopWords_path="../data/stop_words.txt", my_stopwords=None,
max_df=0.5, max_features=1000,
simi_threshold=0.5, res_save_path="./cluster_res.json"):
self.stopwords = self.load_stopwords(stopWords_path)
if isinstance(my_stopwords, list):
self.stopwords += my_stopwords
self.tfidf = TfidfVectorizer(stop_words=self.stopwords, max_df=max_df, max_features=max_features)
self.simi_thr = simi_threshold
self.cluster_center_vec = [] # [cluster_center_vec, ]
        self.idx_2_text = {} # {text_id: text, }
self.cluster_2_idx = {} # {cluster_id: [text_id, ]}
self.res_path = res_save_path # save self.cluster_2_idx
def load_stopwords(self, path):
stopwords = []
with open(path, 'r', encoding="utf-8") as f:
for line in f:
stopwords.append(line.strip())
return stopwords
def cut_sentences(self, texts):
if isinstance(texts, str):
if not os.path.exists(texts):
                print("path: {} does not exist!!!".format(texts))
sys.exit()
else:
_texts = []
with open(texts, 'r', encoding="utf-8") as f:
for line in f:
_texts.append(line.strip())
texts = _texts
texts_cut = [" ".join(jieba.lcut(t)) for t in texts]
self.idx_2_text = {idx: text for idx, text in enumerate(texts)}
return texts_cut
def get_tfidf(self, texts_cut):
tfidf = self.tfidf.fit_transform(texts_cut)
return tfidf.todense().tolist()
def cosion_simi(self, vec):
simi = cosine_similarity(np.array([vec]), np.array(self.cluster_center_vec))
        max_idx = np.argmax(simi, axis=1)[0]
max_val = simi[0][max_idx]
return max_val, max_idx
def single_pass(self, texts):
texts_cut = self.cut_sentences(texts)
tfidf = self.get_tfidf(texts_cut)
# print(len(tfidf), len(tfidf[0]))
        # Iterate over all documents
for idx, vec in enumerate(tfidf):
            # Initialization: no cluster center has been created yet
if not self.cluster_center_vec:
self.cluster_center_vec.append(vec)
self.cluster_2_idx[0] = [idx]
            # At least one cluster already exists
else:
max_simi, max_idx = self.cosion_simi(vec)
if max_simi >= self.simi_thr:
self.cluster_2_idx[max_idx].append(idx)
else:
self.cluster_center_vec.append(vec)
self.cluster_2_idx[len(self.cluster_2_idx)] = [idx]
with open(self.res_path, "w", encoding="utf-8") as f:
json.dump(self.cluster_2_idx, f, ensure_ascii=False)
if __name__ == "__main__":
test_data = "../data/test_data.txt"
cluster = SinglePassCluster(max_features=100, simi_threshold=0.1)
cluster.single_pass(test_data)
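    # --- Illustrative alternative (sketch) ---
    # single_pass() also accepts an in-memory list of documents, because
    # cut_sentences() only treats a str argument as a file path. The sample
    # documents and output path below are placeholders.
    #
    # docs = ["a first short news item", "a near-duplicate of the first item", "something unrelated"]
    # SinglePassCluster(max_features=100, simi_threshold=0.1,
    #                   res_save_path="./cluster_res_list.json").single_pass(docs)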
| 34.554348 | 105 | 0.592639 | [
"Apache-2.0"
] | murray-z/text_clustering | Single_Pass/single_pass_cluster.py | 3,217 | Python |
from functools import wraps
import numpy as np
import tensorflow as tf
from keras import backend as K
from keras.layers import Conv2D, Add, ZeroPadding2D, UpSampling2D, Concatenate, MaxPooling2D
from keras.layers.advanced_activations import LeakyReLU
from keras.layers.normalization import BatchNormalization
from keras.models import Model
from keras.regularizers import l2
from nets.CSPdarknet53 import darknet_body
from utils.utils import compose
#--------------------------------------------------#
# Single convolution
#--------------------------------------------------#
@wraps(Conv2D)
def DarknetConv2D(*args, **kwargs):
darknet_conv_kwargs = {'kernel_regularizer': l2(5e-4)}
darknet_conv_kwargs['padding'] = 'valid' if kwargs.get('strides')==(2,2) else 'same'
darknet_conv_kwargs.update(kwargs)
return Conv2D(*args, **darknet_conv_kwargs)
#---------------------------------------------------#
# Convolution block
# DarknetConv2D + BatchNormalization + LeakyReLU
#---------------------------------------------------#
def DarknetConv2D_BN_Leaky(*args, **kwargs):
no_bias_kwargs = {'use_bias': False}
no_bias_kwargs.update(kwargs)
return compose(
DarknetConv2D(*args, **no_bias_kwargs),
BatchNormalization(),
LeakyReLU(alpha=0.1))
#---------------------------------------------------#
# Feature layer -> final output
#---------------------------------------------------#
def make_five_convs(x, num_filters):
    # Five consecutive convolutions
x = DarknetConv2D_BN_Leaky(num_filters, (1,1))(x)
x = DarknetConv2D_BN_Leaky(num_filters*2, (3,3))(x)
x = DarknetConv2D_BN_Leaky(num_filters, (1,1))(x)
x = DarknetConv2D_BN_Leaky(num_filters*2, (3,3))(x)
x = DarknetConv2D_BN_Leaky(num_filters, (1,1))(x)
return x
#---------------------------------------------------#
# Feature layer -> final output
#---------------------------------------------------#
def yolo_body(inputs, num_anchors, num_classes):
    # Build the CSPDarknet53 backbone model
feat1,feat2,feat3 = darknet_body(inputs)
    # First feature layer
# y1=(batch_size,13,13,3,85)
P5 = DarknetConv2D_BN_Leaky(512, (1,1))(feat3)
P5 = DarknetConv2D_BN_Leaky(1024, (3,3))(P5)
P5 = DarknetConv2D_BN_Leaky(512, (1,1))(P5)
    # SPP structure: max pooling at different scales, then stacking (concatenation).
maxpool1 = MaxPooling2D(pool_size=(13,13), strides=(1,1), padding='same')(P5)
maxpool2 = MaxPooling2D(pool_size=(9,9), strides=(1,1), padding='same')(P5)
maxpool3 = MaxPooling2D(pool_size=(5,5), strides=(1,1), padding='same')(P5)
P5 = Concatenate()([maxpool1, maxpool2, maxpool3, P5])
P5 = DarknetConv2D_BN_Leaky(512, (1,1))(P5)
P5 = DarknetConv2D_BN_Leaky(1024, (3,3))(P5)
P5 = DarknetConv2D_BN_Leaky(512, (1,1))(P5)
P5_upsample = compose(DarknetConv2D_BN_Leaky(256, (1,1)), UpSampling2D(2))(P5)
P4 = DarknetConv2D_BN_Leaky(256, (1,1))(feat2)
P4 = Concatenate()([P4, P5_upsample])
P4 = make_five_convs(P4,256)
P4_upsample = compose(DarknetConv2D_BN_Leaky(128, (1,1)), UpSampling2D(2))(P4)
P3 = DarknetConv2D_BN_Leaky(128, (1,1))(feat1)
P3 = Concatenate()([P3, P4_upsample])
P3 = make_five_convs(P3,128)
P3_output = DarknetConv2D_BN_Leaky(256, (3,3))(P3)
P3_output = DarknetConv2D(num_anchors*(num_classes+5), (1,1))(P3_output)
#26,26 output
P3_downsample = ZeroPadding2D(((1,0),(1,0)))(P3)
P3_downsample = DarknetConv2D_BN_Leaky(256, (3,3), strides=(2,2))(P3_downsample)
P4 = Concatenate()([P3_downsample, P4])
P4 = make_five_convs(P4,256)
P4_output = DarknetConv2D_BN_Leaky(512, (3,3))(P4)
P4_output = DarknetConv2D(num_anchors*(num_classes+5), (1,1))(P4_output)
#13,13 output
P4_downsample = ZeroPadding2D(((1,0),(1,0)))(P4)
P4_downsample = DarknetConv2D_BN_Leaky(512, (3,3), strides=(2,2))(P4_downsample)
P5 = Concatenate()([P4_downsample, P5])
P5 = make_five_convs(P5,512)
P5_output = DarknetConv2D_BN_Leaky(1024, (3,3))(P5)
P5_output = DarknetConv2D(num_anchors*(num_classes+5), (1,1))(P5_output)
return Model(inputs, [P5_output, P4_output, P3_output])
#---------------------------------------------------#
# Convert the raw predictions of each feature layer into real values
#---------------------------------------------------#
def yolo_head(feats, anchors, num_classes, input_shape, calc_loss=False):
num_anchors = len(anchors)
# [1, 1, 1, num_anchors, 2]
anchors_tensor = K.reshape(K.constant(anchors), [1, 1, 1, num_anchors, 2])
    # Build the x, y grid
# (13,13, 1, 2)
grid_shape = K.shape(feats)[1:3] # height, width
grid_y = K.tile(K.reshape(K.arange(0, stop=grid_shape[0]), [-1, 1, 1, 1]),
[1, grid_shape[1], 1, 1])
grid_x = K.tile(K.reshape(K.arange(0, stop=grid_shape[1]), [1, -1, 1, 1]),
[grid_shape[0], 1, 1, 1])
grid = K.concatenate([grid_x, grid_y])
grid = K.cast(grid, K.dtype(feats))
# (batch_size,13,13,3,85)
feats = K.reshape(feats, [-1, grid_shape[0], grid_shape[1], num_anchors, num_classes + 5])
    # Decode the raw predictions into real values
    # box_xy is the box center
    # box_wh is the box width and height
box_xy = (K.sigmoid(feats[..., :2]) + grid) / K.cast(grid_shape[::-1], K.dtype(feats))
box_wh = K.exp(feats[..., 2:4]) * anchors_tensor / K.cast(input_shape[::-1], K.dtype(feats))
box_confidence = K.sigmoid(feats[..., 4:5])
box_class_probs = K.sigmoid(feats[..., 5:])
    # When computing the loss, return the following parameters
if calc_loss == True:
return grid, feats, box_xy, box_wh
return box_xy, box_wh, box_confidence, box_class_probs
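# Worked example for the decoding above (sketch with example numbers): on a 416x416
# input, the 13x13 head has grid_shape = (13, 13). For the cell at grid position
# x=6, y=5 with sigmoid outputs (0.5, 0.5), box_xy = ((6 + 0.5) / 13, (5 + 0.5) / 13)
# ≈ (0.50, 0.42) in normalized image coordinates, and an anchor of (142, 110) with
# raw width/height outputs of 0 gives box_wh = (142 / 416, 110 / 416) ≈ (0.34, 0.26).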
#---------------------------------------------------#
# Adjust the boxes so that they match the original image
#---------------------------------------------------#
def yolo_correct_boxes(box_xy, box_wh, input_shape, image_shape):
box_yx = box_xy[..., ::-1]
box_hw = box_wh[..., ::-1]
input_shape = K.cast(input_shape, K.dtype(box_yx))
image_shape = K.cast(image_shape, K.dtype(box_yx))
new_shape = K.round(image_shape * K.min(input_shape/image_shape))
offset = (input_shape-new_shape)/2./input_shape
scale = input_shape/new_shape
box_yx = (box_yx - offset) * scale
box_hw *= scale
box_mins = box_yx - (box_hw / 2.)
box_maxes = box_yx + (box_hw / 2.)
boxes = K.concatenate([
box_mins[..., 0:1], # y_min
box_mins[..., 1:2], # x_min
box_maxes[..., 0:1], # y_max
box_maxes[..., 1:2] # x_max
])
boxes *= K.concatenate([image_shape, image_shape])
return boxes
#---------------------------------------------------#
# Get every box and its score
#---------------------------------------------------#
def yolo_boxes_and_scores(feats, anchors, num_classes, input_shape, image_shape):
    # Decode the raw predictions into real values
    # box_xy is the box center
    # box_wh is the box width and height
# -1,13,13,3,2; -1,13,13,3,2; -1,13,13,3,1; -1,13,13,3,80
box_xy, box_wh, box_confidence, box_class_probs = yolo_head(feats, anchors, num_classes, input_shape)
    # Convert box_xy and box_wh into y_min, x_min, y_max, x_max
boxes = yolo_correct_boxes(box_xy, box_wh, input_shape, image_shape)
    # Get the scores and boxes
boxes = K.reshape(boxes, [-1, 4])
box_scores = box_confidence * box_class_probs
box_scores = K.reshape(box_scores, [-1, num_classes])
return boxes, box_scores
#---------------------------------------------------#
# Image prediction
#---------------------------------------------------#
def yolo_eval(yolo_outputs,
anchors,
num_classes,
image_shape,
max_boxes=20,
score_threshold=.6,
iou_threshold=.5):
    # Number of feature layers
num_layers = len(yolo_outputs)
    # Feature layer 1 uses anchors 6, 7, 8
    # Feature layer 2 uses anchors 3, 4, 5
    # Feature layer 3 uses anchors 0, 1, 2
anchor_mask = [[6,7,8], [3,4,5], [0,1,2]]
input_shape = K.shape(yolo_outputs[0])[1:3] * 32
boxes = []
box_scores = []
    # Process each feature layer
for l in range(num_layers):
_boxes, _box_scores = yolo_boxes_and_scores(yolo_outputs[l], anchors[anchor_mask[l]], num_classes, input_shape, image_shape)
boxes.append(_boxes)
box_scores.append(_box_scores)
    # Stack the results of every feature layer
boxes = K.concatenate(boxes, axis=0)
box_scores = K.concatenate(box_scores, axis=0)
mask = box_scores >= score_threshold
max_boxes_tensor = K.constant(max_boxes, dtype='int32')
boxes_ = []
scores_ = []
classes_ = []
for c in range(num_classes):
        # Keep all boxes with box_scores >= score_threshold, together with their scores
class_boxes = tf.boolean_mask(boxes, mask[:, c])
class_box_scores = tf.boolean_mask(box_scores[:, c], mask[:, c])
        # Non-maximum suppression: drop boxes that overlap too much
nms_index = tf.image.non_max_suppression(
class_boxes, class_box_scores, max_boxes_tensor, iou_threshold=iou_threshold)
        # Results kept after non-maximum suppression
        # The three values below are
        # the box positions, scores and classes
class_boxes = K.gather(class_boxes, nms_index)
class_box_scores = K.gather(class_box_scores, nms_index)
classes = K.ones_like(class_box_scores, 'int32') * c
boxes_.append(class_boxes)
scores_.append(class_box_scores)
classes_.append(classes)
boxes_ = K.concatenate(boxes_, axis=0)
scores_ = K.concatenate(scores_, axis=0)
classes_ = K.concatenate(classes_, axis=0)
return boxes_, scores_, classes_
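# --- Illustrative wiring (sketch) ---
# How these pieces are typically combined at inference time; the input size,
# anchor array and class count below are placeholders.
#
# from keras.layers import Input
# image_input = Input(shape=(416, 416, 3))
# model = yolo_body(image_input, num_anchors=3, num_classes=80)
# boxes, scores, classes = yolo_eval(model.output, anchors, num_classes=80,
#                                    image_shape=K.placeholder(shape=(2,)))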
| 36.800813 | 132 | 0.598144 | [
"MIT"
] | yanjingke/yolov4-keras | nets/yolo4.py | 9,627 | Python |
from hy.macros import macroexpand
from hy.compiler import HyTypeError
from hy.lex import tokenize
def test_reader_macro_error():
"""Check if we get correct error with wrong disptach character"""
try:
macroexpand(tokenize("(dispatch_reader_macro '- '())")[0], __name__)
except HyTypeError as e:
assert "with the character `-`" in str(e)
| 30.5 | 76 | 0.70765 | [
"MIT"
] | ALSchwalm/hy | tests/macros/test_reader_macros.py | 366 | Python |
# -*- coding: utf-8 -*-
import os
import unittest
from StringIO import StringIO
import antlr3
class TestStringStream(unittest.TestCase):
"""Test case for the StringStream class."""
def testSize(self):
"""StringStream.size()"""
stream = antlr3.StringStream('foo')
self.failUnlessEqual(stream.size(), 3)
def testIndex(self):
"""StringStream.index()"""
stream = antlr3.StringStream('foo')
self.failUnlessEqual(stream.index(), 0)
def testConsume(self):
"""StringStream.consume()"""
stream = antlr3.StringStream('foo\nbar')
stream.consume() # f
self.failUnlessEqual(stream.index(), 1)
self.failUnlessEqual(stream.charPositionInLine, 1)
self.failUnlessEqual(stream.line, 1)
stream.consume() # o
self.failUnlessEqual(stream.index(), 2)
self.failUnlessEqual(stream.charPositionInLine, 2)
self.failUnlessEqual(stream.line, 1)
stream.consume() # o
self.failUnlessEqual(stream.index(), 3)
self.failUnlessEqual(stream.charPositionInLine, 3)
self.failUnlessEqual(stream.line, 1)
stream.consume() # \n
self.failUnlessEqual(stream.index(), 4)
self.failUnlessEqual(stream.charPositionInLine, 0)
self.failUnlessEqual(stream.line, 2)
stream.consume() # b
self.failUnlessEqual(stream.index(), 5)
self.failUnlessEqual(stream.charPositionInLine, 1)
self.failUnlessEqual(stream.line, 2)
stream.consume() # a
self.failUnlessEqual(stream.index(), 6)
self.failUnlessEqual(stream.charPositionInLine, 2)
self.failUnlessEqual(stream.line, 2)
stream.consume() # r
self.failUnlessEqual(stream.index(), 7)
self.failUnlessEqual(stream.charPositionInLine, 3)
self.failUnlessEqual(stream.line, 2)
stream.consume() # EOF
self.failUnlessEqual(stream.index(), 7)
self.failUnlessEqual(stream.charPositionInLine, 3)
self.failUnlessEqual(stream.line, 2)
stream.consume() # EOF
self.failUnlessEqual(stream.index(), 7)
self.failUnlessEqual(stream.charPositionInLine, 3)
self.failUnlessEqual(stream.line, 2)
def testReset(self):
"""StringStream.reset()"""
stream = antlr3.StringStream('foo')
stream.consume()
stream.consume()
stream.reset()
self.failUnlessEqual(stream.index(), 0)
self.failUnlessEqual(stream.line, 1)
self.failUnlessEqual(stream.charPositionInLine, 0)
self.failUnlessEqual(stream.LA(1), 'f')
def testLA(self):
"""StringStream.LA()"""
stream = antlr3.StringStream('foo')
self.failUnlessEqual(stream.LA(1), 'f')
self.failUnlessEqual(stream.LA(2), 'o')
self.failUnlessEqual(stream.LA(3), 'o')
stream.consume()
stream.consume()
self.failUnlessEqual(stream.LA(1), 'o')
self.failUnlessEqual(stream.LA(2), antlr3.EOF)
self.failUnlessEqual(stream.LA(3), antlr3.EOF)
def testSubstring(self):
"""StringStream.substring()"""
stream = antlr3.StringStream('foobar')
self.failUnlessEqual(stream.substring(0, 0), 'f')
self.failUnlessEqual(stream.substring(0, 1), 'fo')
self.failUnlessEqual(stream.substring(0, 5), 'foobar')
self.failUnlessEqual(stream.substring(3, 5), 'bar')
def testSeekForward(self):
"""StringStream.seek(): forward"""
stream = antlr3.StringStream('foo\nbar')
stream.seek(4)
self.failUnlessEqual(stream.index(), 4)
self.failUnlessEqual(stream.line, 2)
self.failUnlessEqual(stream.charPositionInLine, 0)
self.failUnlessEqual(stream.LA(1), 'b')
## # not yet implemented
## def testSeekBackward(self):
## """StringStream.seek(): backward"""
## stream = antlr3.StringStream('foo\nbar')
## stream.seek(4)
## stream.seek(1)
## self.failUnlessEqual(stream.index(), 1)
## self.failUnlessEqual(stream.line, 1)
## self.failUnlessEqual(stream.charPositionInLine, 1)
## self.failUnlessEqual(stream.LA(1), 'o')
def testMark(self):
"""StringStream.mark()"""
stream = antlr3.StringStream('foo\nbar')
stream.seek(4)
marker = stream.mark()
self.failUnlessEqual(marker, 1)
self.failUnlessEqual(stream.markDepth, 1)
stream.consume()
marker = stream.mark()
self.failUnlessEqual(marker, 2)
self.failUnlessEqual(stream.markDepth, 2)
def testReleaseLast(self):
"""StringStream.release(): last marker"""
stream = antlr3.StringStream('foo\nbar')
stream.seek(4)
marker1 = stream.mark()
stream.consume()
marker2 = stream.mark()
stream.release()
self.failUnlessEqual(stream.markDepth, 1)
# release same marker again, nothing has changed
stream.release()
self.failUnlessEqual(stream.markDepth, 1)
def testReleaseNested(self):
"""StringStream.release(): nested"""
stream = antlr3.StringStream('foo\nbar')
stream.seek(4)
marker1 = stream.mark()
stream.consume()
marker2 = stream.mark()
stream.consume()
marker3 = stream.mark()
stream.release(marker2)
self.failUnlessEqual(stream.markDepth, 1)
def testRewindLast(self):
"""StringStream.rewind(): last marker"""
stream = antlr3.StringStream('foo\nbar')
stream.seek(4)
marker = stream.mark()
stream.consume()
stream.consume()
stream.rewind()
self.failUnlessEqual(stream.markDepth, 0)
self.failUnlessEqual(stream.index(), 4)
self.failUnlessEqual(stream.line, 2)
self.failUnlessEqual(stream.charPositionInLine, 0)
self.failUnlessEqual(stream.LA(1), 'b')
def testRewindNested(self):
"""StringStream.rewind(): nested"""
stream = antlr3.StringStream('foo\nbar')
stream.seek(4)
marker1 = stream.mark()
stream.consume()
marker2 = stream.mark()
stream.consume()
marker3 = stream.mark()
stream.rewind(marker2)
self.failUnlessEqual(stream.markDepth, 1)
self.failUnlessEqual(stream.index(), 5)
self.failUnlessEqual(stream.line, 2)
self.failUnlessEqual(stream.charPositionInLine, 1)
self.failUnlessEqual(stream.LA(1), 'a')
class TestFileStream(unittest.TestCase):
"""Test case for the FileStream class."""
def testNoEncoding(self):
path = os.path.join(os.path.dirname(__file__), 'teststreams.input1')
stream = antlr3.FileStream(path)
stream.seek(4)
marker1 = stream.mark()
stream.consume()
marker2 = stream.mark()
stream.consume()
marker3 = stream.mark()
stream.rewind(marker2)
self.failUnlessEqual(stream.markDepth, 1)
self.failUnlessEqual(stream.index(), 5)
self.failUnlessEqual(stream.line, 2)
self.failUnlessEqual(stream.charPositionInLine, 1)
self.failUnlessEqual(stream.LA(1), 'a')
def testEncoded(self):
path = os.path.join(os.path.dirname(__file__), 'teststreams.input2')
stream = antlr3.FileStream(path, 'utf-8')
stream.seek(4)
marker1 = stream.mark()
stream.consume()
marker2 = stream.mark()
stream.consume()
marker3 = stream.mark()
stream.rewind(marker2)
self.failUnlessEqual(stream.markDepth, 1)
self.failUnlessEqual(stream.index(), 5)
self.failUnlessEqual(stream.line, 2)
self.failUnlessEqual(stream.charPositionInLine, 1)
self.failUnlessEqual(stream.LA(1), u'ä')
class TestInputStream(unittest.TestCase):
"""Test case for the InputStream class."""
def testNoEncoding(self):
file = StringIO('foo\nbar')
stream = antlr3.InputStream(file)
stream.seek(4)
marker1 = stream.mark()
stream.consume()
marker2 = stream.mark()
stream.consume()
marker3 = stream.mark()
stream.rewind(marker2)
self.failUnlessEqual(stream.markDepth, 1)
self.failUnlessEqual(stream.index(), 5)
self.failUnlessEqual(stream.line, 2)
self.failUnlessEqual(stream.charPositionInLine, 1)
self.failUnlessEqual(stream.LA(1), 'a')
def testEncoded(self):
file = StringIO(u'foo\nbär'.encode('utf-8'))
stream = antlr3.InputStream(file, 'utf-8')
stream.seek(4)
marker1 = stream.mark()
stream.consume()
marker2 = stream.mark()
stream.consume()
marker3 = stream.mark()
stream.rewind(marker2)
self.failUnlessEqual(stream.markDepth, 1)
self.failUnlessEqual(stream.index(), 5)
self.failUnlessEqual(stream.line, 2)
self.failUnlessEqual(stream.charPositionInLine, 1)
self.failUnlessEqual(stream.LA(1), u'ä')
class TestCommonTokenStream(unittest.TestCase):
"""Test case for the StringStream class."""
def setUp(self):
"""Setup test fixure
The constructor of CommonTokenStream needs a token source. This
is a simple mock class providing just the nextToken() method.
"""
class MockSource(object):
def __init__(self):
self.tokens = []
def nextToken(self):
try:
return self.tokens.pop(0)
except IndexError:
return None
self.source = MockSource()
def testInit(self):
"""CommonTokenStream.__init__()"""
stream = antlr3.CommonTokenStream(self.source)
self.failUnlessEqual(stream.index(), -1)
def testSetTokenSource(self):
"""CommonTokenStream.setTokenSource()"""
stream = antlr3.CommonTokenStream(None)
stream.setTokenSource(self.source)
self.failUnlessEqual(stream.index(), -1)
self.failUnlessEqual(stream.channel, antlr3.DEFAULT_CHANNEL)
def testLTEmptySource(self):
"""CommonTokenStream.LT(): EOF (empty source)"""
stream = antlr3.CommonTokenStream(self.source)
lt1 = stream.LT(1)
self.failUnlessEqual(lt1.type, antlr3.EOF)
def testLT1(self):
"""CommonTokenStream.LT(1)"""
self.source.tokens.append(
antlr3.CommonToken(type=12)
)
stream = antlr3.CommonTokenStream(self.source)
lt1 = stream.LT(1)
self.failUnlessEqual(lt1.type, 12)
def testLT1WithHidden(self):
"""CommonTokenStream.LT(1): with hidden tokens"""
self.source.tokens.append(
antlr3.CommonToken(type=12, channel=antlr3.HIDDEN_CHANNEL)
)
self.source.tokens.append(
antlr3.CommonToken(type=13)
)
stream = antlr3.CommonTokenStream(self.source)
lt1 = stream.LT(1)
self.failUnlessEqual(lt1.type, 13)
def testLT2BeyondEnd(self):
"""CommonTokenStream.LT(2): beyond end"""
self.source.tokens.append(
antlr3.CommonToken(type=12)
)
self.source.tokens.append(
antlr3.CommonToken(type=13, channel=antlr3.HIDDEN_CHANNEL)
)
stream = antlr3.CommonTokenStream(self.source)
lt1 = stream.LT(2)
self.failUnlessEqual(lt1.type, antlr3.EOF)
# not yet implemented
def testLTNegative(self):
"""CommonTokenStream.LT(-1): look back"""
self.source.tokens.append(
antlr3.CommonToken(type=12)
)
self.source.tokens.append(
antlr3.CommonToken(type=13)
)
stream = antlr3.CommonTokenStream(self.source)
stream.fillBuffer()
stream.consume()
lt1 = stream.LT(-1)
self.failUnlessEqual(lt1.type, 12)
def testLB1(self):
"""CommonTokenStream.LB(1)"""
self.source.tokens.append(
antlr3.CommonToken(type=12)
)
self.source.tokens.append(
antlr3.CommonToken(type=13)
)
stream = antlr3.CommonTokenStream(self.source)
stream.fillBuffer()
stream.consume()
self.failUnlessEqual(stream.LB(1).type, 12)
def testLTZero(self):
"""CommonTokenStream.LT(0)"""
self.source.tokens.append(
antlr3.CommonToken(type=12)
)
self.source.tokens.append(
antlr3.CommonToken(type=13)
)
stream = antlr3.CommonTokenStream(self.source)
lt1 = stream.LT(0)
self.failUnless(lt1 is None)
def testLBBeyondBegin(self):
"""CommonTokenStream.LB(-1): beyond begin"""
self.source.tokens.append(
antlr3.CommonToken(type=12)
)
self.source.tokens.append(
antlr3.CommonToken(type=12, channel=antlr3.HIDDEN_CHANNEL)
)
self.source.tokens.append(
antlr3.CommonToken(type=12, channel=antlr3.HIDDEN_CHANNEL)
)
self.source.tokens.append(
antlr3.CommonToken(type=13)
)
stream = antlr3.CommonTokenStream(self.source)
self.failUnless(stream.LB(1) is None)
stream.consume()
stream.consume()
self.failUnless(stream.LB(3) is None)
def testFillBuffer(self):
"""CommonTokenStream.fillBuffer()"""
self.source.tokens.append(
antlr3.CommonToken(type=12)
)
self.source.tokens.append(
antlr3.CommonToken(type=13)
)
self.source.tokens.append(
antlr3.CommonToken(type=14)
)
self.source.tokens.append(
antlr3.CommonToken(type=antlr3.EOF)
)
stream = antlr3.CommonTokenStream(self.source)
stream.fillBuffer()
self.failUnlessEqual(len(stream.tokens), 3)
self.failUnlessEqual(stream.tokens[0].type, 12)
self.failUnlessEqual(stream.tokens[1].type, 13)
self.failUnlessEqual(stream.tokens[2].type, 14)
def testConsume(self):
"""CommonTokenStream.consume()"""
self.source.tokens.append(
antlr3.CommonToken(type=12)
)
self.source.tokens.append(
antlr3.CommonToken(type=13)
)
self.source.tokens.append(
antlr3.CommonToken(type=antlr3.EOF)
)
stream = antlr3.CommonTokenStream(self.source)
self.failUnlessEqual(stream.LA(1), 12)
stream.consume()
self.failUnlessEqual(stream.LA(1), 13)
stream.consume()
self.failUnlessEqual(stream.LA(1), antlr3.EOF)
stream.consume()
self.failUnlessEqual(stream.LA(1), antlr3.EOF)
def testSeek(self):
"""CommonTokenStream.seek()"""
self.source.tokens.append(
antlr3.CommonToken(type=12)
)
self.source.tokens.append(
antlr3.CommonToken(type=13)
)
self.source.tokens.append(
antlr3.CommonToken(type=antlr3.EOF)
)
stream = antlr3.CommonTokenStream(self.source)
self.failUnlessEqual(stream.LA(1), 12)
stream.seek(2)
self.failUnlessEqual(stream.LA(1), antlr3.EOF)
stream.seek(0)
self.failUnlessEqual(stream.LA(1), 12)
def testMarkRewind(self):
"""CommonTokenStream.mark()/rewind()"""
self.source.tokens.append(
antlr3.CommonToken(type=12)
)
self.source.tokens.append(
antlr3.CommonToken(type=13)
)
self.source.tokens.append(
antlr3.CommonToken(type=antlr3.EOF)
)
stream = antlr3.CommonTokenStream(self.source)
stream.fillBuffer()
stream.consume()
marker = stream.mark()
stream.consume()
stream.rewind(marker)
self.failUnlessEqual(stream.LA(1), 13)
def testToString(self):
"""CommonTokenStream.toString()"""
self.source.tokens.append(
antlr3.CommonToken(type=12, text="foo")
)
self.source.tokens.append(
antlr3.CommonToken(type=13, text="bar")
)
self.source.tokens.append(
antlr3.CommonToken(type=14, text="gnurz")
)
self.source.tokens.append(
antlr3.CommonToken(type=15, text="blarz")
)
stream = antlr3.CommonTokenStream(self.source)
assert stream.toString() == "foobargnurzblarz"
assert stream.toString(1, 2) == "bargnurz"
assert stream.toString(stream.tokens[1], stream.tokens[-2]) == "bargnurz"
if __name__ == "__main__":
unittest.main(testRunner=unittest.TextTestRunner(verbosity=2))
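# --- Illustrative usage (sketch) ---
# Outside of these unit tests, the stream classes are normally fed into a
# grammar-generated lexer/parser pair; "MyLexer" and "MyParser" below are
# placeholders for such generated classes, not part of the runtime itself.
#
# char_stream = antlr3.StringStream('some input')
# lexer = MyLexer(char_stream)
# tokens = antlr3.CommonTokenStream(lexer)
# parser = MyParser(tokens)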
| 27.087023 | 81 | 0.57502 | [
"MIT"
] | MichaelReiter/CSC435 | libs/antlr-3.0.1/runtime/Python/unittests/teststreams.py | 17,745 | Python |
# coding=utf-8
"""
This code was generated by
\ / _ _ _| _ _
| (_)\/(_)(_|\/| |(/_ v1.0.0
/ /
"""
from twilio.base import deserialize
from twilio.base import values
from twilio.base.instance_context import InstanceContext
from twilio.base.instance_resource import InstanceResource
from twilio.base.list_resource import ListResource
from twilio.base.page import Page
class FeedbackList(ListResource):
def __init__(self, version, account_sid, call_sid):
"""
Initialize the FeedbackList
:param Version version: Version that contains the resource
:param account_sid: The account_sid
:param call_sid: A 34 character string that uniquely identifies this resource.
:returns: twilio.rest.api.v2010.account.call.feedback.FeedbackList
:rtype: twilio.rest.api.v2010.account.call.feedback.FeedbackList
"""
super(FeedbackList, self).__init__(version)
# Path Solution
self._solution = {
'account_sid': account_sid,
'call_sid': call_sid,
}
def get(self):
"""
Constructs a FeedbackContext
:returns: twilio.rest.api.v2010.account.call.feedback.FeedbackContext
:rtype: twilio.rest.api.v2010.account.call.feedback.FeedbackContext
"""
return FeedbackContext(
self._version,
account_sid=self._solution['account_sid'],
call_sid=self._solution['call_sid'],
)
def __call__(self):
"""
Constructs a FeedbackContext
:returns: twilio.rest.api.v2010.account.call.feedback.FeedbackContext
:rtype: twilio.rest.api.v2010.account.call.feedback.FeedbackContext
"""
return FeedbackContext(
self._version,
account_sid=self._solution['account_sid'],
call_sid=self._solution['call_sid'],
)
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
return '<Twilio.Api.V2010.FeedbackList>'
class FeedbackPage(Page):
def __init__(self, version, response, solution):
"""
Initialize the FeedbackPage
:param Version version: Version that contains the resource
:param Response response: Response from the API
:param account_sid: The account_sid
:param call_sid: A 34 character string that uniquely identifies this resource.
:returns: twilio.rest.api.v2010.account.call.feedback.FeedbackPage
:rtype: twilio.rest.api.v2010.account.call.feedback.FeedbackPage
"""
super(FeedbackPage, self).__init__(version, response)
# Path Solution
self._solution = solution
def get_instance(self, payload):
"""
Build an instance of FeedbackInstance
:param dict payload: Payload response from the API
:returns: twilio.rest.api.v2010.account.call.feedback.FeedbackInstance
:rtype: twilio.rest.api.v2010.account.call.feedback.FeedbackInstance
"""
return FeedbackInstance(
self._version,
payload,
account_sid=self._solution['account_sid'],
call_sid=self._solution['call_sid'],
)
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
return '<Twilio.Api.V2010.FeedbackPage>'
class FeedbackContext(InstanceContext):
def __init__(self, version, account_sid, call_sid):
"""
Initialize the FeedbackContext
:param Version version: Version that contains the resource
:param account_sid: The account_sid
:param call_sid: The call sid that uniquely identifies the call
:returns: twilio.rest.api.v2010.account.call.feedback.FeedbackContext
:rtype: twilio.rest.api.v2010.account.call.feedback.FeedbackContext
"""
super(FeedbackContext, self).__init__(version)
# Path Solution
self._solution = {
'account_sid': account_sid,
'call_sid': call_sid,
}
self._uri = '/Accounts/{account_sid}/Calls/{call_sid}/Feedback.json'.format(**self._solution)
def create(self, quality_score, issue=values.unset):
"""
Create a new FeedbackInstance
:param unicode quality_score: The quality_score
:param FeedbackInstance.Issues issue: The issue
:returns: Newly created FeedbackInstance
:rtype: twilio.rest.api.v2010.account.call.feedback.FeedbackInstance
"""
data = values.of({
'QualityScore': quality_score,
'Issue': issue,
})
payload = self._version.create(
'POST',
self._uri,
data=data,
)
return FeedbackInstance(
self._version,
payload,
account_sid=self._solution['account_sid'],
call_sid=self._solution['call_sid'],
)
def fetch(self):
"""
Fetch a FeedbackInstance
:returns: Fetched FeedbackInstance
:rtype: twilio.rest.api.v2010.account.call.feedback.FeedbackInstance
"""
params = values.of({})
payload = self._version.fetch(
'GET',
self._uri,
params=params,
)
return FeedbackInstance(
self._version,
payload,
account_sid=self._solution['account_sid'],
call_sid=self._solution['call_sid'],
)
def update(self, quality_score, issue=values.unset):
"""
Update the FeedbackInstance
:param unicode quality_score: An integer from 1 to 5
:param FeedbackInstance.Issues issue: Issues experienced during the call
:returns: Updated FeedbackInstance
:rtype: twilio.rest.api.v2010.account.call.feedback.FeedbackInstance
"""
data = values.of({
'QualityScore': quality_score,
'Issue': issue,
})
payload = self._version.update(
'POST',
self._uri,
data=data,
)
return FeedbackInstance(
self._version,
payload,
account_sid=self._solution['account_sid'],
call_sid=self._solution['call_sid'],
)
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
context = ' '.join('{}={}'.format(k, v) for k, v in self._solution.items())
return '<Twilio.Api.V2010.FeedbackContext {}>'.format(context)
class FeedbackInstance(InstanceResource):
class Issues(object):
AUDIO_LATENCY = "audio-latency"
DIGITS_NOT_CAPTURED = "digits-not-captured"
DROPPED_CALL = "dropped-call"
IMPERFECT_AUDIO = "imperfect-audio"
INCORRECT_CALLER_ID = "incorrect-caller-id"
ONE_WAY_AUDIO = "one-way-audio"
POST_DIAL_DELAY = "post-dial-delay"
UNSOLICITED_CALL = "unsolicited-call"
def __init__(self, version, payload, account_sid, call_sid):
"""
Initialize the FeedbackInstance
:returns: twilio.rest.api.v2010.account.call.feedback.FeedbackInstance
:rtype: twilio.rest.api.v2010.account.call.feedback.FeedbackInstance
"""
super(FeedbackInstance, self).__init__(version)
# Marshaled Properties
self._properties = {
'account_sid': payload['account_sid'],
'date_created': deserialize.rfc2822_datetime(payload['date_created']),
'date_updated': deserialize.rfc2822_datetime(payload['date_updated']),
'issues': payload['issues'],
'quality_score': deserialize.integer(payload['quality_score']),
'sid': payload['sid'],
}
# Context
self._context = None
self._solution = {
'account_sid': account_sid,
'call_sid': call_sid,
}
@property
def _proxy(self):
"""
Generate an instance context for the instance, the context is capable of
performing various actions. All instance actions are proxied to the context
:returns: FeedbackContext for this FeedbackInstance
:rtype: twilio.rest.api.v2010.account.call.feedback.FeedbackContext
"""
if self._context is None:
self._context = FeedbackContext(
self._version,
account_sid=self._solution['account_sid'],
call_sid=self._solution['call_sid'],
)
return self._context
@property
def account_sid(self):
"""
:returns: The account_sid
:rtype: unicode
"""
return self._properties['account_sid']
@property
def date_created(self):
"""
:returns: The date_created
:rtype: datetime
"""
return self._properties['date_created']
@property
def date_updated(self):
"""
:returns: The date_updated
:rtype: datetime
"""
return self._properties['date_updated']
@property
def issues(self):
"""
:returns: The issues
:rtype: FeedbackInstance.Issues
"""
return self._properties['issues']
@property
def quality_score(self):
"""
:returns: 1 to 5 quality score
:rtype: unicode
"""
return self._properties['quality_score']
@property
def sid(self):
"""
:returns: The sid
:rtype: unicode
"""
return self._properties['sid']
def create(self, quality_score, issue=values.unset):
"""
Create a new FeedbackInstance
:param unicode quality_score: The quality_score
:param FeedbackInstance.Issues issue: The issue
:returns: Newly created FeedbackInstance
:rtype: twilio.rest.api.v2010.account.call.feedback.FeedbackInstance
"""
return self._proxy.create(
quality_score,
issue=issue,
)
def fetch(self):
"""
Fetch a FeedbackInstance
:returns: Fetched FeedbackInstance
:rtype: twilio.rest.api.v2010.account.call.feedback.FeedbackInstance
"""
return self._proxy.fetch()
def update(self, quality_score, issue=values.unset):
"""
Update the FeedbackInstance
:param unicode quality_score: An integer from 1 to 5
:param FeedbackInstance.Issues issue: Issues experienced during the call
:returns: Updated FeedbackInstance
:rtype: twilio.rest.api.v2010.account.call.feedback.FeedbackInstance
"""
return self._proxy.update(
quality_score,
issue=issue,
)
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
context = ' '.join('{}={}'.format(k, v) for k, v in self._solution.items())
return '<Twilio.Api.V2010.FeedbackInstance {}>'.format(context)
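# --- Illustrative usage (sketch, not generated code) ---
# Typical access pattern through the REST client, assuming the parent call context
# exposes this list as .feedback and the Client is configured with valid credentials;
# the call SID below is a placeholder.
#
# from twilio.rest import Client
# client = Client(account_sid, auth_token)
# feedback = client.calls('CAXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX').feedback().create(
#     quality_score=5, issue=FeedbackInstance.Issues.POST_DIAL_DELAY)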
| 29.551181 | 101 | 0.606981 | [
"MIT"
] | Jason-Paprocki/hacknjit | lib/python2.7/site-packages/twilio/rest/api/v2010/account/call/feedback.py | 11,259 | Python |
import xgboost as xgb
import testing as tm
import numpy as np
import unittest
rng = np.random.RandomState(1994)
class TestFastHist(unittest.TestCase):
def test_fast_hist(self):
tm._skip_if_no_sklearn()
from sklearn.datasets import load_digits
try:
from sklearn.model_selection import train_test_split
        except ImportError:
from sklearn.cross_validation import train_test_split
digits = load_digits(2)
X = digits['data']
y = digits['target']
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
dtrain = xgb.DMatrix(X_train, y_train)
dtest = xgb.DMatrix(X_test, y_test)
param = {'objective': 'binary:logistic',
'tree_method': 'hist',
'grow_policy': 'depthwise',
'max_depth': 3,
'eval_metric': 'auc'}
res = {}
xgb.train(param, dtrain, 10, [(dtrain, 'train'), (dtest, 'test')],
evals_result=res)
assert self.non_decreasing(res['train']['auc'])
assert self.non_decreasing(res['test']['auc'])
param2 = {'objective': 'binary:logistic',
'tree_method': 'hist',
'grow_policy': 'lossguide',
'max_depth': 0,
'max_leaves': 8,
'eval_metric': 'auc'}
res = {}
xgb.train(param2, dtrain, 10, [(dtrain, 'train'), (dtest, 'test')],
evals_result=res)
assert self.non_decreasing(res['train']['auc'])
assert self.non_decreasing(res['test']['auc'])
param3 = {'objective': 'binary:logistic',
'tree_method': 'hist',
'grow_policy': 'lossguide',
'max_depth': 0,
'max_leaves': 8,
'max_bin': 16,
'eval_metric': 'auc'}
res = {}
xgb.train(param3, dtrain, 10, [(dtrain, 'train'), (dtest, 'test')],
evals_result=res)
assert self.non_decreasing(res['train']['auc'])
# fail-safe test for dense data
from sklearn.datasets import load_svmlight_file
dpath = 'demo/data/'
X2, y2 = load_svmlight_file(dpath + 'agaricus.txt.train')
X2 = X2.toarray()
dtrain2 = xgb.DMatrix(X2, label=y2)
param = {'objective': 'binary:logistic',
'tree_method': 'hist',
'grow_policy': 'depthwise',
'max_depth': 2,
'eval_metric': 'auc'}
res = {}
xgb.train(param, dtrain2, 10, [(dtrain2, 'train')], evals_result=res)
assert self.non_decreasing(res['train']['auc'])
assert res['train']['auc'][0] >= 0.85
for j in range(X2.shape[1]):
for i in np.random.choice(X2.shape[0], size=10, replace=False):
X2[i, j] = 2
dtrain3 = xgb.DMatrix(X2, label=y2)
res = {}
xgb.train(param, dtrain3, 10, [(dtrain3, 'train')], evals_result=res)
assert self.non_decreasing(res['train']['auc'])
assert res['train']['auc'][0] >= 0.85
for j in range(X2.shape[1]):
for i in np.random.choice(X2.shape[0], size=10, replace=False):
X2[i, j] = 3
dtrain4 = xgb.DMatrix(X2, label=y2)
res = {}
xgb.train(param, dtrain4, 10, [(dtrain4, 'train')], evals_result=res)
assert self.non_decreasing(res['train']['auc'])
assert res['train']['auc'][0] >= 0.85
# fail-safe test for max_bin=2
param = {'objective': 'binary:logistic',
'tree_method': 'hist',
'grow_policy': 'depthwise',
'max_depth': 2,
'eval_metric': 'auc',
'max_bin': 2}
res = {}
xgb.train(param, dtrain2, 10, [(dtrain2, 'train')], evals_result=res)
assert self.non_decreasing(res['train']['auc'])
assert res['train']['auc'][0] >= 0.85
def non_decreasing(self, L):
return all(x <= y for x, y in zip(L, L[1:]))
| 36.783784 | 81 | 0.525104 | [
"Apache-2.0"
] | daoliker/xgboost | tests/python/test_fast_hist.py | 4,083 | Python |
def test_List():
    a: i32
    b: i32
    a = [1, 2, 3]
    a = [-3, -2, -1]
    a = ["a", "b", "c"]
    a = [[1, 2, 3], [4, 5, 6]]
    # a = [-2, -1, 0.45] -> semantic error
    b = a[2]
| 15.916667 | 42 | 0.319372 | [
"MIT"
] | Smit-create/lpython | tests/list1.py | 191 | Python |
import discord
import json
from discord.ext import commands
from cogs.ObjectCache import config
from cogs.ObjectCache import server_config
from cogs.ObjectCache import get_lang
from collections import OrderedDict
with open('commands.json') as json_data:
commands_json = json.load(json_data, object_pairs_hook = OrderedDict)
class Help(commands.Cog):
def __init__(self, bot):
self.bot = bot
@commands.command(aliases = ['h'])
async def help(self, ctx, command_name = None):
try:
guild_prefix = server_config[ctx.guild.id]['prefix']
except:
guild_prefix = config['default_prefix']
if not command_name:
embed = discord.Embed(color = 0x00FF00)
embed.set_author(name = self.bot.user.name, icon_url = self.bot.user.avatar_url)
embed.add_field(name = 'Help', value = ', '.join(['`' + guild_prefix + i + '`' for i in commands_json.keys() if commands_json[i]['module'] == 'Help']), inline = False)
embed.add_field(name = 'Administration', value = ', '.join(['`' + guild_prefix + i + '`' for i in commands_json.keys() if commands_json[i]['module'] == 'Administration']), inline = False)
embed.add_field(name = 'MemberPresence', value = ', '.join(['`' + guild_prefix + i + '`' for i in commands_json.keys() if commands_json[i]['module'] == 'MemberPresence']), inline = False)
embed.add_field(name = 'Economy', value = ', '.join(['`' + guild_prefix + i + '`' for i in commands_json.keys() if commands_json[i]['module'] == 'Economy']), inline = False)
embed.add_field(name = 'Utility', value = ', '.join(['`' + guild_prefix + i + '`' for i in commands_json.keys() if commands_json[i]['module'] == 'Utility']), inline = False)
embed.set_footer(text = get_lang(ctx.guild, 'HELP_response_footer').format(guild_prefix))
else:
try:
cmd_data = commands_json[command_name.lower()]
except:
return await ctx.send(embed = discord.Embed(description = get_lang(ctx, 'HELP_command_notfound'), color = 0xFF0000))
embed = discord.Embed(title = ' / '.join(['`' + guild_prefix + i + '`' for i in cmd_data['title']]), description = get_lang(ctx.guild, cmd_data['description']), color = 0x00FF00)
embed.add_field(name = get_lang(ctx.guild, 'HELP_permission_string_user'), value = '\n'.join(['`' + get_lang(ctx.guild, i) + '`' for i in cmd_data['user_permissions']]), inline = True)
embed.add_field(name = get_lang(ctx.guild, 'HELP_permission_string_bot'), value = '\n'.join(['`' + get_lang(ctx.guild, i) + '`' for i in cmd_data['bot_permissions']]), inline = True)
embed.add_field(name = get_lang(ctx.guild, 'HELP_example_string'), value = ' or '.join(['`' + guild_prefix + i + '`' for i in cmd_data['examples']]), inline = False)
embed.set_footer(text = 'Module: ' + cmd_data['module'])
await ctx.send(embed = embed)
@commands.command()
async def invite(self, ctx):
await ctx.send(embed = discord.Embed(description = '[Support Server](https://discord.gg/sbySHxA)\n[Add Me](https://discordapp.com/oauth2/authorize?client_id=' + str(self.bot.user.id) + '&scope=bot&permissions=0)\n[GitHub](https://github.com/Deivedux/Shiramine)\n[Patreon](https://www.patreon.com/QuoteBot)', color = 0x00FF00))
def setup(bot):
bot.add_cog(Help(bot))
| 57.017857 | 328 | 0.691513 | [
"MIT"
] | Deivedux/Shiramine | cogs/Help.py | 3,193 | Python |
import json
import os
import uuid
from datetime import datetime, timedelta, timezone
from decimal import Decimal
from unittest import mock
from django.contrib.auth import get_user_model
from django.core.files.uploadedfile import SimpleUploadedFile
from django.test import override_settings
from django.urls import reverse
from rest_framework import status
from rest_framework.test import APITestCase
from talents.models import Agency, Talent
from orders.models import (
AgencyProfit,
AgencyProfitPercentage,
Buyer,
Charge,
CreditCard,
CustomTalentProfitPercentage,
TalentProfit,
DefaultTalentProfitPercentage,
Order,
)
from request_shoutout.domain.models import Charge as DomainCharge
from shoutouts.models import ShoutoutVideo
from utils.telegram import TELEGRAM_BOT_API_URL
from wirecard.models import WirecardTransactionData
User = get_user_model()
FAKE_WIRECARD_ORDER_HASH = 'ORD-O5DLMAJZPTHV'
FAKE_WIRECARD_PAYMENT_HASH = 'PAY-HL7QRKFEQNHV'
def get_wirecard_mocked_abriged_response():
wirecard_capture_payment_api_abriged_response = {
'id': FAKE_WIRECARD_PAYMENT_HASH,
'status': 'AUTHORIZED',
}
capture_payment_response = mock.Mock()
capture_payment_response.status_code = 200
capture_payment_response.json.return_value = wirecard_capture_payment_api_abriged_response
return capture_payment_response
@override_settings(
task_eager_propagates=True,
task_always_eager=True,
broker_url='memory://',
backend='memory'
)
@mock.patch('wirecard.services.requests.post', return_value=get_wirecard_mocked_abriged_response())
class FulfillShoutoutRequestTest(APITestCase):
def do_login(self, user, password):
data = {
'email': user.email,
'first_name': user.first_name,
'last_name': user.last_name,
'password': password,
}
response = self.client.post(reverse('accounts:signin'), data, format='json')
token = response.data['access']
self.client.credentials(HTTP_AUTHORIZATION=f'Bearer {token}')
def setUp(self):
self.maxDiff = None
password = 'senha123'
user = User(
email='[email protected]',
first_name='Nome',
last_name='Sobrenome',
)
user.set_password(password)
user.save()
self.do_login(user, password)
self.talent = Talent.objects.create(
user=user,
price=1000,
phone_number=1,
area_code=1,
main_social_media='',
social_media_username='',
number_of_followers=1,
)
self.order = Order.objects.create(
hash_id=uuid.uuid4(),
talent_id=self.talent.id,
video_is_for='someone_else',
is_from='MJ',
is_to='Peter',
instruction="Go Get 'em, Tiger",
email='[email protected]',
is_public=True,
expiration_datetime=datetime.now(timezone.utc) + timedelta(days=4),
)
charge = Charge.objects.create(
order=self.order,
amount_paid=1000,
payment_date=datetime.now(timezone.utc) - timedelta(days=3),
status=DomainCharge.PRE_AUTHORIZED,
)
CreditCard.objects.create(
charge=charge,
fullname='Peter Parker',
birthdate='2019-12-31',
tax_document='12346578910',
credit_card_hash='<encrypted-credit-card-hash>',
)
Buyer.objects.create(
charge=charge,
fullname='Mary Jane Watson',
birthdate='2019-12-31',
tax_document='09876543210',
)
WirecardTransactionData.objects.create(
order=self.order,
wirecard_order_hash=FAKE_WIRECARD_ORDER_HASH,
wirecard_payment_hash=FAKE_WIRECARD_PAYMENT_HASH,
)
DefaultTalentProfitPercentage.objects.create(value='0.75')
self.request_data = {
'talent_id': self.talent.id,
'order_hash': self.order.hash_id,
'order_video': SimpleUploadedFile("file.mp4", b"filecontentstring"),
}
self.agency = Agency.objects.create(name='Agency')
AgencyProfitPercentage.objects.create(agency=self.agency, value='0.05')
@mock.patch('transcoder.tasks.transcode', mock.Mock())
@mock.patch('post_office.mailgun.requests', mock.Mock())
def test_fulfilling_a_shoutout_request_create_a_shoutout_video(self, mock1):
response = self.client.post(
reverse('request_shoutout:fulfill'),
self.request_data,
format='multipart'
)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertEqual(ShoutoutVideo.objects.count(), 1)
shoutout = ShoutoutVideo.objects.first()
expected_file_url = f'orders/talent-1/order-{shoutout.order.hash_id}/viggio-para-peter.mp4'
self.assertEqual(shoutout.hash_id, response.data['shoutout_hash'])
self.assertTrue(shoutout.file.url.endswith(expected_file_url))
@mock.patch('transcoder.tasks.transcode', mock.Mock())
@mock.patch('post_office.mailgun.requests', mock.Mock())
def test_fulfilling_a_shoutout_request_create_a_talent_profit(self, mock1):
response = self.client.post(
reverse('request_shoutout:fulfill'),
self.request_data,
format='multipart'
)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertEqual(TalentProfit.objects.count(), 1)
talent_profit_qs = TalentProfit.objects.filter(
talent=self.talent,
order=self.order,
shoutout_price=1000,
profit_percentage=Decimal('0.75'),
profit=Decimal('750.00'),
paid=False
)
self.assertTrue(talent_profit_qs.exists())
@mock.patch('transcoder.tasks.transcode', mock.Mock())
@mock.patch('post_office.mailgun.requests', mock.Mock())
def test_fulfilling_a_shoutout_request_create_a_agency_profit_when_talent_is_managed(self, mock1): # noqa: E501
self.talent.agency = self.agency
self.talent.save()
response = self.client.post(
reverse('request_shoutout:fulfill'),
self.request_data,
format='multipart'
)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertEqual(AgencyProfit.objects.count(), 1)
agency_profit_qs = AgencyProfit.objects.filter(
agency=self.agency,
order=self.order,
shoutout_price=1000,
profit_percentage=Decimal('0.05'),
profit=Decimal('50.00'),
paid=False
)
self.assertTrue(agency_profit_qs.exists())
@mock.patch('transcoder.tasks.transcode', mock.Mock())
@mock.patch('post_office.mailgun.requests', mock.Mock())
def test_fulfilling_a_shoutout_request_dont_create_a_agency_profit_when_talent_isnt_managed(self, mock1): # noqa: E501
response = self.client.post(
reverse('request_shoutout:fulfill'),
self.request_data,
format='multipart'
)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertEqual(AgencyProfit.objects.count(), 0)
@mock.patch('post_office.mailgun.requests', mock.Mock())
def test_after_upload_a_shoutout_transcode_process_is_triggered(self, mock1):
with mock.patch('transcoder.tasks.transcode') as mocked_transcoder:
response = self.client.post(
reverse('request_shoutout:fulfill'),
self.request_data,
format='multipart'
)
self.assertEqual(ShoutoutVideo.objects.count(), 1)
shoutout = ShoutoutVideo.objects.first()
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
mocked_transcoder.assert_called_once_with(shoutout, 'mp4')
@mock.patch('transcoder.tasks.transcode', mock.Mock())
def test_send_email_to_customer_after_transcode_process_ending(self, mock1):
with mock.patch('post_office.mailgun.requests') as mocked_requests:
response = self.client.post(
reverse('request_shoutout:fulfill'),
self.request_data,
format='multipart'
)
shoutout = ShoutoutVideo.objects.first()
expected_calls = [
mock.call(
auth=('api', os.environ['MAILGUN_API_KEY']),
url=os.environ['MAILGUN_API_URL'],
data={
'from': os.environ['CONTACT_EMAIL'],
'to': 'MJ <[email protected]>',
'subject': 'Seu viggio para Peter está pronto',
'template': 'notify-customer-that-his-viggio-is-ready',
'v:order_is_to': 'Peter',
'v:customer_name': 'MJ',
'v:talent_name': 'Nome Sobrenome',
'v:shoutout_absolute_url': f'{os.environ["SITE_URL"]}v/{shoutout.hash_id}'
},
),
]
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertEqual(mocked_requests.post.mock_calls, expected_calls)
@mock.patch('request_shoutout.adapters.db.orm.DjangoTalentProfit.persist', side_effect=Exception())
def test_rollback_when_fulfilling_a_shoutout_request_fails(self, mock1, mock2):
response = self.client.post(
reverse('request_shoutout:fulfill'),
self.request_data,
format='multipart'
)
self.assertEqual(response.status_code, status.HTTP_500_INTERNAL_SERVER_ERROR)
self.assertEqual(
response.data,
{'error': 'It happened an issue when persisting shoutout video'},
)
self.assertEqual(TalentProfit.objects.count(), 0)
self.assertEqual(ShoutoutVideo.objects.count(), 0)
@mock.patch('transcoder.tasks.transcode', mock.Mock())
@mock.patch('post_office.mailgun.requests', mock.Mock())
def test_when_talent_profit_percentage_is_not_the_default(self, mock1):
CustomTalentProfitPercentage.objects.create(talent=self.talent, value=Decimal('0.80'))
response = self.client.post(
reverse('request_shoutout:fulfill'),
self.request_data,
format='multipart'
)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertEqual(TalentProfit.objects.count(), 1)
talent_profit_qs = TalentProfit.objects.filter(
talent=self.talent,
order=self.order,
shoutout_price=1000,
profit_percentage=Decimal('0.80'),
profit=Decimal('800.00'),
paid=False
)
self.assertTrue(talent_profit_qs.exists())
def test_cant_fulfill_same_order_twice(self, mock1):
ShoutoutVideo.objects.create(
hash_id=uuid.uuid4(),
order=self.order,
talent=self.talent,
file=SimpleUploadedFile("file.mp4", b"filecontentstring"),
)
response = self.client.post(
reverse('request_shoutout:fulfill'),
self.request_data,
format='multipart'
)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEqual(response.data, {'error': 'Order already has a shoutout attached.'})
def test_cant_fulfill_an_expired_order(self, mock1):
self.order.expiration_datetime = datetime.now(timezone.utc) - timedelta(hours=1)
self.order.save()
response = self.client.post(
reverse('request_shoutout:fulfill'),
self.request_data,
format='multipart'
)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEqual(response.data, {'error': "Can't fulfill an expired order."})
def test_a_talent_cant_fulfill_an_order_requested_to_another_talent(self, mock1):
user = User.objects.create(email='[email protected]')
talent = Talent.objects.create(
user=user,
price=10,
phone_number=1,
area_code=1,
main_social_media='',
social_media_username='',
number_of_followers=1,
)
self.order.talent_id = talent.id
self.order.save()
response = self.client.post(
reverse('request_shoutout:fulfill'),
self.request_data,
format='multipart'
)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEqual(response.data, {'error': 'Order belongs to another Talent.'})
@mock.patch('transcoder.tasks.transcode', mock.Mock())
@mock.patch('post_office.mailgun.requests', mock.Mock())
@mock.patch('utils.telegram.requests.post')
def test_when_capture_payment_fails_it_should_send_alert_message_to_staff(self, mock1, telegram_request_post): # noqa: E501
expected_call = mock.call(
url=f'{TELEGRAM_BOT_API_URL}/sendMessage',
data=json.dumps({
'chat_id': os.environ['TELEGRAM_GROUP_ID'],
'text': (
'OCORREU UM ERRO AO CAPTURAR UM PAGAMENTO. '
'Verifique o Sentry: '
'https://sentry.io/organizations/viggio-sandbox/issues/?project=1770932'
)
}),
headers={'Content-Type': 'application/json'}
)
method_path = 'request_shoutout.adapters.db.orm.WirecardPaymentApi.capture_payment'
with mock.patch(method_path, side_effect=Exception):
response = self.client.post(
reverse('request_shoutout:fulfill'),
self.request_data,
format='multipart'
)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertEqual(telegram_request_post.mock_calls, [expected_call])
| 39.960452 | 128 | 0.64011 | [
"MIT"
] | michel-rodrigues/viggio_backend | app/request_shoutout/adapters/tests/test_fulfill_shoutout_request.py | 14,147 | Python |
import math
import numpy as np
import pypact as pp
from tests.testerbase import Tester
DECIMAL_PLACE_ACC = 6
class GroupConvertUnitTest(Tester):
def _test_imp(self, in_group, in_values, out_group, expected_values, almost=False):
if almost:
np.testing.assert_almost_equal(expected_values, pp.groupconvert.by_energy(
in_group, in_values, out_group), err_msg="Assert group convert")
else:
self.assertEqual(expected_values, pp.groupconvert.by_energy(
in_group, in_values, out_group), "Assert group convert")
def test_byenergy_simple_overlap(self):
in_group = [0.0, 1.0]
in_values = [1.0]
out_group = [0.5, 1.0]
expected_values = [0.5]
self._test_imp(in_group, in_values, out_group, expected_values)
def test_byenergy_simple_overlap2(self):
in_group = [0.0, 1.0]
in_values = [1.0]
out_group = [0.0, 0.5]
expected_values = [0.5]
self.assertEqual(expected_values, pp.groupconvert.by_energy(
in_group, in_values, out_group), "Assert group convert")
def test_byenergy_simple_adjacent(self):
in_group = [0.0, 1.0]
in_values = [1.0]
out_group = [1.0, 1.5]
expected_values = [0.0]
self.assertEqual(expected_values, pp.groupconvert.by_energy(
in_group, in_values, out_group), "Assert group convert")
def test_byenergy_simple_adjacent2(self):
in_group = [0.0, 1.0]
in_values = [1.0]
out_group = [-1.0, 0.0]
expected_values = [0.0]
self.assertEqual(expected_values, pp.groupconvert.by_energy(
in_group, in_values, out_group), "Assert group convert")
def test_byenergy_simple_same(self):
in_group = [0.0, 1.0]
in_values = [1.0]
out_group = [0.0, 1.0]
expected_values = [1.0]
self.assertEqual(expected_values, pp.groupconvert.by_energy(
in_group, in_values, out_group), "Assert group convert")
def test_byenergy_simple_same2(self):
in_group = [0.0, 1.0, 2.0]
in_values = [1.0, 0.7]
out_group = [0.0, 1.0, 2.0]
expected_values = [1.0, 0.7]
self.assertEqual(expected_values, pp.groupconvert.by_energy(
in_group, in_values, out_group), "Assert group convert")
def test_byenergy_simple_negative1(self):
in_group = [-1.0, 0.0, 1.0]
in_values = [5.0, 8.0]
out_group = [0.0, 0.5, 0.75, 1.0]
expected_values = [4.0, 2.0, 2.0]
self.assertEqual(expected_values, pp.groupconvert.by_energy(
in_group, in_values, out_group), "Assert group convert")
def test_byenergy_simple_negative2(self):
in_group = [-1.0, 0.0, 1.0]
in_values = [5.0, 8.0]
out_group = [-10.0, 0.5, 0.75, 1.0]
expected_values = [9.0, 2.0, 2.0]
self.assertEqual(expected_values, pp.groupconvert.by_energy(
in_group, in_values, out_group), "Assert group convert")
def test_byenergy_case1(self):
self._test_imp([0.2, 0.5], [8], [0., 0.4, 0.5],
[16./3., 8./3.], almost=True)
def test_byenergy_case2(self):
self._test_imp([0, 0.1, 2], [2, 3], [0.1, 0.25, 0.5, 0.75, 0.9],
[0.23684210526315788, 0.39473684210526316, 0.39473684210526305, 0.23684210526315788], almost=True)
def test_byenergy_case3(self):
self._test_imp([0, 0.2, 2], [2, 3], [0.1, 0.25, 0.5, 0.75, 0.9],
[1.0833333333333333, 0.41666666666666663, 0.41666666666666663, 0.25], almost=True)
def test_byenergy_case4(self):
self._test_imp([0, 0.2, 0.3, 0.4, 0.55], [2, 3, 1, 8], [0.1, 0.25, 0.5, 0.75, 0.9],
[2.5, 7.833333333333331, 2.6666666666666687, 0.0], almost=True)
def test_byenergy_709_to_single(self):
g_709 = list(reversed(pp.ALL_GROUPS[709]))
self._test_imp(g_709, [1.0]*709, [1e6, 2e6],
[15.050386030584683], almost=True)
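
# Illustrative direct call (an addition for clarity, not part of the original
# test file): by_energy redistributes bin contents onto a new energy grid, so
# splitting one unit-wide bin onto two half-width bins conserves the total.
def _example_by_energy():
    return pp.groupconvert.by_energy([0.0, 1.0], [1.0], [0.0, 0.5, 1.0])  # -> [0.5, 0.5]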
| 38.826923 | 121 | 0.606736 | [
"Apache-2.0"
] | dvp2015/pypact | tests/input/groupconverttest.py | 4,038 | Python |
# layout.py
# ---------
# Licensing Information: You are free to use or extend these projects for
# educational purposes provided that (1) you do not distribute or publish
# solutions, (2) you retain this notice, and (3) you provide clear
# attribution to UC Berkeley, including a link to http://ai.berkeley.edu.
#
# Attribution Information: The Pacman AI projects were developed at UC Berkeley.
# The core projects and autograders were primarily created by John DeNero
# ([email protected]) and Dan Klein ([email protected]).
# Student side autograding was added by Brad Miller, Nick Hay, and
# Pieter Abbeel ([email protected]).
import util
from util import manhattanDistance
from game import Grid
import os
import random
import itertools
VISIBILITY_MATRIX_CACHE = {}
class Layout:
"""
A Layout manages the static information about the game board.
"""
def __init__(self, layoutText):
self.width = len(layoutText[0])
self.height= len(layoutText)
self.walls = Grid(self.width, self.height, False)
self.food = Grid(self.width, self.height, False)
self.capsules = []
self.agentPositions = []
self.numGhosts = 0
self.processLayoutText(layoutText)
self.layoutText = layoutText
self.totalFood = len(self.food.asList())
# self.initializeVisibilityMatrix()
def getNumGhosts(self):
return self.numGhosts
def initializeVisibilityMatrix(self):
global VISIBILITY_MATRIX_CACHE
if ''.join(self.layoutText) not in VISIBILITY_MATRIX_CACHE:
from game import Directions
vecs = [(-0.5,0), (0.5,0),(0,-0.5),(0,0.5)]
dirs = [Directions.NORTH, Directions.SOUTH, Directions.WEST, Directions.EAST]
vis = Grid(self.width, self.height, {Directions.NORTH:set(), Directions.SOUTH:set(), Directions.EAST:set(), Directions.WEST:set(), Directions.STOP:set()})
for x in range(self.width):
for y in range(self.height):
if self.walls[x][y] == False:
for vec, direction in zip(vecs, dirs):
dx, dy = vec
nextx, nexty = x + dx, y + dy
while (nextx + nexty) != int(nextx) + int(nexty) or not self.walls[int(nextx)][int(nexty)] :
vis[x][y][direction].add((nextx, nexty))
nextx, nexty = x + dx, y + dy
self.visibility = vis
VISIBILITY_MATRIX_CACHE[''.join(self.layoutText)] = vis
else:
self.visibility = VISIBILITY_MATRIX_CACHE[''.join(self.layoutText)]
def isWall(self, pos):
x, col = pos
return self.walls[x][col]
def getRandomLegalPosition(self):
x = random.choice(range(self.width))
y = random.choice(range(self.height))
while self.isWall( (x, y) ):
x = random.choice(range(self.width))
y = random.choice(range(self.height))
return (x,y)
def getRandomCorner(self):
poses = [(1,1), (1, self.height - 2), (self.width - 2, 1), (self.width - 2, self.height - 2)]
return random.choice(poses)
def getFurthestCorner(self, pacPos):
poses = [(1,1), (1, self.height - 2), (self.width - 2, 1), (self.width - 2, self.height - 2)]
dist, pos = max([(manhattanDistance(p, pacPos), p) for p in poses])
return pos
def isVisibleFrom(self, ghostPos, pacPos, pacDirection):
row, col = [int(x) for x in pacPos]
return ghostPos in self.visibility[row][col][pacDirection]
def __str__(self):
return "\n".join(self.layoutText)
def deepCopy(self):
return Layout(self.layoutText[:])
def processLayoutText(self, layoutText):
"""
Coordinates are flipped from the input format to the (x,y) convention here
The shape of the maze. Each character
represents a different type of object.
% - Wall
. - Food
o - Capsule
G - Ghost
P - Pacman
Other characters are ignored.
"""
maxY = self.height - 1
for y in range(self.height):
for x in range(self.width):
layoutChar = layoutText[maxY - y][x]
self.processLayoutChar(x, y, layoutChar)
self.agentPositions.sort()
self.agentPositions = [ ( i == 0, pos) for i, pos in self.agentPositions]
def processLayoutChar(self, x, y, layoutChar):
if layoutChar == '%':
self.walls[x][y] = True
elif layoutChar == '.':
self.food[x][y] = True
elif layoutChar == 'o':
self.capsules.append((x, y))
elif layoutChar == 'P':
self.agentPositions.append( (0, (x, y) ) )
elif layoutChar in ['G']:
self.agentPositions.append( (1, (x, y) ) )
self.numGhosts += 1
elif layoutChar in ['1', '2', '3', '4']:
self.agentPositions.append( (int(layoutChar), (x,y)))
self.numGhosts += 1
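
# Illustrative usage sketch (an addition, not part of the original file). The
# maze rows below are made up; each character follows the legend documented in
# processLayoutText: '%' wall, '.' food, 'o' capsule, 'P' Pacman, 'G' ghost.
def _example_layout():
    rows = [
        "%%%%%%%",
        "%P .  %",
        "%.%%%o%",
        "%   G.%",
        "%%%%%%%",
    ]
    lay = Layout(rows)
    return lay.getNumGhosts(), len(lay.food.asList()), lay.isWall((0, 0))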
def getLayout(name, back = 2):
if name.endswith('.lay'):
layout = tryToLoad('layouts/' + name)
if layout == None: layout = tryToLoad(name)
else:
layout = tryToLoad('layouts/' + name + '.lay')
if layout == None: layout = tryToLoad(name + '.lay')
if layout == None and back >= 0:
curdir = os.path.abspath('.')
os.chdir('..')
layout = getLayout(name, back -1)
os.chdir(curdir)
return layout
def tryToLoad(fullname):
if(not os.path.exists(fullname)): return None
f = open(fullname)
try: return Layout([line.strip() for line in f])
finally: f.close()
| 38.039474 | 166 | 0.58907 | [
"MIT"
] | chibinz/CS181 | tracking/layout.py | 5,782 | Python |
# Connection libraries
import os
import shutil
import re


# Class create project
class Create:
    def __init__(self, path):
        self.path = path

    # Create project
    def createProject(self, name):
        if not os.path.isdir(self.path + name):
            shutil.copytree("launcher/shablon/", self.path + name)
        else:
            n, a = os.listdir(path=self.path), []
            for s in n:
                if s.find("new") != -1:
                    a.append(s)
            shutil.copytree("launcher/shablon/", self.path + name + str(len(a)))

    # Delete project
    def deleteProject(self, name):
        shutil.rmtree(self.path + name)
| 23.913043 | 71 | 0.681818 | [
"MIT"
] | KValexander/pygame-vn | create.py | 550 | Python |
import matplotlib.pyplot as plt
import streamlit as st
from typing import Iterable, Union
from sklearn.metrics import classification_report
from sklearn.metrics import roc_curve, auc, RocCurveDisplay


def train(estimator: object, X: Iterable[Union[int, float]], y: Iterable):
    """
    Train custom classifier model.

    Parameters:
        estimator: Unfitted estimator.
        X: Input training data.
        y: Labels for test data.

    Returns:
        Fitted estimator model.
    """
    return estimator.fit(X=X, y=y)


def classify(estimator: object, X: Iterable[Union[int, float]]):
    """
    Predict with custom classifier model.

    Parameters:
        estimator: Fitted estimator.
        X: Input test data.

    Returns:
        Predicted labels.
    """
    return estimator.predict(X=X)


def regress(estimator: object, X: Iterable[Union[int, float]], y: Iterable):
    """
    Predict with custom regressor model.

    Parameters:
        estimator: Fitted estimator.
        X: Input test data.
        y: Labels for test data.

    Returns:
        Predicted labels.
    """
    pass


def evaluate(estimator: object, X: Iterable[Union[int, float]], y: Iterable):
    """
    Evaluate a fitted classifier model and render the results in Streamlit.

    Parameters:
        estimator: Fitted estimator.
        X: Input test data.
        y: Labels for test data.

    Returns:
        None; writes the classification report and ROC curve to the app.
    """
    pred = estimator.predict(X=X)

    # classification report
    report = classification_report(y_true=y, y_pred=pred)
    st.write('Classification Report')
    st.write(report)

    # ROC curve
    fpr, tpr, thresholds = roc_curve(y, pred)
    roc_auc = auc(fpr, tpr)
    # RocCurveDisplay returns a display object; it cannot be unpacked as a tuple.
    display = RocCurveDisplay(
        fpr=fpr,
        tpr=tpr,
        roc_auc=roc_auc,
        estimator_name=type(estimator).__name__,
    )
    display.plot()
    st.pyplot(fig=display.figure_)
"MIT"
] | Fennec2000GH/Poly-Finance | ml.py | 1,912 | Python |
from dataclasses import dataclass
from typing import List


@dataclass(frozen=True)
class Channel:
    name: str
    symbols: List[str]

    def __post_init__(self):
        self.symbols.sort()
"MPL-2.0"
] | OrzPond/tardis-python | tardis_client/channel.py | 205 | Python |
"""
CAR CONFIG
This file is read by your car application's manage.py script to change the car
performance.
EXMAPLE
-----------
import dk
cfg = dk.load_config(config_path='~/mycar/config.py')
print(cfg.CAMERA_RESOLUTION)
"""
import os
#PATHS
CAR_PATH = PACKAGE_PATH = os.path.dirname(os.path.realpath(__file__))
DATA_PATH = os.path.join(CAR_PATH, 'data')
MODELS_PATH = os.path.join(CAR_PATH, 'models')
#VEHICLE
DRIVE_LOOP_HZ = 20
MAX_LOOPS = 100000
#CAMERA
CAMERA_TYPE = "PICAM" # (PICAM|WEBCAM|CVCAM|CSIC|V4L|D435|MOCK|IMAGE_LIST)
IMAGE_W = 160
IMAGE_H = 120
IMAGE_DEPTH = 3 # default RGB=3, make 1 for mono
CAMERA_FRAMERATE = DRIVE_LOOP_HZ
CAMERA_VFLIP = False
CAMERA_HFLIP = False
#9865, over rides only if needed, ie. TX2..
PCA9685_I2C_ADDR = 0x40
PCA9685_I2C_BUSNUM = None
#STEERING
STEERING_CHANNEL = 1
STEERING_LEFT_PWM = 460
STEERING_RIGHT_PWM = 290
#THROTTLE
THROTTLE_CHANNEL = 0
THROTTLE_FORWARD_PWM = 500
THROTTLE_STOPPED_PWM = 370
THROTTLE_REVERSE_PWM = 220
#TRAINING
DEFAULT_MODEL_TYPE = 'linear' #(linear|categorical|rnn|imu|behavior|3d|localizer|latent)
BATCH_SIZE = 128
TRAIN_TEST_SPLIT = 0.8
MAX_EPOCHS = 100
SHOW_PLOT = True
VERBOSE_TRAIN = True
USE_EARLY_STOP = True
EARLY_STOP_PATIENCE = 5
MIN_DELTA = .0005
PRINT_MODEL_SUMMARY = True #print layers and weights to stdout
OPTIMIZER = None #adam, sgd, rmsprop, etc.. None accepts default
LEARNING_RATE = 0.001 #only used when OPTIMIZER specified
LEARNING_RATE_DECAY = 0.0 #only used when OPTIMIZER specified
CACHE_IMAGES = True #keep images in memory. will speed succesive epochs, but crater if not enough mem.
PRUNE_CNN = False
PRUNE_PERCENT_TARGET = 75 # The desired percentage of pruning.
PRUNE_PERCENT_PER_ITERATION = 20 # Percenge of pruning that is perform per iteration.
PRUNE_VAL_LOSS_DEGRADATION_LIMIT = 0.2 # The max amout of validation loss that is permitted during pruning.
PRUNE_EVAL_PERCENT_OF_DATASET = .05 # percent of dataset used to perform evaluation of model.
#model transfer options
FREEZE_LAYERS = False
NUM_LAST_LAYERS_TO_TRAIN = 7
#For the categorical model, this limits the upper bound of the learned throttle
#it's very IMPORTANT that this value is matched from the training PC config.py and the robot.py
#and ideally wouldn't change once set.
MODEL_CATEGORICAL_MAX_THROTTLE_RANGE = 0.5
#RNN or 3D
SEQUENCE_LENGTH = 3
#SOMBRERO
HAVE_SOMBRERO = False
#RECORD OPTIONS
RECORD_DURING_AI = False
AUTO_CREATE_NEW_TUB = False #create a new tub (tub_YY_MM_DD) directory when recording or append records to data directory directly
#JOYSTICK
USE_JOYSTICK_AS_DEFAULT = False #when starting the manage.py, when True, will not require a --js option to use the joystick
JOYSTICK_MAX_THROTTLE = 0.5 #this scalar is multiplied with the -1 to 1 throttle value to limit the maximum throttle. This can help if you drop the controller or just don't need the full speed available.
JOYSTICK_STEERING_SCALE = 1.0 #some people want a steering that is less sensitve. This scalar is multiplied with the steering -1 to 1. It can be negative to reverse dir.
AUTO_RECORD_ON_THROTTLE = True #if true, we will record whenever throttle is not zero. if false, you must manually toggle recording with some other trigger. Usually circle button on joystick.
CONTROLLER_TYPE='ps3' #(ps3|ps4|xbox|nimbus|wiiu|F710|rc3|MM1|custom) custom will run the my_joystick.py controller written by the `donkey createjs` command
USE_NETWORKED_JS = False #should we listen for remote joystick control over the network?
NETWORK_JS_SERVER_IP = "192.168.0.1"#when listening for network joystick control, which ip is serving this information
JOYSTICK_DEADZONE = 0.0 # when non zero, this is the smallest throttle before recording triggered.
JOYSTICK_THROTTLE_DIR = -1.0 # use -1.0 to flip forward/backward, use 1.0 to use joystick's natural forward/backward
USE_FPV = False # send camera data to FPV webserver
JOYSTICK_DEVICE_FILE = "/dev/input/js0" # this is the unix file use to access the joystick.
#WEB CONTROL
WEB_CONTROL_PORT = int(os.getenv("WEB_CONTROL_PORT", 8887)) # which port to listen on when making a web controller
WEB_INIT_MODE = "user" # which control mode to start in. one of user|local_angle|local. Setting local will start in ai mode.
#DonkeyGym
#Only on Ubuntu linux, you can use the simulator as a virtual donkey and
#issue the same python manage.py drive command as usual, but have them control a virtual car.
#This enables that, and sets the path to the simualator and the environment.
#You will want to download the simulator binary from: https://github.com/tawnkramer/donkey_gym/releases/download/v18.9/DonkeySimLinux.zip
#then extract that and modify DONKEY_SIM_PATH.
DONKEY_GYM = False
DONKEY_SIM_PATH = "path to sim" #"/home/tkramer/projects/sdsandbox/sdsim/build/DonkeySimLinux/donkey_sim.x86_64" when racing on virtual-race-league use "remote", or user "remote" when you want to start the sim manually first.
DONKEY_GYM_ENV_NAME = "donkey-mountain-track-v0" # ("donkey-generated-track-v0"|"donkey-generated-roads-v0"|"donkey-warehouse-v0"|"donkey-avc-sparkfun-v0")
GYM_CONF = { "body_style" : "donkey", "body_rgb" : (128, 128, 128), "car_name" : "car", "font_size" : 100} # body style(donkey|bare|car01) body rgb 0-255
GYM_CONF["racer_name"] = "Your Name"
GYM_CONF["country"] = "Place"
GYM_CONF["bio"] = "I race robots."
SIM_HOST = "127.0.0.1" # when racing on virtual-race-league use host "trainmydonkey.com"
SIM_ARTIFICIAL_LATENCY = 0 # this is the millisecond latency in controls. Can use useful in emulating the delay when useing a remote server. values of 100 to 400 probably reasonable.
| 46.184 | 225 | 0.757838 | [
"MIT"
] | DocGarbanzo/donkeycar | donkeycar/templates/cfg_basic.py | 5,773 | Python |
from hatesonar import Sonar


def hate_speech_detection(texts):
    """Return a dynamic blacklist of tokens whose Sonar top class is not 'neither' (i.e. flagged as hateful/offensive)."""
    sonar = Sonar()
    phrases_by_line = " ".join(texts).split(" ")
    dynamic_black_list = []
    for sent in phrases_by_line:
        if sonar.ping(text=sent)['top_class'] != 'neither':
            dynamic_black_list.extend(sent.split())
    return dynamic_black_list
| 34.8 | 64 | 0.66954 | [
"MIT"
] | greaseuniverse/greaseterminator | interventions/text/speech_filter.py | 348 | Python |
# -*- coding: utf-8 -*-
"""
# Author : Camey
# DateTime : 2022/3/12 8:49 PM
# Description :
""" | 17.333333 | 33 | 0.5 | [
"Apache-2.0"
] | abcdcamey/Gobigger-Explore | my_work/config/__init__.py | 108 | Python |
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
A module that provides algorithms for performing linear fit between
sets of 2D points.
:Authors: Mihai Cara, Warren Hack
:License: :doc:`../LICENSE`
"""
import logging
import numbers
import numpy as np
from .linalg import inv
from . import __version__ # noqa: F401
__author__ = 'Mihai Cara, Warren Hack'
__all__ = ['iter_linear_fit', 'build_fit_matrix']
log = logging.getLogger(__name__)
log.setLevel(logging.DEBUG)
class SingularMatrixError(Exception):
""" An error class used to report when a singular matrix is encountered."""
pass
class NotEnoughPointsError(Exception):
"""
An error class used to report when there are not enough points to
find parameters of a linear transformation.
"""
pass
def iter_linear_fit(xy, uv, wxy=None, wuv=None,
fitgeom='general', center=None,
nclip=3, sigma=(3.0, 'rmse'), clip_accum=False):
r"""
Compute linear transformation parameters that "best" (in the sense of
minimizing residuals) transform ``uv`` source position to ``xy``
sources iteratively using sigma-clipping.
More precisely, this functions attempts to find a ``2x2`` matrix ``F`` and
a shift vector ``s`` that minimize the residuals between the *transformed*
reference source coordinates ``uv``
.. math::
\mathbf{xy}'_k = \mathbf{F}\cdot(\mathbf{uv}_k-\mathbf{c})+\
\mathbf{s} + \mathbf{c}
:label: ilf1
and the "observed" source positions ``xy``:
.. math::
\epsilon^2 = \Sigma_k w_k \|\mathbf{xy}_k-\mathbf{xy}'_k\|^2.
:label: ilf2
In the above equations, :math:`\mathbf{F}` is a ``2x2`` matrix while
:math:`\mathbf{xy}_k` and :math:`\mathbf{uv}_k` are the position
coordinates of the ``k``-th source (row in input ``xy`` and ``uv`` arrays).
One of the two catalogs (``xy`` or ``uv``) contains what we refer to as
"image" source positions and the other one as "reference" source positions.
The meaning assigned to ``xy`` and ``uv`` parameters are up to the
caller of this function.
Parameters
----------
xy: numpy.ndarray
A ``(N, 2)``-shaped array of source positions (one 2-coordinate
position per line).
uv: numpy.ndarray
A ``(N, 2)``-shaped array of source positions (one 2-coordinate
position per line). This array *must have* the same length (shape)
as the ``xy`` array.
wxy: numpy.ndarray, None, optional
A 1-dimensional array of weights of the same length (``N``)
as ``xy`` array indicating how much a given coordinate should be
weighted in the fit. If not provided or set to `None`, all positions
will be contribute equally to the fit if ``wuv`` is also set to `None`.
See ``Notes`` section for more details.
wuv: numpy.ndarray, None, optional
A 1-dimensional array of weights of the same length (``N``)
as ``xy`` array indicating how much a given coordinate should be
weighted in the fit. If not provided or set to `None`, all positions
will be contribute equally to the fit if ``wxy`` is also set to `None`.
See ``Notes`` section for more details.
fitgeom: {'shift', 'rscale', 'general'}, optional
The fitting geometry to be used in fitting the matched object lists.
This parameter is used in fitting the shifts (offsets), rotations
and/or scale changes from the matched object lists. The 'general'
fit geometry allows for independent scale and rotation for each axis.
center: tuple, list, numpy.ndarray, None, optional
A list-like container with two ``X``- and ``Y``-positions of the center
(origin) of rotations in the ``uv`` and ``xy`` coordinate frames.
If not provided, ``center`` is estimated as a (weighted) mean position
in the ``uv`` frame.
nclip: int, None, optional
Number (a non-negative integer) of clipping iterations in fit.
Clipping will be turned off if ``nclip`` is either `None` or 0.
sigma: float, tuple of the form (float, str), optional
When a tuple is provided, first value (a positive number)
indicates the number of "fit error estimates" to use for clipping.
The second value (a string) indicates the statistic to be
used for "fit error estimate". Currently the following values are
supported: ``'rmse'``, ``'mae'``, and ``'std'``
- see ``Notes`` section for more details.
When ``sigma`` is a single number, it must be a positive number and
the default error estimate ``'rmse'`` is assumed.
This parameter is ignored when ``nclip`` is either `None` or 0.
clip_accum: bool, optional
Indicates whether or not to reset the list of "bad" (clipped out)
sources after each clipping iteration. When set to `True` the list
only grows with each iteration as "bad" positions never re-enter the
pool of available position for the fit. By default the list of
"bad" source positions is purged at each iteration.
Returns
-------
fit: dict
- ``'shift'``: A ``numpy.ndarray`` with two components of the
computed shift.
- ``'shift_ld'``: A ``numpy.ndarray`` with two components of the
computed shift of type ``numpy.longdouble``.
- ``'matrix'``: A ``2x2`` ``numpy.ndarray`` with the computed
generalized rotation matrix.
- ``'matrix_ld'``: A ``2x2`` ``numpy.ndarray`` with the computed
generalized rotation matrix of type ``numpy.longdouble``.
- ``'proper_rot'``: Rotation angle (degree) as if the rotation is
proper.
- ``'rot'``: A tuple of ``(rotx, roty)`` - the rotation angles with
regard to the ``X`` and ``Y`` axes.
- ``'<rot>'``: *Arithmetic mean* of the angles of rotation around
``X`` and ``Y`` axes.
- ``'scale'``: A tuple of ``(sx, sy)`` - scale change in the direction
of the ``X`` and ``Y`` axes.
- ``'<scale>'``: *Geometric mean* of scales ``sx`` and ``sy``.
- ``'skew'``: Computed skew.
- ``'proper'``: a boolean indicating whether the rotation is proper.
- ``'fitgeom'``: Fit geometry (allowed transformations) used for
fitting data (to minimize residuals). This is copy of the input
argument ``fitgeom``.
- ``'center'``: Center of rotation
- ``'center_ld'``: Center of rotation as a ``numpy.longdouble``.
- ``'fitmask'``: A boolean array indicating which source positions
where used for fitting (`True`) and which were clipped out
(`False`). **NOTE** For weighted fits, positions with zero
weights are automatically excluded from the fits.
- ``'eff_nclip'``: Effective number of clipping iterations
- ``'rmse'``: Root-Mean-Square Error
- ``'mae'``: Mean Absolute Error
- ``'std'``: Standard Deviation of the residuals
- ``'resids'``: An array of residuals of the fit.
**NOTE:** Only the residuals for the "valid" points are reported
here. Therefore the length of this array may be smaller than the
length of input arrays of positions.
Notes
-----
**Weights**
Weights can be provided for both "image" source positions and "reference"
source positions. When no weights are given, all positions are weighted
equally. When only one set of positions have weights (i.e., either ``wxy``
or ``wuv`` is not `None`) then weights in :eq:`ilf2` are set to be equal
to the provided set of weights. When weights for *both* "image" source
positions and "reference" source positions are provided, then the
combined weight that is used in :eq:`ilf2` is computed as:
.. math::
1/w = 1/w_{xy} + 1/w_{uv}.
**Statistics for clipping**
Several statistics are available for clipping iterations and all of them
are reported in the returned ``fit`` dictionary regardless of the
setting in ``sigma``:
.. math::
\mathrm{RMSE} = \sqrt{\Sigma_k w_k \|\mathbf{r}_k\|^2}
.. math::
\mathrm{MAE} = \sqrt{\Sigma_k w_k \|\mathbf{r}_k\|}
.. math::
\mathrm{STD} = \sqrt{\Sigma_k w_k \|\mathbf{r}_k - \
\mathbf{\overline{r}}\|^2}/(1-V_2)
where :math:`\mathbf{r}_k=\mathbf{xy}_k-\mathbf{xy}'_k`,
:math:`\Sigma_k w_k = 1`, and :math:`V_2=\Sigma_k w_k^2`.
"""
if fitgeom == 'general':
linear_fit = fit_general
elif fitgeom == 'rscale':
linear_fit = fit_rscale
elif fitgeom == 'shift':
linear_fit = fit_shifts
else:
raise ValueError("Unsupported 'fitgeom' value: '{}'".format(fitgeom))
minobj_per_fitgeom = {'shift': 1, 'rscale': 2, 'general': 3}
minobj = minobj_per_fitgeom[fitgeom]
xy = np.array(xy, dtype=np.longdouble)
uv = np.array(uv, dtype=np.longdouble)
if len(xy.shape) != 2 or xy.shape[1] != 2 or uv.shape != xy.shape:
raise ValueError("Input coordinate arrays 'xy' and 'uv' must be of "
"shape (N, 2) where N is the number of coordinate "
"points.")
wmask = np.ones(len(xy), dtype=np.bool_)
if wxy is not None:
wxy = np.asarray(wxy)
if len(wxy.shape) != 1 or wxy.shape[0] != xy.shape[0]:
raise ValueError("Weights 'wxy' must be a 1-dimensional vector "
"of lengths equal to the number of input points.")
wmask *= wxy > 0.0
if wuv is not None:
wuv = np.asarray(wuv)
if len(wuv.shape) != 1 or wuv.shape[0] != xy.shape[0]:
raise ValueError("Weights 'wuv' must be a 1-dimensional vector "
"of lengths equal to the number of input points.")
wmask *= wuv > 0.0
mask = wmask
if sigma is None and nclip is not None and nclip > 0:
raise ValueError("Argument 'sigma' cannot be None when 'nclip' is "
"a positive number.")
if isinstance(sigma, numbers.Number):
sigstat = 'rmse' # default value
nsigma = float(sigma)
elif sigma is not None:
nsigma = float(sigma[0])
sigstat = sigma[1]
if sigstat not in ['rmse', 'mae', 'std']:
raise ValueError("Unsupported sigma statistics value.")
if sigma is not None and nsigma <= 0.0:
raise ValueError("The value of sigma for clipping iterations must be "
"positive.")
if nclip is None:
nclip = 0
else:
if nclip < 0:
raise ValueError("Argument 'nclip' must be non-negative.")
nclip = int(nclip)
if np.count_nonzero(mask) == minobj:
log.warning("The number of sources for the fit is smaller than the "
"minimum number of sources necessary for the requested "
"'fitgeom'.")
log.warning("Resetting number of clipping iterations to 0.")
nclip = 0
if center is None:
center_ld = uv[mask].mean(axis=0, dtype=np.longdouble)
center = center_ld.astype(np.double)
else:
center_ld = np.longdouble(center)
xy[mask] -= center_ld
uv[mask] -= center_ld
log.info("Performing '{:s}' fit".format(fitgeom))
# initial fit:
wmxy = None if wxy is None else wxy[mask]
wmuv = None if wuv is None else wuv[mask]
fit = linear_fit(xy[mask], uv[mask], wmxy, wmuv)
# clipping iterations:
effective_nclip = 0
for n in range(nclip):
resids = fit['resids']
# redefine what pixels will be included in next iteration
cutoff = nsigma * fit[sigstat]
nonclipped = np.linalg.norm(resids, axis=1) < cutoff
if np.count_nonzero(nonclipped) < minobj or nonclipped.all():
break
effective_nclip += 1
prev_mask = mask
if not clip_accum:
mask = np.array(wmask)
mask[prev_mask] *= nonclipped
wmxy = None if wxy is None else wxy[mask]
wmuv = None if wuv is None else wuv[mask]
fit = linear_fit(xy[mask], uv[mask], wmxy, wmuv)
fit['center'] = center
fit['center_ld'] = center_ld
fit['fitmask'] = mask
fit['eff_nclip'] = effective_nclip
return fit
def _compute_stat(fit, residuals, weights):
if weights is None:
fit['rmse'] = float(np.sqrt(np.mean(2 * residuals**2)))
fit['mae'] = float(np.mean(np.linalg.norm(residuals, axis=1)))
fit['std'] = float(np.linalg.norm(residuals.std(axis=0)))
else:
# assume all weights > 0 (this should be insured by the caller => no
# need to repeat the check here)
npts = len(weights)
wt = np.sum(weights)
if npts == 0 or wt == 0.0:
fit['rmse'] = float('nan')
fit['mae'] = float('nan')
fit['std'] = float('nan')
return
w = weights / wt
fit['rmse'] = float(np.sqrt(np.sum(np.dot(w, residuals**2))))
fit['mae'] = float(np.dot(w, np.linalg.norm(residuals, axis=1)))
if npts == 1:
fit['std'] = 0.0
else:
# see:
# https://en.wikipedia.org/wiki/Weighted_arithmetic_mean#Reliability_weights_2
wmean = np.dot(w, residuals)
fit['std'] = float(
np.sqrt(np.sum(np.dot(w, (residuals - wmean)**2) /
(1.0 - np.sum(w**2))))
)
def fit_shifts(xy, uv, wxy=None, wuv=None):
""" Fits (non-iteratively and without sigma-clipping) a displacement
transformation only between input lists of positions ``xy`` and ``uv``.
When weights are provided, a weighted fit is performed. Parameter
descriptions and return values are identical to those in `iter_linear_fit`,
except returned ``fit`` dictionary does not contain the following
keys irrelevant to this function: ``'center'``, ``'fitmask'``, and
``'eff_nclip'``.
"""
if xy.size == 0:
raise NotEnoughPointsError(
"At least one point is required to find shifts."
)
diff_pts = np.subtract(xy, uv, dtype=np.longdouble)
if wxy is None and wuv is None:
# no weighting
w = None
meanx = diff_pts[:, 0].mean(dtype=np.longdouble)
meany = diff_pts[:, 1].mean(dtype=np.longdouble)
else:
if wxy is None:
w = np.array(wuv, dtype=np.longdouble)
elif wuv is None:
w = np.array(wxy, dtype=np.longdouble)
else:
# 1/w = sigma**2 = sigma_xy**2 + sigma_uv**2 = 1/wxy + 1/wuv
wuv = np.array(wuv, dtype=np.longdouble)
wxy = np.array(wxy, dtype=np.longdouble)
m = np.logical_and(wuv > 0, wxy > 0)
w = np.zeros_like(wuv)
w[m] = wxy[m] * wuv[m] / (wxy[m] + wuv[m])
if np.any(w < 0.0):
raise ValueError("Invalid weights: weights must be non-negative.")
if not np.sum(w > 0, dtype=int):
raise ValueError("Not enough valid data for 'shift' fit: "
"too many weights are zero!")
w /= np.sum(w, dtype=np.longdouble)
meanx = np.dot(w, diff_pts[:, 0])
meany = np.dot(w, diff_pts[:, 1])
p = np.array([1.0, 0.0, meanx], dtype=np.longdouble)
q = np.array([0.0, 1.0, meany], dtype=np.longdouble)
fit = _build_fit(p, q, 'shift')
resids = diff_pts - fit['shift']
fit['resids'] = resids.astype(np.double)
_compute_stat(fit, residuals=resids, weights=w)
return fit
# Implementation of geomap 'rscale' fitting based on 'lib/geofit.x'
# by Warren Hack. Support for axis flips added by Mihai Cara.
def fit_rscale(xy, uv, wxy=None, wuv=None):
""" Fits (non-iteratively and without sigma-clipping) a displacement,
rotation and scale transformations between input lists of positions
``xy`` and ``uv``. When weights are provided, a weighted fit is performed.
Parameter descriptions and return values are identical to those
in `iter_linear_fit`, except returned ``fit`` dictionary does not contain
the following keys irrelevant to this function: ``'center'``,
``'fitmask'``, and ``'eff_nclip'``.
"""
if len(xy) < 2:
raise NotEnoughPointsError(
"At least two points are required to find shifts, rotation, and "
"scale."
)
x = np.array(xy[:, 0], dtype=np.longdouble)
y = np.array(xy[:, 1], dtype=np.longdouble)
u = np.array(uv[:, 0], dtype=np.longdouble)
v = np.array(uv[:, 1], dtype=np.longdouble)
if wxy is None and wuv is None:
# no weighting
w = None
xm = np.mean(x)
ym = np.mean(y)
um = np.mean(u)
vm = np.mean(v)
x -= xm
y -= ym
u -= um
v -= vm
su2 = np.dot(u, u)
sv2 = np.dot(v, v)
sxv = np.dot(x, v)
syu = np.dot(y, u)
sxu = np.dot(x, u)
syv = np.dot(y, v)
su2v2 = su2 + sv2
else:
if wxy is None:
w = np.array(wuv, dtype=np.longdouble)
elif wuv is None:
w = np.array(wxy, dtype=np.longdouble)
else:
# 1/w = sigma**2 = sigma_xy**2 + sigma_uv**2 = 1/wxy + 1/wuv
wuv = np.array(wuv, dtype=np.longdouble)
wxy = np.array(wxy, dtype=np.longdouble)
m = np.logical_and(wuv > 0, wxy > 0)
w = np.zeros_like(wuv)
w[m] = wxy[m] * wuv[m] / (wxy[m] + wuv[m])
if np.any(w < 0.0):
raise ValueError("Invalid weights: weights must be non-negative.")
if np.sum(w > 0) < 2:
raise ValueError("Not enough valid data for 'rscale' fit: "
"too many weights are zero!")
w /= np.sum(w, dtype=np.longdouble)
xm = np.dot(w, x)
ym = np.dot(w, y)
um = np.dot(w, u)
vm = np.dot(w, v)
x -= xm
y -= ym
u -= um
v -= vm
su2 = np.dot(w, u**2)
sv2 = np.dot(w, v**2)
sxv = np.dot(w, x * v)
syu = np.dot(w, y * u)
sxu = np.dot(w, x * u)
syv = np.dot(w, y * v)
su2v2 = su2 + sv2
det = sxu * syv - sxv * syu
if det < 0:
rot_num = sxv + syu
rot_denom = sxu - syv
else:
rot_num = sxv - syu
rot_denom = sxu + syv
if rot_num == rot_denom:
theta = 0.0
else:
theta = np.rad2deg(np.arctan2(rot_num, rot_denom))
if theta < 0:
theta += 360.0
ctheta = np.cos(np.deg2rad(theta))
stheta = np.sin(np.deg2rad(theta))
s_num = rot_denom * ctheta + rot_num * stheta
if su2v2 > 0.0:
mag = s_num / su2v2
else:
raise SingularMatrixError(
"Singular matrix: suspected colinear points."
)
if det < 0:
# "flip" y-axis (reflection about x-axis *after* rotation)
# NOTE: keep in mind that 'matrix' is the transposed rotation matrix.
sthetax = -mag * stheta
cthetay = -mag * ctheta
else:
sthetax = mag * stheta
cthetay = mag * ctheta
cthetax = mag * ctheta
sthetay = mag * stheta
sdet = np.sign(det)
xshift = xm - um * cthetax - sdet * vm * sthetax
yshift = ym + sdet * um * sthetay - vm * cthetay
p = np.array([cthetax, sthetay, xshift], dtype=np.longdouble)
q = np.array([-sthetax, cthetay, yshift], dtype=np.longdouble)
# Return the shift, rotation, and scale changes
fit = _build_fit(p, q, fitgeom='rscale')
resids = xy - np.dot(uv, fit['matrix_ld'].T) - fit['shift_ld']
fit['resids'] = resids.astype(np.double)
_compute_stat(fit, residuals=resids, weights=w)
return fit
def fit_general(xy, uv, wxy=None, wuv=None):
""" Fits (non-iteratively and without sigma-clipping) a displacement,
rotation, scale, and skew transformations (i.e., the full ``2x2``
transformation matrix) between input lists of positions
``xy`` and ``uv``. When weights are provided, a weighted fit is performed.
Parameter descriptions and return values are identical to those
in `iter_linear_fit`, except returned ``fit`` dictionary does not contain
the following keys irrelevant to this function: ``'center'``,
``'fitmask'``, and ``'eff_nclip'``.
"""
if len(xy) < 3:
raise NotEnoughPointsError(
"At least three points are required to find 6-parameter linear "
"affine transformations."
)
x = np.array(xy[:, 0], dtype=np.longdouble)
y = np.array(xy[:, 1], dtype=np.longdouble)
u = np.array(uv[:, 0], dtype=np.longdouble)
v = np.array(uv[:, 1], dtype=np.longdouble)
if wxy is None and wuv is None:
# no weighting
w = None
# Set up products used for computing the fit
sw = float(x.size)
sx = x.sum()
sy = y.sum()
su = u.sum()
sv = v.sum()
sxu = np.dot(x, u)
syu = np.dot(y, u)
sxv = np.dot(x, v)
syv = np.dot(y, v)
suu = np.dot(u, u)
svv = np.dot(v, v)
suv = np.dot(u, v)
else:
if wxy is None:
w = np.array(wuv, dtype=np.longdouble)
elif wuv is None:
w = np.array(wxy, dtype=np.longdouble)
else:
# 1/w = sigma**2 = sigma_xy**2 + sigma_uv**2 = 1/wxy + 1/wuv
wuv = np.array(wuv, dtype=np.longdouble)
wxy = np.array(wxy, dtype=np.longdouble)
m = np.logical_and(wuv > 0, wxy > 0)
w = np.zeros_like(wuv)
w[m] = wxy[m] * wuv[m] / (wxy[m] + wuv[m])
if np.any(w < 0.0):
raise ValueError("Invalid weights: weights must be non-negative.")
if np.sum(w > 0) < 3:
raise ValueError("Not enough valid data for 'general' fit: "
"too many weights are zero!")
# Set up products used for computing the fit
sw = np.sum(w, dtype=np.longdouble)
sx = np.dot(w, x)
sy = np.dot(w, y)
su = np.dot(w, u)
sv = np.dot(w, v)
sxu = np.dot(w, x * u)
syu = np.dot(w, y * u)
sxv = np.dot(w, x * v)
syv = np.dot(w, y * v)
suu = np.dot(w, u * u)
svv = np.dot(w, v * v)
suv = np.dot(w, u * v)
m = np.array([[su, sv, sw], [suu, suv, su], [suv, svv, sv]],
dtype=np.longdouble)
a = np.array([sx, sxu, sxv], dtype=np.longdouble)
b = np.array([sy, syu, syv], dtype=np.longdouble)
try:
inv_m = inv(m)
except np.linalg.LinAlgError:
raise SingularMatrixError(
"Singular matrix: suspected colinear points."
)
p = np.dot(inv_m, a)
q = np.dot(inv_m, b)
if not (np.all(np.isfinite(p)) and np.all(np.isfinite(q))):
raise SingularMatrixError(
"Singular matrix: suspected colinear points."
) # pragma: no cover
# Return the shift, rotation, and scale changes
fit = _build_fit(p, q, 'general')
resids = xy - np.dot(uv, fit['matrix_ld'].T) - fit['shift_ld']
fit['resids'] = resids.astype(np.double)
_compute_stat(fit, residuals=resids, weights=w)
return fit
def _build_fit(p, q, fitgeom):
# Build fit matrix:
fit_matrix = np.vstack((p[:2], q[:2]))
# determinant of the transformation
det = p[0] * q[1] - p[1] * q[0]
sdet = np.sign(det)
proper = sdet >= 0
# Create a working copy (no reflections) for computing transformation
# parameters (scale, rotation angle, skew):
wfit = fit_matrix.copy()
# Skew is zero for all fitgeom except 'general':
skew = 0.0
if fitgeom == 'shift':
fit = {
'shift': np.array([p[2], q[2]], dtype=np.double),
'shift_ld': np.array([p[2], q[2]], dtype=np.longdouble),
'matrix': np.array(fit_matrix, dtype=np.double),
'matrix_ld': np.array(fit_matrix, dtype=np.longdouble),
'proper_rot': 0.0,
'rot': (0.0, 0.0),
'<rot>': 0.0,
'scale': (1.0, 1.0),
'<scale>': 1.0,
'skew': 0.0,
'proper': proper,
'fitgeom': 'shift'
}
return fit
# Compute average scale:
s = np.sqrt(np.abs(det))
# Compute scales for each axis:
if fitgeom == 'general':
sx, sy = np.sqrt(p[:2]**2 + q[:2]**2)
else:
sx = s
sy = s
# Remove scale from the transformation matrix:
wfit[:, 0] /= sx
wfit[:, 1] /= sy
# Compute rotation angle as if we have a proper rotation.
# This will also act as *some sort* of "average rotation" even for
# transformations with different rot_x and rot_y:
prop_rot = np.rad2deg(
np.arctan2(wfit[0, 1] - sdet * wfit[1, 0],
wfit[0, 0] + sdet * wfit[1, 1])
)
if proper and fitgeom == 'rscale':
rotx = prop_rot
roty = prop_rot
rot = prop_rot
else:
rotx = np.rad2deg(np.arctan2(-wfit[1, 0], wfit[0, 0]))
roty = np.rad2deg(np.arctan2(wfit[0, 1], wfit[1, 1]))
rot = 0.5 * (rotx + roty)
skew = np.mod(roty - rotx - 180.0, 360.0) - 180.0
fit = {
'shift': np.array([p[2], q[2]], dtype=np.double),
'shift_ld': np.array([p[2], q[2]], dtype=np.longdouble),
'matrix': np.array(fit_matrix, dtype=np.double),
'matrix_ld': np.array(fit_matrix, dtype=np.longdouble),
'proper_rot': float(prop_rot),
'rot': (float(rotx), float(roty)),
'<rot>': float(rot),
'scale': (float(sx), float(sy)),
'<scale>': float(s),
'skew': float(skew),
'proper': proper,
'fitgeom': fitgeom
}
return fit
def build_fit_matrix(rot, scale=1):
r"""
Create an affine transformation matrix (2x2) from the provided rotation
angle(s) and scale(s):
.. math::
M = \begin{bmatrix}
s_x \cos(\theta_x) & s_y \sin(\theta_y) \\
-s_x \sin(\theta_x) & s_y \cos(\theta_y)
\end{bmatrix}
Parameters
----------
rot: tuple, float, optional
Rotation angle in degrees. Two values (one for each axis) can be
provided as a tuple.
scale: tuple, float, optional
Scale of the liniar transformation. Two values (one for each axis)
can be provided as a tuple.
Returns
-------
matrix: numpy.ndarray
A 2x2 `numpy.ndarray` containing coefficients of a liniear
transformation.
"""
if hasattr(rot, '__iter__'):
rx, ry = map(np.deg2rad, rot)
else:
rx = ry = np.deg2rad(float(rot))
if hasattr(scale, '__iter__'):
sx, sy = scale
else:
sx = sy = float(scale)
matrix = np.array([[sx * np.cos(rx), sy * np.sin(ry)],
[-sx * np.sin(rx), sy * np.cos(ry)]])
return matrix
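
# Illustrative usage sketch (an addition, not part of the original module): fit
# a known rotation/scale/offset generated with build_fit_matrix and recover it
# with iter_linear_fit. The point cloud and transform values are made up.
def _example_iter_linear_fit():
    rng = np.random.default_rng(0)
    uv = rng.uniform(0.0, 1000.0, size=(50, 2))
    matrix = build_fit_matrix(rot=15.0, scale=1.02)  # 15 deg rotation, 2% scale
    shift = np.array([5.0, -3.0])
    xy = np.dot(uv, matrix.T) + shift
    fit = iter_linear_fit(xy, uv, fitgeom='rscale', nclip=0)
    # fit['matrix'] reproduces `matrix` and fit['rmse'] is ~0; note that
    # fit['shift'] is reported relative to fit['center'] (see the docstring).
    return fit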
| 34.521739 | 90 | 0.573603 | [
"BSD-3-Clause"
] | jhunkeler/tweakwcs | tweakwcs/linearfit.py | 26,996 | Python |
'''
@Date: 2019-08-22 20:40:54
@Author: ywyz
@LastModifiedBy: ywyz
@Github: https://github.com/ywyz
@LastEditors: ywyz
@LastEditTime: 2019-08-22 20:48:24
'''
years, months = eval(input("Enter years and months: "))
if (months == 1 or months == 3 or months == 5 or months == 7 or months == 8
        or months == 10 or months == 12):
    print(years, ".", months, " has 31 days. ")
elif (months == 4 or months == 6 or months == 9 or months == 11):
    print(years, ".", months, "has 30 days. ")
elif (months == 2):
    if (years % 4 == 0 and years % 100 != 0) or (years % 400 == 0):
        print(years, ".", months, "has 29 days. ")
    else:
        print(years, ".", months, "has 28 days. ")
else:
    print("Wrong Input!")
"Apache-2.0"
] | ywyz/IntroducingToProgrammingUsingPython | Exercise04/4-11.py | 725 | Python |
#!/usr/bin/env python2
# Copyright (c) 2017-present, Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
"""Perform inference on a video or zmq with a certain extension
(e.g., .jpg) in a folder. Sample:
python tools/infer_from_video.py \
--cfg configs/12_2017_baselines/e2e_mask_rcnn_R-101-FPN_2x.yaml \
--output-dir ./output \
--image-ext jpg \
--wts generalized_rcnn/model_final.pkl \
--video ~/data/video3.h264
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from collections import defaultdict
import argparse
import cv2 # NOQA (Must import before importing caffe2 due to bug in cv2)
import logging
import sys
import time
import zmq
import numpy as np
import os
from caffe2.python import workspace
import glob
from detectron.core.config import assert_and_infer_cfg
from detectron.core.config import cfg
from detectron.core.config import merge_cfg_from_file
from detectron.utils.io import cache_url
from detectron.utils.logging import setup_logging
from detectron.utils.timer import Timer
import detectron.core.test_engine as infer_engine
import detectron.datasets.dummy_datasets as dummy_datasets
import detectron.utils.c2 as c2_utils
# import arp.line_detection as detection
from multiprocessing import Process, Queue
from Queue import Empty
import json
import math
import copy
import arp.const as const
from arp.fusion_kalman import Fusion
from arp.fusion_particle_line import FusionParticle
from arp.detection_filter import LineFilter
from arp.line_extractor import LineExtractor
c2_utils.import_detectron_ops()
# OpenCL may be enabled by default in OpenCV3; disable it because it's not
# thread safe and causes unwanted GPU memory allocations.
cv2.ocl.setUseOpenCL(False)
g_fusion_filter = None
g_particle_filter = None
extractor = LineExtractor()
def parse_args():
parser = argparse.ArgumentParser(description='End-to-end inference')
parser.add_argument(
'--cfg',
dest='cfg',
help='cfg model file (/path/to/model_config.yaml)',
default=None,
type=str
)
parser.add_argument(
'--wts',
dest='weights',
help='weights model file (/path/to/model_weights.pkl)',
default=None,
type=str
)
parser.add_argument(
'--output-dir',
dest='output_dir',
help='directory for visualization pdfs (default: /tmp/infer_simple)',
default='/tmp/infer_simple',
type=str
)
parser.add_argument(
'--image-ext',
dest='image_ext',
help='image file name extension (default: jpg)',
default='png',
type=str
)
parser.add_argument(
'--video',
help='zmq or /path/to/video/file',
default=None,
type=str
)
if len(sys.argv) == 1:
parser.print_help()
sys.exit(1)
return parser.parse_args()
predict_time = []
process_time = []
show_img = None
#im is rgb
def hanle_frame(args, frameId, origin_im, im, logger, model, dataset, file_name):
global predict_time, process_time, show_img
logger.info('Processing frame: {}'.format(frameId))
# cv2.imshow("tmplog", im)
# cv2.waitKey(0)
timers = defaultdict(Timer)
t = time.time()
im = im[:, :, ::-1]
with c2_utils.NamedCudaScope(0):
cls_boxes, cls_segms, cls_keyps = infer_engine.im_detect_all(
model, im, None, timers=timers
)
predict_time.append(time.time() - t)
logger.info('Inference time: {:.3f}s'.format(time.time() - t))
logger.info('predict_time: {:.3f}s'.format(np.mean(np.array(predict_time))))
# for k, v in timers.items():
# logger.info(' | {}: {:.3f}s'.format(k, v.average_time))
if frameId == 1:
logger.info(
' \ Note: inference on the first image will be slower than the '
'rest (caches and auto-tuning need to warm up)'
)
t = time.time()
img_debug = True
ret = extractor.get_detection_line(
im,
cls_boxes,
cls_segms,
cls_keyps,
dataset=dataset,
show_class=True,
thresh=0.8,
kp_thresh=2,
frame_id=frameId,
img_debug = img_debug
)
im, mid_im, top_im, result, fork_pos = ret
process_time.append(time.time() - t)
logger.info('get_detection_line time: {:.3f}s'.format(time.time() - t))
#
logger.info('process_time: {:.3f}s'.format(np.mean(np.array(process_time))))
line_list = None
cache_list = None
particles = None
filter_list = None
if not result is None:
line_list, cache_list, filter_list, particles = add2MsgQueue(result, frameId, fork_pos, img_debug)
g_debug_img_queue.put((origin_im[:, :, ::-1], im, mid_im, top_im, line_list, cache_list, filter_list, frameId, fork_pos, file_name))
if g_debug_img_queue.full():
try:
g_debug_img_queue.get_nowait()
except Empty:
print ("Queue.Empty")
def drawParticles(image, particles):
histogram = np.array([[i, 0] for i in range(500)])
for index, p in enumerate(particles):
if abs(p.x) > 100:
continue
meter_scale = (3.5/extractor.lane_wid)
# histogram[index][0] = index + 100#int(p.x) / meter_scale
histogram[int(p.x / meter_scale) + 150][1] += 1
cv2.polylines(image, np.int32([np.vstack((histogram[:,0] + extractor.IMAGE_WID/2 - 150, histogram[:,1])).T]), False, (0, 0, 250), thickness=1)
def drawParabola(image, line_param, type, color):
points = []
for x in range(-800, 30, 10):
points.append([line_param[0] * x**2 + line_param[1] * x + line_param[2], x])
points = np.array(points)
points[:,0] = points[:,0] + extractor.IMAGE_WID/2
points[:,1] = points[:,1] + extractor.IMAGE_HEI
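    # Map the sampled parabola points from the top (bird's-eye) view back into
    # the camera image with the H_OP homography, then restore the vertical
    # offset removed by the pre-detection crop.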
points = cv2.perspectiveTransform(np.array([points], dtype='float32'), np.array(extractor.H_OP))
offset_y = extractor.CUT_OFFSET_IMG[0]
points = points[0]
points[:,1] = points[:,1] + offset_y
# print ("drawParabola points:" + str(points))
parabola_im = np.zeros((extractor.IMAGE_HEI,extractor.IMAGE_WID,3), np.uint8)
if type in ["yellow dashed", "yellow solid", "yellow solid solid", "yellow dashed dashed", "yellow dashed-solid", "yellow solid-dashed"]:
cv2.polylines(parabola_im, np.int32([np.vstack((points[:,0], points[:,1])).T]), False, (0, 200, 200), thickness=2)
elif type in ["boundary", "fork_edge", "handrail"]:
cv2.polylines(parabola_im, np.int32([np.vstack((points[:, 0], points[:, 1])).T]), False, (0, 0, 200), thickness=4)
else:
cv2.polylines(parabola_im, np.int32([np.vstack((points[:,0], points[:,1])).T]), False, color, thickness=2)
kernel = np.ones((5,5), np.float32) / 25
parabola_im = cv2.filter2D(parabola_im, -1, kernel)
# parabola_im = cv2.GaussianBlur(parabola_im, (16, 16),0)
image = cv2.addWeighted(image, 1., parabola_im, 1., 0)
return image
def add2MsgQueue(result, frameId, fork_x, img_debug):
if (result is None) or len(result[0]) == 0:
print ("error: len(line_list) == 0")
        return [], None, None, None
full_line_list = []
full_cache_list = []
line_filter = [left_fork_filter]
is_fork = (len(result) == 2)
if is_fork:
if not right_fork_filter.isAvialabel():
right_fork_filter.reset(left_fork_filter.cache_list)
line_filter.append(right_fork_filter)
else:
if right_fork_filter.isAvialabel():
left_fork_filter.extend(right_fork_filter.cache_list)
for index, parabola_param in enumerate(result):
line_list = []
for (line_param, line_type) in zip(parabola_param[0], parabola_param[1]):
if abs(line_param[2]) > 500:
print ("abs(line_param[2]) > 500")
continue
# line_info = {'curve_param':line_param[0:3].tolist(), 'type':line_type, 'score':line_param[3], 'x':line_param[4]}
line_info = {'curve_param':line_param[0:3].tolist(), 'type':line_type, 'score':line_param[3], 'x':line_param[2], 'middle':line_param[5]}
line_list.append(line_info)
line_list, cache_list = line_filter[index].get_predict_list(line_list, frameId, fork_x[0] if is_fork else None, index==0)
full_line_list.append(line_list)
full_cache_list.append(cache_list)
filter_list = None
particles = None
# filter_list = dr_filter(line_list)
# ret = particle_filter(line_list)
# if not ret is None:
# filter_list, particles = ret
finalMessage = {'frame': frameId, 'timestamp': time.time(), 'is_fork': is_fork, 'line_list': full_line_list[0]}
json_str = json.dumps(finalMessage)
print ("finalMessage:", json_str)
if g_detect_queue.full():
g_detect_queue.get_nowait()
g_detect_queue.put(json_str)
return full_line_list, full_cache_list, filter_list, particles
def get_right_parabola(line_list):
for index, line in enumerate(line_list):
if line["curve_param"][2] > 0:
ret = line["curve_param"][:]
ret[2] = ret[2] % extractor.lane_wid
return ret
ret = line_list[-1]["curve_param"][:]
ret[2] = ret[2] % extractor.lane_wid
return ret
def particle_filter(line_list):
global g_particle_filter
if line_list is None or len(line_list) == 0:
return None
param = get_right_parabola(line_list)
meter_scale = (3.5/extractor.lane_wid)
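    # Pixel-to-meter conversion, assuming the detected lane width in pixels
    # corresponds to a 3.5 m real-world lane.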
x = param[2] * meter_scale
if g_particle_filter is None or (time.time() - g_particle_filter.timestamp > 1):
g_particle_filter = FusionParticle(x, g_dr_queue)
g_particle_filter.start()
return None
t = time.time()
# x_estimate, particles = g_particle_filter.update(x)
x_estimate, particles = g_particle_filter.update(x, param)
dalta_x = (x_estimate - x) / meter_scale
print (str(time.time()-t) + "particle filter adjust x:" + str(dalta_x))
filter_list = copy.deepcopy(line_list)
for line in filter_list:
line["curve_param"][2] += dalta_x
return filter_list, particles
g_x_log = []
g_x_pred_log = []
g_x_est_log = []
g_x_time = []
def dr_filter(line_list):
if line_list is None or len(line_list) == 0:
return None
global g_fusion_filter
param = get_right_parabola(line_list)
meter_scale = (3.5/extractor.lane_wid)
x = param[2] * meter_scale
avg_speed = []
avg_angle = []
for i in range(10):
message = g_dr_queue.get(True)
json_item = json.loads(message)
avg_speed.append(json_item["speed"])
avg_angle.append(json_item["steerAngle"])
avg_speed = np.array(avg_speed)
debug_angle = avg_angle[0]
avg_angle = np.array(avg_angle)
avg_speed = np.mean(avg_speed)
avg_angle = np.mean(avg_angle)
print ("g_dr_queue speed:{} angle:{}->{}".format(avg_speed, debug_angle, avg_angle))
v = avg_speed
wheel_theta = avg_angle / const.STEER_RATIO
wheel_theta = math.radians(wheel_theta)
car_theta = np.pi/2 + wheel_theta
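    # Bicycle-model yaw rate: turning radius R ~= WHEEL_BASE / sin(wheel_theta),
    # so w = v / R.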
w = v/(const.WHEEL_BASE/np.sin(wheel_theta))
# if car_theta < np.pi / 2:
# w = -w
if g_fusion_filter is None or (time.time() - g_fusion_filter.timestamp > 1):
g_fusion_filter = Fusion(x, v, w)
print ("kalman filter recreate")
return None
t = time.time() - g_fusion_filter.timestamp
# pre_estimate = g_fusion_filter.get_estimate()
#x, v, w, t, parabola_param
if len(g_x_time) == 0:
g_x_time.append(t)
else:
g_x_time.append(t + g_x_time[-1])
g_x_log.append(x)
estimate_x = g_fusion_filter.update_step(x, v, w, t, param)
predict_x = g_fusion_filter.get_predict()
g_x_pred_log.append(predict_x)
g_x_est_log.append(estimate_x)
print("kalman filter: {} + {} --> {} ".format(x, predict_x, estimate_x))
if len(g_x_log) % 100 == 0:
np.savetxt('kalman_x.txt', g_x_log, newline=',', fmt=str("%s"))
np.savetxt('kalman_x_pred.txt', np.array(g_x_pred_log), newline=',', fmt=str("%s"))
np.savetxt('kalman_x_est.txt', np.array(g_x_est_log), newline=',', fmt=str("%s"))
np.savetxt('kalman_x_time.txt', np.array(g_x_time), newline=',', fmt=str("%s"))
dalta_x = (estimate_x - x) / meter_scale
print ("kalman filter adjust x:" + str(dalta_x))
filter_list = copy.deepcopy(line_list)
for line in filter_list:
line["curve_param"][2] += dalta_x
return filter_list
left_fork_filter = LineFilter()
right_fork_filter = LineFilter()
def main(args):
logger = logging.getLogger(__name__)
merge_cfg_from_file(args.cfg)
cfg.NUM_GPUS = 1
args.weights = cache_url(args.weights, cfg.DOWNLOAD_CACHE)
assert_and_infer_cfg(cache_urls=False)
model = infer_engine.initialize_model_from_cfg(args.weights)
dummy_coco_dataset = dummy_datasets.get_coco_dataset()
zmq_video = args.video == "zmq"
frameId = 0
print ("args.video:" + str(args.video))
socket = None
im_list = None
ret = None
if zmq_video:
context = zmq.Context()
socket = context.socket(zmq.REQ)
socket.connect("tcp://localhost:{}".format(const.PORT_IMAGE_OUT))
elif os.path.isdir(args.video):
im_list = glob.glob(args.video + '/*.' + args.image_ext)
im_list.sort()
else:
# From virtual camera video and its associated timestamp file on Drive PX2,e.g."./lane/videofilepath.h264"
cap = cv2.VideoCapture(args.video)
im_file_index = frameId
while True:
file_name = ""
if zmq_video:
try:
socket.send_string('req from detectron')
print ("--------------------send!")
message = socket.recv()
print ("--------------------recv!" + str(time.time()))
print("Received message length:" + str(len(message)) + " type:" + str(type(message)))
if len(message) < 100:
continue
img_np = np.fromstring(message, np.uint8)
if const.CAMERA_TYPE != 2:
img_np = img_np.reshape((1208, 1920,3))
else:
img_np = img_np.reshape((604, 960,3))
print("nparr type:" + str(type(img_np)) + " shape:" + str(img_np.shape))
ret = True
except KeyboardInterrupt:
print ("interrupt received, stopping...")
socket.close()
context.term()
                ret = False
elif os.path.isdir(args.video):
if im_file_index >= len(im_list):
break
file_name = im_list[im_file_index].split("/")[-1].split(".")[0]
img_np = cv2.imread(im_list[im_file_index])
img_np = cv2.cvtColor(img_np, cv2.COLOR_BGR2RGB)
im_file_index += 1
ret = True
else:
ret, img_np = cap.read()
frameId += 1
# read completely or raise exception
if not ret:
print("cannot get frame")
break
if frameId < 0:
continue
if frameId % 1 == 0:
t = time.time()
print("time:" + str(t))
time.sleep(0.001)
#cv2.imwrite("tmp" + str(frameId) + ".png", img_np)
if extractor.scale_size:
# img_np = cv2.resize(img_np, dsize=img_np.shape/2, interpolation=cv2.INTER_CUBIC)
img_np = img_np[::2]
img_np = img_np[:,::2]
origin_im = np.copy(img_np)
img_np = img_np[extractor.CUT_OFFSET_IMG[0]:extractor.CUT_OFFSET_IMG[1], 0:extractor.IMAGE_WID]
print ("detection size:", img_np.shape)
# img_np = cv2.undistort(img_np, mtx, dist, None)
hanle_frame(args, frameId, origin_im, img_np, logger, model, dummy_coco_dataset, file_name)
logger.info('hanle_frame time: {:.3f}s'.format(time.time() - t))
raw_input('press Enter to exit...')
def show_debug_img():
print ("debug img process start !")
while(True):
message = g_debug_img_queue.get(True)
if not message is None:
origin_im, im, mid_im, top_im, line_list_array, cache_list_array, filter_list_array, frameId, fork_pos, file_name = message
half_size = (int(im.shape[1] / 2), int(im.shape[0] / 2))
if extractor.IMAGE_WID > 960:
im = cv2.resize(im, half_size)
top_im = cv2.resize(top_im, (extractor.IMAGE_WID/2, extractor.IMAGE_HEI/2))
mid_im = cv2.resize(mid_im, half_size)
# mid_im = mid_im[604:902, 0:extractor.IMAGE_WID]
# mid_im = cv2.resize(mid_im, (int(extractor.IMAGE_WID / 2), 150))
else:
# mid_im = mid_im[302:451, 0:extractor.IMAGE_WID]
pass
if (not line_list_array is None) and (not cache_list_array is None):
if filter_list_array is None:
filter_list_array = [[]] if len(line_list_array) == 1 else [[],[]]
line_color = [(0, 200, 0), (100, 200, 0)]
for line_list, cache_list, filter_list, color in zip(line_list_array, cache_list_array, filter_list_array, line_color):
x_pos = []
x_pos_11 = []
prob_wid = extractor.IMAGE_WID
if prob_wid > 960:
prob_wid = prob_wid / 2
for i in range(-int(prob_wid / 2), int(prob_wid / 2), 1):
matched_y = 1
matched_y_11 = 2
for l in line_list:
dis = abs(l['x'] - i)
if dis < 4:
# hei = dis
if l['type'] == "boundary":
matched_y = int(220 * l['score'])
else:
matched_y = int(190 * l['score'] - dis * dis)
for l in cache_list:
dis = abs(l['x'] - i)
if dis < 8:
matched_y_11 = int(200 * l['score'] - dis * dis)
x_pos.append([i + int(prob_wid / 2), matched_y])
x_pos_11.append([i + int(prob_wid / 2), matched_y_11])
# h = np.zeros((100, extractor.IMAGE_WID, 3))
cv2.polylines(origin_im, [np.array(x_pos)], False, (0, 255, 0))
cv2.polylines(origin_im, [np.array(x_pos_11)], False, (0, 0, 255))
# origin_im = np.flipud(origin_im)
# cv2.imshow('prob', h)
# cv2.waitKey(1)
for line in line_list:
line_param = line['curve_param']
line_type = line['type']
origin_im = drawParabola(origin_im, line_param[0:3], line_type, color=color)
if not filter_list is None:
for line in filter_list:
line_param = line['curve_param']
line_type = line['type']
origin_im = drawParabola(origin_im, line_param[0:3], line_type, color=(200, 0, 0))
overlay = origin_im.copy()
color = [(255, 0, 0), (0, 255, 0), (0, 0, 255), (255, 255, 0), (0, 255, 255), (255, 0, 255)]
# for index in range(len(line_array)):
# if index > 0:
# left_line = line_array[index - 1]
# right_line = line_array[index]
# fill_points = np.array([np.append(left_line, right_line[::-1], axis=0)], dtype=np.int32)
# print ("fill_points:" + str(fill_points.shape))
# print ("color[index - 1]:" + str(color[index - 1]))
# cv2.fillPoly(overlay, fill_points, color[index - 1])
# alpha = 0.2
# cv2.addWeighted(overlay, alpha, origin_im, 1-alpha, 0, origin_im)
# origin_im
origin_im = np.append(origin_im, top_im, axis=1)
im = np.append(im, mid_im, axis=1)
show_img = np.append(origin_im, im, axis=0)
file_name = "source_{}_{}.png".format(file_name, frameId)
cv2.imwrite(os.path.join(args.output_dir, file_name), show_img)
cv2.imshow("carlab", show_img)
cv2.waitKey(1)
def result_sender():
print ("sender process start !")
context = zmq.Context()
socket = context.socket(zmq.REP)
socket.setsockopt(zmq.SNDTIMEO, 3000)
socket.bind("tcp://*:{}".format(const.PORT_DETECTION))
while(True):
message = g_detect_queue.get(True)
if not message is None:
recv = socket.recv()
print ("Received request:%s" % recv)
try:
socket.send(message)
except zmq.ZMQError:
time.sleep(1)
def dr_recever():
print ("dr recever process start !")
sub_context = zmq.Context()
socket = sub_context.socket(zmq.SUB)
print ("tcp://localhost:{}".format(const.PORT_DR_OUT))
socket.connect("tcp://localhost:{}".format(const.PORT_DR_OUT))
socket.setsockopt_string(zmq.SUBSCRIBE, "")
# socket.setsockopt(zmq.CONFLATE, 1)
while(True):
try:
string = socket.recv()
# print ("Received:{}".format(len(string)))
if g_dr_queue.full():
g_dr_queue.get(True)
g_dr_queue.put(string)
        except zmq.ZMQError:
time.sleep(1)
if __name__ == '__main__':
workspace.GlobalInit(['caffe2', '--caffe2_log_level=0'])
setup_logging(__name__)
args = parse_args()
# g_fusion_filter = Fusion()
g_detect_queue = Queue(2)
g_dr_queue = Queue(10)
p = Process(target=result_sender)
p.start()
g_debug_img_queue = Queue(2)
p = Process(target=show_debug_img)
p.start()
# pdr_receiver = Process(target=dr_recever)
# pdr_receiver.start()
main(args)
| 38.918367 | 148 | 0.597273 | [
"Apache-2.0"
] | keeploading/Detectron | arp/arp_infer.py | 22,884 | Python |
#!/usr/bin/python3
# Helper script for the transactions method that can read the log file
# and use it to detect and recover the state to one of the transactions
# at which the script had previously been restarted.
import sys
import os
import re
import json
import subprocess
import codecs
import argparse
import tempfile
import git
branchRe = re.compile(r'^.* - Branch ([^ \t\r\n]+) at ([0-9a-fA-F]+)(, current)?.$')
def GetBranch(logLine):
m = branchRe.match(logLine)
if m:
br = m.group(1)
hash = m.group(2)
isCurrent = m.group(3) is not None
return { "name": br, "commit": hash, "is_current": isCurrent}
return None
lastStateRe = re.compile(r'^.*Loaded last state at transaction ([0-9]+) as:$')
def GetTransaction(logLine):
m = lastStateRe.match(logLine)
if m is not None:
return int(m.group(1))
return None
def Restore(repoPath, branchList, transaction):
print("Restoring state for transaction: {tr}".format(tr=transaction))
print("branch list:")
for br in branchList:
print(" - Branch {br} at {hash}.{current}".format(br=br["name"], hash=br["commit"], current=' Current.' if br["is_current"] else ''))
state = { "transaction": transaction, "branch_list": branchList }
repo = git.open(repoPath)
if repo is None:
print("Failed to open git repository '{r}'".format(r=repoPath))
return 1
stateFilePath = None
with tempfile.NamedTemporaryFile(mode='w+', prefix='ac2git_state_', delete=False) as stateFile:
stateFilePath = stateFile.name
stateFile.write(json.dumps(state))
hashObj = repo.raw_cmd(['git', 'hash-object', '-w', stateFilePath ])
if hashObj is None:
raise Exception("Failed to restore state! git hash-object -w {f}, returned {r}.".format(f=stateFilePath, r=hashObj))
else:
os.remove(stateFilePath)
refResult = repo.raw_cmd(['git', 'update-ref', 'refs/ac2git/state', hashObj])
if refResult is None:
raise Exception("Failed to restore state! git update-ref refs/ac2git/state {h}, returned {r}.".format(h=hashObj, r=refResult))
return 0
def Main(argv):
argparser = argparse.ArgumentParser(description='Processes a logfile previously generated by the ac2git.py script for restore points and optionally restores the state of a git repository to a selected point. Only works for the transactions method conversions.')
argparser.add_argument('-f', '--file', dest='file', help='The log file from which the state information will be parsed.')
argparser.add_argument('-t', '--transaction', dest='transaction', help='The transaction, from the log file, to which the state will be restored to. If omitted then all potential restore points are printed and the script exits with a return code of 1.')
argparser.add_argument('-r', '--git-repo', dest='repo', help='The path to the git repository whose state will be restored.')
args = argparser.parse_args()
if not os.path.exists(args.file):
print("Failed to open log file '{f}'.".format(f=args.file))
return 1
trList = []
with codecs.open(args.file) as f:
line = f.readline()
while len(line) > 0:
line = line.strip()
tr = GetTransaction(line)
if tr is not None:
branchList = []
line = f.readline()
while len(line) > 0:
line = line.strip()
br = GetBranch(line)
if br is not None:
branchList.append(br)
else:
break
line=f.readline()
if args.transaction is not None and int(tr) == int(args.transaction):
return Restore(args.repo, branchList, int(args.transaction))
elif tr not in trList:
trList.append(tr)
print("Found transaction {tr}.".format(tr=tr))
line = f.readline()
if len(trList) > 0:
print("Please choose one of the transactions listed above to restore the state to and re-run the script with the -t option.")
return 0
else:
print("Found no usable transaction state information in the log file '{f}'".format(f=args.file))
return 1
if __name__ == "__main__":
Main(sys.argv)
| 39.0625 | 265 | 0.6272 | [
"Unlicense"
] | NavicoOS/ac2git | recover_state_from_log.py | 4,375 | Python |
# Generated by Django 3.1.7 on 2021-07-12 13:39
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('contact', '0002_feedback_phone'),
]
operations = [
migrations.RenameField(
model_name='feedback',
old_name='name',
new_name='subject',
),
]
| 18.947368 | 47 | 0.580556 | [
"BSD-3-Clause"
] | OMAR-EHAB777/FerpMenu | OmegaErp/contact/migrations/0003_auto_20210712_1539.py | 360 | Python |
from django.conf.urls.defaults import *
urlpatterns = patterns("",
url(r"^$", "idios.views.profiles", name="profile_list"),
url(r"^profile/(?P<username>[\w\._-]+)/$", "idios.views.profile", name="profile_detail"),
url(r"^edit/$", "idios.views.profile_edit", name="profile_edit"),
)
| 32.888889 | 93 | 0.648649 | [
"BSD-3-Clause"
] | paltman/idios | idios/urls.py | 296 | Python |
translation = {
'i1':'chleb',
'i2':'piwo',
'i3':'paluszki',
'i4':'śledź',
'i5':'chipsy',
'i6':'masło',
'M':'Mango',
'O':'Onion',
'N':'Nintendo',
'K':'Key-chain',
'E':'Eggs',
'Y':'Yo-yo',
'D':'Doll',
'A':'Apple',
'U':'Umbrella',
'C':'Corn',
'I':'Ice-cream',
'B':'Beans',
'T':'Tomatoes',
'S':'Squash',
}
set1 = [
['i2','i3','i4'],
['i1','i2','i5'],
['i2','i3','i5'],
['i1','i2','i4'],
['i2','i3','i5','i6'],
['i2','i4','i6'],
['i1','i4','i5'],
['i2','i3','i5'],
['i1','i2','i6'],
['i2','i4','i6'],
['i3','i5','i6']
]
set2 = [
['i1','i2','i5'],
['i2','i4'],
['i2','i3'],
['i1','i2','i4'],
['i1','i3'],
['i2','i3'],
['i1','i3'],
['i1','i2','i3','i5'],
['i1','i2','i3']
]
set3 = [
['M', 'O', 'N', 'K', 'E', 'Y'],
['D', 'O', 'N', 'K', 'E', 'Y'],
['M', 'A', 'K', 'E'],
['M', 'U', 'C', 'K', 'Y'],
['C', 'O', 'O', 'K', 'I', 'E']
]
set4 = [
['r','z','h','j','p'],
['z','y','x','w','v','u','t','s'],
['z'],
['r','x','n','o','s'],
['y','r','x','z','q','t','p'],
['y','z','x','e','q','s','t','m']
]
set5 = [
['1', '3', '4'],
['2', '3', '5'],
['1', '2', '3', '5'],
['2', '5'],
['1', '2', '3', '5']
]
set6 = [
['B' , 'A' , 'T'],
['A' , 'C'],
['A' , 'S'],
['B' , 'A' , 'C'],
['B' , 'S'],
['A' , 'S'],
['B' , 'S'],
['B' , 'A' , 'S' , 'T'],
['B' , 'A' , 'S']
]
set7 = [
['A', 'B'],
['B', 'C', 'D'],
['A', 'C', 'D', 'E'],
['A', 'D', 'E'],
['A', 'B', 'C']
]
set8 = [
['I1','I2','I5'],
['I2','I4'],
['I2','I3'],
['I1','I2','I4'],
['I1','I3'],
['I2','I3'],
['I1','I3'],
['I1','I2','I3','I5'],
['I1','I2','I3']
]
test = {
'set1':[set1, 2],
'set2':[set2, 2],
'set3':[set3, 3],
'set4':[set4, 3],
'set5':[set5, 2],
'set6':[set6, 2],
'set7':[set7, 2],
'set8':[set8, 4],
}
set9 = [
['p1', 'p2', 'p5'],
['p2', 'p1', 'p4'],
['p1', 'p2', 'p3'],
['p2', 'p3', 'p1'],
['p2', 'p1', 'p3', 'p4'],
['p3', 'p1', 'p2', 'p5'],
['p2', 'p1', 'p2', 'p3'],
['p3', 'p1', 'p3'],
['p2', 'p3', 'p2', 'p1'],
['p3', 'p1', 'p2', 'p4'],
['p3', 'p1', 'p2', 'p1', 'p3'],
['p3', 'p1', 'p2', 'p4', 'p5'],
['p1', 'p2', 'p1', 'p2', 'p5'],
['p3', 'p1', 'p4', 'p1'],
['p2', 'p1', 'p3', 'p4']
]
set10 = [
['p1', 'p2', 'p5', 'p1'],
['p2', 'p4'],
['p2', 'p3', 'p4'],
['p1', 'p2', 'p4'],
['p1', 'p3'],
['p2', 'p3', 'p4'],
['p1', 'p3'],
['p1', 'p2', 'p3', 'p5'],
['p1', 'p2', 'p3']
]
aprioriS_sets = [
(set9, 2),
(set10, 2)
]
set11 = [
['105', '112', '01/26/98', '30'],
['106', '113', '01/26/98', '10'],
['106', '113', '01/26/98', '20'],
['105', '114', '02/01/98', '90'],
['106', '115', '02/10/98', '30'],
['106', '116', '03/08/98', '40'],
['106', '116', '03/08/98', '60'],
['106', '116', '03/08/98', '70'],
['200', '117', '03/09/98', '30'],
['200', '117', '03/09/98', '50'],
['200', '117', '03/09/98', '70'],
['220', '118', '03/09/98', '30'],
['300', '119', '03/10/98', '90'],
['220', '120', '03/15/98', '40'],
['220', '120', '03/15/98', '70'],
['220', '121', '03/18/98', '90'],
]
set12 = [
['1', 'Jan 4', ['A']],
['1', 'Feb 5', ['A']],
['2', 'Jan 5', ['A']],
['2', 'Feb 7', ['B']],
['2', 'Mar 3', ['C', 'E']],
['3', 'Feb 11', ['A', 'E']],
['4', 'Jan 2', ['A']],
['4', 'Jan 22', ['C', 'D', 'E']],
['4', 'Mar 11', ['A']],
['5', 'Jun 5', ['A']],
]
aprioriAll_sets = [
(set12, 0.25),
] | 19.861702 | 38 | 0.283878 | [
"MIT"
] | Vyzrala/ZTED_Algorithms | datasets.py | 3,737 | Python |
"""Plugin
=========
The fixtures provided by pytest-kivy.
"""
import pytest
import weakref
from typing import Tuple, Type, Optional, Callable
import gc
import logging
from os import environ
from pytest_kivy.app import AsyncUnitApp
__all__ = ('trio_kivy_app', 'asyncio_kivy_app', 'async_kivy_app')
#: NOTE: Kivy cannot be imported before or while the plugin is imported or
# configured as that leads to pytest issues.
environ['KIVY_USE_DEFAULTCONFIG'] = '1'
_async_lib = environ.get('KIVY_EVENTLOOP', 'asyncio')
if _async_lib == 'asyncio':
@pytest.fixture
async def _nursery():
return None
@pytest.fixture
async def _event_loop(event_loop):
return event_loop
elif _async_lib == 'trio':
@pytest.fixture
async def _nursery(nursery):
return nursery
@pytest.fixture
async def _event_loop():
return None
else:
raise TypeError(f'unknown event loop {_async_lib}')
def pytest_addoption(parser):
group = parser.getgroup("kivy")
group.addoption(
"--kivy-app-release",
action="store_true",
default=False,
help='Whether to check after each test if the app is released and no '
'references are kept to the app preventing it from being garbage '
'collected.',
)
group.addoption(
"--kivy-app-release-end",
action="store_true",
default=False,
        help='Whether to check at the end of all tests if all of the test apps '
        'were released and no references were kept to the app preventing '
        'them from being garbage collected.',
)
@pytest.fixture(scope='session')
def _app_release_list():
apps = []
yield apps
gc.collect()
alive_apps = []
for i, (app, request) in enumerate(apps[:-1]):
app = app()
request = request()
if request is None:
request = '<dead request>'
if app is not None:
alive_apps.append((app, request))
logging.error(
'Memory leak: failed to release app for test ' + repr(request))
assert not alive_apps, 'Memory leak: failed to release all apps'
@pytest.fixture
def _app_release():
app = []
yield app
gc.collect()
if not app:
return
app, request = app[0]
app = app()
request = request()
if request is None:
request = '<dead request>'
assert app is None, \
f'Memory leak: failed to release app for test {request!r}'
def _get_request_config(
request, _app_release_list, _app_release
) -> Tuple[Type[AsyncUnitApp], dict, Optional[Callable], list]:
opts = getattr(request, 'param', {})
cls = opts.get('cls', AsyncUnitApp)
kwargs = opts.get('kwargs', {})
app_cls = opts.get('app_cls', None)
app_list = None
if request.config.getoption("kivy_app_release"):
app_list = _app_release
elif request.config.getoption("kivy_app_release_end"):
app_list = _app_release_list
return cls, kwargs, app_cls, app_list
@pytest.fixture
async def trio_kivy_app(
request, nursery, _app_release_list, _app_release
) -> AsyncUnitApp:
"""Fixture yielding a :class:`~pytest_kivy.app.AsyncUnitApp` using
explicitly trio as backend for the async library.
pytest-trio and trio must be installed, and ``trio_mode = true`` must be
set in pytest.ini.
"""
cls, kwargs, app_cls, app_list = _get_request_config(
request, _app_release_list, _app_release)
async with cls(nursery=nursery, async_lib='trio', **kwargs) as app:
if app_list is not None:
app_list.append((weakref.ref(app), weakref.ref(request)))
if app_cls is not None:
await app(app_cls)
app.raise_startup_exception()
yield app
await app.wait_stop_app()
@pytest.fixture
async def asyncio_kivy_app(
request, event_loop, _app_release_list, _app_release) -> AsyncUnitApp:
"""Fixture yielding a :class:`~pytest_kivy.app.AsyncUnitApp` using
explicitly asyncio as backend for the async library.
pytest-asyncio must be installed.
"""
cls, kwargs, app_cls, app_list = _get_request_config(
request, _app_release_list, _app_release)
async with cls(
event_loop=event_loop, async_lib='asyncio', **kwargs) as app:
if app_list is not None:
app_list.append((weakref.ref(app), weakref.ref(request)))
if app_cls is not None:
await app(app_cls)
app.raise_startup_exception()
yield app
await app.wait_stop_app()
@pytest.fixture
async def async_kivy_app(
request, _app_release_list, _app_release, _nursery, _event_loop
) -> AsyncUnitApp:
"""Fixture yielding a :class:`~pytest_kivy.app.AsyncUnitApp` using
trio or asyncio as backend for the async library, depending on
KIVY_EVENTLOOP.
If using trio, pytest-trio and trio must be installed, and
``trio_mode = true`` must be set in pytest.ini. If using asyncio,
pytest-asyncio must be installed.
"""
cls, kwargs, app_cls, app_list = _get_request_config(
request, _app_release_list, _app_release)
async with cls(
nursery=_nursery, event_loop=_event_loop, async_lib=_async_lib,
**kwargs) as app:
if app_list is not None:
app_list.append((weakref.ref(app), weakref.ref(request)))
if app_cls is not None:
await app(app_cls)
app.raise_startup_exception()
yield app
await app.wait_stop_app()
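# Illustrative test using the fixture (``MyApp`` is a hypothetical Kivy ``App``
# subclass; the call pattern mirrors the fixture body above):
#
#     async def test_app_starts(async_kivy_app):
#         await async_kivy_app(MyApp)
#         async_kivy_app.raise_startup_exception()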
| 28.085859 | 79 | 0.65258 | [
"MIT"
] | matham/pytest-kivy | pytest_kivy/plugin.py | 5,561 | Python |
from pymongo import MongoClient
import json
from newsapi.database import mongo
class UserModel:
def __init__(self, _id, username, password):
self.id = _id
self.username = username
self.password = password
@classmethod
def find_by_username(cls, username):
result = mongo.db.user.find_one({'username': username})
if result:
user = cls(_id=result['_id'], username=result['username'], password=result['password'])
else:
user = None
return user
@classmethod
def find_by_id(cls, _id):
result = mongo.db.user.find_one({'_id': _id})
if result:
user = cls(_id=result['_id'], username=result['username'], password=result['password'])
else:
user = None
return user
| 25.59375 | 99 | 0.603175 | [
"MIT"
] | rubiagatra/news-api | newsapi/user/models.py | 819 | Python |
"""Test data purging."""
from datetime import datetime, timedelta
import json
from unittest.mock import patch
from homeassistant.components import recorder
from homeassistant.components.recorder.const import DATA_INSTANCE
from homeassistant.components.recorder.models import Events, RecorderRuns, States
from homeassistant.components.recorder.purge import purge_old_data
from homeassistant.components.recorder.util import session_scope
from homeassistant.util import dt as dt_util
from .common import wait_recording_done
def test_purge_old_states(hass, hass_recorder):
"""Test deleting old states."""
hass = hass_recorder()
_add_test_states(hass)
# make sure we start with 6 states
with session_scope(hass=hass) as session:
states = session.query(States)
assert states.count() == 6
# run purge_old_data()
finished = purge_old_data(hass.data[DATA_INSTANCE], 4, repack=False)
assert not finished
assert states.count() == 4
finished = purge_old_data(hass.data[DATA_INSTANCE], 4, repack=False)
assert not finished
assert states.count() == 2
finished = purge_old_data(hass.data[DATA_INSTANCE], 4, repack=False)
assert finished
assert states.count() == 2
def test_purge_old_events(hass, hass_recorder):
"""Test deleting old events."""
hass = hass_recorder()
_add_test_events(hass)
with session_scope(hass=hass) as session:
events = session.query(Events).filter(Events.event_type.like("EVENT_TEST%"))
assert events.count() == 6
# run purge_old_data()
finished = purge_old_data(hass.data[DATA_INSTANCE], 4, repack=False)
assert not finished
assert events.count() == 4
finished = purge_old_data(hass.data[DATA_INSTANCE], 4, repack=False)
assert not finished
assert events.count() == 2
# we should only have 2 events left
finished = purge_old_data(hass.data[DATA_INSTANCE], 4, repack=False)
assert finished
assert events.count() == 2
def test_purge_old_recorder_runs(hass, hass_recorder):
"""Test deleting old recorder runs keeps current run."""
hass = hass_recorder()
_add_test_recorder_runs(hass)
# make sure we start with 7 recorder runs
with session_scope(hass=hass) as session:
recorder_runs = session.query(RecorderRuns)
assert recorder_runs.count() == 7
# run purge_old_data()
finished = purge_old_data(hass.data[DATA_INSTANCE], 0, repack=False)
assert finished
assert recorder_runs.count() == 1
def test_purge_method(hass, hass_recorder):
"""Test purge method."""
hass = hass_recorder()
service_data = {"keep_days": 4}
_add_test_events(hass)
_add_test_states(hass)
_add_test_recorder_runs(hass)
# make sure we start with 6 states
with session_scope(hass=hass) as session:
states = session.query(States)
assert states.count() == 6
events = session.query(Events).filter(Events.event_type.like("EVENT_TEST%"))
assert events.count() == 6
recorder_runs = session.query(RecorderRuns)
assert recorder_runs.count() == 7
hass.data[DATA_INSTANCE].block_till_done()
wait_recording_done(hass)
# run purge method - no service data, use defaults
hass.services.call("recorder", "purge")
hass.block_till_done()
# Small wait for recorder thread
hass.data[DATA_INSTANCE].block_till_done()
wait_recording_done(hass)
# only purged old events
assert states.count() == 4
assert events.count() == 4
# run purge method - correct service data
hass.services.call("recorder", "purge", service_data=service_data)
hass.block_till_done()
# Small wait for recorder thread
hass.data[DATA_INSTANCE].block_till_done()
wait_recording_done(hass)
# we should only have 2 states left after purging
assert states.count() == 2
# now we should only have 2 events left
assert events.count() == 2
# now we should only have 3 recorder runs left
assert recorder_runs.count() == 3
assert not ("EVENT_TEST_PURGE" in (event.event_type for event in events.all()))
# run purge method - correct service data, with repack
with patch("homeassistant.components.recorder.purge._LOGGER") as mock_logger:
service_data["repack"] = True
hass.services.call("recorder", "purge", service_data=service_data)
hass.block_till_done()
hass.data[DATA_INSTANCE].block_till_done()
wait_recording_done(hass)
assert (
mock_logger.debug.mock_calls[5][1][0]
== "Vacuuming SQL DB to free space"
)
def _add_test_states(hass):
"""Add multiple states to the db for testing."""
now = datetime.now()
five_days_ago = now - timedelta(days=5)
eleven_days_ago = now - timedelta(days=11)
attributes = {"test_attr": 5, "test_attr_10": "nice"}
hass.block_till_done()
hass.data[DATA_INSTANCE].block_till_done()
wait_recording_done(hass)
with recorder.session_scope(hass=hass) as session:
for event_id in range(6):
if event_id < 2:
timestamp = eleven_days_ago
state = "autopurgeme"
elif event_id < 4:
timestamp = five_days_ago
state = "purgeme"
else:
timestamp = now
state = "dontpurgeme"
session.add(
States(
entity_id="test.recorder2",
domain="sensor",
state=state,
attributes=json.dumps(attributes),
last_changed=timestamp,
last_updated=timestamp,
created=timestamp,
event_id=event_id + 1000,
)
)
def _add_test_events(hass):
"""Add a few events for testing."""
now = datetime.now()
five_days_ago = now - timedelta(days=5)
eleven_days_ago = now - timedelta(days=11)
event_data = {"test_attr": 5, "test_attr_10": "nice"}
hass.block_till_done()
hass.data[DATA_INSTANCE].block_till_done()
wait_recording_done(hass)
with recorder.session_scope(hass=hass) as session:
for event_id in range(6):
if event_id < 2:
timestamp = eleven_days_ago
event_type = "EVENT_TEST_AUTOPURGE"
elif event_id < 4:
timestamp = five_days_ago
event_type = "EVENT_TEST_PURGE"
else:
timestamp = now
event_type = "EVENT_TEST"
session.add(
Events(
event_type=event_type,
event_data=json.dumps(event_data),
origin="LOCAL",
created=timestamp,
time_fired=timestamp,
)
)
def _add_test_recorder_runs(hass):
"""Add a few recorder_runs for testing."""
now = datetime.now()
five_days_ago = now - timedelta(days=5)
eleven_days_ago = now - timedelta(days=11)
hass.block_till_done()
hass.data[DATA_INSTANCE].block_till_done()
wait_recording_done(hass)
with recorder.session_scope(hass=hass) as session:
for rec_id in range(6):
if rec_id < 2:
timestamp = eleven_days_ago
elif rec_id < 4:
timestamp = five_days_ago
else:
timestamp = now
session.add(
RecorderRuns(
start=timestamp,
created=dt_util.utcnow(),
end=timestamp + timedelta(days=1),
)
)
| 32.646091 | 87 | 0.613261 | [
"Apache-2.0"
] | AdmiralStipe/core | tests/components/recorder/test_purge.py | 7,933 | Python |
"""An abstract class for entities."""
from abc import ABC
import asyncio
from datetime import datetime, timedelta
import functools as ft
import logging
from timeit import default_timer as timer
from typing import Any, Awaitable, Dict, Iterable, List, Optional
from homeassistant.config import DATA_CUSTOMIZE
from homeassistant.const import (
ATTR_ASSUMED_STATE,
ATTR_DEVICE_CLASS,
ATTR_ENTITY_PICTURE,
ATTR_FRIENDLY_NAME,
ATTR_ICON,
ATTR_SUPPORTED_FEATURES,
ATTR_UNIT_OF_MEASUREMENT,
DEVICE_DEFAULT_NAME,
STATE_OFF,
STATE_ON,
STATE_UNAVAILABLE,
STATE_UNKNOWN,
TEMP_CELSIUS,
TEMP_FAHRENHEIT,
)
from homeassistant.core import CALLBACK_TYPE, Context, HomeAssistant, callback
from homeassistant.exceptions import HomeAssistantError, NoEntitySpecifiedError
from homeassistant.helpers.entity_platform import EntityPlatform
from homeassistant.helpers.entity_registry import RegistryEntry
from homeassistant.helpers.event import Event, async_track_entity_registry_updated_event
from homeassistant.helpers.typing import StateType
from homeassistant.loader import bind_hass
from homeassistant.util import dt as dt_util, ensure_unique_string, slugify
_LOGGER = logging.getLogger(__name__)
SLOW_UPDATE_WARNING = 10
DATA_ENTITY_SOURCE = "entity_info"
SOURCE_CONFIG_ENTRY = "config_entry"
SOURCE_PLATFORM_CONFIG = "platform_config"
@callback
@bind_hass
def entity_sources(hass: HomeAssistant) -> Dict[str, Dict[str, str]]:
"""Get the entity sources."""
return hass.data.get(DATA_ENTITY_SOURCE, {})
def generate_entity_id(
entity_id_format: str,
name: Optional[str],
current_ids: Optional[List[str]] = None,
hass: Optional[HomeAssistant] = None,
) -> str:
"""Generate a unique entity ID based on given entity IDs or used IDs."""
return async_generate_entity_id(entity_id_format, name, current_ids, hass)
@callback
def async_generate_entity_id(
entity_id_format: str,
name: Optional[str],
current_ids: Optional[Iterable[str]] = None,
hass: Optional[HomeAssistant] = None,
) -> str:
"""Generate a unique entity ID based on given entity IDs or used IDs."""
name = (name or DEVICE_DEFAULT_NAME).lower()
preferred_string = entity_id_format.format(slugify(name))
if current_ids is not None:
return ensure_unique_string(preferred_string, current_ids)
if hass is None:
raise ValueError("Missing required parameter current_ids or hass")
test_string = preferred_string
tries = 1
while not hass.states.async_available(test_string):
tries += 1
test_string = f"{preferred_string}_{tries}"
return test_string
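# Example (illustrative values): with entity_id_format="sensor.{}" and name
# "Living Room", the preferred id is "sensor.living_room"; if that id is
# already taken (present in current_ids or in the state machine), the helper
# falls back to "sensor.living_room_2", then "sensor.living_room_3", and so on.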
class Entity(ABC):
"""An abstract class for Home Assistant entities."""
# SAFE TO OVERWRITE
# The properties and methods here are safe to overwrite when inheriting
# this class. These may be used to customize the behavior of the entity.
entity_id: str = None # type: ignore
# Owning hass instance. Will be set by EntityPlatform
# While not purely typed, it makes typehinting more useful for us
# and removes the need for constant None checks or asserts.
# Ignore types: https://github.com/PyCQA/pylint/issues/3167
hass: HomeAssistant = None # type: ignore
# Owning platform instance. Will be set by EntityPlatform
platform: Optional[EntityPlatform] = None
# If we reported if this entity was slow
_slow_reported = False
# If we reported this entity is updated while disabled
_disabled_reported = False
# Protect for multiple updates
_update_staged = False
# Process updates in parallel
parallel_updates: Optional[asyncio.Semaphore] = None
# Entry in the entity registry
registry_entry: Optional[RegistryEntry] = None
# Hold list for functions to call on remove.
_on_remove: Optional[List[CALLBACK_TYPE]] = None
# Context
_context: Optional[Context] = None
_context_set: Optional[datetime] = None
# If entity is added to an entity platform
_added = False
@property
def should_poll(self) -> bool:
"""Return True if entity has to be polled for state.
False if entity pushes its state to HA.
"""
return True
@property
def unique_id(self) -> Optional[str]:
"""Return a unique ID."""
return None
@property
def name(self) -> Optional[str]:
"""Return the name of the entity."""
return None
@property
def state(self) -> StateType:
"""Return the state of the entity."""
return STATE_UNKNOWN
@property
def capability_attributes(self) -> Optional[Dict[str, Any]]:
"""Return the capability attributes.
Attributes that explain the capabilities of an entity.
Implemented by component base class. Convention for attribute names
is lowercase snake_case.
"""
return None
@property
def state_attributes(self) -> Optional[Dict[str, Any]]:
"""Return the state attributes.
Implemented by component base class, should not be extended by integrations.
Convention for attribute names is lowercase snake_case.
"""
return None
@property
def device_state_attributes(self) -> Optional[Dict[str, Any]]:
"""Return entity specific state attributes.
This method is deprecated, platform classes should implement
extra_state_attributes instead.
"""
return None
@property
def extra_state_attributes(self) -> Optional[Dict[str, Any]]:
"""Return entity specific state attributes.
Implemented by platform classes. Convention for attribute names
is lowercase snake_case.
"""
return None
@property
def device_info(self) -> Optional[Dict[str, Any]]:
"""Return device specific attributes.
Implemented by platform classes.
"""
return None
@property
def device_class(self) -> Optional[str]:
"""Return the class of this device, from component DEVICE_CLASSES."""
return None
@property
def unit_of_measurement(self) -> Optional[str]:
"""Return the unit of measurement of this entity, if any."""
return None
@property
def icon(self) -> Optional[str]:
"""Return the icon to use in the frontend, if any."""
return None
@property
def entity_picture(self) -> Optional[str]:
"""Return the entity picture to use in the frontend, if any."""
return None
@property
def available(self) -> bool:
"""Return True if entity is available."""
return True
@property
def assumed_state(self) -> bool:
"""Return True if unable to access real state of the entity."""
return False
@property
def force_update(self) -> bool:
"""Return True if state updates should be forced.
If True, a state change will be triggered anytime the state property is
updated, not just when the value changes.
"""
return False
@property
def supported_features(self) -> Optional[int]:
"""Flag supported features."""
return None
@property
def context_recent_time(self) -> timedelta:
"""Time that a context is considered recent."""
return timedelta(seconds=5)
@property
def entity_registry_enabled_default(self) -> bool:
"""Return if the entity should be enabled when first added to the entity registry."""
return True
# DO NOT OVERWRITE
# These properties and methods are either managed by Home Assistant or they
# are used to perform a very specific function. Overwriting these may
# produce undesirable effects in the entity's operation.
@property
def enabled(self) -> bool:
"""Return if the entity is enabled in the entity registry.
If an entity is not part of the registry, it cannot be disabled
and will therefore always be enabled.
"""
return self.registry_entry is None or not self.registry_entry.disabled
@callback
def async_set_context(self, context: Context) -> None:
"""Set the context the entity currently operates under."""
self._context = context
self._context_set = dt_util.utcnow()
async def async_update_ha_state(self, force_refresh: bool = False) -> None:
"""Update Home Assistant with current state of entity.
If force_refresh == True will update entity before setting state.
This method must be run in the event loop.
"""
if self.hass is None:
raise RuntimeError(f"Attribute hass is None for {self}")
if self.entity_id is None:
raise NoEntitySpecifiedError(
f"No entity id specified for entity {self.name}"
)
# update entity data
if force_refresh:
try:
await self.async_device_update()
except Exception: # pylint: disable=broad-except
_LOGGER.exception("Update for %s fails", self.entity_id)
return
self._async_write_ha_state()
@callback
def async_write_ha_state(self) -> None:
"""Write the state to the state machine."""
if self.hass is None:
raise RuntimeError(f"Attribute hass is None for {self}")
if self.entity_id is None:
raise NoEntitySpecifiedError(
f"No entity id specified for entity {self.name}"
)
self._async_write_ha_state()
@callback
def _async_write_ha_state(self) -> None:
"""Write the state to the state machine."""
if self.registry_entry and self.registry_entry.disabled_by:
if not self._disabled_reported:
self._disabled_reported = True
assert self.platform is not None
_LOGGER.warning(
"Entity %s is incorrectly being triggered for updates while it is disabled. This is a bug in the %s integration",
self.entity_id,
self.platform.platform_name,
)
return
start = timer()
attr = self.capability_attributes
attr = dict(attr) if attr else {}
if not self.available:
state = STATE_UNAVAILABLE
else:
sstate = self.state
state = STATE_UNKNOWN if sstate is None else str(sstate)
attr.update(self.state_attributes or {})
extra_state_attributes = self.extra_state_attributes
# Backwards compatibility for "device_state_attributes" deprecated in 2021.4
# Add warning in 2021.6, remove in 2021.10
if extra_state_attributes is None:
extra_state_attributes = self.device_state_attributes
attr.update(extra_state_attributes or {})
unit_of_measurement = self.unit_of_measurement
if unit_of_measurement is not None:
attr[ATTR_UNIT_OF_MEASUREMENT] = unit_of_measurement
entry = self.registry_entry
# pylint: disable=consider-using-ternary
name = (entry and entry.name) or self.name
if name is not None:
attr[ATTR_FRIENDLY_NAME] = name
icon = (entry and entry.icon) or self.icon
if icon is not None:
attr[ATTR_ICON] = icon
entity_picture = self.entity_picture
if entity_picture is not None:
attr[ATTR_ENTITY_PICTURE] = entity_picture
assumed_state = self.assumed_state
if assumed_state:
attr[ATTR_ASSUMED_STATE] = assumed_state
supported_features = self.supported_features
if supported_features is not None:
attr[ATTR_SUPPORTED_FEATURES] = supported_features
device_class = self.device_class
if device_class is not None:
attr[ATTR_DEVICE_CLASS] = str(device_class)
end = timer()
if end - start > 0.4 and not self._slow_reported:
self._slow_reported = True
extra = ""
if "custom_components" in type(self).__module__:
extra = "Please report it to the custom component author."
else:
extra = (
"Please create a bug report at "
"https://github.com/home-assistant/core/issues?q=is%3Aopen+is%3Aissue"
)
if self.platform:
extra += (
f"+label%3A%22integration%3A+{self.platform.platform_name}%22"
)
_LOGGER.warning(
"Updating state for %s (%s) took %.3f seconds. %s",
self.entity_id,
type(self),
end - start,
extra,
)
# Overwrite properties that have been set in the config file.
if DATA_CUSTOMIZE in self.hass.data:
attr.update(self.hass.data[DATA_CUSTOMIZE].get(self.entity_id))
# Convert temperature if we detect one
try:
unit_of_measure = attr.get(ATTR_UNIT_OF_MEASUREMENT)
units = self.hass.config.units
if (
unit_of_measure in (TEMP_CELSIUS, TEMP_FAHRENHEIT)
and unit_of_measure != units.temperature_unit
):
prec = len(state) - state.index(".") - 1 if "." in state else 0
temp = units.temperature(float(state), unit_of_measure)
state = str(round(temp) if prec == 0 else round(temp, prec))
attr[ATTR_UNIT_OF_MEASUREMENT] = units.temperature_unit
except ValueError:
# Could not convert state to float
pass
if (
self._context_set is not None
and dt_util.utcnow() - self._context_set > self.context_recent_time
):
self._context = None
self._context_set = None
self.hass.states.async_set(
self.entity_id, state, attr, self.force_update, self._context
)
def schedule_update_ha_state(self, force_refresh: bool = False) -> None:
"""Schedule an update ha state change task.
Scheduling the update avoids executor deadlocks.
Entity state and attributes are read when the update ha state change
task is executed.
If state is changed more than once before the ha state change task has
been executed, the intermediate state transitions will be missed.
"""
self.hass.add_job(self.async_update_ha_state(force_refresh)) # type: ignore
@callback
def async_schedule_update_ha_state(self, force_refresh: bool = False) -> None:
"""Schedule an update ha state change task.
This method must be run in the event loop.
Scheduling the update avoids executor deadlocks.
Entity state and attributes are read when the update ha state change
task is executed.
If state is changed more than once before the ha state change task has
been executed, the intermediate state transitions will be missed.
"""
if force_refresh:
self.hass.async_create_task(self.async_update_ha_state(force_refresh))
else:
self.async_write_ha_state()
async def async_device_update(self, warning: bool = True) -> None:
"""Process 'update' or 'async_update' from entity.
This method is a coroutine.
"""
if self._update_staged:
return
self._update_staged = True
# Process update sequential
if self.parallel_updates:
await self.parallel_updates.acquire()
try:
# pylint: disable=no-member
if hasattr(self, "async_update"):
task = self.hass.async_create_task(self.async_update()) # type: ignore
elif hasattr(self, "update"):
task = self.hass.async_add_executor_job(self.update) # type: ignore
else:
return
if not warning:
await task
return
finished, _ = await asyncio.wait([task], timeout=SLOW_UPDATE_WARNING)
for done in finished:
exc = done.exception()
if exc:
raise exc
return
_LOGGER.warning(
"Update of %s is taking over %s seconds",
self.entity_id,
SLOW_UPDATE_WARNING,
)
await task
finally:
self._update_staged = False
if self.parallel_updates:
self.parallel_updates.release()
@callback
def async_on_remove(self, func: CALLBACK_TYPE) -> None:
"""Add a function to call when entity removed."""
if self._on_remove is None:
self._on_remove = []
self._on_remove.append(func)
async def async_removed_from_registry(self) -> None:
"""Run when entity has been removed from entity registry.
To be extended by integrations.
"""
@callback
def add_to_platform_start(
self,
hass: HomeAssistant,
platform: EntityPlatform,
parallel_updates: Optional[asyncio.Semaphore],
) -> None:
"""Start adding an entity to a platform."""
if self._added:
raise HomeAssistantError(
f"Entity {self.entity_id} cannot be added a second time to an entity platform"
)
self.hass = hass
self.platform = platform
self.parallel_updates = parallel_updates
self._added = True
@callback
def add_to_platform_abort(self) -> None:
"""Abort adding an entity to a platform."""
self.hass = None # type: ignore
self.platform = None
self.parallel_updates = None
self._added = False
async def add_to_platform_finish(self) -> None:
"""Finish adding an entity to a platform."""
await self.async_internal_added_to_hass()
await self.async_added_to_hass()
self.async_write_ha_state()
async def async_remove(self, *, force_remove: bool = False) -> None:
"""Remove entity from Home Assistant.
If the entity has a non disabled entry in the entity registry,
the entity's state will be set to unavailable, in the same way
as when the entity registry is loaded.
If the entity doesn't have a non disabled entry in the entity registry,
or if force_remove=True, its state will be removed.
"""
if self.platform and not self._added:
raise HomeAssistantError(
f"Entity {self.entity_id} async_remove called twice"
)
self._added = False
if self._on_remove is not None:
while self._on_remove:
self._on_remove.pop()()
await self.async_internal_will_remove_from_hass()
await self.async_will_remove_from_hass()
# Check if entry still exists in entity registry (e.g. unloading config entry)
if (
not force_remove
and self.registry_entry
and not self.registry_entry.disabled
):
# Set the entity's state will to unavailable + ATTR_RESTORED: True
self.registry_entry.write_unavailable_state(self.hass)
else:
self.hass.states.async_remove(self.entity_id, context=self._context)
async def async_added_to_hass(self) -> None:
"""Run when entity about to be added to hass.
To be extended by integrations.
"""
async def async_will_remove_from_hass(self) -> None:
"""Run when entity will be removed from hass.
To be extended by integrations.
"""
async def async_internal_added_to_hass(self) -> None:
"""Run when entity about to be added to hass.
Not to be extended by integrations.
"""
if self.platform:
info = {"domain": self.platform.platform_name}
if self.platform.config_entry:
info["source"] = SOURCE_CONFIG_ENTRY
info["config_entry"] = self.platform.config_entry.entry_id
else:
info["source"] = SOURCE_PLATFORM_CONFIG
self.hass.data.setdefault(DATA_ENTITY_SOURCE, {})[self.entity_id] = info
if self.registry_entry is not None:
# This is an assert as it should never happen, but helps in tests
assert (
not self.registry_entry.disabled_by
), f"Entity {self.entity_id} is being added while it's disabled"
self.async_on_remove(
async_track_entity_registry_updated_event(
self.hass, self.entity_id, self._async_registry_updated
)
)
async def async_internal_will_remove_from_hass(self) -> None:
"""Run when entity will be removed from hass.
Not to be extended by integrations.
"""
if self.platform:
self.hass.data[DATA_ENTITY_SOURCE].pop(self.entity_id)
async def _async_registry_updated(self, event: Event) -> None:
"""Handle entity registry update."""
data = event.data
if data["action"] == "remove":
await self.async_removed_from_registry()
self.registry_entry = None
await self.async_remove()
if data["action"] != "update":
return
ent_reg = await self.hass.helpers.entity_registry.async_get_registry()
old = self.registry_entry
self.registry_entry = ent_reg.async_get(data["entity_id"])
assert self.registry_entry is not None
if self.registry_entry.disabled:
await self.async_remove()
return
assert old is not None
if self.registry_entry.entity_id == old.entity_id:
self.async_write_ha_state()
return
await self.async_remove(force_remove=True)
assert self.platform is not None
self.entity_id = self.registry_entry.entity_id
await self.platform.async_add_entities([self])
def __eq__(self, other: Any) -> bool:
"""Return the comparison."""
if not isinstance(other, self.__class__):
return False
# Can only decide equality if both have a unique id
if self.unique_id is None or other.unique_id is None:
return False
# Ensure they belong to the same platform
if self.platform is not None or other.platform is not None:
if self.platform is None or other.platform is None:
return False
if self.platform.platform != other.platform.platform:
return False
return self.unique_id == other.unique_id
def __repr__(self) -> str:
"""Return the representation."""
return f"<Entity {self.name}: {self.state}>"
async def async_request_call(self, coro: Awaitable) -> None:
"""Process request batched."""
if self.parallel_updates:
await self.parallel_updates.acquire()
try:
await coro
finally:
if self.parallel_updates:
self.parallel_updates.release()
class ToggleEntity(Entity):
"""An abstract class for entities that can be turned on and off."""
@property
def state(self) -> str:
"""Return the state."""
return STATE_ON if self.is_on else STATE_OFF
@property
def is_on(self) -> bool:
"""Return True if entity is on."""
raise NotImplementedError()
def turn_on(self, **kwargs: Any) -> None:
"""Turn the entity on."""
raise NotImplementedError()
async def async_turn_on(self, **kwargs: Any) -> None:
"""Turn the entity on."""
await self.hass.async_add_executor_job(ft.partial(self.turn_on, **kwargs))
def turn_off(self, **kwargs: Any) -> None:
"""Turn the entity off."""
raise NotImplementedError()
async def async_turn_off(self, **kwargs: Any) -> None:
"""Turn the entity off."""
await self.hass.async_add_executor_job(ft.partial(self.turn_off, **kwargs))
def toggle(self, **kwargs: Any) -> None:
"""Toggle the entity."""
if self.is_on:
self.turn_off(**kwargs)
else:
self.turn_on(**kwargs)
async def async_toggle(self, **kwargs: Any) -> None:
"""Toggle the entity."""
if self.is_on:
await self.async_turn_off(**kwargs)
else:
await self.async_turn_on(**kwargs)
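# --- Illustrative example (hypothetical, for demonstration only) ---
# A minimal sketch of a concrete ToggleEntity subclass showing how the
# `state` property above maps `is_on` to STATE_ON/STATE_OFF. The class
# name `DemoSwitch` and its in-memory flag are made up for this sketch.
class DemoSwitch(ToggleEntity):
    """A toy switch that only tracks an in-memory on/off flag."""
    def __init__(self) -> None:
        self._demo_is_on = False
    @property
    def is_on(self) -> bool:
        """Return True if the demo switch is on."""
        return self._demo_is_on
    def turn_on(self, **kwargs: Any) -> None:
        """Turn the demo switch on."""
        self._demo_is_on = True
    def turn_off(self, **kwargs: Any) -> None:
        """Turn the demo switch off."""
        self._demo_is_on = False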
 | 33.541384 | 133 | 0.628155 | ["Apache-2.0"] | Leviosa-Shades/core | homeassistant/helpers/entity.py | 24,720 | Python |
# from https://stackoverflow.com/questions/8032642/how-to-obtain-image-size-using-standard-python-class-without-using-external-lib
import struct
import imghdr
def get_image_size(fname):
"""Determine the image type of fhandle and return its size.
from draco"""
with open(fname, "rb") as fhandle:
head = fhandle.read(24)
if len(head) != 24:
return
if imghdr.what(fname) == "png":
check = struct.unpack(">i", head[4:8])[0]
if check != 0x0D0A1A0A:
return
width, height = struct.unpack(">ii", head[16:24])
elif imghdr.what(fname) == "gif":
width, height = struct.unpack("<HH", head[6:10])
elif imghdr.what(fname) == "jpeg":
try:
fhandle.seek(0) # Read 0xff next
size = 2
ftype = 0
while not 0xC0 <= ftype <= 0xCF:
fhandle.seek(size, 1)
byte = fhandle.read(1)
while ord(byte) == 0xFF:
byte = fhandle.read(1)
ftype = ord(byte)
size = struct.unpack(">H", fhandle.read(2))[0] - 2
# We are at a SOFn block
fhandle.seek(1, 1) # Skip `precision' byte.
height, width = struct.unpack(">HH", fhandle.read(4))
except Exception: # IGNORE:W0703
return
else:
return
return width, height
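# --- Illustrative usage (hypothetical, for demonstration only) ---
# A minimal sketch: write just enough of a PNG header (signature plus the
# IHDR width/height fields read above) and parse it back. The file name
# "demo_size_check.png" is made up; note that `imghdr` is deprecated in
# newer Python versions.
if __name__ == "__main__":
    header = (b"\x89PNG\r\n\x1a\n"               # PNG signature
              + struct.pack(">I", 13) + b"IHDR"  # IHDR chunk length + type
              + struct.pack(">II", 320, 240))    # width, height
    with open("demo_size_check.png", "wb") as fhandle:
        fhandle.write(header)
    print(get_image_size("demo_size_check.png"))  # expected: (320, 240)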
 | 35.904762 | 131 | 0.5 | ["MIT"] | geraked/mdpdfbook | mdpdfbook/mdpdf/image.py | 1,508 | Python |
# Copyright 2021 Zeppelin Bend Pty Ltd
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at https://mozilla.org/MPL/2.0/.
__all__ = ["get_upstream_end_to_tns"]
from typing import List, Tuple, TypeVar
from zepben.evolve import PowerTransformerEnd, SinglePhaseKind, PhaseDirection
T = TypeVar("T")
def get_upstream_end_to_tns(
ends_to_topological_nodes: List[Tuple[PowerTransformerEnd, T]]
) -> List[Tuple[PowerTransformerEnd, T]]:
return [(end, tn) for (end, tn) in ends_to_topological_nodes
if tn is not None
and end is not None
# TODO: How to account for the fact you can have phases with different directions??
and (end.terminal.traced_phases.direction_normal(SinglePhaseKind.A).has(PhaseDirection.IN)
or end.terminal.traced_phases.direction_normal(SinglePhaseKind.B).has(PhaseDirection.IN)
or end.terminal.traced_phases.direction_normal(SinglePhaseKind.C).has(PhaseDirection.IN))
]
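# --- Illustrative sketch (hypothetical, for demonstration only) ---
# Shows the expected shape of the input and output using stand-in stub
# classes instead of real zepben.evolve objects; only the attribute chain
# used above (end.terminal.traced_phases.direction_normal(...).has(...))
# is mimicked. All `_Stub*` names are made up for this sketch.
if __name__ == "__main__":
    class _StubDirection:
        def __init__(self, flows_in: bool):
            self.flows_in = flows_in
        def has(self, direction) -> bool:
            return self.flows_in and direction == PhaseDirection.IN
    class _StubPhases:
        def __init__(self, flows_in: bool):
            self.flows_in = flows_in
        def direction_normal(self, phase) -> _StubDirection:
            return _StubDirection(self.flows_in)
    class _StubTerminal:
        def __init__(self, flows_in: bool):
            self.traced_phases = _StubPhases(flows_in)
    class _StubEnd:
        def __init__(self, flows_in: bool):
            self.terminal = _StubTerminal(flows_in)
    upstream = _StubEnd(True)     # at least one phase has direction IN
    downstream = _StubEnd(False)  # no phase has direction IN
    pairs = [(upstream, "tn-1"), (downstream, "tn-2"), (None, "tn-3")]
    print([tn for _end, tn in get_upstream_end_to_tns(pairs)])  # ['tn-1']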
 | 41.481481 | 106 | 0.7125 | ["MPL-2.0", "MPL-2.0-no-copyleft-exception"] | zepben/pp-translator | src/pp_creators/utils.py | 1,120 | Python |
# coding: utf-8
# Modified Work: Copyright (c) 2018, 2022, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
# Copyright 2018 Kenneth Reitz
# -*- coding: utf-8 -*-
"""
requests.models
~~~~~~~~~~~~~~~
This module contains the primary objects that power Requests.
"""
import datetime
import sys
# Import encoding now, to avoid implicit import later.
# Implicit import within threads may cause LookupError when standard library is in a ZIP,
# such as in Embedded Python. See https://github.com/psf/requests/issues/3578.
import encodings.idna
from oci._vendor.urllib3.fields import RequestField
from oci._vendor.urllib3.filepost import encode_multipart_formdata
from oci._vendor.urllib3.util import parse_url
from oci._vendor.urllib3.exceptions import (
DecodeError, ReadTimeoutError, ProtocolError, LocationParseError)
from io import UnsupportedOperation
from .hooks import default_hooks
from .structures import CaseInsensitiveDict
from .auth import HTTPBasicAuth
from .cookies import cookiejar_from_dict, get_cookie_header, _copy_cookie_jar
from .exceptions import (
HTTPError, MissingSchema, InvalidURL, ChunkedEncodingError,
ContentDecodingError, ConnectionError, StreamConsumedError)
from ._internal_utils import to_native_string, unicode_is_ascii
from .utils import (
guess_filename, get_auth_from_url, requote_uri,
stream_decode_response_unicode, to_key_val_list, parse_header_links,
iter_slices, guess_json_utf, super_len, check_header_validity)
from .compat import (
Callable, Mapping,
cookielib, urlunparse, urlsplit, urlencode, str, bytes,
is_py2, chardet, builtin_str, basestring)
from .compat import json as complexjson
from .status_codes import codes
#: The set of HTTP status codes that indicate an automatically
#: processable redirect.
REDIRECT_STATI = (
codes.moved, # 301
codes.found, # 302
codes.other, # 303
codes.temporary_redirect, # 307
codes.permanent_redirect, # 308
)
DEFAULT_REDIRECT_LIMIT = 30
CONTENT_CHUNK_SIZE = 10 * 1024
ITER_CHUNK_SIZE = 512
class RequestEncodingMixin(object):
@property
def path_url(self):
"""Build the path URL to use."""
url = []
p = urlsplit(self.url)
path = p.path
if not path:
path = '/'
url.append(path)
query = p.query
if query:
url.append('?')
url.append(query)
return ''.join(url)
@staticmethod
def _encode_params(data):
"""Encode parameters in a piece of data.
Will successfully encode parameters when passed as a dict or a list of
2-tuples. Order is retained if data is a list of 2-tuples but arbitrary
if parameters are supplied as a dict.
"""
if isinstance(data, (str, bytes)):
return data
elif hasattr(data, 'read'):
return data
elif hasattr(data, '__iter__'):
result = []
for k, vs in to_key_val_list(data):
if isinstance(vs, basestring) or not hasattr(vs, '__iter__'):
vs = [vs]
for v in vs:
if v is not None:
result.append(
(k.encode('utf-8') if isinstance(k, str) else k,
v.encode('utf-8') if isinstance(v, str) else v))
return urlencode(result, doseq=True)
else:
return data
@staticmethod
def _encode_files(files, data):
"""Build the body for a multipart/form-data request.
Will successfully encode files when passed as a dict or a list of
tuples. Order is retained if data is a list of tuples but arbitrary
if parameters are supplied as a dict.
        The tuples may be 2-tuples (filename, fileobj), 3-tuples (filename, fileobj, content_type)
        or 4-tuples (filename, fileobj, content_type, custom_headers).
"""
if (not files):
raise ValueError("Files must be provided.")
elif isinstance(data, basestring):
raise ValueError("Data must not be a string.")
new_fields = []
fields = to_key_val_list(data or {})
files = to_key_val_list(files or {})
for field, val in fields:
if isinstance(val, basestring) or not hasattr(val, '__iter__'):
val = [val]
for v in val:
if v is not None:
# Don't call str() on bytestrings: in Py3 it all goes wrong.
if not isinstance(v, bytes):
v = str(v)
new_fields.append(
(field.decode('utf-8') if isinstance(field, bytes) else field,
v.encode('utf-8') if isinstance(v, str) else v))
for (k, v) in files:
# support for explicit filename
ft = None
fh = None
if isinstance(v, (tuple, list)):
if len(v) == 2:
fn, fp = v
elif len(v) == 3:
fn, fp, ft = v
else:
fn, fp, ft, fh = v
else:
fn = guess_filename(v) or k
fp = v
if isinstance(fp, (str, bytes, bytearray)):
fdata = fp
elif hasattr(fp, 'read'):
fdata = fp.read()
elif fp is None:
continue
else:
fdata = fp
rf = RequestField(name=k, data=fdata, filename=fn, headers=fh)
rf.make_multipart(content_type=ft)
new_fields.append(rf)
body, content_type = encode_multipart_formdata(new_fields)
return body, content_type
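# --- Illustrative usage (hypothetical, for demonstration only) ---
# Demonstrates the behaviour described in the _encode_params docstring:
# dicts and lists of 2-tuples are form-encoded, and repeated keys in a
# list are preserved. The parameter values here are made up.
if __name__ == "__main__":
    print(RequestEncodingMixin._encode_params({'q': 'search', 'page': '2'}))
    # -> 'q=search&page=2'
    print(RequestEncodingMixin._encode_params([('tag', 'a'), ('tag', 'b')]))
    # -> 'tag=a&tag=b'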
class RequestHooksMixin(object):
def register_hook(self, event, hook):
"""Properly register a hook."""
if event not in self.hooks:
raise ValueError('Unsupported event specified, with event name "%s"' % (event))
if isinstance(hook, Callable):
self.hooks[event].append(hook)
elif hasattr(hook, '__iter__'):
self.hooks[event].extend(h for h in hook if isinstance(h, Callable))
def deregister_hook(self, event, hook):
"""Deregister a previously registered hook.
Returns True if the hook existed, False if not.
"""
try:
self.hooks[event].remove(hook)
return True
except ValueError:
return False
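# --- Illustrative usage (hypothetical, for demonstration only) ---
# Shows register_hook/deregister_hook on a minimal object that carries a
# `hooks` dict, as Request and PreparedRequest below do. `_HookDemo` is a
# made-up helper used only for this sketch.
if __name__ == "__main__":
    class _HookDemo(RequestHooksMixin):
        def __init__(self):
            self.hooks = default_hooks()
    demo = _HookDemo()
    demo.register_hook('response', print)            # known event: appended
    print(demo.deregister_hook('response', print))   # True, hook removed
    try:
        demo.register_hook('no_such_event', print)
    except ValueError as exc:
        print(exc)  # unsupported event names raise ValueError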
class Request(RequestHooksMixin):
"""A user-created :class:`Request <Request>` object.
Used to prepare a :class:`PreparedRequest <PreparedRequest>`, which is sent to the server.
:param method: HTTP method to use.
:param url: URL to send.
:param headers: dictionary of headers to send.
:param files: dictionary of {filename: fileobject} files to multipart upload.
:param data: the body to attach to the request. If a dictionary or
list of tuples ``[(key, value)]`` is provided, form-encoding will
take place.
:param json: json for the body to attach to the request (if files or data is not specified).
:param params: URL parameters to append to the URL. If a dictionary or
list of tuples ``[(key, value)]`` is provided, form-encoding will
take place.
:param auth: Auth handler or (user, pass) tuple.
:param cookies: dictionary or CookieJar of cookies to attach to this request.
:param hooks: dictionary of callback hooks, for internal usage.
Usage::
>>> import requests
>>> req = requests.Request('GET', 'https://httpbin.org/get')
>>> req.prepare()
<PreparedRequest [GET]>
"""
def __init__(self,
method=None, url=None, headers=None, files=None, data=None,
params=None, auth=None, cookies=None, hooks=None, json=None):
# Default empty dicts for dict params.
data = [] if data is None else data
files = [] if files is None else files
headers = {} if headers is None else headers
params = {} if params is None else params
hooks = {} if hooks is None else hooks
self.hooks = default_hooks()
for (k, v) in list(hooks.items()):
self.register_hook(event=k, hook=v)
self.method = method
self.url = url
self.headers = headers
self.files = files
self.data = data
self.json = json
self.params = params
self.auth = auth
self.cookies = cookies
def __repr__(self):
return '<Request [%s]>' % (self.method)
def prepare(self):
"""Constructs a :class:`PreparedRequest <PreparedRequest>` for transmission and returns it."""
p = PreparedRequest()
p.prepare(
method=self.method,
url=self.url,
headers=self.headers,
files=self.files,
data=self.data,
json=self.json,
params=self.params,
auth=self.auth,
cookies=self.cookies,
hooks=self.hooks,
)
return p
class PreparedRequest(RequestEncodingMixin, RequestHooksMixin):
"""The fully mutable :class:`PreparedRequest <PreparedRequest>` object,
containing the exact bytes that will be sent to the server.
Instances are generated from a :class:`Request <Request>` object, and
should not be instantiated manually; doing so may produce undesirable
effects.
Usage::
>>> import requests
>>> req = requests.Request('GET', 'https://httpbin.org/get')
>>> r = req.prepare()
>>> r
<PreparedRequest [GET]>
>>> s = requests.Session()
>>> s.send(r)
<Response [200]>
"""
def __init__(self):
#: HTTP verb to send to the server.
self.method = None
#: HTTP URL to send the request to.
self.url = None
#: dictionary of HTTP headers.
self.headers = None
# The `CookieJar` used to create the Cookie header will be stored here
# after prepare_cookies is called
self._cookies = None
#: request body to send to the server.
self.body = None
#: dictionary of callback hooks, for internal usage.
self.hooks = default_hooks()
#: integer denoting starting position of a readable file-like body.
self._body_position = None
def prepare(self,
method=None, url=None, headers=None, files=None, data=None,
params=None, auth=None, cookies=None, hooks=None, json=None):
"""Prepares the entire request with the given parameters."""
self.prepare_method(method)
self.prepare_url(url, params)
self.prepare_headers(headers)
self.prepare_cookies(cookies)
self.prepare_body(data, files, json)
self.prepare_auth(auth, url)
# Note that prepare_auth must be last to enable authentication schemes
# such as OAuth to work on a fully prepared request.
# This MUST go after prepare_auth. Authenticators could add a hook
self.prepare_hooks(hooks)
def __repr__(self):
return '<PreparedRequest [%s]>' % (self.method)
def copy(self):
p = PreparedRequest()
p.method = self.method
p.url = self.url
p.headers = self.headers.copy() if self.headers is not None else None
p._cookies = _copy_cookie_jar(self._cookies)
p.body = self.body
p.hooks = self.hooks
p._body_position = self._body_position
return p
def prepare_method(self, method):
"""Prepares the given HTTP method."""
self.method = method
if self.method is not None:
self.method = to_native_string(self.method.upper())
@staticmethod
def _get_idna_encoded_host(host):
from oci._vendor import idna
try:
host = idna.encode(host, uts46=True).decode('utf-8')
except idna.IDNAError:
raise UnicodeError
return host
def prepare_url(self, url, params):
"""Prepares the given HTTP URL."""
#: Accept objects that have string representations.
#: We're unable to blindly call unicode/str functions
#: as this will include the bytestring indicator (b'')
#: on python 3.x.
#: https://github.com/psf/requests/pull/2238
if isinstance(url, bytes):
url = url.decode('utf8')
else:
url = unicode(url) if is_py2 else str(url)
# Remove leading whitespaces from url
url = url.lstrip()
# Don't do any URL preparation for non-HTTP schemes like `mailto`,
# `data` etc to work around exceptions from `url_parse`, which
# handles RFC 3986 only.
if ':' in url and not url.lower().startswith('http'):
self.url = url
return
# Support for unicode domain names and paths.
try:
scheme, auth, host, port, path, query, fragment = parse_url(url)
except LocationParseError as e:
raise InvalidURL(*e.args)
if not scheme:
error = ("Invalid URL {0!r}: No schema supplied. Perhaps you meant http://{0}?")
error = error.format(to_native_string(url, 'utf8'))
raise MissingSchema(error)
if not host:
raise InvalidURL("Invalid URL %r: No host supplied" % url)
# In general, we want to try IDNA encoding the hostname if the string contains
# non-ASCII characters. This allows users to automatically get the correct IDNA
# behaviour. For strings containing only ASCII characters, we need to also verify
# it doesn't start with a wildcard (*), before allowing the unencoded hostname.
if not unicode_is_ascii(host):
try:
host = self._get_idna_encoded_host(host)
except UnicodeError:
raise InvalidURL('URL has an invalid label.')
elif host.startswith(u'*'):
raise InvalidURL('URL has an invalid label.')
# Carefully reconstruct the network location
netloc = auth or ''
if netloc:
netloc += '@'
netloc += host
if port:
netloc += ':' + str(port)
# Bare domains aren't valid URLs.
if not path:
path = '/'
if is_py2:
if isinstance(scheme, str):
scheme = scheme.encode('utf-8')
if isinstance(netloc, str):
netloc = netloc.encode('utf-8')
if isinstance(path, str):
path = path.encode('utf-8')
if isinstance(query, str):
query = query.encode('utf-8')
if isinstance(fragment, str):
fragment = fragment.encode('utf-8')
if isinstance(params, (str, bytes)):
params = to_native_string(params)
enc_params = self._encode_params(params)
if enc_params:
if query:
query = '%s&%s' % (query, enc_params)
else:
query = enc_params
url = requote_uri(urlunparse([scheme, netloc, path, None, query, fragment]))
self.url = url
def prepare_headers(self, headers):
"""Prepares the given HTTP headers."""
self.headers = CaseInsensitiveDict()
if headers:
for header in headers.items():
# Raise exception on invalid header value.
check_header_validity(header)
name, value = header
self.headers[to_native_string(name)] = value
def prepare_body(self, data, files, json=None):
"""Prepares the given HTTP body data."""
# Check if file, fo, generator, iterator.
# If not, run through normal process.
# Nottin' on you.
body = None
content_type = None
if not data and json is not None:
# urllib3 requires a bytes-like body. Python 2's json.dumps
# provides this natively, but Python 3 gives a Unicode string.
content_type = 'application/json'
body = complexjson.dumps(json)
if not isinstance(body, bytes):
body = body.encode('utf-8')
is_stream = all([
hasattr(data, '__iter__'),
not isinstance(data, (basestring, list, tuple, Mapping))
])
if is_stream:
try:
length = super_len(data)
except (TypeError, AttributeError, UnsupportedOperation):
length = None
body = data
if getattr(body, 'tell', None) is not None:
# Record the current file position before reading.
# This will allow us to rewind a file in the event
# of a redirect.
try:
self._body_position = body.tell()
except (IOError, OSError):
# This differentiates from None, allowing us to catch
# a failed `tell()` later when trying to rewind the body
self._body_position = object()
if files:
raise NotImplementedError('Streamed bodies and files are mutually exclusive.')
if length:
self.headers['Content-Length'] = builtin_str(length)
else:
self.headers['Transfer-Encoding'] = 'chunked'
else:
# Multi-part file uploads.
if files:
(body, content_type) = self._encode_files(files, data)
else:
if data:
body = self._encode_params(data)
if isinstance(data, basestring) or hasattr(data, 'read'):
content_type = None
else:
content_type = 'application/x-www-form-urlencoded'
self.prepare_content_length(body)
# Add content-type if it wasn't explicitly provided.
if content_type and ('content-type' not in self.headers):
self.headers['Content-Type'] = content_type
self.body = body
def prepare_content_length(self, body):
"""Prepare Content-Length header based on request method and body"""
if body is not None:
length = super_len(body)
if length:
# If length exists, set it. Otherwise, we fallback
# to Transfer-Encoding: chunked.
self.headers['Content-Length'] = builtin_str(length)
elif self.method not in ('GET', 'HEAD') and self.headers.get('Content-Length') is None:
# Set Content-Length to 0 for methods that can have a body
# but don't provide one. (i.e. not GET or HEAD)
self.headers['Content-Length'] = '0'
def prepare_auth(self, auth, url=''):
"""Prepares the given HTTP auth data."""
# If no Auth is explicitly provided, extract it from the URL first.
if auth is None:
url_auth = get_auth_from_url(self.url)
auth = url_auth if any(url_auth) else None
if auth:
if isinstance(auth, tuple) and len(auth) == 2:
# special-case basic HTTP auth
auth = HTTPBasicAuth(*auth)
# Allow auth to make its changes.
r = auth(self)
# Update self to reflect the auth changes.
self.__dict__.update(r.__dict__)
# Recompute Content-Length
self.prepare_content_length(self.body)
def prepare_cookies(self, cookies):
"""Prepares the given HTTP cookie data.
This function eventually generates a ``Cookie`` header from the
given cookies using cookielib. Due to cookielib's design, the header
will not be regenerated if it already exists, meaning this function
can only be called once for the life of the
:class:`PreparedRequest <PreparedRequest>` object. Any subsequent calls
to ``prepare_cookies`` will have no actual effect, unless the "Cookie"
header is removed beforehand.
"""
if isinstance(cookies, cookielib.CookieJar):
self._cookies = cookies
else:
self._cookies = cookiejar_from_dict(cookies)
cookie_header = get_cookie_header(self._cookies, self)
if cookie_header is not None:
self.headers['Cookie'] = cookie_header
def prepare_hooks(self, hooks):
"""Prepares the given hooks."""
# hooks can be passed as None to the prepare method and to this
# method. To prevent iterating over None, simply use an empty list
# if hooks is False-y
hooks = hooks or []
for event in hooks:
self.register_hook(event, hooks[event])
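# --- Illustrative usage (hypothetical, for demonstration only) ---
# Demonstrates prepare_method, prepare_url and prepare_headers on a bare
# PreparedRequest: params are merged into the query string and headers go
# through the case-insensitive dict. URL and header values are made up.
if __name__ == "__main__":
    p = PreparedRequest()
    p.prepare_method('get')
    p.prepare_url('https://example.com/search?lang=en', {'q': 'minc'})
    p.prepare_headers({'X-Demo': 'yes'})
    print(p.method)             # 'GET'
    print(p.url)                # 'https://example.com/search?lang=en&q=minc'
    print(p.headers['x-demo'])  # 'yes' (case-insensitive lookup)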
class Response(object):
"""The :class:`Response <Response>` object, which contains a
server's response to an HTTP request.
"""
__attrs__ = [
'_content', 'status_code', 'headers', 'url', 'history',
'encoding', 'reason', 'cookies', 'elapsed', 'request'
]
def __init__(self):
self._content = False
self._content_consumed = False
self._next = None
#: Integer Code of responded HTTP Status, e.g. 404 or 200.
self.status_code = None
#: Case-insensitive Dictionary of Response Headers.
#: For example, ``headers['content-encoding']`` will return the
#: value of a ``'Content-Encoding'`` response header.
self.headers = CaseInsensitiveDict()
#: File-like object representation of response (for advanced usage).
#: Use of ``raw`` requires that ``stream=True`` be set on the request.
#: This requirement does not apply for use internally to Requests.
self.raw = None
#: Final URL location of Response.
self.url = None
#: Encoding to decode with when accessing r.text.
self.encoding = None
#: A list of :class:`Response <Response>` objects from
#: the history of the Request. Any redirect responses will end
#: up here. The list is sorted from the oldest to the most recent request.
self.history = []
#: Textual reason of responded HTTP Status, e.g. "Not Found" or "OK".
self.reason = None
#: A CookieJar of Cookies the server sent back.
self.cookies = cookiejar_from_dict({})
#: The amount of time elapsed between sending the request
#: and the arrival of the response (as a timedelta).
#: This property specifically measures the time taken between sending
#: the first byte of the request and finishing parsing the headers. It
#: is therefore unaffected by consuming the response content or the
#: value of the ``stream`` keyword argument.
self.elapsed = datetime.timedelta(0)
#: The :class:`PreparedRequest <PreparedRequest>` object to which this
#: is a response.
self.request = None
def __enter__(self):
return self
def __exit__(self, *args):
self.close()
def __getstate__(self):
# Consume everything; accessing the content attribute makes
# sure the content has been fully read.
if not self._content_consumed:
self.content
return {attr: getattr(self, attr, None) for attr in self.__attrs__}
def __setstate__(self, state):
for name, value in state.items():
setattr(self, name, value)
# pickled objects do not have .raw
setattr(self, '_content_consumed', True)
setattr(self, 'raw', None)
def __repr__(self):
return '<Response [%s]>' % (self.status_code)
def __bool__(self):
"""Returns True if :attr:`status_code` is less than 400.
This attribute checks if the status code of the response is between
400 and 600 to see if there was a client error or a server error. If
        the status code is between 200 and 400, this will return True. This
is **not** a check to see if the response code is ``200 OK``.
"""
return self.ok
def __nonzero__(self):
"""Returns True if :attr:`status_code` is less than 400.
This attribute checks if the status code of the response is between
400 and 600 to see if there was a client error or a server error. If
        the status code is between 200 and 400, this will return True. This
is **not** a check to see if the response code is ``200 OK``.
"""
return self.ok
def __iter__(self):
"""Allows you to use a response as an iterator."""
return self.iter_content(128)
@property
def ok(self):
"""Returns True if :attr:`status_code` is less than 400, False if not.
This attribute checks if the status code of the response is between
400 and 600 to see if there was a client error or a server error. If
the status code is between 200 and 400, this will return True. This
is **not** a check to see if the response code is ``200 OK``.
"""
try:
self.raise_for_status()
except HTTPError:
return False
return True
@property
def is_redirect(self):
"""True if this Response is a well-formed HTTP redirect that could have
been processed automatically (by :meth:`Session.resolve_redirects`).
"""
return ('location' in self.headers and self.status_code in REDIRECT_STATI)
@property
def is_permanent_redirect(self):
"""True if this Response one of the permanent versions of redirect."""
return ('location' in self.headers and self.status_code in (codes.moved_permanently, codes.permanent_redirect))
@property
def next(self):
"""Returns a PreparedRequest for the next request in a redirect chain, if there is one."""
return self._next
@property
def apparent_encoding(self):
"""The apparent encoding, provided by the chardet library."""
return chardet.detect(self.content)['encoding']
def iter_content(self, chunk_size=1, decode_unicode=False):
"""Iterates over the response data. When stream=True is set on the
request, this avoids reading the content at once into memory for
large responses. The chunk size is the number of bytes it should
read into memory. This is not necessarily the length of each item
returned as decoding can take place.
chunk_size must be of type int or None. A value of None will
function differently depending on the value of `stream`.
stream=True will read data as it arrives in whatever size the
chunks are received. If stream=False, data is returned as
a single chunk.
If decode_unicode is True, content will be decoded using the best
available encoding based on the response.
"""
def generate():
# Special case for urllib3.
if hasattr(self.raw, 'stream'):
try:
for chunk in self.raw.stream(chunk_size, decode_content=True):
yield chunk
except ProtocolError as e:
raise ChunkedEncodingError(e)
except DecodeError as e:
raise ContentDecodingError(e)
except ReadTimeoutError as e:
raise ConnectionError(e)
else:
# Standard file-like object.
while True:
chunk = self.raw.read(chunk_size)
if not chunk:
break
yield chunk
self._content_consumed = True
if self._content_consumed and isinstance(self._content, bool):
raise StreamConsumedError()
elif chunk_size is not None and not isinstance(chunk_size, int):
raise TypeError("chunk_size must be an int, it is instead a %s." % type(chunk_size))
# simulate reading small chunks of the content
reused_chunks = iter_slices(self._content, chunk_size)
stream_chunks = generate()
chunks = reused_chunks if self._content_consumed else stream_chunks
if decode_unicode:
chunks = stream_decode_response_unicode(chunks, self)
return chunks
def iter_lines(self, chunk_size=ITER_CHUNK_SIZE, decode_unicode=False, delimiter=None):
"""Iterates over the response data, one line at a time. When
stream=True is set on the request, this avoids reading the
content at once into memory for large responses.
.. note:: This method is not reentrant safe.
"""
pending = None
for chunk in self.iter_content(chunk_size=chunk_size, decode_unicode=decode_unicode):
if pending is not None:
chunk = pending + chunk
if delimiter:
lines = chunk.split(delimiter)
else:
lines = chunk.splitlines()
if lines and lines[-1] and chunk and lines[-1][-1] == chunk[-1]:
pending = lines.pop()
else:
pending = None
for line in lines:
yield line
if pending is not None:
yield pending
@property
def content(self):
"""Content of the response, in bytes."""
if self._content is False:
# Read the contents.
if self._content_consumed:
raise RuntimeError(
'The content for this response was already consumed')
if self.status_code == 0 or self.raw is None:
self._content = None
else:
self._content = b''.join(self.iter_content(CONTENT_CHUNK_SIZE)) or b''
self._content_consumed = True
# don't need to release the connection; that's been handled by urllib3
# since we exhausted the data.
return self._content
@property
def text(self):
"""Content of the response, in unicode.
If Response.encoding is None, encoding will be guessed using
``chardet``.
The encoding of the response content is determined based solely on HTTP
headers, following RFC 2616 to the letter. If you can take advantage of
non-HTTP knowledge to make a better guess at the encoding, you should
set ``r.encoding`` appropriately before accessing this property.
"""
# Try charset from content-type
content = None
encoding = self.encoding
if not self.content:
return str('')
# Fallback to auto-detected encoding.
if self.encoding is None:
encoding = self.apparent_encoding
# Decode unicode from given encoding.
try:
content = str(self.content, encoding, errors='replace')
except (LookupError, TypeError):
# A LookupError is raised if the encoding was not found which could
# indicate a misspelling or similar mistake.
#
# A TypeError can be raised if encoding is None
#
# So we try blindly encoding.
content = str(self.content, errors='replace')
return content
def json(self, **kwargs):
r"""Returns the json-encoded content of a response, if any.
:param \*\*kwargs: Optional arguments that ``json.loads`` takes.
:raises ValueError: If the response body does not contain valid json.
"""
if not self.encoding and self.content and len(self.content) > 3:
# No encoding set. JSON RFC 4627 section 3 states we should expect
# UTF-8, -16 or -32. Detect which one to use; If the detection or
# decoding fails, fall back to `self.text` (using chardet to make
# a best guess).
encoding = guess_json_utf(self.content)
if encoding is not None:
try:
return complexjson.loads(
self.content.decode(encoding), **kwargs
)
except UnicodeDecodeError:
# Wrong UTF codec detected; usually because it's not UTF-8
# but some other 8-bit codec. This is an RFC violation,
# and the server didn't bother to tell us what codec *was*
# used.
pass
return complexjson.loads(self.text, **kwargs)
@property
def links(self):
"""Returns the parsed header links of the response, if any."""
header = self.headers.get('link')
# l = MultiDict()
l = {}
if header:
links = parse_header_links(header)
for link in links:
key = link.get('rel') or link.get('url')
l[key] = link
return l
def raise_for_status(self):
"""Raises :class:`HTTPError`, if one occurred."""
http_error_msg = ''
if isinstance(self.reason, bytes):
# We attempt to decode utf-8 first because some servers
# choose to localize their reason strings. If the string
# isn't utf-8, we fall back to iso-8859-1 for all other
# encodings. (See PR #3538)
try:
reason = self.reason.decode('utf-8')
except UnicodeDecodeError:
reason = self.reason.decode('iso-8859-1')
else:
reason = self.reason
if 400 <= self.status_code < 500:
http_error_msg = u'%s Client Error: %s for url: %s' % (self.status_code, reason, self.url)
elif 500 <= self.status_code < 600:
http_error_msg = u'%s Server Error: %s for url: %s' % (self.status_code, reason, self.url)
if http_error_msg:
raise HTTPError(http_error_msg, response=self)
def close(self):
"""Releases the connection back to the pool. Once this method has been
called the underlying ``raw`` object must not be accessed again.
*Note: Should not normally need to be called explicitly.*
"""
if not self._content_consumed:
self.raw.close()
release_conn = getattr(self.raw, 'release_conn', None)
if release_conn is not None:
release_conn()
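# --- Illustrative usage (hypothetical, for demonstration only) ---
# A bare Response with a made-up 404 status to show how `ok` and
# raise_for_status behave; no network access or raw body is involved.
if __name__ == "__main__":
    resp = Response()
    resp.status_code = 404
    resp.reason = 'Not Found'
    resp.url = 'https://example.com/missing'
    print(resp.ok)  # False
    try:
        resp.raise_for_status()
    except HTTPError as exc:
        print(exc)  # 404 Client Error: Not Found for url: ...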
 | 36.135135 | 245 | 0.595161 | ["Apache-2.0", "BSD-3-Clause"] | LaudateCorpus1/oci-python-sdk | src/oci/_vendor/requests/models.py | 34,762 | Python |
#!/usr/bin/env python
# -*- coding=utf8 -*-
"""
# Author: achao
# File Name: weight_init.py
# Description:
"""
import copy
import math
import warnings
import numpy as np
import torch
import torch.nn as nn
from torch import Tensor
from deep3dmap.core.utils import Registry, build_from_cfg, get_logger, print_log
INITIALIZERS = Registry('initializer')
def update_init_info(module, init_info):
"""Update the `_params_init_info` in the module if the value of parameters
are changed.
Args:
module (obj:`nn.Module`): The module of PyTorch with a user-defined
attribute `_params_init_info` which records the initialization
information.
init_info (str): The string that describes the initialization.
"""
assert hasattr(
module,
'_params_init_info'), f'Can not find `_params_init_info` in {module}'
for name, param in module.named_parameters():
assert param in module._params_init_info, (
f'Find a new :obj:`Parameter` '
f'named `{name}` during executing the '
f'`init_weights` of '
f'`{module.__class__.__name__}`. '
f'Please do not add or '
f'replace parameters during executing '
f'the `init_weights`. ')
# The parameter has been changed during executing the
# `init_weights` of module
mean_value = param.data.mean()
if module._params_init_info[param]['tmp_mean_value'] != mean_value:
module._params_init_info[param]['init_info'] = init_info
module._params_init_info[param]['tmp_mean_value'] = mean_value
def constant_init(module, val, bias=0):
if hasattr(module, 'weight') and module.weight is not None:
nn.init.constant_(module.weight, val)
if hasattr(module, 'bias') and module.bias is not None:
nn.init.constant_(module.bias, bias)
def xavier_init(module, gain=1, bias=0, distribution='normal'):
assert distribution in ['uniform', 'normal']
if hasattr(module, 'weight') and module.weight is not None:
if distribution == 'uniform':
nn.init.xavier_uniform_(module.weight, gain=gain)
else:
nn.init.xavier_normal_(module.weight, gain=gain)
if hasattr(module, 'bias') and module.bias is not None:
nn.init.constant_(module.bias, bias)
def normal_init(module, mean=0, std=1, bias=0):
if hasattr(module, 'weight') and module.weight is not None:
nn.init.normal_(module.weight, mean, std)
if hasattr(module, 'bias') and module.bias is not None:
nn.init.constant_(module.bias, bias)
def trunc_normal_init(module: nn.Module,
mean: float = 0,
std: float = 1,
a: float = -2,
b: float = 2,
bias: float = 0) -> None:
if hasattr(module, 'weight') and module.weight is not None:
trunc_normal_(module.weight, mean, std, a, b) # type: ignore
if hasattr(module, 'bias') and module.bias is not None:
nn.init.constant_(module.bias, bias) # type: ignore
def uniform_init(module, a=0, b=1, bias=0):
if hasattr(module, 'weight') and module.weight is not None:
nn.init.uniform_(module.weight, a, b)
if hasattr(module, 'bias') and module.bias is not None:
nn.init.constant_(module.bias, bias)
def kaiming_init(module,
a=0,
mode='fan_out',
nonlinearity='relu',
bias=0,
distribution='normal'):
assert distribution in ['uniform', 'normal']
if hasattr(module, 'weight') and module.weight is not None:
if distribution == 'uniform':
nn.init.kaiming_uniform_(
module.weight, a=a, mode=mode, nonlinearity=nonlinearity)
else:
nn.init.kaiming_normal_(
module.weight, a=a, mode=mode, nonlinearity=nonlinearity)
if hasattr(module, 'bias') and module.bias is not None:
nn.init.constant_(module.bias, bias)
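# --- Illustrative usage (hypothetical, for demonstration only) ---
# Applies the functional initializers above to throw-away layers; the
# layer shapes and values are arbitrary and only show the call signatures.
if __name__ == "__main__":
    kaiming_init(nn.Conv2d(3, 16, kernel_size=3), mode='fan_out', nonlinearity='relu')
    constant_init(nn.BatchNorm2d(16), val=1, bias=0)
    xavier_init(nn.Linear(16, 4), gain=1, distribution='uniform')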
def caffe2_xavier_init(module, bias=0):
# `XavierFill` in Caffe2 corresponds to `kaiming_uniform_` in PyTorch
# Acknowledgment to FAIR's internal code
kaiming_init(
module,
a=1,
mode='fan_in',
nonlinearity='leaky_relu',
bias=bias,
distribution='uniform')
def bias_init_with_prob(prior_prob):
"""initialize conv/fc bias value according to a given probability value."""
bias_init = float(-np.log((1 - prior_prob) / prior_prob))
return bias_init
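# --- Illustrative usage (hypothetical, for demonstration only) ---
# Worked example: for prior_prob=0.01 the returned bias is
# -log((1 - 0.01) / 0.01) = -log(99) ≈ -4.595.
if __name__ == "__main__":
    print(round(bias_init_with_prob(0.01), 3))  # -4.595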
def _get_bases_name(m):
return [b.__name__ for b in m.__class__.__bases__]
class BaseInit(object):
def __init__(self, *, bias=0, bias_prob=None, layer=None):
self.wholemodule = False
if not isinstance(bias, (int, float)):
raise TypeError(f'bias must be a number, but got a {type(bias)}')
if bias_prob is not None:
if not isinstance(bias_prob, float):
raise TypeError(f'bias_prob type must be float, \
but got {type(bias_prob)}')
if layer is not None:
if not isinstance(layer, (str, list)):
raise TypeError(f'layer must be a str or a list of str, \
but got a {type(layer)}')
else:
layer = []
if bias_prob is not None:
self.bias = bias_init_with_prob(bias_prob)
else:
self.bias = bias
self.layer = [layer] if isinstance(layer, str) else layer
def _get_init_info(self):
info = f'{self.__class__.__name__}, bias={self.bias}'
return info
@INITIALIZERS.register_module(name='Constant')
class ConstantInit(BaseInit):
"""Initialize module parameters with constant values.
Args:
val (int | float): the value to fill the weights in the module with
bias (int | float): the value to fill the bias. Defaults to 0.
bias_prob (float, optional): the probability for bias initialization.
Defaults to None.
layer (str | list[str], optional): the layer will be initialized.
Defaults to None.
"""
def __init__(self, val, **kwargs):
super().__init__(**kwargs)
self.val = val
def __call__(self, module):
def init(m):
if self.wholemodule:
constant_init(m, self.val, self.bias)
else:
layername = m.__class__.__name__
basesname = _get_bases_name(m)
if len(set(self.layer) & set([layername] + basesname)):
constant_init(m, self.val, self.bias)
module.apply(init)
if hasattr(module, '_params_init_info'):
update_init_info(module, init_info=self._get_init_info())
def _get_init_info(self):
info = f'{self.__class__.__name__}: val={self.val}, bias={self.bias}'
return info
@INITIALIZERS.register_module(name='Xavier')
class XavierInit(BaseInit):
r"""Initialize module parameters with values according to the method
described in `Understanding the difficulty of training deep feedforward
neural networks - Glorot, X. & Bengio, Y. (2010).
<http://proceedings.mlr.press/v9/glorot10a/glorot10a.pdf>`_
Args:
gain (int | float): an optional scaling factor. Defaults to 1.
bias (int | float): the value to fill the bias. Defaults to 0.
bias_prob (float, optional): the probability for bias initialization.
Defaults to None.
distribution (str): distribution either be ``'normal'``
or ``'uniform'``. Defaults to ``'normal'``.
layer (str | list[str], optional): the layer will be initialized.
Defaults to None.
"""
def __init__(self, gain=1, distribution='normal', **kwargs):
super().__init__(**kwargs)
self.gain = gain
self.distribution = distribution
def __call__(self, module):
def init(m):
if self.wholemodule:
xavier_init(m, self.gain, self.bias, self.distribution)
else:
layername = m.__class__.__name__
basesname = _get_bases_name(m)
if len(set(self.layer) & set([layername] + basesname)):
xavier_init(m, self.gain, self.bias, self.distribution)
module.apply(init)
if hasattr(module, '_params_init_info'):
update_init_info(module, init_info=self._get_init_info())
def _get_init_info(self):
info = f'{self.__class__.__name__}: gain={self.gain}, ' \
f'distribution={self.distribution}, bias={self.bias}'
return info
@INITIALIZERS.register_module(name='Normal')
class NormalInit(BaseInit):
r"""Initialize module parameters with the values drawn from the normal
distribution :math:`\mathcal{N}(\text{mean}, \text{std}^2)`.
Args:
mean (int | float):the mean of the normal distribution. Defaults to 0.
std (int | float): the standard deviation of the normal distribution.
Defaults to 1.
bias (int | float): the value to fill the bias. Defaults to 0.
bias_prob (float, optional): the probability for bias initialization.
Defaults to None.
layer (str | list[str], optional): the layer will be initialized.
Defaults to None.
"""
def __init__(self, mean=0, std=1, **kwargs):
super().__init__(**kwargs)
self.mean = mean
self.std = std
def __call__(self, module):
def init(m):
if self.wholemodule:
normal_init(m, self.mean, self.std, self.bias)
else:
layername = m.__class__.__name__
basesname = _get_bases_name(m)
if len(set(self.layer) & set([layername] + basesname)):
normal_init(m, self.mean, self.std, self.bias)
module.apply(init)
if hasattr(module, '_params_init_info'):
update_init_info(module, init_info=self._get_init_info())
def _get_init_info(self):
info = f'{self.__class__.__name__}: mean={self.mean},' \
f' std={self.std}, bias={self.bias}'
return info
@INITIALIZERS.register_module(name='TruncNormal')
class TruncNormalInit(BaseInit):
r"""Initialize module parameters with the values drawn from the normal
distribution :math:`\mathcal{N}(\text{mean}, \text{std}^2)` with values
outside :math:`[a, b]`.
Args:
mean (float): the mean of the normal distribution. Defaults to 0.
std (float): the standard deviation of the normal distribution.
Defaults to 1.
a (float): The minimum cutoff value.
b ( float): The maximum cutoff value.
bias (float): the value to fill the bias. Defaults to 0.
bias_prob (float, optional): the probability for bias initialization.
Defaults to None.
layer (str | list[str], optional): the layer will be initialized.
Defaults to None.
"""
def __init__(self,
mean: float = 0,
std: float = 1,
a: float = -2,
b: float = 2,
**kwargs) -> None:
super().__init__(**kwargs)
self.mean = mean
self.std = std
self.a = a
self.b = b
def __call__(self, module: nn.Module) -> None:
def init(m):
if self.wholemodule:
trunc_normal_init(m, self.mean, self.std, self.a, self.b,
self.bias)
else:
layername = m.__class__.__name__
basesname = _get_bases_name(m)
if len(set(self.layer) & set([layername] + basesname)):
trunc_normal_init(m, self.mean, self.std, self.a, self.b,
self.bias)
module.apply(init)
if hasattr(module, '_params_init_info'):
update_init_info(module, init_info=self._get_init_info())
def _get_init_info(self):
info = f'{self.__class__.__name__}: a={self.a}, b={self.b},' \
f' mean={self.mean}, std={self.std}, bias={self.bias}'
return info
@INITIALIZERS.register_module(name='Uniform')
class UniformInit(BaseInit):
r"""Initialize module parameters with values drawn from the uniform
distribution :math:`\mathcal{U}(a, b)`.
Args:
a (int | float): the lower bound of the uniform distribution.
Defaults to 0.
b (int | float): the upper bound of the uniform distribution.
Defaults to 1.
bias (int | float): the value to fill the bias. Defaults to 0.
bias_prob (float, optional): the probability for bias initialization.
Defaults to None.
layer (str | list[str], optional): the layer will be initialized.
Defaults to None.
"""
def __init__(self, a=0, b=1, **kwargs):
super().__init__(**kwargs)
self.a = a
self.b = b
def __call__(self, module):
def init(m):
if self.wholemodule:
uniform_init(m, self.a, self.b, self.bias)
else:
layername = m.__class__.__name__
basesname = _get_bases_name(m)
if len(set(self.layer) & set([layername] + basesname)):
uniform_init(m, self.a, self.b, self.bias)
module.apply(init)
if hasattr(module, '_params_init_info'):
update_init_info(module, init_info=self._get_init_info())
def _get_init_info(self):
info = f'{self.__class__.__name__}: a={self.a},' \
f' b={self.b}, bias={self.bias}'
return info
@INITIALIZERS.register_module(name='Kaiming')
class KaimingInit(BaseInit):
r"""Initialize module parameters with the values according to the method
described in `Delving deep into rectifiers: Surpassing human-level
performance on ImageNet classification - He, K. et al. (2015).
<https://www.cv-foundation.org/openaccess/content_iccv_2015/
papers/He_Delving_Deep_into_ICCV_2015_paper.pdf>`_
Args:
a (int | float): the negative slope of the rectifier used after this
layer (only used with ``'leaky_relu'``). Defaults to 0.
mode (str): either ``'fan_in'`` or ``'fan_out'``. Choosing
``'fan_in'`` preserves the magnitude of the variance of the weights
in the forward pass. Choosing ``'fan_out'`` preserves the
magnitudes in the backwards pass. Defaults to ``'fan_out'``.
nonlinearity (str): the non-linear function (`nn.functional` name),
recommended to use only with ``'relu'`` or ``'leaky_relu'`` .
Defaults to 'relu'.
bias (int | float): the value to fill the bias. Defaults to 0.
bias_prob (float, optional): the probability for bias initialization.
Defaults to None.
distribution (str): distribution either be ``'normal'`` or
``'uniform'``. Defaults to ``'normal'``.
layer (str | list[str], optional): the layer will be initialized.
Defaults to None.
"""
def __init__(self,
a=0,
mode='fan_out',
nonlinearity='relu',
distribution='normal',
**kwargs):
super().__init__(**kwargs)
self.a = a
self.mode = mode
self.nonlinearity = nonlinearity
self.distribution = distribution
def __call__(self, module):
def init(m):
if self.wholemodule:
kaiming_init(m, self.a, self.mode, self.nonlinearity,
self.bias, self.distribution)
else:
layername = m.__class__.__name__
basesname = _get_bases_name(m)
if len(set(self.layer) & set([layername] + basesname)):
kaiming_init(m, self.a, self.mode, self.nonlinearity,
self.bias, self.distribution)
module.apply(init)
if hasattr(module, '_params_init_info'):
update_init_info(module, init_info=self._get_init_info())
def _get_init_info(self):
info = f'{self.__class__.__name__}: a={self.a}, mode={self.mode}, ' \
f'nonlinearity={self.nonlinearity}, ' \
f'distribution ={self.distribution}, bias={self.bias}'
return info
@INITIALIZERS.register_module(name='Caffe2Xavier')
class Caffe2XavierInit(KaimingInit):
# `XavierFill` in Caffe2 corresponds to `kaiming_uniform_` in PyTorch
# Acknowledgment to FAIR's internal code
def __init__(self, **kwargs):
super().__init__(
a=1,
mode='fan_in',
nonlinearity='leaky_relu',
distribution='uniform',
**kwargs)
def __call__(self, module):
super().__call__(module)
@INITIALIZERS.register_module(name='Pretrained')
class PretrainedInit(object):
"""Initialize module by loading a pretrained model.
Args:
checkpoint (str): the checkpoint file of the pretrained model should
            be loaded.
prefix (str, optional): the prefix of a sub-module in the pretrained
            model. It is for loading a part of the pretrained model to
initialize. For example, if we would like to only load the
backbone of a detector model, we can set ``prefix='backbone.'``.
Defaults to None.
map_location (str): map tensors into proper locations.
"""
def __init__(self, checkpoint, prefix=None, map_location=None):
self.checkpoint = checkpoint
self.prefix = prefix
self.map_location = map_location
def __call__(self, module):
from deep3dmap.runners import (_load_checkpoint_with_prefix, load_checkpoint,
load_state_dict)
logger = get_logger('deep3dmap')
if self.prefix is None:
print_log(f'load model from: {self.checkpoint}', logger=logger)
load_checkpoint(
module,
self.checkpoint,
map_location=self.map_location,
strict=False,
logger=logger)
else:
print_log(
f'load {self.prefix} in model from: {self.checkpoint}',
logger=logger)
state_dict = _load_checkpoint_with_prefix(
self.prefix, self.checkpoint, map_location=self.map_location)
load_state_dict(module, state_dict, strict=False, logger=logger)
if hasattr(module, '_params_init_info'):
update_init_info(module, init_info=self._get_init_info())
def _get_init_info(self):
info = f'{self.__class__.__name__}: load from {self.checkpoint}'
return info
def _initialize(module, cfg, wholemodule=False):
func = build_from_cfg(cfg, INITIALIZERS)
# wholemodule flag is for override mode, there is no layer key in override
# and initializer will give init values for the whole module with the name
# in override.
func.wholemodule = wholemodule
func(module)
def _initialize_override(module, override, cfg):
if not isinstance(override, (dict, list)):
raise TypeError(f'override must be a dict or a list of dict, \
but got {type(override)}')
override = [override] if isinstance(override, dict) else override
for override_ in override:
cp_override = copy.deepcopy(override_)
name = cp_override.pop('name', None)
if name is None:
raise ValueError('`override` must contain the key "name",'
f'but got {cp_override}')
# if override only has name key, it means use args in init_cfg
if not cp_override:
cp_override.update(cfg)
# if override has name key and other args except type key, it will
# raise error
elif 'type' not in cp_override.keys():
raise ValueError(
f'`override` need "type" key, but got {cp_override}')
if hasattr(module, name):
_initialize(getattr(module, name), cp_override, wholemodule=True)
else:
raise RuntimeError(f'module did not have attribute {name}, '
f'but init_cfg is {cp_override}.')
def initialize(module, init_cfg):
"""Initialize a module.
Args:
module (``torch.nn.Module``): the module will be initialized.
init_cfg (dict | list[dict]): initialization configuration dict to
define initializer. OpenMMLab has implemented 6 initializers
including ``Constant``, ``Xavier``, ``Normal``, ``Uniform``,
``Kaiming``, and ``Pretrained``.
Example:
>>> module = nn.Linear(2, 3, bias=True)
>>> init_cfg = dict(type='Constant', layer='Linear', val =1 , bias =2)
>>> initialize(module, init_cfg)
>>> module = nn.Sequential(nn.Conv1d(3, 1, 3), nn.Linear(1,2))
>>> # define key ``'layer'`` for initializing layer with different
>>> # configuration
>>> init_cfg = [dict(type='Constant', layer='Conv1d', val=1),
dict(type='Constant', layer='Linear', val=2)]
>>> initialize(module, init_cfg)
>>> # define key``'override'`` to initialize some specific part in
>>> # module
>>> class FooNet(nn.Module):
>>> def __init__(self):
>>> super().__init__()
>>> self.feat = nn.Conv2d(3, 16, 3)
>>> self.reg = nn.Conv2d(16, 10, 3)
>>> self.cls = nn.Conv2d(16, 5, 3)
>>> model = FooNet()
>>> init_cfg = dict(type='Constant', val=1, bias=2, layer='Conv2d',
>>> override=dict(type='Constant', name='reg', val=3, bias=4))
>>> initialize(model, init_cfg)
>>> model = ResNet(depth=50)
>>> # Initialize weights with the pretrained model.
>>> init_cfg = dict(type='Pretrained',
checkpoint='torchvision://resnet50')
>>> initialize(model, init_cfg)
>>> # Initialize weights of a sub-module with the specific part of
>>> # a pretrained model by using "prefix".
>>> url = 'http://download.openmmlab.com/mmdetection/v2.0/retinanet/'\
>>> 'retinanet_r50_fpn_1x_coco/'\
>>> 'retinanet_r50_fpn_1x_coco_20200130-c2398f9e.pth'
>>> init_cfg = dict(type='Pretrained',
checkpoint=url, prefix='backbone.')
"""
if not isinstance(init_cfg, (dict, list)):
raise TypeError(f'init_cfg must be a dict or a list of dict, \
but got {type(init_cfg)}')
if isinstance(init_cfg, dict):
init_cfg = [init_cfg]
for cfg in init_cfg:
# should deeply copy the original config because cfg may be used by
# other modules, e.g., one init_cfg shared by multiple bottleneck
# blocks, the expected cfg will be changed after pop and will change
# the initialization behavior of other modules
cp_cfg = copy.deepcopy(cfg)
override = cp_cfg.pop('override', None)
_initialize(module, cp_cfg)
if override is not None:
cp_cfg.pop('layer', None)
_initialize_override(module, override, cp_cfg)
else:
# All attributes in module have same initialization.
pass
def _no_grad_trunc_normal_(tensor: Tensor, mean: float, std: float, a: float,
b: float) -> Tensor:
# Method based on
# https://people.sc.fsu.edu/~jburkardt/presentations/truncated_normal.pdf
# Modified from
# https://github.com/pytorch/pytorch/blob/master/torch/nn/init.py
def norm_cdf(x):
# Computes standard normal cumulative distribution function
return (1. + math.erf(x / math.sqrt(2.))) / 2.
if (mean < a - 2 * std) or (mean > b + 2 * std):
warnings.warn(
'mean is more than 2 std from [a, b] in nn.init.trunc_normal_. '
'The distribution of values may be incorrect.',
stacklevel=2)
with torch.no_grad():
# Values are generated by using a truncated uniform distribution and
# then using the inverse CDF for the normal distribution.
# Get upper and lower cdf values
lower = norm_cdf((a - mean) / std)
upper = norm_cdf((b - mean) / std)
# Uniformly fill tensor with values from [lower, upper], then translate
# to [2lower-1, 2upper-1].
tensor.uniform_(2 * lower - 1, 2 * upper - 1)
# Use inverse cdf transform for normal distribution to get truncated
# standard normal
tensor.erfinv_()
# Transform to proper mean, std
tensor.mul_(std * math.sqrt(2.))
tensor.add_(mean)
# Clamp to ensure it's in the proper range
tensor.clamp_(min=a, max=b)
return tensor
def trunc_normal_(tensor: Tensor,
mean: float = 0.,
std: float = 1.,
a: float = -2.,
b: float = 2.) -> Tensor:
r"""Fills the input Tensor with values drawn from a truncated
normal distribution. The values are effectively drawn from the
normal distribution :math:`\mathcal{N}(\text{mean}, \text{std}^2)`
with values outside :math:`[a, b]` redrawn until they are within
the bounds. The method used for generating the random values works
best when :math:`a \leq \text{mean} \leq b`.
Modified from
https://github.com/pytorch/pytorch/blob/master/torch/nn/init.py
Args:
tensor (``torch.Tensor``): an n-dimensional `torch.Tensor`.
mean (float): the mean of the normal distribution.
std (float): the standard deviation of the normal distribution.
a (float): the minimum cutoff value.
b (float): the maximum cutoff value.
"""
return _no_grad_trunc_normal_(tensor, mean, std, a, b)
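# --- Illustrative usage (hypothetical, for demonstration only) ---
# Fills a throw-away tensor and checks that every sample stays inside the
# cutoff interval [a, b]; the shape and parameters are arbitrary.
if __name__ == "__main__":
    _t = trunc_normal_(torch.empty(4, 8), mean=0., std=0.02, a=-2., b=2.)
    print(tuple(_t.shape), float(_t.min()) >= -2.0, float(_t.max()) <= 2.0)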
 | 37.645954 | 85 | 0.600015 | ["Apache-2.0"] | achao2013/DeepRecon | deep3dmap/core/utils/weight_init.py | 26,051 | Python |
# -*- coding: utf-8 -*-
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""The minc module provides classes for interfacing with the `MINC
<http://www.bic.mni.mcgill.ca/ServicesSoftware/MINC>`_ command line tools. This
module was written to work with MINC version 2.2.00.
Author: Carlo Hamalainen <[email protected]>
http://carlo-hamalainen.net
"""
import glob
import os
import os.path
import re
import warnings
from ..base import (TraitedSpec, CommandLineInputSpec, CommandLine,
StdOutCommandLineInputSpec, StdOutCommandLine, File,
Directory, InputMultiPath, OutputMultiPath, traits,
isdefined)
from .base import aggregate_filename
warnings.filterwarnings('always', category=UserWarning)
class ExtractInputSpec(StdOutCommandLineInputSpec):
input_file = File(
desc='input file',
exists=True,
mandatory=True,
argstr='%s',
position=-2,
)
output_file = File(
desc='output file',
position=-1,
name_source=['input_file'],
hash_files=False,
name_template='%s.raw',
keep_extension=False)
_xor_write = (
        'write_ascii',
'write_byte',
'write_short',
'write_int',
'write_long',
'write_float',
'write_double',
'write_signed',
'write_unsigned',
)
write_ascii = traits.Bool(
desc='Write out data as ascii strings (default).',
argstr='-ascii',
xor=_xor_write)
write_byte = traits.Bool(
desc='Write out data as bytes.', argstr='-byte', xor=_xor_write)
write_short = traits.Bool(
desc='Write out data as short integers.',
argstr='-short',
xor=_xor_write)
write_int = traits.Bool(
desc='Write out data as 32-bit integers.',
argstr='-int',
xor=_xor_write)
write_long = traits.Bool(
desc='Superseded by write_int.', argstr='-long', xor=_xor_write)
write_float = traits.Bool(
desc='Write out data as single precision floating-point values.',
argstr='-float',
xor=_xor_write)
write_double = traits.Bool(
desc='Write out data as double precision floating-point values.',
argstr='-double',
xor=_xor_write)
_xor_signed = ('write_signed', 'write_unsigned')
write_signed = traits.Bool(
desc='Write out signed data.', argstr='-signed', xor=_xor_signed)
write_unsigned = traits.Bool(
desc='Write out unsigned data.', argstr='-unsigned', xor=_xor_signed)
write_range = traits.Tuple(
traits.Float,
traits.Float,
argstr='-range %s %s',
desc=
'Specify the range of output values\nDefault value: 1.79769e+308 1.79769e+308.',
)
_xor_normalize = (
'normalize',
'nonormalize',
)
normalize = traits.Bool(
desc='Normalize integer pixel values to file max and min.',
argstr='-normalize',
xor=_xor_normalize)
nonormalize = traits.Bool(
desc='Turn off pixel normalization.',
argstr='-nonormalize',
xor=_xor_normalize)
image_range = traits.Tuple(
traits.Float,
traits.Float,
desc='Specify the range of real image values for normalization.',
argstr='-image_range %s %s')
image_minimum = traits.Float(
        desc=('Specify the minimum real image value for normalization. '
'Default value: 1.79769e+308.'),
argstr='-image_minimum %s')
image_maximum = traits.Float(
        desc=('Specify the maximum real image value for normalization. '
'Default value: 1.79769e+308.'),
argstr='-image_maximum %s')
start = InputMultiPath(
traits.Int,
desc='Specifies corner of hyperslab (C conventions for indices).',
sep=',',
argstr='-start %s',
)
count = InputMultiPath(
traits.Int,
desc='Specifies edge lengths of hyperslab to read.',
sep=',',
argstr='-count %s',
)
# FIXME Can we make sure that len(start) == len(count)?
_xor_flip = ('flip_positive_direction', 'flip_negative_direction',
'flip_any_direction')
flip_positive_direction = traits.Bool(
desc='Flip images to always have positive direction.',
argstr='-positive_direction',
xor=_xor_flip)
flip_negative_direction = traits.Bool(
desc='Flip images to always have negative direction.',
argstr='-negative_direction',
xor=_xor_flip)
flip_any_direction = traits.Bool(
desc='Do not flip images (Default).',
argstr='-any_direction',
xor=_xor_flip)
_xor_x_flip = ('flip_x_positive', 'flip_x_negative', 'flip_x_any')
flip_x_positive = traits.Bool(
desc='Flip images to give positive xspace:step value (left-to-right).',
argstr='+xdirection',
xor=_xor_x_flip)
flip_x_negative = traits.Bool(
desc='Flip images to give negative xspace:step value (right-to-left).',
argstr='-xdirection',
xor=_xor_x_flip)
flip_x_any = traits.Bool(
desc='Don\'t flip images along x-axis (default).',
argstr='-xanydirection',
xor=_xor_x_flip)
_xor_y_flip = ('flip_y_positive', 'flip_y_negative', 'flip_y_any')
flip_y_positive = traits.Bool(
desc='Flip images to give positive yspace:step value (post-to-ant).',
argstr='+ydirection',
xor=_xor_y_flip)
flip_y_negative = traits.Bool(
desc='Flip images to give negative yspace:step value (ant-to-post).',
argstr='-ydirection',
xor=_xor_y_flip)
flip_y_any = traits.Bool(
desc='Don\'t flip images along y-axis (default).',
argstr='-yanydirection',
xor=_xor_y_flip)
_xor_z_flip = ('flip_z_positive', 'flip_z_negative', 'flip_z_any')
flip_z_positive = traits.Bool(
desc='Flip images to give positive zspace:step value (inf-to-sup).',
argstr='+zdirection',
xor=_xor_z_flip)
flip_z_negative = traits.Bool(
desc='Flip images to give negative zspace:step value (sup-to-inf).',
argstr='-zdirection',
xor=_xor_z_flip)
flip_z_any = traits.Bool(
desc='Don\'t flip images along z-axis (default).',
argstr='-zanydirection',
xor=_xor_z_flip)
class ExtractOutputSpec(TraitedSpec):
output_file = File(desc='output file in raw/text format', exists=True)
class Extract(StdOutCommandLine):
"""Dump a hyperslab of MINC file data.
Examples
--------
>>> from nipype.interfaces.minc import Extract
>>> from nipype.interfaces.minc.testdata import minc2Dfile
>>> extract = Extract(input_file=minc2Dfile)
>>> extract.run() # doctest: +SKIP
>>> extract = Extract(input_file=minc2Dfile, start=[3, 10, 5], count=[4, 4, 4]) # extract a 4x4x4 slab at offset [3, 10, 5]
>>> extract.run() # doctest: +SKIP
"""
input_spec = ExtractInputSpec
output_spec = ExtractOutputSpec
_cmd = 'mincextract'
class ToRawInputSpec(StdOutCommandLineInputSpec):
input_file = File(
desc='input file',
exists=True,
mandatory=True,
argstr='%s',
position=-2,
)
output_file = File(
desc='output file',
position=-1,
name_source=['input_file'],
hash_files=False,
name_template='%s.raw',
keep_extension=False)
_xor_write = ('write_byte', 'write_short', 'write_int', 'write_long',
'write_float', 'write_double')
write_byte = traits.Bool(
desc='Write out data as bytes.', argstr='-byte', xor=_xor_write)
write_short = traits.Bool(
desc='Write out data as short integers.',
argstr='-short',
xor=_xor_write)
write_int = traits.Bool(
desc='Write out data as 32-bit integers.',
argstr='-int',
xor=_xor_write)
write_long = traits.Bool(
desc='Superseded by write_int.', argstr='-long', xor=_xor_write)
write_float = traits.Bool(
desc='Write out data as single precision floating-point values.',
argstr='-float',
xor=_xor_write)
write_double = traits.Bool(
desc='Write out data as double precision floating-point values.',
argstr='-double',
xor=_xor_write)
_xor_signed = ('write_signed', 'write_unsigned')
write_signed = traits.Bool(
desc='Write out signed data.', argstr='-signed', xor=_xor_signed)
write_unsigned = traits.Bool(
desc='Write out unsigned data.', argstr='-unsigned', xor=_xor_signed)
write_range = traits.Tuple(
traits.Float,
traits.Float,
argstr='-range %s %s',
desc=('Specify the range of output values. '
'Default value: 1.79769e+308 1.79769e+308.'),
)
_xor_normalize = (
'normalize',
'nonormalize',
)
normalize = traits.Bool(
desc='Normalize integer pixel values to file max and min.',
argstr='-normalize',
xor=_xor_normalize)
nonormalize = traits.Bool(
desc='Turn off pixel normalization.',
argstr='-nonormalize',
xor=_xor_normalize)
class ToRawOutputSpec(TraitedSpec):
output_file = File(desc='output file in raw format', exists=True)
class ToRaw(StdOutCommandLine):
"""Dump a chunk of MINC file data. This program is largely
superseded by mincextract (see Extract).
Examples
--------
>>> from nipype.interfaces.minc import ToRaw
>>> from nipype.interfaces.minc.testdata import minc2Dfile
>>> toraw = ToRaw(input_file=minc2Dfile)
>>> toraw.run() # doctest: +SKIP
>>> toraw = ToRaw(input_file=minc2Dfile, write_range=(0, 100))
>>> toraw.run() # doctest: +SKIP
"""
input_spec = ToRawInputSpec
output_spec = ToRawOutputSpec
_cmd = 'minctoraw'
class ConvertInputSpec(CommandLineInputSpec):
input_file = File(
desc='input file for converting',
exists=True,
mandatory=True,
argstr='%s',
position=-2,
)
output_file = File(
desc='output file',
genfile=True,
argstr='%s',
position=-1,
name_source=['input_file'],
hash_files=False,
name_template='%s_convert_output.mnc')
clobber = traits.Bool(
desc='Overwrite existing file.',
argstr='-clobber',
usedefault=True,
default_value=True)
two = traits.Bool(desc='Create a MINC 2 output file.', argstr='-2')
template = traits.Bool(
desc=
('Create a template file. The dimensions, variables, and '
'attributes of the input file are preserved but all data is set to zero.'
),
argstr='-template',
)
compression = traits.Enum(
0,
1,
2,
3,
4,
5,
6,
7,
8,
9,
argstr='-compress %s',
desc='Set the compression level, from 0 (disabled) to 9 (maximum).',
)
chunk = traits.Range(
low=0,
desc=
'Set the target block size for chunking (0 default, >1 block size).',
argstr='-chunk %d',
)
class ConvertOutputSpec(TraitedSpec):
output_file = File(desc='output file', exists=True)
class Convert(CommandLine):
"""convert between MINC 1 to MINC 2 format.
Examples
--------
>>> from nipype.interfaces.minc import Convert
>>> from nipype.interfaces.minc.testdata import minc2Dfile
>>> c = Convert(input_file=minc2Dfile, output_file='/tmp/out.mnc', two=True) # Convert to MINC2 format.
>>> c.run() # doctest: +SKIP
"""
input_spec = ConvertInputSpec
output_spec = ConvertOutputSpec
_cmd = 'mincconvert'
class CopyInputSpec(CommandLineInputSpec):
input_file = File(
desc='input file to copy',
exists=True,
mandatory=True,
argstr='%s',
position=-2,
)
output_file = File(
desc='output file',
genfile=True,
argstr='%s',
position=-1,
name_source=['input_file'],
hash_files=False,
name_template='%s_copy.mnc')
_xor_pixel = ('pixel_values', 'real_values')
pixel_values = traits.Bool(
desc='Copy pixel values as is.',
argstr='-pixel_values',
xor=_xor_pixel)
real_values = traits.Bool(
desc='Copy real pixel intensities (default).',
argstr='-real_values',
xor=_xor_pixel)
class CopyOutputSpec(TraitedSpec):
output_file = File(desc='output file', exists=True)
class Copy(CommandLine):
"""
Copy image values from one MINC file to another. Both the input
and output files must exist, and the images in both files must
have an equal number of dimensions and equal dimension lengths.
NOTE: This program is intended primarily for use with scripts
such as mincedit. It does not follow the typical design rules of
most MINC command-line tools and therefore should be used only
with caution.
"""
input_spec = CopyInputSpec
output_spec = CopyOutputSpec
_cmd = 'minccopy'
class ToEcatInputSpec(CommandLineInputSpec):
input_file = File(
desc='input file to convert',
exists=True,
mandatory=True,
argstr='%s',
position=-2,
)
output_file = File(
desc='output file',
genfile=True,
argstr='%s',
position=-1,
name_source=['input_file'],
hash_files=False,
name_template='%s_to_ecat.v',
keep_extension=False)
ignore_patient_variable = traits.Bool(
desc='Ignore information from the minc patient variable.',
argstr='-ignore_patient_variable',
)
ignore_study_variable = traits.Bool(
desc='Ignore information from the minc study variable.',
argstr='-ignore_study_variable',
)
ignore_acquisition_variable = traits.Bool(
desc='Ignore information from the minc acquisition variable.',
argstr='-ignore_acquisition_variable',
)
ignore_ecat_acquisition_variable = traits.Bool(
desc='Ignore information from the minc ecat_acquisition variable.',
argstr='-ignore_ecat_acquisition_variable',
)
ignore_ecat_main = traits.Bool(
desc='Ignore information from the minc ecat-main variable.',
argstr='-ignore_ecat_main',
)
ignore_ecat_subheader_variable = traits.Bool(
desc='Ignore information from the minc ecat-subhdr variable.',
argstr='-ignore_ecat_subheader_variable',
)
no_decay_corr_fctr = traits.Bool(
desc='Do not compute the decay correction factors',
argstr='-no_decay_corr_fctr',
)
voxels_as_integers = traits.Bool(
desc=('Voxel values are treated as integers; scale and '
'calibration factors are set to unity.'),
argstr='-label',
)
class ToEcatOutputSpec(TraitedSpec):
output_file = File(desc='output file', exists=True)
class ToEcat(CommandLine):
"""Convert a 2D image, a 3D volumes or a 4D dynamic volumes
written in MINC file format to a 2D, 3D or 4D Ecat7 file.
Examples
--------
>>> from nipype.interfaces.minc import ToEcat
>>> from nipype.interfaces.minc.testdata import minc2Dfile
>>> c = ToEcat(input_file=minc2Dfile)
>>> c.run() # doctest: +SKIP
>>> c = ToEcat(input_file=minc2Dfile, voxels_as_integers=True)
>>> c.run() # doctest: +SKIP
"""
input_spec = ToEcatInputSpec
output_spec = ToEcatOutputSpec
_cmd = 'minctoecat'
class DumpInputSpec(StdOutCommandLineInputSpec):
input_file = File(
desc='input file',
exists=True,
mandatory=True,
argstr='%s',
position=-2,
)
output_file = File(
desc='output file',
position=-1,
name_source=['input_file'],
hash_files=False,
name_template='%s_dump.txt',
keep_extension=False)
_xor_coords_or_header = (
'coordinate_data',
'header_data',
)
coordinate_data = traits.Bool(
desc='Coordinate variable data and header information.',
argstr='-c',
xor=_xor_coords_or_header)
header_data = traits.Bool(
desc='Header information only, no data.',
argstr='-h',
xor=_xor_coords_or_header)
_xor_annotations = (
'annotations_brief',
'annotations_full',
)
annotations_brief = traits.Enum(
'c',
'f',
argstr='-b %s',
desc='Brief annotations for C or Fortran indices in data.',
xor=_xor_annotations)
annotations_full = traits.Enum(
'c',
'f',
argstr='-f %s',
desc='Full annotations for C or Fortran indices in data.',
xor=_xor_annotations)
variables = InputMultiPath(
traits.Str,
desc='Output data for specified variables only.',
sep=',',
argstr='-v %s')
line_length = traits.Range(
low=0,
desc='Line length maximum in data section (default 80).',
argstr='-l %d')
netcdf_name = traits.Str(
desc='Name for netCDF (default derived from file name).',
argstr='-n %s')
precision = traits.Either(
traits.Int(),
traits.Tuple(traits.Int, traits.Int),
desc='Display floating-point values with less precision',
argstr='%s',
) # See _format_arg in Dump for actual formatting.
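# A brief illustration of that mapping (values are hypothetical):
# precision=3 is rendered as '-p 3', while precision=(3, 4) is rendered
# as '-p 3,4'.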
class DumpOutputSpec(TraitedSpec):
output_file = File(desc='output file', exists=True)
class Dump(StdOutCommandLine):
"""Dump a MINC file. Typically used in conjunction with mincgen (see Gen).
Examples
--------
>>> from nipype.interfaces.minc import Dump
>>> from nipype.interfaces.minc.testdata import minc2Dfile
>>> dump = Dump(input_file=minc2Dfile)
>>> dump.run() # doctest: +SKIP
>>> dump = Dump(input_file=minc2Dfile, output_file='/tmp/out.txt', precision=(3, 4))
>>> dump.run() # doctest: +SKIP
"""
input_spec = DumpInputSpec
output_spec = DumpOutputSpec
_cmd = 'mincdump'
def _format_arg(self, name, spec, value):
if name == 'precision':
if isinstance(value, int):
return '-p %d' % value
elif isinstance(value, tuple) and isinstance(
value[0], int) and isinstance(value[1], int):
return '-p %d,%d' % (
value[0],
value[1],
)
else:
raise ValueError('Invalid precision argument: ' + str(value))
return super(Dump, self)._format_arg(name, spec, value)
class AverageInputSpec(CommandLineInputSpec):
_xor_input_files = ('input_files', 'filelist')
input_files = InputMultiPath(
File(exists=True),
desc='input file(s)',
mandatory=True,
sep=' ',
argstr='%s',
position=-2,
xor=_xor_input_files)
filelist = File(
desc='Specify the name of a file containing input file names.',
argstr='-filelist %s',
exists=True,
mandatory=True,
xor=_xor_input_files)
output_file = File(
desc='output file',
genfile=True,
argstr='%s',
position=-1,
name_source=['input_files'],
hash_files=False,
name_template='%s_averaged.mnc')
two = traits.Bool(desc='Create a MINC 2 output file.', argstr='-2')
clobber = traits.Bool(
desc='Overwrite existing file.',
argstr='-clobber',
usedefault=True,
default_value=True)
_xor_verbose = (
'verbose',
'quiet',
)
verbose = traits.Bool(
desc='Print out log messages (default).',
argstr='-verbose',
xor=_xor_verbose)
quiet = traits.Bool(
desc='Do not print out log messages.',
argstr='-quiet',
xor=_xor_verbose)
debug = traits.Bool(desc='Print out debugging messages.', argstr='-debug')
_xor_check_dimensions = (
'check_dimensions',
'no_check_dimensions',
)
check_dimensions = traits.Bool(
desc='Check that dimension info matches across files (default).',
argstr='-check_dimensions',
xor=_xor_check_dimensions)
no_check_dimensions = traits.Bool(
desc='Do not check dimension info.',
argstr='-nocheck_dimensions',
xor=_xor_check_dimensions)
_xor_format = (
'format_filetype',
'format_byte',
'format_short',
'format_int',
'format_long',
'format_float',
'format_double',
'format_signed',
'format_unsigned',
)
format_filetype = traits.Bool(
desc='Use data type of first file (default).',
argstr='-filetype',
xor=_xor_format)
format_byte = traits.Bool(
desc='Write out byte data.', argstr='-byte', xor=_xor_format)
format_short = traits.Bool(
desc='Write out short integer data.', argstr='-short', xor=_xor_format)
format_int = traits.Bool(
desc='Write out 32-bit integer data.', argstr='-int', xor=_xor_format)
format_long = traits.Bool(
desc='Superseded by -int.', argstr='-long', xor=_xor_format)
format_float = traits.Bool(
desc='Write out single-precision floating-point data.',
argstr='-float',
xor=_xor_format)
format_double = traits.Bool(
desc='Write out double-precision floating-point data.',
argstr='-double',
xor=_xor_format)
format_signed = traits.Bool(
desc='Write signed integer data.', argstr='-signed', xor=_xor_format)
format_unsigned = traits.Bool(
desc='Write unsigned integer data (default).',
argstr='-unsigned',
xor=_xor_format)
max_buffer_size_in_kb = traits.Range(
low=0,
desc='Specify the maximum size of the internal buffers (in kbytes).',
value=4096,
usedefault=True,
argstr='-max_buffer_size_in_kb %d',
)
_xor_normalize = (
'normalize',
'nonormalize',
)
normalize = traits.Bool(
desc='Normalize data sets for mean intensity.',
argstr='-normalize',
xor=_xor_normalize)
nonormalize = traits.Bool(
desc='Do not normalize data sets (default).',
argstr='-nonormalize',
xor=_xor_normalize)
voxel_range = traits.Tuple(
traits.Int,
traits.Int,
argstr='-range %d %d',
desc='Valid range for output data.')
sdfile = File(
desc='Specify an output sd file (default=none).', argstr='-sdfile %s')
_xor_copy_header = ('copy_header', 'no_copy_header')
copy_header = traits.Bool(
desc=
'Copy all of the header from the first file (default for one file).',
argstr='-copy_header',
xor=_xor_copy_header)
no_copy_header = traits.Bool(
desc=
'Do not copy all of the header from the first file (default for many files).',
argstr='-nocopy_header',
xor=_xor_copy_header)
avgdim = traits.Str(
desc='Specify a dimension along which we wish to average.',
argstr='-avgdim %s')
binarize = traits.Bool(
desc='Binarize the volume by looking for values in a given range.',
argstr='-binarize')
binrange = traits.Tuple(
traits.Float,
traits.Float,
argstr='-binrange %s %s',
desc=
'Specify a range for binarization. Default value: 1.79769e+308 -1.79769e+308.'
)
binvalue = traits.Float(
desc=('Specify a target value (+/- 0.5) for '
'binarization. Default value: -1.79769e+308'),
argstr='-binvalue %s')
weights = InputMultiPath(
traits.Str,
desc='Specify weights for averaging ("<w1>,<w2>,...").',
sep=',',
argstr='-weights %s',
)
width_weighted = traits.Bool(
desc='Weight by dimension widths when -avgdim is used.',
argstr='-width_weighted',
requires=('avgdim', ))
class AverageOutputSpec(TraitedSpec):
output_file = File(desc='output file', exists=True)
class Average(CommandLine):
"""Average a number of MINC files.
Examples
--------
>>> from nipype.interfaces.minc import Average
>>> from nipype.interfaces.minc.testdata import nonempty_minc_data
>>> files = [nonempty_minc_data(i) for i in range(3)]
>>> average = Average(input_files=files, output_file='/tmp/tmp.mnc')
>>> average.run() # doctest: +SKIP
"""
input_spec = AverageInputSpec
output_spec = AverageOutputSpec
_cmd = 'mincaverage'
class BlobInputSpec(CommandLineInputSpec):
input_file = File(
desc='input file to blob',
exists=True,
mandatory=True,
argstr='%s',
position=-2,
)
output_file = File(
desc='output file',
genfile=True,
argstr='%s',
position=-1,
name_source=['input_file'],
hash_files=False,
name_template='%s_blob.mnc')
trace = traits.Bool(
desc='compute the trace (approximate growth and shrinkage) -- FAST',
argstr='-trace')
determinant = traits.Bool(
desc='compute the determinant (exact growth and shrinkage) -- SLOW',
argstr='-determinant')
translation = traits.Bool(
desc='compute translation (structure displacement)',
argstr='-translation')
magnitude = traits.Bool(
desc='compute the magnitude of the displacement vector',
argstr='-magnitude')
class BlobOutputSpec(TraitedSpec):
output_file = File(desc='output file', exists=True)
class Blob(CommandLine):
"""Calculate blobs from minc deformation grids.
Examples
--------
>>> from nipype.interfaces.minc import Blob
>>> from nipype.interfaces.minc.testdata import minc2Dfile
>>> blob = Blob(input_file=minc2Dfile, output_file='/tmp/tmp.mnc', trace=True)
>>> blob.run() # doctest: +SKIP
"""
input_spec = BlobInputSpec
output_spec = BlobOutputSpec
_cmd = 'mincblob'
class CalcInputSpec(CommandLineInputSpec):
_xor_input_files = ('input_files', 'filelist')
input_files = InputMultiPath(
File(exists=True),
desc='input file(s) for calculation',
mandatory=True,
sep=' ',
argstr='%s',
position=-2,
)
output_file = File(
desc='output file',
genfile=True,
argstr='%s',
position=-1,
name_source=['input_files'],
hash_files=False,
name_template='%s_calc.mnc')
two = traits.Bool(desc='Create a MINC 2 output file.', argstr='-2')
clobber = traits.Bool(
desc='Overwrite existing file.',
argstr='-clobber',
usedefault=True,
default_value=True)
_xor_verbose = (
'verbose',
'quiet',
)
verbose = traits.Bool(
desc='Print out log messages (default).',
argstr='-verbose',
xor=_xor_verbose)
quiet = traits.Bool(
desc='Do not print out log messages.',
argstr='-quiet',
xor=_xor_verbose)
debug = traits.Bool(desc='Print out debugging messages.', argstr='-debug')
filelist = File(
desc='Specify the name of a file containing input file names.',
argstr='-filelist %s',
mandatory=True,
xor=_xor_input_files)
_xor_copy_header = ('copy_header', 'no_copy_header')
copy_header = traits.Bool(
desc='Copy all of the header from the first file.',
argstr='-copy_header',
xor=_xor_copy_header)
no_copy_header = traits.Bool(
desc='Do not copy all of the header from the first file.',
argstr='-nocopy_header',
xor=_xor_copy_header)
_xor_format = (
'format_filetype',
'format_byte',
'format_short',
'format_int',
'format_long',
'format_float',
'format_double',
'format_signed',
'format_unsigned',
)
format_filetype = traits.Bool(
desc='Use data type of first file (default).',
argstr='-filetype',
xor=_xor_format)
format_byte = traits.Bool(
desc='Write out byte data.', argstr='-byte', xor=_xor_format)
format_short = traits.Bool(
desc='Write out short integer data.', argstr='-short', xor=_xor_format)
format_int = traits.Bool(
desc='Write out 32-bit integer data.', argstr='-int', xor=_xor_format)
format_long = traits.Bool(
desc='Superseded by -int.', argstr='-long', xor=_xor_format)
format_float = traits.Bool(
desc='Write out single-precision floating-point data.',
argstr='-float',
xor=_xor_format)
format_double = traits.Bool(
desc='Write out double-precision floating-point data.',
argstr='-double',
xor=_xor_format)
format_signed = traits.Bool(
desc='Write signed integer data.', argstr='-signed', xor=_xor_format)
format_unsigned = traits.Bool(
desc='Write unsigned integer data (default).',
argstr='-unsigned',
xor=_xor_format)
voxel_range = traits.Tuple(
traits.Int,
traits.Int,
argstr='-range %d %d',
desc='Valid range for output data.',
)
max_buffer_size_in_kb = traits.Range(
low=0,
desc='Specify the maximum size of the internal buffers (in kbytes).',
argstr='-max_buffer_size_in_kb %d')
_xor_check_dimensions = (
'check_dimensions',
'no_check_dimensions',
)
check_dimensions = traits.Bool(
desc='Check that files have matching dimensions (default).',
argstr='-check_dimensions',
xor=_xor_check_dimensions)
no_check_dimensions = traits.Bool(
desc='Do not check that files have matching dimensions.',
argstr='-nocheck_dimensions',
xor=_xor_check_dimensions)
# FIXME Is it sensible to use ignore_nan and propagate_nan at the same
# time? Document this.
ignore_nan = traits.Bool(
desc='Ignore invalid data (NaN) for accumulations.',
argstr='-ignore_nan')
propagate_nan = traits.Bool(
desc='Invalid data in any file at a voxel produces a NaN (default).',
argstr='-propagate_nan')
# FIXME Double-check that these are mutually exclusive?
_xor_nan_zero_illegal = ('output_nan', 'output_zero',
'output_illegal_value')
output_nan = traits.Bool(
desc='Output NaN when an illegal operation is done (default).',
argstr='-nan',
xor=_xor_nan_zero_illegal)
output_zero = traits.Bool(
desc='Output zero when an illegal operation is done.',
argstr='-zero',
xor=_xor_nan_zero_illegal)
output_illegal = traits.Bool(
desc=
'Value to write out when an illegal operation is done. Default value: 1.79769e+308',
argstr='-illegal_value',
xor=_xor_nan_zero_illegal)
_xor_expression = ('expression', 'expfile')
expression = traits.Str(
desc='Expression to use in calculations.',
argstr='-expression \'%s\'',
xor=_xor_expression,
mandatory=True)
expfile = File(
desc='Name of file containing expression.',
argstr='-expfile %s',
xor=_xor_expression,
mandatory=True)
# FIXME test this one, the argstr will probably need tweaking, see
# _format_arg.
outfiles = traits.List(
traits.Tuple(
traits.Str,
File,
argstr='-outfile %s %s',
desc=
('List of (symbol, file) tuples indicating that output should be written '
'to the specified file, taking values from the symbol which should be '
'created in the expression (see the EXAMPLES section). If this option '
'is given, then all non-option arguments are taken as input files. '
'This option can be used multiple times for multiple output files.'
)))
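# Illustrative only, since this trait is untested (see FIXME above): a value
# such as outfiles=[('vol1', 'out1.mnc')] is intended to render as
# '-outfile vol1 out1.mnc'; the symbol and file names here are hypothetical.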
eval_width = traits.Int(
desc='Number of voxels to evaluate simultaneously.',
argstr='-eval_width %s')
class CalcOutputSpec(TraitedSpec):
output_file = File(desc='output file', exists=True)
class Calc(CommandLine):
"""Compute an expression using MINC files as input.
Examples
--------
>>> from nipype.interfaces.minc import Calc
>>> from nipype.interfaces.minc.testdata import nonempty_minc_data
>>> file0 = nonempty_minc_data(0)
>>> file1 = nonempty_minc_data(1)
>>> calc = Calc(input_files=[file0, file1], output_file='/tmp/calc.mnc', expression='A[0] + A[1]') # add files together
>>> calc.run() # doctest: +SKIP
"""
input_spec = CalcInputSpec
output_spec = CalcOutputSpec
_cmd = 'minccalc'
# FIXME mincbbox produces output like
#
# -5.000000 -5.000000 -5.000000 4.800000 2.800000 8.800000
#
# so perhaps this would be better returned as a pair of Python
# lists instead of sending to an output file?
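# A minimal parsing sketch for that idea, assuming the default one-line
# output format and a hypothetical output path:
#     with open('out_bbox.txt') as f:
#         values = [float(v) for v in f.read().split()]
#     start, extent = values[:3], values[3:6]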
class BBoxInputSpec(StdOutCommandLineInputSpec):
input_file = File(
desc='input file',
exists=True,
mandatory=True,
argstr='%s',
position=-2,
)
output_file = File(
desc='output file containing bounding box corners',
position=-1,
name_source=['input_file'],
hash_files=False,
name_template='%s_bbox.txt',
keep_extension=False)
threshold = traits.Int(
0,
desc='VIO_Real value threshold for bounding box. Default value: 0.',
argstr='-threshold')
_xor_one_two = ('one_line', 'two_lines')
one_line = traits.Bool(
desc='Output on one line (default): start_x y z width_x y z',
argstr='-one_line',
xor=_xor_one_two)
two_lines = traits.Bool(
desc='Output on two lines: start_x y z \n width_x y z',
argstr='-two_lines',
xor=_xor_one_two)
format_mincresample = traits.Bool(
desc=
'Output format for mincresample: (-step x y z -start x y z -nelements x y z)',
argstr='-mincresample')
format_mincreshape = traits.Bool(
desc='Output format for mincreshape: (-start x,y,z -count dx,dy,dz)',
argstr='-mincreshape')
format_minccrop = traits.Bool(
desc='Output format for minccrop: (-xlim x1 x2 -ylim y1 y2 -zlim z1 z2)',
argstr='-minccrop')
# FIXME Not implemented, will clash with our parsing of the output?
# Command-specific options:
# Options for logging progress. Default = -verbose.
# -verbose: Write messages indicating progress
# -quiet: Do not write log messages
# -debug: Print out debug info.
class BBoxOutputSpec(TraitedSpec):
output_file = File(
desc='output file containing bounding box corners', exists=True)
class BBox(StdOutCommandLine):
"""Determine a bounding box of image.
Examples
--------
>>> from nipype.interfaces.minc import BBox
>>> from nipype.interfaces.minc.testdata import nonempty_minc_data
>>> file0 = nonempty_minc_data(0)
>>> bbox = BBox(input_file=file0)
>>> bbox.run() # doctest: +SKIP
"""
input_spec = BBoxInputSpec
output_spec = BBoxOutputSpec
_cmd = 'mincbbox'
class BeastInputSpec(CommandLineInputSpec):
"""
TODO:
Command-specific options:
-verbose: Enable verbose output.
-positive: Specify mask of positive segmentation (inside mask) instead of the default mask.
-output_selection: Specify file to output selected files.
-count: Specify file to output the patch count.
-mask: Specify a segmentation mask instead of the default mask.
-no_mask: Do not apply a segmentation mask. Perform the segmentation over the entire image.
-no_positive: Do not apply a positive mask.
Generic options for all commands:
-help: Print summary of command-line options and abort
-version: Print version number of program and exit
Copyright (C) 2011 Simon Fristed Eskildsen, Vladimir Fonov,
Pierrick Coupe, Jose V. Manjon
This program comes with ABSOLUTELY NO WARRANTY; for details type 'cat COPYING'.
This is free software, and you are welcome to redistribute it under certain
conditions; type 'cat COPYING' for details.
Usage: mincbeast [options] <library dir> <input> <output>
mincbeast -help
Get this example to work?
https://github.com/BIC-MNI/BEaST/blob/master/README.library
2.3 Source the minc-toolkit (if installed):
$ source /opt/minc/minc-toolkit-config.sh
2.4 Generate library by running:
$ beast_prepareADNIlib -flip <ADNI download directory> <BEaST library directory>
Example:
$ sudo beast_prepareADNIlib -flip Downloads/ADNI /opt/minc/share/beast-library-1.1
3. Test the setup
3.1 Normalize your data
$ beast_normalize -modeldir /opt/minc/share/icbm152_model_09c input.mnc normal.mnc normal.xfm
3.2 Run BEaST
$ mincbeast /opt/minc/share/beast-library-1.1 normal.mnc brainmask.mnc -conf /opt/minc/share/beast-library-1.1/default.2mm.conf -same_res
"""
probability_map = traits.Bool(
desc='Output the probability map instead of crisp mask.',
argstr='-probability')
flip_images = traits.Bool(
desc=
'Flip images around the mid-sagittal plane to increase patch count.',
argstr='-flip')
load_moments = traits.Bool(
desc=('Do not calculate moments; instead use precalculated '
'library moments (for optimization purposes).'),
argstr='-load_moments')
fill_holes = traits.Bool(
desc='Fill holes in the binary output.', argstr='-fill')
median_filter = traits.Bool(
desc='Apply a median filter on the probability map.', argstr='-median')
nlm_filter = traits.Bool(
desc='Apply an NLM filter on the probability map (experimental).',
argstr='-nlm_filter')
clobber = traits.Bool(
desc='Overwrite existing file.',
argstr='-clobber',
usedefault=True,
default_value=True)
configuration_file = File(
desc='Specify configuration file.', argstr='-configuration %s')
voxel_size = traits.Int(
4, usedefault=True,
desc=('Specify voxel size for calculations (4, 2, or 1). '
'Default value: 4. Assumes no multiscale. Use configuration '
'file for multiscale.'),
argstr='-voxel_size %s')
abspath = traits.Bool(
desc=
'File paths in the library are absolute (default is relative to library root).',
argstr='-abspath',
usedefault=True,
default_value=True)
patch_size = traits.Int(
1, usedefault=True,
desc='Specify patch size for single scale approach. Default value: 1.',
argstr='-patch_size %s')
search_area = traits.Int(
2, usedefault=True,
desc=
'Specify size of search area for single scale approach. Default value: 2.',
argstr='-search_area %s')
confidence_level_alpha = traits.Float(
0.5, usedefault=True,
desc='Specify confidence level Alpha. Default value: 0.5',
argstr='-alpha %s')
smoothness_factor_beta = traits.Float(
0.5, usedefault=True,
desc='Specify smoothness factor Beta. Default value: 0.25',
argstr='-beta %s')
threshold_patch_selection = traits.Float(
0.95, usedefault=True,
desc='Specify threshold for patch selection. Default value: 0.95',
argstr='-threshold %s')
number_selected_images = traits.Int(
20, usedefault=True,
desc='Specify number of selected images. Default value: 20',
argstr='-selection_num %s')
same_resolution = traits.Bool(
desc='Output final mask with the same resolution as input file.',
argstr='-same_resolution')
library_dir = Directory(
desc='library directory', position=-3, argstr='%s', mandatory=True)
input_file = File(
desc='input file', position=-2, argstr='%s', mandatory=True)
output_file = File(
desc='output file',
position=-1,
argstr='%s',
name_source=['input_file'],
hash_files=False,
name_template='%s_beast_mask.mnc')
class BeastOutputSpec(TraitedSpec):
output_file = File(desc='output mask file', exists=True)
class Beast(CommandLine):
"""Extract brain image using BEaST (Brain Extraction using
non-local Segmentation Technique).
Examples
--------
>>> from nipype.interfaces.minc import Beast
>>> from nipype.interfaces.minc.testdata import nonempty_minc_data
>>> file0 = nonempty_minc_data(0)
>>> beast = Beast(input_file=file0)
>>> beast.run() # doctest: +SKIP
"""
input_spec = BeastInputSpec
output_spec = BeastOutputSpec
_cmd = 'mincbeast'
class PikInputSpec(CommandLineInputSpec):
input_file = File(
desc='input file',
exists=True,
mandatory=True,
argstr='%s',
position=-2,
)
_xor_image_type = ('jpg', 'png')
jpg = traits.Bool(desc='Output a jpg file.', xor=_xor_image_type)
png = traits.Bool(desc='Output a png file (default).', xor=_xor_image_type)
output_file = File(
desc='output file',
argstr='%s',
genfile=True,
position=-1,
name_source=['input_file'],
hash_files=False,
name_template='%s.png',
keep_extension=False)
clobber = traits.Bool(
desc='Overwrite existing file.',
argstr='-clobber',
usedefault=True,
default_value=True)
# FIXME not implemented: --verbose
# --fake
# --lookup ==> arguments to pass to minclookup
scale = traits.Int(
2, usedefault=True,
desc=('Scaling factor for resulting image. By default images are '
'output at twice their original resolution.'),
argstr='--scale %s')
width = traits.Int(
desc=
'Autoscale the resulting image to have a fixed image width (in pixels).',
argstr='--width %s')
depth = traits.Enum(
8,
16,
desc='Bitdepth for resulting image 8 or 16 (MSB machines only!)',
argstr='--depth %s')
_xor_title = ('title_string', 'title_with_filename')
title = traits.Either(
traits.Bool(desc='Use input filename as title in resulting image.'),
traits.Str(desc='Add a title to the resulting image.'),
argstr='%s') # see _format_arg for actual arg string
title_size = traits.Int(
desc='Font point size for the title.',
argstr='--title_size %s',
requires=['title'])
annotated_bar = traits.Bool(
desc=
'create an annotated bar to match the image (use height of the output image)',
argstr='--anot_bar')
# FIXME tuple of floats? Not voxel values? Man page doesn't specify.
minc_range = traits.Tuple(
traits.Float,
traits.Float,
desc='Valid range of values for MINC file.',
argstr='--range %s %s')
_xor_image_range = ('image_range', 'auto_range')
image_range = traits.Tuple(
traits.Float,
traits.Float,
desc='Range of image values to use for pixel intensity.',
argstr='--image_range %s %s',
xor=_xor_image_range)
auto_range = traits.Bool(
desc=
'Automatically determine image range using the 5% and 95% histogram percentiles.',
argstr='--auto_range',
xor=_xor_image_range)
start = traits.Int(
desc='Slice number to get. (note this is in voxel co-ordinates).',
argstr='--slice %s') # FIXME Int is correct?
_xor_slice = ('slice_z', 'slice_y', 'slice_x')
slice_z = traits.Bool(
desc='Get an axial/transverse (z) slice.', argstr='-z', xor=_xor_slice)
slice_y = traits.Bool(
desc='Get a coronal (y) slice.', argstr='-y', xor=_xor_slice)
slice_x = traits.Bool(
desc='Get a sagittal (x) slice.', argstr='-x',
xor=_xor_slice) # FIXME typo in man page? sagital?
triplanar = traits.Bool(
desc='Create a triplanar view of the input file.',
argstr='--triplanar')
tile_size = traits.Int(
desc='Pixel size for each image in a triplanar.',
argstr='--tilesize %s')
_xor_sagittal_offset = ('sagittal_offset', 'sagittal_offset_perc')
sagittal_offset = traits.Int(
desc='Offset the sagittal slice from the centre.',
argstr='--sagittal_offset %s')
sagittal_offset_perc = traits.Range(
low=0,
high=100,
desc='Offset the sagittal slice by a percentage from the centre.',
argstr='--sagittal_offset_perc %d',
)
_xor_vertical_horizontal = ('vertical_triplanar_view',
'horizontal_triplanar_view')
vertical_triplanar_view = traits.Bool(
desc='Create a vertical triplanar view (Default).',
argstr='--vertical',
xor=_xor_vertical_horizontal)
horizontal_triplanar_view = traits.Bool(
desc='Create a horizontal triplanar view.',
argstr='--horizontal',
xor=_xor_vertical_horizontal)
lookup = traits.Str(
desc='Arguments to pass to minclookup', argstr='--lookup %s')
class PikOutputSpec(TraitedSpec):
output_file = File(desc='output image', exists=True)
class Pik(CommandLine):
"""Generate images from minc files.
Mincpik uses Imagemagick to generate images
from Minc files.
Examples
--------
>>> from nipype.interfaces.minc import Pik
>>> from nipype.interfaces.minc.testdata import nonempty_minc_data
>>> file0 = nonempty_minc_data(0)
>>> pik = Pik(input_file=file0, title='foo')
>>> pik.run() # doctest: +SKIP
"""
input_spec = PikInputSpec
output_spec = PikOutputSpec
_cmd = 'mincpik'
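# The 'title' trait accepts either a bool or a string; _format_arg below
# renders it accordingly. For example (hypothetical title text): title=True
# becomes '--title', while title='subject 01' becomes
# '--title --title_text subject 01'.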
def _format_arg(self, name, spec, value):
if name == 'title':
if isinstance(value, bool) and value:
return '--title'
elif isinstance(value, str):
return '--title --title_text %s' % (value, )
else:
raise ValueError(
'Unknown value for "title" argument: ' + str(value))
return super(Pik, self)._format_arg(name, spec, value)
class BlurInputSpec(CommandLineInputSpec):
input_file = File(
desc='input file',
exists=True,
mandatory=True,
argstr='%s',
position=-2,
)
output_file_base = File(desc='output file base', argstr='%s', position=-1)
clobber = traits.Bool(
desc='Overwrite existing file.',
argstr='-clobber',
usedefault=True,
default_value=True)
_xor_kernel = ('gaussian', 'rect')
gaussian = traits.Bool(
desc='Use a gaussian smoothing kernel (default).',
argstr='-gaussian',
xor=_xor_kernel)
rect = traits.Bool(
desc='Use a rect (box) smoothing kernel.',
argstr='-rect',
xor=_xor_kernel)
gradient = traits.Bool(
desc='Create the gradient magnitude volume as well.',
argstr='-gradient')
partial = traits.Bool(
desc=
'Create the partial derivative and gradient magnitude volumes as well.',
argstr='-partial')
no_apodize = traits.Bool(
desc='Do not apodize the data before blurring.', argstr='-no_apodize')
_xor_main_options = ('fwhm', 'fwhm3d', 'standard_dev')
fwhm = traits.Float(
0,
desc='Full-width-half-maximum of gaussian kernel. Default value: 0.',
argstr='-fwhm %s',
xor=_xor_main_options,
mandatory=True)
standard_dev = traits.Float(
0,
desc='Standard deviation of gaussian kernel. Default value: 0.',
argstr='-standarddev %s',
xor=_xor_main_options,
mandatory=True)
fwhm3d = traits.Tuple(
traits.Float,
traits.Float,
traits.Float,
argstr='-3dfwhm %s %s %s',
desc=('Full-width-half-maximum of gaussian kernel. '
'Default value: -1.79769e+308 -1.79769e+308 -1.79769e+308.'),
xor=_xor_main_options,
mandatory=True)
dimensions = traits.Enum(
3,
1,
2,
desc=
'Number of dimensions to blur (either 1,2 or 3). Default value: 3.',
argstr='-dimensions %s')
class BlurOutputSpec(TraitedSpec):
output_file = File(desc='Blurred output file.', exists=True)
gradient_dxyz = File(desc='Gradient dxyz.')
partial_dx = File(desc='Partial gradient dx.')
partial_dy = File(desc='Partial gradient dy.')
partial_dz = File(desc='Partial gradient dz.')
partial_dxyz = File(desc='Partial gradient dxyz.')
class Blur(StdOutCommandLine):
"""
Convolve an input volume with a Gaussian blurring kernel of
user-defined width. Optionally, the first partial derivatives
and the gradient magnitude volume can be calculated.
Examples
--------
>>> from nipype.interfaces.minc import Blur
>>> from nipype.interfaces.minc.testdata import minc3Dfile
(1) Blur an input volume with a 6mm fwhm isotropic Gaussian
blurring kernel:
>>> blur = Blur(input_file=minc3Dfile, fwhm=6, output_file_base='/tmp/out_6')
>>> blur.run() # doctest: +SKIP
mincblur will create /tmp/out_6_blur.mnc.
(2) Calculate the blurred and gradient magnitude data:
>>> blur = Blur(input_file=minc3Dfile, fwhm=6, gradient=True, output_file_base='/tmp/out_6')
>>> blur.run() # doctest: +SKIP
will create /tmp/out_6_blur.mnc and /tmp/out_6_dxyz.mnc.
(3) Calculate the blurred data, the partial derivative volumes
and the gradient magnitude for the same data:
>>> blur = Blur(input_file=minc3Dfile, fwhm=6, partial=True, output_file_base='/tmp/out_6')
>>> blur.run() # doctest: +SKIP
will create /tmp/out_6_blur.mnc, /tmp/out_6_dx.mnc,
/tmp/out_6_dy.mnc, /tmp/out_6_dz.mnc and /tmp/out_6_dxyz.mnc.
"""
input_spec = BlurInputSpec
output_spec = BlurOutputSpec
_cmd = 'mincblur'
def _gen_output_base(self):
output_file_base = self.inputs.output_file_base
if isdefined(output_file_base):
return output_file_base
else:
base_file_name = os.path.split(
self.inputs.input_file)[1] # e.g. 'foo.mnc'
base_file_name_no_ext = os.path.splitext(base_file_name)[
0] # e.g. 'foo'
output_base = os.path.join(
os.getcwd(), base_file_name_no_ext +
'_bluroutput') # e.g. '/tmp/blah/foo_bluroutput'
# return os.path.splitext(self.inputs.input_file)[0] +
# '_bluroutput'
return output_base
def _list_outputs(self):
outputs = self.output_spec().get()
output_file_base = self._gen_output_base()
outputs['output_file'] = output_file_base + '_blur.mnc'
if isdefined(self.inputs.gradient):
outputs['gradient_dxyz'] = output_file_base + '_dxyz.mnc'
if isdefined(self.inputs.partial):
outputs['partial_dx'] = output_file_base + '_dx.mnc'
outputs['partial_dy'] = output_file_base + '_dy.mnc'
outputs['partial_dz'] = output_file_base + '_dz.mnc'
outputs['partial_dxyz'] = output_file_base + '_dxyz.mnc'
return outputs
@property
def cmdline(self):
output_file_base = self.inputs.output_file_base
orig_cmdline = super(Blur, self).cmdline
if isdefined(output_file_base):
return orig_cmdline
else:
# FIXME this seems like a bit of a hack. Can we force output_file
# to show up in cmdline by default, even if it isn't specified in
# the instantiation of Pik?
return '%s %s' % (orig_cmdline, self._gen_output_base())
class MathInputSpec(CommandLineInputSpec):
_xor_input_files = ('input_files', 'filelist')
input_files = InputMultiPath(
File(exists=True),
desc='input file(s) for calculation',
mandatory=True,
sep=' ',
argstr='%s',
position=-2,
xor=_xor_input_files)
output_file = File(
desc='output file',
argstr='%s',
genfile=True,
position=-1,
name_source=['input_files'],
hash_files=False,
name_template='%s_mincmath.mnc')
filelist = File(
desc='Specify the name of a file containing input file names.',
argstr='-filelist %s',
exists=True,
mandatory=True,
xor=_xor_input_files)
clobber = traits.Bool(
desc='Overwrite existing file.',
argstr='-clobber',
usedefault=True,
default_value=True)
two = traits.Bool(desc='Create a MINC 2 output file.', argstr='-2')
_xor_copy_header = ('copy_header', 'no_copy_header')
copy_header = traits.Bool(
desc=
'Copy all of the header from the first file (default for one file).',
argstr='-copy_header',
xor=_xor_copy_header)
no_copy_header = traits.Bool(
desc=
'Do not copy all of the header from the first file (default for many files).',
argstr='-nocopy_header',
xor=_xor_copy_header)
_xor_format = (
'format_filetype',
'format_byte',
'format_short',
'format_int',
'format_long',
'format_float',
'format_double',
'format_signed',
'format_unsigned',
)
format_filetype = traits.Bool(
desc='Use data type of first file (default).',
argstr='-filetype',
xor=_xor_format)
format_byte = traits.Bool(
desc='Write out byte data.', argstr='-byte', xor=_xor_format)
format_short = traits.Bool(
desc='Write out short integer data.', argstr='-short', xor=_xor_format)
format_int = traits.Bool(
desc='Write out 32-bit integer data.', argstr='-int', xor=_xor_format)
format_long = traits.Bool(
desc='Superseded by -int.', argstr='-long', xor=_xor_format)
format_float = traits.Bool(
desc='Write out single-precision floating-point data.',
argstr='-float',
xor=_xor_format)
format_double = traits.Bool(
desc='Write out double-precision floating-point data.',
argstr='-double',
xor=_xor_format)
format_signed = traits.Bool(
desc='Write signed integer data.', argstr='-signed', xor=_xor_format)
format_unsigned = traits.Bool(
desc='Write unsigned integer data (default).',
argstr='-unsigned',
xor=_xor_format)
voxel_range = traits.Tuple(
traits.Int,
traits.Int,
argstr='-range %d %d',
desc='Valid range for output data.')
max_buffer_size_in_kb = traits.Range(
low=0,
desc='Specify the maximum size of the internal buffers (in kbytes).',
value=4096,
usedefault=True,
argstr='-max_buffer_size_in_kb %d',
)
_xor_check_dimensions = (
'check_dimensions',
'no_check_dimensions',
)
check_dimensions = traits.Bool(
desc='Check that dimension info matches across files (default).',
argstr='-check_dimensions',
xor=_xor_check_dimensions)
no_check_dimensions = traits.Bool(
desc='Do not check dimension info.',
argstr='-nocheck_dimensions',
xor=_xor_check_dimensions)
dimension = traits.Str(
desc=
'Specify a dimension along which we wish to perform a calculation.',
argstr='-dimension %s')
# FIXME Is it sensible to use ignore_nan and propagate_nan at the same
# time? Document this.
ignore_nan = traits.Bool(
desc='Ignore invalid data (NaN) for accumulations.',
argstr='-ignore_nan')
propagate_nan = traits.Bool(
desc='Invalid data in any file at a voxel produces a NaN (default).',
argstr='-propagate_nan')
# FIXME Double-check that these are mutually exclusive?
_xor_nan_zero_illegal = ('output_nan', 'output_zero',
'output_illegal_value')
output_nan = traits.Bool(
desc='Output NaN when an illegal operation is done (default).',
argstr='-nan',
xor=_xor_nan_zero_illegal)
output_zero = traits.Bool(
desc='Output zero when an illegal operation is done.',
argstr='-zero',
xor=_xor_nan_zero_illegal)
output_illegal = traits.Bool(
desc=('Value to write out when an illegal operation '
'is done. Default value: 1.79769e+308'),
argstr='-illegal_value',
xor=_xor_nan_zero_illegal)
# FIXME A whole bunch of the parameters will be mutually exclusive, e.g. surely can't do sqrt and abs at the same time?
# Or does mincmath do one and then the next?
##########################################################################
# Traits that expect a bool (compare two volumes) or constant (manipulate one volume) #
##########################################################################
bool_or_const_traits = [
'test_gt', 'test_lt', 'test_eq', 'test_ne', 'test_ge', 'test_le',
'calc_add', 'calc_sub', 'calc_mul', 'calc_div'
]
test_gt = traits.Either(
traits.Bool(),
traits.Float(),
desc='Test for vol1 > vol2 or vol1 > constant.',
argstr='-gt')
test_lt = traits.Either(
traits.Bool(),
traits.Float(),
desc='Test for vol1 < vol2 or vol1 < constant.',
argstr='-lt')
test_eq = traits.Either(
traits.Bool(),
traits.Float(),
desc='Test for integer vol1 == vol2 or vol1 == constant.',
argstr='-eq')
test_ne = traits.Either(
traits.Bool(),
traits.Float(),
desc='Test for integer vol1 != vol2 or vol1 != const.',
argstr='-ne')
test_ge = traits.Either(
traits.Bool(),
traits.Float(),
desc='Test for vol1 >= vol2 or vol1 >= const.',
argstr='-ge')
test_le = traits.Either(
traits.Bool(),
traits.Float(),
desc='Test for vol1 <= vol2 or vol1 <= const.',
argstr='-le')
calc_add = traits.Either(
traits.Bool(),
traits.Float(),
desc='Add N volumes or volume + constant.',
argstr='-add')
calc_sub = traits.Either(
traits.Bool(),
traits.Float(),
desc='Subtract 2 volumes or volume - constant.',
argstr='-sub')
calc_mul = traits.Either(
traits.Bool(),
traits.Float(),
desc='Multiply N volumes or volume * constant.',
argstr='-mult')
calc_div = traits.Either(
traits.Bool(),
traits.Float(),
desc='Divide 2 volumes or volume / constant.',
argstr='-div')
######################################
# Traits that expect a single volume #
######################################
single_volume_traits = [
'invert', 'calc_not', 'sqrt', 'square', 'abs', 'exp', 'log', 'scale',
'clamp', 'segment', 'nsegment', 'isnan', 'nisnan'
] # FIXME enforce this in _parse_inputs and check for other members
invert = traits.Either(
traits.Float(), desc='Calculate 1/c.', argstr='-invert -const %s')
calc_not = traits.Bool(desc='Calculate !vol1.', argstr='-not')
sqrt = traits.Bool(desc='Take square root of a volume.', argstr='-sqrt')
square = traits.Bool(desc='Take square of a volume.', argstr='-square')
abs = traits.Bool(desc='Take absolute value of a volume.', argstr='-abs')
exp = traits.Tuple(
traits.Float,
traits.Float,
argstr='-exp -const2 %s %s',
desc='Calculate c2*exp(c1*x). Both constants must be specified.')
log = traits.Tuple(
traits.Float,
traits.Float,
argstr='-log -const2 %s %s',
desc='Calculate log(x/c2)/c1. The constants c1 and c2 default to 1.')
scale = traits.Tuple(
traits.Float,
traits.Float,
argstr='-scale -const2 %s %s',
desc='Scale a volume: volume * c1 + c2.')
clamp = traits.Tuple(
traits.Float,
traits.Float,
argstr='-clamp -const2 %s %s',
desc='Clamp a volume to lie between two values.')
segment = traits.Tuple(
traits.Float,
traits.Float,
argstr='-segment -const2 %s %s',
desc=
'Segment a volume using range of -const2: within range = 1, outside range = 0.'
)
nsegment = traits.Tuple(
traits.Float,
traits.Float,
argstr='-nsegment -const2 %s %s',
desc='Opposite of -segment: within range = 0, outside range = 1.')
isnan = traits.Bool(desc='Test for NaN values in vol1.', argstr='-isnan')
nisnan = traits.Bool(desc='Negation of -isnan.', argstr='-nisnan')
############################################
# Traits that expect precisely two volumes #
############################################
two_volume_traits = ['percentdiff']
percentdiff = traits.Float(
desc=
'Percent difference between 2 volumes, thresholded (const def=0.0).',
argstr='-percentdiff')
#####################################
# Traits that expect N >= 1 volumes #
#####################################
n_volume_traits = [
'count_valid', 'maximum', 'minimum', 'calc_add', 'calc_or'
]
count_valid = traits.Bool(
desc='Count the number of valid values in N volumes.',
argstr='-count_valid')
maximum = traits.Bool(desc='Find maximum of N volumes.', argstr='-maximum')
minimum = traits.Bool(desc='Find minimum of N volumes.', argstr='-minimum')
calc_and = traits.Bool(
desc='Calculate vol1 && vol2 (&& ...).', argstr='-and')
calc_or = traits.Bool(
desc='Calculate vol1 || vol2 (|| ...).', argstr='-or')
class MathOutputSpec(TraitedSpec):
output_file = File(desc='output file', exists=True)
class Math(StdOutCommandLine):
"""
Various mathematical operations supplied by mincmath.
Examples
--------
>>> from nipype.interfaces.minc import Math
>>> from nipype.interfaces.minc.testdata import minc2Dfile
Scale: volume*3.0 + 2:
>>> scale = Math(input_files=[minc2Dfile], scale=(3.0, 2))
>>> scale.run() # doctest: +SKIP
Test if >= 1.5:
>>> gt = Math(input_files=[minc2Dfile], test_gt=1.5)
>>> gt.run() # doctest: +SKIP
"""
input_spec = MathInputSpec
output_spec = MathOutputSpec
_cmd = 'mincmath'
def _format_arg(self, name, spec, value):
assert value is not None
if name in self.input_spec.bool_or_const_traits:
# t is unused, what was I trying to do with it?
# t = self.inputs.__getattribute__(name)
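# For example (hypothetical values): calc_add=True renders as '-add'
# (adding two input volumes), while calc_add=2.5 renders as
# '-add -const 2.5' (adding a constant to one input volume).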
if isinstance(value, bool) and value:
return spec.argstr
elif isinstance(value, bool) and not value:
raise ValueError('Does not make sense to specify %s=False' %
(name, ))
elif isinstance(value, float):
return '%s -const %s' % (
spec.argstr,
value,
)
else:
raise ValueError('Invalid %s argument: %s' % (
name,
value,
))
return super(Math, self)._format_arg(name, spec, value)
def _parse_inputs(self):
"""A number of the command line options expect precisely one or two files.
"""
nr_input_files = len(self.inputs.input_files)
for n in self.input_spec.bool_or_const_traits:
t = self.inputs.__getattribute__(n)
if isdefined(t):
if isinstance(t, bool):
if nr_input_files != 2:
raise ValueError(
'Due to the %s option we expected 2 files but input_files is of length %d'
% (
n,
nr_input_files,
))
elif isinstance(t, float):
if nr_input_files != 1:
raise ValueError(
'Due to the %s option we expected 1 file but input_files is of length %d'
% (
n,
nr_input_files,
))
else:
raise ValueError(
'Argument should be a bool or const, but got: %s' % t)
for n in self.input_spec.single_volume_traits:
t = self.inputs.__getattribute__(n)
if isdefined(t):
if nr_input_files != 1:
raise ValueError(
'Due to the %s option we expected 1 file but input_files is of length %d'
% (
n,
nr_input_files,
))
for n in self.input_spec.two_volume_traits:
t = self.inputs.__getattribute__(n)
if isdefined(t):
if nr_input_files != 2:
raise ValueError(
'Due to the %s option we expected 2 files but input_files is of length %d'
% (
n,
nr_input_files,
))
for n in self.input_spec.n_volume_traits:
t = self.inputs.__getattribute__(n)
if isdefined(t):
if not nr_input_files >= 1:
raise ValueError(
'Due to the %s option we expected at least one file but input_files is of length %d'
% (
n,
nr_input_files,
))
return super(Math, self)._parse_inputs()
class ResampleInputSpec(CommandLineInputSpec):
"""
not implemented:
-size: synonym for -nelements
-xsize: synonym for -xnelements
-ysize: synonym for -ynelements
-zsize: synonym for -znelements
"""
input_file = File(
desc='input file for resampling',
exists=True,
mandatory=True,
argstr='%s',
position=-2,
)
output_file = File(
desc='output file',
genfile=True,
argstr='%s',
position=-1,
name_source=['input_file'],
hash_files=False,
name_template='%s_resample.mnc')
# This is a dummy input.
input_grid_files = InputMultiPath(
File,
desc='input grid file(s)',
)
two = traits.Bool(desc='Create a MINC 2 output file.', argstr='-2')
clobber = traits.Bool(
desc='Overwrite existing file.',
argstr='-clobber',
usedefault=True,
default_value=True)
_xor_interpolation = ('trilinear_interpolation', 'tricubic_interpolation',
'nearest_neighbour_interpolation',
'sinc_interpolation')
trilinear_interpolation = traits.Bool(
desc='Do trilinear interpolation.',
argstr='-trilinear',
xor=_xor_interpolation)
tricubic_interpolation = traits.Bool(
desc='Do tricubic interpolation.',
argstr='-tricubic',
xor=_xor_interpolation)
nearest_neighbour_interpolation = traits.Bool(
desc='Do nearest neighbour interpolation.',
argstr='-nearest_neighbour',
xor=_xor_interpolation)
sinc_interpolation = traits.Bool(
desc='Do windowed sinc interpolation.',
argstr='-sinc',
xor=_xor_interpolation)
half_width_sinc_window = traits.Enum(
5,
1,
2,
3,
4,
6,
7,
8,
9,
10,
desc='Set half-width of sinc window (1-10). Default value: 5.',
argstr='-width %s',
requires=['sinc_interpolation'])
_xor_sinc_window_type = ('sinc_window_hanning', 'sinc_window_hamming')
sinc_window_hanning = traits.Bool(
desc='Set sinc window type to Hanning.',
argstr='-hanning',
xor=_xor_sinc_window_type,
requires=['sinc_interpolation'])
sinc_window_hamming = traits.Bool(
desc='Set sinc window type to Hamming.',
argstr='-hamming',
xor=_xor_sinc_window_type,
requires=['sinc_interpolation'])
transformation = File(
desc='File giving world transformation. (Default = identity).',
exists=True,
argstr='-transformation %s')
invert_transformation = traits.Bool(
desc='Invert the transformation before using it.',
argstr='-invert_transformation')
_xor_input_sampling = ('vio_transform', 'no_input_sampling')
vio_transform = traits.Bool(
desc='VIO_Transform the input sampling with the transform (default).',
argstr='-tfm_input_sampling',
xor=_xor_input_sampling)
no_input_sampling = traits.Bool(
desc='Use the input sampling without transforming (old behaviour).',
argstr='-use_input_sampling',
xor=_xor_input_sampling)
like = File(
desc='Specifies a model file for the resampling.',
argstr='-like %s',
exists=True)
_xor_format = (
'format_byte',
'format_short',
'format_int',
'format_long',
'format_float',
'format_double',
'format_signed',
'format_unsigned',
)
format_byte = traits.Bool(
desc='Write out byte data.', argstr='-byte', xor=_xor_format)
format_short = traits.Bool(
desc='Write out short integer data.', argstr='-short', xor=_xor_format)
format_int = traits.Bool(
desc='Write out 32-bit integer data.', argstr='-int', xor=_xor_format)
format_long = traits.Bool(
desc='Superseded by -int.', argstr='-long', xor=_xor_format)
format_float = traits.Bool(
desc='Write out single-precision floating-point data.',
argstr='-float',
xor=_xor_format)
format_double = traits.Bool(
desc='Write out double-precision floating-point data.',
argstr='-double',
xor=_xor_format)
format_signed = traits.Bool(
desc='Write signed integer data.', argstr='-signed', xor=_xor_format)
format_unsigned = traits.Bool(
desc='Write unsigned integer data (default).',
argstr='-unsigned',
xor=_xor_format)
output_range = traits.Tuple(
traits.Float,
traits.Float,
argstr='-range %s %s',
desc=
'Valid range for output data. Default value: -1.79769e+308 -1.79769e+308.'
)
_xor_slices = ('transverse', 'sagittal', 'coronal')
transverse_slices = traits.Bool(
desc='Write out transverse slices.',
argstr='-transverse',
xor=_xor_slices)
sagittal_slices = traits.Bool(
desc='Write out sagittal slices', argstr='-sagittal', xor=_xor_slices)
coronal_slices = traits.Bool(
desc='Write out coronal slices', argstr='-coronal', xor=_xor_slices)
_xor_fill = ('no_fill', 'fill')
no_fill = traits.Bool(
desc='Use value zero for points outside of input volume.',
argstr='-nofill',
xor=_xor_fill)
fill = traits.Bool(
desc='Use a fill value for points outside of input volume.',
argstr='-fill',
xor=_xor_fill)
fill_value = traits.Float(
desc=('Specify a fill value for points outside of input volume. '
'Default value: 1.79769e+308.'),
argstr='-fillvalue %s',
requires=['fill'])
_xor_scale = ('keep_real_range', 'nokeep_real_range')
keep_real_range = traits.Bool(
desc='Keep the real scale of the input volume.',
argstr='-keep_real_range',
xor=_xor_scale)
nokeep_real_range = traits.Bool(
desc='Do not keep the real scale of the data (default).',
argstr='-nokeep_real_range',
xor=_xor_scale)
_xor_spacetype = ('spacetype', 'talairach')
spacetype = traits.Str(
desc='Set the spacetype attribute to a specified string.',
argstr='-spacetype %s')
talairach = traits.Bool(
desc='Output is in Talairach space.', argstr='-talairach')
origin = traits.Tuple(
traits.Float,
traits.Float,
traits.Float,
desc=('Origin of first pixel in 3D space. '
'Default value: 1.79769e+308 1.79769e+308 1.79769e+308.'),
argstr='-origin %s %s %s')
standard_sampling = traits.Bool(
desc='Set the sampling to standard values (step, start and dircos).',
argstr='-standard_sampling') # FIXME Bool?
units = traits.Str(
desc='Specify the units of the output sampling.',
argstr='-units %s') # FIXME String?
# Elements along each dimension.
# FIXME Ints? Ranges?
# FIXME Check that this xor behaves correctly.
_xor_nelements = ('nelements', 'nelements_x_y_or_z')
# nr elements along each dimension
nelements = traits.Tuple(
traits.Int,
traits.Int,
traits.Int,
desc='Number of elements along each dimension (X, Y, Z).',
argstr='-nelements %s %s %s',
xor=_xor_nelements)
# FIXME Is mincresample happy if we only specify one of these, or do we
# need the requires=...?
xnelements = traits.Int(
desc='Number of elements along the X dimension.',
argstr='-xnelements %s',
requires=('ynelements', 'znelements'),
xor=_xor_nelements)
ynelements = traits.Int(
desc='Number of elements along the Y dimension.',
argstr='-ynelements %s',
requires=('xnelements', 'znelements'),
xor=_xor_nelements)
znelements = traits.Int(
desc='Number of elements along the Z dimension.',
argstr='-znelements %s',
requires=('xnelements', 'ynelements'),
xor=_xor_nelements)
# step size along each dimension
_xor_step = ('step', 'step_x_y_or_z')
step = traits.Tuple(
traits.Int,
traits.Int,
traits.Int,
desc=
'Step size along each dimension (X, Y, Z). Default value: (0, 0, 0).',
argstr='-step %s %s %s',
xor=_xor_step)
# FIXME Use the requires=...?
xstep = traits.Int(
desc='Step size along the X dimension. Default value: 0.',
argstr='-xstep %s',
requires=('ystep', 'zstep'),
xor=_xor_step)
ystep = traits.Int(
desc='Step size along the Y dimension. Default value: 0.',
argstr='-ystep %s',
requires=('xstep', 'zstep'),
xor=_xor_step)
zstep = traits.Int(
desc='Step size along the Z dimension. Default value: 0.',
argstr='-zstep %s',
requires=('xstep', 'ystep'),
xor=_xor_step)
# start point along each dimension
_xor_start = ('start', 'start_x_y_or_z')
start = traits.Tuple(
traits.Float,
traits.Float,
traits.Float,
desc=('Start point along each dimension (X, Y, Z). '
'Default value: 1.79769e+308 1.79769e+308 1.79769e+308.'),
argstr='-start %s %s %s',
xor=_xor_start)
# FIXME Use the requires=...?
xstart = traits.Float(
desc='Start point along the X dimension. Default value: 1.79769e+308.',
argstr='-xstart %s',
requires=('ystart', 'zstart'),
xor=_xor_start)
ystart = traits.Float(
desc='Start point along the Y dimension. Default value: 1.79769e+308.',
argstr='-ystart %s',
requires=('xstart', 'zstart'),
xor=_xor_start)
zstart = traits.Float(
desc='Start point along the Z dimension. Default value: 1.79769e+308.',
argstr='-zstart %s',
requires=('xstart', 'ystart'),
xor=_xor_start)
# dircos along each dimension
_xor_dircos = ('dircos', 'dircos_x_y_or_z')
dircos = traits.Tuple(
traits.Float,
traits.Float,
traits.Float,
desc=(
'Direction cosines along each dimension (X, Y, Z). Default value: '
'1.79769e+308 1.79769e+308 1.79769e+308 1.79769e+308 ...'
' 1.79769e+308 1.79769e+308 1.79769e+308 1.79769e+308 1.79769e+308.'
),
argstr='-dircos %s %s %s',
xor=_xor_dircos)
# FIXME Use the requires=...?
xdircos = traits.Float(
desc=('Direction cosines along the X dimension. '
'Default value: 1.79769e+308 1.79769e+308 1.79769e+308.'),
argstr='-xdircos %s',
requires=('ydircos', 'zdircos'),
xor=_xor_dircos)
ydircos = traits.Float(
desc=('Direction cosines along the Y dimension. '
'Default value: 1.79769e+308 1.79769e+308 1.79769e+308.'),
argstr='-ydircos %s',
requires=('xdircos', 'zdircos'),
xor=_xor_dircos)
zdircos = traits.Float(
        desc=('Direction cosines along the Z dimension. '
              'Default value: 1.79769e+308 1.79769e+308 1.79769e+308.'),
argstr='-zdircos %s',
requires=('xdircos', 'ydircos'),
xor=_xor_dircos)
class ResampleOutputSpec(TraitedSpec):
output_file = File(desc='output file', exists=True)
class Resample(StdOutCommandLine):
"""
    Resample a minc file.
Examples
--------
>>> from nipype.interfaces.minc import Resample
>>> from nipype.interfaces.minc.testdata import minc2Dfile
>>> r = Resample(input_file=minc2Dfile, output_file='/tmp/out.mnc') # Resample the file.
>>> r.run() # doctest: +SKIP
"""
input_spec = ResampleInputSpec
output_spec = ResampleOutputSpec
_cmd = 'mincresample'
class NormInputSpec(CommandLineInputSpec):
"""
Not implemented:
-version print version and exit
-verbose be verbose
-noverbose opposite of -verbose [default]
-quiet be quiet
-noquiet opposite of -quiet [default]
-fake do a dry run, (echo cmds only)
-nofake opposite of -fake [default]
"""
input_file = File(
desc='input file to normalise',
exists=True,
mandatory=True,
argstr='%s',
position=-2,
)
output_file = File(
desc='output file',
genfile=True,
argstr='%s',
position=-1,
name_source=['input_file'],
hash_files=False,
name_template='%s_norm.mnc')
output_threshold_mask = File(
desc='File in which to store the threshold mask.',
argstr='-threshold_mask %s',
name_source=['input_file'],
hash_files=False,
name_template='%s_norm_threshold_mask.mnc')
clobber = traits.Bool(
desc='Overwrite existing file.',
argstr='-clobber',
usedefault=True,
default_value=True)
# Normalisation Options
mask = File(
desc='Calculate the image normalisation within a mask.',
argstr='-mask %s',
exists=True)
clamp = traits.Bool(
        desc='Force the output range between limits [default].',
argstr='-clamp',
usedefault=True,
default_value=True)
cutoff = traits.Range(
low=0.0,
high=100.0,
desc=
'Cutoff value to use to calculate thresholds by a histogram PcT in %. [default: 0.01]',
argstr='-cutoff %s',
)
lower = traits.Float(desc='Lower real value to use.', argstr='-lower %s')
upper = traits.Float(desc='Upper real value to use.', argstr='-upper %s')
out_floor = traits.Float(
        desc='Output files minimum [default: 0]',
argstr='-out_floor %s') # FIXME is this a float?
out_ceil = traits.Float(
        desc='Output files maximum [default: 100]',
argstr='-out_ceil %s') # FIXME is this a float?
# Threshold Options
threshold = traits.Bool(
desc=
'Threshold the image (set values below threshold_perc to -out_floor).',
argstr='-threshold')
threshold_perc = traits.Range(
low=0.0,
high=100.0,
desc=
'Threshold percentage (0.1 == lower 10% of intensity range) [default: 0.1].',
argstr='-threshold_perc %s')
threshold_bmt = traits.Bool(
desc='Use the resulting image BiModalT as the threshold.',
argstr='-threshold_bmt')
threshold_blur = traits.Float(
desc='Blur FWHM for intensity edges then thresholding [default: 2].',
argstr='-threshold_blur %s')
class NormOutputSpec(TraitedSpec):
output_file = File(desc='output file', exists=True)
output_threshold_mask = File(desc='threshold mask file')
class Norm(CommandLine):
"""Normalise a file between a max and minimum (possibly)
using two histogram pct's.
Examples
--------
>>> from nipype.interfaces.minc import Norm
>>> from nipype.interfaces.minc.testdata import minc2Dfile
>>> n = Norm(input_file=minc2Dfile, output_file='/tmp/out.mnc') # Normalise the file.
>>> n.run() # doctest: +SKIP
"""
input_spec = NormInputSpec
output_spec = NormOutputSpec
_cmd = 'mincnorm'
"""
| volcentre will centre a MINC image's sampling about a point (0,0,0 typically)
|
| NB: It will modify the file in-place unless an outfile is given
|
| Problems or comments should be sent to: [email protected]
Summary of options:
-version print version and exit
-verbose be verbose
-noverbose opposite of -verbose [default]
-clobber clobber existing check files
-noclobber opposite of -clobber [default]
-fake do a dry run, (echo cmds only)
-nofake opposite of -fake [default]
-com Use the CoM of the volume for the new centre (via mincstats)
-nocom opposite of -com [default]
-centre <float> <float> <float>
Centre to use (x,y,z) [default: 0 0 0]
-zero_dircos Set the direction cosines to identity [default]
   -nozero_dircos              opposite of -zero_dircos
Usage: volcentre [options] <infile.mnc> [<outfile.mnc>]
volcentre -help to list options
"""
class VolcentreInputSpec(CommandLineInputSpec):
"""
Not implemented:
-fake do a dry run, (echo cmds only)
-nofake opposite of -fake [default]
"""
input_file = File(
desc='input file to centre',
exists=True,
mandatory=True,
argstr='%s',
position=-2,
)
output_file = File(
desc='output file',
genfile=True,
argstr='%s',
position=-1,
name_source=['input_file'],
hash_files=False,
name_template='%s_volcentre.mnc')
verbose = traits.Bool(
desc='Print out log messages. Default: False.', argstr='-verbose')
clobber = traits.Bool(
desc='Overwrite existing file.',
argstr='-clobber',
usedefault=True,
default_value=True)
com = traits.Bool(
desc=
'Use the CoM of the volume for the new centre (via mincstats). Default: False',
argstr='-com')
centre = traits.Tuple(
traits.Float,
traits.Float,
traits.Float,
argstr='-centre %s %s %s',
desc='Centre to use (x,y,z) [default: 0 0 0].',
)
zero_dircos = traits.Bool(
desc='Set the direction cosines to identity [default].',
argstr='-zero_dircos')
class VolcentreOutputSpec(TraitedSpec):
output_file = File(desc='output file', exists=True)
class Volcentre(CommandLine):
"""Centre a MINC image's sampling about a point, typically (0,0,0).
Example
--------
>>> from nipype.interfaces.minc import Volcentre
>>> from nipype.interfaces.minc.testdata import minc2Dfile
>>> vc = Volcentre(input_file=minc2Dfile)
>>> vc.run() # doctest: +SKIP
"""
input_spec = VolcentreInputSpec
output_spec = VolcentreOutputSpec
_cmd = 'volcentre'
class VolpadInputSpec(CommandLineInputSpec):
"""
Not implemented:
-fake do a dry run, (echo cmds only)
-nofake opposite of -fake [default]
| volpad pads a MINC volume
|
| Problems or comments should be sent to: [email protected]
Summary of options:
-- General Options -------------------------------------------------------------
-verbose be verbose
-noverbose opposite of -verbose [default]
-clobber clobber existing files
-noclobber opposite of -clobber [default]
-fake do a dry run, (echo cmds only)
-nofake opposite of -fake [default]
"""
input_file = File(
desc='input file to centre',
exists=True,
mandatory=True,
argstr='%s',
position=-2,
)
output_file = File(
desc='output file',
genfile=True,
argstr='%s',
position=-1,
name_source=['input_file'],
hash_files=False,
name_template='%s_volpad.mnc')
verbose = traits.Bool(
desc='Print out log messages. Default: False.', argstr='-verbose')
clobber = traits.Bool(
desc='Overwrite existing file.',
argstr='-clobber',
usedefault=True,
default_value=True)
auto = traits.Bool(
desc=
'Automatically determine padding distances (uses -distance as max). Default: False.',
argstr='-auto')
auto_freq = traits.Float(
desc=
'Frequency of voxels over bimodalt threshold to stop at [default: 500].',
argstr='-auto_freq %s')
distance = traits.Int(
desc='Padding distance (in voxels) [default: 4].',
argstr='-distance %s')
smooth = traits.Bool(
desc='Smooth (blur) edges before padding. Default: False.',
argstr='-smooth')
smooth_distance = traits.Int(
desc='Smoothing distance (in voxels) [default: 4].',
argstr='-smooth_distance %s')
class VolpadOutputSpec(TraitedSpec):
output_file = File(desc='output file', exists=True)
class Volpad(CommandLine):
"""Centre a MINC image's sampling about a point, typically (0,0,0).
Examples
--------
>>> from nipype.interfaces.minc import Volpad
>>> from nipype.interfaces.minc.testdata import minc2Dfile
>>> vp = Volpad(input_file=minc2Dfile, smooth=True, smooth_distance=4)
>>> vp.run() # doctest: +SKIP
"""
input_spec = VolpadInputSpec
output_spec = VolpadOutputSpec
_cmd = 'volpad'
class VolisoInputSpec(CommandLineInputSpec):
input_file = File(
desc='input file to convert to isotropic sampling',
exists=True,
mandatory=True,
argstr='%s',
position=-2,
)
output_file = File(
desc='output file',
genfile=True,
argstr='%s',
position=-1,
name_source=['input_file'],
hash_files=False,
name_template='%s_voliso.mnc')
verbose = traits.Bool(
desc='Print out log messages. Default: False.', argstr='--verbose')
clobber = traits.Bool(
desc='Overwrite existing file.',
argstr='--clobber',
usedefault=True,
default_value=True)
maxstep = traits.Float(
desc='The target maximum step desired in the output volume.',
argstr='--maxstep %s')
minstep = traits.Float(
desc='The target minimum step desired in the output volume.',
argstr='--minstep %s')
avgstep = traits.Bool(
desc=
'Calculate the maximum step from the average steps of the input volume.',
argstr='--avgstep')
class VolisoOutputSpec(TraitedSpec):
output_file = File(desc='output file', exists=True)
class Voliso(CommandLine):
"""Changes the steps and starts in order that the output volume
has isotropic sampling.
Examples
--------
>>> from nipype.interfaces.minc import Voliso
>>> from nipype.interfaces.minc.testdata import minc2Dfile
>>> viso = Voliso(input_file=minc2Dfile, minstep=0.1, avgstep=True)
>>> viso.run() # doctest: +SKIP
"""
input_spec = VolisoInputSpec
output_spec = VolisoOutputSpec
_cmd = 'voliso'
class GennlxfmInputSpec(CommandLineInputSpec):
output_file = File(
desc='output file',
genfile=True,
argstr='%s',
position=-1,
name_source=['like'],
hash_files=False,
name_template='%s_gennlxfm.xfm')
verbose = traits.Bool(
desc='Print out log messages. Default: False.', argstr='-verbose')
clobber = traits.Bool(
desc='Overwrite existing file.',
argstr='-clobber',
usedefault=True,
default_value=True)
ident = traits.Bool(
desc='Generate an identity xfm. Default: False.', argstr='-ident')
step = traits.Int(
desc='Output ident xfm step [default: 1].', argstr='-step %s')
like = File(
desc='Generate a nlxfm like this file.',
exists=True,
argstr='-like %s',
)
class GennlxfmOutputSpec(TraitedSpec):
output_file = File(desc='output file', exists=True)
output_grid = File(desc='output grid', exists=True)
class Gennlxfm(CommandLine):
"""Generate nonlinear xfms. Currently only identity xfms
are supported!
This tool is part of minc-widgets:
https://github.com/BIC-MNI/minc-widgets/blob/master/gennlxfm/gennlxfm
Examples
--------
>>> from nipype.interfaces.minc import Gennlxfm
>>> from nipype.interfaces.minc.testdata import minc2Dfile
>>> gennlxfm = Gennlxfm(step=1, like=minc2Dfile)
>>> gennlxfm.run() # doctest: +SKIP
"""
input_spec = GennlxfmInputSpec
output_spec = GennlxfmOutputSpec
_cmd = 'gennlxfm'
def _list_outputs(self):
outputs = super(Gennlxfm, self)._list_outputs()
outputs['output_grid'] = re.sub('.(nlxfm|xfm)$', '_grid_0.mnc',
outputs['output_file'])
return outputs
class XfmConcatInputSpec(CommandLineInputSpec):
input_files = InputMultiPath(
File(exists=True),
desc='input file(s)',
mandatory=True,
sep=' ',
argstr='%s',
position=-2)
# This is a dummy input.
input_grid_files = InputMultiPath(
File,
desc='input grid file(s)',
)
output_file = File(
desc='output file',
genfile=True,
argstr='%s',
position=-1,
name_source=['input_files'],
hash_files=False,
name_template='%s_xfmconcat.xfm')
verbose = traits.Bool(
desc='Print out log messages. Default: False.', argstr='-verbose')
clobber = traits.Bool(
desc='Overwrite existing file.',
argstr='-clobber',
usedefault=True,
default_value=True)
class XfmConcatOutputSpec(TraitedSpec):
output_file = File(desc='output file', exists=True)
output_grids = OutputMultiPath(File(exists=True), desc='output grids')
class XfmConcat(CommandLine):
"""Concatenate transforms together. The output transformation
is equivalent to applying input1.xfm, then input2.xfm, ..., in
that order.
Examples
--------
>>> from nipype.interfaces.minc import XfmConcat
>>> from nipype.interfaces.minc.testdata import minc2Dfile
>>> conc = XfmConcat(input_files=['input1.xfm', 'input1.xfm'])
>>> conc.run() # doctest: +SKIP
"""
input_spec = XfmConcatInputSpec
output_spec = XfmConcatOutputSpec
_cmd = 'xfmconcat'
def _list_outputs(self):
outputs = super(XfmConcat, self)._list_outputs()
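        # Non-linear transforms reference displacement grid volumes on disk
        # (named <xfm>_grid_N.mnc); detect them by scanning the xfm text for 'grid'.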
if os.path.exists(outputs['output_file']):
if 'grid' in open(outputs['output_file'], 'r').read():
outputs['output_grids'] = glob.glob(
re.sub('.(nlxfm|xfm)$', '_grid_*.mnc',
outputs['output_file']))
return outputs
class BestLinRegInputSpec(CommandLineInputSpec):
source = File(
desc='source Minc file',
exists=True,
mandatory=True,
argstr='%s',
position=-4,
)
target = File(
desc='target Minc file',
exists=True,
mandatory=True,
argstr='%s',
position=-3,
)
output_xfm = File(
desc='output xfm file',
genfile=True,
argstr='%s',
position=-2,
name_source=['source'],
hash_files=False,
name_template='%s_bestlinreg.xfm',
keep_extension=False)
output_mnc = File(
desc='output mnc file',
genfile=True,
argstr='%s',
position=-1,
name_source=['source'],
hash_files=False,
name_template='%s_bestlinreg.mnc',
keep_extension=False)
verbose = traits.Bool(
desc='Print out log messages. Default: False.', argstr='-verbose')
clobber = traits.Bool(
desc='Overwrite existing file.',
argstr='-clobber',
usedefault=True,
default_value=True)
# FIXME Very bare implementation, none of these are done yet:
"""
-init_xfm initial transformation (default identity)
-source_mask source mask to use during fitting
-target_mask target mask to use during fitting
-lsq9 use 9-parameter transformation (default)
-lsq12 use 12-parameter transformation (default -lsq9)
-lsq6 use 6-parameter transformation
"""
class BestLinRegOutputSpec(TraitedSpec):
output_xfm = File(desc='output xfm file', exists=True)
output_mnc = File(desc='output mnc file', exists=True)
class BestLinReg(CommandLine):
"""Hierachial linear fitting between two files.
The bestlinreg script is part of the EZminc package:
https://github.com/BIC-MNI/EZminc/blob/master/scripts/bestlinreg.pl
Examples
--------
>>> from nipype.interfaces.minc import BestLinReg
>>> from nipype.interfaces.minc.testdata import nonempty_minc_data
>>> input_file = nonempty_minc_data(0)
>>> target_file = nonempty_minc_data(1)
>>> linreg = BestLinReg(source=input_file, target=target_file)
>>> linreg.run() # doctest: +SKIP
"""
input_spec = BestLinRegInputSpec
output_spec = BestLinRegOutputSpec
_cmd = 'bestlinreg'
class NlpFitInputSpec(CommandLineInputSpec):
source = File(
desc='source Minc file',
exists=True,
mandatory=True,
argstr='%s',
position=-3,
)
target = File(
desc='target Minc file',
exists=True,
mandatory=True,
argstr='%s',
position=-2,
)
output_xfm = File(
desc='output xfm file',
genfile=True,
argstr='%s',
position=-1,
)
# This is a dummy input.
input_grid_files = InputMultiPath(
File,
desc='input grid file(s)',
)
config_file = File(
desc='File containing the fitting configuration use.',
argstr='-config_file %s',
mandatory=True,
exists=True)
init_xfm = File(
desc='Initial transformation (default identity).',
argstr='-init_xfm %s',
mandatory=True,
exists=True)
source_mask = File(
desc='Source mask to use during fitting.',
argstr='-source_mask %s',
mandatory=True,
exists=True)
verbose = traits.Bool(
desc='Print out log messages. Default: False.', argstr='-verbose')
clobber = traits.Bool(
desc='Overwrite existing file.',
argstr='-clobber',
usedefault=True,
default_value=True)
class NlpFitOutputSpec(TraitedSpec):
output_xfm = File(desc='output xfm file', exists=True)
output_grid = File(desc='output grid file', exists=True)
class NlpFit(CommandLine):
"""Hierarchial non-linear fitting with bluring.
This tool is part of the minc-widgets package:
https://github.com/BIC-MNI/minc-widgets/blob/master/nlpfit/nlpfit
Examples
--------
>>> from nipype.interfaces.minc import NlpFit
>>> from nipype.interfaces.minc.testdata import nonempty_minc_data, nlp_config
>>> from nipype.testing import example_data
>>> source = nonempty_minc_data(0)
>>> target = nonempty_minc_data(1)
>>> source_mask = nonempty_minc_data(2)
>>> config = nlp_config
>>> initial = example_data('minc_initial.xfm')
>>> nlpfit = NlpFit(config_file=config, init_xfm=initial, source_mask=source_mask, source=source, target=target)
>>> nlpfit.run() # doctest: +SKIP
"""
input_spec = NlpFitInputSpec
output_spec = NlpFitOutputSpec
_cmd = 'nlpfit'
def _gen_filename(self, name):
if name == 'output_xfm':
output_xfm = self.inputs.output_xfm
if isdefined(output_xfm):
return os.path.abspath(output_xfm)
else:
return aggregate_filename(
[self.inputs.source, self.inputs.target],
'nlpfit_xfm_output') + '.xfm'
else:
            raise NotImplementedError
def _list_outputs(self):
outputs = self.output_spec().get()
outputs['output_xfm'] = os.path.abspath(
self._gen_filename('output_xfm'))
assert os.path.exists(outputs['output_xfm'])
if 'grid' in open(outputs['output_xfm'], 'r').read():
outputs['output_grid'] = re.sub('.(nlxfm|xfm)$', '_grid_0.mnc',
outputs['output_xfm'])
return outputs
class XfmAvgInputSpec(CommandLineInputSpec):
input_files = InputMultiPath(
File(exists=True),
desc='input file(s)',
mandatory=True,
sep=' ',
argstr='%s',
position=-2)
# This is a dummy input.
input_grid_files = InputMultiPath(
File,
desc='input grid file(s)',
)
output_file = File(
desc='output file',
genfile=True,
argstr='%s',
position=-1,
)
verbose = traits.Bool(
desc='Print out log messages. Default: False.', argstr='-verbose')
clobber = traits.Bool(
desc='Overwrite existing file.',
argstr='-clobber',
usedefault=True,
default_value=True)
# FIXME xor these:
avg_linear = traits.Bool(
desc='average the linear part [default].', argstr='-avg_linear')
avg_nonlinear = traits.Bool(
desc='average the non-linear part [default].', argstr='-avg_nonlinear')
ignore_linear = traits.Bool(
desc='opposite of -avg_linear.', argstr='-ignore_linear')
ignore_nonlinear = traits.Bool(
        desc='opposite of -avg_nonlinear.', argstr='-ignore_nonlinear')
class XfmAvgOutputSpec(TraitedSpec):
output_file = File(desc='output file', exists=True)
output_grid = File(desc='output grid file', exists=True)
class XfmAvg(CommandLine):
"""Average a number of xfm transforms using matrix logs and exponents.
The program xfmavg calls Octave for numerical work.
This tool is part of the minc-widgets package:
https://github.com/BIC-MNI/minc-widgets/tree/master/xfmavg
Examples
--------
>>> from nipype.interfaces.minc import XfmAvg
>>> from nipype.interfaces.minc.testdata import nonempty_minc_data, nlp_config
>>> from nipype.testing import example_data
>>> xfm1 = example_data('minc_initial.xfm')
>>> xfm2 = example_data('minc_initial.xfm') # cheating for doctest
>>> xfmavg = XfmAvg(input_files=[xfm1, xfm2])
>>> xfmavg.run() # doctest: +SKIP
"""
input_spec = XfmAvgInputSpec
output_spec = XfmAvgOutputSpec
_cmd = 'xfmavg'
def _gen_filename(self, name):
if name == 'output_file':
output_file = self.inputs.output_file
if isdefined(output_file):
return os.path.abspath(output_file)
else:
return aggregate_filename(self.inputs.input_files,
'xfmavg_output') + '.xfm'
else:
            raise NotImplementedError
def _gen_outfilename(self):
return self._gen_filename('output_file')
def _list_outputs(self):
outputs = self.output_spec().get()
outputs['output_file'] = os.path.abspath(self._gen_outfilename())
assert os.path.exists(outputs['output_file'])
if 'grid' in open(outputs['output_file'], 'r').read():
outputs['output_grid'] = re.sub('.(nlxfm|xfm)$', '_grid_0.mnc',
outputs['output_file'])
return outputs
class XfmInvertInputSpec(CommandLineInputSpec):
input_file = File(
desc='input file',
exists=True,
mandatory=True,
argstr='%s',
position=-2)
output_file = File(
desc='output file',
genfile=True,
argstr='%s',
position=-1,
)
verbose = traits.Bool(
desc='Print out log messages. Default: False.', argstr='-verbose')
clobber = traits.Bool(
desc='Overwrite existing file.',
argstr='-clobber',
usedefault=True,
default_value=True)
class XfmInvertOutputSpec(TraitedSpec):
output_file = File(desc='output file', exists=True)
output_grid = File(desc='output grid file', exists=True)
class XfmInvert(CommandLine):
"""Invert an xfm transform file.
Examples
--------
>>> from nipype.interfaces.minc import XfmAvg
>>> from nipype.testing import example_data
>>> xfm = example_data('minc_initial.xfm')
>>> invert = XfmInvert(input_file=xfm)
>>> invert.run() # doctest: +SKIP
"""
input_spec = XfmInvertInputSpec
output_spec = XfmInvertOutputSpec
_cmd = 'xfminvert'
def _gen_filename(self, name):
if name == 'output_file':
output_file = self.inputs.output_file
if isdefined(output_file):
return os.path.abspath(output_file)
else:
return aggregate_filename([self.inputs.input_file],
'xfminvert_output') + '.xfm'
else:
            raise NotImplementedError
def _gen_outfilename(self):
return self._gen_filename('output_file')
def _list_outputs(self):
outputs = self.output_spec().get()
outputs['output_file'] = os.path.abspath(self._gen_outfilename())
assert os.path.exists(outputs['output_file'])
if 'grid' in open(outputs['output_file'], 'r').read():
outputs['output_grid'] = re.sub('.(nlxfm|xfm)$', '_grid_0.mnc',
outputs['output_file'])
return outputs
class BigAverageInputSpec(CommandLineInputSpec):
input_files = InputMultiPath(
File(exists=True),
desc='input file(s)',
mandatory=True,
sep=' ',
argstr='%s',
position=-2)
output_file = File(
desc='output file',
genfile=True,
argstr='%s',
position=-1,
name_source=['input_files'],
hash_files=False,
name_template='%s_bigaverage.mnc')
verbose = traits.Bool(
desc='Print out log messages. Default: False.', argstr='--verbose')
clobber = traits.Bool(
desc='Overwrite existing file.',
argstr='--clobber',
usedefault=True,
default_value=True)
    # FIXME Rudimentary implementation, various parameters not implemented.
# TODO!
output_float = traits.Bool(
desc='Output files with float precision.', argstr='--float')
robust = traits.Bool(
        desc=('Perform robust averaging; features that are outside 1 standard '
              'deviation from the mean are downweighted. Works well for noisy '
              'data with artifacts. See the --tmpdir option if you have a '
              'large number of input files.'),
argstr='-robust')
# Should Nipype deal with where the temp directory is?
tmpdir = Directory(desc='temporary files directory', argstr='-tmpdir %s')
sd_file = File(
desc='Place standard deviation image in specified file.',
argstr='--sdfile %s',
name_source=['input_files'],
hash_files=False,
name_template='%s_bigaverage_stdev.mnc')
class BigAverageOutputSpec(TraitedSpec):
output_file = File(desc='output file', exists=True)
sd_file = File(desc='standard deviation image', exists=True)
class BigAverage(CommandLine):
"""Average 1000's of MINC files in linear time.
mincbigaverage is designed to discretise the problem of averaging either
a large number of input files or averaging a smaller number of large
files. (>1GB each). There is also some code included to perform "robust"
averaging in which only the most common features are kept via down-weighting
outliers beyond a standard deviation.
One advantage of mincbigaverage is that it avoids issues around the number
of possible open files in HDF/netCDF. In short if you have more than 100
files open at once while averaging things will slow down significantly.
mincbigaverage does this via a iterative approach to averaging files and
is a direct drop in replacement for mincaverage. That said not all the
arguments of mincaverage are supported in mincbigaverage but they should
be.
This tool is part of the minc-widgets package:
https://github.com/BIC-MNI/minc-widgets/blob/master/mincbigaverage/mincbigaverage
Examples
--------
>>> from nipype.interfaces.minc import BigAverage
>>> from nipype.interfaces.minc.testdata import nonempty_minc_data
>>> files = [nonempty_minc_data(i) for i in range(3)]
>>> average = BigAverage(input_files=files, output_float=True, robust=True)
>>> average.run() # doctest: +SKIP
"""
input_spec = BigAverageInputSpec
output_spec = BigAverageOutputSpec
_cmd = 'mincbigaverage'
class ReshapeInputSpec(CommandLineInputSpec):
input_file = File(
desc='input file',
exists=True,
mandatory=True,
argstr='%s',
position=-2)
output_file = File(
desc='output file',
genfile=True,
argstr='%s',
position=-1,
name_source=['input_file'],
hash_files=False,
name_template='%s_reshape.mnc')
verbose = traits.Bool(
desc='Print out log messages. Default: False.', argstr='-verbose')
clobber = traits.Bool(
desc='Overwrite existing file.',
argstr='-clobber',
usedefault=True,
default_value=True)
# FIXME MANY options not implemented!
write_short = traits.Bool(
desc='Convert to short integer data.', argstr='-short')
class ReshapeOutputSpec(TraitedSpec):
output_file = File(desc='output file', exists=True)
class Reshape(CommandLine):
"""Cut a hyperslab out of a minc file, with dimension reordering.
This is also useful for rewriting with a different format, for
example converting to short (see example below).
Examples
--------
>>> from nipype.interfaces.minc import Reshape
>>> from nipype.interfaces.minc.testdata import nonempty_minc_data
>>> input_file = nonempty_minc_data(0)
>>> reshape_to_short = Reshape(input_file=input_file, write_short=True)
>>> reshape_to_short.run() # doctest: +SKIP
"""
input_spec = ReshapeInputSpec
output_spec = ReshapeOutputSpec
_cmd = 'mincreshape'
class VolSymmInputSpec(CommandLineInputSpec):
input_file = File(
desc='input file',
exists=True,
mandatory=True,
argstr='%s',
position=-3)
trans_file = File(
desc='output xfm trans file',
genfile=True,
argstr='%s',
position=-2,
name_source=['input_file'],
hash_files=False,
name_template='%s_vol_symm.xfm',
keep_extension=False)
output_file = File(
desc='output file',
genfile=True,
argstr='%s',
position=-1,
name_source=['input_file'],
hash_files=False,
name_template='%s_vol_symm.mnc')
# This is a dummy input.
input_grid_files = InputMultiPath(
File,
desc='input grid file(s)',
)
verbose = traits.Bool(
desc='Print out log messages. Default: False.', argstr='-verbose')
clobber = traits.Bool(
desc='Overwrite existing file.',
argstr='-clobber',
usedefault=True,
default_value=True)
# FIXME MANY options not implemented!
fit_linear = traits.Bool(desc='Fit using a linear xfm.', argstr='-linear')
fit_nonlinear = traits.Bool(
desc='Fit using a non-linear xfm.', argstr='-nonlinear')
# FIXME This changes the input/output behaviour of trans_file! Split into
# two separate interfaces?
nofit = traits.Bool(
desc='Use the input transformation instead of generating one.',
argstr='-nofit')
config_file = File(
desc=
'File containing the fitting configuration (nlpfit -help for info).',
argstr='-config_file %s',
exists=True)
x = traits.Bool(desc='Flip volume in x-plane (default).', argstr='-x')
y = traits.Bool(desc='Flip volume in y-plane.', argstr='-y')
z = traits.Bool(desc='Flip volume in z-plane.', argstr='-z')
class VolSymmOutputSpec(TraitedSpec):
output_file = File(desc='output file', exists=True)
trans_file = File(desc='xfm trans file', exists=True)
output_grid = File(
desc='output grid file', exists=True) # FIXME Is exists=True correct?
class VolSymm(CommandLine):
"""Make a volume symmetric about an axis either linearly
and/or nonlinearly. This is done by registering a volume
to a flipped image of itself.
This tool is part of the minc-widgets package:
https://github.com/BIC-MNI/minc-widgets/blob/master/volsymm/volsymm
Examples
--------
>>> from nipype.interfaces.minc import VolSymm
>>> from nipype.interfaces.minc.testdata import nonempty_minc_data
>>> input_file = nonempty_minc_data(0)
>>> volsymm = VolSymm(input_file=input_file)
>>> volsymm.run() # doctest: +SKIP
"""
input_spec = VolSymmInputSpec
output_spec = VolSymmOutputSpec
_cmd = 'volsymm'
def _list_outputs(self):
outputs = super(VolSymm, self)._list_outputs()
# Have to manually check for the grid files.
if os.path.exists(outputs['trans_file']):
if 'grid' in open(outputs['trans_file'], 'r').read():
outputs['output_grid'] = re.sub('.(nlxfm|xfm)$', '_grid_0.mnc',
outputs['trans_file'])
return outputs
| 30.20641 | 145 | 0.60428 | ["Apache-2.0"] | Inria-Visages/nipype | nipype/interfaces/minc/minc.py | 111,220 | Python |
"""
A file to contain specific logic to handle version upgrades in Kolibri.
"""
from shutil import rmtree
from django.conf import settings
from kolibri.core.upgrade import version_upgrade
# Before 0.15 we copied static files to the KOLIBRI_HOME directory.
# After 0.15 we read them directly from their source directories.
@version_upgrade(old_version="<0.15.0")
def clear_static_dir():
rmtree(settings.STATIC_ROOT, ignore_errors=True)
| 27.6875 | 71 | 0.78781 | ["MIT"] | AksharFoundation/kolibri | kolibri/core/device/upgrade.py | 443 | Python |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
from collections import OrderedDict
import numpy as np
from test_util import GenArgList
from optimizer_test_util import clip_grad_norm_np
import oneflow as flow
def compare_with_numpy_lamb(
test_case,
device,
x_shape,
learning_rate,
train_iters,
betas,
weight_decay,
eps,
do_bias_correction,
adam_w_mode,
clip_grad_max_norm,
clip_grad_norm_type,
):
np.random.seed(1000)
random_grad_seq = []
for _ in range(train_iters):
random_grad_seq.append(np.random.uniform(size=x_shape).astype(np.float32))
init_value = np.random.uniform(size=x_shape).astype(np.float32)
class CustomModule(flow.nn.Module):
def __init__(self):
super().__init__()
self.param = flow.nn.Parameter(
flow.Tensor(init_value, device=flow.device(device))
)
def forward(self, mask):
return self.param * mask
simp_module = CustomModule()
simp_module.to(device)
simp_module.train()
optim_kwargs = {
"params": simp_module.parameters(),
"lr": learning_rate,
"betas": betas,
"eps": eps,
"weight_decay": weight_decay,
"adam_w_mode": adam_w_mode,
"do_bias_correction": do_bias_correction,
}
if clip_grad_max_norm != -1:
optim_kwargs["clip_grad_max_norm"] = clip_grad_max_norm
optim_kwargs["clip_grad_norm_type"] = clip_grad_norm_type
lamb_optim = flow.optim.LAMB([optim_kwargs])
class CustomLambGraph(flow.nn.Graph):
def __init__(self):
super().__init__()
self.m = simp_module
self.add_optimizer(lamb_optim)
def build(self, mask_tensor):
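            # One optimization step: sum the masked parameter, backprop, and let
            # the attached LAMB optimizer update ``self.m.param`` when the graph runs.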
loss = flow.sum(self.m(mask_tensor))
loss.backward()
return loss
lamb_graph = CustomLambGraph()
for i in range(train_iters):
mask_tensor = flow.tensor(
random_grad_seq[i],
dtype=flow.float32,
requires_grad=False,
device=flow.device(device),
)
lamb_graph(mask_tensor)
of_res = simp_module.param.numpy()
def train_by_numpy():
x = init_value
mt = np.zeros_like(x)
vt = np.zeros_like(x)
beta1 = betas[0]
beta2 = betas[1]
if adam_w_mode:
l2 = 0
wd = weight_decay
else:
l2 = weight_decay
wd = 0
def np_train_one_iter(step, grad):
if clip_grad_max_norm != -1:
_, grad = clip_grad_norm_np(
grad, clip_grad_max_norm, clip_grad_norm_type
)
grad = grad + l2 * x
bias_correction1 = 1.0
bias_correction2 = 1.0
if do_bias_correction:
bias_correction1 = 1.0 - np.power(beta1, step + 1)
bias_correction2 = 1.0 - np.power(beta2, step + 1)
m = beta1 * mt + (1 - beta1) * grad
v = beta2 * vt + (1 - beta2) * grad * grad
denom = np.sqrt(v) / np.sqrt(bias_correction2) + eps
adam_diff = m / bias_correction1 / denom
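            # LAMB scales this Adam-style update by a layer-wise trust ratio
            # ||w|| / ||update|| so every layer takes a proportionately sized step.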
w_norm = np.linalg.norm(x, ord=2)
g_norm = np.linalg.norm(adam_diff, ord=2)
if w_norm > 0 and g_norm > 0:
trust_ratio = w_norm / g_norm
else:
trust_ratio = 1.0
param = x - learning_rate * trust_ratio * (adam_diff + wd * x)
return (param, m, v)
for i in range(train_iters):
(x, mt, vt) = np_train_one_iter(i, random_grad_seq[i])
return x
np_res = train_by_numpy()
test_case.assertTrue(
np.allclose(of_res.flatten(), np_res.flatten(), rtol=1e-3, atol=1e-3)
)
@flow.unittest.skip_unless_1n1d()
class TestLamb(flow.unittest.TestCase):
def test_lamb(test_case):
arg_dict = OrderedDict()
arg_dict["device"] = ["cpu", "cuda"]
arg_dict["x_shape"] = [(10,)]
arg_dict["learning_rate"] = [0.1, 1e-3]
arg_dict["train_iters"] = [10]
arg_dict["betas"] = [(0.99, 0.9)]
arg_dict["weight_decay"] = [0.001, 0.1]
arg_dict["eps"] = [1e-8, 1e-6]
arg_dict["do_bias_correction"] = [True, False]
arg_dict["adam_w_mode"] = [True, False]
# NOTE(l1aoxingyu): max_norm = -1 means no clip grad
# nn.Graph only support `clip_grad_max_norm == 1.0` and `clip_grad_norm_type == 2.0`
arg_dict["clip_grad_max_norm"] = [-1, 1.0]
arg_dict["clip_grad_norm_type"] = [2.0]
for arg in GenArgList(arg_dict):
compare_with_numpy_lamb(test_case, *arg)
if __name__ == "__main__":
unittest.main()
| 29.181319 | 92 | 0.602523 | ["Apache-2.0"] | LiuHaolan/oneflow | python/oneflow/test/graph/test_graph_optim_lamb.py | 5,311 | Python |
#-
# Copyright (c) 2016 Michael Roe
# All rights reserved.
#
# This software was developed by SRI International and the University of
# Cambridge Computer Laboratory under DARPA/AFRL contract FA8750-10-C-0237
# ("CTSRD"), as part of the DARPA CRASH research programme.
#
# @BERI_LICENSE_HEADER_START@
#
# Licensed to BERI Open Systems C.I.C. (BERI) under one or more contributor
# license agreements. See the NOTICE file distributed with this work for
# additional information regarding copyright ownership. BERI licenses this
# file to you under the BERI Hardware-Software License, Version 1.0 (the
# "License"); you may not use this file except in compliance with the
# License. You may obtain a copy of the License at:
#
# http://www.beri-open-systems.org/legal/license-1-0.txt
#
# Unless required by applicable law or agreed to in writing, Work distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
#
# @BERI_LICENSE_HEADER_END@
#
from beritest_tools import BaseBERITestCase, attr
@attr('capabilities')
@attr('cached')
class test_cp2_c0_sc(BaseBERITestCase):
EXPECTED_EXCEPTIONS = 0
def test_cp2_c0_sc_1(self):
self.assertTrapInfoNoTrap(self.MIPS.s0, "An exception was raised by SC with an unaligned DDC (but address overall is aligned)")
| 37.435897 | 135 | 0.767123 | ["Apache-2.0"] | capt-hb/cheritest | tests/cp2/test_cp2_c0_sc.py | 1,460 | Python |
with open("input") as file:
massList = file.readlines()
def calcModuleFuel(fuel, mass):
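    # Fuel itself requires fuel: keep adding int(mass / 3) - 2 until the result
    # is no longer positive, e.g. a module of mass 1969 needs
    # 654 + 216 + 70 + 21 + 5 = 966 units in total.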
addedFuel = int(mass/3)-2
if addedFuel <=0:
return fuel
else:
return calcModuleFuel(fuel+addedFuel, addedFuel)
def calcFuelSum(massList):
fuelSum = 0
for m in massList:
m = float(m)
fuelSum += calcModuleFuel(0, m)
return fuelSum
print(calcFuelSum(massList))
| 22.666667 | 56 | 0.642157 | ["MIT"] | JeyP91/aoc-2019 | day01/python/thomg/calc_fuel.py | 408 | Python |
from graphql import Undefined
from .mountedtype import MountedType
from .structures import NonNull
from .utils import get_type
class InputField(MountedType):
"""
Makes a field available on an ObjectType in the GraphQL schema. Any type can be mounted as a
Input Field except Interface and Union:
- Object Type
- Scalar Type
- Enum
Input object types also can't have arguments on their input fields, unlike regular ``graphene.Field``.
All class attributes of ``graphene.InputObjectType`` are implicitly mounted as InputField
using the below arguments.
.. code:: python
from graphene import InputObjectType, String, InputField
class Person(InputObjectType):
# implicitly mounted as Input Field
first_name = String(required=True)
# explicitly mounted as Input Field
last_name = InputField(String, description="Surname")
args:
type (class for a graphene.UnmountedType): Must be a class (not an instance) of an
unmounted graphene type (ex. scalar or object) which is used for the type of this
field in the GraphQL schema.
name (optional, str): Name of the GraphQL input field (must be unique in a type).
Defaults to attribute name.
default_value (optional, Any): Default value to use as input if none set in user operation (
query, mutation, etc.).
deprecation_reason (optional, str): Setting this value indicates that the field is
            deprecated and may provide instruction or reason on how clients should proceed.
description (optional, str): Description of the GraphQL field in the schema.
required (optional, bool): Indicates this input field as not null in the graphql schema.
Raises a validation error if argument not provided. Same behavior as graphene.NonNull.
Default False.
**extra_args (optional, Dict): Not used.
"""
def __init__(
self,
type_,
name=None,
default_value=Undefined,
deprecation_reason=None,
description=None,
required=False,
_creation_counter=None,
**extra_args
):
super(InputField, self).__init__(_creation_counter=_creation_counter)
self.name = name
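        # required=True is shorthand for wrapping the mounted type in NonNull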
if required:
type_ = NonNull(type_)
self._type = type_
self.deprecation_reason = deprecation_reason
self.default_value = default_value
self.description = description
@property
def type(self):
return get_type(self._type)
| 36.347222 | 106 | 0.666412 | ["MIT"] | AizenGlobal/graphene | graphene/types/inputfield.py | 2,617 | Python |
# -*- coding: utf-8 -*-
from cms.exceptions import CMSDeprecationWarning
from django.conf import settings
from patch import post_patch, post_patch_check, pre_patch
import warnings
def patch_settings():
"""Merge settings with global cms settings, so all required attributes
will exist. Never override, just append non existing settings.
Also check for setting inconsistencies if settings.DEBUG
"""
if patch_settings.ALREADY_PATCHED:
return
patch_settings.ALREADY_PATCHED = True
if getattr(settings, 'CMS_MODERATOR', False):
warnings.warn("CMS_MODERATOR will be removed and replaced in django CMS 2.4!", CMSDeprecationWarning)
from cms.conf import global_settings
# patch settings
pre_patch()
# merge with global cms settings
for attr in dir(global_settings):
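        # settings are the upper-case attributes; copy only those the project
        # does not already define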
if attr == attr.upper() and not hasattr(settings, attr):
setattr(settings._wrapped, attr, getattr(global_settings, attr))
post_patch()
if settings.DEBUG:
# check if settings are correct, call this only if debugging is enabled
post_patch_check()
patch_settings.ALREADY_PATCHED = False
| 30.384615 | 109 | 0.708861 | [
"BSD-3-Clause"
] | tonatos/django-cms | cms/conf/__init__.py | 1,185 | Python |
#!/usr/bin/env py.test
# -*- coding: utf-8 -*-
__author__ = "Varun Nayyar <[email protected]>"
import numpy as np
import pytest
import NN.layerversions.layers4 as layer
def test_fc():
l1 = layer.FullyConnected(5, 10)
x = np.ones((100, 5))
y, c = l1.forward(x)
assert y.shape == (100, 10)
assert np.all(c == x)
def test_tanh():
l = layer.Tanh()
x = np.ones((100, 5))
y, c = l.forward(x)
assert y.shape == (100, 5)
assert np.all(c == y)
@pytest.fixture()
def optim():
return layer.sgd_optimiser(0.01)
def test_back_fc(optim):
l1 = layer.FullyConnected(5, 10)
x = np.ones((100, 5))
dldy = np.random.randn(100, 10)
dldx = l1.backward(dldy, x, optim)
assert dldx.shape == (100, 5)
def test_back_tanh(optim):
l1 = layer.Tanh()
x = np.random.randn(100, 5)
dldy = np.random.randn(100, 5)
dldx = l1.backward(dldy, np.tanh(x), optim)
assert dldx.shape == (100, 5)
def test_network():
from NN.loss import MSELoss
x = np.random.randn(100, 10)
y = np.random.randn(100, 3)
net = layer.Network(
layer.FullyConnected(10, 20),
layer.Tanh(),
layer.FullyConnected(20, 3),
layer.Tanh()
)
mse = MSELoss()
layer.train(net, (x, y), 10)
yhat, _ = net.forward(x)
initloss = mse.loss(y, yhat)
layer.train(net, (x, y), 10)
yhat, _ = net.forward(x)
finloss = mse.loss(yhat, y)
assert initloss > finloss
| 20 | 47 | 0.586301 | [
"MIT"
] | nayyarv/CodeANet | tests/test_layers4.py | 1,460 | Python |
import unittest
class IntcodeComputer():
OP_ADD = 1
OP_MULTIPLY = 2
OP_INPUT = 3
OP_OUTPUT = 4
OP_JUMP_TRUE = 5
OP_JUMP_FALSE = 6
OP_LESS_THAN = 7
OP_EQUALS = 8
OP_MOD_REL = 9
OP_HALT = 99
PARAM_MODE_POS = '0'
PARAM_MODE_IMD = '1'
PARAM_MODE_REL = '2'
NOUN_ADDR = 1
VERB_ADDR = 2
RESULT_ADDR = 0
START_ADDR = 0
INIT_VAL = 0
    def __init__(self, data=None):
self.inputs = []
self.memory = []
self.initial_memory = []
if data:
self.load_memory(data)
def load_memory(self, data):
self.initial_memory = self.normalize_memory(data)
self.reset()
def expand_memory(self, addr):
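        # Intcode programs may address memory beyond the loaded program;
        # grow the tape with INIT_VAL (0) cells up to the requested address.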
needed_mem = addr - (len(self.memory) - 1)
if needed_mem > 0:
self.memory += ([self.INIT_VAL] * needed_mem)
else:
raise Exception(f'Cannot expand memory for addr {addr}')
def check_addr(self, addr):
if addr < 0:
raise Exception(f'Addr {addr}, cannot be negative')
if addr >= len(self.memory):
self.expand_memory(addr)
return addr
def reset(self):
if self.memory:
del self.memory[:]
self.memory = self.initial_memory.copy()
if self.inputs:
del self.inputs[:]
self.inputs = []
self.output = None
self.last_input = None
self.instruction_ptr = self.START_ADDR
self.relative_base = self.START_ADDR
def add_input(self, data):
self.inputs.append(data)
def print_program(self):
print("Program: {:02d}{:02d}".format(self.memory[self.NOUN_ADDR],self.memory[self.VERB_ADDR]))
def normalize_memory(self, intcode):
if type(intcode) is str:
return list(map(int, intcode.split(',')))
elif type(intcode) is list:
if type(intcode[0]) is str:
return list(map(int, intcode))
else:
return intcode
else:
raise Exception('Corrupt intcode')
def get_paramater(self, mode):
param = self.memory[self.instruction_ptr]
self.instruction_ptr += 1
if mode == self.PARAM_MODE_POS:
addr = self.check_addr(param)
val = self.memory[addr]
elif mode == self.PARAM_MODE_REL:
addr = self.relative_base + param
addr = self.check_addr(addr)
val = self.memory[addr]
elif mode == self.PARAM_MODE_IMD:
val = param
else:
raise Exception(f"Unkown paramater mode: {param}")
return val
def set_paramater(self, mode, data):
param = self.memory[self.instruction_ptr]
self.instruction_ptr += 1
if mode == self.PARAM_MODE_POS:
addr = self.check_addr(param)
self.memory[addr] = data
elif mode == self.PARAM_MODE_REL:
addr = self.relative_base + param
addr = self.check_addr(addr)
self.memory[addr] = data
elif mode == self.PARAM_MODE_IMD:
raise Exception("Set paramater can't be in immediate mode")
else:
raise Exception(f"Unkown paramater mode: {param}")
def parse_opcode(self):
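        # Decode an instruction such as 1002: zero-padded to '01002', the last
        # two digits are the opcode (02, multiply) and the leading digits, read
        # right to left, give the modes of parameters 1..3 ('0', '1', '0').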
mode_opcode_str = '{:>05}'.format(str(self.memory[self.instruction_ptr]))
# Reverse of the first three chars
modes = mode_opcode_str[:3][::-1]
# integer of the last two chars
opcode = int(mode_opcode_str[3:])
self.instruction_ptr += 1
return modes, opcode
def run(self):
self.output = None
while self.instruction_ptr < len(self.memory):
param_mode, opcode = self.parse_opcode()
if opcode == self.OP_HALT:
return 0
elif opcode == self.OP_ADD:
in1 = self.get_paramater(param_mode[0])
in2 = self.get_paramater(param_mode[1])
self.set_paramater(param_mode[2], in1 + in2)
elif opcode == self.OP_MULTIPLY:
in1 = self.get_paramater(param_mode[0])
in2 = self.get_paramater(param_mode[1])
self.set_paramater(param_mode[2], in1 * in2)
elif opcode == self.OP_INPUT:
if self.inputs:
self.last_input = self.inputs.pop()
if self.last_input != None:
self.set_paramater(param_mode[0], self.last_input)
else:
raise Exception(f"{self.last_input} is not a valid input")
elif opcode == self.OP_OUTPUT:
self.output = self.get_paramater(param_mode[0])
return 1
elif opcode == self.OP_JUMP_TRUE:
do_jump = self.get_paramater(param_mode[0])
new_addr = self.get_paramater(param_mode[1])
if do_jump != 0:
self.instruction_ptr = new_addr
elif opcode == self.OP_JUMP_FALSE:
do_jump = self.get_paramater(param_mode[0])
new_addr = self.get_paramater(param_mode[1])
if do_jump == 0:
self.instruction_ptr = new_addr
elif opcode == self.OP_LESS_THAN:
in1 = self.get_paramater(param_mode[0])
in2 = self.get_paramater(param_mode[1])
if in1 < in2:
self.set_paramater(param_mode[2], 1)
else:
self.set_paramater(param_mode[2], 0)
elif opcode == self.OP_EQUALS:
in1 = self.get_paramater(param_mode[0])
in2 = self.get_paramater(param_mode[1])
if in1 == in2:
self.set_paramater(param_mode[2], 1)
else:
self.set_paramater(param_mode[2], 0)
elif opcode == self.OP_MOD_REL:
val = self.get_paramater(param_mode[0])
self.relative_base += val
else:
raise Exception(f'Unknown opcode {opcode} at addr {self.instruction_ptr}.')
self.reset()
return -1
| 32.242718 | 102 | 0.510238 | ["MIT"] | RubenFixit/AoC | AoC 2019/intcode.py | 6,642 | Python |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
__all__ = [
'ListDatabaseAccountKeysResult',
'AwaitableListDatabaseAccountKeysResult',
'list_database_account_keys',
]
@pulumi.output_type
class ListDatabaseAccountKeysResult:
"""
The access keys for the given database account.
"""
def __init__(__self__, primary_master_key=None, primary_readonly_master_key=None, secondary_master_key=None, secondary_readonly_master_key=None):
if primary_master_key and not isinstance(primary_master_key, str):
raise TypeError("Expected argument 'primary_master_key' to be a str")
pulumi.set(__self__, "primary_master_key", primary_master_key)
if primary_readonly_master_key and not isinstance(primary_readonly_master_key, str):
raise TypeError("Expected argument 'primary_readonly_master_key' to be a str")
pulumi.set(__self__, "primary_readonly_master_key", primary_readonly_master_key)
if secondary_master_key and not isinstance(secondary_master_key, str):
raise TypeError("Expected argument 'secondary_master_key' to be a str")
pulumi.set(__self__, "secondary_master_key", secondary_master_key)
if secondary_readonly_master_key and not isinstance(secondary_readonly_master_key, str):
raise TypeError("Expected argument 'secondary_readonly_master_key' to be a str")
pulumi.set(__self__, "secondary_readonly_master_key", secondary_readonly_master_key)
@property
@pulumi.getter(name="primaryMasterKey")
def primary_master_key(self) -> str:
"""
Base 64 encoded value of the primary read-write key.
"""
return pulumi.get(self, "primary_master_key")
@property
@pulumi.getter(name="primaryReadonlyMasterKey")
def primary_readonly_master_key(self) -> str:
"""
Base 64 encoded value of the primary read-only key.
"""
return pulumi.get(self, "primary_readonly_master_key")
@property
@pulumi.getter(name="secondaryMasterKey")
def secondary_master_key(self) -> str:
"""
Base 64 encoded value of the secondary read-write key.
"""
return pulumi.get(self, "secondary_master_key")
@property
@pulumi.getter(name="secondaryReadonlyMasterKey")
def secondary_readonly_master_key(self) -> str:
"""
Base 64 encoded value of the secondary read-only key.
"""
return pulumi.get(self, "secondary_readonly_master_key")
class AwaitableListDatabaseAccountKeysResult(ListDatabaseAccountKeysResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return ListDatabaseAccountKeysResult(
primary_master_key=self.primary_master_key,
primary_readonly_master_key=self.primary_readonly_master_key,
secondary_master_key=self.secondary_master_key,
secondary_readonly_master_key=self.secondary_readonly_master_key)
def list_database_account_keys(account_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableListDatabaseAccountKeysResult:
"""
The access keys for the given database account.
:param str account_name: Cosmos DB database account name.
:param str resource_group_name: Name of an Azure resource group.
"""
__args__ = dict()
__args__['accountName'] = account_name
__args__['resourceGroupName'] = resource_group_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-native:documentdb/v20191212:listDatabaseAccountKeys', __args__, opts=opts, typ=ListDatabaseAccountKeysResult).value
return AwaitableListDatabaseAccountKeysResult(
primary_master_key=__ret__.primary_master_key,
primary_readonly_master_key=__ret__.primary_readonly_master_key,
secondary_master_key=__ret__.secondary_master_key,
secondary_readonly_master_key=__ret__.secondary_readonly_master_key)
| 42.52381 | 158 | 0.721613 | [
"Apache-2.0"
] | polivbr/pulumi-azure-native | sdk/python/pulumi_azure_native/documentdb/v20191212/list_database_account_keys.py | 4,465 | Python |
import setuptools,subprocess,platform,os
with open("README.md","r",encoding="utf-8") as r:
long_description=r.read()
URL="https://github.com/KoichiYasuoka/spaCy-ixaKat"
if platform.machine()=="x86_64" and os.name != "nt":
subprocess.check_call(["spacy_ixakat/bin/download"])
else:
raise OSError("spaCy-ixaKat only for 64-bit unix")
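# The packages script (packages.sh, or the copy bundled in spacy_ixakat/bin)
# prints the list of data files that are then included as package_data.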
try:
p=subprocess.check_output(["./packages.sh"])
except:
p=subprocess.check_output(["spacy_ixakat/bin/packages"])
packages=p.decode("utf-8").rstrip().split("\n")
setuptools.setup(
name="spacy_ixakat",
version="0.6.4",
description="ixaKat wrapper for spaCy",
long_description=long_description,
long_description_content_type="text/markdown",
url=URL,
author="Koichi Yasuoka",
author_email="[email protected]",
license="MIT",
keywords="ixaKat spaCy",
packages=setuptools.find_packages(),
package_data={"spacy_ixakat":packages},
install_requires=["spacy>=2.2.2","deplacy>=1.9.9"],
python_requires=">=3.6",
classifiers=[
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 3",
"Operating System :: POSIX :: Linux",
"Topic :: Text Processing :: Linguistic",
"Natural Language :: Basque",
],
project_urls={
"ixaKat":"http://ixa2.si.ehu.es/ixakat",
"Source":URL,
"Tracker":URL+"/issues",
}
)
| 29.152174 | 58 | 0.691275 | ["MIT"] | sarnthil/spaCy-ixaKat | setup.py | 1,341 | Python |
import pytest
import numpy as np
from numpy.testing import (assert_array_equal, assert_array_almost_equal,
assert_almost_equal, assert_equal)
from nose.tools import assert_raises
from pystruct.models import NodeTypeEdgeFeatureGraphCRF, EdgeFeatureGraphCRF
from pystruct.inference.linear_programming import lp_general_graph
from pystruct.inference import compute_energy, get_installed
from pystruct.utils import make_grid_edges, edge_list_to_features
from pystruct.datasets import generate_blocks_multinomial
def test_checks():
g = NodeTypeEdgeFeatureGraphCRF(
1 #how many node type?
, [4] #how many labels per node type?
, [3] #how many features per node type?
, np.array([[3]]) #how many features per node type X node type?
)
g = NodeTypeEdgeFeatureGraphCRF(
2 #how many node type?
, [2, 3 ] #how many labels per node type?
, [4, 5] #how many features per node type?
, np.array([[1, 2], [2,4]]) #how many features per node type X node type?
)
with pytest.raises(ValueError):
g = NodeTypeEdgeFeatureGraphCRF(
3 #how many node type?
, [2, 3 ] #how many labels per node type?
, [4, 5] #how many features per node type?
, np.array([[1, 2], [2,4]]) #how many features per node type X node type?
)
with pytest.raises(ValueError):
g = NodeTypeEdgeFeatureGraphCRF(
2 #how many node type?
, [2, 3 ] #how many labels per node type?
, [4, 5, 3] #how many features per node type?
, np.array([[1, 2], [2,4]]) #how many features per node type X node type?
)
with pytest.raises(ValueError):
g = NodeTypeEdgeFeatureGraphCRF(
3 #how many node type?
, [2, 3 ] #how many labels per node type?
, [4, 5] #how many features per node type?
, np.array([[1, 2], [2,4]]) #how many features per node type X node type?
)
with pytest.raises(ValueError):
g = NodeTypeEdgeFeatureGraphCRF(
2 #how many node type?
, [2, 3 ] #how many labels per node type?
, [4, 5] #how many features per node type?
, np.array([[1, 2, 3], [2,3,4]]) #how many features per node type X node type?
)
with pytest.raises(ValueError):
g = NodeTypeEdgeFeatureGraphCRF(
2 #how many node type?
, [2, 3 ] #how many labels per node type?
, [4, 5] #how many features per node type?
, np.array([[1, 2], [99,4]]) #how many features per node type X node type?
)
def debug_joint_feature():
# -------------------------------------------------------------------------------------------
#print "---MORE COMPLEX GRAPH :) ---------------------------------------------------------------------"
g = NodeTypeEdgeFeatureGraphCRF(
2 #how many node type?
, [2, 3] #how many possible labels per node type?
, [3, 4] #how many features per node type?
, np.array([ [1, 2]
, [2, 3]]) #how many features per node type X node type?
)
l_node_f = [ np.array([ [1,1,1], [2,2,2] ])
, np.array([ [.11, .12, .13, .14], [.21, .22, .23, .24], [.31, .32, .33, .34]])
]
l_edges = [ np.array([[0, 1]]) #type 0 node 0 to type 0 node 0
, np.array([[0, 1]])
, None
, None
]
l_edge_f = [ np.array([[.111]])
, np.array([[.221, .222]])
, None
, None
]
x = (l_node_f, l_edges, l_edge_f)
#print "- - - - - - - - - - - - - - - - - - - - - - - - - - - "
y = np.hstack([ np.array([0, 1]),
np.array([0, 1, 2])
])
#print y
g.initialize(x, y)
jf = g.joint_feature(x,y)
#print "joint_feature = \n", `jf`
#print
assert_array_equal(jf, jf)
assert_array_almost_equal(jf
, np.array(
[ 1. , 1., 1. , 2., 2., 2.
, 0.11 , 0.12 , 0.13 , 0.14 , 0.21 , 0.22 , 0.23 , 0.24 , 0.31 , 0.32 , 0.33 , 0.34
, 0. , 0.111, 0. , 0. , 0. , 0.221,
0. , 0. , 0. , 0. , 0. , 0.222, 0. , 0. ,
0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0.
]))
def get_simple_graph_structure():
g = NodeTypeEdgeFeatureGraphCRF(
1 #how many node type?
, [4] #how many labels per node type?
, [3] #how many features per node type?
, np.array([[3]]) #how many features per node type X node type?
)
return g
def get_simple_graph():
node_f = [ np.array([[1,1,1],
[2,2,2]])
]
edges = [ np.array([[0,1]])
] #an edge from 0 to 1
edge_f = [ np.array([[3,3,3]])
]
return (node_f, edges, edge_f)
def get_simple_graph2():
node_f = [ np.array([ [1,1,1]
, [2,2,2]]) ]
edges = [ np.array( [[0,1], #an edge from 0 to 1
[0,0] #an edge from 0 to 0
]) ]
edge_f = [ np.array([
[3,3,3],
[4,4,4]
]) ]
return (node_f, edges, edge_f)
def test_flatten_unflattenY():
g, (node_f, edges, edge_f) = get_simple_graph_structure(), get_simple_graph()
y = np.array([1,2])
l_nf = [ np.zeros((2,3)) ] #list of node feature , per type
X = (l_nf, None, None) #we give no edge
y_ref = [ np.array([1,2]) ]
assert all( [ (y_typ1 == y_typ2).all() for y_typ1, y_typ2 in zip(g.unflattenY(X, y), y_ref) ])
assert (y == g.flattenY(g.unflattenY(X, y))).all()
#============================================
g, x, y = more_complex_graph()
Y = [ np.array([0, 0])
, np.array([0, 0, 0]) #we start again at zero on 2nd type
]
y = np.hstack([ np.array([0, 0])
, 2+np.array([0, 0, 0])
])
l_nf = [ np.zeros( (2,3) ), np.zeros( (3, 4) )] #2 node with 3 features, 3 node with 4 features
X = (l_nf, None, None) #we give no edge
assert (g.flattenY(Y) == y).all()
#print g.unflattenY(X, y)
assert all( [ (y_typ1 == y_typ2).all() for y_typ1, y_typ2 in zip(g.unflattenY(X, y), Y) ])
l_nf = [ np.zeros( (1,3) ), np.zeros( (3, 4) )] #2 node with 3 features, 3 node with 4 features
X = (l_nf, None, None) #we give no edge
assert_raises(ValueError, g.unflattenY, X, y)
def test_joint_feature():
#print "---SIMPLE---------------------------------------------------------------------"
g, (node_f, edges, edge_f) = get_simple_graph_structure(), get_simple_graph()
x = (node_f, edges, edge_f)
#print "- - - - - - - - - - - - - - - - - - - - - - - - - - - "
y = np.array([1,2])
# y = np.array([1,0])
#print y
g.initialize(x, y)
jf = g.joint_feature(x,y)
#print "joint_feature = \n", `jf`
#print
assert_array_equal(g.joint_feature(x,y)
, np.array([ 0., 0., 0., 1., 1., 1., 2., 2., 2., 0., 0., 0.
, 0.,
0., 0., 0., 0., 0., 3., 0., 0., 0., 0., 0., 0., 0.,
0., 0., 0., 0., 0., 0., 0., 0., 3., 0., 0., 0., 0.,
0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 3., 0.,
0., 0., 0., 0., 0., 0., 0., 0.])
)
#print "- - - - - - - - - - - - - - - - - - - - - - - - - - - "
y = np.array([0,0])
#print y
g.initialize(x, y)
jf = g.joint_feature(x,y)
#print "joint_feature = \n", `jf`
#print
assert_array_equal(g.joint_feature(x,y)
, np.array([ 3., 3., 3., 0., 0., 0., 0., 0., 0., 0., 0., 0., 3.,
0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
0., 0., 3., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
0., 0., 0., 0., 0., 3., 0., 0., 0., 0., 0., 0., 0.,
0., 0., 0., 0., 0., 0., 0., 0.])
)
#print "- - - - - - - - - - - - - - - - - - - - - - - - - - - "
y = np.array([0,1])
node_f = [ np.array([[1.1,1.2,1.3], [2.1,2.2,2.3]]) ]
edge_f = [ np.array([[3.1,3.2,3.3]]) ]
x = (node_f, edges, edge_f)
g.initialize(x, y)
jf = g.joint_feature(x,y)
#print "joint_feature = \n", `jf`
assert_array_equal(g.joint_feature(x,y)
, np.array([ 1.1, 1.2, 1.3, 2.1, 2.2, 2.3, 0. , 0. , 0. , 0. , 0. ,
0. , 0. , 3.1, 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. , 0. , 0. , 0. , 3.2, 0. , 0. , 0. ,
0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. ,
0. , 3.3, 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. , 0. ])
)
#print "---SIMPLE + 2nd EDGE--------------------------------------------------------"
node_f, edges, edge_f = get_simple_graph2()
x = (node_f, edges, edge_f)
#print "- - - - - - - - - - - - - - - - - - - - - - - - - - - "
y = np.array([1,2])
#print y
g.initialize(x, y)
jf = g.joint_feature(x,y)
#print "joint_feature = \n", `jf`
#print
assert_array_equal(jf
, np.array([ 0., 0., 0., 1., 1., 1., 2., 2., 2., 0., 0., 0., 0.,
0., 0., 0., 0., 4., 3., 0., 0., 0., 0., 0., 0., 0.,
0., 0., 0., 0., 0., 0., 0., 4., 3., 0., 0., 0., 0.,
0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 4., 3., 0.,
0., 0., 0., 0., 0., 0., 0., 0.])
)
#print "- - - - - - - - - - - - - - - - - - - - - - - - - - - "
y = np.array([0,0])
#print y
g.initialize(x, y)
#print "joint_feature = \n", `g.joint_feature(x,y)`
#print
assert_array_equal(g.joint_feature(x,y)
, np.array([ 3., 3., 3., 0., 0., 0., 0., 0., 0., 0., 0., 0., 7.,
0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
0., 0., 7., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
0., 0., 0., 0., 0., 7., 0., 0., 0., 0., 0., 0., 0.,
0., 0., 0., 0., 0., 0., 0., 0.])
)
def more_complex_graph():
g = NodeTypeEdgeFeatureGraphCRF(
2 #how many node type?
, [2, 3] #how many labels per node type?
, [3, 4] #how many features per node type?
, np.array([ [1, 2]
, [2, 3]]) #how many features per node type X node type?
)
# nodes = np.array( [[0,0], [0,1], [1, 0], [1, 1], [1, 2]] )
node_f = [ np.array([ [1,1,1], [2,2,2] ])
, np.array([ [.11, .12, .13, .14], [.21, .22, .23, .24], [.31, .32, .33, .34]])
]
edges = [ np.array( [ [0,1] #an edge from 0 to 1
])
, np.array( [
[0,0] #an edge from typ0:0 to typ1:0
])
, None
, None
]
edge_f = [ np.array([[.111]])
, np.array([[.221, .222]])
, None
, None
]
x = (node_f, edges, edge_f)
y = np.hstack([ np.array([0, 0])
, 2+np.array([0, 0, 0])
])
return g, x, y
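# Layout of the joint feature vector checked in the tests below: first a unary
# block with n_states[t] * n_features[t] entries per node type t, then a pairwise
# block with n_edge_features[t1][t2] * n_states[t1] * n_states[t2] entries per
# ordered type pair (t1, t2). For this graph that is 2*3 + 3*4 = 18 unary entries
# and 1*2*2 + 2*2*3 + 2*3*2 + 3*3*3 = 55 pairwise entries, 73 in total.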
def test_joint_feature2():
# -------------------------------------------------------------------------------------------
#print "---MORE COMPLEX GRAPH :) ---------------------------------------------------------------------"
g, x, y = more_complex_graph()
#print y
g.initialize(x, y)
jf = g.joint_feature(x,y)
#print "joint_feature = \n", `jf`
#print
assert_array_equal(jf, jf)
assert_array_almost_equal(jf
, np.array([ 3. , 3. , 3. , 0. , 0. , 0. , 0.63 , 0.66 ,
0.69 , 0.72 , 0. , 0. , 0. , 0. , 0. , 0. ,
0. , 0. , 0.111, 0. , 0. , 0. , 0.221, 0. ,
0. , 0. , 0. , 0. , 0.222, 0. , 0. , 0. ,
0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. ]))
#print "---MORE COMPLEX GRAPH :) -- BIS -------------------------------------------------------------------"
g = NodeTypeEdgeFeatureGraphCRF(
2 #how many node type?
, [2, 3] #how many labels per node type?
, [3, 4] #how many features per node type?
, np.array([ [1, 2]
, [2, 3]]) #how many features per node type X node type?
)
node_f = [ np.array([ [1,1,1], [2,2,2] ])
, np.array([ [.11, .12, .13, .14], [.21, .22, .23, .24], [.31, .32, .33, .34]])
]
edges = [ np.array( [ [0,1]] ), #an edge from 0 to 1
np.array( [ [0,2]] ) #an edge from 0 to 2
, None, None
]
edge_f = [ np.array([[.111]])
, np.array([[.221, .222]])
, None
, None
]
x = ( node_f, edges, edge_f)
#print "- - - - - - - - - - - - - - - - - - - - - - - - - - - "
y = np.hstack([np.array([0, 1]),
2+np.array([0, 1, 2])])
#print y
g.initialize(x, y)
jf = g.joint_feature(x,y)
#print "joint_feature = \n", `jf`
#print
assert_array_equal(jf, jf)
assert_array_almost_equal(jf
, np.array([ 1. , 1. , 1. , 2. , 2. , 2. , 0.11 , 0.12 ,
0.13 , 0.14 , 0.21 , 0.22 , 0.23 , 0.24 , 0.31 , 0.32 ,
0.33 , 0.34 , 0. , 0.111, 0. , 0. , 0. , 0. ,
0.221, 0. , 0. , 0. , 0. , 0. , 0.222, 0. ,
0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. ]))
#print "MORE COMPLEX GRAPH :) -- BIS OK"
#print "--- REORDERED MORE COMPLEX GRAPH :) ---------------------------------------------------------------------"
node_f = [ np.array([ [2,2,2], [1,1,1] ])
, np.array([ [.31, .32, .33, .34], [.11, .12, .13, .14], [.21, .22, .23, .24]])
]
edges = [ np.array( [ [1, 0]] ),
np.array( [ [1,0]] ) #an edge from 0 to 2
, None, None
]
edge_f = [ np.array([[.111]])
, np.array([[.221, .222]])
, None
, None
]
x = ( node_f, edges, edge_f)
#print "- - - - - - - - - - - - - - - - - - - - - - - - - - - "
y = np.hstack([np.array([1, 0]),
2+np.array([2, 0, 1])])
#print y
g.initialize(x, y)
jf = g.joint_feature(x,y)
#print "joint_feature = \n", `jf`
#print
assert_array_equal(jf, jf)
assert_array_almost_equal(jf
, np.array([ 1. , 1. , 1. , 2. , 2. , 2. , 0.11 , 0.12 ,
0.13 , 0.14 , 0.21 , 0.22 , 0.23 , 0.24 , 0.31 , 0.32 ,
0.33 , 0.34 , 0. , 0.111, 0. , 0. , 0. , 0. ,
0.221, 0. , 0. , 0. , 0. , 0. , 0.222, 0. ,
0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. ]))
def test_joint_feature3():
# -------------------------------------------------------------------------------------------
#print "---MORE COMPLEX GRAPH AGAIN :) ---------------------------------------------------------------------"
g = NodeTypeEdgeFeatureGraphCRF(
2 #how many node type?
, [2, 3] #how many labels per node type?
, [3, 4] #how many features per node type?
, np.array([ [0, 2]
, [2, 3]]) #how many features per node type X node type?
)
# nodes = np.array( [[0,0], [0,1], [1, 0], [1, 1], [1, 2]] )
node_f = [ np.array([ [1,1,1], [2,2,2] ])
, np.array([ [.11, .12, .13, .14], [.21, .22, .23, .24], [.31, .32, .33, .34]])
]
edges = [ None
, np.array( [
[0,1] #an edge from typ0:0 to typ1:1
])
, None
, np.array( [
[0,1], #an edge from typ0:0 to typ1:1
[1,2] #an edge from typ1:1 to typ1:2
])
]
edge_f = [ None
, np.array([[.221, .222]])
, None
, np.array([[.01, .02, .03 ],
[.001, .002, .003]])
]
x = (node_f, edges, edge_f)
#print "- - - - - - - - - - - - - - - - - - - - - - - - - - - "
y = np.hstack([ np.array([0, 0])
, 2+np.array([0, 0, 0])
])
#print y
g.initialize(x, y)
#print g.size_unaries
#print g.size_pairwise
jf = g.joint_feature(x,y)
#print "joint_feature = \n", `jf`
#print
assert_array_equal(jf, jf)
assert_array_almost_equal(jf
, np.array([ 3. , 3. , 3. , 0. , 0. , 0. ,
0.63 , 0.66 , 0.69 , 0.72 , 0. , 0., 0., 0. , 0., 0., 0. , 0.,
#edges 0 to 0 2x2 states
#typ0 typ0 EMPTY
#typ0 typ1
.221, 0., 0., 0., 0., 0.,
.222, 0., 0., 0., 0., 0.,
#typ1 typ0
0., 0., 0., 0., 0., 0.,
0., 0., 0., 0., 0., 0.,
#typ1 typ1
0.011, 0., 0., 0., 0., 0., 0., 0., 0.,
0.022, 0., 0., 0., 0., 0., 0., 0., 0.,
0.033, 0., 0., 0., 0., 0., 0., 0., 0.
])
)
#print "- - - - - - - - - - - - - - - - - - - - - - - - - - - "
y = np.hstack([ np.array([0, 1])
, 2+np.array([1, 1, 0])
])
#print y
g.initialize(x, y)
jf = g.joint_feature(x,y)
#print "joint_feature = \n", `jf`
#print
assert_array_equal(jf, jf)
assert_array_almost_equal(jf
, np.array([ 1. , 1. , 1. , 2. , 2. , 2. ,
.31, .32, .33, .34 , .32, .34, .36, .38 , 0., 0., 0. , 0.,
#edges 0 to 0 2x2 states
#typ0 typ0 EMPTY
#typ0 typ1
0., .221, 0., 0., 0., 0.,
0., .222, 0., 0., 0., 0.,
#typ1 typ0
0., 0., 0., 0., 0., 0.,
0., 0., 0., 0., 0., 0.,
#typ1 typ1
0., 0., 0., 0.001, 0.01, 0., 0., 0., 0.,
0., 0., 0., 0.002, 0.02, 0., 0., 0., 0.,
0., 0., 0., 0.003, 0.03, 0., 0., 0., 0.
])
)
w = np.array([ 1,1,1, 2,2,2, 10,10,10,10, 20,20,20,20, 30,30,30,30 ]
+[1.0]*51, dtype=np.float64
)
#print `w`
ret_u = g._get_unary_potentials(x, w)
#print `ret_u`
assert len(ret_u) == 2
assert_array_almost_equal(ret_u[0], np.array([ #n_nodes x n_states
[3, 6],
[6, 12]]))
assert_array_almost_equal(ret_u[1], np.array([ #n_nodes x n_states
[5, 10, 15],
[9, 18, 27],
[13, 26, 39]]))
assert len(w) == g.size_joint_feature
ret_pw = g._get_pairwise_potentials(x, w)
# for _pw in ret_pw:
# print "_pw ", `_pw`
pw00, pw01, pw10, pw11 = ret_pw
assert len(pw00) == 0
assert_array_almost_equal(pw01,np.array([ #n_edges, n_states, n_states
[[0.443, 0.443, 0.443],
[0.443, 0.443, 0.443]]
]))
assert len(pw10) == 0
assert_array_almost_equal(pw11,np.array([ #n_edges, n_states, n_states
[[0.06 , 0.06 , 0.06],
[0.06 , 0.06 , 0.06],
[0.06 , 0.06 , 0.06]]
,
[[0.006, 0.006, 0.006],
[0.006, 0.006, 0.006],
[0.006, 0.006, 0.006]]
]))
def test_unary_potentials():
#print "---SIMPLE---------------------------------------------------------------------"
#g, (node_f, edges, edge_f) = get_simple_graph_structure(), get_simple_graph()
g = NodeTypeEdgeFeatureGraphCRF(
1 #how many node type?
, [4] #how many labels per node type?
, [3] #how many features per node type?
, np.array([[3]]) #how many features per node type X node type?
)
node_f = [ np.array([[1,1,1],
[2,2,2]])
]
edges = [ np.array([[0,1]])
] #an edge from 0 to 1
edge_f = [ np.array([[3,3,3]])
]
x = (node_f, edges, edge_f)
#print "- - - - - - - - - - - - - - - - - - - - - - - - - - - "
y = np.hstack([ np.array([1,2])])
# y = np.array([1,0])
#print y
g.initialize(x, y)
gref = EdgeFeatureGraphCRF(4,3,3)
xref = (node_f[0], edges[0], edge_f[0])
wref = np.arange(gref.size_joint_feature)
potref = gref._get_unary_potentials(xref, wref)
#print `potref`
w = np.arange(g.size_joint_feature)
pot = g._get_unary_potentials(x, w)
#print `pot`
assert_array_equal(pot, [potref])
pwpotref = gref._get_pairwise_potentials(xref, wref)
#print `pwpotref`
pwpot = g._get_pairwise_potentials(x, w)
#print `pwpot`
assert_array_equal(pwpot, [pwpotref])
# def test_inference_util():
# g = NodeTypeEdgeFeatureGraphCRF(
# 3 #how many node type?
# , [2, 3, 1] #how many labels per node type?
# , [3, 4, 1] #how many features per node type?
# , np.array([ [1, 2, 2]
# , [2, 3, 2]
# , [2, 2, 1]]) #how many features per node type X node type?
# )
# node_f = [ np.array([ [2,2,2], [1,1,1] ])
# , np.array([ [.31, .32, .33, .34], [.11, .12, .13, .14], [.21, .22, .23, .24]])
# , np.array([ [77], [88], [99]])
# ]
# edges = [ np.array( [ [1, 0]] ),
# np.array( [ [1,0]] ) #an edge from 0 to 2
# , None
#
# , None
# , None
# , None
#
# , np.array( [[1,1]] )
# , None
# , None ]
#
# x = ( node_f, edges, None)
#
# reindexed_exdges = g._index_all_edges(x)
# #print `reindexed_exdges`
# assert_array_equal(reindexed_exdges,
# np.array( [[1,0],
# [1,2],
# [6,1]]))
#
# def report_model_config(crf):
# print crf.n_states
# print crf.n_features
# print crf.n_edge_features
def inference_data():
"""
Testing with a single type of nodes. Must do as well as EdgeFeatureGraphCRF
"""
# Test inference with different weights in different directions
X, Y = generate_blocks_multinomial(noise=2, n_samples=1, seed=1)
x, y = X[0], Y[0]
n_states = x.shape[-1]
edge_list = make_grid_edges(x, 4, return_lists=True)
edges = np.vstack(edge_list)
pw_horz = -1 * np.eye(n_states)
xx, yy = np.indices(pw_horz.shape)
# linear ordering constraint horizontally
pw_horz[xx > yy] = 1
# high cost for unequal labels vertically
pw_vert = -1 * np.eye(n_states)
pw_vert[xx != yy] = 1
pw_vert *= 10
# generate edge weights
edge_weights_horizontal = np.repeat(pw_horz[np.newaxis, :, :],
edge_list[0].shape[0], axis=0)
edge_weights_vertical = np.repeat(pw_vert[np.newaxis, :, :],
edge_list[1].shape[0], axis=0)
edge_weights = np.vstack([edge_weights_horizontal, edge_weights_vertical])
# do inference
res = lp_general_graph(-x.reshape(-1, n_states), edges, edge_weights)
edge_features = edge_list_to_features(edge_list)
x = ([x.reshape(-1, n_states)], [edges], [edge_features])
y = y.ravel()
return x, y, pw_horz, pw_vert, res, n_states
def test_inference_ad3plus():
x, y, pw_horz, pw_vert, res, n_states = inference_data()
# same inference through CRF inferface
crf = NodeTypeEdgeFeatureGraphCRF(1, [3], [3], [[2]]
, inference_method="ad3+")
crf.initialize(x, y)
#crf.initialize([x], [y])
w = np.hstack([np.eye(3).ravel(), -pw_horz.ravel(), -pw_vert.ravel()])
y_pred = crf.inference(x, w, relaxed=True)
if isinstance(y_pred, tuple):
# ad3 produces an integer result if it found the exact solution
#np.set_printoptions(precision=2, threshold=9999)
assert_array_almost_equal(res[0], y_pred[0][0].reshape(-1, n_states), 5)
assert_array_almost_equal(res[1], y_pred[1][0], 5)
assert_array_equal(y, np.argmax(y_pred[0][0], axis=-1), 5)
# again, this time discrete predictions only
crf = NodeTypeEdgeFeatureGraphCRF(1, [3], [3], [[2]]
, inference_method="ad3+")
#crf.initialize([x], [y])
w = np.hstack([np.eye(3).ravel(), -pw_horz.ravel(), -pw_vert.ravel()])
crf.initialize(x)
y_pred = crf.inference(x, w, relaxed=False)
assert_array_equal(y, y_pred)
def test_inference_ad3():
x, y, pw_horz, pw_vert, res, n_states = inference_data()
# same inference through CRF inferface
crf = NodeTypeEdgeFeatureGraphCRF(1, [3], [3], [[2]]
, inference_method="ad3")
crf.initialize(x, y)
#crf.initialize([x], [y])
w = np.hstack([np.eye(3).ravel(), -pw_horz.ravel(), -pw_vert.ravel()])
y_pred = crf.inference(x, w, relaxed=True)
if isinstance(y_pred, tuple):
# ad3 produces an integer result if it found the exact solution
#np.set_printoptions(precision=2, threshold=9999)
assert_array_almost_equal(res[0], y_pred[0][0].reshape(-1, n_states), 5)
assert_array_almost_equal(res[1], y_pred[1][0], 5)
assert_array_equal(y, np.argmax(y_pred[0][0], axis=-1), 5)
# again, this time discrete predictions only
crf = NodeTypeEdgeFeatureGraphCRF(1, [3], [3], [[2]]
, inference_method="ad3")
#crf.initialize([x], [y])
w = np.hstack([np.eye(3).ravel(), -pw_horz.ravel(), -pw_vert.ravel()])
crf.initialize(x)
y_pred = crf.inference(x, w, relaxed=False)
assert_array_equal(y, y_pred)
def test_joint_feature_discrete():
"""
Testing with a single type of nodes. Must do as well as EdgeFeatureGraphCRF
"""
X, Y = generate_blocks_multinomial(noise=2, n_samples=1, seed=1)
x, y = X[0], Y[0]
edge_list = make_grid_edges(x, 4, return_lists=True)
edges = np.vstack(edge_list)
edge_features = edge_list_to_features(edge_list)
x = ([x.reshape(-1, 3)], [edges], [edge_features])
y_flat = y.ravel()
#for inference_method in get_installed(["lp", "ad3", "qpbo"]):
if True:
crf = NodeTypeEdgeFeatureGraphCRF(1, [3], [3], [[2]])
joint_feature_y = crf.joint_feature(x, y_flat)
assert_equal(joint_feature_y.shape, (crf.size_joint_feature,))
# first horizontal, then vertical
# we trust the unaries ;)
n_states = crf.l_n_states[0]
n_features = crf.l_n_features[0]
pw_joint_feature_horz, pw_joint_feature_vert = joint_feature_y[n_states *
n_features:].reshape(
2, n_states, n_states)
assert_array_equal(pw_joint_feature_vert, np.diag([9 * 4, 9 * 4, 9 * 4]))
vert_joint_feature = np.diag([10 * 3, 10 * 3, 10 * 3])
vert_joint_feature[0, 1] = 10
vert_joint_feature[1, 2] = 10
assert_array_equal(pw_joint_feature_horz, vert_joint_feature)
def test_joint_feature_continuous():
"""
Testing with a single type of nodes. Must do as well as EdgeFeatureGraphCRF
"""
# FIXME
# first make perfect prediction, including pairwise part
X, Y = generate_blocks_multinomial(noise=2, n_samples=1, seed=1)
x, y = X[0], Y[0]
n_states = x.shape[-1]
edge_list = make_grid_edges(x, 4, return_lists=True)
edges = np.vstack(edge_list)
edge_features = edge_list_to_features(edge_list)
#x = (x.reshape(-1, 3), edges, edge_features)
x = ([x.reshape(-1, 3)], [edges], [edge_features])
y = y.ravel()
pw_horz = -1 * np.eye(n_states)
xx, yy = np.indices(pw_horz.shape)
# linear ordering constraint horizontally
pw_horz[xx > yy] = 1
# high cost for unequal labels vertically
pw_vert = -1 * np.eye(n_states)
pw_vert[xx != yy] = 1
pw_vert *= 10
# create crf, assemble weight, make prediction
# for inference_method in get_installed(["lp", "ad3"]):
# crf = EdgeFeatureGraphCRF(inference_method=inference_method)
if True:
crf = NodeTypeEdgeFeatureGraphCRF(1, [3], [3], [[2]])
w = np.hstack([np.eye(3).ravel(), -pw_horz.ravel(), -pw_vert.ravel()])
#crf.initialize([x], [y])
#report_model_config(crf)
crf.initialize(x, y)
y_pred = crf.inference(x, w, relaxed=True)
# compute joint_feature for prediction
joint_feature_y = crf.joint_feature(x, y_pred)
assert_equal(joint_feature_y.shape, (crf.size_joint_feature,))
# FIXME
# first horizontal, then vertical
# we trust the unaries ;)
#pw_joint_feature_horz, pw_joint_feature_vert = joint_feature_y[crf.n_states *
#crf.n_features:].reshape(2,
#crf.n_states,
#crf.n_states)
def test_energy_continuous():
# make sure that energy as computed by ssvm is the same as by lp
np.random.seed(0)
#for inference_method in get_installed(["lp", "ad3"]):
if True:
found_fractional = False
crf = NodeTypeEdgeFeatureGraphCRF(1, [3], [3], [[2]])
while not found_fractional:
x = np.random.normal(size=(7, 8, 3))
edge_list = make_grid_edges(x, 4, return_lists=True)
edges = np.vstack(edge_list)
edge_features = edge_list_to_features(edge_list)
x = ([x.reshape(-1, 3)], [edges], [edge_features])
unary_params = np.random.normal(size=(3, 3))
pw1 = np.random.normal(size=(3, 3))
pw2 = np.random.normal(size=(3, 3))
w = np.hstack([unary_params.ravel(), pw1.ravel(), pw2.ravel()])
crf.initialize(x)
res, energy = crf.inference(x, w, relaxed=True, return_energy=True)
found_fractional = np.any(np.max(res[0], axis=-1) != 1)
joint_feature = crf.joint_feature(x, res)
energy_svm = np.dot(joint_feature, w)
assert_almost_equal(energy, -energy_svm)
def test_energy_discrete():
# for inference_method in get_installed(["qpbo", "ad3"]):
# crf = EdgeFeatureGraphCRF(n_states=3,
# inference_method=inference_method,
# n_edge_features=2, n_features=3)
crf = NodeTypeEdgeFeatureGraphCRF(1, [3], [3], [[2]])
for i in range(10):
x = np.random.normal(size=(7, 8, 3))
edge_list = make_grid_edges(x, 4, return_lists=True)
edges = np.vstack(edge_list)
edge_features = edge_list_to_features(edge_list)
x = ([x.reshape(-1, 3)], [edges], [edge_features])
unary_params = np.random.normal(size=(3, 3))
pw1 = np.random.normal(size=(3, 3))
pw2 = np.random.normal(size=(3, 3))
w = np.hstack([unary_params.ravel(), pw1.ravel(), pw2.ravel()])
crf.initialize(x)
y_hat = crf.inference(x, w, relaxed=False)
#flat_edges = crf._index_all_edges(x)
energy = compute_energy(crf._get_unary_potentials(x, w)[0],
                        crf._get_pairwise_potentials(x, w)[0], edges, #CAUTION: pass the flattened edges!!
y_hat)
joint_feature = crf.joint_feature(x, y_hat)
energy_svm = np.dot(joint_feature, w)
assert_almost_equal(energy, energy_svm)
if __name__ == "__main__":
np.set_printoptions(precision=3, linewidth=9999)
if 0:
debug_joint_feature()
if 1:
test_flatten_unflattenY()
if 1:
test_joint_feature()
if 1:
test_joint_feature2()
if 1:
test_joint_feature3()
if 1: test_unary_potentials()
# if 1: test_inference_util()
if 1: test_inference_ad3()
if 1: test_inference_ad3plus()
if 1: test_joint_feature_discrete()
if 1: test_joint_feature_continuous()
if 1: test_energy_continuous()
if 1: test_energy_discrete()
#print "OK"
| 42.36197 | 118 | 0.409091 | [
"BSD-2-Clause"
] | LemonLison/pystruct | pystruct/tests/test_models/test_node_type_edge_feature_graph_crf.py | 36,982 | Python |
from __future__ import print_function, absolute_import
import importlib
import logging
import os
from argparse import ArgumentParser
from six import string_types
from adr.formatter import all_formatters
from .errors import MissingDataError
log = logging.getLogger('adr')
here = os.path.abspath(os.path.dirname(__file__))
RECIPE_DIR = os.path.join(here, 'recipes')
ARGUMENT_GROUPS = {
'branch': [
[['-B', '--branch'],
{'default': ['mozilla-central'],
'action': 'append',
'help': "Branches to query results from",
}],
],
'build': [
[['-b', '--build-type'],
{'default': 'opt',
'help': "Build type (default: opt)",
}],
],
'date': [
[['--from'],
{'dest': 'from_date',
'default': 'today-week',
'help': "Starting date to pull data from, defaults "
"to a week ago",
}],
[['--to'],
{'dest': 'to_date',
'default': 'eod', # end of day
'help': "Ending date to pull data from, defaults "
"to now",
}],
],
'path': [
[['--path'],
{'required': True,
'help': "Path relative to repository root (file or directory)",
}],
],
'platform': [
[['-p', '--platform'],
{'default': 'windows10-64',
'help': "Platform to limit results to (default: windows10-64)",
}],
],
'rev': [
[['-r', '--revision'],
{'dest': 'rev',
'required': True,
'help': "Revision to limit results to",
}],
],
'test': [
[['-t', '--test'],
{'required': True,
'dest': 'test_name',
'help': "Path to a test file",
}],
],
}
"""
These are commonly used arguments which can be re-used. They are shared to
provide a consistent CLI across recipes.
"""
class RecipeParser(ArgumentParser):
arguments = []
def __init__(self, *groups, **kwargs):
ArgumentParser.__init__(self, **kwargs)
for cli, kwargs in self.arguments:
self.add_argument(*cli, **kwargs)
for name in groups:
group = self.add_argument_group("{} arguments".format(name))
arguments = ARGUMENT_GROUPS[name]
for cli, kwargs in arguments:
group.add_argument(*cli, **kwargs)
def run_recipe(recipe, args, config):
"""Given a recipe, calls the appropriate query and returns the result.
The provided recipe name is used to make a call to the modules.
:param str recipe: name of the recipe to be run.
:param list args: remainder arguments that were unparsed.
:param Configuration config: config object.
:returns: string
"""
modname = '.recipes.{}'.format(recipe)
mod = importlib.import_module(modname, package='adr')
try:
output = mod.run(args, config)
except MissingDataError:
return "ActiveData didn\'t return any data."
if isinstance(config.fmt, string_types):
fmt = all_formatters[config.fmt]
log.debug("Result:")
return fmt(output)
| 26.974359 | 74 | 0.552281 | [
"MPL-2.0"
] | gmierz/active-data-recipes | adr/recipe.py | 3,156 | Python |
import bbi
import clodius.tiles.format as hgfo
import functools as ft
import logging
import numpy as np
import pandas as pd
import re
from concurrent.futures import ThreadPoolExecutor
MAX_THREADS = 4
TILE_SIZE = 1024
logger = logging.getLogger(__name__)
aggregation_modes = {}
aggregation_modes['mean'] = {'name': 'Mean', 'value': 'mean'}
aggregation_modes['min'] = {'name': 'Min', 'value': 'min'}
aggregation_modes['max'] = {'name': 'Max', 'value': 'max'}
aggregation_modes['std'] = {'name': 'Standard Deviation', 'value': 'std'}
range_modes = {}
range_modes['minMax'] = {'name': 'Min-Max', 'value': 'minMax'}
range_modes['whisker'] = {'name': 'Whisker', 'value': 'whisker'}
def get_quadtree_depth(chromsizes):
tile_size_bp = TILE_SIZE
min_tile_cover = np.ceil(sum(chromsizes) / tile_size_bp)
return int(np.ceil(np.log2(min_tile_cover)))
def get_zoom_resolutions(chromsizes):
return [2**x for x in range(get_quadtree_depth(chromsizes) + 1)][::-1]
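# Note: resolutions[z] is the bin size in base pairs at zoom level z (coarsest
# first, halving at each deeper level down to 1 bp), so a tile always spans
# TILE_SIZE bins regardless of zoom level.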
def natsort_key(s, _NS_REGEX=re.compile(r'(\d+)', re.U)):
return tuple(
[int(x) if x.isdigit() else x for x in _NS_REGEX.split(s) if x]
)
def natcmp(x, y):
if x.find('_') >= 0:
x_parts = x.split('_')
if y.find('_') >= 0:
# chr_1 vs chr_2
y_parts = y.split('_')
return natcmp(x_parts[1], y_parts[1])
else:
# chr_1 vs chr1
# chr1 comes first
return 1
if y.find('_') >= 0:
# chr1 vs chr_1
# y comes second
return -1
_NS_REGEX = re.compile(r'(\d+)', re.U)
x_parts = tuple(
[int(a) if a.isdigit() else a for a in _NS_REGEX.split(x) if a]
)
y_parts = tuple(
[int(a) if a.isdigit() else a for a in _NS_REGEX.split(y) if a]
)
for key in ['m', 'y', 'x']: # order of these parameters is purposefully reverse how they should be ordered
if key in y.lower():
return -1
if key in x.lower():
return 1
try:
if x_parts < y_parts:
return -1
elif x_parts > y_parts:
return 1
else:
return 0
except TypeError:
return 1
def natsorted(iterable):
return sorted(iterable, key=ft.cmp_to_key(natcmp))
def get_chromsizes(bwpath):
"""
TODO: replace this with negspy
Also, return NaNs from any missing chromosomes in bbi.fetch
"""
chromsizes = bbi.chromsizes(bwpath)
chromosomes = natsorted(chromsizes.keys())
chrom_series = pd.Series(chromsizes)[chromosomes]
return chrom_series
def abs2genomic(chromsizes, start_pos, end_pos):
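'''
Split an absolute [start_pos, end_pos) interval on the concatenated genome
into per-chromosome (chrom_index, rel_start, rel_end) pieces, yielded in order.
'''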
abs_chrom_offsets = np.r_[0, np.cumsum(chromsizes.values)]
cid_lo, cid_hi = np.searchsorted(abs_chrom_offsets,
[start_pos, end_pos],
side='right') - 1
rel_pos_lo = start_pos - abs_chrom_offsets[cid_lo]
rel_pos_hi = end_pos - abs_chrom_offsets[cid_hi]
start = rel_pos_lo
for cid in range(cid_lo, cid_hi):
yield cid, start, chromsizes[cid]
start = 0
yield cid_hi, start, rel_pos_hi
def tileset_info(bwpath, chromsizes=None):
'''
Get the tileset info for a bigWig file
Parameters
----------
bwpath: string
The path to the bigwig file from which to retrieve data
chromsizes: [[chrom, size],...]
A list of chromosome sizes associated with this tileset.
Typically passed in to specify in what order data from
the bigwig should be returned.
Returns
-------
tileset_info: {'min_pos': [],
'max_pos': [],
'tile_size': 1024,
'max_zoom': 7
}
'''
TILE_SIZE = 1024
if chromsizes is None:
chromsizes = get_chromsizes(bwpath)
chromsizes_list = []
for chrom, size in chromsizes.iteritems():
chromsizes_list += [[chrom, int(size)]]
else:
chromsizes_list = chromsizes
min_tile_cover = np.ceil(
sum([int(c[1]) for c in chromsizes_list]) / TILE_SIZE
)
max_zoom = int(np.ceil(np.log2(min_tile_cover)))
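# Illustrative example: a ~3.1e9 bp genome covers roughly 3.0e6 tiles of
# 1024 bp each, so max_zoom = ceil(log2(3.0e6)) = 22.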
tileset_info = {
'min_pos': [0],
'max_pos': [TILE_SIZE * 2 ** max_zoom],
'max_width': TILE_SIZE * 2 ** max_zoom,
'tile_size': TILE_SIZE,
'max_zoom': max_zoom,
'chromsizes': chromsizes_list,
'aggregation_modes': aggregation_modes,
'range_modes': range_modes
}
return tileset_info
def fetch_data(a):
(
bwpath,
binsize,
chromsizes,
aggregation_mode,
range_mode,
cid,
start,
end
) = a
n_bins = int(np.ceil((end - start) / binsize))
n_dim = 1
if range_mode == 'minMax':
n_dim = 2
if range_mode == 'whisker':
n_dim = 4
x = np.zeros((n_bins, n_dim)) if n_dim > 1 else np.zeros(n_bins)
try:
chrom = chromsizes.index[cid]
clen = chromsizes.values[cid]
args = [bwpath, chrom, start, end]
kwargs = {"bins": n_bins, "missing": np.nan}
if range_mode == 'minMax':
x[:, 0] = bbi.fetch(*args, **dict(kwargs, summary='min'))
x[:, 1] = bbi.fetch(*args, **dict(kwargs, summary='max'))
elif range_mode == 'whisker':
x[:, 0] = bbi.fetch(*args, **dict(kwargs, summary='min'))
x[:, 1] = bbi.fetch(*args, **dict(kwargs, summary='max'))
x[:, 2] = bbi.fetch(*args, **dict(kwargs, summary='mean'))
x[:, 3] = bbi.fetch(*args, **dict(kwargs, summary='std'))
else:
x[:] = bbi.fetch(*args, **dict(kwargs, summary=aggregation_mode))
# drop the very last bin if it is smaller than the binsize
if end == clen and clen % binsize != 0:
x = x[:-1]
except IndexError:
# beyond the range of the available chromosomes
# probably means we've requested a range of absolute
# coordinates that stretch beyond the end of the genome
x[:] = np.nan
except KeyError:
# probably requested a chromosome that doesn't exist (e.g. chrM)
x[:] = np.nan
return x
def get_bigwig_tile(
bwpath,
zoom_level,
start_pos,
end_pos,
chromsizes=None,
aggregation_mode='mean',
range_mode=None
):
if chromsizes is None:
chromsizes = get_chromsizes(bwpath)
resolutions = get_zoom_resolutions(chromsizes)
binsize = resolutions[zoom_level]
cids_starts_ends = list(abs2genomic(chromsizes, start_pos, end_pos))
with ThreadPoolExecutor(max_workers=16) as e:
arrays = list(
e.map(
fetch_data, [
tuple([
bwpath,
binsize,
chromsizes,
aggregation_mode,
range_mode
] + list(c)) for c in cids_starts_ends
]
)
)
return np.concatenate(arrays)
def tiles(bwpath, tile_ids, chromsizes_map={}, chromsizes=None):
'''
Generate tiles from a bigwig file.
Parameters
----------
tileset: tilesets.models.Tileset object
The tileset that the tile ids should be retrieved from
tile_ids: [str,...]
A list of tile_ids (e.g. xyx.0.0) identifying the tiles
to be retrieved
chromsizes_map: {uid: []}
A set of chromsizes listings corresponding to the parameters of the
tile_ids. To be used if a chromsizes id is passed in with the tile id
with the `|cos:id` tag in the tile id
chromsizes: [[chrom, size],...]
A 2d array containing chromosome names and sizes. Overrides the
chromsizes in chromsizes_map
Returns
-------
tile_list: [(tile_id, tile_data),...]
A list of tile_id, tile_data tuples
'''
TILE_SIZE = 1024
generated_tiles = []
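# Each tile_id has the form "<uuid>.<zoom>.<pos>[.<mode>][|cos:<chromsizes_id>]":
# the optional <mode> selects an aggregation mode (mean/min/max/std) or a range
# mode (minMax/whisker), and the optional "cos" option picks an entry of
# chromsizes_map.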
for tile_id in tile_ids:
tile_option_parts = tile_id.split('|')[1:]
tile_no_options = tile_id.split('|')[0]
tile_id_parts = tile_no_options.split('.')
tile_position = list(map(int, tile_id_parts[1:3]))
return_value = tile_id_parts[3] if len(tile_id_parts) > 3 else 'mean'
aggregation_mode = (
return_value if return_value in aggregation_modes else 'mean'
)
range_mode = return_value if return_value in range_modes else None
tile_options = dict([o.split(':') for o in tile_option_parts])
if chromsizes:
chromnames = [c[0] for c in chromsizes]
chromlengths = [int(c[1]) for c in chromsizes]
chromsizes_to_use = pd.Series(chromlengths, index=chromnames)
else:
chromsizes_id = None
if 'cos' in tile_options:
chromsizes_id = tile_options['cos']
if chromsizes_id in chromsizes_map:
chromsizes_to_use = chromsizes_map[chromsizes_id]
else:
chromsizes_to_use = None
zoom_level = tile_position[0]
tile_pos = tile_position[1]
# this doesn't combine multiple consequetive ids, which
# would speed things up
if chromsizes_to_use is None:
chromsizes_to_use = get_chromsizes(bwpath)
max_depth = get_quadtree_depth(chromsizes_to_use)
tile_size = TILE_SIZE * 2 ** (max_depth - zoom_level)
start_pos = tile_pos * tile_size
end_pos = start_pos + tile_size
dense = get_bigwig_tile(
bwpath,
zoom_level,
start_pos,
end_pos,
chromsizes_to_use,
aggregation_mode=aggregation_mode,
range_mode=range_mode,
)
tile_value = hgfo.format_dense_tile(dense)
generated_tiles += [(tile_id, tile_value)]
return generated_tiles
def chromsizes(filename):
'''
Get a list of chromosome sizes from this [presumably] bigwig
file.
Parameters:
-----------
filename: string
The filename of the bigwig file
Returns
-------
chromsizes: [(name:string, size:int), ...]
An ordered list of chromosome names and sizes
'''
try:
chrom_series = get_chromsizes(filename)
data = []
for chrom, size in chrom_series.iteritems():
data.append([chrom, size])
return data
except Exception as ex:
logger.error(ex)
raise Exception(
'Error loading chromsizes from bigwig file: {}'.format(ex)
)
| 28.820163 | 110 | 0.583058 | [
"MIT"
] | 4dn-dcic/clodius | clodius/tiles/bigwig.py | 10,577 | Python |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.7 on 2018-02-22 09:00
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('waldur_jira', '0011_unique_together'),
]
operations = [
migrations.AlterField(
model_name='attachment',
name='backend_id',
field=models.CharField(max_length=255, null=True),
),
migrations.AlterField(
model_name='comment',
name='backend_id',
field=models.CharField(max_length=255, null=True),
),
migrations.AlterField(
model_name='issue',
name='backend_id',
field=models.CharField(max_length=255, null=True),
),
]
| 26.129032 | 62 | 0.590123 | [
"MIT"
] | opennode/waldur-jira | src/waldur_jira/migrations/0012_backend_id_null.py | 810 | Python |
#!/usr/bin/python
# Copyright (c) 2014 Wladmir J. van der Laan
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
Script to generate list of seed nodes for chainparams.cpp.
This script expects two text files in the directory that is passed as an
argument:
nodes_main.txt
nodes_test.txt
These files must consist of lines in the format
<ip>
<ip>:<port>
[<ipv6>]
[<ipv6>]:<port>
<onion>.onion
0xDDBBCCAA (IPv4 little-endian old pnSeeds format)
The output will be two data structures with the peers in binary format:
static SeedSpec6 pnSeed6_main[]={
...
}
static SeedSpec6 pnSeed6_test[]={
...
}
These should be pasted into `src/chainparamsseeds.h`.
'''
from __future__ import print_function, division
from base64 import b32decode
from binascii import a2b_hex
import sys, os
import re
# ipv4 in ipv6 prefix
pchIPv4 = bytearray([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff])
# tor-specific ipv6 prefix
pchOnionCat = bytearray([0xFD,0x87,0xD8,0x7E,0xEB,0x43])
def name_to_ipv6(addr):
if len(addr)>6 and addr.endswith('.onion'):
vchAddr = b32decode(addr[0:-6], True)
if len(vchAddr) != 16-len(pchOnionCat):
raise ValueError('Invalid onion %s' % addr)
return pchOnionCat + vchAddr
elif '.' in addr: # IPv4
return pchIPv4 + bytearray((int(x) for x in addr.split('.')))
elif ':' in addr: # IPv6
sub = [[], []] # prefix, suffix
x = 0
addr = addr.split(':')
for i,comp in enumerate(addr):
if comp == '':
if i == 0 or i == (len(addr)-1): # skip empty component at beginning or end
continue
x += 1 # :: skips to suffix
assert(x < 2)
else: # two bytes per component
val = int(comp, 16)
sub[x].append(val >> 8)
sub[x].append(val & 0xff)
nullbytes = 16 - len(sub[0]) - len(sub[1])
assert((x == 0 and nullbytes == 0) or (x == 1 and nullbytes > 0))
return bytearray(sub[0] + ([0] * nullbytes) + sub[1])
elif addr.startswith('0x'): # IPv4-in-little-endian
return pchIPv4 + bytearray(reversed(a2b_hex(addr[2:])))
else:
raise ValueError('Could not parse address %s' % addr)
def parse_spec(s, defaultport):
match = re.match('\[([0-9a-fA-F:]+)\](?::([0-9]+))?$', s)
if match: # ipv6
host = match.group(1)
port = match.group(2)
else:
(host,_,port) = s.partition(':')
if not port:
port = defaultport
else:
port = int(port)
host = name_to_ipv6(host)
return (host,port)
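# Illustrative examples (addresses and ports are hypothetical):
#   parse_spec('1.2.3.4:8333', 9999)       -> (IPv4-mapped ::ffff:1.2.3.4 as 16 bytes, 8333)
#   parse_spec('[2001:db8::1]', 9999)      -> (16-byte IPv6 address, 9999)
#   parse_spec('abcdefghijklmnop.onion', 9999) -> (OnionCat-prefixed 16 bytes, 9999)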
def process_nodes(g, f, structname, defaultport):
g.write('static SeedSpec6 %s[] = {\n' % structname)
first = True
for line in f:
comment = line.find('#')
if comment != -1:
line = line[0:comment]
line = line.strip()
if not line:
continue
if not first:
g.write(',\n')
first = False
(host,port) = parse_spec(line, defaultport)
hoststr = ','.join(('0x%02x' % b) for b in host)
g.write(' {{%s}, %i}' % (hoststr, port))
g.write('\n};\n')
def main():
if len(sys.argv)<2:
print(('Usage: %s <path_to_nodes_txt>' % sys.argv[0]), file=sys.stderr)
exit(1)
g = sys.stdout
indir = sys.argv[1]
g.write('#ifndef BITCOIN_CHAINPARAMSSEEDS_H\n')
g.write('#define BITCOIN_CHAINPARAMSSEEDS_H\n')
g.write('/**\n')
g.write(' * List of fixed seed nodes for the talisman network\n')
g.write(' * AUTOGENERATED by share/seeds/generate-seeds.py\n')
g.write(' *\n')
g.write(' * Each line contains a 16-byte IPv6 address and a port.\n')
g.write(' * IPv4 as well as onion addresses are wrapped inside a IPv6 address accordingly.\n')
g.write(' */\n')
with open(os.path.join(indir,'nodes_main.txt'),'r') as f:
process_nodes(g, f, 'pnSeed6_main', 9999)
g.write('\n')
with open(os.path.join(indir,'nodes_test.txt'),'r') as f:
process_nodes(g, f, 'pnSeed6_test', 19999)
g.write('#endif // BITCOIN_CHAINPARAMSSEEDS_H\n')
if __name__ == '__main__':
main()
| 31.602941 | 98 | 0.585854 | [
"MIT"
] | Talisman-coin/honestas | share/seeds/generate-seeds.py | 4,298 | Python |
import praw
import os
from dotenv import load_dotenv
load_dotenv()
CLIENT_ID = os.getenv("CLIENT_ID")
CLIENT_SECRET = os.getenv("CLIENT_SECRET")
USERNAME = os.getenv("USERNAME")
PASSWORD = os.getenv("PASSWORD")
reddit = praw.Reddit(client_id=CLIENT_ID,
client_secret=CLIENT_SECRET,
user_agent='testscript',
username=USERNAME,
password=PASSWORD)
subreddit = reddit.subreddit('python')
hot_python = subreddit.hot(limit=5)
for post in hot_python:
if not post.stickied:
print('Title: {}, Upvotes: {}, Downvotes: {}\n'.format(
post.title,
post.ups,
post.downs)) | 31.038462 | 64 | 0.530359 | [
"MIT"
] | Build-Week-Post-Here2/bw-DS | praw_practice.py | 807 | Python |
import law
import luigi
import os
from subprocess import PIPE
from law.util import interruptable_popen
from framework import Task
class CROWNBuild(Task):
"""
Gather and compile CROWN with the given configuration
"""
# configuration variables
channels = luigi.Parameter()
shifts = luigi.Parameter()
build_dir = luigi.Parameter()
install_dir = luigi.Parameter()
env_script = os.path.join(
os.path.dirname(__file__), "../../", "setup", "setup_crown_cmake.sh"
)
def output(self):
return self.local_target("crown_{}_{}.tar.gz".format(self.era, self.sampletype))
def run(self):
# get output file path
output = self.output()
print(output.path)
if os.path.exists(output.path):
print("tarball already existing in {}".format(output.path))
else:
output.parent.touch()
_sampletype = str(self.sampletype)
_era = str(self.era)
_channels = str(self.channels)
_analysis = str(self.analysis)
_shifts = str(self.shifts)
_tag = "{}_{}".format(_era, _sampletype)
_build_dir = os.path.join(str(self.build_dir), _tag)
_install_dir = os.path.join(str(self.install_dir), _tag)
# find crown
_crown_path = os.path.abspath("CROWN")
# create build directory
if not os.path.exists(_build_dir):
os.makedirs(_build_dir)
_build_dir = os.path.abspath(_build_dir)
# same for the install directory
if not os.path.exists(_install_dir):
os.makedirs(_install_dir)
_install_dir = os.path.abspath(_install_dir)
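# Build flow: configure CROWN with cmake into the build directory, run
# `make install` into the install directory, then pack that directory into a
# tarball and copy it to the law output target.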
# set environment variables
my_env = self.set_environment(self.env_script)
# checking cmake path
code, _cmake_executable, error = interruptable_popen(
["which", "cmake"], stdout=PIPE, stderr=PIPE, env=my_env
)
# actual payload:
print("=========================================================")
print("| Starting cmake step for CROWN")
print("| Using cmake {}".format(_cmake_executable.replace("\n", "")))
print("| Using CROWN {}".format(_crown_path))
print("| Using build_directory {}".format(_build_dir))
print("| Using install directory {}".format(_install_dir))
print("=========================================================")
# run CROWN build step
_cmake_cmd = ["cmake", _crown_path]
_cmake_args = [
"-DANALYSIS={ANALYSIS}".format(ANALYSIS=_analysis),
"-DSAMPLES={SAMPLES}".format(SAMPLES=_sampletype),
"-DERAS={ERAS}".format(ERAS=_era),
"-DCHANNELS={CHANNELS}".format(CHANNELS=_channels),
"-DSHIFTS={SHIFTS}".format(SHIFTS=_shifts),
"-DINSTALLDIR={INSTALLDIR}".format(INSTALLDIR=_install_dir),
"-B{BUILDFOLDER}".format(BUILDFOLDER=_build_dir),
]
print("Executable: {}".format(" ".join(_cmake_cmd + _cmake_args)))
code, out, error = interruptable_popen(
_cmake_cmd + _cmake_args, stdout=PIPE, stderr=PIPE, env=my_env
)
print(code, out, error)
# check the cmake return code before moving on to the build step
if code != 0:
print("Error when running cmake {}".format(error))
print("Output: {}".format(out))
print("cmake returned non-zero exit status {}".format(code))
raise Exception("cmake failed")
else:
print("Successful cmake build !")
print("Executable: {}".format(" ".join(["make", "install"])))
code, out, error = interruptable_popen(
["make", "install"],
stdout=PIPE,
stderr=PIPE,
env=my_env,
cwd=_build_dir,
)
if code != 0:
print("Error when running make {}".format(error))
print("Output: {}".format(out))
print("make returned non-zero exit status {}".format(code))
raise Exception("make failed")
else:
print("Successful cmake build !")
# TODO Create Tarball from the install directory\
code, out, error = interruptable_popen(
["touch", output.basename],
stdout=PIPE,
stderr=PIPE,
env=my_env,
cwd=os.path.join(_install_dir),
)
command = [
"tar",
"-czvf",
output.basename,
"--exclude={}".format(output.basename),
".",
]
print("Executable: {}".format(" ".join(command)))
code, out, error = interruptable_popen(
command,
stdout=PIPE,
stderr=PIPE,
env=my_env,
cwd=os.path.join(_install_dir),
)
if code != 0:
print("Error when creating tarball {}".format(error))
print("Output: {}".format(out))
print("tar returned non-zero exit status {}".format(code))
raise Exception("tar failed")
else:
print("Successful tarball creation ! ")
output.copy_from_local(os.path.join(_install_dir, output.basename))
print("=======================================================")
| 38.013333 | 88 | 0.512978 | [
"MIT"
] | swozniewski/KingMaker | processor/tasks/CROWNBuild.py | 5,702 | Python |
#!/usr/bin/env python
# encoding: utf-8
class MeanVariance(object):
def __init__(self):
self.n = 0
self.K = 0.0
self.ex0 = 0.0
self.ex2 = 0.0
def add_variable(self, x):
if self.n == 0:
self.K = x
self.n += 1
delta = x - self.K
self.ex0 += delta
self.ex2 += delta * delta
def get_mean_value(self):
return self.K + self.ex0 / self.n
def get_variance(self):
return (self.ex2 - (self.ex0 * self.ex0) / self.n) / (self.n - 1)
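# Shifted-data one-pass algorithm: with K the first sample and delta_i = x_i - K,
#   mean            = K + (sum delta_i) / n
#   sample variance = (sum delta_i^2 - (sum delta_i)^2 / n) / (n - 1)
# Shifting by K keeps the running sums small, which helps numerical stability.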
if __name__ == "__main__":
mv = MeanVariance()
mv.add_variable(1.0)
mv.add_variable(2.0)
mv.add_variable(3.0)
mv.add_variable(4.0)
print mv.get_mean_value()
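print mv.get_variance() # expected 5/3 ~= 1.667 for the samples 1..4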
| 21.441176 | 73 | 0.540466 | [
"Apache-2.0"
] | mraad/spark-std-dist | src/main/python/mean-variance.py | 729 | Python |
import boto3
MAX_OBJECT_LIMIT = 1000
def get_s3_client():
boto3.setup_default_session()
return boto3.client('s3')
def is_object_name(client, bucket, name):
try:
client.get_object(Bucket=bucket, Key=name)
return True
except:
return False
def is_bucket_name(client, name):
response = client.list_buckets()['Buckets']
for r in response:
if name == r['Name']:
return True
return False
def get_list_of_bucket_names(client):
response = client.list_buckets()['Buckets']
return map(lambda x: x['Name'], response)
def get_list_of_objects(client, bucket):
list_of_objects = []
response = client.list_objects_v2(Bucket=bucket, MaxKeys=MAX_OBJECT_LIMIT)
list_of_objects.extend(get_keys_from_response(response))
token = get_token_from_response(response)
while len(token) > 0:
response = client.list_objects_v2(Bucket=bucket, MaxKeys=MAX_OBJECT_LIMIT, ContinuationToken=token)
list_of_objects.extend(get_keys_from_response(response))
token = get_token_from_response(response)
return list_of_objects
def get_token_from_response(response):
try:
return response['NextContinuationToken']
except KeyError:
return ''
def get_keys_from_response(response):
try:
return map(lambda x: x['Key'], response['Contents'])
except KeyError:
return []
def get_s3_object_as_string(client, bucket, key):
s3_object = client.get_object(Bucket=bucket, Key=key)
return s3_object['Body'].read()
def put_local_to_s3(client, local, bucket, key):
with open(local, 'rb') as local_file:
file_as_bytes = local_file.read()
put_s3_object(client, bucket, key, file_as_bytes)
def put_s3_object(client, bucket, key, body):
client.put_object(Body=body, Bucket=bucket, Key=key)
def put_empty_s3_object(client, bucket, key):
client.put_object(Body=b'', Bucket=bucket, Key=key)
def copy_object_between_buckets(client, source, bucket, key):
client.copy_object(Bucket=bucket, Key=key, CopySource=source)
def download_s3_file(bucket, key, location):
try:
s3 = boto3.resource('s3')
s3.Bucket(bucket).download_file(key, location)
except:
exit(1)
def create_bucket(client, name):
print('got ' + name)
response = client.create_bucket(Bucket=name)
if response['ResponseMetadata']['HTTPStatusCode'] != 200:
print('Could not create bucket')
exit(1)
def delete_s3_bucket(client, name):
response = client.delete_bucket(Bucket=name)
if response['ResponseMetadata']['HTTPStatusCode'] != 204:
print('Could not delete bucket')
exit(1)
def delete_s3_object(client, bucket, key):
client.delete_object(Bucket=bucket, Key=key)
| 25.623853 | 107 | 0.694952 | [
"MIT"
] | VanOvermeire/s3bash | s3bash/s3_helper.py | 2,793 | Python |
#!/usr/bin/python3
# -*- coding:utf-8 -*-
# FORKED FROM https://github.com/floydawong/LuaFormat
# Copyright (c) 2017 Floyda ([email protected])
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following
# conditions:
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
import os
import sys
_start_node = None
_end_node = None
_lines = []
_settings = {}
# ----------------------------------------------------------
# Const
# ----------------------------------------------------------
class NodeType:
WORD = "WORD"
BLANK = "BLANK"
OPERATOR = "OPERATOR"
SEPARATOR = "SEPARATOR"
EQUAL = "EQUAL"
BRACKET = "BRACKET"
REVERSE_BRACKET = "REVERSE_BRACKET"
ENTER = "ENTER"
STRING = "STRING"
COMMENT_SINGLE = "COMMENT_SINGLE"
COMMENT_MULTI = "COMMENT_MULTI"
NodePattern = {
"WORD": [],
"BLANK": [" "],
"OPERATOR": ["+", "-", "*", "/", "^", "%"],
"SEPARATOR": [",", ";"],
"EQUAL": ["=", "~", ">", "<"],
"BRACKET": ["(", "{", "["],
"REVERSE_BRACKET": [")", "}", "]"],
"ENTER": ["\r\n", "\n", "\r"],
"STRING": ['"', "'"],
"COMMENT_SINGLE": [],
"COMMENT_MULTI": [],
}
SingletonType = [
NodeType.BRACKET,
NodeType.REVERSE_BRACKET,
NodeType.STRING,
NodeType.BLANK,
]
CommentType = [NodeType.STRING, NodeType.COMMENT_SINGLE, NodeType.COMMENT_MULTI]
IndentKeyword = [
"function",
"for",
"repeat",
"while",
"if",
"do",
]
UnindentKeyword = ["end", "until"]
# ----------------------------------------------------------
# Line
# ----------------------------------------------------------
class Line:
def __init__(self):
self._nodes = []
self._indent = 0
def __str__(self):
r = ""
for node in self._nodes:
r += str(node)
enter_pos = r.find("\n")
r = r[:enter_pos].strip(" ") + r[enter_pos:]
if r.strip(" ") == "\n":
return "\n" # 20
return " " * _settings.get("tab_size") * self._indent + r
def is_blank_line(self):
for node in self._nodes:
if node.type not in [NodeType.BLANK, NodeType.ENTER]:
return False
return True
def add(self, node):
self._nodes.append(node)
def get_nodes(self):
return self._nodes
def set_indent(self, indent):
self._indent = indent
def get_indent(self):
return self._indent
def add_indent(self, indent):
self._indent += indent
def create_line():
line = Line()
_lines.append(line)
return line
# ----------------------------------------------------------
# Node
# ----------------------------------------------------------
class NodeIterator:
def __init__(self):
self.node = _start_node
def __iter__(self):
return self
def __next__(self):
return self.next()
def next(self):
if not self.node:
raise StopIteration()
node = self.node
self.node = self.node.next
return node
class Node:
def __init__(self, c):
self._str = c
def __str__(self):
if self.type is NodeType.BLANK:
return " "
if self.type in CommentType:
r = self._str
r = r.replace(r"\\n", r"\n")
r = r.replace(r"\\r", r"\r")
return r
return self._str.strip(" ")
def add(self, c):
self._str += c
def make_property(attr):
def set_attr():
def inner(self, value):
setattr(self, "_" + attr, value)
return inner
def get_attr():
def inner(self):
try:
return getattr(self, "_" + attr)
except:
return None
return inner
return property(get_attr(), set_attr())
type = make_property("type")
last = make_property("last")
next = make_property("next")
del make_property
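# type, last and next are generated properties backed by _type, _last and _next;
# the getter returns None while the backing attribute has not been set yet.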
def create_node(content, type=None):
global _start_node
global _end_node
node = Node(content)
node.type = type
if _start_node is None:
_start_node = node
if _end_node:
node.last = _end_node
_end_node.next = node
_end_node = node
return node
def insert_blank_node(node):
bn = Node(" ")
bn.type = NodeType.BLANK
bn.last = node.last
bn.next = node
node.last.next = bn
node.last = bn
def merge_prev_node(node):
if not node.last:
return node
lnode = node.last
lnode.add(str(node))
if node.next:
node.next.last = lnode
lnode.next = node.next
else:
lnode.next = None
del node
return lnode
def delete_node(node):
if node.last and node.next:
node.last.next = node.next
node.next.last = node.last
elif node.next == None:
node.last.next = None
elif node.last == None:
node.next.last = None
return node.next
def delete_forward_blank(node):
while True:
node = node.last
if node and node.type == NodeType.BLANK:
node = delete_node(node)
else:
return
def delete_backward_blank(node):
while True:
node = node.next
if node and node.type == NodeType.BLANK:
node = delete_node(node)
node = node.last
else:
return
def get_forward_char(node, count):
r = ""
while True:
if not node:
return r[::-1]
r += str(node)[::-1]
if len(r) >= count:
return r[::-1][-count:]
node = node.last
def get_forward_type(node):
pnode = node.last
if pnode:
return pnode.type
return None
def get_forward_type_for_negative(node):
while True:
node = node.last
if node is None:
return None
if node.type != NodeType.BLANK:
return node.type
# ----------------------------------------------------------
# Format
# ----------------------------------------------------------
def split_content(content, count=1):
return content[:count], content[count:]
def get_char_type(c):
for key in NodePattern:
pattern = NodePattern[key]
if c in pattern:
return key
return NodeType.WORD
def parse_node(content):
node = None
while content:
c, content = split_content(content)
ctype = get_char_type(c)
if node is None:
node = create_node(c, ctype)
continue
if ctype == node.type and not ctype in SingletonType:
node.add(c)
else:
node = create_node(c, ctype)
def foreach_node():
node = _start_node
while node:
if node.type == NodeType.STRING:
char_key = str(node)
while True:
node = node.next
if char_key == str(node) and get_forward_char(node, 2)[0] != "\\":
merge_prev_node(node)
break
if not node.next:
break
merge_prev_node(node)
str_node = str(node)
if (
str_node == len(str_node) * "="
and str(node.last) == "["
and str(node.next) == "["
):
end_flag = "]%s]" % (len(str_node) * "=")
node = merge_prev_node(node)
node.type = NodeType.COMMENT_SINGLE
while True:
node = node.next
merge_prev_node(node)
if get_forward_char(node, len(end_flag)) == end_flag:
break
if not node.next:
break
if get_forward_char(node, 2) == "[[":
node = merge_prev_node(node)
node.type = NodeType.STRING
while True:
node = node.next
node.type = NodeType.STRING
if get_forward_char(node, 2) == "]]":
node = merge_prev_node(node)
break
merge_prev_node(node)
if not node.next:
break
if get_forward_char(node, 2) == "--":
# COMMENT_SINGLE
# node = merge_prev_node(node)
node.type = NodeType.COMMENT_SINGLE
while True:
node = node.next
if node.type == NodeType.ENTER:
break
if not node.next:
break
tmp = merge_prev_node(node)
str_tmp = str(tmp)
check_flag = "--[%s[" % ((len(str_tmp) - 4) * "=")
end_flag = "]%s]" % ((len(str_tmp) - 4) * "=")
if str(tmp) == check_flag:
node = tmp
# node.type == NodeType.COMMENT_MULTI
while True:
node = node.next
if get_forward_char(node, len(end_flag)) == end_flag:
merge_prev_node(node)
break
merge_prev_node(node)
if not node.next:
break
break
node = node.next
def foreach_blank():
for node in NodeIterator():
if node.last and node.type == node.last.type == NodeType.BLANK:
merge_prev_node(node)
def foreach_string_connect():
for node in NodeIterator():
if str(node) == "..":
node.type = NodeType.OPERATOR
def foreach_operator():
for node in NodeIterator():
if str(node) == "-":
# scientific notation, e.g. the '-' in 1e-5 is part of the number
if (
node.last
and str(node.last)[-1].lower() == "e"
and str(node.last)[-2] in [str(x) for x in range(10)]
):
continue
# negative number (unary minus sign)
pntype = get_forward_type_for_negative(node)
if not pntype in [NodeType.WORD, NodeType.REVERSE_BRACKET, NodeType.STRING]:
delete_backward_blank(node)
continue
if node.type == NodeType.OPERATOR:
delete_forward_blank(node)
delete_backward_blank(node)
if _settings.get("special_symbol_split"):
if node.last and node.last.type is not NodeType.BLANK:
insert_blank_node(node)
if node.next and node.next.type is not NodeType.BLANK:
insert_blank_node(node.next)
def foreach_separator():
for node in NodeIterator():
if node.type == NodeType.SEPARATOR:
delete_forward_blank(node)
delete_backward_blank(node)
if _settings.get("special_symbol_split"):
if node.next and node.next.type is not NodeType.BLANK:
insert_blank_node(node.next)
def foreach_equal():
for node in NodeIterator():
if node.type == NodeType.EQUAL:
if node.last and node.last.type is NodeType.EQUAL:
merge_prev_node(node)
for node in NodeIterator():
if node.type == NodeType.EQUAL:
delete_forward_blank(node)
delete_backward_blank(node)
if _settings.get("special_symbol_split"):
if node.last and node.last.type is not NodeType.BLANK:
insert_blank_node(node)
if node.next and node.next.type is not NodeType.BLANK:
insert_blank_node(node.next)
def foreach_bracket():
for node in NodeIterator():
if node.type == NodeType.BRACKET:
delete_backward_blank(node)
if _settings.get("bracket_split"):
if node.next and node.next.type != NodeType.BRACKET:
insert_blank_node(node.next)
if node.type == NodeType.REVERSE_BRACKET:
delete_forward_blank(node)
if _settings.get("bracket_split"):
if node.last and node.last.type != NodeType.REVERSE_BRACKET:
insert_blank_node(node)
if (
node.last
and node.last.last
and node.last.type == NodeType.ENTER
and node.last.last.type == NodeType.REVERSE_BRACKET
):
delete_node(node.last)
def foreach_word():
for node in NodeIterator():
if node.last and node.last.type == node.type == NodeType.WORD:
merge_prev_node(node)
def tidy_indent():
global line_indent
global indent
line_indent = 0
indent = 0
line = create_line()
line_key_dict = {}
bracket_key_dict = {}
def deal_indent(line, delta=0):
line.set_indent(indent + delta)
def inc_indent(delta):
global line_indent
global indent
if line_indent + delta > 1:
return
if line_indent + delta < -1:
return
line_indent += delta
indent += delta
if indent < 0:
indent = 0
for node in NodeIterator():
line.add(node)
key = str(node)
line_key_dict[key] = line_key_dict.get(key, 0) + 1
if node.type is NodeType.BRACKET or node.type is NodeType.REVERSE_BRACKET:
bracket_key_dict[key] = bracket_key_dict.get(key, 0) + 1
if node.type is NodeType.ENTER:
inc_indent(
1 if line_key_dict.get("(", 0) > line_key_dict.get(")", 0) else 0
)
inc_indent(
1 if line_key_dict.get("{", 0) > line_key_dict.get("}", 0) else 0
)
inc_indent(
1 if line_key_dict.get("[", 0) > line_key_dict.get("]", 0) else 0
)
if line_key_dict.get("(", 0) < line_key_dict.get(")", 0):
inc_indent(-1)
deal_indent(line)
if line_key_dict.get("{", 0) < line_key_dict.get("}", 0):
inc_indent(-1)
deal_indent(line)
if line_key_dict.get("[", 0) < line_key_dict.get("]", 0):
inc_indent(-1)
deal_indent(line)
do_count = line_key_dict.get("do", 0)
end_count = line_key_dict.get("end", 0)
if do_count > 0 and do_count <= end_count:
indent += end_count - do_count
deal_indent(line)
line = create_line()
else:
line = create_line()
deal_indent(line)
line_indent = 0
del line_key_dict
line_key_dict = {}
if str(node) == "else" or str(node) == "elseif":
deal_indent(line, -1)
if str(node) in IndentKeyword:
inc_indent(1)
if str(node) in UnindentKeyword:
inc_indent(-1)
deal_indent(line)
# ----------------------------------------------------------
# Main
# ----------------------------------------------------------
def purge():
global _start_node
global _end_node
global _lines
global _settings
_start_node = None
_end_node = None
_lines = []
_settings = {}
def _lua_format(lines, setting=None):
purge()
global _settings
_settings = setting
# deal content
content = ""
for line in lines:
line += "\n"
content += line
content += "\n"
content = content.replace("\t", "")
content = content.replace(r"\n", r"\\n")
content = content.replace(r"\r", r"\\r")
parse_node(content)
foreach_node()
# for node in NodeIterator():
# print(str(node), node.type)
# return ""
# exit()
foreach_blank()
foreach_string_connect()
foreach_word()
foreach_bracket()
foreach_operator()
foreach_separator()
foreach_equal()
tidy_indent()
# return a string
def lua_format(lines, settings):
_lua_format(lines, settings)
r = ""
blank_line_count = 0
for line in _lines:
if line.is_blank_line():
blank_line_count += 1
if blank_line_count >= 2:
continue
else:
blank_line_count = 0
r += str(line)
r = r[:-1]
return r
def load_lines(fpath):
lines = []
with open(fpath, "r") as fp:
for line in fp.readlines():
line = line[:-1]
lines.append(line)
fp.close()
return lines
return []
if __name__ == "__main__":
settings = {}
settings["tab_size"] = 2
settings["special_symbol_split"] = False
settings["bracket_split"] = False
for i, _ in enumerate(sys.argv):
if i == 0:
continue
content_origin = load_lines(sys.argv[i])
fmt_resultT = lua_format(content_origin, settings)
with open(sys.argv[i], "w") as f:
f.write(fmt_resultT)
print("'{}' formatted".format(sys.argv[i]))
| 26.695067 | 88 | 0.522986 | [
"MIT"
] | schollz/LuaFormat | lua-format.py | 17,873 | Python |
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from resource_management.core.exceptions import ComponentIsNotRunning
from resource_management.core.exceptions import ExecutionFailed
from resource_management.core.resources.system import Directory
from resource_management.core.resources.system import File
from resource_management.core.source import Template
from resource_management.libraries.functions.format import format
from resource_management.libraries.functions.get_user_call_output import get_user_call_output
from resource_management.libraries.script import Script
import metron_service
from rest_commands import RestCommands
class RestMaster(Script):
def install(self, env):
from params import params
env.set_params(params)
self.install_packages(env)
def configure(self, env, upgrade_type=None, config_dir=None):
from params import params
env.set_params(params)
File(format("/etc/default/metron"),
content=Template("metron.j2")
)
metron_service.refresh_configs(params)
commands = RestCommands(params)
if not commands.is_kafka_configured():
commands.init_kafka_topics()
if not commands.is_hbase_configured():
commands.create_hbase_tables()
if not commands.is_pcap_configured():
commands.init_pcap()
if not commands.is_metron_user_hdfs_dir_configured():
commands.create_metron_user_hdfs_dir()
if params.security_enabled and not commands.is_hbase_acl_configured():
commands.set_hbase_acls()
if params.security_enabled and not commands.is_kafka_acl_configured():
commands.init_kafka_acls()
commands.set_kafka_acl_configured()
if params.security_enabled and not commands.is_pcap_perm_configured():
# If we Kerberize the cluster, we need to call this again, to remove write perms from hadoop group
# If we start off Kerberized, it just does the same thing twice.
commands.init_pcap()
commands.set_pcap_perm_configured()
def start(self, env, upgrade_type=None):
from params import params
env.set_params(params)
self.configure(env)
commands = RestCommands(params)
commands.start_rest_application()
def stop(self, env, upgrade_type=None):
from params import params
env.set_params(params)
commands = RestCommands(params)
commands.stop_rest_application()
def status(self, env):
from params import status_params
env.set_params(status_params)
cmd = format('curl --max-time 3 {hostname}:{metron_rest_port}')
try:
get_user_call_output(cmd, user=status_params.metron_user)
except ExecutionFailed:
raise ComponentIsNotRunning()
def restart(self, env):
from params import params
env.set_params(params)
self.configure(env)
commands = RestCommands(params)
commands.restart_rest_application(env)
if __name__ == "__main__":
RestMaster().execute()
| 38.09901 | 110 | 0.721414 | [
"Apache-2.0"
] | kylerichardson/metron | metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/METRON/CURRENT/package/scripts/rest_master.py | 3,848 | Python |
## When fixing the NCM codes in the C180 and C190 records, duplicate field sets
## can appear according to the manual.
## In that case, all records with these characteristics need to be merged.
import helper
def exec(conexao):
cursor = conexao.cursor()
print("RULE 02 - Inicializando",end=' ')
select = " SELECT r0 FROM principal WHERE r1 = \"C010\" "
select = cursor.execute(select)
rselect = select.fetchall()
rselect = [i[0] for i in rselect]
#rselect.append(rselect[len(rselect)-1] + 1000)
select = " SELECT max(r0) FROM principal WHERE "
select = select + " r1 in (\"C191\",\"C195\") "
select = select + " AND r0 > " + str(rselect[len(rselect)-1]) + " "
temp = cursor.execute(select)
temp = temp.fetchone()[0]
rselect.append(temp == None and rselect[len(rselect)-1] + 1 or temp)
n2 = rselect.pop(0)
while len(rselect) > 0:
print('-',end=' ')
n1 = n2
n2 = rselect.pop(0)
        # check whether each C010 block contains repeated C190 records
select = " SELECT r2,r5,r6,r7,count(*) c "
select = select + " FROM principal "
select = select + " WHERE r1 = \"C190\" "
select = select + " AND r0 BETWEEN " + str(n1) + " AND " + str(n2) + " "
select = select + " GROUP BY r2,r5,r6,r7 "
select = select + " HAVING COUNT(*) > 1 "
select = select + " ORDER BY r5 "
repetidos = cursor.execute(select)
repetidos = repetidos.fetchall()
        ## if there are no duplicates, keep checking the remaining C010 blocks
if len(repetidos) == 0:
continue
        ## if there are duplicated C190 records, iterate over them to fix them up
for i in repetidos:
print('-',end=' ')
            ## get the r0 of every duplicated C190 record
select = " SELECT r0 FROM principal "
select = select + " WHERE r1 = \"C190\" "
select = select + " AND r0 BETWEEN " + str(n1) + " AND " + str(n2) + " "
select = select + " AND r2 = \"" + i[0] + "\" "
select = select + " AND r5 = \"" + i[1] + "\" "
select = select + " AND r6 = \"" + i[2] + "\" "
r0s = cursor.execute(select)
r0s = r0s.fetchall()
r0s = [i[0] for i in r0s]
primeiroID = r0s[0]
qtrepetidos = len(r0s)
            ## collect all the C191 and C195 data that belong to this C190
lista = []
for i2 in r0s:
limit = helper.takeLimit(cursor,i2,"C190")
select = " SELECT r0,r1,r2,r3,r4, "
select = select + " (ROUND(CAST(replace(r5,',','.') AS FLOAT),2)) r5, "
select = select + " r6,r7,r8,r9,r10,r11,r12 "
select = select + " FROM principal WHERE "
select = select + " r0 BETWEEN " + str(limit[0]) + " AND " + str(limit[1])
select = select + " AND r1 in (\"C191\",\"C195\") "
temp = cursor.execute(select)
temp = temp.fetchall()
lista.append(temp)
if len(lista) > 1:
lista1 = []
for z in range(0,len(lista)):
lista1 = lista1 + lista[z]
lista = []
ids = []
for i2 in lista1:
lista.append(i2[1:])
ids.append(i2[0])
lista = list(set(lista))
#ids.append(temp[1][0])
            ## delete all the records, then re-insert only the de-duplicated ones
delete = "DELETE FROM principal WHERE "
delete = delete + " r0 BETWEEN " + str(ids[0]) + " AND " + str(ids[len(ids)-1]) + " "
cursor.execute(delete)
conexao.commit()
            ## insert the de-duplicated items while summing the item's total value
valor_total = 0
lista.sort()
primeiroIDTemp = primeiroID
for i3 in lista:
valor_total = valor_total + i3[4]
primeiroIDTemp = primeiroIDTemp + 1
stringt = "\",\"".join([str(iz) for iz in i3])
insert = ""
insert = insert + " INSERT INTO principal(r0,r1,r2,r3,r4,r5,r6,r7,r8,r9,r10,r11,r12) "
insert = insert + " VALUES("
insert = insert + str(primeiroIDTemp) + ",\""
insert = insert + stringt.replace(".",",")
insert = insert + "\")"
cursor.execute(insert)
conexao.commit()
            ## update the C190 total value
update = ""
update = update + " UPDATE principal SET "
update = update + " r8 = \"" + str(round(valor_total / qtrepetidos,2)).replace(".",",") +"\""
update = update + " where r0 = " + str(primeiroID)
cursor.execute(update)
conexao.commit()
    print("Finalizado")
| 42.3 | 109 | 0.493696 | [
"MIT"
] | teocrono/scripts | sped_correcao/rule02.py | 5,083 | Python |
import argparse
from functools import partial
import math
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from scipy.optimize import minimize
def read_args():
'''Reads command line arguments.
Returns: Parsed arguments.'''
parser = argparse.ArgumentParser()
parser.add_argument('-f', '--file', type=str, help='path to .csv file', default='orbit.csv')
parser.add_argument('-u', '--units', type=str, help='units of distance (m or km)', default='km')
return parser.parse_args()
def plane_err(data,coeffs):
'''Calculates the total squared error of the data wrt a plane.
The data should be a list of points. coeffs is an array of
3 elements - the coefficients a,b,c in the plane equation
    ax+by+cz = 0.
    Arguments:
    data: A numpy array of points.
    coeffs: The coefficients of the plane ax+by+cz=0.
Returns: The total squared error wrt the plane defined by ax+by+cz = 0.'''
a,b,c = coeffs
return np.sum((a*data[:,0]+b*data[:,1]+c*data[:,2])**2)/(a**2+b**2+c**2)
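# Quick illustrative check (hypothetical values): for the x-y plane, coeffs = [0, 0, 1],
# only the z components contribute, e.g.
#   plane_err(np.array([[1.0, 2.0, 0.5], [3.0, -1.0, 2.0]]), [0.0, 0.0, 1.0])
# returns 0.5**2 + 2.0**2 = 4.25.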
def project_to_plane(points,coeffs):
'''Projects points onto a plane.
    Projects a list of points onto the plane ax+by+cz=0,
    where a,b,c are elements of coeffs.
    Arguments:
    coeffs: The coefficients of the plane ax+by+cz=0.
points: A numpy array of points.
Returns:
A list of projected points.'''
a,b,c = coeffs
proj_mat = [[b**2+c**2, -a*b , -a*c ],
[ -a*b ,a**2+c**2, -b*c ],
[ -a*c , -b*c ,a**2+b**2]]
return np.matmul(points,proj_mat)/(a**2+b**2+c**2)
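# Quick illustrative check (hypothetical values): projecting onto the x-y plane
# (coeffs = [0, 0, 1]) simply zeroes the z component, e.g.
#   project_to_plane(np.array([[1.0, 2.0, 3.0]]), [0.0, 0.0, 1.0])
# gives [[1.0, 2.0, 0.0]].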
def conv_to_2D(points,x,y):
'''Finds coordinates of points in a plane wrt a basis.
Given a list of points in a plane, and a basis of the plane,
this function returns the coordinates of those points
wrt this basis.
Arguments:
points: A numpy array of points.
x: One vector of the basis.
y: Another vector of the basis.
Returns:
Coordinates of the points wrt the basis [x,y].'''
mat = [x[0:2],y[0:2]]
mat_inv = np.linalg.inv(mat)
coords = np.matmul(points[:,0:2],mat_inv)
return coords
def cart_to_pol(points):
'''Converts a list of cartesian coordinates into polar ones.
Arguments:
points: The list of points in the format [x,y].
Returns:
A list of polar coordinates in the format [radius,angle].'''
pol = np.empty(points.shape)
pol[:,0] = np.sqrt(points[:,0]**2+points[:,1]**2)
pol[:,1] = np.arctan2(points[:,1],points[:,0])#*57.296
return pol
def ellipse_err(polar_coords,params):
'''Calculates the total squared error of the data wrt an ellipse.
params is a 3 element array used to define an ellipse.
It contains 3 elements a,e, and t0.
a is the semi-major axis
e is the eccentricity
t0 is the angle of the major axis wrt the x-axis.
These 3 elements define an ellipse with one focus at origin.
Equation of the ellipse is r = a(1-e^2)/(1+ecos(t-t0))
The function calculates r for every theta in the data.
It then takes the square of the difference and sums it.
Arguments:
polar_coords: A list of polar coordinates in the format [radius,angle].
params: The array [a,e,t0].
Returns:
The total squared error of the data wrt the ellipse.'''
a,e,t0 = params
dem = 1+e*np.cos(polar_coords[:,1]-t0)
num = a*(1-e**2)
r = np.divide(num,dem)
err = np.sum((r - polar_coords[:,0])**2)
return err
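# Quick illustrative check (hypothetical values): for a circle (e = 0) of radius a = 2,
# r = a for every theta, so points already on that circle give zero error, e.g.
#   ellipse_err(np.array([[2.0, 0.0], [2.0, 1.5]]), [2.0, 0.0, 0.0])  # -> 0.0
# while a point at radius 3 in the same data would add (2 - 3)**2 = 1 to the sum.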
# Main program
args = read_args()
data = np.loadtxt(args.file, skiprows=1, usecols=(1, 2, 3))
# try to fit a plane to the data first.
# make a partial function of plane_err by supplying the data
plane_err_data = partial(plane_err,data)
# plane is defined by ax+by+cz=0.
p0 = [0,0,1] # make an initial guess
# minimize the error
p = minimize(plane_err_data,p0,method='nelder-mead').x
p = p/np.linalg.norm(p) # normalize p
# now p is the normal vector of the best-fit plane.
# lan_vec is a vector along the line of intersection of the plane
# and the x-y plane.
lan_vec = np.cross([0,0,1],p)
# if lan_vec is [0,0,0] it means that it is undefined and can take on
# any value. So we set it to [1,0,0] so that the rest of the
# calculation can proceed.
if (np.array_equal(lan_vec,[0,0,0])):
lan_vec = [1,0,0]
# inclination is the angle between p and the z axis.
inc = math.acos(np.dot(p,[0,0,1])/np.linalg.norm(p))
# lan is the angle between the lan_vec and the x axis.
lan = math.acos(np.dot(lan_vec,[1,0,0])/np.linalg.norm(lan_vec))
# now we try to convert the problem into a 2D problem.
# project all the points onto the plane.
proj_data = project_to_plane(data,p)
# p_x and p_y are 2 orthogonal unit vectors on the plane.
p_x,p_y = lan_vec, project_to_plane(np.cross([0,0,1],lan_vec),p)
p_x,p_y = p_x/np.linalg.norm(p_x), p_y/np.linalg.norm(p_y)
# find coordinates of the points wrt the basis [x,y].
coords_2D = conv_to_2D(proj_data,p_x,p_y)
# now try to fit an ellipse to these points.
# convert them into polar coordinates
polar_coords = cart_to_pol(coords_2D)
# make an initial guess for the parametres
r_m = np.min(polar_coords[:,0])
r_M = np.max(polar_coords[:,0])
a0 = (r_m+r_M)/2
e0 = (r_M-r_m)/(r_M+r_m)
t00 = polar_coords[np.argmin(polar_coords[:,0]),1]
params0 = [a0,e0,t00] # initial guess
# make a partial function of ellipse_err with the data
ellipse_err_data = partial(ellipse_err,polar_coords)
# minimize the error
params = minimize(ellipse_err_data,params0,method='nelder-mead').x
# output the parametres
print("Semi-major axis: ",params[0],args.units)
print("Eccentricity: ",params[1])
print("Argument of periapsis: ",params[2],"rad")
print("Inclination: ",inc,"rad")
print("Longitude of Ascending Node:",lan,"rad")
# now plot the results
a,e,t0 = params
# generate 1000 points on the ellipse
theta = np.linspace(0,2*math.pi,1000)
radii = a*(1-e**2)/(1+e*np.cos(theta-t0))
# convert to cartesian
x_s = np.multiply(radii,np.cos(theta))
y_s = np.multiply(radii,np.sin(theta))
# convert to 3D
mat = np.column_stack((p_x,p_y))
coords_3D = np.matmul(mat,[x_s,y_s])
fig = plt.figure()
ax = Axes3D(fig)
ax.axis('equal')
# plot
ax.plot3D(coords_3D[0],coords_3D[1],coords_3D[2],'red',label='Fitted Ellipse')
ax.scatter3D(data[::8,0],data[::8,1],data[::8,2],c='black',depthshade=False,label='Initial Data')
# The Pale Blue Dot
ax.scatter3D(0,0,0,c='blue',depthshade=False,label='Earth')
ax.can_zoom()
ax.legend()
plt.show()
| 29.726457 | 100 | 0.657867 | [
"MIT"
] | Alexandros23Kazantzidis/orbitdeterminator | orbitdeterminator/kep_determination/ellipse_fit.py | 6,629 | Python |
from os import environ
from os.path import join, dirname
# Third party imports
from flask_compress import Compress
class Config:
"""
Common configurations
"""
# private variable used by Flask to secure/encrypt session cookies
SECRET_KEY = environ['SECRET_KEY']
# get the root url and concantenate it with the client secrets file
OIDC_CLIENT_SECRETS = join(dirname(__file__), "client_secrets.json")
# test out login and registration in development without using SSL
OIDC_COOKIE_SECURE = False
# URL to handle user login
OIDC_CALLBACK_ROUTE = "/oidc/callback"
# what user data to request on log in
OIDC_SCOPES = ["openid", "email", "profile"]
OIDC_ID_TOKEN_COOKIE_NAME = "oidc_token"
TESTING = False
DEBUG = False
CSRF_ENABLED = True # protect against CSRF attacks
SQLALCHEMY_TRACK_MODIFICATIONS = False
# Gzip compression allows to reduce the size of the response by 70-90%
# Flask-Compress compresses the application’s response with gzip
COMPRESS_MIMETYPES = ['text/html', 'text/css', 'text/xml',
'application/json', 'application/javascript']
COMPRESS_LEVEL = 6
COMPRESS_MIN_SIZE = 500
CACHE_TYPE = 'simple' # compare with memcached, redis, filesystem, etc.
# 'datastore' does not require any additional configuration.
DATA_BACKEND = 'datastore' # alternatively 'cloudsql' or 'mongodb'
# Google Cloud Project ID
PROJECT_ID = environ['PROJECT_ID']
# CloudSQL & SQLAlchemy configuration
CLOUDSQL_USER = environ['CLOUDSQL_USER']
CLOUDSQL_PASSWORD = environ['CLOUDSQL_PASSWORD']
CLOUDSQL_DATABASE = environ['CLOUDSQL_DATABASE']
DB_HOST_IP = environ['DB_HOST_IP']
DB_HOST_PORT = environ['DB_HOST_PORT']
# The CloudSQL proxy is used locally to connect to the cloudsql instance.
# To start the proxy, use:
#
# $ cloud_sql_proxy -instances=your-connection-name=tcp:3306
#
# Port 3306 is the standard MySQL port. If you need to use a different port,
# change the 3306 to a different port number.
# Alternatively, use a local MySQL instance for testing.
LOCAL_SQLALCHEMY_DATABASE_URI = (
'postgresql://{}:{}@127.0.0.1:5432/{}').format(CLOUDSQL_USER, CLOUDSQL_PASSWORD, CLOUDSQL_DATABASE)
# When running on App Engine, a unix socket is used to connect to the cloudsql instance.
LIVE_SQLALCHEMY_DATABASE_URI = ('postgresql://{}:{}@{}:{}/{}').format(
CLOUDSQL_USER, CLOUDSQL_PASSWORD, DB_HOST_IP, DB_HOST_PORT, CLOUDSQL_DATABASE)
SQLALCHEMY_DATABASE_URI = LIVE_SQLALCHEMY_DATABASE_URI if environ.get(
'FLASK_ENV') == 'production' else LOCAL_SQLALCHEMY_DATABASE_URI
# Mongo configuration
# If using mongolab, the connection URI is available from the mongolab control
# panel. If self-hosting on compute engine, replace the values below.
# MONGO_URI = 'mongodb://user:password@host:27017/database'
# Google Cloud Storage and upload settings.
# You can adjust the max content length and allow extensions settings to allow
# larger or more varied file types if desired.
CLOUD_STORAGE_BUCKET = environ['CLOUD_STORAGE_BUCKET']
MAX_CONTENT_LENGTH = 8 * 1024 * 1024
ALLOWED_EXTENSIONS = set(['png', 'jpg', 'jpeg', 'gif'])
# OAuth2 configuration.
# This can be generated from the Google Developers Console at
# https://console.developers.google.com/project/_/apiui/credential.
#
# * http://localhost:8080/oauth2callback
# * https://flask-okta-1.appspot.com/oauth2callback.
#
# If you receive a invalid redirect URI error review you settings to ensure
# that the current URI is allowed.
# GOOGLE_OAUTH2_CLIENT_ID = \
# 'your-client-id'
# GOOGLE_OAUTH2_CLIENT_SECRET = 'your-client-secret'
class ProductionConfig(Config):
"""
Production configurations
"""
DEBUG = False
TESTING = False
class DevelopmentConfig(Config):
"""
Development configurations
"""
DEBUG = True
SQLALCHEMY_ECHO = True
class TestingConfig(Config):
"""
Testing configurations
"""
TESTING = True
app_config = {
'development': DevelopmentConfig,
'production': ProductionConfig,
'testing': TestingConfig
}
def configure_app(app):
"""Multiple app configurations"""
# Configure Compressing
Compress(app)
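# Illustrative usage sketch (assumes a Flask app object created elsewhere):
#
#   from flask import Flask
#   app = Flask(__name__)
#   app.config.from_object(app_config['development'])
#   configure_app(app)  # enables gzip compression via Flask-Compress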
| 31.385714 | 107 | 0.700046 | [
"MIT"
] | Rwothoromo/Flask-Okta | config.py | 4,396 | Python |
"""Module with functions around testing. You can run tests including doctest with generating coverage,
you can generate tests from readme or you can configure tests in conftest with single call."""
from mypythontools_cicd.tests.tests_internal import (
add_readme_tests,
deactivate_test_settings,
default_test_config,
run_tests,
setup_tests,
TestConfig,
)
__all__ = [
"add_readme_tests",
"deactivate_test_settings",
"default_test_config",
"run_tests",
"setup_tests",
"TestConfig",
]
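# Illustrative usage sketch; calling these helpers without arguments is an
# assumption here, the real signatures live in tests_internal:
#
#   from mypythontools_cicd import tests
#   tests.setup_tests()  # one-call conftest configuration
#   tests.run_tests()    # run the suite (doctests, coverage) with default config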
| 25.333333 | 102 | 0.734962 | [
"MIT"
] | Malachov/mypythontools_cicd | mypythontools_cicd/tests/__init__.py | 532 | Python |
# -*- coding: utf-8 -*-
import tkinter as tk
from tkinter import messagebox
import ipaddress
from model import RecvTcpThread, RecvUdpThread, RecvMonitorThread, HachiUtil
from controller import LogController
# =================================
# == Constants
# =================================
DEF_PROTO = 1 # 0:TCP 1:UDP
DEF_DST_PROT = 12000
MAX_DATALEN = 9999
# =================================
# == Public classes
# =================================
class RecvParams(object):
    """ Receive parameter settings class """
_instance = None
def __new__(cls, *args, **keys):
if cls._instance is None:
cls._instance = object.__new__(cls)
cls.proto = tk.IntVar(value=DEF_PROTO)
cls.ip = tk.StringVar()
cls.port = tk.IntVar(value=DEF_DST_PROT)
return cls._instance
class MonitorParams(object):
    """ Receive monitor status class """
_instance = None
def __new__(cls, *args, **keys):
if cls._instance is None:
cls._instance = object.__new__(cls)
cls.datalen = tk.IntVar(value=0)
cls.bps = tk.StringVar(value="0 bps")
cls.pps = tk.IntVar(value=0)
            cls.recv_btn = tk.StringVar(value="受信開始")
            # Dynamically recompute the bps display whenever pps changes
cls.pps.trace_add('write', HachiUtil.UpdateBps(
cls.datalen, cls.pps, cls.bps))
return cls._instance
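# Illustrative sketch of the __new__ singleton pattern used by the classes above
# (assumes a tkinter root window already exists, since the tk variables need one):
def _singleton_demo():
    first = MonitorParams()
    second = MonitorParams()
    return first is second  # True: every call shares the same instance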
class RecvShareObject(object):
    """ Class for sharing values between threads """
def __init__(self):
self.count = 0
self.total = 0
class RecvAction:
def __init__(self, widgets):
self.recvParams = RecvParams()
self.monitorParams = MonitorParams()
self.widgets = widgets
self.stat = 0
self.shareObj = RecvShareObject()
        # Thread handles
self.th_recv = None
self.th_monitor = None
def __call__(self, event=None):
        # Validate the input parameters
msg = _param_check()
if len(msg) > 0:
messagebox.showwarning(title="warning", message=msg)
return
if self.stat == 0:
self._recv_start()
else:
self._recv_stop()
def _recv_start(self):
        # Logger instance
logger = LogController.LogController()
        # Start the monitor thread
        self.monitor_start()
        # Start the packet receive thread
# 0:TCP 1:UDP
proto = RecvParams.proto.get()
ip = RecvParams.ip.get()
port = RecvParams.port.get()
if proto == 0:
logger.insert("TCPパケット受信を開始します({}:{})".format(ip, port))
self.recv_tcp_start()
elif proto == 1:
logger.insert("UDPパケット受信を開始します({}:{})".format(ip, port))
self.recv_udp_start()
MonitorParams.recv_btn.set("受信停止")
        # Disable the settings widgets
for widget in self.widgets.values():
widget.state(['disabled'])
self.stat = 1
    def recv_tcp_start(self):
        """ Start the TCP packet receive thread """
self.th_recv = RecvTcpThread.RecvTcpThread(RecvParams(), self.shareObj)
self.th_recv.setDaemon(True)
self.th_recv.start()
    def recv_udp_start(self):
        """ Start the UDP packet receive thread """
self.th_recv = RecvUdpThread.RecvUdpThread(RecvParams(), self.shareObj)
self.th_recv.setDaemon(True)
self.th_recv.start()
    def monitor_start(self):
        """ Start the packet receive monitoring thread """
self.th_monitor = RecvMonitorThread.RecvMonitorThread(
MonitorParams(), self.shareObj)
self.th_monitor.setDaemon(True)
self.th_monitor.start()
    def _recv_stop(self):
        """ Stop the receive and monitor threads """
        LogController.LogController().insert("パケット受信を停止します")
        # Stop the threads
if self.th_recv is not None:
self.th_recv.stop()
if self.th_monitor is not None:
self.th_monitor.stop()
MonitorParams().recv_btn.set("受信開始")
        # Re-enable the settings widgets
for widget in self.widgets.values():
widget.state(['!disabled'])
self.stat = 0
# =================================
# == Local functions
# =================================
def _param_check():
    """ Validate the receive parameters """
msg = ""
    # IP address check
if not HachiUtil.LocalAddress().is_localaddress(RecvParams.ip.get()):
        # No matching local interface
msg += "・指定した待受IPアドレスがインターフェースにありません。\n"
    # Port number must be within 0~65535
if not (0 <= RecvParams.port.get() <= 65535):
msg += "・ポート番号は 0~65535 の範囲で指定してください。\n"
return msg
| 24.355556 | 79 | 0.554973 | [
"MIT"
] | kinformation/hachi | controller/RxController.py | 4,992 | Python |
# Copyright 2014, Sandia Corporation. Under the terms of Contract
# DE-AC04-94AL85000 with Sandia Corporation, the U.S. Government retains certain
# rights in this software.
from behave import *
import numpy
import toyplot
import testing
@given(u'a sample plot, the plot can be rendered with a dashed line style.')
def step_impl(context):
canvas, axes, mark = toyplot.plot(numpy.linspace(0, 1) ** 2, style={"stroke-dasharray":"5,5"})
testing.assert_canvas_equal(canvas, "style-stroke-dasharray")
| 31.75 | 98 | 0.751969 | [
"BSD-3-Clause"
] | StuntsPT/toyplot | features/steps/style.py | 508 | Python |
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Grappler Arithmetic Optimizer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
class ArithmeticOptimizerTest(test.TestCase):
# See b/146524878.
def testFunctionArgShapeInference(self):
@def_function.function
def f(x, y):
return math_ops.matmul(
x, array_ops.reshape(array_ops.transpose(y), [384, 1536]))
with context.eager_mode():
x = array_ops.ones((1, 384))
y = array_ops.ones((1536, 384))
with context.collect_graphs(optimized=True) as graphs:
f(x, y).numpy()
self.assertLen(graphs, 1)
self.assertLen(graphs[0].node, 4)
self.assertEqual(graphs[0].node[2].name,
'ArithmeticOptimizer/FoldTransposeIntoMatMul_MatMul')
if __name__ == '__main__':
test.main()
| 34.45098 | 80 | 0.702903 | [
"Apache-2.0"
] | 00arun00/tensorflow | tensorflow/python/grappler/arithmetic_optimizer_test.py | 1,757 | Python |
from .DtnAbstractParser import DtnAbstractParser
from pydantic import confloat, PositiveFloat
from typing import Optional
class DtnFileBroadcasterParser(DtnAbstractParser):
""" Validator for a file generator """
# Data Tyoe
data_type: str
# Start time of the file transmission (in simulation time)
tstart: confloat(gt=-1) = 0
# Bundle size in bits
bundle_size: PositiveFloat
# File size in [bits]
size: PositiveFloat
# Bundle Time-to-live (TTL) in [sec]
bundle_TTL: PositiveFloat
# Data criticality. If True, then network will be flooded
# with this data
critical: bool = False
# How many times to send the file
repeat: int = 1
# How long in [sec] to wait between sending the files again
    wait: float = 0.0
| 26.166667 | 63 | 0.695541 | [
"Apache-2.0"
] | msancheznet/dtnsim | simulator/parsers/DtnFileBroadcasterParser.py | 785 | Python |
# this file is only for presentation of the swallow fish game
# http://www.4399.com/flash/201247_4.htm
from pyautogui import press, keyDown, keyUp
from time import time
import serialPart
# u = 'up'
# d = 'down'
# l = 'left'
# r = 'right'
u = 'w'
d = 's'
l = 'a'
r = 'd'
def key_unit(key, period):
cmd_time_cost = 0.21
to_delay = period - cmd_time_cost
if to_delay > 0:
# start = time()
keyDown(key) # mind that these them selves take time also
delay(period - 0.203)
keyUp(key)
else:
keyDown(key)
keyUp(key)
# print('cannot be too short',time())
# print(time()-start)
def delay(period): # in second
start = time()
while (time() - start) < period:
pass
def sim_key_by_time(x, y, period=0.0000000000000001, thresh=50): # adjust this period(second) for better game control
half_period = period / 2
    key_x = key_y = None  # TODO: use this None to mean "no key press"
# y = -88
# print(x, y, '000')
if x > 100:
x = 100
if y > 100:
y = 100
if x < -100:
x = -100
if y < -100:
y = -100
# print(x, y, '111')
if (x < -thresh) or (x > thresh):
if x > 0:
key_x = r
else:
key_x = l
x = abs(x) - thresh
else:
x = 0
if (y < -thresh) or (y > thresh):
if y > 0:
key_y = u
else:
key_y = d
y = abs(y) - thresh
else:
y = 0
# print(x, y, '222')
tx = x / (100 - thresh) * half_period
ty = y / (100 - thresh) * half_period
# tx = abs(x) * 0.01 * half_period
# ty = abs(y) * 0.01 * half_period
release_period = 2 * half_period - tx - ty
# print(key_x, key_y, tx, ty, period, release_period)
#
# t1 = time()
if key_x:
key_unit(key_x, tx)
if key_y:
key_unit(key_y, ty)
# delay(release_period)
# print(tx+ty,period,time()-t1)
#
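# Worked example (hypothetical numbers): with thresh=50 and period=2.0 the
# half_period is 1.0; an input of x=100, y=0 maps to key 'd' with
# tx = (100-50)/(100-50) * 1.0 = 1.0 s, so key_unit holds 'd' for roughly
# 0.8 s once the ~0.2 s keyDown/keyUp overhead is subtracted, while y stays idle.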
def sim_key_by_press(x, y, thresh=10, div=1):
# half_period = period / 2
    key_x = key_y = None  # TODO: use this None to mean "no key press"
# y = -88
# print(x, y, '000')
if x > 100:
x = 100
if y > 100:
y = 100
if x < -100:
x = -100
if y < -100:
y = -100
# print(x, y, '111')
if (x < -thresh) or (x > thresh):
if x > 0:
key_x = r
else:
key_x = l
x = abs(x) - thresh
else:
x = 0
if (y < -thresh) or (y > thresh):
if y > 0:
key_y = u
else:
key_y = d
y = abs(y) - thresh
else:
y = 0
x = x // div
y = y // div
t1 = time()
    while x > 0 or y > 0:
        # press the axis key with more remaining presses, one press at a time
        if x >= y:
            press(key_x)
            x -= 1
        else:
            press(key_y)
            y -= 1
print(x + y, time() - t1)
def sim_key_by_shortest_hold(x, y, thresh=10, div=10):
# half_period = period / 2
    key_x = key_y = None  # TODO: use this None to mean "no key press"
# y = -88
# print(x, y, '000')
if x > 100:
x = 100
if y > 100:
y = 100
if x < -100:
x = -100
if y < -100:
y = -100
# print(x, y, '111')
if (x < -thresh) or (x > thresh):
if x > 0:
key_x = r
else:
key_x = l
x = abs(x) - thresh
else:
x = 0
if (y < -thresh) or (y > thresh):
if y > 0:
key_y = u
else:
key_y = d
y = abs(y) - thresh
else:
y = 0
x = x // div
y = y // div
t1 = time()
    while x > 0 or y > 0:
        # tap the axis key with more remaining taps, shortest possible hold each time
        if x >= y:
            key_unit(key_x, 0)
            x -= 1
        else:
            key_unit(key_y, 0)
            y -= 1
print(x + y, time() - t1)
if __name__ == '__main__':
period = 1 # in second
last_time = time()
delay(1) # wait for user to switch to game
# x_stop_center = 235
# y_stop_center = 74
ser = serialPart.serial_open()
x_stop_center, y_stop_center = serialPart.get_avg_stop_point(ser)
while True:
xyz_read = serialPart.read_one_period(ser)
z_read, y_read, x_read = xyz_read.values() # order adjusted for the stick
# print(x_read,y_read)
        x = -(x_read - x_stop_center)  # within ±100 we skip solving the trig for the relative angle increment; that would be more proper, but roughly treat it as a linear approximation
y = y_read - y_stop_center
now_time = time()
delta_time = now_time - last_time
if delta_time > period:
last_time = now_time
# print(x,y)
sim_key_by_time(x, y, thresh=5, period=1 * period)
# sim_key_by_press(x, y, div=10)
# sim_key_by_shortest_hold(x,y,div=30)
# pyautogui.rightClick()
# pyautogui.hotkey('ctrl', 'v')
| 23.898551 | 119 | 0.466343 | [
"MIT"
] | solidsteam/myJoystic | simKeyControlGame_temp2.py | 5,027 | Python |
#!/usr/bin/env python3
'''
A series of test for PyKMCFile class.
'''
import sys
import os
import subprocess
import kmer_utils
import init_sys_path
import py_kmc_api as pka
import pytest
if not init_sys_path.is_windows():
import resource
@pytest.fixture(scope="module", autouse=True)
def create_kmc_db():
'''
Set up tests and clean up after.
'''
kmer_len = 17
memory = 2 #GB
cutoff_min = 1
sig_len = 9
reads_src = 'input.fastq'
reads = (
'GGCATTGCATGCAGTNNCAGTCATGCAGTCAGGCAGTCATGGCATGCAACGACGATCAGTCATGGTCGAG',
'GGCATTGCATGCAGTNNCAGTCATGCAGTCAGGCAGTCATGGCATGCAACGACGATCAGTCATGGTCGAG',
'GTCGATGCATCGATGCTGATGCTGCTGTGCTAGTAGCGTCTGAGGGCTA'
)
_save_reads_as_fastq(reads, reads_src)
kmers = _cout_kmers(reads, kmer_len)
absent_kmers = _generate_not_existing_kmers(kmers, kmer_len)
_run_kmc(cutoff_min, kmer_len, memory, sig_len, reads_src)
result = {
'kmers': kmers,
'kmer_len': kmer_len,
'sig_len': sig_len,
'absent_kmers': absent_kmers
}
yield result
os.remove(reads_src)
os.remove('kmc_db.kmc_pre')
os.remove('kmc_db.kmc_suf')
def _cout_kmers(reads, kmer_len):
''' Simple k-mer counting routine. '''
kmers = {}
for read in reads:
for start in range(0, len(read) - kmer_len + 1):
kmer = read[start:start+kmer_len]
if 'N' in kmer:
continue
rev = kmer_utils.rev_comp(kmer)
if rev < kmer:
kmer = rev
if kmer in kmers.keys():
kmers[kmer] += 1
else:
kmers[kmer] = 1
return kmers
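# Illustrative example (hypothetical read, assuming kmer_utils.rev_comp returns the
# reverse complement): _cout_kmers(("ACGTT",), 3) sees the 3-mers ACG, CGT and GTT;
# CGT canonicalises to ACG and GTT to AAC, so the result is {"ACG": 2, "AAC": 1}.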
def _save_reads_as_fastq(reads, file_name):
''' Save reads from input to file named file_name. '''
file = open(file_name, 'w')
for read in reads:
file.write("@TEST\n")
file.write(read + "\n")
file.write("+TEST\n")
file.write("I"*len(read) + "\n")
file.close()
def _generate_not_existing_kmers(kmers, kmer_len):
''' Generate k-mers that are not present in the database.
:kmers: existing k-mers
:kmer_len: length of k-mers
'''
def increment_kmer(kmer, start):
''' Increments k-mer to next lexographical.
Start from pos :start: (from end, i.e. start = 0 means last k-mer symbol). '''
def replace_char(string, pos, new_char):
''' Create new string with character at :pos: changed to :new_char:. '''
if pos < 0:
pos = len(string) + pos
return string[:pos] + new_char + string[pos+1:]
for i in range(start, len(kmer)):
if kmer[-1-i] == 'A':
return replace_char(kmer, -1 - i, 'C')
if kmer[-1-i] == 'C':
return replace_char(kmer, -1 - i, 'G')
if kmer[-1-i] == 'T':
return replace_char(kmer, -1 - i, 'T')
kmer = replace_char(kmer, -1 - i, 'T')
return kmer
absent_kmers = []
for i in range(0, kmer_len):
for kmer_str in kmers.keys():
inc_kmer = increment_kmer(kmer_str, i)
if not inc_kmer in kmers.keys():
absent_kmers.append(inc_kmer)
return absent_kmers
def _run_kmc(cutoff_min, kmer_len, memory, sig_len, reads_src):
''' Runs kmc. '''
if init_sys_path.is_linux() or init_sys_path.is_mac():
kmc_path = os.path.join(os.path.dirname(__file__), '../../bin/kmc')
elif init_sys_path.is_windows():
kmc_path = os.path.join(os.path.dirname(__file__), '../../x64/Release/kmer_counter.exe')
if init_sys_path.is_mac():
resource.setrlimit(resource.RLIMIT_NOFILE, (2048, 2048))
subprocess.call([kmc_path,
'-ci{}'.format(cutoff_min),
'-k{}'.format(kmer_len),
'-m{}'.format(memory),
'-p{}'.format(sig_len),
reads_src,
'kmc_db',
'.'
])
def _open_for_listing():
''' Open kmc database for listing and check if opened sucessfully. '''
kmc_file = pka.KMCFile()
assert kmc_file.OpenForListing('kmc_db')
return kmc_file
def _open_for_ra():
''' Open kmc database for random access and check if opened sucessfully. '''
kmc_file = pka.KMCFile()
assert kmc_file.OpenForRA('kmc_db')
return kmc_file
def test_info(create_kmc_db):
'''
Test if some fields in object returned from Info are set properly.
'''
pattern = create_kmc_db
kmc_file = _open_for_listing()
info = kmc_file.Info()
assert info.kmer_length == pattern['kmer_len']
assert info.mode == 0 # no Quake mode (quake not supported anymore)
assert info.counter_size == 1
assert info.signature_len == pattern['sig_len']
assert info.min_count == 1
assert info.both_strands
assert info.total_kmers == len(pattern['kmers'])
def test_kmc_file_next_kmer(create_kmc_db):
''' Test if all counted k-mers are returned by KMC API using NextKmer method. '''
pattern = create_kmc_db['kmers']
kmc_file = _open_for_listing()
counter = pka.Count()
kmer = pka.KmerAPI(create_kmc_db['kmer_len'])
res = {}
while kmc_file.ReadNextKmer(kmer, counter):
res[str(kmer)] = counter.value
assert res == pattern
def test_get_counters_for_read(create_kmc_db):
''' Test case for GetCountersForRead method of KMCFile. '''
kmers = create_kmc_db['kmers']
read = "GGCATTGCATGCAGTNNCAGTCATGCAGTCAGGCAGTCATGGCATGCGTAAACGACGATCAGTCATGGTCGAG"
pattern = []
kmer_len = create_kmc_db['kmer_len']
for i in range(0, len(read) - kmer_len + 1):
kmer = read[i:i+kmer_len]
if 'N' in kmer:
pattern.append(0)
continue
rev = kmer_utils.rev_comp(kmer)
if rev < kmer:
kmer = rev
if not kmer in kmers.keys():
pattern.append(0)
else:
pattern.append(kmers[kmer])
kmc_file = _open_for_ra()
res = pka.CountVec()
kmc_file.GetCountersForRead(read, res)
assert res.value == pattern
def test_check_kmer(create_kmc_db):
'''
Test case for CheckKmer method.
Check if are k-mers from input are present in the database and
if some not present in the input are absent in the output.
'''
kmers = create_kmc_db['kmers']
kmer_len = create_kmc_db['kmer_len']
kmer = pka.KmerAPI(kmer_len)
counter = pka.Count()
kmc_file = _open_for_ra()
for kmer_str, count in kmers.items():
kmer.from_string(kmer_str)
assert kmc_file.CheckKmer(kmer, counter)
assert counter.value == count
absent_kmers = create_kmc_db['absent_kmers']
for kmer_str in absent_kmers:
kmer.from_string(kmer_str)
assert not kmc_file.CheckKmer(kmer, counter)
| 31.420091 | 96 | 0.61183 | [
"Unlicense"
] | refresh-bio/KMC | tests/py_kmc_api/test_py_kmc_file.py | 6,881 | Python |
import tensorflow as tf
def leakyrelu(x, leak=0.01):
"""
    LeakyReLU activation function
    Args:
        x (Tensor): input
        leak (float): slope for x < 0
Returns:
Tensor
"""
f1 = 0.5 * (1 + leak)
f2 = 0.5 * (1 - leak)
return f1 * x + f2 * tf.abs(x)
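# Quick algebra check (illustrative): with leak = 0.01, f1 = 0.505 and f2 = 0.495, so
#   x >= 0: (f1 + f2) * x = x
#   x <  0: (f1 - f2) * x = 0.01 * x = leak * x
# i.e. the expression equals max(x, leak * x) without a branch.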
| 15.222222 | 34 | 0.474453 | [
"BSD-2-Clause"
] | tangxyw/RecAlgorithm | algorithm/BST/leakyrelu.py | 290 | Python |
from unittest import TestCase
from generative_playground.codec.hypergraph_grammar import HypergraphGrammar
from generative_playground.molecules.models.conditional_probability_model import CondtionalProbabilityModel
from generative_playground.models.pg_runner import PolicyGradientRunner
class TestStart(TestCase):
def test_get_set_params_as_vector(self):
grammar_cache = 'hyper_grammar_guac_10k_with_clique_collapse.pickle' # 'hyper_grammar.pickle'
first_runner = PolicyGradientRunner('hypergraph:' + grammar_cache,
BATCH_SIZE=10,
reward_fun=lambda x: 0,
max_steps=60,
num_batches=2,
lr=0.05,
entropy_wgt=0.0,
# lr_schedule=shifted_cosine_schedule,
root_name='test',
preload_file_root_name=None,
plot_metrics=True,
save_location='./data',
metric_smooth=0.0,
decoder_type='graph_conditional', # 'rnn_graph',# 'attention',
on_policy_loss_type='advantage_record',
rule_temperature_schedule=None,
# lambda x: toothy_exp_schedule(x, scale=num_batches),
eps=0.0,
priors='conditional',
)
coeffs = first_runner.get_model_coeff_vector()
coeffs[0] = 1
first_runner.set_model_coeff_vector(coeffs)
coeffs2 = first_runner.get_model_coeff_vector()
assert coeffs2[0] == coeffs[0]
def test_get_set_params_as_property(self):
grammar_cache = 'hyper_grammar_guac_10k_with_clique_collapse.pickle' # 'hyper_grammar.pickle'
first_runner = PolicyGradientRunner('hypergraph:' + grammar_cache,
BATCH_SIZE=10,
reward_fun=lambda x: 0,
max_steps=60,
num_batches=2,
lr=0.05,
entropy_wgt=0.0,
# lr_schedule=shifted_cosine_schedule,
root_name='test',
preload_file_root_name=None,
plot_metrics=True,
save_location='./data',
metric_smooth=0.0,
decoder_type='graph_conditional', # 'rnn_graph',# 'attention',
on_policy_loss_type='advantage_record',
rule_temperature_schedule=None,
# lambda x: toothy_exp_schedule(x, scale=num_batches),
eps=0.0,
priors='conditional',
)
coeffs = first_runner.params
coeffs[0] = 1
first_runner.params = coeffs
coeffs2 = first_runner.params
assert coeffs2[0] == coeffs[0]
| 59.777778 | 107 | 0.430165 | [
"MIT"
] | ZmeiGorynych/generative_playground | src/tests/runner_tests.py | 3,766 | Python |
import glob
print(glob.glob("./src/ibmaemagic/sdk/*"))
# import sys
# sys.path.append("./src/ibmaemagic/magic/")
# from analytic_magic_client import AnalyticMagicClient
# import analytic_engine_client.AnalyticEngineClient
# from ibmaemagic.magic.analytic_magic_client import AnalyticMagicClient
# from ibmaemagic.sdk.analytic_engine_client import AnalyticEngineClient
# from ibmaemagic import AnalyticEngineClient
import sys
sys.path.append("./src/ibmaemagic/sdk/")
from analytic_engine_client import AnalyticEngineClient
| 32.8125 | 72 | 0.832381 | [
"MIT"
] | DSE-JARVIS/3genius | tests/sdk/foo.py | 525 | Python |
from pyinfra import host
from pyinfra.modules import git, pip, server
# Ensure the state of git repositories
git.repo(
{'Clone pyinfra repository'},
'[email protected]:Fizzadar/pyinfra',
host.data.app_dir,
branch='develop',
ssh_keyscan=True,
sudo=True,
# Carry SSH agent details w/sudo
preserve_sudo_env=True,
)
# Manage pip packages
did_install = pip.packages(
{'Install virtualenv with pip'},
['virtualenv'],
sudo=True,
)
# Use operation meta to affect the deploy
if did_install.changed:
server.shell(
'echo "Clean package build/etc"',
)
# Create a virtualenv
server.shell(
{'Setup the virtualenv'},
'virtualenv {{ host.data.env_dir }}',
sudo=True,
sudo_user='pyinfra',
)
# and manage pip within it
pip.packages(
{'Install Python packages with pip'},
['ElasticQuery', 'JsonTest'],
virtualenv=host.data.env_dir,
sudo=True,
sudo_user='pyinfra',
)
| 21.454545 | 44 | 0.670551 | [
"MIT"
] | VilhelmPrytz/pyinfra | examples/python_app.py | 944 | Python |
# Generated by Django 3.0.3 on 2021-03-05 03:35
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
('auth', '0011_update_proxy_permissions'),
]
operations = [
migrations.CreateModel(
name='User',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('password', models.CharField(max_length=128, verbose_name='password')),
('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
('email', models.EmailField(max_length=25, unique=True)),
('name', models.CharField(max_length=255)),
('is_active', models.BooleanField(default=True)),
('is_staff', models.BooleanField(default=False)),
('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
],
options={
'abstract': False,
},
),
]
| 50 | 266 | 0.637059 | [
"MIT"
] | liweisherry/recipe-app-api | app/core/migrations/0001_initial.py | 1,700 | Python |
# -*- coding: utf-8 -*-
from dfa import *
"""
Created on Mon Apr 1 09:50:10 2019
@author: Savio
"""
#Example of a simple DFA
#DFA only accepts odd-sized string
states = {0, 1}
alphabet = {'0','1'}
transition = {
(0, '0'): 1,
(0, '1'): 1,
(1, '0'): 0,
(1, '1'): 0,
}
start_state = 0
accept_states = {1}
dfa1 = DFA(states, alphabet, transition, start_state, accept_states)
string = list('010') #Accept
#string= list('1010') #Reject
print(dfa1.run(string))
| 18.259259 | 69 | 0.578093 | [
"MIT"
] | saviorabelo/dfa | main.py | 493 | Python |
from amaranth import *
from amaranth.asserts import *
from amaranth.utils import log2_int
from amaranth_soc import wishbone
from amaranth_soc.memory import MemoryMap
from amaranth_soc.periph import ConstantMap
from . import Peripheral
from ..cores import litedram
__all__ = ["WritebackCache", "SDRAMPeripheral"]
class WritebackCache(Elaboratable):
"""Write-back cache.
A write-back cache designed to bridge the SoC interconnect to LiteDRAM.
Parameters
----------
dram_port : :class:`litedram.NativePort`
LiteDRAM user port.
size : int
Cache size.
data_width : int
Initiator bus data width.
granularity : int
Initiator bus granularity.
dirty_init : bool
Dirty initialization. Defaults to ``False``. May be useful for simulation.
Attributes
----------
intr_bus : :class:`amaranth_soc.wishbone.Interface`
Initiator bus, with support for incremental bursts.
"""
def __init__(self, dram_port, *, size, data_width, granularity=None, dirty_init=False):
if not isinstance(dram_port, litedram.NativePort):
raise TypeError("DRAM port must be an instance of lambdasoc.cores.litedram.NativePort, "
"not {!r}"
.format(dram_port))
if not isinstance(size, int) or size <= 0 or size & size - 1:
raise ValueError("Cache size must be a positive power of two integer, not {!r}"
.format(size))
if not isinstance(data_width, int) or data_width <= 0 or data_width & data_width - 1:
raise ValueError("Data width must be a positive power of two integer, not {!r}"
.format(data_width))
if dram_port.data_width % data_width != 0:
raise ValueError("DRAM port data width must be a multiple of data width, but {} is "
"not a multiple of {}"
.format(dram_port.data_width, data_width))
self.intr_bus = wishbone.Interface(
addr_width = dram_port.addr_width + log2_int(dram_port.data_width // data_width),
data_width = data_width,
granularity = granularity,
features = {"cti", "bte"},
)
intr_map = MemoryMap(
addr_width = self.intr_bus.addr_width + log2_int(data_width // granularity),
data_width = granularity,
)
try:
intr_map.add_window(dram_port.memory_map)
except AttributeError:
pass
self.intr_bus.memory_map = intr_map
self.dram_port = dram_port
self.size = size
self.dirty_init = bool(dirty_init)
def elaborate(self, platform):
m = Module()
ratio = self.dram_port.data_width // self.intr_bus.data_width
nb_lines = (self.size * self.intr_bus.granularity) // self.dram_port.data_width
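        # Illustrative sizing (hypothetical numbers): a 128-bit LiteDRAM port, a
        # 32-bit initiator bus with 8-bit granularity and an 8 KiB cache give
        #   ratio    = 128 // 32         = 4   bus words per line
        #   nb_lines = (8192 * 8) // 128 = 512 lines
        # so intr_adr below splits into a 2-bit offset, a 9-bit line index and a tag.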
intr_adr = Record([
("offset", log2_int(ratio)),
("line", log2_int(nb_lines)),
("tag", len(self.intr_bus.adr) - log2_int(nb_lines) - log2_int(ratio)),
])
m.d.comb += intr_adr.eq(self.intr_bus.adr),
intr_adr_next = Record.like(intr_adr)
with m.Switch(self.intr_bus.bte):
with m.Case(wishbone.BurstTypeExt.LINEAR):
m.d.comb += intr_adr_next.eq(intr_adr + 1)
with m.Case(wishbone.BurstTypeExt.WRAP_4):
m.d.comb += intr_adr_next[:2].eq(intr_adr[:2] + 1)
m.d.comb += intr_adr_next[2:].eq(intr_adr[2:])
with m.Case(wishbone.BurstTypeExt.WRAP_8):
m.d.comb += intr_adr_next[:3].eq(intr_adr[:3] + 1)
m.d.comb += intr_adr_next[3:].eq(intr_adr[3:])
with m.Case(wishbone.BurstTypeExt.WRAP_16):
m.d.comb += intr_adr_next[:4].eq(intr_adr[:4] + 1)
m.d.comb += intr_adr_next[4:].eq(intr_adr[4:])
tag_rp_data = Record([
("tag", intr_adr.tag.shape()),
("dirty", 1),
])
tag_wp_data = Record.like(tag_rp_data)
tag_mem = Memory(width=len(tag_rp_data), depth=nb_lines)
if self.dirty_init:
tag_mem.init = [-1 for _ in range(nb_lines)]
m.submodules.tag_rp = tag_rp = tag_mem.read_port(transparent=False)
m.submodules.tag_wp = tag_wp = tag_mem.write_port()
tag_rp.en.reset = 0
m.d.comb += [
tag_rp_data.eq(tag_rp.data),
tag_wp.data.eq(tag_wp_data),
]
dat_mem = Memory(width=self.dram_port.data_width, depth=nb_lines)
m.submodules.dat_rp = dat_rp = dat_mem.read_port(transparent=False)
m.submodules.dat_wp = dat_wp = dat_mem.write_port(granularity=self.intr_bus.granularity)
dat_rp.en.reset = 0
intr_bus_r = Record.like(self.intr_bus)
intr_adr_r = Record.like(intr_adr)
m.d.comb += intr_adr_r.eq(intr_bus_r.adr)
with m.FSM() as fsm:
with m.State("CHECK"):
m.d.sync += [
intr_bus_r.cyc.eq(self.intr_bus.cyc),
intr_bus_r.stb.eq(self.intr_bus.stb),
intr_bus_r.adr.eq(self.intr_bus.adr),
]
# Tag/Data memory read
with m.If(self.intr_bus.cyc & self.intr_bus.stb):
with m.If(self.intr_bus.ack & (self.intr_bus.cti == wishbone.CycleType.INCR_BURST)):
m.d.comb += [
tag_rp.addr.eq(intr_adr_next.line),
dat_rp.addr.eq(intr_adr_next.line),
]
with m.Else():
m.d.comb += [
tag_rp.addr.eq(intr_adr.line),
dat_rp.addr.eq(intr_adr.line),
]
with m.If(~intr_bus_r.cyc | ~intr_bus_r.stb | self.intr_bus.ack):
m.d.comb += [
tag_rp.en.eq(1),
dat_rp.en.eq(1),
]
m.d.comb += [
self.intr_bus.dat_r.eq(
dat_rp.data.word_select(intr_adr.offset, len(self.intr_bus.dat_r))
),
]
# Tag/Data memory write
m.d.comb += [
tag_wp.addr .eq(intr_adr.line),
tag_wp_data.tag .eq(intr_adr.tag),
tag_wp_data.dirty.eq(1),
dat_wp.addr .eq(intr_adr.line),
dat_wp.data .eq(Repl(self.intr_bus.dat_w, ratio)),
]
with m.If(self.intr_bus.cyc & self.intr_bus.stb):
with m.If(intr_adr.tag == tag_rp_data.tag):
m.d.comb += self.intr_bus.ack.eq(intr_bus_r.cyc & intr_bus_r.stb)
with m.If(self.intr_bus.we & self.intr_bus.ack):
m.d.comb += [
tag_wp.en.eq(1),
dat_wp.en.word_select(intr_adr.offset, len(self.intr_bus.sel)).eq(self.intr_bus.sel),
]
with m.Elif(intr_bus_r.cyc & intr_bus_r.stb):
m.d.sync += [
intr_bus_r.cyc.eq(0),
intr_bus_r.stb.eq(0),
]
with m.If(tag_rp_data.dirty):
m.next = "EVICT"
with m.Else():
m.next = "REFILL"
with m.State("EVICT"):
evict_done = Record([("cmd", 1), ("w", 1)])
with m.If(evict_done.all()):
m.d.sync += evict_done.eq(0)
m.next = "REFILL"
# Command
m.d.comb += [
self.dram_port.cmd.valid.eq(~evict_done.cmd),
self.dram_port.cmd.last .eq(0),
self.dram_port.cmd.addr .eq(Cat(intr_adr_r.line, tag_rp_data.tag)),
self.dram_port.cmd.we .eq(1),
]
with m.If(self.dram_port.cmd.valid & self.dram_port.cmd.ready):
m.d.sync += evict_done.cmd.eq(1)
# Write
m.d.comb += [
self.dram_port.w.valid.eq(~evict_done.w),
self.dram_port.w.we .eq(Repl(Const(1), self.dram_port.data_width // 8)),
self.dram_port.w.data .eq(dat_rp.data),
]
with m.If(self.dram_port.w.valid & self.dram_port.w.ready):
m.d.sync += evict_done.w.eq(1)
with m.State("REFILL"):
refill_done = Record([("cmd", 1), ("r", 1)])
with m.If(refill_done.all()):
m.d.sync += refill_done.eq(0)
m.next = "CHECK"
# Command
m.d.comb += [
self.dram_port.cmd.valid.eq(~refill_done.cmd),
self.dram_port.cmd.last .eq(1),
self.dram_port.cmd.addr .eq(Cat(intr_adr_r.line, intr_adr_r.tag)),
self.dram_port.cmd.we .eq(0),
]
with m.If(self.dram_port.cmd.valid & self.dram_port.cmd.ready):
m.d.sync += refill_done.cmd.eq(1)
# Read
m.d.comb += [
self.dram_port.r.ready.eq(~refill_done.r),
tag_wp.addr .eq(intr_adr_r.line),
tag_wp.en .eq((self.dram_port.r.valid & self.dram_port.r.ready)),
tag_wp_data.tag .eq(intr_adr_r.tag),
tag_wp_data.dirty.eq(0),
dat_wp.addr .eq(intr_adr_r.line),
dat_wp.en .eq(Repl((self.dram_port.r.valid & self.dram_port.r.ready), len(dat_wp.en))),
dat_wp.data .eq(self.dram_port.r.data),
]
with m.If(self.dram_port.r.valid & self.dram_port.r.ready):
m.d.sync += refill_done.r.eq(1)
if platform == "formal":
with m.If(Initial()):
m.d.comb += [
Assume(fsm.ongoing("CHECK")),
Assume(~intr_bus_r.cyc),
Assume(~evict_done.any()),
Assume(~refill_done.any()),
]
return m
class SDRAMPeripheral(Peripheral, Elaboratable):
"""SDRAM controller peripheral.
Parameters
----------
core : :class:`litedram.Core`
LiteDRAM core.
cache_size : int
Cache size, in bytes.
    cache_dirty_init : bool
Initialize cache as dirty. Defaults to `False`.
"""
def __init__(self, *, core, cache_size, cache_dirty_init=False):
super().__init__()
if not isinstance(core, litedram.Core):
raise TypeError("LiteDRAM core must be an instance of lambdasoc.cores.litedram.Core, "
"not {!r}"
.format(core))
self.core = core
data_width = core.ctrl_bus.data_width
granularity = core.ctrl_bus.granularity
granularity_bits = log2_int(data_width // granularity)
# Data path : bridge -> cache -> LiteDRAM user port
self._data_bus = self.window(
addr_width = core.user_port.addr_width
+ log2_int(core.user_port.data_width // 8)
- granularity_bits,
data_width = data_width,
granularity = granularity,
features = {"cti", "bte"},
)
data_map = MemoryMap(
addr_width = self._data_bus.addr_width + granularity_bits,
data_width = granularity,
alignment = 0,
)
self._cache = WritebackCache(
core.user_port,
size = cache_size,
data_width = data_width,
granularity = granularity,
dirty_init = cache_dirty_init,
)
data_map.add_window(self._cache.intr_bus.memory_map)
self._data_bus.memory_map = data_map
# Control path : bridge -> LiteDRAM control port
self._ctrl_bus = self.window(
addr_width = core._ctrl_bus.addr_width,
data_width = data_width,
granularity = granularity,
addr = core.size,
)
ctrl_map = MemoryMap(
addr_width = self._ctrl_bus.addr_width + granularity_bits,
data_width = granularity,
alignment = 0,
)
ctrl_map.add_window(core.ctrl_bus.memory_map)
self._ctrl_bus.memory_map = ctrl_map
self._bridge = self.bridge(data_width=data_width, granularity=granularity)
self.bus = self._bridge.bus
@property
def constant_map(self):
return ConstantMap(
SIZE = self.core.size,
CACHE_SIZE = self._cache.size,
)
def elaborate(self, platform):
m = Module()
m.submodules.bridge = self._bridge
m.submodules.cache = self._cache
m.submodules.core = self.core
m.d.comb += [
self._cache.intr_bus.adr .eq(self._data_bus.adr),
self._cache.intr_bus.cyc .eq(self._data_bus.cyc),
self._cache.intr_bus.stb .eq(self._data_bus.stb),
self._cache.intr_bus.sel .eq(self._data_bus.sel),
self._cache.intr_bus.we .eq(self._data_bus.we),
self._cache.intr_bus.dat_w.eq(self._data_bus.dat_w),
self._cache.intr_bus.cti .eq(self._data_bus.cti),
self._cache.intr_bus.bte .eq(self._data_bus.bte),
self._data_bus.ack .eq(self._cache.intr_bus.ack),
self._data_bus.dat_r.eq(self._cache.intr_bus.dat_r),
self.core.ctrl_bus.adr .eq(self._ctrl_bus.adr),
self.core.ctrl_bus.cyc .eq(self._ctrl_bus.cyc),
self.core.ctrl_bus.stb .eq(self._ctrl_bus.stb),
self.core.ctrl_bus.sel .eq(self._ctrl_bus.sel),
self.core.ctrl_bus.we .eq(self._ctrl_bus.we),
self.core.ctrl_bus.dat_w.eq(self._ctrl_bus.dat_w),
self._ctrl_bus.ack .eq(self.core.ctrl_bus.ack),
self._ctrl_bus.dat_r.eq(self.core.ctrl_bus.dat_r),
]
return m
| 40.146814 | 117 | 0.52377 | [
"BSD-2-Clause"
] | gregdavill/lambdasoc | lambdasoc/periph/sdram.py | 14,493 | Python |
# -*- coding: utf-8 -*-
import json
import logging
import re
from lncrawl.core.crawler import Crawler
logger = logging.getLogger(__name__)
search_url = 'https://www.novelall.com/search/?name=%s'
class NovelAllCrawler(Crawler):
base_url = 'https://www.novelall.com/'
def search_novel(self, query):
query = query.lower().replace(' ', '+')
soup = self.get_soup(search_url % query)
results = []
for a in soup.select('.cover-info p.title a')[:20]:
url = self.absolute_url(a['href'])
results.append({
'url': url,
'title': a.text.strip(),
})
# end for
return results
# end def
def read_novel_info(self):
'''Get novel title, autor, cover etc'''
logger.debug('Visiting %s', self.novel_url)
soup = self.get_soup(self.novel_url + '?waring=1')
self.novel_title = soup.find(
'div', {"class": "manga-detail"}).find('h1').text
logger.info('Novel title: %s', self.novel_title)
self.novel_cover = self.absolute_url(
soup.find('div', {"class": "manga-detail"}).find('img')['src'])
logger.info('Novel cover: %s', self.novel_cover)
author = soup.find(
'div', {"class": "detail-info"}).find('a').text.split(',')
if len(author) == 2:
self.novel_author = author[0] + ' (' + author[1] + ')'
else:
self.novel_author = ' '.join(author)
# end if
logger.info('Novel author: %s', self.novel_author)
chapters = soup.find(
'div', {"class": "manga-detailchapter"}).findAll('a', title=True)
chapters.reverse()
for a in chapters:
for span in a.findAll('span'):
span.extract()
# end for
# end for
for x in chapters:
chap_id = len(self.chapters) + 1
if len(self.chapters) % 100 == 0:
vol_id = chap_id//100 + 1
vol_title = 'Volume ' + str(vol_id)
self.volumes.append({
'id': vol_id,
'title': vol_title,
})
# end if
self.chapters.append({
'id': chap_id,
'volume': vol_id,
'url': self.absolute_url(x['href']),
'title': x['title'] or ('Chapter %d' % chap_id),
})
# end for
# end def
def download_chapter_body(self, chapter):
'''Download body of a single chapter and return as clean html format.'''
logger.info('Downloading %s', chapter['url'])
soup = self.get_soup(chapter['url'])
contents = soup.find('div', {'class': 'reading-box'})
self.clean_contents(contents)
return str(contents)
# end def
# end class
| 32.454545 | 80 | 0.520658 | [
"Apache-2.0"
] | BorgSquared/lightnovel-crawler | sources/novelall.py | 2,856 | Python |
import requests
import ratelimit
from arcas.tools import Api
from .api_key import api_key
from arcas.tools import APIError
class Ieee(Api):
"""
API argument is 'ieee'.
"""
def __init__(self):
self.standard = 'https://ieeexploreapi.ieee.org/api/v1/search/articles?'
self.key_api = api_key
def create_url_search(self, parameters):
"""Creates the search url, combining the standard url and various
search parameters."""
url = self.standard
url += parameters[0]
for i in parameters[1:]:
url += '&{}'.format(i)
url += '&apikey={}'.format(self.key_api)
return url
@staticmethod
@ratelimit.rate_limited(3)
def make_request(url):
"""Request from an API and returns response."""
response = requests.get(url, stream=True, verify=False)
if response.status_code != 200:
raise APIError(response.status_code)
return response
def to_dataframe(self, raw_article):
"""A function which takes a dictionary with structure of the IEEE
results and transform it to a standardized format.
"""
raw_article['url'] = raw_article.get('html_url', None)
try:
raw_article['author'] = [author['full_name'] for author in raw_article['authors']['authors']]
except KeyError:
raw_article['author'] = ['No authors found for this document.']
raw_article['abstract'] = raw_article.get('abstract', None)
if raw_article['content_type'] == 'Conferences':
date = raw_article.get('conference_dates', None)
else:
date = raw_article.get('publication_date', None)
if date is not None:
date = int(date.split(' ')[-1])
raw_article['date'] = date
category = raw_article.get('index_terms', None)
if category is not None:
try:
category = category['author_terms']['terms']
except KeyError:
try:
category = category['ieee_terms']['terms']
except KeyError:
category = None
raw_article['doi'] = raw_article.get('doi', None)
raw_article['category'] = category
raw_article['journal'] = raw_article.get('publication_title', None)
raw_article['provenance'] = 'IEEE'
raw_article['key'], raw_article['unique_key'] = self.create_keys(raw_article)
raw_article['open_access'] = raw_article['access_type'] == 'OPEN_ACCESS'
raw_article['score'] = 'Not available'
return self.dict_to_dataframe(raw_article)
def parse(self, root):
"""Parsing the xml file"""
if root['total_records'] == 0:
return False
return root['articles']
@staticmethod
def parameters_fix(author=None, title=None, abstract=None, year=None,
records=None, start=None, category=None, journal=None,
keyword=None):
parameters = []
if author is not None:
parameters.append('author={}'.format(author))
if title is not None:
parameters.append('article_title={}'.format(title))
if abstract is not None:
parameters.append('abstract={}'.format(abstract))
if year is not None:
parameters.append('publication_year={}'.format(year))
if category is not None:
parameters.append('index_terms={}'.format(category))
if journal is not None:
parameters.append('publication_title={}'.format(journal))
if keyword is not None:
parameters.append('querytext={}'.format(keyword))
if records is not None:
parameters.append('max_records={}'.format(records))
if start is not None:
parameters.append('start_record={}'.format(start))
return parameters
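    # Illustrative example (hypothetical query): parameters_fix(title='ellipse fitting',
    # records=5) returns ['article_title=ellipse fitting', 'max_records=5'], which
    # create_url_search then joins with '&' onto the standard endpoint plus the API key.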
@staticmethod
def get_root(response):
root = response.json()
return root
| 36.6 | 105 | 0.598609 | [
"MIT"
] | ArcasProject/Arcas | src/arcas/IEEE/main.py | 4,026 | Python |
#
# Copyright 2015-2019, Institute for Systems Biology
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
from builtins import object
from argparse import ArgumentParser
from copy import deepcopy
from httplib2 import Http
COHORT_DATASETS = {
'prod': 'cloud_deployment_cohorts',
'staging': 'cloud_deployment_cohorts',
'dev': 'dev_deployment_cohorts'
}
COHORT_TABLES = {
'prod': 'prod_cohorts',
'staging': 'staging_cohorts'
}
from apiclient.discovery import build
from oauth2client.client import GoogleCredentials
from isb_cgc.settings import get_project_identifier
def authorize_and_get_bq_service():
credentials = GoogleCredentials.get_application_default().create_scoped(['https://www.googleapis.com/auth/bigquery'])
http = Http()
http = credentials.authorize(http)
bigquery_service = build('bigquery', 'v2', http=http)
return bigquery_service
# TODO Use bq_data_access.BigQueryCohortSupport
class BigQueryCohortSupport(object):
cohort_schema = [
{
"name": "cohort_id",
"type": "INTEGER",
"mode": "REQUIRED"
},
{
"name": "patient_barcode",
"type": "STRING"
},
{
"name": "sample_barcode",
"type": "STRING"
},
{
"name": "aliquot_barcode",
"type": "STRING"
}
]
patient_type = 'patient'
sample_type = 'sample'
@classmethod
def get_schema(cls):
return deepcopy(cls.cohort_schema)
def __init__(self, service, project_id, dataset_id, table_id):
self.service = service
self.project_id = project_id
self.dataset_id = dataset_id
self.table_id = table_id
def _build_request_body_from_rows(self, rows):
insertable_rows = []
for row in rows:
insertable_rows.append({
'json': row
})
return {
"rows": insertable_rows
}
def _streaming_insert(self, rows):
table_data = self.service.tabledata()
body = self._build_request_body_from_rows(rows)
response = table_data.insertAll(projectId=self.project_id,
datasetId=self.dataset_id,
tableId=self.table_id,
body=body).execute()
return response
def _build_cohort_row(self, cohort_id,
patient_barcode=None, sample_barcode=None, aliquot_barcode=None):
return {
'cohort_id': cohort_id,
'patient_barcode': patient_barcode,
'sample_barcode': sample_barcode,
'aliquot_barcode': aliquot_barcode
}
def add_cohort_with_sample_barcodes(self, cohort_id, barcodes):
rows = []
for sample_barcode in barcodes:
patient_barcode = sample_barcode[:12]
rows.append(self._build_cohort_row(cohort_id, patient_barcode, sample_barcode, None))
response = self._streaming_insert(rows)
return response
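    # Illustrative sketch (variable names and barcodes here are assumed, TCGA-style):
    #   support.add_cohort_with_sample_barcodes(42, ['TCGA-AB-1234-01A'])
    # builds a streaming insertAll body of the form
    #   {"rows": [{"json": {"cohort_id": 42,
    #                       "patient_barcode": "TCGA-AB-1234",
    #                       "sample_barcode": "TCGA-AB-1234-01A",
    #                       "aliquot_barcode": None}}]}
    # since the patient barcode is taken as the first 12 characters of the sample barcode.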
def create_table(dataset_id, table_id):
print("Creating table {0}.{1}".format(dataset_id, table_id))
project_id = get_project_identifier()
schema = BigQueryCohortSupport.get_schema()
dataset_args = {
'projectId': project_id,
'datasetId': dataset_id
}
table_ref = {
'tableId': table_id,
'projectId': project_id,
'datasetId': dataset_id
}
table = {
'tableReference': table_ref,
'schema': {
'fields': schema
}
}
service = authorize_and_get_bq_service()
table = service.tables().insert(
body=table,
**dataset_args
).execute()
return table
def prod_table(args):
dataset_id = COHORT_DATASETS['prod']
table_id = COHORT_TABLES['prod']
if args.cmd == 'create':
create_table(dataset_id, table_id)
def staging_table(args):
dataset_id = COHORT_DATASETS['staging']
table_id = COHORT_TABLES['staging']
if args.cmd == 'create':
create_table(dataset_id, table_id)
def dev_table(args):
dataset_id = COHORT_DATASETS['dev']
if args.cmd == 'create':
create_table(dataset_id, args.table)
def main():
parser = ArgumentParser(description="Cohort table utility")
subparsers = parser.add_subparsers(help='commands')
# Staging deployment
staging_parser = subparsers.add_parser('staging', help="Staging deployment")
staging_parser.add_argument('cmd', choices=['delete', 'create'])
staging_parser.set_defaults(func=staging_table)
# Production deployment
prod_parser = subparsers.add_parser('prod', help="Production deployment")
prod_parser.add_argument('cmd', choices=['delete', 'create'])
prod_parser.set_defaults(func=prod_table)
# Development deployment
dev_parser = subparsers.add_parser('dev', help="Local development deployment")
dev_parser.add_argument('cmd', choices=['delete', 'create'])
dev_parser.add_argument('table', type=str, help='Table name for local developer')
dev_parser.set_defaults(func=dev_table)
args = parser.parse_args()
args.func(args)
if __name__ == '__main__':
main()
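    # Example invocations (illustrative):
    #   python cohort_table_utils.py prod create
    #   python cohort_table_utils.py staging create
    #   python cohort_table_utils.py dev create <table_name>
    # Only 'create' is handled above; 'delete' is accepted by the parser but currently
    # has no effect.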
| 28.97 | 121 | 0.647566 | [
"Apache-2.0"
] | isb-cgc/ISB-CGC-Webapp | scripts/bigquery/cohort_table_utils.py | 5,794 | Python |
#!/usr/bin/env python3
import datetime
import os
import signal
import subprocess
import sys
import traceback
from multiprocessing import Process
import cereal.messaging as messaging
import selfdrive.crash as crash
from common.basedir import BASEDIR
from common.params import Params, ParamKeyType
from common.text_window import TextWindow
from selfdrive.boardd.set_time import set_time
from selfdrive.hardware import HARDWARE, PC, TICI
from selfdrive.manager.helpers import unblock_stdout
from selfdrive.manager.process import ensure_running, launcher
from selfdrive.manager.process_config import managed_processes
from selfdrive.athena.registration import register
from selfdrive.swaglog import cloudlog, add_file_handler
from selfdrive.version import dirty, get_git_commit, version, origin, branch, commit, \
terms_version, training_version, comma_remote, \
get_git_branch, get_git_remote
from selfdrive.hardware.eon.apk import system
def manager_init():
# update system time from panda
set_time(cloudlog)
params = Params()
params.clear_all(ParamKeyType.CLEAR_ON_MANAGER_START)
default_params = [
("CompletedTrainingVersion", "0"),
("HasAcceptedTerms", "0"),
("LastUpdateTime", datetime.datetime.utcnow().isoformat().encode('utf8')),
("OpenpilotEnabledToggle", "1"),
("IsMetric", "1"),
# HKG
("UseClusterSpeed", "1"),
("LongControlEnabled", "0"),
("MadModeEnabled", "1"),
("AutoLaneChangeEnabled", "0"),
("SccSmootherSlowOnCurves", "0"),
("SccSmootherSyncGasPressed", "0"),
("ShowDebugUI", "0")
]
if TICI:
default_params.append(("IsUploadRawEnabled", "1"))
if params.get_bool("RecordFrontLock"):
params.put_bool("RecordFront", True)
# set unset params
for k, v in default_params:
if params.get(k) is None:
params.put(k, v)
# is this dashcam?
if os.getenv("PASSIVE") is not None:
params.put_bool("Passive", bool(int(os.getenv("PASSIVE"))))
if params.get("Passive") is None:
raise Exception("Passive must be set to continue")
os.umask(0) # Make sure we can create files with 777 permissions
# Create folders needed for msgq
try:
os.mkdir("/dev/shm")
except FileExistsError:
pass
except PermissionError:
print("WARNING: failed to make /dev/shm")
# set version params
params.put("Version", version)
params.put("TermsVersion", terms_version)
params.put("TrainingVersion", training_version)
params.put("GitCommit", get_git_commit(default=""))
params.put("GitBranch", get_git_branch(default=""))
params.put("GitRemote", get_git_remote(default=""))
# set dongle id
reg_res = register(show_spinner=True)
if reg_res:
dongle_id = reg_res
else:
serial = params.get("HardwareSerial")
raise Exception(f"Registration failed for device {serial}")
os.environ['DONGLE_ID'] = dongle_id # Needed for swaglog
if not dirty:
os.environ['CLEAN'] = '1'
cloudlog.bind_global(dongle_id=dongle_id, version=version, dirty=dirty,
device=HARDWARE.get_device_type())
if comma_remote and not (os.getenv("NOLOG") or os.getenv("NOCRASH") or PC):
crash.init()
crash.bind_user(id=dongle_id)
crash.bind_extra(dirty=dirty, origin=origin, branch=branch, commit=commit,
device=HARDWARE.get_device_type())
def manager_prepare():
for p in managed_processes.values():
p.prepare()
def manager_cleanup():
for p in managed_processes.values():
p.stop()
cloudlog.info("everything is dead")
def manager_thread():
Process(name="shutdownd", target=launcher, args=("selfdrive.shutdownd",)).start()
system("am startservice com.neokii.optool/.MainService")
system("am startservice com.neokii.openpilot/.MainService")
cloudlog.info("manager start")
cloudlog.info({"environ": os.environ})
# save boot log
#subprocess.call("./bootlog", cwd=os.path.join(BASEDIR, "selfdrive/loggerd"))
params = Params()
ignore = []
if os.getenv("NOBOARD") is not None:
ignore.append("pandad")
if os.getenv("BLOCK") is not None:
ignore += os.getenv("BLOCK").split(",")
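  # Illustrative examples (not from the original source) of the environment variables
  # honoured above:
  #   NOBOARD=1 ./manager.py        # do not start pandad
  #   BLOCK="loggerd" ./manager.py  # skip a comma-separated list of processes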
ensure_running(managed_processes.values(), started=False, not_run=ignore)
started_prev = False
sm = messaging.SubMaster(['deviceState'])
pm = messaging.PubMaster(['managerState'])
while True:
sm.update()
not_run = ignore[:]
if sm['deviceState'].freeSpacePercent < 5:
not_run.append("loggerd")
started = sm['deviceState'].started
driverview = params.get_bool("IsDriverViewEnabled")
ensure_running(managed_processes.values(), started, driverview, not_run)
# trigger an update after going offroad
if started_prev and not started and 'updated' in managed_processes:
os.sync()
managed_processes['updated'].signal(signal.SIGHUP)
started_prev = started
running_list = ["%s%s\u001b[0m" % ("\u001b[32m" if p.proc.is_alive() else "\u001b[31m", p.name)
for p in managed_processes.values() if p.proc]
cloudlog.debug(' '.join(running_list))
# send managerState
msg = messaging.new_message('managerState')
msg.managerState.processes = [p.get_process_state_msg() for p in managed_processes.values()]
pm.send('managerState', msg)
# TODO: let UI handle this
# Exit main loop when uninstall is needed
if params.get_bool("DoUninstall"):
break
def main():
prepare_only = os.getenv("PREPAREONLY") is not None
manager_init()
# Start UI early so prepare can happen in the background
if not prepare_only:
managed_processes['ui'].start()
manager_prepare()
if prepare_only:
return
# SystemExit on sigterm
signal.signal(signal.SIGTERM, lambda signum, frame: sys.exit(1))
try:
manager_thread()
except Exception:
traceback.print_exc()
crash.capture_exception()
finally:
manager_cleanup()
if Params().get_bool("DoUninstall"):
cloudlog.warning("uninstalling")
HARDWARE.uninstall()
if __name__ == "__main__":
unblock_stdout()
try:
main()
except Exception:
add_file_handler(cloudlog)
cloudlog.exception("Manager failed to start")
# Show last 3 lines of traceback
error = traceback.format_exc(-3)
error = "Manager failed to start\n\n" + error
with TextWindow(error) as t:
t.wait_for_exit()
raise
# manual exit because we are forked
sys.exit(0)
| 27.779221 | 99 | 0.699548 | [
"MIT"
] | Superkingggg/op4 | selfdrive/manager/manager.py | 6,417 | Python |
from __future__ import print_function, division
import _init_paths
import math
import os.path as osp
from shapely.geometry import Polygon
from gen_data import get_cent
from bbox_util import is_rect
import argparse
import sys
from model.config import cfg
def parse_args():
"""
Parse input arguments
"""
parser = argparse.ArgumentParser(description='Generate txt result file')
parser.add_argument('--dir', dest='base_dir',
help='result base dir',
default='/home/hezheqi/data/frame/result', type=str)
parser.add_argument('--gt', dest='gt_dir',
help='gt base dir',
default='/data/hezheqi/frame/test/gt', type=str)
parser.add_argument('--name', dest='name',
help='out name', default=None, type=str)
parser.add_argument('--list', dest='img_list_dir',
help='image list', default='/data/hezheqi/frame/test/img_list.txt', type=str)
if len(sys.argv) == 1:
parser.print_help()
sys.exit(1)
args = parser.parse_args()
return args
def read_txt(name, use_bound=False, rect_label=None, is_gt=False):
ret = []
if not osp.exists(name):
return ret
with open(name) as fin:
for line in fin:
info = line.strip().split()
if len(info) == 1:
continue
if is_gt and len(info) != 9:
continue
info = list(map(int, info))
# for i in range(len(info)):
# info[i] = max(0, info[i])
      if rect_label is not None: # only use rectangle gt
rect_label.append(is_rect(info[1:]))
pts = [(info[i], info[i + 1]) for i in range(1, len(info), 2)]
cx, cy = get_cent(info[1:])
pts.sort(key=lambda a: math.atan2(a[1] - cy, a[0] - cx))
# if is_gt:
# print(pts)
frame = Polygon(pts)
if use_bound:
x1, y1, x2, y2 = frame.bounds
# print(x1, y1, x2, y2)
frame = Polygon([[x1, y1], [x2, y1], [x2, y2], [x1, y2]])
if not frame.is_valid:
print(info[0])
continue
# frame = frame.convex_hull
ret.append(frame)
return ret
def calculate_iou(p1, p2):
a1 = p1.area
a2 = p2.area
# print(a1, a2)
# print(p1.is_valid, p2.is_valid)
intersection = p1.intersection(p2).area
return intersection / (a1 + a2 - intersection)
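# Worked example (illustrative): two axis-aligned unit squares offset by 0.5 in x
# overlap in a 0.5 x 1 strip, so IoU = 0.5 / (1 + 1 - 0.5) = 1/3.
#   calculate_iou(Polygon([(0, 0), (1, 0), (1, 1), (0, 1)]),
#                 Polygon([(0.5, 0), (1.5, 0), (1.5, 1), (0.5, 1)]))  # ~0.333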
def verify_point_distance(poly1, poly2):
pts1 = list(poly1.exterior.coords)
pts2 = list(poly2.exterior.coords)
for p1, p2 in zip(pts1, pts2):
dis = math.pow(p1[0] - p2[0], 2) + math.pow(p1[1] - p2[1], 2)
if dis > 2500:
return False
return True
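# Note: 2500 above is a squared pixel distance, i.e. corresponding corner points may
# be at most 50 pixels apart for the two polygons to be considered a match.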
def eval_one(results, gts, point_dis=False, rect_label=None):
  '''
  :param results: list of detected frame polygons
  :param gts: list of ground-truth polygons
  :param point_dis: if True, match detections to ground truth by corner-point
                    distance instead of IoU
  :param rect_label: optional per-gt flags marking rectangular ground truth;
                     matches to non-rectangular gt are counted in mid_num
  :return: right_num, error_num, mid_num
  '''
m = len(gts)
is_used = [False] * m
right_num = 0
err_num = 0
mid_num = 0
for res in results:
if not point_dis:
max_iou = -1
max_index = -1
for j, gt in enumerate(gts):
if is_used[j]:
continue
iou = calculate_iou(res, gt)
if max_iou < iou:
max_iou = iou
max_index = j
if max_iou > th:
is_used[max_index] = True
        if rect_label is None:
right_num += 1
elif rect_label[max_index]:
right_num += 1
elif not rect_label[max_index]:
mid_num += 1
else:
err_num += 1
else:
flag = False
for j, gt in enumerate(gts):
if is_used[j]:
continue
if verify_point_distance(res, gt):
is_used[j] = True
right_num += 1
flag = True
break
if not flag:
err_num += 1
assert (right_num <= m)
assert (err_num <= len(results))
return right_num, err_num, mid_num
def evaluate(mean_f=True, point_dis=False, rect_flag=False):
name_list = open(name_list_dir).read().strip().split('\n')
fout = open(osp.join(cfg.DATA_DIR, 'wrong.txt'), 'w')
precision, recall, page_correct = 0, 0, 0
right_all, error_all, gt_all, res_all = 0, 0, 0, 0
for name in name_list:
results = read_txt(osp.join(res_base_dir, name + '.txt'), use_bound=False)
if rect_flag:
rect_label = []
else:
rect_label = None
gts = read_txt(osp.join(gt_base_dir, name + '.txt'), rect_label=rect_label,
is_gt=True, use_bound=False)
right_num, error_num, mid_num = eval_one(results, gts, rect_label=rect_label, point_dis=point_dis)
# right_num, error_num, mid_num = eval_one(results, gts)
right_all += right_num
error_all += error_num
gt_all += len(gts) - mid_num
res_all += len(results) - mid_num
if len(results) - mid_num > 0:
precision += right_num / (len(results) - mid_num)
if len(gts) - mid_num > 0:
recall += right_num / (len(gts) - mid_num)
if right_num == len(gts) and error_num == 0:
# if right_num == len(gts):
page_correct += 1
else:
fout.write('{}\n'.format(name))
n = len(name_list)
precision /= n
recall /= n
page_correct /= n
f1 = 2 * precision * recall / (precision + recall)
print('{} {:.5f} {:.5f} {:.5f} {:.5f}'.format(th, precision, recall, f1, page_correct))
if not mean_f:
precision = right_all / res_all
recall = right_all / gt_all
f1 = 2 * precision * recall / (precision + recall)
# print(th, precision, recall, f1, page_correct)
print('{} {:.5f} {:.5f} {:.5f} {:.5f}'.format(th, precision, recall, f1, page_correct))
if __name__ == '__main__':
# gt_base_dir = '/data/datasets/frame/test_2000/gt'
# res_base_dir = '/data/datasets/frame/result/result_all_0.8_th0.75'
# res_base_dir = '/data3/dt'
# res_base_dir = '/data/datasets/frame/result/result_ssd_th0.75'
# res_base_dir = '/home/hezheqi/data/frame/result/faster_reg2_poly'
# res_base_dir = '/home/hezheqi/Project/dpreg/net/results/pages_mult/txt'
# res_base_dir = '/home/cpdp/Documents/yf-workspace/data/2000_res_txt'
# res_base_dir = '/data3/20w_results/ly_crf_new'
# res_base_dir = '/data3/20w_results/dt'
# res_base_dir = '/home/cpdp/Documents/yf-workspace/data/29845_LD_DRR'
# res_base_dir = '/data/datasets/frame/result/result_2000_0.8_th0.75'
# name_list_dir = '/data/datasets/frame/test_2000/img_list.txt'
args = parse_args()
gt_base_dir = args.gt_dir
res_base_dir = osp.join(args.base_dir, args.name)
th = 0.9
name_list_dir = args.img_list_dir
evaluate(mean_f=False, point_dis=False)
# evaluate(False, True)
| 31.357488 | 102 | 0.620243 | [
"MIT"
] | lz20061213/quadrilateral | tools/eval_frame.py | 6,491 | Python |
from twisted.protocols import stateful
from twisted.internet import reactor
from twisted.internet.protocol import Factory, Protocol
from twisted.internet.endpoints import TCP4ClientEndpoint
from datetime import datetime
import sys
import struct
import zlib
import time
import threading
import Queue
import uuid
import AmmoMessages_pb2
MAGIC_NUMBER = 0xfeedbeef
DEFAULT_PRIORITY = 0
DEFAULT_RESERVED1 = 0
DEFAULT_RESERVED2 = 0
DEFAULT_RESERVED3 = 0
class AndroidProtocol(stateful.StatefulProtocol):
'''
  This class implements the stateful Android <-> Gateway protocol. Each message
  consists of a 20-byte header (magic number, message size, priority, error code,
  two reserved bytes, payload checksum and header checksum), followed by a
  protobuf MessageWrapper object of length messageSize.
  We use Twisted's StatefulProtocol to implement this protocol-- states are
  composed of a callback function and a size; Twisted calls the callback
  function once <size> bytes have been received, and the callback returns
  the next state.
'''
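  # Header layout (little-endian, 20 bytes total), matching the "<IIbbbbii" struct
  # format used in receiveHeader:
  #   uint32 magic number (0xfeedbeef)
  #   uint32 message size
  #   int8   priority
  #   int8   error code
  #   int8   reserved
  #   int8   reserved
  #   int32  CRC32 of the payload
  #   int32  CRC32 of the first 16 header bytes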
_messageSize = 0
_checksum = 0
_onMessageAvailableCallback = None
def getInitialState(self):
return (self.receiveHeader, 20) #initial state receives the header
def receiveHeader(self, data):
(magicNumber, messageSize, priority, error, reserved2, reserved3, checksum, headerChecksum) = struct.unpack("<IIbbbbii", data)
calculatedHeaderChecksum = zlib.crc32(data[:16])
if magicNumber != MAGIC_NUMBER:
print "Invalid magic number!"
if calculatedHeaderChecksum != headerChecksum:
print "Header checksum error!"
print "Expected", headerChecksum
print "Calculated", calculatedHeaderChecksum
if error != 0 and messageSize == 0 and checksum == 0:
print "Error received from gateway:"
print " ", error,
if error == 1:
print "Invalid magic number"
elif error == 2:
print "Invalid header checksum"
elif error == 3:
print "Invalid message checksum"
elif error == 4:
print "Message too large"
else:
print "Unknown error"
return (self.receiveHeader, 20)
else:
self._messageSize = messageSize
self._checksum = checksum
return (self.receiveData, self._messageSize)
def receiveData(self, data):
calculatedChecksum = zlib.crc32(data)
if calculatedChecksum != self._checksum:
print "Checksum error!"
msg = AmmoMessages_pb2.MessageWrapper()
msg.ParseFromString(data)
if self._onMessageAvailableCallback != None:
self._onMessageAvailableCallback(msg)
return (self.receiveHeader, 20)
def sendMessageWrapper(self, msg):
serializedMsg = msg.SerializeToString()
messageHeader = struct.pack("<IIbbbbi", MAGIC_NUMBER, len(serializedMsg), DEFAULT_PRIORITY, DEFAULT_RESERVED1, DEFAULT_RESERVED2, DEFAULT_RESERVED3, zlib.crc32(serializedMsg))
headerChecksum = zlib.crc32(messageHeader)
messageHeader = messageHeader + struct.pack("i", headerChecksum)
self.transport.write(messageHeader) #little-endian byte order for now
self.transport.write(serializedMsg);
def connectionMade(self):
pass
def connectionLost(self, reason):
print "Connection lost:"
reason.printTraceback()
#TODO: signal the authentication loop so it knows we disconnected too
def setOnMessageAvailableCallback(self, callback):
self._onMessageAvailableCallback = callback
class AuthenticationFailure(Exception):
pass
class MessageScope:
GLOBAL = 0
LOCAL = 1
class MessagePriority:
AUTH = 127
CTRL = 126
FLASH = 96
URGENT = 64
IMPORTANT = 32
NORMAL = 0
BACKGROUND = -32
class AndroidConnector(threading.Thread):
_address = ""
_port = 0
_deviceId = ""
_userId = ""
_userKey = ""
_protocol = None
_authenticated = False
_cancelled = False
_authCondition = None
_messageQueueEnabled = True
_messageQueue = None
_messageCallback = None
def __init__(self, address, port, deviceId, userId, userKey, heartbeatPeriod = 30):
threading.Thread.__init__(self)
self._address = address
self._port = port
self._deviceId = deviceId
self._userId = userId
self._userKey = userKey
self._heartbeatNumber = 0
self._heartbeatPeriod = heartbeatPeriod
self._authenticated = False
self._cancelled = False
self._authCondition = threading.Condition()
self._messageQueueEnabled = True
self._messageQueue = Queue.Queue()
self._messageCallback = None
def _gotProtocol(self, p):
self._protocol = p
self._onConnect()
def _onError(self, failure):
failure.printTraceback()
reactor.stop()
self._authCondition.acquire()
self._cancelled = True
self._authCondition.notifyAll()
self._authCondition.release()
def _connect(self):
factory = Factory()
factory.protocol = AndroidProtocol
point = TCP4ClientEndpoint(reactor, self._address, self._port)
d = point.connect(factory)
d.addCallback(self._gotProtocol)
d.addErrback(self._onError)
def run(self):
if reactor.running == False:
self._connect()
print "Running reactor"
reactor.run(False) #Argument False tells the reactor that it's not on the
#main thread, so it doesn't attempt to register signal
#handlers (which doesn't work on other threads)
print "Reactor stopped"
else:
reactor.callFromThread(self._connect)
print "Reactor is already running... this background thread will exit."
def _onConnect(self):
self._protocol.setOnMessageAvailableCallback(self._onMessageAvailable)
self._sendAuthMessage()
def _onMessageAvailable(self, msg):
if self._authenticated == False:
if msg.type == AmmoMessages_pb2.MessageWrapper.AUTHENTICATION_RESULT:
if msg.authentication_result.result == AmmoMessages_pb2.AuthenticationResult.SUCCESS:
print "Authentication succeeded."
self._authCondition.acquire()
self._authenticated = True
self._authCondition.notifyAll()
self._authCondition.release()
if(self._heartbeatPeriod > 0):
self._sendAndScheduleHeartbeat()
else:
print "Authentication failed."
raise AuthenticationFailure("Auth failed: " + msg.authentication_result.message)
if msg.type == AmmoMessages_pb2.MessageWrapper.DATA_MESSAGE:
if msg.data_message.thresholds.device_delivered == True:
self.pushAcknowledgement(msg.data_message.uri, msg.data_message.origin_device, msg.data_message.user_id, self._deviceId, self._userId)
time = datetime.now()
if self._messageCallback != None:
self._messageCallback(self, msg)
if self._messageQueueEnabled:
self._messageQueue.put((msg, time))
def _sendAuthMessage(self):
m = AmmoMessages_pb2.MessageWrapper()
m.type = AmmoMessages_pb2.MessageWrapper.AUTHENTICATION_MESSAGE
m.message_priority = MessagePriority.AUTH
m.authentication_message.device_id = self._deviceId
m.authentication_message.user_id = self._userId
m.authentication_message.user_key = self._userKey
print "Sending auth message"
self._protocol.sendMessageWrapper(m)
def _sendAndScheduleHeartbeat(self):
self.heartbeat()
if(self._heartbeatPeriod > 0):
reactor.callLater(self._heartbeatPeriod, self._sendAndScheduleHeartbeat)
def dequeueMessage(self):
'''
    Dequeues a message from the message queue and returns it. Returns None if
    the queue is empty; otherwise, it returns a (message, timeReceived) pair.
'''
item = None
try:
item = self._messageQueue.get(False) #don't block if queue is empty; raises Empty exception instead
except Queue.Empty:
item = None
pass
return item
def isDataAvailable(self):
'''
Checks to see if data is available in the message queue. Note that, since
the message queue is filled from a background thread (and could be emptied
from a background thread), this method returning true/false does not
necessarily mean that a message will or will not be present when
dequeueMessage() is called.
'''
return not self._messageQueue.empty()
def push(self, uri, mimeType, data, scope = MessageScope.GLOBAL, priority = MessagePriority.NORMAL, ackDeviceDelivered = False, ackPluginDelivered = False, ackAndroidPluginReceived = True):
'''
Sends a push message with the specified URI and MIME type to the gateway.
'''
m = AmmoMessages_pb2.MessageWrapper()
m.type = AmmoMessages_pb2.MessageWrapper.DATA_MESSAGE
m.message_priority = priority
m.data_message.uri = uri
m.data_message.mime_type = mimeType
m.data_message.data = data
m.data_message.origin_device = self._deviceId
m.data_message.user_id = self._userId
m.data_message.thresholds.device_delivered = ackDeviceDelivered
m.data_message.thresholds.plugin_delivered = ackPluginDelivered
m.data_message.thresholds.android_plugin_received = ackAndroidPluginReceived
if scope == MessageScope.GLOBAL:
m.data_message.scope = AmmoMessages_pb2.GLOBAL
else:
m.data_message.scope = AmmoMessages_pb2.LOCAL
reactor.callFromThread(self._protocol.sendMessageWrapper, m)
def pushAcknowledgement(self, uid, destinationDevice, destinationUser, acknowledgingDevice, acknowledgingUser):
'''
Sends a push acknowledgement back to the specified device. The
destinationDevice parameter should match the origin_device parameter from
the push message which was received.
Scripts shouldn't normally need to call this directly; AndroidConnector
will automatically generate an acknowledgement if the message indicates
that an acknowledgement is required.
'''
m = AmmoMessages_pb2.MessageWrapper()
m.type = AmmoMessages_pb2.MessageWrapper.PUSH_ACKNOWLEDGEMENT
m.message_priority = MessagePriority.CTRL
m.push_acknowledgement.uri = uid
m.push_acknowledgement.destination_device = destinationDevice
m.push_acknowledgement.acknowledging_device = acknowledgingDevice
m.push_acknowledgement.destination_user = destinationUser
m.push_acknowledgement.acknowledging_user = acknowledgingUser
m.push_acknowledgement.threshold.device_delivered = True
m.push_acknowledgement.threshold.plugin_delivered = False
m.push_acknowledgement.threshold.android_plugin_received = False
m.push_acknowledgement.status = AmmoMessages_pb2.PushAcknowledgement.SUCCESS
reactor.callFromThread(self._protocol.sendMessageWrapper, m)
def subscribe(self, mimeType, scope = MessageScope.GLOBAL):
'''
Subscribes to push data with the specified MIME type.
By default, data received will be placed in the message queue. The caller
should periodically call dequeueMessage to receive the push messages that
it subscribed to.
'''
m = AmmoMessages_pb2.MessageWrapper()
m.type = AmmoMessages_pb2.MessageWrapper.SUBSCRIBE_MESSAGE
m.message_priority = MessagePriority.CTRL
m.subscribe_message.mime_type = mimeType
if scope == MessageScope.GLOBAL:
m.subscribe_message.scope = AmmoMessages_pb2.GLOBAL
else:
m.subscribe_message.scope = AmmoMessages_pb2.LOCAL
reactor.callFromThread(self._protocol.sendMessageWrapper, m)
def pullRequest(self, mimeType, query, projection, maxResults, startFromCount, liveQuery, priority = MessagePriority.NORMAL):
'''
Sends a pull request with the specified parameters. Note that the request
UID and device ID are automatically set to the correct values (request UID
is a generated UUID, and device ID is the device ID passed to the
constructor of this AndroidConnector object).
'''
m = AmmoMessages_pb2.MessageWrapper()
m.type = AmmoMessages_pb2.MessageWrapper.PULL_REQUEST
m.message_priority = priority
m.pull_request.request_uid = uuid.uuid1().hex
m.pull_request.mime_type = mimeType
m.pull_request.query = query
m.pull_request.projection = projection
m.pull_request.max_results = maxResults
m.pull_request.start_from_count = startFromCount
m.pull_request.live_query = liveQuery
reactor.callFromThread(self._protocol.sendMessageWrapper, m)
def heartbeat(self):
m = AmmoMessages_pb2.MessageWrapper()
m.type = AmmoMessages_pb2.MessageWrapper.HEARTBEAT
m.message_priority = MessagePriority.NORMAL
m.heartbeat.sequence_number = self._heartbeatNumber
reactor.callFromThread(self._protocol.sendMessageWrapper, m)
self._heartbeatNumber = self._heartbeatNumber + 1
def waitForAuthentication(self):
'''
Waits for the AndroidConnector to connect to the Android Gateway Plugin, and
waits for successful authentication.
This method MUST be called after the AndroidConnector's background thread
is started. Attempting to call any other member methods of this class
before authentication is complete has undefined behavior.
'''
with self._authCondition:
while not (self._cancelled or self._authenticated):
self._authCondition.wait(1)
if self._authenticated == False:
raise AuthenticationFailure("Connection failure or interrupt during waitForAuthentication")
def registerMessageCallback(self, callback):
'''
Registers a callback method to be called when a message is received. Note
that this callback is called on the *event loop's* thread-- which may not
be the thread where the caller (of this method) is running. The caller is
expected to handle any synchronization issues which might result.
Also note that registering this callback does not disable the message queue--
the consumer of AndroidConnector will want to either drain this queue or
disable it with AndroidConnector.setMessageQueueEnabled(False) to avoid
memory leaks.
'''
self._messageCallback = callback
def setMessageQueueEnabled(self, enabled):
'''
Enables or disables the message queue. The message queue is enabled by
default; you might want to disable it if, for example, you only want to
print messages as they are received in a callback.
setMessageQueueEnabled(false) should almost always be used in conjunction
with registerMessageCallback, or you will lose any messages received while
the message queue is disabled.
'''
self._messageQueueEnabled = enabled
# Main method for this class (not run when it's imported).
# This is a usage example for the AndroidConnector-- it subscribes to a data
# type, then prints out any data that it receives with that type.
if __name__ == "__main__":
print "Android Gateway Tester"
connector = AndroidConnector("localhost", 33289, "device:test/pythonTestDriver1", "user:user/testPythonUser1", "")
try:
connector.start()
connector.waitForAuthentication()
print "Subscribing to type text/plain"
connector.subscribe("text/plain")
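    # A push could also be exercised here (illustrative; the URI and payload are
    # made up for this sketch):
    #   connector.push("text/plain/test-uid", "text/plain", "hello from the test driver")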
while True:
while(connector.isDataAvailable()):
result = connector.dequeueMessage()
if(result != None):
(msg, receivedTime) = result
print "Message received at:", receivedTime
print msg
time.sleep(0.5)
except KeyboardInterrupt:
print "Got ^C... Closing"
reactor.callFromThread(reactor.stop)
# re-raising the exception so we get a traceback (useful for debugging,
# occasionally). Real "applications"/testdrivers shouldn't do this.
raise
except:
print "Unexpected error... dying."
reactor.callFromThread(reactor.stop)
raise
| 36.714953 | 191 | 0.722159 | [
"MIT"
] | isis-ammo/ammo-gateway | AndroidGatewayPlugin/Testdriver/AndroidConnector/ammo/AndroidConnector.py | 15,714 | Python |
# Generated by Django 3.1.8 on 2021-05-20 18:22
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('users', '0020_auto_20210415_0831'),
]
operations = [
migrations.AddField(
model_name='candidateusermodel',
name='organization',
field=models.CharField(default='not indicated', max_length=255, verbose_name='organization'),
),
]
| 23.947368 | 105 | 0.635165 | [
"MIT"
] | AlenaYanish/Data_converter | users/migrations/0021_candidateusermodel_organization.py | 455 | Python |
import math
import numpy as np
import pytest
import tensorflow as tf
import kerastuner as kt
from kerastuner.engine import hyperparameters as hp_module
from kerastuner.engine import trial as trial_module
from kerastuner.tuners import bayesian as bo_module
@pytest.fixture(scope="function")
def tmp_dir(tmpdir_factory):
return tmpdir_factory.mktemp("bayesian_test", numbered=True)
def build_model(hp):
model = tf.keras.Sequential()
model.add(tf.keras.layers.Flatten(input_shape=(2, 2)))
for i in range(3):
model.add(
tf.keras.layers.Dense(
units=hp.Int("units_" + str(i), 2, 4, 2), activation="relu"
)
)
model.add(tf.keras.layers.Dense(2, activation="softmax"))
model.compile(
optimizer=tf.keras.optimizers.Adam(
hp.Choice("learning_rate", [1e-2, 1e-3, 1e-4])
),
loss="sparse_categorical_crossentropy",
metrics=["accuracy"],
)
return model
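# Note: the search space above has two possible values for each of the three
# units_i hyperparameters and three learning rates, i.e. 2**3 * 3 = 24 configurations.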
def test_gpr_mse_is_small():
x_train = np.random.rand(1000, 2)
y_train = np.multiply(x_train, x_train).mean(axis=-1)
x_test = np.random.rand(1000, 2)
y_test = np.multiply(x_test, x_test).mean(axis=-1)
gpr = bo_module.GaussianProcessRegressor(alpha=1e-4, seed=3)
gpr.fit(x_train, y_train)
y_predict_mean, y_predict_std = gpr.predict(x_test)
assert ((y_predict_mean - y_test) ** 2).mean(axis=0) < 1e-8
assert y_predict_std.shape == (1000,)
def test_bayesian_oracle(tmp_dir):
hps = hp_module.HyperParameters()
hps.Choice("a", [1, 2], default=1)
hps.Int("b", 3, 10, default=3)
hps.Float("c", 0, 1, 0.1, default=0)
hps.Fixed("d", 7)
hps.Choice("e", [9, 0], default=9)
oracle = bo_module.BayesianOptimizationOracle(
objective=kt.Objective("score", "max"),
max_trials=20,
num_initial_points=2,
hyperparameters=hps,
)
oracle._set_project_dir(tmp_dir, "untitled")
for i in range(5):
trial = oracle.create_trial(str(i))
oracle.update_trial(trial.trial_id, {"score": i})
oracle.end_trial(trial.trial_id, "COMPLETED")
def test_bayesian_oracle_with_zero_y(tmp_dir):
hps = hp_module.HyperParameters()
hps.Choice("a", [1, 2], default=1)
hps.Int("b", 3, 10, default=3)
hps.Float("c", 0, 1, 0.1, default=0)
hps.Fixed("d", 7)
hps.Choice("e", [9, 0], default=9)
oracle = bo_module.BayesianOptimizationOracle(
objective=kt.Objective("score", "max"),
max_trials=20,
num_initial_points=2,
hyperparameters=hps,
)
oracle._set_project_dir(tmp_dir, "untitled")
for i in range(5):
trial = oracle.create_trial(str(i))
oracle.update_trial(trial.trial_id, {"score": 0})
oracle.end_trial(trial.trial_id, "COMPLETED")
def test_bayesian_dynamic_space(tmp_dir):
hps = hp_module.HyperParameters()
hps.Choice("a", [1, 2], default=1)
oracle = bo_module.BayesianOptimizationOracle(
objective="val_acc", max_trials=20, num_initial_points=10
)
oracle._set_project_dir(tmp_dir, "untitled")
oracle.hyperparameters = hps
for i in range(10):
oracle._populate_space(str(i))
hps.Int("b", 3, 10, default=3)
assert "b" in oracle._populate_space("1_0")["values"]
hps.Float("c", 0, 1, 0.1, default=0)
assert "c" in oracle._populate_space("1_1")["values"]
hps.Fixed("d", 7)
assert "d" in oracle._populate_space("1_2")["values"]
hps.Choice("e", [9, 0], default=9)
assert "e" in oracle._populate_space("1_3")["values"]
def test_bayesian_save_reload(tmp_dir):
hps = hp_module.HyperParameters()
hps.Choice("a", [1, 2], default=1)
hps.Choice("b", [3, 4], default=3)
hps.Choice("c", [5, 6], default=5)
hps.Choice("d", [7, 8], default=7)
hps.Choice("e", [9, 0], default=9)
oracle = bo_module.BayesianOptimizationOracle(
objective=kt.Objective("score", "max"), max_trials=20, hyperparameters=hps
)
oracle._set_project_dir(tmp_dir, "untitled")
for _ in range(3):
trial = oracle.create_trial("tuner_id")
oracle.update_trial(trial.trial_id, {"score": 1.0})
oracle.end_trial(trial.trial_id, "COMPLETED")
oracle.save()
oracle = bo_module.BayesianOptimizationOracle(
objective=kt.Objective("score", "max"), max_trials=20, hyperparameters=hps
)
oracle._set_project_dir(tmp_dir, "untitled")
oracle.reload()
for trial_id in range(3):
trial = oracle.create_trial("tuner_id")
oracle.update_trial(trial.trial_id, {"score": 1.0})
oracle.end_trial(trial.trial_id, "COMPLETED")
assert len(oracle.trials) == 6
def test_bayesian_optimization_tuner(tmp_dir):
tuner = bo_module.BayesianOptimization(
build_model, objective="val_accuracy", max_trials=15, directory=tmp_dir
)
assert isinstance(tuner.oracle, bo_module.BayesianOptimizationOracle)
def test_bayesian_optimization_tuner_set_alpha_beta(tmp_dir):
tuner = bo_module.BayesianOptimization(
build_model,
alpha=1e-4,
beta=2.6,
objective="val_accuracy",
max_trials=15,
directory=tmp_dir,
)
assert isinstance(tuner.oracle, bo_module.BayesianOptimizationOracle)
def test_save_before_result(tmp_dir):
hps = hp_module.HyperParameters()
hps.Choice("a", [1, 2], default=1)
hps.Int("b", 3, 10, default=3)
hps.Float("c", 0, 1, 0.1, default=0)
hps.Fixed("d", 7)
hps.Choice("e", [9, 0], default=9)
oracle = bo_module.BayesianOptimizationOracle(
objective=kt.Objective("score", "max"), max_trials=10, hyperparameters=hps
)
oracle._set_project_dir(tmp_dir, "untitled")
oracle._populate_space(str(1))
oracle.save()
def test_bayesian_oracle_maximize(tmp_dir):
hps = hp_module.HyperParameters()
hps.Int("a", -100, 100)
oracle = bo_module.BayesianOptimizationOracle(
objective=kt.Objective("score", direction="max"),
max_trials=20,
hyperparameters=hps,
num_initial_points=2,
)
oracle._set_project_dir(tmp_dir, "untitled")
# Make examples with high 'a' and high score.
for i in range(5):
trial = trial_module.Trial(hyperparameters=hps.copy())
trial.hyperparameters.values["a"] = 10 * i
trial.score = i
trial.status = "COMPLETED"
oracle.trials[trial.trial_id] = trial
# Make examples with low 'a' and low score
for i in range(5):
trial = trial_module.Trial(hyperparameters=hps.copy())
trial.hyperparameters.values["a"] = -10 * i
trial.score = -i
trial.status = "COMPLETED"
oracle.trials[trial.trial_id] = trial
trial = oracle.create_trial("tuner0")
assert trial.status == "RUNNING"
# Assert that the oracle suggests hps it thinks will maximize.
assert trial.hyperparameters.get("a") > 0
def test_hyperparameters_added(tmp_dir):
hps = hp_module.HyperParameters()
hps.Int("a", -100, 100)
oracle = bo_module.BayesianOptimizationOracle(
objective=kt.Objective("score", direction="max"),
max_trials=20,
hyperparameters=hps,
num_initial_points=2,
)
oracle._set_project_dir(tmp_dir, "untitled")
# Populate initial trials.
for i in range(10):
trial = trial_module.Trial(hyperparameters=hps.copy())
trial.hyperparameters.values["a"] = 10 * i
trial.score = i
trial.status = "COMPLETED"
oracle.trials[trial.trial_id] = trial
# Update the space.
new_hps = hp_module.HyperParameters()
new_hps.Float("b", 3.2, 6.4, step=0.2, default=3.6)
new_hps.Boolean("c", default=True)
oracle.update_space(new_hps)
# Make a new trial, it should have b set.
trial = oracle.create_trial("tuner0")
assert trial.status == "RUNNING"
assert "b" in trial.hyperparameters.values
assert "c" in trial.hyperparameters.values
def test_step_respected(tmp_dir):
hps = hp_module.HyperParameters()
hps.Float("c", 0, 10, step=3)
oracle = bo_module.BayesianOptimizationOracle(
objective=kt.Objective("score", direction="max"),
max_trials=20,
hyperparameters=hps,
num_initial_points=2,
)
oracle._set_project_dir(tmp_dir, "untitled")
# Populate initial trials.
for i in range(10):
trial = trial_module.Trial(hyperparameters=hps.copy())
trial.hyperparameters.values["c"] = 3.0
trial.score = i
trial.status = "COMPLETED"
oracle.trials[trial.trial_id] = trial
trial = oracle.create_trial("tuner0")
# Check that oracle respects the `step` param.
assert trial.hyperparameters.get("c") in {0, 3, 6, 9}
def test_float_optimization(tmp_dir):
def build_model(hp):
# Maximum at a=-1, b=1, c=1, d=0 with score=3
return -1 * hp["a"] ** 3 + hp["b"] ** 3 + hp["c"] - abs(hp["d"])
class PolynomialTuner(kt.engine.base_tuner.BaseTuner):
def run_trial(self, trial):
hps = trial.hyperparameters
score = self.hypermodel.build(hps)
self.oracle.update_trial(trial.trial_id, {"score": score})
hps = hp_module.HyperParameters()
hps.Float("a", -1, 1)
hps.Float("b", -1, 1)
hps.Float("c", -1, 1)
hps.Float("d", -1, 1)
tuner = PolynomialTuner(
hypermodel=build_model,
oracle=kt.oracles.BayesianOptimization(
objective=kt.Objective("score", "max"),
hyperparameters=hps,
max_trials=50,
),
directory=tmp_dir,
)
tuner.search()
atol, rtol = 1e-1, 1e-1
best_trial = tuner.oracle.get_best_trials()[0]
best_hps = best_trial.hyperparameters
assert np.isclose(best_trial.score, 3, atol=atol, rtol=rtol)
assert np.isclose(best_hps["a"], -1, atol=atol, rtol=rtol)
assert np.isclose(best_hps["b"], 1, atol=atol, rtol=rtol)
assert np.isclose(best_hps["c"], 1, atol=atol, rtol=rtol)
assert np.isclose(best_hps["d"], 0, atol=atol, rtol=rtol)
def test_distributed_optimization(tmp_dir):
hps = hp_module.HyperParameters()
hps.Int("a", 0, 10)
hps.Float("b", -1, 1, step=0.1)
hps.Float("c", 1e-5, 1e-2, sampling="log")
def evaluate(hp):
# Minimum at a=4, b=1, c=1e-3 with score=-1
return abs(hp["a"] - 4) - hp["b"] + 0.1 * abs(3 + math.log(hp["c"], 10))
oracle = bo_module.BayesianOptimizationOracle(
objective=kt.Objective("score", "min"), hyperparameters=hps, max_trials=60
)
oracle._set_project_dir(tmp_dir, "untitled")
tuners = 4
for _ in range(10):
trials = []
for i in range(tuners):
trial = oracle.create_trial("tuner_" + str(i))
trials.append(trial)
for trial in trials:
oracle.update_trial(
trial.trial_id, {"score": evaluate(trial.hyperparameters)}
)
for trial in trials:
oracle.end_trial(trial.trial_id, "COMPLETED")
atol, rtol = 1e-1, 1e-1
best_trial = oracle.get_best_trials()[0]
best_hps = best_trial.hyperparameters
# The minimum is not always found but it is always close.
assert best_trial.score < -0.8, best_hps.values
assert np.isclose(best_hps["a"], 4, atol=atol, rtol=rtol)
assert np.isclose(best_hps["b"], 1, atol=atol, rtol=rtol)
# For log-scale param, just check that the order of magnitude is correct.
log_best_c = math.log(best_hps["c"], 10)
assert log_best_c > -4 and log_best_c < -2
| 32.774929 | 82 | 0.644385 | [
"Apache-2.0"
] | jpodivin/keras-tuner | tests/kerastuner/tuners/bayesian_test.py | 11,504 | Python |
import abc
from ... import errors, utils
from ...tl import types
class ChatGetter(abc.ABC):
"""
Helper base class that introduces the `chat`, `input_chat`
and `chat_id` properties and `get_chat` and `get_input_chat`
methods.
Subclasses **must** have the following private members: `_chat`,
`_input_chat`, `_chat_peer`, `_broadcast` and `_client`. As an end
user, you should not worry about this.
"""
def __init__(self):
self._chat = self._input_chat = self._chat_peer = \
self._client = self._broadcast = None
@property
def chat(self):
"""
Returns the :tl:`User`, :tl:`Chat` or :tl:`Channel` where this object
belongs to. It may be ``None`` if Telegram didn't send the chat.
If you're using `telethon.events`, use `get_chat` instead.
"""
return self._chat
async def get_chat(self):
"""
Returns `chat`, but will make an API call to find the
chat unless it's already cached.
"""
# See `get_sender` for information about 'min'.
if (self._chat is None or getattr(self._chat, 'min', None))\
and await self.get_input_chat():
try:
self._chat =\
await self._client.get_entity(self._input_chat)
except ValueError:
await self._refetch_chat()
return self._chat
@property
def input_chat(self):
"""
This :tl:`InputPeer` is the input version of the chat where the
        message was sent. Similarly to `input_sender`, this doesn't have
        things like the username, but is still useful in some cases.
Note that this might not be available if the library doesn't
have enough information available.
"""
if self._input_chat is None and self._chat_peer and self._client:
try:
self._input_chat = self._client._entity_cache[self._chat_peer]
except KeyError:
pass
return self._input_chat
async def get_input_chat(self):
"""
Returns `input_chat`, but will make an API call to find the
input chat unless it's already cached.
"""
if self.input_chat is None and self.chat_id and self._client:
try:
# The chat may be recent, look in dialogs
target = self.chat_id
async for d in self._client.iter_dialogs(100):
if d.id == target:
self._chat = d.entity
self._input_chat = d.input_entity
break
except errors.RPCError:
pass
return self._input_chat
@property
def chat_id(self):
"""
Returns the marked chat integer ID. Note that this value **will
be different** from `to_id` for incoming private messages, since
        the chat *to* which the messages go is your own user, but
        the *chat* itself is with the one who sent the message.
TL;DR; this gets the ID that you expect.
"""
return utils.get_peer_id(self._chat_peer) if self._chat_peer else None
@property
def is_private(self):
"""True if the message was sent as a private message."""
return isinstance(self._chat_peer, types.PeerUser)
@property
def is_group(self):
"""True if the message was sent on a group or megagroup."""
if self._broadcast is None and self.chat:
self._broadcast = getattr(self.chat, 'broadcast', None)
return (
isinstance(self._chat_peer, (types.PeerChat, types.PeerChannel))
and not self._broadcast
)
@property
def is_channel(self):
"""True if the message was sent on a megagroup or channel."""
return isinstance(self._chat_peer, types.PeerChannel)
async def _refetch_chat(self):
"""
Re-fetches chat information through other means.
"""
| 33.725 | 78 | 0.594514 | [
"MIT"
] | bb010g/Telethon | telethon/tl/custom/chatgetter.py | 4,047 | Python |